Add simple script to process results of diff experiments
cgnorthcutt committed Feb 2, 2020
1 parent 6c4007e commit bf32098
Showing 3 changed files with 264 additions and 0 deletions.
65 changes: 65 additions & 0 deletions other_methods/sceloss/confident_learning_benchmark_experiments.sh
@@ -0,0 +1,65 @@
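# Train SCELoss on each CIFAR-10 noisy-label benchmark setting.
# Each run gets its own folder named X_Y, meaning frac_zero_noise_rates = 0.X
# and noise_amount = 0.Y; training output is redirected to out_X_Y.log.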
mkdir -p ~/sceloss_results
cd ~/sceloss_results
mkdir 0_2
cd 0_2
CUDA_VISIBLE_DEVICES=0 python3 ~/Dropbox\ \(MIT\)/cgn/SCELoss-Reproduce/train.py --fn '/home/cgn/Dropbox (MIT)/cgn/cleanlab/examples/cifar10/cifar10/cifar10_noisy_labels/cifar10_noisy_labels__frac_zero_noise_rates__0.0__noise_amount__0.2.json' > out_0_2.log

cd ~/sceloss_results
mkdir 2_2
cd 2_2
CUDA_VISIBLE_DEVICES=1 python3 ~/Dropbox\ \(MIT\)/cgn/SCELoss-Reproduce/train.py --fn '/home/cgn/Dropbox (MIT)/cgn/cleanlab/examples/cifar10/cifar10/cifar10_noisy_labels/cifar10_noisy_labels__frac_zero_noise_rates__0.2__noise_amount__0.2.json' > out_2_2.log

cd ~/sceloss_results
mkdir 4_2
cd 4_2
CUDA_VISIBLE_DEVICES=2 python3 ~/Dropbox\ \(MIT\)/cgn/SCELoss-Reproduce/train.py --fn '/home/cgn/Dropbox (MIT)/cgn/cleanlab/examples/cifar10/cifar10/cifar10_noisy_labels/cifar10_noisy_labels__frac_zero_noise_rates__0.4__noise_amount__0.2.json' > out_4_2.log


cd ~/sceloss_results
mkdir 6_2
cd 6_2
CUDA_VISIBLE_DEVICES=0 python3 ~/Dropbox\ \(MIT\)/cgn/SCELoss-Reproduce/train.py --fn '/home/cgn/Dropbox (MIT)/cgn/cleanlab/examples/cifar10/cifar10/cifar10_noisy_labels/cifar10_noisy_labels__frac_zero_noise_rates__0.6__noise_amount__0.2.json' > out_6_2.log

cd ~/sceloss_results
mkdir 0_4
cd 0_4
CUDA_VISIBLE_DEVICES=1 python3 ~/Dropbox\ \(MIT\)/cgn/SCELoss-Reproduce/train.py --fn '/home/cgn/Dropbox (MIT)/cgn/cleanlab/examples/cifar10/cifar10/cifar10_noisy_labels/cifar10_noisy_labels__frac_zero_noise_rates__0.0__noise_amount__0.4.json' > out_0_4.log

cd ~/sceloss_results
mkdir 2_4
cd 2_4
CUDA_VISIBLE_DEVICES=2 python3 ~/Dropbox\ \(MIT\)/cgn/SCELoss-Reproduce/train.py --fn '/home/cgn/Dropbox (MIT)/cgn/cleanlab/examples/cifar10/cifar10/cifar10_noisy_labels/cifar10_noisy_labels__frac_zero_noise_rates__0.2__noise_amount__0.4.json' > out_2_4.log

cd ~/sceloss_results
mkdir 4_4
cd 4_4
CUDA_VISIBLE_DEVICES=3 python3 ~/Dropbox\ \(MIT\)/cgn/SCELoss-Reproduce/train.py --fn '/home/cgn/Dropbox (MIT)/cgn/cleanlab/examples/cifar10/cifar10/cifar10_noisy_labels/cifar10_noisy_labels__frac_zero_noise_rates__0.4__noise_amount__0.4.json' > out_4_4.log


cd ~/sceloss_results
mkdir 6_4
cd 6_4
CUDA_VISIBLE_DEVICES=0 python3 ~/Dropbox\ \(MIT\)/cgn/SCELoss-Reproduce/train.py --fn '/home/cgn/Dropbox (MIT)/cgn/cleanlab/examples/cifar10/cifar10/cifar10_noisy_labels/cifar10_noisy_labels__frac_zero_noise_rates__0.6__noise_amount__0.4.json' > out_6_4.log

cd ~/sceloss_results
mkdir 0_6
cd 0_6
CUDA_VISIBLE_DEVICES=1 python3 ~/Dropbox\ \(MIT\)/cgn/SCELoss-Reproduce/train.py --fn '/home/cgn/Dropbox (MIT)/cgn/cleanlab/examples/cifar10/cifar10/cifar10_noisy_labels/cifar10_noisy_labels__frac_zero_noise_rates__0.0__noise_amount__0.6.json' > out_0_6.log

cd ~/sceloss_results
mkdir 2_6
cd 2_6
CUDA_VISIBLE_DEVICES=2 python3 ~/Dropbox\ \(MIT\)/cgn/SCELoss-Reproduce/train.py --fn '/home/cgn/Dropbox (MIT)/cgn/cleanlab/examples/cifar10/cifar10/cifar10_noisy_labels/cifar10_noisy_labels__frac_zero_noise_rates__0.2__noise_amount__0.6.json' > out_2_6.log

cd ~/sceloss_results
mkdir 4_6
cd 4_6
CUDA_VISIBLE_DEVICES=3 python3 ~/Dropbox\ \(MIT\)/cgn/SCELoss-Reproduce/train.py --fn '/home/cgn/Dropbox (MIT)/cgn/cleanlab/examples/cifar10/cifar10/cifar10_noisy_labels/cifar10_noisy_labels__frac_zero_noise_rates__0.4__noise_amount__0.6.json' > out_4_6.log



cd ~/sceloss_results
mkdir 6_6
cd 6_6
CUDA_VISIBLE_DEVICES=0 python3 ~/Dropbox\ \(MIT\)/cgn/SCELoss-Reproduce/train.py --fn '/home/cgn/Dropbox (MIT)/cgn/cleanlab/examples/cifar10/cifar10/cifar10_noisy_labels/cifar10_noisy_labels__frac_zero_noise_rates__0.6__noise_amount__0.6.json' > out_6_6.log
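
The 12 training blocks above differ only in the two noise parameters and the GPU index. The short Python sketch below shows how the same commands could be generated programmatically; it is illustrative only (not part of this commit), it prints the commands rather than running them, and the round-robin GPU assignment is an assumption since the script above pins devices by hand.

# Illustrative sketch: regenerate the training commands above from the noise grid.
from itertools import product

train_py = "~/Dropbox\\ \\(MIT\\)/cgn/SCELoss-Reproduce/train.py"
label_dir = ("/home/cgn/Dropbox (MIT)/cgn/cleanlab/examples/cifar10/cifar10/"
             "cifar10_noisy_labels")
noise_amounts = [0.2, 0.4, 0.6]
frac_zero_rates = [0.0, 0.2, 0.4, 0.6]

for gpu, (noise, frac) in enumerate(product(noise_amounts, frac_zero_rates)):
    tag = "{}_{}".format(int(round(frac * 10)), int(round(noise * 10)))
    fn = "{}/cifar10_noisy_labels__frac_zero_noise_rates__{}__noise_amount__{}.json".format(
        label_dir, frac, noise)
    print("mkdir -p ~/sceloss_results/{0} && cd ~/sceloss_results/{0} && "
          "CUDA_VISIBLE_DEVICES={1} python3 {2} --fn '{3}' > out_{0}.log".format(
              tag, gpu % 4, train_py, fn))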

142 changes: 142 additions & 0 deletions other_methods/sceloss/sceloss_benchmark_results.ipynb
@@ -0,0 +1,142 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Use this to read in the scores for SCELoss. take the max score of both models for the fairest comparison."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"def get_scores(filename):\n",
" with open(filename, 'r') as f:\n",
" results = f.readlines()[-6:-2]\n",
" acc1 = float(results[0].split(\"\\t\")[-1].strip())\n",
" acc1best = float(results[1].split(\"\\t\")[-1].strip())\n",
" acc5 = float(results[2].split(\"\\t\")[-1].strip())\n",
" acc5best = float(results[3].split(\"\\t\")[-1].strip())\n",
" return {\n",
" 'acc1': acc1,\n",
" 'acc1best': acc1best,\n",
" 'acc5': acc5,\n",
" 'acc5best': acc5best,\n",
" }"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"0_4: 0.7633\n",
"2_2: 0.8753\n",
"6_2: 0.8435\n",
"0_2: 0.8718\n",
"2_4: 0.741\n",
"4_4: 0.6488\n",
"2_6: 0.2866\n",
"4_6: 0.3086\n",
"4_2: 0.8878\n",
"0_6: 0.3304\n",
"6_4: 0.5827\n",
"6_6: 0.2402\n"
]
}
],
"source": [
"basedir = '/home/cgn/sceloss_results/'\n",
"for f in [f for f in os.listdir(basedir) if '_' in f]:\n",
" print(f, end=': ')\n",
" result = get_scores(basedir + f +\"/out_{}.log\".format(f))\n",
" print(result['acc1'])\n",
"# model1_score = float(result.split('Model1')[-1][:8])\n",
"# model2_score = float(result.split('Model2')[-1][:8])\n",
"# score = max(model1_score, model2_score)\n",
"# print(score)"
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"0_4: 0.7695\n",
"2_2: 0.8811\n",
"6_2: 0.8435\n",
"0_2: 0.8741\n",
"2_4: 0.7415\n",
"4_4: 0.6571\n",
"2_6: 0.2969\n",
"4_6: 0.3044\n",
"4_2: 0.8895\n",
"0_6: 0.3317\n",
"6_4: 0.5823\n",
"6_6: 0.2443\n"
]
}
],
"source": [
"basedir = '/home/cgn/sceloss_results/'\n",
"for f in [f for f in os.listdir(basedir) if '_' in f]:\n",
" print(f, end=': ')\n",
" result = get_scores(basedir + f +\"/out_{}.log\".format(f))\n",
" print(result['acc1'])\n",
"# model1_score = float(result.split('Model1')[-1][:8])\n",
"# model2_score = float(result.split('Model2')[-1][:8])\n",
"# score = max(model1_score, model2_score)\n",
"# print(score)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.3"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
57 changes: 57 additions & 0 deletions other_methods/sceloss/sceloss_benchmark_results.py
@@ -0,0 +1,57 @@

# coding: utf-8

# ## Use this to read in the scores for SCELoss. Take the max score of both models for the fairest comparison.

# In[1]:


import os
import numpy as np


# In[3]:


def get_scores(filename):
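    # Parse the tail of a training log: the last summary lines hold, in order,
    # final top-1, best top-1, final top-5, and best top-5 accuracy as the
    # last tab-separated field of each line.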
with open(filename, 'r') as f:
results = f.readlines()[-6:-2]
acc1 = float(results[0].split("\t")[-1].strip())
acc1best = float(results[1].split("\t")[-1].strip())
acc5 = float(results[2].split("\t")[-1].strip())
acc5best = float(results[3].split("\t")[-1].strip())
return {
'acc1': acc1,
'acc1best': acc1best,
'acc5': acc5,
'acc5best': acc5best,
}


# In[4]:


basedir = '/home/cgn/sceloss_results/'
for f in [f for f in os.listdir(basedir) if '_' in f]:
print(f, end=': ')
result = get_scores(basedir + f +"/out_{}.log".format(f))
print(result['acc1'])
# model1_score = float(result.split('Model1')[-1][:8])
# model2_score = float(result.split('Model2')[-1][:8])
# score = max(model1_score, model2_score)
# print(score)


# In[22]:


basedir = '/home/cgn/sceloss_results/'
for f in [f for f in os.listdir(basedir) if '_' in f]:
print(f, end=': ')
result = get_scores(basedir + f +"/out_{}.log".format(f))
print(result['acc1'])
# model1_score = float(result.split('Model1')[-1][:8])
# model2_score = float(result.split('Model2')[-1][:8])
# score = max(model1_score, model2_score)
# print(score)
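
A minimal follow-up sketch (not part of this commit) of how the per-run logs could be collected into one summary table, recovering the noise settings from the X_Y folder names used by the shell script; reporting acc1best as the headline number is an assumption.

# Sketch: summarize all runs in one table. Assumes get_scores() above and the
# out_X_Y.log files written by the benchmark shell script.
def summarize(basedir='/home/cgn/sceloss_results/'):
    rows = []
    for f in sorted(d for d in os.listdir(basedir) if '_' in d):
        frac, noise = (int(c) / 10.0 for c in f.split('_'))
        scores = get_scores(os.path.join(basedir, f, 'out_{}.log'.format(f)))
        rows.append((frac, noise, scores['acc1best']))
    print('frac_zero_noise_rates  noise_amount  best top-1 acc')
    for frac, noise, acc in sorted(rows):
        print('{:>21.1f}  {:>12.1f}  {:>14.4f}'.format(frac, noise, acc))

# summarize()  # uncomment after the training runs above have finished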
