In [188]:
import pandas as pd
import os
import json
from collections import OrderedDict
In [189]:
# Map raw experiment-config keys (as they appear in noise_results.json)
# to short human-readable column labels. Built from a list of pairs so
# insertion order is explicit and guaranteed.
mapping = OrderedDict([
    ('0_model=BaseModel,network=resnet152,percent_on_k_winner=0.25', 'Resnet-Dense-Kwinners'),
    ('1_model=SparseModel,network=resnet152,percent_on_k_winner=0.25', 'Resnet-Sparse-Kwinners'),
    ('2_model=BaseModel,network=WideResNet,percent_on_k_winner=0.25', 'WideResnet-Dense-Kwinners'),
    ('3_model=SparseModel,network=WideResNet,percent_on_k_winner=0.25', 'WideResnet-Sparse-Kwinners'),
    ('4_model=BaseModel,network=resnet152,percent_on_k_winner=1', 'Resnet-Dense-ReLU'),
    ('5_model=SparseModel,network=resnet152,percent_on_k_winner=1', 'Resnet-Sparse-ReLU'),
    ('6_model=BaseModel,network=WideResNet,percent_on_k_winner=1', 'WideResnet-Dense-ReLU'),
    ('7_model=SparseModel,network=WideResNet,percent_on_k_winner=1', 'WideResnet-Sparse-ReLU'),
])
In [182]:
# Load noise robustness results and relabel experiment keys with the
# readable names from `mapping` (defined in the previous cell).
file = os.path.expanduser("~/nta/results/resnet_cifar2/noise_results.json")
with open(file, 'r') as f:
    noise_results = json.load(f)

# Dict comprehension replaces the manual accumulation loop. A KeyError
# here means the results file contains an experiment key missing from
# `mapping` — better to fail loudly than to silently mislabel columns.
mapped_results = {mapping[k]: v for k, v in noise_results.items()}

df = pd.DataFrame.from_dict(mapped_results)
# Drop the row labeled '0.2' (presumably a noise level excluded from the
# tables below — TODO confirm against the experiment config).
df = df.drop('0.2')
df.columns
Out[182]:
In [183]:
# paper comparison table: dense-ReLU baselines vs. sparse-KWinners
# variants, scaled to percentages and rounded to 2 decimals
df[[
    'Resnet-Dense-ReLU',
    'Resnet-Sparse-Kwinners',
    'WideResnet-Dense-ReLU',
    'WideResnet-Sparse-Kwinners',
]].mul(100).round(2)
Out[183]:
In [184]:
# dense weights only: ReLU vs. KWinners activations on both networks,
# scaled to percentages and rounded to 2 decimals
df[[
    'Resnet-Dense-ReLU',
    'Resnet-Dense-Kwinners',
    'WideResnet-Dense-ReLU',
    'WideResnet-Dense-Kwinners',
]].mul(100).round(2)
Out[184]:
In [185]:
# sparse weights only: ReLU vs. KWinners activations on both networks,
# scaled to percentages and rounded to 2 decimals
df[[
    'Resnet-Sparse-ReLU',
    'Resnet-Sparse-Kwinners',
    'WideResnet-Sparse-ReLU',
    'WideResnet-Sparse-Kwinners',
]].mul(100).round(2)
Out[185]:
In [186]:
# relu only: dense vs. sparse weights on both networks,
# scaled to percentages and rounded to 2 decimals
df[[
    'Resnet-Dense-ReLU',
    'Resnet-Sparse-ReLU',
    'WideResnet-Dense-ReLU',
    'WideResnet-Sparse-ReLU',
]].mul(100).round(2)
Out[186]:
In [187]:
# kwinners only: dense vs. sparse weights on both networks,
# scaled to percentages and rounded to 2 decimals
df[[
    'Resnet-Dense-Kwinners',
    'Resnet-Sparse-Kwinners',
    'WideResnet-Dense-Kwinners',
    'WideResnet-Sparse-Kwinners',
]].mul(100).round(2)
Out[187]:
In [165]:
# Same relabeling as above, but for the resnet_cifar1 run, whose keys
# carry Ray Tune 'Trainable_' prefixes and timestamp/hash suffixes.
# Built from a list of pairs so insertion order is explicit.
mapping = OrderedDict([
    ('Trainable_0_model=BaseModel,network=resnet152,percent_on_k_winner=0.25_2019-10-18_22-51-438_axbl42', 'Resnet-Dense-Kwinners'),
    ('Trainable_1_model=SparseModel,network=resnet152,percent_on_k_winner=0.25_2019-10-18_22-51-450nnmjnv4', 'Resnet-Sparse-Kwinners'),
    ('Trainable_2_model=BaseModel,network=WideResNet,percent_on_k_winner=0.25_2019-10-18_22-51-45w75kg1i1', 'WideResnet-Dense-Kwinners'),
    ('Trainable_3_model=SparseModel,network=WideResNet,percent_on_k_winner=0.25_2019-10-18_22-51-45ncthm6wh', 'WideResnet-Sparse-Kwinners'),
    ('Trainable_4_model=BaseModel,network=resnet152,percent_on_k_winner=1_2019-10-18_22-51-45tbdn5tcn', 'Resnet-Dense-ReLU'),
    ('Trainable_5_model=SparseModel,network=resnet152,percent_on_k_winner=1_2019-10-18_22-51-45l5vvkizn', 'Resnet-Sparse-ReLU'),
    ('Trainable_6_model=BaseModel,network=WideResNet,percent_on_k_winner=1_2019-10-18_22-51-45gcbcbn3z', 'WideResnet-Dense-ReLU'),
    ('Trainable_7_model=SparseModel,network=WideResNet,percent_on_k_winner=1_2019-10-18_22-51-451rb5spc0', 'WideResnet-Sparse-ReLU'),
])
In [166]:
# Load the resnet_cifar1 noise results and relabel experiment keys with
# the readable names from `mapping` (defined in the previous cell).
file = os.path.expanduser("~/nta/results/resnet_cifar1/noise_results.json")
with open(file, 'r') as f:
    noise_results = json.load(f)

# Dict comprehension replaces the manual accumulation loop. A KeyError
# here means the results file contains an experiment key missing from
# `mapping` — better to fail loudly than to silently mislabel columns.
mapped_results = {mapping[k]: v for k, v in noise_results.items()}

df = pd.DataFrame.from_dict(mapped_results)
# Drop the row labeled '0.2' (presumably a noise level excluded from the
# table below — TODO confirm against the experiment config).
df = df.drop('0.2')
df.columns
Out[166]:
In [167]:
# paper comparison table (resnet_cifar1 run), scaled to percentages.
# Rounded to 2 decimals for consistency with the identical table built
# for the resnet_cifar2 run earlier in this notebook.
(df[['Resnet-Dense-ReLU', 'Resnet-Sparse-Kwinners', 'WideResnet-Dense-ReLU', 'WideResnet-Sparse-Kwinners']]*100).round(2)
Out[167]:
In [ ]: