In [188]:
import pandas as pd
import os
import json
from collections import OrderedDict

In [189]:
# Map experiment-directory keys (runs 0-7) to human-readable column labels.
# Build the OrderedDict from a list of (key, value) tuples: passing a dict
# literal to OrderedDict does not guarantee insertion order on Python < 3.7,
# which would silently defeat the point of using an OrderedDict at all.
mapping = OrderedDict([
    ('0_model=BaseModel,network=resnet152,percent_on_k_winner=0.25', 'Resnet-Dense-Kwinners'),
    ('1_model=SparseModel,network=resnet152,percent_on_k_winner=0.25', 'Resnet-Sparse-Kwinners'),
    ('2_model=BaseModel,network=WideResNet,percent_on_k_winner=0.25', 'WideResnet-Dense-Kwinners'),
    ('3_model=SparseModel,network=WideResNet,percent_on_k_winner=0.25', 'WideResnet-Sparse-Kwinners'),
    ('4_model=BaseModel,network=resnet152,percent_on_k_winner=1', 'Resnet-Dense-ReLU'),
    ('5_model=SparseModel,network=resnet152,percent_on_k_winner=1', 'Resnet-Sparse-ReLU'),
    ('6_model=BaseModel,network=WideResNet,percent_on_k_winner=1', 'WideResnet-Dense-ReLU'),
    ('7_model=SparseModel,network=WideResNet,percent_on_k_winner=1', 'WideResnet-Sparse-ReLU'),
])

Run 1


In [182]:
file = os.path.expanduser("~/nta/results/resnet_cifar2/noise_results.json")
with open(file, 'r') as f:
    noise_results = json.load(f)

mapped_results = {}
for k,v in noise_results.items():
    new_k = mapping[k]
    mapped_results[new_k] = v
  
df = pd.DataFrame.from_dict(mapped_results)
df = df.drop('0.2')
df.columns


Out[182]:
Index(['WideResnet-Dense-Kwinners', 'Resnet-Dense-ReLU',
       'WideResnet-Sparse-Kwinners', 'Resnet-Sparse-Kwinners',
       'WideResnet-Sparse-ReLU', 'Resnet-Dense-Kwinners',
       'WideResnet-Dense-ReLU', 'Resnet-Sparse-ReLU'],
      dtype='object')

In [183]:
# paper comparison table
(df[['Resnet-Dense-ReLU', 'Resnet-Sparse-Kwinners', 'WideResnet-Dense-ReLU', 'WideResnet-Sparse-Kwinners']]*100).round(2)


Out[183]:
Resnet-Dense-ReLU Resnet-Sparse-Kwinners WideResnet-Dense-ReLU WideResnet-Sparse-Kwinners
0 93.41 91.92 94.76 94.14
0.025 87.20 86.60 85.41 85.96
0.05 78.52 77.98 74.44 74.76
0.075 66.61 67.99 61.18 62.44
0.1 54.88 57.37 49.03 51.22
0.125 44.84 47.29 39.22 42.31
0.15 36.66 38.73 31.87 35.24
0.175 30.71 31.67 26.23 30.28

In [184]:
# dense weights only
(df[['Resnet-Dense-ReLU', 'Resnet-Dense-Kwinners', 'WideResnet-Dense-ReLU', 'WideResnet-Dense-Kwinners']]*100).round(2)


Out[184]:
Resnet-Dense-ReLU Resnet-Dense-Kwinners WideResnet-Dense-ReLU WideResnet-Dense-Kwinners
0 93.41 93.39 94.76 94.62
0.025 87.20 88.16 85.41 85.68
0.05 78.52 79.92 74.44 75.11
0.075 66.61 69.09 61.18 62.61
0.1 54.88 57.89 49.03 49.31
0.125 44.84 48.14 39.22 38.17
0.15 36.66 39.90 31.87 30.23
0.175 30.71 33.72 26.23 25.00

In [185]:
# sparse weights only
(df[['Resnet-Sparse-ReLU', 'Resnet-Sparse-Kwinners', 'WideResnet-Sparse-ReLU', 'WideResnet-Sparse-Kwinners']]*100).round(2)


Out[185]:
Resnet-Sparse-ReLU Resnet-Sparse-Kwinners WideResnet-Sparse-ReLU WideResnet-Sparse-Kwinners
0 92.97 91.92 94.22 94.14
0.025 88.02 86.60 85.10 85.96
0.05 80.20 77.98 74.05 74.76
0.075 71.08 67.99 62.11 62.44
0.1 62.10 57.37 51.57 51.22
0.125 54.31 47.29 42.04 42.31
0.15 47.39 38.73 35.43 35.24
0.175 42.03 31.67 30.17 30.28

In [186]:
# relu only
(df[['Resnet-Dense-ReLU', 'Resnet-Sparse-ReLU', 'WideResnet-Dense-ReLU', 'WideResnet-Sparse-ReLU']]*100).round(2)


Out[186]:
Resnet-Dense-ReLU Resnet-Sparse-ReLU WideResnet-Dense-ReLU WideResnet-Sparse-ReLU
0 93.41 92.97 94.76 94.22
0.025 87.20 88.02 85.41 85.10
0.05 78.52 80.20 74.44 74.05
0.075 66.61 71.08 61.18 62.11
0.1 54.88 62.10 49.03 51.57
0.125 44.84 54.31 39.22 42.04
0.15 36.66 47.39 31.87 35.43
0.175 30.71 42.03 26.23 30.17

In [187]:
# kwinners only
(df[['Resnet-Dense-Kwinners', 'Resnet-Sparse-Kwinners', 'WideResnet-Dense-Kwinners', 'WideResnet-Sparse-Kwinners']]*100).round(2)


Out[187]:
Resnet-Dense-Kwinners Resnet-Sparse-Kwinners WideResnet-Dense-Kwinners WideResnet-Sparse-Kwinners
0 93.39 91.92 94.62 94.14
0.025 88.16 86.60 85.68 85.96
0.05 79.92 77.98 75.11 74.76
0.075 69.09 67.99 62.61 62.44
0.1 57.89 57.37 49.31 51.22
0.125 48.14 47.29 38.17 42.31
0.15 39.90 38.73 30.23 35.24
0.175 33.72 31.67 25.00 30.28

Run 2


In [165]:
mapping = OrderedDict({    
    'Trainable_0_model=BaseModel,network=resnet152,percent_on_k_winner=0.25_2019-10-18_22-51-438_axbl42': 'Resnet-Dense-Kwinners',
    'Trainable_1_model=SparseModel,network=resnet152,percent_on_k_winner=0.25_2019-10-18_22-51-450nnmjnv4': 'Resnet-Sparse-Kwinners',
    'Trainable_2_model=BaseModel,network=WideResNet,percent_on_k_winner=0.25_2019-10-18_22-51-45w75kg1i1': 'WideResnet-Dense-Kwinners',
    'Trainable_3_model=SparseModel,network=WideResNet,percent_on_k_winner=0.25_2019-10-18_22-51-45ncthm6wh': 'WideResnet-Sparse-Kwinners',
    'Trainable_4_model=BaseModel,network=resnet152,percent_on_k_winner=1_2019-10-18_22-51-45tbdn5tcn': 'Resnet-Dense-ReLU',
    'Trainable_5_model=SparseModel,network=resnet152,percent_on_k_winner=1_2019-10-18_22-51-45l5vvkizn': 'Resnet-Sparse-ReLU',
    'Trainable_6_model=BaseModel,network=WideResNet,percent_on_k_winner=1_2019-10-18_22-51-45gcbcbn3z': 'WideResnet-Dense-ReLU',
    'Trainable_7_model=SparseModel,network=WideResNet,percent_on_k_winner=1_2019-10-18_22-51-451rb5spc0': 'WideResnet-Sparse-ReLU',
})

In [166]:
file = os.path.expanduser("~/nta/results/resnet_cifar1/noise_results.json")
with open(file, 'r') as f:
  noise_results = json.load(f)

mapped_results = {}
for k,v in noise_results.items():
  new_k = mapping[k]
  mapped_results[new_k] = v
  
df = pd.DataFrame.from_dict(mapped_results)
df = df.drop('0.2')
df.columns


Out[166]:
Index(['Resnet-Dense-ReLU', 'WideResnet-Sparse-ReLU', 'WideResnet-Dense-ReLU',
       'Resnet-Sparse-Kwinners', 'WideResnet-Dense-Kwinners',
       'Resnet-Sparse-ReLU', 'Resnet-Dense-Kwinners',
       'WideResnet-Sparse-Kwinners'],
      dtype='object')

In [167]:
# paper comparison table
df[['Resnet-Dense-ReLU', 'Resnet-Sparse-Kwinners', 'WideResnet-Dense-ReLU', 'WideResnet-Sparse-Kwinners']]*100


Out[167]:
Resnet-Dense-ReLU Resnet-Sparse-Kwinners WideResnet-Dense-ReLU WideResnet-Sparse-Kwinners
0 94.54 92.17 95.02 93.75
0.025 88.68 85.44 85.35 85.48
0.05 79.13 75.22 74.53 73.96
0.075 66.38 63.27 61.85 61.33
0.1 53.81 52.42 48.97 47.61
0.125 43.23 43.42 39.11 36.12
0.15 35.42 36.47 31.02 27.87
0.175 30.39 31.47 26.26 22.65

In [ ]: