In [1]:
# Reload edited local modules automatically before each cell execution,
# so changes to dynamic_sparse/... are picked up without restarting the kernel.
%load_ext autoreload
%autoreload 2
In [2]:
# Make the repository root importable (this notebook lives two levels deep).
import sys
sys.path.append("../../")
In [28]:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import glob
import tabulate
import pprint
import click
import numpy as np
import pandas as pd
from ray.tune.commands import *
from dynamic_sparse.common.browser import *
In [29]:
# Experiments to analyze; each has a results directory under ~/nta/results/.
exps = ['neurips_1_eval1', ]
paths = []
for experiment in exps:
    paths.append(os.path.expanduser("~/nta/results/{}".format(experiment)))
# Load all trials from every experiment into a single DataFrame.
df = load_many(paths)
In [30]:
# Peek at the first few loaded trials.
df.head(5)
Out[30]:
In [31]:
# Hyperparameter / metric columns collected by load_many.
df.columns
Out[31]:
In [32]:
# (rows, columns) — presumably one row per trial; verify against load_many.
df.shape
Out[32]:
In [33]:
# Inspect a single trial record in full.
df.iloc[1]
Out[33]:
In [39]:
# Number of trials per model type.
df.groupby('model')['model'].count()
Out[39]:
Experiment Details
In [40]:
# Did any trials fail (i.e. stop before completing 30 epochs)?
df.loc[df["epochs"] < 30, "epochs"].count()
Out[40]:
In [41]:
# Remove failed trials: keep a full copy in df_origin, then restrict df
# to runs that completed at least 30 epochs.
df_origin = df.copy()
df = df_origin.loc[df_origin["epochs"] >= 30]
df.shape
Out[41]:
In [42]:
# Which trials failed — or are they perhaps still running?
# NOTE(review): 'failed' is defined as < 100 epochs here, while the filter
# above used a 30-epoch cutoff — confirm which epoch budget these runs had.
df_origin['failed'] = df_origin["epochs"] < 100
df_origin.loc[df_origin['failed'], 'epochs']
Out[42]:
In [43]:
# helper functions
def mean_and_std(s):
    """Format a numeric series as 'mean ± std' with 3 decimal places."""
    return "{:.3f} ± {:.3f}".format(s.mean(), s.std())
def round_mean(s):
    """Format the mean of a numeric series rounded to the nearest integer."""
    return "{:.0f}".format(round(s.mean()))
# Summary statistics reported for val_acc_max in every table below.
stats = ['min', 'max', 'mean', 'std']
def agg(columns, filter=None, round=3, data=None):
    """Group trials and summarize validation accuracy per group.

    Args:
        columns: column name(s) to group by.
        filter: optional boolean mask selecting rows before grouping.
        round: decimal places for the resulting table. (`filter` and
            `round` shadow builtins; names kept for backward compatibility.)
        data: DataFrame to aggregate; defaults to the notebook-global `df`.

    Returns:
        DataFrame with the mean epoch of best accuracy, accuracy stats,
        and trial counts for each group.
    """
    base = df if data is None else data
    # Apply the optional row mask once, instead of duplicating the
    # aggregation spec across two branches.
    if filter is not None:
        base = base[filter]
    return (base.groupby(columns)
                .agg({'val_acc_max_epoch': round_mean,
                      'val_acc_max': stats,
                      'model': ['count']})).round(round)
In [44]:
# Accuracy summary per model type.
agg(['model'])
Out[44]:
In [12]:
# Boolean masks over `df` by weight density (`on_perc`):
# 5%, 10%, and 20% of weights active, respectively.
high_sparsity = (df['on_perc']==0.05)
avg_sparsity = (df['on_perc']==0.1)
low_sparsity = (df['on_perc']==0.2)
In [13]:
# Effect of k-winners activation at low sparsity.
agg(['kwinners'], low_sparsity)
Out[13]:
In [14]:
# Effect of k-winners activation at high sparsity.
agg(['kwinners'], high_sparsity)
Out[14]:
In [15]:
# Accuracy by weight density (on_perc) across all trials.
agg(['on_perc'])
Out[15]:
In [16]:
# Effect of hebbian growth, all sparsity levels pooled.
agg(['hebbian_grow'])
Out[16]:
In [17]:
# Effect of hebbian growth at low sparsity.
agg(['hebbian_grow'], low_sparsity)
Out[17]:
In [18]:
# Effect of hebbian growth at high sparsity.
agg(['hebbian_grow'], high_sparsity)
Out[18]:
In [19]:
# Effect of hebbian pruning rate, all sparsity levels pooled.
agg(['hebbian_prune_perc'])
Out[19]:
In [20]:
# Effect of hebbian pruning rate at low sparsity.
agg(['hebbian_prune_perc'], low_sparsity)
Out[20]:
In [21]:
# Effect of hebbian pruning rate at high sparsity.
agg(['hebbian_prune_perc'], high_sparsity)
Out[21]:
In [22]:
# Hebbian pruning in isolation: restrict to trials with no magnitude pruning.
no_magnitude = (df['weight_prune_perc'] == 0)
agg(['hebbian_prune_perc'], no_magnitude)
Out[22]:
In [23]:
# Same mask again (duplicate of the cell above) combined with low sparsity.
no_magnitude = (df['weight_prune_perc'] == 0)
agg(['hebbian_prune_perc'], (no_magnitude & low_sparsity))
Out[23]:
In [24]:
# Effect of magnitude (weight) pruning rate, all sparsity levels pooled.
agg(['weight_prune_perc'])
Out[24]:
In [25]:
# Magnitude pruning at low sparsity.
agg(['weight_prune_perc'], low_sparsity)
Out[25]:
In [26]:
# Magnitude pruning at high sparsity.
agg(['weight_prune_perc'], high_sparsity)
Out[26]:
In [27]:
# Magnitude pruning at average sparsity.
agg(['weight_prune_perc'], avg_sparsity)
Out[27]:
In [28]:
# Magnitude pruning in isolation: restrict to trials with no hebbian pruning.
no_hebbian = (df['hebbian_prune_perc'] == 0)
agg(['weight_prune_perc'], no_hebbian)
Out[28]:
In [29]:
# Best accuracy (mean ± std) for every hebbian × magnitude pruning combination.
df.pivot_table(index='hebbian_prune_perc',
               columns='weight_prune_perc',
               values='val_acc_max',
               aggfunc=mean_and_std)
Out[29]:
In [40]:
# Same pivot at low sparsity, additionally split by k-winners usage.
df[low_sparsity].pivot_table(index=['kwinners', 'hebbian_prune_perc'],
                             columns='weight_prune_perc',
                             values='val_acc_max',
                             aggfunc=mean_and_std)
Out[40]:
In [41]:
# Same pivot at average sparsity, additionally split by k-winners usage.
df[avg_sparsity].pivot_table(index=['kwinners', 'hebbian_prune_perc'],
                             columns='weight_prune_perc',
                             values='val_acc_max',
                             aggfunc=mean_and_std)
Out[41]:
In [39]:
# Hebbian × magnitude pruning grid at average sparsity.
df[avg_sparsity].pivot_table(index='hebbian_prune_perc',
                             columns='weight_prune_perc',
                             values='val_acc_max',
                             aggfunc=mean_and_std)
Out[39]:
In [31]:
# Hebbian × magnitude pruning grid at high sparsity.
df[high_sparsity].pivot_table(index='hebbian_prune_perc',
                              columns='weight_prune_perc',
                              values='val_acc_max',
                              aggfunc=mean_and_std)
Out[31]:
In [ ]: