Run Hebbian pruning with non-binary activations.
Attempt pruning based on the intuition offered in the "Memory Aware Synapses" paper:
* Weights with higher coactivations, computed as $x_i \times x_j$,
have a greater effect on the L2 norm of the layer's output. Here $x_i$ and $x_j$ are
the input and output activations, respectively.
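For a rough, self-contained sketch of that intuition (illustrative only, not the `nupic.research` code analyzed below): for a linear layer with output $x_j = \sum_i w_{ji} x_i$, the derivative of the squared L2 norm of the output with respect to $w_{ji}$ is $2\, x_j x_i$, so weights with small absolute coactivation contribute least to the output norm and are candidates for pruning. The layer sizes and prune count below are arbitrary placeholders.

```python
import numpy as np

rng = np.random.default_rng(0)

# Hypothetical linear layer (sizes are arbitrary placeholders).
n_in, n_out = 8, 4
W = rng.normal(size=(n_out, n_in))
x_in = rng.normal(size=n_in)   # input activations x_i
x_out = W @ x_in               # output activations x_j

# Coactivation of weight w_ji is x_j * x_i; for this layer
# d(||x_out||^2) / dw_ji = 2 * x_out[j] * x_in[i], so a larger |coactivation|
# means a larger effect on the L2 norm of the layer's output.
coactivation = np.outer(x_out, x_in)   # entry (j, i) = x_out[j] * x_in[i]

# Hebbian-style pruning sketch: zero out the k weights with the
# smallest absolute coactivation.
k = 10
keep = np.ones(W.size, dtype=bool)
keep[np.argsort(np.abs(coactivation).ravel())[:k]] = False
W_pruned = np.where(keep.reshape(W.shape), W, 0.0)
```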
In [1]:
from IPython.display import Markdown, display
%load_ext autoreload
%autoreload 2
In [19]:
import sys
import itertools
sys.path.append("../../")
In [3]:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import glob
import tabulate
import pprint
import click
import numpy as np
import pandas as pd
from ray.tune.commands import *
from nupic.research.frameworks.dynamic_sparse.common.browser import *
In [10]:
base = 'gsc-trials-2019-10-07'
exp_names = [
    'gsc-BaseModel',
    'gsc-Static',
    'gsc-Heb-nonbinary',
    'gsc-WeightedMag-nonbinary',
    'gsc-WeightedMag',
    'gsc-SET',
]
exps = [
    os.path.join(base, exp) for exp in exp_names
]
paths = [os.path.expanduser("~/nta/results/{}".format(e)) for e in exps]
for p in paths:
    print(os.path.exists(p), p)
df = load_many(paths)
In [11]:
# remove nans where appropriate
df['hebbian_prune_perc'] = df['hebbian_prune_perc'].replace(np.nan, 0.0, regex=True)
df['weight_prune_perc'] = df['weight_prune_perc'].replace(np.nan, 0.0, regex=True)
# extract scalar values from combined config strings
df['on_perc'] = df['on_perc'].replace('None-None-0.1-None', 0.1, regex=True)
df['on_perc'] = df['on_perc'].replace('None-None-0.4-None', 0.4, regex=True)
df['on_perc'] = df['on_perc'].replace('None-None-0.02-None', 0.02, regex=True)
df['prune_methods'] = df['prune_methods'].replace('None-None-dynamic-linear-None', 'dynamic-linear', regex=True)
In [186]:
# def model_name(row):
# col = 'Experiment Name'
# for exp in exp_names:
# if exp in row[col]:
# return exp
# # if row[col] == 'DSNNWeightedMag':
# # return 'DSNN-WM'
# # elif row[col] == 'DSNNMixedHeb':
# # if row['hebbian_prune_perc'] == 0.3:
# # return 'SET'
# # elif row['weight_prune_perc'] == 0.3:
# # return 'DSNN-Heb'
# # elif row[col] == 'SparseModel':
# # return 'Static'
# assert False, "This should cover all cases. Got {}".format(row[col])
# df['model2'] = df.apply(model_name, axis=1)
In [169]:
df.iloc[34]
Out[169]:
In [13]:
df.groupby('experiment_base_path')['experiment_base_path'].count()
Out[13]:
In [170]:
# Did anything fail?
df[df["epochs"] < 30]["epochs"].count()
Out[170]:
In [15]:
# helper functions
def mean_and_std(s):
    return "{:.3f} ± {:.3f}".format(s.mean(), s.std())

def round_mean(s):
    return "{:.0f}".format(round(s.mean()))

stats = ['min', 'max', 'mean', 'std']

def agg(columns, filter=None, round=3):
    if filter is None:
        return (df.groupby(columns)
                .agg({'val_acc_max_epoch': round_mean,
                      'val_acc_max': stats,
                      'model': ['count']})).round(round)
    else:
        return (df[filter].groupby(columns)
                .agg({'val_acc_max_epoch': round_mean,
                      'val_acc_max': stats,
                      'model': ['count']})).round(round)
In [143]:
type(np.nan)
Out[143]:
In [150]:
df['on_perc'][0] is np.nan
Out[150]:
In [16]:
fltr = (df['experiment_base_path'] == 'gsc-BaseModel')
agg(['model'], fltr)
Out[16]:
In [17]:
# 2% sparse
fltr = (df['experiment_base_path'] == 'gsc-Static')
agg(['model'], fltr)
Out[17]:
In [20]:
# 2% sparse
combos = {
    'experiment_base_path': ['gsc-WeightedMag', 'gsc-WeightedMag-nonbinary'],
    'hebbian_grow': [True, False],
}
combos = [[(k, v_i) for v_i in v] for k, v in combos.items()]
combos = list(itertools.product(*combos))
for c in combos:
    fltr = None
    summary = []
    for restraint in c:
        rname = restraint[0]
        rcond = restraint[1]
        summary.append("{}={} ".format(rname, rcond))
        new_fltr = df[rname] == rcond
        if fltr is not None:
            fltr = fltr & new_fltr
        else:
            fltr = new_fltr
    summary = Markdown("### " + " / ".join(summary))
    display(summary)
    display(agg(['experiment_base_path'], fltr))
    print('\n\n\n\n')
In [21]:
# 2% sparse
fltr = (df['experiment_base_path'] == 'gsc-SET')
display(agg(['model'], fltr))
In [236]:
# 2% sparse
combos = {
    'hebbian_grow': [True, False],
    'moving_average_alpha': [0.6, 0.8, 1.0],
    'reset_coactivations': [True, False],
}
combos = [[(k, v_i) for v_i in v] for k, v in combos.items()]
combos = list(itertools.product(*combos))
for c in combos:
    fltr = None
    summary = []
    for restraint in c:
        rname = restraint[0]
        rcond = restraint[1]
        summary.append("{}={} ".format(rname, rcond))
        new_fltr = df[rname] == rcond
        if fltr is not None:
            fltr = fltr & new_fltr
        else:
            fltr = new_fltr
    summary = Markdown("### " + " / ".join(summary))
    display(summary)
    display(agg(['experiment_base_path'], fltr))
    print('\n\n\n\n')
In [22]:
d = {'b':4}
'b' in d
Out[22]:
In [ ]: