In [1]:
import pylearn2.utils
import pylearn2.config
import theano
import neukrill_net.dense_dataset
import neukrill_net.utils
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import holoviews as hl
%load_ext holoviews.ipython
import sklearn.metrics
In [2]:
m = pylearn2.utils.serial.load(
    "/disk/scratch/neuroglycerin/models/experiment_more_pooling_recent.pkl")
Checking the input dimensions of each layer:
In [14]:
print(m)
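The printed summary is long; a more targeted check is to walk the layers and print each input and output space. This is a minimal sketch, assuming m is a pylearn2 MLP whose layers expose layer_name, get_input_space() and get_output_space():

# Hedged sketch: print each layer's input/output space to verify the
# dimensions line up after the pooling change; assumes m is a pylearn2 MLP.
for layer in m.layers:
    print(layer.layer_name, layer.get_input_space(), layer.get_output_space())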
In [7]:
import neukrill_net.plotting as pl
In [4]:
nll_channels = [c for c in m.monitor.channels.keys() if 'nll' in c]
No signs of overfitting:
In [13]:
pl.monitor_channels(m, nll_channels, x_axis="epoch", overlay=True)
Out[13]:
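The same curves can be reproduced without the plotting helper; a rough matplotlib equivalent, relying only on the epoch_record and val_record attributes that pylearn2 monitor channels keep:

# Rough matplotlib equivalent of the overlaid nll plot above.
for c in nll_channels:
    channel = m.monitor.channels[c]
    plt.plot(channel.epoch_record, channel.val_record, label=c)
plt.xlabel("epoch")
plt.ylabel("nll")
plt.legend()
plt.show()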
In [12]:
reload(pl)
Out[12]:
Plotting all the monitoring channels at the same time, we might see something interesting happening:
In [17]:
pl.monitor_channels(m, [c for c in m.monitor.channels if "norms_mean" in c], x_axis="epoch")
Out[17]:
In [8]:
pl.monitor_channels(m, m.monitor.channels.keys(), x_axis="epoch")
Out[8]:
Looks like we've got problems with the kernel norms diverging again. This is likely because we didn't recompute the norm constraints after modifying the max pooling strategy, which changes the input space of each layer.
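Before fixing the constraints, it's worth confirming which layers are drifting. A quick hedged check, assuming the usual pylearn2 channel naming (e.g. *_kernel_norms_max), prints the latest recorded value of every kernel-norm channel:

# Hedged check: latest value of each kernel-norm channel, so any layer whose
# norms are diverging stands out; assumes standard pylearn2 channel names.
for name, channel in sorted(m.monitor.channels.items()):
    if "kernel_norms" in name:
        print(name, float(channel.val_record[-1]))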
In [9]:
%env PYLEARN2_VIEWER_COMMAND=/afs/inf.ed.ac.uk/user/s08/s0805516/repos/neukrill-net-work/image_hack.sh
%run ~/repos/pylearn2/pylearn2/scripts/show_weights.py /disk/scratch/neuroglycerin/models/experiment_more_pooling.pkl
In [10]:
from IPython.display import Image
In [11]:
def plot_recent_pylearn2():
    pl2plt = Image(filename="/afs/inf.ed.ac.uk/user/s08/s0805516/tmp/pylearnplot.png",
                   width=700)
    return pl2plt
plot_recent_pylearn2()
Out[11]: