This is another example of deploying a CNN, this time on a subset of the Kasthuri data set. This example is essentially the same as that for ISBI 2012, with a few cosmetic differences. This script assumes you have already run the kast-train-{synapse,membranes}.py scripts to train the CNN weights.
In [1]:
%load_ext autoreload
%autoreload 2
%matplotlib inline
import sys, os, os.path, copy, logging, socket, time
import numpy as np
import pylab as plt
#from ndparse.algorithms import nddl as nddl
#import ndparse as ndp
sys.path.append('..'); import ndparse as ndp
try:
logger
except:
# do this precisely once
logger = logging.getLogger("deploy_kast")
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setFormatter(logging.Formatter('[%(asctime)s:%(name)s:%(levelname)s] %(message)s'))
logger.addHandler(ch)
In [2]:
# NOTE: the particular weight files chosen here are arbitrary.
w_membrane = os.path.join('membrane_weights', 'weights_epoch_001.h5')
w_synapse = os.path.join('synapse_weights', 'weights_epoch_014.h5')

# Load the test data from a local file (an ndio call could be substituted
# here if desired), then rearrange it into the layout the CNN code expects:
#     (#slices, #channels, #rows, #columns)
# i.e. move the z axis to the front and add a singleton channel axis.
data = np.load('deep_learning_kasthuri_example_data.npz')
X = np.transpose(data['Xtest'], [2, 0, 1]).astype(np.float32)
X = X[:, np.newaxis, :, :]

print('Test data shape is: %s' % str(X.shape))

# Preview slice 0 of the test volume (ticks removed for a cleaner display).
plt.imshow(X[0, 0, ...], interpolation='none', cmap='bone')
plt.title('test volume, slice 0')
ax = plt.gca()
ax.axes.get_xaxis().set_ticks([])
ax.axes.get_yaxis().set_ticks([])
plt.show()
In [7]:
# To keep the runtime short, deploy on only one slice (z-dimension) of the
# test volume *and* evaluate only a 10% subset of the pixels in that slice.
#
# Note: depending upon your system (e.g. CPU vs GPU) this may take a few minutes...
#
t0 = time.time()
P_membrane_0 = ndp.nddl.fit(X, w_membrane, slices=[0,], evalPct=.1, log=logger)
elapsed = time.time() - t0
print("Time to deploy: %0.2f sec" % elapsed)

# The resulting probability-estimate tensor has dimensions:
#     (#slices, #classes, #rows, #cols)
print('Class probabilities shape: %s' % str(P_membrane_0.shape))
In [9]:
# Fill in the "missing" values (pixels we skipped during CNN evaluation)
# with a simple nearest-neighbor interpolation scheme.
P_membrane_int_0 = ndp.nddl.interpolate_nn(P_membrane_0)

def _hide_ticks():
    # Strip the tick marks from the current axes for a cleaner display.
    plt.gca().axes.get_xaxis().set_ticks([])
    plt.gca().axes.get_yaxis().set_ticks([])

# Visualize the raw (subsampled) estimates...
plt.imshow(P_membrane_0[0, 0, ...])
plt.colorbar()
_hide_ticks()
plt.title('Class Estimates (slice 0, subsampled)')
plt.show()

# ...and the interpolated estimates.
plt.imshow(P_membrane_int_0[0, 0, ...])
plt.colorbar()
plt.title('Class Estimates: (slice 0, interpolated)')
_hide_ticks()
plt.show()
In [3]:
# Deploy the synapse-detection weights on the same slice/subset as above.
t0 = time.time()
P_synapse_0 = ndp.nddl.fit(X, w_synapse, slices=[0,], evalPct=.1, log=logger)
elapsed = time.time() - t0
print("Time to deploy (synapse): %0.2f sec" % elapsed)

# The resulting probability-estimate tensor has dimensions:
#     (#slices, #classes, #rows, #cols)
print('Class probabilities shape: %s' % str(P_synapse_0.shape))
In [4]:
# Fill in the "missing" values (pixels we skipped during CNN evaluation)
# with a simple nearest-neighbor interpolation scheme.
P_synapse_int_0 = ndp.nddl.interpolate_nn(P_synapse_0)

def _hide_ticks():
    # Strip the tick marks from the current axes for a cleaner display.
    plt.gca().axes.get_xaxis().set_ticks([])
    plt.gca().axes.get_yaxis().set_ticks([])

# Visualize the raw (subsampled) estimates...
plt.imshow(P_synapse_0[0, 0, ...])
plt.colorbar()
_hide_ticks()
plt.title('Synapse Estimates (slice 0, subsampled)')
plt.show()

# ...and the interpolated estimates.
plt.imshow(P_synapse_int_0[0, 0, ...])
plt.colorbar()
plt.title('Synapse Estimates: (slice 0, interpolated)')
_hide_ticks()
plt.show()
In [ ]: