In [1]:
from emu.torch import TorchAdapter
import numpy as np
import os
In [2]:
def recursivelistims(path):
    """Recursively list image files (.jpg, .jpeg, .png) under the given path."""
    if os.path.isfile(path):
        return [path]
    l = []
    for dirpath, dirnames, filenames in os.walk(path):
        for fname in filenames:
            if fname.lower().endswith(('.jpg', '.jpeg', '.png')):
                l.append(os.path.join(dirpath, fname))
    return l
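# (Aside, a sketch only: with pathlib the same listing is roughly a one-liner, e.g.
#  [str(p) for p in pathlib.Path(path).rglob('*') if p.suffix.lower() in ('.jpg', '.jpeg', '.png')];
#  the helper above is kept because the rest of the notebook uses it.)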
In [3]:
# Per-channel mean of the training data
mean = np.array([0.485, 0.456, 0.406])
# Per-channel standard deviation of the training data
std = np.array([0.229, 0.224, 0.225])
# Expected image input size of the neural network
# (Channels, Height, Width)
inputsize = (3, 224, 224)
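# These statistics are used for input normalization. Roughly (illustrative sketch
# only -- the adapter's preprocess() handles this internally), for an image already
# scaled to [0, 1] with shape (H, W, 3):
# def normalize_sketch(img01):
#     return (img01 - mean) / std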
In [4]:
# from a Torch7 model file
# nn = TorchAdapter('convnets/mymodel.t7', mean=mean, std=std, inputsize=inputsize, use_gpu=True)
# from the pytorch model zoo
nn = TorchAdapter('alexnet', mean=mean, std=std, inputsize=inputsize, use_gpu=True)
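# Other model-zoo names (e.g. 'vgg16' or 'resnet18') should work the same way --
# this is an assumption, not verified here:
# nn = TorchAdapter('vgg16', mean=mean, std=std, inputsize=inputsize, use_gpu=True)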
In [5]:
# Let's list what layers are in the network
layers = nn.get_layers()
for identifier, layertype in layers.items():
    print('%s: %s' % (identifier, layertype))
In [6]:
# We are only interested in the layers with parameters:
filtered = [identifier for identifier, layertype in layers.items() if 'Conv' in layertype or 'Linear' in layertype]
print(filtered)
In [7]:
# Reload the model, this time passing the keyword argument `keep_outputs`,
# which makes the adapter store the outputs of the listed layers on every forward call
nn = TorchAdapter('alexnet', mean=mean, std=std, inputsize=inputsize, keep_outputs=filtered, use_gpu=True)
In [8]:
# Find images
imagefiles = recursivelistims('MSCOCO/test2014/')
print('Number of images found: %s' % len(imagefiles))
# Let's limit that to 1000 images
imagefiles = imagefiles[:1000]
In [9]:
# Preprocess
# NNAdapter takes care of loading and normalizing the images and returns a 4d numpy array
images = nn.preprocess(imagefiles)
print('Image tensor shape: %s' % str(images.shape))
In [10]:
# Alternatively, for evaluating very large sets of images:
# loading e.g. all 40775 images at once takes too long and consumes too much memory,
# so we can preprocess and forward them in batches of file paths instead:
# for bi in range(0, len(imagefiles), batchsize):
#     batch = nn.preprocess(imagefiles[bi:(bi + batchsize)])
#     nn.forward(batch)
In [11]:
output_by_layer = {}
for layer in filtered:
    output_by_layer[layer] = []

batchsize = 480
for bi in range(0, len(images), batchsize):
    batch = images[bi:(bi + batchsize)]
    nn.forward(batch)
    for layer in filtered:
        o = nn.get_layeroutput(layer)
        output_by_layer[layer].append(o)

# Concatenate the batch-outputs
for layer in filtered:
    output_by_layer[layer] = np.concatenate(output_by_layer[layer])
    print('%s output shape: %s' % (layer, str(output_by_layer[layer].shape)))
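# Conv-layer outputs are typically 4d (N, C, H, W) and linear-layer outputs 2d (N, F).
# If a flat feature matrix per layer is needed for further analysis, a small sketch
# (not part of the emu API):
# features = {layer: out.reshape(out.shape[0], -1)
#             for layer, out in output_by_layer.items()}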
In [12]:
# Access
# e.g. the parameters of the 1st layer
weights, bias = nn.get_layerparams(filtered[0])
print('Shape of weights of layer %s: %s' % (filtered[0], str(weights.shape)))
print('Shape of bias of layer %s: %s' % (filtered[0], str(bias.shape)))
# Output of the network for the first image:
o = nn.forward(images[0][np.newaxis, ...])
print('Predicted class: %d' % np.argmax(o))
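# Beyond the argmax we can also look at the top-5 class indices (sketch, assuming
# `o` converts to a numpy array):
# scores = np.asarray(o).ravel()
# print('Top-5 predicted classes: %s' % str(np.argsort(scores)[::-1][:5].tolist()))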
In [13]:
# Alter
# Set the weights of the first layer to zero
weights.fill(0)
nn.set_weights(filtered[0], weights)
# Output of the network for the same image, now with zeroed first-layer weights:
o = nn.forward(images[0][np.newaxis, ...])
print('Predicted class: %d' % np.argmax(o))
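# To undo this experiment, keep a copy of the original parameters before zeroing
# (e.g. `orig_weights = weights.copy()` right after get_layerparams) and write them
# back with the same call (sketch):
# nn.set_weights(filtered[0], orig_weights)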