Feature Visualization

  • This notebook does basic feature visualization of David Parks's DLA CNN model.
  • The tools used come from the Distill article "Feature Visualization".

Install imports, define and load model


In [3]:
# Imports

import numpy as np
import tensorflow as tf
import scipy.ndimage as nd
import time
import imageio

import matplotlib
import matplotlib.pyplot as plt

import lucid.modelzoo.vision_models as models
from lucid.misc.io import show
import lucid.optvis.objectives as objectives
import lucid.optvis.param as param
import lucid.optvis.render as render
import lucid.optvis.transform as transform

In [4]:
from lucid.modelzoo.vision_base import Model

class DLA(Model):
    """Lucid wrapper for the frozen DLA CNN TensorFlow graph.

    Subclasses lucid's `Model` so `render.render_vis` can load the frozen
    GraphDef from disk and feed optimized inputs into it.
    """
    # Path to the frozen TensorFlow GraphDef (protobuf) on disk.
    model_path = '../protobufs/full_model_8_13.pb'
    # Model input is a 1 x 400 single-row "image" (a 400-sample 1-D signal,
    # not a 2-D picture) — presumably one spectrum segment; TODO confirm.
    image_shape = [1, 400]
    # Input values are expected to lie in [0, 1].
    image_value_range = [0, 1]
    # Name of the graph's input placeholder node.
    input_name = 'x'

In [5]:
model = DLA()
model.load_graphdef()

# for node in model.graph_def.node:
#     print(node.name)

Simple Visualization of a neuron

  • Can specify learning rate of the optimizer if you want

In [23]:
# Specify param.image size to work with our models input, must be a multiple of 400.
param_f = lambda: param.image(400, h=1, channels=1)

# Specify the objective we are trying to visualize, which layer, neuron
channel = lambda n: objectives.channel("Conv2D_2", n)
obj = channel(8)

# Specify what threhsolds to display optimization steps
thresholds = (2048,)

# For basic visualizations dont apply any transformation robustness
transforms = []

# Render the objevtive
imgs = render.render_vis(model, obj, param_f, thresholds=thresholds, transforms=transforms, verbose=False)
#show([nd.zoom(img[0], [1,1,1], order=0) for img in imgs])

In [24]:
test = np.array(imgs)
test = test.reshape(400)

In [25]:
fig = plt.figure(frameon=False);
ax = plt.Axes(fig, [0, 0, 1, 1]);
ax.set_axis_off();
fig.add_axes(ax);
ax.plot(test, 'black');
ax.set(xlim=(0, 400));



In [ ]:


In [ ]: