Licensed under the Apache License, Version 2.0 (the "License");


In [0]:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

Activation Atlas — Collect Activations

This notebook uses Lucid to reproduce the results in Activation Atlas.

This notebook doesn't introduce the abstractions behind Lucid; you may wish to also read the Lucid tutorial notebook.

Note: The easiest way to use this tutorial is as a colab notebook, which allows you to dive in with no setup.

Install and imports


In [0]:
# Installations
!pip -q install lucid>=0.3.6

In [0]:
# General support
import json
import math

import numpy as np
import tensorflow as tf
from tensorflow import gfile  # used by save_data below

# General lucid code
import lucid.modelzoo.vision_models as models
import lucid.optvis.render as render  # used by render.import_model below

In [0]:
# Collection options recorded alongside the saved data; later cells add
# more keys (sample_images, layer, sample_type, page).
options = dict(
    model='inceptionv1',
    split='train',
)

In [0]:
# Import GoogLeNet/InceptionV1 from the lucid model zoo and load its frozen
# TensorFlow GraphDef into memory so it can be imported into a graph later
# (see render.import_model below).
model = models.InceptionV1()
model.load_graphdef()

In [0]:
# Display the model's classification labels (one per output class).
# NOTE(review): the original comment said "Write the classification labels
# once" — presumably these were exported alongside the activations, but this
# cell as written only displays them; confirm against the export pipeline.
model.labels

In [0]:
# Set up the data provider for ImageNet. You will need to download the
# ImageNet data yourself and configure a data provider:
# http://image-net.org/download
# The commented-out lines below must be adapted and run — later cells depend
# on image_tensor, t_label, t_record_key, and t_label_text being defined.

# data_split = imagenet.get_split(options['split'])
# provider = tf.contrib.slim.dataset_data_provider.DatasetDataProvider(data_split, seed=7)
# image_tensor, t_label, t_record_key, t_label_text = provider.get(["image", "label", "record_key", "label_text"])

In [0]:
# Start a TF1 interactive session and launch the input-pipeline queue
# runners — required for the slim DatasetDataProvider (previous cell) to
# start producing examples. `threads` are the runner threads managed by
# `coord`; they keep feeding sess.run calls below.
sess = tf.InteractiveSession()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)

In [0]:
# Add a batch dimension, resize to the 224x224 input InceptionV1 expects,
# and rescale to [0, 1] — assumes the decoded image values are in
# [0, 255] (uint8 from the provider) — TODO confirm.
image_tensor_ = tf.image.resize_images(image_tensor[None], [224, 224])/255

In [0]:
# Import the frozen GraphDef into the current graph with image_tensor_ as
# its input; T(name) then resolves tensors inside the imported graph
# (used below for T(layer_name) and T("softmax2_pre_activation")).
# `render` must be in scope as lucid.optvis.render.
T = render.import_model(model, image_tensor_, image_tensor_)

In [0]:
def save_data(base_dir, options, activations, attributions):
  """Save one page of sampled activations and their class attributions.

  Args:
    base_dir: URL/path prefix the two output files are written under.
    options: dict of collection options, encoded into the file names by
      optionsToURL (defined elsewhere — not visible in this notebook).
    activations: sampled spatial activation vectors for this page.
    attributions: per-class logit attributions for each sample.
  """
  # spatial activations — np.save writes binary data, so the file must be
  # opened in "wb" (the original "w" text mode corrupts output on Python 3).
  activations_path = optionsToURL(base_dir + "activations", "npy", options)
  with gfile.GFile(activations_path, "wb") as f:
    np.save(f, activations)

  # spatial attributions to final classes
  attributions_path = optionsToURL(base_dir + "attribution", "npy", options)
  with gfile.GFile(attributions_path, "wb") as f:
    np.save(f, attributions)

In [0]:
def fwd_gradients(ys, xs, d_xs):
  """Forward-mode pushforward analogous to the pullback defined by tf.gradients.

  With tf.gradients, grad_ys is the vector being pulled back; here d_xs is
  the vector being pushed forward.

  Technique by mattjj@google.com, via
  https://github.com/renmengye/tensorflow-forward-ad/issues/2
  """
  # Double application of reverse-mode AD: build a pullback parameterized by
  # a dummy cotangent, then differentiate with respect to that cotangent,
  # feeding d_xs, to obtain the forward-mode Jacobian-vector product.
  dummy_cotangent = tf.zeros_like(ys)
  pullback = tf.gradients(ys, xs, grad_ys=dummy_cotangent)
  return tf.gradients(pullback, dummy_cotangent, grad_ys=d_xs)

In [0]:
# Collection configuration.
number_of_images = int(1e6)   # total spatial samples to collect per layer
options['sample_images'] = number_of_images
number_of_pages = 500         # shard the output into this many files per layer
# Floor division keeps this an int under both Python 2 and 3; with true
# division the range() below would fail on Python 3 (float argument).
number_of_images_per_page = number_of_images // number_of_pages

# InceptionV1 layers to collect activations from.
layers = [
  "conv2d2",
  "mixed3a",
  "mixed3b",
  "mixed4a",
  "mixed4b",
  "mixed4c",
  "mixed4d",
  "mixed4e",
  "mixed5a",
  "mixed5b",
]

for layer_name in reversed(layers):
  print()
  print(layer_name)
  options['layer'] = layer_name

  # Forward-mode gradient of the pre-softmax logits w.r.t. this layer's
  # activations: feeding a tensor that is zero everywhere except one spatial
  # position yields that position's attribution to every class.
  d_previous = tf.placeholder("float32")
  d_logit = fwd_gradients(T("softmax2_pre_activation"), T(layer_name), d_previous)[0]

  zeros = None
  # Python 3 print call — the original used Python 2 print statements here,
  # inconsistent with the print() calls above.
  print(number_of_pages)

  for p in range(number_of_pages):
    activations = []
    attributions = []

    for n in range(number_of_images_per_page):

      # evaluate
      vec, label_index, record_key, label_text, image = sess.run(
          [T(layer_name), t_label, t_record_key, t_label_text, image_tensor_])

      # Sample one random spatial position in the activation map.
      # NOTE(review): randint's upper bound is exclusive, so this skips
      # index 0 but still includes the last row/column — confirm whether
      # "minus the edges" should also exclude shape - 1.
      options['sample_type'] = 'random'
      n_x = np.random.randint(1, vec.shape[1])
      n_y = np.random.randint(1, vec.shape[2])

      # Compute logit attribution: push forward only the activation at the
      # sampled position, all other positions zeroed.
      if zeros is None:
        zeros = np.zeros(vec.shape)
      else:
        zeros[:] = 0   # reuse the buffer instead of reallocating each step
      zeros[0, n_x, n_y] = vec[0, n_x, n_y]
      logit_attr = d_logit.eval({T(layer_name): vec, d_previous: zeros})

      # (The original also computed the top attribution class via argsort
      # here, but never used it — dead work removed.)

      activations.append(vec[0, n_x, n_y])
      attributions.append(logit_attr[0])

    # progress indicator
    print(p + 1, end=' ')

    # save files to bigstore
    options['page'] = '{}_of_{}'.format(p + 1, number_of_pages)
    save_data(base_dir_gcs + layer_name + "/", options, activations, attributions)

In [0]: