In [1]:
import tensorflow.contrib.learn.python.learn as learn
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from sklearn import datasets, metrics
from sklearn.decomposition import PCA

import tensorflow.contrib.slim as slim
import numpy as np

import collections
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import sklearn
import seaborn as sns
import six.moves.cPickle as pickle
import sys
from pandas import *
from sklearn.preprocessing import OneHotEncoder

from sklearn.linear_model import LogisticRegression
from sklearn import svm
%matplotlib inline

In [2]:
def accuracy_fn(predictions, labels):
    return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
          / predictions.shape[0])

In [3]:
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)


Extracting MNIST_data/train-images-idx3-ubyte.gz
Extracting MNIST_data/train-labels-idx1-ubyte.gz
Extracting MNIST_data/t10k-images-idx3-ubyte.gz
Extracting MNIST_data/t10k-labels-idx1-ubyte.gz

In [4]:
train_set= mnist.train.images.reshape(-1,28,28, 1)
test_set = mnist.test.images.reshape(-1,28,28, 1)

In [3]:
def variable_summaries(name, var):
    """Attach a lot of summaries to a Tensor."""
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.scalar_summary('mean/' + name, mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_sum(tf.square(var - mean)))
        tf.scalar_summary('sttdev/' + name, stddev)
        tf.scalar_summary('max/' + name, tf.reduce_max(var))
        tf.scalar_summary('min/' + name, tf.reduce_min(var))
        tf.histogram_summary(name, var)

In [4]:
def train_model(train_dataset, train_labels, test_dataset, test_labels, train_tensor,
                accuracy, tf_batch_data, tf_batch_labels, log_dir='./logs',
                num_steps=20000, batch_size=10, test_steps=1000, log_steps=100, predictor=None, last_test='np'):
    with tf.Session() as session:
        summaries = tf.merge_all_summaries()

        if tf.gfile.Exists(log_dir):
            tf.gfile.DeleteRecursively(log_dir)
            
        train_writer = tf.train.SummaryWriter(log_dir + '/train', session.graph)
        test_writer = tf.train.SummaryWriter(log_dir + '/test')

        session.run(tf.initialize_all_variables())
        
        shuffle_train = np.random.permutation(train_dataset.shape[0])
        train_dataset = train_dataset[shuffle_train]
        train_labels = train_labels[shuffle_train]

        for step in range(num_steps):
            # Pick an offset within the training data, which has been randomized.
            # Note: we could use better randomization across epochs.
            offset = ((step * batch_size) % (train_labels.shape[0] - batch_size))

            # Generate a minibatch.
            batch_data = train_dataset[offset:(offset + batch_size)]
            batch_labels = train_labels[offset:(offset + batch_size)]

            # Prepare a dictionary telling the session where to feed the minibatch.
            # The key of the dictionary is the placeholder node of the graph to be fed,
            # and the value is the numpy array to feed to it.
            feed_dict = {
                tf_batch_data : batch_data, 
                tf_batch_labels : batch_labels,
                keep_prob: 0.5
            }
    
    
            if step % test_steps == 0:
                run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()
                _, acc, summary = session.run([train_tensor, accuracy, summaries], 
                                             feed_dict=feed_dict,
                                             run_metadata=run_metadata,
                                             options=run_options)
                print("Train accuracy at step %s: %.1f%%" % (step, acc))
                train_writer.add_run_metadata(run_metadata, "step%d" % step)
                train_writer.add_summary(summary, step)
                
            elif step % log_steps == 0:
                run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()
                _, summary = session.run([train_tensor, summaries], 
                                         feed_dict=feed_dict, 
                                         run_metadata=run_metadata,
                                         options=run_options)
                train_writer.add_run_metadata(run_metadata, "step%d" % step)
                train_writer.add_summary(summary, step)
            else:
                session.run(train_tensor, feed_dict=feed_dict, options=run_options)


        feed_dict = {
            tf_batch_data : test_dataset, 
            tf_batch_labels : test_labels,
            keep_prob: 1
        }
        
        if last_test == 'splitted':
            predictions = np.empty([0,10])
            for batch in np.array_split(test_dataset, test_dataset.shape[0] / 16):
                tmp = session.run(predictor,
                                          feed_dict={
                                                tf_batch_data: batch,
#                                                 batch_labels: np.array([]),
                                                keep_prob: 1.0
                })
                predictions = np.vstack((predictions, tmp))
            acc = accuracy_fn(predictions, test_labels)
        elif accuracy is not None:   
            acc = session.run(accuracy, feed_dict=feed_dict)
        print("Test accuracy: %.3f%%" % acc)

MNIST


In [ ]:
def convnet(inputs, keep_prob):
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      activation_fn=tf.nn.relu,
                      weights_initializer=tf.truncated_normal_initializer(0.0, 0.01),
                      weights_regularizer=slim.l2_regularizer(0.0005)):
        
        net = slim.conv2d(inputs, 32, [5, 5], scope='conv1')
        net = slim.max_pool2d(net, [2, 2], scope='pool1')
        
        net = slim.conv2d(net, 64, [5, 5], scope='conv2')
        net = slim.max_pool2d(net, [2, 2], scope='pool2')
        
#         net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
#         net = slim.max_pool2d(net, [2, 2], scope='pool2')
        
#         net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
#         net = slim.max_pool2d(net, [2, 2], scope='pool3')
        
#         net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
#         net = slim.max_pool2d(net, [2, 2], scope='pool4')
        
#         net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
#         net = slim.max_pool2d(net, [2, 2], scope='pool5')
        
        net = slim.flatten(net)
        net = slim.fully_connected(net, 1024, scope='fc6')
        net = slim.dropout(net, keep_prob, scope='dropout6')
        
#         net = slim.fully_connected(net, 4096, scope='fc7')
#         net = slim.dropout(net, 0.5, scope='dropout7')
        
        net = slim.fully_connected(net, 10, activation_fn=None, scope='fc8')
        predictor = slim.softmax(net)
    return net, predictor

image_size = 28
num_labels = 10
num_channels = 1
g = tf.Graph()
with g.as_default():
    batch_data = tf.placeholder(tf.float32, shape=(None, image_size, image_size, num_channels))
    batch_labels = tf.placeholder(tf.float32, shape=(None, num_labels))
    keep_prob = tf.placeholder(tf.float32)
    last_layer, predictor = convnet(batch_data, keep_prob)
    
    print(last_layer)
    print(batch_labels)
    slim.losses.softmax_cross_entropy(last_layer, batch_labels)
    total_loss = slim.losses.get_total_loss()
    tf.scalar_summary('losses/total_loss', total_loss)
    tf.scalar_summary('accuracy', accuracy)
    
    optimizer = tf.train.AdamOptimizer()
    
    train_tensor = slim.learning.create_train_op(total_loss, optimizer)
    correct_prediction = tf.equal(tf.argmax(predictor,1), tf.argmax(batch_labels,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    
    

    train_model(train_dataset=train_set, 
                train_labels=mnist.train.labels, 
                test_dataset=test_set,
                test_labels=mnist.test.labels, 
                train_tensor=train_tensor,
                accuracy=accuracy,
                last_test='splitted',
                predictor=predictor,
                tf_batch_data=batch_data,
                log_dir='mnist_conv_max_conv_max_flatten_fc_d_sm_autoADAM',
                tf_batch_labels=batch_labels,
                batch_size=16, num_steps=20000, test_steps=100)


Tensor("fc8/BiasAdd:0", shape=(?, 10), dtype=float32)
Tensor("Placeholder_1:0", shape=(?, 10), dtype=float32)
Train accuracy at step 0: 0.0%
Train accuracy at step 100: 0.9%
Train accuracy at step 200: 1.0%
Train accuracy at step 300: 0.8%
  • [conv(5,32)-max(2,2)]*1 - flatten - 10, adams, dropout, 20k steps, l2=5e-3: 2.7%
  • [conv(5,32)-max(2,2)]*1 - flatten - fully_1024 - 10, adams, dropout, 20k steps, l2=5e-3: 1.8%

CIFAR - 10


In [6]:
def unpickle(file):
    fo = open(file, 'rb')
    dict = pickle.load(fo, encoding='latin-1')
    fo.close()
    return dict

def from_flat_to_3d(image):
#     print(image.shape)
    return np.dstack((image[0:1024].reshape(32,32),
                       image[1024:2048].reshape(32,32),
                       image[2048:3072].reshape(32,32)))

cifar_test = unpickle('cifar-10-batches-py/test_batch')
cifar_test['data_3d'] = np.array([from_flat_to_3d(image) for image in cifar_test['data']])

cifar = unpickle('cifar-10-batches-py/data_batch_1')
for i in range(2, 6):
    tmp = unpickle('cifar-10-batches-py/data_batch_' + str(i))
    cifar['data'] = np.vstack((cifar['data'], tmp['data']))
    cifar['labels'] = np.concatenate((cifar['labels'], tmp['labels']))

cifar['data_3d'] = np.array([from_flat_to_3d(image) for image in cifar['data']])

# cifar['data_bw'] = (cifar['data'][:,0:1024] + cifar['data'][:,1024:2048] + cifar['data'][:, 2048:3072]) / 3 
# cifar_test['data_bw'] = (cifar_test['data'][:,0:1024] + cifar_test['data'][:,1024:2048] + cifar_test['data'][:, 2048:3072]) / 3 

enc = OneHotEncoder()
cifar['labels_oh'] = enc.fit_transform(cifar['labels'].reshape(-1, 1))
cifar['labels_oh'] = cifar['labels_oh'].toarray()

cifar_test['labels'] = np.array(cifar_test['labels'])
cifar_test['labels_oh'] = enc.fit_transform(cifar_test['labels'].reshape(-1, 1))
cifar_test['labels_oh'] = cifar_test['labels_oh'].toarray()

# pca = PCA(whiten=True)
# cifar['data_bw_whitened'] = pca.fit_transform(cifar['data_bw'])
# cifar_test['data_bw_whitened'] = pca.fit_transform(cifar_test['data_bw'])

In [34]:
cifar['data_3d'].shape


Out[34]:
(50000, 32, 32, 3)

In [7]:
def convnet(inputs, keep_prob, is_training):
    with tf.device('gpu:0'):
        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                          activation_fn=tf.nn.relu,
                          weights_initializer=tf.truncated_normal_initializer(0.0, 0.01),
                          weights_regularizer=slim.l2_regularizer(0.0005)):

            net = slim.conv2d(inputs, 32, [5, 5], scope='conv1')
            variable_summaries('conv1', net)
            net = slim.max_pool2d(net, [2, 2], scope='pool1')
            net = tf.nn.lrn(net, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')

            net = slim.conv2d(net, 64, [5, 5], scope='conv2')
            variable_summaries('conv2', net)
            net = tf.nn.lrn(net, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
            net = slim.max_pool2d(net, [2, 2], scope='pool2')      
    #         net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
    #         net = slim.max_pool2d(net, [2, 2], scope='pool2')

    #         net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
    #         net = slim.max_pool2d(net, [2, 2], scope='pool3')

    #         net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
    #         net = slim.max_pool2d(net, [2, 2], scope='pool4')

    #         net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
    #          net = slim.max_pool2d(net, [2, 2], scope='pool5')

            net = slim.flatten(net)
            net = slim.fully_connected(net, 1024, scope='fc6')
            variable_summaries('fc1', net)
            net = slim.dropout(net, keep_prob, scope='dropout6')

            net = slim.fully_connected(net, 1024, scope='fc7')
            variable_summaries('fc2', net)
            net = slim.dropout(net, keep_prob, scope='dropout7')

            net = slim.fully_connected(net, 10, activation_fn=None, scope='fc8')
            predictor = slim.softmax(net)
        return net, predictor

image_size = 32
num_labels = 10
num_channels = 3
g = tf.Graph()
with g.as_default():
    batch_data = tf.placeholder(tf.float32, shape=(None, image_size, image_size, num_channels), name='batch_data')
    batch_labels = tf.placeholder(tf.float32, shape=(None, num_labels), name='batch_labels')
    keep_prob = tf.placeholder(tf.float32)
    is_training = tf.placeholder(tf.bool)
    last_layer, predictor = convnet(batch_data, keep_prob, is_training)
    
    print(last_layer)
    print(batch_labels)
    slim.losses.softmax_cross_entropy(last_layer, batch_labels)
    total_loss = slim.losses.get_total_loss()
    tf.scalar_summary('losses/total_loss', total_loss)
    
    optimizer = tf.train.AdamOptimizer(1e-4)
    
    train_tensor = slim.learning.create_train_op(total_loss, optimizer)
    correct_prediction = tf.equal(tf.argmax(predictor,1), tf.argmax(batch_labels,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.scalar_summary('accuracy', accuracy)

    train_model(train_dataset=cifar['data_3d'], 
                train_labels=cifar['labels_oh'], 
                test_dataset=cifar_test['data_3d'],
                test_labels=cifar_test['labels_oh'], 
                train_tensor=train_tensor,
                accuracy=accuracy,
                last_test='splitted',
                predictor=predictor,
                tf_batch_data=batch_data, 
                tf_batch_labels=batch_labels,
                log_dir='cifar_conv_max_lrn_conv_lrn_max_flatten_fc_d_fc_d_sm_autoADAM_gpu',
                batch_size=32, num_steps=30000, test_steps=1000)


Tensor("fc8/BiasAdd:0", shape=(?, 10), dtype=float32, device=/device:GPU:0)
Tensor("batch_labels:0", shape=(?, 10), dtype=float32)
---------------------------------------------------------------------------
InvalidArgumentError                      Traceback (most recent call last)
/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
    729     try:
--> 730       return fn(*args)
    731     except errors.OpError as e:

/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/tensorflow/python/client/session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata)
    711                                  feed_dict, fetch_list, target_list,
--> 712                                  status, run_metadata)
    713 

/home/kkari/DevTools/anaconda3/lib/python3.5/contextlib.py in __exit__(self, type, value, traceback)
     65             try:
---> 66                 next(self.gen)
     67             except StopIteration:

/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/tensorflow/python/framework/errors.py in raise_exception_on_not_ok_status()
    449           compat.as_text(pywrap_tensorflow.TF_Message(status)),
--> 450           pywrap_tensorflow.TF_GetCode(status))
    451   finally:

InvalidArgumentError: Cannot assign a device to node 'fc8/biases/Adam_1': Could not satisfy explicit device specification '/device:GPU:0' because no devices matching that specification are registered in this process; available devices: /job:localhost/replica:0/task:0/cpu:0
Colocation Debug Info:
Colocation group had the following types and devices: 
ApplyAdam: CPU 
Const: CPU 
Identity: CPU 
Assign: CPU 
Variable: CPU 
	 [[Node: fc8/biases/Adam_1 = Variable[_class=["loc:@fc8/biases"], container="", dtype=DT_FLOAT, shape=[10], shared_name="", _device="/device:GPU:0"]()]]

During handling of the above exception, another exception occurred:

InvalidArgumentError                      Traceback (most recent call last)
<ipython-input-7-0f140f523f80> in <module>()
     75                 tf_batch_labels=batch_labels,
     76                 log_dir='cifar_conv_max_lrn_conv_lrn_max_flatten_fc_d_fc_d_sm_autoADAM_gpu',
---> 77                 batch_size=32, num_steps=30000, test_steps=1000)

<ipython-input-4-2de30c81c763> in train_model(train_dataset, train_labels, test_dataset, test_labels, train_tensor, accuracy, tf_batch_data, tf_batch_labels, log_dir, num_steps, batch_size, test_steps, log_steps, predictor, last_test)
     11         test_writer = tf.train.SummaryWriter(log_dir + '/test')
     12 
---> 13         session.run(tf.initialize_all_variables())
     14 
     15         shuffle_train = np.random.permutation(train_dataset.shape[0])

/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
    380     try:
    381       result = self._run(None, fetches, feed_dict, options_ptr,
--> 382                          run_metadata_ptr)
    383       if run_metadata:
    384         proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
    653     movers = self._update_with_movers(feed_dict_string, feed_map)
    654     results = self._do_run(handle, target_list, unique_fetches,
--> 655                            feed_dict_string, options, run_metadata)
    656 
    657     # User may have fetched the same tensor multiple times, but we

/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
    721     if handle is None:
    722       return self._do_call(_run_fn, self._session, feed_dict, fetch_list,
--> 723                            target_list, options, run_metadata)
    724     else:
    725       return self._do_call(_prun_fn, self._session, handle, feed_dict,

/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
    741         except KeyError:
    742           pass
--> 743       raise type(e)(node_def, op, message)
    744 
    745   def _extend_graph(self):

InvalidArgumentError: Cannot assign a device to node 'fc8/biases/Adam_1': Could not satisfy explicit device specification '/device:GPU:0' because no devices matching that specification are registered in this process; available devices: /job:localhost/replica:0/task:0/cpu:0
Colocation Debug Info:
Colocation group had the following types and devices: 
ApplyAdam: CPU 
Const: CPU 
Identity: CPU 
Assign: CPU 
Variable: CPU 
	 [[Node: fc8/biases/Adam_1 = Variable[_class=["loc:@fc8/biases"], container="", dtype=DT_FLOAT, shape=[10], shared_name="", _device="/device:GPU:0"]()]]
Caused by op 'fc8/biases/Adam_1', defined at:
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/runpy.py", line 184, in _run_module_as_main
    "__main__", mod_spec)
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/runpy.py", line 85, in _run_code
    exec(code, run_globals)
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/ipykernel/__main__.py", line 3, in <module>
    app.launch_new_instance()
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/traitlets/config/application.py", line 653, in launch_instance
    app.start()
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/ipykernel/kernelapp.py", line 474, in start
    ioloop.IOLoop.instance().start()
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/zmq/eventloop/ioloop.py", line 162, in start
    super(ZMQIOLoop, self).start()
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/tornado/ioloop.py", line 887, in start
    handler_func(fd_obj, events)
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/tornado/stack_context.py", line 275, in null_wrapper
    return fn(*args, **kwargs)
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/zmq/eventloop/zmqstream.py", line 440, in _handle_events
    self._handle_recv()
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/zmq/eventloop/zmqstream.py", line 472, in _handle_recv
    self._run_callback(callback, msg)
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/zmq/eventloop/zmqstream.py", line 414, in _run_callback
    callback(*args, **kwargs)
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/tornado/stack_context.py", line 275, in null_wrapper
    return fn(*args, **kwargs)
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/ipykernel/kernelbase.py", line 276, in dispatcher
    return self.dispatch_shell(stream, msg)
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/ipykernel/kernelbase.py", line 228, in dispatch_shell
    handler(stream, idents, msg)
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/ipykernel/kernelbase.py", line 390, in execute_request
    user_expressions, allow_stdin)
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/ipykernel/ipkernel.py", line 196, in do_execute
    res = shell.run_cell(code, store_history=store_history, silent=silent)
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/ipykernel/zmqshell.py", line 501, in run_cell
    return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/IPython/core/interactiveshell.py", line 2717, in run_cell
    interactivity=interactivity, compiler=compiler, result=result)
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/IPython/core/interactiveshell.py", line 2821, in run_ast_nodes
    if self.run_code(code, result):
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/IPython/core/interactiveshell.py", line 2881, in run_code
    exec(code_obj, self.user_global_ns, self.user_ns)
  File "<ipython-input-7-0f140f523f80>", line 61, in <module>
    train_tensor = slim.learning.create_train_op(total_loss, optimizer)
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/tensorflow/contrib/slim/python/slim/learning.py", line 457, in create_train_op
    grad_updates = optimizer.apply_gradients(grads, global_step=global_step)
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/tensorflow/python/training/optimizer.py", line 300, in apply_gradients
    self._create_slots(var_list)
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/tensorflow/python/training/adam.py", line 119, in _create_slots
    self._zeros_slot(v, "v", self._name)
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/tensorflow/python/training/optimizer.py", line 494, in _zeros_slot
    named_slots[var] = slot_creator.create_zeros_slot(var, op_name)
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/tensorflow/python/training/slot_creator.py", line 108, in create_zeros_slot
    colocate_with_primary=colocate_with_primary)
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/tensorflow/python/training/slot_creator.py", line 86, in create_slot
    return _create_slot_var(primary, val, scope)
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/tensorflow/python/training/slot_creator.py", line 50, in _create_slot_var
    slot = variables.Variable(val, name=scope, trainable=False)
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/tensorflow/python/ops/variables.py", line 211, in __init__
    dtype=dtype)
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/tensorflow/python/ops/variables.py", line 296, in _init_from_args
    name=name)
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/tensorflow/python/ops/state_ops.py", line 140, in variable_op
    container=container, shared_name=shared_name)
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/tensorflow/python/ops/gen_state_ops.py", line 396, in _variable
    name=name)
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/tensorflow/python/framework/op_def_library.py", line 703, in apply_op
    op_def=op_def)
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/tensorflow/python/framework/ops.py", line 2310, in create_op
    original_op=self._default_original_op, op_def=op_def)
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/tensorflow/python/framework/ops.py", line 1232, in __init__
    self._traceback = _extract_stack()
  • cifar_conv_lrn_max_flatten_fc_d_fc_d_sm_autoADAMTest accuracy: 66.770%
  • cifar_conv_max_flatten_fc_d_fc_d_sm_autoADAMTest accuracy: 66.480%
  • cifar_conv_max_conv_max_flatten_fc_d_fc_d_sm_autoADAM accuracy: 73.090%
  • cifar_conv_max_lrn_conv_lrn_max_flatten_fc_d_fc_d_sm_autoADAM: 74.040%

In [10]:
print(1 - 66.77)
print(1 - 66.48)
print(1 - 73.09)


-65.77
-65.48
-72.09

In [22]:
predictions = np.empty([0,10])
for batch in np.array_split(cifar_test['data_3d'], cifar_test['data_3d'].shape[0] / 16):
    tmp = session.run(predictor,
                              feed_dict={
                                    tf_batch_data: batch,
#                                   batch_labels: np.array([]),
                                    keep_prob: 1.0
    })
    predictions = np.vstack((predictions, tmp))
acc = accuracy_fn(predictions, cifar_test['labels'])


---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-22-3d27bbcb6453> in <module>()
      1 predictions = np.empty([0,10])
      2 for batch in np.array_split(cifar_test['data_3d'], cifar_test['data_3d'].shape[0] / 16):
----> 3     tmp = session.run(predictor,
      4                               feed_dict={
      5                                     tf_batch_data: batch,

NameError: name 'session' is not defined
  • [conv(5,32)-max(2,2)]*2 - flatten - 10, adams, dropout, 20k steps, l2=5e-3: 52.4%
  • [conv(5,32)-max(2,2)]*2 - flatten - fc(1024) - 10, adams, dropout, 20k steps, l2=5e-3: 66.4%

In [43]:
for batch in np.array_split(cifar_test['data_3d'], cifar_test['data_3d'].shape[0] / 16):
    predictions = session.run(predictions,
                              feed_dict={
                                    batch_data: batch,
                                    batch_labels: np.array([]),
                                    keep_prob: 1.0
            })


ERROR:root:Internal Python error in the inspect module.
Below is the traceback from this internal error.

Traceback (most recent call last):
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/IPython/core/interactiveshell.py", line 2881, in run_code
    exec(code_obj, self.user_global_ns, self.user_ns)
  File "<ipython-input-43-fe8ef3147aaa>", line 2, in <module>
    predictions = session.run(predictions,
NameError: name 'session' is not defined

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/IPython/core/interactiveshell.py", line 1821, in showtraceback
    stb = value._render_traceback_()
AttributeError: 'NameError' object has no attribute '_render_traceback_'

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/IPython/core/ultratb.py", line 1132, in get_records
    return _fixed_getinnerframes(etb, number_of_lines_of_context, tb_offset)
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/IPython/core/ultratb.py", line 313, in wrapped
    return f(*args, **kwargs)
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/IPython/core/ultratb.py", line 358, in _fixed_getinnerframes
    records = fix_frame_records_filenames(inspect.getinnerframes(etb, context))
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/inspect.py", line 1453, in getinnerframes
    frameinfo = (tb.tb_frame,) + getframeinfo(tb, context)
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/inspect.py", line 1410, in getframeinfo
    filename = getsourcefile(frame) or getfile(frame)
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/inspect.py", line 672, in getsourcefile
    if getattr(getmodule(object, filename), '__loader__', None) is not None:
  File "/home/kkari/DevTools/anaconda3/lib/python3.5/inspect.py", line 709, in getmodule
    if ismodule(module) and hasattr(module, '__file__'):
KeyboardInterrupt
---------------------------------------------------------------------------

In [ ]:
a = np.array