In [3]:
import sys
import collections
import six.moves.cPickle as pickle
import numpy as np
import pandas as pd

import tensorflow.contrib.learn.python.learn as learn
import tensorflow as tf
import tensorflow.contrib.slim as slim

from tensorflow.examples.tutorials.mnist import input_data

import sklearn
from sklearn import datasets, metrics
from sklearn.decomposition import PCA
from sklearn.preprocessing import OneHotEncoder
from sklearn.linear_model import LogisticRegression
from sklearn import svm

import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import seaborn as sns

from my_rbm import Rbm
%matplotlib inline

In [2]:
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)


Extracting MNIST_data/train-images-idx3-ubyte.gz
Extracting MNIST_data/train-labels-idx1-ubyte.gz
Extracting MNIST_data/t10k-images-idx3-ubyte.gz
Extracting MNIST_data/t10k-labels-idx1-ubyte.gz

In [4]:
def variable_summaries(name, var):
    """Attach mean/stddev/min/max and histogram summaries to a Tensor."""
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.scalar_summary('mean/' + name, mean)
        with tf.name_scope('stddev'):
            # Standard deviation is the root of the *mean* squared deviation,
            # not the sum (the original reduce_sum overstated it).
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.scalar_summary('stddev/' + name, stddev)
        tf.scalar_summary('max/' + name, tf.reduce_max(var))
        tf.scalar_summary('min/' + name, tf.reduce_min(var))
        tf.histogram_summary(name, var)
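
Note: the tf.scalar_summary / tf.histogram_summary calls above are the pre-1.0
TensorFlow API. In TensorFlow >= 1.0 the same ops live under tf.summary; a
minimal equivalent sketch:

    def variable_summaries(name, var):
        with tf.name_scope('summaries'):
            mean = tf.reduce_mean(var)
            tf.summary.scalar('mean/' + name, mean)
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
            tf.summary.scalar('stddev/' + name, stddev)
            tf.summary.scalar('max/' + name, tf.reduce_max(var))
            tf.summary.scalar('min/' + name, tf.reduce_min(var))
            tf.summary.histogram(name, var)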

In [5]:
def train_model(train_dataset, train_labels, test_dataset, test_labels, train_tensor,
                accuracy, tf_batch_data, tf_batch_labels, log_dir='./logs',
                num_steps=20000, batch_size=10, test_steps=1000, log_steps=100):
    with tf.Session() as session:
        summaries = tf.merge_all_summaries()

        if tf.gfile.Exists(log_dir):
            tf.gfile.DeleteRecursively(log_dir)
            
        train_writer = tf.train.SummaryWriter(log_dir + '/train', session.graph)
        test_writer = tf.train.SummaryWriter(log_dir + '/test')

        session.run(tf.initialize_all_variables())
        
        shuffle_train = np.random.permutation(train_dataset.shape[0])
        train_dataset = train_dataset[shuffle_train]
        train_labels = train_labels[shuffle_train]

        for step in range(num_steps):
            # Pick an offset within the training data, which has been randomized.
            # Note: we could use better randomization across epochs.
            offset = ((step * batch_size) % (train_labels.shape[0] - batch_size))

            # Generate a minibatch.
            batch_data = train_dataset[offset:(offset + batch_size)]
            batch_labels = train_labels[offset:(offset + batch_size)]

            # Prepare a dictionary telling the session where to feed the minibatch.
            # The key of the dictionary is the placeholder node of the graph to be fed,
            # and the value is the numpy array to feed to it.
            feed_dict = {
                tf_batch_data : batch_data, 
                tf_batch_labels : batch_labels
            }
    
    
            if step % test_steps == 0:
                run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()
                _, acc, summary = session.run([train_tensor, accuracy, summaries], 
                                             feed_dict=feed_dict,
                                             run_metadata=run_metadata,
                                             options=run_options)
                print("Train accuracy at step %s: %.1f%%" % (step, acc * 100))
                train_writer.add_run_metadata(run_metadata, "step%d" % step)
                train_writer.add_summary(summary, step)
                
            elif step % log_steps == 0:
                run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()
                _, summary = session.run([train_tensor, summaries], 
                                         feed_dict=feed_dict, 
                                         run_metadata=run_metadata,
                                         options=run_options)
                train_writer.add_run_metadata(run_metadata, "step%d" % step)
                train_writer.add_summary(summary, step)
            else:
                # Plain training step: no tracing here. (Previously
                # run_options leaked in from an earlier traced iteration.)
                session.run(train_tensor, feed_dict=feed_dict)


        feed_dict = {
            tf_batch_data : test_dataset, 
            tf_batch_labels : test_labels
        }
        acc = session.run(accuracy, feed_dict=feed_dict)
        print("Train accuracy: %.3f%%" % (acc * 100))

In [11]:
def fully_connected(batch_data):
    with slim.arg_scope([slim.fully_connected],
                      activation_fn=tf.nn.relu,
                      weights_initializer=tf.truncated_normal_initializer(0.0, 0.01),
                      weights_regularizer=slim.l2_regularizer(0.0005)):

        x = slim.fully_connected(batch_data, 500, scope='fc/fc_1')
        variable_summaries('fc/fc_1', x)
        
        x = slim.fully_connected(x, 500, scope='fc/fc_2')
        variable_summaries('fc/fc_2', x)
        
        x = slim.fully_connected(x, 2000, scope='fc/fc_3')
        variable_summaries('fc/fc_3', x)
        
        x = slim.fully_connected(x, 30, scope='fc/fc_4')
        variable_summaries('fc/fc_4', x)
        
#         x = slim.fully_connected(x, 1024, scope='fc/fc_4')
#         variable_summaries('fc/fc_4', x)
       
        x = slim.fully_connected(x, 10, activation_fn=None, scope='fc/fc_5')
        variable_summaries('fc/fc_5', x)
#         x = slim.fully_connected(x, 10, activation_fn=None ,scope='fc/fc_3')
        predictions = tf.nn.softmax(x)
        return x, predictions

image_size = 28
num_labels = 10
g = tf.Graph()
with g.as_default():
    batch_data = tf.placeholder(tf.float32, shape=(None, image_size * image_size))
    batch_labels = tf.placeholder(tf.float32, shape=(None, num_labels))
    last_layer, predictions = fully_connected(batch_data)
    
    slim.losses.softmax_cross_entropy(last_layer, batch_labels)
    total_loss = slim.losses.get_total_loss()
    tf.scalar_summary('losses/total_loss', total_loss)
    
    optimizer = tf.train.AdamOptimizer()
    
    train_tensor = slim.learning.create_train_op(total_loss, optimizer)
    correct_prediction = tf.equal(tf.argmax(predictions,1), tf.argmax(batch_labels,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.scalar_summary('accuracy', accuracy)

    train_model(train_dataset=mnist.train.images, 
                train_labels=mnist.train.labels, 
                test_dataset=mnist.test.images,
                test_labels=mnist.test.labels, 
                train_tensor=train_tensor, accuracy=accuracy,
                log_dir='mnist_mlp_500_500_2000_30_10',
                tf_batch_data=batch_data, tf_batch_labels=batch_labels, batch_size=32, num_steps=20000)


Train accuracy at step 0: 3.1%
Train accuracy at step 1000: 43.8%
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-11-82e20a0d8a72> in <module>()
     51                 train_tensor=train_tensor, accuracy=accuracy,
     52                 log_dir='mnist_mlp_500_500_2000_30_10',
---> 53                 tf_batch_data=batch_data, tf_batch_labels=batch_labels, batch_size=32, num_steps=20000)

<ipython-input-8-192cb33f41f7> in train_model(train_dataset, train_labels, test_dataset, test_labels, train_tensor, accuracy, tf_batch_data, tf_batch_labels, log_dir, num_steps, batch_size, test_steps, log_steps)
     56                 train_writer.add_summary(summary, step)
     57             else:
---> 58                 session.run(train_tensor, feed_dict=feed_dict, options=run_options)
     59 
     60 

[... tensorflow/python/client/session.py internal frames elided ...]
KeyboardInterrupt: 

Fully connected results on MNIST (test error; 784 = 28x28 input):

  • 784-10: 8.7%
  • 784-1024-10: ~2%
  • 784-400-10, SGD lr=0.5: 2.04%
  • 784-400-10, Adam: 2.02%
  • 784-800-10, SGD lr=0.5: 2.00%
  • 784-800-10, Adam: 2.21%

In [11]:
100 - 97.9


Out[11]:
2.0400000000000063
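
(Converting accuracy to the error rates quoted above: 97.9% test accuracy is
the 2.04% entry.)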

In [23]:
mnist.train.images.reshape([55000, 28, 28])


Out[23]:
array([[[ 0.,  0.,  0., ...,  0.,  0.,  0.],
        [ 0.,  0.,  0., ...,  0.,  0.,  0.],
        ..., 
        [ 0.,  0.,  0., ...,  0.,  0.,  0.]],

       ..., 

       [[ 0.,  0.,  0., ...,  0.,  0.,  0.],
        [ 0.,  0.,  0., ...,  0.,  0.,  0.],
        ..., 
        [ 0.,  0.,  0., ...,  0.,  0.,  0.]]], dtype=float32)

In [27]:
array = np.array([[1,2,3,4],[5,6,7,8]])

In [41]:
narr = np.expand_dims(array.reshape([2,2,2]), axis=3)
narr


Out[41]:
array([[[[1],
         [2]],

        [[3],
         [4]]],


       [[[5],
         [6]],

        [[7],
         [8]]]])

In [45]:
np.expand_dims(mnist.train.images.reshape([-1,28,28]), axis=3).shape


Out[45]:
(55000, 28, 28, 1)
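
The reshape and expand_dims can also be fused into a single call, letting
NumPy infer the batch dimension:

    mnist.train.images.reshape([-1, 28, 28, 1]).shape  # (55000, 28, 28, 1)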

CIFAR-10


In [6]:
def unpickle(file):
    """Load a pickled CIFAR-10 batch (latin-1 encoding for Python 3)."""
    with open(file, 'rb') as fo:
        batch = pickle.load(fo, encoding='latin-1')
    return batch

def from_flat_to_3d(image):
    """Unstack a flat 3072-vector (R, G, B planes of 32x32) into a 32x32x3 image."""
    return np.dstack((image[0:1024].reshape(32, 32),
                      image[1024:2048].reshape(32, 32),
                      image[2048:3072].reshape(32, 32)))

cifar_test = unpickle('cifar-10-batches-py/test_batch')
cifar_test['data'] = cifar_test['data'].astype(np.float32) / 255
cifar_test['data_3d'] = np.array([from_flat_to_3d(image) for image in cifar_test['data']])

cifar = unpickle('cifar-10-batches-py/data_batch_1')
for i in range(2, 6):
    tmp = unpickle('cifar-10-batches-py/data_batch_' + str(i))
    cifar['data'] = np.vstack((cifar['data'], tmp['data']))
    cifar['labels'] = np.concatenate((cifar['labels'], tmp['labels']))
cifar['data'] = cifar['data'].astype(np.float32) / 255
cifar['data_3d'] = np.array([from_flat_to_3d(image) for image in cifar['data']])

# cifar['data_bw'] = (cifar['data'][:,0:1024] + cifar['data'][:,1024:2048] + cifar['data'][:, 2048:3072]) / 3 
# cifar_test['data_bw'] = (cifar_test['data'][:,0:1024] + cifar_test['data'][:,1024:2048] + cifar_test['data'][:, 2048:3072]) / 3 

enc = OneHotEncoder()
cifar['labels_oh'] = enc.fit_transform(cifar['labels'].reshape(-1, 1))
cifar['labels_oh'] = cifar['labels_oh'].toarray()

cifar_test['labels'] = np.array(cifar_test['labels'])
# Reuse the encoder fitted on the training labels instead of refitting on test.
cifar_test['labels_oh'] = enc.transform(cifar_test['labels'].reshape(-1, 1))
cifar_test['labels_oh'] = cifar_test['labels_oh'].toarray()

# pca = PCA(whiten=True)
# cifar['data_bw_whitened'] = pca.fit_transform(cifar['data_bw'])
# cifar_test['data_bw_whitened'] = pca.fit_transform(cifar_test['data_bw'])
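
Each CIFAR-10 row stores the red, green and blue 32x32 planes back to back,
which is what from_flat_to_3d unstacks. The per-image loop can also be done as
one vectorized reshape + transpose; a sketch that should produce the same
array:

    # (N, 3072) -> (N, 3, 32, 32) -> channels-last (N, 32, 32, 3)
    cifar['data_3d'] = cifar['data'].reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)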

Baselines


In [6]:
lr = LogisticRegression(solver='sag')
lr.fit(cifar['data'], cifar['labels'])
lr.predict(cifar['data'])
print(lr.score(cifra_test['data'], cifar_test['labels']))  # 'cifra_test' is a typo for cifar_test


/home/kkari/DevTools/anaconda3/lib/python3.5/site-packages/sklearn/linear_model/sag.py:267: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge
  "the coef_ did not converge", ConvergenceWarning)
---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-6-c4f43a7ffcb1> in <module>()
      2 lr.fit(cifar['data'], cifar['labels'])
      3 lr.predict(cifar['data'])
----> 4 print(lr.score(cifra_test['data'], cifar_test['labels']))

NameError: name 'cifra_test' is not defined

In [12]:
cifar_test['data'].shape


Out[12]:
(10000, 3072)

In [21]:
print(lr.score(cifar_test['data'], cifar_test['labels']))


0.3886
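
So multinomial logistic regression on raw pixels reaches about 38.9% test
accuracy; this is the baseline for the models below.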

In [ ]:
clf_raw = svm.SVC(decision_function_shape='ovr', kernel='poly')
clf_raw.fit(cifar['data_bw'], cifar['labels'])  # requires the data_bw preprocessing commented out above
print(clf_raw.score(cifar_test['data_bw'], cifar_test['labels']))

In [ ]:
clf_raw = svm.SVC(decision_function_shape='ovr', kernel='poly')
clf_raw.fit(cifar['data_bw_whitened'], cifar['labels'])  # requires the PCA whitening commented out above
print(clf_raw.score(cifar_test['data_bw_whitened'], cifar_test['labels']))

In [ ]:
clf_raw = svm.SVC(decision_function_shape='ovr', kernel='poly')
clf_raw.fit(cifar['data'], cifar['labels'])
print(clf_raw.score(cifar_test['data'], cifar_test['labels']))
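
Note: kernel SVC scales poorly with 50,000 examples of dimension 3072 and may
not finish in reasonable time. A faster linear-SVM baseline sketch (an
illustration added here, not part of the original runs):

    from sklearn.svm import LinearSVC

    clf_linear = LinearSVC()
    clf_linear.fit(cifar['data'], cifar['labels'])
    print(clf_linear.score(cifar_test['data'], cifar_test['labels']))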

In [7]:
def fully_connected(batch_data):
    with slim.arg_scope([slim.fully_connected],
                      activation_fn=tf.nn.relu,
                      weights_initializer=tf.truncated_normal_initializer(0.0, 0.01),
                      weights_regularizer=slim.l2_regularizer(0.0005)):

        x = slim.fully_connected(batch_data, 500, scope='fc/fc_1')
        variable_summaries('fc/fc_1', x)
        
        x = slim.fully_connected(x, 500, scope='fc/fc_2')
        variable_summaries('fc/fc_2', x)
        
        x = slim.fully_connected(x, 2000, scope='fc/fc_3')
        variable_summaries('fc/fc_3', x)
        
        x = slim.fully_connected(x, 30, scope='fc/fc_4')
        variable_summaries('fc/fc_4', x)
        
#         x = slim.fully_connected(x, 1024, scope='fc/fc_4')
#         variable_summaries('fc/fc_4', x)
       
        x = slim.fully_connected(x, 10, activation_fn=None, scope='fc/fc_5')
        variable_summaries('fc/fc_5', x)
#         x = slim.fully_connected(x, 10, activation_fn=None ,scope='fc/fc_3')
        predictions = tf.nn.softmax(x)
        return x, predictions

image_size = 32
num_labels = 10
g = tf.Graph()
with g.as_default():
    batch_data = tf.placeholder(tf.float32, shape=(None, image_size * image_size * 3))
    batch_labels = tf.placeholder(tf.float32, shape=(None, num_labels))
    last_layer, predictions = fully_connected(batch_data)
    
    slim.losses.softmax_cross_entropy(last_layer, batch_labels)
    total_loss = slim.losses.get_total_loss()
    tf.scalar_summary('losses/total_loss', total_loss)
    
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
    
    train_tensor = slim.learning.create_train_op(total_loss, optimizer)
    correct_prediction = tf.equal(tf.argmax(predictions,1), tf.argmax(batch_labels,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.scalar_summary('accuracy', accuracy)

    train_model(train_dataset=cifar['data'], 
                train_labels=cifar['labels_oh'], 
                test_dataset=cifar_test['data'],
                test_labels=cifar_test['labels_oh'], 
                train_tensor=train_tensor, accuracy=accuracy,
                log_dir='cifar_mlp_500_500_2000_30_10',
                tf_batch_data=batch_data, tf_batch_labels=batch_labels, batch_size=128, num_steps=20000)


Train accuracy at step 0: 9.4%
Train accuracy at step 1000: 7.8%
Train accuracy at step 2000: 11.7%
Train accuracy at step 3000: 7.8%
Train accuracy at step 4000: 11.7%
Train accuracy at step 5000: 10.9%
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-7-01e934ec56c5> in <module>()
     51                 train_tensor=train_tensor, accuracy=accuracy,
     52                 log_dir='cifar_mlp_500_500_2000_30_10',
---> 53                 tf_batch_data=batch_data, tf_batch_labels=batch_labels, batch_size=128, num_steps=20000)

<ipython-input-5-192cb33f41f7> in train_model(train_dataset, train_labels, test_dataset, test_labels, train_tensor, accuracy, tf_batch_data, tf_batch_labels, log_dir, num_steps, batch_size, test_steps, log_steps)
     56                 train_writer.add_summary(summary, step)
     57             else:
---> 58                 session.run(train_tensor, feed_dict=feed_dict, options=run_options)
     59 
     60 

[... tensorflow/python/client/session.py internal frames elided ...]

KeyboardInterrupt: 

CIFAR-10 MLP results:

  • 3072-10, rate=0.5, steps=20k: 20.7%
  • 3072-10, rate=0.001, steps=20k: 25%
  • 1024-10, rate=0.001, steps=20k, bw whitened: 9.7%
  • 1024-10, rate=0.001, steps=20k, bw: 10.3%

In [11]:
num_hidden = 256
num_epochs = 10
rbm = Rbm(num_hidden=num_hidden, num_classes=10, num_features=3072, learning_rate=0.01)
rbm.init_rbm()
rbm.fit(cifar['data'], cifar_test['data'], num_epochs=num_epochs)


Number of features: 3072
Number of classes: 10
logit shape:  (?, 10)
batch_labels shape:  (?, 10)
epoch: 0
batch_number: 0
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-11-7cbaf4ae04e9> in <module>()
      3 rbm = Rbm(num_hidden=num_hidden, num_classes=10, num_features=3072, learning_rate=0.01)
      4 rbm.init_rbm()
----> 5 rbm.fit(cifar['data'], cifar_test['data'], num_epochs=num_epochs)

/home/kkari/UniStuff/HetedikFelev/bsc_thesis/my_rbm.py in fit(self, train_dataset, validation_dataset, num_epochs)
    321                 self.tf_session.run(
    322                     self.updates,
--> 323                     feed_dict=self._create_feed_dict(batch)
    324                 )
    325 

[... tensorflow/python/client/session.py internal frames elided ...]

KeyboardInterrupt: 

In [13]:
print(100 - 66.77)
print(100 - 66.48)
print(100 - 73.09)


33.230000000000004
33.519999999999996
26.909999999999997
