3D Object Recognition


Zhiang Chen, Wyatt Newman

Aug 2016

1. Import Packages


In [2]:
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
import matplotlib.pyplot as plt
import random
import operator
import time
import os

2. Load Data


In [3]:
file_name = 'depth_data'
with open(file_name, 'rb') as f:
    save = pickle.load(f)
    dataset = save['dataset']
    names = save['names']
    orientations = save['orientations']
    del save
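
A quick sanity check on what was unpickled (a sketch assuming dataset is a NumPy array with one name and one orientation per image):


In [ ]:
print(dataset.shape, dataset.dtype)   # expected: (9099, 34, 34), matching the split sizes printed below
print(len(names), len(orientations))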

3. Preprocess Data


In [4]:
# generate integer labels for the 24 objects
image_size = 34
num_labels = 24
num_channels = 1

num_images = dataset.shape[0]
num_train = int(round(num_images*0.7)) # cast to int so the counts can be used as slice indices
num_valid = int(round(num_images*0.15))
num_test = int(round(num_images*0.15))
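# the test slice later takes whatever remains, so rounding drift in num_test is harmless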

name2value = {'v8':0,'ducky':1,'stapler':2,'pball':3,'tball':4,'sponge':5,'bclip':6,'tape':7,'gstick':8,'cup':9,
              'pen':10,'calc':11,'tmeas':12,'bottle':13,'cpin':14,'scissors':15,'stape':16,'gball':17,'orwidg':18,
             'glue':19,'spoon':20,'fork':21,'nerf':22,'eraser':23}
value2name = dict((value,name) for name,value in name2value.items())    

labels = np.array([name2value[name] for name in names], dtype=np.int32)

def randomize(dataset, labels):
    permutation = np.random.permutation(labels.shape[0])
    shuffled_dataset = dataset[permutation,:,:]
    shuffled_labels = labels[permutation]
    return shuffled_dataset, shuffled_labels

rdataset, rlabels = randomize(dataset, labels)
train_dataset = rdataset[0:num_train,:,:]
train_labels = rlabels[0:num_train]
valid_dataset = rdataset[num_train:(num_train+num_valid),:,:]
valid_labels = rlabels[num_train:(num_train+num_valid)]
test_dataset = rdataset[(num_train+num_valid):,:,:]
test_labels = rlabels[(num_train+num_valid):]
print('Training:', train_dataset.shape, train_labels.shape)
print('Validation:', valid_dataset.shape, valid_labels.shape)
print('Testing:', test_dataset.shape, test_labels.shape)


Training: (6369, 34, 34) (6369,)
Validation: (1365, 34, 34) (1365,)
Testing: (1365, 34, 34) (1365,)

Display a few images to verify that the data is still correct.


In [5]:
indices = [random.randrange(train_dataset.shape[0]) for _ in range(3)] # randrange excludes the upper bound
for index in indices:
    image = train_dataset[index,:,:]
    print(value2name[train_labels[index]])
    plt.imshow(image,cmap='Greys_r')
    plt.show()


nerf
nerf
gball

In [6]:
print('......Reformatting......')

def reformat(dataset, labels):
    dataset = dataset.reshape(
        (-1, image_size, image_size, num_channels)).astype(np.float32)
    labels = (np.arange(num_labels) == labels[:, None]).astype(np.float32)
    return dataset, labels

train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)


......Reformatting......
Training set (6369, 34, 34, 1) (6369, 24)
Validation set (1365, 34, 34, 1) (1365, 24)
Test set (1365, 34, 34, 1) (1365, 24)
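
reformat() one-hot encodes the labels by broadcasting: a row vector of class indices is compared against a column of integer labels. A toy example with hypothetical label values:


In [ ]:
# labels 2 and 0 against 4 classes -> one one-hot row per label
print((np.arange(4) == np.array([2, 0])[:, None]).astype(np.float32))
# [[ 0.  0.  1.  0.]
#  [ 1.  0.  0.  0.]]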

4. Define Custom Function


In [7]:
def accuracy(predictions, labels):
    return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
            / predictions.shape[0])
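
A toy check of accuracy() with made-up values, where the argmax of 2 out of 3 predictions matches the one-hot labels:


In [ ]:
p = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])
y = np.array([[1, 0], [0, 1], [0, 1]])
print(accuracy(p, y))  # 66.66...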

5. Build NN


In [8]:
batch_size = 16
patch_size = 5
kernel_size = 2
depth1 = 6   # depth of the 1st convolutional layer
depth2 = 16  # depth of the 2nd convolutional layer
C5_units = 120
F6_units = 84
F7_units = 10  # unused: the output layer is sized by num_labels instead

graph = tf.Graph()

with graph.as_default():
    # Input data
    tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size, image_size, num_channels))
    # the convolution input is a tensor of shape [batch, in_height, in_width, in_channels]
    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
    tf_valid_dataset = tf.constant(valid_dataset)
    tf_test_dataset = tf.constant(test_dataset)
    
    # Variables (weights and biases)
    C1_weights = tf.Variable(tf.truncated_normal([patch_size, patch_size, num_channels, depth1], stddev=0.1))
    # convolution weights are called filters in TensorFlow;
    # a filter is a tensor of shape [kernel_height, kernel_width, in_channels, out_channels]
    C1_biases = tf.Variable(tf.zeros([depth1]))
                            
    # S2_weights # the sub-sampling (pooling) layers need no weights or biases
    # S2_biases
    
    C3_weights = tf.Variable(tf.truncated_normal([patch_size, patch_size, depth1, depth2], stddev=0.1))
    C3_biases = tf.Variable(tf.constant(1.0, shape=[depth2]))
                            
    # S4_weights
    # S4_biases
     
    # C5 is actually a fully-connected layer
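    # Spatial-size bookkeeping behind the 6 * 6 below, from the layer definitions in this cell:
    # 34x34 input -> C1 (5x5, SAME) -> 34x34 -> 2x2 max pool, stride 2 -> 17x17
    # -> C3 (5x5, VALID) -> 13x13 -> 2x2 max pool, stride 2 -> 6x6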
    C5_weights = tf.Variable(tf.truncated_normal([6 * 6 * depth2, C5_units], stddev=0.1))
    C5_biases = tf.Variable(tf.constant(1.0, shape=[C5_units]))
         
    F6_weights = tf.Variable(tf.truncated_normal([C5_units,F6_units], stddev=0.1))
    F6_biases = tf.Variable(tf.constant(1.0, shape=[F6_units]))
                                
    # FC and logistic regression replace RBF
    F7_weights = tf.Variable(tf.truncated_normal([F6_units,num_labels], stddev=0.1))
    F7_biases = tf.Variable(tf.constant(1.0, shape=[num_labels]))

    saver = tf.train.Saver()
    # Model
    def model(data):
        conv = tf.nn.conv2d(data, C1_weights, [1, 1, 1, 1], padding='SAME')
        hidden = tf.nn.relu(conv + C1_biases) # ReLU in place of LeNet's original tanh
        
        max_pool = tf.nn.max_pool(hidden,[1,kernel_size,kernel_size,1],[1,2,2,1],'VALID')
        hidden = tf.nn.relu(max_pool) # a no-op, since max-pooled ReLU outputs are already non-negative
                                
        conv = tf.nn.conv2d(hidden, C3_weights, [1, 1, 1, 1], padding='VALID')
        hidden = tf.nn.relu(conv + C3_biases)

        max_pool = tf.nn.max_pool(hidden,[1,kernel_size,kernel_size,1],[1,2,2,1],'VALID')
        hidden = tf.nn.relu(max_pool)
                            
        shape = hidden.get_shape().as_list()
        reshape = tf.reshape(hidden, [shape[0], shape[1] * shape[2] * shape[3]])
        hidden = tf.nn.relu(tf.matmul(reshape, C5_weights) + C5_biases)
                            
        fc = tf.matmul(hidden,F6_weights)
        hidden = tf.nn.relu(fc + F6_biases)
        
        fc = tf.matmul(hidden,F7_weights)
        output = fc + F7_biases
    
        return output

    
    # Training computation.
    tf_train_dataset = tf.nn.dropout(tf_train_dataset,0.8) # input dropout with keep probability 0.8
    logits = model(tf_train_dataset)
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
    
    # Optimizer.
    optimizer = tf.train.GradientDescentOptimizer(0.0008).minimize(loss)
  
    # Predictions for the training, validation, and test data.
    train_prediction = tf.nn.softmax(logits)
    valid_prediction = tf.nn.softmax(model(tf_valid_dataset))
    test_prediction = tf.nn.softmax(model(tf_test_dataset))
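
As a rough sanity check on model size, the trainable-parameter count implied by the shapes above, worked out by hand:


In [ ]:
c1 = 5*5*1*6 + 6         # C1: 156
c3 = 5*5*6*16 + 16       # C3: 2,416
c5 = 6*6*16*120 + 120    # C5: 69,240
f6 = 120*84 + 84         # F6: 10,164
f7 = 84*24 + 24          # F7: 2,040
print(c1 + c3 + c5 + f6 + f7) # 84,016 parameters in total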

6. Train NN


In [9]:
## training
start_time = time.time()

num_steps = 70000
config = tf.ConfigProto()
config.gpu_options.allocator_type = 'BFC'
config.gpu_options.allow_growth = True
config.log_device_placement = True
with tf.Session(graph=graph, config = config) as session:
  #tf.initialize_all_variables().run() # uncomment to train from scratch
  saver.restore(session, "model.ckpt") # otherwise resume from a saved checkpoint
  print('Initialized')
  for step in range(num_steps):
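    # cycle the offset through the training set so each minibatch slice stays in bounds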
    offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
    batch_data = train_dataset[offset:(offset + batch_size), :, :, :]
    batch_labels = train_labels[offset:(offset + batch_size), :]
    feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
    _, l, predictions = session.run(
      [optimizer, loss, train_prediction], feed_dict=feed_dict)
    if (step % 1000 == 0):
      print('Minibatch loss at step %d: %f' % (step, l))
      print('Minibatch accuracy: %.1f%%' % accuracy(predictions, batch_labels))
      print('Validation accuracy: %.1f%%' % accuracy(valid_prediction.eval(), valid_labels))
      print('--------------------------------------')
  print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
  end_time = time.time()
  duration = (end_time - start_time)/60
  print("Excution time: %0.2fmin" % duration)
  save_path = saver.save(session, "new_model.ckpt")
  #print("Model saved in file: %s" % save_path)
  while True:
        i_test = input("Input an index of test image (or Enter to quit): ")
        if i_test == '':
            break # an empty reply would crash int() below, so quit cleanly
        label = test_labels[int(i_test),:].tolist()
        #print("Correct label: "+value2name[label.index(1)])
        image = test_dataset[int(i_test),:,:,:].reshape((-1,image_size,image_size,num_channels)).astype(np.float32)
        prediction = tf.nn.softmax(model(image)) # note: this adds new ops to the graph on every pass
        pre_dict = dict(zip(list(range(num_labels)),prediction.eval()[0]))
        sorted_pre_dict = sorted(pre_dict.items(), key=operator.itemgetter(1))
        name1 = value2name[sorted_pre_dict[-1][0]]
        value1 = str(sorted_pre_dict[-1][1])
        name2 = value2name[sorted_pre_dict[-2][0]]
        value2 = str(sorted_pre_dict[-2][1])
        title = name1+': '+value1+'\n'+name2+': '+value2
        image = image.reshape((image_size,image_size)).astype(np.float32)
        plt.imshow(image,cmap='Greys_r')
        plt.suptitle(title, fontsize=12)
        plt.xlabel(value2name[label.index(1)], fontsize=12)
        plt.show()


Initialized
Minibatch loss at step 0: 13.352912
Minibatch accuracy: 18.8%
Validation accuracy: 26.1%
--------------------------------------
Minibatch loss at step 1000: 1.381321
Minibatch accuracy: 43.8%
Validation accuracy: 58.4%
--------------------------------------
Minibatch loss at step 2000: 0.918441
Minibatch accuracy: 75.0%
Validation accuracy: 68.1%
--------------------------------------
Minibatch loss at step 3000: 1.339274
Minibatch accuracy: 43.8%
Validation accuracy: 70.8%
--------------------------------------
Minibatch loss at step 4000: 1.087052
Minibatch accuracy: 68.8%
Validation accuracy: 74.3%
--------------------------------------
Minibatch loss at step 5000: 0.257039
Minibatch accuracy: 93.8%
Validation accuracy: 77.1%
--------------------------------------
Minibatch loss at step 6000: 0.403238
Minibatch accuracy: 81.2%
Validation accuracy: 77.1%
--------------------------------------
Minibatch loss at step 7000: 0.404615
Minibatch accuracy: 81.2%
Validation accuracy: 78.6%
--------------------------------------
Minibatch loss at step 8000: 0.667443
Minibatch accuracy: 81.2%
Validation accuracy: 81.9%
--------------------------------------
Minibatch loss at step 9000: 0.647818
Minibatch accuracy: 81.2%
Validation accuracy: 81.3%
--------------------------------------
Minibatch loss at step 10000: 0.428819
Minibatch accuracy: 87.5%
Validation accuracy: 81.0%
--------------------------------------
Minibatch loss at step 11000: 0.568009
Minibatch accuracy: 81.2%
Validation accuracy: 85.9%
--------------------------------------
Minibatch loss at step 12000: 0.652045
Minibatch accuracy: 68.8%
Validation accuracy: 84.2%
--------------------------------------
Minibatch loss at step 13000: 0.806413
Minibatch accuracy: 68.8%
Validation accuracy: 85.9%
--------------------------------------
Minibatch loss at step 14000: 0.383349
Minibatch accuracy: 81.2%
Validation accuracy: 87.4%
--------------------------------------
Minibatch loss at step 15000: 0.365121
Minibatch accuracy: 87.5%
Validation accuracy: 87.3%
--------------------------------------
Minibatch loss at step 16000: 0.326139
Minibatch accuracy: 87.5%
Validation accuracy: 86.8%
--------------------------------------
Minibatch loss at step 17000: 0.117603
Minibatch accuracy: 100.0%
Validation accuracy: 90.4%
--------------------------------------
Minibatch loss at step 18000: 0.238482
Minibatch accuracy: 93.8%
Validation accuracy: 90.0%
--------------------------------------
Minibatch loss at step 19000: 0.520544
Minibatch accuracy: 75.0%
Validation accuracy: 90.5%
--------------------------------------
Minibatch loss at step 20000: 0.421573
Minibatch accuracy: 87.5%
Validation accuracy: 91.6%
--------------------------------------
Minibatch loss at step 21000: 0.108577
Minibatch accuracy: 100.0%
Validation accuracy: 91.4%
--------------------------------------
Minibatch loss at step 22000: 0.675667
Minibatch accuracy: 62.5%
Validation accuracy: 88.6%
--------------------------------------
Minibatch loss at step 23000: 0.361797
Minibatch accuracy: 81.2%
Validation accuracy: 90.8%
--------------------------------------
Minibatch loss at step 24000: 0.235820
Minibatch accuracy: 93.8%
Validation accuracy: 90.5%
--------------------------------------
Minibatch loss at step 25000: 0.266976
Minibatch accuracy: 93.8%
Validation accuracy: 91.9%
--------------------------------------
Minibatch loss at step 26000: 0.225786
Minibatch accuracy: 93.8%
Validation accuracy: 91.0%
--------------------------------------
Minibatch loss at step 27000: 0.250573
Minibatch accuracy: 87.5%
Validation accuracy: 89.2%
--------------------------------------
Minibatch loss at step 28000: 0.573944
Minibatch accuracy: 81.2%
Validation accuracy: 88.9%
--------------------------------------
Minibatch loss at step 29000: 0.070726
Minibatch accuracy: 100.0%
Validation accuracy: 91.4%
--------------------------------------
Minibatch loss at step 30000: 0.672722
Minibatch accuracy: 75.0%
Validation accuracy: 87.3%
--------------------------------------
Minibatch loss at step 31000: 0.275050
Minibatch accuracy: 81.2%
Validation accuracy: 90.9%
--------------------------------------
Minibatch loss at step 32000: 0.171859
Minibatch accuracy: 87.5%
Validation accuracy: 91.3%
--------------------------------------
Minibatch loss at step 33000: 0.196412
Minibatch accuracy: 87.5%
Validation accuracy: 91.4%
--------------------------------------
Minibatch loss at step 34000: 0.102589
Minibatch accuracy: 100.0%
Validation accuracy: 93.3%
--------------------------------------
Minibatch loss at step 35000: 0.103044
Minibatch accuracy: 100.0%
Validation accuracy: 92.0%
--------------------------------------
Minibatch loss at step 36000: 0.184930
Minibatch accuracy: 93.8%
Validation accuracy: 93.0%
--------------------------------------
Minibatch loss at step 37000: 0.269569
Minibatch accuracy: 87.5%
Validation accuracy: 87.7%
--------------------------------------
Minibatch loss at step 38000: 0.270516
Minibatch accuracy: 93.8%
Validation accuracy: 94.3%
--------------------------------------
Minibatch loss at step 39000: 0.219511
Minibatch accuracy: 93.8%
Validation accuracy: 94.1%
--------------------------------------
Minibatch loss at step 40000: 0.238111
Minibatch accuracy: 93.8%
Validation accuracy: 94.8%
--------------------------------------
Minibatch loss at step 41000: 0.076796
Minibatch accuracy: 100.0%
Validation accuracy: 95.5%
--------------------------------------
Minibatch loss at step 42000: 0.174779
Minibatch accuracy: 93.8%
Validation accuracy: 93.0%
--------------------------------------
Minibatch loss at step 43000: 0.088568
Minibatch accuracy: 100.0%
Validation accuracy: 94.7%
--------------------------------------
Minibatch loss at step 44000: 0.167920
Minibatch accuracy: 93.8%
Validation accuracy: 93.1%
--------------------------------------
Minibatch loss at step 45000: 0.173531
Minibatch accuracy: 93.8%
Validation accuracy: 93.6%
--------------------------------------
Minibatch loss at step 46000: 0.139143
Minibatch accuracy: 100.0%
Validation accuracy: 96.2%
--------------------------------------
Minibatch loss at step 47000: 0.089795
Minibatch accuracy: 100.0%
Validation accuracy: 94.1%
--------------------------------------
Minibatch loss at step 48000: 0.077640
Minibatch accuracy: 93.8%
Validation accuracy: 95.4%
--------------------------------------
Minibatch loss at step 49000: 0.140015
Minibatch accuracy: 93.8%
Validation accuracy: 94.4%
--------------------------------------
Minibatch loss at step 50000: 0.229509
Minibatch accuracy: 87.5%
Validation accuracy: 94.4%
--------------------------------------
Minibatch loss at step 51000: 0.112484
Minibatch accuracy: 100.0%
Validation accuracy: 95.7%
--------------------------------------
Minibatch loss at step 52000: 0.143606
Minibatch accuracy: 87.5%
Validation accuracy: 96.0%
--------------------------------------
Minibatch loss at step 53000: 0.105780
Minibatch accuracy: 93.8%
Validation accuracy: 96.8%
--------------------------------------
Minibatch loss at step 54000: 0.174531
Minibatch accuracy: 87.5%
Validation accuracy: 92.9%
--------------------------------------
Minibatch loss at step 55000: 0.310782
Minibatch accuracy: 87.5%
Validation accuracy: 92.9%
--------------------------------------
Minibatch loss at step 56000: 0.021368
Minibatch accuracy: 100.0%
Validation accuracy: 96.4%
--------------------------------------
Minibatch loss at step 57000: 0.437142
Minibatch accuracy: 87.5%
Validation accuracy: 93.6%
--------------------------------------
Minibatch loss at step 58000: 0.129235
Minibatch accuracy: 93.8%
Validation accuracy: 95.5%
--------------------------------------
Minibatch loss at step 59000: 0.275089
Minibatch accuracy: 87.5%
Validation accuracy: 91.0%
--------------------------------------
Minibatch loss at step 60000: 0.112258
Minibatch accuracy: 93.8%
Validation accuracy: 93.5%
--------------------------------------
Minibatch loss at step 61000: 0.036378
Minibatch accuracy: 100.0%
Validation accuracy: 97.1%
--------------------------------------
Minibatch loss at step 62000: 0.028859
Minibatch accuracy: 100.0%
Validation accuracy: 93.8%
--------------------------------------
Minibatch loss at step 63000: 0.079996
Minibatch accuracy: 93.8%
Validation accuracy: 95.6%
--------------------------------------
Minibatch loss at step 64000: 0.037926
Minibatch accuracy: 100.0%
Validation accuracy: 95.9%
--------------------------------------
Minibatch loss at step 65000: 0.047300
Minibatch accuracy: 100.0%
Validation accuracy: 96.8%
--------------------------------------
Minibatch loss at step 66000: 0.010440
Minibatch accuracy: 100.0%
Validation accuracy: 95.6%
--------------------------------------
Minibatch loss at step 67000: 0.063077
Minibatch accuracy: 100.0%
Validation accuracy: 97.2%
--------------------------------------
Minibatch loss at step 68000: 0.034991
Minibatch accuracy: 100.0%
Validation accuracy: 96.9%
--------------------------------------
Minibatch loss at step 69000: 0.066298
Minibatch accuracy: 100.0%
Validation accuracy: 92.0%
--------------------------------------
Test accuracy: 97.3%
Execution time: 5.93min
Input an index of test image (or Enter to quit): 

Use the Trained Variables

The graph is rebuilt exactly as in section 5 so that tf.train.Saver can match each checkpointed variable when restoring.


In [7]:
batch_size = 16
patch_size = 5
kernel_size = 2
depth1 = 6   # depth of the 1st convolutional layer
depth2 = 16  # depth of the 2nd convolutional layer
C5_units = 120
F6_units = 84
F7_units = 10  # unused: the output layer is sized by num_labels instead

graph = tf.Graph()

with graph.as_default():
    # Input data
    tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size, image_size, num_channels))
    # the convolution input is a tensor of shape [batch, in_height, in_width, in_channels]
    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
    tf_valid_dataset = tf.constant(valid_dataset)
    tf_test_dataset = tf.constant(test_dataset)
    
    # Variables (weights and biases)
    C1_weights = tf.Variable(tf.truncated_normal([patch_size, patch_size, num_channels, depth1], stddev=0.1))
    # convolution weights are called filters in TensorFlow;
    # a filter is a tensor of shape [kernel_height, kernel_width, in_channels, out_channels]
    C1_biases = tf.Variable(tf.zeros([depth1]))
                            
    # S2_weights # the sub-sampling (pooling) layers need no weights or biases
    # S2_biases
    
    C3_weights = tf.Variable(tf.truncated_normal([patch_size, patch_size, depth1, depth2], stddev=0.1))
    C3_biases = tf.Variable(tf.constant(1.0, shape=[depth2]))
                            
    # S4_weights
    # S4_biases
     
    # C5 is actually a fully-connected layer
    C5_weights = tf.Variable(tf.truncated_normal([6 * 6 * depth2, C5_units], stddev=0.1))
    C5_biases = tf.Variable(tf.constant(1.0, shape=[C5_units]))
         
    F6_weights = tf.Variable(tf.truncated_normal([C5_units,F6_units], stddev=0.1))
    F6_biases = tf.Variable(tf.constant(1.0, shape=[F6_units]))
                                
    # FC and logistic regression replace RBF
    F7_weights = tf.Variable(tf.truncated_normal([F6_units,num_labels], stddev=0.1))
    F7_biases = tf.Variable(tf.constant(1.0, shape=[num_labels]))

    saver = tf.train.Saver()
    # Model
    def model(data):
        conv = tf.nn.conv2d(data, C1_weights, [1, 1, 1, 1], padding='SAME')
        hidden = tf.nn.relu(conv + C1_biases) # ReLU in place of LeNet's original tanh
        
        max_pool = tf.nn.max_pool(hidden,[1,kernel_size,kernel_size,1],[1,2,2,1],'VALID')
        hidden = tf.nn.relu(max_pool)
                                
        conv = tf.nn.conv2d(hidden, C3_weights, [1, 1, 1, 1], padding='VALID')
        hidden = tf.nn.relu(conv + C3_biases)

        max_pool = tf.nn.max_pool(hidden,[1,kernel_size,kernel_size,1],[1,2,2,1],'VALID')
        hidden = tf.nn.relu(max_pool)
                            
        shape = hidden.get_shape().as_list()
        reshape = tf.reshape(hidden, [shape[0], shape[1] * shape[2] * shape[3]])
        hidden = tf.nn.relu(tf.matmul(reshape, C5_weights) + C5_biases)
                            
        fc = tf.matmul(hidden,F6_weights)
        hidden = tf.nn.relu(fc + F6_biases)
        
        fc = tf.matmul(hidden,F7_weights)
        output = fc + F7_biases
    
        return output

    
    # Training computation.
    tf_train_dataset = tf.nn.dropout(tf_train_dataset,0.8) # input dropout with keep probability 0.8
    logits = model(tf_train_dataset)
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
    
    # Optimizer.
    optimizer = tf.train.GradientDescentOptimizer(0.0008).minimize(loss)
  
    # Predictions for the training, validation, and test data.
    train_prediction = tf.nn.softmax(logits)
    valid_prediction = tf.nn.softmax(model(tf_valid_dataset))
    test_prediction = tf.nn.softmax(model(tf_test_dataset))
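
# Optional pre-flight check before restoring: list the variables the Saver
# will look for in the checkpoint (tf.all_variables() is the call for this
# TensorFlow version).
with graph.as_default():
    for v in tf.all_variables():
        print(v.name, v.get_shape())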
    
config = tf.ConfigProto()
config.log_device_placement = True   
#wd = os.getcwd()
with tf.Session(graph=graph, config = config) as session:
  saver.restore(session, "model.ckpt")
  print("Model restored.")
  print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
  while True:
        i_test = input("Input an index of test image (or Enter to quit): ")
        if i_test == '':
            break # an empty reply would crash int() below, so quit cleanly
        label = test_labels[int(i_test),:].tolist()
        #print("Correct label: "+value2name[label.index(1)])
        image = test_dataset[int(i_test),:,:,:].reshape((-1,image_size,image_size,num_channels)).astype(np.float32)
        prediction = tf.nn.softmax(model(image)) # note: this adds new ops to the graph on every pass
        pre_dict = dict(zip(list(range(num_labels)),prediction.eval()[0]))
        sorted_pre_dict = sorted(pre_dict.items(), key=operator.itemgetter(1))
        name1 = value2name[sorted_pre_dict[-1][0]]
        value1 = str(sorted_pre_dict[-1][1])
        name2 = value2name[sorted_pre_dict[-2][0]]
        value2 = str(sorted_pre_dict[-2][1])
        title = name1+': '+value1+'\n'+name2+': '+value2
        image = image.reshape((image_size,image_size)).astype(np.float32)
        plt.imshow(image,cmap='Greys_r')
        plt.suptitle(title, fontsize=12)
        plt.xlabel(value2name[label.index(1)], fontsize=12)
        plt.show()


Model restored.
-----------------------------------------------------------------------
NameError                             Traceback (most recent call last)
<ipython-input-7-e31310745ad2> in <module>()
     91   saver.restore(session, "model.ckpt")
     92   print("Model restored.")
---> 93   print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
     94   i_test = 0
     95   while(i_test!=''):

NameError: name 'test_prediction' is not defined