Autonomous Braking

Ethan Petersen, Josh Laesch, Ben Wong

The Dream Team


In [1]:
import numpy as np
import matplotlib.pylab as pylab
import imageio
import pandas as pd
import tensorflow as tf
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import accuracy_score
from imageio.core.util import asarray as imgToArr
%matplotlib inline

Data Processing

We perform some preprocessing before feeding the data into the models. Namely, we split the data into training, validation, and test sets containing equal numbers of braking and non-braking frames. Note: frames 80,000 to 100,000 correspond to congested city-center traffic following a large truck.


In [2]:
# videoFile = './data/driving.avi'
# vid = imageio.get_reader(videoFile,  'ffmpeg')

# Columns: Frame, Brake, GazeX, GazeY
dataFile = './data/cleaned_data.csv'
df = pd.read_csv(dataFile, delimiter='\t')

brake = df[df['Brake'] > 0]
nonbrake = df[df['Brake'] == 0]
nonbrake = nonbrake[:len(brake)]  # Braking frames are far fewer than non-braking ones, so trim to match
df = pd.concat([brake, nonbrake])
df = df.drop(df[df['GazeX'] < 0].index)
df = df.drop(df[df['GazeY'] < 0].index)
df = df.dropna()
df = df.reset_index(drop=True)  # Resets the index to the usual 0, 1, 2, ...

# One-hot encode brakes (binarize first, since Brake may record pedal pressure rather than 0/1)
outputs = OneHotEncoder(sparse=False).fit_transform((df['Brake'] > 0).astype(int).values.reshape(-1, 1))  # column 0: no brake, column 1: brake
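
The text above mentions a three-way split, which this cell does not perform itself. A minimal sketch of such a split (the 80/10/10 proportions are an assumption, not taken from the project):


In [ ]:
# Hypothetical train/validation/test split over the balanced DataFrame;
# the 80/10/10 proportions are an assumption.
shuffled = df.sample(frac=1, random_state=0).reset_index(drop=True)
n_train = int(0.8 * len(shuffled))
n_val = int(0.9 * len(shuffled))
train_df, val_df, test_df = shuffled[:n_train], shuffled[n_train:n_val], shuffled[n_val:]
print(len(train_df), len(val_df), len(test_df))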

Helper methods

We define some helper functions: get_sub_seq extracts a short window of past brake labels (zero-padded at the start), minibatch splits the data into shuffled batches for SGD-style training, get_glimpses processes a list of images by cutting out small glimpses centered on a list of gaze coordinates, and get_glimpse performs the actual cutting.


In [3]:
def get_sub_seq(seq, start, end):
    """Get the sub sequence starting at the start index and ending at the end index."""
    arr = seq[max([0, start]):end]
    if start < 0:
        # Zero-pad the front when the window extends before the first frame
        arr = np.append(np.zeros((abs(start), 2)), arr, axis=0)
    for i in range(len(arr)):
        if np.sum(arr[i]) == 0:
            arr[i] = [1, 0]  # Default padded or unlabeled rows to "no brake"
    return arr

def minibatch(data, batch_size, data_size):
    """Generates a minibatch from the given data and parameters."""
    randomized = np.random.permutation(data)
    batches = []
    num_batches = 0
    while num_batches * batch_size < data_size:
        new_batch = randomized[num_batches * batch_size:(num_batches + 1) * batch_size]
        batches.append(new_batch)
        num_batches += 1
    return batches

def get_glimpses(images, coords):
    """Gets a batch of glimpses."""
    arr = []
    for img, coord in zip(images, coords):
        arr.append(get_glimpse(img, coord[0], coord[1]))
    return np.array(arr)

def get_glimpse(image, x, y, stride=14):
    """Returns a subsection (glimpse) of the image centered on the given point."""
    x = int(x)  # Force to int
    y = int(y)  # Force to int
    min_x = x - stride
    max_x = x + stride

    min_y = y - stride
    max_y = y + stride
    # NOTE: row, column, RGB; the slices are not clamped, so glimpses centered
    # near the border come back smaller than 2*stride x 2*stride
    image_glimpse = image[min_y:max_y, min_x:max_x, :]
#     image_glimpse = image[min_y:max_y, min_x:max_x, 0]  # NOTE: row, column, RGB; everything is greyscale; flatten RGB layer
    return imgToArr(image_glimpse)
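
As a quick sanity check (the frame below is synthetic, not taken from the driving video), get_glimpse with the default stride of 14 should return a 28x28x3 window; glimpses centered near the border come back smaller because the slices are not clamped.


In [ ]:
# Synthetic frame for illustration only (not a frame from the driving video)
fake_frame = np.zeros((480, 640, 3), dtype=np.uint8)
glimpse = get_glimpse(fake_frame, 100, 60)
print(glimpse.shape)  # (28, 28, 3): 2*stride rows and columns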

In [4]:
# In training, we sometimes pull straight from the video and specify a stride length
# but we have also stored 28x28x3 glimpses for each frame
input_glimpses = np.zeros((80000, 28, 28, 3))
input_gazes = np.zeros((80000, 2))
outputs = np.zeros((80000, 2))
for batch in range(1, 9):
    file_name = "data/glimpse_batchc_{0}.npz".format(batch)
    array = np.load(file_name)
    input_glimpses[(batch - 1) * 10000: batch * 10000] = array['frames']
    input_gazes[(batch - 1) * 10000: batch * 10000] = array['gazes']
    outputs[(batch - 1) * 10000: batch * 10000] = array['braking']

# Default unlabeled (all-zero) rows to "no brake"
for i in range(len(outputs)):
    if np.sum(outputs[i]) == 0:
        outputs[i] = [1, 0]

# One-hot brake labels for the 3 preceding frames, flattened to 6 values per row
sequences = np.array([get_sub_seq(outputs, i-3, i) for i in range(len(outputs))])
sequences = sequences.reshape(-1, 3*2)
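
Each row of sequences flattens the one-hot brake labels of the three preceding frames, so a quick check should report a (80000, 6) shape, with fully padded rows defaulting to "no brake":


In [ ]:
# Sanity check: 3 preceding frames x 2 one-hot columns = 6 values per row
print(sequences.shape)  # (80000, 6)
print(sequences[0])     # All padding, so three copies of [1, 0]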

In [5]:
test_inds = minibatch(range(len(input_glimpses)), 10000, len(input_glimpses))[0]
test_ind_set = set(test_inds)  # Set membership keeps the filter below O(n) rather than O(n^2)
training_inds = [i for i in range(len(input_glimpses)) if i not in test_ind_set]

Logistic Regression

This first model is plain logistic regression over the flattened glimpse and the gaze coordinates, trained with minibatch gradient descent (Adam). With this model, we achieve about 60% accuracy on the training set within 100 epochs.


In [6]:
"""Create a logistic regression model for brake classification with 28x28x3 image input."""
# Create placeholders for inputs that will be placed via batches
image_input = tf.placeholder(tf.float32, [None, 28*28*3], name="image")
gaze_input = tf.placeholder(tf.float32, [None, 2], name="gaze")
y_ = tf.placeholder(tf.float32, [None, 2], name="output")

image_weights = tf.Variable(tf.truncated_normal([28*28*3, 2], stddev=1), name="image_weights")
gaze_weights = tf.Variable(tf.truncated_normal([2, 2], stddev=1), name="gaze_weights")

image_bias = tf.Variable(tf.truncated_normal([2], stddev=1), name="image_bias")
gaze_bias = tf.Variable(tf.truncated_normal([2], stddev=1), name="gaze_bias")

image_logits = tf.matmul(image_input, image_weights) + image_bias
gaze_logits = tf.matmul(gaze_input, gaze_weights) + gaze_bias

logits = tf.mul(tf.add(image_logits, gaze_logits), 0.5)  # Average the image and gaze logit streams
y = tf.nn.softmax(logits)

cross_entropy = tf.reduce_mean(tf.reduce_sum(-y_*tf.log(tf.clip_by_value(y, 1e-10,1.0)),reduction_indices=[1]))
optimizer = tf.train.AdamOptimizer().minimize(cross_entropy)

# initialization of variables
init = tf.initialize_all_variables()

# Define computations for accuracy calculation
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

In [7]:
input_glimpse_flat = input_glimpses.reshape(-1, 28*28*3)

with tf.Session() as sess:
    sess.run(init)
    indices = range(len(input_glimpses))
    for epoch in range(100):
        batches = minibatch(indices, 10000, len(indices))

        for index_batch in batches:
            glimpses = input_glimpses[index_batch]
            glimpses = glimpses.reshape(-1, 28*28*3)
            gazes = input_gazes[index_batch]
            output = outputs[index_batch]
#             print("Nonbraking: {:.0f}\tBraking: {:.0f}".format(output[:, 0].sum(), output[:, 1].sum()))
            sess.run(optimizer, feed_dict={image_input: glimpses, gaze_input: gazes, y_: output})
        ce = sess.run(cross_entropy, feed_dict={image_input: input_glimpse_flat, gaze_input: input_gazes, y_: outputs})
        acc = sess.run(accuracy, feed_dict={image_input: input_glimpse_flat, gaze_input: input_gazes, y_: outputs})
#         pred = sess.run(y, feed_dict={image_input: glimpses, gaze_input: gazes, y_: output})
#         num_pred_nonbrake = pred[:, 0].sum()
#         num_pred_brake = pred[:, 1].sum()
#         print("\tNon-brake: {:.0f}\tBrake: {:.0f}".format(num_pred_nonbrake, num_pred_brake))
        print("\tCross-entropy: {:.3f}\tAccuracy: {:.3f}".format(ce, acc))


	Cross-entropy: 11.522	Accuracy: 0.498
	Cross-entropy: 10.991	Accuracy: 0.520
	Cross-entropy: 10.120	Accuracy: 0.557
	Cross-entropy: 10.072	Accuracy: 0.559
	Cross-entropy: 10.168	Accuracy: 0.555
	Cross-entropy: 10.046	Accuracy: 0.560
	Cross-entropy: 9.952	Accuracy: 0.564
	Cross-entropy: 9.952	Accuracy: 0.564
	Cross-entropy: 9.949	Accuracy: 0.565
	Cross-entropy: 9.901	Accuracy: 0.567
	Cross-entropy: 9.900	Accuracy: 0.567
	Cross-entropy: 9.863	Accuracy: 0.568
	Cross-entropy: 9.837	Accuracy: 0.569
	Cross-entropy: 9.835	Accuracy: 0.570
	Cross-entropy: 9.842	Accuracy: 0.569
	Cross-entropy: 9.903	Accuracy: 0.567
	Cross-entropy: 9.803	Accuracy: 0.571
	Cross-entropy: 9.764	Accuracy: 0.573
	Cross-entropy: 9.914	Accuracy: 0.567
	Cross-entropy: 9.953	Accuracy: 0.566
	Cross-entropy: 9.654	Accuracy: 0.578
	Cross-entropy: 9.623	Accuracy: 0.580
	Cross-entropy: 9.598	Accuracy: 0.581
	Cross-entropy: 9.550	Accuracy: 0.583
	Cross-entropy: 9.567	Accuracy: 0.583
	Cross-entropy: 9.740	Accuracy: 0.576
	Cross-entropy: 9.539	Accuracy: 0.584
	Cross-entropy: 9.437	Accuracy: 0.588
	Cross-entropy: 9.458	Accuracy: 0.588
	Cross-entropy: 9.403	Accuracy: 0.590
	Cross-entropy: 9.387	Accuracy: 0.591
	Cross-entropy: 9.441	Accuracy: 0.588
	Cross-entropy: 9.477	Accuracy: 0.587
	Cross-entropy: 9.348	Accuracy: 0.592
	Cross-entropy: 9.339	Accuracy: 0.593
	Cross-entropy: 9.347	Accuracy: 0.593
	Cross-entropy: 9.366	Accuracy: 0.592
	Cross-entropy: 9.397	Accuracy: 0.590
	Cross-entropy: 9.329	Accuracy: 0.593
	Cross-entropy: 9.316	Accuracy: 0.594
	Cross-entropy: 9.345	Accuracy: 0.593
	Cross-entropy: 9.279	Accuracy: 0.596
	Cross-entropy: 9.315	Accuracy: 0.594
	Cross-entropy: 9.262	Accuracy: 0.596
	Cross-entropy: 9.249	Accuracy: 0.597
	Cross-entropy: 9.251	Accuracy: 0.597
	Cross-entropy: 9.241	Accuracy: 0.597
	Cross-entropy: 9.230	Accuracy: 0.597
	Cross-entropy: 9.222	Accuracy: 0.598
	Cross-entropy: 9.218	Accuracy: 0.598
	Cross-entropy: 9.217	Accuracy: 0.598
	Cross-entropy: 9.202	Accuracy: 0.599
	Cross-entropy: 9.201	Accuracy: 0.599
	Cross-entropy: 9.206	Accuracy: 0.599
	Cross-entropy: 9.187	Accuracy: 0.600
	Cross-entropy: 9.181	Accuracy: 0.600
	Cross-entropy: 9.184	Accuracy: 0.600
	Cross-entropy: 9.181	Accuracy: 0.600
	Cross-entropy: 9.170	Accuracy: 0.600
	Cross-entropy: 9.187	Accuracy: 0.600
	Cross-entropy: 9.157	Accuracy: 0.601
	Cross-entropy: 9.171	Accuracy: 0.600
	Cross-entropy: 9.167	Accuracy: 0.601
	Cross-entropy: 9.144	Accuracy: 0.602
	Cross-entropy: 9.140	Accuracy: 0.602
	Cross-entropy: 9.137	Accuracy: 0.602
	Cross-entropy: 9.169	Accuracy: 0.601
	Cross-entropy: 9.167	Accuracy: 0.601
	Cross-entropy: 9.161	Accuracy: 0.601
	Cross-entropy: 9.177	Accuracy: 0.600
	Cross-entropy: 9.173	Accuracy: 0.600
	Cross-entropy: 9.146	Accuracy: 0.601
	Cross-entropy: 9.137	Accuracy: 0.602
	Cross-entropy: 9.121	Accuracy: 0.602
	Cross-entropy: 9.120	Accuracy: 0.603
	Cross-entropy: 9.131	Accuracy: 0.602
	Cross-entropy: 9.118	Accuracy: 0.603
	Cross-entropy: 9.131	Accuracy: 0.602
	Cross-entropy: 9.114	Accuracy: 0.603
	Cross-entropy: 9.106	Accuracy: 0.603
	Cross-entropy: 9.104	Accuracy: 0.603
	Cross-entropy: 9.100	Accuracy: 0.603
	Cross-entropy: 9.112	Accuracy: 0.603
	Cross-entropy: 9.119	Accuracy: 0.603
	Cross-entropy: 9.083	Accuracy: 0.604
	Cross-entropy: 9.089	Accuracy: 0.604
	Cross-entropy: 9.100	Accuracy: 0.604
	Cross-entropy: 9.092	Accuracy: 0.604
	Cross-entropy: 9.077	Accuracy: 0.605
	Cross-entropy: 9.070	Accuracy: 0.605
	Cross-entropy: 9.094	Accuracy: 0.604
	Cross-entropy: 9.108	Accuracy: 0.603
	Cross-entropy: 9.055	Accuracy: 0.606
	Cross-entropy: 9.068	Accuracy: 0.605
	Cross-entropy: 9.071	Accuracy: 0.605
	Cross-entropy: 9.056	Accuracy: 0.606
	Cross-entropy: 9.072	Accuracy: 0.605
	Cross-entropy: 9.047	Accuracy: 0.606
	Cross-entropy: 9.044	Accuracy: 0.606
	Cross-entropy: 9.041	Accuracy: 0.606

Feedforward Neural Net

Our next model is a feedforward neural network with a single 1024-neuron hidden layer and ReLU activations. We train it with the same methodology as the logistic regression model (minibatch gradient descent on the cross-entropy).


In [ ]:
"""Create neural network for brake classification with 28x28x3 image input."""
# Create placeholders for inputs that will be placed via batches
image_input = tf.placeholder(tf.float32, [None, 28*28*3], name="image")
gaze_input = tf.placeholder(tf.float32, [None, 2], name="gaze")
y_ = tf.placeholder(tf.float32, [None, 2], name="output")

image_weights = tf.Variable(tf.truncated_normal([28*28*3, 1024], stddev=1), name="image_weights")
image_hidden_weights = tf.Variable(tf.truncated_normal([1024, 2], stddev=1), name="image_hidden_weights")

gaze_weights = tf.Variable(tf.truncated_normal([2, 1024], stddev=1), name="gaze_weights")
gaze_hidden_weights = tf.Variable(tf.truncated_normal([1024, 2], stddev=1), name="gaze_hidden_weights")

image_bias = tf.Variable(tf.truncated_normal([1024], stddev=1), name="image_bias")
image_hidden_bias = tf.Variable(tf.truncated_normal([2], stddev=1), name="image_hidden_bias")

gaze_bias = tf.Variable(tf.truncated_normal([1024], stddev=1), name="gaze_bias")
gaze_hidden_bias = tf.Variable(tf.truncated_normal([2], stddev=1), name="gaze_hidden_bias")

image_input_layer = tf.matmul(image_input, image_weights) + image_bias
image_hidden_layer = tf.matmul(tf.nn.relu(image_input_layer), image_hidden_weights) + image_hidden_bias

gaze_input_layer = tf.matmul(gaze_input, gaze_weights) + gaze_bias
gaze_hidden_layer = tf.matmul(tf.nn.relu(gaze_input_layer), gaze_hidden_weights) + gaze_hidden_bias

logits = tf.mul(tf.add(image_hidden_layer, gaze_hidden_layer), 0.5)  # Average the two logit streams
y = tf.nn.softmax(logits)

cross_entropy = tf.reduce_mean(tf.reduce_sum(-y_*tf.log(tf.clip_by_value(y, 1e-10,1.0)),reduction_indices=[1]))
optimizer = tf.train.AdamOptimizer().minimize(cross_entropy)

# initialization of variables
init = tf.initialize_all_variables()

# Define computations for accuracy calculation
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
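
For scale, the hidden layer makes this model far larger than the logistic one; a quick parameter count from the variable shapes above:


In [ ]:
# Parameter count computed from the variable shapes defined above
image_params = 28*28*3*1024 + 1024*2 + 1024 + 2  # weights, hidden weights, biases
gaze_params = 2*1024 + 1024*2 + 1024 + 2
print(image_params + gaze_params)  # roughly 2.4 million parameters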

In [ ]:
input_glimpse_flat = input_glimpses.reshape(-1, 28*28*3)

with tf.Session() as sess:
    sess.run(init)
    indices = range(len(input_glimpses))
    for epoch in range(100):
        batches = minibatch(indices, 1000, len(indices))

        for index_batch in batches:
            glimpses = input_glimpses[index_batch]
            glimpses = glimpses.reshape(-1, 28*28*3)
            gazes = input_gazes[index_batch]
            output = outputs[index_batch]
#             print("Nonbraking: {:.0f}\tBraking: {:.0f}".format(output[:, 0].sum(), output[:, 1].sum()))
            sess.run(optimizer, feed_dict={image_input: glimpses, gaze_input: gazes, y_: output})
        ce = sess.run(cross_entropy, feed_dict={image_input: input_glimpse_flat, gaze_input: input_gazes, y_: outputs})
        acc = sess.run(accuracy, feed_dict={image_input: input_glimpse_flat, gaze_input: input_gazes, y_: outputs})
#         pred = sess.run(y, feed_dict={image_input: glimpses, gaze_input: gazes, y_: output})
#         num_pred_nonbrake = pred[:, 0].sum()
#         num_pred_brake = pred[:, 1].sum()
#         print("\tNon-brake: {:.0f}\tBrake: {:.0f}".format(num_pred_nonbrake, num_pred_brake))
        print("\tCross-entropy: {:.3f}\tAccuracy: {:.3f}".format(ce, acc))

Convolutional Neural Network

The feedforward network's results were promising! With just one hidden layer, we brought the initial training cross-entropy down from 11 to 10 (still bad, but improving). Now we'll try a wide-and-deep network: the deep part is a convnet on the image glimpse, while the wide part applies logistic regressions to the driver's gaze coordinates and the recent braking history.


In [6]:
# Define some helper methods that will abstract variable initialization and layer definitions
def weight_variable(shape, mean=0.0, wd=None):
    initial = tf.truncated_normal(shape, mean=mean, stddev=0.1)
    var = tf.Variable(initial)
    if wd is not None:
        # Create the variable first so the L2 penalty can reference it
        weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)  # Store losses in a collection
    return var

def bias_variable(shape, wd=None):
    initial = tf.constant(0.5, shape=shape)
    var = tf.Variable(initial)
    if wd is not None:
        weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)  # Store losses in a collection
    return var

def conv2d(x, W, name='conv'):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME', name=name)

def max_pool_2x2(x, name='max_pool_2x2'):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=name)

In [7]:
# Define convolutional computation graph

# Placeholders for inputs: image, gaze (x, y), braking history, output
image_input = tf.placeholder(tf.float32, [None, 28, 28, 1], name="image")
gaze_input = tf.placeholder(tf.float32, [None, 2], name="gaze")
brake_seq_input = tf.placeholder(tf.float32, [None, 3*2], name="brake_sequence")
y_ = tf.placeholder(tf.float32, [None, 2], name="output")
keep_prob = 0.5  # Dropout keep probability; fixed here, so dropout stays active even at evaluation time


# Convolutional net for image processing

# First layer
W_conv1 = weight_variable([5, 5, 1, 32])  # 5x5x1 filter with 32 features
b_conv1 = bias_variable([32])             # Bias for each filter

h_conv1 = tf.nn.relu(conv2d(image_input, W_conv1) + b_conv1, name='conv1')
h_pool1 = max_pool_2x2(h_conv1, name='pool1')

# Second layer
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])

h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2, name='conv2')
h_pool2 = max_pool_2x2(h_conv2, name='pool2')

# Fully-connected hidden layer (two rounds of 2x2 max-pooling shrink 28x28 to 7x7, hence 7*7*64 inputs)
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])

h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1, name='hidden1')

# Add dropout for fully-connected hidden layer
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

# Final logits
W_fc2 = weight_variable([1024, 2])
b_fc2 = bias_variable([2])

image_logits = tf.matmul(h_fc1_drop, W_fc2) + b_fc2


# Logistic regression for human gaze
W_g = weight_variable([2, 2])
b_g = bias_variable([2])

gaze_logits = tf.matmul(gaze_input, W_g) + b_g


# Logistic regression for braking sequence
W_bs = weight_variable([3*2, 2])
b_bs = bias_variable([2])

bs_logits = tf.matmul(brake_seq_input, W_bs) + b_bs

# Weights for final logits
image_logits_weights = weight_variable([2, 2], mean=0.3)
gaze_logits_weights = weight_variable([2, 2], mean=0.3)
bs_logits_weights = weight_variable([2, 2], mean=0.3)
bias = bias_variable([2])

# Combine logistic and convnet
logits = tf.add(tf.matmul(image_logits, image_logits_weights)
                + tf.matmul(gaze_logits, gaze_logits_weights)
                + tf.matmul(bs_logits, bs_logits_weights), bias)
y = tf.nn.softmax(logits)

cross_entropy = tf.reduce_mean(tf.reduce_sum(-y_*tf.log(tf.clip_by_value(y, 1e-10,1.0)),reduction_indices=[1]))
optimizer = tf.train.AdamOptimizer().minimize(cross_entropy)

# initialization of variables
init = tf.initialize_all_variables()

# Define computations for accuracy calculation
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
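
One caveat: weight_variable and bias_variable push L2 penalties into a 'losses' collection, but the training cells only ever minimize cross_entropy, so with wd left at None nothing is regularized. If weight decay were enabled, the collected terms would need to be folded into the objective; a sketch of how that could look:


In [ ]:
# Sketch only: consume the 'losses' collection when wd is passed to the
# variable helpers. The notebook's training cells optimize cross_entropy alone.
tf.add_to_collection('losses', cross_entropy)
total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')
optimizer = tf.train.AdamOptimizer().minimize(total_loss)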

In [8]:
CONVNET_FILE_NAME = "model/convnet.ckpt"
with tf.Session() as sess:
    sess.run(init)
    saver = tf.train.Saver()
    saver.restore(sess, CONVNET_FILE_NAME)
    for epoch in range(100):
#         batches = minibatch(training_inds, 10000, len(training_inds))
#         print("Batches: {0}".format(len(batches)))
#         for batch_num, index_batch in enumerate(batches):
#             print("\tProcessing batch {0}".format(batch_num + 1))
#             glimpses = input_glimpses[index_batch, :, :, :1]
#             gazes = input_gazes[index_batch]
#             seq = sequences[index_batch]
#             output = outputs[index_batch]
# #             print("Nonbraking: {:.0f}\tBraking: {:.0f}".format(output[:, 0].sum(), output[:, 1].sum()))
#             sess.run(optimizer, feed_dict={image_input: glimpses, gaze_input: gazes, brake_seq_input: seq, y_: output})

#         save_path = saver.save(sess, CONVNET_FILE_NAME)
#         print("Model saved in file: %s" % save_path)
        
        # Calculate cross-entropy and accuracy on the 10,000 held-out test frames
        ce, acc = sess.run([cross_entropy, accuracy], feed_dict={image_input: input_glimpses[test_inds, :, :, :1],
                                                                 gaze_input: input_gazes[test_inds],
                                                                 brake_seq_input: sequences[test_inds],
                                                                 y_: outputs[test_inds]})

#         num_pred_nonbrake = pred[:, 0].sum()
#         num_pred_brake = pred[:, 1].sum()
#         print("\tNon-brake: {:.0f}\tBrake: {:.0f}".format(num_pred_nonbrake, num_pred_brake))
        print("\tCross-entropy: {:.3f}\tAccuracy: {:.3f}".format(ce, acc))


	Cross-entropy: 0.134	Accuracy: 0.988
(KeyboardInterrupt: the evaluation loop was stopped manually after the first pass.)
