Import


In [ ]:
import tensorflow as tf
from PIL import Image
import numpy as np
from scipy.misc import imread, imresize  # requires SciPy < 1.2 (imread/imresize were removed in later releases)
from imagenet_classes import class_names
import os

File Path


In [ ]:
# File paths
# filepath_input  = "./data/run/"  # input csv file path
filepath_ckpt = "./ckpt/model_weight.ckpt"  # checkpoint path for the weight saver
filepath_pred = "./output/predicted.csv"    # output path for predicted values
filename_queue_description = tf.train.string_input_producer(['./data/description/raw_data.csv'])
num_record = 50  # number of CSV records / images to read

LSTM - Hyperparameters


In [4]:
label_vec_size = 5                   # labels per record
input_vec_size = 27                  # one-hot vocabulary size
batch_size = 50
state_size_1 = 100                   # first LSTM cell size
state_size_2 = 4096 + state_size_1   # second LSTM: VGG fc1 width + layer-1 output width
hidden = 15                          # timesteps (sequence length)
learning_rate = 0.01
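
state_size_2 encodes the model's wiring: each timestep of the second LSTM consumes the first LSTM's output concatenated with the 4096-d VGG fc1 feature vector. A one-line sanity check (a sketch using the names above):


In [ ]:
# Sketch: the second LSTM's input width is the VGG fc1 width plus layer 1's output width.
assert state_size_2 == 4096 + state_size_1  # 4196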

vgg16


In [ ]:
class vgg16:
    def __init__(self, imgs, weights=None, sess=None):
        self.imgs = imgs
        self.convlayers()
        self.fc_layers()
        self.probs = tf.nn.softmax(self.fc3l)
        if weights is not None and sess is not None:
            self.load_weights(weights, sess)


    def convlayers(self):
        self.parameters = []

        # zero-mean input
        with tf.name_scope('preprocess') as scope:
            mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32, shape=[1, 1, 1, 3], name='img_mean')
            images = self.imgs-mean

        # conv1_1
        with tf.name_scope('conv1_1') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 3, 64], dtype=tf.float32,
                                                     stddev=1e-1), name='weights')
            conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),
                                 trainable=True, name='biases')
            out = tf.nn.bias_add(conv, biases)
            self.conv1_1 = tf.nn.relu(out, name=scope)
            self.parameters += [kernel, biases]

        # conv1_2
        with tf.name_scope('conv1_2') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 64], dtype=tf.float32,
                                                     stddev=1e-1), name='weights')
            conv = tf.nn.conv2d(self.conv1_1, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),
                                 trainable=True, name='biases')
            out = tf.nn.bias_add(conv, biases)
            self.conv1_2 = tf.nn.relu(out, name=scope)
            self.parameters += [kernel, biases]

        # pool1
        self.pool1 = tf.nn.max_pool(self.conv1_2,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='SAME',
                               name='pool1')

        # conv2_1
        with tf.name_scope('conv2_1') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 128], dtype=tf.float32,
                                                     stddev=1e-1), name='weights')
            conv = tf.nn.conv2d(self.pool1, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32),
                                 trainable=True, name='biases')
            out = tf.nn.bias_add(conv, biases)
            self.conv2_1 = tf.nn.relu(out, name=scope)
            self.parameters += [kernel, biases]

        # conv2_2
        with tf.name_scope('conv2_2') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 128, 128], dtype=tf.float32,
                                                     stddev=1e-1), name='weights')
            conv = tf.nn.conv2d(self.conv2_1, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32),
                                 trainable=True, name='biases')
            out = tf.nn.bias_add(conv, biases)
            self.conv2_2 = tf.nn.relu(out, name=scope)
            self.parameters += [kernel, biases]

        # pool2
        self.pool2 = tf.nn.max_pool(self.conv2_2,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='SAME',
                               name='pool2')

        # conv3_1
        with tf.name_scope('conv3_1') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 128, 256], dtype=tf.float32,
                                                     stddev=1e-1), name='weights')
            conv = tf.nn.conv2d(self.pool2, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
                                 trainable=True, name='biases')
            out = tf.nn.bias_add(conv, biases)
            self.conv3_1 = tf.nn.relu(out, name=scope)
            self.parameters += [kernel, biases]

        # conv3_2
        with tf.name_scope('conv3_2') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256], dtype=tf.float32,
                                                     stddev=1e-1), name='weights')
            conv = tf.nn.conv2d(self.conv3_1, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
                                 trainable=True, name='biases')
            out = tf.nn.bias_add(conv, biases)
            self.conv3_2 = tf.nn.relu(out, name=scope)
            self.parameters += [kernel, biases]

        # conv3_3
        with tf.name_scope('conv3_3') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256], dtype=tf.float32,
                                                     stddev=1e-1), name='weights')
            conv = tf.nn.conv2d(self.conv3_2, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
                                 trainable=True, name='biases')
            out = tf.nn.bias_add(conv, biases)
            self.conv3_3 = tf.nn.relu(out, name=scope)
            self.parameters += [kernel, biases]

        # pool3
        self.pool3 = tf.nn.max_pool(self.conv3_3,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='SAME',
                               name='pool3')

        # conv4_1
        with tf.name_scope('conv4_1') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 512], dtype=tf.float32,
                                                     stddev=1e-1), name='weights')
            conv = tf.nn.conv2d(self.pool3, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),
                                 trainable=True, name='biases')
            out = tf.nn.bias_add(conv, biases)
            self.conv4_1 = tf.nn.relu(out, name=scope)
            self.parameters += [kernel, biases]

        # conv4_2
        with tf.name_scope('conv4_2') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,
                                                     stddev=1e-1), name='weights')
            conv = tf.nn.conv2d(self.conv4_1, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),
                                 trainable=True, name='biases')
            out = tf.nn.bias_add(conv, biases)
            self.conv4_2 = tf.nn.relu(out, name=scope)
            self.parameters += [kernel, biases]

        # conv4_3
        with tf.name_scope('conv4_3') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,
                                                     stddev=1e-1), name='weights')
            conv = tf.nn.conv2d(self.conv4_2, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),
                                 trainable=True, name='biases')
            out = tf.nn.bias_add(conv, biases)
            self.conv4_3 = tf.nn.relu(out, name=scope)
            self.parameters += [kernel, biases]

        # pool4
        self.pool4 = tf.nn.max_pool(self.conv4_3,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='SAME',
                               name='pool4')

        # conv5_1
        with tf.name_scope('conv5_1') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,
                                                     stddev=1e-1), name='weights')
            conv = tf.nn.conv2d(self.pool4, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),
                                 trainable=True, name='biases')
            out = tf.nn.bias_add(conv, biases)
            self.conv5_1 = tf.nn.relu(out, name=scope)
            self.parameters += [kernel, biases]

        # conv5_2
        with tf.name_scope('conv5_2') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,
                                                     stddev=1e-1), name='weights')
            conv = tf.nn.conv2d(self.conv5_1, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),
                                 trainable=True, name='biases')
            out = tf.nn.bias_add(conv, biases)
            self.conv5_2 = tf.nn.relu(out, name=scope)
            self.parameters += [kernel, biases]

        # conv5_3
        with tf.name_scope('conv5_3') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,
                                                     stddev=1e-1), name='weights')
            conv = tf.nn.conv2d(self.conv5_2, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),
                                 trainable=True, name='biases')
            out = tf.nn.bias_add(conv, biases)
            self.conv5_3 = tf.nn.relu(out, name=scope)
            self.parameters += [kernel, biases]

        # pool5
        self.pool5 = tf.nn.max_pool(self.conv5_3,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='SAME',
                               name='pool5')

    def fc_layers(self):
        # fc1
        with tf.name_scope('fc1') as scope:
            shape = int(np.prod(self.pool5.get_shape()[1:]))
            fc1w = tf.Variable(tf.truncated_normal([shape, 4096],
                                                         dtype=tf.float32,
                                                         stddev=1e-1), name='weights')
            fc1b = tf.Variable(tf.constant(1.0, shape=[4096], dtype=tf.float32),
                                 trainable=True, name='biases')
            pool5_flat = tf.reshape(self.pool5, [-1, shape])
            fc1l = tf.nn.bias_add(tf.matmul(pool5_flat, fc1w), fc1b)
            self.fc1 = tf.nn.relu(fc1l)
            self.parameters += [fc1w, fc1b]

        # fc2
        with tf.name_scope('fc2') as scope:
            fc2w = tf.Variable(tf.truncated_normal([4096, 4096],
                                                         dtype=tf.float32,
                                                         stddev=1e-1), name='weights')
            fc2b = tf.Variable(tf.constant(1.0, shape=[4096], dtype=tf.float32),
                                 trainable=True, name='biases')
            fc2l = tf.nn.bias_add(tf.matmul(self.fc1, fc2w), fc2b)
            self.fc2 = tf.nn.relu(fc2l)
            self.parameters += [fc2w, fc2b]

        # fc3
        with tf.name_scope('fc3') as scope:
            fc3w = tf.Variable(tf.truncated_normal([4096, 1000],
                                                         dtype=tf.float32,
                                                         stddev=1e-1), name='weights')
            fc3b = tf.Variable(tf.constant(1.0, shape=[1000], dtype=tf.float32),
                                 trainable=True, name='biases')
            self.fc3l = tf.nn.bias_add(tf.matmul(self.fc2, fc3w), fc3b)
            self.parameters += [fc3w, fc3b]

    def load_weights(self, weight_file, sess):
        weights = np.load(weight_file)
        keys = sorted(weights.keys())
        for i, k in enumerate(keys):
            print(i, k, np.shape(weights[k]))
            sess.run(self.parameters[i].assign(weights[k]))

load_vgg16


In [ ]:
with tf.Session() as sess_vgg:
    imgs = tf.placeholder(tf.float32, [None, 200, 200, 3])
    vgg = vgg16(imgs, 'vgg16_weights.npz', sess_vgg)
    img_files = ['./data/img/cropped/' + i for i in os.listdir('./data/img/cropped')]
    img_arrays = [imread(file, mode='RGB') for file in img_files]  # renamed to avoid shadowing the placeholder
    temps = [sess_vgg.run(vgg.fc1, feed_dict={vgg.imgs: [img_arrays[i]]})[0] for i in range(num_record)]
    reimgs = np.reshape(a=temps, newshape=[num_record, -1])
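
A minimal sanity check on the extracted features (a sketch; it assumes the 50 cropped 200x200 RGB images above and the 4096-unit fc1 layer):


In [ ]:
# Sketch: each image should map to a single 4096-d fc1 feature vector.
assert reimgs.shape == (num_record, 4096)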

File Info


In [ ]:
reader = tf.TextLineReader()
key,value = reader.read(filename_queue_description)
record_defaults = [[-1]] * 5 + [[-2]] * 15  # 5 label columns, then 15 word columns
lab1, lab2, lab3, lab4, lab5, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15 = tf.decode_csv(value, record_defaults)  

feature_label = tf.stack([lab1, lab2, lab3, lab4, lab5])
feature_word = tf.stack([w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15])
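
Each row of raw_data.csv is assumed to hold 20 comma-separated integers: five label fields followed by fifteen token indices in [0, 26] (matching input_vec_size = 27). A quick sketch with a hypothetical row, purely for illustration:


In [ ]:
# Sketch (hypothetical row; the real data may differ): 5 labels + 15 token indices.
sample_row = "0,1,0,0,1,1,3,1,20,0,0,0,0,0,0,0,0,0,0,0"
fields = [int(v) for v in sample_row.split(",")]
sample_labels, sample_words = fields[:5], fields[5:]
assert len(sample_labels) == label_vec_size and len(sample_words) == hidden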

In [ ]:
with tf.Session() as sess_data:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    for i in range(num_record):
        label, raw_word = sess_data.run([feature_label, feature_word])
        onehot = tf.one_hot(indices=raw_word, depth=input_vec_size)
        # accumulate the one-hot word sequences and the labels across all records
        if i == 0:
            full_input = onehot
            full_label = label
        else:
            full_input = tf.concat([full_input, onehot], 0)
            full_label = tf.concat([full_label, label], 0)
    coord.request_stop()
    coord.join(threads)

Text Reader

def input_pipeline(filenames, batch_size, num_epochs=None):
    filename_queue = tf.train.string_input_producer(filenames, num_epochs=num_epochs, shuffle=False)
    reader = tf.TextLineReader()
    key, value = reader.read(filename_queue)
    # note: decoding a CSV text line as PNG is a leftover from an image-pipeline draft
    images = tf.image.decode_png(value, channels=3, dtype=tf.uint8)
    record_defaults = [[-1]] * 5 + [[-2]] * 15
    lab1, lab2, lab3, lab4, lab5, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15 = tf.decode_csv(value, record_defaults)

    feature_label = tf.stack([lab1, lab2, lab3, lab4, lab5])
    feature_word = tf.stack([w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15])
    example_batch, label_batch = tf.train.batch([images, feature_label], batch_size=batch_size)
    return example_batch, label_batch

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    input_pipeline(['./data/description/raw_data.csv'], num_epochs=1, batch_size=10)
    coord.request_stop()
    coord.join(threads)

Batching


In [ ]:
with tf.name_scope('batch') as scope:
    # full_label = tf.reshape(full_label, [batch_size, hidden, label_vec_size])
    full_input = tf.reshape(full_input, [batch_size, hidden, input_vec_size])
    input_batch, label_batch = tf.train.batch([full_input, full_label], batch_size=1)

LSTM First Layer


In [ ]:
with tf.name_scope('lstm_layer_1') as scope:
    with tf.variable_scope('lstm_layer_1'):
        rnn_cell_1 = tf.contrib.rnn.BasicLSTMCell(state_size_1, reuse=None)
        output_1, _ = tf.contrib.rnn.static_rnn(rnn_cell_1, tf.unstack(full_input, axis=1), dtype=tf.float32)
#         output_w_1 = tf.Variable(tf.truncated_normal([hidden, state_size_1, input_vec_size]))
#         output_b_1 = tf.Variable(tf.zeros([input_vec_size]))
#         pred_temp = tf.matmul(output_1, output_w_1) + output_b_1

In [ ]:
with tf.Session() as sess_temp:
    sess_temp.run(tf.global_variables_initializer())  # LSTM kernels must be initialized before evaluation
    print(sess_temp.run(tf.shape(output_1)))

matrix_concat


In [ ]:
input_2 = [tf.concat([out, reimgs], axis=1) for out in output_1]
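
Shape-wise, output_1 is a list of hidden tensors of shape [batch_size, state_size_1]; concatenating each with the [batch_size, 4096] VGG features yields [batch_size, state_size_2] inputs for the second LSTM. The same operation replayed with dummy NumPy arrays (a sketch, illustrative only):


In [ ]:
# Sketch: the concatenation above, with dummy arrays to make the shapes explicit.
dummy_out = np.zeros((batch_size, state_size_1), dtype=np.float32)  # one timestep of output_1
dummy_feat = np.zeros((batch_size, 4096), dtype=np.float32)         # stands in for reimgs
dummy_cat = np.concatenate([dummy_out, dummy_feat], axis=1)
assert dummy_cat.shape == (batch_size, state_size_2)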

LSTM Second Layer


In [ ]:
with tf.name_scope('lstm_layer_2') as scope:
    with tf.variable_scope('lstm_layer_2'):
        rnn_cell_2 = tf.contrib.rnn.BasicLSTMCell(state_size_2, reuse=None)
        output_2, _ = tf.contrib.rnn.static_rnn(rnn_cell_2, tf.unstack(input_2, axis=0), dtype=tf.float32)
        output_w_2 = tf.Variable(tf.truncated_normal([hidden, state_size_2, input_vec_size]))
        output_b_2 = tf.Variable(tf.zeros([input_vec_size]))
        pred = tf.nn.softmax(tf.matmul(output_2, output_w_2) + output_b_2)

In [ ]:
with tf.name_scope('loss') as scope:
    loss = tf.constant(0, tf.float32)
    for i in range(hidden):
        # note: tf.losses.softmax_cross_entropy expects unnormalized logits;
        # pred has already been through tf.nn.softmax, so softmax is applied twice here.
        loss += tf.losses.softmax_cross_entropy(tf.unstack(full_input, axis=1)[i], tf.unstack(pred, axis=0)[i])
    train = tf.train.AdamOptimizer(learning_rate).minimize(loss)

In [ ]:
with tf.Session() as sess_train:
    sess_train.run(tf.global_variables_initializer())
    saver = tf.train.Saver()

    for i in range(31):
        sess_train.run(train)
        if i % 5 == 0:
            print("loss : ", sess_train.run(loss))
    save_path = saver.save(sess_train, filepath_ckpt)
    print("= Weights are saved in " + filepath_ckpt)

Test


In [39]:
with tf.Session() as sess_vgg_test:
    imgs = tf.placeholder(tf.float32, [None, 200, 200, 3])
    vgg = vgg16(imgs, 'vgg16_weights.npz', sess_vgg_test)
    test_img_files = ['./data/img/cropped/001.png']
    test_imgs = [imread(file, mode='RGB') for file in test_img_files]
    temps = [sess_vgg_test.run(vgg.fc1, feed_dict={vgg.imgs: [img]})[0] for img in test_imgs]
    test_reimgs = np.reshape(a=temps, newshape=[1, -1])


0 conv1_1_W (3, 3, 3, 64)
1 conv1_1_b (64,)
2 conv1_2_W (3, 3, 64, 64)
3 conv1_2_b (64,)
4 conv2_1_W (3, 3, 64, 128)
5 conv2_1_b (128,)
6 conv2_2_W (3, 3, 128, 128)
7 conv2_2_b (128,)
8 conv3_1_W (3, 3, 128, 256)
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-39-be50d3c1e8f0> in <module>()
----> 3     vgg = vgg16(imgs, 'vgg16_weights.npz', sess_vgg_test)
(traceback truncated: the run was interrupted manually inside load_weights while assigning the pretrained VGG parameters)

In [ ]:
start_input = tf.zeros([1,15,27])
with tf.Session() as sess_init_generator:
    input_init = sess_init_generator.run(start_input)
sos = [0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]  # one-hot <sos> token at index 1
input_init[0][0] = sos
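
The same seed tensor can be built directly in NumPy, without a session (a sketch; the _np-suffixed name is illustrative):


In [ ]:
# Sketch: equivalent construction of the seed input in plain NumPy.
input_init_np = np.zeros((1, hidden, input_vec_size), dtype=np.float32)
input_init_np[0, 0, 1] = 1.0  # one-hot <sos> at timestep 0
assert (input_init_np == input_init).all()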

In [ ]:
with tf.name_scope('lstm_layer_1') as scope:
    with tf.variable_scope('lstm_layer_1'):
        rnn_cell_1 = tf.contrib.rnn.BasicLSTMCell(state_size_1, reuse=True)
        output_test_1, _ = tf.contrib.rnn.static_rnn(rnn_cell_1, tf.unstack(input_init, axis=1), dtype=tf.float32)
# output_t_1 = tf.contrib.rnn.static_rnn(rnn_cell, tf.unstack(full_input, axis=1), dtype=tf.float32)
# pred = tf.nn.softmax(tf.matmul(output1, output_w[0]) + output_b[0])

In [ ]:
input_2 = [tf.concat([out, test_reimgs], axis=1) for out in output_test_1]

In [13]:
with tf.name_scope('lstm_layer_2') as scope:
    with tf.variable_scope('lstm_layer_2'):
        rnn_cell_2 = tf.contrib.rnn.BasicLSTMCell(state_size_2, reuse=True)  # reuse the trained layer-2 weights, as in layer 1
        output_2, _ = tf.contrib.rnn.static_rnn(rnn_cell_2, tf.unstack(input_2, axis=0), dtype=tf.float32)
        output_w_2 = tf.Variable(tf.truncated_normal([hidden, state_size_2, input_vec_size]))
        output_b_2 = tf.Variable(tf.zeros([input_vec_size]))
        pred = tf.nn.softmax(tf.matmul(output_2, output_w_2) + output_b_2)

In [14]:
sess_model = tf.Session()
saver = tf.train.Saver(allow_empty=True)
saver.restore(sess_model, filepath_ckpt)


INFO:tensorflow:Restoring parameters from ./ckpt/model_weight.ckpt
first_test = sess_model.run(pred)[0]
input_init[0][1] = first_test
second_test = sess_model.run(pred)[1]
input_init[0][2] = second_test
third_test = sess_model.run(pred)[2]
input_init[0][3] = third_test

In [33]:
for i in range(hidden):
    result = sess_model.run(pred)
    # feed the i-th predicted step back in as the (i+1)-th input
    if i < hidden - 1:
        input_init[0][i + 1] = result[i]

Result Check


In [36]:
print(result.shape)


(15, 1, 27)

In [37]:
decoded_result = np.argmax(a=result, axis=2)
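
If the 27 indices encode a character vocabulary, the argmax indices map back to symbols. A sketch under that assumption (the actual mapping is fixed by whatever preprocessing built raw_data.csv; the vocabulary below is hypothetical):


In [ ]:
# Sketch: hypothetical index-to-symbol mapping, index 0 as padding.
vocab = ['_'] + [chr(ord('a') + i) for i in range(26)]  # 27 symbols
decoded_tokens = [vocab[idx] for idx in decoded_result[:, 0]]
print(''.join(decoded_tokens))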

In [34]:
print(result)


[[[  0.00000000e+00   1.00000000e+00   0.00000000e+00 ...   0.00000000e+00]]

 [[  0.00000000e+00   0.00000000e+00   1.00000000e+00 ...   0.00000000e+00]]

 ...

 [[  1.00000000e+00   0.00000000e+00   0.00000000e+00 ...   0.00000000e+00]]]

(output truncated: a (15, 1, 27) array of near-one-hot probability rows)

In [38]:
print(decoded_result)


[[ 1]
 [ 2]
 [ 3]
 [ 4]
 [ 4]
 [ 7]
 [ 9]
 [11]
 [12]
 [10]
 [16]
 [ 7]
 [ 0]
 [21]
 [ 0]]

Code Storage

# Scratch snippets kept for reference (apparently from the TensorFlow PTB LSTM tutorial; not runnable as-is):
def lstm_cell():
    return tf.contrib.rnn.BasicLSTMCell(size, forget_bias=0.0, state_is_tuple=True)

inputs = tf.nn.embedding_lookup(embedding, input_.input_data)
# if is_training and config.keep_prob < 1: ...
output = tf.reshape(tf.stack(axis=1, values=outputs), [-1, size])
softmax_w = tf.get_variable("softmax_w", [size, vocab_size], dtype=data_type())
softmax_b = tf.get_variable("softmax_b", [vocab_size], dtype=data_type())
logits = tf.matmul(output, softmax_w) + softmax_b
loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
    [logits], [tf.reshape(input_.targets, [-1])],
    [tf.ones([batch_size * num_steps], dtype=data_type())])
self._cost = cost = tf.reduce_sum(loss) / batch_size
self._final_state = state

# VGG16 classification demo:
sess = tf.Session()
imgs = tf.placeholder(tf.float32, [None, 200, 200, 3])
vgg = vgg16(imgs, 'vgg16_weights.npz', sess)
img1 = imread('data/img/cropped/002.png', mode='RGB')
prob = sess.run(vgg.probs, feed_dict={vgg.imgs: [img1]})[0]
preds = (np.argsort(prob)[::-1])[0:5]
for p in preds:
    print(class_names[p], prob[p])