In [1]:
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import os
from scipy.misc import imread
import inception_v1
import json
# uncomment the line below to hide the GPU and force CPU-only execution
# os.environ['CUDA_VISIBLE_DEVICES'] = ''
In [2]:
with open('real-label.json', 'r') as fopen:
    labels = json.load(fopen)
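As a quick sanity check (assuming real-label.json maps stringified class indices to human-readable names, which is what the lookup in the last cell expects):

print(len(labels))   # should be around 1001 entries if it covers every Inception V1 class
print(labels['0'])   # label for class index 0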
In [3]:
img = imread('fucking-panda.jpg')
img.shape
Out[3]:
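Note: scipy.misc.imread was deprecated in SciPy 1.0 and removed in 1.2, so on a newer install you can load the image with imageio instead, e.g.:

import imageio
img = imageio.imread('fucking-panda.jpg')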
In [4]:
tf.reset_default_graph()
sess = tf.InteractiveSession()
X = tf.placeholder(tf.float32, [None, None, 3])
# scale uint8 pixels from [0, 255] to roughly [-1, 1], as Inception expects
image = X / 128. - 1
# add a batch dimension and resize to the 224x224 input the network was trained on
image = tf.expand_dims(image, 0)
image = tf.image.resize_images(image, (224, 224))
with slim.arg_scope(inception_v1.inception_v1_arg_scope()):
    # 1001 classes = 1000 ImageNet classes plus a background class
    logits, endpoints = inception_v1.inception_v1(image, num_classes=1001, is_training=False)
sess.run(tf.global_variables_initializer())
# restore only the InceptionV1 variables from the pretrained checkpoint
var_lists = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='InceptionV1')
saver = tf.train.Saver(var_list=var_lists)
saver.restore(sess, 'inception_v1.ckpt')
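To verify the restore worked, you can count the variables pulled from the checkpoint and peek at the named intermediate activations the slim model exposes (a quick sanity check, not part of inference):

print(len(var_lists))              # number of InceptionV1 variables restored
print(list(endpoints.keys())[:5])  # first few endpoints, e.g. the early conv layers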
In [5]:
%%time
sess.run(logits, feed_dict={X: img})
# the first run is slow: TensorFlow pays a one-off cost to initialize and cache the GPU kernels
Out[5]:
In [8]:
%%time
labels[str(np.argmax(sess.run(logits, feed_dict={X: img})[0]))]
# run the same inference again for a more representative timing
Out[8]:
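The argmax above only returns the single best class. A short extension (a sketch reusing the same labels dict from above) turns the logits into probabilities and prints the top-5 guesses:

probs = sess.run(tf.nn.softmax(logits), feed_dict={X: img})[0]
for i in np.argsort(probs)[::-1][:5]:
    print(labels[str(i)], probs[i])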