In [1]:
from model import *
import skimage
import pylab
%matplotlib inline

model_path='./models/model-72'
maxlen=30

with open(vgg_path) as f:
    fileContent = f.read()
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(fileContent)

images = tf.placeholder("float32", [1, 224, 224, 3])
tf.import_graph_def(graph_def, input_map={"images":images})
 
ixtoword = np.load('./data/ixtoword.npy').tolist()
n_words = len(ixtoword)

sess = tf.InteractiveSession()
 
caption_generator = Caption_Generator(
        dim_image=dim_image,
        dim_hidden=dim_hidden,
        dim_embed=dim_embed,
        batch_size=batch_size,
        n_lstm_steps=maxlen,
        n_words=n_words)

graph = tf.get_default_graph()

 
fc7_tf, generated_words_tf = caption_generator.build_generator(maxlen=maxlen)
     
saver = tf.train.Saver()
saver.restore(sess, model_path)

def captioning(test_image_path=None):

 
    pylab.imshow(skimage.io.imread(test_image_path))
    pylab.show()
    
 
    image_val = read_image(test_image_path)
    fc7 = sess.run(graph.get_tensor_by_name("import/fc7_relu:0"), feed_dict={images:image_val})
 
    
 
    generated_word_index= sess.run(generated_words_tf, feed_dict={fc7_tf:fc7})
    generated_word_index = np.hstack(generated_word_index)
 
    generated_words = [ixtoword[x] for x in generated_word_index]
    punctuation = np.argmax(np.array(generated_words) == '.')+1
 
    generated_words = generated_words[:punctuation]
    generated_sentence = ' '.join(generated_words)
    print generated_sentence

In [2]:
captioning('./acoustic-guitar-player.jpg')


---------------------------------------------------------------------------
InternalError                             Traceback (most recent call last)
<ipython-input-2-cf46ac092013> in <module>()
----> 1 captioning('./acoustic-guitar-player.jpg')

<ipython-input-1-6046e76e2a90> in captioning(test_image_path)
     44 
     45     image_val = read_image(test_image_path)
---> 46     fc7 = sess.run(graph.get_tensor_by_name("import/fc7_relu:0"), feed_dict={images:image_val})
     47 
     48 

/home/taeksoo/anaconda/lib/python2.7/site-packages/tensorflow/python/client/session.pyc in run(self, fetches, feed_dict)
    343 
    344     # Run request and get response.
--> 345     results = self._do_run(target_list, unique_fetch_targets, feed_dict_string)
    346 
    347     # User may have fetched the same tensor multiple times, but we

/home/taeksoo/anaconda/lib/python2.7/site-packages/tensorflow/python/client/session.pyc in _do_run(self, target_list, fetch_list, feed_dict)
    417         # pylint: disable=protected-access
    418         raise errors._make_specific_exception(node_def, op, e.error_message,
--> 419                                               e.code)
    420         # pylint: enable=protected-access
    421       raise e_type, e_value, e_traceback

InternalError: CopyCPUTensorToGPU: GPU Memcpy failed
	 [[Node: import/Const = Const[dtype=DT_FLOAT, value=Tensor<type: float shape: [25088,4096] values: 1.9745843e-05 0.00035308721 -0.0018327669...>, _device="/job:localhost/replica:0/task:0/gpu:0"]()]]
Caused by op u'import/Const', defined at:
  File "/home/taeksoo/anaconda/lib/python2.7/runpy.py", line 162, in _run_module_as_main
    "__main__", fname, loader, pkg_name)
  File "/home/taeksoo/anaconda/lib/python2.7/runpy.py", line 72, in _run_code
    exec code in run_globals
  File "/home/taeksoo/anaconda/lib/python2.7/site-packages/IPython/kernel/__main__.py", line 3, in <module>
    app.launch_new_instance()
  File "/home/taeksoo/anaconda/lib/python2.7/site-packages/IPython/config/application.py", line 574, in launch_instance
    app.start()
  File "/home/taeksoo/anaconda/lib/python2.7/site-packages/IPython/kernel/zmq/kernelapp.py", line 373, in start
    ioloop.IOLoop.instance().start()
  File "/home/taeksoo/anaconda/lib/python2.7/site-packages/zmq/eventloop/ioloop.py", line 151, in start
    super(ZMQIOLoop, self).start()
  File "/home/taeksoo/anaconda/lib/python2.7/site-packages/tornado/ioloop.py", line 866, in start
    handler_func(fd_obj, events)
  File "/home/taeksoo/anaconda/lib/python2.7/site-packages/tornado/stack_context.py", line 275, in null_wrapper
    return fn(*args, **kwargs)
  File "/home/taeksoo/anaconda/lib/python2.7/site-packages/zmq/eventloop/zmqstream.py", line 433, in _handle_events
    self._handle_recv()
  File "/home/taeksoo/anaconda/lib/python2.7/site-packages/zmq/eventloop/zmqstream.py", line 465, in _handle_recv
    self._run_callback(callback, msg)
  File "/home/taeksoo/anaconda/lib/python2.7/site-packages/zmq/eventloop/zmqstream.py", line 407, in _run_callback
    callback(*args, **kwargs)
  File "/home/taeksoo/anaconda/lib/python2.7/site-packages/tornado/stack_context.py", line 275, in null_wrapper
    return fn(*args, **kwargs)
  File "/home/taeksoo/anaconda/lib/python2.7/site-packages/IPython/kernel/zmq/kernelbase.py", line 252, in dispatcher
    return self.dispatch_shell(stream, msg)
  File "/home/taeksoo/anaconda/lib/python2.7/site-packages/IPython/kernel/zmq/kernelbase.py", line 213, in dispatch_shell
    handler(stream, idents, msg)
  File "/home/taeksoo/anaconda/lib/python2.7/site-packages/IPython/kernel/zmq/kernelbase.py", line 362, in execute_request
    user_expressions, allow_stdin)
  File "/home/taeksoo/anaconda/lib/python2.7/site-packages/IPython/kernel/zmq/ipkernel.py", line 181, in do_execute
    shell.run_cell(code, store_history=store_history, silent=silent)
  File "/home/taeksoo/anaconda/lib/python2.7/site-packages/IPython/core/interactiveshell.py", line 2871, in run_cell
    interactivity=interactivity, compiler=compiler, result=result)
  File "/home/taeksoo/anaconda/lib/python2.7/site-packages/IPython/core/interactiveshell.py", line 2975, in run_ast_nodes
    if self.run_code(code, result):
  File "/home/taeksoo/anaconda/lib/python2.7/site-packages/IPython/core/interactiveshell.py", line 3035, in run_code
    exec(code_obj, self.user_global_ns, self.user_ns)
  File "<ipython-input-1-6046e76e2a90>", line 15, in <module>
    tf.import_graph_def(graph_def, input_map={"images":images})
  File "/home/taeksoo/anaconda/lib/python2.7/site-packages/tensorflow/python/framework/importer.py", line 201, in import_graph_def
    compute_shapes=False)
  File "/home/taeksoo/anaconda/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 1710, in create_op
    original_op=self._default_original_op, op_def=op_def)
  File "/home/taeksoo/anaconda/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 988, in __init__
    self._traceback = _extract_stack()

In [2]:
captioning('https://encrypted-tbn2.gstatic.com/images?q=tbn:ANd9GcTl_V-lX4Vpze3JJQfVSDfanUnaBfPv61W5gBkqWfQ8Ga5aE5gD')


a young boy is playing baseball .

In [3]:
captioning('http://cs.stanford.edu/people/karpathy/deepimagesent/legos.png')


---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-3-1d110cb7fb6d> in <module>()
----> 1 captioning('http://cs.stanford.edu/people/karpathy/deepimagesent/legos.png')

<ipython-input-1-360be0133adc> in captioning(test_image_path, model_path, maxlen)
     30 
     31     graph = tf.get_default_graph()
---> 32     fc7 = sess.run(graph.get_tensor_by_name("import/fc7_relu:0"), feed_dict={images:image_val})
     33 
     34     fc7_tf, generated_words = caption_generator.build_generator(maxlen=maxlen)

/home/taeksoo/anaconda/lib/python2.7/site-packages/tensorflow/python/client/session.pyc in run(self, fetches, feed_dict)
    339                   'which has shape %r'
    340                   % (np_val.shape, subfeed_t.name,
--> 341                      tuple(subfeed_t.get_shape().dims)))
    342           feed_dict_string[str(subfeed_t.name)] = np_val
    343 

ValueError: Cannot feed value of shape (1, 224, 224, 4) for Tensor u'Placeholder_2:0', which has shape (Dimension(1), Dimension(224), Dimension(224), Dimension(3))

In [ ]: