In [2]:
# https://www.tensorflow.org/extend/estimators
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# tensorflow
import tensorflow as tf
import tensorflow.contrib.rnn as rnn
import tensorflow.contrib.learn as tflearn
import tensorflow.contrib.layers as tflayers

# keras
from tensorflow.contrib.keras.python.keras.layers import Dense, LSTM, GRU, Activation
from tensorflow.contrib.keras.python.keras.utils.data_utils import get_file

# input data
from tensorflow.examples.tutorials.mnist import input_data

# estimators
from tensorflow.contrib import learn

# estimator "builder"
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib

# helpers
import numpy as np
import random
import sys

# enable logs
tf.logging.set_verbosity(tf.logging.INFO)

def sample(preds, temperature=1.0):
    #print(preds)
    return np.argmax(preds)

# THE MODEL
def model_fn(features, targets, mode, params):
    """Model function for Estimator."""
    
    # 1. Configure the model via TensorFlow operations
    # First, build all the model, a good idea is using Keras or tf.layers
    # since these are high-level API's
    #lstm = GRU(128, input_shape=(params["maxlen"], params["vocab_size"]))(features)
    #preds = Dense(params["vocab_size"], activation='sigmoid')(lstm)
    
    # 0. Reformat input shape to become a sequence
    lstm1 = GRU(128, input_shape=(params["maxlen"], params["vocab_size"]),
                return_sequences=False)(features)
    #lstm2 = GRU(128)(lstm1)
    preds = Dense(params["vocab_size"])(lstm1)
    preds_softmax = Activation("softmax")(preds)

    # 2. Define the loss function for training/evaluation
    loss = None
    train_op = None
    
    # Calculate Loss (for both TRAIN and EVAL modes)
    if mode != learn.ModeKeys.PREDICT:
        loss = tf.losses.softmax_cross_entropy(
            onehot_labels=targets, logits=preds)

    # 3. Define the training operation/optimizer
    
    # Configure the Training Op (for TRAIN mode)
    if mode == learn.ModeKeys.TRAIN:
        train_op = tf.contrib.layers.optimize_loss(
            loss=loss,
            global_step=tf.contrib.framework.get_global_step(),
            learning_rate=params["learning_rate"],
            optimizer="RMSProp",
        )

    # 4. Generate predictions
    predictions_dict = {
      "preds": preds_softmax
    }
    
    # 5. Define how you want to evaluate the model
    metrics = {
        "accuracy": tf.metrics.accuracy(tf.argmax(input=preds_softmax, axis=1), tf.argmax(input=targets, axis=1))
    }
    
    # 6. Return predictions/loss/train_op/eval_metric_ops in ModelFnOps object
    return model_fn_lib.ModelFnOps(
      mode=mode,
      predictions=predictions_dict,
      loss=loss,
      train_op=train_op,
      eval_metric_ops=metrics)

In [4]:
print('Getting data')

#path = get_file('nietzsche.txt', origin='https://s3.amazonaws.com/text-datasets/nietzsche.txt')
path = 'shakespeare.txt'
text = open(path).read().lower()
print('corpus length:', len(text))

chars = sorted(list(set(text)))
print('total chars:', len(chars))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))

# cut the text in semi-redundant sequences of maxlen characters
maxlen = 40
step = 1
sentences = []
next_chars = []
for i in range(0, len(text) - maxlen, step):
    sentences.append(text[i: i + maxlen])
    next_chars.append(text[i + maxlen])
print('nb sequences:', len(sentences))

print('Vectorization...')
X = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.float32)
y = np.zeros((len(sentences), len(chars)), dtype=np.float32)
for i, sentence in enumerate(sentences):
    for t, char in enumerate(sentence):
        X[i, t, char_indices[char]] = 1
    y[i, char_indices[next_chars[i]]] = 1

print(X[0])


Getting data
corpus length: 1115394
total chars: 39
nb sequences: 1115354
Vectorization...
[[ 0.  0.  0. ...,  0.  0.  0.]
 [ 0.  0.  0. ...,  0.  0.  0.]
 [ 0.  0.  0. ...,  0.  0.  0.]
 ..., 
 [ 0.  0.  0. ...,  0.  0.  0.]
 [ 0.  0.  0. ...,  0.  0.  0.]
 [ 0.  0.  0. ...,  0.  0.  0.]]

In [6]:
# PARAMETERS
LEARNING_RATE = 0.01
BATCH_SIZE = 64
STEPS = 50

NUM_OUTPUTS_PRED = 40

# Set model params
model_params = {"learning_rate": LEARNING_RATE, "vocab_size": len(chars), "maxlen": maxlen}

# Instantiate Estimator
nn = tf.contrib.learn.Estimator(model_fn=model_fn, params=model_params)

# Score accuracy
for iteration in range(1, 600):
    print()
    print('-' * 50)
    print('Iteration', iteration)
  
    # Fit
    print('-' * 40)
    print("Training")
    print('-' * 40)
    nn.fit(x=X, y=y, steps=STEPS, batch_size=BATCH_SIZE)

# choose a random sentence
start_index = random.randint(0, len(text) - maxlen - 1)
sentence = text[start_index: start_index + maxlen]    
    
# generate output using the RNN model
original_sentence = sentence
generated = sentence
for i in range(NUM_OUTPUTS_PRED):
    x = np.zeros((1, maxlen, len(chars)), dtype=np.float32)
    for t, char in enumerate(sentence):
        x[0, t, char_indices[char]] = 1.

    p = None
    for e in nn.predict(x):
        if p is None: p = e["preds"]
    next_index = sample(p)
    next_char = indices_char[next_index]

    generated += next_char
    sentence = sentence[1:] + next_char

print('\n' * 10, '-' * 100)
print('HERE')
print(generated)
print(original_sentence)
print('-' * 100, '\n' * 10)


INFO:tensorflow:Using default config.
INFO:tensorflow:Using config: {'_save_checkpoints_steps': None, '_master': '', '_task_type': None, '_num_worker_replicas': 0, '_environment': 'local', '_num_ps_replicas': 0, '_evaluation_master': '', '_model_dir': None, '_keep_checkpoint_max': 5, '_task_id': 0, '_is_chief': True, '_tf_config': gpu_options {
  per_process_gpu_memory_fraction: 1
}
, '_save_summary_steps': 100, '_tf_random_seed': None, '_save_checkpoints_secs': 600, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7f89c73b7898>, '_keep_checkpoint_every_n_hours': 10000}
WARNING:tensorflow:Using temporary folder as model directory: /tmp/tmp1sk623ci

--------------------------------------------------
Iteration 1
----------------------------------------
Training
----------------------------------------
WARNING:tensorflow:From <ipython-input-6-70d0ae44af69>:24: calling BaseEstimator.fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with y is deprecated and will be removed after 2016-12-01.
Instructions for updating:
Estimator is decoupled from Scikit Learn interface by moving into
separate class SKCompat. Arguments x, y and batch_size are only
available in the SKCompat class, Estimator will only accept input_fn.
Example conversion:
  est = Estimator(...) -> est = SKCompat(Estimator(...))
WARNING:tensorflow:From <ipython-input-6-70d0ae44af69>:24: calling BaseEstimator.fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with batch_size is deprecated and will be removed after 2016-12-01.
Instructions for updating:
Estimator is decoupled from Scikit Learn interface by moving into
separate class SKCompat. Arguments x, y and batch_size are only
available in the SKCompat class, Estimator will only accept input_fn.
Example conversion:
  est = Estimator(...) -> est = SKCompat(Estimator(...))
WARNING:tensorflow:From <ipython-input-6-70d0ae44af69>:24: calling BaseEstimator.fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with x is deprecated and will be removed after 2016-12-01.
Instructions for updating:
Estimator is decoupled from Scikit Learn interface by moving into
separate class SKCompat. Arguments x, y and batch_size are only
available in the SKCompat class, Estimator will only accept input_fn.
Example conversion:
  est = Estimator(...) -> est = SKCompat(Estimator(...))
/usr/local/lib/python3.4/dist-packages/tensorflow/python/util/deprecation.py:248: FutureWarning: comparison to `None` will result in an elementwise object comparison in the future.
  equality = a == b
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Saving checkpoints for 1 into /tmp/tmp1sk623ci/model.ckpt.
INFO:tensorflow:step = 1, loss = 3.66735
INFO:tensorflow:Saving checkpoints for 10 into /tmp/tmp1sk623ci/model.ckpt.
INFO:tensorflow:Loss for final step: 3.65834.

--------------------------------------------------
Iteration 2
----------------------------------------
Training
----------------------------------------
WARNING:tensorflow:From <ipython-input-6-70d0ae44af69>:24: calling BaseEstimator.fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with y is deprecated and will be removed after 2016-12-01.
Instructions for updating:
Estimator is decoupled from Scikit Learn interface by moving into
separate class SKCompat. Arguments x, y and batch_size are only
available in the SKCompat class, Estimator will only accept input_fn.
Example conversion:
  est = Estimator(...) -> est = SKCompat(Estimator(...))
WARNING:tensorflow:From <ipython-input-6-70d0ae44af69>:24: calling BaseEstimator.fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with batch_size is deprecated and will be removed after 2016-12-01.
Instructions for updating:
Estimator is decoupled from Scikit Learn interface by moving into
separate class SKCompat. Arguments x, y and batch_size are only
available in the SKCompat class, Estimator will only accept input_fn.
Example conversion:
  est = Estimator(...) -> est = SKCompat(Estimator(...))
WARNING:tensorflow:From <ipython-input-6-70d0ae44af69>:24: calling BaseEstimator.fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with x is deprecated and will be removed after 2016-12-01.
Instructions for updating:
Estimator is decoupled from Scikit Learn interface by moving into
separate class SKCompat. Arguments x, y and batch_size are only
available in the SKCompat class, Estimator will only accept input_fn.
Example conversion:
  est = Estimator(...) -> est = SKCompat(Estimator(...))
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Restoring parameters from /tmp/tmp1sk623ci/model.ckpt-10
INFO:tensorflow:Saving checkpoints for 11 into /tmp/tmp1sk623ci/model.ckpt.
INFO:tensorflow:step = 11, loss = 3.65527
INFO:tensorflow:Saving checkpoints for 20 into /tmp/tmp1sk623ci/model.ckpt.
INFO:tensorflow:Loss for final step: 3.6418.

--------------------------------------------------
Iteration 3
----------------------------------------
Training
----------------------------------------
WARNING:tensorflow:From <ipython-input-6-70d0ae44af69>:24: calling BaseEstimator.fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with y is deprecated and will be removed after 2016-12-01.
Instructions for updating:
Estimator is decoupled from Scikit Learn interface by moving into
separate class SKCompat. Arguments x, y and batch_size are only
available in the SKCompat class, Estimator will only accept input_fn.
Example conversion:
  est = Estimator(...) -> est = SKCompat(Estimator(...))
WARNING:tensorflow:From <ipython-input-6-70d0ae44af69>:24: calling BaseEstimator.fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with batch_size is deprecated and will be removed after 2016-12-01.
Instructions for updating:
Estimator is decoupled from Scikit Learn interface by moving into
separate class SKCompat. Arguments x, y and batch_size are only
available in the SKCompat class, Estimator will only accept input_fn.
Example conversion:
  est = Estimator(...) -> est = SKCompat(Estimator(...))
WARNING:tensorflow:From <ipython-input-6-70d0ae44af69>:24: calling BaseEstimator.fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with x is deprecated and will be removed after 2016-12-01.
Instructions for updating:
Estimator is decoupled from Scikit Learn interface by moving into
separate class SKCompat. Arguments x, y and batch_size are only
available in the SKCompat class, Estimator will only accept input_fn.
Example conversion:
  est = Estimator(...) -> est = SKCompat(Estimator(...))
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Restoring parameters from /tmp/tmp1sk623ci/model.ckpt-20
INFO:tensorflow:Saving checkpoints for 21 into /tmp/tmp1sk623ci/model.ckpt.
INFO:tensorflow:step = 21, loss = 3.63535
INFO:tensorflow:Saving checkpoints for 30 into /tmp/tmp1sk623ci/model.ckpt.
INFO:tensorflow:Loss for final step: 3.61494.

--------------------------------------------------
Iteration 4
----------------------------------------
Training
----------------------------------------
WARNING:tensorflow:From <ipython-input-6-70d0ae44af69>:24: calling BaseEstimator.fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with y is deprecated and will be removed after 2016-12-01.
Instructions for updating:
Estimator is decoupled from Scikit Learn interface by moving into
separate class SKCompat. Arguments x, y and batch_size are only
available in the SKCompat class, Estimator will only accept input_fn.
Example conversion:
  est = Estimator(...) -> est = SKCompat(Estimator(...))
WARNING:tensorflow:From <ipython-input-6-70d0ae44af69>:24: calling BaseEstimator.fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with batch_size is deprecated and will be removed after 2016-12-01.
Instructions for updating:
Estimator is decoupled from Scikit Learn interface by moving into
separate class SKCompat. Arguments x, y and batch_size are only
available in the SKCompat class, Estimator will only accept input_fn.
Example conversion:
  est = Estimator(...) -> est = SKCompat(Estimator(...))
WARNING:tensorflow:From <ipython-input-6-70d0ae44af69>:24: calling BaseEstimator.fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with x is deprecated and will be removed after 2016-12-01.
Instructions for updating:
Estimator is decoupled from Scikit Learn interface by moving into
separate class SKCompat. Arguments x, y and batch_size are only
available in the SKCompat class, Estimator will only accept input_fn.
Example conversion:
  est = Estimator(...) -> est = SKCompat(Estimator(...))
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Restoring parameters from /tmp/tmp1sk623ci/model.ckpt-30
INFO:tensorflow:Saving checkpoints for 31 into /tmp/tmp1sk623ci/model.ckpt.
INFO:tensorflow:step = 31, loss = 3.60301
INFO:tensorflow:Saving checkpoints for 40 into /tmp/tmp1sk623ci/model.ckpt.
INFO:tensorflow:Loss for final step: 3.57199.

--------------------------------------------------
Iteration 5
----------------------------------------
Training
----------------------------------------
WARNING:tensorflow:From <ipython-input-6-70d0ae44af69>:24: calling BaseEstimator.fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with y is deprecated and will be removed after 2016-12-01.
Instructions for updating:
Estimator is decoupled from Scikit Learn interface by moving into
separate class SKCompat. Arguments x, y and batch_size are only
available in the SKCompat class, Estimator will only accept input_fn.
Example conversion:
  est = Estimator(...) -> est = SKCompat(Estimator(...))
WARNING:tensorflow:From <ipython-input-6-70d0ae44af69>:24: calling BaseEstimator.fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with batch_size is deprecated and will be removed after 2016-12-01.
Instructions for updating:
Estimator is decoupled from Scikit Learn interface by moving into
separate class SKCompat. Arguments x, y and batch_size are only
available in the SKCompat class, Estimator will only accept input_fn.
Example conversion:
  est = Estimator(...) -> est = SKCompat(Estimator(...))
WARNING:tensorflow:From <ipython-input-6-70d0ae44af69>:24: calling BaseEstimator.fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with x is deprecated and will be removed after 2016-12-01.
Instructions for updating:
Estimator is decoupled from Scikit Learn interface by moving into
separate class SKCompat. Arguments x, y and batch_size are only
available in the SKCompat class, Estimator will only accept input_fn.
Example conversion:
  est = Estimator(...) -> est = SKCompat(Estimator(...))
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Restoring parameters from /tmp/tmp1sk623ci/model.ckpt-40
INFO:tensorflow:Saving checkpoints for 41 into /tmp/tmp1sk623ci/model.ckpt.
INFO:tensorflow:step = 41, loss = 3.55113
INFO:tensorflow:Saving checkpoints for 50 into /tmp/tmp1sk623ci/model.ckpt.
INFO:tensorflow:Loss for final step: 3.50294.

--------------------------------------------------
Iteration 6
----------------------------------------
Training
----------------------------------------
WARNING:tensorflow:From <ipython-input-6-70d0ae44af69>:24: calling BaseEstimator.fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with y is deprecated and will be removed after 2016-12-01.
Instructions for updating:
Estimator is decoupled from Scikit Learn interface by moving into
separate class SKCompat. Arguments x, y and batch_size are only
available in the SKCompat class, Estimator will only accept input_fn.
Example conversion:
  est = Estimator(...) -> est = SKCompat(Estimator(...))
WARNING:tensorflow:From <ipython-input-6-70d0ae44af69>:24: calling BaseEstimator.fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with batch_size is deprecated and will be removed after 2016-12-01.
Instructions for updating:
Estimator is decoupled from Scikit Learn interface by moving into
separate class SKCompat. Arguments x, y and batch_size are only
available in the SKCompat class, Estimator will only accept input_fn.
Example conversion:
  est = Estimator(...) -> est = SKCompat(Estimator(...))
WARNING:tensorflow:From <ipython-input-6-70d0ae44af69>:24: calling BaseEstimator.fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with x is deprecated and will be removed after 2016-12-01.
Instructions for updating:
Estimator is decoupled from Scikit Learn interface by moving into
separate class SKCompat. Arguments x, y and batch_size are only
available in the SKCompat class, Estimator will only accept input_fn.
Example conversion:
  est = Estimator(...) -> est = SKCompat(Estimator(...))
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Restoring parameters from /tmp/tmp1sk623ci/model.ckpt-50
INFO:tensorflow:Saving checkpoints for 51 into /tmp/tmp1sk623ci/model.ckpt.
INFO:tensorflow:step = 51, loss = 3.46656
INFO:tensorflow:Saving checkpoints for 60 into /tmp/tmp1sk623ci/model.ckpt.
INFO:tensorflow:Loss for final step: 3.38786.

--------------------------------------------------
Iteration 7
----------------------------------------
Training
----------------------------------------
WARNING:tensorflow:From <ipython-input-6-70d0ae44af69>:24: calling BaseEstimator.fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with y is deprecated and will be removed after 2016-12-01.
Instructions for updating:
Estimator is decoupled from Scikit Learn interface by moving into
separate class SKCompat. Arguments x, y and batch_size are only
available in the SKCompat class, Estimator will only accept input_fn.
Example conversion:
  est = Estimator(...) -> est = SKCompat(Estimator(...))
WARNING:tensorflow:From <ipython-input-6-70d0ae44af69>:24: calling BaseEstimator.fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with batch_size is deprecated and will be removed after 2016-12-01.
Instructions for updating:
Estimator is decoupled from Scikit Learn interface by moving into
separate class SKCompat. Arguments x, y and batch_size are only
available in the SKCompat class, Estimator will only accept input_fn.
Example conversion:
  est = Estimator(...) -> est = SKCompat(Estimator(...))
WARNING:tensorflow:From <ipython-input-6-70d0ae44af69>:24: calling BaseEstimator.fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with x is deprecated and will be removed after 2016-12-01.
Instructions for updating:
Estimator is decoupled from Scikit Learn interface by moving into
separate class SKCompat. Arguments x, y and batch_size are only
available in the SKCompat class, Estimator will only accept input_fn.
Example conversion:
  est = Estimator(...) -> est = SKCompat(Estimator(...))
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Restoring parameters from /tmp/tmp1sk623ci/model.ckpt-60
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-6-70d0ae44af69> in <module>()
     22     print("Training")
     23     print('-' * 40)
---> 24     nn.fit(x=X, y=y, steps=STEPS, batch_size=BATCH_SIZE)
     25 
     26 # choose a random sentence

/usr/local/lib/python3.4/dist-packages/tensorflow/python/util/deprecation.py in new_func(*args, **kwargs)
    279             _call_location(), decorator_utils.get_qualified_name(func),
    280             func.__module__, arg_name, date, instructions)
--> 281       return func(*args, **kwargs)
    282     new_func.__doc__ = _add_deprecated_arg_notice_to_docstring(
    283         func.__doc__, date, instructions)

/usr/local/lib/python3.4/dist-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.py in fit(self, x, y, input_fn, steps, batch_size, monitors, max_steps)
    412     _verify_input_args(x, y, input_fn, None, batch_size)
    413     if x is not None:
--> 414       SKCompat(self).fit(x, y, batch_size, steps, max_steps, monitors)
    415       return self
    416 

/usr/local/lib/python3.4/dist-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.py in fit(self, x, y, batch_size, steps, max_steps, monitors)
   1315                         steps=steps,
   1316                         max_steps=max_steps,
-> 1317                         monitors=all_monitors)
   1318     return self
   1319 

/usr/local/lib/python3.4/dist-packages/tensorflow/python/util/deprecation.py in new_func(*args, **kwargs)
    279             _call_location(), decorator_utils.get_qualified_name(func),
    280             func.__module__, arg_name, date, instructions)
--> 281       return func(*args, **kwargs)
    282     new_func.__doc__ = _add_deprecated_arg_notice_to_docstring(
    283         func.__doc__, date, instructions)

/usr/local/lib/python3.4/dist-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.py in fit(self, x, y, input_fn, steps, batch_size, monitors, max_steps)
    428       hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))
    429 
--> 430     loss = self._train_model(input_fn=input_fn, hooks=hooks)
    431     logging.info('Loss for final step: %s.', loss)
    432     return self

/usr/local/lib/python3.4/dist-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.py in _train_model(self, input_fn, hooks)
    976         loss = None
    977         while not mon_sess.should_stop():
--> 978           _, loss = mon_sess.run([model_fn_ops.train_op, model_fn_ops.loss])
    979       summary_io.SummaryWriterCache.clear()
    980       return loss

/usr/local/lib/python3.4/dist-packages/tensorflow/python/training/monitored_session.py in run(self, fetches, feed_dict, options, run_metadata)
    482                           feed_dict=feed_dict,
    483                           options=options,
--> 484                           run_metadata=run_metadata)
    485 
    486   def should_stop(self):

/usr/local/lib/python3.4/dist-packages/tensorflow/python/training/monitored_session.py in run(self, fetches, feed_dict, options, run_metadata)
    818                               feed_dict=feed_dict,
    819                               options=options,
--> 820                               run_metadata=run_metadata)
    821       except _PREEMPTION_ERRORS as e:
    822         logging.info('An error was raised. This may be due to a preemption in '

/usr/local/lib/python3.4/dist-packages/tensorflow/python/training/monitored_session.py in run(self, *args, **kwargs)
    774 
    775   def run(self, *args, **kwargs):
--> 776     return self._sess.run(*args, **kwargs)
    777 
    778 

/usr/local/lib/python3.4/dist-packages/tensorflow/python/training/monitored_session.py in run(self, fetches, feed_dict, options, run_metadata)
    920     options = options or config_pb2.RunOptions()
    921     feed_dict = self._call_hook_before_run(run_context, actual_fetches,
--> 922                                            feed_dict, options)
    923 
    924     # Do session run.

/usr/local/lib/python3.4/dist-packages/tensorflow/python/training/monitored_session.py in _call_hook_before_run(self, run_context, fetch_dict, user_feed_dict, options)
    946     hook_feeds = {}
    947     for hook in self._hooks:
--> 948       request = hook.before_run(run_context)
    949       if request is not None:
    950         if request.fetches is not None:

/usr/local/lib/python3.4/dist-packages/tensorflow/python/training/basic_session_run_hooks.py in before_run(self, run_context)
    361           saver_def=saver_def)
    362       self._summary_writer.add_graph(graph)
--> 363       self._summary_writer.add_meta_graph(meta_graph_def)
    364 
    365     return SessionRunArgs(self._global_step_tensor)

/usr/local/lib/python3.4/dist-packages/tensorflow/python/summary/writer/writer.py in add_meta_graph(self, meta_graph_def, global_step)
    217       raise TypeError("meta_graph_def must be type MetaGraphDef, saw type: %s"
    218                       % type(meta_graph_def))
--> 219     meta_graph_bytes = meta_graph_def.SerializeToString()
    220     event = event_pb2.Event(meta_graph_def=meta_graph_bytes)
    221     self._add_event(event, global_step)

/usr/local/lib/python3.4/dist-packages/google/protobuf/internal/python_message.py in SerializeToString(self)
   1034           'Message %s is missing required fields: %s' % (
   1035           self.DESCRIPTOR.full_name, ','.join(self.FindInitializationErrors())))
-> 1036     return self.SerializePartialToString()
   1037   cls.SerializeToString = SerializeToString
   1038 

/usr/local/lib/python3.4/dist-packages/google/protobuf/internal/python_message.py in SerializePartialToString(self)
   1043   def SerializePartialToString(self):
   1044     out = BytesIO()
-> 1045     self._InternalSerialize(out.write)
   1046     return out.getvalue()
   1047   cls.SerializePartialToString = SerializePartialToString

/usr/local/lib/python3.4/dist-packages/google/protobuf/internal/python_message.py in InternalSerialize(self, write_bytes)
   1049   def InternalSerialize(self, write_bytes):
   1050     for field_descriptor, field_value in self.ListFields():
-> 1051       field_descriptor._encoder(write_bytes, field_value)
   1052     for tag_bytes, value_bytes in self._unknown_fields:
   1053       write_bytes(tag_bytes)

/usr/local/lib/python3.4/dist-packages/google/protobuf/internal/encoder.py in EncodeField(write, value)
    763     def EncodeField(write, value):
    764       write(tag)
--> 765       local_EncodeVarint(write, value.ByteSize())
    766       return value._InternalSerialize(write)
    767     return EncodeField

/usr/local/lib/python3.4/dist-packages/google/protobuf/internal/python_message.py in ByteSize(self)
   1011     size = 0
   1012     for field_descriptor, field_value in self.ListFields():
-> 1013       size += field_descriptor._sizer(field_value)
   1014 
   1015     for tag_bytes, value_bytes in self._unknown_fields:

/usr/local/lib/python3.4/dist-packages/google/protobuf/internal/encoder.py in RepeatedFieldSize(value)
    300       result = tag_size * len(value)
    301       for element in value:
--> 302         l = element.ByteSize()
    303         result += local_VarintSize(l) + l
    304       return result

/usr/local/lib/python3.4/dist-packages/google/protobuf/internal/python_message.py in ByteSize(self)
   1011     size = 0
   1012     for field_descriptor, field_value in self.ListFields():
-> 1013       size += field_descriptor._sizer(field_value)
   1014 
   1015     for tag_bytes, value_bytes in self._unknown_fields:

/usr/local/lib/python3.4/dist-packages/google/protobuf/internal/encoder.py in FieldSize(map_value)
    361       total += message_sizer(entry_msg)
    362       if is_message_map:
--> 363         value.ByteSize()
    364     return total
    365 

/usr/local/lib/python3.4/dist-packages/google/protobuf/internal/python_message.py in ByteSize(self)
   1018     self._cached_byte_size = size
   1019     self._cached_byte_size_dirty = False
-> 1020     self._listener_for_children.dirty = False
   1021     return size
   1022 

KeyboardInterrupt: 

In [ ]: