Uncomment lines from this notebook to learn more.


In [1]:
# 1.1 Import tensorflow and other libraries.
import tensorflow as tf
import numpy as np

%matplotlib inline
import pylab

In [2]:
# 1.2 Create input data using NumPy. y = x * 0.1 + 0.3 + noise
x_train = np.random.rand(100).astype(np.float32)
noise = np.random.normal(scale=0.01, size=len(x_train))
y_train = x_train * 0.1 + 0.3 + noise

# Uncomment the following line to plot our input data.
pylab.plot(x_train, y_train, '.')


Out[2]:
[<matplotlib.lines.Line2D at 0x7f0af0bd9518>]

In [3]:
# Create some fake evaluation data
x_eval = np.random.rand(len(x_train)).astype(np.float32)
noise = np.random.normal(scale=0.01, size=len(x_train))
y_eval = x_eval * 0.1 + 0.3 + noise

In [4]:
# 1.3 Buld inference graph.
# Create Variables W and b that compute y_data = W * x_data + b
W = tf.get_variable(shape=[], name='weights')
b = tf.get_variable(shape=[], name='bias')

# Uncomment the following lines to see W and b are.
# print(W)
# print(b)

# Create a placeholder we'll use later to feed x's into the graph for training and eval.
# shape=[None] means we can put in any number of examples. 
# This is used for minibatch training, and to evaluate a lot of examples at once.
x = tf.placeholder(shape=[None], dtype=tf.float32, name='x')

# Uncomment this line to see what x is
# print(x)

# This is the same as tf.add(tf.mul(W, x), b), but looks nicer
y = W * x + b

In [7]:
# Write the graph so we can look at it in TensorBoard
# Now is a good time to try that
sw = tf.summary.FileWriter('/root/tensorboard/', graph=tf.get_default_graph())

In [14]:
# Create a placeholder we'll use later to feed the correct y value into the graph
y_label = tf.placeholder(shape=[None], dtype=tf.float32, name='y_label')
# print (y_label)

In [15]:
# 1.4 Build training graph.
loss = tf.reduce_mean(tf.square(y - y_label))  # Create an operation that calculates loss.
optimizer = tf.train.GradientDescentOptimizer(0.5)  # Create an optimizer.
train = optimizer.minimize(loss)  # Create an operation that minimizes loss.

# Uncomment the following 3 lines to see what 'loss', 'optimizer' and 'train' are.
# print("loss:", loss)
# print("optimizer:", optimizer)
# print("train:", train)

In [16]:
# Create an operation to initialize all the variables.
init = tf.global_variables_initializer()
# print(init)

In [17]:
# 1.6 Create a session and launch the graph.
sess = tf.Session()
sess.run(init)

# Uncomment the following line to see the initial W and b values.
# print(sess.run([W, b]))

In [18]:
# Uncomment these lines to test that we can compute a y from an x (without having trained anything). 
# x must be a vector, hence [3] not just 3.
# x_in = [3]
# sess.run(y, feed_dict={x: x_in})

In [19]:
# Calculate accuracy on the evaluation data before training
def eval():
    return sess.run(loss, feed_dict={x: x_eval, y_label: y_eval})
eval()


Out[19]:
3.2209063

In [35]:
# Add a summary so we can visualize the loss in TensorBoard
tf.summary.scalar('loss', loss)
summary_op = tf.summary.merge

In [36]:
# 1.7 Perform training.
for step in range(201):
    # Run the training op; feed the training data into the graph
    summary_str, _ = sess.run([summary_op, train], feed_dict={x: x_train, y_label: y_train})
    sw.add_summary(summary_str, step)
    # Uncomment the following two lines to watch training happen real time.
    #if step % 20 == 0:
        #print(step, sess.run([W, b]))



TypeErrorTraceback (most recent call last)
/opt/conda/lib/python3.5/site-packages/tensorflow/python/client/session.py in __init__(self, fetches, contraction_fn)
    266         self._unique_fetches.append(ops.get_default_graph().as_graph_element(
--> 267             fetch, allow_tensor=True, allow_operation=True))
    268       except TypeError as e:

/opt/conda/lib/python3.5/site-packages/tensorflow/python/framework/ops.py in as_graph_element(self, obj, allow_tensor, allow_operation)
   2472     with self._lock:
-> 2473       return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
   2474 

/opt/conda/lib/python3.5/site-packages/tensorflow/python/framework/ops.py in _as_graph_element_locked(self, obj, allow_tensor, allow_operation)
   2561       raise TypeError("Can not convert a %s into a %s."
-> 2562                       % (type(obj).__name__, types_str))
   2563 

TypeError: Can not convert a function into a Tensor or Operation.

During handling of the above exception, another exception occurred:

TypeErrorTraceback (most recent call last)
<ipython-input-36-263757254dea> in <module>()
      2 for step in range(201):
      3     # Run the training op; feed the training data into the graph
----> 4     summary_str, _ = sess.run([summary_op, train], feed_dict={x: x_train, y_label: y_train})
      5     sw.add_summary(summary_str, step)
      6     # Uncomment the following two lines to watch training happen real time.

/opt/conda/lib/python3.5/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
    765     try:
    766       result = self._run(None, fetches, feed_dict, options_ptr,
--> 767                          run_metadata_ptr)
    768       if run_metadata:
    769         proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

/opt/conda/lib/python3.5/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
    950 
    951     # Create a fetch handler to take care of the structure of fetches.
--> 952     fetch_handler = _FetchHandler(self._graph, fetches, feed_dict_string)
    953 
    954     # Run request and get response.

/opt/conda/lib/python3.5/site-packages/tensorflow/python/client/session.py in __init__(self, graph, fetches, feeds)
    406     """
    407     with graph.as_default():
--> 408       self._fetch_mapper = _FetchMapper.for_fetch(fetches)
    409     self._fetches = []
    410     self._targets = []

/opt/conda/lib/python3.5/site-packages/tensorflow/python/client/session.py in for_fetch(fetch)
    228     elif isinstance(fetch, (list, tuple)):
    229       # NOTE(touts): This is also the code path for namedtuples.
--> 230       return _ListFetchMapper(fetch)
    231     elif isinstance(fetch, dict):
    232       return _DictFetchMapper(fetch)

/opt/conda/lib/python3.5/site-packages/tensorflow/python/client/session.py in __init__(self, fetches)
    335     """
    336     self._fetch_type = type(fetches)
--> 337     self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches]
    338     self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
    339 

/opt/conda/lib/python3.5/site-packages/tensorflow/python/client/session.py in <listcomp>(.0)
    335     """
    336     self._fetch_type = type(fetches)
--> 337     self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches]
    338     self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
    339 

/opt/conda/lib/python3.5/site-packages/tensorflow/python/client/session.py in for_fetch(fetch)
    236         if isinstance(fetch, tensor_type):
    237           fetches, contraction_fn = fetch_fn(fetch)
--> 238           return _ElementFetchMapper(fetches, contraction_fn)
    239     # Did not find anything.
    240     raise TypeError('Fetch argument %r has invalid type %r' %

/opt/conda/lib/python3.5/site-packages/tensorflow/python/client/session.py in __init__(self, fetches, contraction_fn)
    269         raise TypeError('Fetch argument %r has invalid type %r, '
    270                         'must be a string or Tensor. (%s)'
--> 271                         % (fetch, type(fetch), str(e)))
    272       except ValueError as e:
    273         raise ValueError('Fetch argument %r cannot be interpreted as a '

TypeError: Fetch argument <function merge at 0x7f0af695a8c8> has invalid type <class 'function'>, must be a string or Tensor. (Can not convert a function into a Tensor or Operation.)

In [37]:
# 1.8 Uncomment the following lines to plot the predicted values
# pylab.plot(x_train, y_train, '.', label="target")
# pylab.plot(x_train, sess.run(y, feed_dict={x: x_train, y_label: y_train}), ".", label="predicted")
# pylab.legend()
# pylab.ylim(0, 1.0)

In [38]:
# Check accuracy on eval data after training
eval()


Out[38]:
0.021749681

Demonstrate saving and restoring a model


In [39]:
def predict(x_in): return sess.run(y, feed_dict={x: [x_in]})

In [45]:
# Save the model
saver = tf.train.Saver()
saver.save(sess, './my_checkpoint.ckpt')


Out[45]:
'./my_checkpoint.ckpt'

In [46]:
# Current prediction
predict(3)


Out[46]:
array([-1.25316942], dtype=float32)

In [47]:
# Reset the model by running the init op again
sess.run(init)

In [48]:
# Prediction after variables reinitialized
predict(3)


Out[48]:
array([ 1.9868319], dtype=float32)

In [49]:
saver.restore(sess, './my_checkpoint.ckpt')

In [50]:
# Predictions after variables restored
predict(3)


Out[50]:
array([-1.25316942], dtype=float32)

In [ ]: