In [54]:
import tensorflow as tf
import numpy as np

In [39]:
node1 = tf.constant(3.0, dtype=tf.float32)
node2 = tf.constant(4.0) # also tf.float32 implicitly
print(node1, node2, sep="\n")


Tensor("Const_6:0", shape=(), dtype=float32)
Tensor("Const_7:0", shape=(), dtype=float32)

In [40]:
sess = tf.Session()
print(sess.run([node1, node2]))


[3.0, 4.0]
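
A session can also be used as a context manager so that it is closed automatically. A minimal sketch on the same graph:

with tf.Session() as s:
    print(s.run([node1, node2]))  # the session is closed when the block exits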

In [41]:
node3 = tf.add(node1, node2) # equivalent to writing node1 + node2
print("node3: ", node3)
print("sess.run(node3): ", sess.run(node3))


node3:  Tensor("Add_3:0", shape=(), dtype=float32)
sess.run(node3):  7.0
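
Because tf.Tensor overloads the arithmetic operators, node1 + node2 builds the same kind of add node as tf.add. A quick check:

node4 = node1 + node2  # same operation as tf.add(node1, node2)
print(sess.run(node4))  # 7.0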

In [42]:
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
adder_node = a + b  # + provides a shortcut for tf.add(a, b)
print(sess.run(adder_node, {a: 3, b:4.5}))
print(sess.run(adder_node, {a: [1,3], b: [2, 4]}))


7.5
[ 3.  7.]
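
A placeholder can also be given an explicit shape so that TensorFlow validates whatever is fed. A small sketch; the shape=[None] constraint (any 1-D vector) is the only addition:

c = tf.placeholder(tf.float32, shape=[None])
print(sess.run(c * 2, {c: [1., 2., 3.]}))  # feeding a scalar or a matrix here would raise an error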

In [43]:
add_and_triple = adder_node * 3.
print(sess.run(add_and_triple, {a: 3, b:4.5}))


22.5

In [44]:
W = tf.Variable([.3], dtype=tf.float32) # Variables need an initial value; the dtype is optional
b = tf.Variable([-.3], dtype=tf.float32)
x = tf.placeholder(tf.float32)
linear_model = W * x + b
init = tf.global_variables_initializer()
sess.run(init)
print(sess.run(linear_model, {x:[1,2,3,4]}))


[ 0.          0.30000001  0.60000002  0.90000004]
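
The 0.30000001 artifacts are ordinary float32 rounding: 0.3 has no exact binary representation. The same effect in plain NumPy:

print('%.8f' % np.float32(0.3))  # 0.30000001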

In [45]:
y = tf.placeholder(tf.float32)
squared_deltas = tf.square(linear_model - y)
loss = tf.reduce_sum(squared_deltas)
print(sess.run(loss, {x:[1,2,3,4], y:[0,-1,-2,-3]}))


23.66
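
The 23.66 can be verified by hand: the model predicts [0, 0.3, 0.6, 0.9], the deltas to y are [0, 1.3, 2.6, 3.9], and 0 + 1.69 + 6.76 + 15.21 = 23.66. The same check in NumPy:

pred = 0.3 * np.array([1., 2., 3., 4.]) - 0.3
print(np.sum((pred - np.array([0., -1., -2., -3.]))**2))  # 23.66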

In [46]:
fixW = tf.assign(W, [-1.]) # Manually assign W and b their optimal values
fixb = tf.assign(b, [1.])
sess.run([fixW, fixb])
print(sess.run(loss, {x:[1,2,3,4], y:[0,-1,-2,-3]}))


0.0
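
The loss is exactly zero because W = -1, b = 1 reproduces the targets: -1 * [1, 2, 3, 4] + 1 = [0, -1, -2, -3].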

tf.train API


In [48]:
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)
sess.run(init) # reset values to incorrect defaults.
for i in range(1000):
    sess.run(train, {x:[1,2,3,4], y:[0,-1,-2,-3]})
print(sess.run([W, b]))


[array([-0.9999969], dtype=float32), array([ 0.99999082], dtype=float32)]
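
minimize is shorthand for computing gradients and then applying them. A minimal sketch of the equivalent two-step form, using the same optimizer and loss as above:

grads_and_vars = optimizer.compute_gradients(loss)        # list of (gradient, variable) pairs
train_manual = optimizer.apply_gradients(grads_and_vars)  # one descent step, like train above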

In [53]:
curr_W, curr_b, curr_loss = sess.run([W, b, loss], {x:[1,2,3,4], y:[0,-1,-2,-3]})
print("W: %s b: %s loss: %s"%(curr_W, curr_b, curr_loss))


W: [-0.9999969] b: [ 0.99999082] loss: 5.69997e-11

tf.contrib.learn


In [55]:
# Declare list of features. We only have one real-valued feature. There are many
# other types of columns that are more complicated and useful.
features = [tf.contrib.layers.real_valued_column("x", dimension=1)]
# An estimator is the front end to invoke training (fitting) and evaluation
# (inference). There are many predefined types like linear regression,
# logistic regression, linear classification, logistic classification, and
# many neural network classifiers and regressors. The following code
# provides an estimator that does linear regression.
estimator = tf.contrib.learn.LinearRegressor(feature_columns=features)
# TensorFlow provides many helper methods to read and set up data sets.
# Here we use two data sets: one for training and one for evaluation.
# We have to tell the function how many passes over the data we want
# (num_epochs) and how big each batch should be (batch_size).
x_train = np.array([1., 2., 3., 4.])
y_train = np.array([0., -1., -2., -3.])
x_eval = np.array([2., 5., 8., 1.])
y_eval = np.array([-1.01, -4.1, -7, 0.])
input_fn = tf.contrib.learn.io.numpy_input_fn({"x":x_train}, y_train,
                                              batch_size=4,
                                              num_epochs=1000)
eval_input_fn = tf.contrib.learn.io.numpy_input_fn(
    {"x":x_eval}, y_eval, batch_size=4, num_epochs=1000)
# We can invoke 1000 training steps by calling the fit method and passing
# the training data set.
estimator.fit(input_fn=input_fn, steps=1000)
# Here we evaluate how well our model did.
train_loss = estimator.evaluate(input_fn=input_fn)
eval_loss = estimator.evaluate(input_fn=eval_input_fn)
print("train loss: %r"% train_loss)
print("eval loss: %r"% eval_loss)


INFO:tensorflow:Using default config.
WARNING:tensorflow:Using temporary folder as model directory: /tmp/tmp64kh60zp
INFO:tensorflow:Using config: {'_task_id': 0, '_environment': 'local', '_session_config': None, '_num_ps_replicas': 0, '_master': '', '_model_dir': '/tmp/tmp64kh60zp', '_save_checkpoints_secs': 600, '_save_checkpoints_steps': None, '_task_type': None, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7fd15ddf1e10>, '_keep_checkpoint_every_n_hours': 10000, '_evaluation_master': '', '_tf_config': gpu_options {
  per_process_gpu_memory_fraction: 1.0
}
, '_tf_random_seed': None, '_is_chief': True, '_keep_checkpoint_max': 5, '_save_summary_steps': 100, '_num_worker_replicas': 0}
WARNING:tensorflow:From /home/leyht/bachelorthesis/vix-term-structure/venv/lib/python3.5/site-packages/tensorflow/contrib/learn/python/learn/estimators/head.py:625: scalar_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.scalar. Note that tf.summary.scalar uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in. Also, passing a tensor or list of tags to a scalar summary op is no longer supported.
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Saving checkpoints for 1 into /tmp/tmp64kh60zp/model.ckpt.
INFO:tensorflow:step = 1, loss = 4.75
INFO:tensorflow:global_step/sec: 1009.39
INFO:tensorflow:step = 101, loss = 0.0810971 (0.102 sec)
INFO:tensorflow:global_step/sec: 1059.68
INFO:tensorflow:step = 201, loss = 0.0137664 (0.095 sec)
INFO:tensorflow:global_step/sec: 987.139
INFO:tensorflow:step = 301, loss = 0.00101854 (0.099 sec)
INFO:tensorflow:global_step/sec: 988.281
INFO:tensorflow:step = 401, loss = 0.000945688 (0.101 sec)
INFO:tensorflow:global_step/sec: 976.673
INFO:tensorflow:step = 501, loss = 0.000179357 (0.103 sec)
INFO:tensorflow:global_step/sec: 955.943
INFO:tensorflow:step = 601, loss = 7.44669e-05 (0.104 sec)
INFO:tensorflow:global_step/sec: 962.168
INFO:tensorflow:step = 701, loss = 2.03111e-05 (0.104 sec)
INFO:tensorflow:global_step/sec: 1131.05
INFO:tensorflow:step = 801, loss = 5.28096e-06 (0.088 sec)
INFO:tensorflow:global_step/sec: 1282.53
INFO:tensorflow:step = 901, loss = 5.24026e-07 (0.078 sec)
INFO:tensorflow:Saving checkpoints for 1000 into /tmp/tmp64kh60zp/model.ckpt.
INFO:tensorflow:Loss for final step: 2.84218e-07.
INFO:tensorflow:Starting evaluation at 2017-07-01-14:40:06
INFO:tensorflow:Restoring parameters from /tmp/tmp64kh60zp/model.ckpt-1000
INFO:tensorflow:Finished evaluation at 2017-07-01-14:40:06
INFO:tensorflow:Saving dict for global step 1000: global_step = 1000, loss = 2.47523e-07
INFO:tensorflow:Starting evaluation at 2017-07-01-14:40:07
INFO:tensorflow:Restoring parameters from /tmp/tmp64kh60zp/model.ckpt-1000
INFO:tensorflow:Finished evaluation at 2017-07-01-14:40:07
INFO:tensorflow:Saving dict for global step 1000: global_step = 1000, loss = 0.00256144
train loss: {'global_step': 1000, 'loss': 2.4752279e-07}
eval loss: {'global_step': 1000, 'loss': 0.0025614444}
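
The fitted estimator can also produce predictions. A hedged sketch, assuming the contrib.learn predict API (which returns an iterable); the x values below are illustrative:

predict_input_fn = tf.contrib.learn.io.numpy_input_fn(
    {"x": np.array([6., 7.])}, num_epochs=1, shuffle=False)
print(list(estimator.predict(input_fn=predict_input_fn)))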

In [75]:
# Define a custom model function to pass to an Estimator; it builds the same linear model by hand
def model(features, labels, mode):
    # Build a linear model and predict values
    W = tf.get_variable("W", [1], dtype=tf.float64)
    b = tf.get_variable("b", [1], dtype=tf.float64)
    y = W*features['x'] + b
    # Loss sub-graph
    loss = tf.reduce_sum(tf.square(y - labels))
    # Training sub-graph
    global_step = tf.train.get_global_step()
    optimizer = tf.train.GradientDescentOptimizer(0.01)
    train = tf.group(optimizer.minimize(loss),
                     tf.assign_add(global_step, 1))
    # ModelFnOps connects subgraphs we built to the
    # appropriate functionality.
    return tf.contrib.learn.ModelFnOps(
        mode=mode, predictions=y,
        loss=loss,
        train_op=train)
estimator = tf.contrib.learn.Estimator(model_fn=model)
# define our data sets
x_train = np.array([1., 2., 3., 4.])
y_train = np.array([0., -1., -2., -3.])
x_eval = np.array([2., 5., 8., 1.])
y_eval = np.array([-1.01, -4.1, -7, 0.])
input_fn = tf.contrib.learn.io.numpy_input_fn({"x": x_train}, y_train,
                                              batch_size=4, num_epochs=1000)
# train
estimator.fit(input_fn=input_fn, steps=1000)
# Here we evaluate how well our model did. 
train_loss = estimator.evaluate(input_fn=input_fn)
eval_loss = estimator.evaluate(input_fn=eval_input_fn)
print("train loss: %r"% train_loss)
print("eval loss: %r"% eval_loss)


INFO:tensorflow:Using default config.
WARNING:tensorflow:Using temporary folder as model directory: /tmp/tmpgeo7ycmy
INFO:tensorflow:Using config: {'_task_id': 0, '_environment': 'local', '_session_config': None, '_num_ps_replicas': 0, '_master': '', '_model_dir': '/tmp/tmpgeo7ycmy', '_save_checkpoints_secs': 600, '_save_checkpoints_steps': None, '_task_type': None, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7fd12fbc7908>, '_keep_checkpoint_every_n_hours': 10000, '_evaluation_master': '', '_tf_config': gpu_options {
  per_process_gpu_memory_fraction: 1.0
}
, '_tf_random_seed': None, '_is_chief': True, '_keep_checkpoint_max': 5, '_save_summary_steps': 100, '_num_worker_replicas': 0}
INFO:tensorflow:Create CheckpointSaverHook.
INFO:tensorflow:Saving checkpoints for 1 into /tmp/tmpgeo7ycmy/model.ckpt.
INFO:tensorflow:step = 1, loss = 190.698598493
INFO:tensorflow:global_step/sec: 980.317
INFO:tensorflow:step = 101, loss = 0.0696153591903 (0.106 sec)
INFO:tensorflow:global_step/sec: 1012.52
INFO:tensorflow:step = 201, loss = 0.00274743119205 (0.099 sec)
INFO:tensorflow:global_step/sec: 913.348
INFO:tensorflow:step = 301, loss = 0.000751497566326 (0.108 sec)
INFO:tensorflow:global_step/sec: 952.13
INFO:tensorflow:step = 401, loss = 5.58683933656e-05 (0.106 sec)
INFO:tensorflow:global_step/sec: 973.039
INFO:tensorflow:step = 501, loss = 3.3248942859e-06 (0.102 sec)
INFO:tensorflow:global_step/sec: 904.649
INFO:tensorflow:step = 601, loss = 7.28128620586e-07 (0.110 sec)
INFO:tensorflow:global_step/sec: 875.711
INFO:tensorflow:step = 701, loss = 4.80061111939e-08 (0.115 sec)
INFO:tensorflow:global_step/sec: 1051.59
INFO:tensorflow:step = 801, loss = 5.17788187663e-09 (0.095 sec)
INFO:tensorflow:global_step/sec: 1191.6
INFO:tensorflow:step = 901, loss = 1.8531679685e-10 (0.084 sec)
INFO:tensorflow:Saving checkpoints for 1000 into /tmp/tmpgeo7ycmy/model.ckpt.
INFO:tensorflow:Loss for final step: 1.99836539169e-11.
INFO:tensorflow:Starting evaluation at 2017-07-01-17:52:52
INFO:tensorflow:Restoring parameters from /tmp/tmpgeo7ycmy/model.ckpt-1000
INFO:tensorflow:Finished evaluation at 2017-07-01-17:52:53
INFO:tensorflow:Saving dict for global step 1000: global_step = 1000, loss = 2.23792e-11
INFO:tensorflow:Starting evaluation at 2017-07-01-17:52:53
INFO:tensorflow:Restoring parameters from /tmp/tmpgeo7ycmy/model.ckpt-1000
INFO:tensorflow:Finished evaluation at 2017-07-01-17:52:54
INFO:tensorflow:Saving dict for global step 1000: global_step = 1000, loss = 0.0101007
train loss: {'global_step': 1000, 'loss': 2.2379192e-11}
eval loss: {'global_step': 1000, 'loss': 0.010100702}
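
The mode argument lets a single model_fn branch between training, evaluation, and inference. A minimal sketch of that pattern; the INFER branch is an assumption about how the model above might be extended:

def model_with_modes(features, labels, mode):
    W = tf.get_variable("W", [1], dtype=tf.float64)
    b = tf.get_variable("b", [1], dtype=tf.float64)
    y = W * features['x'] + b
    if mode == tf.contrib.learn.ModeKeys.INFER:
        # no labels at inference time, so skip the loss and train sub-graphs
        return tf.contrib.learn.ModelFnOps(mode=mode, predictions=y)
    loss = tf.reduce_sum(tf.square(y - labels))
    train = tf.group(tf.train.GradientDescentOptimizer(0.01).minimize(loss),
                     tf.assign_add(tf.train.get_global_step(), 1))
    return tf.contrib.learn.ModelFnOps(mode=mode, predictions=y,
                                       loss=loss, train_op=train)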

In [74]:
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)


Extracting MNIST_data/train-images-idx3-ubyte.gz
Extracting MNIST_data/train-labels-idx1-ubyte.gz
Extracting MNIST_data/t10k-images-idx3-ubyte.gz
Extracting MNIST_data/t10k-labels-idx1-ubyte.gz
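
The loaded object bundles train, validation, and test splits, with images flattened to 784-vectors and labels one-hot encoded. A quick inspection:

print(mnist.train.images.shape, mnist.train.labels.shape)  # (55000, 784) (55000, 10)
print(mnist.test.images.shape)                             # (10000, 784)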

Build a Softmax Regression Model


In [86]:
# A tf.InteractiveSession would also work here; we use a regular Session with explicit sess.run calls.
sess = tf.Session()
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
W = tf.Variable(tf.zeros([784,10]))
b = tf.Variable(tf.zeros([10]))
sess.run(tf.global_variables_initializer())

In [87]:
y = tf.matmul(x,W) + b
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
for _ in range(1000):
    batch = mnist.train.next_batch(100)
    sess.run(train_step, {x: batch[0], y_: batch[1]})

In [91]:
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
sess.run(accuracy, {x: mnist.test.images, y_: mnist.test.labels})


Out[91]:
0.91949999
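
To inspect individual predictions rather than the aggregate accuracy, the logits can be converted to class labels with tf.argmax. A small sketch over the first few test images:

predicted = tf.argmax(y, 1)
print(sess.run(predicted, {x: mnist.test.images[:5]}))
print(np.argmax(mnist.test.labels[:5], axis=1))  # true labels for comparison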

Build a Multilayer Convolutional Network


In [92]:
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

In [93]:
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')

In [95]:
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
x_image = tf.reshape(x, [-1, 28, 28, 1])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
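
Each SAME-padded convolution preserves the spatial size and each 2x2 pool halves it, so the image goes 28x28 -> 14x14 -> 7x7, which is where the 7*7*64 flattening comes from. The static shapes confirm this:

print(h_pool1.get_shape())  # (?, 14, 14, 32)
print(h_pool2.get_shape())  # (?, 7, 7, 64)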

In [96]:
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

In [100]:
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(2000):
        batch = mnist.train.next_batch(50)
        if i % 100 == 0:
            train_accuracy = sess.run(accuracy, {x: batch[0], y_: batch[1], keep_prob: 1.0})
            print('step %d, training accuracy %g' % (i, train_accuracy))
        sess.run(train_step, {x: batch[0], y_: batch[1], keep_prob: 0.5})
    print('test accuracy %g' % sess.run(accuracy, {x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))


step 0, training accuracy 0.06
step 100, training accuracy 0.86
step 200, training accuracy 0.76
step 300, training accuracy 0.9
step 400, training accuracy 0.94
step 500, training accuracy 0.96
step 600, training accuracy 0.94
step 700, training accuracy 0.98
step 800, training accuracy 0.98
step 900, training accuracy 1
step 1000, training accuracy 0.96
step 1100, training accuracy 0.92
step 1200, training accuracy 0.98
step 1300, training accuracy 0.94
step 1400, training accuracy 0.94
step 1500, training accuracy 0.94
step 1600, training accuracy 0.98
step 1700, training accuracy 0.94
step 1800, training accuracy 0.96
step 1900, training accuracy 1
test accuracy 0.9753
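
The trained variables vanish when the with-block exits; a tf.train.Saver can persist them to disk. A hedged sketch, with an illustrative checkpoint path:

saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # ... run the training loop above ...
    saver.save(sess, '/tmp/mnist_cnn.ckpt')  # hypothetical path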