bash-3.2$ source activate tensorflow
(tensorflow) bash-3.2$ jupyter notebook
In [1]:
import tensorflow as tf
print(tf.__version__)
import numpy as np
# to make this notebook's output stable across runs
np.random.seed(42)
In [2]:
x = tf.placeholder(dtype=tf.float64, name='x')
y = tf.placeholder(dtype=tf.float64, name='y')
In [3]:
w = tf.Variable(0., dtype=tf.float64, name='w')
b = tf.Variable(0., dtype=tf.float64, name='b')
tf.summary.scalar('w', w);
tf.summary.scalar('b', b);
In [4]:
with tf.name_scope('y1'):
y1 = w*x + b
In [5]:
with tf.name_scope('loss'):
loss = tf.reduce_mean(tf.square(y - y1))
tf.summary.scalar('loss',loss)
In [6]:
optimizer = tf.train.GradientDescentOptimizer(0.5)
step = tf.Variable(0, name='step', trainable=False)
train = optimizer.minimize(loss, global_step=step)
initialize = tf.global_variables_initializer()
In [12]:
from datetime import datetime
now = datetime.utcnow().strftime("%Y%m%d")
root_logdir = "tf_logs"
logdir = "{}/run-{}/".format(root_logdir, now)
summary = [tf.summary.merge_all(), step]
writer = tf.summary.FileWriter(logdir)
In [13]:
logdir
Out[13]:
$ source activate tensorflow
$ tensorboard --logdir tf_logs/run-20170719/
In [8]:
x_dat = np.random.rand(100)
y_dat = 3.*x_dat + 2. + 0.1*np.random.rand(100)
feed_dict = {x:x_dat, y:y_dat}
In [9]:
sess = tf.Session()
writer.add_graph(sess.graph)
sess.run(initialize)
In [10]:
for step in range(201):
sess.run(train, feed_dict)
if step%20 == 0:
print(step, sess.run([w,b,loss], feed_dict))
writer.add_summary(*sess.run(summary, feed_dict))
In [ ]: