TensorBoard is a suite of visualization tools that simplifies the analysis and debugging of TensorFlow programs.
We can use TensorBoard to visualize the computation graph and to track quantities recorded during training, such as the loss and histograms of the model parameters.
An example snapshot of TensorBoard:
Let's attach an event logger to our OLS example.
In [2]:
%pylab inline
pylab.style.use('ggplot')
import numpy as np
In [3]:
import tensorflow as tf
In [4]:
import os
import shutil
from contextlib import contextmanager
@contextmanager
def event_logger(logdir, session):
"""
Hands out a managed tensorflow summary writer.
Cleans up the event log directory before every run.
"""
if os.path.isdir(logdir):
shutil.rmtree(logdir)
os.makedirs(logdir)
writer = tf.summary.FileWriter(logdir, session.graph)
yield writer
writer.flush()
writer.close()
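For reference, the helper above is meant to be used as a nested context manager around a session; here is a minimal usage sketch (the 'demo' log directory is just an illustrative placeholder):
In [ ]:
# Minimal usage sketch of event_logger -- the log directory here is only a placeholder.
with tf.Session() as session:
    with event_logger(r'C:\Temp\ols_logs\demo', session) as writer:
        # build and run the graph here, then record summaries via
        # writer.add_summary(summary_proto, global_step)
        pass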
In [5]:
x1 = np.random.rand(50)
x2 = np.random.rand(50)
y_ = 2*x1 + 3*x2 + 5
X_data = np.column_stack([x1, x2, np.ones(50)])
y_data = np.atleast_2d(y_).T
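Since the data are generated without noise, we know what the fit should recover; as a quick sanity check, the closed-form least-squares solution (plain NumPy, outside the TensorFlow graph) gives back the coefficients we baked into y_:
In [ ]:
# Closed-form least-squares solution as a sanity check -- gradient descent
# below should converge to roughly these values: [2, 3, 5].
w_exact = np.linalg.pinv(X_data).dot(y_data)
print(np.squeeze(w_exact))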
In [6]:
# Same as before, but this time with TensorBoard output
log_event_dir = r'C:\Temp\ols_logs\run_1'
# This is necessary to avoid appending the tensors in our OLS example
# repeatedly into the default graph each time this cell is re-run.
tf.reset_default_graph()
X = tf.placeholder(shape=[50, 3], dtype=np.float64, name='X')
y = tf.placeholder(shape=[50, 1], dtype=np.float64, name='y')
w = tf.Variable(np.random.rand(3, 1), dtype=np.float64, name='w')
y_hat = tf.matmul(X, w)
loss_func = tf.reduce_mean(tf.squared_difference(y_hat, y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.5)
train_op = optimizer.minimize(loss_func)
with tf.Session() as session:
with event_logger(log_event_dir, session):
tensorboard_cmd = 'tensorboard --logdir={}'.format(log_event_dir)
print('Logging events to {}, use \n\n\t{}\n\n to start a new tensorboard session.'.format(
log_event_dir, tensorboard_cmd))
init_op = tf.global_variables_initializer()
session.run(init_op)
feed_dict = {X: X_data, y: y_data}
for step in range(1, 501):
session.run(train_op, feed_dict=feed_dict)
if step % 50 == 0:
current_w = np.squeeze(w.eval(session=session))
print('Result after {} iterations: {}'.format(step, current_w))
If we now launch TensorBoard with the command printed above and navigate to http://localhost:6006, we'll see something like this under the GRAPHS tab:
Since we're using a gradient-descent-based optimizer to minimize the MSE, an important piece of diagnostic information is the value of the loss function as a function of the number of iterations. So let's add a custom summary that records the value of the loss function through TensorFlow's event logging infrastructure, which we can then examine in TensorBoard.
In [7]:
# Same as before, but this time with a TensorBoard output for the loss function
log_event_dir = r'C:\Temp\ols_logs\run_2'
# This is necessary to avoid appending the tensors in our OLS example
# repeatedly into the default graph each time this cell is re-run.
tf.reset_default_graph()
X = tf.placeholder(shape=[50, 3], dtype=np.float64, name='X')
y = tf.placeholder(shape=[50, 1], dtype=np.float64, name='y')
w = tf.Variable(np.random.rand(3, 1), dtype=np.float64, name='w')
y_hat = tf.matmul(X, w)
loss_func = tf.reduce_mean(tf.squared_difference(y_hat, y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.5)
train_op = optimizer.minimize(loss_func)
# Add a tensor with summary of the loss function
loss_summary = tf.summary.scalar('loss', loss_func)
summary_op = tf.summary.merge_all()
with tf.Session() as session:
with event_logger(log_event_dir, session) as recorder:
tensorboard_cmd = 'tensorboard --logdir={}'.format(log_event_dir)
print('Logging events to {}, use \n\n\t{}\n\n to start a new tensorboard session.'.format(
log_event_dir, tensorboard_cmd))
init_op = tf.global_variables_initializer()
session.run(init_op)
feed_dict = {X: X_data, y: y_data}
for step in range(1, 501):
_, summary_result = session.run([train_op, summary_op], feed_dict=feed_dict)
if step % 10 == 0:
recorder.add_summary(summary_result, step)
if step % 50 == 0:
current_w = np.squeeze(w.eval(session=session))
print('Result after {} iterations: {}'.format(step, current_w))
After re-running with the loss function summary and re-launching TensorBoard, we'll see something like this under the SCALARS tab:
In [8]:
# Same as before, but this time with a TensorBoard output for the loss function AND regression coefficients
log_event_dir = r'C:\Temp\ols_logs\run_3'
# This is necessary to avoid appending the tensors in our OLS example
# repeatedly into the default graph each time this cell is re-run.
tf.reset_default_graph()
X = tf.placeholder(shape=[50, 3], dtype=np.float64, name='X')
y = tf.placeholder(shape=[50, 1], dtype=np.float64, name='y')
w = tf.Variable(np.random.rand(3, 1), dtype=np.float64, name='w')
y_hat = tf.matmul(X, w)
loss_func = tf.reduce_mean(tf.squared_difference(y_hat, y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.5)
train_op = optimizer.minimize(loss_func)
# summary for the loss function
loss_summary = tf.summary.scalar('loss', loss_func)
# summary for w
w_summary = tf.summary.histogram('coefficients', w)
summary_op = tf.summary.merge_all()
with tf.Session() as session:
with event_logger(log_event_dir, session) as recorder:
tensorboard_cmd = 'tensorboard --logdir={}'.format(log_event_dir)
print('Logging events to {}, use \n\n\t{}\n\n to start a new tensorboard session.'.format(
log_event_dir, tensorboard_cmd))
init_op = tf.global_variables_initializer()
session.run(init_op)
feed_dict = {X: X_data, y: y_data}
for step in range(1, 501):
_, summary_result = session.run([train_op, summary_op], feed_dict=feed_dict)
if step % 10 == 0:
recorder.add_summary(summary_result, step)
if step % 50 == 0:
current_w = np.squeeze(w.eval(session=session))
print('Result after {} iterations: {}'.format(step, current_w))
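Since each run went to its own subdirectory under C:\Temp\ols_logs, we can also compare all three in a single TensorBoard instance by pointing --logdir at the parent directory; TensorBoard picks up each subdirectory as a separate run. A small sketch in the same spirit as the print statements above:
In [ ]:
# Point TensorBoard at the parent log directory to see run_1, run_2 and run_3
# side by side (each subdirectory shows up as a separate run in the UI).
parent_log_dir = r'C:\Temp\ols_logs'
print('tensorboard --logdir={}'.format(parent_log_dir))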