In [0]:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
This is a notebook from the TensorBoard talk at the TensorFlow Dev Summit 2019. More detailed documentation about these features can be found at tensorflow.org/tensorboard
This notebook was slightly modified after the demo to remove account-specific data.
In [0]:
# Load the TensorBoard notebook extension
# (older TensorBoard versions used "tensorboard.notebook"; it is now just "tensorboard")
%load_ext tensorboard
In [0]:
# Clear any logs from previous runs
!rm -rf ./logs/
In [0]:
from __future__ import absolute_import, division, print_function, unicode_literals
%tensorflow_version 2.x
import tensorflow as tf
import datetime
In [0]:
fashion_mnist = tf.keras.datasets.fashion_mnist
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
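A quick, optional sanity check: Fashion-MNIST has 60,000 training and 10,000 test images, each 28x28 grayscale.
In [0]:
# Optional sanity check of the dataset shapes
print(x_train.shape, y_train.shape)  # expected: (60000, 28, 28) (60000,)
print(x_test.shape, y_test.shape)    # expected: (10000, 28, 28) (10000,)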
In [0]:
def train_test_model(run_dir, hparams):
  model = tf.keras.models.Sequential([
      tf.keras.layers.Flatten(),
      tf.keras.layers.Dense(hparams['num_units'], activation=tf.nn.relu),
      tf.keras.layers.Dropout(hparams['dropout_rate']),
      tf.keras.layers.Dense(10, activation=tf.nn.softmax)
  ])
  model.compile(optimizer=hparams['optimizer'],
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy'])
  model.fit(x_train, y_train,
            validation_data=(x_test, y_test),
            epochs=3,
            callbacks=[tf.keras.callbacks.TensorBoard(run_dir + "/keras")])
  scores = model.evaluate(x_test, y_test)
  return scores
In [0]:
train_test_model("logs/sample", {'num_units' : 16, 'dropout_rate' : 0.1, 'optimizer' : 'adam'})
In [0]:
%tensorboard --logdir logs/sample
Note that train and validation now appear as separate runs so you can compare them on the same charts.
In the Graphs dashboard, click on "keras" in the "Tag" dropdown to view the Keras conceptual graph.
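Tip: since datetime is imported above, you can timestamp each run's log directory so that repeated runs don't overwrite each other. A minimal sketch (the path scheme here is just an example), commented out like the other optional cells:
In [0]:
# Illustrative sketch: give each run a unique, timestamped log directory.
# log_dir = "logs/sample-" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
# train_test_model(log_dir, {'num_units': 16, 'dropout_rate': 0.1, 'optimizer': 'adam'})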
Other useful APIs for working with TensorBoard in notebooks:
In [0]:
# from tensorboard import notebook
# notebook.list()  # show the TensorBoard instances currently running in this notebook
# notebook.display(height=1000)  # re-display an instance, e.g. with a taller frame
Today, you can try out different hyperparameters by encoding them in the run names and then comparing them in TensorBoard. This is not ideal, so let's see if we can do something better. Note that the experience below will change over time.
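For example, the run-name approach might look like the following (an illustrative sketch; the directory layout is made up, and the cell is commented out because it trains several models):
In [0]:
# Illustrative sketch of encoding hyperparameters into run names:
# for units in [16, 32]:
#   for dropout in [0.1, 0.2]:
#     run_name = "units=%d,dropout=%g,adam" % (units, dropout)
#     train_test_model("logs/named_runs/" + run_name,
#                      {'num_units': units, 'dropout_rate': dropout, 'optimizer': 'adam'})
# %tensorboard --logdir logs/named_runs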
In [0]:
from tensorboard.plugins.hparams import api_pb2
from tensorboard.plugins.hparams import summary as hparams_summary
from google.protobuf import struct_pb2
In [0]:
num_units_list = [16, 32] # Number of units in the dense layer
dropout_rate_list = [0.1, 0.2] # Dropout rate
optimizer_list = ['adam']
In [0]:
def create_experiment_summary(num_units_list, dropout_rate_list, optimizer_list):
  num_units_list_val = struct_pb2.ListValue()
  num_units_list_val.extend(num_units_list)
  dropout_rate_list_val = struct_pb2.ListValue()
  dropout_rate_list_val.extend(dropout_rate_list)
  optimizer_list_val = struct_pb2.ListValue()
  optimizer_list_val.extend(optimizer_list)
  return hparams_summary.experiment_pb(
      # List our hyperparameters
      hparam_infos=[
          api_pb2.HParamInfo(name='num_units',
                             display_name='Number of units',
                             type=api_pb2.DATA_TYPE_FLOAT64,
                             domain_discrete=num_units_list_val),
          api_pb2.HParamInfo(name='dropout_rate',
                             display_name='Dropout rate',
                             type=api_pb2.DATA_TYPE_FLOAT64,
                             domain_discrete=dropout_rate_list_val),
          api_pb2.HParamInfo(name='optimizer',
                             display_name='Optimizer',
                             type=api_pb2.DATA_TYPE_STRING,
                             domain_discrete=optimizer_list_val)
      ],
      # List our metrics
      metric_infos=[
          api_pb2.MetricInfo(
              name=api_pb2.MetricName(
                  tag='accuracy'),
              display_name='Accuracy'),
      ]
  )

exp_summary = create_experiment_summary(num_units_list, dropout_rate_list, optimizer_list)
root_logdir_writer = tf.summary.create_file_writer("logs/hparam_tuning")
with root_logdir_writer.as_default():
  tf.summary.import_event(tf.compat.v1.Event(summary=exp_summary).SerializeToString())
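At this point logs/hparam_tuning should contain a single events file holding the experiment summary; a quick, optional way to confirm:
In [0]:
# Optional: confirm that the experiment summary event file was written
!ls logs/hparam_tuning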
In [0]:
def run(run_dir, hparams):
  writer = tf.summary.create_file_writer(run_dir)
  summary_start = hparams_summary.session_start_pb(hparams=hparams)

  with writer.as_default():
    tf.summary.import_event(tf.compat.v1.Event(summary=summary_start).SerializeToString())
    loss, accuracy = train_test_model(run_dir, hparams)
    tf.summary.scalar('accuracy', accuracy, step=0, description="The accuracy")
    summary_end = hparams_summary.session_end_pb(api_pb2.STATUS_SUCCESS)
    tf.summary.import_event(tf.compat.v1.Event(summary=summary_end).SerializeToString())
In [0]:
%tensorboard --logdir logs/hparam_tuning
In [0]:
session_num = 0

for num_units in num_units_list:
  for dropout_rate in dropout_rate_list:
    for optimizer in optimizer_list:
      hparams = {'num_units': num_units, 'dropout_rate': dropout_rate, 'optimizer': optimizer}
      print('--- Running training session %d' % (session_num + 1))
      print(hparams)
      run_name = "run-%d" % session_num
      run("logs/hparam_tuning/" + run_name, hparams)
      session_num += 1
Refresh TensorBoard and look at the HParams dashboard for various visualizations of these sessions.
In [0]:
# The Dev Summit demo showed using logs directly from Google Drive, such as
# the following:
# from google.colab import drive
# drive.mount('/content/gdrive')
# %tensorboard --logdir /content/gdrive/My\ Drive/DevSummit/hparams_demo
# For this notebook, we download the logs directly instead (but you can place
# them in Google Drive to replicate the above experience)
In [0]:
%%bash
wget -q 'https://storage.googleapis.com/download.tensorflow.org/tensorboard/hparams_demo_logs.zip'
unzip -q hparams_demo_logs.zip -d logs/hparam_demo
In [0]:
%tensorboard --logdir logs/hparam_demo
We've looked at: running TensorBoard inside a notebook, comparing train and validation runs, viewing the Keras conceptual graph, and tuning hyperparameters with the HParams dashboard.
Read the TensorBoard documentation at: tensorflow.org/tensorboard