In [ ]:
# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
This notebook demonstrates using Cloud TPUs in Colab to build a simple regression model on noisy samples of a sine curve (y = -20*sin(x) + 3), predicting y for a given x.
The notebook generates a large synthetic dataset and demonstrates the training performance advantage of Cloud TPUs.
The model trains for 10 epochs with 512 steps per epoch on the TPU and completes in approximately 2 minutes.
This notebook is hosted on GitHub. To view it in its original repository, after opening the notebook, select File > View on GitHub.
In [ ]:
import math
import os
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
In [ ]:
print(tf.__version__)
import distutils.version
if distutils.version.LooseVersion(tf.__version__) < '2.0':
  raise Exception('This notebook requires TensorFlow 2.0 or higher.')
In [ ]:
use_tpu = True #@param {type:"boolean"}
if use_tpu:
  assert 'COLAB_TPU_ADDR' in os.environ, 'Missing TPU; did you request a TPU in Notebook Settings?'

if 'COLAB_TPU_ADDR' in os.environ:
  TPU_ADDRESS = 'grpc://{}'.format(os.environ['COLAB_TPU_ADDR'])
else:
  TPU_ADDRESS = ''

# Connect to the TPU cluster and initialize the TPU system before running
# any other TensorFlow ops.
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=TPU_ADDRESS)
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
print("All devices: ", tf.config.list_logical_devices('TPU'))
In [ ]:
data_size = 2**18

# Noisy samples of a scaled, shifted sine curve: y = -20*sin(x) + 3 + noise.
x = np.linspace(0, 6, data_size, dtype=np.float32)
np.random.shuffle(x)
y = -20 * np.sin(x, dtype=np.float32) + 3 + np.random.normal(0, 1, (data_size,)).astype(np.float32)

x = x.reshape(-1, 1)
y = y.reshape(-1, 1)

# Use half of the data for training and hold out the other half for testing.
train_x, test_x = x[:data_size//2], x[data_size//2:]
train_y, test_y = y[:data_size//2], y[data_size//2:]

plt.plot(x, y, 'bo')
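TPU input pipelines generally perform best with tf.data. This notebook feeds the NumPy arrays to Keras directly, but as an optional sketch (not part of the original notebook), the same data could be wrapped in a tf.data.Dataset. The batch size of 256 and shuffle buffer below are assumptions; 256 is chosen so that 512 steps cover the 2**17 training examples once per epoch.
In [ ]:
# Optional sketch: a tf.data pipeline over the training arrays.
# TPUs require static shapes, hence drop_remainder=True.
batch_size = 256
train_ds = (tf.data.Dataset.from_tensor_slices((train_x, train_y))
            .shuffle(buffer_size=2048)
            .batch(batch_size, drop_remainder=True)
            .prefetch(tf.data.experimental.AUTOTUNE))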
In [ ]:
def get_model():
  return tf.keras.models.Sequential([
      tf.keras.layers.Dense(1, input_shape=(1,)),
      tf.keras.layers.Dense(200, activation='sigmoid'),
      tf.keras.layers.Dense(80, activation='sigmoid'),
      tf.keras.layers.Dense(1)
  ])
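As a quick sanity check (a sketch, not in the original notebook), you can build a throwaway copy of the model and inspect its layer shapes and parameter counts:
In [ ]:
# Sketch: print layer output shapes and parameter counts.
get_model().summary()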
In [ ]:
# Build and compile the model under the TPUStrategy scope so that its
# variables are created on the TPU.
strategy = tf.distribute.experimental.TPUStrategy(resolver)
with strategy.scope():
  model = get_model()
  model.compile(optimizer=tf.keras.optimizers.SGD(.01),
                loss='mean_squared_error',
                metrics=['mean_squared_error'])
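If you run with use_tpu set to False (no TPU attached), the TPU resolver setup above will not apply. A minimal fallback sketch, assuming you want the same compile/fit code to run on CPU or GPU, is to use TensorFlow's default strategy:
In [ ]:
# Fallback sketch: build and compile under the default (no-op) strategy
# when no TPU address was found.
if not TPU_ADDRESS:
  strategy = tf.distribute.get_strategy()
  with strategy.scope():
    model = get_model()
    model.compile(optimizer=tf.keras.optimizers.SGD(.01),
                  loss='mean_squared_error',
                  metrics=['mean_squared_error'])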
In [ ]:
model.fit(train_x, train_y, epochs=10, steps_per_epoch=512)
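To quantify how well the model generalizes (a sketch; the original notebook only plots predictions), you can evaluate the mean squared error on the held-out half of the data:
In [ ]:
# Sketch: report loss and MSE on the held-out test split.
model.evaluate(test_x, test_y, batch_size=256)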
In [ ]:
predictions = model.predict(test_x)
plt.plot(test_x, predictions, 'ro')
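For a visual check (a sketch, not in the original notebook), you can overlay the noise-free target curve y = -20*sin(x) + 3 on the model's predictions:
In [ ]:
# Sketch: compare predictions (red) against the noise-free target curve (green).
xs = np.sort(test_x, axis=0)
plt.plot(test_x, predictions, 'ro', markersize=2)
plt.plot(xs, -20 * np.sin(xs) + 3, 'g-')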
On Google Cloud Platform, in addition to the GPUs and TPUs available on pre-configured deep learning VMs, you will find AutoML (beta) for training custom models without writing code, and Cloud ML Engine, which allows you to run parallel training and hyperparameter tuning of your custom models on powerful distributed hardware.