This notebook loads a model previously trained in 2_keras.ipynb or 3_eager.ipynb earlier in the TensorFlow Basics workshop.
Note: The code in this notebook is quite Colab-specific and won't work in plain Jupyter.
In [ ]:
# In Jupyter, you would need to install TF 2 via !pip.
%tensorflow_version 2.x
In [ ]:
## Load models from Drive (Colab only).
models_path = '/content/gdrive/My Drive/amld_data/models'
data_path = '/content/gdrive/My Drive/amld_data/zoo_img'
## Or load models from local machine.
# models_path = './amld_models'
# data_path = './amld_data'
## Or load models from GCS (Colab only).
# models_path = 'gs://amld-datasets/models'
# data_path = 'gs://amld-datasets/zoo_img_small'
In [ ]:
if models_path.startswith('/content/gdrive/'):
  from google.colab import drive
  drive.mount('/content/gdrive')

if models_path.startswith('gs://'):
  # Keras doesn't read directly from GCS -> download the models first.
  from google.colab import auth
  import os
  os.makedirs('./amld_models', exist_ok=True)
  auth.authenticate_user()
  !gsutil cp -r "$models_path"/\* ./amld_models
  models_path = './amld_models'

!ls -lh "$models_path"
In [ ]:
import json, os
import numpy as np
from matplotlib import pyplot as plt
import tensorflow as tf
# Tested with TensorFlow 2.1.0
print('version={}, CUDA={}, GPU={}, TPU={}'.format(
    tf.__version__, tf.test.is_built_with_cuda(),
    # GPU attached? Note that you can "Runtime/Change runtime type..." in Colab.
    len(tf.config.list_physical_devices('GPU')) > 0,
    # TPU accessible? (only works on Colab)
    'COLAB_TPU_ADDR' in os.environ))
In [ ]:
# Load the label names from the dataset.
labels = [label.strip() for label in
          tf.io.gfile.GFile('{}/labels.txt'.format(data_path))]
print('\n'.join(['%2d: %s' % (i, label) for i, label in enumerate(labels)]))
In [ ]:
# Load model from 2_keras.ipynb
model = tf.keras.models.load_model(os.path.join(models_path, 'linear.h5'))
model.summary()
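As a quick sanity check you can run the freshly loaded model on a blank image. This is a minimal sketch that assumes, as in the drawing demo below, that the model accepts 64x64 inputs and outputs one probability per entry in `labels`.
In [ ]:
# Sanity check (sketch): run the loaded model on an all-zero 64x64 image.
# Assumes the input/output shapes match the zoo dataset used in 2_keras.ipynb.
blank = np.zeros([1, 64, 64])
probs = model.predict(blank)[0]
print('output shape:', probs.shape)
print('top prediction for a blank image:', labels[probs.argmax()])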
In [ ]:
from google.colab import output
import IPython
def predict(img_64):
  """Get predictions for the provided image.

  Args:
    img_64: Raw image data (dtype int).

  Returns:
    A JSON object whose `result` value is a text representation of the
    top predictions.
  """
  # Reshape the flat image into a batch containing a single 64x64 image.
  preds = model.predict(np.array(img_64, float).reshape([1, 64, 64]))
  # Get the top three predictions (reverse argsort).
  top3 = (-preds[0]).argsort()[:3]
  # Return both probability and predicted label name.
  result = '\n'.join(['%.3f: %s' % (preds[0, i], labels[i]) for i in top3])
  return IPython.display.JSON(dict(result=result))
output.register_callback('amld.predict', predict)
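Before wiring up the canvas, you can call the registered `predict()` function directly from Python to check that it works end to end. This is a small sketch that simply feeds an empty drawing (all zeros):
In [ ]:
# Quick test of the callback with an empty 64x64 drawing (all zeros).
test_json = predict(np.zeros(64*64, int).tolist())
print(test_json.data['result'])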
In [ ]:
%%html
<meta name="viewport" content="width=device-width, initial-scale=1, user-scalable=no">
<canvas width="256" height="256" id="canvas" style="border:1px solid black"></canvas><br />
<button id="clear">clear</button><br />
<pre id="output"></pre>
<script>
let upscaleFactor = 4, halfPenSize = 2
let canvas = document.getElementById('canvas')
let output = document.getElementById('output')
let ctx = canvas.getContext('2d')
let img_64 = new Uint8Array(64*64)
let dragging = false
let timeout
let predict = () => {
google.colab.kernel.invokeFunction('amld.predict', [Array.from(img_64)], {}).then(
obj => output.textContent = obj.data['application/json'].result)
}
const getPos = e => {
let x = e.offsetX, y = e.offsetY
if (e.touches) {
const rect = canvas.getBoundingClientRect()
x = e.touches[0].clientX - rect.left
y = e.touches[0].clientY - rect.top
}
return {
x: Math.floor((x - 2*halfPenSize*upscaleFactor/2)/upscaleFactor),
y: Math.floor((y - 2*halfPenSize*upscaleFactor/2)/upscaleFactor),
}
}
const handler = e => {
const { x, y } = getPos(e)
ctx.fillStyle = 'black'
ctx.fillRect(x*upscaleFactor, y*upscaleFactor,
2*halfPenSize*upscaleFactor, 2*halfPenSize*upscaleFactor)
for (let yy = y - halfPenSize; yy < y + halfPenSize; yy++)
for (let xx = x - halfPenSize; xx < x + halfPenSize; xx++)
img_64[64*Math.min(63, Math.max(0, yy)) + Math.min(63, Math.max(0, xx))] = 1
clearTimeout(timeout)
timeout = setTimeout(predict, 500)
}
canvas.addEventListener('touchstart', e => {dragging=true; handler(e)})
canvas.addEventListener('touchmove', e => {e.preventDefault(); dragging && handler(e)})
canvas.addEventListener('touchend', () => dragging=false)
canvas.addEventListener('mousedown', e => {dragging=true; handler(e)})
canvas.addEventListener('mousemove', e => {dragging && handler(e)})
canvas.addEventListener('mouseup', () => dragging=false)
canvas.addEventListener('mouseleave', () => dragging=false)
document.getElementById('clear').addEventListener('click', () => {
ctx.fillStyle = 'white'
ctx.fillRect(0, 0, 64*upscaleFactor, 64*upscaleFactor)
output.textContent = ''
img_64 = new Uint8Array(64*64)
})
</script>
In [ ]:
# YOUR ACTION REQUIRED:
# Load another model from 2_keras.ipynb and observe:
# - Do you get better/worse predictions?
# - Do you notice a difference in latency?
# - Can you figure out how the model "thinks" by providing similar images
#   that yield different predictions, or different images that yield the same
#   prediction?
# A possible starting point is sketched in the next cell.
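One possible starting point: list the contents of models_path to see which models you actually saved in 2_keras.ipynb, then load one of them. The file name `conv.h5` below is only a hypothetical example.
In [ ]:
# List the available models, then load one of them.
!ls -lh "$models_path"
# 'conv.h5' is a hypothetical file name - replace it with one listed above.
# model = tf.keras.models.load_model(os.path.join(models_path, 'conv.h5'))
# model.summary()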
Read about the basic concepts of TensorFlow.js: https://js.tensorflow.org/tutorials/core-concepts.html
If you find the Colab %%html approach cumbersome for exploring the JS API, try CodePen by clicking the "Try TensorFlow.js" button on https://js.tensorflow.org/
In [ ]:
%%html
<!--
Getting the data of a tensor in TensorFlow.js: use the async .data() method
to show the output in the "output" element.
See the output in the JavaScript console (e.g. Chrome developer tools).
For convenience, you can also use the following Codepen:
https://codepen.io/amld-tensorflow-basics/pen/OJPagyN
-->
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@1.0.0/dist/tf.min.js"></script>
<pre id="output"></pre>
<script>
let output = document.getElementById('output')
let t = tf.tensor([1, 2, 3])
output.textContent = t
// YOUR ACTION REQUIRED:
// Use "t.data()" to append the tensor's data values to "output.textContent".
</script>
In [ ]:
# Get top 3 predictions using TensorFlow Eager.
preds = tf.constant([0.1, 0.5, 0.2, 0.0])
topk = tf.math.top_k(preds, 3)
for idx, value in zip(topk.indices.numpy(), topk.values.numpy()):
  print('idx', idx, 'value', value)
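To connect this to the classification task, you can map the returned indices back to label names, just like the JavaScript code below will do. This assumes `labels` from earlier has at least as many entries as `preds` (here `preds` is only the toy tensor above, so the names are not meaningful).
In [ ]:
# Map the top-k indices back to label names.
for idx, value in zip(topk.indices.numpy(), topk.values.numpy()):
  print('%.3f: %s' % (value, labels[idx]))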
In [ ]:
%%html
<!--
Implement the same top-3 functionality in TensorFlow.js, showing the output
in the "output" element.
See https://js.tensorflow.org/api/latest/index.html#topk
-->
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@1.0.0/dist/tf.min.js"></script>
<pre id="output"></pre>
<script>
let output = document.getElementById('output')
let preds = tf.tensor([0.1, 0.5, 0.2, 0.0])
// YOUR ACTION REQUIRED:
// Use tf.topk() to get top 3 predictions in "preds" and append both the
// index and the value of these predictions to "output".
</script>
We can convert the Keras model into TensorFlow.js format using the Python package tensorflowjs.
Read more about importing Keras models: https://js.tensorflow.org/tutorials/import-keras.html
In [ ]:
# (Never mind the incompatible package complaints - it just works fine.)
!pip install -q tensorflowjs
In [ ]:
# Directory where the converted model will be stored.
tfjs_model_path = './tfjs/model'
!mkdir -p "$tfjs_model_path"
In [ ]:
import tensorflowjs as tfjs
# Convert model
tf.keras.backend.clear_session() # Clean up variable names before exporting.
# (You can safely ignore the H5pyDeprecationWarning here...)
model = tf.keras.models.load_model(os.path.join(models_path, 'linear.h5'))
tfjs.converters.save_keras_model(model, tfjs_model_path)
!ls -lh "$tfjs_model_path"
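The converter writes a model.json (the architecture plus a manifest of weight files) next to one or more binary weight shards. A small sketch to peek into the descriptor:
In [ ]:
# Peek into the converted model's JSON descriptor.
with open(os.path.join(tfjs_model_path, 'model.json')) as f:
  tfjs_model_json = json.load(f)
print('top-level keys:', sorted(tfjs_model_json.keys()))
weight_paths = [p for group in tfjs_model_json.get('weightsManifest', [])
                for p in group.get('paths', [])]
print('weight files:', weight_paths)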
In [ ]:
import json
# You can copy this into the JavaScript code in the next cell if you load a
# model trained on a custom dataset (code below assumes dataset="zoo").
print(json.dumps(labels))
1. Write index.html
This is essentially the same drawing code as in "Live Predictions" above, plus some code that loads the exported TensorFlow.js model and calls model.predict().
In [ ]:
with open('./tfjs/index.html', 'w') as f:
  f.write('''
<meta name="viewport" content="width=device-width, initial-scale=1, user-scalable=no">
<canvas width="256" height="256" id="canvas" style="border:1px solid black"></canvas><br />
<button id="clear">clear</button><br />
<pre id="output"></pre>
<script>
let upscaleFactor = 4, halfPenSize = 2
let canvas = document.getElementById('canvas')
let output = document.getElementById('output')
let ctx = canvas.getContext('2d')
let img_64 = new Uint8Array(64*64)
let dragging = false
let timeout
let predict = () => {
google.colab.kernel.invokeFunction('amld.predict', [Array.from(img_64)], {}).then(
obj => output.textContent = obj.data['application/json'].result)
}
const getPos = e => {
let x = e.offsetX, y = e.offsetY
if (e.touches) {
const rect = canvas.getBoundingClientRect()
x = e.touches[0].clientX - rect.left
y = e.touches[0].clientY - rect.top
}
return {
x: Math.floor((x - 2*halfPenSize*upscaleFactor/2)/upscaleFactor),
y: Math.floor((y - 2*halfPenSize*upscaleFactor/2)/upscaleFactor),
}
}
const handler = e => {
const { x, y } = getPos(e)
ctx.fillStyle = 'black'
ctx.fillRect(x*upscaleFactor, y*upscaleFactor,
2*halfPenSize*upscaleFactor, 2*halfPenSize*upscaleFactor)
for (let yy = y - halfPenSize; yy < y + halfPenSize; yy++)
for (let xx = x - halfPenSize; xx < x + halfPenSize; xx++)
img_64[64*Math.min(63, Math.max(0, yy)) + Math.min(63, Math.max(0, xx))] = 1
clearTimeout(timeout)
timeout = setTimeout(predict, 500)
}
canvas.addEventListener('touchstart', e => {dragging=true; handler(e)})
canvas.addEventListener('touchmove', e => {e.preventDefault(); dragging && handler(e)})
canvas.addEventListener('touchend', () => dragging=false)
canvas.addEventListener('mousedown', e => {dragging=true; handler(e)})
canvas.addEventListener('mousemove', e => {dragging && handler(e)})
canvas.addEventListener('mouseup', () => dragging=false)
canvas.addEventListener('mouseleave', () => dragging=false)
document.getElementById('clear').addEventListener('click', () => {
ctx.fillStyle = 'white'
ctx.fillRect(0, 0, 64*upscaleFactor, 64*upscaleFactor)
output.textContent = ''
img_64 = new Uint8Array(64*64)
})
</script>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@1.0.0/dist/tf.min.js"></script>
<script>
const labels = %s
const modelPath = './model/model.json'
let model = null
tf.loadLayersModel(modelPath)
.then(response => model = response)
.catch(error => output.textContent = 'ERROR : ' + error.message)
predict = () => {
const preds = model.predict(tf.tensor(img_64).reshape([1, 64, -1]))
const { values, indices } = tf.topk(preds, 3)
Promise.all([values.data(), indices.data()]).then(data => {
const [ values, indices ] = data
output.textContent = ''
values.forEach((v, i) => output.textContent += `${labels[indices[i]]} : ${v.toFixed(3)}\n`)
})
}
</script>''' % json.dumps(labels))
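A simple check that the file was written and that the converted model sits in the directory referenced by modelPath:
In [ ]:
# Verify that index.html was written next to the converted model.
!ls -lh ./tfjs ./tfjs/model
print(open('./tfjs/index.html').read()[:200], '...')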
2. A static web server
Serving both index.html and the converted model.
In [ ]:
# Download ngrok for tunneling.
!if [ ! -f ./ngrok ]; then \
wget https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip; \
unzip -o ngrok-stable-linux-amd64.zip; \
fi
In [ ]:
# Then start a mini web server at a random port.
import random
port = random.randint(1000, 2**16 - 1)
!pkill ngrok
!kill $(ps x | grep -v grep | grep http.server | awk '{print $1}') 2>/dev/null
get_ipython().system_raw(
'cd ./tfjs && python3 -m http.server {} &'
.format(port)
)
# And, forward the port using ngrok.
get_ipython().system_raw('./ngrok http {} &'.format(port))
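To make sure the web server is actually serving the page, you can fetch index.html from localhost; this small check is independent of the ngrok tunnel:
In [ ]:
# Smoke test: fetch index.html from the local web server.
import time, urllib.request
time.sleep(1)  # Give the server a moment to start.
html = urllib.request.urlopen('http://localhost:{}/index.html'.format(port)).read()
print('served {} bytes from port {}'.format(len(html), port))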
3. Port forwarding
Via an ngrok tunnel from the machine running this notebook to the internet.
In [ ]:
# Get the public address from localhost:4040 (ngrok's web interface).
import time, urllib.request
time.sleep(1)  # Give ngrok time to start up.
ngrok_data = json.load(
urllib.request.urlopen('http://localhost:4040/api/tunnels'))
ngrok_data['tunnels'][0]['public_url']
In [ ]:
# You can connect to this external address with your mobile phone!
# Once the page has loaded, you can turn on flight mode and verify that
# predictions are really computed on-device. :-)
!pip install -q qrcode
import qrcode
qrcode.make(ngrok_data['tunnels'][0]['public_url'])