In [0]:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
An Example of a Key TFX Library
This example colab notebook illustrates how TensorFlow Model Analysis (TFMA) can be used to investigate and visualize the characteristics of a dataset and the performance of a model. We'll use a model that we trained previously, and now you get to play with the results!
The model we trained was for the Chicago Taxi Example, which uses the Taxi Trips dataset released by the City of Chicago.
Note: This site provides applications using data that has been modified for use from its original source, www.cityofchicago.org, the official website of the City of Chicago. The City of Chicago makes no claims as to the content, accuracy, timeliness, or completeness of any of the data provided at this site. The data provided at this site is subject to change at any time. It is understood that the data provided at this site is being used at one’s own risk.
Read more about the dataset in Google BigQuery. Explore the full dataset in the BigQuery UI.
Key Point: As a modeler and developer, think about how this data is used and the potential benefits and harm a model's predictions can cause. A model like this could reinforce societal biases and disparities. Is a feature relevant to the problem you want to solve or will it introduce bias? For more information, read about ML fairness.
Key Point: In order to understand TFMA
and how it works with Apache Beam, you'll need to know a little bit about Apache Beam itself. The Beam Programming Guide is a great place to start.
The columns in the dataset are:
pickup_community_area | fare | trip_start_month |
trip_start_hour | trip_start_day | trip_start_timestamp |
pickup_latitude | pickup_longitude | dropoff_latitude |
dropoff_longitude | trip_miles | pickup_census_tract |
dropoff_census_tract | payment_type | company |
trip_seconds | dropoff_community_area | tips |
Note: If running TFMA in a local Jupyter notebook, then these Jupyter extensions must be installed in the environment before running Jupyter.
jupyter nbextension enable --py widgetsnbextension
jupyter nbextension install --py --symlink tensorflow_model_analysis
jupyter nbextension enable --py tensorflow_model_analysis
In [0]:
# Install pinned versions of TensorFlow and TFX so the notebook is
# reproducible; restart the runtime after installing so the kernel
# picks up the new packages.
!pip install -q -U \
tensorflow==2.0.0 \
tfx==0.15.0rc0
In [0]:
import csv
import io
import os
import requests
import tempfile
import zipfile
from google.protobuf import text_format
import tensorflow as tf
import tensorflow_data_validation as tfdv
import tensorflow_model_analysis as tfma
from tensorflow_metadata.proto.v0 import schema_pb2
In [0]:
# Confirm which TensorFlow version the kernel actually loaded.
tf.__version__
In [0]:
# Confirm the installed TensorFlow Model Analysis version.
tfma.version.VERSION_STRING
In [0]:
# Download the zip file from GCP and unzip it
BASE_DIR = tempfile.mkdtemp()
TFMA_DIR = os.path.join(BASE_DIR, 'eval_saved_models-2.0')
DATA_DIR = os.path.join(TFMA_DIR, 'data')
OUTPUT_DIR = os.path.join(TFMA_DIR, 'output')
SCHEMA = os.path.join(TFMA_DIR, 'schema.pbtxt')
response = requests.get('https://storage.googleapis.com/tfx-colab-datasets/eval_saved_models-2.0.zip', stream=True)
zipfile.ZipFile(io.BytesIO(response.content)).extractall(BASE_DIR)
print("Here's what we downloaded:")
!cd {TFMA_DIR} && find .
Among the things we downloaded was a schema for our data that was created by TensorFlow Data Validation. Let's parse that now so that we can use it with TFMA.
In [0]:
# Parse the schema (created earlier by TensorFlow Data Validation) so we can
# use it below to type the CSV columns, and display it in the notebook.
schema = schema_pb2.Schema()
# tf.io.read_file returns a string tensor; .numpy() extracts the raw bytes.
contents = tf.io.read_file(SCHEMA).numpy()
schema = text_format.Parse(contents, schema)
tfdv.display_schema(schema)
In [0]:
datafile = os.path.join(DATA_DIR, 'eval', 'data.csv')
reader = csv.DictReader(open(datafile))
examples = []
for line in reader:
example = tf.train.Example()
for feature in schema.feature:
key = feature.name
if len(line[key]) > 0:
if feature.type == schema_pb2.FLOAT:
example.features.feature[key].float_list.value[:] = [float(line[key])]
elif feature.type == schema_pb2.INT:
example.features.feature[key].int64_list.value[:] = [int(line[key])]
elif feature.type == schema_pb2.BYTES:
example.features.feature[key].bytes_list.value[:] = [line[key].encode('utf8')]
else:
if feature.type == schema_pb2.FLOAT:
example.features.feature[key].float_list.value[:] = []
elif feature.type == schema_pb2.INT:
example.features.feature[key].int64_list.value[:] = []
elif feature.type == schema_pb2.BYTES:
example.features.feature[key].bytes_list.value[:] = []
examples.append(example)
TFRecord_file = os.path.join(BASE_DIR, 'train_data.rio')
with tf.io.TFRecordWriter(TFRecord_file) as writer:
for example in examples:
writer.write(example.SerializeToString())
writer.flush()
writer.close()
!ls {TFRecord_file}
Now we're ready to create a function that we'll use to run TFMA and render metrics. It requires an EvalSavedModel
, a list of SliceSpecs
, and an index into the SliceSpec list. It will create an EvalResult using tfma.run_model_analysis
, and use it to create a SlicingMetricsViewer
using tfma.view.render_slicing_metrics
, which will render a visualization of our dataset using the slice we created.
In [0]:
def run_and_render(eval_model=None, slice_list=None, slice_idx=0):
    """Runs TFMA over the TFRecord data and renders the slicing metrics.

    Args:
      eval_model: An instance of tf.saved_model saved with evaluation data.
      slice_list: A list of tfma.slicer.SingleSliceSpec giving the slices.
      slice_idx: An integer index into slice_list specifying the slice to use.

    Returns:
      A SlicingMetricsViewer object if in Jupyter notebook; None if in Colab.
    """
    analysis_result = tfma.run_model_analysis(
        eval_shared_model=eval_model,
        data_location=TFRecord_file,
        file_format='tfrecords',
        slice_spec=slice_list,
        output_path='sample_data',
        extractors=None)
    if slice_list:
        spec_to_render = slice_list[slice_idx]
    else:
        spec_to_render = None
    return tfma.view.render_slicing_metrics(
        analysis_result, slicing_spec=spec_to_render)
We previously trained a model, and now we've loaded the results. Let's take a look at our visualizations, starting with using TFMA to slice along particular features. But first we need to read in the EvalSavedModel from one of our previous training runs.
To define the slice you want to visualize you create a tfma.slicer.SingleSliceSpec
To use tfma.view.render_slicing_metrics
you can either use the name of the column (by setting slicing_column
) or provide a tfma.slicer.SingleSliceSpec
(by setting slicing_spec
)
Plots are interactive:
Simply hover over the desired data point to see more details. Select from four different types of plots using the selections at the bottom.
For example, we'll be setting slicing_column
to look at the trip_start_hour
feature in our SliceSpec
.
In [0]:
# Load the TFMA results for the first training run
# This will take a minute
eval_model_base_dir_0 = os.path.join(TFMA_DIR, 'run_0', 'eval_model_dir')
# Pick one export directory under eval_model_dir. Presumably these are
# timestamped exports and max() (lexicographic) selects the newest one --
# TODO confirm against the downloaded directory layout.
eval_model_dir_0 = os.path.join(eval_model_base_dir_0,
                                max(os.listdir(eval_model_base_dir_0)))
eval_shared_model_0 = tfma.default_eval_shared_model(
    eval_saved_model_path=eval_model_dir_0)
# Slice our data by the trip_start_hour feature
slices = [tfma.slicer.SingleSliceSpec(columns=['trip_start_hour'])]
run_and_render(eval_model=eval_shared_model_0, slice_list=slices, slice_idx=0)
The default visualization is the Slices Overview when the number of slices is small. It shows the values of metrics for each slice. Since we've selected trip_start_hour
above, it's showing us metrics like accuracy and AUC for each hour, which allows us to look for issues that are specific to some hours and not others.
In the visualization above:
Try sorting the feature column, which is our trip_start_hours feature, by clicking on the column header.

The chart also allows us to select and display different metrics in our slices.
It is also possible to set a threshold to filter out slices with smaller numbers of examples, or "weights". You can type a minimum number of examples, or use the slider.
This view also supports a Metrics Histogram as an alternative visualization, which is also the default view when the number of slices is large. The results will be divided into buckets and the number of slices / total weights / both can be visualized. Columns can be sorted by clicking on the column header. Slices with small weights can be filtered out by setting the threshold. Further filtering can be applied by dragging the grey band. To reset the range, double click the band. Filtering can also be used to remove outliers in the visualization and the metrics tables. Click the gear icon to switch to a logarithmic scale instead of a linear scale.
In [0]:
# Analyze three single-column slices at once; slice_idx=0 renders the
# trip_start_hour slice (the others are still computed and written out).
slices = [tfma.slicer.SingleSliceSpec(columns=['trip_start_hour']),
          tfma.slicer.SingleSliceSpec(columns=['trip_start_day']),
          tfma.slicer.SingleSliceSpec(columns=['trip_start_month'])]
run_and_render(eval_model=eval_shared_model_0, slice_list=slices, slice_idx=0)
You can create feature crosses to analyze combinations of features. Let's create a SliceSpec
to look at a cross of trip_start_day
and trip_start_hour
:
In [0]:
# Cross two columns: one slice per (trip_start_day, trip_start_hour) pair.
slices = [tfma.slicer.SingleSliceSpec(columns=['trip_start_day', 'trip_start_hour'])]
run_and_render(eval_shared_model_0, slices, 0)
Crossing the two columns creates a lot of combinations! Let's narrow down our cross to only look at trips that start at noon. Then let's select accuracy
from the visualization:
In [0]:
# Narrow the cross: slice by trip_start_day, but only for trips that start
# at noon (trip_start_hour == 12).
slices = [tfma.slicer.SingleSliceSpec(columns=['trip_start_day'], features=[('trip_start_hour', 12)])]
run_and_render(eval_shared_model_0, slices, 0)
Your training dataset will be used for training your model, and will hopefully be representative of your test dataset and the data that will be sent to your model in production. However, while the data in inference requests may remain the same as your training data, in many cases it will start to change enough so that the performance of your model will change.
That means that you need to monitor and measure your model's performance on an ongoing basis, so that you can be aware of and react to changes. Let's take a look at how TFMA can help.
In [0]:
def get_eval_result(base_dir, run_name, data_loc, slice_spec):
    """Runs TFMA for one training run and returns its EvalResult.

    Args:
      base_dir: Root directory containing the per-run model directories.
      run_name: Name of the run subdirectory (e.g. 'run_0').
      data_loc: Path to the TFRecord file of evaluation examples.
      slice_spec: List of tfma.slicer.SingleSliceSpec to slice by.

    Returns:
      The tfma EvalResult; results are also written under
      <base_dir>/output/<run_name> so they can be reloaded from disk later.
    """
    eval_model_base_dir = os.path.join(base_dir, run_name, "eval_model_dir")
    versions = os.listdir(eval_model_base_dir)
    # Presumably timestamped export dirs; max() (lexicographic) is assumed to
    # pick the newest export -- TODO confirm against the directory layout.
    eval_model_dir = os.path.join(eval_model_base_dir, max(versions))
    output_dir = os.path.join(base_dir, "output", run_name)
    eval_shared_model = tfma.default_eval_shared_model(eval_saved_model_path=eval_model_dir)
    return tfma.run_model_analysis(eval_shared_model=eval_shared_model,
                                   data_location=data_loc,
                                   file_format='tfrecords',
                                   slice_spec=slice_spec,
                                   output_path=output_dir,
                                   extractors=None)

# An empty SingleSliceSpec means the "overall" slice (whole dataset).
# Evaluate three successive training runs against the same data.
slices = [tfma.slicer.SingleSliceSpec()]
result_ts0 = get_eval_result(TFMA_DIR, 'run_0', TFRecord_file, slices)
result_ts1 = get_eval_result(TFMA_DIR, 'run_1', TFRecord_file, slices)
result_ts2 = get_eval_result(TFMA_DIR, 'run_2', TFRecord_file, slices)
Next, let's use TFMA to see how these runs compare using render_time_series
.
First, we'll imagine that we've trained and deployed our model yesterday, and now we want to see how it's doing on the new data coming in today. We can specify particular slices to look at. Let's compare our training runs for trips that started at noon.
Note: TFMA can load previously computed results back from disk, which is what we do below with tfma.load_eval_results rather than re-running the analysis.
In [0]:
# Reload the serialized results for the first two runs from disk and render
# them as a time series (slices[0] is the overall slice from above).
output_dirs = [os.path.join(TFMA_DIR, "output", run_name)
               for run_name in ("run_0", "run_1", "run_2")]
eval_results_from_disk = tfma.load_eval_results(
    output_dirs[:2], tfma.constants.MODEL_CENTRIC_MODE)
tfma.view.render_time_series(eval_results_from_disk, slices[0])
Now we'll imagine that another day has passed and we want to see how it's doing on the new data coming in today, compared to the previous two days. Again add AUC and average loss by using the "Add metric series" menu:
In [0]:
# Same comparison, now across all three runs.
eval_results_from_disk = tfma.load_eval_results(
    output_dirs, tfma.constants.MODEL_CENTRIC_MODE)
tfma.view.render_time_series(eval_results_from_disk, slices[0])
In [0]: