In [0]:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

TF Lattice Custom Estimators

Overview

You can use custom estimators to create models with arbitrary monotonicity constraints using TFL layers. This guide outlines the steps needed to create such estimators.

Setup

Installing the TF Lattice package:


In [0]:
#@test {"skip": true}
!pip install tensorflow-lattice

Importing required packages:


In [0]:
import tensorflow as tf

import logging
import numpy as np
import pandas as pd
import sys
import tensorflow_lattice as tfl
from tensorflow import feature_column as fc

from tensorflow_estimator.python.estimator.canned import optimizers
from tensorflow_estimator.python.estimator.head import binary_class_head
# Silence verbose logging output for this guide.
logging.disable(sys.maxsize)

Downloading the UCI Statlog (Heart) dataset:


In [0]:
csv_file = tf.keras.utils.get_file(
    'heart.csv', 'http://storage.googleapis.com/applied-dl/heart.csv')
df = pd.read_csv(csv_file)
target = df.pop('target')
train_size = int(len(df) * 0.8)
train_x = df[:train_size]
train_y = target[:train_size]
test_x = df[train_size:]
test_y = target[train_size:]
df.head()

Setting the default values used for training in this guide:


In [0]:
LEARNING_RATE = 0.1
BATCH_SIZE = 128
NUM_EPOCHS = 1000

Feature Columns

As with any other TF estimator, data needs to be passed to the estimator, typically via an input_fn, and parsed using FeatureColumns.


In [0]:
# Feature columns.
# - age
# - sex
# - ca        number of major vessels (0-3) colored by flourosopy
# - thal      3 = normal; 6 = fixed defect; 7 = reversable defect
feature_columns = [
    fc.numeric_column('age', default_value=-1),
    fc.categorical_column_with_vocabulary_list('sex', [0, 1]),
    fc.numeric_column('ca'),
    fc.categorical_column_with_vocabulary_list(
        'thal', ['normal', 'fixed', 'reversible']),
]

Note that categorical features do not need to be wrapped by a dense feature column, since the tfl.layers.CategoricalCalibration layer can directly consume category indices.
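
For example, a CategoricalCalibration layer can be applied to raw indices on its own. The cell below is a minimal standalone sketch with made-up indices (it is not part of the model built in this guide):


In [0]:
# Sketch: CategoricalCalibration maps raw category indices directly to
# learned scalar outputs, so no indicator/embedding wrapping is needed.
calibrator = tfl.layers.CategoricalCalibration(
    num_buckets=3,  # e.g. the three 'thal' categories
    output_min=0.0,
    output_max=1.0)
# A batch of category indices with shape (batch_size, 1).
print(calibrator(tf.constant([[0], [2], [1]])))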

Creating input_fn

As with any other estimator, you can use an input_fn to feed data to the model for training and evaluation.


In [0]:
train_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(
    x=train_x,
    y=train_y,
    shuffle=True,
    batch_size=BATCH_SIZE,
    num_epochs=NUM_EPOCHS,
    num_threads=1)

test_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(
    x=test_x,
    y=test_y,
    shuffle=False,
    batch_size=BATCH_SIZE,
    num_epochs=1,
    num_threads=1)

Creating model_fn

There are several ways to create a custom estimator. Here we will construct a model_fn that calls a Keras model on the parsed input tensors. To parse the input features, you can use tf.feature_column.input_layer, tf.keras.layers.DenseFeatures, or tfl.estimators.transform_features. If you use the latter, you will not need to wrap categorical features with dense feature columns, and the resulting tensors will not be concatenated, which makes it easier to use the features in the calibration layers.
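
To make the difference concrete, the following toy sketch (with made-up values, and assuming these feature-column transforms also run eagerly outside a model_fn) contrasts the per-feature dict produced by tfl.estimators.transform_features with the single concatenated tensor produced by tf.keras.layers.DenseFeatures:


In [0]:
# Toy sketch (made-up data): transform_features keeps one tensor per
# feature, while DenseFeatures concatenates all features into one tensor.
toy_features = {
    'age': tf.constant([[57.0], [63.0]]),
    'ca': tf.constant([[0.0], [2.0]]),
}
toy_columns = [fc.numeric_column('age'), fc.numeric_column('ca')]

# Dict of per-feature tensors, ready for per-feature calibration layers.
separate = tfl.estimators.transform_features(toy_features, toy_columns)
print({name: t.shape for name, t in separate.items()})

# Single tensor of shape (batch_size, num_features).
print(tf.keras.layers.DenseFeatures(toy_columns)(toy_features).shape)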

To construct a model, you can mix and match TFL layers or any other Keras layers. Here we create a calibrated lattice Keras model out of TFL layers and impose several monotonicity constraints. We then use the Keras model to create the custom estimator.


In [0]:
def model_fn(features, labels, mode, config):
  """model_fn for the custom estimator."""
  del config
  input_tensors = tfl.estimators.transform_features(features, feature_columns)
  inputs = {
      key: tf.keras.layers.Input(shape=(1,), name=key) for key in input_tensors
  }

  lattice_sizes = [3, 2, 2, 2]
  lattice_monotonicities = ['increasing', 'none', 'increasing', 'increasing']
  lattice_input = tf.keras.layers.Concatenate(axis=1)([
      tfl.layers.PWLCalibration(
          input_keypoints=np.linspace(10, 100, num=8, dtype=np.float32),
          # The output range of the calibrator should be the input range of
          # the following lattice dimension.
          output_min=0.0,
          output_max=lattice_sizes[0] - 1.0,
          monotonicity='increasing',
      )(inputs['age']),
      tfl.layers.CategoricalCalibration(
          # Number of categories including any missing/default category.
          num_buckets=2,
          output_min=0.0,
          output_max=lattice_sizes[1] - 1.0,
      )(inputs['sex']),
      tfl.layers.PWLCalibration(
          input_keypoints=[0.0, 1.0, 2.0, 3.0],
          output_min=0.0,
          output_max=lattice_sizes[2] - 1.0,
          # You can specify TFL regularizers as tuple
          # ('regularizer name', l1, l2).
          kernel_regularizer=('hessian', 0.0, 1e-4),
          monotonicity='increasing',
      )(inputs['ca']),
      tfl.layers.CategoricalCalibration(
          num_buckets=3,
          output_min=0.0,
          output_max=lattice_sizes[3] - 1.0,
          # Categorical monotonicity can be partial order.
          # (i, j) indicates that we must have output(i) <= output(j).
          # Make sure to set the lattice monotonicity to 'increasing' for this
          # dimension.
          monotonicities=[(0, 1), (0, 2)],
      )(inputs['thal']),
  ])
  output = tfl.layers.Lattice(
      lattice_sizes=lattice_sizes, monotonicities=lattice_monotonicities)(
          lattice_input)

  training = (mode == tf.estimator.ModeKeys.TRAIN)
  model = tf.keras.Model(inputs=inputs, outputs=output)
  logits = model(input_tensors, training=training)

  if training:
    optimizer = optimizers.get_optimizer_instance_v2('Adagrad', LEARNING_RATE)
  else:
    optimizer = None

  head = binary_class_head.BinaryClassHead()
  return head.create_estimator_spec(
      features=features,
      mode=mode,
      labels=labels,
      optimizer=optimizer,
      logits=logits,
      trainable_variables=model.trainable_variables,
      update_ops=model.updates)

Training the Estimator

Using the model_fn, we can now create and train the estimator.


In [0]:
estimator = tf.estimator.Estimator(model_fn=model_fn)
estimator.train(input_fn=train_input_fn)
results = estimator.evaluate(input_fn=test_input_fn)
print('AUC: {}'.format(results['auc']))
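
Once trained, the estimator can also be used for inference. The cell below is a minimal sketch; the 'logistic' prediction key is one of the standard outputs of BinaryClassHead:


In [0]:
# Sketch: BinaryClassHead exposes the usual prediction keys, such as
# 'logits', 'logistic', 'probabilities' and 'class_ids'.
for pred in estimator.predict(input_fn=test_input_fn):
  print('Predicted probability: {}'.format(pred['logistic']))
  break  # Only show the first test example.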