In [0]:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

Synthetic Features and Outliers

Learning Objectives:

  • Create a synthetic feature that is the ratio of two other features
  • Use this new feature as an input to a linear regression model
  • Improve the effectiveness of the model by identifying and clipping (removing) outliers from the input data

Let's revisit our model from the previous First Steps with TensorFlow exercise.

First, we'll import the California housing data into a pandas DataFrame:

Setup


In [1]:
from __future__ import print_function

import math

from IPython import display
from matplotlib import cm
from matplotlib import gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.metrics as metrics
import tensorflow as tf
from tensorflow.python.data import Dataset

tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format

california_housing_dataframe = pd.read_csv("https://download.mlcc.google.com/mledu-datasets/california_housing_train.csv", sep=",")

# Randomize the example order to avoid any pathological ordering in the source data.
california_housing_dataframe = california_housing_dataframe.reindex(
    np.random.permutation(california_housing_dataframe.index))
# Scale the target to units of thousands of dollars.
california_housing_dataframe["median_house_value"] /= 1000.0
california_housing_dataframe


Out[1]:
longitude latitude housing_median_age total_rooms total_bedrooms population households median_income median_house_value
12641 -121.7 38.1 22.0 1910.0 326.0 1001.0 345.0 4.8 115.8
13821 -122.0 38.4 16.0 2495.0 331.0 1118.0 338.0 6.5 198.0
12023 -121.4 36.8 40.0 2352.0 536.0 1430.0 535.0 3.1 155.3
12879 -121.8 40.7 14.0 821.0 170.0 477.0 129.0 3.1 87.5
9685 -119.6 36.1 29.0 424.0 78.0 284.0 73.0 1.5 43.8
... ... ... ... ... ... ... ... ... ...
8782 -118.6 34.2 33.0 1636.0 275.0 866.0 289.0 5.6 241.3
3206 -117.8 33.9 18.0 329.0 72.0 209.0 71.0 4.7 187.5
3216 -117.8 33.8 40.0 1251.0 336.0 729.0 343.0 2.5 236.4
2689 -117.7 34.0 16.0 2859.0 668.0 1946.0 591.0 3.0 124.3
9791 -119.7 36.6 31.0 834.0 229.0 616.0 211.0 1.7 61.2

17000 rows × 9 columns

Next, we'll set up our input function, and define the function for model training:


In [0]:
def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):
    """Trains a linear regression model of one feature.
  
    Args:
      features: pandas DataFrame of features
      targets: pandas DataFrame of targets
      batch_size: Size of batches to be passed to the model
      shuffle: True or False. Whether to shuffle the data.
      num_epochs: Number of epochs for which data should be repeated. None = repeat indefinitely
    Returns:
      Tuple of (features, labels) for next data batch
    """
    
    # Convert pandas data into a dict of np arrays.
    features = {key: np.array(value) for key, value in dict(features).items()}
 
    # Construct a dataset, and configure batching/repeating.
    ds = Dataset.from_tensor_slices((features,targets)) # warning: 2GB limit
    ds = ds.batch(batch_size).repeat(num_epochs)
    
    # Shuffle the data, if specified.
    if shuffle:
        ds = ds.shuffle(buffer_size=10000)
    
    # Return the next batch of data.
    features, labels = ds.make_one_shot_iterator().get_next()
    return features, labels
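
As a quick sanity check (a sketch, not part of the original exercise; it assumes the california_housing_dataframe loaded in the setup cell), you can pull a single batch from this input function and inspect it in a TF 1.x session:

# Sketch: fetch one deterministic batch to confirm what my_input_fn emits.
with tf.Session() as sess:
  feature_batch, label_batch = sess.run(my_input_fn(
      california_housing_dataframe[["total_rooms"]],
      california_housing_dataframe["median_house_value"],
      batch_size=2, shuffle=False, num_epochs=1))
print(feature_batch["total_rooms"])  # Two total_rooms values.
print(label_batch)                   # The two corresponding targets.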

In [0]:
def train_model(learning_rate, steps, batch_size, input_feature):
  """Trains a linear regression model.
  
  Args:
    learning_rate: A `float`, the learning rate.
    steps: A non-zero `int`, the total number of training steps. A training step
      consists of a forward and backward pass using a single batch.
    batch_size: A non-zero `int`, the batch size.
    input_feature: A `string` specifying a column from `california_housing_dataframe`
      to use as input feature.
      
  Returns:
    A Pandas `DataFrame` containing targets and the corresponding predictions done
    after training the model.
  """
  
  periods = 10
  steps_per_period = steps // periods  # Integer division: each period runs a whole number of steps.

  my_feature = input_feature
  my_feature_data = california_housing_dataframe[[my_feature]].astype('float32')
  my_label = "median_house_value"
  targets = california_housing_dataframe[my_label].astype('float32')

  # Create input functions.
  training_input_fn = lambda: my_input_fn(my_feature_data, targets, batch_size=batch_size)
  predict_training_input_fn = lambda: my_input_fn(my_feature_data, targets, num_epochs=1, shuffle=False)
  
  # Create feature columns.
  feature_columns = [tf.feature_column.numeric_column(my_feature)]
    
  # Create a linear regressor object.
  my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
  my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
  linear_regressor = tf.estimator.LinearRegressor(
      feature_columns=feature_columns,
      optimizer=my_optimizer
  )

  # Set up to plot the state of our model's line each period.
  plt.figure(figsize=(15, 6))
  plt.subplot(1, 2, 1)
  plt.title("Learned Line by Period")
  plt.ylabel(my_label)
  plt.xlabel(my_feature)
  sample = california_housing_dataframe.sample(n=300)
  plt.scatter(sample[my_feature], sample[my_label])
  colors = [cm.coolwarm(x) for x in np.linspace(0, 1, periods)]  # Sample the colormap over its [0, 1] domain.

  # Train the model, but do so inside a loop so that we can periodically assess
  # loss metrics.
  print("Training model...")
  print("RMSE (on training data):")
  root_mean_squared_errors = []
  for period in range(0, periods):
    # Train the model, starting from the prior state.
    linear_regressor.train(
        input_fn=training_input_fn,
        steps=steps_per_period,
    )
    # Take a break and compute predictions.
    predictions = linear_regressor.predict(input_fn=predict_training_input_fn)
    predictions = np.array([item['predictions'][0] for item in predictions])
    
    # Compute loss.
    root_mean_squared_error = math.sqrt(
      metrics.mean_squared_error(predictions, targets))
    # Occasionally print the current loss.
    print("  period %02d : %0.2f" % (period, root_mean_squared_error))
    # Add the loss metrics from this period to our list.
    root_mean_squared_errors.append(root_mean_squared_error)
    # Finally, track the weights and biases over time.
    # Apply some math to ensure that the data and line are plotted neatly.
    y_extents = np.array([0, sample[my_label].max()])
    
    weight = linear_regressor.get_variable_value('linear/linear_model/%s/weights' % input_feature)[0]
    bias = linear_regressor.get_variable_value('linear/linear_model/bias_weights')
    
    x_extents = (y_extents - bias) / weight
    x_extents = np.maximum(np.minimum(x_extents,
                                      sample[my_feature].max()),
                           sample[my_feature].min())
    y_extents = weight * x_extents + bias
    plt.plot(x_extents, y_extents, color=colors[period]) 
  print("Model training finished.")

  # Output a graph of loss metrics over periods.
  plt.subplot(1, 2, 2)
  plt.ylabel('RMSE')
  plt.xlabel('Periods')
  plt.title("Root Mean Squared Error vs. Periods")
  plt.tight_layout()
  plt.plot(root_mean_squared_errors)

  # Create a table with calibration data.
  calibration_data = pd.DataFrame()
  calibration_data["predictions"] = pd.Series(predictions)
  calibration_data["targets"] = pd.Series(targets)
  display.display(calibration_data.describe())

  print("Final RMSE (on training data): %0.2f" % root_mean_squared_error)
  
  return calibration_data

Task 1: Try a Synthetic Feature

Both the total_rooms and population features count totals for a given city block.

But what if one city block were more densely populated than another? We can explore how block density relates to median house value by creating a synthetic feature that's the ratio of total_rooms to population.

In the cell below, create a feature called rooms_per_person, and use that as the input_feature to train_model().

What's the best performance you can get with this single feature by tweaking the learning rate? (The better the performance, the better your regression line should fit the data, and the lower the final RMSE should be.)

NOTE: You may find it helpful to add a few code cells below so you can try out several different learning rates and compare the results. To add a new code cell, hover your cursor directly below the center of this cell, and click CODE.
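
If you'd prefer to script the comparison rather than re-running cells by hand, a loop like the following sketch works once rooms_per_person exists (the learning rates below are arbitrary examples, not recommendations):

# Hypothetical sweep over a few candidate learning rates.
for lr in [0.01, 0.05, 0.1, 0.5]:
  print("Training with learning_rate=%g" % lr)
  train_model(
      learning_rate=lr,
      steps=500,
      batch_size=5,
      input_feature="rooms_per_person")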


In [4]:
california_housing_dataframe.head()


Out[4]:
longitude latitude housing_median_age total_rooms total_bedrooms population households median_income median_house_value
12641 -121.7 38.1 22.0 1910.0 326.0 1001.0 345.0 4.8 115.8
13821 -122.0 38.4 16.0 2495.0 331.0 1118.0 338.0 6.5 198.0
12023 -121.4 36.8 40.0 2352.0 536.0 1430.0 535.0 3.1 155.3
12879 -121.8 40.7 14.0 821.0 170.0 477.0 129.0 3.1 87.5
9685 -119.6 36.1 29.0 424.0 78.0 284.0 73.0 1.5 43.8

In [5]:
#
# YOUR CODE HERE
#
california_housing_dataframe["rooms_per_person"] = california_housing_dataframe['total_rooms'] / california_housing_dataframe['population']
california_housing_dataframe.head()


Out[5]:
longitude latitude housing_median_age total_rooms total_bedrooms population households median_income median_house_value rooms_per_person
12641 -121.7 38.1 22.0 1910.0 326.0 1001.0 345.0 4.8 115.8 1.9
13821 -122.0 38.4 16.0 2495.0 331.0 1118.0 338.0 6.5 198.0 2.2
12023 -121.4 36.8 40.0 2352.0 536.0 1430.0 535.0 3.1 155.3 1.6
12879 -121.8 40.7 14.0 821.0 170.0 477.0 129.0 3.1 87.5 1.7
9685 -119.6 36.1 29.0 424.0 78.0 284.0 73.0 1.5 43.8 1.5
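
One hedge worth adding before training (a sketch, not part of the original exercise): if any population value were 0, the division above would produce inf, so it's cheap to confirm the new feature is finite:

# Sketch: confirm no infinities or NaNs crept in from the division.
assert np.isfinite(california_housing_dataframe["rooms_per_person"]).all()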

In [9]:
calibration_data = train_model(
    learning_rate=0.5,
    steps=500,
    batch_size=10,
    input_feature="rooms_per_person"
)


Training model...
RMSE (on training data):
  period 00 : 235.00
  period 01 : 232.46
  period 02 : 229.95
  period 03 : 227.45
  period 04 : 224.96
  period 05 : 222.48
  period 06 : 220.01
  period 07 : 217.57
  period 08 : 215.14
  period 09 : 212.72
Model training finished.
predictions targets
count 17000.0 17000.0
mean 27.7 207.3
std 13.1 116.0
min 5.7 15.0
25% 22.6 119.4
50% 27.3 180.4
75% 31.2 265.0
max 623.8 500.0
Final RMSE (on training data): 212.72

Solution

Click below for a solution.


In [0]:
california_housing_dataframe["rooms_per_person"] = (
    california_housing_dataframe["total_rooms"] / california_housing_dataframe["population"])

calibration_data = train_model(
    learning_rate=0.05,
    steps=500,
    batch_size=5,
    input_feature="rooms_per_person")

Task 2: Identify Outliers

We can visualize the performance of our model by creating a scatter plot of predictions vs. target values. Ideally, these would lie on a perfectly correlated diagonal line.

Use Pyplot's scatter() to create a scatter plot of predictions vs. targets, using the rooms-per-person model you trained in Task 1.

Do you see any oddities? Trace these back to the source data by looking at the distribution of values in rooms_per_person.


In [16]:
calibration_data.plot(kind='scatter', x='targets', y='predictions')



In [12]:
california_housing_dataframe['rooms_per_person'].plot(kind='hist', bins=50)



Solution

Click below for the solution.


In [0]:
plt.figure(figsize=(15, 6))
plt.subplot(1, 2, 1)
plt.scatter(calibration_data["predictions"], calibration_data["targets"])

The calibration data shows most scatter points aligned along a line. The line is almost vertical; we'll come back to that later. For now, let's focus on the points that deviate from the line, and notice that they are relatively few in number.

If we plot a histogram of rooms_per_person, we find that we have a few outliers in our input data:


In [0]:
plt.subplot(1, 2, 2)
_ = california_housing_dataframe["rooms_per_person"].hist()
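
To put numbers on what the histogram shows (a quick aside; the 0.999 quantile below is just an illustrative cutoff), summary statistics make the skewed tail explicit:

# Most values sit in a narrow range, while the max lies far above the 99.9th percentile.
print(california_housing_dataframe["rooms_per_person"].describe())
print(california_housing_dataframe["rooms_per_person"].quantile(0.999))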

Task 3: Clip Outliers

See if you can further improve the model fit by setting the outlier values of rooms_per_person to some reasonable minimum or maximum.

For reference, here's a quick example of how to apply a function to a Pandas Series:

clipped_feature = my_dataframe["my_feature_name"].apply(lambda x: max(x, 0))

The above clipped_feature will have no values less than 0.
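
As an aside (not part of the original exercise), a pandas Series also has a built-in clip method that bounds values without an explicit lambda; this one-liner is equivalent to the apply example above:

clipped_feature = my_dataframe["my_feature_name"].clip(lower=0)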


In [0]:
# YOUR CODE HERE

Solution

Click below for the solution.

The histogram we created in Task 2 shows that the majority of values are less than 5. Let's clip rooms_per_person to 5, and plot a histogram to double-check the results.


In [17]:
california_housing_dataframe["rooms_per_person"] = (
    california_housing_dataframe["rooms_per_person"]).apply(lambda x: min(x, 5))

_ = california_housing_dataframe["rooms_per_person"].hist()


To verify that clipping worked, let's train again and print the calibration data once more:


In [18]:
calibration_data = train_model(
    learning_rate=0.05,
    steps=500,
    batch_size=5,
    input_feature="rooms_per_person")


Training model...
RMSE (on training data):
  period 00 : 212.82
  period 01 : 189.07
  period 02 : 166.77
  period 03 : 147.16
  period 04 : 130.95
  period 05 : 118.68
  period 06 : 113.59
  period 07 : 110.11
  period 08 : 108.89
  period 09 : 107.98
Model training finished.
predictions targets
count 17000.0 17000.0
mean 199.2 207.3
std 52.4 116.0
min 45.5 15.0
25% 165.7 119.4
50% 199.2 180.4
75% 227.8 265.0
max 443.8 500.0
Final RMSE (on training data): 107.98

In [19]:
_ = plt.scatter(calibration_data["predictions"], calibration_data["targets"])