In [0]:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

Validation

Learning Objectives:

  • Use multiple features, instead of a single feature, to further improve the effectiveness of a model
  • Debug issues in model input data
  • Use a test data set to check if a model is overfitting the validation data

As in the prior exercises, we're working with the California housing data set, trying to predict median_house_value at the city-block level from 1990 census data.

Setup

First off, let's load up and prepare our data. This time, we're going to work with multiple features, so we'll modularize the logic for preprocessing the features a bit:


In [2]:
from __future__ import print_function

import math

from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset

tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format

california_housing_dataframe = pd.read_csv("https://download.mlcc.google.com/mledu-datasets/california_housing_train.csv", sep=",")

california_housing_dataframe.head()
# california_housing_dataframe = california_housing_dataframe.reindex(
#     np.random.permutation(california_housing_dataframe.index))


Out[2]:
longitude latitude housing_median_age total_rooms total_bedrooms population households median_income median_house_value
0 -114.3 34.2 15.0 5612.0 1283.0 1015.0 472.0 1.5 66900.0
1 -114.5 34.4 19.0 7650.0 1901.0 1129.0 463.0 1.8 80100.0
2 -114.6 33.7 17.0 720.0 174.0 333.0 117.0 1.7 85700.0
3 -114.6 33.6 14.0 1501.0 337.0 515.0 226.0 3.2 73400.0
4 -114.6 33.6 20.0 1454.0 326.0 624.0 262.0 1.9 65500.0

In [0]:
def preprocess_features(california_housing_dataframe):
  """Prepares input features from California housing data set.

  Args:
    california_housing_dataframe: A Pandas DataFrame expected to contain data
      from the California housing data set.
  Returns:
    A DataFrame that contains the features to be used for the model, including
    synthetic features.
  """
  selected_features = california_housing_dataframe[
    ["latitude",
     "longitude",
     "housing_median_age",
     "total_rooms",
     "total_bedrooms",
     "population",
     "households",
     "median_income"]]
  processed_features = selected_features.copy()
  # Create a synthetic feature.
  processed_features["rooms_per_person"] = (
    california_housing_dataframe["total_rooms"] /
    california_housing_dataframe["population"])
  return processed_features

def preprocess_targets(california_housing_dataframe):
  """Prepares target features (i.e., labels) from California housing data set.

  Args:
    california_housing_dataframe: A Pandas DataFrame expected to contain data
      from the California housing data set.
  Returns:
    A DataFrame that contains the target feature.
  """
  output_targets = pd.DataFrame()
  # Scale the target to be in units of thousands of dollars.
  output_targets["median_house_value"] = (
    california_housing_dataframe["median_house_value"] / 1000.0)
  return output_targets

For the training set, we'll choose the first 12000 examples, out of the total of 17000.


In [4]:
training_examples = preprocess_features(california_housing_dataframe.head(12000))
training_examples.describe()


Out[4]:
latitude longitude housing_median_age total_rooms total_bedrooms population households median_income rooms_per_person
count 12000.0 12000.0 12000.0 12000.0 12000.0 12000.0 12000.0 12000.0 12000.0
mean 34.6 -118.5 27.5 2655.7 547.1 1476.0 505.4 3.8 1.9
std 1.6 1.2 12.1 2258.1 434.3 1174.3 391.7 1.9 1.3
min 32.5 -121.4 1.0 2.0 2.0 3.0 2.0 0.5 0.0
25% 33.8 -118.9 17.0 1451.8 299.0 815.0 283.0 2.5 1.4
50% 34.0 -118.2 28.0 2113.5 438.0 1207.0 411.0 3.5 1.9
75% 34.4 -117.8 36.0 3146.0 653.0 1777.0 606.0 4.6 2.3
max 41.8 -114.3 52.0 37937.0 5471.0 35682.0 5189.0 15.0 55.2

In [5]:
training_targets = preprocess_targets(california_housing_dataframe.head(12000))
training_targets.describe()


Out[5]:
median_house_value
count 12000.0
mean 198.0
std 111.9
min 15.0
25% 117.1
50% 170.5
75% 244.4
max 500.0

For the validation set, we'll choose the last 5000 examples, out of the total of 17000.


In [6]:
validation_examples = preprocess_features(california_housing_dataframe.tail(5000))
validation_examples.describe()


Out[6]:
latitude longitude housing_median_age total_rooms total_bedrooms population households median_income rooms_per_person
count 5000.0 5000.0 5000.0 5000.0 5000.0 5000.0 5000.0 5000.0 5000.0
mean 38.1 -122.2 31.3 2614.8 521.1 1318.1 491.2 4.1 2.1
std 0.9 0.5 13.4 1979.6 388.5 1073.7 366.5 2.0 0.6
min 36.1 -124.3 1.0 8.0 1.0 8.0 1.0 0.5 0.1
25% 37.5 -122.4 20.0 1481.0 292.0 731.0 278.0 2.7 1.7
50% 37.8 -122.1 31.0 2164.0 424.0 1074.0 403.0 3.7 2.1
75% 38.4 -121.9 42.0 3161.2 635.0 1590.2 603.0 5.1 2.4
max 42.0 -121.4 52.0 32627.0 6445.0 28566.0 6082.0 15.0 18.3

In [7]:
validation_targets = preprocess_targets(california_housing_dataframe.tail(5000))
validation_targets.describe()


Out[7]:
median_house_value
count 5000.0
mean 229.5
std 122.5
min 15.0
25% 130.4
50% 213.0
75% 303.2
max 500.0

Task 1: Examine the Data

Okay, let's look at the data above. We have 9 input features that we can use.

Take a quick skim over the table of values. Everything look okay? See how many issues you can spot. Don't worry if you don't have a background in statistics; common sense will get you far.

After you've had a chance to look over the data yourself, check the solution for some additional thoughts on how to verify data.

Solution

Click below for the solution.

Let's check our data against some baseline expectations:

  • For some values, like median_house_value, we can check to see if these values fall within reasonable ranges (keeping in mind this was 1990 data — not today!).

  • For other values, like latitude and longitude, we can do a quick check to see if these line up with expected values from a quick Google search.

If you look closely, you may see some oddities:

  • median_income is on a scale from about 0.5 to 15 (see the tables above). It's not at all clear what this scale refers to; it looks like it might be some kind of log scale. It's not documented anywhere; all we can assume is that higher values correspond to higher income.

  • The maximum median_house_value is 500,001. This looks like an artificial cap of some kind.

  • Our rooms_per_person feature is generally on a sane scale, with a 75th percentile value of about 2. But there are some very large values, like 18 or 55, which may indicate some corruption in the data.

We'll use these features as given for now. But hopefully these kinds of examples can help to build a little intuition about how to check data that comes to you from an unknown source.
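
A few of these checks are easy to automate. Below is a minimal sketch over the training_examples DataFrame defined earlier; the latitude/longitude bounds are rough California extents chosen for illustration, not authoritative values.

# Rough geographic range checks (approximate California bounds).
assert training_examples["latitude"].between(32, 42).all()
assert training_examples["longitude"].between(-125, -114).all()

# Surface suspiciously large rooms_per_person values for manual review.
print(training_examples[training_examples["rooms_per_person"] > 10])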

Task 2: Plot Latitude/Longitude vs. Median House Value

Let's take a close look at two features in particular: latitude and longitude. These are geographical coordinates of the city block in question.

This might make a nice visualization — let's plot latitude and longitude, and use color to show the median_house_value.


In [8]:
plt.figure(figsize=(13, 8))

ax = plt.subplot(1, 2, 1)
ax.set_title("Validation Data")

ax.set_autoscaley_on(False)
ax.set_ylim([32, 43])
ax.set_autoscalex_on(False)
ax.set_xlim([-126, -112])
plt.scatter(validation_examples["longitude"],
            validation_examples["latitude"],
            cmap="coolwarm",
            c=validation_targets["median_house_value"] / validation_targets["median_house_value"].max())

ax = plt.subplot(1,2,2)
ax.set_title("Training Data")

ax.set_autoscaley_on(False)
ax.set_ylim([32, 43])
ax.set_autoscalex_on(False)
ax.set_xlim([-126, -112])
plt.scatter(training_examples["longitude"],
            training_examples["latitude"],
            cmap="coolwarm",
            c=training_targets["median_house_value"] / training_targets["median_house_value"].max())
_ = plt.plot()


Wait a second... this should have given us a nice map of the state of California, with red showing up in expensive areas like the San Francisco Bay Area and Los Angeles.

The training set sort of does, compared to a real map, but the validation set clearly doesn't.

Go back up and look at the data from Task 1 again.

Do you see any other differences in the distributions of features or targets between the training and validation data?

Solution

Click below for the solution.

Looking at the tables of summary stats above, it's easy to wonder how anyone would do a useful data check. What's the right 75th percentile value for total_rooms per city block?

The key thing to notice is that for any given feature or column, the distribution of values between the train and validation splits should be roughly equal.

The fact that this is not the case is a real worry, and shows that we likely have a fault in the way that our train and validation split was created.
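
One quick way to surface such mismatches programmatically is to diff the summary statistics of the two splits (a minimal sketch using the DataFrames defined above):

# Large gaps in the latitude and longitude rows are the red flag here;
# the count row differs only because the splits have different sizes.
print(training_examples.describe() - validation_examples.describe())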

Task 3: Return to the Data Importing and Pre-Processing Code, and See if You Spot Any Bugs

If you do, go ahead and fix the bug. Don't spend more than a minute or two looking. If you can't find the bug, check the solution.

When you've found and fixed the issue, re-run the latitude/longitude plotting cell above and confirm that our sanity checks look better.

By the way, there's an important lesson here.

Debugging in ML is often data debugging rather than code debugging.

If the data is wrong, even the most advanced ML code can't save things.

Solution

Click below for the solution.

Take a look at how the data is randomized when it's read in.

If we don't randomize the data properly before creating training and validation splits, then we may be in trouble if the data is given to us in some sorted order, which appears to be the case here.
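
Concretely, the fix is to re-enable the shuffle that was commented out in the setup cell, and then rebuild the four split DataFrames:

# Randomize the row order before splitting.
california_housing_dataframe = california_housing_dataframe.reindex(
    np.random.permutation(california_housing_dataframe.index))

# Recreate the training and validation splits from the shuffled frame.
training_examples = preprocess_features(california_housing_dataframe.head(12000))
training_targets = preprocess_targets(california_housing_dataframe.head(12000))
validation_examples = preprocess_features(california_housing_dataframe.tail(5000))
validation_targets = preprocess_targets(california_housing_dataframe.tail(5000))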

Task 4: Train and Evaluate a Model

Next, we'll train a linear regressor using all the features in the data set, and see how well we do.

Spend 5 minutes or so trying different hyperparameter settings, and try to get the best validation performance you can.

Let's define the same input function we've used previously for loading the data into a TensorFlow model.


In [0]:
def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):
    """Trains a linear regression model of multiple features.
  
    Args:
      features: pandas DataFrame of features
      targets: pandas DataFrame of targets
      batch_size: Size of batches to be passed to the model
      shuffle: True or False. Whether to shuffle the data.
      num_epochs: Number of epochs for which data should be repeated. None = repeat indefinitely
    Returns:
      Tuple of (features, labels) for next data batch
    """
    
    # Convert pandas data into a dict of np arrays.
    features = {key: np.array(value) for key, value in dict(features).items()}
 
    # Construct a dataset, and configure batching/repeating.
    ds = Dataset.from_tensor_slices((features,targets)) # warning: 2GB limit
    ds = ds.batch(batch_size).repeat(num_epochs)
    
    # Shuffle the data, if specified.
    if shuffle:
      ds = ds.shuffle(10000)
    
    # Return the next batch of data.
    features, labels = ds.make_one_shot_iterator().get_next()
    return features, labels
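
To sanity-check the pipeline, you can pull a single batch by hand. This is an illustrative sketch using the TF 1.x session API on the objects defined above, not part of the original exercise:

# Build the input ops for a tiny, unshuffled, single-epoch pass.
features_batch, labels_batch = my_input_fn(
    training_examples,
    training_targets["median_house_value"],
    batch_size=2,
    shuffle=False,
    num_epochs=1)

# Evaluate one batch and inspect its shapes and values.
with tf.Session() as sess:
  feature_values, label_values = sess.run([features_batch, labels_batch])
  print({key: value.shape for key, value in feature_values.items()})
  print(label_values)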

Because we're now working with multiple input features, let's modularize our code for configuring feature columns into a separate function. (For now, this code is fairly simple, as all our features are numeric, but we'll build on this code as we use other types of features in future exercises.)


In [0]:
def construct_feature_columns(input_features):
  """Construct the TensorFlow Feature Columns.

  Args:
    input_features: The names of the numerical input features to use.
  Returns:
    A set of feature columns
  """ 
  return set([tf.feature_column.numeric_column(my_feature)
              for my_feature in input_features])
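
Since construct_feature_columns returns a plain set, you can inspect what it builds (a small illustration; numeric feature columns expose their key via the name property):

feature_columns = construct_feature_columns(training_examples)
for column in sorted(feature_columns, key=lambda c: c.name):
  print(column.name)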

Next, go ahead and complete the train_model() code below to set up the input functions and calculate predictions.

NOTE: It's okay to reference the code from the previous exercises, but make sure to call predict() on the appropriate data sets.

Compare the losses on training data and validation data. With a single raw feature, our best root mean squared error (RMSE) was about 180.

See how much better you can do now that we can use multiple features.

Check the data using some of the methods we've looked at before (a sketch of the scatter-plot check follows this list). These might include:

  • Comparing distributions of predictions and actual target values

  • Creating a scatter plot of predictions vs. target values

  • Creating two scatter plots of validation data using latitude and longitude:

    • One plot mapping color to actual target median_house_value
    • A second plot mapping color to predicted median_house_value for side-by-side comparison.
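
As a hedged sketch of the second check, once train_model (defined next) has been run and returned a trained linear_regressor:

# Predict on the validation split with a single, unshuffled epoch.
predict_validation_input_fn = lambda: my_input_fn(
    validation_examples,
    validation_targets["median_house_value"],
    num_epochs=1,
    shuffle=False)
validation_predictions = linear_regressor.predict(input_fn=predict_validation_input_fn)
validation_predictions = np.array(
    [item['predictions'][0] for item in validation_predictions])

# Scatter predictions against actual targets; a well-calibrated model
# hugs the diagonal.
plt.figure(figsize=(6, 6))
plt.scatter(validation_targets["median_house_value"], validation_predictions, alpha=0.3)
plt.xlabel("Actual median_house_value (thousands)")
plt.ylabel("Predicted median_house_value (thousands)")
_ = plt.title("Predictions vs. targets (validation)")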

In [0]:
def train_model(
    learning_rate,
    steps,
    batch_size,
    training_examples,
    training_targets,
    validation_examples,
    validation_targets):
  """Trains a linear regression model of multiple features.
  
  In addition to training, this function also prints training progress information,
  as well as a plot of the training and validation loss over time.
  
  Args:
    learning_rate: A `float`, the learning rate.
    steps: A non-zero `int`, the total number of training steps. A training step
      consists of a forward and backward pass using a single batch.
    batch_size: A non-zero `int`, the batch size.
    training_examples: A `DataFrame` containing one or more columns from
      `california_housing_dataframe` to use as input features for training.
    training_targets: A `DataFrame` containing exactly one column from
      `california_housing_dataframe` to use as target for training.
    validation_examples: A `DataFrame` containing one or more columns from
      `california_housing_dataframe` to use as input features for validation.
    validation_targets: A `DataFrame` containing exactly one column from
      `california_housing_dataframe` to use as target for validation.
      
  Returns:
    A `LinearRegressor` object trained on the training data.
  """

  periods = 10
  steps_per_period = steps / periods
  
  # Create a linear regressor object.
  my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
  my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
  linear_regressor = tf.estimator.LinearRegressor(
      feature_columns=construct_feature_columns(training_examples),
      optimizer=my_optimizer
  )
  
  # 1. Create input functions.
  training_input_fn = lambda:my_input_fn(training_examples, training_targets["median_house_value"], batch_size=batch_size)
  predict_training_input_fn = lambda:my_input_fn(training_examples, training_targets["median_house_value"], num_epochs=1, shuffle=False)
  predict_validation_input_fn = lambda:my_input_fn(validation_examples, validation_targets["median_house_value"], num_epochs=1, shuffle=False)
  
  # Train the model, but do so inside a loop so that we can periodically assess
  # loss metrics.
  print("Training model...")
  print("RMSE (on training data):")
  training_rmse = []
  validation_rmse = []
  for period in range(0, periods):
    # Train the model, starting from the prior state.
    linear_regressor.train(
        input_fn=training_input_fn,
        steps=steps_per_period,
    )
    # 2. Take a break and compute predictions.
    training_predictions = linear_regressor.predict(input_fn=predict_training_input_fn)
    training_predictions = np.array([item['predictions'][0] for item in training_predictions])
    validation_predictions = linear_regressor.predict(input_fn=predict_validation_input_fn)
    validation_predictions = np.array([item['predictions'][0] for item in validation_predictions])
    
    # Compute training and validation loss.
    training_root_mean_squared_error = math.sqrt(
        metrics.mean_squared_error(training_predictions, training_targets))
    validation_root_mean_squared_error = math.sqrt(
        metrics.mean_squared_error(validation_predictions, validation_targets))
    # Occasionally print the current loss.
    print("  period %02d : %0.2f" % (period, training_root_mean_squared_error))
    # Add the loss metrics from this period to our list.
    training_rmse.append(training_root_mean_squared_error)
    validation_rmse.append(validation_root_mean_squared_error)
  print("Model training finished.")

  # Output a graph of loss metrics over periods.
  plt.ylabel("RMSE")
  plt.xlabel("Periods")
  plt.title("Root Mean Squared Error vs. Periods")
  plt.tight_layout()
  plt.plot(training_rmse, label="training")
  plt.plot(validation_rmse, label="validation")
  plt.legend()

  return linear_regressor

In [13]:
linear_regressor = train_model(
    # TWEAK THESE VALUES TO SEE HOW MUCH YOU CAN IMPROVE THE RMSE
    learning_rate=0.001,
    steps=500,
    batch_size=1,
    training_examples=training_examples,
    training_targets=training_targets,
    validation_examples=validation_examples,
    validation_targets=validation_targets)


Training model...
RMSE (on training data):
  period 00 : 190.23
  period 01 : 205.84
  period 02 : 233.65
  period 03 : 207.55
  period 04 : 187.25
  period 05 : 211.57
  period 06 : 203.62
  period 07 : 194.90
  period 08 : 193.63
  period 09 : 220.05
Model training finished.

Solution

Click below for a solution.


In [0]:
def train_model(
    learning_rate,
    steps,
    batch_size,
    training_examples,
    training_targets,
    validation_examples,
    validation_targets):
  """Trains a linear regression model of multiple features.
  
  In addition to training, this function also prints training progress information,
  as well as a plot of the training and validation loss over time.
  
  Args:
    learning_rate: A `float`, the learning rate.
    steps: A non-zero `int`, the total number of training steps. A training step
      consists of a forward and backward pass using a single batch.
    batch_size: A non-zero `int`, the batch size.
    training_examples: A `DataFrame` containing one or more columns from
      `california_housing_dataframe` to use as input features for training.
    training_targets: A `DataFrame` containing exactly one column from
      `california_housing_dataframe` to use as target for training.
    validation_examples: A `DataFrame` containing one or more columns from
      `california_housing_dataframe` to use as input features for validation.
    validation_targets: A `DataFrame` containing exactly one column from
      `california_housing_dataframe` to use as target for validation.
      
  Returns:
    A `LinearRegressor` object trained on the training data.
  """

  periods = 10
  steps_per_period = steps / periods
  
  # Create a linear regressor object.
  my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
  my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
  linear_regressor = tf.estimator.LinearRegressor(
      feature_columns=construct_feature_columns(training_examples),
      optimizer=my_optimizer
  )
  
  # Create input functions.
  training_input_fn = lambda: my_input_fn(
      training_examples, 
      training_targets["median_house_value"], 
      batch_size=batch_size)
  predict_training_input_fn = lambda: my_input_fn(
      training_examples, 
      training_targets["median_house_value"], 
      num_epochs=1, 
      shuffle=False)
  predict_validation_input_fn = lambda: my_input_fn(
      validation_examples, validation_targets["median_house_value"], 
      num_epochs=1, 
      shuffle=False)

  # Train the model, but do so inside a loop so that we can periodically assess
  # loss metrics.
  print("Training model...")
  print("RMSE (on training data):")
  training_rmse = []
  validation_rmse = []
  for period in range(0, periods):
    # Train the model, starting from the prior state.
    linear_regressor.train(
        input_fn=training_input_fn,
        steps=steps_per_period,
    )
    # Take a break and compute predictions.
    training_predictions = linear_regressor.predict(input_fn=predict_training_input_fn)
    training_predictions = np.array([item['predictions'][0] for item in training_predictions])
    
    validation_predictions = linear_regressor.predict(input_fn=predict_validation_input_fn)
    validation_predictions = np.array([item['predictions'][0] for item in validation_predictions])
    
    
    # Compute training and validation loss.
    training_root_mean_squared_error = math.sqrt(
        metrics.mean_squared_error(training_predictions, training_targets))
    validation_root_mean_squared_error = math.sqrt(
        metrics.mean_squared_error(validation_predictions, validation_targets))
    # Occasionally print the current loss.
    print("  period %02d : %0.2f" % (period, training_root_mean_squared_error))
    # Add the loss metrics from this period to our list.
    training_rmse.append(training_root_mean_squared_error)
    validation_rmse.append(validation_root_mean_squared_error)
  print("Model training finished.")

  # Output a graph of loss metrics over periods.
  plt.ylabel("RMSE")
  plt.xlabel("Periods")
  plt.title("Root Mean Squared Error vs. Periods")
  plt.tight_layout()
  plt.plot(training_rmse, label="training")
  plt.plot(validation_rmse, label="validation")
  plt.legend()

  return linear_regressor

In [14]:
linear_regressor = train_model(
    learning_rate=0.00003,
    steps=500,
    batch_size=5,
    training_examples=training_examples,
    training_targets=training_targets,
    validation_examples=validation_examples,
    validation_targets=validation_targets)


Training model...
RMSE (on training data):
  period 00 : 207.41
  period 01 : 189.94
  period 02 : 177.38
  period 03 : 169.85
  period 04 : 164.78
  period 05 : 162.63
  period 06 : 161.15
  period 07 : 160.90
  period 08 : 161.07
  period 09 : 161.58
Model training finished.

Task 5: Evaluate on Test Data

In the cell below, load in the test data set and evaluate your model on it.

We've done a lot of iteration on our validation data. Let's make sure we haven't overfit to the peculiarities of that particular sample.

The test data set is located at https://download.mlcc.google.com/mledu-datasets/california_housing_test.csv.

How does your test performance compare to the validation performance? What does this say about the generalization performance of your model?


In [18]:
california_housing_test_data = pd.read_csv("https://download.mlcc.google.com/mledu-datasets/california_housing_test.csv", sep=",")
#
# YOUR CODE HERE

test_features = preprocess_features(california_housing_test_data)
test_features.describe()


Out[18]:
latitude longitude housing_median_age total_rooms total_bedrooms population households median_income rooms_per_person
count 3000.0 3000.0 3000.0 3000.0 3000.0 3000.0 3000.0 3000.0 3000.0
mean 35.6 -119.6 28.8 2599.6 530.0 1402.8 489.9 3.8 2.0
std 2.1 2.0 12.6 2155.6 415.7 1030.5 365.4 1.9 1.1
min 32.6 -124.2 1.0 6.0 2.0 5.0 2.0 0.5 0.0
25% 33.9 -121.8 18.0 1401.0 291.0 780.0 273.0 2.5 1.5
50% 34.3 -118.5 29.0 2106.0 437.0 1155.0 409.5 3.5 1.9
75% 37.7 -118.0 37.0 3129.0 636.0 1742.8 597.2 4.7 2.3
max 41.9 -114.5 52.0 30450.0 5419.0 11935.0 4930.0 15.0 33.8

In [19]:
test_targets = preprocess_targets(california_housing_test_data)
test_targets.describe()


Out[19]:
median_house_value
count 3000.0
mean 205.8
std 113.1
min 22.5
25% 121.2
50% 177.6
75% 264.0
max 500.0

In [20]:
plt.figure(figsize=(13, 8))

ax = plt.subplot(1, 2, 1)
ax.set_title("Test Data")

ax.set_autoscaley_on(False)
ax.set_ylim([32, 43])
ax.set_autoscalex_on(False)
ax.set_xlim([-126, -112])
plt.scatter(test_features["longitude"],
            test_features["latitude"],
            cmap="coolwarm",
            c=test_targets["median_house_value"] / test_targets["median_house_value"].max())

_ = plt.plot()



In [0]:
predict_test_input_fn = lambda: my_input_fn(
      test_features, 
      test_targets["median_house_value"], 
      num_epochs=1, 
      shuffle=False)

In [22]:
test_predictions = linear_regressor.predict(input_fn=predict_test_input_fn)
test_predictions = np.array([item['predictions'][0] for item in test_predictions])

root_mean_squared_error = math.sqrt(
    metrics.mean_squared_error(test_predictions, test_targets))

print("Final RMSE (on test data): %0.2f" % root_mean_squared_error)


Final RMSE (on test data): 160.71

Solution

Click below for the solution.


In [23]:
california_housing_test_data = pd.read_csv("https://download.mlcc.google.com/mledu-datasets/california_housing_test.csv", sep=",")

test_examples = preprocess_features(california_housing_test_data)
test_targets = preprocess_targets(california_housing_test_data)

predict_test_input_fn = lambda: my_input_fn(
      test_examples, 
      test_targets["median_house_value"], 
      num_epochs=1, 
      shuffle=False)

test_predictions = linear_regressor.predict(input_fn=predict_test_input_fn)
test_predictions = np.array([item['predictions'][0] for item in test_predictions])

root_mean_squared_error = math.sqrt(
    metrics.mean_squared_error(test_predictions, test_targets))

print("Final RMSE (on test data): %0.2f" % root_mean_squared_error)


Final RMSE (on test data): 160.71
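
To answer the comparison question concretely, compute the final validation RMSE with the same recipe and set it beside the test RMSE. This is a sketch reusing the objects defined above; the exact number depends on your training run.

predict_validation_input_fn = lambda: my_input_fn(
    validation_examples,
    validation_targets["median_house_value"],
    num_epochs=1,
    shuffle=False)

validation_predictions = linear_regressor.predict(input_fn=predict_validation_input_fn)
validation_predictions = np.array(
    [item['predictions'][0] for item in validation_predictions])

validation_rmse = math.sqrt(
    metrics.mean_squared_error(validation_predictions, validation_targets))
print("Final RMSE (on validation data): %0.2f" % validation_rmse)
# If this value is close to the 160.71 test RMSE above, the model has
# not badly overfit the validation split.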