In [ ]:
import h2o
import csv
import time
import numpy as np
import matplotlib.pyplot as plt

In [ ]:
# Connect to a running H2O cluster, or start a local one if none is found
h2o.init()

In [ ]:
# _locate is a private helper used to resolve paths inside the h2o git project directory.
from h2o.utils.shared_utils import _locate

# Import and parse ACS 2013 5-year DP02 demographic data:
# the first column is the ZCTA code (categorical), the remaining 149 are numeric.
acs_col_types = ["enum"] + ["numeric"] * 149
acs_orig = h2o.upload_file(path=_locate("bigdata/laptop/census/ACS_13_5YR_DP02_cleaned.zip"),
                           col_types=acs_col_types)
acs_orig.describe()

# Set the ZCTA identifier aside and keep only the numeric features for modeling
acs_zcta_col = acs_orig["ZCTA5"]
acs_full = acs_orig.drop("ZCTA5")

In [ ]:
# Import and parse WHD 2014-2015 labor violations data:
# 7 leading categorical columns followed by 97 numeric ones.
whd_col_types = ["enum"] * 7 + ["numeric"] * 97
whd_zcta = h2o.upload_file(path=_locate("bigdata/laptop/census/whd_zcta_cleaned.zip"),
                           col_types=whd_col_types)
whd_zcta.describe()

In [ ]:
# Run GLRM to reduce ZCTA demographics to 10 archetypes.
# Quadratic regularization on X keeps embeddings small; L1 on Y drives a
# sparse archetype-to-feature mapping.
acs_model = h2o.glrm(x = acs_full,
                     k = 10,
                     transform = "STANDARDIZE",
                     loss = "Quadratic",
                     regularization_x = "Quadratic",
                     regularization_y = "L1",
                     gamma_x = 0.25,
                     gamma_y = 0.5,
                     max_iterations = 100)
# Use print() call syntax: the Python-2-only `print acs_model` statement
# is a SyntaxError on a Python 3 kernel; the call form works on both.
print(acs_model)

In [ ]:
# Plot the objective function value at each GLRM iteration
acs_model_score = acs_model.score_history()
plt.plot(acs_model_score["iteration"], acs_model_score["objective"])
plt.title("Objective Function Value per Iteration")
plt.xlabel("Iteration")
plt.ylabel("Objective")
plt.show()

In [ ]:
# Embedding of ZCTAs into archetypes (the GLRM X matrix): each row is a ZCTA
# expressed as 10 archetype weights. representation_name is the key of the
# X frame held in the H2O cluster.
zcta_arch_x = h2o.get_frame(acs_model._model_json["output"]["representation_name"])
zcta_arch_x.head()

In [ ]:
# Archetypes to full feature mapping (the GLRM Y matrix)
arch_feat_y = acs_model._model_json["output"]["archetypes"]
# print() call syntax instead of the Python-2-only print statement,
# so the cell runs on both Python 2 and Python 3 kernels
print(arch_feat_y)

In [ ]:
# Split WHD data into train/test with an 80/20 ratio.
# Seed the uniform draw so the split — and every downstream comparison
# between the original and reduced models — is reproducible across re-runs.
split = whd_zcta["flsa_repeat_violator"].runif(seed=1234)
train = whd_zcta[split <= 0.8]
test = whd_zcta[split > 0.8]

# Build a deep learning model to predict repeat violators, timing the run
# so runtime can be compared against the archetype-reduced model later.
s = time.time()
dl_orig = h2o.deeplearning(x = train[4:].drop("flsa_repeat_violator"),
                           y = train["flsa_repeat_violator"],
                           validation_x = test[4:].drop("flsa_repeat_violator"),
                           validation_y = test["flsa_repeat_violator"],
                           epochs = 0.1,
                           hidden = [50,50,50],
                           distribution = "multinomial")
orig_elapsed = time.time() - s

In [ ]:
# Replace the zcta5_cd column in the WHD data with the GLRM archetypes:
# attach the ZCTA code to the X frame, left-join it onto the WHD frame
# (all_x=True keeps every WHD row), then drop the join key.
zcta_arch_x["zcta5_cd"] = acs_zcta_col
whd_arch = whd_zcta.merge(zcta_arch_x, all_x = True, all_y = False)
whd_arch = whd_arch.drop("zcta5_cd")
whd_arch.describe()

In [ ]:
# Split the archetype-based WHD data with the same split vector as before,
# so both models see the same train/test rows (80/20 ratio).
train_mod = whd_arch[split <= 0.8]
test_mod = whd_arch[split > 0.8]

# Build a deep learning model (the original comment said GBM, but this is
# h2o.deeplearning) on the reduced data to predict repeat violators, and
# time it for comparison with the original model.
s = time.time()
dl_mod = h2o.deeplearning(x = train_mod[4:].drop("flsa_repeat_violator"),
                          y = train_mod["flsa_repeat_violator"],
                          validation_x = test_mod[4:].drop("flsa_repeat_violator"),
                          validation_y = test_mod["flsa_repeat_violator"],
                          epochs = 0.1, 
                          hidden = [50,50,50], 
                          distribution = "multinomial")
mod_elapsed = time.time() - s

In [ ]:
# Model performance comparison
# Model performance comparison.
# Score each model on the frames it was built from: dl_orig on the original
# WHD frames, dl_mod on the archetype-reduced frames.
train_ll_orig = dl_orig.model_performance(train).logloss()
test_ll_orig = dl_orig.model_performance(test).logloss()
# BUG FIX: dl_mod was previously scored on `train`/`test`, which do not have
# the archetype columns dl_mod was trained on — use train_mod/test_mod.
train_ll_mod = dl_mod.model_performance(train_mod).logloss()
test_ll_mod = dl_mod.model_performance(test_mod).logloss()

# Print the results in a pretty HTML table
header = ["Metric", "Original", "Reduced"]
table = [
         ["Runtime", orig_elapsed, mod_elapsed],
         ["Train LogLoss", train_ll_orig, train_ll_mod],
         ["Test LogLoss", test_ll_orig, test_ll_mod],
        ]
h2o.display.H2ODisplay(table, header)

In [ ]: