Objective:
- Train and tune three types of H2O regression model (Gradient Boosting Machine, Distributed Random Forest, and Deep Learning) with random grid search, then combine the best model from each grid into a stacked ensemble.

Wine Quality Dataset:
- White wine samples from the UCI Wine Quality dataset ('winequality-white.csv'); the numeric 'quality' score is the target and all physicochemical measurements are the predictors.

Steps:
1. Start and connect to a local H2O cluster.
2. Import the data; define features and target.
3. Split the data into training (80%) and hold-out test (20%) sets.
4. Run a random grid search for each algorithm with 5-fold cross-validation.
5. Stack the best model from each grid with H2OStackedEnsembleEstimator.
6. Compare the hold-out MSE of the individual models and the ensemble.

Full Technical Reference:
- H2O Python module documentation: http://docs.h2o.ai/
In [1]:
# Import all required modules
import h2o
from h2o.estimators.gbm import H2OGradientBoostingEstimator
from h2o.estimators.random_forest import H2ORandomForestEstimator
from h2o.estimators.deeplearning import H2ODeepLearningEstimator
from h2o.estimators.stackedensemble import H2OStackedEnsembleEstimator
from h2o.grid.grid_search import H2OGridSearch
# Start and connect to a local H2O cluster
h2o.init(nthreads = -1)
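As an optional sanity check, the cluster status (H2O version, number of nodes, free memory) can be printed before loading any data; a minimal sketch, assuming a reasonably recent h2o-py version:

# Optional: confirm the cluster is up and inspect its resources
h2o.cluster().show_status()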
In [2]:
# Import wine quality data from a local CSV file
wine = h2o.import_file("winequality-white.csv")
wine.head(5)
Out[2]:
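For a quick feel for the columns beyond the first few rows, H2OFrame offers a built-in summary; this optional aside simply calls it on the imported frame:

# Optional: per-column types and summary statistics (min/max/mean, missing counts)
wine.describe()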
In [3]:
# Define features (or predictors)
features = list(wine.columns) # we want to use all the information
features.remove('quality') # we need to exclude the target 'quality' (otherwise there is nothing to predict)
features
Out[3]:
In [4]:
# Split the H2O data frame into training/test sets
# so we can evaluate performance on a hold-out set
wine_split = wine.split_frame(ratios = [0.8], seed = 1234)
wine_train = wine_split[0]  # using 80% for training
wine_test = wine_split[1]   # using the remaining 20% for hold-out evaluation
In [5]:
wine_train.shape
Out[5]:
In [6]:
wine_test.shape
Out[6]:
In [7]:
# Define the criteria for the random grid search
search_criteria = {'strategy': "RandomDiscrete",
                   'max_models': 9,
                   'seed': 1234}
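As a side note, RandomDiscrete also accepts a wall-clock budget via 'max_runtime_secs', stopping at whichever limit is hit first; a hedged variant (the 600-second budget and the name search_criteria_timed are purely illustrative):

# Alternative criteria (sketch): stop after 9 models or 10 minutes, whichever comes first
search_criteria_timed = {'strategy': "RandomDiscrete",
                         'max_models': 9,
                         'max_runtime_secs': 600,   # illustrative time budget
                         'seed': 1234}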
In [8]:
# Define the range of hyper-parameters for the GBM grid search
# (3 x 3 x 3 = 27 combinations in total)
hyper_params = {'sample_rate': [0.7, 0.8, 0.9],
                'col_sample_rate': [0.7, 0.8, 0.9],
                'max_depth': [3, 5, 7]}
In [9]:
# Set up GBM grid search
# Add a seed for reproducibility
gbm_rand_grid = H2OGridSearch(
    H2OGradientBoostingEstimator(
        model_id = 'gbm_rand_grid',
        seed = 1234,
        ntrees = 10000,   # upper bound only; early stopping below decides the actual size
        nfolds = 5,
        fold_assignment = "Modulo",                 # needed for stacked ensembles
        keep_cross_validation_predictions = True,   # needed for stacked ensembles
        stopping_metric = 'mse',
        stopping_rounds = 15,
        score_tree_interval = 1),
    search_criteria = search_criteria,
    hyper_params = hyper_params)
In [10]:
# Use .train() to start the grid search
gbm_rand_grid.train(x = features,
                    y = 'quality',
                    training_frame = wine_train)
In [11]:
# Sort and show the grid search results
gbm_rand_grid_sorted = gbm_rand_grid.get_grid(sort_by='mse', decreasing=False)
print(gbm_rand_grid_sorted)
In [12]:
# Extract the best model from random grid search
best_gbm_model_id = gbm_rand_grid_sorted.model_ids[0]
best_gbm_from_rand_grid = h2o.get_model(best_gbm_model_id)
best_gbm_from_rand_grid.summary()
Out[12]:
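To see which of the 27 combinations won and how stable it was across folds, the model object can be queried directly; a sketch using standard model accessors (the selection of keys shown is illustrative):

# Which hyper-parameter values did the winning GBM use?
print({k: best_gbm_from_rand_grid.actual_params[k]
       for k in ('sample_rate', 'col_sample_rate', 'max_depth')})
# Per-fold cross-validation metrics for the winning model
best_gbm_from_rand_grid.cross_validation_metrics_summary()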
In [13]:
# Define the range of hyper-parameters for the DRF grid search
# (3 x 3 x 3 = 27 combinations in total)
hyper_params = {'sample_rate': [0.5, 0.6, 0.7],
                'col_sample_rate_per_tree': [0.7, 0.8, 0.9],
                'max_depth': [3, 5, 7]}
In [14]:
# Set up DRF grid search
# Add a seed for reproducibility
drf_rand_grid = H2OGridSearch(
    H2ORandomForestEstimator(
        model_id = 'drf_rand_grid',
        seed = 1234,
        ntrees = 200,
        nfolds = 5,
        fold_assignment = "Modulo",                 # needed for stacked ensembles
        keep_cross_validation_predictions = True),  # needed for stacked ensembles
    search_criteria = search_criteria,
    hyper_params = hyper_params)
In [15]:
# Use .train() to start the grid search
drf_rand_grid.train(x = features,
                    y = 'quality',
                    training_frame = wine_train)
In [16]:
# Sort and show the grid search results
drf_rand_grid_sorted = drf_rand_grid.get_grid(sort_by='mse', decreasing=False)
print(drf_rand_grid_sorted)
In [17]:
# Extract the best model from random grid search
best_drf_model_id = drf_rand_grid_sorted.model_ids[0]
best_drf_from_rand_grid = h2o.get_model(best_drf_model_id)
best_drf_from_rand_grid.summary()
Out[17]:
In [18]:
# Define the range of hyper-parameters for the DNN grid search
# (3 x 3 x 3 x 3 = 81 combinations in total)
hyper_params = {'activation': ['tanh', 'rectifier', 'maxout'],
                'hidden': [[50], [50, 50], [50, 50, 50]],
                'l1': [0, 1e-3, 1e-5],
                'l2': [0, 1e-3, 1e-5]}
In [19]:
# Set up DNN grid search
# Add a seed for reproducibility
dnn_rand_grid = H2OGridSearch(
    H2ODeepLearningEstimator(
        model_id = 'dnn_rand_grid',
        seed = 1234,
        epochs = 20,
        nfolds = 5,
        fold_assignment = "Modulo",                 # needed for stacked ensembles
        keep_cross_validation_predictions = True),  # needed for stacked ensembles
    search_criteria = search_criteria,
    hyper_params = hyper_params)
In [20]:
# Use .train() to start the grid search
dnn_rand_grid.train(x = features,
                    y = 'quality',
                    training_frame = wine_train)
In [21]:
# Sort and show the grid search results
dnn_rand_grid_sorted = dnn_rand_grid.get_grid(sort_by='mse', decreasing=False)
print(dnn_rand_grid_sorted)
In [22]:
# Extract the best model from random grid search
best_dnn_model_id = dnn_rand_grid_sorted.model_ids[0]
best_dnn_from_rand_grid = h2o.get_model(best_dnn_model_id)
best_dnn_from_rand_grid.summary()
Out[22]:
In [23]:
# Define the list of models to be stacked,
# i.e., the best model from each random grid search
all_ids = [best_gbm_model_id, best_drf_model_id, best_dnn_model_id]
In [24]:
# Set up Stacked Ensemble
ensemble = H2OStackedEnsembleEstimator(model_id = "my_ensemble",
                                       base_models = all_ids)
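Newer H2O versions also allow swapping the metalearner via 'metalearner_algorithm'; a hedged sketch (the model_id is illustrative, and this variant is not trained below):

# Variant (sketch): use a GBM metalearner instead of the default GLM
ensemble_gbm_meta = H2OStackedEnsembleEstimator(model_id = "my_ensemble_gbm_meta",
                                                base_models = all_ids,
                                                metalearner_algorithm = "gbm")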
In [25]:
# Use .train() to start model stacking
# (a GLM is the default metalearner)
ensemble.train(x = features,
               y = 'quality',
               training_frame = wine_train)
In [26]:
# Evaluate all models on the same hold-out test set and compare MSE (lower is better)
print('Best GBM model from Grid (MSE) : ', best_gbm_from_rand_grid.model_performance(wine_test).mse())
print('Best DRF model from Grid (MSE) : ', best_drf_from_rand_grid.model_performance(wine_test).mse())
print('Best DNN model from Grid (MSE) : ', best_dnn_from_rand_grid.model_performance(wine_test).mse())
print('Stacked Ensembles (MSE)        : ', ensemble.model_performance(wine_test).mse())
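Finally, the trained ensemble can be persisted and the cluster shut down; a minimal sketch, assuming a writable ./models directory (the path is illustrative):

# Save the stacked ensemble as an H2O binary model and print its location
model_path = h2o.save_model(model = ensemble, path = "./models", force = True)
print(model_path)
# Shut down the local cluster when finished (uncomment to run)
# h2o.cluster().shutdown()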