ML using Decision Trees


In [1]:
import warnings
warnings.filterwarnings('ignore')

In [2]:
%matplotlib inline
# %pylab additionally imports numpy (as np) and matplotlib into the
# notebook namespace; meshGrid below relies on np coming from here
%pylab inline


Populating the interactive namespace from numpy and matplotlib

In [3]:
import pandas as pd
print(pd.__version__)


0.22.0

First Step: Load the data and split it into features and labels


In [4]:
df = pd.read_csv('./insurance-customers-300.csv', sep=';')

In [5]:
y = df['group']

In [6]:
df.drop('group', axis='columns', inplace=True)

In [7]:
# note: as_matrix() is the pandas 0.22 API used here; newer pandas
# versions use df.values instead
X = df.as_matrix()

In [8]:
df.describe()


Out[8]:
        max speed         age  thousand km per year
count  300.000000  300.000000            300.000000
mean   171.863333   44.006667             31.220000
std     18.807545   16.191784             15.411792
min    132.000000   18.000000              5.000000
25%    159.000000   33.000000             18.000000
50%    171.000000   42.000000             30.000000
75%    187.000000   52.000000             43.000000
max    211.000000   90.000000             99.000000

Second Step: Decision Trees


In [9]:
# ignore this, it is just technical code
# should come from a lib, consider it to appear magically 
# http://scikit-learn.org/stable/auto_examples/neighbors/plot_classification.html

import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap

cmap_print = ListedColormap(['#AA8888', '#004000', '#FFFFDD'])
cmap_bold = ListedColormap(['#AA4444', '#006000', '#AAAA00'])
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#FFFFDD'])
font_size=25

def meshGrid(x_data, y_data):
    h = 1  # step size in the mesh
    x_min, x_max = x_data.min() - 1, x_data.max() + 1
    y_min, y_max = y_data.min() - 1, y_data.max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    return (xx,yy)
    
def plotPrediction(clf, x_data, y_data, x_label, y_label, colors, title="", mesh=True, fname=None, for_print=False):
    xx,yy = meshGrid(x_data, y_data)
    plt.figure(figsize=(20,10))

    if clf and mesh:
        Z = clf.predict(np.c_[yy.ravel(), xx.ravel()])
        # Put the result into a color plot
        Z = Z.reshape(xx.shape)
        plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
    
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    if for_print:
        plt.scatter(x_data, y_data, c=colors, cmap=cmap_print, s=200, marker='o', edgecolors='k')
    else:
        plt.scatter(x_data, y_data, c=colors, cmap=cmap_bold, s=80, marker='o', edgecolors='k')
    plt.xlabel(x_label, fontsize=font_size)
    plt.ylabel(y_label, fontsize=font_size)
    plt.title(title, fontsize=font_size)
    if fname:
        plt.savefig(fname)

In [10]:
from sklearn.model_selection import train_test_split

In [11]:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=42, stratify=y)
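
The stratify=y argument keeps the three groups in equal proportion in both splits; a quick check (sketch):

# both splits should show the groups in the same 1:1:1 ratio
y_train.value_counts(), y_test.value_counts()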

In [12]:
X_train.shape, y_train.shape, X_test.shape, y_test.shape


Out[12]:
((180, 3), (180,), (120, 3), (120,))

In [13]:
# keep only the first two features: max speed (km/h) and age
X_train_kmh_age = X_train[:, :2]
X_test_kmh_age = X_test[:, :2]
X_train_2_dim = X_train_kmh_age
X_test_2_dim = X_test_kmh_age

In [14]:
from sklearn.tree import DecisionTreeClassifier
clf = DecisionTreeClassifier()
%time clf.fit(X_train_2_dim, y_train)


Wall time: 2.01 ms
Out[14]:
DecisionTreeClassifier(class_weight=None, criterion='gini', max_depth=None,
            max_features=None, max_leaf_nodes=None,
            min_impurity_decrease=0.0, min_impurity_split=None,
            min_samples_leaf=1, min_samples_split=2,
            min_weight_fraction_leaf=0.0, presort=False, random_state=None,
            splitter='best')

In [15]:
clf.tree_.max_depth


Out[15]:
13
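
An unpruned tree keeps splitting until its leaves are pure, so a depth of 13 on only 180 training samples is a first hint at memorization. A quick check (sketch):

# total number of nodes (splits plus leaves) in the unpruned tree
clf.tree_.node_count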

In [16]:
clf.predict(X_train_2_dim[0:10])


Out[16]:
array([2, 2, 2, 1, 2, 1, 2, 2, 0, 0], dtype=int64)

In [17]:
y_train[0:10]


Out[17]:
282    2
182    2
207    2
167    1
90     2
152    1
136    2
149    2
50     0
151    0
Name: group, dtype: int64

In [18]:
clf.predict_proba(X_train_2_dim[0:10])


Out[18]:
array([[0., 0., 1.],
       [0., 0., 1.],
       [0., 0., 1.],
       [0., 1., 0.],
       [0., 0., 1.],
       [0., 1., 0.],
       [0., 0., 1.],
       [0., 0., 1.],
       [1., 0., 0.],
       [1., 0., 0.]])
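
All probabilities are exactly 0 or 1 because every leaf of the unpruned tree is pure. A depth-limited tree has impure leaves and therefore returns fractional class probabilities (a sketch reusing the training data from above):

# a shallow tree cannot separate the classes perfectly, so its leaves
# contain mixtures and predict_proba returns class fractions
shallow_clf = DecisionTreeClassifier(max_depth=3).fit(X_train_2_dim, y_train)
shallow_clf.predict_proba(X_train_2_dim[0:10])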

In [19]:
plotPrediction(clf, X_train_2_dim[:, 1], X_train_2_dim[:, 0], 
               'Age', 'Max Speed', y_train,
                title="Train Data Max Speed vs Age with Classification")


Look how well it is doing!


In [20]:
clf.score(X_train_2_dim, y_train)


Out[20]:
0.9777777777777777

But really?


In [21]:
plotPrediction(clf, X_test_2_dim[:, 1], X_test_2_dim[:, 0], 
               'Age', 'Max Speed', y_test,
                title="Test Data Max Speed vs Age with Prediction")



In [22]:
clf.score(X_test_2_dim, y_test)


Out[22]:
0.6

Probably still better than our manual result, but this is clearly overfitting: near-perfect accuracy on the training data, yet only 60% on the test data.
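
To make the overfitting visible, a small sweep (a sketch, not part of the original notebook) compares train and test accuracy as the tree is allowed to grow deeper; watch where the two curves drift apart:

# train vs. test accuracy for increasing maximum tree depth
for depth in range(1, 14):
    tree = DecisionTreeClassifier(max_depth=depth, random_state=42)
    tree.fit(X_train_2_dim, y_train)
    print(depth,
          round(tree.score(X_train_2_dim, y_train), 3),
          round(tree.score(X_test_2_dim, y_test), 3))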

What does the decision tree look like?


In [23]:
# PDF
# Needs GraphViz installed (dot)
# http://scikit-learn.org/stable/modules/tree.html
# !conda install python-graphviz -y

In [24]:
import graphviz 
from sklearn.tree import export_graphviz

# http://scikit-learn.org/stable/modules/generated/sklearn.tree.export_graphviz.html
dot_data = export_graphviz(clf, out_file=None,
                feature_names=['Speed', 'Age'],
                class_names=['red', 'green', 'yellow'],
#                 filled=True, 
                rounded=True,
                special_characters=True)

# graph = graphviz.Source(dot_data) 
# graph.render("tree") 
graphviz.Source(dot_data)


Out[24]:
[rendered GraphViz decision tree, 131 nodes; root split: Speed ≤ 146.5 (gini = 0.667, samples = 180, value = [60, 60, 60]); the left branch is a pure yellow leaf of 20 samples, while the rest keeps splitting on Speed and Age all the way down to depth 13]

In [25]:
# this gives us nice pngs
# https://medium.com/@rnbrown/creating-and-visualizing-decision-trees-with-python-f8e8fa394176
# https://graphviz.gitlab.io/download/
# !conda install -c conda-forge pydotplus -y

In [26]:
# sklearn.externals.six was removed in newer scikit-learn; use io.StringIO there
from sklearn.externals.six import StringIO
from IPython.display import Image  
from sklearn.tree import export_graphviz
import pydotplus

# http://scikit-learn.org/stable/modules/generated/sklearn.tree.export_graphviz.html
def plot_dt(clf):
    dot_data = StringIO()
    export_graphviz(clf, out_file=dot_data,
                    feature_names=['Speed', 'Age'],
                    class_names=['red', 'green', 'yellow'],
    #                 filled=True, 
                    rounded=True,
                    special_characters=True)
    graph = pydotplus.graph_from_dot_data(dot_data.getvalue())  
    return Image(graph.create_png())

plot_dt(clf)


Out[26]:
[PNG: the full, unpruned decision tree]

In [27]:
dot_data = StringIO()
export_graphviz(clf, out_file=dot_data,
                feature_names=['Speed', 'Age'],
                class_names=['red', 'green', 'yellow'],
#                 filled=True, 
                max_depth=3,
                rounded=True,
                special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())  
Image(graph.create_png())


Out[27]:
[PNG: the same tree, truncated at depth 3 for readability]

Third Step: Less overfitting, at the price of some underfitting


In [28]:
# DecisionTreeClassifier?

In [29]:
clf = DecisionTreeClassifier(max_depth=3,
                              min_samples_leaf=10,
                              min_samples_split=20)
%time clf.fit(X_train_2_dim, y_train)


Wall time: 998 µs
Out[29]:
DecisionTreeClassifier(class_weight=None, criterion='gini', max_depth=3,
            max_features=None, max_leaf_nodes=None,
            min_impurity_decrease=0.0, min_impurity_split=None,
            min_samples_leaf=10, min_samples_split=20,
            min_weight_fraction_leaf=0.0, presort=False, random_state=None,
            splitter='best')

In [30]:
plot_dt(clf)


Out[30]:
[PNG: the pruned tree (max_depth=3)]

In [31]:
plotPrediction(clf, X_train_2_dim[:, 1], X_train_2_dim[:, 0], 
               'Age', 'Max Speed', y_train,
                title="Train Data Max Speed vs Age with Classification")



In [32]:
clf.score(X_train_2_dim, y_train)


Out[32]:
0.65

In [33]:
plotPrediction(clf, X_test_2_dim[:, 1], X_test_2_dim[:, 0], 
               'Age', 'Max Speed', y_test,
                title="Test Data Max Speed vs Age with Prediction")



In [34]:
clf.score(X_test_2_dim, y_test)


Out[34]:
0.5916666666666667

Fourth Step: Ensemble Methods

  • Decision Trees have a tendency to overfit
  • Ensemble Methods train a number of Decision Trees and combine their outputs to reduce overfitting (a hand-rolled sketch of the idea follows)
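
A minimal sketch of that idea (simplified bagging, an assumption of this write-up rather than the original notebook: bootstrap samples plus a majority vote, without the per-split feature subsetting a real Random Forest adds):

import numpy as np

rng = np.random.RandomState(42)
trees = []
for _ in range(5):
    # bootstrap: draw len(X_train_2_dim) training samples with replacement
    idx = rng.randint(0, len(X_train_2_dim), len(X_train_2_dim))
    tree = DecisionTreeClassifier(max_depth=3)
    trees.append(tree.fit(X_train_2_dim[idx], y_train.iloc[idx]))

# each tree votes; the most frequent class per test sample wins
preds = np.array([t.predict(X_test_2_dim) for t in trees])
votes = np.apply_along_axis(np.bincount, 0, preds, minlength=3)
majority = votes.argmax(axis=0)
(majority == y_test.values).mean()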

Random Forest


In [35]:
from sklearn.ensemble import RandomForestClassifier
rf_clf = RandomForestClassifier(n_estimators=100, max_depth=3, n_jobs=4)

In [36]:
%time rf_clf.fit(X_train_2_dim, y_train)


Wall time: 222 ms
Out[36]:
RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',
            max_depth=3, max_features='auto', max_leaf_nodes=None,
            min_impurity_decrease=0.0, min_impurity_split=None,
            min_samples_leaf=1, min_samples_split=2,
            min_weight_fraction_leaf=0.0, n_estimators=100, n_jobs=4,
            oob_score=False, random_state=None, verbose=0,
            warm_start=False)

In [37]:
plotPrediction(rf_clf, X_train_2_dim[:, 1], X_train_2_dim[:, 0], 
               'Age', 'Max Speed', y_train,
                title="Train Data Max Speed vs Age with Classification")



In [38]:
rf_clf.score(X_train_2_dim, y_train)


Out[38]:
0.7111111111111111

In [39]:
plotPrediction(rf_clf, X_test_2_dim[:, 1], X_test_2_dim[:, 0], 
               'Age', 'Max Speed', y_test,
                title="Test Data Max Speed vs Age with Prediction")



In [40]:
rf_clf.score(X_test_2_dim, y_test)


Out[40]:
0.6666666666666666
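
As a side benefit, a fitted forest reports how much each feature contributed to its splits (impurity-based importances; a quick sketch using the rf_clf trained above):

# relative importance of max speed vs. age
list(zip(['max speed', 'age'], rf_clf.feature_importances_))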

Cross Validation is a way to make the score more telling

  • we run training and scoring many times (10 in our case)
  • each time a different part of the data is held out for validation
  • averaging over many runs takes the randomness of a single split out of the score
  • additionally, every data point is used for training in most folds and for validation in exactly one (see the sketch below)
  • this only works when training time is reasonably short
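
Roughly what cross_val_score does under the hood (a sketch; assumption: stratified folds, which scikit-learn uses by default for classifiers):

# manual 10-fold cross validation, mirroring cross_val_score
from sklearn.model_selection import StratifiedKFold

X_2_dim = X[:, :2]
skf = StratifiedKFold(n_splits=10)
fold_scores = []
for train_idx, val_idx in skf.split(X_2_dim, y):
    model = RandomForestClassifier(n_estimators=100, max_depth=3)
    model.fit(X_2_dim[train_idx], y.iloc[train_idx])
    fold_scores.append(model.score(X_2_dim[val_idx], y.iloc[val_idx]))
fold_scores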

In [47]:
# http://scikit-learn.org/stable/modules/cross_validation.html
from sklearn.model_selection import cross_val_score

In [54]:
scores = cross_val_score(rf_clf, X[:, :2], y, cv=10)

In [55]:
scores


Out[55]:
array([0.56666667, 0.66666667, 0.73333333, 0.66666667, 0.66666667,
       0.9       , 0.63333333, 0.63333333, 0.66666667, 0.63333333])

In [56]:
# https://en.wikipedia.org/wiki/68%E2%80%9395%E2%80%9399.7_rule
print("Accuracy: %0.2f (+/- %0.2f for 95 percent of runs)" % (scores.mean(), scores.std() * 2))


Accuracy: 0.68 (+/- 0.17 for 95 percent of runs)

AdaBoost


In [41]:
from sklearn.ensemble import AdaBoostClassifier
boost_clf = AdaBoostClassifier(n_estimators=100, learning_rate=3, random_state=42)

In [42]:
%time boost_clf.fit(X_train_2_dim, y_train)


Wall time: 113 ms
Out[42]:
AdaBoostClassifier(algorithm='SAMME.R', base_estimator=None, learning_rate=3,
          n_estimators=100, random_state=42)

In [43]:
plotPrediction(boost_clf, X_train_2_dim[:, 1], X_train_2_dim[:, 0], 
               'Age', 'Max Speed', y_train,
                title="Train Data Max Speed vs Age with Classification")



In [44]:
boost_clf.score(X_train_2_dim, y_train)


Out[44]:
0.4722222222222222

In [45]:
plotPrediction(boost_clf, X_test_2_dim[:, 1], X_test_2_dim[:, 0], 
               'Age', 'Max Speed', y_test,
                title="Test Data Max Speed vs Age with Prediction")



In [46]:
boost_clf.score(X_test_2_dim, y_test)


Out[46]:
0.475
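
A learning rate of 3 is very aggressive, which likely explains the weak scores. As a hypothetical experiment (not part of the original run, results not shown), one could shrink the learning rate and give the base estimator some depth:

# assumption: smaller shrinkage plus a depth-limited base tree;
# whether this actually helps on this data would need to be verified
tuned_boost = AdaBoostClassifier(
    base_estimator=DecisionTreeClassifier(max_depth=3),
    n_estimators=100, learning_rate=0.5, random_state=42)
tuned_boost.fit(X_train_2_dim, y_train)
tuned_boost.score(X_test_2_dim, y_test)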
