In [0]:
import matplotlib.pyplot as plt
# plt.xkcd()
# plt.style.use('ggplot')
%matplotlib inline
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = (20, 8)
In [2]:
!curl -O https://raw.githubusercontent.com/DJCordhose/deep-learning-crash-course-notebooks/master/data/insurance-customers-1500.csv
In [3]:
!head insurance-customers-1500.csv
In [0]:
import pandas as pd
df = pd.read_csv('./insurance-customers-1500.csv', sep=';')
In [5]:
df.head()
Out[5]:
In [6]:
df.describe()
Out[6]:
In [8]:
import seaborn as sns
sample_df = df.sample(n=250, random_state=42)
sns.pairplot(sample_df, hue="group", kind="scatter", diag_kind="kde",
             vars=['speed', 'age', 'miles'],
             palette={0: '#AA4444', 1: '#000060', 2: '#EEEE44'});
In [0]:
# we deliberately decide this is going to be our label; by convention it is named lower-case y
y = df['group']
In [10]:
y.head()
Out[10]:
In [0]:
# since 'group' is now the label we want to predict, we need to remove it from the training data
df.drop('group', axis='columns', inplace=True)
In [12]:
df.head()
Out[12]:
In [0]:
# input data is often named upper-case X; the upper case indicates that each row is a vector
X = df.to_numpy()
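As a quick sanity check (a sketch, not part of the original notebook): X should now be a plain matrix of 1500 row vectors, one customer per row with the three features speed, age, and miles.
In [0]:
# sketch: X is a matrix of row vectors — one customer per row, three features per customer
X.shape, X[0]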
In [0]:
import numpy as np
# ignore this, it is just technical code to plot decision boundaries
# Adapted from:
# http://scikit-learn.org/stable/auto_examples/neighbors/plot_classification.html
# http://jponttuset.cat/xkcd-deep-learning/
from matplotlib.colors import ListedColormap
from matplotlib.ticker import FuncFormatter
# https://matplotlib.org/users/colormaps.html
# https://matplotlib.org/gallery/images_contours_and_fields/contourf_demo.html#sphx-glr-gallery-images-contours-and-fields-contourf-demo-py
cmap = ListedColormap(['#FF0000', '#0000FF', '#FFFF00'])
font_size = 25
title_font_size = 40

def meshGrid(x_data, y_data):
    h = 1  # step size in the mesh
    x_min, x_max = 10, 100
    y_min, y_max = 80, 170
    # x_min, x_max = x_data.min() - 1, x_data.max() + 1
    # y_min, y_max = y_data.min() - 1, y_data.max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    return (xx, yy)
def plotPrediction(clf, x_data, y_data, x_label, y_label, ground_truth, title="",
                   size=(15, 8), n_samples=300, proba=False, prediction=True,
                   legend=False  # Colab ships an old matplotlib that does not support scatter.legend_elements
                   ):
    xx, yy = meshGrid(x_data, y_data)
    fig, ax = plt.subplots(figsize=size)

    if clf:
        # the model was trained on (speed, age) but the plot has age on x and speed on y,
        # so the mesh coordinates are swapped back to (speed, age) before predicting
        Z = clf.predict_proba(np.c_[yy.ravel(), xx.ravel()])
        if proba:
            probaZ = Z.max(axis=1)
            probaZ = probaZ.reshape(xx.shape)
            ax.contourf(xx, yy, probaZ, cmap=plt.cm.binary, alpha=.4)
        if prediction:
            Z = Z.argmax(axis=1)
            Z = Z.reshape(xx.shape)
            ax.contourf(xx, yy, Z, cmap=cmap, alpha=.3)

    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())

    samples = pd.DataFrame(np.array([x_data, y_data, ground_truth]).T).sample(n_samples, random_state=42)
    classes = samples[2]
    scatter = ax.scatter(samples[0], samples[1], c=classes, cmap=cmap, s=100, marker='o', edgecolors='k')

    ax.set_xlabel(x_label, fontsize=font_size)
    ax.set_ylabel(y_label, fontsize=font_size)
    ax.set_title(title, fontsize=title_font_size)

    # https://matplotlib.org/3.1.0/gallery/lines_bars_and_markers/scatter_with_legend.html
    formatter = FuncFormatter(lambda s, t: "Many Accidents" if s == 0 else "No Accidents" if s == 1 else "Few Accidents")
    if legend:
        class_legend = ax.legend(*scatter.legend_elements(alpha=0.6, prop='colors', fmt=formatter), title="Classes")
        ax.add_artist(class_legend)

    return ax, scatter
In [15]:
plotPrediction(None, X[:, 1], X[:, 0],
               'Age', 'Max Speed', y,
               title="All Data");
In [16]:
# just two dimensions
X[:, :2]
Out[16]:
In [17]:
from sklearn.tree import DecisionTreeClassifier
clf = DecisionTreeClassifier()
%time clf.fit(X[:, :2], y)
Out[17]:
In [18]:
# a single customer: max speed 100, age 48 — the same [speed, age] order the model was trained on
# (named sample to avoid shadowing Python's built-in input)
sample = [[100.0, 48.0]]
clf.predict(sample)
Out[18]:
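Besides the hard class label, scikit-learn classifiers also expose per-class probabilities via predict_proba; a minimal sketch on the same customer (for a single decision tree these are simply the class fractions in the leaf the sample falls into):
In [0]:
# sketch: per-class probabilities for the same customer
clf.predict_proba(sample)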
In [19]:
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42, stratify=y)
X_train.shape, y_train.shape, X_test.shape, y_test.shape
Out[19]:
In [0]:
# the split should yield 80% (1,200) training and 20% (300) test samples, each with 3 features;
# integer sizes avoid comparing shapes against floats like 1500 * 0.2
n_test = len(X) // 5
n_train = len(X) - n_test
assert X_train.shape == (n_train, 3)
assert y_train.shape == (n_train,)
assert X_test.shape == (n_test, 3)
assert y_test.shape == (n_test,)
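Since the split was stratified on y, the three groups should appear in (almost) identical proportions in both parts; a quick sketch to verify this using standard pandas calls:
In [0]:
# sketch: stratify=y keeps the class distribution of train and test nearly identical
y_train.value_counts(normalize=True), y_test.value_counts(normalize=True)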
In [0]:
X_train_2_dim = X_train[:, :2]
X_test_2_dim = X_test[:, :2]
In [22]:
plotPrediction(None, X_train_2_dim[:, 1], X_train_2_dim[:, 0],
               'Age', 'Max Speed', y_train, title="Train Data");
In [23]:
plotPrediction(None, X_test_2_dim[:, 1], X_test_2_dim[:, 0],
               'Age', 'Max Speed', y_test, title="Test Data");
In [24]:
clf = DecisionTreeClassifier()
%time clf.fit(X_train_2_dim, y_train)
Out[24]:
https://machinelearningmastery.com/classification-and-regression-trees-for-machine-learning/
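To inspect the split rules the tree actually learned, scikit-learn can render a fitted tree as text; a minimal sketch (export_text is part of sklearn.tree since version 0.21, truncated here to the top levels for readability):
In [0]:
# sketch: print the learned decision rules, limited to the first three levels
from sklearn.tree import export_text
print(export_text(clf, feature_names=['speed', 'age'], max_depth=3))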
In [0]:
# the fully grown tree is 18 levels deep, i.e. it performs up to 18 splits before deciding which group a data point belongs to
clf.tree_.max_depth
Out[0]:
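Depth alone understates how fine-grained the fully grown tree is; a sketch inspecting its overall size (node_count and get_n_leaves are standard scikit-learn attributes):
In [0]:
# sketch: total number of nodes and of leaves in the unregularized tree
clf.tree_.node_count, clf.get_n_leaves()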
In [0]:
plotPrediction(clf, X_train_2_dim[:, 1], X_train_2_dim[:, 0],
               'Age', 'Max Speed', y_train,
               title="Train Data, Decision Tree");
In [0]:
clf.score(X_train_2_dim, y_train)
Out[0]:
In [0]:
plotPrediction(clf, X_test_2_dim[:, 1], X_test_2_dim[:, 0],
               'Age', 'Max Speed', y_test,
               title="Test Data, Decision Tree");
In [0]:
clf.score(X_test_2_dim, y_test)
Out[0]:
In [0]:
clf = DecisionTreeClassifier(max_depth=10)
%time clf.fit(X_train_2_dim, y_train)
Out[0]:
In [0]:
clf.tree_.max_depth
Out[0]:
In [0]:
plotPrediction(clf, X_train_2_dim[:, 1], X_train_2_dim[:, 0],
               'Age', 'Max Speed', y_train,
               title="Train Data, Regularized Decision Tree");
In [0]:
clf.score(X_train_2_dim, y_train)
Out[0]:
In [0]:
plotPrediction(clf, X_test_2_dim[:, 1], X_test_2_dim[:, 0],
               'Age', 'Max Speed', y_test,
               title="Test Data, Regularized Decision Tree");
In [0]:
clf.score(X_test_2_dim, y_test)
Out[0]:
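Rather than fixing max_depth at 10 by gut feeling, one can sweep it and watch the gap between train and test accuracy; a minimal sketch (the depth range is an arbitrary assumption, and random_state only makes the runs reproducible):
In [0]:
# sketch: a widening gap between train and test accuracy signals overfitting
for depth in range(1, 21, 2):
    tree = DecisionTreeClassifier(max_depth=depth, random_state=42)
    tree.fit(X_train_2_dim, y_train)
    print(f"depth={depth:2d}  "
          f"train={tree.score(X_train_2_dim, y_train):.3f}  "
          f"test={tree.score(X_test_2_dim, y_test):.3f}")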