Create a multi-layer perceptron neural network model to predict on a labeled dataset of your choosing
Compare this model to either a boosted tree or a random forest model and describe the relative tradeoffs between complexity and accuracy. Be sure to vary the hyperparameters of your MLP!
In [1]:
%matplotlib inline
import numpy as np
import pandas as pd
import scipy
import sklearn
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import train_test_split,cross_val_score, KFold, cross_val_predict
from sklearn.decomposition import PCA
from sklearn.utils import resample
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn import preprocessing, decomposition
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import BernoulliNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.feature_selection import RFE
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import SelectKBest
from sklearn.linear_model import SGDClassifier
from sklearn.neural_network import MLPClassifier
In [2]:
# Read and import data
data = pd.read_csv('breastcancerdata.csv')
data.head()
Out[2]:
In [3]:
#Check the columns in the raw data
data.columns
Out[3]:
In [4]:
#Check the kind of variables in the raw data
data.info()
In [5]:
#deleting the "id" column
data.drop("id",axis=1,inplace=True)
#deleting the "Unnamed: 32" column
data.drop("Unnamed: 32",axis=1,inplace=True)
In [6]:
#Check variables type after deleting the ones we are not using
data.info()
In [7]:
#counting the diagnosis variable
data.diagnosis.value_counts()
Out[7]:
In [8]:
#Transform classifying variable into numeric variable [0,1] and add a column
data.loc[data['diagnosis'] == 'M', 'Diagclass'] = 1
data.loc[data['diagnosis'] == 'B', 'Diagclass'] = 0
#Check dataset
data.head()
Out[8]:
In [9]:
#Dataset with new classifying variable "Diagclass" and without the diagnosis column
data.drop("diagnosis",axis=1,inplace=True)
In [10]:
#Count the values of the new Diagclass variable
data.Diagclass.value_counts()
Out[10]:
In [11]:
#Downsample the majority class to balance the dataset
# Separate majority and minority classes
Diagclass_majority = data[data.Diagclass==0]
Diagclass_minority = data[data.Diagclass==1]
# Downsample the Diagclass majority to match the 212 minority cases
Diagclass_majority_downsampled = resample(Diagclass_majority, replace=False, n_samples=212, random_state=123)
# Combine the downsampled majority class with the minority class
data1 = pd.concat([Diagclass_majority_downsampled, Diagclass_minority])
# Display new class counts
data1.Diagclass.value_counts()
Out[11]:
In [12]:
#Define predictors and predicted variables
X = data1.drop('Diagclass', axis = 1)
Y = data1['Diagclass']
In [13]:
#Preprocess and scale data
names = X.columns
X1 = pd.DataFrame(preprocessing.scale(X), columns = names)
X1.head(2)
Out[13]:
PCA Analysis
In [14]:
# Build the correlation matrix
Z = X1
correlation_matrix = Z.corr()
#Eigenvectors & eigenvalues of the correlation matrix
eig_vals, eig_vecs = np.linalg.eig(correlation_matrix)
sklearn_pca = PCA(n_components=len(Z.columns))
Y_sklearn = sklearn_pca.fit_transform(correlation_matrix)
#Scree plot of the eigenvalues
plt.plot(eig_vals)
plt.show()
print(
'The percentage of total variance in the dataset explained by each',
'component from Sklearn PCA.\n',
sklearn_pca.explained_variance_ratio_
)
In [15]:
#PCA features
# Create a scaler object
sc = StandardScaler()
# Fit the scaler to the features and transform
X_std = sc.fit_transform(X1)
# Create a PCA object; from the scree plot, the number of components is 3
pca = decomposition.PCA(n_components=3)
# Fit the PCA and transform the data
X_std_pca = pca.fit_transform(X_std)
# View the new feature data's shape
X_std_pca.shape
# Create a new dataframe with the new features
XPCA = pd.DataFrame(X_std_pca)
XPCA.head()
Out[15]:
In [16]:
#Calculate Feature Importance using Random Forest
rf = RandomForestClassifier()
rf.fit(X1, Y)
#Define feature importance
feature_importance = rf.feature_importances_
# Make importances relative to max importance.
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.figure(figsize=(7, 30))
plt.subplot(1, 1, 1)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, X1.columns[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Diagclass')
plt.show()
In [17]:
#Feature selection: score each feature so we can start with the ones with the most explanatory power
# feature extraction
test = SelectKBest()
fit = test.fit(X1, Y)
#Identify the features with the highest scores from a predictive perspective
names2 = X1.columns
Bestfeatures = pd.DataFrame(fit.scores_, index = names2)
Bestfeatures.columns = ['Best Features']
Bestfeatures.sort_values(by=['Best Features'], ascending=False)
Out[17]:
In [18]:
# Create the RFE model and select features
#From the PCA analysis, the number of components is 3
nfeatures = 3
lr = LogisticRegression()
rfe = RFE(lr, n_features_to_select=nfeatures)
fit = rfe.fit(X1,Y)
# summarize the selection of the features
result_RFE = pd.DataFrame(list(zip(X1.head(0), rfe.ranking_, rfe.support_)),columns=['Features','Ranking','Support'] )
result_RFE.sort_values('Ranking')
Out[18]:
Feature Selection
In [19]:
#View all the predictors to make the feature selection
X1.columns
Out[19]:
In [20]:
#Feature Selection using Random Forest
X3 = X1[['perimeter_worst', 'area_worst', 'concave points_mean', 'concavity_mean','radius_worst','perimeter_mean',
'concavity_worst', 'compactness_mean','concave points_worst','compactness_worst']]
#Feature Selection using RFE & PCA
X2 = X1[['radius_worst','concave points_worst','perimeter_worst']]
In [21]:
#Split the data into training and testing datasets. Split: 70/30; train/test
X_train, X_test, y_train, y_test = train_test_split(X2,Y, test_size=0.3, random_state=123)
#Initiating the cross validation generator, N splits = 5
kf = KFold(5)
Random Forest
In [32]:
# Initialize the model
rf = RandomForestClassifier(n_jobs = -1)
#Create range of values to fit parameters
k1 = [20,100,300]
parameters = {'n_estimators':k1}
#Fit parameters
rf1 = GridSearchCV(rf, param_grid=parameters, cv=kf)
#Fit the tuned model
rf1.fit(X_train, y_train)
#The best set of hyperparameters
print("Best Hyper Parameters:", rf1.best_params_)
In [33]:
#Predict on the test dataset with the model fitted on the training data
predtestrf_y = rf1.predict(X_test)
In [34]:
#Test Scores
target_names = ['0', '1']
print(classification_report(y_test, predtestrf_y, target_names=target_names))
cnf = confusion_matrix(y_test, predtestrf_y)
print(cnf)
table_test = pd.crosstab(y_test, predtestrf_y, margins=True)
test_tI_errors = table_test.loc[0.0,1.0]/table_test.loc['All','All']
test_tII_errors = table_test.loc[1.0,0.0]/table_test.loc['All','All']
accrf1 = cross_val_score(rf1,X_test,y_test,cv=kf).mean()
accrf1pca = cross_val_score(rf1,XPCA,Y,cv=kf).mean()
print((
'Random Forest accuracy:{}\n'
'Random Forest accuracy PCA:{}\n'
'Percent Type I errors: {}\n'
'Percent Type II errors: {}'
).format(accrf1,accrf1pca,test_tI_errors, test_tII_errors))
Gradient Boosting
In [41]:
# Train model
GBC = GradientBoostingClassifier()
k1 = ['log_loss','exponential']   # 'deviance' was renamed 'log_loss' in recent scikit-learn
k2 = np.arange(100)+1
k5 = ['friedman_mse','squared_error']   # 'mse' is now 'squared_error'; 'mae' was removed
parameters = {'loss': k1,
'n_estimators': k2,
'criterion': k5}
#Fit parameters
GBC1 = GridSearchCV(GBC, param_grid=parameters, cv=kf)
#Fit the tuned model
GBC1.fit(X_train, y_train)
#The best set of hyperparameters
print("Best Hyper Parameters:", GBC1.best_params_)
In [42]:
# Predict on the test set with the model fitted on the training data
predtestgb_y = GBC1.predict(X_test)
In [43]:
#Test Scores
target_names = ['0', '1']
print(classification_report(y_test, predtestgb_y, target_names=target_names))
cnf = confusion_matrix(y_test, predtestgb_y)
print(cnf)
table_test = pd.crosstab(y_test, predtestgb_y, margins=True)
test_tI_errors = table_test.loc[0.0,1.0]/table_test.loc['All','All']
test_tII_errors = table_test.loc[1.0,0.0]/table_test.loc['All','All']
accGBC1 = cross_val_score(GBC1,X_test,y_test,cv=kf).mean()
accGBC1pca = cross_val_score(GBC1,XPCA,Y,cv=kf).mean()
print((
'Gradient Boosting accuracy:{}\n'
'Gradient Boosting accuracy PCA:{}\n'
'Percent Type I errors: {}\n'
'Percent Type II errors: {}'
).format(accGBC1,accGBC1pca,test_tI_errors, test_tII_errors))
Neural Network
In [23]:
# Initialize and fit the model.
mlp = MLPClassifier(max_iter=1000, tol=0.01)
#Tune hyperparameters
#Create range of values to fit parameters
hidden_layer_sizes= [(50,),(100,),(1000,),(1000,10),(100,20)]
alpha = 10.0 ** -np.arange(1, 3)
activation = ['identity', 'logistic', 'tanh', 'relu']
parameters = {'hidden_layer_sizes': hidden_layer_sizes,
'alpha': alpha,
'activation': activation}
#Fit parameters using gridsearch
mlp_tuned = GridSearchCV(mlp, param_grid=parameters, cv=5)
#Fit the tuned model
mlp_tuned.fit(X_train, y_train)
Out[23]:
In [24]:
# Predict on the test set with the model fitted on the training data
predtest_y = mlp_tuned.predict(X_test)
In [25]:
#Test Scores
target_names = ['0', '1']
print(classification_report(y_test, predtest_y, target_names=target_names))
cnf = confusion_matrix(y_test, predtest_y)
print(cnf)
table_test = pd.crosstab(y_test, predtest_y, margins=True)
test_tI_errors = table_test.loc[0.0,1.0]/table_test.loc['All','All']
test_tII_errors = table_test.loc[1.0,0.0]/table_test.loc['All','All']
accmlp = cross_val_score(mlp_tuned,X_test,y_test,cv=kf).mean()
accmlppca = cross_val_score(mlp_tuned,XPCA,Y,cv=kf).mean()
print((
'MLP accuracy:{}\n'
'MLP accuracy PCA:{}\n'
'Percent Type I errors: {}\n'
'Percent Type II errors: {}'
).format(accmlp,accmlppca,test_tI_errors, test_tII_errors))
In [27]:
#Print the best parameters
print(mlp_tuned.best_params_)
Conclusion
A multi-layer perceptron neural network model has been set up to predict on the labeled breast cancer diagnosis dataset. Hyperparameters for the activation function, the regularization term alpha, and the hidden layer sizes have been tuned, achieving an accuracy of 94%. The best model is a two-layer MLP (100 and 20 neurons, respectively) with an alpha of 0.1 and the identity activation function.
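As a minimal sketch (assuming the X_train/X_test split defined above), the winning configuration could be re-created as a standalone estimator rather than through the grid search:
In [ ]:
#Sketch: re-create the best configuration reported by the grid search above
best_mlp = MLPClassifier(hidden_layer_sizes=(100, 20), activation='identity',
                         alpha=0.1, max_iter=1000, tol=0.01)
best_mlp.fit(X_train, y_train)
print('Test accuracy:', best_mlp.score(X_test, y_test))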
Compared to the accuracy achieved with Random Forest (93%) and Gradient Boosting (89%), the MLP is considerably more complex: it must learn thousands of connection weights and each fit is slower. As a consequence, its hyperparameter search had to be much more restricted than those for the Random Forest and Gradient Boosting models due to computational constraints.
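One rough way to quantify that complexity gap is to count what each model has learned. The sketch below assumes the fitted mlp_tuned and rf1 objects from the cells above (GridSearchCV refits the best estimator on the training data by default):
In [ ]:
#Sketch: compare model complexity by counting learned weights vs. tree nodes
best_mlp = mlp_tuned.best_estimator_
n_weights = (sum(w.size for w in best_mlp.coefs_)
             + sum(b.size for b in best_mlp.intercepts_))
best_rf = rf1.best_estimator_
n_nodes = sum(tree.tree_.node_count for tree in best_rf.estimators_)
print('MLP trainable weights:', n_weights)
print('Random Forest decision nodes:', n_nodes)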