In [1]:
# Load necessary libraries; pandas is imported as pd for convenience
import pandas as pd
import seaborn as sns
from pandas.plotting import scatter_matrix  # pandas.tools.plotting was removed in newer pandas
import matplotlib.pyplot as plt
# from sklearn import cross_validation
# !! Divergence from the tutorial: the cross_validation module is deprecated,
# so model_selection (train_test_split, cross_val_score, KFold) is used instead
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
In [2]:
# Load dataset
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
dataset = pd.read_csv(url, names=names)
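In [ ]:
# Optional fallback, not part of the original tutorial: if the UCI URL is
# unreachable, the same data ships with scikit-learn. A minimal sketch; the
# 'Iris-' prefix is added only so the labels match the CSV class names above.
from sklearn.datasets import load_iris

iris = load_iris()
dataset = pd.DataFrame(iris.data, columns=names[0:4])
dataset['class'] = ['Iris-' + iris.target_names[t] for t in iris.target]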
In [3]:
print(dataset.shape)
In [3]:
print(dataset.head(20))
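In [ ]:
# Extra exploratory step (a small addition to the notebook): statistical summary
# of each attribute and the number of rows per class
print(dataset.describe())
print(dataset.groupby('class').size())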
In [4]:
# box and whisker plot for each attribute
dataset.plot(kind='box', subplots=True, layout=(2,2), sharex=False, sharey=False)
plt.show()
In [5]:
dataset.hist()
plt.show()
In [6]:
scatter_matrix(dataset)
plt.show()
In [18]:
# Split-out validation dataset
array = dataset.values
X = array[:,0:4]
Y = array[:,-1]
validation_size = 0.20
seed = 7
# Use the newer train_test_split from sklearn.model_selection instead of the deprecated cross_validation module
X_train, X_validation, Y_train, Y_validation = train_test_split(X, Y, test_size=validation_size, random_state=seed)
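In [ ]:
# Optional variant, not in the tutorial: passing stratify=Y keeps the class
# proportions identical in the training and validation splits. A sketch reusing
# the same X, Y, validation_size and seed; the *_s names are illustrative only.
X_train_s, X_validation_s, Y_train_s, Y_validation_s = train_test_split(
    X, Y, test_size=validation_size, random_state=seed, stratify=Y)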
In [19]:
# Test options and evaluation metric
seed = 7
scoring = 'accuracy'
In [31]:
# Spot Check Algorithms
models = []
models.append(('LR', LogisticRegression()))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('NB', GaussianNB()))
models.append(('SVM', SVC()))
# Evaluate each model in turn with 10-fold cross-validation on the training data
results = []
names = []
kfold = KFold(n_splits=10, shuffle=True, random_state=seed)
for name, model in models:
    cv_results = cross_val_score(model, X_train, Y_train, cv=kfold, scoring=scoring)
    results.append(cv_results)
    names.append(name)
    msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
    print(msg)
In [33]:
# Compare Algorithms
fig = plt.figure()
fig.suptitle('Algorithm Comparison')
ax = fig.add_subplot(111)
plt.boxplot(results)
ax.set_xticklabels(names)
plt.show()
In [34]:
knn = KNeighborsClassifier()
knn.fit(X_train, Y_train)
predictions = knn.predict(X_validation)
In [63]:
print("Accuracy: %.2f" % accuracy_score(Y_validation, predictions))
indexes = ['Iris-setosa', 'Iris-versicolor', 'Iris-virginica']
# Confusion matrix labelled with the class names
df = pd.DataFrame(
    data=confusion_matrix(Y_validation, predictions),
    index=indexes,
    columns=indexes
)
print(df)
print(classification_report(Y_validation, predictions))
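In [ ]:
# Optional visual check, not in the tutorial: seaborn is already imported above,
# so the labelled confusion matrix df can be drawn as an annotated heatmap (a sketch)
ax = sns.heatmap(df, annot=True, fmt='d', cmap='Blues')
ax.set_xlabel('Predicted class')
ax.set_ylabel('True class')
plt.show()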
In [ ]: