In [1]:
# Python version
import sys
print('Python: {}'.format(sys.version))
# scipy
import scipy
print('scipy: {}'.format(scipy.__version__))
# numpy
import numpy
print('numpy: {}'.format(numpy.__version__))
# matplotlib
import matplotlib
print('matplotlib: {}'.format(matplotlib.__version__))
# pandas
import pandas
print('pandas: {}'.format(pandas.__version__))
# scikit-learn
import sklearn
print('sklearn: {}'.format(sklearn.__version__))
In [2]:
import pandas
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
# Following http://machinelearningmastery.com/machine-learning-in-python-step-by-step/
In [4]:
names = ['direction', 'pitch', 'roll', 'yaw']
dataset = pandas.read_csv("../rawsample-munged.csv", names=names, header=1)
In [5]:
# shape
print(dataset.shape)
In [6]:
# head
print(dataset.head(20))
In [7]:
# descriptions
print(dataset.describe())
In [8]:
# class distribution
print(dataset.groupby('direction').size())
In [9]:
# box and whisker plots
dataset.plot(kind='box', subplots=True, layout=(2,2), sharex=False, sharey=False)
plt.show()
In [10]:
# histograms
dataset.hist()
plt.show()
In [11]:
# scatter plot matrix
scatter_matrix(dataset)
plt.show()
In [13]:
# Split-out validation dataset: column 0 (direction) is the class label, the rest are features
array = dataset.values
X = array[:,1:]
Y = array[:,0]
validation_size = 0.20
seed = 7
X_train, X_validation, Y_train, Y_validation = train_test_split(X, Y, test_size=validation_size, random_state=seed)
In [14]:
print(array[:,1:])  # Features (pitch, roll, yaw)
print(array[:,0])   # Labels (direction)
In [16]:
# Test options and evaluation metric
seed = 7
scoring = 'accuracy'
In [18]:
# Spot Check Algorithms
models = []
models.append(('LR', LogisticRegression()))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('NB', GaussianNB()))
models.append(('SVM', SVC()))
# evaluate each model in turn
results = []
names = []
for name, model in models:
    kfold = KFold(n_splits=10, shuffle=True, random_state=seed)
    cv_results = cross_val_score(model, X_train, Y_train, cv=kfold, scoring=scoring)
    results.append(cv_results)
    names.append(name)
    msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
    print(msg)
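The next cell loads a pickled classifier from ../filename.pkl, but this transcript never shows that model being trained or saved. A minimal sketch of how such a pickle could be produced, assuming (for illustration only) that the KNN model is the one kept, would be:
# Illustrative sketch only -- not part of the original notebook.
# Fit the chosen model on the training split and persist it with joblib.
import joblib

final_model = KNeighborsClassifier()
final_model.fit(X_train, Y_train)
joblib.dump(final_model, '../filename.pkl')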
In [7]:
import joblib
clf = joblib.load('../filename.pkl')
In [17]:
target_names = ['Top', 'Right', 'Bottom', 'Left']
In [24]:
# Predict the direction for a single pitch/roll/yaw reading (predict expects a 2D array)
target_names[int(clf.predict([[-13, -10, 3]])[0])]
Out[24]:
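classification_report, confusion_matrix, and accuracy_score are imported at the top but never used in this transcript. A short sketch of how the loaded classifier could be checked against the held-out validation split (the step the machinelearningmastery.com walkthrough ends with), assuming clf was trained on the same pitch/roll/yaw features:
# Sketch only: score the loaded classifier on the validation split.
predictions = clf.predict(X_validation)
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))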