In [1]:
import datasets.cifar10 as cifar10
import numpy as np

# Load MNIST from CSV: column 0 is the digit label, columns 1+ are the 784 pixel values.
mnist = np.loadtxt('datasets/mnist/train.csv', skiprows=1, delimiter=',')

# Fixed 32000-row train/test split (no shuffling — assumes the file's row order is random).
# np.loadtxt yields float64, so cast labels to int: float values are not valid
# NumPy indices and would make the one-hot assignment below raise IndexError.
trainX = mnist[:32000, 1:]
trainY = mnist[:32000, 0].astype(int)
testX = mnist[32000:, 1:]
testY = mnist[32000:, 0].astype(int)

# Standardize pixels with *training* statistics only (avoids test-set leakage).
mean, std = np.mean(trainX), np.std(trainX)
trainX = (trainX - mean) / std
testX = (testX - mean) / std

# One-hot encode labels with shape (numClasses, numSamples): column i is sample i.
# Vectorized fancy indexing replaces the original per-sample Python loops.
numClasses = len(np.unique(trainY))
oneHotTrainY = np.zeros(shape=(numClasses, trainX.shape[0]))
oneHotTrainY[trainY, np.arange(trainX.shape[0])] = 1
oneHotTestY = np.zeros(shape=(numClasses, testX.shape[0]))
oneHotTestY[testY, np.arange(testX.shape[0])] = 1
In [2]:
import NeuralNetwork2Layer

# Two-layer classifier: 50 hidden units, 10 output classes (one per digit).
nn = NeuralNetwork2Layer.NeuralNetworkClassifier(50, 10, epochs=700, learningRate=0.01, batchSize=256, l2=0.001)

# Validation callback: fraction of held-out samples the model predicts correctly.
def validationAccuracy(model):
    return np.sum(model.predict(testX) == testY) / len(testX)

nn.fit(trainX, oneHotTrainY, validationFunc=validationAccuracy)
In [ ]:
import matplotlib.pyplot as plt
%matplotlib inline
fig, ax = plt.subplots(nrows=2, ncols=2)
ax[0, 0].scatter(np.arange(0, len(nn.batchLosses)), nn.batchLosses)
plt.xlabel('Batch Epoch')
plt.ylabel('Batch Loss')
ax[0, 1].scatter(np.arange(0, len(nn.losses)), nn.losses)
plt.xlabel('Epoch')
plt.ylabel('Loss')
ax[1, 0].scatter(np.arange(0, len(nn.trainAccuracies)), nn.trainAccuracies, color='g')
ax[1, 0].axhline(1)
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
fig.set_size_inches((20, 15))