In [1]:
from partSix import *
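partSix itself isn't reproduced here; the cells below assume it pulls numpy, the pylab plotting functions, and scipy's optimize into the namespace and defines the Neural_Network and trainer classes from the earlier parts. A rough, non-authoritative sketch of the network class those cells rely on (the 2-3-1 layer sizes, sigmoid activations, and method names are assumptions inferred from how it is called later):

In [ ]:
#Sketch of the Neural_Network assumed to come from partSix:
import numpy as np
from scipy import optimize

class Neural_Network(object):
    def __init__(self):
        #Hyperparameters (2 inputs, 3 hidden units, 1 output assumed):
        self.inputLayerSize = 2
        self.hiddenLayerSize = 3
        self.outputLayerSize = 1
        #Weights (parameters):
        self.W1 = np.random.randn(self.inputLayerSize, self.hiddenLayerSize)
        self.W2 = np.random.randn(self.hiddenLayerSize, self.outputLayerSize)

    def forward(self, X):
        #Propagate inputs through the network:
        self.z2 = np.dot(X, self.W1)
        self.a2 = self.sigmoid(self.z2)
        self.z3 = np.dot(self.a2, self.W2)
        return self.sigmoid(self.z3)

    def sigmoid(self, z):
        return 1/(1+np.exp(-z))

    def sigmoidPrime(self, z):
        #Derivative of the sigmoid, evaluated at z:
        return np.exp(-z)/((1+np.exp(-z))**2)

    def costFunction(self, X, y):
        #Squared-error cost (the regularized version is introduced at the end of this notebook):
        self.yHat = self.forward(X)
        return 0.5*np.sum((y-self.yHat)**2)

    def costFunctionPrime(self, X, y):
        #Backpropagate to get dJ/dW1 and dJ/dW2:
        self.yHat = self.forward(X)
        delta3 = np.multiply(-(y-self.yHat), self.sigmoidPrime(self.z3))
        dJdW2 = np.dot(self.a2.T, delta3)
        delta2 = np.dot(delta3, self.W2.T)*self.sigmoidPrime(self.z2)
        dJdW1 = np.dot(X.T, delta2)
        return dJdW1, dJdW2

    #Helpers used by the trainer to get/set all weights as one flat vector:
    def getParams(self):
        return np.concatenate((self.W1.ravel(), self.W2.ravel()))

    def setParams(self, params):
        W1_end = self.inputLayerSize*self.hiddenLayerSize
        self.W1 = np.reshape(params[:W1_end], (self.inputLayerSize, self.hiddenLayerSize))
        W2_end = W1_end + self.hiddenLayerSize*self.outputLayerSize
        self.W2 = np.reshape(params[W1_end:W2_end], (self.hiddenLayerSize, self.outputLayerSize))

    def computeGradients(self, X, y):
        dJdW1, dJdW2 = self.costFunctionPrime(X, y)
        return np.concatenate((dJdW1.ravel(), dJdW2.ravel()))

The trainer imported alongside it is essentially the class redefined in cell In [17] below, minus the testing-cost bookkeeping.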
In [2]:
NN = Neural_Network()
In [3]:
# X = (hours sleeping, hours studying), y = score on test
X = np.array(([3,5], [5,1], [10,2], [6,1.5]), dtype=float)
y = np.array(([75], [82], [93], [70]), dtype=float)
In [4]:
#Plot projections of our new data:
fig = figure(0, (8,3))
subplot(1,2,1)
scatter(X[:,0], y)
grid(1)
xlabel('Hours Sleeping')
ylabel('Test Score')
subplot(1,2,2)
scatter(X[:,1], y)
grid(1)
xlabel('Hours Studying')
ylabel('Test Score')
Out[4]: [scatter plots of test score vs. hours sleeping (left) and hours studying (right)]
In [5]:
#Normalize
X = X/np.amax(X, axis = 0)
y = y/100 #Max test score is 100
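A quick sanity check (not part of the original run) to see what the scaled values come out to:

In [ ]:
X          # array([[0.3, 1. ], [0.5, 0.2], [1. , 0.4], [0.6, 0.3]])
y.ravel()  # array([0.75, 0.82, 0.93, 0.7 ])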
In [6]:
#Train network with new data:
T = trainer(NN)
T.train(X,y)
In [7]:
#Plot cost during training:
plot(T.J)
grid(1)
xlabel('Iterations')
ylabel('Cost')
Out[7]: [plot of training cost vs. iteration]
In [8]:
#Test network for various combinations of sleep/study:
hoursSleep = linspace(0, 10, 100)
hoursStudy = linspace(0, 5, 100)
#Normalize data (same way training data was normalized)
hoursSleepNorm = hoursSleep/10.
hoursStudyNorm = hoursStudy/5.
#Create 2-d version of input for plotting
a, b = meshgrid(hoursSleepNorm, hoursStudyNorm)
#Join into a single input matrix:
allInputs = np.zeros((a.size, 2))
allInputs[:, 0] = a.ravel()
allInputs[:, 1] = b.ravel()
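A quick shape check (again not part of the original run): meshgrid expands the two 100-point ranges into every pairwise combination, so allInputs ends up with one row per point on the 100x100 grid.

In [ ]:
a.shape, allInputs.shape   # ((100, 100), (10000, 2))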
In [9]:
allOutputs = NN.forward(allInputs)
In [10]:
#Contour Plot:
yy = np.dot(hoursStudy.reshape(100,1), np.ones((1,100)))
xx = np.dot(hoursSleep.reshape(100,1), np.ones((1,100))).T
CS = contour(xx,yy,100*allOutputs.reshape(100, 100))
clabel(CS, inline=1, fontsize=10)
xlabel('Hours Sleep')
ylabel('Hours Study')
Out[10]: [labeled contour plot of predicted test score over hours of sleep and hours of study]
In [13]:
#Training Data:
trainX = np.array(([3,5], [5,1], [10,2], [6,1.5]), dtype=float)
trainY = np.array(([75], [82], [93], [70]), dtype=float)
#Testing Data:
testX = np.array(([4,5.5], [4.5, 1], [9, 2.5], [6,2]), dtype=float)
testY = np.array(([70], [89], [85], [75]), dtype=float)
#Normalize (test inputs are scaled by the training maxima so both sets share the same scale):
trainX = trainX/np.amax(trainX, axis=0)
trainY = trainY/100.
testX = testX/np.amax(trainX, axis=0)
testY = testY/100.
In [17]:
#Modify trainer class to check testing error during training:
class trainer(object):
    def __init__(self, N):
        #Make Local reference to network:
        self.N = N

    def callbackF(self, params):
        self.N.setParams(params)
        self.J.append(self.N.costFunction(self.X, self.y))
        self.testJ.append(self.N.costFunction(self.testX, self.testY))

    def costFunctionWrapper(self, params, X, y):
        self.N.setParams(params)
        cost = self.N.costFunction(X, y)
        grad = self.N.computeGradients(X, y)
        return cost, grad

    def train(self, trainX, trainY, testX, testY):
        #Make an internal variable for the callback function:
        self.X = trainX
        self.y = trainY
        self.testX = testX
        self.testY = testY

        #Make empty list to store costs:
        self.J = []
        self.testJ = []

        params0 = self.N.getParams()

        options = {'maxiter': 200, 'disp': True}
        _res = optimize.minimize(self.costFunctionWrapper, params0, jac=True, method='BFGS',
                                 args=(trainX, trainY), options=options, callback=self.callbackF)

        self.N.setParams(_res.x)
        self.optimizationResults = _res
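Note that the optimizer itself still only sees the training cost, since args=(trainX, trainY); the testing cost is merely recorded at each callback so the two curves can be compared after training.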
In [18]:
#Train network with new data:
NN = Neural_Network()
In [19]:
T = trainer(NN)
T.train(trainX, trainY, testX, testY)
In [22]:
#Plot costs during training:
plot(T.J)
plot(T.testJ)
grid(1)
xlabel('Iterations')
ylabel('Cost')
Out[22]: [training and testing cost vs. iteration]
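If the testing cost flattens out or starts climbing while the training cost keeps falling, the network is fitting noise in the four training examples rather than the underlying relationship. One standard remedy is to penalize large weights, which is what the regularization parameter introduced next controls.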
In [23]:
#Regularization Parameter:
Lambda = 0.0001
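With this penalty added, the quantity being minimized becomes (written out to match the code in the next cell, where $N$ is the number of examples, $\lambda$ is Lambda, and the sums over the weight matrices run over every entry):

$$J = \frac{1}{2N}\sum\left(y-\hat{y}\right)^{2} + \frac{\lambda}{2}\left(\sum {W^{(1)}}^{2} + \sum {W^{(2)}}^{2}\right)$$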
In [ ]:
#Need to make changes to costFunction and costFunctionPrime
def costFunction(self, X, y):
    #Compute cost for given X,y, use weights already stored in class.
    #(Assumes the regularization strength is stored on the network, e.g. self.Lambda set in __init__.)
    self.yHat = self.forward(X)
    J = 0.5*np.sum((y-self.yHat)**2)/X.shape[0] + (self.Lambda/2)*(np.sum(self.W1**2)+np.sum(self.W2**2))
    return J

def costFunctionPrime(self, X, y):
    #Compute derivative with respect to W1 and W2 for a given X and y:
    self.yHat = self.forward(X)

    delta3 = np.multiply(-(y-self.yHat), self.sigmoidPrime(self.z3))
    #Add gradient of regularization term:
    dJdW2 = np.dot(self.a2.T, delta3)/X.shape[0] + self.Lambda*self.W2

    delta2 = np.dot(delta3, self.W2.T)*self.sigmoidPrime(self.z2)
    #Add gradient of regularization term:
    dJdW1 = np.dot(X.T, delta2)/X.shape[0] + self.Lambda*self.W1

    return dJdW1, dJdW2
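In the same notation, the gradients the cell above computes are

$$\frac{\partial J}{\partial W^{(2)}} = \frac{1}{N}\left(a^{(2)}\right)^{T}\delta^{(3)} + \lambda W^{(2)}, \qquad \delta^{(3)} = -\left(y-\hat{y}\right)\odot f'\left(z^{(3)}\right)$$

$$\frac{\partial J}{\partial W^{(1)}} = \frac{1}{N}X^{T}\delta^{(2)} + \lambda W^{(1)}, \qquad \delta^{(2)} = \delta^{(3)}\left(W^{(2)}\right)^{T}\odot f'\left(z^{(2)}\right)$$

so each weight is also pulled toward zero in proportion to $\lambda$, which is what discourages the network from fitting the noise in the training data.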