In [3]:
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
from mpl_toolkits.mplot3d import Axes3D
import ipywidgets as widg  # formerly IPython.html.widgets
from IPython.display import clear_output
import sys
%matplotlib inline
In [4]:
class Network:
    def __init__(self, shape):
        """The base network class. This defines a simple feed-forward network with appropriate weights and biases.

        Arguments:
        shape (list-like): Defines the number of layers and the number of neurons per layer in your network.
            Each element of the array or list adds a new layer with the number of neurons specified by that element.
        Variables:
        self.shape: see shape.
        self.weights: A list of numpy arrays containing the weights corresponding to each channel between neurons.
        self.biases: A list of numpy arrays containing the biases corresponding to each neuron.
        self.errors: A list of numpy arrays containing the error of each neuron in any iteration of the training process.
        self.eta: A float representing the learning rate.
        self.lam: A scale factor used in L2 regularization.
        """
        self.shape = np.array(shape)  # shape is array-like, i.e. (2,3,4) is a 2-input, 3-hidden-node, 4-output network
        self.weights = [np.random.ranf((self.shape[i], self.shape[i-1]))*.1 for i in range(1, len(self.shape))]
        self.biases = [np.random.ranf((self.shape[i],))*.1 for i in range(1, len(self.shape))]
        self.errors = [np.random.ranf((self.shape[i],)) for i in range(1, len(self.shape))]
        self.eta = .1
        self.lam = .01
        self.wrong = 0  # steps on which the maximum output error exceeded the threshold in calc_learning_rate
        self.total = 0  # total training steps seen
    def sigmoid(self, inputs):
        """Computes the sigmoid function of some input.

        Arguments:
        inputs (float or numpy array): The input or inputs to be fed through the sigmoid function.
        """
        return 1/(1+np.exp(-inputs))
    def feedforward(self, inputs):
        """Feeds inputs through the network and returns the output.

        Arguments:
        inputs (numpy array): The inputs to the network; must be the same size as the first (input) layer.
        Variables:
        self.activation: A list of numpy arrays corresponding to the output of each neuron in your network.
        """
        assert inputs.shape == (self.shape[0],)  # inputs must feed directly into the first layer
        self.activation = [np.zeros((self.shape[i],)) for i in range(len(self.shape))]
        self.activation[0] = inputs
        for i in range(1, len(self.shape)):
            self.activation[i] = self.sigmoid(np.dot(self.weights[i-1], self.activation[i-1]) + self.biases[i-1])
        return self.activation[-1]
    def calc_learning_rate(self, grad):
        """Sets self.eta from the largest output error, using two Gaussian-shaped schedules."""
        if grad > .85:
            self.eta = .1/grad**.1 * 1/(.25*(2*np.pi)**.5) * np.exp(-grad**2/(2*.25**2))
            self.wrong += 1
        else:
            self.eta = .1/grad**.6 * 1/(.4*(2*np.pi)**.5) * np.exp(-grad**2/(2*.4**2)) * (grad+.08)
        self.total += 1  # count every step so get_fractional_err returns a true fraction
    def comp_error(self, answer):
        """Computes the errors of each neuron (typically called back propagation).

        Arguments:
        answer (numpy array): The expected output from the network.
        """
        self.calc_learning_rate(np.amax(np.abs(self.activation[-1] - answer)))
        assert answer.shape == self.activation[-1].shape
        # Output-layer error: derivative of the tan-based cost times the sigmoid
        # derivative, written here as exp(z)/(exp(z)+1)**2.
        diff = self.activation[-1] - answer
        z = np.dot(self.weights[-1], self.activation[-2]) + self.biases[-1]
        self.errors[-1] = np.pi*np.tan(np.pi/2*diff) / np.cos(np.pi/2*diff)**2 * np.exp(z)/(np.exp(z)+1)**2
        # Propagate the error backwards through the hidden layers.
        for i in range(len(self.shape)-2, 0, -1):
            z = np.dot(self.weights[i-1], self.activation[i-1]) + self.biases[i-1]
            self.errors[i-1] = self.weights[i].transpose().dot(self.errors[i]) * np.exp(z)/(np.exp(z)+1)**2
    def grad_descent(self):
        """Changes each variable based on the gradient descent algorithm."""
        for i in range(len(self.weights)):
            self.biases[i] = self.biases[i] - self.eta*self.errors[i]
            # L2-regularized weight update; np.outer replaces the original
            # elementwise double loop without changing the arithmetic.
            self.weights[i] = (1 - self.eta*self.lam/1000)*self.weights[i] \
                - self.eta*np.outer(self.errors[i], self.activation[i])
    def train(self, inputs, answer):
        """Trains the network.

        Arguments:
        inputs (numpy array): The inputs to the network; must be the same size as the first (input) layer.
        answer (numpy array): The expected output from the network; must be the same size as the last (output) layer.
        """
        self.feedforward(inputs)
        self.comp_error(answer)
        self.grad_descent()

    def get_fractional_err(self):
        """Returns the fraction of training steps whose maximum output error exceeded .85."""
        return self.wrong / float(self.total)
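comp_error writes the sigmoid derivative as exp(z)/(exp(z)+1)**2; here is a quick sanity check (not part of the original run) that this matches the usual form sigmoid(z)*(1 - sigmoid(z)):
In [ ]:
# Illustrative check: exp(z)/(exp(z)+1)**2 equals sigmoid(z)*(1-sigmoid(z)).
z = np.linspace(-5, 5, 101)
s = 1/(1+np.exp(-z))
print(np.allclose(np.exp(z)/(np.exp(z)+1)**2, s*(1-s)))  # True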
In [5]:
n1 = Network([2,15,1])
print(n1.feedforward(np.array([1,2])))
for i in range(1000):
    n1.train(np.array([1,2]), np.array([.5]))
print(n1.feedforward(np.array([1,2])))
In [6]:
from sklearn.datasets import load_digits
digits = load_digits()
print(digits.data[0]*.01)
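Each sample is a flattened 8x8 grayscale image; the .01 scaling keeps the inputs small so the sigmoids do not saturate. A quick look at the data (illustrative, not part of the original run):
In [ ]:
# digits.data is (1797, 64); digits.images holds the same samples in 8x8 form.
print(digits.data.shape)
plt.matshow(digits.images[0], cmap='gray')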
In [6]:
# Ten networks with hidden-layer sizes 50 through 59.
num = []
for i in range(50,60):
    num.append(Network([64,i,10]))
In [7]:
# %timeit num[0].feedforward(digits.data[89]*.01)
# %timeit num[0].comp_error(np.eye(10)[digits.target[89]])
# %timeit num[0].grad_descent()
In [8]:
def Train_it(num, itera):
    """Trains the network num for itera epochs on 1000 digits; returns per-epoch accuracy on the held-out digits."""
    iden = np.eye(10)
    acc = np.zeros((itera,))
    trainer = list(zip(digits.data, digits.target))
    perm = np.random.permutation(len(trainer))
    trains = [trainer[j] for j in perm[:1000]]
    test = [trainer[j] for j in perm[1000:]]
    print(num.feedforward(digits.data[89]*.01))
    for i in range(itera):
        print(100.0*i/itera)  # percent complete
        for dig, ans in trains:
            num.train(dig*.01, iden[ans])
        cor = 0
        tot = 0
        for dig, ans in test:
            if num.feedforward(dig*.01).argmax() == ans:
                cor += 1
            tot += 1
        acc[i] = cor/float(tot)
    return acc
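Train_it uses rows of np.eye(10) as one-hot target vectors; for instance (illustrative):
In [ ]:
# iden[ans] puts a 1 at the label's index and 0 elsewhere.
print(np.eye(10)[3])  # [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]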
In [9]:
acc = Train_it(num[8], 20)
print(acc)
print(num[8].get_fractional_err())
In [10]:
accu = np.zeros((len(num), 50))
fracerr = np.zeros((len(num),))
for i in range(len(num)):
    accu[i] = Train_it(num[i], 50)
    fracerr[i] = num[i].get_fractional_err()
print(accu)
print(fracerr)
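The 3-D plot further below reloads the accuracy surface from disk; the save cell is not shown in this notebook, but it was presumably something like this sketch (the filename is taken from the np.loadtxt call that follows):
In [ ]:
# Hypothetical save step matching the later load of "Accuracy_Data_run_11.dat".
np.savetxt("Accuracy_Data_run_11.dat", accu)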
In [12]:
for i in range(len(num)):
    plt.figure(figsize=(15,10))
    plt.plot(np.linspace(0,50,50), accu[i])
In [7]:
acc0 = np.loadtxt("Accuracy_Data_run_11.dat")
In [14]:
plt.figure(figsize=(15,10))
plt.plot(np.arange(len(num)), fracerr)
In [8]:
def plot_epochs(az_angle, eleva):
    """Plots the accuracy surface (epoch vs. network) viewed from the given azimuth and elevation."""
    fig = plt.figure(figsize=(15, 10))
    ax = fig.add_subplot(111, projection='3d')
    X, Y = np.meshgrid(np.linspace(0, 50, 50), np.linspace(0, 20, 20))
    ax.plot_surface(X, Y, acc0)
    ax.view_init(elev=eleva, azim=az_angle)
In [9]:
widg.interact(plot_epochs, az_angle=(0, 360, 1), eleva=(0,20,1))
In [ ]: