In [1]:
%matplotlib inline
import matplotlib.pyplot as plt
from matplotlib.pylab import *
from IPython.html.widgets import interact
from scipy.interpolate import griddata
import numpy as np


:0: FutureWarning: IPython widgets are experimental and may change in the future.

In [2]:
from sklearn.datasets import load_digits
digits = load_digits()
print(digits.data.shape)


(1797, 64)

In [3]:
"""Initialize
Creates the random weights that connect the input to the output"""
#ni = number of inputs
#nh = hidden nodes
#no = output nodes

ni = 144
nh = 144
no = 10

w = [np.random.rand(ni,nh),np.random.rand(nh,no)]
bias = [np.zeros(nh),np.zeros(no)]

f = open('x_files', 'r')
r = np.load(f)
f.close()

f = open('y_files', 'r')
s = np.load(f)
f.close()

In [4]:
def feed_forward(n,w):
    out = [0,0]
    out[0] = np.dot(n,w[0])
    out[1] = np.dot(out[0],w[1])
    return out

In [5]:
def activation(z):
    num = np.argmax(z)
    return [z,num]

In [6]:
def update_batch(n,w,bias,batch,c,r,s):
    
    c = c + 1
    error = 0
    
    x = np.matrix(r[n])
    x = x/(np.matrix.sum(x)*5)
    x = np.reshape(x,(1,144))
    
    p = int(s[n])
    
    ideal = np.zeros((1,10))
    ideal[0,p] = 1 
    
    out = feed_forward(x,w)
    err1 = (ideal - out[1])
    modw1 = np.dot(np.reshape(out[0],(nh,1)),np.reshape(err1,(1,no)))
    err0 = np.dot(err1,np.transpose(w[1]))
    modw0 = np.dot(np.reshape(np.transpose(x),(ni,1)),np.reshape(err0,(1,nh)))
    
    if batch == 0:
        batch = [err0,modw0,err1,modw1]
    else:
        batch = batch + [err0,modw0,err1,modw1]
    #batch = batch + [err0,modw0,err1,modw1]
    
    if c > 10:
        back = backprop(w,bias,batch)
        w = back[0]
        bias = back[1]
        batch = 0
        c = 0
        
    if p != np.argmax(out[1]):
        error =  1
    
    return [w,bias,error,batch,c]

In [7]:
def backprop(w,bias,batch):
    LC = .01
    err0 = batch[0]
    modw0 = batch[1]
    err1 = batch[2]
    modw1 = batch[3]
    w[1] = w[1] + LC*modw1
    w[0] = w[0] + LC*modw0
    #bias[0] = bias[0] + LC*err0
    #bias[1] = bias[1] + LC*err1
    return [w,bias]

In [8]:
x = np.random.permutation(1700)
x1 = x[0:1000]
x2 = x[1000:1700]

In [11]:
h = 0
error = 0
c = 0
batch = 0
while h < 10:
    error = 0
    for n in x1:
        bat = update_batch(n,w,bias,batch,c,r,s)
        w = bat[0]
        bias = bat[1]
        error = error + bat[2]
        batch = bat[3]
        c = bat[4]
    h = h + 1
print (error)


228

In [12]:
error = 0
for n in x2:

    bat = update_batch(n,w,bias,batch,c,r,s)
    bias = bat[1]
    error = error + bat[2]
    batch = bat[3]
    c = bat[4]
    n = n + 1
print (error)


174

In [89]:
def guess(n,w,r,s):
    x = np.matrix(r[n])
    x = np.reshape(x,(1,144))
    x = x/(np.sum(x)*5)
    print("Guess: ",np.argmax(feed_forward(x,w)[1]), "Actual: ", int(s[n]))

In [97]:
b = 2000

In [178]:
guess(b,w,r,s)
b = b + 1


('Guess: ', 3, 'Actual: ', 5)

In [ ]: