In [1]:
import numpy as np
# We define the NN class here; its constructor takes only the number of nodes in each of the three layers.
class NN:
    def __init__(self, i, h, o):
        self.i = i
        self.h = h
        self.o = o
        # the +1 rows account for the constant bias unit appended to each layer's activations
        self.w1 = np.random.normal(0, 1, (i + 1, h))
        self.w2 = np.random.normal(0, 1, (h + 1, o))
        self.I = np.zeros(i + 1)
        self.H = np.zeros(h + 1)
        self.O = np.zeros(o)
    def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))

    def dsigmoid(self, y):
        # sigmoid derivative expressed in terms of the sigmoid output y: s'(x) = y*(1 - y)
        return np.multiply(y, 1 - y)
    def feedforward(self, inputs):
        # append a constant 1 to the input and hidden activations for the bias
        self.I = np.append(inputs, [1])
        self.H = np.append(self.sigmoid(np.dot(self.I, self.w1)), [1])
        self.O = self.sigmoid(np.dot(self.H, self.w2))
        return [self.H, self.O]
    def backpropagate(self, inputs, targets, lr=0.001, lossfunc="logloss"):
        hiddens, outputs = self.feedforward(inputs)
        if lossfunc == "squareloss":
            # derivative of 0.5*(outputs - targets)^2 w.r.t. outputs
            # (the negative of the error, hence no sign flip in the update below)
            errorterm2 = outputs - targets
        elif lossfunc == "logloss":
            # derivative of -t*log(O) - (1-t)*log(1-O) w.r.t. O; the 0.0001 offsets
            # keep the denominators away from zero. Up to those epsilons,
            # errorterm2 * dsigmoid(outputs) simplifies to outputs - targets.
            errorterm2 = -targets / (0.0001 + outputs) + (1 - targets) / (1.0001 - outputs)
        # chain through the output sigmoid; the outer product with the hidden
        # activations gives the w2 gradient
        delta2 = errorterm2 * self.dsigmoid(outputs)
        w2gradient = np.dot(self.H.reshape((self.h + 1, 1)), delta2.reshape((1, self.o)))
        # propagate the error back through w2 (dropping the bias row), chain
        # through the hidden sigmoid, and take the outer product with the inputs
        errorterm1 = np.sum(np.multiply(self.w2, delta2)[:-1], axis=1)
        w1gradient = np.dot(self.I.reshape((self.i + 1, 1)),
                            (errorterm1 * self.dsigmoid(hiddens[:-1])).reshape((1, self.h)))
        # gradient-descent update
        self.w1 = self.w1 - w1gradient * lr
        self.w2 = self.w2 - w2gradient * lr
    def train(self, inputs, targets, lr=0.001, niter=1000, lossfunc="logloss"):
        for epoch in range(niter):
            for row in range(len(inputs)):
                self.backpropagate(inputs[row], targets[row], lr, lossfunc)
            self.evaluate(inputs, targets, lossfunc)

    def evaluate(self, inputs, targets, lossfunc="logloss"):
        # the mean squared error is printed for both loss functions so the two
        # training runs can be compared on the same scale
        print(np.mean([np.square(self.feedforward(inputs[ii])[1] - targets[ii]) for ii in range(len(inputs))]))
        print("   *****error printed***** ")

In [2]:
model = NN(2, 5, 2)

In [3]:
ins = np.random.randint(0, 2, (4000, 2))
# targets: the first column is the AND of the two input bits, the second is the OR
outs = np.array([[g[0] * g[1], (g[0] + g[1] > 0) * 1] for g in ins])
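
As a quick sketch (not executed in this notebook), the target encoding can be checked on the four possible input pairs; the first entry is the AND of the bits and the second is the OR:

In [ ]:
# enumerate the four possible binary input pairs and their [AND, OR] targets
for g in ([0, 0], [0, 1], [1, 0], [1, 1]):
    print(g, [g[0] * g[1], (g[0] + g[1] > 0) * 1])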

In [4]:
model.train(ins, outs, 0.5, 20, "squareloss")


0.00162436449696
   *****error printed***** 
0.000646099418856
   *****error printed***** 
0.000390682500925
   *****error printed***** 
0.00027669592903
   *****error printed***** 
0.000212908552547
   *****error printed***** 
0.000172396017692
   *****error printed***** 
0.000144490381554
   *****error printed***** 
0.000124150593622
   *****error printed***** 
0.00010869477713
   *****error printed***** 
9.65686033664e-05
   *****error printed***** 
8.68108197803e-05
   *****error printed***** 
7.87958650359e-05
   *****error printed***** 
7.20995467743e-05
   *****error printed***** 
6.64243029203e-05
   *****error printed***** 
6.1555379881e-05
   *****error printed***** 
5.73339918629e-05
   *****error printed***** 
5.36402619141e-05
   *****error printed***** 
5.03820303676e-05
   *****error printed***** 
4.74873076053e-05
   *****error printed***** 
4.48990601161e-05
   *****error printed***** 

In [5]:
model2 = NN(2, 5, 2)
model2.train(ins, outs, 0.5, 20, "logloss")


5.44910980478e-06
   *****error printed***** 
1.13107953975e-06
   *****error printed***** 
4.6005458597e-07
   *****error printed***** 
2.44670966884e-07
   *****error printed***** 
1.50407552678e-07
   *****error printed***** 
1.01247517461e-07
   *****error printed***** 
7.25329977389e-08
   *****error printed***** 
5.43733219828e-08
   *****error printed***** 
4.21919524357e-08
   *****error printed***** 
3.3640920502e-08
   *****error printed***** 
2.74174149589e-08
   *****error printed***** 
2.2752595616e-08
   *****error printed***** 
1.9169570746e-08
   *****error printed***** 
1.63601199906e-08
   *****error printed***** 
1.41180352106e-08
   *****error printed***** 
1.23012348437e-08
   *****error printed***** 
1.08093031287e-08
   *****error printed***** 
9.56968239453e-09
   *****error printed***** 
8.52891321952e-09
   *****error printed***** 
7.64692139739e-09
   *****error printed***** 
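
As a final sketch (also unexecuted), both trained models can be inspected on the four possible input pairs; the rounded outputs should sit close to the [AND, OR] targets.

In [ ]:
for pair in ([0, 0], [0, 1], [1, 0], [1, 1]):
    print(pair,
          np.round(model.feedforward(pair)[1], 3),   # squareloss model
          np.round(model2.feedforward(pair)[1], 3))  # logloss model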
