In [ ]:
from math import *

In [ ]:
import pandas as pd
import numpy as np

In [ ]:
import scipy as scp

In [ ]:
%matplotlib inline
import matplotlib.pylab as plt

In [ ]:
def sigmoid(z):
    """Elementwise logistic sigmoid, 1 / (1 + e^-z), computed overflow-safely.

    The naive form np.exp(-z) overflows (RuntimeWarning) for large negative z.
    Using t = exp(-|z|) keeps the exponent non-positive in both branches:
        z >= 0:  1 / (1 + t)
        z <  0:  t / (1 + t)
    which is mathematically identical to 1 / (1 + e^-z).

    Accepts scalars or numpy arrays; returns a numpy array of the same shape.
    """
    t = np.exp(-np.abs(z))
    return np.where(np.asarray(z) >= 0, 1.0 / (1.0 + t), t / (1.0 + t))

In [ ]:
def derivative_sigmoid(sig):
    """Derivative of the logistic sigmoid, expressed in terms of the
    already-computed sigmoid value: d/dz sigmoid(z) = sig * (1 - sig).

    Works elementwise on scalars and numpy arrays.
    """
    complement = 1.0 - sig
    return sig * complement

In [ ]:
def tanh(z):
    """Elementwise hyperbolic tangent activation (delegates to np.tanh)."""
    activated = np.tanh(z)
    return activated

In [ ]:
def derivative_tanh(tanh):
    """Derivative of tanh given the already-computed tanh value:
    d/dz tanh(z) = 1 - tanh(z)^2.

    NOTE: the parameter name shadows the tanh() helper above; inside this
    function it holds the tanh *value*, not the function.
    """
    return 1.0 - tanh * tanh

In [ ]:
#class ANN_BASE:
#    def __init__(self)
# Intended network shape: 3 layers of 6 -> 4 -> 3 nodes.
# NOTE(review): these constants are never read by Layer_ANN or the cells
# below — the sizes are passed as literals when the layers are built.
# Confirm whether they should be wired into the constructors or removed.
Nums_Layer = 3

Nodes_In_Layer  = 6

Nodes_Hidden_Layer = 4

Nodes_Out_Layer = 3

In [ ]:
class Layer_ANN:
    """One fully-connected layer of a small feed-forward network.

    Each layer owns:
      - Nums_Nodes:  number of nodes in this layer
      - In_Value:    pre-activation row vector, shape (1, Nums_Nodes)
      - Out_Value:   post-activation row vector (set by forward propagation;
                     for the input layer it is the raw input itself)
      - Bias_Nodes:  bias row vector, initialised to ones
      - weight:      incoming weight matrix (previous nodes x this nodes);
                     a scalar placeholder for the input layer
      - isInput / isFinal: position flags
    """

    def __init__(self, N_nodes, V_nodes, weight, isFinal=False, isInput=False):
        self.Nums_Nodes = N_nodes
        self.In_Value = V_nodes
        self.isInput = isInput
        # The input layer has no activation: its output is its raw input.
        if isInput:
            self.Out_Value = V_nodes
        self.Bias_Nodes = np.ones(N_nodes).reshape(1, N_nodes)
        self.weight = weight
        self.isFinal = isFinal

    def set_nodes_values(self, V_nodes):
        self.In_Value = V_nodes

    def set_bias_nodes(self, bias_nodes):
        self.Bias_Nodes = bias_nodes

    def get_nodes_values(self):
        # BUGFIX: previously returned the nonexistent attribute
        # self.Value_Nodes, which raised AttributeError on every call.
        return self.In_Value

    def get_bias_nodes(self):
        return self.Bias_Nodes

    def set_link_weight(self, weight):
        self.weight = weight

    def get_link_weight(self):
        # BUGFIX: previously returned the nonexistent attribute
        # self.Link_Weight, which raised AttributeError on every call.
        return self.weight

    def set_target(self, target):
        # Only the output layer carries a training target; silently ignored
        # elsewhere (original behavior preserved).
        if self.isFinal:
            self.Target = target

    def prop_forword_layer(self, next_layer):
        """Forward-propagate this layer's activated output into next_layer.

        Computes next_layer.In_Value = Out_Value . W + bias and
        next_layer.Out_Value = sigmoid(In_Value).

        Returns the *pre-activation* In_Value (not the sigmoid output) —
        callers that want the activation should read next_layer.Out_Value.
        """
        next_layer.In_Value = np.dot(self.Out_Value, next_layer.weight) + next_layer.Bias_Nodes
        next_layer.Out_Value = sigmoid(next_layer.In_Value)
        return next_layer.In_Value

    def revise_backword_weight(self, previous_layer):
        """One backpropagation step for this layer's incoming weights.

        Returns the updated weight matrix (None for the input layer).

        NOTE(review): for a hidden layer this reads self.error, which is
        only set by the *next* layer's call — layers must be revised in
        output-to-input order, as the driver cells below do.
        """
        n_rate = 0.001  # learning rate
        #m_rate = 0.001

        if self.isInput:
            # The input layer has no incoming weights to update.
            return

        if self.isFinal:
            # Output-layer error is the raw (target - prediction) difference.
            self.error = self.Target - self.Out_Value

        # Weight update; the two original branches were byte-identical apart
        # from the error assignment above, so they are merged here.
        delta_WGT = n_rate * np.dot(derivative_sigmoid(previous_layer.Out_Value.T), self.error)
        print(delta_WGT)

        # Push the error one layer back (uses the pre-update weights).
        previous_layer.error = np.dot(self.error, self.weight.T)
        self.weight = self.weight + delta_WGT
        print(previous_layer.error)

        return self.weight

In [ ]:
# Node-value row vectors for the three layers: random input; hidden and
# output start at zero and are filled in by forward propagation.
lay1 = np.random.randn(1, 6)

lay2 = np.zeros((1, 4))

lay3 = np.zeros((1, 3))

In [ ]:
# Incoming weight matrices, uniform on [0, 1): weight2 maps layer 1 -> 2,
# weight3 maps layer 2 -> 3.
weight2 = np.random.rand(6, 4)

weight3 = np.random.rand(4, 3)

In [ ]:
# Build the 6 -> 4 -> 3 network. The input layer gets a scalar placeholder
# weight of 1 since it has no incoming connections.
L1 = Layer_ANN(6,lay1,1,isInput = True)

L2 = Layer_ANN(4,lay2,weight2)

L3 = Layer_ANN(3,lay3,weight3,isFinal = True)

In [ ]:
# Inspect the input layer's placeholder weight and raw output.
# (print statement -> print() call: works in both Python 2 and 3.)
print(L1.weight)
print(L1.Out_Value)

In [ ]:
# Forward-propagate layer 1 -> layer 2. Note prop_forword_layer returns
# the pre-activation In_Value, not the sigmoid output.
lv2_in = L1.prop_forword_layer(L2)
print(L2.weight)
print(lv2_in)

In [ ]:
# NOTE(review): rw2 is only defined in a *later* cell
# (rw2 = L2.revise_backword_weight(L1)), so this cell raises NameError
# under Restart & Run All — it depends on out-of-order execution.
rw2

In [ ]:
# Forward-propagate layer 2 -> layer 3 (pre-activation values returned).
lv3_in = L2.prop_forword_layer(L3)
print(L3.weight)
print(lv3_in)

In [ ]:
# Activated (sigmoid) outputs of the hidden and output layers.
print(L2.Out_Value)
print(L3.Out_Value)

In [ ]:
# Pre-activation values of the hidden and output layers.
print(L2.In_Value)
print(L3.In_Value)

In [ ]:
# Set an all-ones training target on the output layer; the bare expression
# on the last line displays it.
L3.set_target(np.ones(3).reshape(1,3))
L3.Target

In [ ]:
# Backprop step for the output layer. Must run before the hidden layer's
# step: this call writes the propagated error into L2.error.
rw3 = L3.revise_backword_weight(L2)
rw3

In [ ]:
# Backprop step for the hidden layer; consumes L2.error set by the
# previous cell, so cell order matters here.
rw2 = L2.revise_backword_weight(L1)
rw2

In [ ]:


In [ ]:
# Sample the sigmoid derivative over [-5, 5]; y peaks at 0.25 at x = 0.
# Presumably intended for plotting, but no plot cell follows — TODO confirm.
x = np.linspace(-5,5,1000)

y = derivative_sigmoid(sigmoid(x))

In [ ]: