In [1]:
import numpy as np
In [2]:
X=np.array([[1,0,1,0],[1,0,1,1],[0,1,0,1]])
In [4]:
y = np.array([[1],[1],[0]])
In [5]:
#sigmoid function
def sigmoid(x):
return 1/(1+np.exp(-x))
In [6]:
#derivative function
def derivative(x):
return x * (1-x)
In [12]:
tf = np.random.uniform(size=(4,3))
tf
Out[12]:
In [13]:
#declare varibles
epoches = 5000
lr = 0.1 #learning rate
input_layer_nueron = X.shape[1] #give input layer based on input feaure in dataset
hidden_layer_nueron = 3 #give hidden layer
output_neurons = 1 #outpulayer
In [14]:
#weights & bias init
wh = np.random.uniform(size=(input_layer_nueron,hidden_layer_nueron))
bh = np.random.uniform(size=(1, hidden_layer_nueron))
wout = np.random.uniform(size=(hidden_layer_nueron, output_neurons))
bout = np.random.uniform(size = (1, output_neurons))
In [19]:
for i in range(epoches):
#forward propogation
hidden_layer_input1 = np.dot(X, wh)
hidden_layer_input = hidden_layer_input1 +bh
hidden_layer_activation = sigmoid(hidden_layer_input)
output_layer_input1 = np.dot(hidden_layer_activation, wout)
output_layer_input = output_layer_input1 + bout
output = sigmoid(output_layer_input)
#backward propogation
E = y - output
# print E
slope_output_layer = derivative(output)
# print slope_output_layer
slope_hidden_layer = derivative(hidden_layer_activation)
# print slope_hidden_layer
d_output = E * slope_output_layer
# print d_output
Error_at_hidden_layer = d_output.dot(wout.T)
# print Error_at_hidden_layer
d_hiddenlayer = Error_at_hidden_layer * slope_hidden_layer
wout += hidden_layer_activation.T.dot(d_output) * lr
bout += np.sum(d_output, axis =0, keepdims=True) * lr
wh += X.T.dot(d_hiddenlayer) * lr
bh += np.sum(d_hiddenlayer, axis=0, keepdims=True) *lr
print('actual: \n' , y,'\n')
print('predicted: \n', output)
In [ ]: