In [7]:
# Imports and sample data: scikit-learn's 8x8 handwritten-digit images.
import numpy as np
from sklearn.datasets import load_digits
digits = load_digits()
# Show the first digit as its raw 8x8 grayscale pixel array.
print(digits.images[0])
In [111]:
class Neuron:
    """A single sigmoid neuron: output = sigmoid(inputs . weights + bias)."""

    def __init__(self, num_inputs, bias=.5, weights=None):
        """Create a neuron with `num_inputs` input connections.

        Parameters
        ----------
        num_inputs : int
            Number of inputs (and therefore weights).
        bias : float, optional
            Additive bias term (default 0.5).
        weights : np.ndarray of shape (num_inputs,), optional
            Fixed weight vector; random uniform [0, 1) weights if None.

        Raises
        ------
        ValueError
            If `weights` is given but its shape is not (num_inputs,).
        """
        # BUG FIX: the original used `weights == None`, which on an ndarray
        # performs an elementwise comparison and yields a boolean array whose
        # truth value is ambiguous. Identity check with `is` is correct.
        if weights is None:
            # Vectorized random init replaces the original per-element loop.
            self.weights = np.random.rand(num_inputs)
        else:
            self.weights = weights
            if self.weights.shape != (num_inputs,):
                raise ValueError("Weights must be the same size as the number of inputs.")
        self.bias = float(bias)

    def calc_output(self, inputs):
        """Return the sigmoid activation for `inputs`.

        Raises
        ------
        ValueError
            If `inputs` does not match the weight vector's shape.
        """
        if inputs.shape != self.weights.shape:
            raise ValueError("Inputs and weights must have the same shape.")
        inwsum = inputs.dot(self.weights)
        # sigmoid(z + b) written as 1 / (1 + e^{-z - b}); output is in (0, 1).
        return 1 / (1 + np.exp(-inwsum - self.bias))
In [112]:
# Smoke tests for Neuron with fixed weights (.9, .5) and bias .5.
n1 = Neuron(2, .5, np.array((.9, .5)))
# NOTE(review): a sigmoid never returns exactly 0, 1, or 3, so these `!=`
# asserts can never fail — they only verify calc_output runs without error.
assert n1.calc_output(np.array([0,0]))!=0
assert n1.calc_output(np.array([0,2]))!=1
assert n1.calc_output(np.array([2,4]))!=3
# A 4-element input against 2 weights must raise ValueError.
try:
assert n1.calc_output(np.array([0,0,0,0]))==0
except ValueError:
print("There was an error")
In [113]:
class Layer:
    """One layer of sigmoid neurons whose activations feed a single output neuron."""

    def __init__(self, num_neurons, num_inputs, output_weights=None):
        """Build `num_neurons` hidden neurons, each taking `num_inputs` inputs.

        Parameters
        ----------
        num_neurons : int
            Number of hidden neurons in the layer.
        num_inputs : int
            Number of inputs each hidden neuron receives.
        output_weights : np.ndarray of shape (num_neurons,), optional
            Weights of the output neuron; random if None.
        """
        # Object array holding the hidden neurons (randomly initialized below).
        self.neurons = np.empty((num_neurons,), dtype=Neuron)
        self.num_neurons = num_neurons
        self.output_weights = output_weights
        # Single output neuron that combines the hidden activations.
        self.output_node = Neuron(num_neurons, .5, output_weights)
        for i in range(num_neurons):
            self.neurons[i] = Neuron(num_inputs)

    def propForward(self, inputs):
        """Forward pass: compute each hidden activation, then the final output.

        Returns the output neuron's activation (a float in (0, 1)).
        """
        outputs = np.zeros((self.num_neurons,))
        # enumerate replaces the original manual `i += 1` counter.
        for i, neuron in enumerate(self.neurons):
            outputs[i] = neuron.calc_output(inputs)
            print(outputs[i])  # vector of activations (debug trace)
        output = self.output_node.calc_output(outputs)
        print(output)
        return output

    def training(self, inputs, answer):
        """Placeholder for gradient-descent training (not yet implemented).

        BUG FIX: the original body was `grad_b = [np.zeros(self.num_neurons) for ]`,
        an incomplete comprehension that is a SyntaxError and prevented the whole
        class from being defined. Until backpropagation is written, fail loudly
        instead of silently doing nothing.
        """
        raise NotImplementedError("Layer.training (backpropagation) is not implemented yet.")
In [114]:
# Smoke test: 20 single-input neurons with linearly spaced output weights.
net1 = Layer(20, 1, np.linspace(0,10,20))
# NOTE(review): sigmoid output is never exactly 0, so this only checks that
# the forward pass runs without raising.
assert net1.propForward(np.array([1]))!=0
In [ ]: