In [1]:
import numpy as np

In [19]:
shape = (2, 9)

np.vstack((2 * np.random.random(shape) - 1, 2 * np.random.random((1, shape[1])) - 1))


Out[19]:
array([[ 0.50162421,  0.45199597,  0.76661218,  0.24734441,  0.50188487,
        -0.30220332, -0.46014422,  0.79177244, -0.14381762],
       [ 0.92968009,  0.326883  ,  0.24339144, -0.77050805,  0.89897852,
        -0.10017573,  0.15677923, -0.18372639, -0.52594604],
       [ 0.80675904,  0.14735897, -0.99425935,  0.23428983, -0.3467102 ,
         0.0541162 ,  0.7718842 , -0.28546048,  0.8170703 ]])
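
This expression previews the initialisation used by add_layer below: a (2, 9) block of weights drawn uniformly from [-1, 1) is stacked on top of a (1, 9) row of biases, giving a single (3, 9) matrix. A minimal sketch of how the two parts are recovered by slicing (the names combined, w and b are illustrative only):

combined = np.vstack((2 * np.random.random(shape) - 1,
                      2 * np.random.random((1, shape[1])) - 1))
w = combined[:-1, :]   # (2, 9) weight block
b = combined[-1, :]    # (9,) bias row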

In [7]:
class NeuralNetwork():
    def __init__(self):
        np.random.seed(1)  # Seed the random number generator
        self.weights = {}  # Create dict to hold weights
        self.num_layers = 1  # Set initial number of layers to one (the input layer)
        self.adjustments = {}  # Create dict to hold adjustments

    def add_layer(self, shape):
        # Create weights with the specified shape, plus an extra row of biases
        self.weights[self.num_layers] = np.vstack((2 * np.random.random(shape) - 1, 2 * np.random.random((1, shape[1])) - 1))
        # Initialize the adjustments for these weights to zero
        self.adjustments[self.num_layers] = np.zeros(shape)
        self.num_layers += 1

    def __sigmoid(self, x):
        return 1 / (1 + np.exp(-x))

    def __sigmoid_derivative(self, x):
        # x is assumed to already be a sigmoid activation, so this is s * (1 - s)
        return x * (1 - x)

    def predict(self, data):
        # Pass a batch of row vectors through the pretrained network
        for layer in range(1, self.num_layers):
            data = np.dot(data, self.weights[layer][:-1, :]) + self.weights[layer][-1, :]
            data = self.__sigmoid(data)
        return data
    
    def __forward_propagate(self, data):
        # Propagate through the network and hold values for use in back-propagation
        activation_values = {}
        activation_values[1] = data
        for layer in range(2, self.num_layers+1):
            data = np.dot(data.T, self.weights[layer-1][:-1, :]) + self.weights[layer-1][-1, :]
            data = self.__sigmoid(data).T
            activation_values[layer] = data
        return activation_values

    def simple_error(self, outputs, targets):
        return targets - outputs

    def sum_squared_error(self, outputs, targets):
        return 0.5 * np.mean(np.sum(np.power(outputs - targets, 2), axis=1))

    def __back_propagate(self, output, target):
        deltas = {}
        # Delta of output Layer
        deltas[self.num_layers] = output[self.num_layers] - target

        # Delta of hidden Layers
        for layer in reversed(range(2, self.num_layers)):  # All layers except input/output
            a_val = output[layer]
            weights = self.weights[layer][:-1, :]
            prev_deltas = deltas[layer+1]
            deltas[layer] = np.multiply(np.dot(weights, prev_deltas), self.__sigmoid_derivative(a_val))

        # Calculate total adjustments based on the deltas
        for layer in range(1, self.num_layers):
            self.adjustments[layer] += np.dot(deltas[layer+1], output[layer].T).T

    def __gradient_descent(self, batch_size, learning_rate):
        # Calculate partial derivative and take a step in that direction
        for layer in range(1, self.num_layers):
            partial_d = (1/batch_size) * self.adjustments[layer]
            self.weights[layer][:-1, :] += learning_rate * -partial_d
            # Nudge the bias row with the last row of the weight gradient (bias gradients are not tracked separately)
            self.weights[layer][-1, :] += learning_rate * 1e-3 * -partial_d[-1, :]


    def train(self, inputs, targets, num_epochs, learning_rate=1, stop_accuracy=1e-5):
        error = []
        for iteration in range(num_epochs):
            for i in range(len(inputs)):
                x = inputs[i]
                y = targets[i]
                # Pass one training sample through the network
                output = self.__forward_propagate(x)

                # Calculate the error
                loss = self.sum_squared_error(output[self.num_layers], y)
                error.append(loss)

                # Calculate adjustments
                self.__back_propagate(output, y)

            self.__gradient_descent(len(inputs), learning_rate)

            # Check if the accuracy criterion is satisfied
            if np.mean(error[-len(inputs):]) < stop_accuracy and iteration > 0:
                break

        return np.asarray(error), iteration + 1
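
Each entry in self.weights therefore carries its layer's biases as the last row. For the XOR configuration used in the next cell, a quick shape check makes this layout explicit (a small sketch; the variable name net is illustrative only):

net = NeuralNetwork()
net.add_layer((2, 9))
net.add_layer((9, 1))
print(net.weights[1].shape)   # (3, 9): two rows of weights plus one row of biases
print(net.weights[2].shape)   # (10, 1): nine rows of weights plus one row of biases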

In [10]:
if __name__ == "__main__":

    # ----------- XOR Function -----------------

    # Create instance of a neural network
    nn = NeuralNetwork()

    # Add Layers (Input layer is created by default)
    nn.add_layer((2, 9))
    nn.add_layer((9, 1))

    # XOR function
    training_data = np.asarray([[0, 0], [0, 1], [1, 0], [1, 1]]).reshape(4, 2, 1)
    training_labels = np.asarray([[0], [1], [1], [0]])

    error, iteration = nn.train(training_data, training_labels, 5000)
    print('Error = ', np.mean(error[-4:]))
    print('Epoches needed to train = ', iteration)


('Error = ', 0.12919882118364132)
('Epoches needed to train = ', 5000)
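
To see what the trained network produces for each XOR input, the row vectors can be passed through predict (a usage sketch only; the exact values depend on how well training converged):

for x in [[0, 0], [0, 1], [1, 0], [1, 1]]:
    print(x, nn.predict(np.asarray(x).reshape(1, 2)))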

In [11]:
from numpy import exp, array, random, dot

class NeuralNetwork():
	def __init__(self):
		random.seed(1)

		# setting the number of nodes in layer 2 and layer 3
		# more nodes give the hidden layers more capacity to fit the data
		l2 = 5
		l3 = 4

		# assign random weights to matrices in network
		# format is (no. of nodes in previous layer) x (no. of nodes in following layer)
		self.synaptic_weights1 = 2 * random.random((3, l2)) -1
		self.synaptic_weights2 = 2 * random.random((l2, l3)) -1
		self.synaptic_weights3 = 2 * random.random((l3, 1)) -1
		
	def __sigmoid(self, x):
		return 1/(1+exp(-x))

	# derivative of the sigmoid; x is already an activation value, so this is s*(1-s)
	def __sigmoid_derivative(self, x):
		return x*(1-x)

	# train the neural network, adjusting the synaptic weights each iteration
	def train(self, training_set_inputs, training_set_outputs, number_of_training_iterations):
		for iteration in range(number_of_training_iterations):

			# pass training set through our neural network
			# a2 means the activations fed to second layer
			a2 = self.__sigmoid(dot(training_set_inputs, self.synaptic_weights1))
			a3 = self.__sigmoid(dot(a2, self.synaptic_weights2))
			output = self.__sigmoid(dot(a3, self.synaptic_weights3))

			# delta for the output layer: error scaled by the sigmoid gradient
			del4 = (training_set_outputs - output)*self.__sigmoid_derivative(output)

			# back-propagate the 'errors' through each hidden layer
			del3 = dot(self.synaptic_weights3, del4.T)*(self.__sigmoid_derivative(a3).T)
			del2 = dot(self.synaptic_weights2, del3)*(self.__sigmoid_derivative(a2).T)

			# get adjustments (gradients) for each layer
			adjustment3 = dot(a3.T, del4)
			adjustment2 = dot(a2.T, del3.T)
			adjustment1 = dot(training_set_inputs.T, del2.T)

			# adjust weights accordingly
			self.synaptic_weights1 += adjustment1
			self.synaptic_weights2 += adjustment2
			self.synaptic_weights3 += adjustment3

	def forward_pass(self, inputs):
		# pass our inputs through our neural network
		a2 = self.__sigmoid(dot(inputs, self.synaptic_weights1))
		a3 = self.__sigmoid(dot(a2, self.synaptic_weights2))
		output = self.__sigmoid(dot(a3, self.synaptic_weights3)) 
		return output

if __name__ == "__main__":
	# initialise the three-layer neural network
	neural_network = NeuralNetwork()

	print "Random starting synaptic weights (layer 1): "
	print neural_network.synaptic_weights1
	print "\nRandom starting synaptic weights (layer 2): "
	print neural_network.synaptic_weights2
	print "\nRandom starting synaptic weights (layer 3): "
	print neural_network.synaptic_weights3

	# the training set.
	training_set_inputs = array([[0,0,1],[1,1,1],[1,0,1],[0,1,1]])
	training_set_outputs = array([[0,1,1,0]]).T

	neural_network.train(training_set_inputs, training_set_outputs, 10000)

	print "\nNew synaptic weights (layer 1) after training: "
	print neural_network.synaptic_weights1
	print "\nNew synaptic weights (layer 2) after training: "
	print neural_network.synaptic_weights2
	print "\nNew synaptic weights (layer 3) after training: "
	print neural_network.synaptic_weights3

	# test with new input
	print "\nConsidering new situation [1,0,0] -> ?"
	print neural_network.forward_pass(array([1,0,0]))


Random starting synaptic weights (layer 1): 
[[-0.16595599  0.44064899 -0.99977125 -0.39533485 -0.70648822]
 [-0.81532281 -0.62747958 -0.30887855 -0.20646505  0.07763347]
 [-0.16161097  0.370439   -0.5910955   0.75623487 -0.94522481]]

Random starting synaptic weights (layer 2): 
[[ 0.34093502 -0.1653904   0.11737966 -0.71922612]
 [-0.60379702  0.60148914  0.93652315 -0.37315164]
 [ 0.38464523  0.7527783   0.78921333 -0.82991158]
 [-0.92189043 -0.66033916  0.75628501 -0.80330633]
 [-0.15778475  0.91577906  0.06633057  0.38375423]]

Random starting synaptic weights (layer 3): 
[[-0.36896874]
 [ 0.37300186]
 [ 0.66925134]
 [-0.96342345]]

New synaptic weights (layer 1) after training: 
[[-0.39042717  4.02220543 -1.52322523  2.40451717 -2.77177632]
 [-0.86817904 -0.33659723 -0.245578   -0.31292608  0.26079733]
 [-0.00600591 -1.69046817  0.12647375 -0.79367455  1.04614   ]]

New synaptic weights (layer 2) after training: 
[[ 0.9614375  -0.15372521 -0.67703076 -0.00498486]
 [-2.7714058   0.77362787  2.71638353 -2.4249225 ]
 [ 1.88550044  0.70717346 -0.71729366  0.7730995 ]
 [-1.59473372 -0.55756571  1.23221965 -1.28695185]
 [ 1.92232578  0.86077523 -2.13676866  2.54238247]]

New synaptic weights (layer 3) after training: 
[[-4.392069  ]
 [ 0.66563256]
 [ 5.76280212]
 [-3.88936424]]

Considering new situation [1,0,0] -> ?
[ 0.99650838]
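
As a quick sanity check, the same forward pass can be run on the whole training set at once; if training behaved as above, the four outputs should move toward the targets [0, 1, 1, 0] (a usage sketch, no particular values implied):

# Outputs for the training rows [0,0,1], [1,1,1], [1,0,1] and [0,1,1]
print(neural_network.forward_pass(training_set_inputs))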

In [12]:
np.random(3,5)

---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-12-b62d233dac99> in <module>()
----> 1 np.random(3,5)

TypeError: 'module' object is not callable
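
np.random is the numpy.random module, not a callable, which is why the call above raises a TypeError. The call that was presumably intended is one of the module's samplers, for example:

np.random.random((3, 5))   # uniform samples on [0, 1) with shape (3, 5)
# or, equivalently
np.random.rand(3, 5)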

In [ ]: