In [1]:
reload("nnadl-julia/neuralnet.jl")
In [2]:
n = NeuralNet.Network([2;3;2])  # a small network: 2 inputs, 3 hidden neurons, 2 outputs
Out[2]:
In [3]:
length(n.layers)
Out[3]:
3
In [4]:
test_input  = [1 1; 2 2]   # a tiny fixed dataset so the cost is deterministic
test_output = [1 1; 2 2]   # targets chosen identical to the inputs
Out[4]:
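The cells that follow check the analytic gradients from NeuralNet.backpropagate against numerical estimates. For any single parameter $\theta$ (a bias or a weight), the standard central-difference approximation is

$$\frac{\partial C}{\partial \theta} \approx \frac{C(\theta + \delta) - C(\theta - \delta)}{2\delta},$$

whose truncation error is $O(\delta^2)$. With $\delta = 10^{-5}$ that error is around $10^{-10}$, far below the 1e-6 tolerance used in cell [7].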
In [5]:
delta = 1e-5   # finite-difference step size (also the default used below)

# Estimate the cost derivative with respect to a single bias by central
# difference: nudge the bias up and down by delta and difference the costs.
function test_bias(network, layer, neuron, delta = 1e-5)
    nb_upper = deepcopy(network)
    nb_lower = deepcopy(network)
    nb_upper.bias[layer][neuron] += delta
    nb_lower.bias[layer][neuron] -= delta
    cost_upper = NeuralNet.cost(nb_upper, test_input, test_output)
    cost_lower = NeuralNet.cost(nb_lower, test_input, test_output)
    (cost_upper - cost_lower) / (2 * delta)
end
Out[5]:
test_bias (generic function with 2 methods)
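Before running the full loop in cell [7], a single bias can be spot-checked by hand. This sketch assumes the same return layout of NeuralNet.backpropagate that cell [7] relies on (bias gradients first, then weight gradients); the particular indices are just an illustration:

grads = NeuralNet.backpropagate(n, test_input, test_output)
numeric  = test_bias(n, 1, 2)   # bias of neuron 2 in the first hidden layer
analytic = grads[1][1][2]       # matching entry of the analytic gradient
abs(numeric - analytic)         # should come out well below 1e-6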
In [6]:
# The same central-difference estimate for a single weight; neuronA indexes
# the row and neuronB the column of the layer's weight matrix.
function test_weights(network, layer, neuronA, neuronB, delta = 1e-5)
    nw_upper = deepcopy(network)
    nw_lower = deepcopy(network)
    nw_upper.weights[layer][neuronA, neuronB] += delta
    nw_lower.weights[layer][neuronA, neuronB] -= delta
    costw_upper = NeuralNet.cost(nw_upper, test_input, test_output)
    costw_lower = NeuralNet.cost(nw_lower, test_input, test_output)
    (costw_upper - costw_lower) / (2 * delta)
end
Out[6]:
test_weights (generic function with 2 methods)
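A single weight can be spot-checked the same way. Note the index order: going by how cell [7] indexes things, each layer's weight matrix appears to be addressed as [destination neuron, source neuron], so the next-layer neuron comes first:

# Illustrative check of the weight into hidden neuron 3 from input neuron 1.
numeric_w  = test_weights(n, 1, 3, 1)
analytic_w = NeuralNet.backpropagate(n, test_input, test_output)[2][1][3, 1]
abs(numeric_w - analytic_w)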
In [7]:
# Compare every analytic derivative from backpropagate against its numerical
# estimate. testDerive[1] holds the bias gradients and testDerive[2] the
# weight gradients, one entry per non-input layer.
testDerive = NeuralNet.backpropagate(n, test_input, test_output)
for (layer, numNeurons) in enumerate(n.layers[2:end])
    @printf("Layer %d\n", layer)
    for neuron in 1:numNeurons
        @printf("Bias Neuron %d\n", neuron)
        a = test_bias(n, layer, neuron)
        b = testDerive[1][layer][neuron]
        @Test.test_approx_eq_eps a b 1e-6
    end
    @printf("Weights\n")
    # Weight matrices are indexed [destination, source], so the next-layer
    # neuron (neuronB) comes first.
    for neuronA in 1:n.layers[layer], neuronB in 1:n.layers[layer+1]
        a = test_weights(n, layer, neuronB, neuronA)
        b = testDerive[2][layer][neuronB, neuronA]
        @Test.test_approx_eq_eps a b 1e-6
    end
end
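The 1e-6 tolerance passed to the test macro is comfortable rather than tight: Taylor-expanding the cost around each parameter gives

$$\frac{C(\theta+\delta) - C(\theta-\delta)}{2\delta} = C'(\theta) + \frac{\delta^2}{6}\,C'''(\theta) + O(\delta^4),$$

so with $\delta = 10^{-5}$ the truncation error is on the order of $10^{-10}$, and (assuming a cost of order one) the floating-point cancellation error $\epsilon/\delta$ is around $10^{-11}$. Any mismatch near 1e-6 would therefore point at a real bug in backpropagate rather than numerical noise.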