In [1]:
from neuralnet import *
from activations import *
import numpy as np
In [2]:
# Single linear layer: 3 inputs -> 1 output, trained with a SimpleLearner
# at learning rate 0.005.
activation = LinearActivation()
learner = SimpleLearner(0.005)
layer1 = Layer("layer1", 3, 1, activation, learner)
In [3]:
# Synthetic regression set: 100 samples, 3 features drawn uniformly from [0, 10).
# The target is just the last feature column, so a single linear layer can fit
# it exactly (weights -> [0, 0, 1]).
# Seeded generator so the notebook is reproducible under Restart & Run All.
rng = np.random.default_rng(42)
data = rng.uniform(0, 10, (100, 3))
targets = data[:, -1].reshape((100, 1))
In [4]:
# Materialize the (sample, target) pairs into a list: on a Python 3 kernel,
# zip() returns a one-shot iterator that would be exhausted after the first
# pass, so every later training epoch would silently see no data.
training = list(zip(data, targets))
In [5]:
# One-layer network with mean-squared-error loss; trailing 0.9 is presumably
# a momentum/decay term — see NeuralNet's constructor.
loss = MeanSquaredError()
nn = NeuralNet([layer1], loss, 0.9)
In [6]:
# Fit the network on the (input, target) pairs; presumably 50 is the number
# of epochs — TODO confirm against NeuralNet.train's signature.
nn.train(training, 50)
In [7]:
# Display the learned weights. Since the target equals the last input feature,
# the last weight should approach 1 and the others 0 — NOTE(review): confirm
# against the rendered Out[7] value.
layer1.weights
Out[7]:
In [8]:
# Gradient check: compare analytic gradients against finite-difference
# estimates on a fresh 3-3-1 tanh/linear network (momentum set to 0 so the
# optimizer state cannot interfere with the comparison).
layer = Layer("layer1", 3, 3, TanhActivation())
layer2 = Layer("layer2", 3, 1, LinearActivation())
nn = NeuralNet([layer, layer2], MeanSquaredError(), 0)
numGrads = nn.numerical_gradient(data, targets, 1e-5)  # epsilon for central difference
grads = nn.get_gradients(data, targets)
# Materialize: on Python 3, a zip object is a one-shot iterator and is NOT
# subscriptable — zipped[0] / zipped[-1] would raise TypeError.
zipped = list(zip(numGrads, grads))
# First layer: difference and sum, for a relative-error check |a-b| / |a+b|.
numGrad, grad = zipped[0]
sub = numGrad - grad
plus = numGrad + grad
# Last layer: same quantities.
numGrad2, grad2 = zipped[-1]
sub2 = numGrad2 - grad2
plus2 = numGrad2 + grad2
# print() form is valid on both Python 2 and 3 for a single argument;
# `print x` statements are Python-2-only syntax.
print(numGrads)
print(grads)