In [89]:
%matplotlib inline
import theano
from theano import tensor as T
"""
theano.tensor contains functions for operations on theano variables
similar interface to numpy operating on numpy arrays.
"""
import numpy as np
from matplotlib import pyplot as plt
In [90]:
## Generate training data: a line of slope 2 plus Gaussian noise
train_x = np.linspace(-1, 1, 101)
train_y = 2 * train_x + np.random.randn(*train_x.shape) * 0.33
In [91]:
## Symbolic variable initialization
X = T.scalar()  # input feature (symbolic placeholder, no value yet)
Y = T.scalar()  # target output (symbolic placeholder, no value yet)
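In [ ]:
## Aside (not part of the original run): X and Y hold no values yet; nothing
## is computed until a graph is compiled. A minimal sketch using only the
## imports above:
a = T.scalar()
b = T.scalar()
add = theano.function(inputs=[a, b], outputs=a + b)  # compile the symbolic graph
add(1.0, 2.0)  # evaluates to array(3.0)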
In [92]:
def model(X, w):  # Takes in features X and weights w
    return X * w  # Just multiplies them together
In [93]:
## theano.shared creates a hybrid variable: symbolic in the graph, but backed
## by a real value. Model parameters will be shared variables.
w = theano.shared(np.asarray(0., dtype=theano.config.floatX))  # model parameter, initialized to 0
y = model(X, w)  # symbolic prediction from our simple model
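In [ ]:
## Aside (hedged sketch): a shared variable is readable and writable from
## plain Python via get_value/set_value, unlike purely symbolic variables.
w.get_value()     # array(0.0), the initial value set above
w.set_value(0.5)  # overwrites the parameter in place
w.set_value(0.)   # reset so the training run below starts from 0 as intended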
In [97]:
## Metric to be optimized by the model;
## it is the learning signal for the parameter(s).
cost = T.mean(T.sqr(y - Y))  # cost function: mean squared error
gradient = T.grad(cost=cost, wrt=w)  # gradient of the cost with respect to (wrt) w (our weight)
# Update rule: at each step, replace w with w - gradient * 0.001 (the learning rate)
updates = [[w, w - gradient * 0.001]]
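In [ ]:
## Aside (hedged sketch): for this one-parameter model, T.grad should agree
## with the hand derivative d/dw (X*w - Y)^2 = 2*X*(X*w - Y). The sample
## point below is illustrative, not taken from the data.
get_grad = theano.function(inputs=[X, Y], outputs=gradient, allow_input_downcast=True)
x0, y0 = 0.5, 1.0
get_grad(x0, y0), 2 * x0 * (x0 * w.get_value() - y0)  # the two values should match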
In [98]:
## Compile a Python function from the symbolic graph.
## floatX is float32 on GPU and usually float64 on CPU; allow_input_downcast
## lets inputs be cast down to floatX internally.
train = theano.function(inputs=[X, Y], outputs=[cost, y], updates=updates, allow_input_downcast=True)
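In [ ]:
## Aside (hedged sketch): each call to train performs one gradient step, so w
## changes as a side effect of the updates list. Illustrative values only:
before = w.get_value()
c, pred = train(0.5, 1.0)  # one (x, y) pair; returns the cost and the prediction
before, w.get_value()      # w has moved by -0.001 * gradient
w.set_value(0.)            # reset so the training loop below starts fresh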
In [101]:
## Iterate through the data 100 times, training the model on each input/output pair.
plt.plot(train_x, train_y, 'bo')  # scatter plot of the raw training data
def run_training(iterations=100):
    epoch_cost = []
    for i in range(iterations):
        for x, y in zip(train_x, train_y):
            each_cost, y_predict = train(x, y)  # y_predict returned here for testing the function; not used
        epoch_cost.append(float(each_cost))  # keep the last per-example cost of each epoch
    return epoch_cost

costs = run_training(100)
plt.figure()
plt.plot(costs, 'bo')  # per-epoch cost should fall as w converges
Out[101]:
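In [ ]:
## Aside (hedged sketch): after training, the learned weight should sit near
## the true slope of 2 used to generate the data (up to noise).
w.get_value()
plt.figure()
plt.plot(train_x, train_y, 'bo')                 # training data
plt.plot(train_x, w.get_value() * train_x, 'r')  # fitted line y = w * x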