In [40]:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
In [2]:
# Build a symbolic graph: two float scalars and their sum.
# Nothing is computed yet; z is an expression node, not a number.
x, y = T.scalar(), T.scalar()
z = x + y
In [3]:
fntSum = theano.function(inputs=[x, y], outputs=z)
In [4]:
# Evaluate the compiled sum on concrete numbers.
r1 = fntSum(2, 3)
print(r1)
In [5]:
# Symbolic variable constructors, by dtype and rank.
# Each call just declares a typed placeholder; x is rebound every line
# (this cell only demonstrates the constructor names).
x = T.scalar() # 0-D, dtype = theano.config.floatX (the default)
x = T.iscalar() # 0-D, int32
x = T.dscalar() # 0-D, float64
x = T.vector() # 1-D
x = T.matrix() # 2-D
x = T.tensor3() # 3-D
x = T.tensor4() # 4-D
In [6]:
# Show the active compute device and the default float dtype
# that T.scalar()/T.matrix() etc. use.
print(theano.config.device)
print(theano.config.floatX)
In [7]:
# Compile a matrix product.
# NOTE(review): the name says "Sum" but this computes dot(x, y) — kept
# as-is because the next cell calls it by this name.
x = T.matrix()
y = T.matrix()
z = T.dot(x, y)
fntSumMat = theano.function([x, y], z)
In [8]:
# Evaluate the compiled matrix product on two random 3x3 inputs,
# cast to the configured float dtype so they match the graph's inputs.
x = np.random.random((3, 3)).astype(theano.config.floatX)
y = np.random.random((3, 3)).astype(theano.config.floatX)
print(fntSumMat(x, y))
In [9]:
# Shared variables hold state that persists across theano.function calls;
# they cannot appear in a function's inputs or outputs list.
# Read with get_value(), write with set_value().
t = theano.shared(np.array([[1, 2], [3, 4]], dtype=theano.config.floatX))
# borrow=True would skip the defensive copy of the numpy array (default: False)
print(t)
print(t.get_value())
t.set_value(t.get_value() + 1)  # increment every element by 1
print(t.get_value())
In [11]:
# Symbolic differentiation of y = x**2.
# T.grad(y, [x]) returns a LIST (one gradient per wrt variable), so the
# compiled function's output is a one-element list — see the next cell
# for the unwrapped version.
x = T.scalar()
y = x ** 2
diff = theano.function([x], T.grad(y, [x]))
for i in range(5):
    print("input : %s, output : %s" % (i, diff(i)))
In [12]:
# Same derivative as above, but index [0] into the gradient list so the
# compiled function returns the scalar dy/dx = 2*x directly.
x = T.scalar()
y = x ** 2
diff = theano.function([x], T.grad(y, [x])[0])
for i in range(5):
    print("input : %s, output : %s" % (i, diff(i)))
In [13]:
# Gradient descent on a shared weight: minimize obj(w) = (1 - x*w)**2.
x = T.scalar()
w = theano.shared(np.array(3, dtype=theano.config.floatX), borrow=True)
obj = (1 - x * w) ** 2
# update rule: w <- w - 0.1 * d(obj)/dw   (0.1 is the learning rate)
learn_w = (w, w - 0.1 * T.grad(obj, w))
learn = theano.function([x], obj, updates=[learn_w])
for i in range(10):
    # w.get_value() is evaluated before learn(i), so each line shows the
    # weight as it was BEFORE this step's update was applied.
    print("input : %s, w : %s, output : %s" % (i, w.get_value(), learn(i)))
In [25]:
np.random.randn(*(2,2))
Out[25]:
In [30]:
# Compile a matrix product and evaluate it on a 2x2 all-ones example
# (expected output: every entry equals 2).
x = T.matrix()
y = T.matrix()
f_dot = theano.function([x, y], T.dot(x, y))
x = np.ones((2, 2), dtype=theano.config.floatX)
y = np.ones((2, 2), dtype=theano.config.floatX)
print(f_dot(x, y))
In [32]:
# Sigmoid applied elementwise to a matrix product — the basic building
# block of a dense neural-network layer.
x = T.matrix()
y = T.matrix()
f_sigmoid = theano.function([x, y], T.nnet.sigmoid(T.dot(x, y)))
x = np.ones((2, 2), dtype=theano.config.floatX)
y = np.ones((2, 2), dtype=theano.config.floatX)
print(f_sigmoid(x, y))
In [41]:
# Softmax over a length-10 vector of 0..9.
# T.nnet.softmax returns a 1 x n row matrix, not a flat vector.
x = T.vector()
f_softmax = theano.function([x], T.nnet.softmax(x))
x = range(10)
print(f_softmax(x))
In [42]:
srng = RandomStreams()
In [70]:
# Dropout-style binary mask generator: only the SHAPE of the input matrix
# is used; each entry is drawn as 1 with probability retain_prob, else 0.
# NOTE(review): full inverted dropout would also scale by 1/retain_prob —
# not done here (mask demo only). Name typo "binomia" kept: the next cell
# calls it by this name.
x = T.matrix()
p = 0.5               # drop probability
retain_prob = 1 - p   # probability an entry is kept
f_binomia = theano.function(
    [x],
    srng.binomial(x.shape, p=retain_prob, dtype=theano.config.floatX),
)
In [72]:
# Draw one 10x10 mask and count how many zeros vs ones it contains
# (np.unique with return_counts gives the value/count pairs).
x = np.random.random((10, 10)).astype(theano.config.floatX)
r = f_binomia(x)
print(np.unique(r, return_counts=True))
In [68]: