In [25]:
# Theano symbolic math: tensor constructors/ops live in `T`.
# Import the compile/utility helpers explicitly rather than `from theano import *`
# (wildcard imports pollute the namespace and hide where names come from).
import theano.tensor as T
from theano import function, pp, shared, Param

In [2]:
# Two float64 scalar symbols and their compiled sum.
x, y = T.dscalars('x', 'y')  # same as two separate T.dscalar(...) calls
z = x + y                    # builds a symbolic graph; no value yet
f = function([x, y], z)      # compile the graph into a callable

In [5]:
# Inspect the symbolic variable: its Python class, its Theano type, and the
# fact that `x.type` is the very same TensorType object as `T.dscalar`.
print type(x)
print x.type
print T.dscalar
print x.type is T.dscalar


<class 'theano.tensor.var.TensorVariable'>
TensorType(float64, scalar)
TensorType(float64, scalar)
True

In [8]:
# Pretty-print the symbolic expression graph of z.
print pp(z)


(x + y)

In [9]:
# Evaluate z directly by substituting concrete values (no explicit `function`).
z.eval({x : 16.3, y : 12.1})


Out[9]:
array(28.4)

In [3]:
# Call the compiled function; results are float64 (see 5.0 below).
print f(2, 3)
print f(16.3, 12.1)


5.0
28.4

In [10]:
# Element-wise matrix addition: same pattern as the scalar case, with dmatrix.
x, y = T.dmatrices('x', 'y')  # two float64 matrix symbols in one call
z = x + y
f = function([x, y], z)

In [11]:
# Element-wise addition of two 2x2 matrices; nested lists are accepted as input.
f([[1,2], [3,4]], [[10,20], [30,40]])


Out[11]:
array([[ 11.,  22.],
       [ 33.,  44.]])

The following types are available:

8-bit integers (byte): bscalar, bvector, bmatrix, brow, bcol, btensor3, btensor4
16-bit integers: wscalar, wvector, wmatrix, wrow, wcol, wtensor3, wtensor4
32-bit integers: iscalar, ivector, imatrix, irow, icol, itensor3, itensor4
64-bit integers: lscalar, lvector, lmatrix, lrow, lcol, ltensor3, ltensor4
float: fscalar, fvector, fmatrix, frow, fcol, ftensor3, ftensor4
double: dscalar, dvector, dmatrix, drow, dcol, dtensor3, dtensor4
complex: cscalar, cvector, cmatrix, crow, ccol, ctensor3, ctensor4


In [12]:
a = T.vector()
out = a + a**10
f = function([a], out)
print f([0, 1, 2])


[    0.     2.  1026.]

In [15]:
a = T.vector()
b = T.vector()
out = a**2 + b**2 + 2*a*b
f = function([a,b], out)
print f([1,2], [4,5])


[ 25.  49.]


In [17]:
# Logistic (sigmoid) applied element-wise: s = 1 / (1 + e^-x).
# NOTE: `s` is reused by a later cell, so keep the name.
x = T.dmatrix('x')
s = 1 / (1 + T.exp(-x))
logistic = function([x], s)
print logistic([[0,1], [-1, -2]])


[[ 0.5         0.73105858]
 [ 0.26894142  0.11920292]]

In [18]:
s2 = (1 + T.tanh(x / 2)) / 2
logistic = function([x], s)
print logistic([[0,1], [-1, -2]])


[[ 0.5         0.73105858]
 [ 0.26894142  0.11920292]]

In [24]:
a, b = T.dmatrices('a', 'b')
diff = a - b
abs_diff = abs(diff)
diff_squared = diff*2
f = function([a,b], [diff, abs_diff, diff_squared])
print f([[1,1], [1,1]], [[0,1], [2,3]])


[array([[ 1.,  0.],
       [-1., -2.]]), array([[ 1.,  0.],
       [ 1.,  2.]]), array([[ 2.,  0.],
       [-2., -4.]])]

In [27]:
# `Param(y, default=1)` makes the second argument optional.
x, y = T.dscalars('x', 'y')
z = x + y
f = function([x, Param(y, default=1)], z)
print f(33)     # y falls back to its default of 1 -> 34.0
print f(33, 2)  # explicit y -> 35.0


34.0
35.0

In [32]:
# Defaults plus a renamed keyword: `name='w_by_name'` lets callers pass w by
# that keyword even though the symbolic variable is called `w`.
x, y, w = T.dscalars('x', 'y', 'w')
z = (x + y) * w
f = function([x, Param(y, default=1), Param(w, default=2, name='w_by_name')], z)
print f(33)                  # (33 + 1) * 2 = 68
print f(33, 2)               # (33 + 2) * 2 = 70
print f(33, 0, 1)            # positional override of both defaults
print f(33, w_by_name=1)     # (33 + 1) * 1 = 34
print f(33, w_by_name=1, y=0)


68.0
70.0
33.0
34.0
33.0

In [34]:
# `state` is a shared variable: its value persists between function calls.
state = shared(0)
inc = T.iscalar('inc')
# Each call returns the current (pre-update) value of `state`, then applies
# the update state <- state + inc (see the Out[] values that follow).
accumulator = function([inc], state, updates=[(state, state+inc)])

In [35]:
state.get_value()  # still the initial 0


Out[35]:
array(0)

In [36]:
accumulator(1)  # returns the pre-update value (0); state becomes 1


Out[36]:
array(0)

In [37]:
state.get_value()  # -> 1 after the previous update


Out[37]:
array(1)

In [38]:
accumulator(300)  # returns 1; state becomes 301


Out[38]:
array(1)

In [39]:
state.get_value()  # -> 301


Out[39]:
array(301)

In [41]:
# Overwrite the shared state directly.
state.set_value(-1)

In [42]:
accumulator(3)  # returns -1; state becomes 2


Out[42]:
array(-1)

In [43]:
state.get_value()  # -> 2


Out[43]:
array(2)

In [44]:
# A second compiled function sharing the SAME `state`; its update subtracts.
decrementor = function([inc], state, updates=[(state, state-inc)])

In [45]:
decrementor(2)  # returns 2; state becomes 0


Out[45]:
array(2)

In [46]:
state.get_value()  # -> 0


Out[46]:
array(0)

In [47]:
# An expression that reads the shared `state`.
fn_of_state = state * 2 + inc
foo = T.scalar(dtype=state.dtype)  # stand-in with the same dtype as `state`
# `givens` substitutes `foo` for `state` inside the graph. Think of givens as a
# mechanism for replacing any sub-expression with another expression of the
# same shape and dtype; the shared variable itself is neither read nor updated.
skip_shared = function([inc, foo], fn_of_state, givens=[(state, foo)])

In [48]:
skip_shared(1, 3)  # 3 * 2 + 1 = 7; the shared `state` plays no part


Out[48]:
array(7)

In [49]:
state.get_value()  # unchanged (still 0) -- givens did not touch it


Out[49]:
array(0)

In [53]:
# Seeded stream of random variables.
srng = T.shared_randomstreams.RandomStreams(seed=234)
rv_u = srng.uniform((2,2))  # 2x2 uniform draws
rv_n = srng.normal((2,2))   # 2x2 normal draws
f = function([], rv_u)  # each call advances rv_u's generator state
# `no_default_updates=True`: calling g does NOT advance rv_n's generator,
# so g returns the same numbers every call.
g = function([], rv_n, no_default_updates=True)
# A random variable is drawn at most once per function call, so all three
# references to rv_u share one draw and the result is exactly zero.
nearly_zeros = function([], rv_u + rv_u - 2 * rv_u)

In [ ]: