In [1]:
import numpy as np
import theano as th
import theano.tensor as T
import theano.tensor.nlinalg as la
In [2]:
a, b = T.dscalar(), T.dscalar()
c = 2 * a + b
c.eval({a: 4, b: 2})
Out[2]:
array(10.0)
In [3]:
x = T.vector()
y = T.vector()
square_sum = th.function((x, y), x*x + y*y + 2*x*y)
In [4]:
square_sum(np.arange(4), np.arange(4))
Out[4]:
array([  0.,   4.,  16.,  36.])
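A quick sanity check added here (not part of the original session): x*x + y*y + 2*x*y is just (x + y)**2, so the compiled function should match plain NumPy on float64 inputs (the default floatX of float64 is assumed).
In [ ]:
# Sketch: compare the compiled Theano graph against the NumPy expression
u, v = np.random.rand(4), np.random.rand(4)
np.allclose(square_sum(u, v), (u + v) ** 2)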
In [5]:
def compression_np(mat, rank):
    """Best rank-`rank` approximation of `mat` via a truncated SVD."""
    U, s, V = np.linalg.svd(mat)  # NumPy returns V already transposed (V^T)
    return (s[:rank] * U[:, :rank]) @ V[:rank, :]
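An added illustration of what compression_np returns: only `rank` singular triplets are kept, so the reconstruction's numerical rank is at most `rank` (the small test matrix below is hypothetical).
In [ ]:
# Sketch: the truncated-SVD reconstruction has rank <= rank
small = np.random.randn(8, 8)
np.linalg.matrix_rank(compression_np(small, 3))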
In [6]:
testmat = np.random.randn(1000, 1000)
In [7]:
%%timeit
compression_np(testmat, 100)
In [11]:
A = T.dmatrix()
rank = T.iscalar()
U, s, V = la.svd(A)
A_compressed = T.dot(s[:rank] * U[:, :rank], V[:rank, :])
compression_th = th.function((A, rank), A_compressed)
In [9]:
%%timeit
compression_th(testmat, 100)
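An added cross-check, not executed in the original notebook: both paths ultimately call LAPACK's SVD, so the rank-100 reconstructions should agree up to floating-point noise.
In [ ]:
# Sketch: the NumPy and Theano implementations should match numerically
np.allclose(compression_np(testmat, 100), compression_th(testmat, 100))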
In [10]:
from theano import function, config, shared, sandbox
import theano.tensor as T
import numpy
import time
vlen = 10 * 30 * 768 # 10 x #cores x # threads per core
iters = 1000
rng = numpy.random.RandomState(22)
x = shared(numpy.asarray(rng.rand(vlen), config.floatX))
f = function([], T.exp(x))
print(f.maker.fgraph.toposort())
t0 = time.time()
for i in range(iters):
    r = f()
t1 = time.time()
print("Looping %d times took %f seconds" % (iters, t1 - t0))
print("Result is %s" % (r,))
if numpy.any([isinstance(x.op, T.Elemwise) for x in f.maker.fgraph.toposort()]):
    print('Used the cpu')
else:
    print('Used the gpu')
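For reference, an added one-liner showing the configuration flags the CPU/GPU check above depends on (default Theano flags assumed):
In [ ]:
# Which device and float width Theano was configured with
print(config.device, config.floatX)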
In [74]:
counter = th.shared(0)
f = function(inputs=[], outputs=None, updates=[(counter, counter + 1)])
In [78]:
print(counter.get_value())
f()
Out[78]:
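An added illustration, assuming the cells above ran in this order: each call to f applies the update, and set_value resets the shared state.
In [ ]:
# Sketch: repeated calls accumulate into the shared counter
for _ in range(3):
    f()
print(counter.get_value())
counter.set_value(0)  # reset the counter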
In [83]:
def _compression_theano(mat, rank):
    # Hedged completion of an unfinished cell: reuse compression_th from In [11].
    return compression_th(mat, rank)