In [1]:
import numpy as np
import theano.tensor as T
from theano import shared, function
rng = np.random.RandomState(123)

In [2]:
# Create a sample logistic regression problem.
true_w = rng.randn(100)
true_b = rng.randn()
xdata = rng.randn(50, 100)
ydata = (np.dot(xdata, true_w) + true_b) > 0.0

In [7]:
print xdata.shape
print ydata.shape


(50, 100)
(50,)

In [9]:
# Step 1. Declare Theano variables
x = T.dmatrix()
y = T.dvector()
w = shared(rng.randn(100))
b = shared(np.zeros(()))
print "Initial model"
print w.get_value()
print b.get_value()


Initial model
[  6.61059266e-01   8.80420889e-01  -3.67921474e-01   1.20765018e+00
  -2.14610529e-01  -1.19636732e+00   1.18865883e-01   7.33034960e-01
  -6.98609914e-01  -1.36392378e+00   1.16824539e-03  -1.03206682e-01
  -7.87036977e-01   1.33362070e+00   8.17370853e-01   1.00688984e+00
  -1.28817580e+00  -2.83361054e-01  -4.83204073e-01  -3.75577195e-01
   1.02197703e+00  -5.67505190e-01  -6.41691769e-01   1.03204580e+00
   1.79862507e+00   1.30371339e+00  -9.02156610e-01  -2.03707648e+00
  -7.72719144e-01  -1.29461239e-01   3.22318779e-01  -5.05242963e-01
   1.14594573e+00   5.76318198e-01   1.22296878e-02  -1.41357054e+00
   1.15908539e+00  -4.40720242e-02  -1.77498582e+00   1.22743304e+00
   1.89512652e+00  -2.24029939e-01  -1.02021225e+00   1.11353251e+00
   1.43122637e+00   7.08663795e-01  -2.06833658e+00  -6.41221430e-01
  -9.05293785e-01   5.65369342e-01   1.47212572e+00   1.13207895e-02
  -1.54788945e-01  -1.47914598e+00  -1.33069579e+00   4.35851675e-03
   1.43251885e+00  -4.92613717e-01  -5.81063848e-01  -1.09153678e+00
  -1.27317059e+00   9.55086031e-01  -2.53030805e-01   8.21210582e-01
  -1.84118581e+00  -3.58702745e-01   1.50412867e+00  -1.16200496e+00
  -1.02486821e+00  -1.26812652e-01  -8.66453050e-03   2.88198454e-01
  -1.18904933e-01   5.77063623e-02   7.95361677e-03  -9.18402661e-01
  -4.45241436e-01  -1.34150481e+00   1.87076505e-01  -8.86069228e-01
   5.39031181e-01   5.23505059e-01  -2.10460840e+00   8.34642948e-01
   6.79722952e-01   1.07699547e-01  -2.19304814e+00  -1.87515483e+00
   7.94940943e-01  -5.14104275e-01   2.91403153e-01  -2.82796468e+00
   4.78010635e-01  -7.36536398e-01  -7.67162750e-01   9.75267569e-01
  -3.62772390e-01   3.54360101e-01  -8.55132332e-01   1.94588392e+00]
0.0

In [10]:
# Step 2. Construct Theano expression graph
p_1 = 1 / (1 + T.exp(-T.dot(x, w) - b))
xent = -y * T.log(p_1) - (1 - y) * T.log(1 - p_1)
prediction = p_1 > 0.5
cost = xent.mean() + 0.01 * (w ** 2).sum()
gw, gb = T.grad(cost, [w, b])

In [11]:
# Step 3. Compile expressions to functions
train = function(inputs=[x, y],
                 outputs=[prediction, xent],
                 updates={w:w - 0.1 * gw,
                          b:b - 0.1 * gb})


/Users/dikien/anaconda/lib/python2.7/site-packages/IPython/kernel/__main__.py:5: UserWarning: The parameter 'updates' of theano.function() expects an OrderedDict, got <type 'dict'>. Using a standard dictionary here results in non-deterministic behavior. You should use an OrderedDict if you are using Python 2.7 (theano.compat.python2x.OrderedDict for older python), or use a list of (shared, update) pairs. Do not just convert your dictionary to this type before the call as the conversion will still be non-deterministic.

In [12]:
# Step 4. Perform computation
for loop in range(100):
    pval, xval = train(xdata, ydata)
    print xval.mean()


3.56002568818
3.45421095955
3.3502921538
3.24825434089
3.14810517968
3.04986922862
2.95358263204
2.85928854187
2.76703339608
2.67686394499
2.58882477064
2.50295596218
2.41929058046
2.33785168081
2.25864911398
2.18167698303
2.10691298547
2.03432038932
1.96385219392
1.89545593769
1.82907741597
1.76466230259
1.70215573756
1.6415007364
1.58263652986
1.52549773193
1.47001478408
1.41611563667
1.36372833056
1.31278390263
1.26321911555
1.21497858834
1.16801609445
1.12229497951
1.07778781896
1.03447556326
0.992346485795
0.951395238177
0.911622224404
0.873033351425
0.835640045626
0.799459296496
0.764513437272
0.730829403771
0.698437308016
0.667368293676
0.637651779303
0.609312321721
0.582366427491
0.55681968859
0.532664607464
0.509879403351
0.488427963664
0.468260940256
0.449317820898
0.431529669144
0.414822154808
0.399118507231
0.384342102934
0.370418515951
0.357276976755
0.344851277368
0.333080215306
0.321907689952
0.311282561533
0.301158365789
0.291492955489
0.28224811851
0.273389204169
0.264884775692
0.256706296983
0.248827855617
0.241225920327
0.233879129528
0.226768106899
0.219875300263
0.213184840586
0.206682418618
0.200355177368
0.19419161918
0.188181526617
0.182315896617
0.17658688757
0.170987778937
0.165512942915
0.160157827346
0.154918948597
0.14979389245
0.144781320108
0.139880975268
0.135093686737
0.130421359546
0.1258669459
0.121434386026
0.117128508477
0.112954880135
0.108919598834
0.105029026426
0.101289467478
0.0977068079676