In [75]:
import numpy as np
import theano
from theano import tensor as T


W_values = np.array([[1,2],[1,1]], dtype=theano.config.floatX)
bvis_values = np.array([1,1], dtype=theano.config.floatX)
bhid_values = np.array([2,3], dtype=theano.config.floatX)

W = theano.shared(W_values) # we assume that ``W_values`` contains the
                            # initial values of your weight matrix
bvis = theano.shared(bvis_values)
bhid = theano.shared(bhid_values)

def t_propup(vis, vis_sum):
    # hidden pre-activation: linear projection through W, plus a bias term
    # built as an outer product, so row i's bias is bhid scaled by vis_sum[0, i]
    pre_sigmoid_activation = T.dot(vis, W) + T.dot(bhid.reshape([1, bhid.shape[0]]).T, vis_sum).T
    return [pre_sigmoid_activation, T.nnet.sigmoid(pre_sigmoid_activation)]

t_ipt = T.matrix()
# row sums of the input, reshaped into a (1, n_rows) row vector
t_ipt_sum = t_ipt.sum(axis=1).reshape([1, t_ipt.shape[0]])

t_results, t_updates = theano.scan(fn=t_propup,
                                   non_sequences=[t_ipt, t_ipt_sum],
                                   n_steps=1)

tmp_f = theano.function([t_ipt], t_results, updates=t_updates)
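
Since n_steps=1, the scan contributes nothing here and the same graph can be built directly. A minimal unexecuted sketch (assuming the same W and bhid), with the bias term rewritten as an outer product between the row sums and bhid, which is what the reshape/dot/transpose above computes:

In [ ]:
# hypothetical scan-free equivalent of tmp_f:
# T.outer(t_ipt.sum(axis=1), bhid)[i, j] == t_ipt[i].sum() * bhid[j],
# the same values as T.dot(bhid.reshape(...).T, vis_sum).T
t_pre = T.dot(t_ipt, W) + T.outer(t_ipt.sum(axis=1), bhid)
tmp_f_direct = theano.function([t_ipt], [t_pre, T.nnet.sigmoid(t_pre)])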

In [11]:
tmp = np.array([[10,10],[-10,-10],[-10,-10]], dtype = theano.config.floatX)

In [18]:
tmp_f(tmp)


Out[18]:
[array([[[ 60.,  20.],
         [-60., -20.],
         [-60., -20.]]], dtype=float32),
 array([[[  1.00000000e+00,   1.00000000e+00],
         [  8.75653169e-27,   2.06115347e-09],
         [  8.75653169e-27,   2.06115347e-09]]], dtype=float32)]
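
The saturated values in Out[18] are just the sigmoid evaluated at the pre-activations: for large negative x, sigmoid(x) = 1/(1 + e^(-x)) is approximately e^x, so sigmoid(-60) ~ e^-60 ~ 8.76e-27 and sigmoid(-20) ~ e^-20 ~ 2.06e-09. A quick plain-numpy check (hypothetical cell, not part of the original run):

In [ ]:
# sigmoid(x) = 1 / (1 + exp(-x)); for x << 0 this is ~ exp(x)
1.0 / (1.0 + np.exp(-np.array([60., 20., -60., -20.])))
# -> [1., 1., 8.757e-27, 2.061e-09], matching the rows of Out[18]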

In [78]:
def propdown(hid):
    # visible pre-activation and its softmax
    pre_softmax_activation = T.dot(hid, W.T) + bvis  # [edited]
    return [pre_softmax_activation, T.nnet.softmax(pre_softmax_activation)]

ipt = T.matrix()

results, updates = theano.scan(fn=propdown,
                               non_sequences=ipt,
                               n_steps=1)

tmp_f2 = theano.function([ipt], results, updates=updates)
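
As before, n_steps=1 makes the scan a no-op, so a direct version builds the same graph; a sketch under the same W and bvis (hypothetical, unexecuted):

In [ ]:
# scan-free equivalent of tmp_f2
pre = T.dot(ipt, W.T) + bvis
tmp_f2_direct = theano.function([ipt], [pre, T.nnet.softmax(pre)])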

In [79]:
tmp_f2(np.array([[1,1],[0,0],[0,1],[1,0]], dtype = theano.config.floatX))


Out[79]:
[array([[[ 4.,  3.],
         [ 1.,  1.],
         [ 3.,  2.],
         [ 2.,  2.]]], dtype=float32), array([[[ 0.7310586 ,  0.26894143],
         [ 0.5       ,  0.5       ],
         [ 0.7310586 ,  0.26894143],
         [ 0.5       ,  0.5       ]]], dtype=float32)]
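
Each row of the softmax output sums to 1, because softmax normalizes across the units in a row; an elementwise sigmoid on the same pre-activations would not. Compare (hypothetical cell):

In [ ]:
# sigmoid is elementwise, so rows need not sum to 1:
# sigmoid([4, 3]) ~ [0.982, 0.953], versus softmax([4, 3]) ~ [0.731, 0.269]
T.nnet.sigmoid(np.array([[4., 3.]], dtype=theano.config.floatX)).eval()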

In [76]:
(T.dot(np.array([[1,20],[0,0],[0,1],[1,0]], dtype = theano.config.floatX), W.T) + bhid).eval()


Out[76]:
array([[ 43.,  24.],
       [  2.,   3.],
       [  4.,   4.],
       [  3.,   4.]], dtype=float32)

In [77]:
T.nnet.softmax((T.dot(np.array([[1,20],[0,0],[0,1],[1,0]], dtype = theano.config.floatX), W.T) + bhid).eval()).eval()


Out[77]:
array([[  1.00000000e+00,   5.60279645e-09],
       [  2.68941432e-01,   7.31058598e-01],
       [  5.00000000e-01,   5.00000000e-01],
       [  2.68941432e-01,   7.31058598e-01]], dtype=float32)
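
With only two units, softmax reduces to a sigmoid of the score difference: softmax([a, b])[0] = 1/(1 + e^(b-a)) = sigmoid(a - b). That is why the [2, 3] row above comes out as [sigmoid(-1), sigmoid(1)] = [0.269, 0.731]. A quick check (hypothetical cell):

In [ ]:
# two-class softmax == sigmoid of the difference
a, b = 2.0, 3.0
print(np.exp(a) / (np.exp(a) + np.exp(b)))  # 0.26894142...
print(1.0 / (1.0 + np.exp(b - a)))          # same value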

In [39]:
T.nnet.softmax(np.array([0,0,0,1], dtype = theano.config.floatX)).eval()


Out[39]:
array([[ 0.1748777 ,  0.1748777 ,  0.1748777 ,  0.47536689]], dtype=float32)
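
This last result follows directly from the definition: softmax([0, 0, 0, 1]) = [1, 1, 1, e] / (3 + e), so the first three entries are 1/(3 + e) ~ 0.17488 and the last is e/(3 + e) ~ 0.47537. Verified in plain numpy (hypothetical cell):

In [ ]:
# softmax by hand: exponentiate, then normalize
z = np.exp(np.array([0., 0., 0., 1.]))
z / z.sum()  # -> [0.17488, 0.17488, 0.17488, 0.47537]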