In [1]:
# Re-point the /usr/local/cuda symlink at the CUDA 7.5 install
!sudo unlink /usr/local/cuda
!sudo ln -s /usr/local/cuda-7.5 /usr/local/cuda

In [2]:
import tensorflow as tf
import numpy as np

In [3]:
NUM_CORES = 6

# inter_op threads run independent ops concurrently; intra_op threads
# parallelise the work inside a single op (e.g. a large matmul)
config = tf.ConfigProto(
    inter_op_parallelism_threads=NUM_CORES,
    intra_op_parallelism_threads=NUM_CORES
)

In [4]:
# Create the placeholders. We give them names for more informative error
# messages, and shapes so that TensorFlow can do static shape checking
x = tf.placeholder(tf.float32, shape=(1, 10), name='x')
W = tf.placeholder(tf.float32, shape=(10, 4), name='W')
b = tf.placeholder(tf.float32, shape=(1,  4), name='b')
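
In [ ]:
# Because the shapes are declared above, a mismatch is caught when the graph
# is built rather than when it runs. A hypothetical sketch (commented out,
# since building it raises a ValueError):
# bad_W = tf.placeholder(tf.float32, shape=(5, 4), name='bad_W')
# y_bad = tf.matmul(x, bad_W)   # ValueError: incompatible shapes (1, 10) x (5, 4)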

In [5]:
# The weighted sum (fan-in) at the summing junction: y = xW + b
y = tf.matmul(x, W) + b
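
In [ ]:
# Sanity check (a sketch, not part of the original run): the same affine
# map in plain NumPy, with inputs shaped like the placeholders above
x_np = np.random.rand(1, 10)
W_np = np.random.rand(10, 4)
b_np = np.random.rand(1, 4)
y_np = x_np.dot(W_np) + b_np   # shape (1, 4), same as tf.matmul(x, W) + b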

In [6]:
# The activation function
a = tf.nn.sigmoid(y)
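
In [ ]:
# The sigmoid is just 1 / (1 + e^(-y)); a NumPy sketch of what
# tf.nn.sigmoid computes elementwise
def sigmoid_np(y):
    return 1.0 / (1.0 + np.exp(-y))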

In [7]:
# Adding a softmax layer on top of the activations
m = tf.nn.softmax(a)
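
In [ ]:
# Softmax exponentiates and normalises, so the outputs sum to 1 and can be
# read as a probability distribution. A NumPy sketch for a single-row input:
def softmax_np(a):
    e = np.exp(a - a.max())   # subtracting the max improves numerical stability
    return e / e.sum()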

In [8]:
# Run the graph in a session, feeding concrete values into the placeholders
with tf.Session(config=config) as s:
    s.run(tf.initialize_all_variables())  # a no-op here (no tf.Variables), but harmless
    
    # Let's create some NumPy matrices.
    # This is for a single layer of four neurons
    W_in = np.random.rand(10, 4)
    x_in = np.random.rand(1, 10)
    b_in = np.random.rand(1,  4)
    
    val = s.run(m,
        feed_dict={
            x: x_in,
            W: W_in,
            b: b_in
        })
    
    print(val)


[[ 0.25243405  0.24619542  0.25511307  0.24625748]]
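
In [ ]:
# Because of the softmax, the four outputs should sum to (approximately) 1
print(val.sum())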

In [ ]: