In [1]:
!sudo unlink /usr/local/cuda
!sudo ln -s /usr/local/cuda-7.5 /usr/local/cuda
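A quick, optional check that the symlink now points at CUDA 7.5 (the second command assumes nvcc is on the PATH):
In [ ]:
!ls -l /usr/local/cuda
!nvcc --version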
In [2]:
import tensorflow as tf
import numpy as np
In [3]:
NUM_CORES = 6
config = tf.ConfigProto(
    inter_op_parallelism_threads=NUM_CORES,
    intra_op_parallelism_threads=NUM_CORES
)
In [4]:
# Create the placeholders. We give them names for more informative error
# messages, and explicit shapes so that TensorFlow can do static shape checking.
x_0 = tf.placeholder(tf.float32, shape=(1, 10), name='x_0')
W_0 = tf.placeholder(tf.float32, shape=(10, 4), name='W_0')
b_0 = tf.placeholder(tf.float32, shape=(1, 4), name='b_0')
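As a quick illustration of that static checking (not part of the network itself), an op whose declared shapes are incompatible is rejected when the graph is built, before any data is fed:
In [ ]:
# Illustration only: (10, 4) x (1, 10) has mismatched inner dimensions (4 vs 1),
# so shape inference raises a ValueError at graph-construction time.
try:
    tf.matmul(W_0, x_0)
except ValueError as e:
    print(e)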
In [5]:
# The fan-in to the summing junction and the summing operation: x_0 W_0 + b_0.
y_0 = tf.matmul(x_0, W_0) + b_0
In [6]:
# The activation function
a_0 = tf.nn.sigmoid(y_0)
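For reference, this first layer computes nothing more than a sigmoid applied to an affine map. A pure-NumPy sketch of the same computation, using hypothetical arrays x, W and b with the shapes declared above:
In [ ]:
# NumPy version of layer 0: sigmoid(x W + b); the result has shape (1, 4).
x = np.random.rand(1, 10)
W = np.random.rand(10, 4)
b = np.random.rand(1, 4)
print(1.0 / (1.0 + np.exp(-(np.dot(x, W) + b))))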
In [7]:
# Now for the second layer. No x_1 placeholder is needed: the first layer's
# activation a_0 will feed this layer directly.
# x_1 = tf.placeholder(tf.float32, shape=(1, 4), name='x_1')
W_1 = tf.placeholder(tf.float32, shape=(4, 2), name='W_1')
b_1 = tf.placeholder(tf.float32, shape=(1, 2), name='b_1')
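Since a_0 stands in for what x_1 would have been, its static shape should already be the (1, 4) expected here:
In [ ]:
print(a_0.get_shape())  # (1, 4): the output of layer 0 feeds layer 1 directly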
In [8]:
# The fan-in to the second summing junction and the summing operation: a_0 W_1 + b_1.
y_1 = tf.matmul(a_0, W_1) + b_1
In [9]:
# The activation function
a_1 = tf.nn.sigmoid(y_1)
In [10]:
# Adding a softmax filter
m = tf.nn.softmax(a_1)
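The softmax maps the (1, 2) activation to positive values that sum to one. A quick NumPy sketch of the same operation on a hypothetical row vector z:
In [ ]:
# Softmax: exp(z_i) / sum_j exp(z_j); the entries of p sum to 1.
z = np.array([[0.3, 0.7]])
p = np.exp(z) / np.sum(np.exp(z))
print(p, p.sum())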
In [11]:
# Run the graph, feeding concrete values into the placeholders.
with tf.Session(config=config) as s:
    # There are no tf.Variables in this graph, so this is effectively a no-op,
    # but it is a good habit before running a graph.
    s.run(tf.initialize_all_variables())
    # Create some numpy matrices: random inputs, weights and biases for both layers.
    W_0_in = np.random.rand(10, 4)
    x_0_in = np.random.rand(1, 10)
    b_0_in = np.random.rand(1, 4)
    W_1_in = np.random.rand(4, 2)
    b_1_in = np.random.rand(1, 2)
    val = s.run(m,
                feed_dict={
                    x_0: x_0_in,
                    W_0: W_0_in,
                    b_0: b_0_in,
                    W_1: W_1_in,
                    b_1: b_1_in
                })
    print(val)
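As an optional sanity check, the same forward pass can be reproduced in plain NumPy with the matrices that were fed above (this sketch assumes the *_in arrays from the previous cell are still in scope, and defines a small sigmoid helper of its own):
In [ ]:
# Reproduce the forward pass in NumPy; the result should match val above
# up to floating-point precision.
def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

a0 = sigmoid(np.dot(x_0_in, W_0_in) + b_0_in)
a1 = sigmoid(np.dot(a0, W_1_in) + b_1_in)
print(np.exp(a1) / np.sum(np.exp(a1)))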
In [ ]: