In [1]:
## Source: http://adventuresinmachinelearning.com/python-tensorflow-tutorial/
## Computes a = (b + c) * (c + 2),
## i.e. a = d * e where d = b + c and e = c + 2
import tensorflow as tf

In [5]:
# Graph inputs: one constant and two variables (variables require an
# initial value at declaration time).
const = tf.constant(2.0, name='const')

b = tf.Variable(2.0, name='b')
c = tf.Variable(1.0, name='c')

# Graph operations implementing a = (b + c) * (c + 2).
d = tf.add(b, c, name='d')
e = tf.add(c, const, name='e')
a = tf.multiply(d, e, name='a')

# Op that, when run inside a session, initialises every variable in the graph.
init_op = tf.global_variables_initializer()

# Launch the graph: initialise the variables, then evaluate node `a`.
with tf.Session() as sess:
    sess.run(init_op)
    result = sess.run(a)
    print('Variable is {}'.format(result))


Variable is 9.0

The difference is that with `tf.Variable` you must provide an initial value when you declare it. With `tf.placeholder` no initial value is needed; instead, you supply one at run time through the `feed_dict` argument of `Session.run`.


In [8]:
import numpy as np

# 4x4 placeholder whose value is fed in at run time via feed_dict.
x = tf.placeholder(tf.float32, shape=(4, 4))
# Graph node: the matrix product x @ x.
y = tf.matmul(x, x)

with tf.Session() as sess:
    sample = np.random.rand(4, 4)
    print(sess.run(y, feed_dict={x: sample}))


[[ 0.90976769  0.52786791  0.54067051  0.76289129]
 [ 0.87171447  0.72640431  0.75379997  0.6465416 ]
 [ 1.0617522   0.66384751  0.82435614  0.90027297]
 [ 0.49445933  0.40498492  0.35097817  0.35593358]]

In [10]:
# Placeholder with an unknown leading dimension: any number of rows, 1 column.
b = tf.placeholder(tf.float32, [None, 1], name='b')

# A rank-3 array of shape (2, 3, 1).
x = np.array([
    [[1], [2], [3]],
    [[4], [5], [6]],
])

In [11]:
x.shape


Out[11]:
(2, 3, 1)

In [12]:
x


Out[12]:
array([[[1],
        [2],
        [3]],

       [[4],
        [5],
        [6]]])

In [13]:
# Placeholders whose values are supplied at run time via feed_dict.
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)

# Graph node: element-wise sum of the two placeholder tensors.
adder_node = a + b

# Use a context manager so the session is closed automatically instead
# of being leaked (the original created a Session and never closed it).
with tf.Session() as sess:
    print(sess.run(adder_node, {a: [1, 3], b: [2, 4]}))


[ 3.  7.]

In [15]:
import tensorflow as tf

# Model parameters.
# BUG FIX: tf.Variable's second *positional* argument is `trainable`,
# not `dtype`. The original passed tf.float32 positionally, which (being
# truthy) silently behaved like trainable=True and never set the dtype.
# Pass dtype by keyword instead.
W = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)

# Inputs and outputs.
x = tf.placeholder(tf.float32)
y = tf.placeholder(tf.float32)  # ground-truth output we already know

linear_model = W * x + b

# Loss function: sum of squared residuals.
squared_delta = tf.square(linear_model - y)
loss = tf.reduce_sum(squared_delta)

# Optimizer: plain gradient descent with learning rate 0.01.
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)

init = tf.global_variables_initializer()

# Use a context manager so the session is closed after training.
with tf.Session() as sess:
    sess.run(init)
    # Fit y = W*x + b to four points; the exact solution is W=-1, b=1.
    for i in range(1000):
        sess.run(train, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]})

    print(sess.run([W, b]))


[array([-0.9999969], dtype=float32), array([ 0.99999082], dtype=float32)]

In [ ]: