01 TensorFlow Basics


In [1]:
# import
import tensorflow as tf

Tensors

A tensor is an n-dimensional array of values
0-d tensor: scalar (number)
1-d tensor: vector
2-d tensor: matrix
...

Tensor's Rank

The number of dimensions in a tensor (a quick check with tf.rank follows the examples below).
3: a rank 0 tensor - a scalar with shape []
[1,2,3]: a rank 1 tensor - a vector with shape [3]
[[1,2,3], [4,5,6]]: a rank 2 tensor - a matrix with shape [2,3]
[[[1,2,3]], [[7,8,9]]]: a rank 3 tensor with shape [2,1,3]
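
A minimal sketch (not one of the original cells) that checks these ranks with tf.rank and the static shape attribute:

import tensorflow as tf

scalar = tf.constant(3)                       #rank 0, shape []
vector = tf.constant([1, 2, 3])               #rank 1, shape [3]
matrix = tf.constant([[1, 2, 3], [4, 5, 6]])  #rank 2, shape [2, 3]

with tf.Session() as sess:
    print(sess.run([tf.rank(scalar), tf.rank(vector), tf.rank(matrix)]))  #[0, 1, 2]
    print(matrix.shape)  #(2, 3) - the static shape is known without running the graph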

Computational Graph

A series of TensorFlow operations arranged into a graph of nodes. To actually evaluate the nodes, we must run the computational graph within a session. A session encapsulates the control and state of the TensorFlow runtime.

In [2]:
#add a constant to the graph
hello = tf.constant("TensorFlow Playground")

#create tf session
sess = tf.Session()

#run the session
print(sess.run(hello))


b'TensorFlow Playground'

In [3]:
#tf.constant
a = tf.constant(3.0, tf.float32)  #dtype specified explicitly
b = tf.constant(5)                #dtype inferred (int32)

sess = tf.Session()

print(sess.run(a))
print(sess.run(b), b.dtype)


3.0
5 <dtype: 'int32'>
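
Note that a and b above end up with different dtypes (float32 vs int32), so they cannot be combined directly; tf.cast converts between dtypes. A minimal sketch (an aside, not one of the original cells):

#cast the int32 constant to float32 before combining it with a
b_float = tf.cast(b, tf.float32)

with tf.Session() as sess:
    print(sess.run(a + b_float))  #8.0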

In [4]:
#tf.constant for matrix multiplication
mat1 = tf.constant([[6., 0.]])
print("mat1 shape:", mat1.shape)

mat2 = tf.constant([[-0.5], [9]])
print("mat2 shape:", mat2.shape)

with tf.Session() as sess:
    prod = tf.matmul(mat1, mat2)
    print(sess.run(prod))
    print("finally shape", prod.shape)


mat1 shape: (1, 2)
mat2 shape: (2, 1)
[[-3.]]
product shape: (1, 1)
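
tf.matmul is a true matrix product, not to be confused with element-wise multiplication (tf.multiply, used in the next cell). A small sketch contrasting the two (an aside, not one of the original cells):

m1 = tf.constant([[1., 2.], [3., 4.]])
m2 = tf.constant([[10., 20.], [30., 40.]])

with tf.Session() as sess:
    print(sess.run(tf.multiply(m1, m2)))  #element-wise: [[10., 40.], [90., 160.]]
    print(sess.run(tf.matmul(m1, m2)))    #matrix product: [[70., 100.], [150., 220.]]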

In [5]:
#tf.placeholder
#a placeholder is a promise to provide a value later, when the graph is run
c = tf.placeholder(tf.float32) 
d = tf.placeholder(tf.float32)

#operation 
addition = tf.add(c,d)
product = tf.multiply(c,d)

sess = tf.Session()

print(sess.run(addition, feed_dict={c: 10, d: -2}))
print(sess.run(product, {c: 25, d: 1.2}))


8.0
30.0
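
The same placeholders accept tensors of any shape, so whole vectors can be fed at once. A minimal sketch (an assumption, not one of the original cells):

with tf.Session() as sess:
    print(sess.run(addition, {c: [1., 2., 3.], d: [10., 20., 30.]}))  #[11. 22. 33.]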

In [6]:
#tf.Variables allow us to add trainable parameters to a graph. 
#They are constructed with a type and initial value:
w = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)
x = tf.placeholder(tf.float32)

model = w*x + b

#To initialize all the variables in a TensorFlow program, you must explicitly call a special operation as follows:
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

#Since x is a placeholder, we can evaluate model for several values of x simultaneously as follows:
print(sess.run(model, {x: [1,2,3,4]}))

#We've created a model, but we don't know how good it is yet. 
#To evaluate the model on training data, we need a y placeholder to provide the desired values, 
#and we need to write a loss function.
y = tf.placeholder(tf.float32)

#squaring the error
squared_deltas = tf.square(model - y)

#sum all the squared errors
loss = tf.reduce_sum(squared_deltas)

print(sess.run(loss, {x:[1,2,3,4], y:[0,-1,-2,-3]}))

sess.close()


[ 0.          0.30000001  0.60000002  0.90000004]
23.66
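
The loss value can be checked by hand: with w = 0.3 and b = -0.3 the model predicts [0, 0.3, 0.6, 0.9], the errors against y = [0, -1, -2, -3] are [0, 1.3, 2.6, 3.9], and the squared errors 0 + 1.69 + 6.76 + 15.21 sum to 23.66. The same arithmetic in NumPy (an aside, not one of the original cells):

import numpy as np

w0, b0 = 0.3, -0.3
x0 = np.array([1., 2., 3., 4.])
y0 = np.array([0., -1., -2., -3.])

pred = w0 * x0 + b0              #[0.  0.3 0.6 0.9]
print(np.sum((pred - y0) ** 2))  #~23.66, matching the loss above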

In [7]:
#once the optimal parameters are known (w = -1, b = 1 fit this data exactly),
#we can set them explicitly on the tf.Variables with tf.assign
fixw = tf.assign(w, [-1.])
fixb = tf.assign(b, [1.])

sess = tf.Session()
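#running the assign ops writes the values directly, so no separate initializer is needed here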
sess.run([fixw, fixb])

print(sess.run(loss,{x:[1,2,3,4], y:[0,-1,-2,-3]}))

sess.close()


0.0
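
The assigned values live only in that session; in a fresh session, running the initializer gives the variables their declared initial values again. A minimal sketch (an assumption, not one of the original cells):

with tf.Session() as sess:
    sess.run(init)
    print(sess.run([w, b]))  #back to roughly [0.3] and [-0.3]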

Complete Program


In [8]:
#imports
import numpy as np
import tensorflow as tf

#model parameters
w = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([.3], dtype=tf.float32)

#model input and output
x = tf.placeholder(tf.float32)
model = w * x + b
y = tf.placeholder(tf.float32)

#loss
loss = tf.reduce_sum(tf.square(model-y))

#optimiser
optimiser = tf.train.GradientDescentOptimizer(0.01)
train = optimiser.minimize(loss)

#training data
x_train = [1,2,3,4]
y_train = [0,-1,-2,-3]

#initialise the variables
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

#training loop
for i in range(1000):
    sess.run(train, {x:x_train, y:y_train})

#evaluate the learned parameters and the final loss
final_w, final_b, final_loss = sess.run([w,b,loss], {x:x_train, y:y_train})
print("w: %s b: %s loss: %s" %(final_w, final_b, final_loss))
sess.close()


w: [-0.99999791] b: [ 0.99999392] loss: 2.52847e-11
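
For longer runs it helps to watch the loss shrink during training. A variant of the loop above with periodic logging (a sketch, not part of the original cell):

sess = tf.Session()
sess.run(init)

for i in range(1000):
    sess.run(train, {x: x_train, y: y_train})
    if i % 200 == 0:
        print("step %d, loss %s" % (i, sess.run(loss, {x: x_train, y: y_train})))

sess.close()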