In [1]:
# import
import tensorflow as tf
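#note: this notebook uses the TensorFlow 1.x graph API (tf.Session, tf.placeholder, etc.)
#a minimal sketch, assuming TensorFlow 2.x is installed instead, is to import the v1
#compatibility layer so the remaining cells run unchanged:
#import tensorflow.compat.v1 as tf
#tf.disable_v2_behavior()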
In [2]:
#add a constant to the graph
hello = tf.constant("TensorFlow Playground")
#create tf session
sess = tf.Session()
#run the session
print(sess.run(hello))
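#in Python 3 this prints b'TensorFlow Playground' (string tensors come back as bytes)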
In [3]:
#tf.constant
a = tf.constant(3.0, tf.float32) #dtype specified explicitly
b = tf.constant(5)               #dtype inferred (int32)
sess = tf.Session()
print(sess.run(a))
print(sess.run(b), b.dtype)
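#a and b have different dtypes (float32 vs int32); TensorFlow does not promote them
#automatically, so combining them needs an explicit cast. a small sketch (not in the
#original), reusing the session above:
print(sess.run(tf.add(a, tf.cast(b, tf.float32))))   #8.0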
In [4]:
#tf.constant for matrix multiplication
mat1 = tf.constant([[6., 0.]])
print("mat1 shape:", mat1.shape)
mat2 = tf.constant([[-0.5], [9.]])
print("mat2 shape:", mat2.shape)
with tf.Session() as sess:
    prod = tf.matmul(mat1, mat2)
    print(sess.run(prod))
    print("product shape:", prod.shape)
In [5]:
#tf.placeholder
#a placeholder is a promise to provide a value later, at run time via feed_dict
c = tf.placeholder(tf.float32)
d = tf.placeholder(tf.float32)
#operation
addition = tf.add(c,d)
product = tf.multiply(c,d)
sess = tf.Session()
print(sess.run(addition, feed_dict={c: 10, d: -2}))
print(sess.run(product, {c: 25, d: 1.2}))
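#placeholders with no fixed shape also accept arrays; a small sketch with assumed
#inputs (not in the original), reusing the session above:
print(sess.run(addition, {c: [1., 2., 3.], d: [10., 20., 30.]}))   #[11. 22. 33.]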
In [6]:
#tf.Variables allow us to add trainable parameters to a graph.
#They are constructed with a type and initial value:
w = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)
x = tf.placeholder(tf.float32)
model = w*x + b
#To initialize all the variables in a TensorFlow program, you must explicitly call a special operation as follows:
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
#Since x is a placeholder, we can evaluate model for several values of x simultaneously as follows:
print(sess.run(model, {x: [1,2,3,4]}))
#We've created a model, but we don't know how good it is yet.
#To evaluate the model on training data, we need a y placeholder to provide the desired values,
#and we need to write a loss function.
y = tf.placeholder(tf.float32)
#squaring the error
squared_deltas = tf.square(model - y)
#sum all the squared errors
loss = tf.reduce_sum(squared_deltas)
print(sess.run(loss, {x:[1,2,3,4], y:[0,-1,-2,-3]}))
sess.close()
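#a quick NumPy sanity check (not in the original): with w = 0.3 and b = -0.3 the model
#predicts [0.0, 0.3, 0.6, 0.9], so the squared errors are [0, 1.69, 6.76, 15.21]
import numpy as np
w0, b0 = 0.3, -0.3
x0 = np.array([1., 2., 3., 4.])
y0 = np.array([0., -1., -2., -3.])
print(np.sum((w0 * x0 + b0 - y0) ** 2))   #23.66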
In [7]:
#after finding the optimal parameters we can assign them to our tf.Variables using tf.assign
fixw = tf.assign(w, [-1.])
fixb = tf.assign(b, [1.])
sess = tf.Session()
sess.run([fixw, fixb])
print(sess.run(loss,{x:[1,2,3,4], y:[0,-1,-2,-3]}))
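#with w = -1 and b = 1 the model outputs [0, -1, -2, -3], which matches y exactly, so the printed loss is 0.0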
sess.close()
In [8]:
#imports
import numpy as np
import tensorflow as tf
#model parameters
w = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([.3], dtype=tf.float32)
#model input and output
x = tf.placeholder(tf.float32)
model = w * x + b
y = tf.placeholder(tf.float32)
#loss
loss = tf.reduce_sum(tf.square(model-y))
#optimiser
optimiser = tf.train.GradientDescentOptimizer(0.01)
train = optimiser.minimize(loss)
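#minimize() is shorthand for computing and then applying the gradients; an equivalent
#two-step sketch with the same optimiser:
#  grads_and_vars = optimiser.compute_gradients(loss)
#  train = optimiser.apply_gradients(grads_and_vars)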
#training data
x_train = [1,2,3,4]
y_train = [0,-1,-2,-3]
#initialise the variables
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
#training loop
for i in range(1000):
    sess.run(train, {x:x_train, y:y_train})
#final parameters and training loss
final_w, final_b, final_loss = sess.run([w,b,loss], {x:x_train, y:y_train})
print("w: %s b: %s loss: %s" %(final_w, final_b, final_loss))
sess.close()