In [2]:
import tensorflow as tf
In [10]:
# Two constant graph nodes; node2's dtype also defaults to tf.float32.
node1 = tf.constant(value=3.0, dtype=tf.float32)
node2 = tf.constant(value=4.0)
In [11]:
# Open a session and evaluate both constant nodes in a single run call.
sess = tf.Session()
values = sess.run([node1, node2])
print(values)
In [12]:
print(sess.run([node1,node2]))
In [13]:
# The + operator on tensors dispatches to tf.add, so this builds the
# same graph node as tf.add(node1, node2).
node3 = node1 + node2
print("Addition is : ", sess.run([node3]))
In [106]:
# Trainable slope (M) and intercept (C), plus a placeholder for inputs x.
M = tf.Variable(initial_value=0.6, dtype=tf.float32)
C = tf.Variable(initial_value=-0.6, dtype=tf.float32)
x = tf.placeholder(dtype=tf.float32)
In [107]:
# Linear model y = M*x + C, written with explicit ops; the * and +
# operators on tensors dispatch to tf.multiply and tf.add anyway.
y = tf.add(tf.multiply(M, x), C)
init = tf.global_variables_initializer()
sess.run(init)
In [108]:
print(sess.run(y, {x : [1,2,3,4]}))
A loss function is created and used to evaluate how well the model's predictions match the desired outputs.
In [109]:
# Loss: sum of squared errors between the predictions y and the
# desired outputs supplied through placeholder B.
B = tf.placeholder(dtype=tf.float32)
loss_function = tf.reduce_sum(tf.square(y - B))
In [110]:
print(sess.run(loss_function, {x : [1,2,3,4], B : [0,-5,-4,-3]}))
That is not acceptable: the loss is nearly 80, which is far too high. If we want to lower the loss, we need to correct the model's parameter values.
In [115]:
# Manually overwrite M and C with the known-good parameter values
# (1.0 is cast to float32 exactly as the original int literal was).
assign_M = tf.assign(M, -1.0)
assign_C = tf.assign(C, 1.0)
sess.run([assign_M, assign_C])
Out[115]:
In [116]:
print(sess.run(loss_function, {x : [1,2,3,4], B : [0,-5,-4,-3]}))
See, the loss is lower than what we got before.
In [118]:
# Gradient-descent optimizer; `train` is the op that updates M and C
# one step in the direction that reduces the loss.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train = optimizer.minimize(loss_function)
In [121]:
# Reset M and C to their initial (incorrect) values, then run 1000
# gradient-descent steps on the four training examples.
sess.run(init)
for _ in range(1000):  # loop index is unused, so name it `_`
    sess.run(train, {x: [1, 2, 3, 4], B: [0, -5, -4, -3]})
In [122]:
print(sess.run([M, C]))
We weren't even close to guessing these values at the start, but we recovered the exact and accurate parameter values defined earlier through proper training of the model.
Let's sum up the steps of the linear model.
In [128]:
import tensorflow as tf

# --- Model parameters (trainable) ---
M = tf.Variable([0.6], dtype=tf.float32)
C = tf.Variable([-0.6], dtype=tf.float32)

# --- Model input and output ---
x = tf.placeholder(dtype=tf.float32)
y = M * x + C
B = tf.placeholder(dtype=tf.float32)  # desired outputs

# --- Loss: sum of squared residuals ---
squared_errors = tf.square(y - B)
loss_function = tf.reduce_sum(squared_errors)

# --- Optimizer: one gradient-descent step per run of `train` ---
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train = optimizer.minimize(loss_function)

# --- Training data ---
x_train = [1, 2, 3, 4]
y_train = [0, -5, -4, -3]
In [129]:
# training loop
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)  # reset M and C to their incorrect initial values
# Fix: feed y_train (defined with the training data above) instead of
# repeating the literal [0,-5,-4,-3]; the loop index is unused, so `_`.
for _ in range(1000):
    sess.run(train, {x: x_train, B: y_train})
In [130]:
# evaluate training accuracy
curr_M, curr_C, curr_loss_f = sess.run([M, C, loss_function], {x: x_train, B: y_train})
# Fix: label the printed values with the actual parameter names (M, C)
# instead of the leftover tutorial names (W, b), which were misleading.
print("M: %s C: %s loss: %s" % (curr_M, curr_C, curr_loss_f))
In [140]:
import tensorflow as tf
# NumPy is used below to build the training and evaluation arrays.
import numpy as np

# A single numeric feature column named "a". Feature columns describe the
# inputs an estimator expects; many richer column types exist for more
# complicated data.
feature_cols = [tf.feature_column.numeric_column(key="a", shape=[1])]

# Estimators are the high-level front end for training (fitting) and
# evaluation (inference). Predefined estimators cover linear regression,
# linear classification, and several neural-network models; this one
# performs linear regression.
estimator = tf.estimator.LinearRegressor(feature_columns=feature_cols)
In [143]:
# Build the data sets: one array pair for training, one for evaluation.
# numpy_input_fn wraps the arrays in an input function; batch_size sets how
# big each batch is and num_epochs how many passes over the data it yields
# (num_epochs=None repeats forever, which suits step-limited training).
a_train = np.array([0., 5., 3., 4.])
y_train = np.array([0., -6., -7., -3.])
a_eval = np.array([2., 5., 8., 10.])
y_eval = np.array([-1.01, -6.1, -7., 4.])

input_fun = tf.estimator.inputs.numpy_input_fn(
    x={"a": a_train}, y=y_train, batch_size=4, num_epochs=None, shuffle=True)
train_input_fun = tf.estimator.inputs.numpy_input_fn(
    x={"a": a_train}, y=y_train, batch_size=4, num_epochs=1000, shuffle=False)
eval_input_fun = tf.estimator.inputs.numpy_input_fn(
    x={"a": a_eval}, y=y_eval, batch_size=4, num_epochs=1000, shuffle=False)

# Run 1000 training steps over the (endlessly repeating) training input.
estimator.train(input_fn=input_fun, steps=1000)
Out[143]:
In [145]:
# Report how the trained model scores on the training and evaluation sets.
metrics_train = estimator.evaluate(input_fn=train_input_fun)
metrics_eval = estimator.evaluate(input_fn=eval_input_fun)
print("train metrics: %r" % metrics_train)
print("eval metrics: %r" % metrics_eval)
In [ ]: