In [1]:
import numpy as np
import tensorflow as tf
Taken from Andrew Ng's Coursera Deep Learning series
https://www.coursera.org/learn/deep-neural-network/lecture/zcZlH/tensorflow
In [4]:
# Implement the forward pass of the cost function;
# TensorFlow figures out backprop automatically
w = tf.Variable(0, dtype=tf.float32)
# cost = w^2 - 10w + 25 = (w - 5)^2, minimized at w = 5
cost = tf.add(tf.add(w ** 2, tf.multiply(-10.0, w)), 25)
learning_rate = 0.01
train = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
In [5]:
# Initialize all variables and start a session
init = tf.global_variables_initializer()
session = tf.Session()
session.run(init)
print(session.run(w))  # w is still at its initial value, 0.0
In [6]:
# One step of gradient descent: the gradient of (w - 5)^2 at w = 0 is -10,
# so w moves by -learning_rate * (-10) = 0.1
session.run(train)
print(session.run(w))
In [7]:
# Run 1000 iterations of gradient descent
for _ in range(1000):
    session.run(train)
print(session.run(w))
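Because the cost is (w - 5)^2, gradient descent should drive w toward the minimizer w = 5, so after 1000 iterations the printed value is very close to 5.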
An alternative way of defining the cost is to use Python's overloaded operators; TensorFlow overloads them to build the same graph operations.
In [8]:
# This is equivalent to
# tf.add(tf.add(w ** 2, tf.multiply(-10.0, w)), 25)
cost = w ** 2 - 10 * w + 25
In [12]:
coefficients = np.array([[1.], [-10.], [25.]])
# x is a placeholder: data fed into the graph at run time via feed_dict
x = tf.placeholder(tf.float32, [3, 1])
cost = x[0][0] * w ** 2 + x[1][0] * w + x[2][0]
train = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
init = tf.global_variables_initializer()
session = tf.Session()
session.run(init)
for _ in range(1000):
    session.run(train, feed_dict={x: coefficients})
print(session.run(w))
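Here the placeholder x plays the role of training data: the coefficients of the quadratic are fed in through feed_dict on each call to session.run, so the cost can be changed without rebuilding the graph.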
In [14]:
# The with statement closes the session cleanly,
# even if an exception is raised inside the block
with tf.Session() as session:
    session.run(init)
    for _ in range(1000):
        session.run(train, feed_dict={x: coefficients})
    print(session.run(w))
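The session/placeholder API above is TensorFlow 1.x. As an aside not part of the original lecture, and assuming TensorFlow 2.x is installed instead, the same minimization can be sketched with eager execution and tf.GradientTape roughly as follows.
In [ ]:
import tensorflow as tf

# Sketch only: TF 2.x equivalent of the example above
w = tf.Variable(0.0)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)

for _ in range(1000):
    with tf.GradientTape() as tape:
        cost = w ** 2 - 10 * w + 25   # (w - 5)^2, minimized at w = 5
    grads = tape.gradient(cost, [w])          # TensorFlow computes the backward pass
    optimizer.apply_gradients(zip(grads, [w]))

print(w.numpy())  # should be close to 5.0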