In [ ]:
"""tf.train API
TensorFlow provides optimizers that slowly change each variable in order to minimize the loss function. 
The simplest optimizer is gradient descent. 
It modifies each variable according to the magnitude of the derivative of loss with respect to that variable.
computing symbolic derivatives manually is tedious and error-prone. Consequently, TensorFlow can 
automatically produce derivatives given only a description of the model using the function tf.gradients.
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)
"""

In [ ]:
import numpy as np
import tensorflow as tf

In [ ]:
# Model parameters, deliberately initialized to poor values
W = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)

# Model input, output, and target
x = tf.placeholder(tf.float32)
linear_model = W * x + b
y = tf.placeholder(tf.float32)

# Loss: sum of squared errors
loss = tf.reduce_sum(tf.square(linear_model - y))

# Training op: one gradient-descent step with learning rate 0.01
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)

# Training data
x_train = [1, 2, 3, 4]
y_train = [0, -1, -2, -3]

# Initialize variables and run the training loop
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for i in range(1000):
    sess.run(train, {x: x_train, y: y_train})

# Evaluate the fitted parameters and final loss on the training data
curr_W, curr_b, curr_loss = sess.run([W, b, loss], {x: x_train, y: y_train})
print("W: %s b: %s loss: %s" % (curr_W, curr_b, curr_loss))