In [1]:
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

%matplotlib inline

np.set_printoptions(precision=2)

In [2]:
# Load the training data: each row of data.dat is one sample and the last
# column is the target.  unpack=True transposes the file, so `data` has one
# row per column of the file.
data = np.loadtxt('data.dat', unpack=True, dtype='float32')

x = data[:-1]  # feature rows, shape (num_features, num_samples)
y = data[-1]   # target row, shape (num_samples,)
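
A sketch of what data.dat might look like, inferred from the run below: the printed W has three entries, so the file presumably has three feature columns plus one target column. The first column is assumed here to be a constant 1 (a bias feature folded into W, which would also explain why the hypothesis below uses tf.matmul(W, x) with no separate bias term), and y = x2 + x3 matches the fitted weights of roughly [0, 1, 1]. The values and the file name are hypothetical, not part of the original data.

In [ ]:
# Hypothetical example of the assumed layout of data.dat; written to a
# separate file name so the real data file is left untouched.
example = np.array([
    #  x1   x2   x3    y
    [1.0, 1.0, 0.0, 1.0],
    [1.0, 0.0, 2.0, 2.0],
    [1.0, 3.0, 0.0, 3.0],
    [1.0, 0.0, 4.0, 4.0],
    [1.0, 5.0, 0.0, 5.0],
], dtype=np.float32)
np.savetxt('data_example.dat', example, fmt='%.1f')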

In [3]:
W = tf.Variable(tf.random_uniform([1, len(x)], -1, 1))  # one weight per feature
b = tf.Variable(tf.random_uniform([1], -1, 1))          # bias (not used in the hypothesis below)

In [4]:
hypothesis = tf.matmul(W, x)  # predictions, shape (1, num_samples)

cost = tf.reduce_mean(tf.square(hypothesis - y))  # mean squared error

a = 0.1  # learning rate, alpha
optimizer = tf.train.GradientDescentOptimizer(a)
train = optimizer.minimize(cost)  # goal is to minimize the cost
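
As a quick sanity check on the shapes involved (a sketch; the feature count of 3 is only inferred from the printed W below): W is (1, num_features), x is (num_features, num_samples), so tf.matmul(W, x) yields one prediction per sample and broadcasts against y.

In [ ]:
# Shape sanity check (sketch): the hypothesis has shape (1, num_samples)
# and broadcasts against y of shape (num_samples,).
print(W.get_shape())           # (1, num_features)
print(x.shape)                 # (num_features, num_samples)
print(hypothesis.get_shape())  # (1, num_samples)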

In [5]:
init = tf.global_variables_initializer()

sess = tf.Session()
sess.run(init)

for step in range(1000):
    sess.run(train)

    if step % 100 == 0:
        print((step, sess.run(cost), sess.run(W)))


(0, 2.0934718, array([[ 0.34,  1.45,  0.79]], dtype=float32))
(100, 2.1342099e-05, array([[ 0.01,  1.  ,  1.  ]], dtype=float32))
(200, 4.3403496e-08, array([[  4.94e-04,   1.00e+00,   1.00e+00]], dtype=float32))
(300, 8.7084118e-11, array([[  2.23e-05,   1.00e+00,   1.00e+00]], dtype=float32))
(400, 2.046363e-13, array([[  1.00e-06,   1.00e+00,   1.00e+00]], dtype=float32))
(500, 4.8316907e-14, array([[  1.62e-07,   1.00e+00,   1.00e+00]], dtype=float32))
(600, 4.8316907e-14, array([[  1.62e-07,   1.00e+00,   1.00e+00]], dtype=float32))
(700, 4.8316907e-14, array([[  1.62e-07,   1.00e+00,   1.00e+00]], dtype=float32))
(800, 4.8316907e-14, array([[  1.62e-07,   1.00e+00,   1.00e+00]], dtype=float32))
(900, 4.8316907e-14, array([[  1.62e-07,   1.00e+00,   1.00e+00]], dtype=float32))
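
A short usage sketch (not part of the original notebook): once training has converged, the same session can evaluate the hypothesis on new inputs. Here x_new is a hypothetical (num_features, 1) column matching the assumed layout of x, with the first entry being the bias feature of 1.

In [ ]:
# Predict for one hypothetical new sample; x_new and its values are
# assumptions for illustration only.
x_new = np.array([[1.0], [2.0], [3.0]], dtype=np.float32)
print(sess.run(tf.matmul(W, x_new)))  # close to 2 + 3 = 5, given W of roughly [0, 1, 1] above

matplotlib is imported at the top but never used; one way to put it to work is to record the cost at every step and plot the learning curve. This re-runs training from freshly re-initialized variables and is only a sketch, not part of the original run.

In [ ]:
sess.run(init)                      # re-initialize W and b
costs = []
for step in range(1000):
    sess.run(train)
    costs.append(sess.run(cost))

plt.plot(costs)
plt.xlabel('step')
plt.ylabel('cost (MSE)')
plt.yscale('log')                   # the cost drops over many orders of magnitude
plt.show()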