In [32]:
import numpy as np
# (x, y) samples that follow a roughly linear, decreasing trend
data1 = [(0.0, 95.364693),
         (1.0, 97.217205),
         (2.0, 75.195834),
         (3.0, 60.105519),
         (4.0, 49.342380),
         (5.0, 37.400286),
         (11.0, -4.383926),
         (12.0, -22.858197),
         (13.0, -37.758333),
         (14.0, -45.606221)]

def gradient_descent(data, learning_rate, tolerance):
    """Fit y = theta1 * x + theta0 to `data` by batch gradient descent."""
    theta0_guess = 1.
    theta1_guess = 1.

    # Previous estimates, started far enough away that the loop can begin.
    theta0_prev = 10.
    theta1_prev = 10.

    m = len(data)

    # Iterate until both parameters change by no more than `tolerance`.
    while (abs(theta1_guess - theta1_prev) > tolerance or
           abs(theta0_guess - theta0_prev) > tolerance):

        theta1_prev = theta1_guess
        theta0_prev = theta0_guess

        hypothesis = lambda x: theta1_guess * x + theta0_guess

        # Gradients of the cost J = (1/(2m)) * sum((h(x) - y)^2); compute
        # both before updating so theta0 and theta1 change simultaneously.
        grad0 = (1. / m) * sum(hypothesis(x) - y for x, y in data)
        grad1 = (1. / m) * sum((hypothesis(x) - y) * x for x, y in data)
        theta0_guess -= learning_rate * grad0
        theta1_guess -= learning_rate * grad1

    return (theta0_guess, theta1_guess)


points = [(float(x), float(y)) for (x, y) in data1]

# np.var flattens the (x, y) pairs into one array, so this "tolerance" is far
# larger than the initial parameter gap of 9: the loop in gradient_descent
# never runs and the initial guesses come back unchanged.
v = np.var(data1)

res = gradient_descent(points, 0.001, v)
print(res)


(1.0, 1.0)
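
Because the data variance is passed as the stopping tolerance, the loop never
iterates and the printed pair is just the initial guess. The cell below is a
minimal sketch of how the same function could be run with a small, fixed
tolerance and cross-checked against NumPy's closed-form fit; the 1e-6
threshold and the np.polyfit comparison are illustrative choices, not part of
the original code.

In [ ]:
# Illustrative only: a small fixed tolerance lets gradient descent iterate.
intercept, slope = gradient_descent(points, 0.001, 1e-6)
print(intercept, slope)

# Cross-check with the closed-form least-squares fit.
# np.polyfit returns coefficients highest degree first: [slope, intercept].
xs = [x for x, _ in points]
ys = [y for _, y in points]
print(np.polyfit(xs, ys, 1))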