In [26]:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline

# generate N random points in [0, 1) x [0, 1)
N = 10
x = np.random.rand(N)
y = np.random.rand(N)

# plot the raw data
plt.scatter(x, y)
Out[26]:
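One small, optional tweak (not in the original cell): because x and y are redrawn on every run, the scatter plot and the fitted coefficients below change each time. Seeding NumPy's generator before the two rand calls makes the runs reproducible.

# optional: fix the seed before calling np.random.rand so every run uses the same points
np.random.seed(0)   # 0 is an arbitrary choice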
In [27]:
# linear regression: y = m*x + b
m = 0
b = 0

# closed-form least squares: m = sum((x - x_mean)*(y - y_mean)) / sum((x - x_mean)^2)
def lr(X, Y):
    global m, b
    x_mean = np.mean(X)
    y_mean = np.mean(Y)
    ## iterative version
    # num = den = 0
    # for xi, yi in zip(X, Y):
    #     num += (xi - x_mean) * (yi - y_mean)
    #     den += (xi - x_mean) ** 2
    ## vectorized version (use the parameters X, Y, not the globals x, y)
    num = np.sum((X - x_mean) * (Y - y_mean))
    den = np.sum((X - x_mean) ** 2)
    m = num / den
    b = y_mean - m * x_mean

# fit and plot the resulting line
lr(x, y)
print("m:{} b:{}".format(m, b))
plt.scatter(x, y)
plt.plot(x, m*x + b, 'r')
Out[27]:
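As a quick sanity check (not part of the original notebook), the hand-rolled closed form can be compared against NumPy's built-in least-squares fit: np.polyfit(x, y, 1) returns the slope and intercept of the degree-1 fit, so the two results should agree to floating-point precision.

# cross-check the closed-form m, b against NumPy's least-squares fit
m_np, b_np = np.polyfit(x, y, 1)
print("np.polyfit -> m:{} b:{}".format(m_np, b_np))
assert np.isclose(m, m_np) and np.isclose(b, b_np)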
In [28]:
# gradient descent fit for the same model y = m*x + b
def gd(X, Y, learning_rate, iterations=100):
    m = 0
    b = 0
    for i in range(iterations):
        m, b = step_gd(m, b, X, Y, learning_rate)
    return m, b

# one gradient descent step on the mean squared error
def step_gd(m, b, X, Y, learning_rate):
    m_grad = 0
    b_grad = 0
    for xi, yi in zip(X, Y):
        pred = m * xi + b
        err = yi - pred
        b_grad += err / len(X)       # negative MSE gradient w.r.t. b (factor of 2 folded into the rate)
        m_grad += xi * err / len(X)  # negative MSE gradient w.r.t. m (factor of 2 folded into the rate)
    m += m_grad * learning_rate      # step downhill on the MSE
    b += b_grad * learning_rate
    return m, b

# fit with three learning rates and plot the resulting lines
learning_rate = 0.1
m1, b1 = gd(x, y, learning_rate)
print("m1:{} b1:{} (red)".format(m1, b1))
learning_rate = 0.5
m2, b2 = gd(x, y, learning_rate)
print("m2:{} b2:{} (yellow)".format(m2, b2))
learning_rate = 1
m3, b3 = gd(x, y, learning_rate)
print("m3:{} b3:{} (green)".format(m3, b3))
plt.scatter(x, y)
plt.plot(x, m1*x + b1, 'r')
plt.plot(x, m2*x + b2, 'y')
plt.plot(x, m3*x + b3, 'g')
Out[28]:
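After only 100 iterations, how close each line gets to the least-squares fit depends on the learning rate. As a rough, optional check (gd and lr are the functions defined above; the 10000-iteration count is an arbitrary choice), running gradient descent longer should bring it close to the closed-form solution.

# sketch: with enough iterations, gradient descent approaches the closed-form fit
lr(x, y)                                      # recompute the closed-form m, b (stored in globals)
m_gd, b_gd = gd(x, y, 0.5, iterations=10000)
print("closed form:    m:{} b:{}".format(m, b))
print("gd (10000 it.): m:{} b:{}".format(m_gd, b_gd))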