In [1]:
import numpy as np
In [2]:
def linearRegCostFunction(theta, X, y, Lambda):
    '''
    theta  = weight vector
    X      = feature matrix (with bias column)
    y      = target vector
    Lambda = regularization strength
    '''
    m = y.size
    # Hypothesis as a column vector so it matches the shape of y
    H = X.dot(theta.reshape(-1, 1))
    # Unregularized squared-error cost
    J = (1/(2*m)) * np.sum(np.square(H - y))
    # Regularization term; the bias weight theta[0] is not penalized,
    # matching the gradient below
    Reg = (Lambda/(2*m)) * np.sum(np.square(theta[1:]))
    return J + Reg
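A quick sanity check of the cost function (a minimal sketch with made-up numbers, not data from the original exercise): with theta = 0 the regularization term vanishes and the cost reduces to the mean of the squared targets divided by two.

In [ ]:
# Hypothetical toy data, used only to exercise linearRegCostFunction
X_demo = np.c_[np.ones(3), np.array([1.0, 2.0, 3.0])]  # bias column + one feature
y_demo = np.array([[2.0], [4.0], [6.0]])
theta_zero = np.zeros(2)
# Expected value: (1/(2*3)) * (4 + 16 + 36) = 56/6 ≈ 9.33
linearRegCostFunction(theta_zero, X_demo, y_demo, Lambda=1.0)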
In [3]:
def linearRegGradient(theta, X, y, Lambda):
    '''
    theta  = weight vector
    X      = feature matrix (with bias column)
    y      = target vector
    Lambda = regularization strength
    '''
    m = y.size
    # Hypothesis as a column vector so it matches the shape of y
    h = X.dot(theta.reshape(-1, 1))
    # Regularization term; the bias weight theta[0] is not penalized
    Reg = (Lambda/m) * np.r_[[[0]], theta[1:].reshape(-1, 1)]
    grad = (1/m) * (X.T.dot(h - y)) + Reg
    return grad.flatten()
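With the cost and the analytic gradient in hand, the weights can be fitted with scipy.optimize.minimize. This is a minimal sketch reusing the toy X_demo/y_demo defined above; the Lambda value and method are illustrative choices, not part of the original exercise.

In [ ]:
from scipy.optimize import minimize

# Fit theta by minimizing the regularized cost; jac receives the analytic gradient.
initial_theta = np.zeros(X_demo.shape[1])
res = minimize(linearRegCostFunction,
               initial_theta,
               args=(X_demo, y_demo, 0.0),   # Lambda = 0 keeps the toy fit exact
               jac=linearRegGradient,
               method='TNC')
res.x  # learned weights, approximately [0, 2] for the toy data above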