In [2]:
import numpy as np
import scipy.stats as st
import sklearn.linear_model as lm
import matplotlib.pyplot as plt
%matplotlib inline
In [3]:
def f(x):
    """Ground-truth target function: exp(3*x).

    Defined as a proper ``def`` rather than a named lambda (PEP 8 E731),
    so it gets a real name in tracebacks and can carry this docstring.
    Works elementwise on scalars or numpy arrays.
    """
    return np.exp(3 * x)
In [5]:
# Dense evaluation grid on [0, 2]; y_tr is the noiseless "ground truth"
# curve that every fitted model below is compared against.
x_tr = np.linspace(0.0, 2.0, num=200)
y_tr = f(x_tr)
In [6]:
# Small noisy training sample: 7 points on [0, 1] with unit-variance
# Gaussian noise added to the true function values.
np.random.seed(0)  # fix the noise so Restart & Run All reproduces the figures
x = np.array([0, .1, .2, .5, .8, .9, 1])
y = f(x) + np.random.randn(len(x))
In [9]:
# Preview: dashed true curve restricted to [0, 1] (the first 100 grid
# points) with the noisy training sample drawn as large black dots.
fig, ax = plt.subplots(figsize=(12, 6))
ax.plot(x_tr[:100], y_tr[:100], '--k')
ax.plot(x, y, 'ok', ms=10)
Out[9]:
In [12]:
# Ordinary least-squares fit of a straight line to the noisy sample.
# scikit-learn expects a 2-D design matrix, hence the reshape of the
# 1-D sample into a single-feature column.
lr = lm.LinearRegression()
lr.fit(x.reshape(-1, 1), y)
# Evaluate the fitted line on the dense grid for plotting.
y_lr = lr.predict(x_tr.reshape(-1, 1))
In [13]:
# Fitted line (green) vs. the true curve (dashed black) and the noisy
# training points, zoomed to the sampled interval [0, 1].
fig, ax = plt.subplots(figsize=(12, 6))
ax.plot(x_tr, y_tr, '--k')
ax.plot(x_tr, y_lr, 'g')
ax.plot(x, y, 'ok', ms=10)
ax.set_xlim(0, 1)
ax.set_ylim(y.min() - 1, y.max() + 1)
ax.set_title('Linear Regression')
Out[13]:
In [25]:
# Polynomial least-squares fits of degree 2 and 5, built by hand with a
# Vandermonde design matrix (columns x**deg, ..., x**0) and plain
# LinearRegression. The degree-5 fit illustrates overfitting: it hugs
# the noisy points and blows up outside the sampled interval.
# (Loop indentation reconstructed; debug prints and the dead
# commented-out loop variant were removed.)
lrp = lm.LinearRegression()
plt.figure(figsize=(12, 6))
plt.plot(x_tr, y_tr, '--k')
for deg, s in zip([2, 5], ['-', '.']):
    lrp.fit(np.vander(x, deg + 1), y)
    y_lrp = lrp.predict(np.vander(x_tr, deg + 1))
    plt.plot(x_tr, y_lrp, s, label='degree ' + str(deg))
    # Report coefficients per degree (highest power first); printing
    # after the loop would only ever show the last-fitted model.
    print('degree %d coefficients: %s'
          % (deg, ' '.join('%.2f' % c for c in lrp.coef_)))
plt.legend(loc=2)
plt.xlim(0, 1.4)
plt.ylim(-10, 40)
plt.plot(x, y, 'ok', ms=10)
plt.title('Polynomial Regression')
Out[25]:
In [26]:
# Same Vandermonde polynomial features, but fitted with ridge (L2)
# regression; RidgeCV chooses the regularization strength by
# cross-validation, which tames the wild degree-5 coefficients seen in
# the unregularized fit above.
# (Renamed from the reused `lrp`; loop indentation reconstructed;
# debug prints, dead commented-out code, and the misleading
# 'Linear Regression' title were removed.)
ridge = lm.RidgeCV()
plt.figure(figsize=(12, 6))
plt.plot(x_tr, y_tr, '--k')
for deg, s in zip([2, 5], ['-', '.']):
    ridge.fit(np.vander(x, deg + 1), y)
    y_ridge = ridge.predict(np.vander(x_tr, deg + 1))
    plt.plot(x_tr, y_ridge, s, label='degree ' + str(deg))
    # Per-degree coefficients (highest power first); a single print
    # after the loop would only show the last-fitted model.
    print('degree %d coefficients: %s'
          % (deg, ' '.join('%.2f' % c for c in ridge.coef_)))
plt.legend(loc=2)
plt.xlim(0, 1.4)
plt.ylim(-10, 40)
plt.plot(x, y, 'ok', ms=10)
plt.title('Ridge Regression')
Out[26]:
In [ ]: