In [1]:
import sys; print('Python \t\t{0[0]}.{0[1]}'.format(sys.version_info))
import tensorflow as tf; print('Tensorflow \t{}'.format(tf.__version__))
import keras; print('Keras \t\t{}'.format(keras.__version__))
In [2]:
%matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('ggplot')
import numpy as np
In [3]:
# linear regression target: y = 2*x + 3, with additive noise e
f = lambda x, e: 2*x + 3 + e
f_vec = np.vectorize(f)
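A quick sanity check (illustrative only; the names x_demo and e_demo are not part of the notebook): the lambda already broadcasts over NumPy arrays, so evaluating it on a few noise-free points should recover y = 2x + 3 exactly.
x_demo = np.array([0.0, 0.5, 1.0])
e_demo = np.zeros_like(x_demo)
print(f(x_demo, e_demo))   # expected: [3. 4. 5.]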
In [4]:
samples = 1000
# Gaussian noise with standard deviation 0.2
e = np.random.normal(0.0, 0.2, samples)
# inputs drawn uniformly from [0, 1)
x = np.random.rand(samples)
y = f_vec(x, e)
# reshape into column vectors of shape (samples, 1), as Keras expects
x = x.reshape(-1,1)
y = y.reshape(-1,1)
In [5]:
plt.figure(figsize=(5,5))
plt.plot(x[:100], y[:100], 'r.')
plt.show()
In [6]:
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.01, random_state=42)
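With test_size=0.01 and 1000 samples, only about 10 points are held out for validation. A small check of the resulting shapes (a sketch, not part of the original run):
print(x_train.shape, x_test.shape)   # expected: (990, 1) (10, 1)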
In [7]:
from keras.models import Sequential
from keras.layers import Dense
In [8]:
model = Sequential()
model.add(Dense(1, input_dim=1))
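Dense(1, input_dim=1) is exactly the affine map y_hat = w*x + b, so the model has two trainable parameters. This can be confirmed with Keras' own summary (illustrative; output not shown here):
model.summary()   # should report 2 trainable parameters: one weight and one bias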
In [9]:
# note: 'accuracy' is not meaningful for a regression task; the MSE loss is the quantity to watch
model.compile(optimizer='sgd', loss='mean_squared_error', metrics=["accuracy"])
In [10]:
model.fit(x_train, y_train,
          batch_size=5, epochs=10, verbose=1,
          validation_data=(x_test, y_test))
Out[10]:
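To inspect how the loss decreases over epochs, the History object returned by fit can be plotted (a sketch under the same setup; hist is a name introduced here, and calling fit again continues training from the current weights):
hist = model.fit(x_train, y_train,
                 batch_size=5, epochs=10, verbose=0,
                 validation_data=(x_test, y_test))
plt.plot(hist.history['loss'], label='train loss (MSE)')
plt.plot(hist.history['val_loss'], label='validation loss (MSE)')
plt.legend()
plt.show()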
In [11]:
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss (MSE):', score[0])
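As a cross-check (illustrative, not part of the original cell), the same MSE can be computed by hand from the model's predictions:
y_pred = model.predict(x_test)
print(np.mean((y_test - y_pred) ** 2))   # should agree with score[0]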
In [12]:
y_test_predict = model.predict(x_test)
# show the first ten test inputs alongside their predicted outputs
np.hstack([x_test[:10], y_test_predict[:10]])
Out[12]:
In [13]:
# extract the learned weight (slope) and bias (intercept) of the single Dense unit
w, b = model.layers[0].get_weights()
w_ = w[0][0]
b_ = b[0]
print("Regression a = {:.2}, b = {:.2}".format(w_, b_))
In [14]:
plt.figure(figsize=(5,5))
plt.plot(x[:100], y[:100], 'r.')
plt.plot(x_test, y_test_predict, 'bo')
# fitted line y = w_*x + b_ drawn explicitly across the x range [0, 1]
plt.plot([0, 1], [b_, w_*1 + b_], 'b-')
plt.show()