In [2]:
import numpy as np
from matplotlib import pyplot as pl
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.layers import LSTM
from keras.optimizers import RMSprop
from keras.utils import np_utils
from sklearn import metrics as me
%matplotlib inline
In [3]:
import math

# Sliding-window dataset parameters: each sample is `steps_of_history`
# consecutive sine values; the target is the value `steps_in_future`
# steps past the end of the window.
step_radians = 0.01
steps_of_history = 200
steps_in_future = 1

# Training set: sine wave sampled over [0, 20*pi) at `step_radians` spacing.
x = np.sin(np.arange(0, 20 * math.pi, step_radians))

seq = []
next_val = []
for i in range(0, len(x) - steps_of_history, steps_in_future):
    seq.append(x[i: i + steps_of_history])    # input window
    next_val.append(x[i + steps_of_history])  # value to predict

# Reshape to (samples, timesteps, features=1), the layout the LSTM expects;
# np.reshape already returns ndarrays, so no extra np.array copy is needed.
seq = np.reshape(seq, [-1, steps_of_history, 1])
next_val = np.reshape(next_val, [-1, 1])
print(np.shape(seq))

trainX = seq
trainY = next_val

# Testing set: the following [20*pi, 24*pi) span of the same sine wave.
# Only input windows are built here (targets are not needed to predict).
x = np.sin(np.arange(20 * math.pi, 24 * math.pi, step_radians))
seq = []
for i in range(0, len(x) - steps_of_history, steps_in_future):
    seq.append(x[i: i + steps_of_history])
testX = np.reshape(seq, [-1, steps_of_history, 1])
In [4]:
batch_size = 234

# Build the LSTM regressor: one 4-unit LSTM layer followed by a single
# dense output unit (next-value regression), trained with MSE loss.
model = Sequential()
# input_shape must match the training windows: (timesteps, features).
# Use the steps_of_history constant instead of a hard-coded 200 so the
# model stays consistent with the dataset-building cell.
model.add(LSTM(4, input_shape=(steps_of_history, 1)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.summary()
In [5]:
# Train for 25 epochs; verbose=1 prints a per-epoch progress bar.
# The bare expression's return (a History object) is the cell's output.
model.fit(trainX, trainY, epochs=25, batch_size=batch_size, verbose=1)
Out[5]:
In [6]:
import matplotlib.pyplot as plt

# Predict the next sine value for every test window.
predictY = model.predict(testX)
print(predictY[:5])  # preview only -- printing the full array floods the output

# Plot actual vs. predicted values.
plt.figure(figsize=(20, 4))
plt.suptitle('Prediction')
plt.title('History=' + str(steps_of_history) + ', Future=' + str(steps_in_future))
# The series are offset by `steps_of_history` values: the first prediction
# corresponds to x[steps_of_history], so drop the first window's worth of
# points from the actual curve to align the two series.
plt.plot(x[steps_of_history:], 'r-', label='Actual')
plt.plot(predictY, 'gx', label='Predicted')
plt.legend()
Out[6]:
In [ ]: