Sine LSTM prediction

The aim of this notebook is to train an LSTM model able to predict the values of a sine wave.


In [2]:
import numpy as np
from matplotlib import pyplot as plt

from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM

from sklearn import metrics as me

%matplotlib inline


Using TensorFlow backend.

In [3]:
step_radians = 0.01
steps_of_history = 200
steps_in_future = 1

# Training set: 20*pi radians of a sine wave, sampled every 0.01 radians
x = np.sin(np.arange(0, 20 * np.pi, step_radians))

# Sliding windows: each input is 200 consecutive values and the target
# is the value that immediately follows the window.
seq = []
next_val = []

for i in range(0, len(x) - steps_of_history, steps_in_future):
    seq.append(x[i: i + steps_of_history])
    next_val.append(x[i + steps_of_history])

trainX = np.reshape(seq, [-1, steps_of_history, 1])
trainY = np.reshape(next_val, [-1, 1])
print(trainX.shape)

# Testing set: the next 4*pi radians of the same sine wave
x = np.sin(np.arange(20 * np.pi, 24 * np.pi, step_radians))

seq = []

for i in range(0, len(x) - steps_of_history, steps_in_future):
    seq.append(x[i: i + steps_of_history])

testX = np.reshape(seq, [-1, steps_of_history, 1])


(6084, 200, 1)
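
As a quick sanity check of the windowing logic (a sketch added here, not part of the original run), the same sliding-window construction can be applied to a toy array:

In [ ]:
# Illustrative sketch: the windowing above, applied to a tiny array.
toy = np.arange(6)  # [0, 1, 2, 3, 4, 5]
history, stride = 3, 1
windows = [toy[i: i + history] for i in range(0, len(toy) - history, stride)]
targets = [toy[i + history] for i in range(0, len(toy) - history, stride)]
# windows -> [0,1,2], [1,2,3], [2,3,4]   targets -> 3, 4, 5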

In [4]:
batch_size = 234

# Create the LSTM network: a single LSTM layer of 4 units followed by
# one linear output neuron predicting the next value of the sequence.
model = Sequential()
model.add(LSTM(4, input_shape=(steps_of_history, 1)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.summary()


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
lstm_1 (LSTM)                (None, 4)                 96        
_________________________________________________________________
dense_1 (Dense)              (None, 1)                 5         
=================================================================
Total params: 101
Trainable params: 101
Non-trainable params: 0
_________________________________________________________________
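
The parameter count can be checked by hand: an LSTM layer has four gates, each with an input weight matrix, a recurrent weight matrix, and a bias vector. A minimal sketch of the arithmetic:

In [ ]:
# LSTM parameters = 4 gates * units * (input_dim + units + 1)
units, input_dim = 4, 1
lstm_params = 4 * units * (input_dim + units + 1)  # 4 * 4 * 6 = 96
dense_params = units * 1 + 1                       # 4 weights + 1 bias = 5
print(lstm_params + dense_params)                  # 101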

In [5]:
model.fit(trainX, trainY, epochs=25, batch_size=batch_size, verbose=1)


Epoch 1/25
6084/6084 [==============================] - 9s - loss: 0.5374      
Epoch 2/25
6084/6084 [==============================] - 8s - loss: 0.4234     
Epoch 3/25
6084/6084 [==============================] - 8s - loss: 0.3207     
Epoch 4/25
6084/6084 [==============================] - 8s - loss: 0.2257     
Epoch 5/25
6084/6084 [==============================] - 8s - loss: 0.1425     
Epoch 6/25
6084/6084 [==============================] - 8s - loss: 0.0789     
Epoch 7/25
6084/6084 [==============================] - 8s - loss: 0.0390     
Epoch 8/25
6084/6084 [==============================] - 8s - loss: 0.0186     
Epoch 9/25
6084/6084 [==============================] - 8s - loss: 0.0109     
Epoch 10/25
6084/6084 [==============================] - 8s - loss: 0.0083     
Epoch 11/25
6084/6084 [==============================] - 8s - loss: 0.0069     
Epoch 12/25
6084/6084 [==============================] - 8s - loss: 0.0060     
Epoch 13/25
6084/6084 [==============================] - 8s - loss: 0.0052     
Epoch 14/25
6084/6084 [==============================] - 8s - loss: 0.0046     
Epoch 15/25
6084/6084 [==============================] - 8s - loss: 0.0040     
Epoch 16/25
6084/6084 [==============================] - 8s - loss: 0.0036     
Epoch 17/25
6084/6084 [==============================] - 8s - loss: 0.0032     
Epoch 18/25
6084/6084 [==============================] - 8s - loss: 0.0028     
Epoch 19/25
6084/6084 [==============================] - 8s - loss: 0.0026     
Epoch 20/25
6084/6084 [==============================] - 8s - loss: 0.0023     
Epoch 21/25
6084/6084 [==============================] - 8s - loss: 0.0021     
Epoch 22/25
6084/6084 [==============================] - 8s - loss: 0.0020     
Epoch 23/25
6084/6084 [==============================] - 8s - loss: 0.0018     
Epoch 24/25
6084/6084 [==============================] - 8s - loss: 0.0017     
Epoch 25/25
6084/6084 [==============================] - 8s - loss: 0.0016     
Out[5]:
<keras.callbacks.History at 0x11a7198d0>
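
The History object shown in Out[5] records the per-epoch metrics. As a sketch, assigning the fit call to a variable exposes the loss curve (note that calling fit again continues training from the current weights):

In [ ]:
# Sketch: capture the History object to plot the training loss.
history = model.fit(trainX, trainY, epochs=5, batch_size=batch_size, verbose=0)
plt.plot(history.history['loss'])
plt.xlabel('Epoch')
plt.ylabel('Training MSE')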

In [6]:
# Predict the future values
predictY = model.predict(testX)
print(predictY)

# Plot the results
plt.figure(figsize=(20,4))
plt.suptitle('Prediction')
plt.title('History='+str(steps_of_history)+', Future='+str(steps_in_future))

# The predictions are shifted by 200 values: the first prediction is made
# from the 200 preceding values, so we drop the first 200 values of the
# test signal when plotting the ground truth.
plt.plot(x[steps_of_history:], 'r-', label='Actual')
plt.plot(predictY, 'gx', label='Predicted')
plt.legend()


[[ 0.92619288]
 [ 0.92308122]
 [ 0.91986775]
 ..., 
 [-0.06815492]
 [-0.05748086]
 [-0.04681687]]
Out[6]:
<matplotlib.legend.Legend at 0x11af4cdd8>
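
The sklearn metrics import from the first cell is never used above; as a minimal sketch, it can quantify the one-step-ahead test error, aligning predictions with the ground truth by dropping the first 200 test values:

In [ ]:
# Sketch: predictY[i] estimates x[steps_of_history + i].
mse = me.mean_squared_error(x[steps_of_history:], predictY.ravel())
print('Test MSE: %.6f' % mse)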

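Since steps_in_future is fixed at 1, the model only ever predicts one step ahead. A natural extension, sketched below on the assumption that the trained model above is reused, is recursive multi-step forecasting, where each prediction is fed back as input:

In [ ]:
# Sketch (not in the original notebook): recursive multi-step forecast.
# Start from the last test window and feed each prediction back in.
window = testX[-1]  # shape (steps_of_history, 1)
forecast = []
for _ in range(100):
    y = model.predict(window[np.newaxis, :, :])  # shape (1, 1)
    forecast.append(y[0, 0])
    window = np.vstack([window[1:], y])          # slide the window forward

plt.plot(forecast, 'b.-', label='Recursive forecast')
plt.legend()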