In [1]:
# Tutorial: http://machinelearningmastery.com/time-series-prediction-lstm-recurrent-neural-networks-python-keras/
# Theoretical explanation: http://colah.github.io/posts/2015-08-Understanding-LSTMs/

In [2]:
import numpy
import matplotlib.pyplot as plt
import pandas
import math
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error


Using TensorFlow backend.

In [3]:
dataset = pandas.read_csv('international-airline-passengers.csv', usecols=[1], engine='python', skipfooter=3)
plt.plot(dataset)
plt.show()


LSTM for Time Series Regression


In [4]:
# fix the NumPy random seed for reproducibility (note: the TensorFlow backend
# keeps its own seed, so repeated runs may still differ slightly)
numpy.random.seed(12345)

In [5]:
# load the dataset
dataframe = pandas.read_csv('international-airline-passengers.csv', usecols=[1], engine='python', skipfooter=3)
dataset = dataframe.values
dataset = dataset.astype('float32')

In [6]:
# normalize the dataset
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
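A quick sanity check on the scaling (illustrative, not part of the original notebook): the scaled series should span [0, 1], and the fitted scaler must be kept around, since the very same object is used later to invert predictions back to passenger counts.

print(dataset.min(), dataset.max())  # expect 0.0 and 1.0
# round-tripping through inverse_transform should recover the raw values (up to float32 precision)
print(numpy.allclose(scaler.inverse_transform(dataset), dataframe.values.astype('float32')))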

In [7]:
# split into train and test sets
train_size = int(len(dataset) * 0.67)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]
print(len(train), len(test))


(96, 48)

In [8]:
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
	dataX, dataY = [], []
	for i in range(len(dataset)-look_back-1):
		a = dataset[i:(i+look_back), 0]
		dataX.append(a)
		dataY.append(dataset[i + look_back, 0])
	return numpy.array(dataX), numpy.array(dataY)
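To see what this helper produces, try it on a toy series (an added illustration, not from the original notebook). With look_back=1 each input is a single value and the target is the next one; note that the -1 in the loop bound means the final observation is never used as a target:

toy = numpy.array([[0.1], [0.2], [0.3], [0.4], [0.5]])
toyX, toyY = create_dataset(toy, look_back=1)
print(toyX)  # [[ 0.1] [ 0.2] [ 0.3]]
print(toyY)  # [ 0.2  0.3  0.4]  -- 0.5 is dropped by the loop bound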

In [9]:
# reshape into X=t and Y=t+1
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)

In [10]:
# reshape input to be [samples, time steps, features]
trainX = numpy.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = numpy.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
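Keras LSTM layers expect 3D input of shape [samples, time steps, features]. With 96 training rows, 48 test rows and look_back=1, create_dataset yields len - look_back - 1 samples per split, so a quick shape check should show:

print(trainX.shape, trainY.shape)  # expect (94, 1, 1) (94,)
print(testX.shape, testY.shape)    # expect (46, 1, 1) (46,)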

In [11]:
# create and fit the LSTM network
model = Sequential()
model.add(LSTM(4, input_shape=(1, look_back)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=2)


Epoch 1/100
2s - loss: 0.0425
Epoch 2/100
1s - loss: 0.0203
Epoch 3/100
0s - loss: 0.0151
[... epochs 4-98 omitted; loss settles near 0.0020 ...]
Epoch 99/100
0s - loss: 0.0020
Epoch 100/100
0s - loss: 0.0019
Out[11]:
<keras.callbacks.History at 0x117cb84d0>
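fit() returns the History object shown above; if the training cell is changed to capture it, the per-epoch loss can be plotted instead of read from the log (a small sketch, assuming that change is made):

# assuming the training cell above is rewritten as:
# history = model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=2)
plt.plot(history.history['loss'])
plt.xlabel('epoch')
plt.ylabel('training loss (MSE)')
plt.show()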

In [12]:
# make predictions
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
# invert predictions
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])

# calculate root mean squared error
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))
print('Test Score: %.2f RMSE' % (testScore))


Train Score: 23.73 RMSE
Test Score: 47.73 RMSE
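For context (an added yardstick, not in the original notebook), compare these scores against a naive persistence forecast that predicts each month with the previous month's value:

raw = scaler.inverse_transform(dataset)[:, 0]
persistenceScore = math.sqrt(mean_squared_error(raw[1:], raw[:-1]))
print('Persistence RMSE: %.2f' % persistenceScore)

This is computed over the full series rather than the same test split, so it is only a rough baseline for the numbers above.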

In [13]:
# shift train predictions for plotting
trainPredictPlot = numpy.empty_like(dataset)
trainPredictPlot[:, :] = numpy.nan
trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict
# shift test predictions for plotting
testPredictPlot = numpy.empty_like(dataset)
testPredictPlot[:, :] = numpy.nan
testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict
# plot baseline and predictions
plt.plot(scaler.inverse_transform(dataset))
plt.plot(trainPredictPlot)
plt.plot(testPredictPlot)
plt.show()


LSTM for Regression Using the Window Method


In [14]:
# load the dataset
dataframe = pandas.read_csv('international-airline-passengers.csv', usecols=[1], engine='python', skipfooter=3)
dataset = dataframe.values
dataset = dataset.astype('float32')

In [15]:
# normalize the dataset
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)

In [16]:
# split into train and test sets
train_size = int(len(dataset) * 0.67)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]

In [17]:
# reshape into X=t and Y=t+1
look_back = 3
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)

In [18]:
# reshape input to be [samples, time steps, features]
trainX = numpy.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = numpy.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
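Note the framing: with this reshape the three past values sit on the features axis ([samples, 1, look_back]), so the LSTM sees a single time step carrying three features. The alternative, used in the stacked section below, places them on the time-steps axis instead (shown here only for contrast, applied to the 2D arrays returned by create_dataset):

# trainX = numpy.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))  # [samples, look_back, 1]
# testX = numpy.reshape(testX, (testX.shape[0], testX.shape[1], 1))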

In [19]:
# create and fit the LSTM network
model = Sequential()
model.add(LSTM(4, input_shape=(1, look_back)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=2)


Epoch 1/100
1s - loss: 0.0396
Epoch 2/100
1s - loss: 0.0155
Epoch 3/100
0s - loss: 0.0113
[... epochs 4-98 omitted; loss declines steadily toward 0.0021 ...]
Epoch 99/100
1s - loss: 0.0021
Epoch 100/100
1s - loss: 0.0020
Out[19]:
<keras.callbacks.History at 0x1192f6c10>

In [20]:
# make predictions
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)

In [21]:
# invert predictions
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])

In [22]:
# calculate root mean squared error
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))
print('Test Score: %.2f RMSE' % (testScore))


Train Score: 23.21 RMSE
Test Score: 59.36 RMSE

In [23]:
# shift train predictions for plotting
trainPredictPlot = numpy.empty_like(dataset)
trainPredictPlot[:, :] = numpy.nan
trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict

In [24]:
# shift test predictions for plotting
testPredictPlot = numpy.empty_like(dataset)
testPredictPlot[:, :] = numpy.nan
testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict

In [25]:
# plot baseline and predictions
plt.plot(scaler.inverse_transform(dataset))
plt.plot(trainPredictPlot)
plt.plot(testPredictPlot)
plt.show()


Stacked LSTMs with Memory Between Batches


In [26]:
# Stacked LSTM for international airline passengers problem with memory
import numpy
import matplotlib.pyplot as plt
from pandas import read_csv
import math
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error

In [27]:
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
	dataX, dataY = [], []
	for i in range(len(dataset)-look_back-1):
		a = dataset[i:(i+look_back), 0]
		dataX.append(a)
		dataY.append(dataset[i + look_back, 0])
	return numpy.array(dataX), numpy.array(dataY)

In [28]:
# fix random seed for reproducibility
numpy.random.seed(7)

In [29]:
# load the dataset
dataframe = read_csv('international-airline-passengers.csv', usecols=[1], engine='python', skipfooter=3)
dataset = dataframe.values
dataset = dataset.astype('float32')

In [30]:
# normalize the dataset
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)

In [31]:
# split into train and test sets
train_size = int(len(dataset) * 0.67)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]

In [32]:
# reshape into X=t and Y=t+1
look_back = 3
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)

In [33]:
# reshape input to be [samples, time steps, features]
trainX = numpy.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
testX = numpy.reshape(testX, (testX.shape[0], testX.shape[1], 1))
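Unlike the window section, the past values now occupy the time-steps axis, so the LSTM processes the three lags sequentially. With look_back=3 the shapes should come out as:

print(trainX.shape)  # expect (92, 3, 1): 96 rows minus look_back minus 1
print(testX.shape)   # expect (44, 3, 1)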

In [34]:
# create and fit the LSTM network
batch_size = 1
model = Sequential()
# the first layer declares the batch input shape; return_sequences=True makes it
# emit an output at every time step, which the second (stacked) LSTM consumes
model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1), stateful=True, return_sequences=True))
# repeating batch_input_shape here is redundant; Keras ignores it on non-first layers
model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1), stateful=True))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
for i in range(100):
	model.fit(trainX, trainY, epochs=1, batch_size=batch_size, verbose=2, shuffle=False)
	model.reset_states()


Epoch 1/1
4s - loss: 0.0057
Epoch 1/1
2s - loss: 0.0141
Epoch 1/1
2s - loss: 0.0100
[... 96 further one-epoch fits omitted; loss sits near 0.0055 for many passes, then falls steadily ...]
Epoch 1/1
1s - loss: 0.0017
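The manual loop above exists only so that reset_states() can clear the LSTM state between passes over the data; with stateful=True, Keras would otherwise carry state from one epoch into the next. An equivalent formulation (a sketch, same behaviour assumed) moves the reset into a callback:

from keras.callbacks import LambdaCallback
reset_cb = LambdaCallback(on_epoch_end=lambda epoch, logs: model.reset_states())
# model.fit(trainX, trainY, epochs=100, batch_size=batch_size, verbose=2,
#           shuffle=False, callbacks=[reset_cb])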

In [35]:
# make predictions
trainPredict = model.predict(trainX, batch_size=batch_size)
model.reset_states()
testPredict = model.predict(testX, batch_size=batch_size)

In [36]:
# invert predictions
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])

In [37]:
# calculate root mean squared error
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))
print('Test Score: %.2f RMSE' % (testScore))


Train Score: 20.58 RMSE
Test Score: 56.39 RMSE

In [38]:
# shift train predictions for plotting
trainPredictPlot = numpy.empty_like(dataset)
trainPredictPlot[:, :] = numpy.nan
trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict

In [39]:
# shift test predictions for plotting
testPredictPlot = numpy.empty_like(dataset)
testPredictPlot[:, :] = numpy.nan
testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict

In [40]:
# plot baseline and predictions
plt.plot(scaler.inverse_transform(dataset))
plt.plot(trainPredictPlot)
plt.plot(testPredictPlot)
plt.show()