In [12]:
%matplotlib inline
import pandas
import matplotlib.pyplot as plt
dataset = pandas.read_csv('data/international-airline-passengers.csv',
                          usecols=[1], engine='python', skipfooter=3)
plt.plot(dataset)
plt.show()
In [13]:
dataset
Out[13]:
In [58]:
import numpy as np
import matplotlib.pyplot as plt
import pandas
import math
from keras.models import Sequential
from keras.layers import Dense, LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
In [15]:
# Fix the random seed to ensure reproducibility
np.random.seed(7)
In [17]:
# load the dataset
dataframe = pandas.read_csv('data/international-airline-passengers.csv', usecols=[1], engine='python', skipfooter=3)
dataset = dataframe.values
type(dataframe), type(dataset)
dataset = dataset.astype('float32')
In [18]:
dataset.shape
Out[18]:
MinMaxScaler is convenient for this.
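For reference, a minimal sketch of what the scaling amounts to, on a hypothetical toy array (the notebook itself uses MinMaxScaler below, which also remembers the parameters needed to invert the transform later):

# min-max scaling by hand: map the minimum to 0 and the maximum to 1
x = np.array([[112.], [118.], [132.], [129.]], dtype='float32')
x_scaled = (x - x.min()) / (x.max() - x.min())
print(x_scaled.ravel())  # [0.   0.3  1.   0.85]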
In [20]:
# normalize the dataset
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
In [21]:
dataset[:10]
Out[21]:
In [77]:
# split into train and test sets
train_size = int(len(dataset) * 0.67)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size, :], dataset[train_size:len(dataset), :]
print(len(train), len(test))
In [103]:
def create_dataset(dataset, look_back=1):
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back - 1):
        a = dataset[i:(i + look_back), 0]
        dataX.append(a)
        dataY.append(dataset[i + look_back, 0])
    return np.array(dataX), np.array(dataY)
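A quick sanity check on a hypothetical toy array (not part of the original notebook) shows the sliding-window behaviour: each row of dataX holds look_back consecutive values and dataY holds the value that follows them; the trailing -1 in the loop bound drops one extra sample at the end.

# toy series 0..5 with look_back=2
toy = np.arange(6, dtype='float32').reshape(-1, 1)
X, y = create_dataset(toy, look_back=2)
print(X)  # [[0. 1.] [1. 2.] [2. 3.]]
print(y)  # [2. 3. 4.]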
In [104]:
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
In [105]:
print(trainX.shape)
print(trainY.shape)
print(testX.shape)
print(testY.shape)
trainX is an array of shape (samples, features).
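The next cell adds a time-step axis so the array matches the (samples, time steps, features) layout that Keras LSTM layers expect. A minimal sketch with a hypothetical 3-sample array:

a = np.array([[0.1], [0.2], [0.3]], dtype='float32')  # (samples, features) = (3, 1)
a3d = np.reshape(a, (a.shape[0], 1, a.shape[1]))       # (samples, time steps, features) = (3, 1, 1)
print(a3d.shape)  # (3, 1, 1)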
In [106]:
trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
In [107]:
print(trainX.shape)
print(testX.shape)
In [57]:
model = Sequential()
# input_shape=(input_length, input_dim)
# The input is a sequence of length 1 with look_back features; the output is a 4-dimensional vector
# With a sequence length of 1, is the memory even used? The input goes into the LSTM and comes straight back out
model.add(LSTM(4, input_shape=(1, look_back)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=2)
Out[57]:
In [108]:
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
In [109]:
# The predictions are on the normalized scale, so convert them back to the original scale
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
In [110]:
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
In [111]:
print(trainPredict.shape, trainY.shape)
print(testPredict.shape, testY.shape)
In [112]:
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:, 0]))
print('Train Score: %.2f RMSE' % (trainScore))
In [113]:
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:, 0]))
print('Test Score: %.2f RMSE' % (testScore))
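For reference, the score is simply the square root of the mean squared error, reported in the units of the original (inverse-transformed) series; a sketch of the equivalent NumPy computation:

# equivalent to math.sqrt(mean_squared_error(testY[0], testPredict[:, 0]))
rmse = np.sqrt(np.mean((testY[0] - testPredict[:, 0]) ** 2))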
In [114]:
trainPredictPlot = np.empty_like(dataset)
trainPredictPlot[:, :] = np.nan
trainPredictPlot[look_back:len(trainPredict) + look_back, :] = trainPredict
In [115]:
testPredictPlot = np.empty_like(dataset)
testPredictPlot[:, :] = np.nan
testPredictPlot[len(trainPredict) + (look_back * 2) + 1:len(dataset) - 1, :] = testPredict
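The offsets account for the samples lost to windowing: predictions start at index look_back, and the test block starts after the training predictions plus another look_back window and the extra sample dropped by create_dataset. A rough worked example, assuming the usual 144 monthly observations in this dataset (96 train / 48 test rows) and look_back = 1:

# len(trainPredict) = 96 - 1 - 1 = 94  -> fills indices 1 .. 94
# len(testPredict)  = 48 - 1 - 1 = 46  -> fills indices 94 + 2 + 1 = 97 .. 142
# the remaining entries stay NaN, so matplotlib leaves gaps at the boundaries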
In [119]:
# Plot the original data (blue)
plt.plot(scaler.inverse_transform(dataset))
# Plot the predictions on the training data (green)
plt.plot(trainPredictPlot)
# Plot the predictions on the test data
plt.plot(testPredictPlot)
Out[119]:
In [120]:
look_back = 3
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
In [121]:
print(trainX.shape)
print(trainY.shape)
print(testX.shape)
print(testY.shape)
In [122]:
trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
print(trainX.shape)
print(testX.shape)
In [123]:
model = Sequential()
# input_shape=(input_length, input_dim)
model.add(LSTM(4, input_shape=(1, look_back)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=2)
Out[123]:
In [124]:
# Predict
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
# Convert back to the original scale
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:, 0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:, 0]))
print('Test Score: %.2f RMSE' % (testScore))
In [132]:
look_back = 3
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
# [samples, time steps, features]
# 3-feature data with sequence length 1 => 1-feature data with sequence length 3
trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))
In [134]:
print(trainX.shape, testX.shape)
In [135]:
model = Sequential()
# input_shape=(input_length, input_dim)
# The input now has 1 feature and a sequence length of look_back!
model.add(LSTM(4, input_shape=(look_back, 1)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=2)
Out[135]:
In [136]:
# Predict
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
# Convert back to the original scale
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:, 0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:, 0]))
print('Test Score: %.2f RMSE' % (testScore))
In [137]:
trainPredictPlot = np.empty_like(dataset)
trainPredictPlot[:, :] = np.nan
trainPredictPlot[look_back:len(trainPredict) + look_back, :] = trainPredict
testPredictPlot = np.empty_like(dataset)
testPredictPlot[:, :] = np.nan
testPredictPlot[len(trainPredict) + (look_back * 2) + 1:len(dataset) - 1, :] = testPredict
# Plot the original data (blue)
plt.plot(scaler.inverse_transform(dataset))
# Plot the predictions on the training data (green)
plt.plot(trainPredictPlot)
# Plot the predictions on the test data
plt.plot(testPredictPlot)
Out[137]:
# Creating the LSTM layer
model.add(LSTM(4,
               batch_input_shape=(batch_size, time_steps, features),
               stateful=True))
# How to write the training loop
for i in range(100):
    model.fit(trainX, trainY, epochs=1, batch_size=batch_size, verbose=2, shuffle=False)
    model.reset_states()
# How to predict
model.predict(trainX, batch_size=batch_size)
To make the LSTM stateful:
- specify stateful=True
- use batch_input_shape to also fix the batch size and the rest of the input shape
- set shuffle=False when calling fit
- call reset_states() after each epoch
- pass batch_size to predict as well
In [145]:
look_back = 3
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
# [samples, time steps, features]
# 3-feature data with sequence length 1 => 1-feature data with sequence length 3
trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))
In [146]:
batch_size = 1
model = Sequential()
model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1), stateful=True))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
#model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=2)
for i in range(100):
    model.fit(trainX, trainY, epochs=1, batch_size=batch_size, verbose=2, shuffle=False)
    model.reset_states()
In [147]:
# Predict
trainPredict = model.predict(trainX, batch_size=batch_size)
testPredict = model.predict(testX, batch_size=batch_size)
# Convert back to the original scale
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:, 0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:, 0]))
print('Test Score: %.2f RMSE' % (testScore))
trainPredictPlot = np.empty_like(dataset)
trainPredictPlot[:, :] = np.nan
trainPredictPlot[look_back:len(trainPredict) + look_back, :] = trainPredict
testPredictPlot = np.empty_like(dataset)
testPredictPlot[:, :] = np.nan
testPredictPlot[len(trainPredict) + (look_back * 2) + 1:len(dataset) - 1, :] = testPredict
# Plot the original data (blue)
plt.plot(scaler.inverse_transform(dataset))
# Plot the predictions on the training data (green)
plt.plot(trainPredictPlot)
# Plot the predictions on the test data
plt.plot(testPredictPlot)
Out[147]:
In [148]:
look_back = 3
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
# [samples, time steps, features]
# 3-feature data with sequence length 1 => 1-feature data with sequence length 3
trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))
batch_size = 1
model = Sequential()
# return_sequences=True makes the first LSTM emit its output at every time step,
# so the second (stacked) LSTM receives a 3-D sequence as input
model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1), stateful=True,
               return_sequences=True))
model.add(LSTM(4, batch_input_shape=(batch_size, look_back, 1), stateful=True))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
#model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=2)
for i in range(100):
    model.fit(trainX, trainY, epochs=1, batch_size=batch_size, verbose=2, shuffle=False)
    model.reset_states()
In [149]:
# Predict
trainPredict = model.predict(trainX, batch_size=batch_size)
testPredict = model.predict(testX, batch_size=batch_size)
# Convert back to the original scale
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:, 0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:, 0]))
print('Test Score: %.2f RMSE' % (testScore))
trainPredictPlot = np.empty_like(dataset)
trainPredictPlot[:, :] = np.nan
trainPredictPlot[look_back:len(trainPredict) + look_back, :] = trainPredict
testPredictPlot = np.empty_like(dataset)
testPredictPlot[:, :] = np.nan
testPredictPlot[len(trainPredict) + (look_back * 2) + 1:len(dataset) - 1, :] = testPredict
# Plot the original data (blue)
plt.plot(scaler.inverse_transform(dataset))
# Plot the predictions on the training data (green)
plt.plot(trainPredictPlot)
# Plot the predictions on the test data
plt.plot(testPredictPlot)
Out[149]:
In [ ]: