Forecast model for pierna with TSM (sea surface temperature) and CLa (chlorophyll-a) as input data

In [6]:
from pandas import DataFrame
from pandas import concat
from pandas import read_csv
from matplotlib import pyplot
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
import numpy as np
 
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
    """
    Frame a time series as a supervised learning dataset.
    Arguments:
        data: Sequence of observations as a list or NumPy array.
        n_in: Number of lag observations as input (X).
        n_out: Number of observations as output (y).
        dropnan: Boolean whether or not to drop rows with NaN values.
    Returns:
        Pandas DataFrame of series framed for supervised learning.
    """
    n_vars = 1 if type(data) is list else data.shape[1]
    df = DataFrame(data)
    cols, names = list(), list()
    # input sequence (t-n, ... t-1)
    for i in range(n_in, 0, -1):
        cols.append(df.shift(i))
        names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
    # forecast sequence (t, t+1, ... t+n)
    for i in range(0, n_out):
        cols.append(df.shift(-i))
        if i == 0:
            names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
        else:
            names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
    # put it all together
    agg = concat(cols, axis=1)
    agg.columns = names
    # drop rows with NaN values
    if dropnan:
        agg.dropna(inplace=True)
    return agg


Using TensorFlow backend.
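A minimal usage sketch (not part of the original run) showing how series_to_supervised frames a toy two-variable series; the var/lag column naming follows directly from the function above:

import numpy as np
toy = np.array([[1, 10],
                [2, 20],
                [3, 30],
                [4, 40]])
print(series_to_supervised(toy, n_in=1, n_out=1))
# columns: var1(t-1), var2(t-1), var1(t), var2(t)
# the first row is dropped because its lagged values are NaN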

In [7]:
# load the dataset; the first CSV column is used as the index
dataset = read_csv('pierna.csv', header=0, index_col=0)
values = dataset.values
# integer-encode the second column
encoder = LabelEncoder()
values[:,1] = encoder.fit_transform(values[:,1])

In [8]:
# ensure all data is float
values = values.astype('float32')

In [9]:
# normalize features to the range (0, 1)
scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(values)
# frame as supervised learning: 1 lag step in, 1 step out
reframed = series_to_supervised(scaled, 1, 1)
# drop the columns we do not want to predict (var2(t), var3(t)); keep var1(t) as the target
reframed.drop(reframed.columns[[4,5]], axis=1, inplace=True)
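A quick sketch (not executed above) of what reframed holds at this point; reading var1 as the pierna series and var2/var3 as TSM and CLa is an assumption about the CSV column order:

# inspect the supervised framing
print(reframed.columns.tolist())
# expected: ['var1(t-1)', 'var2(t-1)', 'var3(t-1)', 'var1(t)']
print(reframed.head())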

In [10]:
print(values.shape)


(132, 3)

In [39]:
# split into train and test sets
values = reframed.values
n_train = 132 - 24  # the first 108 reframed rows for training; the remaining 23 for testing
train = values[:n_train, :]
test = values[n_train:, :]
print(values.shape, train.shape, test.shape)
# split into input and outputs
train_X, train_y = train[:, :-1], train[:, -1]
test_X, test_y = test[:, :-1], test[:, -1]

# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))


(131, 4) (108, 4) (23, 4)
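A minimal check (not in the original notebook) of what the 3D reshape produces: each sample is a single timestep carrying the three lagged features.

# the first 3D sample is just the first row of lagged features from the 2D split
assert train_X.shape == (108, 1, 3)
assert np.array_equal(train_X[0, 0, :], train[0, :-1])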

In [40]:
# design network
model = Sequential()
model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(Dropout(0.2))
model.add(Activation('tanh'))
model.add(Dense(1))
model.compile(loss='mae', optimizer='adam')

# fit network
history = model.fit(train_X, train_y, epochs=200, batch_size=92,
                    validation_data=(test_X, test_y), shuffle=False)


Train on 108 samples, validate on 23 samples
Epoch 1/200
108/108 [==============================] - 0s - loss: 0.1125 - val_loss: 0.3152
Epoch 2/200
108/108 [==============================] - 0s - loss: 0.1018 - val_loss: 0.3032
Epoch 3/200
108/108 [==============================] - 0s - loss: 0.0924 - val_loss: 0.2911
Epoch 4/200
108/108 [==============================] - 0s - loss: 0.0802 - val_loss: 0.2791
Epoch 5/200
108/108 [==============================] - 0s - loss: 0.0726 - val_loss: 0.2671
Epoch 6/200
108/108 [==============================] - 0s - loss: 0.0644 - val_loss: 0.2554
Epoch 7/200
108/108 [==============================] - 0s - loss: 0.0611 - val_loss: 0.2442
Epoch 8/200
108/108 [==============================] - 0s - loss: 0.0595 - val_loss: 0.2336
Epoch 9/200
108/108 [==============================] - 0s - loss: 0.0571 - val_loss: 0.2236
Epoch 10/200
108/108 [==============================] - 0s - loss: 0.0547 - val_loss: 0.2147
Epoch 11/200
108/108 [==============================] - 0s - loss: 0.0554 - val_loss: 0.2072
Epoch 12/200
108/108 [==============================] - 0s - loss: 0.0583 - val_loss: 0.2011
Epoch 13/200
108/108 [==============================] - 0s - loss: 0.0593 - val_loss: 0.1961
Epoch 14/200
108/108 [==============================] - 0s - loss: 0.0578 - val_loss: 0.1918
Epoch 15/200
108/108 [==============================] - 0s - loss: 0.0624 - val_loss: 0.1885
Epoch 16/200
108/108 [==============================] - 0s - loss: 0.0648 - val_loss: 0.1861
Epoch 17/200
108/108 [==============================] - 0s - loss: 0.0660 - val_loss: 0.1845
Epoch 18/200
108/108 [==============================] - 0s - loss: 0.0646 - val_loss: 0.1835
Epoch 19/200
108/108 [==============================] - 0s - loss: 0.0640 - val_loss: 0.1829
Epoch 20/200
108/108 [==============================] - 0s - loss: 0.0643 - val_loss: 0.1825
Epoch 21/200
108/108 [==============================] - 0s - loss: 0.0670 - val_loss: 0.1825
Epoch 22/200
108/108 [==============================] - 0s - loss: 0.0640 - val_loss: 0.1826
Epoch 23/200
108/108 [==============================] - 0s - loss: 0.0645 - val_loss: 0.1829
Epoch 24/200
108/108 [==============================] - 0s - loss: 0.0647 - val_loss: 0.1832
Epoch 25/200
108/108 [==============================] - 0s - loss: 0.0622 - val_loss: 0.1836
Epoch 26/200
108/108 [==============================] - 0s - loss: 0.0642 - val_loss: 0.1838
Epoch 27/200
108/108 [==============================] - 0s - loss: 0.0619 - val_loss: 0.1840
Epoch 28/200
108/108 [==============================] - 0s - loss: 0.0627 - val_loss: 0.1841
Epoch 29/200
108/108 [==============================] - 0s - loss: 0.0599 - val_loss: 0.1841
Epoch 30/200
108/108 [==============================] - 0s - loss: 0.0596 - val_loss: 0.1840
Epoch 31/200
108/108 [==============================] - 0s - loss: 0.0605 - val_loss: 0.1836
Epoch 32/200
108/108 [==============================] - 0s - loss: 0.0605 - val_loss: 0.1833
Epoch 33/200
108/108 [==============================] - 0s - loss: 0.0611 - val_loss: 0.1830
Epoch 34/200
108/108 [==============================] - 0s - loss: 0.0624 - val_loss: 0.1828
Epoch 35/200
108/108 [==============================] - 0s - loss: 0.0605 - val_loss: 0.1826
Epoch 36/200
108/108 [==============================] - 0s - loss: 0.0588 - val_loss: 0.1823
Epoch 37/200
108/108 [==============================] - 0s - loss: 0.0587 - val_loss: 0.1818
Epoch 38/200
108/108 [==============================] - 0s - loss: 0.0602 - val_loss: 0.1815
Epoch 39/200
108/108 [==============================] - 0s - loss: 0.0605 - val_loss: 0.1813
Epoch 40/200
108/108 [==============================] - 0s - loss: 0.0605 - val_loss: 0.1810
Epoch 41/200
108/108 [==============================] - 0s - loss: 0.0615 - val_loss: 0.1802
Epoch 42/200
108/108 [==============================] - 0s - loss: 0.0580 - val_loss: 0.1790
Epoch 43/200
108/108 [==============================] - 0s - loss: 0.0610 - val_loss: 0.1781
Epoch 44/200
108/108 [==============================] - 0s - loss: 0.0604 - val_loss: 0.1775
Epoch 45/200
108/108 [==============================] - 0s - loss: 0.0604 - val_loss: 0.1768
Epoch 46/200
108/108 [==============================] - 0s - loss: 0.0607 - val_loss: 0.1759
Epoch 47/200
108/108 [==============================] - 0s - loss: 0.0603 - val_loss: 0.1751
Epoch 48/200
108/108 [==============================] - 0s - loss: 0.0593 - val_loss: 0.1742
Epoch 49/200
108/108 [==============================] - 0s - loss: 0.0597 - val_loss: 0.1736
Epoch 50/200
108/108 [==============================] - 0s - loss: 0.0598 - val_loss: 0.1730
Epoch 51/200
108/108 [==============================] - 0s - loss: 0.0604 - val_loss: 0.1726
Epoch 52/200
108/108 [==============================] - 0s - loss: 0.0612 - val_loss: 0.1723
Epoch 53/200
108/108 [==============================] - 0s - loss: 0.0613 - val_loss: 0.1722
Epoch 54/200
108/108 [==============================] - 0s - loss: 0.0618 - val_loss: 0.1721
Epoch 55/200
108/108 [==============================] - 0s - loss: 0.0620 - val_loss: 0.1718
Epoch 56/200
108/108 [==============================] - 0s - loss: 0.0608 - val_loss: 0.1714
Epoch 57/200
108/108 [==============================] - 0s - loss: 0.0588 - val_loss: 0.1707
Epoch 58/200
108/108 [==============================] - 0s - loss: 0.0594 - val_loss: 0.1701
Epoch 59/200
108/108 [==============================] - 0s - loss: 0.0604 - val_loss: 0.1693
Epoch 60/200
108/108 [==============================] - 0s - loss: 0.0628 - val_loss: 0.1687
Epoch 61/200
108/108 [==============================] - 0s - loss: 0.0605 - val_loss: 0.1683
Epoch 62/200
108/108 [==============================] - 0s - loss: 0.0585 - val_loss: 0.1681
Epoch 63/200
108/108 [==============================] - 0s - loss: 0.0636 - val_loss: 0.1677
Epoch 64/200
108/108 [==============================] - 0s - loss: 0.0619 - val_loss: 0.1675
Epoch 65/200
108/108 [==============================] - 0s - loss: 0.0608 - val_loss: 0.1674
Epoch 66/200
108/108 [==============================] - 0s - loss: 0.0596 - val_loss: 0.1674
Epoch 67/200
108/108 [==============================] - 0s - loss: 0.0595 - val_loss: 0.1672
Epoch 68/200
108/108 [==============================] - 0s - loss: 0.0611 - val_loss: 0.1671
Epoch 69/200
108/108 [==============================] - 0s - loss: 0.0573 - val_loss: 0.1669
Epoch 70/200
108/108 [==============================] - 0s - loss: 0.0591 - val_loss: 0.1666
Epoch 71/200
108/108 [==============================] - 0s - loss: 0.0585 - val_loss: 0.1663
Epoch 72/200
108/108 [==============================] - 0s - loss: 0.0592 - val_loss: 0.1659
Epoch 73/200
108/108 [==============================] - 0s - loss: 0.0579 - val_loss: 0.1652
Epoch 74/200
108/108 [==============================] - 0s - loss: 0.0577 - val_loss: 0.1643
Epoch 75/200
108/108 [==============================] - 0s - loss: 0.0592 - val_loss: 0.1634
Epoch 76/200
108/108 [==============================] - 0s - loss: 0.0572 - val_loss: 0.1624
Epoch 77/200
108/108 [==============================] - 0s - loss: 0.0575 - val_loss: 0.1610
Epoch 78/200
108/108 [==============================] - 0s - loss: 0.0606 - val_loss: 0.1597
Epoch 79/200
108/108 [==============================] - 0s - loss: 0.0602 - val_loss: 0.1585
Epoch 80/200
108/108 [==============================] - 0s - loss: 0.0621 - val_loss: 0.1573
Epoch 81/200
108/108 [==============================] - 0s - loss: 0.0606 - val_loss: 0.1561
Epoch 82/200
108/108 [==============================] - 0s - loss: 0.0616 - val_loss: 0.1551
Epoch 83/200
108/108 [==============================] - 0s - loss: 0.0628 - val_loss: 0.1543
Epoch 84/200
108/108 [==============================] - 0s - loss: 0.0624 - val_loss: 0.1537
Epoch 85/200
108/108 [==============================] - 0s - loss: 0.0605 - val_loss: 0.1535
Epoch 86/200
108/108 [==============================] - 0s - loss: 0.0612 - val_loss: 0.1534
Epoch 87/200
108/108 [==============================] - 0s - loss: 0.0622 - val_loss: 0.1536
Epoch 88/200
108/108 [==============================] - 0s - loss: 0.0613 - val_loss: 0.1538
Epoch 89/200
108/108 [==============================] - 0s - loss: 0.0615 - val_loss: 0.1539
Epoch 90/200
108/108 [==============================] - 0s - loss: 0.0605 - val_loss: 0.1541
Epoch 91/200
108/108 [==============================] - 0s - loss: 0.0590 - val_loss: 0.1541
Epoch 92/200
108/108 [==============================] - 0s - loss: 0.0600 - val_loss: 0.1540
Epoch 93/200
108/108 [==============================] - 0s - loss: 0.0594 - val_loss: 0.1537
Epoch 94/200
108/108 [==============================] - 0s - loss: 0.0607 - val_loss: 0.1533
Epoch 95/200
108/108 [==============================] - 0s - loss: 0.0597 - val_loss: 0.1528
Epoch 96/200
108/108 [==============================] - 0s - loss: 0.0615 - val_loss: 0.1523
Epoch 97/200
108/108 [==============================] - 0s - loss: 0.0610 - val_loss: 0.1519
Epoch 98/200
108/108 [==============================] - 0s - loss: 0.0602 - val_loss: 0.1516
Epoch 99/200
108/108 [==============================] - 0s - loss: 0.0613 - val_loss: 0.1515
Epoch 100/200
108/108 [==============================] - 0s - loss: 0.0578 - val_loss: 0.1511
Epoch 101/200
108/108 [==============================] - 0s - loss: 0.0600 - val_loss: 0.1506
Epoch 102/200
108/108 [==============================] - 0s - loss: 0.0581 - val_loss: 0.1500
Epoch 103/200
108/108 [==============================] - 0s - loss: 0.0591 - val_loss: 0.1496
Epoch 104/200
108/108 [==============================] - 0s - loss: 0.0593 - val_loss: 0.1490
Epoch 105/200
108/108 [==============================] - 0s - loss: 0.0612 - val_loss: 0.1484
Epoch 106/200
108/108 [==============================] - 0s - loss: 0.0606 - val_loss: 0.1480
Epoch 107/200
108/108 [==============================] - 0s - loss: 0.0588 - val_loss: 0.1477
Epoch 108/200
108/108 [==============================] - 0s - loss: 0.0623 - val_loss: 0.1475
Epoch 109/200
108/108 [==============================] - 0s - loss: 0.0613 - val_loss: 0.1474
Epoch 110/200
108/108 [==============================] - 0s - loss: 0.0598 - val_loss: 0.1472
Epoch 111/200
108/108 [==============================] - 0s - loss: 0.0576 - val_loss: 0.1469
Epoch 112/200
108/108 [==============================] - 0s - loss: 0.0619 - val_loss: 0.1466
Epoch 113/200
108/108 [==============================] - 0s - loss: 0.0630 - val_loss: 0.1463
Epoch 114/200
108/108 [==============================] - 0s - loss: 0.0598 - val_loss: 0.1458
Epoch 115/200
108/108 [==============================] - 0s - loss: 0.0614 - val_loss: 0.1452
Epoch 116/200
108/108 [==============================] - 0s - loss: 0.0594 - val_loss: 0.1447
Epoch 117/200
108/108 [==============================] - 0s - loss: 0.0611 - val_loss: 0.1444
Epoch 118/200
108/108 [==============================] - 0s - loss: 0.0611 - val_loss: 0.1441
Epoch 119/200
108/108 [==============================] - 0s - loss: 0.0621 - val_loss: 0.1435
Epoch 120/200
108/108 [==============================] - 0s - loss: 0.0609 - val_loss: 0.1428
Epoch 121/200
108/108 [==============================] - 0s - loss: 0.0613 - val_loss: 0.1423
Epoch 122/200
108/108 [==============================] - 0s - loss: 0.0616 - val_loss: 0.1421
Epoch 123/200
108/108 [==============================] - 0s - loss: 0.0616 - val_loss: 0.1421
Epoch 124/200
108/108 [==============================] - 0s - loss: 0.0608 - val_loss: 0.1421
Epoch 125/200
108/108 [==============================] - 0s - loss: 0.0593 - val_loss: 0.1422
Epoch 126/200
108/108 [==============================] - 0s - loss: 0.0587 - val_loss: 0.1424
Epoch 127/200
108/108 [==============================] - 0s - loss: 0.0608 - val_loss: 0.1428
Epoch 128/200
108/108 [==============================] - 0s - loss: 0.0612 - val_loss: 0.1431
Epoch 129/200
108/108 [==============================] - 0s - loss: 0.0614 - val_loss: 0.1431
Epoch 130/200
108/108 [==============================] - 0s - loss: 0.0593 - val_loss: 0.1431
Epoch 131/200
108/108 [==============================] - 0s - loss: 0.0602 - val_loss: 0.1429
Epoch 132/200
108/108 [==============================] - 0s - loss: 0.0615 - val_loss: 0.1427
Epoch 133/200
108/108 [==============================] - 0s - loss: 0.0590 - val_loss: 0.1424
Epoch 134/200
108/108 [==============================] - 0s - loss: 0.0568 - val_loss: 0.1419
Epoch 135/200
108/108 [==============================] - 0s - loss: 0.0589 - val_loss: 0.1415
Epoch 136/200
108/108 [==============================] - 0s - loss: 0.0593 - val_loss: 0.1412
Epoch 137/200
108/108 [==============================] - 0s - loss: 0.0596 - val_loss: 0.1407
Epoch 138/200
108/108 [==============================] - 0s - loss: 0.0584 - val_loss: 0.1401
Epoch 139/200
108/108 [==============================] - 0s - loss: 0.0601 - val_loss: 0.1393
Epoch 140/200
108/108 [==============================] - 0s - loss: 0.0595 - val_loss: 0.1383
Epoch 141/200
108/108 [==============================] - 0s - loss: 0.0582 - val_loss: 0.1375
Epoch 142/200
108/108 [==============================] - 0s - loss: 0.0599 - val_loss: 0.1369
Epoch 143/200
108/108 [==============================] - 0s - loss: 0.0608 - val_loss: 0.1363
Epoch 144/200
108/108 [==============================] - 0s - loss: 0.0602 - val_loss: 0.1359
Epoch 145/200
108/108 [==============================] - 0s - loss: 0.0608 - val_loss: 0.1357
Epoch 146/200
108/108 [==============================] - 0s - loss: 0.0612 - val_loss: 0.1356
Epoch 147/200
108/108 [==============================] - 0s - loss: 0.0639 - val_loss: 0.1357
Epoch 148/200
108/108 [==============================] - 0s - loss: 0.0601 - val_loss: 0.1358
Epoch 149/200
108/108 [==============================] - 0s - loss: 0.0622 - val_loss: 0.1358
Epoch 150/200
108/108 [==============================] - 0s - loss: 0.0588 - val_loss: 0.1356
Epoch 151/200
108/108 [==============================] - 0s - loss: 0.0599 - val_loss: 0.1352
Epoch 152/200
108/108 [==============================] - 0s - loss: 0.0628 - val_loss: 0.1349
Epoch 153/200
108/108 [==============================] - 0s - loss: 0.0613 - val_loss: 0.1350
Epoch 154/200
108/108 [==============================] - 0s - loss: 0.0594 - val_loss: 0.1349
Epoch 155/200
108/108 [==============================] - 0s - loss: 0.0593 - val_loss: 0.1347
Epoch 156/200
108/108 [==============================] - 0s - loss: 0.0591 - val_loss: 0.1342
Epoch 157/200
108/108 [==============================] - 0s - loss: 0.0596 - val_loss: 0.1334
Epoch 158/200
108/108 [==============================] - 0s - loss: 0.0606 - val_loss: 0.1325
Epoch 159/200
108/108 [==============================] - 0s - loss: 0.0598 - val_loss: 0.1317
Epoch 160/200
108/108 [==============================] - 0s - loss: 0.0620 - val_loss: 0.1313
Epoch 161/200
108/108 [==============================] - 0s - loss: 0.0643 - val_loss: 0.1312
Epoch 162/200
108/108 [==============================] - 0s - loss: 0.0622 - val_loss: 0.1315
Epoch 163/200
108/108 [==============================] - 0s - loss: 0.0625 - val_loss: 0.1320
Epoch 164/200
108/108 [==============================] - 0s - loss: 0.0606 - val_loss: 0.1326
Epoch 165/200
108/108 [==============================] - 0s - loss: 0.0606 - val_loss: 0.1333
Epoch 166/200
108/108 [==============================] - 0s - loss: 0.0621 - val_loss: 0.1340
Epoch 167/200
108/108 [==============================] - 0s - loss: 0.0592 - val_loss: 0.1347
Epoch 168/200
108/108 [==============================] - 0s - loss: 0.0602 - val_loss: 0.1351
Epoch 169/200
108/108 [==============================] - 0s - loss: 0.0571 - val_loss: 0.1353
Epoch 170/200
108/108 [==============================] - 0s - loss: 0.0586 - val_loss: 0.1353
Epoch 171/200
108/108 [==============================] - 0s - loss: 0.0604 - val_loss: 0.1351
Epoch 172/200
108/108 [==============================] - 0s - loss: 0.0599 - val_loss: 0.1348
Epoch 173/200
108/108 [==============================] - 0s - loss: 0.0589 - val_loss: 0.1343
Epoch 174/200
108/108 [==============================] - 0s - loss: 0.0591 - val_loss: 0.1337
Epoch 175/200
108/108 [==============================] - 0s - loss: 0.0571 - val_loss: 0.1330
Epoch 176/200
108/108 [==============================] - 0s - loss: 0.0592 - val_loss: 0.1325
Epoch 177/200
108/108 [==============================] - 0s - loss: 0.0589 - val_loss: 0.1319
Epoch 178/200
108/108 [==============================] - 0s - loss: 0.0627 - val_loss: 0.1312
Epoch 179/200
108/108 [==============================] - 0s - loss: 0.0586 - val_loss: 0.1306
Epoch 180/200
108/108 [==============================] - 0s - loss: 0.0625 - val_loss: 0.1301
Epoch 181/200
108/108 [==============================] - 0s - loss: 0.0609 - val_loss: 0.1299
Epoch 182/200
108/108 [==============================] - 0s - loss: 0.0604 - val_loss: 0.1298
Epoch 183/200
108/108 [==============================] - 0s - loss: 0.0592 - val_loss: 0.1296
Epoch 184/200
108/108 [==============================] - 0s - loss: 0.0615 - val_loss: 0.1296
Epoch 185/200
108/108 [==============================] - 0s - loss: 0.0603 - val_loss: 0.1297
Epoch 186/200
108/108 [==============================] - 0s - loss: 0.0613 - val_loss: 0.1299
Epoch 187/200
108/108 [==============================] - 0s - loss: 0.0609 - val_loss: 0.1300
Epoch 188/200
108/108 [==============================] - 0s - loss: 0.0589 - val_loss: 0.1301
Epoch 189/200
108/108 [==============================] - 0s - loss: 0.0580 - val_loss: 0.1302
Epoch 190/200
108/108 [==============================] - 0s - loss: 0.0613 - val_loss: 0.1302
Epoch 191/200
108/108 [==============================] - 0s - loss: 0.0601 - val_loss: 0.1302
Epoch 192/200
108/108 [==============================] - 0s - loss: 0.0592 - val_loss: 0.1303
Epoch 193/200
108/108 [==============================] - 0s - loss: 0.0591 - val_loss: 0.1302
Epoch 194/200
108/108 [==============================] - 0s - loss: 0.0602 - val_loss: 0.1301
Epoch 195/200
108/108 [==============================] - 0s - loss: 0.0609 - val_loss: 0.1300
Epoch 196/200
108/108 [==============================] - 0s - loss: 0.0596 - val_loss: 0.1301
Epoch 197/200
108/108 [==============================] - 0s - loss: 0.0603 - val_loss: 0.1302
Epoch 198/200
108/108 [==============================] - 0s - loss: 0.0597 - val_loss: 0.1304
Epoch 199/200
108/108 [==============================] - 0s - loss: 0.0611 - val_loss: 0.1307
Epoch 200/200
108/108 [==============================] - 0s - loss: 0.0590 - val_loss: 0.1309
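The validation loss has essentially plateaued long before epoch 200. A minimal sketch of an alternative (not used in this run) that stops training automatically with Keras' EarlyStopping callback:

from keras.callbacks import EarlyStopping

# stop once val_loss has not improved for 20 consecutive epochs
early_stop = EarlyStopping(monitor='val_loss', patience=20)
history = model.fit(train_X, train_y, epochs=200, batch_size=92,
                    validation_data=(test_X, test_y), shuffle=False,
                    callbacks=[early_stop])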

In [41]:
# plot training and validation loss
pyplot.figure(figsize=(20,10))
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
pyplot.legend()
pyplot.show()



In [42]:
# make a prediction
yhat = model.predict(test_X)
# flatten test_X back to 2D so it can be recombined with the forecasts
test_X = test_X.reshape((test_X.shape[0], test_X.shape[2]))

In [43]:
print(test_X.shape)
print(yhat.shape)


(23, 3)
(23, 1)

In [44]:
# invert scaling for forecast: rebuild a full 3-column row so the scaler can invert it
inv_yhat = np.concatenate((yhat, test_X[:, 1:]), axis=1)
inv_yhat = scaler.inverse_transform(inv_yhat)
# keep the target column; the first forecast is discarded (22 values remain)
inv_yhat = inv_yhat[1:,0]
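Equivalently, a sketch (not part of the notebook) that un-scales the target column directly from the fitted scaler's stored range, which makes the concatenation trick above easier to follow:

# invert the (0, 1) scaling of column 0 by hand using the scaler's stored min/max
inv_yhat_direct = yhat[:, 0] * (scaler.data_max_[0] - scaler.data_min_[0]) + scaler.data_min_[0]
# inv_yhat_direct[1:] should match inv_yhat above up to float precision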

In [45]:
# invert scaling for actual values in the same way
test_y = test_y.reshape((len(test_y), 1))
inv_y = np.concatenate((test_y, test_X[:, 1:]), axis=1)
inv_y = scaler.inverse_transform(inv_y)
# keep the target column; the last observation is discarded (22 values remain)
inv_y = inv_y[:22,0]

In [46]:
from sklearn.metrics import mean_squared_error
from scipy.stats import pearsonr
# calculate RMSE and Pearson correlation
rmse = np.sqrt(mean_squared_error(inv_y, inv_yhat))
r = pearsonr(inv_y, inv_yhat)
print('Test RMSE: %.3f' % rmse)
print('Test R %.3f' % r[0])


Test RMSE: 390553.969
Test R 0.973

In [47]:
# plot actual vs. predicted values
pyplot.figure(figsize=(20,10))
pyplot.plot(inv_y, label='y')
pyplot.plot(inv_yhat, label='yhat')
pyplot.legend()


Out[47]:
<matplotlib.legend.Legend at 0x7ff76fe9bf28>

In [48]:
pyplot.show()


