Forecast model for pierna using TSM (sea-surface temperature) and CLa (chlorophyll-a) as input data

In [1]:
from pandas import DataFrame
from pandas import concat
from pandas import read_csv
from matplotlib import pyplot
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, LSTM
import numpy as np
 
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
    """
    Frame a time series as a supervised learning dataset.
    Arguments:
        data: Sequence of observations as a list or NumPy array.
        n_in: Number of lag observations as input (X).
        n_out: Number of observations as output (y).
        dropnan: Boolean whether or not to drop rows with NaN values.
    Returns:
        Pandas DataFrame of series framed for supervised learning.
    """
    n_vars = 1 if type(data) is list else data.shape[1]
    df = DataFrame(data)
    cols, names = list(), list()
    # input sequence (t-n, ... t-1)
    for i in range(n_in, 0, -1):
        cols.append(df.shift(i))
        names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
    # forecast sequence (t, t+1, ... t+n)
    for i in range(0, n_out):
        cols.append(df.shift(-i))
        if i == 0:
            names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
        else:
            names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
    # put it all together
    agg = concat(cols, axis=1)
    agg.columns = names
    # drop rows with NaN values
    if dropnan:
        agg.dropna(inplace=True)
    return agg


Using TensorFlow backend.

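As a quick illustration (not part of the original notebook), framing a toy two-variable series with one lag shows what `series_to_supervised` returns; the first row is dropped because the t-1 shift leaves NaNs there.

# hypothetical toy example: two variables observed at four time steps
toy = np.array([[1, 10],
                [2, 20],
                [3, 30],
                [4, 40]])
framed = series_to_supervised(toy, n_in=1, n_out=1)
print(framed)
# columns: var1(t-1), var2(t-1), var1(t), var2(t)
# rows:    [1, 10, 2, 20], [2, 20, 3, 30], [3, 30, 4, 40]
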
In [2]:
dataset = read_csv('verdillo.csv', header=0, index_col=0)
values = dataset.values
# integer-encode the second column so that all inputs are numeric
encoder = LabelEncoder()
values[:,1] = encoder.fit_transform(values[:,1])
print(values)


[[  5.28537000e+05   6.60000000e+01   5.46022841e-01]
 [  5.02627000e+05   5.70000000e+01   4.78221960e-01]
 [  1.98093000e+05   3.80000000e+01   1.54769652e+00]
 [  9.20300000e+04   3.60000000e+01   1.51302835e+00]
 [  2.01849000e+05   3.90000000e+01   2.67901019e+00]
 ...
 [  1.90936000e+05   1.05000000e+02   6.49200000e-01]
 [  2.14636000e+05   1.05000000e+02   7.36600000e-01]]
(output truncated; 132 rows × 3 columns in total)

In [3]:
# ensure all data is float
values = values.astype('float32')

In [4]:
# normalize features to [0, 1]
scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(values)
# frame as supervised learning: predict time t from the three variables at t-1
reframed = series_to_supervised(scaled, 1, 1)
# drop var2(t) and var3(t); only var1(t) is kept as the forecast target
reframed.drop(reframed.columns[[4,5]], axis=1, inplace=True)

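As an illustrative check (not executed in the original notebook), inspecting the framed columns makes the drop above concrete: after removing columns 4 and 5, only var1(t) remains as the target.

print(reframed.columns.tolist())
# expected: ['var1(t-1)', 'var2(t-1)', 'var3(t-1)', 'var1(t)']
print(reframed.head())
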
In [5]:
print(values.shape)


(132, 3)

In [16]:
# split into train and test sets (last 24 observations held out)
values = reframed.values
n_train = 132 - 24
train = values[:n_train, :]
test = values[n_train:, :]
# one row was lost to the t-1 lag, so 131 rows remain: 108 train / 23 test
print(values.shape, train.shape, test.shape)
# split into inputs (lagged var1..var3) and output (var1 at time t)
train_X, train_y = train[:, :-1], train[:, -1]
test_X, test_y = test[:, :-1], test[:, -1]

# reshape input to be 3D [samples, timesteps, features]
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))


(131, 4) (108, 4) (23, 4)

In [17]:
# design network: one LSTM layer, dropout for regularization, single-unit output
model = Sequential()
model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(Dropout(0.2))
model.add(Activation('tanh'))
model.add(Dense(1))
model.compile(loss='mae', optimizer='adam')

history = model.fit(train_X, train_y, epochs=35, batch_size=10,
                    validation_data=(test_X, test_y), shuffle=False)


Train on 108 samples, validate on 23 samples
Epoch 1/35
108/108 [==============================] - 0s - loss: 0.1898 - val_loss: 0.1903
Epoch 2/35
108/108 [==============================] - 0s - loss: 0.1617 - val_loss: 0.1509
Epoch 3/35
108/108 [==============================] - 0s - loss: 0.1491 - val_loss: 0.1281
Epoch 4/35
108/108 [==============================] - 0s - loss: 0.1430 - val_loss: 0.1162
Epoch 5/35
108/108 [==============================] - 0s - loss: 0.1398 - val_loss: 0.1114
Epoch 6/35
108/108 [==============================] - 0s - loss: 0.1357 - val_loss: 0.1099
Epoch 7/35
108/108 [==============================] - 0s - loss: 0.1342 - val_loss: 0.1082
Epoch 8/35
108/108 [==============================] - 0s - loss: 0.1339 - val_loss: 0.1076
Epoch 9/35
108/108 [==============================] - 0s - loss: 0.1317 - val_loss: 0.1063
Epoch 10/35
108/108 [==============================] - 0s - loss: 0.1290 - val_loss: 0.1052
Epoch 11/35
108/108 [==============================] - 0s - loss: 0.1279 - val_loss: 0.1058
Epoch 12/35
108/108 [==============================] - 0s - loss: 0.1250 - val_loss: 0.1064
Epoch 13/35
108/108 [==============================] - 0s - loss: 0.1256 - val_loss: 0.1045
Epoch 14/35
108/108 [==============================] - 0s - loss: 0.1230 - val_loss: 0.1046
Epoch 15/35
108/108 [==============================] - 0s - loss: 0.1191 - val_loss: 0.1021
Epoch 16/35
108/108 [==============================] - 0s - loss: 0.1159 - val_loss: 0.0991
Epoch 17/35
108/108 [==============================] - 0s - loss: 0.1174 - val_loss: 0.0975
Epoch 18/35
108/108 [==============================] - 0s - loss: 0.1145 - val_loss: 0.0975
Epoch 19/35
108/108 [==============================] - 0s - loss: 0.1130 - val_loss: 0.0974
Epoch 20/35
108/108 [==============================] - 0s - loss: 0.1127 - val_loss: 0.0985
Epoch 21/35
108/108 [==============================] - 0s - loss: 0.1113 - val_loss: 0.0999
Epoch 22/35
108/108 [==============================] - 0s - loss: 0.1133 - val_loss: 0.0991
Epoch 23/35
108/108 [==============================] - 0s - loss: 0.1111 - val_loss: 0.0951
Epoch 24/35
108/108 [==============================] - 0s - loss: 0.1091 - val_loss: 0.0930
Epoch 25/35
108/108 [==============================] - 0s - loss: 0.1102 - val_loss: 0.0926
Epoch 26/35
108/108 [==============================] - 0s - loss: 0.1071 - val_loss: 0.0928
Epoch 27/35
108/108 [==============================] - 0s - loss: 0.1108 - val_loss: 0.0925
Epoch 28/35
108/108 [==============================] - 0s - loss: 0.1114 - val_loss: 0.0908
Epoch 29/35
108/108 [==============================] - 0s - loss: 0.1009 - val_loss: 0.0879
Epoch 30/35
108/108 [==============================] - 0s - loss: 0.1013 - val_loss: 0.0874
Epoch 31/35
108/108 [==============================] - 0s - loss: 0.1028 - val_loss: 0.0877
Epoch 32/35
108/108 [==============================] - 0s - loss: 0.1009 - val_loss: 0.0878
Epoch 33/35
108/108 [==============================] - 0s - loss: 0.1012 - val_loss: 0.0880
Epoch 34/35
108/108 [==============================] - 0s - loss: 0.1004 - val_loss: 0.0875
Epoch 35/35
108/108 [==============================] - 0s - loss: 0.1025 - val_loss: 0.0882

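The validation loss levels off around epoch 30. As an optional alternative to a fixed 35 epochs, here is a hedged sketch using Keras's EarlyStopping callback (not part of the original run; the patience value is an assumption):

from keras.callbacks import EarlyStopping

# stop training once val_loss has not improved for 5 consecutive epochs
early_stop = EarlyStopping(monitor='val_loss', patience=5)
history = model.fit(train_X, train_y, epochs=100, batch_size=10,
                    validation_data=(test_X, test_y), shuffle=False,
                    callbacks=[early_stop])
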
In [18]:
# plot training history: training vs. validation loss per epoch
pyplot.figure(figsize=(20,10))
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
pyplot.legend()
pyplot.show()


[Figure: training (train) vs. validation (test) loss per epoch]
In [19]:
# make a prediction on the held-out set
yhat = model.predict(test_X)
# flatten test_X back to 2D for the inverse scaling below
test_X = test_X.reshape((test_X.shape[0], test_X.shape[2]))

In [20]:
print(test_X.shape)
print(yhat.shape)


(23, 3)
(23, 1)

In [21]:
# invert scaling for forecast: the scaler was fitted on 3 columns, so rebuild
# a full row by pairing yhat with the other scaled features
inv_yhat = np.concatenate((yhat, test_X[:, 1:]), axis=1)
inv_yhat = scaler.inverse_transform(inv_yhat)
# drop the first forecast, leaving 22 values (note: this introduces a
# one-step offset against the 22 observations kept below)
inv_yhat = inv_yhat[1:,0]

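The concatenation above is needed because MinMaxScaler.inverse_transform expects the same number of columns the scaler was fitted on. A minimal sketch of the idea, with made-up numbers, assuming a scaler fitted on three columns:

# hypothetical illustration: a scaler fitted on 3 columns cannot invert
# a single column, so we pad the prediction with companion features
demo = MinMaxScaler(feature_range=(0, 1))
demo_scaled = demo.fit_transform(np.array([[1., 10., 100.],
                                           [2., 20., 200.],
                                           [3., 30., 300.]]))
col0 = demo_scaled[:, 0:1]                   # pretend this is a forecast
padded = np.concatenate((col0, demo_scaled[:, 1:]), axis=1)
print(demo.inverse_transform(padded)[:, 0])  # recovers [1. 2. 3.]
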
In [22]:
# invert scaling for actual values, using the same padding trick as above
test_y = test_y.reshape((len(test_y), 1))
inv_y = np.concatenate((test_y, test_X[:, 1:]), axis=1)
inv_y = scaler.inverse_transform(inv_y)
# keep the first 22 observations to match the 22 forecasts above
inv_y = inv_y[:22,0]

In [23]:
from sklearn.metrics import mean_squared_error
from scipy.stats import pearsonr
# calculate RMSE and Pearson correlation between observed and forecast series
rmse = np.sqrt(mean_squared_error(inv_y, inv_yhat))
r = pearsonr(inv_y, inv_yhat)
print('Test RMSE: %.3f' % rmse)
print('Test R %.3f' % r[0])


Test RMSE: 122022.086
Test R 0.992

In [24]:
# plot observed vs. forecast catch for the test period
pyplot.figure(figsize=(20,10))
pyplot.plot(inv_y, label='y')
pyplot.plot(inv_yhat, label='yhat')
pyplot.legend()

Out[24]:
<matplotlib.legend.Legend at 0x7fb8531beda0>

In [25]:
pyplot.show()


[Figure: observed (y) vs. forecast (yhat) series for the test period]