Regression, SVM and logit predictions from myfunc.py as inputs to an LSTM
In [1]:
%run myfunc.py
In [3]:
#path = 'C:\\Users\\Michal\\Dropbox\\IB_data'
path = 'C:\\Users\\Michal\\Desktop' + '\\SPY4Aug17.csv'
#path = '/home/octo/Dropbox' + '/SPY4Aug17.csv'
#df=dataframe_notime(path)
df=get_csv_pd(path)
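get_csv_pd is defined in myfunc.py and is not shown in this notebook. As a rough sketch, assuming it simply loads the tick CSV into a pandas DataFrame (the real helper may also parse timestamps or rename columns):

import pandas as pd

def get_csv_pd_sketch(path):
    # hypothetical stand-in for myfunc.get_csv_pd: read the tick CSV
    # and drop rows with missing values
    df = pd.read_csv(path)
    return df.dropna()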
In [ ]:
#data=strat_lr(df)
data=strat_class(df)
In [54]:
df=data[['Close','vel','sigma','P','pREG','predict_svm','predict_lm']]
In [55]:
import numpy
import matplotlib.pyplot as plt
import pandas
import math
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
In [56]:
#plt.plot(data.predict_svm)
#plt.show()
In [57]:
dataset = df.values
dataset = dataset.astype('float32')
In [58]:
# fix random seed for reproducibility
numpy.random.seed(7)
In [59]:
# normalize the dataset
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
In [60]:
# split into train and test sets
train_size = int(len(dataset) * 0.80)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]
print(len(train), len(test))
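Note that the MinMaxScaler above is fit on the full dataset before the split, so the test rows influence the scaling of the training rows. A hedged alternative that fits the scaler on the training slice only and applies it to both (shown with fresh variable names so it does not clash with the cells above):

# fit the scaler on the training portion only to avoid look-ahead bias
raw = df.values.astype('float32')
split = int(len(raw) * 0.80)
scaler_tr = MinMaxScaler(feature_range=(0, 1))
train_scaled = scaler_tr.fit_transform(raw[:split, :])
test_scaled = scaler_tr.transform(raw[split:, :])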
In [61]:
#train[:,0]
In [62]:
# reshape into X=t and Y=t+1
look_back = 3
trainX, trainY = create_dataset(train,look_back)
testX, testY = create_dataset(test,look_back)
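create_dataset also lives in myfunc.py. Judging by the comment above and by trainX.shape[2] being used below, it windows the scaled matrix into overlapping look_back-length sequences of all features, with one column of the following row as the target (assumed here to be column 0, the Close price). A minimal sketch under that assumption:

def create_dataset_sketch(data, look_back=1, target_col=0):
    # assumed behaviour: X[i] = rows i .. i+look_back-1 (all features),
    # Y[i] = value of target_col at row i+look_back
    dataX, dataY = [], []
    for i in range(len(data) - look_back - 1):
        dataX.append(data[i:i + look_back, :])
        dataY.append(data[i + look_back, target_col])
    return numpy.array(dataX), numpy.array(dataY)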
In [63]:
#trainX.shape
#trainX.shape[0]
#trainX.shape[1]
In [64]:
# input is already [samples, time steps, features]; this reshape just confirms the 3D shape
trainX = numpy.reshape(trainX, (trainX.shape[0], trainX.shape[1], trainX.shape[2]))
testX = numpy.reshape(testX, (testX.shape[0], testX.shape[1], testX.shape[2]))
In [65]:
#trainX.shape
In [66]:
epochs=3
batch_size=25
In [67]:
# create and fit the LSTM network
model = Sequential()
model.add(LSTM(4, input_shape=(look_back, trainX.shape[2])))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
# pass epochs and batch_size as keyword arguments; positionally they land in the
# wrong slots of fit() (batch_size comes before epochs in its signature)
model.fit(trainX, trainY, epochs=epochs, batch_size=batch_size, verbose=2)
In [68]:
# make predictions
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
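mean_squared_error was imported above but never used; a short check of train and test RMSE on the scaled targets (these values are in the 0-1 scaled space, not in price units):

# RMSE in scaled units; inverting the MinMax scaling would require rebuilding
# a full-width array around the single predicted column
trainScore = math.sqrt(mean_squared_error(trainY, trainPredict[:, 0]))
testScore = math.sqrt(mean_squared_error(testY, testPredict[:, 0]))
print('Train RMSE: %.4f, Test RMSE: %.4f' % (trainScore, testScore))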
In [90]:
# shift train predictions for plotting (single-column arrays so each series plots as one line)
trainPredictPlot = numpy.empty((len(dataset), 1))
trainPredictPlot[:, :] = numpy.nan
trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict
# shift test predictions for plotting
testPredictPlot = numpy.empty((len(dataset), 1))
testPredictPlot[:, :] = numpy.nan
testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict
# plot baseline and predictions (column 1 of the scaled dataset is 'vel')
#plt.plot(scaler.inverse_transform(dataset))
plt.plot(dataset[:,1])
plt.plot(trainPredictPlot)
plt.plot(testPredictPlot)
plt.show()
In [91]:
model.save("elevenaug.h5")
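The saved model can be restored later with Keras' load_model (no recompilation needed, since the loss and optimizer are stored in the HDF5 file):

from keras.models import load_model

# reload the trained network and reuse it for prediction
model = load_model("elevenaug.h5")
#reloaded_predict = model.predict(testX)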
In [ ]: