In [1]:
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
import csv
import glob

from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.arima_model import ARIMAResults

import pickle
#from sklearn.cross_validation import train_test_split
from sklearn import linear_model
from sklearn.svm import SVR
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC


/home/octo/anaconda2/envs/carnd-term1/lib/python3.5/site-packages/statsmodels/compat/pandas.py:56: FutureWarning: The pandas.core.datetools module is deprecated and will be removed in a future version. Please use the pandas.tseries module instead.
  from pandas.core import datetools

In [2]:
filename = '/home/octo/Dropbox'+ '/SPY7Dec.csv'

In [3]:
# loading csv file
def get_csv_pd(path):
    #spy_pd=pd.read_csv('C:\\Users\Michal\Dropbox\IB_data\SPY.csv',sep=' ',names=['askPrice','askSize','bidPrice','bidSize'],index_col=0,parse_dates=True)
    #spy_pd=pd.read_csv(path+'\SPY.csv',sep=',',names=['askPrice','askSize','bidPrice','bidSize'],index_col=0,parse_dates=True)
    spy_pd=pd.read_csv(path,sep=',',dtype={'askPrice':np.float32,'askSize':np.float32,
                                           'bidPrice':np.float32,'bidSize':np.float32},index_col=0,parse_dates=True)
    #spy_pd = pd.read_csv(path, usecols=['askPrice','askSize','bidPrice','bidSize'], engine='python', skipfooter=3)
    return spy_pd
'''
def get_csv_pd_notime(path):
    #spy_pd=pd.read_csv('C:\\Users\Michal\Dropbox\IB_data\SPY.csv',sep=' ',names=['askPrice','askSize','bidPrice','bidSize'],index_col=0,parse_dates=True)
    #spy_pd=pd.read_csv(path+'\SPY.csv',sep=',',names=['askPrice','askSize','bidPrice','bidSize'],index_col=0,parse_dates=True)
    spy_pd = pd.read_csv(path, usecols=['askPrice','askSize','bidPrice','bidSize'], engine='python', skipfooter=3)
    return spy_pd
'''

def preprocessing(df):
    df.bidPrice=df.loc[:,'bidPrice'].replace(to_replace=0, method='ffill')
    df.bidSize=df.loc[:,'bidSize'].replace(to_replace=0, method='ffill')
    df.askPrice=df.loc[:,'askPrice'].replace(to_replace=0, method='ffill')
    df.askSize=df.loc[:,'askSize'].replace(to_replace=0, method='ffill')
    df=df.dropna()
    # to exclude 0
    df=df[df['bidPrice']>df.bidPrice.mean()-df.bidPrice.std()]
    df=df[df['askPrice']>df.askPrice.mean()-df.askPrice.std()]
    df['mid']=(df.askPrice+df.bidPrice)/2
    df['vwap']=((df.loc[:,'bidPrice']*df.loc[:,'bidSize'])+(df.loc[:,'askPrice']*df.loc[:,'askSize']))/(df.loc[:,'bidSize']+df.loc[:,'askSize'])
    df['spread']=df.vwap-(df.askPrice+df.bidPrice)/2
    df['v']=(df.askPrice+df.bidPrice)/2-((df.askPrice+df.bidPrice)/2).shift(60)
    df['return']=(df.askPrice/df.bidPrice.shift(1))-1
    df['sigma']=df.spread.rolling(60).std()
    return df

'''
def normalise(df,window_length=60):
    dfn=(df-df.rolling(window_length).min())/(df.rolling(window_length).max()-df.rolling(window_length).min())
    return dfn

def de_normalise(data,df,window_length=60):
    dn=(df*(data.rolling(window_length).max()-data.rolling(window_length).min()))+data.rolling(window_length).min()
    return dn

def normalise_z(df,window_length=12):
    dfn=(df-df.rolling(window_length).mean())/(df.rolling(window_length).std())
    return dfn

'''
def normalise(df,window_length=60):
    data=df[['askPrice','askSize','bidPrice','bidSize','vwap','spread','v','return','sigma']]
    dfn=data/data.shift(60)
    return dfn

def de_normalise(dfn,window_length=60):
    data=df[['askPrice','askSize','bidPrice','bidSize','vwap','spread','v','return','sigma']]
    data=dfn*data.shift(60)
    return data

#https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
def chunks(l, n):
    """Yield successive n-sized chunks from l."""
    for i in range(0, len(l), n):
        yield l[i:i + n]

In [4]:
# monkey patch around bug in ARIMA class
def __getnewargs__(self):
	return ((self.endog),(self.k_lags, self.k_diff, self.k_ma))
ARIMA.__getnewargs__ = __getnewargs__


def arima_processing(df):
    #data=df[['vwap','mid']]
    df=df.dropna()
    df['Lvwap']=np.log(df.vwap)
    df['Lmid']=np.log(df.mid)
    df['LDvwap']=df.Lvwap-df.Lvwap.shift(60)
    df['LDmid']=df.Lmid-df.Lmid.shift(60)
    df=df.dropna()
    return df  

def ARIMA_saving(data):
    data=data.dropna()
    data1=data.LDvwap
    data2=data.LDmid
    
    model_vwap = ARIMA(data1,order=(2,1,2))  # tested from ARIMA.ipynb
    #predictions = model.fit(disp=0).predict()
    predictions_vwap =model_vwap.fit(disp=0).fittedvalues
    # save model
    model_vwap.fit().save('vwap_arima.pkl')
    vwap_arima=np.exp(predictions_vwap+data.Lvwap.shift(60))
    
    model_mid = ARIMA(data2,order=(2,1,2))  # tested from ARIMA.ipynb
    #predictions = model.fit(disp=0).predict()
    predictions_mid =model_mid.fit(disp=0).fittedvalues
    # save model
    model_mid.fit().save('mid_arima.pkl')

In [5]:
data=get_csv_pd(filename)
data=preprocessing(data)
data=data.dropna()
data=arima_processing(data)
data=data.dropna().tail(10000)
data= ARIMA_saving(data)


/home/octo/anaconda2/envs/carnd-term1/lib/python3.5/site-packages/statsmodels/base/model.py:496: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  "Check mle_retvals", ConvergenceWarning)
/home/octo/anaconda2/envs/carnd-term1/lib/python3.5/site-packages/statsmodels/base/model.py:496: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  "Check mle_retvals", ConvergenceWarning)

No need to save KM model

Linear Regression, sklearn, svm:SVR,linear_model


In [6]:
data=get_csv_pd(filename)
data=preprocessing(data)
#data=normalise(data)
data=data.dropna()

In [7]:
data.tail()


Out[7]:
askPrice askSize bidPrice bidSize mid vwap spread v return sigma
2017-12-08 03:00:32.244925 263.839996 1.0 263.829987 576.0 263.834991 263.830017 -0.004974 0.019989 0.000038 0.003702
2017-12-08 03:00:34.284669 263.839996 1.0 263.829987 563.0 263.834991 263.830017 -0.004974 0.014984 0.000038 0.003723
2017-12-08 03:00:35.874931 263.839996 1.0 263.839996 563.0 263.839996 263.839996 0.000000 0.019989 0.000038 0.003723
2017-12-08 03:00:37.499670 263.839996 1.0 263.839996 17.0 263.839996 263.839966 -0.000031 0.014984 0.000000 0.003723
2017-12-08 03:00:39.356631 263.850006 1.0 263.839996 17.0 263.845001 263.840546 -0.004456 0.019989 0.000038 0.003652

In [8]:
df=data.tail(10000)
X=df[['askPrice','askSize','bidPrice','bidSize','vwap','spread','v','return','sigma']]
y=df.mid

saving linear model


In [9]:
regr = linear_model.LinearRegression()
regr_model=regr.fit(X,y)
# save the model to disk
filename_rgr = 'rgr.sav'
pickle.dump(regr_model, open(filename_rgr, 'wb'))

svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.9) #kernel='linear' #kernel='poly'
svr_model = svr_rbf.fit(X, y)
# save the model to disk
filename_svr = 'svr.sav'
pickle.dump(svr_model, open(filename_svr, 'wb'))

Classification is based on the previous predictions, so need to build the dataframe df_ml


In [ ]:
###Model is already saved from "/Dropbox/DataScience/ARIMA_model_saving.ipynb". Here loaded and added to "df_ml"
def ARIMA_(data):
    ### load model
    data=data.dropna()
    predictions_mid=ARIMA_mid(data.LDmid)
    predictions_vwap=ARIMA_vwap(data.LDvwap) 
    vwap_arima=np.exp(predictions_vwap+data.Lvwap.shift(60))
    mid_arima=np.exp(predictions_mid+data.Lmid.shift(60))
    df_ml['arima']=data.mid+vwap_arima-mid_arima
    
def ARIMA_mid(data):
    ### load model
    mid_arima_loaded = ARIMAResults.load('mid_arima.pkl')
    predictions_mid = mid_arima_loaded.predict()
    return predictions_mid

def ARIMA_vwap(data):
    ### load model
    vwap_arima_loaded = ARIMAResults.load('vwap_arima.pkl')
    predictions_vwap = vwap_arima_loaded.predict()
    return predictions_vwap

#### KALMAN moving average

##KF moving average
#https://github.com/pykalman/pykalman

# Import a Kalman filter and other useful libraries
from pykalman import KalmanFilter
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import poly1d

def kalman_ma(data):
    #x=data.mid
    x=data.mid
    # Construct a Kalman filter
    kf = KalmanFilter(transition_matrices = [1],
                  observation_matrices = [1],
                  initial_state_mean = 248,
                  initial_state_covariance = 1,
                  observation_covariance=1,
                  transition_covariance=.01)

    # Use the observed values of the price to get a rolling mean
    state_means, _ = kf.filter(x.values)
    state_means = pd.Series(state_means.flatten(), index=x.index)
    df_ml['km']=state_means

### Linear Regression, sklearn, svm:SVR,linear_model
import pickle
#from sklearn.cross_validation import train_test_split
from sklearn import linear_model
from sklearn.svm import SVR
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC


## loading model saved from /Dropbox/DataScience/REG_model_saving.ipynb
filename_rgr = 'rgr.sav'
filename_svr = 'svr.sav'
# load the model from disk
loaded_rgr_model = pickle.load(open(filename_rgr, 'rb'))
loaded_svr_model = pickle.load(open(filename_svr, 'rb'))

def strat_lr(data):
    #no normalization
    
    data=data.dropna()
    X=data[['askPrice','askSize','bidPrice','bidSize','vwap','spread','v','return','sigma']]
    #y=df[['mid']]
    predict_regr=loaded_rgr_model.predict(X)
    predict_svr=loaded_svr_model.predict(X)
    
    df_ml['REG']=predict_regr
    df_ml['SVR']=predict_svr
    
    ## strat_lr(data,dfn) and below needed for normalized data
    #df_ml['predict_regr']=predict_regr
    #df_ml['predict_svr']=predict_svr
    #df_ml['REG']=de_normalise(data.mid,df.predict_regr)
    #df_ml['SVR']=de_normalise(data.mid,df.predict_svr)

In [11]:
df_ml=pd.DataFrame()

#creating the ml dataset
data=get_csv_pd(filename)
data=preprocessing(data)
data=data.dropna()
dfn=normalise(data)
df_arima=arima_processing(data)
### prediction for last 60 points
data=data.dropna().tail(5000)
dfn=dfn.dropna().tail(5000)
df_arima=df_arima.dropna().tail(5000)

df_ml['mid']=data.mid
df_ml['vwap']=data.vwap


ARIMA_(df_arima)
kalman_ma(data)
strat_lr(data)

In [12]:
len(df_ml)


Out[12]:
5000

Classification


In [13]:
df_ml=df_ml.dropna()
data_cl=data.tail(len(df_ml))
'''
a= np.where(df_ml.mid>df_ml.km,1,0)
b= np.where(df_ml.mid<df_ml.km,-1,0)
c=np.where(df_ml.mid>df_ml.arima,1,0)
d=np.where(df_ml.mid<df_ml.arima,-1,0)
e=np.where(df_ml.mid>df_ml.REG,1,0)
f=np.where(df_ml.mid<df_ml.REG,-1,0)
g=np.where(df_ml.mid>df_ml.SVR,1,0)
h=np.where(df_ml.mid<df_ml.SVR,-1,0)
'''
data_cl['U']=np.where(df_ml.mid>df_ml.vwap,1,0)
data_cl['D']=np.where(df_ml.mid<df_ml.vwap,-1,0)
data_cl=data_cl.dropna()


/home/octo/anaconda2/envs/carnd-term1/lib/python3.5/site-packages/ipykernel/__main__.py:13: SettingWithCopyWarning: 
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
/home/octo/anaconda2/envs/carnd-term1/lib/python3.5/site-packages/ipykernel/__main__.py:14: SettingWithCopyWarning: 
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy

In [14]:
df_ml.tail()


Out[14]:
mid vwap arima km REG SVR
2017-12-08 03:00:32.244925 263.834991 263.830017 263.829376 263.828279 263.835012 263.734688
2017-12-08 03:00:34.284669 263.834991 263.830017 263.832971 263.828917 263.835012 263.740069
2017-12-08 03:00:35.874931 263.839996 263.839996 263.838402 263.829971 263.840017 263.740093
2017-12-08 03:00:37.499670 263.839996 263.839966 263.843578 263.830925 263.840017 263.743573
2017-12-08 03:00:39.356631 263.845001 263.840546 263.846789 263.832264 263.845022 263.744920

saving classification model


In [15]:
X=data_cl[['askPrice','askSize','bidPrice','bidSize','vwap','spread','v','return','sigma']]
y1=data_cl[['U']]
y2=data_cl[['D']]

svm = SVC(kernel='linear')
lm = linear_model.LogisticRegression(C=1e4)

In [ ]:
svm_model_up=svm.fit(X,y1)
lm_model_up=lm.fit(X,y1)
svm_model_dn=svm.fit(X,y2)
lm_model_dn =lm.fit(X,y2)


/home/octo/anaconda2/envs/carnd-term1/lib/python3.5/site-packages/sklearn/utils/validation.py:526: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().
  y = column_or_1d(y, warn=True)

In [36]:
# save the model to disk
filename_svm_model_up = 'svm_model_up.sav'
filename_lm_model_up = 'lm_model_up.sav'
filename_svm_model_dn = 'svm_model_dn.sav'
filename_lm_model_dn = 'lm_model_dn.sav'
pickle.dump(svm_model_up, open(filename_svm_model_up, 'wb'))
pickle.dump(lm_model_up, open(filename_lm_model_up, 'wb'))
pickle.dump(svm_model_dn, open(filename_svm_model_dn, 'wb'))
pickle.dump(lm_model_dn, open(filename_lm_model_dn , 'wb'))

loading classification for LSTM model saving


In [37]:
#### loading classification model from /Dropbox/DataScience/ML_20Sep
filename_svm_model_up = 'svm_model_up.sav'
filename_lm_model_up = 'lm_model_up.sav'
filename_svm_model_dn = 'svm_model_dn.sav'
filename_lm_model_dn = 'lm_model_dn.sav'
# load the model from disk
loaded_svm_up_model = pickle.load(open(filename_svm_model_up, 'rb'))
loaded_lm_up_model = pickle.load(open(filename_lm_model_up, 'rb'))
loaded_svm_dn_model = pickle.load(open(filename_svm_model_dn, 'rb'))
loaded_lm_dn_model = pickle.load(open(filename_lm_model_dn, 'rb'))

def classification_up_dn(data):
    X=data[['askPrice','askSize','bidPrice','bidSize','vwap','spread','v','return','sigma']]
    y1=data.U
    y2=data.D
    
    
    predict_svm_up=loaded_svm_up_model.predict(X)
    predict_lm_up=loaded_lm_up_model.predict(X)
    predict_svm_dn=loaded_svm_dn_model.predict(X)
    predict_lm_dn=loaded_lm_dn_model.predict(X)
    
    data['predict_svm_up']=predict_svm_up
    data['predict_lm_up']=predict_lm_up
    data['predict_svm_dn']=predict_svm_dn
    data['predict_lm_dn']=predict_lm_dn
    
    data['predict_svm']=data.predict_svm_up+data.predict_svm_dn
    data['predict_lm']=data.predict_lm_up+data.predict_lm_dn
    
    data['UD']=np.where(np.logical_and(data.predict_svm>0,data.predict_lm>0),1,np.where(np.logical_and(data.predict_svm<0,data.predict_lm<0),-1,0))  
       
    df_ml['UD']=data.UD

In [38]:
classification_up_dn(data_cl)

In [39]:
df_ml.tail()


Out[39]:
mid vwap arima km REG SVR UD
2017-08-04 20:55:36.097306 247.320007 247.312500 247.326894 247.328544 247.320012 247.288138 0
2017-08-04 20:55:37.144216 247.330002 247.318756 247.339441 247.328683 247.330006 247.288138 0
2017-08-04 20:55:38.206749 247.330002 247.328201 247.345436 247.328808 247.330006 247.288138 0
2017-08-04 20:55:39.269286 247.330002 247.328201 247.340588 247.328922 247.330006 247.288138 0
2017-08-04 20:55:40.331817 247.330002 247.328201 247.346310 247.329025 247.330006 247.288138 0

In [40]:
### LSTM

#df.loc[:, cols].prod(axis=1)
def lstm_processing(df):
    df=df.dropna()
    df_price=df[['mid','vwap','arima','km','REG','SVR']]
    #normalization
    dfn=normalise(df_price,12)
    dfn['UD']=df.UD
    return dfn

import numpy
import matplotlib.pyplot as plt
import pandas
import math
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
import numpy
import matplotlib.pyplot as plt
import pandas
import math
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error

# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
    dataX, dataY = [], []
    for i in range(len(dataset)-look_back-1):
        a = dataset[i:(i+look_back), 0]
        b = dataset[i:(i+look_back), 1]
        c = dataset[i:(i+look_back), 2]
        d = dataset[i:(i+look_back), 3]
        e=  dataset[i:(i+look_back), 4]
        f = dataset[i:(i+look_back), 5]
        g=  dataset[i:(i+look_back), 6]
        dataX.append(np.c_[b,c,d,e,f,g])
        #dataX.append(b)
        #dataX.append(c)
        #dataX.append(d)
        #dataX.append(e)
        #dataX.concatenate((a,bT,cT,dT,eT),axis=1)
        dataY.append(dataset[i + look_back,0])
    return np.array(dataX), np.array(dataY)


Using TensorFlow backend.

In [41]:
# Another function to handle normalization. normalizing and adding UD is not done rather nl.log() only of the 6 columns.
def lstm_processing(df):
    df=df.dropna()
    df_price=df[['mid','vwap','arima','km','REG','SVR']]
    #normalization
    dfn=np.log(df_price)
    return dfn

import numpy
import matplotlib.pyplot as plt
import pandas
import math
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
import numpy
import matplotlib.pyplot as plt
import pandas
import math
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error

# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
    dataX, dataY = [], []
    for i in range(len(dataset)-look_back-1):
        a = dataset[i:(i+look_back), 0]
        b = dataset[i:(i+look_back), 1]
        c = dataset[i:(i+look_back), 2]
        d = dataset[i:(i+look_back), 3]
        e=  dataset[i:(i+look_back), 4]
        f = dataset[i:(i+look_back), 5]
    
        dataX.append(np.c_[b,c,d,e,f])
        #dataX.append(b)
        #dataX.append(c)
        #dataX.append(d)
        #dataX.append(e)
        #dataX.concatenate((a,bT,cT,dT,eT),axis=1)
        dataY.append(dataset[i + look_back,0])
    return np.array(dataX), np.array(dataY)

In [42]:
#normalization
df_lstm=lstm_processing(df_ml)
df_lstm=df_lstm.dropna()
dataset=df_lstm.values
dataset = dataset.astype('float32')
# reshape into X=t and Y=t+1
look_back = 3
X_,Y_ = create_dataset(dataset,look_back)
    
# reshape input to be [samples, time steps, features]
X_ = numpy.reshape(X_, (X_.shape[0],X_.shape[1],X_.shape[2]))

In [45]:
epochs=10
batch_size=50

In [46]:
# create and fit the LSTM network
model = Sequential()
model.add(LSTM(4, input_shape=(look_back,5)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(X_,Y_, epochs, batch_size, verbose=2)


Epoch 1/50
1s - loss: 28.8755
Epoch 2/50
0s - loss: 23.1425
Epoch 3/50
1s - loss: 18.9896
Epoch 4/50
1s - loss: 15.3697
Epoch 5/50
1s - loss: 12.2387
Epoch 6/50
1s - loss: 9.5556
Epoch 7/50
1s - loss: 7.2847
Epoch 8/50
1s - loss: 5.3936
Epoch 9/50
1s - loss: 3.8522
Epoch 10/50
1s - loss: 2.6306
Epoch 11/50
1s - loss: 1.6975
Epoch 12/50
1s - loss: 1.0189
Epoch 13/50
1s - loss: 0.5568
Epoch 14/50
1s - loss: 0.2690
Epoch 15/50
1s - loss: 0.1104
Epoch 16/50
1s - loss: 0.0365
Epoch 17/50
1s - loss: 0.0091
Epoch 18/50
1s - loss: 0.0015
Epoch 19/50
1s - loss: 1.6366e-04
Epoch 20/50
1s - loss: 9.3355e-06
Epoch 21/50
1s - loss: 2.7005e-07
Epoch 22/50
1s - loss: 3.2517e-08
Epoch 23/50
1s - loss: 2.9866e-08
Epoch 24/50
1s - loss: 2.9819e-08
Epoch 25/50
1s - loss: 2.9771e-08
Epoch 26/50
1s - loss: 2.9775e-08
Epoch 27/50
1s - loss: 2.9805e-08
Epoch 28/50
1s - loss: 2.9800e-08
Epoch 29/50
1s - loss: 2.9809e-08
Epoch 30/50
1s - loss: 2.9828e-08
Epoch 31/50
1s - loss: 2.9860e-08
Epoch 32/50
1s - loss: 2.9813e-08
Epoch 33/50
1s - loss: 2.9872e-08
Epoch 34/50
1s - loss: 2.9917e-08
Epoch 35/50
1s - loss: 3.0050e-08
Epoch 36/50
1s - loss: 2.9951e-08
Epoch 37/50
1s - loss: 3.0194e-08
Epoch 38/50
1s - loss: 3.0446e-08
Epoch 39/50
1s - loss: 3.0458e-08
Epoch 40/50
1s - loss: 3.0473e-08
Epoch 41/50
1s - loss: 3.0672e-08
Epoch 42/50
1s - loss: 3.0904e-08
Epoch 43/50
1s - loss: 3.2025e-08
Epoch 44/50
1s - loss: 3.1720e-08
Epoch 45/50
1s - loss: 3.2731e-08
Epoch 46/50
1s - loss: 3.2512e-08
Epoch 47/50
1s - loss: 3.3936e-08
Epoch 48/50
1s - loss: 3.4977e-08
Epoch 49/50
1s - loss: 3.5999e-08
Epoch 50/50
1s - loss: 3.7006e-08
Out[46]:
<keras.callbacks.History at 0x7fbe143366a0>

In [47]:
model.save("28sep.h5")

In [ ]: