In [1]:
# load the packages needed
import numpy as np
import sys
# Make the project root importable when the notebook runs from a subdirectory.
if "../" not in sys.path:
    sys.path.append("../")
from tsap.solver import Solver
from tsap.model import AR, MA
from tsap.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array
import tsap.data_processor as dp
from tsap.ts_gen import ar1_gen
import matplotlib.pyplot as plt
# This is a bit of magic to make matplotlib figures appear inline in the notebook
# rather than in a new window.
%matplotlib inline
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
In [10]:
# Load the S&P 500 series from CSV; relative path assumes the notebook is run
# from a directory that is a sibling of data/ — TODO confirm working directory.
data = np.loadtxt("../data/SP500array.csv", delimiter=',')
# First 100 observations of column 0, wrapped as a (1, 100) row vector.
X = np.array([data[0:100,0]])
# Convert the price series to returns (presumably simple returns; verify
# against tsap.data_processor.get_return).
Y = dp.get_return(X)
In [11]:
# generate a fake time series
# NOTE(review): this rebinds Y, silently discarding the real-data returns
# computed in the previous cell — hidden-state hazard on out-of-order runs.
from tsap.ts_gen import ar1_gen  # redundant: already imported in the first cell
Y = ar1_gen(0.5, sigma = 1.0, time = 200, num = 1, burnin = 2000)
# Y is plotted transposed, so rows are presumably series — TODO confirm shape.
plt.plot(Y.T)
plt.xlabel('time')
plt.ylabel('price')
plt.title('Time Series')
plt.show()
In [12]:
# Initialize an AR(1) model with a randomly drawn coefficient.
# Fix the RNG seed so the initial phi (and hence the whole training run)
# is reproducible across kernel restarts; the original cell had no seed.
np.random.seed(0)
lag = 1
sigma = 2.0
intercept = 0.1
# Coefficient matrix of shape (lag, 1); generalized from the hard-coded (1, 1).
phi = np.random.randn(lag, 1)
AR_model = AR(lag=lag, phi=phi, sigma=sigma, intercept=intercept)
# Display the model's initial parameters.
AR_model.params
Out[12]:
In [13]:
# Analytic gradients of the negative log-likelihood at the current parameters.
_, grads = AR_model.loss(Y)
# define a useful method for future use
def rel_error(x, y):
    """Maximum elementwise relative error between arrays x and y.

    The denominator is floored at 1e-8 so the ratio stays finite when
    both entries are (near) zero.
    """
    diff = np.abs(x - y)
    scale = np.maximum(1e-8, np.abs(x) + np.abs(y))
    return np.max(diff / scale)
# Compare the analytic gradients against numerical gradients; the relative
# errors should be tiny (roughly < 1e-6) if the analytic gradients are right.
d_phi = grads['phi']
d_intercept = grads['intercept']
d_sigma = grads['sigma']

# Original cell used Python 2 print statements, which are a SyntaxError on
# Python 3; print(a, b) emits the same space-separated output Py2 did.
d_phi_num = eval_numerical_gradient_array(
    lambda phi: AR_model.loss(Y, lag, phi, sigma, intercept)[0], phi, 1)
print('d_phi error: ', rel_error(d_phi_num, d_phi))

d_intercept_num = eval_numerical_gradient_array(
    lambda intercept: AR_model.loss(Y, lag, phi, sigma, intercept)[0], intercept, 1)
print('d_intercept error: ', rel_error(d_intercept_num, d_intercept))

d_sigma_num = eval_numerical_gradient_array(
    lambda sigma: AR_model.loss(Y, lag, phi, sigma, intercept)[0], sigma, 1)
print('d_sigma error: ', rel_error(d_sigma_num, d_sigma))
In [14]:
# Fit the AR model by plain stochastic gradient descent.
sgd_config = {'learning_rate': 1e-5}
solver = Solver(
    AR_model,
    Y,
    update_rule='sgd',
    optim_config=sgd_config,
    num_epochs=3000,
    batch_size=1,
    print_every=100,
)
solver.train()
# Show the fitted parameters.
AR_model.params
Out[14]:
In [15]:
# Visualize the optimization trajectory of the SGD run.
ax = plt.gca()
ax.plot(solver.loss_history)
ax.set(xlabel='iterations', ylabel='negative log likelihood',
       title='loss history')
plt.show()
In [16]:
# Refit with SGD + momentum (note the smaller learning rate).
momentum_config = {'learning_rate': 1e-6}
solver = Solver(
    AR_model,
    Y,
    update_rule='sgd_momentum',
    optim_config=momentum_config,
    num_epochs=3000,
    batch_size=1,
    print_every=100,
)
solver.train()
# Show the fitted parameters.
AR_model.params
Out[16]:
In [17]:
# Visualize the optimization trajectory of the momentum run.
ax = plt.gca()
ax.plot(solver.loss_history)
ax.set(xlabel='iterations', ylabel='negative log likelihood',
       title='loss history')
plt.show()
In [16]:
# NOTE(review): the model was trained on the returns Y, but this predicts from
# the raw price series X — presumably it should be Y; confirm intended input.
AR_model.predict(X,10)
Out[16]:
In [8]:
# Estimate AR coefficients via Yule-Walker (MLE variant).
from tsap.ts_gen import ar1_gen  # redundant re-import; kept so this cell runs standalone
import tsap.inference as inf

x = ar1_gen(0.5, sigma=1, time=200, num=1, burnin=2000)
# Rebuild X / Y from the raw data (depends on `data` loaded earlier).
X = np.array([data[0:100, 0]])
Y = dp.get_return(X)
# NOTE(review): `x` and `y` below are computed but never used afterwards.
y = inf.yule_walker(x, order=1, method="mle")
# AR(3) fit on real returns; rebinds phi and sigma used by later cells.
phi, sigma = inf.yule_walker(Y, order=3, method="mle")
# Original cell used Python 2 print statements (SyntaxError on Python 3);
# single-argument print(...) produces identical output on both versions.
print(phi)
print(sigma)
In [9]:
# Display the Yule-Walker AR(3) coefficients.
phi
Out[9]:
In [10]:
# Rebuild the AR model with the estimated parameters (rebinds AR_model;
# intercept passed as int 0 — presumably fine, confirm AR accepts it).
AR_model = AR(lag=3, phi=phi, sigma=sigma, intercept=0)
In [11]:
# Forecast the next 10 returns from the observed return series.
rt=AR_model.predict(Y,10)
rt
Out[11]:
In [13]:
# Reconstruct predicted prices from the last observed price X[0,99]
# and the forecast returns — see tsap.data_processor.get_price.
z=dp.get_price(X[0,99],rt)
z
Out[13]:
In [15]:
# Quick shape check: number of rows (series) in X.
X.shape[0]
Out[15]:
In [18]:
# Peek at the true prices immediately after the training window, for
# comparison with the predicted prices z.
data[100:110,0]
Out[18]:
In [17]:
# NOTE(review): import buried this late in the notebook is a re-run hazard;
# consider moving it to the import cell at the top.
import tsap.trading as tra
# Length of the observed price window fed to the trading strategy.
X[0,0:100].shape[0]
Out[17]:
In [18]:
# Run the trading simulation on observed prices + predicted prices z.
# The meaning of the arguments (4, 4, 100) is not visible here — presumably
# window/threshold/capital parameters; verify against tsap.trading.trade.
tra.trade(np.array([X[0,0:100]]),z,AR_model,4,4,100)
Out[18]:
In [ ]: