In [1]:
%pylab inline
import matplotlib.pyplot as plt

import tensorflow as tf
#from tensorflow.models.rnn import rnn, rnn_cell
import numpy as np
import numpy.random as rng

import time

import pandas.io.data as web
from pykalman import KalmanFilter
import numpy as np
import pandas as pd
import seaborn


Populating the interactive namespace from numpy and matplotlib

In [2]:
def get_prices(symbol):
    start, end = '2007-05-02', '2016-04-11'
    data = web.DataReader(symbol, 'yahoo', start, end)
    data=pd.DataFrame(data)
    prices=data['Adj Close']
    prices=prices.astype(float)
    return prices

def get_returns(prices):
        return ((prices-prices.shift(-1))/prices)[:-1]
    
def get_data(list):
    l = []
    for symbol in list:
        rets = get_returns(get_prices(symbol))
        l.append(rets)
    return np.array(l).T
        
rets = get_data(['C', 'GS'])

In [3]:
def lstm_iterator(raw_data, batch_size, num_steps, data_size):
 
  raw_data = np.array(raw_data, dtype=np.float32)

  data_len = len(raw_data)
  batch_len = data_len // batch_size
  data = np.zeros([batch_size, batch_len, data_size], dtype=np.float32)
  for i in range(batch_size):
    data[i] = raw_data[batch_len * i:batch_len * (i + 1),:]

  epoch_size = (batch_len - 1) // num_steps

  if epoch_size == 0:
    raise ValueError("epoch_size == 0, decrease batch_size or num_steps")

  for i in range(epoch_size):
    x = data[:, i*num_steps:(i+1)*num_steps]
    y = data[:, i*num_steps+1:(i+1)*num_steps+1]
    yield (x, y)

In [4]:
def run_epoch(session, m, data, eval_op, verbose=False, get_out=False):
  """Runs the model on the given data."""
  epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps
  start_time = time.time()
  costs = 0.0
  iters = 0
  state = m.initial_state.eval()
  output = []
  inp = []
  targ = []
  for step, (x, y) in enumerate(lstm_iterator(data, m.batch_size, m.num_steps, m.data_size)):
    cost, state, _ = session.run([m.cost, m.final_state, eval_op],
                                 {m.input_data: x,
                                  m.targets: y,
                                  m.initial_state: state})
    costs += cost
    #print(cost)
    iters += m.num_steps
    if get_out: 
      output.append(_)
      inp.append(x)
      targ.append(y)
    
  if len(output) > 0: 
    return output, inp, targ

  ''' if verbose and step % (epoch_size // 10) == 10:
      print("%.3f perplexity: %.3f speed: %.0f wps" %
            (step * 1.0 / epoch_size, np.exp(costs / iters),
             iters * m.batch_size / (time.time() - start_time)))
  '''
  return np.exp(costs / iters)

In [5]:
class SmallConfig(object):
    """Small config."""
    init_scale = 1.1
    learning_rate = .60
    max_grad_norm = 5
    num_layers = 4
    num_steps = 25
    hidden_size = 200
    max_epoch = 4
    max_max_epoch = 13
    keep_prob = 1.0
    lr_decay = 0.9
    batch_size = 20
    #vocab_size = 10000
    
class TestConfig(object):
  """Tiny config, for testing."""
  init_scale = 0.1
  learning_rate = 1.0
  max_grad_norm = 1
  num_layers = 4
  num_steps = 25
  hidden_size = 20
  max_epoch = 1
  max_max_epoch = 1
  keep_prob = 1.0
  lr_decay = 0.5
  batch_size = 20
  #vocab_size = 10000
  #data_size= 2

def get_config(test=False):
    if test:
        return TestConfig()
    return SmallConfig()

In [6]:
class LSTMModel(object):

    def __init__(self, is_training, config):
        self.batch_size = batch_size = config.batch_size
        self.num_steps = num_steps = config.num_steps
        self.data_size = data_size = config.data_size
        size = config.hidden_size
        
        
        
        position_transform = tf.constant([[-1.,0.,1]])
        pb_number = 3
        
        # set PLACEHOLDERS
        self._input_data = tf.placeholder(tf.float32, [batch_size, num_steps, data_size])
        self._targets = tf.placeholder(tf.float32, [batch_size, num_steps, data_size])
        # set VARIABLES
        lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(size, forget_bias=0.0)
        cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * config.num_layers)
        cell = tf.nn.rnn_cell.InputProjectionWrapper(cell,  size)
        cell = tf.nn.rnn_cell.OutputProjectionWrapper(cell, pb_number*data_size)
        if is_training and config.keep_prob < 1:
            cell = tf.nn.rnn_cell.DropoutWrapper(
                cell, output_keep_prob=config.keep_prob)
        
        # loop through timesteps and record output
        outputs = []
        self._initial_state = cell.zero_state(batch_size, tf.float32)
        state = self._initial_state
        with tf.variable_scope("RNN"):
            for time_step in range(num_steps):
                if time_step > 0: tf.get_variable_scope().reuse_variables()
                    
                (cell_output, state) = cell(tf.reshape(self._input_data[:, time_step,:], [-1, data_size]), state)
                outputs.append(cell_output)
        
        self._final_state = state    #shape: [20,160]
        
        #reshape to [batchsize*num_steps, 3*data_size]
        self._output = output = tf.reshape(tf.concat(1, outputs), [-1, data_size*pb_number])
        targets = tf.reshape(self._targets, [-1,data_size])
        
        
        
        pos_one_hot = {}
        t_dict = {}
        returns = []
        for i in range(data_size):
            relevant_output = output[:,i*pb_number:(i+1)*pb_number]
            sample = tf.argmax(relevant_output) #this should be SAMPLE
            pos_one_hot[i] = tf.one_hot(sample, depth= 3, on_value=-1.0, off_value=0.0)
            t_dict[i] = tf.matmul(tf.reshape(targets[:,i], [-1,1]) , position_transform)
            cross_entropy = tf.nn.softmax_cross_entropy_with_logits(output, pos_one_hot[i])
            
            returns.append( tf.mul(sample , t_dict[i])  )
        returns = tf.reduce_sum(tf.concat(1,returns), 1)
        total_return = tf.reduce_prod(returns+1)
        
        
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(output, tf.nn.softmax(target_menu))
        
        #pos_one_hot = tf.concat(1, [ v for v in pos_one_hot.values() ])
        self._cost = cost  =  total_return #* -100
        cost = cost*-1
        
        #vol = tf.(returns)
        #sharpe = total_return/vol
        
        
        
        
        
        '''
        self._output = output = tf.reshape(tf.concat(1, outputs), [-1, data_size*3]) # shape: [500, 3]=[??,data_size*3]
        self._final_state = state    #shape: [20,160]    =     [hidden size, ??]
        targets = tf.reshape(self._targets, [-1,data_size])
        
        outdict = {}
        tm_dict = {}
        xe_list = []
        for i in range(data_size):
            outdict[i] = output[:,i*3:(i+1)*3]
            tm_dict[i] = tf.matmul(tf.reshape(targets[:,i], [-1,1]) , position_transform)
            xe_list.append(tf.nn.softmax_cross_entropy_with_logits(outdict[i], tf.nn.softmax(tm_dict[i])))
        
        loss = tf.add_n(xe_list)
        self._cost = cost = tf.reduce_sum(loss) / batch_size
        
        #drets = tf.mul(target_menu, tf.nn.softmax(output) )   # shape: [500, 3]   =   [??, data_size*3]
        #max_possible_ret = tf.reduce_max(target_menu, 1)  # shape: [500]

        #position = tf.cast(tf.argmax(output, 1) - 1, tf.float32)
        #drets_position = tf.mul(position, tf.reshape(self._targets, [-1]))
        
        #set LOSS
        #loss = tf.nn.softmax_cross_entropy_with_logits(outputdict, tf.nn.softmax(target_menu))
        #loss=max_possible_ret-tf.reduce_sum(drets,1)#tf.nn.l2_loss(output - tf.reshape(self._targets, [-1, 1]))
        
        #set COST
        #self._cost = cost = tf.reduce_sum(loss) / batch_size
        '''
        
        if not is_training:
            return
        #TRAINING STEP
        self._lr = tf.Variable(0.0, trainable=False)
        tvars = tf.trainable_variables()
        print(cost)
        #grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),  config.max_grad_norm)
        #print(grads)
        optimizer = tf.train.GradientDescentOptimizer(self.lr)#GradientDescentOptimizer(self.lr)
        self._train_op = optimizer.minimize(cost, var_list=tvars)#apply_gradients(zip(grads, tvars))
        
        
        
        #####TODO
        # whats up with grads?
        #normalize input 
        #try lm nn at the end, see line 31 of https://github.com/sherjilozair/char-rnn-tensorflow/blob/master/model.py
        # seq2seq.rnn_decoder and seq2seq.sequence_loss_by_example

    def assign_lr(self, session, lr_value):
        session.run(tf.assign(self.lr, lr_value))

    @property
    def input_data(self):
        return self._input_data

    @property
    def targets(self):
        return self._targets

    @property
    def initial_state(self):
        return self._initial_state

    @property
    def cost(self):
        return self._cost

    @property
    def final_state(self):
        return self._final_state

    @property
    def lr(self):
        return self._lr

    @property
    def train_op(self):
        return self._train_op
    
    @property
    def output(self):
        return self._output

In [7]:
raw_data = rets, rets[:2200], rets[2200:]
train_data, valid_data, test_data = raw_data
#train_data =valid_data = test_data =  raw_data

config = eval_config = get_config()
eval_config = get_config(test=True)
eval_config.batch_size = 1
eval_config.num_steps = 1

config.data_size = eval_config.data_size = rets.shape[1]

with tf.Graph().as_default(), tf.Session() as session:
    initializer = tf.random_uniform_initializer(-config.init_scale,
                                            config.init_scale)
    with tf.variable_scope("model", reuse=None, initializer=initializer):
        m = LSTMModel(is_training=True, config=config)
    with tf.variable_scope("model", reuse=True, initializer=initializer):
        mvalid = LSTMModel(is_training=False, config=config)
        #mtest = LSTMModel(is_training=False, config=eval_config)

    tf.initialize_all_variables().run()

    for i in range(40):#config.max_max_epoch):
        lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
        m.assign_lr(session, config.learning_rate * lr_decay)

        print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
        train_perplexity = run_epoch(session, m, train_data, m.train_op,
                                   verbose=True)

        print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
        #valid_perplexity = run_epoch(session, mvalid, valid_data, tf.no_op())
        #print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))

    test_output, test_inp, test_targ = run_epoch(session, mvalid, train_data, mvalid.output, get_out = True)
    #print(test_perplexity)
    #print("Test Perplexity: %.3f" % test_perplexity)


WARNING:tensorflow:<tensorflow.python.ops.rnn_cell.BasicLSTMCell object at 0x10fbcb6a0>: Using a concatenated state is slower and will soon be deprecated.  Use state_is_tuple=True.
WARNING:tensorflow:<tensorflow.python.ops.rnn_cell.BasicLSTMCell object at 0x10fbcb6a0>: Using a concatenated state is slower and will soon be deprecated.  Use state_is_tuple=True.
WARNING:tensorflow:<tensorflow.python.ops.rnn_cell.BasicLSTMCell object at 0x11a8cdac8>: Using a concatenated state is slower and will soon be deprecated.  Use state_is_tuple=True.
WARNING:tensorflow:<tensorflow.python.ops.rnn_cell.BasicLSTMCell object at 0x11a8cdac8>: Using a concatenated state is slower and will soon be deprecated.  Use state_is_tuple=True.
Tensor("model/mul:0", shape=(), dtype=float32)
Epoch: 1 Learning rate: 0.600
Epoch: 1 Train Perplexity: 1.026
Epoch: 2 Learning rate: 0.600
Epoch: 2 Train Perplexity: 1.028
Epoch: 3 Learning rate: 0.600
Epoch: 3 Train Perplexity: 1.028
Epoch: 4 Learning rate: 0.600
Epoch: 4 Train Perplexity: 1.028
Epoch: 5 Learning rate: 0.600
Epoch: 5 Train Perplexity: 1.029
Epoch: 6 Learning rate: 0.540
Epoch: 6 Train Perplexity: 1.029
Epoch: 7 Learning rate: 0.486
Epoch: 7 Train Perplexity: 1.029
Epoch: 8 Learning rate: 0.437
Epoch: 8 Train Perplexity: 1.029
Epoch: 9 Learning rate: 0.394
Epoch: 9 Train Perplexity: 1.029
Epoch: 10 Learning rate: 0.354
Epoch: 10 Train Perplexity: 1.029
Epoch: 11 Learning rate: 0.319
Epoch: 11 Train Perplexity: 1.029
Epoch: 12 Learning rate: 0.287
Epoch: 12 Train Perplexity: 1.029
Epoch: 13 Learning rate: 0.258
Epoch: 13 Train Perplexity: 1.029
Epoch: 14 Learning rate: 0.232
Epoch: 14 Train Perplexity: 1.029
Epoch: 15 Learning rate: 0.209
Epoch: 15 Train Perplexity: 1.029
Epoch: 16 Learning rate: 0.188
Epoch: 16 Train Perplexity: 1.029
Epoch: 17 Learning rate: 0.169
Epoch: 17 Train Perplexity: 1.029
Epoch: 18 Learning rate: 0.153
Epoch: 18 Train Perplexity: 1.029
Epoch: 19 Learning rate: 0.137
Epoch: 19 Train Perplexity: 1.029
Epoch: 20 Learning rate: 0.124
Epoch: 20 Train Perplexity: 1.029
Epoch: 21 Learning rate: 0.111
Epoch: 21 Train Perplexity: 1.029
Epoch: 22 Learning rate: 0.100
Epoch: 22 Train Perplexity: 1.029
Epoch: 23 Learning rate: 0.090
Epoch: 23 Train Perplexity: 1.029
Epoch: 24 Learning rate: 0.081
Epoch: 24 Train Perplexity: 1.029
Epoch: 25 Learning rate: 0.073
Epoch: 25 Train Perplexity: 1.029
Epoch: 26 Learning rate: 0.066
Epoch: 26 Train Perplexity: 1.029
Epoch: 27 Learning rate: 0.059
Epoch: 27 Train Perplexity: 1.029
Epoch: 28 Learning rate: 0.053
Epoch: 28 Train Perplexity: 1.029
Epoch: 29 Learning rate: 0.048
Epoch: 29 Train Perplexity: 1.029
Epoch: 30 Learning rate: 0.043
Epoch: 30 Train Perplexity: 1.029
Epoch: 31 Learning rate: 0.039
Epoch: 31 Train Perplexity: 1.029
Epoch: 32 Learning rate: 0.035
Epoch: 32 Train Perplexity: 1.029
Epoch: 33 Learning rate: 0.031
Epoch: 33 Train Perplexity: 1.029
Epoch: 34 Learning rate: 0.028
Epoch: 34 Train Perplexity: 1.029
Epoch: 35 Learning rate: 0.025
Epoch: 35 Train Perplexity: 1.029
Epoch: 36 Learning rate: 0.023
Epoch: 36 Train Perplexity: 1.029
Epoch: 37 Learning rate: 0.021
Epoch: 37 Train Perplexity: 1.029
Epoch: 38 Learning rate: 0.019
Epoch: 38 Train Perplexity: 1.029
Epoch: 39 Learning rate: 0.017
Epoch: 39 Train Perplexity: 1.029
Epoch: 40 Learning rate: 0.015
Epoch: 40 Train Perplexity: 1.029

In [8]:
plot(test_output[3])


Out[8]:
[<matplotlib.lines.Line2D at 0x11cd64668>,
 <matplotlib.lines.Line2D at 0x11cd64a58>,
 <matplotlib.lines.Line2D at 0x11cd64d68>,
 <matplotlib.lines.Line2D at 0x11cd6b048>,
 <matplotlib.lines.Line2D at 0x11cd6b2e8>,
 <matplotlib.lines.Line2D at 0x11cd6b588>]

In [78]:
get_positions(test_output, 2)[:,1]


Out[78]:
array([-1, -1,  0, ...,  0, -1, -1])

In [79]:
plot(get_positions(test_output, 2)[:,1])


Out[79]:
[<matplotlib.lines.Line2D at 0x1217bb860>]

In [80]:
pb_number = 2

def get_positions(output, ds):
    ordered = np.concatenate([x.reshape(-1, 25, pb_number*ds) for x in test_output], 1).reshape(-1,pb_number*ds)
    ls = []
    for i in range(ds):
        ls.append(np.argmax(ordered[:,i*pb_number:(i+1)*pb_number], 1)-1)
    return np.concatenate([x.reshape([-1,1]) for x in ls],1)
    #return np.argmax(np.concatenate(output), 1)-1
    #return np.argmax(np.concatenate([x.reshape(-1, 25, 3*2) for x in output], 1).reshape(-1,3*2), 1)-1

def get_target(targ):
    #return np.concatenate([(x.reshape(-1)) for x in test_targ])
    return np.concatenate(targ, 1).reshape(-1,2)

def get_equitycurve(pos, tar):
    ls = []
    ks = []
    for i in range(pos.shape[1]):
        rets = (pos*tar)[:,i]
        eq = np.cumprod((pos*tar)[:,i] + 1)
        ls.append(eq)
        ks.append(rets)
        plot(eq)
    show()
    plot(np.cumprod(np.sum(ks,0)+1))
    return 
        
    #return [np.cumprod((pos*tar)[:,i] + 1) for i in range(pos.shape[1])]

get_equitycurve(get_positions(test_output, 2), get_target(test_targ))

#plot(get_equitycurve(get_positions(test_output, 2), get_target(test_targ)))


---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-80-85b95d90264f> in <module>()
     29     #return [np.cumprod((pos*tar)[:,i] + 1) for i in range(pos.shape[1])]
     30 
---> 31 get_equitycurve(get_positions(test_output, 2), get_target(test_targ))
     32 
     33 #plot(get_equitycurve(get_positions(test_output, 2), get_target(test_targ)))

<ipython-input-80-85b95d90264f> in get_equitycurve(pos, tar)
     18     ks = []
     19     for i in range(pos.shape[1]):
---> 20         rets = (pos*tar)[:,i]
     21         eq = np.cumprod((pos*tar)[:,i] + 1)
     22         ls.append(eq)

ValueError: operands could not be broadcast together with shapes (3000,2) (2000,2) 

In [73]:
plot(eq[1])


---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-73-c4ecaab94ecb> in <module>()
----> 1 plot(eq[1])

NameError: name 'eq' is not defined

In [80]:
plot(pos[100:125])


---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-80-4452cc5f64ed> in <module>()
----> 1 plot(pos[100:125])

NameError: name 'pos' is not defined

In [85]:
x=0
n = 6
plot(test_output[0][25:50].reshape(-1,3))


Out[85]:
[<matplotlib.lines.Line2D at 0x135a37ef0>,
 <matplotlib.lines.Line2D at 0x135a18320>,
 <matplotlib.lines.Line2D at 0x135a185f8>]

In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [229]:
plot(np.cumprod((np.argmax(test_output[0],1)-1) * test_targ[0].reshape(-1)+1))


Out[229]:
[<matplotlib.lines.Line2D at 0x127872d30>]

In [214]:
test_output[0].shape


Out[214]:
(500, 3)

In [218]:
test_targ[0].reshape(-1)


Out[218]:
array([ -2.59504002e-03,   3.88248474e-03,   2.96951691e-03,
        -7.44601479e-03,   1.69992819e-02,   1.69177400e-03,
         4.70721256e-03,   1.32415968e-03,  -4.01590280e-02,
         2.00323318e-03,  -3.64963547e-03,   2.90904660e-03,
        -4.37630061e-03,   1.27090258e-03,   1.45425892e-03,
        -3.45899235e-03,   3.80993495e-03,  -5.28141344e-03,
         1.28622735e-02,  -3.66948894e-04,   6.60424680e-03,
         4.61680628e-03,   1.05751613e-02,   1.51884034e-02,
        -1.54226488e-02,   1.00798355e-02,  -2.50318591e-02,
         9.31291468e-03,   3.40505913e-02,   3.13581303e-02,
         2.90240673e-03,   1.85849052e-02,   3.35386321e-02,
        -5.90179348e-03,   3.98970395e-03,   1.46088153e-02,
         1.41081400e-02,  -3.39558572e-02,  -1.40747463e-03,
         1.35862939e-02,   4.98695113e-03,   6.89071193e-02,
         2.02545281e-02,   4.85024862e-02,   2.28412598e-02,
         4.76054214e-02,   1.52649088e-02,  -6.07902650e-03,
        -1.41994311e-02,  -6.94071725e-02,   1.44857559e-02,
        -1.02400750e-01,  -3.42222117e-02,  -6.44606585e-03,
         5.84969968e-02,   1.17914136e-02,   4.40568626e-02,
        -2.83245016e-02,  -1.12978511e-01,  -7.55034992e-03,
        -1.41549092e-02,   1.14942668e-02,  -2.15946753e-02,
         3.41463201e-02,   7.57577177e-03,  -5.51316328e-03,
         1.47617059e-02,   3.63869853e-02,  -1.28831351e-02,
        -2.80701462e-02,  -2.51706913e-02,  -4.49438319e-02,
         3.18599096e-03,  -3.59566044e-03,   1.95063446e-02,
         2.92171314e-02,  -1.30606899e-02,  -1.56950857e-02,
        -5.29801659e-02,   4.71703103e-03,  -6.31918060e-03,
        -2.61643101e-02,   6.68026805e-02,  -4.20764834e-02,
        -6.55479804e-02,   7.08661154e-02,   1.05932206e-02,
         3.74730583e-03,   3.49274538e-02,   1.51447728e-01,
        -3.34646031e-02,   1.09206326e-01,  -1.86742663e-01,
        -2.40240246e-01,   3.09927091e-02,   9.99559183e-04,
         5.15257046e-02,  -2.37341747e-02,  -3.81246470e-02,
         1.19106695e-01,  -2.81690117e-02,   5.20548299e-02,
        -8.67059268e-03,  -1.14612589e-02,  -1.07648715e-01,
        -1.02302041e-02,   1.51898727e-01,  -1.01492584e-01,
         2.16802955e-02,   3.32409404e-02,   1.23209193e-01,
         4.90195863e-02,   1.37457058e-01,   2.23107591e-01,
        -9.74359140e-02,  -2.14953303e-01,   3.07692271e-02,
         2.38095205e-02,   3.90243948e-01,   1.99999988e-01,
        -1.66666657e-02,   7.37704411e-02,   9.73451659e-02,
        -9.80397034e-03,  -1.94174722e-02,   3.71747166e-02,
        -7.33590499e-02,  -5.03597818e-02,  -8.56164098e-02,
         4.41640653e-02,   3.30027984e-03,   7.61589780e-02,
         5.01792058e-02,  -5.66037856e-02,   1.07142664e-02,
         1.44405039e-02,   1.46519765e-02,  -1.04089245e-01,
        -8.41750875e-02,   2.48447489e-02,  -9.55415517e-03,
        -3.15452600e-03,  -2.20126249e-02,  -1.01538435e-01,
        -6.14525378e-02,  -1.31578799e-02,  -2.33766604e-02,
         6.34517521e-02,  -7.85907060e-02,  -2.01005004e-02,
         3.78378890e-02,   3.08988318e-02,   7.24637732e-02,
        -6.24999925e-02,  -5.88238193e-03,   2.33917814e-02,
         1.49700725e-02,  -1.82370488e-02,  -1.19403275e-02,
         5.89970406e-03,   1.48368087e-02,   3.01212375e-03,
        -2.71903761e-02,  -3.82352769e-02,  -3.11615430e-02,
        -2.74721067e-03,   1.64382998e-02,  -1.11419754e-02,
         3.03029865e-02,   5.68184629e-03,  -2.85709929e-03,
         2.56409533e-02,  -3.50877456e-02,   2.25989521e-02,
         5.49132191e-02,   0.00000000e+00,  -2.11639926e-02,
        -4.14508544e-02,   1.49254464e-02,   2.77777389e-02,
        -1.81818567e-02,  -1.02040032e-02,   4.29292023e-02,
         3.95778455e-02,  -2.19779909e-02,  -4.03225906e-02,
        -7.75192399e-03,   5.12823043e-03,   0.00000000e+00,
        -2.83505786e-02,   0.00000000e+00,   7.51888426e-03,
        -1.26262745e-02,  -2.49382830e-03,   1.99004952e-02,
         1.26903411e-02,   2.82776952e-02,  -4.23280932e-02,
        -1.52283991e-02,   6.75000325e-02,   0.00000000e+00,
         4.79616271e-03,  -4.81927674e-03,  -4.79618693e-03,
        -3.34128365e-02,  -3.69515456e-02,   1.11358445e-02,
         3.15315723e-02,  -2.79069971e-02,   1.35747045e-02,
         1.60549860e-02,  -6.99299434e-03,   2.31481455e-02,
         7.10899197e-03,  -2.62529440e-02,   6.97670830e-03,
         2.10773405e-02,   1.91387311e-02,  -1.70731805e-02,
         1.43885361e-02,  -9.73240845e-03,  -1.20481793e-02,
        -2.38095224e-02,  -2.79069971e-02,  -6.78731827e-03,
         2.17870390e-03,   4.36683372e-03,   6.57893578e-03,
        -4.41503292e-03,   1.09890206e-02,   1.55555652e-02,
         2.25730194e-03,   0.00000000e+00,  -2.48868410e-02,
        -8.83006584e-03,   4.37638909e-03,   6.59339502e-03,
         2.21244502e-03,   0.00000000e+00,  -1.77384485e-02,
         0.00000000e+00,   2.17865109e-02,  -6.68150233e-03,
         0.00000000e+00,   8.84957891e-03,  -8.92859325e-03,
         2.30088774e-02,  -9.05829016e-04,   2.89593358e-02,
         1.16495918e-02,  -1.25692999e-02,  -4.38116603e-02,
        -1.39908856e-02,   4.41531837e-02,   2.81486567e-02,
         5.23579568e-02,   6.11285865e-02,  -4.25709821e-02,
        -6.96557313e-02,  -1.01048052e-02,   3.96442860e-02,
        -3.78086194e-02,   4.75835986e-02,   9.79703739e-02,
        -5.53872250e-02,  -1.31201297e-02,  -5.30150458e-02,
         5.34204915e-02,  -7.55176991e-02,  -5.09626269e-02,
        -4.88505848e-02,   5.34247085e-02,  -2.74964049e-02,
         1.65492818e-02,  -6.98173866e-02,   2.99760066e-02,
        -1.08157787e-02,   1.10058226e-02,  -1.79289375e-02,
        -1.67020801e-02,   4.77895886e-03,  -2.43097395e-02,
         8.79066996e-04,   1.23166433e-02,   4.63183485e-02,
        -3.48693281e-02,  -2.28639673e-02,  -5.88238053e-03,
        -2.63159699e-03,  -6.29921183e-02,   3.40192579e-02,
        -3.01051326e-02,  -1.15797492e-02,  -1.30825741e-02,
        -2.44822185e-02,   7.35301198e-03,   2.38094479e-02,
        -6.23304723e-03,  -8.07970297e-03,   1.73658021e-02,
         5.86080458e-03,   3.68452806e-04,   1.29008247e-02,
         2.20395997e-02,  -4.66004014e-02,  -4.23357673e-02,
        -1.15546361e-02,   1.03839755e-03,   0.00000000e+00,
        -1.38598087e-03,   4.15222254e-03,   3.47467110e-04,
         1.04276370e-03,  -2.78357067e-03,  -7.28664733e-03,
        -3.27247232e-02,  -2.50166692e-02,   7.80994073e-03,
         2.95178834e-02,  -8.11086409e-03,   4.69324272e-03,
         1.17885014e-02,  -1.94273982e-02,   8.69276747e-03,
        -2.02359585e-03,   2.35518324e-04,   9.89161059e-03,
        -1.87916346e-02,   1.14406003e-02,   2.83418410e-03,
        -8.28987733e-03,   2.11416534e-03,   2.91901696e-02,
        -1.01842564e-02,  -2.88042123e-03,  -5.74441720e-03,
        -1.85625628e-02,  -2.57011503e-03,   1.32836048e-02,
         4.25132364e-03,   4.98098601e-03,  -5.24555845e-03,
        -2.03985013e-02,   1.88284852e-02,  -1.68206487e-02,
        -3.02891503e-03,   4.18119458e-03,   4.43196855e-03,
        -1.10122003e-02,  -2.78099049e-02,  -2.84436764e-03,
        -1.31685026e-02,   9.59807727e-03,   3.29092853e-02,
         2.15031952e-02,   3.05099115e-02,  -3.43310162e-02,
        -1.29787168e-02,  -1.40726352e-02,   6.42082235e-03,
        -5.83695108e-03,   2.07249983e-03,   9.96890571e-03,
        -1.80407073e-02,  -2.03997809e-02,  -1.39337434e-02,
         1.15514630e-02,  -8.26114137e-03,  -1.53876953e-02,
        -1.96811706e-02,  -3.86022468e-04,   7.71766412e-04,
        -1.73778366e-02,   6.45283842e-03,  -1.24164540e-02,
         7.81703275e-03,  -1.89087708e-02,  -1.31451963e-02,
        -1.67907123e-02,   5.25433291e-03,  -7.54594686e-04,
         2.45054089e-03,   5.66890463e-03,   9.31202341e-03,
         1.72645552e-03,   1.88316554e-02,  -8.42147693e-03,
        -1.20411506e-02,   7.10033579e-03,   1.99072957e-02,
        -3.94402072e-03,  -1.17857184e-03,   1.37333910e-03,
         4.12580185e-03,  -2.50542685e-02,   1.53960939e-03,
        -6.36078324e-03,  -3.83070856e-03,  -3.81607097e-04,
         1.52586529e-03,   2.92280177e-03,   6.28111535e-04,
         1.15231285e-02,   1.73802339e-02,  -7.33389985e-03,
        -9.42180492e-03,   3.18194856e-03,  -5.95868006e-03,
        -3.17321881e-03,   6.32642629e-03,   1.27334092e-02,
         1.71973300e-03,  -7.10598100e-03,   4.70389472e-03,
        -6.44466421e-03,  -6.18997356e-03,  -3.18205520e-03,
        -5.92087908e-03,   5.25540859e-03,   8.45318835e-04,
        -6.13367744e-03,  -3.99408350e-03,  -9.00335982e-03,
         6.43280754e-03,  -1.56641454e-02,   1.93190333e-02,
        -2.47708391e-02,   2.66463906e-02,   2.01407671e-02,
         4.19077044e-03,  -3.14629339e-02,   3.47775742e-02,
        -1.40902121e-03,  -6.43214490e-03,  -9.18712933e-03,
        -1.46447904e-02,   1.11175980e-02,  -1.40039362e-02,
        -7.58606056e-03,   4.05404437e-03,  -1.64760407e-02,
        -4.00462467e-03,  -9.68844164e-03,  -7.14953290e-03,
         1.12082832e-03,   5.23661450e-03,  -1.56044625e-02,
         5.55330829e-04,   4.44530137e-03,  -2.60464917e-03,
        -2.79383897e-03,   2.22879928e-03,   2.49441527e-02,
        -1.14547303e-02,   9.43752378e-03,   1.02896634e-02,
         1.07816188e-02,   3.69799021e-03,   3.71163758e-03,
        -1.25490064e-02,   2.32375204e-03,  -1.94097043e-03,
        -4.64938255e-03,   4.62786574e-03,   1.93721033e-03,
        -7.18164910e-03,  -4.62520961e-03,  -5.75483218e-03,
        -4.38678684e-03,  -1.32928521e-03,  -9.10296291e-03,
        -1.52227273e-02,   1.75860897e-02,  -2.44960329e-03,
         1.35338418e-02,   3.12669948e-02,   6.08208738e-02,
         9.13783628e-03,  -4.81154248e-02,  -2.21882220e-02,
         2.99400999e-03,  -3.75377014e-03,   4.74944152e-02,
        -1.59010887e-02,  -7.72965723e-04,   2.31704991e-02,
        -2.09527835e-02,   1.49080437e-02,  -3.73423472e-03,
        -5.87409013e-04,   2.34831544e-03,  -2.00078525e-02,
        -1.13461660e-02,   1.78740900e-02,   2.63310950e-02,
        -8.35153554e-03,   6.50756806e-03,   5.16081462e-03,
         1.97525416e-02,  -2.89028697e-02], dtype=float32)

In [217]:
test_targ[0][0]


Out[217]:
array([[-0.00259504],
       [ 0.00388248],
       [ 0.00296952],
       [-0.00744601],
       [ 0.01699928],
       [ 0.00169177],
       [ 0.00470721],
       [ 0.00132416],
       [-0.04015903],
       [ 0.00200323],
       [-0.00364964],
       [ 0.00290905],
       [-0.0043763 ],
       [ 0.0012709 ],
       [ 0.00145426],
       [-0.00345899],
       [ 0.00380993],
       [-0.00528141],
       [ 0.01286227],
       [-0.00036695],
       [ 0.00660425],
       [ 0.00461681],
       [ 0.01057516],
       [ 0.0151884 ],
       [-0.01542265]], dtype=float32)

In [206]:
test_targ[0][0].reshape(-1)


Out[206]:
array([-0.00259504,  0.00388248,  0.00296952, -0.00744601,  0.01699928,
        0.00169177,  0.00470721,  0.00132416, -0.04015903,  0.00200323,
       -0.00364964,  0.00290905, -0.0043763 ,  0.0012709 ,  0.00145426,
       -0.00345899,  0.00380993, -0.00528141,  0.01286227, -0.00036695,
        0.00660425,  0.00461681,  0.01057516,  0.0151884 , -0.01542265], dtype=float32)

In [137]:
plot(test_inp[0][0])


Out[137]:
[<matplotlib.lines.Line2D at 0x12e4df7b8>]

In [138]:
plot(test_targ[0][0])


Out[138]:
[<matplotlib.lines.Line2D at 0x128dbff98>]

In [167]:
test_output[0][:25,]


Out[167]:
array([[-0.50313473, -9.14566231,  0.35615453],
       [-0.37630498, -7.11112261, -0.70588076],
       [-0.37363619, -7.10911226, -0.7056942 ],
       [-0.37376261, -7.10809135, -0.70627677],
       [-0.37625742, -7.13504505, -0.69292498],
       [-0.37384361, -7.10783291, -0.70645559],
       [-0.37367171, -7.107337  , -0.7067076 ],
       [-0.3738786 , -7.10826683, -0.70620131],
       [-0.3762539 , -7.13502169, -0.69293654],
       [-0.37384126, -7.10775185, -0.70650387],
       [-0.37365335, -7.1073513 , -0.70669627],
       [-0.37384212, -7.10807276, -0.70630753],
       [-0.37625468, -7.13503265, -0.69293034],
       [-0.37382513, -7.10767174, -0.70654678],
       [-0.37368315, -7.1073904 , -0.70667815],
       [-0.37379837, -7.10780811, -0.70645642],
       [-0.37611288, -7.13384342, -0.69351137],
       [-0.3738367 , -7.10773849, -0.70650995],
       [-0.3736918 , -7.10737848, -0.70668697],
       [-0.37380508, -7.10784388, -0.70643675],
       [-0.37624282, -7.13494444, -0.69297278],
       [-0.37383717, -7.10773134, -0.70651484],
       [-0.37368673, -7.10735941, -0.70669711],
       [-0.3738032 , -7.10783863, -0.70643926],
       [-0.37602016, -7.13306904, -0.69388962]], dtype=float32)

In [119]:
plot(rets)


Out[119]:
[<matplotlib.lines.Line2D at 0x11a744c88>]

In [41]:
inv = test_output[0][25*x*1:25*1*(x+1)].reshape(-1,6)

In [235]:
test_output[0][0:25*6].reshape(-1,6).shape


Out[235]:
(150, 6)

In [256]:
np.mul(inv[:,:])


---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
<ipython-input-256-e865e56656d6> in <module>()
----> 1 np.mul(inv[:,:])

AttributeError: 'module' object has no attribute 'mul'

In [42]:
np.sum(inv,1)


Out[42]:
array([ -6.98216486,  -6.98215246,  -6.98232746,  -6.98221302,
        -6.98204231,  -6.98219585,  -6.98207426,  -6.98217106,
        -6.98215723,  -6.98219633,  -6.98216343,  -6.98217678,
        -6.98221302,  -6.98215961,  -6.98213148,  -6.9821701 ,
        -6.98215151,  -6.98215818,  -6.98216295,  -6.98211956,
       -14.57940769, -14.57942486, -14.57956505, -14.57947636, -14.57931995], dtype=float32)

In [ ]: