In [16]:
# Data: time-series data (smartwatch readings or financial prices)
# %matplotlib inline # uncomment to display plots inline in the notebook
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Data reading
# Historical/time-series data to visualize; the smartwatch dataset below is an alternative source
# data_path = 'data/smartwatch_data/experimental_data_analysis/Basis_Watch_Data.csv'
data_path = 'data/financial_data/USD_INR.csv'
data = pd.read_csv(data_path)

# # Data cleaning (optional): replace NaN values
# data = data.fillna(value=0.0)

# # Plotting the raw data before scaling/normalization
# data[:10000]['Price'].plot()
# data[:3].plot()
# data[:100].plot()
# plt.legend()
# plt.show()

# # Inspecting the raw array
# np.array(data).shape, np.array(data).dtype
# np.array(data.iloc[:, 1:], dtype=float).dtype

data_price = np.array(data['Price'], dtype=float)
# data_price = np.array(data)
data_price.shape, data_price.dtype
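
# Sanity check (sketch): the float conversion above only makes sense if the 'Price'
# column is complete; np.isfinite flags both NaN and inf values.
assert np.isfinite(data_price).all(), "Price column contains NaN/inf values"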

plt.plot(data_price, label='data price')
plt.legend()
plt.show()



In [17]:
# test_data = np.array([[1, 2, 3]], dtype=float)
# test_data.shape, test_data.dtype
# np.mean(test_data, axis=0), test_data

# data_ = np.array(data.iloc[:, 1], dtype=float)
# data_.shape, data_.dtype
data_mean = np.mean(data_price, axis=0)
data_std = np.std(data_price, axis=0)
# data_std, data_mean, np.sqrt(np.var(data_price))
# data_norm = (data_price - data_mean) / data_std
# data_norm.dtype, data_norm.shape

# plt.plot(data_norm, label='data price norm')
# # plt.plot(data_price[:10000], label='data price')
# plt.legend()
# plt.show()

data_price_norm = (data_price - data_mean) / data_std
plt.plot(data_price_norm)
plt.show()
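
# The normalization statistics are worth keeping around: predictions made on the
# normalized series can be mapped back to the original price scale by inverting
# the transform (sanity-check sketch):
assert np.allclose(data_price_norm * data_std + data_mean, data_price)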
data_price_norm.mean(), data_price_norm.std(), data_price_norm.var(), data_price_norm.shape, data_price_norm.dtype


Out[17]:
(4.6895674011185329e-17, 1.0, 1.0, (9697,), dtype('float64'))

In [18]:
# Train/validation split: the first 7000 points for training, the rest held out
train_data = data_price_norm[:7000]
test_data = data_price_norm[7000:]
train_data.shape, test_data.shape

# Inputs and one-step-ahead targets: Y is X shifted forward by one time step
X_train = train_data[0:6999]
Y_train = train_data[1:7000]
X_train.shape, Y_train.shape
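
# Sanity check (sketch): X and Y are the same series shifted by one step,
# so the overlapping parts must match exactly.
assert np.allclose(X_train[1:], Y_train[:-1])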

plt.plot(X_train, label='X_train')
plt.plot(Y_train, label='Y_train')
plt.legend()
plt.show()



In [19]:
# Validation inputs and one-step-ahead targets (same construction as the training split)
X_valid = test_data[0:2696]
Y_valid = test_data[1:2697]
X_valid.shape, Y_valid.shape
plt.plot(X_valid, label='X_valid')
plt.plot(Y_valid, label='Y_valid')
plt.legend()
plt.show()



In [22]:
# Model: a single-hidden-layer GRU network
import impl.layer as l
from impl.loss import *

class GRU:
    def __init__(self, D, H):
        self.D = D
        self.H = H
        self.losses = {'train':[], 'smooth train':[], 'valid': []}
        
        # Model params; Z is the size of the concatenated [h, x] input
        Z = H + D
        m = dict(
            Wz=np.random.randn(Z, H) / np.sqrt(Z / 2.),
            Wr=np.random.randn(Z, H) / np.sqrt(Z / 2.),
            Wh=np.random.randn(Z, H) / np.sqrt(Z / 2.),
            Wy=np.random.randn(H, D) / np.sqrt(H / 2.),
            bz=np.zeros((1, H)),
            br=np.zeros((1, H)),
            bh=np.zeros((1, H)),
            by=np.zeros((1, D))
        )
        self.model = m
        
    def initial_state(self):
        return np.zeros((1, self.H))

    def forward(self, X, h, m):
        Wz, Wr, Wh, Wy = m['Wz'], m['Wr'], m['Wh'], m['Wy']
        bz, br, bh, by = m['bz'], m['br'], m['bh'], m['by']

        X_in = X.copy()
        h_in = h.copy()

        X = np.column_stack((h_in, X_in))

        hz, hz_cache = l.fc_forward(X, Wz, bz)
        hz, hz_sigm_cache = l.sigmoid_forward(hz)

        hr, hr_cache = l.fc_forward(X, Wr, br)
        hr, hr_sigm_cache = l.sigmoid_forward(hr)

        X = np.column_stack((hr * h_in, X_in))
        
        hh, hh_cache = l.fc_forward(X, Wh, bh)
        hh, hh_tanh_cache = l.tanh_forward(hh)

        # Update-gate interpolation between the previous state and the candidate.
        # Equivalently: h = h_in + hz * (hh - h_in)
        h = ((1. - hz) * h_in) + (hz * hh)

        y, y_cache = l.fc_forward(h, Wy, by)
        
        cache = (h_in, hz, hz_cache, hz_sigm_cache, hr, hr_cache, hr_sigm_cache, hh, hh_cache, hh_tanh_cache, 
                 y_cache)

        return y, h, cache

    def backward(self, dy, dh, cache):
        h_in, hz, hz_cache, hz_sigm_cache, hr, hr_cache, hr_sigm_cache, hh, hh_cache, hh_tanh_cache, y_cache = cache
        
        dh_out = dh.copy()

        dh, dWy, dby = l.fc_backward(dy, y_cache)
        dh += dh_out

        dh_in1 = (1. - hz) * dh
        dhh = hz * dh
        dhz = (hh * dh) - (h_in * dh) # equivalently: (hh - h_in) * dh

        dhh = l.tanh_backward(dhh, hh_tanh_cache)
        dXh, dWh, dbh = l.fc_backward(dhh, hh_cache)

        dh = dXh[:, :self.H]
        dX_in2 = dXh[:, self.H:]
        dh_in2 = hr * dh

        dhr = h_in * dh
        dhr = l.sigmoid_backward(dhr, hr_sigm_cache)
        dXr, dWr, dbr = l.fc_backward(dhr, hr_cache)

        dhz = l.sigmoid_backward(dhz, hz_sigm_cache)
        dXz, dWz, dbz = l.fc_backward(dhz, hz_cache)

        dX = dXr + dXz
        dh_in3 = dX[:, :self.H]
        dX_in1 = dX[:, self.H:]

        dh = dh_in1 + dh_in2 + dh_in3
        dX = dX_in1 + dX_in2

        grad = dict(Wz=dWz, Wr=dWr, Wh=dWh, Wy=dWy, bz=dbz, br=dbr, bh=dbh, by=dby)
        
        return dX, dh, grad

    def train_forward(self, X_train, h):
        ys, caches = [], []

        for X in X_train:
            X = X.reshape(1, -1) # X_1xn
            y, h, cache = self.forward(X, h, self.model)
            caches.append(cache)
            ys.append(y)
        
        ys = np.array(ys, dtype=float).reshape(len(ys), -1) # ys_txn instead of ys_tx1xn
        
        return ys, caches
                                
    def loss_function(self, y_pred, y_train): # a regularization strength (lam) could be threaded through here
        loss, dys = 0.0, []

        for y, Y in zip(y_pred, y_train):
            # Regularized L2 loss; plain L2 (l2_regression / dl2_regression) and
            # regularized L1 (l1_regression_reg / dl1_regression_reg) are drop-in alternatives.
            loss += l2_regression_reg(model=self.model, y_pred=y, y_train=Y)
            dy = dl2_regression_reg(y_pred=y, y_train=Y)
            dys.append(dy)
            
        return loss, dys
    
    def train_backward(self, dys, caches):
        dh = np.zeros((1, self.H))
        grads = {key: np.zeros_like(val) for key, val in self.model.items()}

        for t in reversed(range(len(dys))):
            dy = dys[t].reshape(1, -1) # dy_1xn
            dX, dh, grad = self.backward(dy, dh, caches[t])
            for key in grad.keys():
                grads[key] += grad[key]
                
        return grads
    
    def test(self, X_seed, h, size):
        ys = []
        X = X_seed.reshape(1, -1)
        for _ in range(size):
            y, h, _ = self.forward(X, h, self.model)
            X = y.copy() # feed the prediction back in as the next input
            ys.append(y)
        
        ys = np.array(ys, dtype=float).reshape(len(ys), -1) # ys_txn instead of ys_tx1xn
        return ys
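
# Shape smoke test (sketch): one forward step of an untrained GRU on a dummy 1-D
# input, using the same impl.layer primitives as the class above.
_gru = GRU(D=1, H=8)
_y, _h, _ = _gru.forward(np.zeros((1, 1)), _gru.initial_state(), _gru.model)
assert _y.shape == (1, 1) and _h.shape == (1, 8)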

In [23]:
def get_minibatch(X, y, minibatch_size, shuffle):
    # Split the series into consecutive minibatches; shuffle is accepted for
    # signature symmetry but ignored, since the temporal order must be preserved.
    minibatches = []

    for i in range(0, X.shape[0], minibatch_size):
    # for i in range(0, X.shape[0] - minibatch_size + 1, 1):
        X_mini = X[i:i + minibatch_size]
        y_mini = y[i:i + minibatch_size]
        minibatches.append((X_mini, y_mini))

    return minibatches

def adam_rnn(nn, XY_train, alpha, mb_size, n_iter, print_after, XY_valid):
    X_train, y_train = XY_train
    X_valid, y_valid = XY_valid

    M = {key: np.zeros_like(val) for key, val in nn.model.items()}
    R = {key: np.zeros_like(val) for key, val in nn.model.items()}
        
    beta1 = .99
    beta2 = .999
    state = nn.initial_state()
    smooth_loss = 1.
    minibatches = get_minibatch(X_train, y_train, mb_size, shuffle=False)
    
    for iter in range(1, n_iter + 1):
        for idx in range(len(minibatches)):
            # Train model
            X_mini, y_mini = minibatches[idx]
            ys, caches = nn.train_forward(X_mini, state)
            loss, dys = nn.loss_function(y_pred=ys, y_train=y_mini) #, alpha=alpha
            grads = nn.train_backward(dys, caches)
            nn.losses['train'].append(loss)
            smooth_loss = (0.999 * smooth_loss) + (0.001 * loss)
            nn.losses['smooth train'].append(smooth_loss)
            
            # Update model
            for k in grads.keys():
                M[k] = l.exp_running_avg(M[k], grads[k], beta1)
                R[k] = l.exp_running_avg(R[k], grads[k]**2, beta2)
                m_k_hat = M[k] / (1. - (beta1**(iter)))
                r_k_hat = R[k] / (1. - (beta2**(iter)))
                nn.model[k] -= alpha * m_k_hat / (np.sqrt(r_k_hat) + l.eps)

            # Validation: run a free-running prediction over the validation range
            # and monitor its loss to detect over-/underfitting.
            ys = nn.test(X_seed=X_valid[0], h=state, size=X_valid.shape[0]) # ys_txn
            valid_loss, _ = nn.loss_function(y_pred=ys, y_train=y_valid)
            nn.losses['valid'].append(valid_loss)

        # Report the training and validation loss
        if iter % print_after == 0:
            print('Iter-{}, train loss: {:.8f}, valid loss: {:.8f}'.format(iter, loss, valid_loss))

    return nn
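
# Sanity check (sketch): with minibatch_size=10 the 6999 training points split into
# ceil(6999 / 10) = 700 consecutive minibatches, the last one holding 9 points.
_mbs = get_minibatch(X_train, Y_train, minibatch_size=10, shuffle=False)
assert len(_mbs) == 700
assert _mbs[0][0].shape == (10,) and _mbs[-1][0].shape == (9,)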

In [24]:
# Hyper-parameters
time_step = 10 # minibatch size (truncated-BPTT length)
n_iter = 100 # epochs
alpha = 1e-4 # learning rate
print_after = 1 # print the training and validation loss every n epochs
num_hidden_units = 8 # hidden state size
num_input_units = 1

# Build the network
net = GRU(D=num_input_units, H=num_hidden_units)

# Train it with backpropagation through time and Adam
adam_rnn(nn=net, XY_train=(X_train, Y_train), XY_valid=(X_valid, Y_valid), alpha=alpha, mb_size=time_step,
         n_iter=n_iter, print_after=print_after)


Iter-1, train loss: 2.40422162, valid loss: 3154.43670359
Iter-2, train loss: 1.11134761, valid loss: 954.71782128
Iter-3, train loss: 0.88975513, valid loss: 706.24501080
Iter-4, train loss: 0.81714623, valid loss: 693.14271208
Iter-5, train loss: 0.75237006, valid loss: 681.33854506
Iter-6, train loss: 0.68470451, valid loss: 649.00060476
Iter-7, train loss: 0.61912489, valid loss: 601.69069057
Iter-8, train loss: 0.55940805, valid loss: 547.02898607
Iter-9, train loss: 0.50771702, valid loss: 493.00894502
Iter-10, train loss: 0.46446193, valid loss: 445.41497702
Iter-11, train loss: 0.42899263, valid loss: 407.61264098
Iter-12, train loss: 0.40014405, valid loss: 380.86040136
Iter-13, train loss: 0.37663019, valid loss: 364.94468063
Iter-14, train loss: 0.35730432, valid loss: 358.99481181
Iter-15, train loss: 0.34127891, valid loss: 362.20933590
Iter-16, train loss: 0.32792217, valid loss: 374.14300540
Iter-17, train loss: 0.31678314, valid loss: 394.41315601
Iter-18, train loss: 0.30749969, valid loss: 422.00395096
Iter-19, train loss: 0.29974815, valid loss: 454.67913282
Iter-20, train loss: 0.29324131, valid loss: 488.97923162
Iter-21, train loss: 0.28774791, valid loss: 521.13101788
Iter-22, train loss: 0.28309632, valid loss: 548.38512040
Iter-23, train loss: 0.27915479, valid loss: 569.72015964
Iter-24, train loss: 0.27581068, valid loss: 585.50226900
Iter-25, train loss: 0.27296296, valid loss: 596.71417166
Iter-26, train loss: 0.27052273, valid loss: 604.38263804
Iter-27, train loss: 0.26841509, valid loss: 609.36164603
Iter-28, train loss: 0.26657916, valid loss: 612.31468943
Iter-29, train loss: 0.26496639, valid loss: 613.75548077
Iter-30, train loss: 0.26353845, valid loss: 614.08509806
Iter-31, train loss: 0.26226508, valid loss: 613.61736851
Iter-32, train loss: 0.26112227, valid loss: 612.59555518
Iter-33, train loss: 0.26009085, valid loss: 611.20533727
Iter-34, train loss: 0.25915538, valid loss: 609.58601722
Iter-35, train loss: 0.25830330, valid loss: 607.84049134
Iter-36, train loss: 0.25752429, valid loss: 606.04361393
Iter-37, train loss: 0.25680979, valid loss: 604.24887646
Iter-38, train loss: 0.25615263, valid loss: 602.49358324
Iter-39, train loss: 0.25554678, valid loss: 600.80286964
Iter-40, train loss: 0.25498705, valid loss: 599.19282623
Iter-41, train loss: 0.25446902, valid loss: 597.67290824
Iter-42, train loss: 0.25398884, valid loss: 596.24775988
Iter-43, train loss: 0.25354312, valid loss: 594.91857700
Iter-44, train loss: 0.25312893, valid loss: 593.68412062
Iter-45, train loss: 0.25274362, valid loss: 592.54147218
Iter-46, train loss: 0.25238488, valid loss: 591.48659524
Iter-47, train loss: 0.25205061, valid loss: 590.51474958
Iter-48, train loss: 0.25173893, valid loss: 589.62079283
Iter-49, train loss: 0.25144816, valid loss: 588.79939765
Iter-50, train loss: 0.25117674, valid loss: 588.04520651
Iter-51, train loss: 0.25092329, valid loss: 587.35294039
Iter-52, train loss: 0.25068654, valid loss: 586.71747337
Iter-53, train loss: 0.25046532, valid loss: 586.13388183
Iter-54, train loss: 0.25025856, valid loss: 585.59747493
Iter-55, train loss: 0.25006529, valid loss: 585.10381150
Iter-56, train loss: 0.24988462, valid loss: 584.64870724
Iter-57, train loss: 0.24971571, valid loss: 584.22823493
Iter-58, train loss: 0.24955781, valid loss: 583.83871993
Iter-59, train loss: 0.24941020, valid loss: 583.47673236
Iter-60, train loss: 0.24927225, valid loss: 583.13907712
Iter-61, train loss: 0.24914334, valid loss: 582.82278266
Iter-62, train loss: 0.24902292, valid loss: 582.52508911
Iter-63, train loss: 0.24891048, valid loss: 582.24343611
Iter-64, train loss: 0.24880554, valid loss: 581.97545077
Iter-65, train loss: 0.24870764, valid loss: 581.71893589
Iter-66, train loss: 0.24861639, valid loss: 581.47185865
Iter-67, train loss: 0.24853139, valid loss: 581.23233971
Iter-68, train loss: 0.24845228, valid loss: 580.99864302
Iter-69, train loss: 0.24837874, valid loss: 580.76916611
Iter-70, train loss: 0.24831046, valid loss: 580.54243101
Iter-71, train loss: 0.24824714, valid loss: 580.31707576
Iter-72, train loss: 0.24818851, valid loss: 580.09184649
Iter-73, train loss: 0.24813433, valid loss: 579.86559001
Iter-74, train loss: 0.24808436, valid loss: 579.63724691
Iter-75, train loss: 0.24803838, valid loss: 579.40584519
Iter-76, train loss: 0.24799619, valid loss: 579.17049423
Iter-77, train loss: 0.24795759, valid loss: 578.93037934
Iter-78, train loss: 0.24792240, valid loss: 578.68475651
Iter-79, train loss: 0.24789046, valid loss: 578.43294764
Iter-80, train loss: 0.24786160, valid loss: 578.17433611
Iter-81, train loss: 0.24783569, valid loss: 577.90836255
Iter-82, train loss: 0.24781257, valid loss: 577.63452100
Iter-83, train loss: 0.24779213, valid loss: 577.35235531
Iter-84, train loss: 0.24777424, valid loss: 577.06145571
Iter-85, train loss: 0.24775878, valid loss: 576.76145569
Iter-86, train loss: 0.24774565, valid loss: 576.45202908
Iter-87, train loss: 0.24773474, valid loss: 576.13288723
Iter-88, train loss: 0.24772596, valid loss: 575.80377652
Iter-89, train loss: 0.24771923, valid loss: 575.46447587
Iter-90, train loss: 0.24771445, valid loss: 575.11479454
Iter-91, train loss: 0.24771155, valid loss: 574.75456999
Iter-92, train loss: 0.24771046, valid loss: 574.38366593
Iter-93, train loss: 0.24771110, valid loss: 574.00197041
Iter-94, train loss: 0.24771340, valid loss: 573.60939414
Iter-95, train loss: 0.24771731, valid loss: 573.20586882
Iter-96, train loss: 0.24772276, valid loss: 572.79134563
Iter-97, train loss: 0.24772970, valid loss: 572.36579380
Iter-98, train loss: 0.24773808, valid loss: 571.92919923
Iter-99, train loss: 0.24774785, valid loss: 571.48156326
Iter-100, train loss: 0.24775896, valid loss: 571.02290150
Out[24]:
<__main__.GRU at 0x10c1a5358>

In [25]:
import matplotlib.pyplot as plt

plt.plot(net.losses['train'], label='Train loss')
plt.plot(net.losses['smooth train'], label='Train smooth loss')
plt.plot(net.losses['valid'], label='Validation loss')
plt.legend()
plt.show()
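
# The validation loss above is summed over the whole validation sequence (~2696 steps)
# per update, while each training loss is summed over a 10-step minibatch, so the two
# curves live on very different scales. A rough per-step view (sketch) is easier to compare:
plt.plot(np.array(net.losses['smooth train']) / time_step, label='Train smooth loss / step')
plt.plot(np.array(net.losses['valid']) / X_valid.shape[0], label='Validation loss / step')
plt.legend()
plt.show()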



In [26]:
import matplotlib.pyplot as plt

y_pred = net.test(X_seed=X_valid[0], h=net.initial_state(), size=X_valid.shape[0]) # ys_txn
y_pred.shape, Y_valid.shape

plt.plot(y_pred, label='y_pred')
plt.plot(Y_valid, label='Y_valid')
plt.plot(X_valid, label='X_valid')
plt.legend()
plt.show()
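
# Map the predictions back to the original price scale (sketch), inverting the
# normalization with the data_mean / data_std computed earlier:
plt.plot(y_pred * data_std + data_mean, label='y_pred (price)')
plt.plot(Y_valid * data_std + data_mean, label='Y_valid (price)')
plt.legend()
plt.show()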


