In [1]:
import tensorflow as tf
import numpy as np
import matplotlib
import os
import matplotlib.pyplot as plt
tf.set_random_seed(777)
# Min-Max normalization: rescale each column independently to [0, 1]
def MinMaxScaler(data):
    numerator = data - np.min(data, 0)
    denominator = np.max(data, 0) - np.min(data, 0)
    # 1e-7 avoids division by zero for constant columns
    return numerator / (denominator + 1e-7)
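A quick way to see what MinMaxScaler does (a toy check added here, not part of the original run): every column is rescaled independently to roughly [0, 1].
# Illustrative only: column-wise scaling of a small array
sample = np.array([[1., 100.],
                   [2., 200.],
                   [3., 300.]])
print(MinMaxScaler(sample))
# ~[[0.  0. ]
#   [0.5 0.5]
#   [1.  1. ]]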
In [3]:
pwd
Out[3]:
In [5]:
ls -l ./Tests
In [6]:
# train Parameters
seq_length = 7        # use the previous 7 hours as input
data_dim = 6          # number of input features
hidden_dim = 10
output_dim = 1
learning_rate = 0.01
# selected columns 4,7,8,9,10,11: last (price), bid, ask, low, high, volume
xy = np.genfromtxt('./Tests/bitcoin_ticker_eth_krw_hour.csv', delimiter=',', dtype=str)[:, [4, 7, 8, 9, 10, 11]]
print(xy[0])                  # header row
xy = xy[1:].astype(float)     # drop the header, convert to float
xy = MinMaxScaler(xy)         # normalize (header was already removed above)
x = xy                        # all six features as inputs
y = xy[:, [0]]                # target: the (normalized) last price
print(x[:2])
print(y[:2])
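MinMaxScaler throws away the original price scale, so everything downstream (loss, plots, predictions) is in normalized units. If predictions are wanted back in KRW, one option is to keep the raw price range and invert the transform; a minimal sketch follows (the xy_raw, price_min/price_max, and denormalize names are hypothetical, not from the original notebook).
# Sketch: re-read the raw (un-normalized) columns and keep the price range so
# normalized predictions can be mapped back to KRW. Ignores the 1e-7 epsilon
# that MinMaxScaler adds to the denominator.
xy_raw = np.genfromtxt('./Tests/bitcoin_ticker_eth_krw_hour.csv',
                       delimiter=',', dtype=str)[1:, [4, 7, 8, 9, 10, 11]].astype(float)
price_min, price_max = xy_raw[:, 0].min(), xy_raw[:, 0].max()

def denormalize(v):
    # invert min-max scaling for the price column
    return v * (price_max - price_min) + price_min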
In [7]:
# build a dataset of sliding windows
dataX = []
dataY = []
for i in range(0, len(y) - seq_length):
    _x = x[i:i + seq_length]       # 7 consecutive hours of features
    _y = y[i + seq_length]         # next hour's last price (the label)
    if i < 2: print(_x, "->", _y)
    dataX.append(_x)
    dataY.append(_y)
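The loop turns the hourly series into overlapping windows: sample i is the block of seq_length consecutive rows x[i:i+seq_length], and its label is the price one hour after the window ends. A quick shape check, added here for clarity:
# Each sample is a (seq_length, data_dim) window; each label is one price.
print(np.array(dataX).shape)   # (len(y) - seq_length, 7, 6)
print(np.array(dataY).shape)   # (len(y) - seq_length, 1)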
In [8]:
# train/test split (chronological: first 70% train, last 30% test)
train_size = int(len(dataY) * 0.7)
test_size = len(dataY) - train_size
trainX, testX = np.array(dataX[:train_size]), np.array(dataX[train_size:])
trainY, testY = np.array(dataY[:train_size]), np.array(dataY[train_size:])
print(trainX[:2])
print(trainY[:2])
In [9]:
# input placeholders
X = tf.placeholder(tf.float32, [None, seq_length, data_dim])
Y = tf.placeholder(tf.float32, [None, 1])
# build an LSTM network (TF 1.x contrib API)
cell = tf.contrib.rnn.BasicLSTMCell(num_units=hidden_dim, state_is_tuple=True, activation=tf.tanh)
outputs, _states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
# outputs[:, -1]: the hidden state at the last time step, for every sequence in the batch
Y_pred = tf.contrib.layers.fully_connected(outputs[:, -1], output_dim, activation_fn=None)  # use only the last cell's output
# cost/loss: sum of squared errors
loss = tf.reduce_sum(tf.square(Y_pred - Y))
# optimizer
optimizer = tf.train.AdamOptimizer(learning_rate)
train = optimizer.minimize(loss)
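dynamic_rnn returns one hidden vector per time step, so outputs has shape [batch_size, seq_length, hidden_dim]; outputs[:, -1] keeps only the final time step, which the fully connected layer maps to a single price. A small static shape check, illustrative and assuming the TF 1.x graph defined above:
# The RNN emits a hidden state per time step; only the last one feeds the regressor.
print(outputs.get_shape().as_list())   # [None, 7, 10]
print(Y_pred.get_shape().as_list())    # [None, 1]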
In [10]:
# Training
iterations = 200
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Training step
    for i in range(iterations):
        _, step_loss = sess.run([train, loss], feed_dict={X: trainX, Y: trainY})
        print("[step: {}] loss: {}".format(i, step_loss))
        # print(sess.run(outputs[:, -1], feed_dict={X: trainX, Y: trainY}))
    # Test step
    test_predict = sess.run(Y_pred, feed_dict={X: testX})
    # Plot predictions against the actual (normalized) test prices
    plt.plot(testY)
    plt.plot(test_predict)
    plt.xlabel("Time Period")
    plt.ylabel("ETH/KRW Price (normalized)")
    plt.show()
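The plot compares predicted and actual normalized prices, but gives no single accuracy number. A common follow-up (a sketch, not part of this run) is to compute RMSE over the test set with NumPy once test_predict has been fetched:
# Sketch: root-mean-square error between actual and predicted normalized prices.
rmse = np.sqrt(np.mean(np.square(testY - test_predict)))
print("Test RMSE (normalized units): {:.6f}".format(rmse))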