In [11]:
from systemml import MLContext, dml, jvm_stdout
ml = MLContext(sc)
ml.version()
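MLContext wraps an existing SparkContext, which a PySpark shell or Jupyter PySpark kernel provides as sc. If you are running a plain Python process instead, a minimal sketch for creating one (assuming pyspark is installed) is:

from pyspark import SparkContext
sc = SparkContext.getOrCreate()  # reuses a running context if one exists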
In [18]:
script="""
source("nn/layers/cross_entropy_loss.dml") as cross_entropy_loss
source("nn/layers/l2_loss.dml") as l2_loss
source("nn/layers/lstm.dml") as lstm
source("nn/layers/sigmoid.dml") as sigmoid
source("nn/optim/sgd_nesterov.dml") as sgd_nesterov
source("nn/optim/rmsprop.dml") as rmsprop
# Training signal: 1000 samples of sin(x), x from 0 to ~10*pi (3.14 approximates pi)
X = seq(0, 999, 1) * 3.14 / 100
X = sin(X)
#print(toString(X))
in_TS = 75   # input time steps per sequence
out_TS = 25  # predicted time steps per sequence
N = nrow(X) / (in_TS + out_TS)  # number of training sequences (10)
M = out_TS  # LSTM hidden size, one unit per predicted step
# Row i of idx_mat holds the sample indices i+1 .. i+in_TS+out_TS, i.e. N
# overlapping windows with stride 1 (only the first N+in_TS+out_TS-1 samples
# of the signal are used)
idx_mat = outer(seq(0,N-1,1), t(seq(0,in_TS+out_TS-1,1)), "+") + 1
#print(toString(idx_mat))
# Gather the windowed samples: flatten the indices, build a 0/1 selection
# matrix with table(), multiply to reorder X, then reshape back to N windows
idx_col = matrix(idx_mat, rows=nrow(idx_mat)*ncol(idx_mat), cols=1)
rordrd_X = table(seq(1, nrow(idx_col), 1), idx_col, nrow(idx_col), nrow(idx_col)) %*% X
X = matrix(rordrd_X, rows=nrow(idx_mat), cols=ncol(idx_mat))
#print(toString(X))
# Split each window: first in_TS columns are the input, last out_TS the target
Y = X[,(in_TS+1):(in_TS+out_TS)]
X = X[,1:in_TS]
max_iterations = 2000
iter = 0
learningRate = 0.01
decayRate = 0.95
# init(N, D, M): D=1 feature per time step; with M = out_TS the final LSTM
# output (N x M) lines up directly with the targets Y (N x out_TS)
[W, b, out0, c0] = lstm::init(N, 1, M)
rmspropCacheW = rmsprop::init(W)
rmspropCacheB = rmsprop::init(b)
while( iter < max_iterations ){
  # Forward: return_sequences=FALSE, so a1 holds only the final output (N x M)
  [a1, c, c_out, c_c, c_ifog] = lstm::forward(X, W, b, in_TS, 1, FALSE, out0, c0)
  loss = l2_loss::forward(a1, Y)
  if(iter %% 100 == 0) print("iter=" + iter + " loss=" + loss)
  # Backward: c0 is all zeros from init, so it also serves as the zero
  # gradient w.r.t. the final cell state
  loss_grad = l2_loss::backward(a1, Y)
  [dX, dW, db, dout0, dc0] = lstm::backward(loss_grad, c0, X, W, b, in_TS, 1, FALSE, out0, c0, c_out, c_c, c_ifog)
  # RMSProp step for both the weights and the biases
  [W, rmspropCacheW] = rmsprop::update(W, dW, learningRate, decayRate, 1e-6, rmspropCacheW)
  [b, rmspropCacheB] = rmsprop::update(b, db, learningRate, decayRate, 1e-6, rmspropCacheB)
  iter = iter + 1
}
# Final forward pass with the trained parameters; print predictions beside targets
[a1, c, c_out, c_c, c_ifog] = lstm::forward(X, W, b, in_TS, 1, FALSE, out0, c0)
print(toString(cbind(a1, Y)))
"""
In [19]:
prog = dml(script).output("a1").output("X").output("Y")
with jvm_stdout(True):
    result = ml.execute(prog)
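For reference, nn/optim/rmsprop.dml follows the standard RMSProp rule: keep an exponential moving average of squared gradients and scale each step by its square root. A minimal NumPy sketch of that update, with the same epsilon (1e-6) the script passes:

import numpy as np

def rmsprop_update(W, dW, lr=0.01, decay=0.95, eps=1e-6, cache=None):
    # cache: moving average of squared gradients, same shape as W
    cache = np.zeros_like(W) if cache is None else cache
    cache = decay * cache + (1 - decay) * dW ** 2
    return W - lr * dW / (np.sqrt(cache) + eps), cache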
In [20]:
X = result.get("X").toNumPy()
a1 = result.get("a1").toNumPy()
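A quick sanity check on the fetched matrices; with the script's settings (N=10 sequences, in_TS=75, out_TS=25) the shapes should be:

print(X.shape, a1.shape)  # expected: (10, 75) (10, 25)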
In [21]:
import matplotlib.pyplot as plt
import numpy as np
In [22]:
# Stack inputs over predictions: each of the N columns is one full
# trajectory of in_TS given points followed by out_TS predicted points
complete = np.vstack((np.transpose(X), np.transpose(a1)))
In [23]:
plt.plot(complete)
plt.xlabel('time step')
plt.ylabel('sin(x)')
plt.show()
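Since Y was also registered with .output(), the ground-truth continuations can be overlaid for comparison, along these lines:

Y = result.get("Y").toNumPy()
truth = np.vstack((np.transpose(X), np.transpose(Y)))  # inputs + true continuations
plt.plot(complete, color='C0', alpha=0.5)              # inputs + predictions
plt.plot(truth, color='C1', alpha=0.5)                 # inputs + targets
plt.ylabel('sin(x)')
plt.show()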