In [1]:
import tensorflow as tf
import numpy as np
import math
from matplotlib import pyplot as plt
from tqdm import tqdm_notebook
from IPython.display import clear_output
%matplotlib inline

#Model Parameters
# Hidden-layer width of the approximating network (note: "ammount" is a
# misspelling of "amount", kept because later cells reference this name).
sigmoid_ammount = 8
# Number of training time points spanning one oscillation period [0, 2*pi].
m = 20

# Harmonic oscillator constants: mass and spring stiffness k (both 1.0).
mass = tf.constant(1.0, dtype=tf.double)
k = tf.constant(1.0, dtype=tf.double)
om2 = k/mass  # squared angular frequency, omega^2 = k/m
# Initial conditions: v(0) = 1, x(0) = 0 — the exact solution is x(t) = sin(t).
v_0 = tf.constant(1.0, dtype=tf.double)
x_0 = tf.constant(0.0, dtype=tf.double)

# Conserved total energy (Hamiltonian) of the oscillator, evaluated at t = 0.
ham_0 = (mass/2)*tf.square(v_0) + (k/2)*tf.square(x_0)

#------------------------------------
sess = tf.Session()  # TF1-style session, shared by all subsequent cells

In [2]:
#ham_0

In [3]:
#Time Parameter
# m training times as a column vector of shape (m, 1) over one period.
tTr = np.linspace(0, 2*math.pi, m).reshape(m,1)
# Placeholder fed with a (batch, 1) array of time values at run time.
time = tf.placeholder(tf.double)
#--------------------------------------------------------------------------------------
#Weights
# V: input weights, B: biases, W: output weights of the single hidden layer.
# Each has shape (1, sigmoid_ammount) and is initialized uniformly in [-1, 1].
W = tf.get_variable("W", initializer = tf.random_uniform(shape=[1, sigmoid_ammount], dtype=tf.double, minval = -1, maxval = 1))
V = tf.get_variable("V", initializer = tf.random_uniform(shape=[1, sigmoid_ammount], dtype=tf.double, minval = -1, maxval = 1))
B = tf.get_variable("B", initializer = tf.random_uniform(shape=[1, sigmoid_ammount], dtype=tf.double, minval = -1, maxval = 1))
#--------------------------------------------------------------------------------------
#Some declarations
# Learning rate kept as a TF variable so it can be reassigned between runs.
alpha = tf.get_variable("Learning_Rate", initializer=tf.constant(0.0001, dtype=tf.double))
num = tf.constant(m, dtype=tf.double)  # number of training points, as a tensor
#--------------------------------------------------------------------------------------

In [ ]:


In [4]:
#Forward
# Hidden layer: sigmoid(time @ V + B) has shape (batch, sigmoid_ammount).
sigmoids_matrix = tf.sigmoid(tf.matmul(time, V) + B)
# Network output x(t): (batch, sigmoid_ammount) @ (sigmoid_ammount, 1) -> (batch, 1).
approximator = tf.matmul(sigmoids_matrix, tf.transpose(W))
#--------------------------------------------------------------------------------------
#First and second time-derivatives of the network output w.r.t. the input time
vel = tf.gradients(approximator, time)[0]
acc = tf.gradients(vel, time)[0]
#--------------------------------------------------------------------------------------
#Cost
# ODE residual: x'' + omega^2 * x should vanish for the true solution.
eq = acc + om2*approximator
# Energy-conservation residual: H(t) - H(0) should vanish along the trajectory.
ham_diff = (mass/2)*tf.square(vel) + (k/2)*tf.square(approximator) - ham_0
#--------------------------------------------------------------------------------------
# Total loss: mean squared ODE residual + initial-condition penalties at t = 0
# (tTr[0] is 0, so index [0][0] is the t = 0 sample) + mean squared energy drift.
J = (tf.reduce_sum(tf.square(eq))*(1/num) +
     tf.square(approximator[0][0] - x_0) + tf.square(vel[0][0] - v_0) +
     tf.reduce_sum(tf.square(ham_diff))*(1/num))
#--------------------------------------------------------------------------------------
#Gradients
grads_weights = tf.gradients(J, [W, V, B])
#--------------------------------------------------------------------------------------
#Updates
# Plain gradient-descent update ops; running one op applies one step to one variable.
update_W = W.assign_sub(alpha*grads_weights[0]) 
update_V = V.assign_sub(alpha*grads_weights[1]) 
update_B = B.assign_sub(alpha*grads_weights[2])

In [6]:
def init_weights():
    """(Re)initialize every TF variable (W, V, B, alpha) in the shared session."""
    sess.run(tf.global_variables_initializer())
#---------------------------------------------------------------------------
def get_weights():
    """Return the current values of W, V and B stacked into one numpy array."""
    return np.asarray(sess.run([W, V, B]))
#---------------------------------------------------------------------------
def show_grads():
    """Evaluate dJ/d[W, V, B] on the training times and return them as a numpy array."""
    return np.asarray(sess.run(grads_weights, {time: tTr}))
#---------------------------------------------------------------------------
def show_sigmoids(observation_time):
    """Plot every hidden-layer sigmoid evaluated at the given (n, 1) time array."""
    activations = sess.run(sigmoids_matrix, {time: observation_time})
    plt.title('Sigmoids system')
    plt.grid(True)
    for col in range(sigmoid_ammount):
        plt.plot(observation_time, activations[:, col])
#---------------------------------------------------------------------------
def show_approx(observation_time):
    """Plot the network approximation x(t) over the given (n, 1) time array."""
    plt.grid(True)
    plt.title('Approximation')
    plt.plot(observation_time, sess.run(approximator, {time: observation_time}))

In [7]:
# Initialize variables and inspect the starting state of the network.
init_weights()
#---------------------------------------------------------------------------
#Initial weights of network and sigmoids system
initial_weights = get_weights()
print(get_weights())
# Wide observation window (beyond the training interval) to see sigmoid shapes.
some_time = np.linspace(-15, 7*math.pi, 300).reshape(300,1)
show_sigmoids(some_time)


[[[-0.8841825   0.20141982 -0.08111091 -0.98980409  0.42763244  0.84405378
   -0.85274802 -0.09010485]]

 [[ 0.66547032 -0.15562776  0.86838293 -0.00417212  0.06019709  0.97729413
    0.25083711 -0.01173016]]

 [[-0.91783553  0.80270678  0.25198959  0.22145386  0.60741804  0.93993446
   -0.37226066  0.79857982]]]

In [8]:
show_approx(some_time)



In [9]:
# Training history: inverse loss values and their iteration indices,
# read later by the plotting cell (plt.plot(I, Err)).
Err = []
I = []
#---------------------------------------------------------------------------
def grad_descent(N, learn_rate):
    """Run up to N gradient-descent steps at the given learning rate.

    Records 1/J into the module-level `Err` list (and the step index into
    `I`) so the loss history can be plotted afterwards. Stops early once
    the loss J drops below 1e-6. Prints the current loss every 1800 steps.
    """
    # BUG FIX: the original re-bound Err/I as locals, shadowing the
    # module-level lists, so the later plotting cell always saw them empty.
    global Err, I
    Err = []
    I = []
    sess.run(tf.assign(alpha, learn_rate))
    for i in tqdm_notebook(range(N)):
        I.append(i)
        _, _, _, j = sess.run([update_W, update_V, update_B, J], {time: tTr})
        Err.append(1/j)  # store inverse loss: larger = better fit
        if i % 1800 == 0:
            clear_output()
            print(j)
        if j < 1e-6:  # converged: stop early
            break
#---------------------------------------------------------------------------

In [10]:
# Sanity check of energy conservation on the training grid:
# ham is the network's Hamiltonian H(t), diff should equal ham - ham_0.
ham = sess.run((mass/2)*tf.square(vel) + (k/2)*tf.square(approximator), {time:tTr})
diff = sess.run(ham_diff, {time:tTr})
ham0=sess.run(ham_0)
print(ham)
print(diff)
# Should match `diff` exactly (it does in the recorded output below).
print(ham-ham0)


[[ 0.02777198]
 [ 0.03233655]
 [ 0.0411592 ]
 [ 0.05413265]
 [ 0.07096459]
 [ 0.0912698 ]
 [ 0.11459116]
 [ 0.14039223]
 [ 0.16806028]
 [ 0.19693427]
 [ 0.2263519 ]
 [ 0.25570085]
 [ 0.28446055]
 [ 0.31222624]
 [ 0.33871428]
 [ 0.36375245]
 [ 0.38726117]
 [ 0.40923134]
 [ 0.42970291]
 [ 0.44874665]]
[[-0.47222802]
 [-0.46766345]
 [-0.4588408 ]
 [-0.44586735]
 [-0.42903541]
 [-0.4087302 ]
 [-0.38540884]
 [-0.35960777]
 [-0.33193972]
 [-0.30306573]
 [-0.2736481 ]
 [-0.24429915]
 [-0.21553945]
 [-0.18777376]
 [-0.16128572]
 [-0.13624755]
 [-0.11273883]
 [-0.09076866]
 [-0.07029709]
 [-0.05125335]]
[[-0.47222802]
 [-0.46766345]
 [-0.4588408 ]
 [-0.44586735]
 [-0.42903541]
 [-0.4087302 ]
 [-0.38540884]
 [-0.35960777]
 [-0.33193972]
 [-0.30306573]
 [-0.2736481 ]
 [-0.24429915]
 [-0.21553945]
 [-0.18777376]
 [-0.16128572]
 [-0.13624755]
 [-0.11273883]
 [-0.09076866]
 [-0.07029709]
 [-0.05125335]]

In [26]:
# Train for up to 300k steps with learning rate 1e-3 (early-stops on convergence).
grad_descent(300000, 10**-3)


0.000536360004625


In [27]:
np.set_printoptions(precision=4)
# Inverse-loss history vs. iteration (requires grad_descent to have filled I/Err).
plt.plot(I, Err)


Out[27]:
[<matplotlib.lines.Line2D at 0x7f4c9917f278>]

In [28]:
# Gradients after training — should all be near zero at a minimum.
print(show_grads())


[[[  1.0012e-04  -7.4213e-05  -1.5743e-04   1.4395e-04  -2.4808e-05
    -1.3527e-04   1.7875e-04  -8.3167e-05]]

 [[  1.4410e-04  -3.0259e-05  -1.5213e-04  -9.1932e-05   1.1899e-04
    -1.2427e-04  -9.4842e-05  -2.9369e-05]]

 [[ -3.6360e-04   1.1473e-05   8.1499e-05  -2.8482e-04  -4.6693e-05
     6.4746e-04   4.6126e-04   2.6690e-06]]]

In [29]:
# Trained weight values [W, V, B].
print(get_weights())


[[[-4.4309 -0.558   1.5668 -2.0099  2.0783  5.4194 -2.0459  0.414 ]]

 [[ 1.0765 -0.1943  1.4514 -1.3626  0.1083  0.9124  1.0418  0.6819]]

 [[-3.9555  1.0328  0.3307  1.0208 -0.2663 -5.3667 -2.4508  0.9175]]]

In [30]:
# Hidden sigmoids after training, over the same wide window as before.
show_sigmoids(some_time)



In [31]:
# Compare the trained approximation against the analytic solution sin(t)
# on a dense grid over the training interval [0, 2*pi].
observe_time = np.linspace(0, 2*math.pi, 400).reshape(400,1)
plt.title('Approximation and error')
plt.grid(True)
approx = sess.run(approximator, {time: observe_time}).reshape((400,1))
true = np.sin(observe_time)  # exact solution for x(0)=0, v(0)=1, omega=1
plt.plot(observe_time, approx)
# Residual curve (exact minus approximation), drawn in red.
plt.plot(observe_time, true - approx, 'red')


Out[31]:
[<matplotlib.lines.Line2D at 0x7f4c99165ef0>]

In [32]:
# Approximation alone, without the error curve.
plt.plot(observe_time, approx)


Out[32]:
[<matplotlib.lines.Line2D at 0x7f4c9a614cf8>]