In [1]:
import tensorflow as tf
import numpy as np
import math
from matplotlib import pyplot as plt
from tqdm import tqdm_notebook
from IPython.display import clear_output
%matplotlib inline
# Model parameters
sigmoid_ammount = 8      # number of sigmoid units in the hidden layer
m = 20                   # number of training points in time
mass = tf.constant(1.0, dtype=tf.double)    # oscillator mass
k = tf.constant(1.0, dtype=tf.double)       # spring constant
om2 = k/mass                                # omega^2 = k/mass
v_0 = tf.constant(1.0, dtype=tf.double)     # initial velocity x'(0)
x_0 = tf.constant(0.0, dtype=tf.double)     # initial position x(0)
ham_0 = (mass/2)*tf.square(v_0) + (k/2)*tf.square(x_0)    # initial (conserved) energy
#------------------------------------
sess = tf.Session()
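The cell above sets up a simple harmonic oscillator with unit mass and spring constant, so the network will be trained to satisfy the equation of motion, the initial conditions and energy conservation:

$$
m\,\ddot{x} + k\,x = 0, \qquad \omega^2 = \frac{k}{m}, \qquad
x(0) = x_0, \quad \dot{x}(0) = v_0, \qquad
H_0 = \frac{m}{2}\,v_0^2 + \frac{k}{2}\,x_0^2 .
$$

With $m = k = v_0 = 1$ and $x_0 = 0$ this gives $H_0 = \tfrac{1}{2}$ and the exact solution $x(t) = \sin t$, which is used as the reference curve at the end of the notebook.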
In [2]:
#ham_0
In [3]:
# Time grid: m training points over one period, t in [0, 2*pi]
tTr = np.linspace(0, 2*math.pi, m).reshape(m,1)
time = tf.placeholder(tf.double)
#--------------------------------------------------------------------------------------
# Trainable weights: output weights W, input weights V, hidden biases B
W = tf.get_variable("W", initializer = tf.random_uniform(shape=[1, sigmoid_ammount], dtype=tf.double, minval = -1, maxval = 1))
V = tf.get_variable("V", initializer = tf.random_uniform(shape=[1, sigmoid_ammount], dtype=tf.double, minval = -1, maxval = 1))
B = tf.get_variable("B", initializer = tf.random_uniform(shape=[1, sigmoid_ammount], dtype=tf.double, minval = -1, maxval = 1))
#--------------------------------------------------------------------------------------
# Learning rate (as a graph variable) and number of training points
alpha = tf.get_variable("Learning_Rate", initializer=tf.constant(0.0001, dtype=tf.double))
num = tf.constant(m, dtype=tf.double)
#--------------------------------------------------------------------------------------
In [4]:
# Forward pass: x_hat(t) = sum_j W_j * sigmoid(V_j*t + B_j)
sigmoids_matrix = tf.sigmoid(tf.matmul(time, V) + B)         # shape [m, sigmoid_ammount]
approximator = tf.matmul(sigmoids_matrix, tf.transpose(W))   # shape [m, 1]
#--------------------------------------------------------------------------------------
# First and second time-derivatives of the approximator (automatic differentiation)
vel = tf.gradients(approximator, time)[0]   # dx/dt
acc = tf.gradients(vel, time)[0]            # d2x/dt2
#--------------------------------------------------------------------------------------
# Cost terms
eq = acc + om2*approximator                                                   # ODE residual: x'' + omega^2*x
ham_diff = (mass/2)*tf.square(vel) + (k/2)*tf.square(approximator) - ham_0    # energy deviation from ham_0
#--------------------------------------------------------------------------------------
# Mean squared residual + initial-condition penalties + mean squared energy deviation
J = (tf.reduce_sum(tf.square(eq))*(1/num) +
     tf.square(approximator[0][0] - x_0) + tf.square(vel[0][0] - v_0) +
     tf.reduce_sum(tf.square(ham_diff))*(1/num))
#--------------------------------------------------------------------------------------
#Gradients
grads_weights = tf.gradients(J, [W, V, B])
#--------------------------------------------------------------------------------------
#Updates
update_W = W.assign_sub(alpha*grads_weights[0])
update_V = V.assign_sub(alpha*grads_weights[1])
update_B = B.assign_sub(alpha*grads_weights[2])
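The trial solution built in this cell is a single hidden layer of sigmoids with a linear read-out, and the cost combines the mean squared ODE residual, the initial-condition penalties at the first grid point ($t = 0$), and the mean squared deviation of the energy from its initial value:

$$
\hat{x}(t) = \sum_{j=1}^{8} W_j\,\sigma(V_j t + B_j), \qquad
J = \frac{1}{m}\sum_{i}\bigl(\ddot{\hat{x}}_i + \omega^2 \hat{x}_i\bigr)^2
  + \bigl(\hat{x}(0) - x_0\bigr)^2 + \bigl(\dot{\hat{x}}(0) - v_0\bigr)^2
  + \frac{1}{m}\sum_{i}\bigl(H_i - H_0\bigr)^2,
\qquad H_i = \frac{m}{2}\,\dot{\hat{x}}_i^{\,2} + \frac{k}{2}\,\hat{x}_i^{\,2},
$$

with the sums running over the $m = 20$ training times. Both derivatives come from `tf.gradients`, so no finite differences are involved, and the update ops implement plain gradient descent with step size `alpha`.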
In [6]:
def init_weights():
    init = tf.global_variables_initializer()
    sess.run(init)
#---------------------------------------------------------------------------
def get_weights():
    weights = sess.run([W, V, B])
    return np.asarray(weights)
#---------------------------------------------------------------------------
def show_grads():
    grads = sess.run(grads_weights, {time: tTr})
    return np.asarray(grads)
#---------------------------------------------------------------------------
def show_sigmoids(observation_time):
    matrix = sess.run(sigmoids_matrix, {time: observation_time})
    plt.title('Sigmoids system')
    plt.grid(True)
    for i in range(sigmoid_ammount):
        si = matrix[:, i]
        plt.plot(observation_time, si)
#---------------------------------------------------------------------------
def show_approx(observation_time):
    approx = sess.run(approximator, {time: observation_time})
    plt.grid(True)
    plt.title('Approximation')
    plt.plot(observation_time, approx)
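One thing the notebook itself does not check is whether `tf.gradients` really returns the time derivative of the ansatz. For this architecture the derivative has a closed form, $\mathrm{d}\sigma(Vt+B)/\mathrm{d}t = V\,\sigma\,(1-\sigma)$, so it can be compared against a NumPy evaluation. A minimal sketch, assuming the graph, session and initialized weights from above; `check_velocity_gradient` is a helper introduced here for illustration only:

def check_velocity_gradient(t_grid):
    Wv, Vv, Bv = sess.run([W, V, B])                        # current weights, each of shape [1, sigmoid_ammount]
    s = 1.0 / (1.0 + np.exp(-(t_grid @ Vv + Bv)))           # hidden activations, shape [len(t_grid), sigmoid_ammount]
    vel_closed_form = (s * (1.0 - s)) @ (Wv * Vv).T         # analytic dx/dt of the ansatz
    vel_autodiff = sess.run(vel, {time: t_grid})            # what tf.gradients produces
    return np.max(np.abs(vel_closed_form - vel_autodiff))   # should be at machine-precision level

# print(check_velocity_gradient(tTr))   # call after init_weights()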
In [7]:
init_weights()
#---------------------------------------------------------------------------
# Initial weights of the network and the sigmoid basis
initial_weights = get_weights()
print(initial_weights)
some_time = np.linspace(-15, 7*math.pi, 300).reshape(300,1)
show_sigmoids(some_time)
In [8]:
show_approx(some_time)
In [9]:
Err = []
I = []
#---------------------------------------------------------------------------
def grad_descent(N, learn_rate):
    global Err, I          # write into the module-level lists so they can be plotted afterwards
    Err = []
    I = []
    sess.run(tf.assign(alpha, learn_rate))
    for i in tqdm_notebook(range(N)):
        I.append(i)
        _, _, _, j = sess.run([update_W, update_V, update_B, J], {time: tTr})
        Err.append(1/j)    # store the inverse cost for plotting
        if i % 1800 == 0:
            clear_output()
            print(j)
        if j < 1e-6:
            break
#---------------------------------------------------------------------------
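The update ops above implement plain gradient descent by hand. A built-in TensorFlow 1.x optimizer would construct an equivalent (or adaptive) update op automatically; a minimal sketch of that alternative, not used in the runs below (`train_step` is introduced here for illustration, and re-running the initializer also resets W, V and B, so this would replace the setup above rather than being mixed into it):

optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)      # adaptive alternative to the manual updates
train_step = optimizer.minimize(J, var_list=[W, V, B])
sess.run(tf.global_variables_initializer())                 # Adam adds slot variables that need initializing

# for i in range(20000):
#     _, j = sess.run([train_step, J], {time: tTr})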
In [10]:
ham = sess.run((mass/2)*tf.square(vel) + (k/2)*tf.square(approximator), {time:tTr})
diff = sess.run(ham_diff, {time:tTr})
ham0=sess.run(ham_0)
print(ham)
print(diff)
print(ham-ham0)
In [26]:
grad_descent(300000, 10**-3)
In [27]:
np.set_printoptions(precision=4)
plt.plot(I, Err)
Out[27]:
In [28]:
print(show_grads())
In [29]:
print(get_weights())
In [30]:
show_sigmoids(some_time)
In [31]:
observe_time = np.linspace(0, 2*math.pi, 400).reshape(400,1)
plt.title('Approximation and error')
plt.grid(True)
approx = sess.run(approximator, {time: observe_time}).reshape((400,1))
true = np.sin(observe_time)   # analytic solution: with m = k = 1, x(0) = 0, v(0) = 1, x(t) = sin(t)
plt.plot(observe_time, approx)
plt.plot(observe_time, true - approx, 'red')
Out[31]:
In [32]:
plt.plot(observe_time, approx)
Out[32]: