In [2]:
import tensorflow as tf
import numpy as np
import math
from matplotlib import pyplot as plt
from tqdm import tqdm_notebook
from IPython.display import clear_output
%matplotlib inline

#Model Parameters
sigmoid_ammount = 15  # number of hidden sigmoid units (note: name misspelling is kept; later cells reference it)
m = 150               # number of training points on the time grid

mass = 1       # oscillator mass
k = 1          # spring constant
om2 = k/mass   # squared angular frequency omega^2 = k/m
v_0 = 1        # initial velocity x'(0)
x_0 = 0        # initial position x(0)

# Conserved total energy (Hamiltonian) at t = 0: E = m*v^2/2 + k*x^2/2
ham_0 = (mass/2.0)*v_0*v_0 + (k/2.0)*x_0*x_0

#------------------------------------
sess = tf.Session()  # single global TF1 session shared by all cells below


D:\Anaconda\lib\site-packages\h5py\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
  from ._conv import register_converters as _register_converters

In [3]:
#Time Parameter: m training points uniformly covering one period [0, 2*pi], as an (m, 1) column
tTr = np.linspace(0, 2*math.pi, m).reshape(m,1)
time = tf.placeholder(tf.double)  # fed with a column of time values at run time (tTr during training)
#--------------------------------------------------------------------------------------
#Weights of the single-hidden-layer network, initialized uniformly in [-1, 1]
W = tf.get_variable("W", initializer = tf.random_uniform(shape=[1, sigmoid_ammount], dtype=tf.double, minval = -1, maxval = 1))
V = tf.get_variable("V", initializer = tf.random_uniform(shape=[1, sigmoid_ammount], dtype=tf.double, minval = -1, maxval = 1))
B = tf.get_variable("B", initializer = tf.random_uniform(shape=[1, sigmoid_ammount], dtype=tf.double, minval = -1, maxval = 1))
C = tf.get_variable("C", initializer = tf.random_uniform(shape=[1, 1], dtype=tf.double, minval = -1, maxval = 1))
#--------------------------------------------------------------------------------------
#Some declarations
alpha = tf.get_variable("Learning_Rate", initializer=tf.constant(1e-3, dtype=tf.double))  # learning rate; reassigned via tf.assign in grad_descent
num = tf.constant(m, dtype=tf.double)  # number of training points, as a TF constant for averaging
#--------------------------------------------------------------------------------------

In [4]:
#Forward pass: x(t) ~= sigmoid(t*V + B) * W^T + C  (single hidden layer, scalar output per time point)
sigmoids_matrix = tf.sigmoid(tf.matmul(time, V) + B)
approximator = tf.matmul(sigmoids_matrix, tf.transpose(W)) + C
#--------------------------------------------------------------------------------------
#First and second time-derivatives of the approximation, via automatic differentiation
vel = tf.gradients(approximator, time)[0]  # dx/dt
acc = tf.gradients(vel, time)[0]           # d^2x/dt^2
#--------------------------------------------------------------------------------------
#Cost ingredients: ODE residual x'' + omega^2*x, and deviation from the conserved energy ham_0
eq = acc + om2*approximator
ham_diff = (mass/2)*tf.square(vel) + (k/2)*tf.square(approximator) - ham_0
#--------------------------------------------------------------------------------------
# Total cost: mean squared ODE residual
#           + initial-condition penalties at the first grid point (tTr[0] == 0)
#           + mean squared energy deviation
J = (tf.reduce_sum(tf.square(eq))*(1/num) +
     tf.square(approximator[0][0] - x_0) + tf.square(vel[0][0] - v_0) +
     tf.reduce_sum(tf.square(ham_diff))*(1/num))
#--------------------------------------------------------------------------------------
#Gradients of the cost with respect to every trainable tensor
grads_weights = tf.gradients(J, [W, V, B, C])
#--------------------------------------------------------------------------------------
#Plain gradient-descent update ops; each sess.run of these performs one step
update_W = W.assign_sub(alpha*grads_weights[0]) 
update_V = V.assign_sub(alpha*grads_weights[1]) 
update_B = B.assign_sub(alpha*grads_weights[2])
update_C = C.assign_sub(alpha*grads_weights[3])

In [5]:
def init_weights():
    """(Re)initialize every TF variable (W, V, B, C, alpha) in the global session."""
    sess.run(tf.global_variables_initializer())
#---------------------------------------------------------------------------
def get_weights():
    """Fetch the current values of W, V and B and return them as one numpy array."""
    return np.asarray(sess.run([W, V, B]))
#---------------------------------------------------------------------------
def show_grads():
    """Evaluate the cost gradients w.r.t. all weights on the training grid tTr."""
    return np.asarray(sess.run(grads_weights, {time: tTr}))
#---------------------------------------------------------------------------
def show_sigmoids(observation_time):
    """Plot every hidden sigmoid unit's activation over the given time column."""
    activations = sess.run(sigmoids_matrix, {time: observation_time})
    plt.title('Sigmoids system')
    plt.grid(True)
    # one curve per hidden unit (columns of the activation matrix)
    for unit_curve in activations.T:
        plt.plot(observation_time, unit_curve)
#---------------------------------------------------------------------------
def show_approx(observation_time):
    """Plot the network's approximation x(t) over the given time column."""
    network_output = sess.run(approximator, {time: observation_time})
    plt.grid(True)
    plt.title('Approximation')
    plt.plot(observation_time, network_output)

In [6]:
init_weights()
#---------------------------------------------------------------------------
#Initial weights of network and sigmoids system
initial_weights = get_weights()
# Reuse the already-fetched values instead of running a second sess.run via get_weights()
print(initial_weights)
# Wide observation window (beyond the training interval) to inspect the sigmoid basis
some_time = np.linspace(-15, 7*math.pi, 300).reshape(300,1)
show_sigmoids(some_time)


[[[-0.04965326  0.31773187  0.44768637 -0.40245856  0.01430596
   -0.25726194  0.28724229  0.9193347   0.70476139  0.74924138
    0.35523003  0.39079553  0.92433165 -0.2523055  -0.57600578]]

 [[ 0.14734456  0.95787525 -0.63884882  0.86256157 -0.80951386
   -0.70087496  0.178183    0.60015654 -0.91164054  0.67356627
    0.89181723  0.38452747  0.47478178  0.63651825  0.41786929]]

 [[ 0.0976997   0.45336483 -0.31614447  0.15576854 -0.044882
    0.19311369  0.08570837 -0.94841846  0.44373329 -0.39862842
   -0.6181556   0.77672431  0.70540521 -0.56843451 -0.61266318]]]

In [7]:
# Plot the (still untrained) network's approximation over the wide time window
show_approx(some_time)



In [8]:
# Global training history, filled by grad_descent:
# Err holds 1/cost per step, I holds the matching step indices
Err = []
I = []
#---------------------------------------------------------------------------
def grad_descent(N, learn_rate):
    """Run up to N gradient-descent steps at the given learning rate.

    Each step appends its index to the global list I and the inverse
    cost 1/J to the global list Err. Every 1800 steps the output is
    cleared and the current cost printed; training stops early once
    the cost drops below 1e-6.
    """
    sess.run(tf.assign(alpha, learn_rate))
    fetches = [update_W, update_V, update_B, update_C, J]
    for step in tqdm_notebook(range(N)):
        I.append(step)
        cost = sess.run(fetches, {time: tTr})[-1]
        Err.append(1/cost)
        if step % 1800 == 0:
            clear_output()
            print(cost)
        if cost < 1e-6:
            break
#---------------------------------------------------------------------------

In [9]:
# Train for up to 500000 steps at learning rate 10e-3 (= 0.01)
grad_descent(500000, 10e-3)


5.531933657413025e-05


In [10]:
np.set_printoptions(precision=4)
# Inverse cost (1/J) versus iteration: curve rises as training converges
plt.plot(I, Err)


Out[10]:
[<matplotlib.lines.Line2D at 0x273041d43c8>]

In [11]:
#print(show_grads())

In [12]:
#print(get_weights())

In [13]:
#show_sigmoids(some_time)

In [30]:
# Compare the trained network with the analytical solution x(t) = sin(t)
# (valid for mass = k = 1, x(0) = 0, x'(0) = 1).
observe_time = np.linspace(0, 2*math.pi, 400).reshape(400,1)
approx = sess.run(approximator, {time: observe_time}).reshape((400,1))
true = np.sin(observe_time)

fig = plt.figure(figsize=(8,6))
ax = fig.gca()
ax.set_title('Analytical and ANN solutions with error ', fontsize=15)
ax.set_xlabel('t, sec', fontsize=20)
ax.set_ylabel('x, m', fontsize=20)
ax.plot(observe_time, true, 'b--', label='Analytical solution')
ax.plot(observe_time, approx, 'k', label='Neural net solution')
ax.plot(observe_time, true - approx, 'r-.', label='Error')
ax.grid(which='both')
ax.legend()


Out[30]:
<matplotlib.legend.Legend at 0x273058fe390>

In [15]:
# Mean squared error between the analytical and network solutions.
err = true - approx
# err is already an ndarray; np.mean replaces the redundant
# np.array(err) conversion and the hand-rolled sum/size division.
np.mean(np.square(err))


Out[15]:
1.1208337744128003e-06