In [1]:
import tensorflow as tf
import numpy as np
import math
from matplotlib import pyplot as plt
from tqdm import tqdm_notebook
from IPython.display import clear_output
%matplotlib inline

#Model Parameters
sigmoid_ammount = 8
m = 20

mass = tf.constant(1.0, dtype=tf.double)
k = tf.constant(1.0, dtype=tf.double)
om2 = k/m
v_0 = tf.constant(0.0, dtype=tf.double)
x_0 = tf.constant(1.0, dtype=tf.double)
ham_0 = (m/const_2)*tf.square(v_0) + (k/const_2)*tf.square(x_0)

#------------------------------------
sess = tf.Session()

In [ ]:


In [37]:
#Time Parameter
time = tf.placeholder(tf.double)
#-------------------------------------------------------------
#Weights
W = tf.get_variable("W", initializer = tf.random_uniform(shape=[1, sigmoid_ammount], dtype=tf.double, minval = -1, maxval = 1))
V = tf.get_variable("V", initializer = tf.random_uniform(shape=[1, sigmoid_ammount], dtype=tf.double, minval = -1, maxval = 1))
B = tf.get_variable("B", initializer = tf.random_uniform(shape=[1, sigmoid_ammount], dtype=tf.double, minval = -1, maxval = 1))


---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-37-c4c5e32ac8b9> in <module>()
      3 #-------------------------------------------------------------
      4 #Weights
----> 5 W = tf.get_variable("W", initializer = tf.random_uniform(shape=[1, sigmoid_ammount], dtype=tf.double, minval = -1, maxval = 1))
      6 V = tf.get_variable("V", initializer = tf.random_uniform(shape=[1, sigmoid_ammount], dtype=tf.double, minval = -1, maxval = 1))
      7 B = tf.get_variable("B", initializer = tf.random_uniform(shape=[1, sigmoid_ammount], dtype=tf.double, minval = -1, maxval = 1))

/home/rodion/anaconda3/lib/python3.6/site-packages/tensorflow/python/ops/variable_scope.py in get_variable(name, shape, dtype, initializer, regularizer, trainable, collections, caching_device, partitioner, validate_shape, use_resource, custom_getter)
   1063       collections=collections, caching_device=caching_device,
   1064       partitioner=partitioner, validate_shape=validate_shape,
-> 1065       use_resource=use_resource, custom_getter=custom_getter)
   1066 get_variable_or_local_docstring = (
   1067     """%s

/home/rodion/anaconda3/lib/python3.6/site-packages/tensorflow/python/ops/variable_scope.py in get_variable(self, var_store, name, shape, dtype, initializer, regularizer, reuse, trainable, collections, caching_device, partitioner, validate_shape, use_resource, custom_getter)
    960           collections=collections, caching_device=caching_device,
    961           partitioner=partitioner, validate_shape=validate_shape,
--> 962           use_resource=use_resource, custom_getter=custom_getter)
    963 
    964   def _get_partitioned_variable(self,

/home/rodion/anaconda3/lib/python3.6/site-packages/tensorflow/python/ops/variable_scope.py in get_variable(self, name, shape, dtype, initializer, regularizer, reuse, trainable, collections, caching_device, partitioner, validate_shape, use_resource, custom_getter)
    365           reuse=reuse, trainable=trainable, collections=collections,
    366           caching_device=caching_device, partitioner=partitioner,
--> 367           validate_shape=validate_shape, use_resource=use_resource)
    368 
    369   def _get_partitioned_variable(

/home/rodion/anaconda3/lib/python3.6/site-packages/tensorflow/python/ops/variable_scope.py in _true_getter(name, shape, dtype, initializer, regularizer, reuse, trainable, collections, caching_device, partitioner, validate_shape, use_resource)
    350           trainable=trainable, collections=collections,
    351           caching_device=caching_device, validate_shape=validate_shape,
--> 352           use_resource=use_resource)
    353 
    354     if custom_getter is not None:

/home/rodion/anaconda3/lib/python3.6/site-packages/tensorflow/python/ops/variable_scope.py in _get_single_variable(self, name, shape, dtype, initializer, regularizer, partition_info, reuse, trainable, collections, caching_device, validate_shape, use_resource)
    662                          " Did you mean to set reuse=True in VarScope? "
    663                          "Originally defined at:\n\n%s" % (
--> 664                              name, "".join(traceback.format_list(tb))))
    665       found_var = self._vars[name]
    666       if not shape.is_compatible_with(found_var.get_shape()):

ValueError: Variable W already exists, disallowed. Did you mean to set reuse=True in VarScope? Originally defined at:

  File "<ipython-input-2-c4c5e32ac8b9>", line 5, in <module>
    W = tf.get_variable("W", initializer = tf.random_uniform(shape=[1, sigmoid_ammount], dtype=tf.double, minval = -1, maxval = 1))
  File "/home/rodion/anaconda3/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2881, in run_code
    exec(code_obj, self.user_global_ns, self.user_ns)
  File "/home/rodion/anaconda3/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2821, in run_ast_nodes
    if self.run_code(code, result):

In [36]:



[[ 0.2837  0.5842  0.3965  0.5262  0.3638  0.6984  0.3802  0.2769]
 [ 0.2383  0.6187  0.4339  0.5154  0.3759  0.686   0.3434  0.3397]
 [ 0.1981  0.6521  0.4721  0.5046  0.3882  0.6733  0.3085  0.4087]
 [ 0.1633  0.684   0.5106  0.4938  0.4005  0.6604  0.2756  0.4815]
 [ 0.1335  0.7143  0.549   0.483   0.413   0.6472  0.2449  0.5551]
 [ 0.1085  0.7428  0.5868  0.4722  0.4257  0.6338  0.2167  0.6264]
 [ 0.0877  0.7694  0.6236  0.4615  0.4384  0.6201  0.1909  0.6926]
 [ 0.0705  0.7939  0.659   0.4507  0.4512  0.6063  0.1675  0.7516]
 [ 0.0565  0.8165  0.6928  0.4401  0.464   0.5923  0.1464  0.8026]
 [ 0.0452  0.8371  0.7246  0.4295  0.4769  0.5782  0.1276  0.8453]
 [ 0.036   0.8558  0.7542  0.4189  0.4899  0.5639  0.1109  0.8801]
 [ 0.0287  0.8727  0.7817  0.4084  0.5028  0.5495  0.0962  0.9079]
 [ 0.0228  0.8879  0.8068  0.398   0.5158  0.5351  0.0832  0.9298]
 [ 0.0181  0.9015  0.8297  0.3877  0.5287  0.5205  0.0718  0.9468]
 [ 0.0143  0.9135  0.8504  0.3775  0.5416  0.506   0.0619  0.9599]
 [ 0.0114  0.9243  0.869   0.3674  0.5544  0.4914  0.0533  0.9698]
 [ 0.009   0.9338  0.8855  0.3574  0.5672  0.4769  0.0458  0.9774]
 [ 0.0071  0.9421  0.9003  0.3475  0.5799  0.4623  0.0393  0.9831]
 [ 0.0056  0.9495  0.9133  0.3378  0.5924  0.4479  0.0337  0.9873]
 [ 0.0044  0.956   0.9247  0.3282  0.6049  0.4335  0.0289  0.9905]]
[[ 0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.]
 [ 0.  0.  0.  0.  0.  0.  0.  0.]]

In [3]:
#Forward
sigmoids_matrix = tf.sigmoid(tf.matmul(time, V) + B)
total_matrix = tf.multiply(sigmoids_matrix, W)
approximator = tf.reduce_sum(total_matrix, 1)
#--------------------------------------------------------------------------------------
#Some declarations
alpha = tf.get_variable("Learning_Rate", initializer=tf.constant(0.0001, dtype=tf.double))
num = tf.constant(m, dtype=tf.double)

#--------------------------------------------------------------------------------------
#First and second time-deriatives from forward
vel = tf.gradients(approximator, time)[0]
acc = tf.gradients(vel, time)[0]
#--------------------------------------------------------------------------------------
#Cost
eq = acc + om2*approximator
ham_diff = (mass/2)*tf.square(vel) + (k/2)*tf.square(approximator) - ham_0
#--------------------------------------------------------------------------------------
J = (tf.reduce_sum(tf.square(eq))*(1/num) +
     tf.square(approximator[0] -x_0) + tf.square(vel[0] - v_0) +
     tf.reduce_sum(tf.square(ham_diff))*(1/num))
#--------------------------------------------------------------------------------------
#Gradients
grads_weights = tf.gradients(J, [W, V, B])
#--------------------------------------------------------------------------------------
#Updates
update_W = W.assign_sub(alpha*grads_weights[0]) 
update_V = V.assign_sub(alpha*grads_weights[1]) 
update_B = B.assign_sub(alpha*grads_weights[2])

In [44]:
init_weights()
#sigmatr = sess.run(tf.sigmoid(tf.matmul(time, V) + B), {time: tTr})
resmatr1 = sess.run(tf.matmul(tf.sigmoid(tf.matmul(time, V) + B), tf.transpose(W)), {time: tTr})
resmatr2 = sess.run(tf.reduce_sum(tf.multiply(tf.sigmoid(tf.matmul(time, V) + B), W), 1), {time: tTr})

print(resmatr1)
print(resmatr2)
print(resmatr1-resmatr2)


[[-0.133 ]
 [ 0.0106]
 [ 0.1437]
 [ 0.2621]
 [ 0.3633]
 [ 0.4461]
 [ 0.5112]
 [ 0.5601]
 [ 0.595 ]
 [ 0.6184]
 [ 0.6327]
 [ 0.6401]
 [ 0.6423]
 [ 0.6407]
 [ 0.6367]
 [ 0.6309]
 [ 0.6241]
 [ 0.6167]
 [ 0.6092]
 [ 0.6016]]
[-0.133   0.0106  0.1437  0.2621  0.3633  0.4461  0.5112  0.5601  0.595
  0.6184  0.6327  0.6401  0.6423  0.6407  0.6367  0.6309  0.6241  0.6167
  0.6092  0.6016]
[[  0.0000e+00  -1.4360e-01  -2.7669e-01  -3.9511e-01  -4.9624e-01
   -5.7910e-01  -6.4418e-01  -6.9307e-01  -7.2798e-01  -7.5139e-01
   -7.6570e-01  -7.7305e-01  -7.7523e-01  -7.7371e-01  -7.6963e-01
   -7.6386e-01  -7.5705e-01  -7.4970e-01  -7.4214e-01  -7.3462e-01]
 [  1.4360e-01   0.0000e+00  -1.3309e-01  -2.5151e-01  -3.5264e-01
   -4.3551e-01  -5.0059e-01  -5.4947e-01  -5.8438e-01  -6.0779e-01
   -6.2210e-01  -6.2945e-01  -6.3163e-01  -6.3011e-01  -6.2603e-01
   -6.2026e-01  -6.1346e-01  -6.0610e-01  -5.9854e-01  -5.9102e-01]
 [  2.7669e-01   1.3309e-01   0.0000e+00  -1.1842e-01  -2.1955e-01
   -3.0242e-01  -3.6750e-01  -4.1638e-01  -4.5129e-01  -4.7471e-01
   -4.8901e-01  -4.9636e-01  -4.9854e-01  -4.9702e-01  -4.9294e-01
   -4.8717e-01  -4.8037e-01  -4.7301e-01  -4.6545e-01  -4.5793e-01]
 [  3.9511e-01   2.5151e-01   1.1842e-01   0.0000e+00  -1.0113e-01
   -1.8400e-01  -2.4908e-01  -2.9796e-01  -3.3287e-01  -3.5629e-01
   -3.7059e-01  -3.7794e-01  -3.8012e-01  -3.7860e-01  -3.7452e-01
   -3.6875e-01  -3.6195e-01  -3.5459e-01  -3.4703e-01  -3.3951e-01]
 [  4.9624e-01   3.5264e-01   2.1955e-01   1.0113e-01   0.0000e+00
   -8.2864e-02  -1.4795e-01  -1.9683e-01  -2.3174e-01  -2.5515e-01
   -2.6946e-01  -2.7681e-01  -2.7899e-01  -2.7747e-01  -2.7339e-01
   -2.6762e-01  -2.6082e-01  -2.5346e-01  -2.4590e-01  -2.3838e-01]
 [  5.7910e-01   4.3551e-01   3.0242e-01   1.8400e-01   8.2864e-02
    0.0000e+00  -6.5081e-02  -1.1396e-01  -1.4887e-01  -1.7229e-01
   -1.8660e-01  -1.9394e-01  -1.9613e-01  -1.9461e-01  -1.9052e-01
   -1.8475e-01  -1.7795e-01  -1.7060e-01  -1.6303e-01  -1.5552e-01]
 [  6.4418e-01   5.0059e-01   3.6750e-01   2.4908e-01   1.4795e-01
    6.5081e-02   0.0000e+00  -4.8881e-02  -8.3794e-02  -1.0721e-01
   -1.2152e-01  -1.2886e-01  -1.3104e-01  -1.2952e-01  -1.2544e-01
   -1.1967e-01  -1.1287e-01  -1.0551e-01  -9.7954e-02  -9.0434e-02]
 [  6.9307e-01   5.4947e-01   4.1638e-01   2.9796e-01   1.9683e-01
    1.1396e-01   4.8881e-02   0.0000e+00  -3.4913e-02  -5.8326e-02
   -7.2635e-02  -7.9980e-02  -8.2163e-02  -8.0644e-02  -7.6562e-02
   -7.0793e-02  -6.3989e-02  -5.6633e-02  -4.9073e-02  -4.1553e-02]
 [  7.2798e-01   5.8438e-01   4.5129e-01   3.3287e-01   2.3174e-01
    1.4887e-01   8.3794e-02   3.4913e-02   0.0000e+00  -2.3413e-02
   -3.7722e-02  -4.5067e-02  -4.7250e-02  -4.5731e-02  -4.1649e-02
   -3.5880e-02  -2.9076e-02  -2.1720e-02  -1.4160e-02  -6.6404e-03]
 [  7.5139e-01   6.0779e-01   4.7471e-01   3.5629e-01   2.5515e-01
    1.7229e-01   1.0721e-01   5.8326e-02   2.3413e-02   0.0000e+00
   -1.4309e-02  -2.1654e-02  -2.3837e-02  -2.2317e-02  -1.8236e-02
   -1.2466e-02  -5.6629e-03   1.6929e-03   9.2534e-03   1.6773e-02]
 [  7.6570e-01   6.2210e-01   4.8901e-01   3.7059e-01   2.6946e-01
    1.8660e-01   1.2152e-01   7.2635e-02   3.7722e-02   1.4309e-02
    0.0000e+00  -7.3447e-03  -9.5282e-03  -8.0083e-03  -3.9269e-03
    1.8427e-03   8.6462e-03   1.6002e-02   2.3562e-02   3.1082e-02]
 [  7.7305e-01   6.2945e-01   4.9636e-01   3.7794e-01   2.7681e-01
    1.9394e-01   1.2886e-01   7.9980e-02   4.5067e-02   2.1654e-02
    7.3447e-03   0.0000e+00  -2.1835e-03  -6.6361e-04   3.4178e-03
    9.1874e-03   1.5991e-02   2.3347e-02   3.0907e-02   3.8427e-02]
 [  7.7523e-01   6.3163e-01   4.9854e-01   3.8012e-01   2.7899e-01
    1.9613e-01   1.3104e-01   8.2163e-02   4.7250e-02   2.3837e-02
    9.5282e-03   2.1835e-03   0.0000e+00   1.5199e-03   5.6013e-03
    1.1371e-02   1.8174e-02   2.5530e-02   3.3091e-02   4.0610e-02]
 [  7.7371e-01   6.3011e-01   4.9702e-01   3.7860e-01   2.7747e-01
    1.9461e-01   1.2952e-01   8.0644e-02   4.5731e-02   2.2317e-02
    8.0083e-03   6.6361e-04  -1.5199e-03   0.0000e+00   4.0814e-03
    9.8510e-03   1.6654e-02   2.4010e-02   3.1571e-02   3.9090e-02]
 [  7.6963e-01   6.2603e-01   4.9294e-01   3.7452e-01   2.7339e-01
    1.9052e-01   1.2544e-01   7.6562e-02   4.1649e-02   1.8236e-02
    3.9269e-03  -3.4178e-03  -5.6013e-03  -4.0814e-03   0.0000e+00
    5.7696e-03   1.2573e-02   1.9929e-02   2.7489e-02   3.5009e-02]
 [  7.6386e-01   6.2026e-01   4.8717e-01   3.6875e-01   2.6762e-01
    1.8475e-01   1.1967e-01   7.0793e-02   3.5880e-02   1.2466e-02
   -1.8427e-03  -9.1874e-03  -1.1371e-02  -9.8510e-03  -5.7696e-03
    0.0000e+00   6.8035e-03   1.4159e-02   2.1720e-02   2.9239e-02]
 [  7.5705e-01   6.1346e-01   4.8037e-01   3.6195e-01   2.6082e-01
    1.7795e-01   1.1287e-01   6.3989e-02   2.9076e-02   5.6629e-03
   -8.6462e-03  -1.5991e-02  -1.8174e-02  -1.6654e-02  -1.2573e-02
   -6.8035e-03   0.0000e+00   7.3558e-03   1.4916e-02   2.2436e-02]
 [  7.4970e-01   6.0610e-01   4.7301e-01   3.5459e-01   2.5346e-01
    1.7060e-01   1.0551e-01   5.6633e-02   2.1720e-02  -1.6929e-03
   -1.6002e-02  -2.3347e-02  -2.5530e-02  -2.4010e-02  -1.9929e-02
   -1.4159e-02  -7.3558e-03   0.0000e+00   7.5605e-03   1.5080e-02]
 [  7.4214e-01   5.9854e-01   4.6545e-01   3.4703e-01   2.4590e-01
    1.6303e-01   9.7954e-02   4.9073e-02   1.4160e-02  -9.2534e-03
   -2.3562e-02  -3.0907e-02  -3.3091e-02  -3.1571e-02  -2.7489e-02
   -2.1720e-02  -1.4916e-02  -7.5605e-03   0.0000e+00   7.5194e-03]
 [  7.3462e-01   5.9102e-01   4.5793e-01   3.3951e-01   2.3838e-01
    1.5552e-01   9.0434e-02   4.1553e-02   6.6404e-03  -1.6773e-02
   -3.1082e-02  -3.8427e-02  -4.0610e-02  -3.9090e-02  -3.5009e-02
   -2.9239e-02  -2.2436e-02  -1.5080e-02  -7.5194e-03   0.0000e+00]]

In [4]:
def init_weights():
    init = tf.global_variables_initializer()
    sess.run(init)
#---------------------------------------------------------------------------
def get_weights():
    weights = sess.run([W, V, B])
    return np.asarray(weights)
#---------------------------------------------------------------------------
def show_grads():            
    grads = sess.run(grads_weights, {time: tTr})            
    return np.asarray(grads)
#---------------------------------------------------------------------------
def show_sigmoids(observation_time):
    matrix = sess.run(total_matrix, {time: observation_time})
    plt.title('Sigmoids system')
    plt.grid(True)
    for i in range(sigmoid_ammount):
        si = matrix[:,i] 
        plt.plot(observation_time, si)
#---------------------------------------------------------------------------
def show_approx(observation_time):
    approx = sess.run(approximator, {time: observation_time})
    plt.grid(True)
    plt.title('Approximation')
    plt.plot(observation_time, approx)

In [5]:
init_weights()
#---------------------------------------------------------------------------
#Initial weights of network and sigmoids system
initial_weights = get_weights()
print(get_weights())
some_time = np.linspace(-15, 7*math.pi, 300).reshape(300,1)
show_sigmoids(some_time)


[[[ 0.15439912  0.86168779  0.80130171  0.31083218 -0.94530543  0.16870898
   -0.71380726 -0.28403429]]

 [[-0.70251845  0.66699147  0.35546567  0.15677445 -0.15366605 -0.16434056
   -0.68240581  0.45493697]]

 [[-0.98826777  0.27421291 -0.44309173  0.0274645  -0.34229351  0.70468399
   -0.40745927 -0.91451224]]]

In [6]:
show_approx(some_time)



In [7]:
tTr = np.linspace(0, 2*math.pi, m).reshape(m,1)
Err = []
I = []
#---------------------------------------------------------------------------
def grad_descent(N, learn_rate):
    sess.run(tf.assign(alpha, learn_rate))
    for i in tqdm_notebook(range(N)):
        I.append(i)
        _, _, _, j = sess.run([update_W, update_V, update_B, J], {time: tTr})
        Err.append(1/j)
        if i%1800==0:
            clear_output()
            print(j)
        if (j<1e-6):
            break
#---------------------------------------------------------------------------

In [ ]:


In [17]:
grad_descent(300000, 10**-3)


[ 0.0349]
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-17-ea95111510fc> in <module>()
----> 1 grad_descent(300000, 10**-3)

<ipython-input-7-1db57e56e5c8> in grad_descent(N, learn_rate)
      7     for i in tqdm_notebook(range(N)):
      8         I.append(i)
----> 9         _, _, _, j = sess.run([update_W, update_V, update_B, J], {time: tTr})
     10         Err.append(1/j)
     11         if i%1800==0:

/home/rodion/anaconda3/lib/python3.6/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
    893     try:
    894       result = self._run(None, fetches, feed_dict, options_ptr,
--> 895                          run_metadata_ptr)
    896       if run_metadata:
    897         proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

/home/rodion/anaconda3/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
   1122     if final_fetches or final_targets or (handle and feed_dict_tensor):
   1123       results = self._do_run(handle, final_targets, final_fetches,
-> 1124                              feed_dict_tensor, options, run_metadata)
   1125     else:
   1126       results = []

/home/rodion/anaconda3/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
   1319     if handle is None:
   1320       return self._do_call(_run_fn, self._session, feeds, fetches, targets,
-> 1321                            options, run_metadata)
   1322     else:
   1323       return self._do_call(_prun_fn, self._session, handle, feeds, fetches)

/home/rodion/anaconda3/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
   1325   def _do_call(self, fn, *args):
   1326     try:
-> 1327       return fn(*args)
   1328     except errors.OpError as e:
   1329       message = compat.as_text(e.message)

/home/rodion/anaconda3/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata)
   1304           return tf_session.TF_Run(session, options,
   1305                                    feed_dict, fetch_list, target_list,
-> 1306                                    status, run_metadata)
   1307 
   1308     def _prun_fn(session, handle, feed_dict, fetch_list):

KeyboardInterrupt: 

In [9]:
np.set_printoptions(precision=4)
plt.plot(I, Err)


Out[9]:
[<matplotlib.lines.Line2D at 0x7fd17f550668>]

In [18]:
print(show_grads())


[[[  2.5552e-04  -6.8851e-05  -5.0938e-04  -2.3637e-04   9.0618e-04
    -5.1641e-04   5.1069e-04   3.4545e-04]]

 [[  1.2252e-04   6.8384e-05  -1.4676e-05  -6.7912e-06  -1.7503e-05
     1.3021e-05  -3.5591e-04   1.4950e-04]]

 [[  6.8707e-06   3.9898e-04   6.6487e-04  -1.4563e-04   4.4665e-04
    -3.5796e-04  -2.0159e-04   3.0102e-04]]]

In [11]:
print(get_weights())


[[[ 0.3206  0.9404  0.8046  0.4579 -0.8544  0.5199 -0.4578 -0.3339]]

 [[-0.7053  0.4876  0.2095 -0.1025  0.1658 -0.1913 -0.574   0.8477]]

 [[-0.9303  0.4061 -0.3343  0.0633 -0.4423  0.7484 -0.5119 -0.8663]]]

In [19]:
show_sigmoids(some_time)



In [20]:
observe_time = np.linspace(-1, 2.5*math.pi, 400).reshape(400,1)
plt.title('Approximation and error')
plt.grid(True)
approx = sess.run(approximator, {time: observe_time}).reshape((400,1))
true = np.sin(observe_time)
plt.plot(observe_time, approx)
plt.plot(observe_time, true - approx, 'red')


Out[20]:
[<matplotlib.lines.Line2D at 0x7fd17d298eb8>]

In [21]:
plt.plot(observe_time, approx)


Out[21]:
[<matplotlib.lines.Line2D at 0x7fd17d1c54e0>]
200922/|/ 67%|| 200922/300000 [04:16<02:06, 783.12it/s]