In [2]:
# Required imports.
import sys
import os
sys.path.append(os.path.join(sys.path[0], '../'))
sys.path.append(os.path.join(sys.path[0], '../source/'))
import numpy as np
from numpy import linalg as LA
from numpy.polynomial.hermite import hermroots
import tensorflow as tf
from matplotlib import pylab as plt
from IPython.display import clear_output
from physlearn.NeuralNet.NeuralNetPro import NeuralNetPro
from physlearn.Optimizer.NelderMead.NelderMead import NelderMead

import d1_osc
import ann_constructor
import math_util
from visualiser import Visualiser

# Model parameters
sigmoid_amount = 25
m = 450  # training grid size
M = 10  # number of output neurons (basis functions)
a = -10
b = 10
hidden_amount = 35
# Training grid (no optimizer robust to stochastic resampling yet)
train_xi = np.linspace(a, b, m, endpoint=True).reshape(1, m)

# colloc_xi = np.linspace(a/2.0 -1, b/2.0 + 1, M, endpoint=True).reshape(1, M)
# obs_xi = np.linspace(a, b, m, endpoint=True).reshape(1, m) 
%matplotlib inline

In [ ]:
def root_of_n_herm(n):
    # Roots of the n-th (physicists') Hermite polynomial H_n.
    if n == 0:
        return "no roots"  # H_0 is a nonzero constant
    # The coefficient vector [0, ..., 0, 1] selects H_n in the Hermite basis.
    return hermroots([0] * n + [1])


def calc_herm_grid_xi(amount):
    # Collect collocation nodes from Hermite roots of increasing degree.
    # The signed comparison below (no abs) accepts a candidate root only if it
    # lies at least 0.1 below every point already in the grid, so in practice
    # only the most negative root of each H_n is appended (cf. the grid below).
    grid = []
    i = 0
    while amount > len(grid):
        i += 1
        for root in root_of_n_herm(i):
            if all(existing - root >= 0.1 for existing in grid):
                grid.append(root)
    return np.array(grid)


colloc_xi = calc_herm_grid_xi(M).reshape(1, M)

In [3]:
colloc_xi


[[-0.         -0.70710678 -1.22474487 -1.65068012 -2.02018287 -2.35060497
  -2.65196136 -2.93063742 -3.1909932  -3.43615912]]
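
A quick cross-check of the grid (a sketch, assuming the intent is Hermite-root collocation nodes): NumPy's hermgauss(M) returns the M roots of H_M directly. Note this is a different node set from calc_herm_grid_xi, which keeps one root per degree 1..M.

In [ ]:
# Cross-check sketch: Gauss-Hermite nodes vs. the custom grid above.
from numpy.polynomial.hermite import hermgauss

gh_nodes, gh_weights = hermgauss(M)  # roots of H_M and quadrature weights
print('Gauss-Hermite nodes:', gh_nodes)
print('Custom grid:        ', np.sort(colloc_xi.ravel()))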

In [4]:
# ANN
net, net_output, net_sum, sess = ann_constructor.return_deep_net_expressions(M, sigmoid_amount, hidden_amount)
# Expression for the image of the network outputs under the Hamiltonian
# H = -d^2/dx^2 + x^2 (1D harmonic oscillator). Task-dependent.
first_derivative = tf.gradients(net_output, net.x)[0]
net_images = -(tf.gradients(first_derivative, net.x)[0]) + tf.multiply(tf.square(net.x), net.output)
net_images_sum = tf.reduce_sum(input_tensor=net_images, axis=0)


def net_outs_value(x):
    return net.calc(net_output, {net.x : x})


def net_sum_value(x):
    return net.calc(net_sum, {net.x : x})


def net_images_value(x):
    return net.calc(net_images, {net.x: x})


def net_images_sum_value(x):
    return net.calc(net_images_sum, {net.x: x})


dim = net.return_unroll_dim()
print(dim)


1320
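
A sanity check of the Hamiltonian expression (a sketch, independent of the TF graph): for H = -d^2/dx^2 + x^2 the Gaussian exp(-x^2/2) is an exact eigenfunction with eigenvalue 1, and a central-difference stencil reproduces this.

In [ ]:
# Finite-difference sanity check: H f / f should be close to 1 for the
# oscillator ground state f(x) = exp(-x^2 / 2).
xs = np.linspace(-3, 3, 2001)
h = xs[1] - xs[0]
f = np.exp(-xs**2 / 2)
d2f = (f[2:] - 2 * f[1:-1] + f[:-2]) / h**2  # central second derivative
Hf = -d2f + xs[1:-1]**2 * f[1:-1]
print('max |Hf/f - 1| =', np.max(np.abs(Hf / f[1:-1] - 1)))  # O(h^2), tiny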

In [5]:
colloc_matrix = net.calc(net_output, {net.x : colloc_xi})
# Diagonal part D of the collocation matrix C
diag_colloc_matrix = np.diag(colloc_matrix.diagonal())
# Row-normalize by the diagonal: N = D^{-1} C has unit diagonal
normal_colloc_matrix = np.matmul(LA.inv(diag_colloc_matrix), colloc_matrix)
print('Colloc matrix: \n', colloc_matrix)
print('Diag of colloc matrix: \n', diag_colloc_matrix)
print('Normal colloc matrix: \n', normal_colloc_matrix)
# Zero the unit diagonal; the norm of what remains measures the off-diagonal coupling
np.fill_diagonal(normal_colloc_matrix, 0)
linear_factor = math_util.norm(normal_colloc_matrix)
print('Normal colloc matrix - diag: \n', normal_colloc_matrix)
print('Measure of linearity: \n', linear_factor)


Colloc matrix: 
 [[-8.10563766 -6.70908133 -6.33590516 -5.46880746 -4.50389609 -3.94222088
  -3.68190223 -3.54593088 -3.43302727 -3.32194358]
 [-1.55458309 -2.4382095  -2.50179246 -2.22673263 -1.55994311 -1.04990665
  -0.86905237 -0.79637828 -0.73945026 -0.66828179]
 [-3.06594727 -2.62125325 -0.80091956  0.65042214  1.25036649  2.0149921
   2.67140147  3.04655301  3.25028702  3.34166962]
 [-1.71278522 -3.86097565 -5.99672409 -6.91085208 -7.50828402 -7.71168103
  -7.49005141 -7.23031149 -7.01552752 -6.83787584]
 [ 5.92166599  6.64873063  7.03831719  6.66889009  5.99596058  5.16533451
   4.47713047  3.95630992  3.56394228  3.25446771]
 [-2.44493244 -0.61091039  0.34704567  0.04110345 -0.07989334  0.12272563
   0.41518936  0.55302361  0.56332823  0.47597682]
 [-3.40127663 -4.01168972 -2.86408687 -2.67888993 -2.29911301 -1.408069
  -0.6166718  -0.07025145  0.35101958  0.71967975]
 [ 3.72518309  4.24341839  3.11218776  2.79209576  3.33682654  3.93796779
   4.15160571  4.19126332  4.15068722  4.07372505]
 [-0.89536918  1.33807627  2.36100879  2.72306137  2.60591733  2.68388371
   2.92918099  3.03410135  3.00702372  2.88455595]
 [-8.4933535  -9.19640859 -7.23213108 -7.09134857 -7.09493126 -6.92041768
  -6.65678067 -6.37290857 -6.12228128 -5.9106905 ]]
Diag of colloc matrix: 
 [[-8.10563766  0.          0.          0.          0.          0.
   0.          0.          0.          0.        ]
 [ 0.         -2.4382095   0.          0.          0.          0.
   0.          0.          0.          0.        ]
 [ 0.          0.         -0.80091956  0.          0.          0.
   0.          0.          0.          0.        ]
 [ 0.          0.          0.         -6.91085208  0.          0.
   0.          0.          0.          0.        ]
 [ 0.          0.          0.          0.          5.99596058  0.
   0.          0.          0.          0.        ]
 [ 0.          0.          0.          0.          0.          0.12272563
   0.          0.          0.          0.        ]
 [ 0.          0.          0.          0.          0.          0.
  -0.6166718   0.          0.          0.        ]
 [ 0.          0.          0.          0.          0.          0.
   0.          4.19126332  0.          0.        ]
 [ 0.          0.          0.          0.          0.          0.
   0.          0.          3.00702372  0.        ]
 [ 0.          0.          0.          0.          0.          0.
   0.          0.          0.         -5.9106905 ]]
Normal colloc matrix: 
 [[  1.           0.82770556   0.78166647   0.67469183   0.55564982
    0.48635543   0.45423968   0.43746477   0.42353574   0.40983125]
 [  0.63759209   1.           1.02607773   0.91326551   0.63979043
    0.43060559   0.35643056   0.32662422   0.30327593   0.27408711]
 [  3.82803395   3.27280464   1.          -0.81209422  -1.56116363
   -2.51584829  -3.33541794  -3.80381896  -4.05819408  -4.17229118]
 [  0.24783995   0.55868301   0.86772572   1.           1.08644838
    1.11587992   1.08381012   1.04622576   1.01514653   0.98944034]
 [  0.98760923   1.1088683    1.17384314   1.11223048   1.
    0.86146906   0.74669111   0.65982921   0.59439055   0.5427767 ]
 [-19.92193917  -4.97785515   2.82781746   0.33492153  -0.65099147
    1.           3.3830698    4.50617877   4.59014346   3.87838174]
 [  5.51553778   6.50538859   4.64442649   4.34410964   3.72826031
    2.28333611   1.           0.11392032  -0.5692162   -1.16703853]
 [  0.8887972    1.01244376   0.74254169   0.66617045   0.79613861
    0.93956583   0.99053803   1.           0.99031889   0.97195636]
 [ -0.29775927   0.44498361   0.78516467   0.90556697   0.86661017
    0.89253826   0.97411303   1.00900479   1.           0.95927276]
 [  1.43694776   1.55589412   1.22356789   1.1997496    1.20035574
    1.17083066   1.12622724   1.07820035   1.03579798   1.        ]]
Normal colloc matrix - diag: 
 [[  0.           0.82770556   0.78166647   0.67469183   0.55564982
    0.48635543   0.45423968   0.43746477   0.42353574   0.40983125]
 [  0.63759209   0.           1.02607773   0.91326551   0.63979043
    0.43060559   0.35643056   0.32662422   0.30327593   0.27408711]
 [  3.82803395   3.27280464   0.          -0.81209422  -1.56116363
   -2.51584829  -3.33541794  -3.80381896  -4.05819408  -4.17229118]
 [  0.24783995   0.55868301   0.86772572   0.           1.08644838
    1.11587992   1.08381012   1.04622576   1.01514653   0.98944034]
 [  0.98760923   1.1088683    1.17384314   1.11223048   0.
    0.86146906   0.74669111   0.65982921   0.59439055   0.5427767 ]
 [-19.92193917  -4.97785515   2.82781746   0.33492153  -0.65099147
    0.           3.3830698    4.50617877   4.59014346   3.87838174]
 [  5.51553778   6.50538859   4.64442649   4.34410964   3.72826031
    2.28333611   0.           0.11392032  -0.5692162   -1.16703853]
 [  0.8887972    1.01244376   0.74254169   0.66617045   0.79613861
    0.93956583   0.99053803   0.           0.99031889   0.97195636]
 [ -0.29775927   0.44498361   0.78516467   0.90556697   0.86661017
    0.89253826   0.97411303   1.00900479   0.           0.95927276]
 [  1.43694776   1.55589412   1.22356789   1.1997496    1.20035574
    1.17083066   1.12622724   1.07820035   1.03579798   0.        ]]
Measure of linearity: 
 774.8087005876141
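
In other words, with C the collocation matrix and D its diagonal part, the "measure of linearity" is the norm of the off-diagonal part of D^{-1} C. A minimal self-contained sketch (assuming math_util.norm behaves like a standard matrix norm such as np.linalg.norm):

In [ ]:
# Sketch of the construction above: for a nearly diagonal C the measure is small.
C = np.diag([2.0, -3.0, 5.0]) + 0.01 * np.random.randn(3, 3)
N = np.matmul(LA.inv(np.diag(C.diagonal())), C)  # unit diagonal after normalization
np.fill_diagonal(N, 0)                           # keep only off-diagonal couplings
print('measure of linearity ~', LA.norm(N))      # ~0.01-scale here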

In [6]:
# Linear regression: normal-equations least squares, omega = (A^T A)^{-1} A^T y
A = tf.transpose(net_output)
A_T = net_output
y = net_images_sum
y = tf.expand_dims(y, -1)
omega = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(A_T, A)), A_T), y)

# Definition of the functional J
regression_fit = tf.matmul(tf.transpose(net_output), omega)
noninvariance_measure = (1 / m) * tf.reduce_sum(tf.square(tf.expand_dims(net_images_sum, -1) - regression_fit))
J_expression = noninvariance_measure

def J(params):
    net.roll_matrixes(params)

    # Same linearity measure as above, recomputed for the current weights
    colloc_matrix = net.calc(net_output, {net.x : colloc_xi})
    diag_colloc_matrix = np.diag(colloc_matrix.diagonal())
    normal_colloc_matrix = np.matmul(LA.inv(diag_colloc_matrix), colloc_matrix)
    np.fill_diagonal(normal_colloc_matrix, 0)
    linear_factor = math_util.norm(normal_colloc_matrix)

    # Non-invariance functional plus the linearity penalty
    cost = net.calc(J_expression, {net.x : train_xi}) + linear_factor
    # Boundary penalties: push the outputs towards zero far from the origin
    cost += np.sum(np.square(net.run(np.linspace(-7, 7, 2, endpoint=True).reshape(1, 2))))
    cost += 2 * np.sum(np.square(net.run(np.linspace(-8, 8, 2, endpoint=True).reshape(1, 2))))
    cost += 3 * np.sum(np.square(net.run(np.linspace(-11, 11, 2, endpoint=True).reshape(1, 2))))
    return cost

# Optimization
iter_number_de = 10000
# iter_number_nm = 1000000
# opt_de = DifferentialEvolution(amount_of_individuals=6*dim, end_cond=iter_number_de, f=0.55)
opt_nm = NelderMead(-2, 2)
opt_nm.set_epsilon_and_sd(0.3, 100)
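
The normal-equations form above inverts A^T A explicitly. An equivalent NumPy-side check (a sketch, outside the training graph; numerically safer if A^T A is ill-conditioned):

In [ ]:
# Least-squares sketch: omega solves min_w ||A w - y||^2 with
# A = network outputs sampled on the training grid (m x M) and
# y = the summed Hamiltonian image on the same grid.
A_np = net_outs_value(train_xi).T                     # (m, M)
y_np = net_images_sum_value(train_xi).reshape(-1, 1)  # (m, 1)
omega_np, *_ = np.linalg.lstsq(A_np, y_np, rcond=None)
residual = A_np @ omega_np - y_np
print('noninvariance measure ~', np.mean(residual**2))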

In [7]:
optimisation_result = opt_nm.optimize(J, dim, 10**8, 10**(-8))
print("J after optimisation: ", J(optimisation_result.x))
net.roll_matrixes(optimisation_result.x)
print("Result: ", optimisation_result)


  0%|▏                                                                 | 225916/100000000 [48:44<220:50:48, 125.49it/s]
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-7-1ace14e10f8d> in <module>()
----> 1 optimisation_result = opt_nm.optimize(J, dim, 10**8, 10**(-8))
      2 print("J after optimisation: ", J(optimisation_result.x))
      3 net.roll_matrixes(optimisation_result.x)
      4 print("Result: ", optimisation_result)

~\GitKraken\SPBU_COMP_PHYS_NN_QM\physlearn\Optimizer\NelderMead\NelderMeadAbstract.py in optimize(self, func, dim, end_cond, min_cost)
    123                 self.update()
    124 
--> 125             method_type = self.iteration()
    126             self.variance_list.append(numpy.var(self.y_points))
    127             self.types_list.append(method_type)

~\GitKraken\SPBU_COMP_PHYS_NN_QM\physlearn\Optimizer\NelderMead\NelderMeadAbstract.py in iteration(self)
    175         self.x_reflected = self.calculate_reflected_point()  # Compute the reflected
    176         # point
--> 177         y_reflected = self.calc_func(self.x_reflected)
    178         # Next we take a number of steps depending on how the function values at the found points compare
    179         # No point in explaining in detail; just see "Nelder-Mead method" on Wikipedia

~\GitKraken\SPBU_COMP_PHYS_NN_QM\physlearn\Optimizer\NelderMead\NelderMead.py in calc_func(self, params)
      8 
      9     def calc_func(self, params):
---> 10         return self.func(params)

<ipython-input-6-3ca03c186674> in J(params)
     22     cost += np.sum(np.square(net.run(np.linspace(-7, 7, 2, endpoint=True).reshape(1, 2) )))
     23     cost += 2*np.sum(np.square(net.run(np.linspace(-8, 8, 2, endpoint=True).reshape(1, 2) )))
---> 24     cost += 3*np.sum(np.square(net.run(np.linspace(-11, 11, 2, endpoint=True).reshape(1, 2) )))
     25     return cost
     26 

~\GitKraken\SPBU_COMP_PHYS_NN_QM\physlearn\NeuralNet\NeuralNetAbstract.py in run(self, inputs)
    157         # Compute the network's output for the input data inputs
    158         # inputs = numpy.array([[...], ...])
--> 159         result = self.calc(self.output, {self.x: inputs})
    160         return result
    161 

~\GitKraken\SPBU_COMP_PHYS_NN_QM\physlearn\NeuralNet\NeuralNetPro.py in calc(self, calc_var, d)
     13     def calc(self, calc_var, d):
     14         d.update(self.placeholders_dict)  # Add the weight-matrix placeholders to the dict d
---> 15         return self.sess.run(calc_var, d)
     16 
     17 

D:\Anaconda\lib\site-packages\tensorflow\python\client\session.py in run(self, fetches, feed_dict, options, run_metadata)
    875     try:
    876       result = self._run(None, fetches, feed_dict, options_ptr,
--> 877                          run_metadata_ptr)
    878       if run_metadata:
    879         proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

D:\Anaconda\lib\site-packages\tensorflow\python\client\session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
   1079 
   1080           feed_dict_tensor[subfeed_t] = np_val
-> 1081           feed_map[compat.as_bytes(subfeed_t.name)] = (subfeed_t, subfeed_val)
   1082 
   1083     # Create a fetch handler to take care of the structure of fetches.

D:\Anaconda\lib\site-packages\tensorflow\python\framework\ops.py in name(self)
    324   def name(self):
    325     """The string name of this tensor."""
--> 326     if not self._op.name:
    327       raise ValueError("Operation was not named: %s" % self._op)
    328     return "%s:%d" % (self._op.name, self._value_index)

D:\Anaconda\lib\site-packages\tensorflow\python\framework\ops.py in name(self)
   1836   def name(self):
   1837     """The full name of this operation."""
-> 1838     return c_api.TF_OperationName(self._c_op)
   1839 
   1840   @property

KeyboardInterrupt: 

In [ ]:


In [ ]:
print("J after optimisation: ", J(optimisation_result.x))
net.roll_matrixes(optimisation_result.x)

In [8]:
x_obss = np.linspace(-2, 2, 100).reshape(1, 100)
# Hamiltonian image and raw output of basis function no. 3
plt.plot(x_obss[0], net_images_value(x_obss)[3, :])
plt.plot(x_obss[0], net_outs_value(x_obss)[3, :])


Out[8]:
[<matplotlib.lines.Line2D at 0x18a450a40b8>]

In [9]:
vis = Visualiser(net, net_output, net_sum, M)
vis.plot_four(np.linspace(-15, 15, m, endpoint=True).reshape(1, m) )


<Figure size 432x288 with 0 Axes>

In [ ]:
#outs_matrix = net.calc(net_output, {net.x : train_xi})
#outs_matrix_file = open("outs_matrix1.txt", "wb")
#np.savetxt(outs_matrix_file, outs_matrix, delimiter=' ')
#outs_matrix_file.close()

In [13]:
number_col = M
#net.roll_matrixes(optimisation_result)
#collocation_grid = rand.random((1,200)) * (np.abs(x_0_max) + np.abs(x_0_min)) - np.abs(x_0_min)
#collocation_grid = np.linspace(x_0_min, x_0_max, number_col, endpoint=True).reshape(1, number_col)
#collocation_grid = np.linspace(a, b, number_col, endpoint=True).reshape(1, number_col)
collocation_grid = calc_herm_grid_xi(number_col).reshape(1, number_col)
hamiltonian_matrix = net_images_value(collocation_grid)
bind_matrix = net.run(collocation_grid)
from scipy.linalg import eig
#eigvals, eigvecs = eig(np.matmul(np.transpose(bind_matrix), hamiltonian_matrix), np.matmul(np.transpose(bind_matrix), bind_matrix))
eigvals, eigvecs = eig(hamiltonian_matrix, bind_matrix)
eigvals_norm = np.abs(eigvals)
eigvals_real = np.real(eigvals)
for eigval in eigvals:
    print(eigval)
#print(eigvecs[2,:])


(-24.293857899751323+23.35872616850596j)
(-24.29385789975133-23.358726168505964j)
(16.674465518267002+2.526912151372231j)
(16.674465518267002-2.526912151372231j)
(6.88559490773723+6.307192752559939j)
(6.885594907737229-6.30719275255994j)
(-0.25417549337660816+0j)
(-0.0719199228513567+0j)
(3.2882592942049977+0j)
(3.313244881762154+0j)
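
Because the generalized problem H v = lambda B v is not symmetrized, complex-conjugate pairs show up. A small sketch for keeping only the (nearly) real eigenvalues, sorted; for H = -d^2/dx^2 + x^2 the exact spectrum 1, 3, 5, ... is the natural reference:

In [ ]:
# Filter out complex pairs and sort the (nearly) real eigenvalues.
tol = 1e-6
real_mask = np.abs(eigvals.imag) < tol
real_eigvals = np.sort(eigvals[real_mask].real)
print('nearly real eigenvalues:', real_eigvals)  # compare against 1, 3, 5, ...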

In [ ]:
d1_osc.show_wf(4,train_xi)

In [14]:
x = np.linspace(0, 25, 1000)
i = 0

# Plot each eigenvalue as a horizontal level line plus a marker
# (matplotlib casts the complex values to real, hence the ComplexWarning below)
for eigval in eigvals:
    i += 1
    plt.plot(x, eigval + 0 * x)
    plt.plot(1 + i, eigval, 'x')


D:\Anaconda\lib\site-packages\numpy\core\numeric.py:492: ComplexWarning: Casting complex values to real discards the imaginary part
  return array(a, dtype, copy=False, order=order)

In [15]:
x_obs = np.linspace(-5, 5, 100).reshape(1, 100)
for i in range(M):
    # scipy.linalg.eig returns eigenvectors as columns, so take eigvecs[:, i]
    y = eigvals[i] * ((net.run(x_obs)).transpose() * eigvecs[:, i])
    y = np.sum(y, 1)
    y1 = net_images_value(x_obs)[i, :]
    plt.plot(np.sum(x_obs, 0), y)  # - y1))


D:\Anaconda\lib\site-packages\numpy\core\numeric.py:492: ComplexWarning: Casting complex values to real discards the imaginary part
  return array(a, dtype, copy=False, order=order)

In [ ]:
eigvals[k]

In [ ]:
a = -10

In [ ]:
x_obs = np.linspace(-5, 5, 1000).reshape(1, 1000)
k = 5

# k-th approximate eigenfunction (eigenvectors are columns of eigvecs)
y = eigvals[k] * (net.run(x_obs)).transpose() * eigvecs[:, k]
plt.plot(np.sum(x_obs, 0), np.sum(y, 1))

# ... versus its image under the Hamiltonian
h = net_images_value(x_obs).transpose() * eigvecs[:, k]
plt.plot(np.sum(x_obs, 0), np.sum(h, 1))
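
For reference (a sketch, independent of d1_osc): the exact eigenfunctions of H = -d^2/dx^2 + x^2 are Hermite-Gaussians, handy for eyeballing the reconstructed modes above.

In [ ]:
# Exact oscillator eigenfunctions psi_n(x) = (2^n n! sqrt(pi))^(-1/2) H_n(x) exp(-x^2/2).
from numpy.polynomial.hermite import hermval
from math import factorial, pi

def psi_exact(n, x):
    coeffs = [0] * n + [1]  # select H_n in the Hermite basis
    norm = 1.0 / np.sqrt(2.0**n * factorial(n) * np.sqrt(pi))
    return norm * hermval(x, coeffs) * np.exp(-x**2 / 2)

xs = np.linspace(-5, 5, 1000)
plt.plot(xs, psi_exact(5, xs))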

In [ ]: