In [1]:
# Необходмые команды импорта.
import sys, os
sys.path.append(os.path.join(sys.path[0], '../source/'))
sys.path.append(os.path.join(sys.path[0], '../../'))
import numpy as np
from numpy import linalg as LA
import tensorflow as tf
from matplotlib import pylab as plt
from tqdm import tqdm_notebook
from IPython.display import clear_output
import numpy.random as rand
from physlearn.NeuralNet.NeuralNetPro import NeuralNetPro
#from physlearn.DifferentialEvolution import DifferentialEvolution
from physlearn.Optimizer.NelderMead.NelderMead import NelderMead
import d1_osc
import ann_constructor
import math_util
from visualiser import Visualiser
from numpy.polynomial.hermite import Hermite
from numpy.polynomial.hermite import hermroots, hermfromroots

# Model Parameters
sigmoid_ammount = 25
m = 450 # размер сеток обучения
M = 10 # количество выходных нейронов(базисных функций)
a = -10
b = 10
hidden_ammount = 35
# Сетка для обучения(пока нету оптимизатора, устройчивого к стохастической замене)
train_xi = np.linspace(a, b, m, endpoint=True).reshape(1, m)

#colloc_xi = np.linspace(a/2.0 -1, b/2.0 + 1, M, endpoint=True).reshape(1, M)
#obs_xi = np.linspace(a, b, m, endpoint=True).reshape(1, m) 
%matplotlib inline


D:\Anaconda\lib\site-packages\h5py\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
  from ._conv import register_converters as _register_converters

In [2]:
def root_of_n_herm(n):
    shape = []
    if n == 0:
        shape = [1]
    else:
        for i in range(n):
            shape.append(0)
        shape.append(1)
    if n!=0:
        return hermroots(shape)
    else:
        return "no roots"

def calc_herm_grid_xi(amount):
    grid = []
    i = 0
    while (amount > len(grid)):
        i = i + 1
        set_of_roots = root_of_n_herm(i)
        for j in range(len(set_of_roots)):
            flag = 0
            for k in range(len(grid)):
                if ((grid[k] - set_of_roots[j]) < 0.1):
                    flag = 1
                    break
                if (flag == 0):
                    grid.append(set_of_roots[j])
    return np.array(grid)

colloc_xi = calc_herm_grid_xi(M).reshape(1, M)


  File "<ipython-input-2-be68b3b93454>", line 22
    for k range(len(grid)):
              ^
SyntaxError: invalid syntax

In [ ]:
colloc_xi

In [ ]:
# ANN
net, net_output, net_sum, sess = ann_constructor.return_deep_net_expressions(M, sigmoid_ammount, hidden_ammount)
# Выражение, определяющеие образ выходов сети при действии гамильтонианом. Task-dependant
first_deriative = tf.gradients(net_output, net.x)[0]
net_images = (-(tf.gradients(first_deriative, net.x)[0]) + tf.multiply(tf.square(net.x), net.output))
net_images_sum = tf.reduce_sum(input_tensor = net_images, axis = 0)

def net_outs_value(x):
    return net.calc(net_output, {net.x : x})
def net_sum_value(x):
    return net.calc(net_sum, {net.x : x})
def net_images_value(x):
    return net.calc(net_images, {net.x: x})
def net_images_sum_value(x):
    return net.calc(net_images_sum, {net.x: x})

dim = net.return_unroll_dim()
print(dim)

In [ ]:
colloc_matrix = net.calc(net_output, {net.x : colloc_xi})
diag_colloc_matrix = np.eye(M)
diag_colloc_matrix.flat[:: M + 1] += -1 + colloc_matrix.diagonal()
normal_colloc_matrix = np.matmul(LA.inv(diag_colloc_matrix), colloc_matrix)
np.fill_diagonal(normal_colloc_matrix, 0)
linear_factor = math_util.norm(normal_colloc_matrix)
print('Colloc matrix: \n', colloc_matrix)
print('Diag of colloc matrix: \n', diag_colloc_matrix)
print('Normal colloc matrix: \n', np.matmul(LA.inv(diag_colloc_matrix), colloc_matrix))
print('Normal colloc matrix - diag: \n', normal_colloc_matrix)
print('Measure of linearity: \n', linear_factor)

In [ ]:
# Линейная регрессия
A = tf.transpose(net_output)
A_T = net_output
y = net_images_sum
y = tf.expand_dims(y, -1)
omega = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(A_T, A)), A_T), y)

# Определение функционала J
regression_fit = tf.matmul(tf.transpose(net_output), omega)
noninvariance_measure = (1/m) * tf.reduce_sum(tf.square(tf.expand_dims(net_images_sum, -1) - regression_fit))
J_expression = noninvariance_measure

def J(params):
    net.roll_matrixes(params)
    colloc_matrix = net.calc(net_output, {net.x : colloc_xi})
    diag_colloc_matrix = np.eye(M)
    diag_colloc_matrix.flat[:: M + 1] += -1 + colloc_matrix.diagonal()
    normal_colloc_matrix = np.matmul(LA.inv(diag_colloc_matrix), colloc_matrix)
    np.fill_diagonal(normal_colloc_matrix, 0)
    linear_factor = math_util.norm(normal_colloc_matrix)
    cost = net.calc(J_expression, {net.x : train_xi}) + linear_factor
    cost += np.sum(np.square(net.run(np.linspace(-7, 7, 2, endpoint=True).reshape(1, 2) )))
    cost += 2*np.sum(np.square(net.run(np.linspace(-8, 8, 2, endpoint=True).reshape(1, 2) )))
    cost += 3*np.sum(np.square(net.run(np.linspace(-11, 11, 2, endpoint=True).reshape(1, 2) )))
    return cost

# Оптимизация
iter_number_de = 10000
#iter_number_nm = 1000000
#opt_de = DifferentialEvolution(amount_of_individuals = 6*dim, end_cond = iter_number_de, f=0.55 )
opt_nm = NelderMead(-2,2)
opt_nm.set_epsilon_and_sd(0.3, 100)

In [ ]:
optimisation_result = opt_nm.optimize(J, dim, 10**8, 10**(-8))
print("J after optimisation: ", J(optimisation_result.x))
net.roll_matrixes(optimisation_result.x)
print("Ебала: ", optimisation_result)

In [ ]:


In [ ]:
print("J after optimisation: ", J(optimisation_result))
net.roll_matrixes(optimisation_result)

In [ ]:
x_obss = np.linspace(-2, 2, 100).reshape(1, 100)
plt.plot(np.sum(x_obss, 0), net_images_value(x_obss)[3,:] )
plt.plot(np.sum(x_obss, 0), net_outs_value(x_obss)[3,:] )

In [ ]:
vis = Visualiser(net, net_output, net_sum, M)
vis.plot_four(np.linspace(-15, 15, m, endpoint=True).reshape(1, m) )

In [ ]:
#outs_matrix = net.calc(net_output, {net.x : train_xi})
#outs_matrix_file = open("outs_matrix1.txt", "wb")
#np.savetxt(outs_matrix_file, outs_matrix, delimiter=' ')
#outs_matrix_file.close()

In [ ]:
number_col = M
#net.roll_matrixes(optimisation_result)
#collocation_grid = rand.random((1,200)) * (np.abs(x_0_max) + np.abs(x_0_min)) - np.abs(x_0_min)
#collocation_grid = np.linspace(x_0_min, x_0_max, number_col, endpoint=True).reshape(1, number_col)
#collocation_grid = np.linspace(a, b, number_col, endpoint=True).reshape(1, number_col)
collocation_grid = calc_herm_grid_xi(number_col)
hamiltonian_matrix = net_images_value(collocation_grid)
bind_matrix = net.run(collocation_grid)
from scipy.linalg import eig
#eigvals, eigvecs = eig(np.matmul(np.transpose(bind_matrix), hamiltonian_matrix), np.matmul(np.transpose(bind_matrix), bind_matrix))
eigvals, eigvecs = eig(hamiltonian_matrix, bind_matrix)
eigvals_norm = np.abs(eigvals)
eigvals_real = np.real(eigvals)
for eigval in eigvals:
    print(eigval)
#print(eigvecs[2,:])

In [ ]:
d1_osc.show_wf(4,train_xi)

In [ ]:
x = np.linspace(0,25,1000)
i = 0

for eigval in eigvals:
    i+=1
    plt.plot(x, eigval+0*x)
    plt.plot(1+i, eigval, 'x')

In [ ]:
x_obs = np.linspace(-5, 5, 100).reshape(1, 100)
for i in range(M):
    y = eigvals[i]*((net.run(x_obs)).transpose()*eigvecs[i,:])
    y = np.sum(y,1)
    y1 = net_images_value(x_obs)[i,:]
    plt.plot(np.sum(x_obs, 0), y)# - y1))

In [ ]:
eigvals[k]

In [ ]:
a = -10

In [ ]:
x_obs = np.linspace(-5, 5, 1000).reshape(1, 1000)
k = 5

y = eigvals[k]*(net.run(x_obs)).transpose()*eigvecs[k,:]
plt.plot(np.sum(x_obs, 0), np.sum(y,1))

h = net_images_value(x_obs).transpose()*eigvecs[k,:]
plt.plot(np.sum(x_obs, 0), np.sum(h,1))

In [ ]: