In [1]:
# Required imports.
import sys
#import os
sys.path.append('../physlearn/')
sys.path.append('../source')
import numpy as np
from numpy import linalg as LA
import tensorflow as tf
from matplotlib import pylab as plt
from IPython.display import clear_output
from physlearn.NeuralNet.NeuralNet import NeuralNet
from physlearn.Optimizer.NelderMead.NelderMead import NelderMead
from ann_solver import AnnSolver
import d1_osc
import ann_constructor
import math_util
from visualiser import Visualiser
# Model Parameters
n_hid1 = 15
m = 450  # training grid size
M = 6  # number of output neurons (basis functions)
a = -10
b = 10
n_hid2 = 15
%matplotlib inline
# ANN
net, net_output, net_sum, sess = ann_constructor.return_separated_deep_net_expressions(M, n_hid1, n_hid2)
# Expression defining the image of the network outputs under the action of the Hamiltonian. Task-dependent.
dim = net.return_unroll_dim()
print(dim)
herm_grid = np.array([0.0, -0.70710678, 1.22474487, -1.65068012, 0.958572465, 2.02018287])
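# (Illustrative, not the author's grid, which is kept hard-coded above.) A Gauss-Hermite
# collocation grid of M points can also be generated directly with NumPy; the sample
# points returned by hermgauss are the roots of the physicists' Hermite polynomial H_M:
herm_pts, _ = np.polynomial.hermite.hermgauss(M)
print(herm_pts)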
solver = AnnSolver(net, M, 'gaus')
solver.define_approximation_grid(a, b, m)
#herm_grid = math_util.root_of_n_hermite(5)
solver.set_linearity_grid(herm_grid)
solver.compile()
J = solver.get_cost_func()
trial_func = solver.get_trial_func()
func_sum = tf.reduce_sum(input_tensor=trial_func, axis=0)
images = solver.get_images()
images_sum = tf.reduce_sum(input_tensor=images, axis=0)
# Optimisation
opt_nm = NelderMead(-25,25)
opt_nm.set_epsilon_and_sd(0.3, 100)
def opt(J, dim, n_it, eps):
    optimisation_result = opt_nm.optimize(J, dim + 1, n_it, eps)
    return optimisation_result
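# (Illustrative, hypothetical call) A short run can confirm the cost function and the
# optimiser are wired up correctly before the long optimisation in the cell below, e.g.:
#   quick = opt(J, dim, 1000, 1e-1)
#   print("J after quick run:", J(quick.x))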
In [2]:
xi_obs = np.linspace(a, b, 1000, endpoint=True)
d1_osc.show_wf(15, xi_obs.reshape(1, 1000))
In [3]:
print(herm_grid)
plt.grid(True)
plt.plot(herm_grid, np.zeros(6), 'x')
Out[3]:
In [ ]:
optimisation_result = opt(J, dim, int(9e6), 1e-3)
print("J after optimisation: ", J(optimisation_result.x))
print("Информация: ", optimisation_result)
In [ ]:
xi_obs = np.linspace(a, b, 1000, endpoint=True)
vis = Visualiser(solver)
vis.plot_four(xi_obs.reshape(1, 1000))
In [ ]:
#y1 = net.calc(trial_func, {net.x : xi_obs.reshape(1, 1000)})
#for i in range(M):
# func_i = y1[i,:]
# plt.plot(xi_obs.reshape(1, 1000)[0], func_i)
In [ ]:
from scipy.linalg import eig
#coll_grid = np.linspace(-2,2,M).reshape(1, M)
# Values of the Hamiltonian images of the basis functions at the collocation points.
ham_matrix = net.calc(images, {net.x: herm_grid.reshape(1, M)})
ham_matrix = ham_matrix.transpose()
print(ham_matrix.shape)
# Values of the trial (basis) functions themselves at the same collocation points.
bind_matrix = net.calc(trial_func, {net.x: herm_grid.reshape(1, M)})
bind_matrix = bind_matrix.transpose()
print(bind_matrix.shape)
# Collocation eigenproblem H c = lambda B c, solved here as eig(B^{-1} H).
A = np.linalg.inv(bind_matrix)
eigvals, eigvecs = eig(np.matmul(A, ham_matrix))
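# (Alternative, not used in the original) scipy.linalg.eig can also solve the generalised
# problem H c = lambda B c directly, avoiding the explicit matrix inverse:
#   eigvals, eigvecs = eig(ham_matrix, bind_matrix)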
print(eigvals)
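# (Assumption: the d1_osc module suggests the dimensionless 1D harmonic oscillator,
# whose exact levels are E_n = n + 1/2.) Sorting the eigenvalues makes the check easy:
order = np.argsort(eigvals.real)
for n, lam in enumerate(eigvals.real[order]):
    print(f"lambda_{n} = {lam:.6f}   (exact n + 1/2 = {n + 0.5})")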
xi_obs_small = np.linspace(-4, 4, 1000, endpoint=True)
func_matrix = net.calc(trial_func, {net.x:xi_obs_small.reshape(1, 1000)})
func_matrix = func_matrix.transpose()
for i in range(M):
    y = np.matmul(func_matrix, eigvecs[:, i])
    plt.plot(xi_obs_small, y)
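# (Illustrative sketch) The eigenvectors returned by eig() are not normalised as
# wavefunctions; a trapezoidal estimate of each state's L2 norm on the plotting grid
# shows the scale of the curves drawn above:
for i in range(M):
    y_i = np.matmul(func_matrix, eigvecs[:, i])
    print(f"||psi_{i}|| ~ {np.sqrt(np.trapz(np.abs(y_i) ** 2, xi_obs_small)):.4f}")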
In [ ]:
def calc_i_eigfunc(i, x_in):
    func_matrix = net.calc(trial_func, {net.x: x_in.reshape(1, 1000)})
    return np.matmul(np.transpose(func_matrix), eigvecs[:, i])

def show_i_eigfunc(i, x_in):
    plt.plot(x_in, calc_i_eigfunc(i, x_in))
In [ ]:
d1_osc.show_wf(2, xi_obs_small.reshape(1, 1000))
show_i_eigfunc(0, xi_obs_small)
In [ ]:
#my_file = open("some2.txt", "w")
In [ ]:
#my_file = open("some.txt", "w")
#optimisation_result.x.tofile("rex.txt")
#my_file.close()
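# (Illustrative sketch, mirroring the commented-out save above) A binary round trip with
# tofile/fromfile plus a re-evaluation of the cost would verify the stored parameters:
#   optimisation_result.x.tofile("rex.txt")
#   restored = np.fromfile("rex.txt")
#   print("J from restored parameters:", J(restored))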