In [1]:
# Required imports.
import sys
#import os
sys.path.append('../physlearn/')
sys.path.append('../source')
import numpy as np
from numpy import linalg as LA
import tensorflow as tf
from matplotlib import pylab as plt
from tqdm import tqdm_notebook
from IPython.display import clear_output
import numpy.random as rand
from physlearn.NeuralNet.NeuralNet import NeuralNet
from physlearn.Optimizer.NelderMead.NelderMead import NelderMead
import d1_osc
import ann_constructor
import math_util
from visualiser import Visualiser
from scipy.integrate import quad

# Model parameters
sigmoid_ammount = 10  # number of sigmoid units in the hidden layer
m = 200               # size of the training grid
a = -6
b = 6

# Training grid (fixed for now: there is no optimizer robust to stochastic resampling yet)
train_xi = np.linspace(a, b, m, endpoint=True).reshape(1, m)

%matplotlib inline

In [2]:
# Variational parameter of the Gaussian envelope
beta = tf.placeholder(tf.float64, shape=())

# ANN: expressions for a network with 1 input and sigmoid_ammount hidden sigmoid units

net, net_output, net_sum, sess = ann_constructor.return_net_expressions(1, sigmoid_ammount)

# Harmonic potential V(x) = x^2 (dimensionless units)
potential = tf.square(net.x)

# Trial wavefunction: Gaussian envelope times the network output
trial_func = tf.exp(-tf.abs(beta)*tf.square(net.x))*net_output

def hamiltonian(func, V):
    # Apply H = -d^2/dx^2 + V in dimensionless units
    first_derivative = tf.gradients(func, net.x)[0]
    res = -(tf.gradients(first_derivative, net.x)[0]) + tf.multiply(V, func)
    return res

hamilt_image = hamiltonian(trial_func, potential)

func_norm_under_integral = tf.square(trial_func)                    # integrand of <psi|psi>
mean_energy_under_integral = tf.multiply(trial_func, hamilt_image)  # integrand of <psi|H|psi>

def calc_image(x, beta_loc):
    # Evaluate H*psi on the grid x for a given beta
    return net.calc(hamilt_image, {net.x : x, beta : beta_loc})[0]

def calc_func(x, beta_loc):
    # Evaluate psi on the grid x for a given beta
    return net.calc(trial_func, {net.x : x, beta : beta_loc})[0]

dim = net.return_unroll_dim()  # number of unrolled network parameters
print(dim)


31
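
For reference, in the dimensionless units used above the problem being solved is the stationary Schrödinger equation for the harmonic oscillator,

\hat{H}\,\psi(x) = -\psi''(x) + x^{2}\,\psi(x) = \varepsilon\,\psi(x),
\qquad \varepsilon_{n} = 2n + 1,
\qquad \psi_{0}(x) = \pi^{-1/4}\, e^{-x^{2}/2},

so the ansatz \psi(x) = e^{-|\beta| x^{2}} N(x) can represent the ground state exactly with \beta = 1/2 and a constant network output N(x).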

In [3]:
# Integrands for the mean energy (epsilon) and norm integrals
def mean_energy_integrand(x, beta_loc):
    x = np.reshape(x, (1, np.size(x)))
    return net.calc(mean_energy_under_integral, {net.x : x, beta : beta_loc})

def norm_integrand(x, beta_loc):
    x = np.reshape(x, (1, np.size(x)))
    return net.calc(func_norm_under_integral, {net.x : x, beta : beta_loc})

# Quadrature (scipy quad) estimates
def norm(beta_l):
    res, _ = quad(norm_integrand, a, b, args=(beta_l,))
    return res

def epsilon_norm(beta_l):
    mean_energy, _ = quad(mean_energy_integrand, a, b, args=(beta_l,))
    norm_l = norm(beta_l)
    eps = mean_energy / norm_l
    return eps, norm_l

# Monte Carlo estimates over a fixed sample of points x
def norm_MC(beta_l, x):
    return math_util.monte_carlo(norm_integrand, (x, beta_l))

def epsilon_MC(beta_l, x):
    mean_E = math_util.monte_carlo(mean_energy_integrand, (x, beta_l))
    norm_l = norm_MC(beta_l, x)
    eps = mean_E / norm_l
    return eps
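
math_util.monte_carlo itself is not shown in this notebook. A minimal sketch consistent with how it is called above (a function plus a tuple of its arguments) would be the mean-value integral estimate over [a, b]; this is an assumed reconstruction, not the actual library code:

def monte_carlo(integrand, args):
    # ASSUMPTION: average the integrand over the sample and scale by the
    # interval length (b - a), i.e. the mean-value integral estimate.
    values = np.asarray(integrand(*args))
    return (b - a) * np.mean(values)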

In [4]:
eps_quad, norm_quad = epsilon_norm(0.5)
norm_monte_carlo = norm_MC(0.5, train_xi)
eps_monte_carlo = epsilon_MC(0.5, train_xi)

print('Energy:')
print('Quad: ', eps_quad, 'Monte Carlo over 200 equidistant points: ', eps_monte_carlo, 'Error: ', np.abs(eps_monte_carlo - eps_quad))
print('Norm:')
print('Quad: ', norm_quad, 'Monte Carlo over 200 equidistant points: ', norm_monte_carlo, 'Error: ', np.abs(norm_monte_carlo - norm_quad))


Energy:
Quad:  1.0423938848805914 Monte Carlo over 200 equidistant points:  1.042393884880591 Error:  4.440892098500626e-16
Norm:
Quad:  1.989029726470721 Monte Carlo over 200 equidistant points:  1.9790845778383679 Error:  0.009945148632353185
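
Note that the grid estimate of the energy matches quadrature to machine precision even though the norm alone carries an error of order 10^-2: the same sample enters both the numerator and the denominator of epsilon = <psi|H|psi> / <psi|psi>, so the leading discretization error presumably cancels in the ratio.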

In [5]:
def J(params):
    # Unpack: all but the last entry are network weights, the last is beta
    net.roll_matrixes(params[0:-1])
    beta_loc = params[-1]
    func_array = calc_func(train_xi, beta_loc)
    image_array = calc_image(train_xi, beta_loc)
    # QUAD: eps_l, norm_l = epsilon_norm(beta_loc)
    eps_l = epsilon_MC(beta_loc, train_xi)
    norm_l = norm_MC(beta_loc, train_xi)
    # Deviation of H*psi from eps*psi on the grid (non-proportionality)
    non_prop = np.sum(np.square(image_array - eps_l*func_array))
    # Penalize non-proportionality plus departure of the norm from 1
    cost = non_prop / norm_l + (norm_l - 1)**2
    return cost
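
In symbols, with \varepsilon and \mathcal{N} the Monte Carlo estimates above, the cost being minimized is

J(\theta, \beta) = \frac{1}{\mathcal{N}} \sum_{i=1}^{m} \bigl[(\hat{H}\psi)(x_i) - \varepsilon\,\psi(x_i)\bigr]^{2} + (\mathcal{N} - 1)^{2},

which vanishes exactly when \psi is a normalized eigenfunction of \hat{H} on the training grid.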

In [6]:
#J(np.random.rand(dim+1))
# Optimization
#iter_number = 15000
#opt = NelderMead(end_cond = iter_number)

opt_nm = NelderMead(-2.5,2.5)
opt_nm.set_epsilon_and_sd(0.3, 100)
def opt(J, dim, n_it, eps):
    optimisation_result = opt_nm.optimize(J, dim+1, n_it, eps)
    return optimisation_result

#opt = DifferentialEvolution(amount_of_individuals = 6*dim, end_cond = iter_number, f = 0.65 )

In [7]:
optimisation_result = opt(J, dim, int(3e4), 1e-7)
print("J after optimisation: ", J(optimisation_result.x))
print("Информация: ", optimisation_result)


J after optimisation:  9.67445053644283e-08
Info:  Is converge: True
Amount of iterations: 1154
Total time: 4.48 s
Reached function value: 9.67445053644283e-08
Reason of break: Minimum cost reached


In [8]:
eps_l, norm_l = epsilon_norm(optimisation_result.x[-1])  # quadrature check at the optimized beta
print(eps_l, ' ', norm_l)
print(np.abs(eps_l - 1)/1)  # relative error w.r.t. the exact ground-state energy eps_0 = 1
print(optimisation_result.x[-1])  # optimized beta


1.0000000005560346   1.0048705344880884
5.560345517352516e-10
0.5006575652628882
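
The optimizer thus recovers \beta \approx 0.5007 and \varepsilon \approx 1 + 5.6 \cdot 10^{-10}, essentially the exact ground state \psi_{0}(x) = \pi^{-1/4} e^{-x^{2}/2} with \varepsilon_{0} = 1 quoted after cell [2]: at \beta = 1/2 the Gaussian envelope carries the solution, and the network only has to supply a nearly constant factor.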

In [9]:
func_array = np.abs(calc_func(train_xi, 0.5))  # beta fixed at 0.5, close to the optimized 0.5007
image_array = calc_image(train_xi, 0.5)
plt.title('Trial function vs. exact ground state')
plt.plot(train_xi[0], func_array)
#plt.plot(train_xi[0], image_array)
#plt.plot(train_xi[0], d1_osc.wf(0, train_xi))
Visualiser.show_wf(0, train_xi)  # exact ground-state wavefunction
plt.plot(train_xi[0], func_array - d1_osc.wf(0, train_xi), 'g--')  # pointwise deviation


Out[9]:
[<matplotlib.lines.Line2D at 0x1bb4b24aa58>]

In [10]:
# RMS deviation of the trial function from the exact ground state on the grid
data = func_array - d1_osc.wf(0, train_xi)
np.sqrt(math_util.variance(data))


Out[10]:
0.0006464290855317998
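
For reference, assuming math_util.variance matches NumPy's population variance, the same figure follows from plain NumPy:

np.sqrt(np.var(func_array - d1_osc.wf(0, train_xi)))  # ~6.5e-4 on this grid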