In [1]:
# Required import statements.
import sys, os
import numpy as np
from numpy import linalg as LA
import tensorflow as tf
from matplotlib import pylab as plt
from tqdm import tqdm_notebook
from IPython.display import clear_output
import numpy.random as rand
from DifferentialEvolution import DifferentialEvolution
from NelderMead import NelderMead

sys.path.append(os.path.join(sys.path[0], '../../'))
from physlearn.NeuralNet.NeuralNetPro import NeuralNetPro
#from physlearn.DifferentialEvolution import DifferentialEvolution
#from physlearn.NelderMead import NelderMead
%matplotlib inline


C:\Work\Programms\Anaconda3\lib\site-packages\h5py\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
  from ._conv import register_converters as _register_converters

In [2]:
# Task parameters (dimensionless harmonic oscillator)
mass = 1
omega_square = 1
# Model parameters
sigmoid_ammount = 10  # number of hidden sigmoid neurons
m = 650  # size of the training grid
N = 1  # number of output neurons (basis functions)
x_0_min = -6
x_0_max = 6
# In fact these are xi (dimensionless coordinates), not x, since the non-dimensionalized equation is solved
#np.linspace(x_0_min, x_0_max, m, endpoint=True).reshape(1, m) 

print(np.linspace(x_0_min, x_0_max , 2, endpoint=True).reshape(1, 2))


[[-6.  6.]]

In [3]:
# Build the network: 1 input -> `sigmoid_ammount` sigmoid units -> N linear outputs.
# NeuralNetPro(-2, 2): presumably the range used for random initialization — TODO confirm in NeuralNetPro.
net = NeuralNetPro(-2,2)
net.add_input_layer(1)
net.add(sigmoid_ammount, tf.sigmoid)
net.add_output_layer(N, net.linear)
net.compile()

net.set_random_matrixes()

# Pull out tensors for the raw outputs and for their convolution (sum over output neurons).
net_output = net.return_graph()
net_conv = tf.reduce_sum(input_tensor = net_output, axis = 0)
sess = net.return_session()

def net_conv_value(x):
    """Evaluate the summed network output on grid x (expected shape (1, n))."""
    return net.calc(net_conv, {net.x : x})

# Total number of trainable parameters when unrolled into a flat vector.
dim = net.return_unroll_dim()
print(dim)


31

In [4]:
# Tensors defining the image of the network outputs under the Hamiltonian:
#   H psi = -psi'' + x^2 * psi   (dimensionless harmonic oscillator potential)

first_deriative = tf.gradients(net_output, net.x)[0]

# NOTE(review): `net.output` is used here while `net_output` (from return_graph) is used
# everywhere else — presumably the same tensor; verify against NeuralNetPro.
net_images =( -(tf.gradients(first_deriative, net.x)[0]) 
             + tf.multiply(tf.square(net.x), net.output) )

# Sum of the images over output neurons (image of the convolved output, H is linear).
net_images_conv = tf.reduce_sum(input_tensor = net_images, axis = 0)

def net_images_value(x):
    """Evaluate H applied to each network output on grid x."""
    return net.calc(net_images, {net.x: x})

def net_images_conv_value(x):
    """Evaluate H applied to the summed network output on grid x."""
    return net.calc(net_images_conv, {net.x: x})

# Linear regression: least-squares coefficients omega with A @ omega ~ y,
# i.e. expand H(summed output) in the basis of the outputs themselves.
A = tf.transpose(net_output)
A_T = net_output
y = net_images_conv
y = tf.expand_dims(y, -1)
omega = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(A_T, A)), A_T), y)

In [5]:
#x_for_test = np.linspace(x_0_min, x_0_max, 500, endpoint=True).reshape(1, 500) 
#im = net.calc(tf.expand_dims(net_images_conv, -1), {net.x : x_for_test})
#o = net.calc(omega, {net.x : x_for_test})
#outs = net.calc(net_output, {net.x : x_for_test})
#U = np.transpose(outs)
#pred = np.matmul(U,o)
#epsilon = im - pred
#print(epsilon.shape)

In [6]:
def stochstic_grid1(x_min=None, x_max=None, size=None):
    """Build a uniform training grid of shape (1, size).

    Parameters default to the module-level task settings (x_0_min, x_0_max, m),
    so the original no-argument call behaves exactly as before.

    Args:
        x_min: left boundary of the grid (defaults to x_0_min).
        x_max: right boundary of the grid (defaults to x_0_max).
        size: number of grid points (defaults to m).

    Returns:
        numpy array of shape (1, size) with points spanning [x_min, x_max] inclusive.
    """
    if x_min is None:
        x_min = x_0_min
    if x_max is None:
        x_max = x_0_max
    if size is None:
        size = m
    # A random ("stochastic") grid was tried previously:
    # grid = rand.random((1, size)) * (abs(x_max) + abs(x_min)) - abs(x_min)
    return np.linspace(x_min, x_max, size, endpoint=True).reshape(1, size)

In [7]:
# Cost-function expression: mean squared residual of the regression fit —
# measures how far the net outputs are from spanning an invariant subspace of H.
regression_fit = tf.matmul(tf.transpose(net_output), omega)
noninvariance_measure = (1/m) * tf.reduce_sum(tf.square(tf.expand_dims(net_images_conv, -1) - regression_fit))
J_expression = noninvariance_measure
'''
def J(params, grid):
    net.roll_matrixes(params)    
    cost = net.calc(J_expression, {net.x : grid})
    cost += np.sum(np.square(net.run(np.linspace(x_0_min - 2, x_0_max + 2 , 2, endpoint=True).reshape(1, 2) )))
    cost += np.sum(np.square(net.run(np.linspace(x_0_min - 3, x_0_max + 3 , 2, endpoint=True).reshape(1, 2) )))
    return cost

print(J(np.random.uniform(-5, 5, dim), stochstic_grid1()))
'''
grid = stochstic_grid1()
def J(params):
    """Cost of a flat parameter vector: load it into the net and evaluate
    the (scaled) non-invariance measure on the fixed module-level grid.
    The factor 15 is an empirical weight — TODO document its origin."""
    net.roll_matrixes(params)    
    cost = 15*net.calc(J_expression, {net.x : grid})
    #cost += np.sum(np.square(net.run(np.linspace(x_0_min - 4, x_0_max + 4 , 2, endpoint=True).reshape(1, 2) )))
    #cost += 3*np.sum(np.square(net.run(np.linspace(x_0_min - 5, x_0_max + 5 , 2, endpoint=True).reshape(1, 2) )))
    #cost += 5*np.sum(np.square(net.run(np.linspace(x_0_min - 7, x_0_max + 7 , 2, endpoint=True).reshape(1, 2) )))
    return cost

In [8]:
np.sum(np.square(net.run(np.linspace(x_0_min, x_0_max , 2, endpoint=True).reshape(1, 2) )))


Out[8]:
40.72561458837492

In [ ]:
# Optimizer setup: derivative-free Nelder-Mead over the unrolled parameter vector.
iter_number = 250000  # maximum optimizer iterations
#opt = DifferentialEvolution(dim, iter_number, min_element = -5, max_element = 5)
opt = NelderMead(iter_number, min_element=-5, max_element=5)

In [ ]:
# Run the optimization (long-running — see the progress bar in the output below).
#optimisation_result = opt.optimize_stochastic(func=J, grid_func = stochstic_grid1, dim=dim)
optimisation_result = opt.optimize(func=J, dim=dim)


 27%|████████████████████▍                                                      | 67951/250000 [18:48<50:22, 60.24it/s]

In [ ]:
# Plot the inverse cost (1/J) versus iteration number — growth indicates convergence.
cost_list = opt.return_cost_list()
i_list= np.linspace(0, len(cost_list), len(cost_list))
plt.plot(i_list, np.power(cost_list, -1))

In [ ]:
# Take the best simplex vertex found by the optimizer as the final parameter vector.
_, _, best_index = opt.find_points()
optimisation_result = opt.x_points[best_index]



#this_grid = stochstic_grid1()
#print(J(optimisation_result, this_grid))
#print(J(optimisation_result, this_grid))#, this_grid)

In [ ]:
J(optimisation_result)

In [ ]:
number_col = 10  # intended collocation-grid size — currently unused, see note below
net.roll_matrixes(optimisation_result)
#collocation_grid = rand.random((1,200)) * (np.abs(x_0_max) + np.abs(x_0_min)) - np.abs(x_0_min)
#collocation_grid = np.linspace(x_0_min, x_0_max, number_col, endpoint=True).reshape(1, number_col)
# NOTE(review): the grid size here is N (= 1 output neuron), not number_col —
# presumably a leftover from an earlier edit; confirm the intended grid size.
collocation_grid = np.linspace(x_0_min, x_0_max, N, endpoint=True).reshape(1, N)
hamiltonian_matrix = net_images_value(collocation_grid)
bind_matrix = net.run(collocation_grid)

In [ ]:
# Solve the generalized eigenproblem H v = lambda B v on the collocation grid,
# where H = hamiltonian_matrix (images) and B = bind_matrix (raw outputs).
from scipy.linalg import eig
#eigvals, eigvecs = eig(np.matmul(np.transpose(bind_matrix), hamiltonian_matrix), np.matmul(np.transpose(bind_matrix), bind_matrix))
eigvals, eigvecs = eig(hamiltonian_matrix, bind_matrix)
eigvals_norm = np.abs(eigvals)   # moduli of the (possibly complex) eigenvalues
eigvals_real = np.real(eigvals)  # real parts — the physical energy estimates

In [ ]:
# Print the estimated eigenvalues (energies).
for eigval in eigvals:
    print(eigval)
#print(eigvecs[2,:])

In [ ]:
# Visualize the eigenvalues: one horizontal level line plus an 'x' marker per eigenvalue.
x = np.linspace(0, 10, 1000)
i = 0

for i, eigval in enumerate(eigvals, start=1):
    plt.plot(x, eigval + 0 * x)   # constant line at the eigenvalue level
    plt.plot(i + 1, eigval, 'x')  # marker offset horizontally by index

In [ ]:
#eigvecs[i,:]
#net_images_value(np.linspace(x_0_min-1, x_0_max+1, 4).reshape(1, 4)).transpose()

In [ ]:
# Plot the candidate eigenfunctions: each is the net outputs weighted by an eigenvector.
x_obs = np.linspace(x_0_min-1, x_0_max+1, 1000).reshape(1, 1000)
for i in range(N):
    y = (net.run(x_obs)).transpose()*eigvecs[i,:]
    y = np.sum(y,1)
    # Bug fix: plot against the same points the net was evaluated at (x_obs spans
    # [x_0_min-1, x_0_max+1]); the old x-axis spanned [x_0_min, x_0_max] and
    # horizontally squeezed the curve.
    plt.plot(x_obs[0], y)

In [ ]:
# Visualization helpers:
x_observe = np.linspace(-7,7,1000).reshape(1, 1000)  # default plotting grid, slightly wider than the training domain
#---------------------------------------------------------------------------
def show_outputs_conv(x):
    """Plot the convolved (summed) network output over grid x (shape (1, n))."""
    values = net_conv_value(x)
    plt.title('Output:')
    plt.grid(True)
    plt.plot(x[0], values)
    
#---------------------------------------------------------------------------
def show_outputs(x):
    """Plot each of the N raw network outputs over grid x (shape (1, n))."""
    outputs = net.run(x)
    plt.title('Outputs:')
    plt.grid(True)
    for row in outputs[:N]:
        plt.plot(x[0], row)
#---------------------------------------------------------------------------
def show_images(x):
    """Plot the image under H of each network output over grid x (shape (1, n))."""
    images = net_images_value(x)
    plt.title('Images:')
    plt.grid(True)
    for row in images[:N]:
        plt.plot(x[0], row)
#---------------------------------------------------------------------------
def show_images_conv(x):
    """Plot the image under H of the summed network output over grid x (shape (1, n))."""
    values = net_images_conv_value(x)
    plt.title('Output:')
    plt.grid(True)
    plt.plot(x[0], values)
#---------------------------------------------------------------------------
def plot_four(x):
    """2x2 summary figure over grid x (shape (1, n)):
    top row — convolved output and its image under H;
    bottom row — individual outputs and their images.
    """
    outputs = net.run(x)
    images = net_images_value(x)
    conv = net_conv_value(x)
    images_conv = net_images_conv_value(x)

    # Bug fix: the original called plt.figure() (leaking an unused empty figure)
    # and then discarded the axes from plt.subplots by re-creating them with
    # plt.subplot(...). Use the subplots axes directly instead.
    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)

    ax1.set_title('Original net outputs convolution:')
    ax1.plot(x[0], conv, 'x')

    ax2.set_title('Image of net outputs convolution:')
    ax2.plot(x[0], images_conv, 'x')

    ax3.set_title('Original net outputs:')
    for i in range(N):
        ax3.plot(x[0], outputs[i, :])

    ax4.set_title('Image of net outputs:')
    for i in range(N):
        ax4.plot(x[0], images[i, :])

    fig.subplots_adjust(left = 0, bottom = 0, right = 2, top = 2, hspace = 0.2, wspace = 0.2)
#---------------------------------------------------------------------------

In [ ]:
plot_four(np.linspace(-20,20,1000).reshape(1, 1000))