In [1]:
# Required imports.
import sys, os
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tqdm import tqdm_notebook
from IPython.display import clear_output

sys.path.append(os.path.join(sys.path[0], '../../'))
from physlearn.NeuralNet.NeuralNetPro import NeuralNetPro
from physlearn.Optimizer import NelderMead
%matplotlib inline

In [2]:
# Model parameters (dimensionless units)
mass = 1              # particle mass
omega_square = 1      # squared oscillator frequency, omega^2
plank = 1             # Planck constant (hbar in the kinetic term)
sigmoid_ammount = 15  # number of sigmoid neurons in the hidden layer
m = 20                # number of collocation points
N = 7                 # number of network output neurons
x_0_min = -10
x_0_max = 10

x_collocations = np.linspace(x_0_min, x_0_max, m, endpoint=True).reshape(1, m)
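For reference (this only restates the expression built in In [4] below), the operator applied to each network output is the dimensionless harmonic-oscillator Hamiltonian

\hat{H}\psi = -\frac{\hbar^2}{2m}\frac{d^2\psi}{dx^2} + \frac{\omega^2}{2}x^2\psi,

with m = mass, \hbar = plank and \omega^2 = omega_square, all set to 1 above.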

In [3]:
# Build the network
net = NeuralNetPro(-2, 2)
net.add_input_layer(1)
net.add(sigmoid_ammount, tf.sigmoid)
net.add_output_layer(N, net.linear)
net.compile()

net.set_random_matrixes()

# "Вытаскивание" выражений для выходов и свертки выходов в 
net_output = net.return_graph()
net_conv = tf.reduce_sum(net_output, 0)
sess = net.return_session()

def net_conv_value(x):
    return net.calc(net_conv, {net.x : x})

dim = net.return_unroll_dim()
print(dim)


142
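The reported unrolled dimension of 142 is consistent with a fully connected 1 → 15 → 7 architecture with biases in both layers; a quick sanity check of the arithmetic (the layer sizes come from sigmoid_ammount and N above, the bias assumption is inferred from the count itself):

# Hidden layer: 1*15 weights + 15 biases; output layer: 15*7 weights + 7 biases.
expected_dim = (1 * sigmoid_ammount + sigmoid_ammount) + (sigmoid_ammount * N + N)
print(expected_dim)  # 142, matching net.return_unroll_dim()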

In [4]:
# Expression for the image of the network outputs under the action of the Hamiltonian.

first_derivative = tf.gradients(net_output, net.x)[0]

net_images = (-(plank * plank / (2 * mass)) * tf.gradients(first_derivative, net.x)[0]
              + (omega_square / 2) * tf.multiply(tf.square(net.x), net.output))

net_images_conv = tf.reduce_sum(net_images, 0)

def net_images_value(x):
    return net.calc(net_images, {net.x: x})

def net_images_conv_value(x):
    return net.calc(net_images_conv, {net.x: x})

# Linear regression: fit the network's Hamiltonian image by a linear combination of its outputs.
#regression_parameters = numpy.random.uniform(-10, 10, N)

alpha = 1E-1  # learning rate for the regression gradient steps
regression_parameters = tf.get_variable(name='regr_param',
                                        initializer=tf.random_uniform(shape=[N, 1], dtype=tf.double,
                                                                      minval=-2, maxval=2))

# Mean squared residual of the linear fit over the collocation points.
regression_cost = tf.reduce_sum(tf.square(net_images_conv - tf.reduce_sum(regression_parameters * net_output, 0))) / m
regression_grad = tf.gradients(regression_cost, regression_parameters)[0]
update_regression = regression_parameters.assign_sub(alpha * regression_grad)

# Re-initialize the regression coefficients between outer-loop cost evaluations.
regression_reset = regression_parameters.assign(np.random.uniform(-10, 10, N).reshape((N, 1)))

def regression(iters, x):
    for i in range(iters):
        net.calc(update_regression, {net.x: x})
    #print('regr_cost: ', net.calc(regression_cost, {net.x: x}))
    return sess.run(regression_parameters)

init = tf.global_variables_initializer()
sess.run(init)
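Since the fit is linear in regression_parameters, the 3000 gradient steps inside every cost evaluation could in principle be replaced by a closed-form least-squares solve. A minimal sketch of that alternative, assuming net.run(x) returns the (N, m) matrix of raw outputs at the collocation points (as its use in the plotting cell further below suggests):

def regression_closed_form(x):
    phi = net.run(x)                    # design matrix of network outputs, shape (N, m)
    target = net_images_conv_value(x)   # convolved Hamiltonian image, length m
    # Solve phi.T @ c ~= target in the least-squares sense.
    coeffs, *_ = np.linalg.lstsq(phi.T, target.reshape(-1), rcond=None)
    return coeffs.reshape(N, 1)

The gradient-descent loop above remains the reference implementation; this version is only a cross-check of the fitted coefficients.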

In [5]:
# Cost function for the outer optimization:
#J_expression = (1/m) * tf.reduce_sum(tf.square(net_images_conv - tf.reduce_sum(tf.multiply(regression_parameters,net_output), 0)))

def J(params):
    # Roll the flat parameter vector into the network weights, fit the regression
    # coefficients at the collocation points, and return the resulting residual.
    net.roll_matrixes(params)
    regression(3000, x_collocations)
    cost = net.calc(regression_cost, {net.x: x_collocations})
    sess.run(regression_reset)
    return cost
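Before launching the optimizer it is worth exercising the cost once on a random parameter vector (this mirrors the commented-out param_0 / J(param_0) lines in the scratch cells below); each call rolls the parameters into the network, runs the inner regression, and resets the coefficients:

param_0 = np.random.uniform(-1, 1, dim)
print(J(param_0))   # one full cost evaluation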

In [7]:
resss = NelderMead.optimize(J, dim, 10, end_method='max_iter', min_element=-1, max_element=1)


100%|██████████| 10/10 [00:49<00:00,  4.31s/it]

In [8]:
J(resss)


Out[8]:
13.548736636588913
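Ten Nelder-Mead iterations are only a smoke test, so a residual around 13.5 should not be read as a converged result. A longer run with the same call signature (it appears, commented out, in the scratch cell below) would look like this, keeping in mind that at roughly 4 s per iteration it becomes a multi-hour job:

resss = NelderMead.optimize(J, dim, 30000, end_method='max_iter', min_element=-1, max_element=1)
print(J(resss))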

In [ ]:
#param_0 = np.random.uniform(-1, 1, dim)
#bla = net_images_conv - tf.reduce_sum((regression_parameters * net_output), 0)
#bla = tf.square(net_images_conv - tf.reduce_sum((regression_parameters * net_output), 0))

#net.calc(bla, {net.x: x_collocations})

#J(param_0)

#bla2 = net_output
#bla1 = regression_parameters
#bla = tf.reduce_sum(tf.multiply(bla1, bla2),0)

#bla = tf.reduce_sum(tf.square(net_images_conv - tf.reduce_sum(tf.multiply(regression_parameters,net_output), 0)))



#resss = NelderMead.optimize(J, dim, 30000, end_method='max_iter', min_element=-1, max_element=1)
#alpha = 1E-1
#regression(100000, x_collocations)

In [ ]:
#print(net.calc(bla  ,{net.x:x_collocations}))

In [10]:
# Visualization functions:
x_observe = np.linspace(-10,10,1000).reshape(1, 1000)
#---------------------------------------------------------------------------
def show_outputs_conv(x):
    y = net_conv_value(x)
    
    plt.title('Outputs convolution:')
    plt.grid(True)
    plt.plot(x[0], y)
    
#---------------------------------------------------------------------------
def show_outputs(x):
    y = net.run(x)
    plt.title('Outputs:')
    plt.grid(True)
    for i in range(N):
        net_i = y[i,:] 
        plt.plot(x[0], net_i)
#---------------------------------------------------------------------------
def show_images(x):
    y = net_images_value(x)
    plt.title('Images:')
    plt.grid(True)
    for i in range(N):
        net_image_i = y[i,:] 
        plt.plot(x[0], net_image_i)
#---------------------------------------------------------------------------
def show_images_conv(x):
    y = net_images_conv_value(x)
    
    plt.title('Images convolution:')
    plt.grid(True)
    plt.plot(x[0], y)    
#---------------------------------------------------------------------------
def plot_four(x):
    y1 = net.run(x)
    y2 = net_images_value(x)
    y3 = net_conv_value(x)
    y4 = net_images_conv_value(x)
    
    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)

    ax1.set_title('Original net outputs convolution:')
    ax1.plot(x[0], y3, 'blue')

    ax2.set_title('Image of net outputs convolution:')
    ax2.plot(x[0], y4, 'red')

    ax3.set_title('Original net outputs:')
    for i in range(N):
        ax3.plot(x[0], y1[i, :])

    ax4.set_title('Image of net outputs:')
    for i in range(N):
        ax4.plot(x[0], y2[i, :])

    fig.subplots_adjust(left=0, bottom=0, right=2, top=2, hspace=0.2, wspace=0.2)
#---------------------------------------------------------------------------
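The x_observe grid defined at the top of this cell is meant for these helpers; for example, to look at the individual outputs on a dense grid:

show_outputs(x_observe)   # all N raw network outputs on one set of axes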

In [11]:
plot_four(np.linspace(-5,5,1000).reshape(1, 1000))


[2x2 figure: outputs convolution, image of outputs convolution, individual outputs, individual images]

In [ ]:
#with tf.Session() as sess:
#    writer = tf.summary.FileWriter("output", sess.graph)
#    writer.close()