In [123]:
import numpy as np
import matplotlib.pyplot as plt
import theano
import theano.tensor as T
%matplotlib inline
In [124]:
# Define the Theano symbolic variables:
x = T.fvector('x')    # evaluation points
x0 = T.fvector('x0')  # Gaussian centers
s = T.fvector('s')    # Gaussian widths
y = T.fvector('y')    # target values
alpha = T.dscalar('alpha')  # learning rate; a float scalar (lscalar would truncate fractional rates to 0)
# Build a Theano expression for the sum of Gaussians via scan:
gaussList, updates = theano.scan(
    fn=lambda s, x0, prior_result, x: prior_result + (1 / s) * T.exp(-(0.5 * s ** (-2)) * (x - x0) ** 2),
    outputs_info=T.zeros_like(x),
    non_sequences=x,
    sequences=[s, x0]
)
gaussSummExp = gaussList[-1]  # accumulated sum after the last (s, x0) pair
gaussSumm = theano.function(
    inputs=[x, s, x0],
    outputs=gaussSummExp,
    allow_input_downcast=True
)
# Initial parameters:
x_Min = -15
x_Max = 15
s_Original = np.array([1, 0.5, 1])
x0_Original = np.array([-5, 0, 5])
# Ground-truth data:
x_Original = np.linspace(x_Min, x_Max, 10000, endpoint=False)
y_Original = gaussSumm(x_Original, s_Original, x0_Original)
plt.plot(x_Original, y_Original, color='green')
Out[124]:
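The scan above accumulates one unnormalized Gaussian per (s, x0) pair, so gaussSumm(x, s, x0) evaluates

$$f(x) \;=\; \sum_i \frac{1}{s_i}\,\exp\!\left(-\frac{(x - x_{0,i})^2}{2 s_i^2}\right),$$

i.e. the usual $1/\sqrt{2\pi}$ normalization is dropped, which only rescales the curve being fitted.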
In [125]:
# Training set:
m_Train = 200
m = T.lscalar('m')  # symbolic sample count, used in the cost expression below
x_Train = np.linspace(x_Min, x_Max, m_Train, endpoint=False)
y_Train = gaussSumm(x_Train, s_Original, x0_Original)
plt.plot(x_Train, y_Train, 'x')
Out[125]:
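Since scan-based code is easy to get subtly wrong, it is worth cross-checking gaussSumm against a direct NumPy implementation of the same sum. A minimal sanity sketch, using only names defined above:

# Cross-check the Theano scan result against plain NumPy:
y_np = sum((1.0 / s_i) * np.exp(-(x_Train - x0_i) ** 2 / (2.0 * s_i ** 2))
           for s_i, x0_i in zip(s_Original, x0_Original))
print(np.allclose(y_Train, y_np, atol=1e-4))  # expected: True (float32 precision)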
In [126]:
def plotFour(omg, yOrig, yPred, I, J_hist):
    error = abs(yOrig - yPred)
    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(12, 8))
    ax1.set_title('Original Function:')
    ax1.plot(omg, yOrig, color='blue')
    ax2.set_title('Fitted gaussSumm() Function:')
    ax2.plot(omg, yPred, color='red')
    ax3.set_title('Cost Function:')
    ax3.plot(I, J_hist)
    ax4.set_title('Error:')
    ax4.plot(omg, error)
    fig.subplots_adjust(hspace=0.2, wspace=0.2)
In [127]:
# Build the cost-function expression (mean squared error over m samples):
jExprs = T.sum((gaussSummExp - y) ** 2) / (2 * m)
J = theano.function(
    inputs=[x, y, m, s, x0],
    outputs=jExprs,
    allow_input_downcast=True
)
# Gradients of the cost function w.r.t. the width and center vectors.
# Note: the gradients are taken w.r.t. the *input* vectors s and x0, so each
# training call must be fed the current values of the shared parameters.
g_s = T.grad(jExprs, s)
g_x0 = T.grad(jExprs, x0)
initial_s = np.array([0.1, 2., 0.7])
initial_x0 = np.array([-6., 1., 6.])
# Define the trainable shared parameters of the Gaussian sum:
s_t = theano.shared(initial_s)
x0_t = theano.shared(initial_x0)
# Gradient-descent update rules:
updates = [(s_t, s_t - alpha * g_s),
           (x0_t, x0_t - alpha * g_x0)]
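In formulas, this cell implements the squared-error cost

$$J(s, x_0) \;=\; \frac{1}{2m}\sum_{j=1}^{m}\bigl(f(x^{(j)}) - y^{(j)}\bigr)^2$$

together with the plain gradient-descent updates

$$s \leftarrow s - \alpha\,\frac{\partial J}{\partial s}, \qquad x_0 \leftarrow x_0 - \alpha\,\frac{\partial J}{\partial x_0}.$$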
In [128]:
# Plot the model at the initial parameter guess:
x_T = np.linspace(x_Min, x_Max, 100000, endpoint=False)
y_T = gaussSumm(x_T, s_t.get_value(), x0_t.get_value())
plt.plot(x_T, y_T)
Out[128]:
In [129]:
# Theano function that returns the current cost and, as a side effect,
# applies the gradient-descent updates to the shared parameters:
trainBatch = theano.function(
    inputs=[x, y, m, alpha, s, x0],
    outputs=jExprs,
    updates=updates,
    allow_input_downcast=True
)
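A single call to trainBatch performs one gradient-descent step: it returns the cost at the parameter values passed in and then applies the updates to s_t and x0_t. A minimal sketch of one step, using only names defined above:

# One descent step; the returned cost corresponds to the pre-update parameters.
cost = trainBatch(x_Train, y_Train, m_Train, 1.0,
                  s_t.get_value(), x0_t.get_value())
print(cost, s_t.get_value(), x0_t.get_value())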
In [130]:
def batch_Descent(x_Train, y_Train, N, alpha_batch, initial_s, initial_x0):
    # Reset the shared parameters to the starting guess:
    s_t.set_value(initial_s)
    x0_t.set_value(initial_x0)
    J_l = []
    I = np.arange(N)
    for i in range(N):
        # Each call updates s_t and x0_t in place and returns the cost:
        J_l.append(trainBatch(x_Train, y_Train, len(x_Train), alpha_batch,
                              s_t.get_value(), x0_t.get_value()))
    y_Pred = gaussSumm(x_Original, s_t.get_value(), x0_t.get_value())
    plotFour(x_Original, y_Original, y_Pred, I, J_l)
    return [s_t.get_value(), x0_t.get_value()]
In [131]:
initial_s = np.array([0.1, 2., 0.7])
initial_x0 = np.array([-6., 1., 6.])
N_B = 1000
alpha_batch = 1.0
prmB = batch_Descent(x_Train, y_Train, N_B, alpha_batch, initial_s, initial_x0)
print(prmB)
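As a final sanity check, the fitted parameters returned by batch_Descent can be fed back into the compiled cost function J; after convergence the training cost should be close to zero:

# Evaluate the cost at the fitted parameters:
s_fit, x0_fit = prmB
print(J(x_Train, y_Train, m_Train, s_fit, x0_fit))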