In [513]:
import math
import random
import numpy as np
from matplotlib import mlab
from matplotlib import pylab as plt
%matplotlib inline
#Functions evaluating a single gaussian and a composition (sum) of gaussians at a set of points for given parameters; both return a NumPy array:
def gaussian(omg, prm):
    """Evaluate a normalized Gaussian at the points `omg`.

    omg : NumPy array of evaluation points.
    prm : pair [sigma, mu] — standard deviation and mean.
    Returns a NumPy array of Gaussian pdf values.
    """
    sigma, mu = prm[0], prm[1]
    norm = 1 / (sigma * math.sqrt(2 * math.pi))
    return norm * np.exp(-(omg - mu) ** 2 / (2 * sigma ** 2))
def gaussComp(omg, prm):
    """Sum of several Gaussians evaluated at the points `omg`.

    omg : NumPy array of evaluation points.
    prm : iterable of [sigma, mu] parameter pairs, one per Gaussian.
    Returns a NumPy array with the composite value at each point.
    """
    total = np.zeros(omg.size)
    for params in prm:
        total = total + gaussian(omg, params)
    return total
#Initial parameters:
omgMin = -10
omgMax = 40
# Ground-truth mixture: each entry is [sigma, mu].
paramsOriginal = [[1, 0], [1.5,20], [3, 10]]
#Build the reference ("ground truth") data set and plot it:
omgOriginal = np.linspace(omgMin, omgMax, 100000, endpoint = False)
yOriginal = gaussComp(omgOriginal, paramsOriginal)
plt.plot (omgOriginal, yOriginal,'green')
Out[513]:
In [514]:
#Build the training data set: m evenly spaced samples of the reference curve.
m = 150
omgTrain = np.linspace(omgMin, omgMax, m, endpoint = False)
yTrain = gaussComp(omgTrain, paramsOriginal)
plt.plot(omgTrain,yTrain, 'x')
Out[514]:
In [515]:
#Cost functions and their gradients:
#Batch
def JB(omg, y, prm):
    """Batch cost: half the mean squared residual over all samples."""
    residual = gaussComp(omg, prm) - y
    return np.sum((0.5 / len(omg)) * residual ** 2)
def gradientB(omg, y, prm):
    """Batch gradient of JB with respect to every Gaussian's parameters.

    omg : NumPy array of sample points.
    y   : target values at those points.
    prm : array-like of [sigma, mu] rows (NumPy array or list of pairs).
    Returns an (n_gaussians, 2) array: column 0 = dJ/d sigma, column 1 = dJ/d mu.
    """
    # Accept plain lists as well as NumPy arrays (original required .tolist()).
    prm = np.asarray(prm).tolist()
    grad = np.zeros((len(prm), 2))
    # The residual is identical for every Gaussian — compute it once, not per row.
    residual = gaussComp(omg, prm) - y
    # Scale by the actual sample count instead of the global `m`, so the
    # function stays correct for data sets of any size.
    scale = 1 / len(omg)
    for j, gauss in enumerate(prm):
        sigma, mu = gauss[0], gauss[1]
        g = gaussian(omg, gauss)
        # d/d sigma of the Gaussian, times the residual.
        dsigma = (-1 / sigma) * g + g * ((omg - mu) ** 2 / sigma ** 3)
        grad[j][0] = scale * np.sum(residual * dsigma)
        # d/d mu of the Gaussian, times the residual.
        grad[j][1] = scale * np.sum(residual * g * (omg - mu) * (1 / sigma ** 2))
    return grad
#Stochastic
def JS(omg, y, prm):
    """Per-sample (stochastic) cost: half the squared residual, summed."""
    residual = gaussComp(omg, prm) - y
    return np.sum(0.5 * residual ** 2)
def gradientS(omg, y, prm):
    """Gradient of JS (no 1/m scaling) with respect to every Gaussian's parameters.

    Returns an (n_gaussians, 2) array: column 0 = dJ/d sigma, column 1 = dJ/d mu.
    """
    grad = np.zeros((len(prm), 2))
    for j, gauss in enumerate(prm):
        sigma, mu = gauss[0], gauss[1]
        residual = gaussComp(omg, prm) - y
        g = gaussian(omg, gauss)
        grad[j][0] = np.sum(residual * ((-1 / sigma) * g + g * ((omg - mu) ** 2 / sigma ** 3)))
        grad[j][1] = np.sum(residual * g * (omg - mu) * (1 / sigma ** 2))
    return grad
In [516]:
def plotFour(omg, yOrig, yPred, I, J):
    """Plot a 2x2 summary of a fit: original curve, fitted curve,
    cost-vs-iteration history, and absolute error.

    omg   : x-axis sample points.
    yOrig : reference values.
    yPred : fitted values.
    I, J  : iteration indices and corresponding cost values.
    """
    error = abs(yOrig - yPred)
    # Use the axes returned by subplots directly; the original created a stray
    # empty figure with plt.figure() and then recreated each axis via plt.subplot.
    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
    ax1.set_title('Original Function:')
    ax1.plot(omg, yOrig, 'blue')
    ax2.set_title('Fitted gaussComp() Function:')
    ax2.plot(omg, yPred, 'red')
    ax3.set_title('Cost Function:')
    ax3.plot(I, J)
    ax4.set_title('Error:')
    ax4.plot(omg, error)
    fig.subplots_adjust(left = 0, bottom = 0, right = 2, top = 2, hspace = 0.2, wspace = 0.2)
In [517]:
# Gradient-descent hyperparameters.
N = 70000
# Learning rate.
alpha = 1
# Starting guess for the three Gaussians, [sigma, mu] each.
initialParams = [[2,-1], [1, 25], [5, 12]]
In [518]:
#Batch Gradient Descent
paramsB = np.array(initialParams)
Jb = []  # cost history
I = []   # iteration indices
for i in range(N):
    I.append(i)
    Jb.append(JB(omgTrain, yTrain, paramsB))
    # Full-batch step: gradient over the whole training set.
    paramsB = paramsB - alpha*gradientB(omgTrain, yTrain, paramsB)
print(paramsB)
#Plot the results:
yBatch = gaussComp(omgOriginal, paramsB.tolist())
errorB = yOriginal - yBatch
plotFour(omgOriginal, yOriginal, yBatch, I, Jb)
In [519]:
#Alternative hyperparameters for the stochastic run (uncomment to use):
#N = 25000
#alpha = 2
#initialParams = [[2,-1], [1, 25]]
In [520]:
#Stochastic Gradient Descent
paramsS = np.array(initialParams)
Js = []  # cost history
Is = []  # iteration indices
for i in range(N):
    # Pick one random training sample per iteration.
    K = random.randint(0, omgTrain.size - 1)
    omgK = np.array(omgTrain[K])
    yK = np.array(yTrain[K])
    Is.append(i)
    # NOTE(review): the full-batch cost JB is tracked here (smooth curve),
    # not the per-sample cost JS — confirm this is intentional.
    Js.append(JB(omgTrain, yTrain, paramsS))
    # Step on the gradient of the single sampled point only.
    paramsS = paramsS - alpha*gradientS(omgK, yK, paramsS)
print(paramsS)
#Plots:
yStochastic = gaussComp(omgOriginal, paramsS.tolist())
plotFour(omgOriginal, yOriginal, yStochastic, Is, Js)
In [521]:
#TODO: Minibatch Gradient Descent — not implemented yet.
In [522]:
#Compare both fitted curves against the original in a single panel.
# The original created a stray empty figure with plt.figure() and a redundant
# plt.subplot(111); use the axes from plt.subplots directly and add a legend
# so the three lines are identifiable.
fig, ax1 = plt.subplots(1, 1)
ax1.set_title('All methods and original:')
ax1.plot(omgOriginal, yOriginal, 'b--', label='original')
ax1.plot(omgOriginal, yBatch, 'g', label='batch GD')
ax1.plot(omgOriginal, yStochastic, 'r', label='stochastic GD')
ax1.legend()
fig.subplots_adjust(left = 0, bottom = 0, right = 2, top = 2, hspace = 0.2, wspace = 0.2)