In [5]:
import math
import random
import numpy as np
from matplotlib import pyplot as plt
%matplotlib inline
# Functions that evaluate a single Gaussian and a composition of Gaussians
# at a set of points for the given parameters; both return a NumPy array.
# Each parameter pair is [sigma, mu]:
def gaussian(omg, prm):
    return (1/(prm[0]*math.sqrt(2*math.pi)))*np.exp(-(omg-prm[1])**2/(2*prm[0]**2))
def gaussComp(omg, prm):
    a = np.zeros(omg.size)
    for gauss in prm:
        a = a + gaussian(omg, gauss)
    return a
# Initial parameters:
omgMin = -15
omgMax = 25
paramsOriginal = [[2, 5]]
# Build the reference data set:
omgOriginal = np.linspace(omgMin, omgMax, 100000, endpoint=False)
yOriginal = gaussComp(omgOriginal, paramsOriginal)
plt.plot(omgOriginal, yOriginal, 'green')
Out[5]:
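A quick sanity check, added here as a sketch (not part of the original run): a normalised Gaussian should integrate to approximately 1, and the interval [-15, 25) covers many standard deviations around mu = 5.
In [ ]:
# Sketch: numerically integrate the reference Gaussian with the trapezoid rule.
area = np.trapz(gaussian(omgOriginal, paramsOriginal[0]), omgOriginal)
print(area)  # expected: close to 1.0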
In [6]:
# Build the training data set:
m = 180
omgTrain = np.linspace(omgMin, omgMax, m, endpoint=False)
yTrain = gaussComp(omgTrain, paramsOriginal)
plt.plot(omgTrain, yTrain, 'x')
Out[6]:
In [7]:
# Cost functions and their gradients with respect to [sigma, mu]:
# Batch (full training set)
def JB(omg, y, prm):
    Jv = (0.5/len(omg))*(gaussComp(omg, prm) - y)**2
    return np.sum(Jv)
def gradientB(omg, y, prm):
    prm = prm.tolist()
    grad = np.zeros((len(prm), 2))
    gc = gaussComp(omg, prm)
    j = 0
    for gauss in prm:
        g = gaussian(omg, gauss)
        # dJ/d(sigma):
        grad[j][0] = (1/len(omg))*np.sum((gc-y)*((-1/gauss[0])*g + g*((omg-gauss[1])**2/gauss[0]**3)))
        # dJ/d(mu):
        grad[j][1] = (1/len(omg))*np.sum((gc-y)*g*(omg-gauss[1])*(1/gauss[0]**2))
        j = j + 1
    return grad
# Stochastic (single training point)
def JS(omg, y, prm):
    Jv = (1/2)*(gaussComp(omg, prm) - y)**2
    return np.sum(Jv)
def gradientS(omg, y, prm):
    grad = np.zeros((len(prm), 2))
    gc = gaussComp(omg, prm)
    j = 0
    for gauss in prm:
        g = gaussian(omg, gauss)
        grad[j][0] = np.sum((gc-y)*((-1/gauss[0])*g + g*((omg-gauss[1])**2/gauss[0]**3)))
        grad[j][1] = np.sum((gc-y)*g*(omg-gauss[1])*(1/gauss[0]**2))
        j = j + 1
    return grad
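The analytic derivatives above are easy to get wrong, so a finite-difference check is worth a few lines. This is a sketch added here, not part of the original notebook; gradCheck and eps are names introduced for illustration.
In [ ]:
# Sketch: compare gradientB against central finite differences of JB.
def gradCheck(omg, y, prm, eps=1e-6):
    prm = np.array(prm, dtype=float)
    numGrad = np.zeros_like(prm)
    for j in range(prm.shape[0]):
        for k in range(prm.shape[1]):
            plus = prm.copy()
            minus = prm.copy()
            plus[j][k] += eps
            minus[j][k] -= eps
            numGrad[j][k] = (JB(omg, y, plus) - JB(omg, y, minus))/(2*eps)
    return numGrad
p0 = np.array([[0.5, 0.0]])
print(gradientB(omgTrain, yTrain, p0))
print(gradCheck(omgTrain, yTrain, p0))  # should agree to several decimals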
In [8]:
def plotFour(omg, yOrig, yPred, I, J):
    error = abs(yOrig - yPred)
    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
    ax1.set_title('Original Function:')
    ax1.plot(omg, yOrig, 'blue')
    ax2.set_title('Fitted gaussComp() Function:')
    ax2.plot(omg, yPred, 'red')
    ax3.set_title('Cost Function:')
    ax3.plot(I, J)
    ax4.set_title('Error:')
    ax4.plot(omg, error)
    fig.subplots_adjust(left=0, bottom=0, right=2, top=2, hspace=0.2, wspace=0.2)
In [29]:
N = 7000
alpha = 5
initialParams = [[0.5,0]]
In [33]:
# Batch Gradient Descent: every update uses the full training set
paramsB = np.array(initialParams)
Jb = []
prmHist = []
for i in range(N):
    Jb.append(JB(omgTrain, yTrain, paramsB))
    paramsB = paramsB - alpha*gradientB(omgTrain, yTrain, paramsB)
    prmHist.append(paramsB)
print(paramsB)
# Plot the results:
Ib = list(range(N))
yBatch = gaussComp(omgOriginal, paramsB.tolist())
plotFour(omgOriginal, yOriginal, yBatch, Ib, Jb)
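With a fixed iteration count the loop may run long past convergence. Below is a sketch added here (the tolerance tol and the reuse of N and alpha are choices made for this example, not from the original): stop once the cost change per iteration falls below tol.
In [ ]:
# Sketch: tolerance-based stopping for batch descent.
paramsB2 = np.array(initialParams)
prevJ = np.inf
tol = 1e-12
for i in range(N):
    curJ = JB(omgTrain, yTrain, paramsB2)
    if abs(prevJ - curJ) < tol:
        break
    prevJ = curJ
    paramsB2 = paramsB2 - alpha*gradientB(omgTrain, yTrain, paramsB2)
print(i, paramsB2)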
In [46]:
# Element-wise check: gaussian() also accepts vectors of sigmas and mus,
# evaluating each point against its own parameter pair.
gaussian(np.array([1,2,3,4,5,6,7,8,9,10,11]), [np.array([1,2,3,4,5,6,7,8,9,10,11]), np.array([-5,-4,-3,-2,-1,0,1,2,3,4,5])])
Out[46]:
In [51]:
from mpl_toolkits.mplot3d import Axes3D
# Cost of a single Gaussian as a function of sigma (s) and mu (o),
# evaluated over the training set:
def JForPlot(s, o):
    J = 0
    for i in range(len(omgTrain)):
        J = J + ((1/(s*math.sqrt(2*math.pi)))*np.exp(-(omgTrain[i]-o)**2/(2*s**2)) - yTrain[i])**2
    J = J*0.5/len(omgTrain)
    return J
def makeData():
    s = np.arange(0.05, 10, 0.05)  # start sigma above zero to avoid division by zero
    o = np.arange(-5, 5, 0.05)
    sgrid, ogrid = np.meshgrid(s, o)
    Jgrid = JForPlot(sgrid, ogrid)
    return sgrid, ogrid, Jgrid
s, o, J = makeData()
fig = plt.figure()
axes = fig.add_subplot(projection='3d')
axes.plot_surface(s, o, J)
plt.show()
Out[51]:
In [36]:
# Test cost in log space, evaluated over the recorded parameter history:
def JTB(omg, y, prm):
    cost = (np.log(gaussComp(omg, prm)))**2 - (np.log(y))**2
    return (1/len(omg))*np.sum(cost)
Itest = list(range(len(prmHist)))
Jtest = [JTB(omgTrain, yTrain, p) for p in prmHist]
plt.plot(Itest, Jtest)
Out[36]:
In [11]:
N = 100000
alpha = 0.1
#initialParams = [[2,-1], [1, 25]]
In [12]:
# Stochastic Gradient Descent: every update uses one randomly chosen training point
paramsS = np.array(initialParams)
Js = []
for i in range(N):
    K = random.randint(0, omgTrain.size - 1)
    omgK = np.array(omgTrain[K])
    yK = np.array(yTrain[K])
    Js.append(JB(omgTrain, yTrain, paramsS))
    paramsS = paramsS - alpha*gradientS(omgK, yK, paramsS)
print(paramsS)
# Plots:
Is = list(range(N))
yStochastic = gaussComp(omgOriginal, paramsS.tolist())
plotFour(omgOriginal, yOriginal, yStochastic, Is, Js)
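A sketch added here (the 1/t decay schedule and the constant 1000 are assumptions, not from the original notebook): SGD settles closer to the minimum when the step size decays over the iterations instead of staying fixed.
In [ ]:
# Sketch: stochastic descent with a decaying step size.
paramsS2 = np.array(initialParams)
for i in range(N):
    K = random.randint(0, omgTrain.size - 1)
    step = alpha/(1 + i/1000)  # assumed decay schedule
    paramsS2 = paramsS2 - step*gradientS(np.array(omgTrain[K]), np.array(yTrain[K]), paramsS2)
print(paramsS2)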
In [13]:
N = 38000
alpha = 0.5
#initialParams = [[],[]]
L = 4
In [14]:
# Minibatch Gradient Descent: every update uses a window of L consecutive points
paramsMB = np.array(initialParams)
JMB = []
for i in range(N):
    #K = [random.randint(0, omgTrain.size-L) for l in range(L)]
    k = random.randint(0, omgTrain.size - L)
    K = [k + l for l in range(L)]
    omgK = omgTrain[K]
    yK = yTrain[K]
    JMB.append(JB(omgTrain, yTrain, paramsMB))
    paramsMB = paramsMB - alpha*gradientB(omgK, yK, paramsMB)
print(paramsMB)
# Plots:
IMB = list(range(N))
yMiniBatch = gaussComp(omgOriginal, paramsMB.tolist())
plotFour(omgOriginal, yOriginal, yMiniBatch, IMB, JMB)
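An alternative sketched here (not in the original notebook): sampling the L indices uniformly without replacement via np.random.choice decorrelates the minibatches, whereas a contiguous window always feeds the gradient with neighbouring points.
In [ ]:
# Sketch: random (non-contiguous) minibatch sampling.
K = np.random.choice(omgTrain.size, size=L, replace=False)
omgK, yK = omgTrain[K], yTrain[K]
print(gradientB(omgK, yK, np.array(initialParams)))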
In [15]:
fig, ax1 = plt.subplots(1, 1)
ax1.set_title('All methods and original:')
ax1.plot(omgOriginal, yOriginal, 'b--')
ax1.plot(omgOriginal, yBatch, 'g')
ax1.plot(omgOriginal, yStochastic, 'r')
ax1.plot(omgOriginal, yMiniBatch, 'm')
fig.subplots_adjust(left=0, bottom=0, right=2, top=2, hspace=0.2, wspace=0.2)
In [16]:
# Compare the training data and the batch fit on a log scale
# (np.log instead of math.log, which does not accept arrays):
lny = np.log(yTrain)
lnh = np.log(yBatch)
plt.plot(omgTrain, lny, 'x')
plt.plot(omgOriginal, lnh)