In [1]:
import math
import random
import numpy as np
from matplotlib import mlab
from matplotlib import pylab as plt
%matplotlib inline


#Functions that evaluate a single Gaussian and a composition (sum) of Gaussians at a set of points for given parameters; both return a NumPy array:
def gaussian(omg, prm):
    """Evaluate a normalized Gaussian at the points `omg`.

    omg : scalar or numpy array of evaluation points.
    prm : pair [sigma, mu] — standard deviation and mean.
    Returns a numpy array (or scalar) of Gaussian values.
    """
    sigma, mu = prm[0], prm[1]
    norm = 1 / (sigma * math.sqrt(2 * math.pi))
    return norm * np.exp(-(omg - mu) ** 2 / (2 * sigma ** 2))


def gaussComp(omg, prm):
    """Sum of Gaussians: evaluate every [sigma, mu] pair in `prm` at the
    points `omg` and accumulate the results into one numpy array."""
    total = np.zeros(omg.size)
    for single in prm:
        total = total + gaussian(omg, single)
    return total

# Initial parameters: domain bounds and one [sigma, mu] pair per Gaussian.
omgMin = -15
omgMax = 40
paramsOriginal = [[1, 5], [1.5,15], [3, 10]]

# Build the reference ("ground truth") data set on a dense grid:
omgOriginal = np.linspace(omgMin, omgMax, 100000, endpoint = False)
yOriginal = gaussComp(omgOriginal, paramsOriginal)

plt.plot (omgOriginal, yOriginal,'green')


Out[1]:
[<matplotlib.lines.Line2D at 0x184dd568da0>]

In [2]:
# Build the training data set: same composition sampled on a sparser grid of m points.
m = 180
omgTrain = np.linspace(omgMin, omgMax, m, endpoint = False)
yTrain = gaussComp(omgTrain, paramsOriginal)

plt.plot(omgTrain,yTrain, 'x')


Out[2]:
[<matplotlib.lines.Line2D at 0x5fcf350>]

In [3]:
#Cost functions and their gradients:
#Batch

def JB(omg, y, prm):
    """Batch cost: half the mean squared error of the Gaussian
    composition `prm` against targets `y` over the points `omg`."""
    residual = gaussComp(omg, prm) - y
    return np.sum((0.5/len(omg)) * residual ** 2)

def gradientB(omg, y, prm):
    """Gradient of the batch cost JB with respect to each Gaussian's [sigma, mu].

    omg, y : numpy arrays of sample points and target values (equal length).
    prm    : sequence of [sigma, mu] pairs — accepts a plain list OR an ndarray
             (the original required an ndarray: it called `prm.tolist()` and
             crashed on a list input).
    Returns an (n_gaussians, 2) array: column 0 = dJ/d sigma, column 1 = dJ/d mu.
    """
    prm = np.asarray(prm).tolist()  # normalize to a list of [sigma, mu] pairs
    grad = np.zeros((len(prm), 2))
    gc = gaussComp(omg, prm)        # current model prediction (loop-invariant)
    residual = gc - y               # loop-invariant as well
    inv_m = 1/len(omg)              # 1/m averaging factor of JB
    for j, gauss in enumerate(prm):
        sigma, mu = gauss[0], gauss[1]
        g = gaussian(omg, gauss)
        # d gaussian/d sigma = g * ((omg-mu)^2/sigma^3 - 1/sigma)
        grad[j][0] = inv_m*np.sum(residual*((-1/sigma)*g + g*((omg-mu)**2/sigma**3)))
        # d gaussian/d mu = g * (omg-mu)/sigma^2
        grad[j][1] = inv_m*np.sum(residual*g*(omg-mu)*(1/(sigma**2)))
    return grad

#Stochastic
def JS(omg, y, prm):
    """Stochastic cost: half the squared error (no 1/m averaging),
    intended for a single sample or a small subset of points."""
    residual = gaussComp(omg, prm) - y
    return np.sum((1/2) * residual ** 2)

def gradientS(omg, y, prm):
    """Gradient of the stochastic cost JS with respect to each Gaussian's
    [sigma, mu]; same formulas as gradientB but without the 1/m factor."""
    grad = np.zeros((len(prm), 2))
    gc = gaussComp(omg, prm)
    for idx, gauss in enumerate(prm):
        sigma = gauss[0]
        mu = gauss[1]
        g = gaussian(omg, gauss)
        residual = gc - y
        grad[idx][0] = np.sum(residual*((-1/sigma)*g + g*((omg-mu)**2/sigma**3)))
        grad[idx][1] = np.sum(residual*g*(omg-mu)*(1/(sigma**2)))
    return grad

In [4]:
def plotFour(omg, yOrig, yPred, I, J):
    """2x2 summary figure: original curve, fitted curve, cost history, abs error.

    omg   : x-axis points for the curves.
    yOrig : reference values; yPred : fitted values (same length as omg).
    I, J  : iteration indices and cost values for the cost-history panel.

    Fix: the original called plt.figure() AND plt.subplots(), leaking an empty
    figure (the stray `<matplotlib.figure.Figure at ...>` outputs), and then
    discarded the subplots axes by re-creating them with plt.subplot(22x).
    One subplots() call provides both the figure and the four axes.
    """
    error = abs(yOrig - yPred)
    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)

    ax1.set_title('Original Function:')
    ax1.plot(omg, yOrig, 'blue')

    ax2.set_title('Fitted gaussComp() Function:')
    ax2.plot(omg, yPred,'red')

    ax3.set_title('Cost Function:')
    ax3.plot(I, J)

    ax4.set_title('Error:')
    ax4.plot(omg, error)

    fig.subplots_adjust(left = 0, bottom = 0, right = 2, top = 2, hspace = 0.2, wspace = 0.2)

In [5]:
# Hyperparameters for the batch gradient-descent run below:
N = 70000  # number of iterations
alpha = 1  # learning rate
# Starting guesses [sigma, mu] for each of the three Gaussians:
initialParams = [[0.5,-1], [5, 12], [1, 20]]

In [6]:
#Batch Gradient Descent
paramsB = np.array(initialParams)
Jb = []  # cost history, one entry per iteration
Ib = []  # iteration indices (x-axis for the cost plot)

for i in range(N):
    # Record the full-batch cost, then take one gradient step on ALL samples.
    Jb.append(JB(omgTrain, yTrain, paramsB))
    paramsB = paramsB - alpha*gradientB(omgTrain, yTrain, paramsB)
    


print(paramsB)

# Plot the results:
for i in range(N):
    Ib.append(i)
yBatch = gaussComp(omgOriginal, paramsB.tolist())
plotFour(omgOriginal, yOriginal, yBatch, Ib, Jb)


[[  1.00250648   5.00028928]
 [  3.0773761    9.96039879]
 [  1.50881756  14.9875732 ]]
<matplotlib.figure.Figure at 0x5fd7470>

In [7]:
# Hyperparameters for the stochastic gradient-descent run below:
N = 200000   # number of iterations (one random sample per step)
alpha = 0.5  # learning rate
#initialParams = [[2,-1], [1, 25]]

In [8]:
#Stochastic Gradient Descent
# NOTE(review): no random seed is set, so the exact trajectory differs between runs.
paramsS = np.array(initialParams)
Js = []  # cost history
Is = []  # iteration indices

for i in range(N):
    # Pick one random training sample and step on its gradient alone.
    K = random.randint(0, omgTrain.size - 1)
    omgK = np.array(omgTrain[K])
    yK = np.array(yTrain[K])
    # Cost is still tracked on the FULL training set (JB) so curves stay comparable.
    Js.append(JB(omgTrain, yTrain, paramsS))
    paramsS = paramsS - alpha*gradientS(omgK, yK, paramsS)
print(paramsS)

# Plot the results:
for i in range(N):
    Is.append(i)
yStochastic = gaussComp(omgOriginal, paramsS.tolist())
plotFour(omgOriginal, yOriginal, yStochastic, Is, Js)


[[  1.00016095   4.99991538]
 [  3.00333039   9.99810403]
 [  1.50036289  14.99947516]]
<matplotlib.figure.Figure at 0x815b0b0>

In [26]:
# Hyperparameters for the mini-batch gradient-descent run below:
N = 180000   # number of iterations
alpha = 0.4  # learning rate
#initialParams = [[],[]]
L = 4        # mini-batch size (consecutive samples per step)

In [27]:
#Minibatch Gradient Descent
paramsMB = np.array(initialParams)
JMB = []  # cost history
IMB = []  # iteration indices

for i in range(N):
    #K = [random.randint(0, omgTrain.size-L) for l in range(L)]
    # Pick a random CONTIGUOUS window of L consecutive training samples.
    k = random.randint(0, omgTrain.size-L)
    K = [k+l for l in range(L)]
    omgK = omgTrain[K]
    yK = yTrain[K]
    # Cost is tracked on the full training set (JB) so curves stay comparable.
    JMB.append(JB(omgTrain, yTrain, paramsMB))
    paramsMB = paramsMB - alpha*gradientB(omgK, yK, paramsMB)
print(paramsMB)

# Plot the results:
for i in range(N):
    IMB.append(i)
yMiniBatch = gaussComp(omgOriginal, paramsMB.tolist())
plotFour(omgOriginal, yOriginal, yMiniBatch, IMB, JMB)


[[  1.00272603   4.99981225]
 [  3.06656078   9.92689245]
 [  1.50584537  14.98534013]]
<matplotlib.figure.Figure at 0x676a470>

In [28]:
# Overlay all three fitted curves on the original for visual comparison.
# Fix: the original called plt.figure() before plt.subplots(), leaking an
# empty figure (the stray `<matplotlib.figure.Figure at ...>` output), and
# then redundantly re-created the axes with plt.subplot(111).
fig, ax1 = plt.subplots(1, 1)

ax1.set_title('All methods and original:')
ax1.plot(omgOriginal, yOriginal, 'b--', )
ax1.plot(omgOriginal, yBatch, 'g')
ax1.plot(omgOriginal, yStochastic, 'r')
ax1.plot(omgOriginal, yMiniBatch, 'm')
fig.subplots_adjust(left = 0, bottom = 0, right = 2, top = 2, hspace = 0.2, wspace = 0.2)


<matplotlib.figure.Figure at 0x9e1ef70>