In [1]:
%matplotlib inline

import matplotlib
import autograd.numpy as np
import matplotlib.pyplot as plt
import random
import math
from autograd import grad

def generateChevronData():
    xBounds = [-50, 50]
    yBounds = [-50, 50]
    totalPoints = 100
    
    points = []
    targets = []
    
    for i in range(0, totalPoints):
        x = random.randint(xBounds[0], xBounds[1])
        y = random.randint(yBounds[0], yBounds[1])
        
        # Label 0 inside the downward chevron (y <= x and y <= -x), label 1 elsewhere.
        # The leading 1 is the bias input; coordinates are scaled into [-1, 1].
        if x >= y and x <= -y:
            points.append([1, x/50.0, y/50.0])
            targets.append(0)
        else:
            points.append([1, x/50.0, y/50.0])
            targets.append(1)
        
    return np.array(points), np.array(targets)
    
def plotScatter(points):
    xs = [p[1] for p in points]
    ys = [p[2] for p in points]
    
    plt.scatter(xs, ys)

In [2]:
def sigmoid(phi):
    return 1.0/(1.0 + np.exp(-phi))

def MSE(weights):
    # Half the sum of squared errors over the module-level points and targets
    # (despite the name, the sum is not divided by the number of points).
    predictions = logisticPrediction(weights, points)
    return 1.0/2.0 * np.sum(np.power((targets - predictions), 2))

def logisticPrediction(weights, p):
    return predict(weights, p)
    
def predict(weights, inputs):
    return sigmoid(np.dot(inputs, weights))
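
A quick sanity check (a sketch, using only the functions above): with all-zero weights every prediction is sigmoid(0) = 0.5, and since each target is 0 or 1 every squared error is 0.25, so the loss over the 100 generated points is 0.5 * 100 * 0.25 = 12.5, matching the "Initial Loss" printed by the training run below.

In [ ]:
# Zero weights give sigmoid(0) = 0.5 for every point, so the
# half-sum-of-squares loss over 100 points is 0.5 * 100 * 0.25 = 12.5.
print(sigmoid(0.0))        # 0.5
print(0.5 * 100 * 0.25)    # 12.5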

In [3]:
def computeGradient(weights, example, target):
    # Hand-derived gradient of the per-example squared error via the chain rule.
    prediction = predict(weights, example)
    dE_dO = computeErrorDifferential(prediction, target)
    
    # Derivative of the sigmoid output with respect to its pre-activation.
    dO_dZ = prediction * (1 - prediction)
    
    # Derivative of the pre-activation with respect to each weight is the input.
    dZ_dW0 = example[0]
    dZ_dW1 = example[1]
    dZ_dW2 = example[2]
    
    dE_dZ = dE_dO * dO_dZ
    
    # Named gradient to avoid reusing the name of autograd's grad imported above.
    gradient = np.zeros(3)
    gradient[0] = dZ_dW0 * dE_dZ
    gradient[1] = dZ_dW1 * dE_dZ
    gradient[2] = dZ_dW2 * dE_dZ
    
    return gradient

def computeErrorDifferential(prediction, target):
    return -(target - prediction)
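
Since autograd is already imported, the hand-derived chain-rule gradient above can be checked against an automatic one. A minimal sketch, assuming a per-example loss of the same half-squared-error form as MSE; exampleLoss and autoGradient are illustrative names.

In [ ]:
# Compare the manual gradient with autograd's, on one arbitrary example.
def exampleLoss(weights, example, target):
    return 0.5 * (target - predict(weights, example)) ** 2

autoGradient = grad(exampleLoss)  # differentiates with respect to the first argument

w = np.array([0.1, -0.2, 0.3])
x = np.array([1.0, 0.5, -0.4])
print(computeGradient(w, x, 1))
print(autoGradient(w, x, 1))      # should agree with the line above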

In [8]:
def trainBoundaryHunter():
    weights = np.array([0.0, 0.0, 0.0])
    
    print("Initial Loss: ", MSE(weights))
    for i in range(0, 50000):
        if i % 1000 == 0:
            print()
            print("Loss Before: " + str(MSE(weights)))

        weights = computeStep(weights)
    
        if i % 1000 == 0:
            print("Loss After [i = " + str(i) + "]: " + str(MSE(weights)))
            print(weights)
            
    print("Trained Loss: ", MSE(weights))    
    print("Weights: ", weights)
    return weights

def computeStep(weights):
    # Full-batch gradient descent: accumulate the gradient over every point,
    # then take a step with a fixed learning rate of 0.01.
    totalG = np.zeros(3)
    for i in range(0, len(points)):
        totalG += computeGradient(weights, points[i], targets[i])
    
    weights -= totalG * 0.01
    return weights
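
The per-point loop in computeStep can also be written as a single matrix product. A minimal sketch of an equivalent full-batch step, assuming the same fixed learning rate of 0.01; computeStepVectorised is an illustrative name and is not used by the training run below.

In [ ]:
# Vectorised form of computeStep: the summed per-example gradients
# equal points.T dotted with the per-example error terms.
def computeStepVectorised(weights, learningRate=0.01):
    predictions = predict(weights, points)                        # shape (N,)
    dE_dZ = -(targets - predictions) * predictions * (1 - predictions)
    totalG = np.dot(points.T, dE_dZ)                              # shape (3,)
    return weights - learningRate * totalG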

In [9]:
random.seed(1234)
points, targets = generateChevronData()

plt.axis([-1.5, 1.5, -1.5, 1.5])

# Plot points on graph
c1 = []
c2 = []

for i in range(0, len(points)):
    if targets[i] == 0:
        c1.append(points[i])
    else:
        c2.append(points[i])

print("Type 0: ", len(c1))
print("Type 1: ", len(c2))
        
plotScatter(c1)
plotScatter(c2)

weights = trainBoundaryHunter()

# The decision boundary is where w0 + w1*x + w2*y = 0, i.e. y = -(w0/w2) - (w1/w2)*x.
bias = -1 * weights[0]/weights[2]
Xcoef = -1 * weights[1]/weights[2]

print()
print(weights)
print("\nLine")
print("B: " + str(bias))
print("XCoef: " + str(Xcoef))

plt.plot([-1.0, 1.0], [-1*Xcoef + bias, Xcoef + bias], 'k-')
plt.gca().set_aspect('equal')

plt.show()


Type 0:  35
Type 1:  65
Initial Loss:  12.5

Loss Before: 12.5
Loss After [i = 0]: 12.1601229227
[ 0.0375   0.00395  0.04505]

Loss Before: 4.0327324113
Loss After [i = 1000]: 4.03255300491
[ 1.96195027  1.16639143  4.75820348]

Loss Before: 3.94645855567
Loss After [i = 2000]: 3.94641942338
[ 2.30367262  1.42195545  5.557882  ]

Loss Before: 3.92265656135
Loss After [i = 3000]: 3.92264271993
[ 2.48647718  1.55383724  5.98422667]

Loss Before: 3.91340892145
Loss After [i = 4000]: 3.91340296967
[ 2.60103167  1.63491233  6.25137564]

Loss Before: 3.90922846367
Loss After [i = 5000]: 3.90922562267
[ 2.67822751  1.68891743  6.43147213]

Loss Before: 3.90717137096
Loss After [i = 6000]: 3.90716992584
[ 2.73244227  1.72655771  6.55800519]

Loss Before: 3.90610395777
Loss After [i = 7000]: 3.90610319142
[ 2.7715221   1.75354803  6.64924495]

Loss Before: 3.90553012685
Loss After [i = 8000]: 3.90552970868
[ 2.80018792  1.77327215  6.71618858]

Loss Before: 3.90521396202
Loss After [i = 9000]: 3.90521372917
[ 2.82147191  1.78787735  6.76590324]

Loss Before: 3.90503667925
Loss After [i = 10000]: 3.90503654769
[ 2.83741288  1.79879426  6.80314348]

Loss Before: 3.90493599371
Loss After [i = 11000]: 3.90493591857
[ 2.84942792  1.80701031  6.83121549]

Loss Before: 3.90487826914
Loss After [i = 12000]: 3.90487822588
[ 2.85852635  1.81322497  6.85247497]

Loss Before: 3.90484494149
Loss After [i = 13000]: 3.90484491644
[ 2.8654402   1.81794347  6.86863105]

Loss Before: 3.90482559783
Loss After [i = 14000]: 3.90482558325
[ 2.87070779  1.82153613  6.8809408 ]

Loss Before: 3.90481432581
Loss After [i = 15000]: 3.9048143173
[ 2.87472903  1.82427742  6.89033837]

Loss Before: 3.90480773748
Loss After [i = 16000]: 3.9048077325
[ 2.87780343  1.82637247  6.8975234 ]

Loss Before: 3.90480387785
Loss After [i = 17000]: 3.90480387493
[ 2.88015662  1.82797558  6.90302305]

Loss Before: 3.90480161281
Loss After [i = 18000]: 3.90480161109
[ 2.88195935  1.82920343  6.90723628]

Loss Before: 3.90480028178
Loss After [i = 19000]: 3.90480028077
[ 2.88334129  1.83014452  6.91046613]

Loss Before: 3.90479949882
Loss After [i = 20000]: 3.90479949822
[ 2.88440121  1.83086623  6.91294337]

Loss Before: 3.90479903788
Loss After [i = 21000]: 3.90479903753
[ 2.88521446  1.83141992  6.91484411]

Loss Before: 3.90479876637
Loss After [i = 22000]: 3.90479876616
[ 2.88583863  1.83184485  6.91630294]

Loss Before: 3.90479860636
Loss After [i = 23000]: 3.90479860624
[ 2.88631779  1.83217104  6.91742286]

Loss Before: 3.90479851203
Loss After [i = 24000]: 3.90479851196
[ 2.8866857   1.83242148  6.91828275]

Loss Before: 3.9047984564
Loss After [i = 25000]: 3.90479845636
[ 2.88696822  1.83261379  6.91894308]

Loss Before: 3.90479842359
Loss After [i = 26000]: 3.90479842357
[ 2.8871852   1.83276148  6.91945021]

Loss Before: 3.90479840424
Loss After [i = 27000]: 3.90479840423
[ 2.88735185  1.83287491  6.91983971]

Loss Before: 3.90479839282
Loss After [i = 28000]: 3.90479839281
[ 2.88747985  1.83296204  6.92013889]

Loss Before: 3.90479838608
Loss After [i = 29000]: 3.90479838608
[ 2.88757818  1.83302897  6.92036871]

Loss Before: 3.90479838211
Loss After [i = 30000]: 3.90479838211
[ 2.8876537   1.83308037  6.92054524]

Loss Before: 3.90479837976
Loss After [i = 31000]: 3.90479837976
[ 2.88771173  1.83311987  6.92068085]

Loss Before: 3.90479837838
Loss After [i = 32000]: 3.90479837838
[ 2.8877563   1.8331502   6.92078503]

Loss Before: 3.90479837756
Loss After [i = 33000]: 3.90479837756
[ 2.88779054  1.83317351  6.92086506]

Loss Before: 3.90479837708
Loss After [i = 34000]: 3.90479837708
[ 2.88781684  1.83319141  6.92092654]

Loss Before: 3.90479837679
Loss After [i = 35000]: 3.90479837679
[ 2.88783705  1.83320517  6.92097377]

Loss Before: 3.90479837663
Loss After [i = 36000]: 3.90479837663
[ 2.88785258  1.83321573  6.92101006]

Loss Before: 3.90479837653
Loss After [i = 37000]: 3.90479837653
[ 2.8878645   1.83322385  6.92103793]

Loss Before: 3.90479837647
Loss After [i = 38000]: 3.90479837647
[ 2.88787367  1.83323009  6.92105935]

Loss Before: 3.90479837643
Loss After [i = 39000]: 3.90479837643
[ 2.8878807   1.83323488  6.9210758 ]

Loss Before: 3.90479837641
Loss After [i = 40000]: 3.90479837641
[ 2.88788611  1.83323856  6.92108844]

Loss Before: 3.9047983764
Loss After [i = 41000]: 3.9047983764
[ 2.88789027  1.83324139  6.92109815]

Loss Before: 3.90479837639
Loss After [i = 42000]: 3.90479837639
[ 2.88789346  1.83324356  6.92110561]

Loss Before: 3.90479837639
Loss After [i = 43000]: 3.90479837639
[ 2.88789591  1.83324523  6.92111134]

Loss Before: 3.90479837639
Loss After [i = 44000]: 3.90479837639
[ 2.8878978   1.83324651  6.92111575]

Loss Before: 3.90479837639
Loss After [i = 45000]: 3.90479837639
[ 2.88789924  1.83324749  6.92111913]

Loss Before: 3.90479837639
Loss After [i = 46000]: 3.90479837639
[ 2.88790035  1.83324825  6.92112173]

Loss Before: 3.90479837638
Loss After [i = 47000]: 3.90479837638
[ 2.88790121  1.83324883  6.92112372]

Loss Before: 3.90479837638
Loss After [i = 48000]: 3.90479837638
[ 2.88790186  1.83324928  6.92112526]

Loss Before: 3.90479837638
Loss After [i = 49000]: 3.90479837638
[ 2.88790237  1.83324962  6.92112644]
Trained Loss:  3.90479837638
Weights:  [ 2.88790276  1.83324989  6.92112734]

[ 2.88790276  1.83324989  6.92112734]

Line
B: -0.417259011989
XCoef: -0.264877352374
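
A possible follow-up check (a sketch, not part of the run above): classification accuracy of the trained weights, thresholding the sigmoid output at 0.5.

In [ ]:
# Fraction of points whose thresholded prediction matches its target.
predictedLabels = (predict(weights, points) >= 0.5).astype(int)
print("Accuracy: ", np.mean(predictedLabels == targets))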
