In [2]:
%matplotlib inline

import autograd.numpy as np
import matplotlib.pyplot as plt
import random
from autograd import grad

def generateChevronData():
    # Sample integer points in [-50, 50]^2, scale them into [-1, 1], and
    # label each one by whether it falls inside the chevron-shaped region
    # x >= y and x <= -y. The leading 1 is a bias feature.
    xBounds = [-50, 50]
    yBounds = [-50, 50]
    totalPoints = 100
    
    points = []
    targets = []
    
    for i in range(totalPoints):
        x = random.randint(xBounds[0], xBounds[1])
        y = random.randint(yBounds[0], yBounds[1])
        
        points.append([1, x/50.0, y/50.0])
        targets.append(0 if x >= y and x <= -y else 1)
        
    return np.array(points), np.array(targets)
    
def plotScatter(points):
    # Columns 1 and 2 hold the (x, y) coordinates; column 0 is the bias feature.
    xs = [p[1] for p in points]
    ys = [p[2] for p in points]
    
    plt.scatter(xs, ys)
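
As a quick sanity check of the generator (a sketch, not part of the original notebook; pts and tgts are throwaway names):

In [ ]:
pts, tgts = generateChevronData()
print(pts.shape, tgts.shape)  # expected: (100, 3) (100,)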

In [3]:
def sigmoid(phi):
    return 1.0/(1.0 + np.exp(-phi))

def MSE(weights):
    # Half sum-of-squared-errors over the whole data set; relies on the
    # globals `points` and `targets` defined in the last cell.
    predictions = logisticPrediction(weights, points)
    return 1.0/2.0 * np.sum(np.power((targets - predictions), 2))

def logisticPrediction(weights, p):
    return np.array(list(map(lambda x: predict(weights, x), p)))

def predict(weights, inputs):
    # weights[0:2] is a normal vector n and weights[2:4] an anchor point c;
    # the prediction is sigmoid(n . (c - x)) for input point x.
    n = np.array([weights[0], weights[1]])
    i = np.array([weights[2] - inputs[1], weights[3] - inputs[2]])
    return sigmoid(np.dot(n, i))
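
A minimal geometric check (a sketch; assumes the cell above has run): when the input coincides with the anchor point (weights[2], weights[3]), the dot product is zero, so the prediction should be exactly 0.5.

In [ ]:
w = np.array([1.0, 2.0, 0.3, -0.4])
print(predict(w, [1, 0.3, -0.4]))  # expected: 0.5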

In [4]:
def computeGradient(weights, example, target):
    # Hand-derived gradient of the per-example squared error
    # E = 1/2 * (target - prediction)^2, with z = n . (c - x).
    prediction = predict(weights, example)
    dE_dO = computeErrorDifferential(prediction, target)
    
    dO_dZ = prediction * (1 - prediction)  # derivative of the sigmoid
    
    # Partial derivatives of z with respect to each weight.
    dZ_dW0 = weights[2] - example[1]
    dZ_dW1 = weights[3] - example[2]
    dZ_dW2 = weights[0]
    dZ_dW3 = weights[1]
    
    dE_dZ = dE_dO * dO_dZ
    
    gradient = np.zeros(len(weights))
    gradient[0] = dZ_dW0 * dE_dZ
    gradient[1] = dZ_dW1 * dE_dZ
    gradient[2] = dZ_dW2 * dE_dZ
    gradient[3] = dZ_dW3 * dE_dZ
    
    return gradient

def computeErrorDifferential(prediction, target):
    return -(target - prediction)
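
Because grad is already imported from autograd, the hand-derived gradient can be cross-checked against automatic differentiation of the same per-example loss (a sketch; exampleLoss and autoGradient are names introduced here, not part of the notebook):

In [ ]:
def exampleLoss(weights, example, target):
    return 0.5 * (target - predict(weights, example)) ** 2

autoGradient = grad(exampleLoss)  # differentiates w.r.t. weights

w = np.array([0.5, -1.0, 0.1, 0.2])
x = np.array([1.0, 0.3, -0.7])
print(computeGradient(w, x, 1))
print(autoGradient(w, x, 1))  # should agree to numerical precision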

In [5]:
def trainBoundaryHunter():
    weights = np.array([0.0, 0.0, 0.0, 0.0])
    
    print("Initial Loss: ", MSE(weights))
    for i in range(10000):
        if i % 1000 == 0:
            print()
            print("Loss Before: " + str(MSE(weights)))

        weights = computeStep(weights)
    
        if i % 1000 == 0:
            print("Loss After [i = " + str(i) + "]: " + str(MSE(weights)))
            print(weights)
            
    print("Trained Loss: ", MSE(weights))    
    print("Weights: ", weights)
    return weights

def computeStep(weights):
    # One full-batch gradient-descent step with a fixed learning rate of 0.01.
    totalG = np.zeros(len(weights))
    for i in range(len(points)):
        totalG += computeGradient(weights, points[i], targets[i])
        
    weights -= totalG * 0.01
    return weights
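
An equivalent full-batch step can be written with grad applied to MSE directly (a sketch; computeStepAutograd is a name introduced here and is not used by trainBoundaryHunter):

In [ ]:
trainingGradient = grad(MSE)  # gradient of the full-batch loss w.r.t. weights

def computeStepAutograd(weights, learningRate=0.01):
    return weights - learningRate * trainingGradient(weights)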

In [7]:
random.seed(1234)
points, targets = generateChevronData()

plt.axis([-1.5, 1.5, -1.5, 1.5])

# Split the points by class for plotting
c1 = []
c2 = []

for i in range(len(points)):
    if targets[i] == 0:
        c1.append(points[i])
    else:
        c2.append(points[i])

print("Type 0: ", len(c1))
print("Type 1: ", len(c2))
        
plotScatter(c1)
plotScatter(c2)

weights = trainBoundaryHunter()

# Mark the learned anchor point (c_x, c_y)
plt.scatter(weights[2], weights[3])

# Expand n . (c - x) = 0 into line coefficients [constant, x, y]
n = np.array([weights[0] * weights[2] + weights[1] * weights[3], 
              -weights[0], 
              -weights[1]])

bias = -1 * n[0]/n[2]
Xcoef = -1 * n[1]/n[2]

# Draw the decision boundary y = Xcoef * x + bias over x in [-1, 1]
plt.plot([-1.0, 1.0], [-1*Xcoef + bias, Xcoef + bias], 'k-')
plt.gca().set_aspect('equal')

plt.show()


Type 0:  35
Type 1:  65
Initial Loss:  12.5

Loss Before: 12.5
Loss After [i = 0]: 12.2977334642
[-0.00395 -0.04505  0.       0.     ]

Loss Before: 4.01012790129
Loss After [i = 1000]: 4.00997060309
[-1.20492727 -4.91751613 -0.04069226 -0.40726579]

Loss Before: 3.93636202462
Loss After [i = 2000]: 3.93632991717
[-1.46402692 -5.71274349 -0.04051702 -0.40656406]

Loss Before: 3.91734039145
Loss After [i = 3000]: 3.91732973732
[-1.59270233 -6.12357861 -0.04049718 -0.40648712]

Loss Before: 3.91041411405
Loss After [i = 4000]: 3.91040981763
[-1.66916998 -6.37297355 -0.04049606 -0.40648281]

Loss Before: 3.90748115692
Loss After [i = 5000]: 3.90747923667
[-1.71844498 -6.53574047 -0.04049808 -0.40649052]

Loss Before: 3.90613150986
Loss After [i = 6000]: 3.90613059723
[-1.75165442 -6.6463408  -0.04050036 -0.40649917]

Loss Before: 3.90547797572
Loss After [i = 7000]: 3.90547752455
[-1.77466128 -6.72338484 -0.04050229 -0.40650651]

Loss Before: 3.90515083764
Loss After [i = 8000]: 3.90515060865
[-1.79088847 -6.77793206 -0.04050382 -0.40651227]

Loss Before: 3.90498336529
Loss After [i = 9000]: 3.90498324694
[-1.80247349 -6.81697877 -0.04050497 -0.40651665]
Trained Loss:  3.90489628601
Weights:  [-1.8108071  -6.84512015 -0.04050584 -0.40651993]
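
One quick check of the plotted boundary (a sketch to run after the cell above): the line y = Xcoef * x + bias should pass through the learned anchor point (weights[2], weights[3]).

In [ ]:
print(np.isclose(Xcoef * weights[2] + bias, weights[3]))  # expected: True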
