In [1]:
%matplotlib inline

import matplotlib
import autograd.numpy as np
import matplotlib.pyplot as plt
import random
import math
from autograd import grad

def generateChevronData():
    xBounds = [-50, 50]
    yBounds = [-50, 50]
    totalPoints = 100
    
    points = []
    targets = []
    
    for i in range(0, totalPoints):
        x = random.randint(xBounds[0], xBounds[1])
        y = random.randint(yBounds[0], yBounds[1])
        
        if x >= y and x <= -y:
            points.append([x/50.0,y/50.0])
            targets.append(0)
        else:
            points.append([x/50.0,y/50.0])
            targets.append(1)
        
    return np.array(points), np.array(targets)
    
def plotScatter(points):
    xs = [x[0] for x in points]
    ys = [y[1] for y in points]
    
    plt.scatter(xs, ys)

In [2]:
def sigmoid(phi):
    return 1.0/(1.0 + np.exp(-phi))

def loss(weights):
    predictions = logisticPrediction(weights, points)
#     print(predictions)
    w = np.full((len(predictions)), np.log(1/2)) # CONSTANT
    r = responsibility(weights, points)
#     print(r)
    return -(1/len(points)) * np.sum( r * ((targets*np.log(predictions) + (1-targets)*np.log(1-predictions))) + (1-r) * w)

def logisticPrediction(weights, p):
    return np.array(list(map(lambda x: predict(weights, x), p))) 
    
def predict(weights, inputs):
    n = np.array([weights[0], weights[1]])
    i = np.array([weights[2] - inputs[0], weights[3] - inputs[1]])
    return sigmoid(np.dot(n, i))

def responsibility(weights, points):
    r = weights[4]
    a = np.array([weights[2], weights[3]])
    
    dif = np.array(list(map(lambda x: x - a, points)))
    s = np.array(list(map(lambda x: np.sum(np.power(x, 2)), dif)))
    d = np.sqrt(s)
#     print(d)
    t = 1-f(d, r)
#     print(t)

    return t

def f(d, r):
    return 1/(1 + np.power(np.e, -(d-r)))
#     return 1/(1 + np.power(np.e, 10*(d-r)))
#     return np.power(np.e, -(1.0/15.0) * np.power(d/r, 2))
#     return np.maximum(d - r, 0)/(np.abs(d - r) + 0.1)

In [ ]:


In [3]:
def trainBoundaryHunter():
    weights = np.array([0.0, 0.0, 0.0, 0.0, 0.3])
    gradient = grad(loss)
    print("Initial Loss: ", loss(weights))
    for i in range(0, 10000):
        g = gradient(weights)
        
        if i % 1000 == 0:
            print("Loss [i = " + str(i) + "]: " + str(loss(weights)))
            print(weights)
            checkGrad(0.00001, 0.0001, weights, g)
        
        weights -= 0.01 * g
        if weights[4] < 0:
            weights[4] = 0
            
    print("Trained Loss: ", loss(weights))    
    print("Weights: ", weights)
    return weights

def checkGrad(pterb, threshold, weights, g):
    grad = np.zeros(len(weights))
    for i in range(0, len(weights)):
        p = np.zeros(len(weights))
        p[i] = pterb
        
        lossBefore = loss(weights)
        lossAfter = loss(weights + p)
        
        grad[i] = (lossAfter - lossBefore)/pterb
        

    return grad

    dif = np.absolute(computedGrad - grad)
    for d in dif:
        if d > threshold:
            print("ERROR")

In [4]:
random.seed(1234)
points, targets = generateChevronData()

plt.axis([-1.5, 1.5, -1.5, 1.5])

# Plot points on graph
c1 = []
c2 = []

for i in range(0, len(points)):
    if targets[i] == 0:
        c1.append(points[i])
    else:
        c2.append(points[i])

print("Type 0: ", len(c1))
print("Type 1: ", len(c2))
        
plotScatter(c1)
plotScatter(c2)

weights = trainBoundaryHunter()

plt.scatter(weights[2], weights[3])

n = np.array([weights[0] * weights[2] + weights[1] * weights[3], 
              -weights[0], 
              -weights[1]])

byas = -1 * n[0]/n[2]
Xcoef = -1 * n[1]/n[2]

x = np.linspace(-1.5, 1.5, 500)
y = np.linspace(-1.5, 1.5, 500)
X, Y = np.meshgrid(x,y)
F = ((X - weights[2]))**2 + ((Y - weights[3]))**2 - weights[4]**2
plt.contour(X,Y,F,[0])

print()
print(n)
print("\nLine")
print("B: " + str(byas))
print("XCoef: " + str(Xcoef))

plt.plot([-1.0, 1.0], [-1*Xcoef + byas, Xcoef + byas], 'k-')
plt.gca().set_aspect('equal')

plt.show()


Type 0:  35
Type 1:  65
Initial Loss:  0.69314718056
Loss [i = 0]: 0.69314718056
[ 0.   0.   0.   0.   0.3]
Loss [i = 1000]: 0.648327860238
[-0.06346076 -0.61882409 -0.03079742 -0.17293024  0.42945971]
Loss [i = 2000]: 0.585123716354
[-0.15753503 -1.24763902 -0.08268921 -0.37628325  0.84522749]
Loss [i = 3000]: 0.507046339607
[-0.26247089 -1.84851508 -0.11660493 -0.4086575   1.4811032 ]
Loss [i = 4000]: 0.433151589652
[-0.3715274  -2.39029153 -0.13735043 -0.41649759  2.13841509]
Loss [i = 5000]: 0.381758517847
[-0.47286105 -2.84592627 -0.14766769 -0.41912897  2.68044739]
Loss [i = 6000]: 0.349412517702
[-0.56181495 -3.21990213 -0.15238183 -0.41857625  3.09786379]
Loss [i = 7000]: 0.328630977595
[-0.639233   -3.52902658 -0.15458226 -0.41712926  3.42268929]
Loss [i = 8000]: 0.314614407426
[-0.70703827 -3.78857372 -0.15562774 -0.41562443  3.6830977 ]
Loss [i = 9000]: 0.304711799258
[-0.76695471 -4.00995689 -0.15610263 -0.41428075  3.898065  ]
Trained Loss:  0.297440510232
Weights:  [-0.82034022 -4.20141146 -0.15627543 -0.41312884  4.07995168]

[ 1.86392325  0.82034022  4.20141146]

Line
B: -0.443642159361
XCoef: -0.195253482644

In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]: