In [1]:
%matplotlib inline

import matplotlib
import autograd.numpy as np
import matplotlib.pyplot as plt
import random
import math
from autograd import grad

def generateChevronData():
    xBounds = [-50, 50]
    yBounds = [-50, 50]
    totalPoints = 100
    
    points = []
    targets = []
    
    for i in range(0, totalPoints):
        x = random.randint(xBounds[0], xBounds[1])
        y = random.randint(yBounds[0], yBounds[1])
        
        if x >= y and x <= -y:
            points.append([x/50.0,y/50.0])
            targets.append(0)
        else:
            points.append([x/50.0,y/50.0])
            targets.append(1)
        
    return np.array(points), np.array(targets)
    
def plotScatter(points):
    xs = [x[0] for x in points]
    ys = [y[1] for y in points]
    
    plt.scatter(xs, ys)

In [2]:
def sigmoid(phi):
    return 1.0/(1.0 + np.exp(-phi))

def loss(weights):
    B = 1.35
    C = 2.65
    
    predictions = logisticPrediction(weights, points)
    
    r = responsibility(weights, points)
    
    pRight = np.power(predictions, targets) * np.power(1-predictions, 1-targets)
    pWrong = np.power(predictions, 1-targets) * np.power(1-predictions, targets)
    
    return np.sum(r * (B*pRight - C*pWrong))

def logisticPrediction(weights, p):
    return np.array(list(map(lambda x: predict(weights, x), p))) 
    
def predict(weights, inputs):
    n = np.array([weights[0], weights[1]])
    i = np.array([weights[2] - inputs[0], weights[3] - inputs[1]])
    return sigmoid(np.dot(n, i))

def responsibility(weights, points):
    r = np.absolute(weights[4])
    a = np.array([weights[2], weights[3]])
    
    dif = np.array(list(map(lambda x: x - a, points)))
    s = np.array(list(map(lambda x: np.sum(np.power(x, 2)), dif)))
    d = np.sqrt(s)

    t = 1 - f(d, r)
    return t

def f(d, r):
    t = np.zeros(len(d))
    for i in range(0, len(d)):
        if d[i] > r:
            t[i] = 1
            
    return t
#     return 1/(1 + np.power(np.e, -(d-r)))

In [ ]:


In [5]:
def trainBoundaryHunter():
    weights = np.array([0.0, 0.0, 0.0, 0.0, 2])#-0.5 + np.random.rand(5)#
    weights[4] = np.absolute(weights[4])
    gradient = grad(loss)
    print("Initial Loss: ", loss(weights))
    for i in range(0, 7000):
        g = gradient(weights)
        
        if i % 1000 == 0:
            print("Loss [i = " + str(i) + "]: " + str(loss(weights)))
            print(weights)
            checkGrad(0.0001, 0.0001, weights, g)
        
        dr_dL = np.sign(loss(weights))
        if dr_dL == 0:
            dr_dL = 1
            
        #if dr_dL == 1:
        #    dr_dL = 5
        
        g[4] = dr_dL
#         weights = computeStep(weights)
        weights += 0.001 * g
        if weights[4] < 0:
            weights[4] = 0
            
    print("Trained Loss: ", loss(weights))    
    print("Weights: ", weights)
    return weights

def checkGrad(pterb, threshold, weights, g):
    grad = np.zeros(len(weights))
    for i in range(0, len(weights)):
        p = np.zeros(len(weights))
        p[i] = pterb
        
        lossBefore = loss(weights)
        lossAfter = loss(weights + p)
        
        grad[i] = (lossAfter - lossBefore)/pterb
        

    return grad

    dif = np.absolute(computedGrad - grad)
    for d in dif:
        if d > threshold:
            print("ERROR")

In [6]:
random.seed(1234)
points, targets = generateChevronData()

plt.axis([-1.5, 1.5, -1.5, 1.5])

# Plot points on graph
c1 = []
c2 = []

for i in range(0, len(points)):
    if targets[i] == 0:
        c1.append(points[i])
    else:
        c2.append(points[i])

print("Type 0: ", len(c1))
print("Type 1: ", len(c2))
        
plotScatter(c1)
plotScatter(c2)

random.seed(4332)
weights = trainBoundaryHunter()

# plt.scatter(weights[1], weights[2])
plt.scatter(weights[2], weights[3])

n = np.array([weights[0] * weights[2] + weights[1] * weights[3], 
              -weights[0], 
              -weights[1]])

byas = -1 * n[0]/n[2]
Xcoef = -1 * n[1]/n[2]

x = np.linspace(-1.5, 1.5, 500)
y = np.linspace(-1.5, 1.5, 500)
X, Y = np.meshgrid(x,y)
F = ((X - weights[2]))**2 + ((Y - weights[3]))**2 - weights[4]**2
plt.contour(X,Y,F,[0])

print()
print(n)
print("\nLine")
print("B: " + str(byas))
print("XCoef: " + str(Xcoef))

plt.plot([-1.0, 1.0], [-1*Xcoef + byas, Xcoef + byas], 'k-')
plt.gca().set_aspect('equal')

plt.show()


Type 0:  35
Type 1:  65
Initial Loss:  -65.0
Loss [i = 0]: -65.0
[ 0.  0.  0.  0.  2.]
Loss [i = 1000]: 77.543316546
[-1.08463289 -8.03775846 -0.01976835 -0.39932392  2.926     ]
Loss [i = 2000]: 81.7819278674
[ -1.26366499 -10.06141865  -0.02007334  -0.40170305   3.926     ]
Loss [i = 3000]: 83.537886024
[ -1.42584    -11.37089058  -0.02047632  -0.40492442   4.926     ]
Loss [i = 4000]: 84.5642065858
[ -1.5847237  -12.36940252  -0.02086187  -0.40796975   5.926     ]
Loss [i = 5000]: 85.2661758107
[ -1.73725595 -13.19231374  -0.02120887  -0.41064253   6.926     ]
Loss [i = 6000]: 85.791436855
[ -1.88084634 -13.90220553  -0.02151177  -0.41291227   7.926     ]
Trained Loss:  86.2075965625
Weights:  [ -2.01439766 -14.53304407  -0.02177184  -0.41481157   8.926     ]

[  6.07233193   2.01439766  14.53304407]

Line
B: -0.4178293204
XCoef: -0.13860810211

In [ ]:


In [ ]:


In [ ]:


In [ ]:


In [ ]: