In [53]:
%matplotlib inline

import matplotlib
import autograd.numpy as np
import matplotlib.pyplot as plt
import random
import math
from autograd import grad

def generateChevronData():
    # Sample integer points in a square and label them by the chevron
    # region y <= x <= -y (class 0) versus everything else (class 1).
    xBounds = [-50, 50]
    yBounds = [-50, 50]
    totalPoints = 100
    
    points = []
    targets = []
    
    for i in range(totalPoints):
        x = random.randint(xBounds[0], xBounds[1])
        y = random.randint(yBounds[0], yBounds[1])
        
        # Leading 1 is the bias feature; coordinates are scaled to [-1, 1].
        points.append([1, x / 50.0, y / 50.0])
        targets.append(0 if x >= y and x <= -y else 1)
        
    return np.array(points), np.array(targets)
    
def plotScatter(points):
    # Columns 1 and 2 hold the scaled x and y coordinates (column 0 is the bias).
    xs = [p[1] for p in points]
    ys = [p[2] for p in points]
    
    plt.scatter(xs, ys)

In [62]:
def sigmoid(phi):
    return 1.0/(1.0 + np.exp(-phi))

def MSE(weights):
    # Sum-of-squares loss over the whole dataset (points and targets are globals).
    predictions = logisticPrediction(weights, points)
    return 1.0/2.0 * np.sum(np.power(targets - predictions, 2))

def logisticPrediction(weights, p):
    return np.array([predict(weights, x) for x in p])
    
def predict(weights, i):
    # weights = [m, x0, y0]: a boundary line of slope m through the point (x0, y0).
    # The pre-activation z = -((y0 - y) + m*(x - x0)) is zero exactly on that line.
    return sigmoid(-((weights[2] - i[2]) + weights[0] * (i[1] - weights[1])))
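
The parameterization above is worth spelling out: rather than a raw weight vector, the model stores a slope and an anchor point, so the decision boundary is the line of slope weights[0] through the point (weights[1], weights[2]). As a hedged sketch (boundaryLine is a hypothetical helper, not part of the notebook), the weights convert to slope-intercept form like this:

In [ ]:
def boundaryLine(weights):
    # Setting z = -((y0 - y) + m*(x - x0)) to zero gives y = y0 + m*(x - x0),
    # i.e. slope m and intercept y0 - m*x0.
    m, x0, y0 = weights
    return m, y0 - m * x0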

In [79]:
def computeGradient(weights, example, target):
    # Hand-derived chain rule for E = 0.5 * (target - o)^2 with o = sigmoid(z)
    # and z = -((y0 - y) + m*(x - x0)), where weights = [m, x0, y0].
    prediction = predict(weights, example)
    dE_dO = computeErrorDifferential(prediction, target)
    
    dO_dZ = prediction * (1 - prediction)
    
    dZ_dy = -1                          # dz/dy0
    dZ_dm = -(example[1] - weights[1])  # dz/dm
    dZ_dx = weights[0]                  # dz/dx0
    
    dE_dZ = dE_dO * dO_dZ
    
    grad = np.zeros(3)
    grad[0] = dZ_dm * dE_dZ
    grad[1] = dZ_dx * dE_dZ
    grad[2] = dZ_dy * dE_dZ
    
    return grad

def computeErrorDifferential(prediction, target):
    return -(target - prediction)
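
Since autograd is already imported, the hand-derived chain rule can be sanity-checked against automatic differentiation on a single example. This is a hedged sketch; exampleLoss is a hypothetical helper introduced only for the check:

In [ ]:
def exampleLoss(weights, example, target):
    # Per-example squared error, matching the terms differentiated above.
    return 0.5 * (target - predict(weights, example)) ** 2

w = np.array([0.3, -0.2, 0.1])
ex = np.array([1.0, 0.5, -0.4])
print(np.allclose(computeGradient(w, ex, 1.0),
                  grad(exampleLoss)(w, ex, 1.0)))  # expect True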

In [84]:
def trainBoundaryHunter():
    weights = np.array([0.0, 0.0, 0.0])
    # An earlier version used autograd here (trainingGradient = grad(MSE));
    # the hand-coded gradient below is far faster for this loop.
    
    print("Initial Loss: ", MSE(weights))
    for i in range(10000):
        weights = computeStep(weights)
    
        if i % 1000 == 0:
            print("Loss [i = " + str(i) + "]: " + str(MSE(weights)))
            print(weights)
            
    print("Trained Loss: ", MSE(weights))    
    print("Weights: ", weights)
    return weights

def computeStep(weights, learningRate=0.01):
    # Full-batch gradient descent: accumulate the gradient over every
    # training point, then take one step against it.
    totalG = np.zeros(3)
    for i in range(len(points)):
        totalG += computeGradient(weights, points[i], targets[i])
        
    weights -= totalG * learningRate
    return weights
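
The step above applies the raw gradient sum, so the effective step size scales with the dataset size. A hedged alternative, where computeStepAveraged is a hypothetical variant not used by the run below:

In [ ]:
def computeStepAveraged(weights, learningRate=0.01):
    # Mean gradient keeps the step size independent of len(points).
    totalG = np.zeros(3)
    for p, t in zip(points, targets):
        totalG += computeGradient(weights, p, t)
    return weights - learningRate * totalG / len(points)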

In [85]:
random.seed(1234)
points, targets = generateChevronData()

plt.axis([-1.5, 1.5, -1.5, 1.5])

# Split the points by class so each gets its own scatter colour
c1 = []
c2 = []

for i in range(len(points)):
    if targets[i] == 0:
        c1.append(points[i])
    else:
        c2.append(points[i])

print("Type 0: ", len(c1))
print("Type 1: ", len(c2))
        
plotScatter(c1)
plotScatter(c2)

weights = trainBoundaryHunter()

# Draw the learned boundary, the line of slope weights[0] through
# (weights[1], weights[2]): y = y0 + m*(x - x0), with intercept y0 - m*x0.
bias = weights[2] - weights[0] * weights[1]
Xcoef = weights[0]
plt.plot([-1.0, 1.0], [-Xcoef + bias, Xcoef + bias], 'k-')
plt.scatter(weights[1], weights[2])
plt.gca().set_aspect('equal')

plt.show()


Type 0:  35
Type 1:  65
Initial Loss:  9.20843533436
Loss [i = 0]: 9.00827884794
[-0.00687314  0.         -0.04492183]
Loss [i = 1000]: 7.56235339807
[-0.32439232 -0.05173935 -0.66474739]
Loss [i = 2000]: 7.56235339807
[-0.32439232 -0.05173935 -0.66474739]
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-85-0673c24d044c> in <module>()
     20 plotScatter(c2)
     21 
---> 22 weights = trainBoundaryHunter()
     23 byas = 1 * (weights[2] + weights[0] * weights[1])
     24 Xcoef = 1 * weights[0]

<ipython-input-84-1db8851d971e> in trainBoundaryHunter()
      5     print("Initial Loss: ", MSE(weights))
      6     for i in range(0, 10000):
----> 7         g = trainingGradient(weights) * 0.01
      8 #         weights = computeStep(weights)
      9         weights -= g

[... autograd backward-pass frames in C:\Users\danie\Anaconda3\lib\site-packages\autograd elided ...]
KeyboardInterrupt: 
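
The interrupted run above came from the earlier autograd version of trainBoundaryHunter visible in the frames (trainingGradient = grad(MSE)): MSE as written maps predict over every point in Python, so each backward pass likely walks a node-per-point graph and the loop crawls until interrupted. A hedged sketch of a vectorized loss that autograd can differentiate in one shot (predictAll and vectorMSE are hypothetical helpers, not part of the notebook):

In [ ]:
def predictAll(weights, pts):
    # Same pre-activation as predict(), applied to all rows at once.
    z = -((weights[2] - pts[:, 2]) + weights[0] * (pts[:, 1] - weights[1]))
    return sigmoid(z)

def vectorMSE(weights):
    return 0.5 * np.sum((targets - predictAll(weights, points)) ** 2)

mseGradient = grad(vectorMSE)  # usable as: weights -= 0.01 * mseGradient(weights)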
