In [2]:
%matplotlib inline
import matplotlib
import autograd.numpy as np
import matplotlib.pyplot as plt
import random
import math
from autograd import grad

def generateChevronData():
    # Sample integer points uniformly from a 100x100 grid and label them:
    # class 0 is the downward-pointing wedge (x >= y and x <= -y),
    # class 1 is everything else. Coordinates are scaled into [-1, 1].
    xBounds = [-50, 50]
    yBounds = [-50, 50]
    totalPoints = 100

    points = []
    targets = []
    for i in range(0, totalPoints):
        x = random.randint(xBounds[0], xBounds[1])
        y = random.randint(yBounds[0], yBounds[1])

        if x >= y and x <= -y:
            points.append([x/50.0, y/50.0])
            targets.append(0)
        else:
            points.append([x/50.0, y/50.0])
            targets.append(1)

    return np.array(points), np.array(targets)

def plotScatter(points):
    xs = [p[0] for p in points]
    ys = [p[1] for p in points]
    plt.scatter(xs, ys)
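The labelling condition `x >= y and x <= -y` is equivalent to y <= -|x|, so class 0 is exactly the downward-pointing wedge of the square and class 1 is its complement. A minimal sanity check of that equivalence (my sketch, not a cell from the original notebook):

for x, y in [(0, -1), (0.5, -0.8), (0, 1), (-0.6, 0.1)]:
    chevron = x >= y and x <= -y   # the condition used above
    wedge = y <= -abs(x)           # closed-form description of the wedge
    assert chevron == wedge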
In [3]:
def sigmoid(phi):
    return 1.0/(1.0 + np.exp(-phi))

def loss(weights):
    # Responsibility-weighted cross-entropy over the module-level
    # points/targets: points the hunter does not claim contribute little.
    predictions = logisticPrediction(weights, points)
    # w = np.full(len(predictions), np.log(1/2))  # CONSTANT (unused)
    r = responsibility(weights, points)
    return -(1/len(points)) * np.sum(r * (targets*np.log(predictions) + (1-targets)*np.log(1-predictions)))

def logisticPrediction(weights, p):
    return np.array(list(map(lambda x: predict(weights, x), p)))

def predict(weights, inputs):
    # Linear boundary through the centre (weights[2], weights[3]) with
    # normal vector (weights[0], weights[1]).
    n = np.array([weights[0], weights[1]])
    i = np.array([weights[2] - inputs[0], weights[3] - inputs[1]])
    return sigmoid(np.dot(n, i))

def responsibility(weights, points):
    # Soft membership in the circle of radius weights[4] centred at
    # (weights[2], weights[3]): high inside, decaying with distance d.
    r = weights[4]
    a = np.array([weights[2], weights[3]])
    dif = np.array(list(map(lambda x: x - a, points)))
    s = np.array(list(map(lambda x: np.sum(np.power(x, 2)), dif)))
    d = np.sqrt(s)
    return 1 - f(d, r)

def f(d, r):
    # Sigmoidal step in the distance d, switching on around d = r.
    return 1/(1 + np.power(np.e, -(d-r)))
    # Alternatives tried:
    # return 1/(1 + np.power(np.e, 10*(d-r)))
    # return np.power(np.e, -(1.0/15.0) * np.power(d/r, 2))
    # return np.maximum(d - r, 0)/(np.abs(d - r) + 0.1)
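To get a feel for the gate, the sketch below (mine, not part of the notebook) evaluates the responsibility 1 - f(d, r) at a few distances for the default radius r = 0.5, both for the unit-slope sigmoid used above and for the commented-out 10*(d - r) variant. The default is a very gentle gate that never fully saturates on this data; the variant switches sharply at d = r, which is presumably why both were worth trying.

import math

def gate(d, r, k=1.0):
    # responsibility = 1 - sigmoid(k*(d - r))
    return 1 - 1/(1 + math.exp(-k*(d - r)))

r = 0.5
for d in [0.0, 0.25, 0.5, 1.0, 2.0]:
    print("d = %.2f  soft: %.3f  sharp: %.3f" % (d, gate(d, r), gate(d, r, k=10.0)))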
In [4]:
def trainBoundaryHunter():
    # weights = [n_x, n_y, centre_x, centre_y, radius]
    # weights = np.array([0.0, 0.0, 0.0, 0.0, 0.3])
    weights = np.random.rand(5)
    weights[4] = 0.5
    gradient = grad(loss)
    print("Initial Loss: ", loss(weights))
    for i in range(0, 10000):
        g = gradient(weights)
        if i % 1000 == 0:
            print("Loss [i = " + str(i) + "]: " + str(loss(weights)))
            print(weights)
            checkGrad(0.00001, 0.0001, weights, g)

        # Replace the radius gradient with a sign-flipped numeric
        # estimate, so the descent step below ascends in the radius
        # direction and the claimed region is pushed to grow.
        dL_dr = numericGrad(0.1, weights)[4]
        g[4] = -dL_dr

        weights -= 0.01 * g

    print("Trained Loss: ", loss(weights))
    print("Weights: ", weights)
    return weights

def checkGrad(perturb, threshold, weights, g):
    # Flag any coordinate where autograd and the numeric estimate
    # disagree by more than threshold.
    numeric = numericGrad(perturb, weights)
    dif = np.absolute(g - numeric)
    for d in dif:
        if d > threshold:
            print("ERROR")

def numericGrad(perturb, weights):
    # Forward-difference estimate of the loss gradient.
    grad = np.zeros(len(weights))
    for i in range(0, len(weights)):
        p = np.zeros(len(weights))
        p[i] = perturb
        lossBefore = loss(weights)
        lossAfter = loss(weights + p)
        grad[i] = (lossAfter - lossBefore)/perturb
    return grad
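numericGrad uses a forward difference, whose truncation error is O(perturb). If checkGrad ever flags spurious ERRORs near the threshold, a central difference is the usual remedy: its error is O(perturb**2) at the cost of one extra loss evaluation per coordinate. A sketch under the same conventions as numericGrad (my addition, not used above):

def centralGrad(perturb, weights):
    # Central-difference estimate of the loss gradient; agrees with
    # autograd more tightly than the forward difference for the same
    # step size.
    grad = np.zeros(len(weights))
    for i in range(0, len(weights)):
        p = np.zeros(len(weights))
        p[i] = perturb
        grad[i] = (loss(weights + p) - loss(weights - p))/(2*perturb)
    return grad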
In [5]:
random.seed(1234)

points, targets = generateChevronData()

plt.axis([-1.5, 1.5, -1.5, 1.5])

# Split points by class and plot them
c1 = []
c2 = []
for i in range(0, len(points)):
    if targets[i] == 0:
        c1.append(points[i])
    else:
        c2.append(points[i])

print("Type 0: ", len(c1))
print("Type 1: ", len(c2))

plotScatter(c1)
plotScatter(c2)

weights = trainBoundaryHunter()
plt.scatter(weights[2], weights[3])

# Homogeneous coefficients of the decision boundary
# w0*(c_x - x) + w1*(c_y - y) = 0, i.e. n[0] + n[1]*x + n[2]*y = 0.
n = np.array([weights[0] * weights[2] + weights[1] * weights[3],
              -weights[0],
              -weights[1]])
bias = -1 * n[0]/n[2]
Xcoef = -1 * n[1]/n[2]

# Draw the hunter's circle of radius weights[4] around its centre.
x = np.linspace(-1.5, 1.5, 500)
y = np.linspace(-1.5, 1.5, 500)
X, Y = np.meshgrid(x, y)
F = (X - weights[2])**2 + (Y - weights[3])**2 - weights[4]**2
plt.contour(X, Y, F, [0])

print()
print(n)
print("\nLine")
print("B: " + str(bias))
print("XCoef: " + str(Xcoef))

# Plot the boundary line y = Xcoef*x + bias over x in [-1, 1].
plt.plot([-1.0, 1.0], [-1*Xcoef + bias, Xcoef + bias], 'k-')
plt.gca().set_aspect('equal')
plt.show()
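One number the plot does not give you is how well the trained line actually classifies. A rough score (my addition; it ignores the responsibility weighting, so it judges the line on all 100 points rather than only the claimed region):

preds = logisticPrediction(weights, points) > 0.5
print("Accuracy over all points:", np.mean(preds == targets))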