In [30]:
import random as rn, numpy as np, math
def evolveParams(costFunction, vecLength, params=(100, 0.01, 100), *args):
    def recombineVectors(arr):
        # arr is a 2-dimensional array of vectors
        # Steps:
        #  1. Take the vectors in consecutive pairs: indices (0, 1), (2, 3), ...
        #  2. Pick a random "cut point" along the length of the vector
        #  3. Swap the corresponding vector segments at the cut point
        resultVec = np.zeros(arr.shape)
        arrLen = int(arr.shape[0] / 2)
        for i in range(arrLen):
            x = 2 * i
            a = arr[x]
            b = arr[x + 1]
            cutPt = np.random.randint(len(a))
            new_a = np.concatenate((a[0:cutPt], b[cutPt:]))
            new_b = np.concatenate((b[0:cutPt], a[cutPt:]))
            resultVec[x] = new_a
            resultVec[x + 1] = new_b
        return resultVec

    initPop, mutRate, numGen = params   # population size, mutation rate, number of generations
    solLen = vecLength                  # length of each candidate solution vector
    numWin = int(0.10 * initPop)        # keep the best 10% of each generation as "winners"
    step = 0.01
    bounds = (initPop * solLen) * step * 2
    # Initial population: random parameter vectors drawn without replacement from [-bounds, bounds)
    curPop = np.random.choice(np.arange(-1 * bounds, bounds, step=0.01), size=(initPop, solLen), replace=False)
    nextPop = np.zeros((curPop.shape[0], curPop.shape[1]))
    fitVec = np.zeros((initPop, 2))
    for i in range(numGen):
        # Evaluate fitness: column 0 holds the individual's index, column 1 its cost
        fitVec = np.array([np.array([x, np.sum(costFunction(*args, curPop[x].T))]) for x in range(initPop)])
        # print(np.sum(fitVec[:,1]))
        # Tournament selection: sample a random subset and keep its lowest-cost member
        winners = np.zeros((numWin, solLen))
        for n in range(len(winners)):
            selected = np.random.choice(len(fitVec), numWin // 2, replace=False)
            wnr = np.argmin(fitVec[selected, 1])
            winners[n] = curPop[int(fitVec[selected[wnr]][0])]
        nextPop[:len(winners)] = winners
        # Fill the rest of the next generation with duplicated winners...
        duplicWin = np.zeros((initPop - len(winners), winners.shape[1]))
        numDups = (initPop - len(winners)) // len(winners)
        for x in range(winners.shape[1]):
            duplicWin[:, x] = np.repeat(winners[:, x], numDups, axis=0)
            # duplicWin[:, x] = np.random.permutation(duplicWin[:, x])
        # ...then recombine (crossover) the duplicates in pairs
        duplicWin = recombineVectors(duplicWin)
        nextPop[len(winners):] = duplicWin
        # Mutation: rescale a random fraction (mutRate) of all elements by a normal random factor
        num_mut_els = int(nextPop.size * mutRate)
        mutated_elements = np.random.randint(0, nextPop.size, size=(num_mut_els,))
        for z in mutated_elements:
            nextPop.flat[z] = nextPop.flat[z] * float(np.random.normal(0, 2))
        curPop = nextPop.copy()   # copy so later in-place edits to nextPop don't alias curPop
    best_soln = curPop[np.argmin(fitVec[:, 1])]
    # print("Best Sol'n:\n%s\nCost:%s" % (best_soln, np.sum(costFunction(*args, best_soln.T))))
    return best_soln
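As a quick sanity check before wiring the optimizer to a neural network, evolveParams can be pointed at a simple convex cost. This is an added aside, not part of the original notebook: the helper name sphereCost and the 5-element target vector are illustrative assumptions. It shows the calling convention evolveParams expects, namely that any extra *args are passed to the cost function first and the candidate vector last.

# Illustrative sketch only: minimize the squared distance to a fixed target vector.
target = np.arange(5, dtype=float)   # hypothetical target [0, 1, 2, 3, 4]

def sphereCost(target, theta):
    # Sum of squared errors between the candidate vector and the target.
    return np.sum((np.asarray(theta).ravel() - target) ** 2)

approx = evolveParams(sphereCost, 5, (100, 0.01, 50), target)
print(np.round(approx, 2))           # should land roughly near [0, 1, 2, 3, 4]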
In [34]:
X = np.array([[0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1]])   # XOR inputs (3rd column acts as a bias feature)
y = np.array([[0, 1, 1, 0]]).T                               # XOR targets
init_theta = 10 * (np.random.random((13, 1)) - 0.5)          # 13 random weights in [-5, 5)

def runForward(X, theta):
    # Unpack the flat 13-element parameter vector: theta1 (3x3) maps the 3 inputs
    # to 3 hidden units; theta2 (4x1) maps the 3 hidden units plus a bias unit
    # to the single output.
    theta1 = np.array(theta[:9]).reshape(3, 3)
    theta2 = np.array(theta[9:]).reshape(4, 1)
    h1 = sigmoid(np.dot(X, theta1))                    # 4x3 . 3x3 = 4x3
    h1_bias = np.insert(h1, 3, [1, 1, 1, 1], axis=1)   # append a bias column -> 4x4
    output = sigmoid(np.dot(h1_bias, theta2))          # 4x4 . 4x1 = 4x1
    return output

def costFunction(X, y, theta):
    m = float(len(X))
    hThetaX = np.array(runForward(X, theta))
    return np.sum(np.abs(y - hThetaX))                 # total absolute error over the 4 examples

def sigmoid(x): return 1 / (1 + np.exp(-x))

def demoRun():
    print("Random theta: \n%s\n" % (np.round(runForward(X, init_theta), 2)))
    print("Cost: %s\n" % (costFunction(X, y, init_theta)))
    optimal_theta = evolveParams(costFunction, 13, (100, 0.01, 50), X, y)
    print("Optimal theta: \n%s\n" % (np.round(runForward(X, optimal_theta.reshape(13, 1)), 2)))
    print("Cost: %s\n" % (costFunction(X, y, optimal_theta.reshape(13, 1))))

demoRun()
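One way to sanity-check costFunction by hand (an added aside, not from the original notebook): with an all-zero theta, every dot product is 0, every sigmoid outputs 0.5, so the network predicts 0.5 for all four examples and the cost is |0-0.5| + |1-0.5| + |1-0.5| + |0-0.5| = 2.0.

# Worked check: an all-zero parameter vector should give a cost of exactly 2.0.
zero_theta = np.zeros((13, 1))
print(costFunction(X, y, zero_theta))   # prints 2.0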
In [ ]: