In [10]:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import random
from IPython import display
from sklearn import datasets, preprocessing
(X, y) = datasets.make_circles(n_samples=1024, shuffle=True, noise=0.2, factor=0.4)
ind = np.logical_or(y==1, X[:,1] > X[:,0] - 0.5)
X = X[ind,:]
X = preprocessing.scale(X)
y = y[ind]
y = 2*y - 1
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.show()
In [11]:
h = 0.01
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
def visualize(X, y, w, loss, n_iter):
    """Plot the current decision boundary next to the loss history."""
    plt.clf()
    Z = classify(np.c_[xx.ravel(), yy.ravel()], w)
    Z = Z.reshape(xx.shape)
    plt.subplot(1, 2, 1)
    plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.subplot(1, 2, 2)
    plt.plot(loss)
    plt.grid()
    ymin, ymax = plt.ylim()
    plt.ylim(0, ymax)
    display.clear_output(wait=True)
    display.display(plt.gcf())
Your task starts here
First, let's write a function that predicts the class for a given X.
Since the problem above isn't linearly separable, we add quadratic features to the classifier; this transformation is implemented in the expand function.
Don't forget to apply expand to X inside classify and the other functions.
Classifying a sample should amount to little more than taking the sign of a dot product.
In [12]:
def expand(X):
    """Map 2-D points to the quadratic feature space [x0, x1, x0^2, x1^2, x0*x1, 1]."""
    X_ = np.zeros((X.shape[0], 6))
    X_[:, 0:2] = X
    X_[:, 2:4] = X ** 2
    X_[:, 4] = X[:, 0] * X[:, 1]
    X_[:, 5] = 1
    return X_

def classify(X, w):
    """
    Given feature matrix X [n_samples, 2] and weight vector w [6],
    return an array of +1 or -1 predictions.
    """
    #<your code here>
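For reference, here is a minimal sketch of one possible classify (one valid solution, not the only one): expand the features, take the dot product with w, and map the sign to {+1, -1}.
In [ ]:
def classify(X, w):
    scores = expand(X).dot(w)            # raw decision values, shape [n_samples]
    return np.where(scores >= 0, 1, -1)  # sign, with 0 mapped to +1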
The loss you should try to minimize is the hinge loss:
$$ L = \frac{1}{N} \sum_{i=1}^N \max\left(0,\, 1 - y_i \, w^T x_i\right) $$
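Since the hinge loss is piecewise linear, a convenient subgradient to use in compute_grad is
$$ \nabla_w L = -\frac{1}{N} \sum_{i:\; y_i w^T x_i < 1} y_i x_i $$
(samples with $y_i w^T x_i \ge 1$ contribute zero).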
In [13]:
def compute_loss(X, y, w):
    """
    Given feature matrix X [n_samples, 2], target vector [n_samples] of +1/-1,
    and weight vector w [6], compute the scalar loss using the formula above.
    """
    #<your code here>

def compute_grad(X, y, w):
    """
    Given feature matrix X [n_samples, 2], target vector [n_samples] of +1/-1,
    and weight vector w [6], compute the vector [6] of derivatives of L with
    respect to each weight.
    """
    #<your code here>
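One possible implementation, shown here as a vectorized sketch (the subgradient value chosen exactly at the hinge point is arbitrary):
In [ ]:
def compute_loss(X, y, w):
    margins = y * expand(X).dot(w)              # y_i * w^T x_i for each sample
    return np.mean(np.maximum(0, 1 - margins))  # average hinge loss

def compute_grad(X, y, w):
    X_ = expand(X)
    margins = y * X_.dot(w)
    mask = margins < 1                          # samples with a nonzero hinge term
    # subgradient: average of -y_i * x_i over the violating samples
    return -(X_[mask] * y[mask][:, None]).sum(axis=0) / X.shape[0]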
In [9]:
w = np.array([1, 0, 0, 0, 0, 0])
alpha = 0.0  # learning rate
n_iter = 50
batch_size = 4
loss = np.zeros(n_iter)
plt.figure(figsize=(12, 5))
for i in range(n_iter):
    ind = random.sample(range(X.shape[0]), batch_size)
    loss[i] = compute_loss(X, y, w)
    visualize(X[ind, :], y[ind], w, loss, n_iter)
    w = w - alpha * compute_grad(X[ind, :], y[ind], w)
visualize(X, y, w, loss, n_iter)
plt.clf()
Implement gradient descent with momentum and test its performance for different learning-rate and momentum values.
In [ ]:
w = np.array([1, 0, 0, 0, 0, 0])
alpha = 0.0  # learning rate
mu = 0.0  # momentum
n_iter = 50
batch_size = 4
loss = np.zeros(n_iter)
plt.figure(figsize=(12, 5))
for i in range(n_iter):
    ind = random.sample(range(X.shape[0]), batch_size)
    loss[i] = compute_loss(X, y, w)
    visualize(X[ind, :], y[ind], w, loss, n_iter)
    #<update w and anything else here>
visualize(X, y, w, loss, n_iter)
plt.clf()
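One way to fill in the update is the classic heavy-ball rule, sketched below. momentum_step is an illustrative helper (not part of the assignment scaffolding) and assumes a velocity v initialized to np.zeros_like(w, dtype=float) before the loop.
In [ ]:
def momentum_step(w, v, grad, alpha, mu):
    """One heavy-ball momentum update: returns new weights and velocity."""
    v = mu * v + grad   # exponentially decaying sum of past gradients
    w = w - alpha * v   # step along the accumulated direction
    return w, v
Inside the loop, the placeholder would then become w, v = momentum_step(w, v, compute_grad(X[ind, :], y[ind], w), alpha, mu).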
The same task, but with Nesterov's accelerated gradient:
In [ ]:
w = np.array([1, 0, 0, 0, 0, 0])
alpha = 0.0  # learning rate
mu = 0.0  # momentum
n_iter = 50
batch_size = 4
loss = np.zeros(n_iter)
plt.figure(figsize=(12, 5))
for i in range(n_iter):
    ind = random.sample(range(X.shape[0]), batch_size)
    loss[i] = compute_loss(X, y, w)
    visualize(X[ind, :], y[ind], w, loss, n_iter)
    #<update w and anything else here>
visualize(X, y, w, loss, n_iter)
plt.clf()
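A sketch of one common formulation of Nesterov's update: the gradient is evaluated at the look-ahead point w + mu * v rather than at w. nesterov_step is again an illustrative helper, with v initialized to zeros before the loop.
In [ ]:
def nesterov_step(w, v, grad_fn, alpha, mu):
    """One Nesterov accelerated gradient update.
    grad_fn maps a weight vector to its mini-batch gradient."""
    v = mu * v - alpha * grad_fn(w + mu * v)  # gradient at the look-ahead point
    return w + v, v
It could be called as w, v = nesterov_step(w, v, lambda u: compute_grad(X[ind, :], y[ind], u), alpha, mu).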
Finally, try the Adam algorithm. You can start with beta = 0.9 and mu = 0.999 (these play the roles of beta1 and beta2 from the original paper).
In [ ]:
w = np.array([1, 0, 0, 0, 0, 0])
alpha = 0.0  # learning rate
beta = 0.0  # (beta1 in the original paper) exponential decay rate for the 1st moment estimates
mu = 0.0  # (beta2 in the original paper) exponential decay rate for the 2nd moment estimates
eps = 1e-8  # a small constant for numerical stability
n_iter = 50
batch_size = 4
loss = np.zeros(n_iter)
plt.figure(figsize=(12, 5))
for i in range(n_iter):
    ind = random.sample(range(X.shape[0]), batch_size)
    loss[i] = compute_loss(X, y, w)
    visualize(X[ind, :], y[ind], w, loss, n_iter)
    #<update w and anything else here>
visualize(X, y, w, loss, n_iter)
plt.clf()
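A sketch of a single Adam step following the original paper, with beta/mu in the roles of beta1/beta2 to match the cell above. adam_step is an illustrative helper; m and v are moment estimates initialized to zeros before the loop, and t is the 1-based iteration number (t = i + 1) used for bias correction.
In [ ]:
def adam_step(w, m, v, grad, t, alpha, beta, mu, eps):
    """One Adam update: returns new weights and moment estimates."""
    m = beta * m + (1 - beta) * grad    # 1st moment (mean) estimate
    v = mu * v + (1 - mu) * grad ** 2   # 2nd moment (uncentered variance) estimate
    m_hat = m / (1 - beta ** t)         # bias-corrected estimates
    v_hat = v / (1 - mu ** t)
    return w - alpha * m_hat / (np.sqrt(v_hat) + eps), m, v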
Which optimization method do you consider the best? Type your answer in the cell below.