In [1]:
import matplotlib.pyplot as plt
import numpy as np
In [2]:
# Build a 2-D XOR toy dataset: 200 standard-normal points, labelled +1 when
# exactly one coordinate is positive and -1 otherwise.  Seeded for
# reproducibility.
np.random.seed(1)
X_xor = np.random.randn(200, 2)
# True where the two coordinates disagree in sign (the XOR condition).
signs_differ = np.logical_xor(X_xor[:, 0] > 0, X_xor[:, 1] > 0)
y_xor = np.where(signs_differ, 1, -1)
In [3]:
# Visualise the XOR dataset in its original 2-D feature space:
# blue crosses for class +1, red squares for class -1.
for cls, colour, mark in ((1, 'b', 'x'), (-1, 'r', 's')):
    members = y_xor == cls
    plt.scatter(X_xor[members, 0],
                X_xor[members, 1],
                c=colour, marker=mark, label=str(cls))
plt.xlim([-3, 3])
plt.ylim([-3, 3])
plt.legend(loc='best')
plt.tight_layout()
plt.show()
In [4]:
from sklearn.svm import SVC
from helper import plot_decision_regions

# Fit an RBF-kernel SVM on the XOR data.  The kernel trick handles the
# non-linear separation directly; gamma controls the kernel width and C the
# regularisation strength.
rbf_svm = SVC(kernel='rbf', random_state=1, gamma=0.10, C=100.0)
rbf_svm.fit(X_xor, y_xor)

# Show the learned decision boundary over the training points.
plot_decision_regions(X_xor, y_xor, classifier=rbf_svm)
plt.legend(loc='upper left')
plt.tight_layout()
plt.show()
This section is just for fun — the feature transformation used below is not actually the right one for the XOR problem; it only illustrates mapping the data into a higher-dimensional space.
In [5]:
# Map the data into 3-D by appending the squared radius r^2 = x1^2 + x2^2 as a
# third feature, then plot x2 against r^2 for both classes.
#
# FIX: store the augmented array under a new name instead of reassigning
# X_xor.  The original cell overwrote X_xor in place, which (a) made the cell
# non-idempotent — every re-run appended yet another column — and (b) silently
# turned the 2-D X_xor that the earlier cells plotted and fitted into a 3-D
# array.
radius_sq = (X_xor[:, 0] ** 2 + X_xor[:, 1] ** 2).reshape(-1, 1)
X_xor_3d = np.append(X_xor, radius_sq, axis=1)

plt.scatter(X_xor_3d[y_xor == 1, 1],
            X_xor_3d[y_xor == 1, 2],
            c='b', marker='x', label='1')
plt.scatter(X_xor_3d[y_xor == -1, 1],
            X_xor_3d[y_xor == -1, 2],
            c='r', marker='s', label='-1')
plt.xlim([-3, 3])
plt.ylim([0, 9])
plt.legend(loc='best')
plt.tight_layout()
plt.show()