In [ ]:
# K Means Example
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from scipy.spatial.distance import cdist
x1 = np.array([1, 2, 3, 1, 5, 6, 5, 5, 6, 7, 8, 9, 7, 9])
x2 = np.array([1, 3, 2, 2, 8, 6, 7, 6, 7, 1, 2, 1, 1, 3])
X = np.array(list(zip(x1, x2))).reshape(-1, 2)
plt.figure()
plt.grid(True)
plt.plot(X[:, 0],X[:, 1],'k.');
kmeans = KMeans(n_clusters=3)
kmeans.fit(X)
print('cluster_centers_:\n{}'.format(kmeans.cluster_centers_))
print('kmeans.labels_:\n{}'.format(kmeans.labels_))
dist_matrix = cdist(X, kmeans.cluster_centers_, 'euclidean')
avg_dist = sum(np.min(cdist(X, kmeans.cluster_centers_, 'euclidean'), axis=1)) / X.shape[0]
print('Distance Matrix:')
print(dist_matrix)
print('Avarge Distance:')
print(avg_dist)
print('Silhouette Score: {}'.format(metrics.silhouette_score(X, kmeans.labels_, metric='euclidean')))
In [ ]:
# Elbow Method Optimization for K
# Use Mean Distortions
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from scipy.spatial.distance import cdist
x1 = np.array([1, 2, 3, 1, 5, 6, 5, 5, 6, 7, 8, 9, 7, 9])
x2 = np.array([1, 3, 2, 2, 8, 6, 7, 6, 7, 1, 2, 1, 1, 3])
X = np.array(list(zip(x1, x2))).reshape(-1, 2)
plt.figure()
plt.grid(True)
plt.plot(X[:, 0],X[:, 1],'k.');
K = range(1, 10)
meandistortions = []
for k in K:
kmeans = KMeans(n_clusters=k)
kmeans.fit(X)
avg_dist = sum(np.min(cdist(X, kmeans.cluster_centers_, 'euclidean'), axis=1)) / X.shape[0]
meandistortions.append(avg_dist)
plt.figure()
plt.plot(K, meandistortions, 'bx-')
plt.xlabel('k')
plt.ylabel('Mean Distortions')
plt.title('Elbow Method Optimization for K')
In [ ]:
# Elbow Method Optimization for K
# Use Silhouette Score
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from scipy.spatial.distance import cdist
x1 = np.array([1, 2, 3, 1, 5, 6, 5, 5, 6, 7, 8, 9, 7, 9])
x2 = np.array([1, 3, 2, 2, 8, 6, 7, 6, 7, 1, 2, 1, 1, 3])
X = np.array(list(zip(x1, x2))).reshape(-1, 2)
plt.figure()
plt.grid(True)
plt.plot(X[:, 0],X[:, 1],'k.');
K = range(2, 10)
silhouette_scores = []
for k in K:
kmeans = KMeans(n_clusters=k)
kmeans.fit(X)
score = metrics.silhouette_score(X, kmeans.labels_, metric='euclidean')
silhouette_scores.append(score)
plt.figure()
plt.plot(K, silhouette_scores, 'bx-')
plt.xlabel('k')
plt.ylabel('Silhouette Score')
plt.title('Elbow Method Optimization for K')
In [ ]:
# Demonstrate four ways k-means can mis-cluster synthetic blob data:
# a wrong cluster count, anisotropic clusters, unequal cluster
# variances, and unevenly sized clusters. One subplot per case.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs

plt.figure(figsize=(12, 12))

n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, n_features=2, centers=3,
                  random_state=random_state)

def _show(position, points, labels, title):
    # Scatter-plot one clustering result in the given 2x2 subplot slot.
    plt.subplot(position)
    plt.scatter(points[:, 0], points[:, 1], c=labels)
    plt.title(title)

# Case 1: ask for 2 clusters when the data has 3 blobs.
pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
_show(221, X, pred, "Incorrect Number of Blobs")

# Case 2: anisotropic data — a linear transform stretches the blobs,
# breaking k-means' spherical-cluster assumption.
transformation = [[0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
_show(222, X_aniso, pred, "Anisotropicly Distributed Blobs")

# Case 3: clusters with different standard deviations.
X_varied, y_varied = make_blobs(n_samples=n_samples, n_features=2, centers=3,
                                cluster_std=[1.0, 2.5, 0.5],
                                random_state=random_state)
pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
_show(223, X_varied, pred, "Unequal Variance")

# Case 4: unevenly sized blobs — keep 500 / 100 / 10 samples per cluster.
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
_show(224, X_filtered, pred, "Unevenly Sized Blobs")

plt.show()