In [3]:
"""
Count words
"""
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer(min_df=1)
content = ["How to format my hard disk", "Hard disk format problems"]
X = vectorizer.fit_transform(content)
print vectorizer.get_feature_names()
print X.toarray().transpose()
In [14]:
"""
"""
TOY_DIR = "./ch03/toy"
import os
import sys
posts = [open(os.path.join(TOY_DIR, f)).read() for f in os.listdir(TOY_DIR)]
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer(min_df=1)
X_train = vectorizer.fit_transform(posts)
num_samples, num_features = X_train.shape
print("#samples: %d, #features: %d" % (num_samples, num_features))
print vectorizer.get_feature_names()
In [12]:
# Vectorize a new, unseen post using the vocabulary learned above.
new_post = "imaging databases"
new_post_vec = vectorizer.transform([new_post])
# Sparse repr shows only the non-zero entries; toarray() shows the
# full dense vector.  (print() call form for Python 3.)
print(new_post_vec)
print(new_post_vec.toarray())
In [15]:
"""Euclidean Distance"""
import scipy as sp
def dist_raw(v1, v2):
delta = v1 - v2
return sp.linalg.norm(delta.toarray())
# Scan every training post and keep the one closest to the query
# under the raw Euclidean distance.
best_dist = sys.maxsize
best_i = None
for i in range(num_samples):
    post = posts[i]
    # Skip a post that is literally identical to the query.
    if post == new_post:
        continue
    d = dist_raw(X_train.getrow(i), new_post_vec)
    print("=== Post %i with dist=%.2f: %s" % (i, d, post))
    if d < best_dist:
        best_dist, best_i = d, i
print("Best post is %i with dist=%.2f" % (best_i, best_dist))
In [22]:
# Normalizing word count vectors
def dist_norm(v1, v2):
    """Euclidean distance between two sparse vectors after scaling each
    to unit length, so the distance reflects the word mix rather than
    the post length.

    A zero vector (e.g. a query containing no known vocabulary terms)
    is left unscaled — the original code divided by zero in that case.
    """
    n1 = sp.linalg.norm(v1.toarray())
    n2 = sp.linalg.norm(v2.toarray())
    v1_normalized = v1 / n1 if n1 > 0 else v1
    v2_normalized = v2 / n2 if n2 > 0 else v2
    delta = v1_normalized - v2_normalized
    return sp.linalg.norm(delta.toarray())
# Repeat the nearest-post search, now with the length-normalized
# distance; the running best is reset first.
best_dist = sys.maxsize
best_i = None
for i in range(num_samples):
    post = posts[i]
    if post == new_post:
        continue
    d = dist_norm(X_train.getrow(i), new_post_vec)
    print("=== Post %i with dist=%.2f: %s" % (i, d, post))
    if d < best_dist:
        best_dist, best_i = d, i
print("Best post is %i with dist=%.2f" % (best_i, best_dist))
In [18]:
# Remove less important words: refit with English stop words dropped.
vectorizer = CountVectorizer(min_df=1, stop_words='english')
X_train = vectorizer.fit_transform(posts)
new_post = "imaging databases"
new_post_vec = vectorizer.transform([new_post])
# print() call form for Python 3.
print(new_post_vec)
print(new_post_vec.toarray())
# BUG FIX: reset the running best before searching again.  The original
# cell reused best_dist/best_i left over from the previous search, so a
# post here could only win if it also beat the *previous* vectorizer's
# best distance — and the final "Best post" line could report stale values.
best_dist = sys.maxsize
best_i = None
for i in range(0, num_samples):
    post = posts[i]
    if post == new_post:
        continue
    post_vec = X_train.getrow(i)
    d = dist_norm(post_vec, new_post_vec)
    print("=== Post %i with dist=%.2f: %s" % (i, d, post))
    if d < best_dist:
        best_dist = d
        best_i = i
print("Best post is %i with dist=%.2f" % (best_i, best_dist))
In [23]:
# Stemming: collapse word variants ("imaging", "images" -> same stem)
import nltk.stem

english_stemmer = nltk.stem.SnowballStemmer('english')


class StemmedCountVectorizer(CountVectorizer):
    """CountVectorizer whose analyzer stems each token with an
    English Snowball stemmer before counting."""

    def build_analyzer(self):
        base_analyzer = super(StemmedCountVectorizer, self).build_analyzer()

        def stemmed_analyzer(doc):
            # Lazily stem every token produced by the stock analyzer.
            return (english_stemmer.stem(token) for token in base_analyzer(doc))

        return stemmed_analyzer
# Refit with the stemming vectorizer and search again.
vectorizer = StemmedCountVectorizer(min_df=1, stop_words='english')
X_train = vectorizer.fit_transform(posts)
new_post = "imaging databases"
new_post_vec = vectorizer.transform([new_post])
# print() call form for Python 3 (the originals were py2 statements).
print(new_post_vec)
print(new_post_vec.toarray())
# Reset the running best for this fresh search.
best_dist = sys.maxsize
best_i = None
for i in range(0, num_samples):
    post = posts[i]
    if post == new_post:
        continue
    post_vec = X_train.getrow(i)
    d = dist_norm(post_vec, new_post_vec)
    print("=== Post %i with dist=%.2f: %s" % (i, d, post))
    if d < best_dist:
        best_dist = d
        best_i = i
print("Best post is %i with dist=%.2f" % (best_i, best_dist))
In [24]:
"""
TF-IDF: Term Frequency - Inverse Document Frequency
"""
from sklearn.feature_extraction.text import TfidfVectorizer
class StemmedTfidfVectorizer(TfidfVectorizer):
def build_analyzer(self):
analyzer = super(StemmedTfidfVectorizer, self).build_analyzer()
return lambda doc: (english_stemmer.stem(w) for w in analyzer(doc))
# Refit with the stemming TF-IDF vectorizer and search one last time.
vectorizer = StemmedTfidfVectorizer(
    min_df=1, stop_words='english', decode_error='ignore')
X_train = vectorizer.fit_transform(posts)
new_post = "imaging databases"
new_post_vec = vectorizer.transform([new_post])
# print() call form for Python 3 (the originals were py2 statements).
print(new_post_vec)
print(new_post_vec.toarray())
# Reset the running best for this fresh search.
best_dist = sys.maxsize
best_i = None
for i in range(0, num_samples):
    post = posts[i]
    if post == new_post:
        continue
    post_vec = X_train.getrow(i)
    d = dist_norm(post_vec, new_post_vec)
    print("=== Post %i with dist=%.2f: %s" % (i, d, post))
    if d < best_dist:
        best_dist = d
        best_i = i
print("Best post is %i with dist=%.2f" % (best_i, best_dist))
In [ ]: