In [3]:
from collections import Counter
import json
In [7]:
%%time
# Load the full Yelp review dump, one JSON object per line,
# printing a progress marker every million lines.
reviews = []
with open("/data/yelp_academic_dataset_review.json") as f:
    i = 0
    for line in f:
        if i % 1000000 == 0:
            print(i)
        i += 1
        reviews.append(json.loads(line))
In [8]:
print(reviews[0])
In [9]:
# Concatenate each author's reviews into one long string, keyed by user_id.
by_author = {}  # author : "review 1\n review 2\n review 3"
for review in reviews:
    uid = review['user_id']
    if uid in by_author:
        by_author[uid] += "\n{}".format(review['text'])
    else:
        by_author[uid] = "{}".format(review['text'])
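Building each author's string with repeated += re-copies the growing text on every review. At millions of reviews, collecting the pieces in a list and joining once at the end is usually much faster; a minimal sketch of that variant (the by_author_fast name is introduced here just for illustration):
In [ ]:
from collections import defaultdict

# Same result as the cell above: gather each author's texts in a list,
# then join once instead of re-copying the string on every append.
pieces = defaultdict(list)
for review in reviews:
    pieces[review['user_id']].append(review['text'])
by_author_fast = {uid: "\n".join(texts) for uid, texts in pieces.items()}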
In [10]:
len(by_author)
Out[10]:
In [11]:
# Keep only authors with more than 10,010 characters in total: enough for
# two 5,000-character samples plus the 10-character gap skipped between
# them below.
by_author = {a: by_author[a] for a in by_author if len(by_author[a]) > 10010}
In [12]:
len(by_author)
Out[12]:
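Counter is imported at the top but never used; one quick use for it is sanity-checking the cutoff by bucketing per-author text lengths by order of magnitude. A sketch, best run before the filtering cell above while by_author still holds everyone:
In [ ]:
import math
# Histogram of per-author total length by order of magnitude,
# e.g. key 4 means 10,000-99,999 characters.
Counter(int(math.log10(max(len(t), 1))) for t in by_author.values())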
In [13]:
known_texts = []
unknown_texts = []
# The first 5,000 characters become the "known" sample and characters
# 5,010-10,010 the "unknown" sample; the 10 skipped characters in the
# middle ensure the two halves can't be matched via a word broken at
# the boundary.
for a in by_author:
    known_texts.append(by_author[a][:5000])
    unknown_texts.append(by_author[a][5010:10010])
In [14]:
len(unknown_texts)
Out[14]:
In [15]:
total = 38900
half = int(total / 2)
# First half of the pairs: genuine same-author (known, unknown) pairs.
known_same = known_texts[:half]
unknown_same = unknown_texts[:half]
# Second half: different-author pairs, built by rotating the unknown
# texts one position so each known text is paired with the next
# author's unknown text.
known_diff = known_texts[half:total]
unknown_diff = unknown_texts[half:total]
unknown_diff = unknown_diff[1:] + [unknown_diff[0]]
knowns = known_same + known_diff
unknowns = unknown_same + unknown_diff
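Because each author appears exactly once in by_author, the one-step rotation guarantees every "different" pair really mixes two distinct authors. A sanity-check sketch, tracking author ids in a parallel list (the authors name is introduced here, not part of the original):
In [ ]:
# list(by_author) iterates in the same insertion order as the loop
# that built known_texts/unknown_texts, so the lists stay aligned.
authors = list(by_author)
diff_authors = authors[half:total]
shifted = diff_authors[1:] + [diff_authors[0]]
assert all(a != b for a, b in zip(diff_authors, shifted))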
In [16]:
len(knowns)
Out[16]:
In [17]:
n = int(len(knowns) / 2)
labels = ([1] * n) + ([0] * n)  # 1 = same author, 0 = different author
In [ ]:
%%time
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import FeatureUnion

# Character 2-3-grams capture punctuation and morphology; word 1-2-grams
# capture vocabulary. min_df=0.01 drops features seen in under 1% of the
# texts, and lowercase=False preserves capitalization as a stylistic signal.
char_tf = TfidfVectorizer(analyzer='char', ngram_range=(2, 3), min_df=0.01, lowercase=False)
word_tf = TfidfVectorizer(ngram_range=(1, 2), lowercase=False, min_df=0.01)
vectorizer = FeatureUnion([
    ('char', char_tf),
    ('word', word_tf),
])
vectorizer.fit(knowns + unknowns)
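FeatureUnion simply concatenates the character-n-gram and word-n-gram TF-IDF columns into one wide matrix, so the combined dimensionality is the sum of the two vocabularies. A quick way to inspect the split after fitting:
In [ ]:
# Each fitted TfidfVectorizer exposes its learned vocabulary as a dict.
print(len(char_tf.vocabulary_), len(word_tf.vocabulary_))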
In [ ]:
%%time
known_vecs = vectorizer.transform(knowns)
print(".")
unknown_vecs = vectorizer.transform(unknowns)
In [ ]:
len(knowns)
In [ ]:
known_vecs.shape
In [ ]:
len(labels)
In [ ]:
from random import shuffle
indices = list(range(len(labels)))
shuffle(indices)
indices[:10]
In [ ]:
len(indices)
In [ ]:
import numpy as np
labels = np.array(labels)
In [ ]:
# Hold out the last 8,900 of the 38,900 shuffled pairs for testing
# (roughly a 77/23 split).
train_indices = indices[:30000]
test_indices = indices[30000:]
known_train = known_vecs[train_indices, :]
unknown_train = unknown_vecs[train_indices, :]
train_labels = labels[train_indices]
known_test = known_vecs[test_indices, :]
unknown_test = unknown_vecs[test_indices, :]
test_labels = labels[test_indices]
In [ ]:
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, classification_report
from sklearn.model_selection import cross_val_score
In [ ]:
%%time
# Represent each pair as the element-wise absolute difference of its two
# TF-IDF vectors, so entries measure per-feature stylistic disagreement,
# then train a linear SVM to separate same-author from different-author.
train_pairs = np.abs(known_train - unknown_train)
test_pairs = np.abs(known_test - unknown_test)
svm = LinearSVC()
svm.fit(train_pairs, train_labels)
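As a much cheaper point of comparison, a single cosine-distance feature per pair can drive a linear model. A sketch of that baseline (LogisticRegression is a stand-in choice here, not part of the original):
In [ ]:
from sklearn.linear_model import LogisticRegression
from sklearn.metrics.pairwise import paired_cosine_distances

# One feature per pair: cosine distance between known and unknown vectors.
train_dist = paired_cosine_distances(known_train, unknown_train).reshape(-1, 1)
test_dist = paired_cosine_distances(known_test, unknown_test).reshape(-1, 1)
baseline = LogisticRegression()
baseline.fit(train_dist, train_labels)
print(accuracy_score(test_labels, baseline.predict(test_dist)))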
In [ ]:
%%time
preds = svm.predict(test_pairs)
print(classification_report(test_labels, preds))
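accuracy_score was imported above but never called; it gives a single headline number to go with the per-class report:
In [ ]:
print(accuracy_score(test_labels, preds))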