In [3]:
import json
from collections import Counter

In [7]:
%%time
reviews = []
with open("/data/yelp_academic_dataset_review.json") as f:
    for i, line in enumerate(f):
        if i % 1000000 == 0:
            print(i)  # progress: one line per million reviews
        reviews.append(json.loads(line))


0
1000000
2000000
3000000
4000000
CPU times: user 52 s, sys: 8.69 s, total: 1min
Wall time: 1min 8s

In [8]:
print(reviews[0])


{'review_id': 'NxL8SIC5yqOdnlXCg18IBg', 'user_id': 'KpkOkG6RIf4Ra25Lhhxf1A', 'business_id': '2aFiy99vNLklCx3T_tGS9A', 'stars': 5, 'date': '2011-10-10', 'text': "If you enjoy service by someone who is as competent as he is personable, I would recommend Corey Kaplan highly. The time he has spent here has been very productive and working with him educational and enjoyable. I hope not to need him again (though this is highly unlikely) but knowing he is there if I do is very nice. By the way, I'm not from El Centro, CA. but Scottsdale, AZ.", 'useful': 0, 'funny': 0, 'cool': 0, 'type': 'review'}

In [9]:
by_author = {}  # user_id -> all of that user's reviews, joined with "\n"
for review in reviews:
    uid = review['user_id']
    if uid in by_author:
        by_author[uid] += "\n" + review['text']
    else:
        by_author[uid] = review['text']
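
Repeated string += re-copies an author's accumulated text on every append, which is quadratic in the number of reviews per author. A minimal sketch of an equivalent one-join-per-author approach (texts_by_author is a name introduced here purely for illustration):

from collections import defaultdict

texts_by_author = defaultdict(list)  # user_id -> list of review texts
for review in reviews:
    texts_by_author[review['user_id']].append(review['text'])

# one join per author instead of re-copying on every concatenation
by_author = {uid: "\n".join(texts) for uid, texts in texts_by_author.items()}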

In [10]:
len(by_author)


Out[10]:
1029432

In [11]:
# Keep authors who have written more than 10,010 characters in total:
# 5,000 for the "known" text, 5,000 for the "unknown" text, plus a
# 10-character gap between them (see the next cell)
by_author = {a: t for a, t in by_author.items() if len(t) > 10010}

In [12]:
len(by_author)


Out[12]:
38985

In [13]:
known_texts = []
unknown_texts = []

# Skip 10 characters between the two halves so a genuine pair can't be
# identified simply by gluing the broken text back together
for text in by_author.values():
    known_texts.append(text[:5000])
    unknown_texts.append(text[5010:10010])

In [14]:
len(unknown_texts)


Out[14]:
38985

In [15]:
total = 38900  # round down from 38,985 to an even number that splits cleanly in half
half = total // 2

# first half: genuine pairs (known and unknown text by the same author)
known_same = known_texts[:half]
unknown_same = unknown_texts[:half]

# second half: impostor pairs
known_diff = known_texts[half:total]
unknown_diff = unknown_texts[half:total]

# rotate the unknowns by one position so each known text is paired
# with a *different* author's unknown text
unknown_diff = unknown_diff[1:] + [unknown_diff[0]]

knowns = known_same + known_diff
unknowns = unknown_same + unknown_diff
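
A toy illustration of the rotation, with three hypothetical authors A, B, C: after shifting by one, A's known text is paired with B's unknown text, B's with C's, and C's with A's, so no "different" pair shares an author:

known_demo = ['known_A', 'known_B', 'known_C']
unknown_demo = ['unknown_A', 'unknown_B', 'unknown_C']
rotated = unknown_demo[1:] + [unknown_demo[0]]
print(list(zip(known_demo, rotated)))
# [('known_A', 'unknown_B'), ('known_B', 'unknown_C'), ('known_C', 'unknown_A')]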

In [16]:
len(knowns)


Out[16]:
38900

In [17]:
n = len(knowns) // 2
labels = ([1] * n) + ([0] * n)  # 1 = same author, 0 = different author

In [ ]:
%%time
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import FeatureUnion

# Character 2-3-grams capture punctuation and spelling habits; word
# 1-2-grams capture vocabulary. min_df=0.01 drops n-grams appearing in
# fewer than 1% of the texts; lowercase=False keeps capitalization as
# a stylistic signal.
char_tf = TfidfVectorizer(analyzer='char', ngram_range=(2, 3), min_df=0.01, lowercase=False)
word_tf = TfidfVectorizer(ngram_range=(1, 2), lowercase=False, min_df=0.01)

# FeatureUnion concatenates the two feature matrices side by side
vectorizer = FeatureUnion([
    ('char', char_tf),
    ('word', word_tf)
])

vectorizer.fit(knowns + unknowns)


CPU times: user 13min 32s, sys: 31.1 s, total: 14min 3s
Wall time: 14min 37s
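
As a sanity check on what FeatureUnion produces, a minimal sketch on toy texts: the combined matrix is the horizontal concatenation of the char and word TF-IDF matrices, so its width is the sum of the two vocabulary sizes (demo_char, demo_word, and the two sentences are made up for illustration):

demo_char = TfidfVectorizer(analyzer='char', ngram_range=(2, 2))
demo_word = TfidfVectorizer()
demo_union = FeatureUnion([('char', demo_char), ('word', demo_word)])
X = demo_union.fit_transform(["the cat sat", "the dog sat"])
# width of the combined matrix = char vocabulary + word vocabulary
assert X.shape[1] == len(demo_char.vocabulary_) + len(demo_word.vocabulary_)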

In [ ]:
%%time
known_vecs = vectorizer.transform(knowns)
print(".")  # progress marker between the two transforms
unknown_vecs = vectorizer.transform(unknowns)


.

In [ ]:
len(knowns)

In [ ]:
known_vecs.shape

In [ ]:
len(labels)

In [ ]:
from random import shuffle

# Shuffle the pair indices so same-author and different-author pairs
# are mixed before the train/test split
indices = list(range(len(labels)))
shuffle(indices)
indices[:10]

In [ ]:
len(indices)

In [ ]:
import numpy as np
labels = np.array(labels)  # an ndarray can be indexed with the shuffled index lists

In [ ]:
train_indices = indices[:30000]
test_indices = indices[30000:]  # the remaining 8,900 pairs

known_train = known_vecs[train_indices, :]
unknown_train = unknown_vecs[train_indices, :]
train_labels = labels[train_indices]

known_test = known_vecs[test_indices, :]
unknown_test = unknown_vecs[test_indices, :]
test_labels = labels[test_indices]
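
The fancy indexing above works because scipy CSR matrices support selecting rows by a list of indices, analogous to NumPy arrays; a quick sketch:

from scipy.sparse import csr_matrix

M = csr_matrix(np.eye(4))
print(M[[2, 0], :].toarray())  # rows 2 and 0 of the identity, in that order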

In [ ]:
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, classification_report
from sklearn.model_selection import cross_val_score

In [ ]:
%%time
# Represent each pair by the element-wise absolute difference of its two
# TF-IDF vectors; near-zero entries mark features on which the texts agree
train_pairs = np.abs(known_train - unknown_train)
test_pairs = np.abs(known_test - unknown_test)
svm = LinearSVC()
svm.fit(train_pairs, train_labels)
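
A toy example of the pair representation, with two hypothetical 4-dimensional TF-IDF vectors: coordinates where the two texts carry similar weight collapse toward zero, so the linear SVM learns how much difference, and on which features, is still consistent with a single author:

k = np.array([[0.5, 0.0, 0.3, 0.2]])  # hypothetical known-text vector
u = np.array([[0.4, 0.1, 0.3, 0.0]])  # hypothetical unknown-text vector
print(np.abs(k - u))                  # [[0.1 0.1 0.  0.2]]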

In [ ]:
%%time
preds = svm.predict(test_pairs)
print(classification_report(test_labels, preds))
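
cross_val_score was imported above but never used; a sketch of how it could give a less split-dependent accuracy estimate over all 38,900 pairs, assuming the full pair matrix fits in memory:

all_pairs = np.abs(known_vecs[indices, :] - unknown_vecs[indices, :])
scores = cross_val_score(LinearSVC(), all_pairs, labels[indices], cv=5)
print(scores.mean(), scores.std())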
