In [1]:
import os
import json
import time
import pickle
import requests
import math


import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

In [2]:
df = pd.read_csv('may_june_july.csv', delimiter="|")

In [3]:
# Combine all text fields into a single 'tags' column
df['tags'] = df['descr'] + " " + df['title'] + " " + df['cat'] + " " + df['primary_kw'] + " " + df['tags']
# Drop unneeded columns
df.drop(['id', 'pull_cc', 'cc', 'metav', 'descr', 'title', 'primary_kw', 'cat'], axis=1, inplace=True)

In [ ]:
# NORMALIZE TO LOG DISTRIBUTION
# Compute log2(freq * impressions / 1000) and store it in a new 'Log' column
df['Log'] = np.log2(df['freq'] * df['impressions'] / 1000)

# Drop columns no longer needed
df.drop(['freq', 'impressions'], axis=1, inplace=True)

In [ ]:
data_mean = df["Log"].mean()
data_mean

In [ ]:
data_std = df["Log"].std()
data_std

In [ ]:
#plt.hist(df["Log"])
#plt.show()

In [ ]:
# Virality threshold: rows with Log below (mean - 1 std) are 'notviral', everything else is 'viral'
df['viral'] = np.where(df['Log']<data_mean-data_std, 'notviral', 'viral')
df['viral_num'] = df.viral.map({'notviral':0, 'viral':1})
df.drop('Log', axis=1, inplace=True)
df.head()
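
In [ ]:
# Quick sanity check on the labeling (a sketch; assumes data_mean, data_std and the
# 'viral' column created above): print the Log cutoff and the resulting class balance.
threshold = data_mean - data_std
print("viral/notviral cutoff (Log):", threshold)
print(df.viral.value_counts(normalize=True))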

In [ ]:
# Fill missing tags with a placeholder token so the vectorizer does not fail on NaN
df['tags'] = df['tags'].fillna('a')

In [ ]:
df.tail()

In [ ]:
df.shape

In [ ]:
df.viral.value_counts()

In [ ]:
X = df.tags
y = df.viral_num
print(X.shape)
print(y.shape)

In [ ]:
X.head()

In [ ]:
y.head()

In [ ]:
# split X and y into training and testing sets
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
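
In [ ]:
# Check that the class balance is roughly preserved in the split (a quick sanity
# check on top of the original pipeline, using the y_train/y_test defined above).
print(y_train.value_counts(normalize=True))
print(y_test.value_counts(normalize=True))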

In [ ]:
# instantiate the vectorizer
from sklearn.feature_extraction.text import CountVectorizer
vect = CountVectorizer()

In [ ]:
vect

In [ ]:
# learn training data vocabulary, then use it to create a document-term matrix
# FOLLOWING CAN BE DONE IN SINGLE STEP:  X_train_dtm = vect.fit_transform(X_train)
vect.fit(X_train)
X_train_dtm = vect.transform(X_train)
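
In [ ]:
# Toy illustration of what CountVectorizer builds (hypothetical mini-corpus, not the
# project data): each row of the document-term matrix counts how often each
# vocabulary token appears in one document.
toy_vect = CountVectorizer()
toy_dtm = toy_vect.fit_transform(["cats love boxes", "dogs love walks walks"])
print(toy_vect.get_feature_names_out())
print(toy_dtm.toarray())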

In [ ]:
X_train_dtm

In [ ]:
# transform testing data (using fitted vocabulary) into a document-term matrix
X_test_dtm = vect.transform(X_test)
X_test_dtm

In [ ]:
# import and instantiate a Multinomial Naive Bayes model
from sklearn.naive_bayes import MultinomialNB
nb = MultinomialNB()

In [ ]:
# train the model using X_train_dtm (timing it with an IPython "magic command")
%time nb.fit(X_train_dtm, y_train)

In [ ]:
# make class predictions for X_test_dtm
y_pred_class = nb.predict(X_test_dtm)

In [ ]:
# calculate accuracy of class predictions
from sklearn import metrics
metrics.accuracy_score(y_test, y_pred_class)

In [ ]:
# print the confusion matrix
metrics.confusion_matrix(y_test, y_pred_class)
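
In [ ]:
# Same confusion matrix with labeled axes for readability (a sketch; rows are true
# classes, columns are predicted classes, 0 = notviral, 1 = viral).
pd.DataFrame(metrics.confusion_matrix(y_test, y_pred_class),
             index=['true_notviral', 'true_viral'],
             columns=['pred_notviral', 'pred_viral'])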

In [ ]:
# print message text for the false positives (non-viral incorrectly classified as viral)
X_test[y_test < y_pred_class]

In [ ]:
# print message text for the false negatives (viral incorrectly classified as non-viral)
X_test[y_test > y_pred_class]

In [ ]:
# example false negative
#X_test[3]

In [ ]:
# calculate predicted probabilities for X_test_dtm (poorly calibrated)
y_pred_prob = nb.predict_proba(X_test_dtm)[:, 1]
y_pred_prob

In [ ]:
# calculate AUC
metrics.roc_auc_score(y_test, y_pred_prob)
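
In [ ]:
# Optional ROC curve for the Naive Bayes probabilities (a sketch using the
# y_test / y_pred_prob arrays defined above).
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_prob)
plt.plot(fpr, tpr)
plt.plot([0, 1], [0, 1], linestyle='--')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve (Multinomial NB)')
plt.show()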

In [ ]:
# import and instantiate a logistic regression model
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression()

In [ ]:
# train the model using X_train_dtm
%time logreg.fit(X_train_dtm, y_train)

In [ ]:
# make class predictions for X_test_dtm
y_pred_class = logreg.predict(X_test_dtm)

In [ ]:
# calculate predicted probabilities for X_test_dtm (well calibrated)
y_pred_prob = logreg.predict_proba(X_test_dtm)[:, 1]
y_pred_prob

In [ ]:
# calculate accuracy
metrics.accuracy_score(y_test, y_pred_class)

In [ ]:
# calculate AUC
metrics.roc_auc_score(y_test, y_pred_prob)
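
In [ ]:
# Rough calibration check for the logistic regression probabilities (a sketch;
# calibration_curve bins the predicted probabilities and compares each bin's mean
# prediction to the observed fraction of viral items in that bin).
from sklearn.calibration import calibration_curve
frac_pos, mean_pred = calibration_curve(y_test, y_pred_prob, n_bins=10)
plt.plot(mean_pred, frac_pos, marker='o')
plt.plot([0, 1], [0, 1], linestyle='--')
plt.xlabel('Mean predicted probability')
plt.ylabel('Observed fraction viral')
plt.show()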

In [ ]:
# store the vocabulary of X_train
X_train_tokens = vect.get_feature_names_out()
len(X_train_tokens)

In [ ]:
# examine the first 50 tokens
print(X_train_tokens[0:50])

In [ ]:
# examine the last 50 tokens
print(X_train_tokens[-50:])

In [ ]:
# Naive Bayes counts the number of times each token appears in each class
nb.feature_count_

In [ ]:
# rows represent classes, columns represent tokens
nb.feature_count_.shape

In [ ]:
# number of times each token appears across all Non-viral Buzzes
non_viral_token_count = nb.feature_count_[0, :]
non_viral_token_count

In [ ]:
# number of times each token appears across all Viral Buzzes
viral_token_count = nb.feature_count_[1, :]
viral_token_count

In [ ]:
# create a DataFrame of tokens with their separate non-viral and viral counts
tokens = pd.DataFrame({'token':X_train_tokens, 'non_viral':non_viral_token_count, 'viral':viral_token_count}).set_index('token')
tokens.head()

In [ ]:
# examine 20 random DataFrame rows
tokens.sample(20, random_state=6)

In [ ]:
# Naive Bayes counts the number of observations in each class
nb.class_count_

In [ ]:
# add 1 to non-viral and viral counts to avoid dividing by 0
tokens['non_viral'] = tokens.non_viral + 1
tokens['viral'] = tokens.viral + 1
tokens.sample(5, random_state=6)

In [ ]:
# convert the non-viral and viral counts into frequencies
tokens['non_viral'] = tokens.non_viral / nb.class_count_[0]
tokens['viral'] = tokens.viral / nb.class_count_[1]
tokens.sample(5, random_state=6)

In [ ]:
# calculate the ratio of viral-to-non-viral for each token
tokens['viral_ratio'] = tokens.viral / tokens.non_viral
tokens.sample(5, random_state=6)

In [ ]:
# examine the DataFrame sorted by viral_ratio
# note: use sort() instead of sort_values() for pandas 0.16.2 and earlier
tokens.sort_values('viral_ratio', ascending=False)
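
In [ ]:
# Visualize the 20 tokens with the highest viral_ratio (an optional sketch;
# pandas' Series.plot draws the bar chart via matplotlib).
tokens.sort_values('viral_ratio', ascending=False).head(20)['viral_ratio'].plot(kind='barh')
plt.xlabel('viral_ratio')
plt.show()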

In [ ]:
# look up the viral_ratio for a given token
tokens.loc['stanford', 'viral_ratio']
