Importing Modules


In [1]:
import sys
import numpy as np
import pickle
from time import time

from sklearn import cross_validation  # note: renamed to sklearn.model_selection in sklearn >= 0.18
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import SelectPercentile, f_classif

import matplotlib.pyplot as plt

Auxiliary Functions


In [3]:
def preprocess(words_file="../tools/word_data.pkl", authors_file="../tools/email_authors.pkl"):
    """
        this function takes a pre-made list of email texts (by default word_data.pkl)
        and the corresponding authors (by default email_authors.pkl) and performs
        a number of preprocessing steps:
            -- splits into training/testing sets (10% testing)
            -- vectorizes into tfidf matrix
            -- selects/keeps most helpful features

        after this, the features and labels are put into numpy arrays, which play nicely with sklearn functions

        4 objects are returned:
            -- training/testing features
            -- training/testing labels

    """

    ### the words (features) and authors (labels), already largely preprocessed
    ### this preprocessing will be repeated in the text learning mini-project
    word_data = pickle.load(open(words_file, "r"))
    authors = pickle.load(open(authors_file, "r"))
    
    
    ### test_size is the percentage of events assigned to the test set (remainder go into training)
    features_train, features_test, labels_train, labels_test = cross_validation.train_test_split(word_data, authors, test_size=0.1, random_state=42)

    
    ### text vectorization--go from strings to lists of numbers
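    ### sublinear_tf replaces raw term counts with 1 + log(tf), and max_df=0.5
    ### drops any word that appears in more than half the documents, since such
    ### ubiquitous words carry little signal about who wrote an email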
    vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
                                 stop_words='english')
    features_train_transformed = vectorizer.fit_transform(features_train)
    features_test_transformed  = vectorizer.transform(features_test)
    
                                      
    
    ### feature selection, because text is super high dimensional and
    ### can be really computationally chewy as a result
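    ### SelectPercentile keeps only the top 10% of features here, ranked by the
    ### ANOVA F-statistic (f_classif) between each feature and the labels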
    selector = SelectPercentile(f_classif, percentile=10)
    selector.fit(features_train_transformed, labels_train)
    features_train_transformed = selector.transform(features_train_transformed).toarray()
    features_test_transformed  = selector.transform(features_test_transformed).toarray()
    
  
    ### info on the data
    print "no. of Chris training emails:", sum(labels_train)
    print "no. of Sara training emails:", len(labels_train)-sum(labels_train)


    return features_train_transformed, features_test_transformed, labels_train, labels_test
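
To see what the vectorize-then-select pipeline above actually does, here is a minimal, self-contained sketch on a hypothetical toy corpus (the texts and labels are made up for illustration, and max_df is omitted because the corpus is tiny):

In [ ]:
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import SelectPercentile, f_classif

# hypothetical toy corpus: label 1 and label 0 stand in for Chris and Sara
toy_texts = ["money transfer offer money",
             "meeting agenda schedule",
             "wire money offer",
             "schedule the meeting agenda"]
toy_labels = [1, 0, 1, 0]

# strings -> tfidf matrix (one row per document, one column per term)
vectorizer = TfidfVectorizer(sublinear_tf=True, stop_words='english')
tfidf = vectorizer.fit_transform(toy_texts)

# keep the top half of the features, ranked by the ANOVA F-score (f_classif)
selector = SelectPercentile(f_classif, percentile=50)
reduced = selector.fit_transform(tfidf, toy_labels)

print "tfidf shape:", tfidf.shape, "-> reduced shape:", reduced.shape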

In [9]:
#NAIVE BAYES

import sys
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
from time import time
sys.path.append("../tools")
from email_preprocess import preprocess


### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()




#########################################################
### your code goes here ###

from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
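
# GaussianNB fits one Gaussian per feature per class and applies Bayes' rule;
# it has no hyperparameters to tune, which makes it a quick first baseline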

clf = GaussianNB()
t0 = time()
clf.fit(features_train, labels_train)

arr = []  # collect the timing and accuracy stats so they can be saved below


traintime = "training time: " + str(round(time()-t0, 3))
arr.append(traintime)
print traintime, "s"

t1 = time()
pred = clf.predict(features_test)

predtime = "predicting time: " + str(round(time()-t1, 3))
arr.append(predtime)
print predtime, "s"

accuracy = accuracy_score(labels_test, pred)
arr.append(accuracy)
np.save('/tmp/123', arr)  # stash this run's stats in /tmp for later comparison

print accuracy


from collections import Counter

# tally the predictions: label 1 = Chris, label 0 = Sara
c = Counter(pred)

chris = c[1]
print "No. of predictions for Chris:", chris
sara = c[0]
print "No. of predictions for Sara:", sara

bar_heights = (chris, sara)
x = (1, 2)

fig, ax = plt.subplots()
width = 0.4

ax.bar(x, bar_heights, width)

ax.set_xlim((0, 3))
ax.set_ylim((0, max(chris, sara)*1.1))

# bars are left-aligned in older matplotlib, so shift the ticks to the bar centers
ax.set_xticks([i+width/2 for i in x])
ax.set_xticklabels(['Chris', 'Sara'])

plt.show()



## TODO: show which individual test emails were predicted as Chris and which
## as Sara (the counts and the bar chart above already cover the totals);
## one possible sketch follows after the output below.


no. of Chris training emails: 7936
no. of Sara training emails: 7884
training time: 0.869 s
predicting time: 0.107 s
0.973265073948
No. of predictions for Chris: 906
No. of predictions for Sara: 852
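
One possible way to finish the TODO above is sketched below; it assumes pred (the prediction array) from the Naive Bayes cell is still in scope, and it only reports test-set indices, since preprocess() no longer returns the raw email texts:

In [ ]:
import numpy as np

# split the test-set indices by predicted author (label 1 = Chris, 0 = Sara)
pred = np.asarray(pred)
chris_idx = np.where(pred == 1)[0]
sara_idx = np.where(pred == 0)[0]

print "emails predicted as Chris:", len(chris_idx)
print "first few of their indices:", chris_idx[:10]
print "emails predicted as Sara:", len(sara_idx)
print "first few of their indices:", sara_idx[:10]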