Importing Modules


In [ ]:
import nltk
import sklearn
import matplotlib
#print('The nltk version is {}.'.format(nltk.__version__))
#print('The scikit-learn version is {}.'.format(sklearn.__version__))
#print('The matplotlib version is {}.'.format(matplotlib.__version__))

In [ ]:
import sys
import numpy as np
import pickle
from time import time

from sklearn import model_selection
# from sklearn import cross_validation  # removed in scikit-learn 0.20; use model_selection instead
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import SelectPercentile, f_classif

from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score

import matplotlib.pyplot as plt
%matplotlib inline
from collections import Counter

Functions


In [ ]:
def train_predict(desc):
    print()
    print("Please wait, processing:", str(desc))
    print()

    ## Reduce the training set to 1% of its size to speed up training
    features_train_small = features_train[:len(features_train) // 100]
    labels_train_small = labels_train[:len(labels_train) // 100]

    t0 = time()
    clf.fit(features_train_small, labels_train_small)
    traintime = "Training time: " + str(round(time() - t0, 3))

    t1 = time()
    pred = clf.predict(features_test)
    predtime = "Predicting time: " + str(round(time() - t1, 3))

    print()
    print("********** Results for experiment: \"%s\" ************" % desc)
    print()
    print(traintime, "s")
    print(predtime, "s")

    plotgraph(pred, str(desc))
    accuracy(labels_test, pred)

def train_predict_fulldataset(desc):
    print()
    print("Please wait, processing:", str(desc))
    print()

    t0 = time()
    clf.fit(features_train, labels_train)
    traintime = "Training time: " + str(round(time() - t0, 3))

    t1 = time()
    pred = clf.predict(features_test)
    predtime = "Predicting time: " + str(round(time() - t1, 3))

    print()
    print("********** Results for experiment: \"%s\" ************" % desc)
    print()
    print(traintime, "s")
    print(predtime, "s")

    plotgraph(pred, str(desc))
    accuracy(labels_test, pred)
    
    
def grid_train_predict(desc):
    print()
    print("Please wait, processing:", str(desc))
    print()

    ## Reduce the training set to 1% of its size to speed up training
    features_train_small = features_train[:len(features_train) // 100]
    labels_train_small = labels_train[:len(labels_train) // 100]

    t0 = time()
    clf.fit(features_train_small, labels_train_small)
    traintime = "Training time: " + str(round(time() - t0, 3))

    t1 = time()
    pred = clf.predict(features_test)
    predtime = "Predicting time: " + str(round(time() - t1, 3))

    print()
    print("********** Results for experiment: \"%s\" ************" % desc)
    print()
    print(traintime, "s")
    print(predtime, "s")

    plotgraph(pred, str(desc))
#     accuracy(labels_test, pred)

def grid_train_predict_fulldataset(desc):
    print()
    print("Please wait, processing:", str(desc))
    print()

    t0 = time()
    clf.fit(features_train, labels_train)
    traintime = "Training time: " + str(round(time() - t0, 3))

    t1 = time()
    pred = clf.predict(features_test)
    predtime = "Predicting time: " + str(round(time() - t1, 3))

    print()
    print("********** Results for experiment: \"%s\" ************" % desc)
    print()
    print(traintime, "s")
    print(predtime, "s")

    plotgraph(pred, str(desc))
#     accuracy(labels_test, pred)

def plotgraph(pred, desc):
    # Count predictions per class: label 1 = Chris, label 0 = Sara
    c = Counter(pred)
    chris = c[1]
    sara = c[0]

    print()
    print("Number of predicted emails for Chris:", chris)
    print("Number of predicted emails for Sara:", sara)

    bar_heights = (chris, sara)
    x = (1, 2)

    fig, ax = plt.subplots()
    width = 0.4

    # Bars are center-aligned on their x positions by default (matplotlib >= 2.0)
    ax.bar(x, bar_heights, width)

    ax.set_xlim((0, 3))
    ax.set_ylim((0, max(chris, sara) * 1.1))

    ax.set_xticks(x)
    ax.set_xticklabels(['Chris', 'Sara'])
    ax.set_title(desc)

    plt.show()

def accuracy(labels_test, pred):
    acc = accuracy_score(labels_test, pred)
    print("Total Accuracy:", acc)
    
def preprocess(words_file="../data/word_data.pkl", authors_file="../data/email_authors.pkl"):
    """
        This function takes a pre-made list of email texts (by default word_data.pkl)
        and the corresponding authors (by default email_authors.pkl) and performs
        a number of preprocessing steps:
            -- splits into training/testing sets (10% testing)
            -- vectorizes into a tfidf matrix
            -- selects/keeps the most helpful features

        After this, the features and labels are put into numpy arrays, which play
        nicely with sklearn functions.

        4 objects are returned:
            -- training/testing features
            -- training/testing labels

    """
    ### the words (features) and authors (labels), already largely preprocessed;
    ### this preprocessing will be repeated in the text learning mini-project
    word_data = pickle.load(open(words_file, "rb"))
    authors = pickle.load(open(authors_file, "rb"))

    ### test_size is the fraction of events assigned to the test set
    ### (the remainder go into training)
    features_train, features_test, labels_train, labels_test = model_selection.train_test_split(
        word_data, authors, test_size=0.1, random_state=42)

    ### text vectorization--go from strings to lists of numbers
    vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
                                 stop_words='english')
    features_train_transformed = vectorizer.fit_transform(features_train)
    features_test_transformed  = vectorizer.transform(features_test)

    ### feature selection, because text is super high dimensional and
    ### can be really computationally chewy as a result
    selector = SelectPercentile(f_classif, percentile=1)
    selector.fit(features_train_transformed, labels_train)
    features_train_transformed = selector.transform(features_train_transformed).toarray()
    features_test_transformed  = selector.transform(features_test_transformed).toarray()

    ### info on the data
    print()
    print("Number of available emails to be trained for Chris:", sum(labels_train))
    print("Number of available emails to be trained for Sara:", len(labels_train) - sum(labels_train))

    return features_train_transformed, features_test_transformed, labels_train, labels_test

Load and Preprocess Data


In [ ]:
features_train, features_test, labels_train, labels_test = preprocess()
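
The helper functions above read the classifier from a module-level variable named clf, which is expected to be defined before they are called. As a minimal usage sketch (the GaussianNB choice and the experiment labels here are illustrative assumptions; any scikit-learn estimator with fit/predict would work):

In [ ]:
# Sketch only: the helpers assume a global `clf`, so define one first.
# GaussianNB is an assumption; any sklearn estimator with fit/predict works.
clf = GaussianNB()
train_predict("GaussianNB on 1% of the training set")
train_predict_fulldataset("GaussianNB on the full training set")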