In [1]:
# imports
import pandas as pd
import numpy as np
from nltk.corpus import stopwords
from gensim import corpora, models, utils
from nltk.stem import WordNetLemmatizer
In [2]:
# load the e-mails and keep only rows with no missing values
data = pd.read_csv('hillary-clinton-emails/Emails.csv', index_col=0).dropna()
# keep only the extracted body and subject columns
texts = pd.concat((data.ExtractedBodyText, data.ExtractedSubject), axis=1)
In [3]:
# reply/forward markers commonly found in subject lines ('fvv' is likely an OCR artifact of 'fw')
sw = ['re', 'fw', 'fvv', 'fwd']
To improve the results of the LDA model, we group the e-mails by subject, so that all messages belonging to the same thread form a single document. We first clean the subjects by removing the reply/forward markers 're', 'fw', 'fvv', and 'fwd'.
In [4]:
def filt(row):
    # tokenize and lowercase the subject, then drop the reply/forward markers
    t = utils.simple_preprocess(row.ExtractedSubject)
    kept = [x for x in t if x not in sw]
    return ' '.join(kept)

texts['ExtractedSubject'] = texts.apply(filt, axis=1)
# merge all e-mails that share a cleaned subject into a single row,
# concatenating their bodies and subjects
texts = texts.groupby(by='ExtractedSubject', as_index=False).apply(lambda x: (x + ' ').sum())
In [5]:
texts.head()
Out[5]:
We concatenate the e-mail body with the subject.
In [6]:
# the grouping above can leave empty entries; replace NaN with empty strings
texts.ExtractedBodyText.fillna('', inplace=True)
texts.ExtractedSubject.fillna('', inplace=True)
texts['SubjectBody'] = texts.ExtractedBodyText + ' ' + texts.ExtractedSubject
mails = texts.SubjectBody
To get more meaningful topics, we filter out English stop words along with some custom words that are frequent in these e-mails but carry little meaning as topics.
In [7]:
documents = []
# frequent words in this corpus that carry little topical meaning
custom = ['like', 'think', 'know', 'want', 'sure', 'thing', 'send', 'sent', 'speech', 'print', 'time', 'said', 'maybe', 'today', 'tomorrow', 'thank', 'thanks']
english_stop_words = ["a", "about", "above", "above", "across", "after", "afterwards", "again", "against", "all", "almost", "alone", "along", "already", "also","although","always","am","among", "amongst", "amoungst", "amount", "an", "and", "another", "any","anyhow","anyone","anything","anyway", "anywhere", "are", "around", "as", "at", "back","be","became", "because","become","becomes", "becoming", "been", "before", "beforehand", "behind", "being", "below", "beside", "besides", "between", "beyond", "bill", "both", "bottom","but", "by", "call", "can", "cannot", "cant", "co", "con", "could", "couldnt", "cry", "de", "describe", "detail", "do", "done", "down", "due", "during", "each", "eg", "eight", "either", "eleven","else", "elsewhere", "empty", "enough", "etc", "even", "ever", "every", "everyone", "everything", "everywhere", "except", "few", "fifteen", "fify", "fill", "find", "fire", "first", "five", "for", "former", "formerly", "forty", "found", "four", "from", "front", "full", "further", "get", "give", "go", "had", "has", "hasnt", "have", "he", "hence", "her", "here", "hereafter", "hereby", "herein", "hereupon", "hers", "herself", "him", "himself", "his", "how", "however", "hundred", "ie", "if", "in", "inc", "indeed", "interest", "into", "is", "it", "its", "itself", "keep", "last", "latter", "latterly", "least", "less", "ltd", "made", "many", "may", "me", "meanwhile", "might", "mill", "mine", "more", "moreover", "most", "mostly", "move", "much", "must", "my", "myself", "name", "namely", "neither", "never", "nevertheless", "next", "nine", "no", "nobody", "none", "noone", "nor", "not", "nothing", "now", "nowhere", "of", "off", "often", "on", "once", "one", "only", "onto", "or", "other", "others", "otherwise", "our", "ours", "ourselves", "out", "over", "own","part", "per", "perhaps", "please", "put", "rather", "re", "same", "see", "seem", "seemed", "seeming", "seems", "serious", "several", "she", "should", "show", "side", "since", "sincere", "six", "sixty", "so", "some", "somehow", "someone", "something", "sometime", "sometimes", "somewhere", "still", "such", "system", "take", "ten", "than", "that", "the", "their", "them", "themselves", "then", "thence", "there", "thereafter", "thereby", "therefore", "therein", "thereupon", "these", "they", "thickv", "thin", "third", "this", "those", "though", "three", "through", "throughout", "thru", "thus", "to", "together", "too", "top", "toward", "towards", "twelve", "twenty", "two", "un", "under", "until", "up", "upon", "us", "very", "via", "was", "we", "well", "were", "what", "whatever", "when", "whence", "whenever", "where", "whereafter", "whereas", "whereby", "wherein", "whereupon", "wherever", "whether", "which", "while", "whither", "who", "whoever", "whole", "whom", "whose", "why", "will", "with", "within", "without", "would", "yet", "you", "your", "yours", "yourself", "yourselves", "the"]
# use a set for fast membership tests
sw = set(stopwords.words('english') + sw + custom + english_stop_words)
lemmatizer = WordNetLemmatizer()
for text in mails:
    t = utils.simple_preprocess(text)
    # drop stop words and very short tokens
    filt = [x for x in t if x not in sw and len(x) > 3]
    lemmatized = [lemmatizer.lemmatize(x) for x in filt]
    # lemmatization can map words back onto stop words or short forms, so filter again
    filt2 = [x for x in lemmatized if x not in sw and len(x) > 3]
    documents.append(filt2)
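As a quick sanity check, we can peek at the first few tokens of one processed document:
In [ ]:
print(documents[0][:15])  # tokens of the first document after filtering and lemmatization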
To use the LDA model, we need to transform every document into its bag-of-words representation: a list of (token ID, token frequency) tuples.
In [8]:
# map every unique token to an integer ID
dictionary = corpora.Dictionary(documents)
# represent each document as a list of (token ID, count) tuples
corpus = [dictionary.doc2bow(doc) for doc in documents]
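As an illustration, doc2bow simply counts each known token (the IDs in the comment below are made up; the actual values depend on the dictionary, and tokens absent from the dictionary are silently dropped):
In [ ]:
example = dictionary.doc2bow(['state', 'department', 'state'])
print(example)  # e.g. [(0, 2), (1, 1)]: (token ID, count) pairs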
Now we train LDA models with different numbers of topics and inspect the resulting topics.
In [9]:
import pprint
pp = pprint.PrettyPrinter(depth=2)
# train one model per topic count and print its topics for manual inspection
for i in range(5, 50, 10):
    print('------------------------------------')
    print(i, 'topics')
    lda = models.LdaModel(corpus, num_topics=i, id2word=dictionary)
    pp.pprint(lda.print_topics(lda.num_topics))
    print()
With 25 topics, the resulting topics seem the most meaningful, so we keep that model.
In [10]:
# final model with the chosen number of topics
lda = models.LdaModel(corpus, num_topics=25, id2word=dictionary)
pp.pprint(lda.print_topics(lda.num_topics))
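The choice of 25 topics above rests on eyeballing the printed topics. As an optional, more quantitative check (assuming a gensim version that ships CoherenceModel), we could compute topic coherence on the tokenized documents; higher scores usually indicate more interpretable topics:
In [ ]:
from gensim.models import CoherenceModel

# c_v coherence over the same tokenized documents used to build the dictionary
cm = CoherenceModel(model=lda, texts=documents, dictionary=dictionary, coherence='c_v')
print(cm.get_coherence())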