In [1]:
import sys, os
import nltk
import pandas as pd

In [2]:
from collections import defaultdict
from gensim import corpora, models, similarities
def make_dictionary(documents):
    """
    construct a dictionary, i.e. mapping btwn word ids and their freq of occurence in the whole corpus
    filter dictionary to remove stopwords and words occuring < min_count times
    
    input: documents is an iterable consisting of all the words in the corpus 
    output: filtered dictionary
    """

    
    dictionary = corpora.Dictionary(documents)

    stop_words = nltk.corpus.stopwords.words('english') 
    min_count = 2
    stop_ids = [dictionary.token2id[word] for word in stop_words
               if word in dictionary.token2id]
    rare_ids = [id for id, freq in dictionary.dfs.items()
                if freq < min_count]
    dictionary.filter_tokens(stop_ids + rare_ids)
    dictionary.compactify()
    return(dictionary)

def make_corpus(documents):
    """
    """
    dictionary = make_dictionary(documents)
    # convert corpus to vectors using bag-of-words representation, i.e. tuples of word indices and word counts
    corpus = [dictionary.doc2bow(words) for words in documents]
    return(corpus, dictionary)

def make_lsi_similarity_matrix(tfidf_corpus, dictionary):
    """
    construct LSI (latent semantic indexing) model on Tfidf-transformed corpus, print model topics, 
    return similarity matrix.
    """
    # construct model
    lsi = models.lsimodel.LsiModel(tfidf_corpus, id2word=dictionary, num_topics=200) 
    lsi.save('lsi-model.save')
    # create similarity matrix
    matsim = similarities.MatrixSimilarity(lsi[tfidf_corpus], num_best=4)
    return(matsim)

def make_lda_similarity_matrix(corpus, dictionary):
    """
    Latent Dirichlet Allocation (LDA) model
    """
    # construct model
    lda = models.ldamodel.LdaModel(corpus, id2word=dictionary, num_topics=200)
    lda.save('lda-model.save')
    # create similarity matrix
    matsim = similarities.MatrixSimilarity(lda[corpus], num_best=4)
    return(matsim)

In [3]:
# Read database of data
input_fname="AutismParentMagazine-posts.csv"
output_fname=input_fname.replace(".csv","-tokens.csv")

os.chdir('../data')
df=pd.read_csv(input_fname,index_col=0)
df.head(5)


Out[3]:
title source category text href
0 Autism, Head Banging and other Self Harming Be... https://www.autismparentingmagazine.com/ category-applied-behavior-analysis-aba For children with autism spectrum disorder (AS... https://www.autismparentingmagazine.com/autism...
1 High Quality ABA Treatment:  What Every Parent... https://www.autismparentingmagazine.com/ category-applied-behavior-analysis-aba Dr. Stephen Shore once said “If you’ve met one... https://www.autismparentingmagazine.com/high-q...
2 Help: I Don’t Know How to Choose an Applied Be... https://www.autismparentingmagazine.com/ category-applied-behavior-analysis-aba Help! I am going to be starting Applied Behav... https://www.autismparentingmagazine.com/choosi...
3 HELP: My Autistic Child is Absolutely Terrifie... https://www.autismparentingmagazine.com/ category-applied-behavior-analysis-aba How do you handle high anxiety of a child on t... https://www.autismparentingmagazine.com/help-a...
4 HELP: I Need Communication Advice for Autistic... https://www.autismparentingmagazine.com/ category-applied-behavior-analysis-aba A grandfather from Singapore asks… My eldest g... https://www.autismparentingmagazine.com/help-i...

In [4]:
# Tokenize data
import nltk
tokenizer = nltk.RegexpTokenizer(r'\w+')

# Get list of tokens from text in first article:
text = df['text'][0].lower()
ttext = tokenizer.tokenize(text)
print( text )
print( ttext )


for children with autism spectrum disorder (asd), head banging is a common way to self-soothe and communicate needs. both neurotypical and autistic babies and toddlers seek to recreate the rhythm that stimulated their vestibular system while in utero. other rhythmic habits that fuel a child’s kinesthetic drive include head rolling, body rocking, biting, and thumb… continue reading

['for', 'children', 'with', 'autism', 'spectrum', 'disorder', 'asd', 'head', 'banging', 'is', 'a', 'common', 'way', 'to', 'self', 'soothe', 'and', 'communicate', 'needs', 'both', 'neurotypical', 'and', 'autistic', 'babies', 'and', 'toddlers', 'seek', 'to', 'recreate', 'the', 'rhythm', 'that', 'stimulated', 'their', 'vestibular', 'system', 'while', 'in', 'utero', 'other', 'rhythmic', 'habits', 'that', 'fuel', 'a', 'child', 's', 'kinesthetic', 'drive', 'include', 'head', 'rolling', 'body', 'rocking', 'biting', 'and', 'thumb', 'continue', 'reading']

In [5]:
# Get a column with list of tokens:

# 1) convert to lower case 
# 2) get tokens
# 2) save data in a new column (tokens)
df['tokens'] = df['text'].map(lambda x: tokenizer.tokenize(x.lower()))
df['tokens'].head(5)


Out[5]:
0    [for, children, with, autism, spectrum, disord...
1    [dr, stephen, shore, once, said, if, you, ve, ...
2    [help, i, am, going, to, be, starting, applied...
3    [how, do, you, handle, high, anxiety, of, a, c...
4    [a, grandfather, from, singapore, asks, my, el...
Name: tokens, dtype: object

In [8]:
nltk.download()


showing info https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/index.xml
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-8-a1a554e5d735> in <module>()
----> 1 nltk.download()

/Users/rangel/anaconda3/envs/cdips2017/lib/python3.6/site-packages/nltk/downloader.py in download(self, info_or_id, download_dir, quiet, force, prefix, halt_on_error, raise_on_error)
    659             # function should make a new copy of self to use?
    660             if download_dir is not None: self._download_dir = download_dir
--> 661             self._interactive_download()
    662             return True
    663 

/Users/rangel/anaconda3/envs/cdips2017/lib/python3.6/site-packages/nltk/downloader.py in _interactive_download(self)
    980         if TKINTER:
    981             try:
--> 982                 DownloaderGUI(self).mainloop()
    983             except TclError:
    984                 DownloaderShell(self).run()

/Users/rangel/anaconda3/envs/cdips2017/lib/python3.6/site-packages/nltk/downloader.py in mainloop(self, *args, **kwargs)
   1715 
   1716     def mainloop(self, *args, **kwargs):
-> 1717         self.top.mainloop(*args, **kwargs)
   1718 
   1719     #/////////////////////////////////////////////////////////////////

/Users/rangel/anaconda3/envs/cdips2017/lib/python3.6/tkinter/__init__.py in mainloop(self, n)
   1275     def mainloop(self, n=0):
   1276         """Call the mainloop of Tk."""
-> 1277         self.tk.mainloop(n)
   1278     def quit(self):
   1279         """Quit the Tcl interpreter. All widgets will be destroyed."""

KeyboardInterrupt: 

In [ ]:
# Save dataframe with tokens into files
df.to_csv(output_fname)

In [6]:
# Get similarity matrices
documents = df['tokens'].values
corpus,dictionary = make_corpus(documents)

#Save corpus into file
import pickle
pickle.dump(dictionary,open("dictionary.save","wb"))
pickle.dump(corpus,open("corpus.save", "wb"))

tfidf = models.TfidfModel(corpus)
tfidf.save('tfidf.save')

lsi_matsim = make_lsi_similarity_matrix(tfidf[corpus], dictionary)
lda_matsim = make_lda_similarity_matrix(corpus, dictionary)

# The models are saved into files in the above routines
# Save similarity matrices too:
pickle.dump(lsi_matsim,open("lsi-matsim.save","wb"))
pickle.dump(lda_matsim,open("lda-matsim.save","wb"))

In [ ]: