In [1]:
import sys, os
import nltk
import pandas as pd

In [2]:
from collections import defaultdict
from gensim import corpora, models, similarities
def make_dictionary(documents):
    """
    construct a dictionary, i.e. mapping btwn word ids and their freq of occurence in the whole corpus
    filter dictionary to remove stopwords and words occuring < min_count times
    
    input: documents is an iterable consisting of all the words in the corpus 
    output: filtered dictionary
    """

    
    dictionary = corpora.Dictionary(documents)

    stop_words = nltk.corpus.stopwords.words('english') 
    min_count = 2
    stop_ids = [dictionary.token2id[word] for word in stop_words
               if word in dictionary.token2id]
    rare_ids = [token_id for token_id, freq in dictionary.dfs.items()
                if freq < min_count]
    dictionary.filter_tokens(stop_ids + rare_ids)
    dictionary.compactify()
    return dictionary

def make_corpus(documents):
    """
    """
    dictionary = make_dictionary(documents)
    # convert corpus to vectors using bag-of-words representation, i.e. tuples of word indices and word counts
    corpus = [dictionary.doc2bow(words) for words in documents]
    return corpus, dictionary
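
# Illustrative note (hypothetical ids): doc2bow returns sparse
# (token_id, count) pairs, so ['head', 'banging', 'head'] might become
# [(0, 2), (1, 1)], with the integer ids taken from dictionary.token2id.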

def make_lsi_similarity_matrix(tfidf_corpus, dictionary):
    """
    construct LSI (latent semantic indexing) model on Tfidf-transformed corpus, print model topics, 
    return similarity matrix.
    """
    # construct model
    lsi = models.lsimodel.LsiModel(tfidf_corpus, id2word=dictionary, num_topics=200) 
    lsi.save('lsi-model.save')
    # create similarity matrix; num_best keeps only the 1000 best matches per query
    matsim = similarities.MatrixSimilarity(lsi[tfidf_corpus], num_best=1000)
    return matsim

def make_lda_similarity_matrix(corpus, dictionary):
    """
    Latent Dirichlet Allocation (LDA) model
    """
    # construct model
    lda = models.ldamodel.LdaModel(corpus, id2word=dictionary, num_topics=200)
    lda.save('lda-model.save')
    # create similarity matrix; num_best keeps only the 1000 best matches per query
    matsim = similarities.MatrixSimilarity(lda[corpus], num_best=1000)
    return matsim
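
Once the pipeline below has run and written 'lsi-model.save', the latent topics can be inspected. This is a minimal sketch, not part of the original pipeline:

In [ ]:
# Hypothetical topic inspection, run after the pipeline below has saved
# 'lsi-model.save'. show_topics returns (topic id, topic string) pairs
# describing each latent dimension as a weighted mix of words.
lsi = models.lsimodel.LsiModel.load('lsi-model.save')
for topic_id, topic in lsi.show_topics(num_topics=5):
    print(topic_id, topic)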

In [3]:
# Read the post data and assemble it into a single dataframe
os.chdir('../data')

output_fname="articles-n-forums-posts.csv"

# Read articles from file
input_fname="AutismParentMagazine-posts.csv"
df=pd.read_csv(input_fname,index_col=0)
del df['category']
df.head(2)
df.index.name='post id'

In [4]:
input_fname="MedHelp-posts.csv"
df2=pd.read_csv(input_fname,index_col=0)
del df2['user id']
del df2['mother post id']
df2['source']='http://www.medhelp.org'
df2.head(2)


Out[4]:
                                             title                                          text                                               href                  source
post id
1        Inappropriate Masterbation Down Syndrome  \n It is common for children and adoles...     http://www.medhelp.org//posts/Autism--Asperger...  http://www.medhelp.org
2        Inappropriate Masterbation Down Syndrome  \n A related discussion, self injusry i...     http://www.medhelp.org//posts/Autism--Asperger...  http://www.medhelp.org

In [5]:
# Join the two datasets into a single dataframe
# (DataFrame.append is deprecated in recent pandas; pd.concat is equivalent)
df = pd.concat([df, df2], ignore_index=True)

In [6]:
df.head(2)


Out[6]:
                                                href                                     source                                               text                                              title
0  https://www.autismparentingmagazine.com/autism...  https://www.autismparentingmagazine.com/  For children with autism spectrum disorder (AS...  Autism, Head Banging and other Self Harming Be...
1  https://www.autismparentingmagazine.com/high-q...  https://www.autismparentingmagazine.com/  Dr. Stephen Shore once said “If you’ve met one...  High Quality ABA Treatment:  What Every Parent...

In [7]:
# Tokenize the text: \w+ matches runs of word characters, dropping punctuation
import nltk
tokenizer = nltk.RegexpTokenizer(r'\w+')

# Get list of tokens from text in first article:
text = df['text'][0].lower()
ttext = tokenizer.tokenize(text)
print( text )
print( ttext )


for children with autism spectrum disorder (asd), head banging is a common way to self-soothe and communicate needs. both neurotypical and autistic babies and toddlers seek to recreate the rhythm that stimulated their vestibular system while in utero. other rhythmic habits that fuel a child’s kinesthetic drive include head rolling, body rocking, biting, and thumb… continue reading

['for', 'children', 'with', 'autism', 'spectrum', 'disorder', 'asd', 'head', 'banging', 'is', 'a', 'common', 'way', 'to', 'self', 'soothe', 'and', 'communicate', 'needs', 'both', 'neurotypical', 'and', 'autistic', 'babies', 'and', 'toddlers', 'seek', 'to', 'recreate', 'the', 'rhythm', 'that', 'stimulated', 'their', 'vestibular', 'system', 'while', 'in', 'utero', 'other', 'rhythmic', 'habits', 'that', 'fuel', 'a', 'child', 's', 'kinesthetic', 'drive', 'include', 'head', 'rolling', 'body', 'rocking', 'biting', 'and', 'thumb', 'continue', 'reading']
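
Note that the \w+ pattern treats any non-word character as a separator, so hyphenated words and contractions are split apart ('self-soothe' becomes 'self', 'soothe' in the output above). A quick illustrative check:

In [ ]:
# \w+ splits on any non-word character, so hyphens and apostrophes break
# words apart and punctuation is dropped entirely.
tokenizer.tokenize("self-soothe a child's needs.")
# -> ['self', 'soothe', 'a', 'child', 's', 'needs']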

In [8]:
# Get a column with the list of tokens for each post:

# 1) convert text to lower case
# 2) tokenize
# 3) save the result in a new column (tokens)
df['tokens'] = df['text'].map(lambda x: tokenizer.tokenize(x.lower()))
df['tokens'].head(5)

# Save the dataframe with tokens to file
df.to_csv(output_fname)

In [9]:
# Get similarity matrices
documents = df['tokens'].values
corpus,dictionary = make_corpus(documents)

# Save the corpus and dictionary to files
import pickle
pickle.dump(dictionary, open("dictionary.save", "wb"))
pickle.dump(corpus, open("corpus.save", "wb"))

# Build a tf-idf weighting model on the bag-of-words corpus
tfidf = models.TfidfModel(corpus)
tfidf.save('tfidf.save')

lsi_matsim = make_lsi_similarity_matrix(tfidf[corpus], dictionary)
lda_matsim = make_lda_similarity_matrix(corpus, dictionary)

# The LSI and LDA models were already saved to disk inside the routines above;
# save the similarity matrices too:
pickle.dump(lsi_matsim, open("lsi-matsim.save", "wb"))
pickle.dump(lda_matsim, open("lda-matsim.save", "wb"))
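
In a later session the saved artifacts can be reloaded to find posts similar to a new query. This is a minimal sketch under that assumption; the query text is illustrative:

In [ ]:
# Hypothetical reload-and-query sketch using only the files saved above.
import pickle
import nltk
from gensim import models

dictionary = pickle.load(open("dictionary.save", "rb"))
tfidf = models.TfidfModel.load('tfidf.save')
lsi = models.lsimodel.LsiModel.load('lsi-model.save')
lsi_matsim = pickle.load(open("lsi-matsim.save", "rb"))
tokenizer = nltk.RegexpTokenizer(r'\w+')

# Tokenize the query, map it to bag-of-words, weight with tf-idf, project
# into LSI space, then rank posts. With num_best set, the result is a list
# of (post index, cosine similarity) pairs, best first.
query = tokenizer.tokenize("my toddler keeps banging his head".lower())
sims = lsi_matsim[lsi[tfidf[dictionary.doc2bow(query)]]]
print(sims[:5])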

In [ ]: