Bag of Words Meets Bags of Popcorn

This notebook contains the code from Part 2 of Kaggle's "Bag of Words Meets Bags of Popcorn" tutorial, which trains a Word2Vec model on the IMDB movie review data.


In [17]:
import pandas as pd
from bs4 import BeautifulSoup
import re
from nltk.corpus import stopwords
import nltk.data

In [4]:
# Read data from files. quoting=3 (csv.QUOTE_NONE) tells pandas not to
# treat double quotes inside the reviews as special.
train = pd.read_csv( "labeledTrainData.tsv", header=0,
                     delimiter="\t", quoting=3 )
test = pd.read_csv( "testData.tsv", header=0, delimiter="\t", quoting=3 )
unlabeled_train = pd.read_csv( "unlabeledTrainData.tsv", header=0,
                               delimiter="\t", quoting=3 )

# Verify the number of reviews that were read (100,000 in total)
print "Read %d labeled train reviews, %d labeled test reviews, " \
 "and %d unlabeled reviews\n" % (train["review"].size,  
 test["review"].size, unlabeled_train["review"].size )


Read 25000 labeled train reviews, 25000 labeled test reviews, and 50000 unlabeled reviews
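
The reviews contain raw HTML and the occasional bare URL, which is what the cleaning function below deals with. A quick look at the data (a hypothetical check, not part of the original tutorial):

In [ ]:
# Peek at the columns and the start of the first raw review
print train.columns.values
print train["review"][0][:300]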


In [5]:
def review_to_wordlist( review, remove_stopwords=False ):
    # Function to convert a document to a sequence of words,
    # optionally removing stop words.  Returns a list of words.
    #
    # 1. Remove HTML
    review_text = BeautifulSoup(review, "html.parser").get_text()
    #  
    # 2. Remove non-letters
    review_text = re.sub("[^a-zA-Z]"," ", review_text)
    #
    # 3. Convert words to lower case and split them
    words = review_text.lower().split()
    #
    # 4. Optionally remove stop words (false by default)
    if remove_stopwords:
        stops = set(stopwords.words("english"))
        words = [w for w in words if w not in stops]
    #
    # 5. Return a list of words
    return words
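
As a quick sanity check (a hypothetical example, not from the tutorial), the function should strip the markup and punctuation and lower-case everything:

In [ ]:
# Hypothetical input; with remove_stopwords=True common words are dropped too
print review_to_wordlist("<br />This movie was GREAT, really!", remove_stopwords=True)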

In [8]:
# Load the punkt tokenizer
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')

# Define a function to split a review into parsed sentences
def review_to_sentences( review, tokenizer, remove_stopwords=False ):
    # Function to split a review into parsed sentences. Returns a 
    # list of sentences, where each sentence is a list of words
    #
    # 1. Use the NLTK tokenizer to split the paragraph into sentences
    raw_sentences = tokenizer.tokenize(review.strip())
    #
    # 2. Loop over each sentence
    sentences = []
    for raw_sentence in raw_sentences:
        # If a sentence is empty, skip it
        if len(raw_sentence) > 0:
            # Otherwise, call review_to_wordlist to get a list of words
            sentences.append( review_to_wordlist( raw_sentence,
              remove_stopwords ))
    #
    # Return the list of sentences (each sentence is a list of words,
    # so this returns a list of lists)
    return sentences
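
Again a hypothetical sanity check: a two-sentence review should come back as two word lists.

In [ ]:
# Each sentence becomes its own list of words (a list of lists overall)
print review_to_sentences("This movie was great. I loved every minute of it!", tokenizer)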

In [11]:
sentences = []  # Initialize an empty list of sentences

print "Parsing sentences from training set"
for review in train["review"]:
    sentences += review_to_sentences(review.decode("utf8"), tokenizer)

print "Parsing sentences from unlabeled set"
for review in unlabeled_train["review"]:
    sentences += review_to_sentences(review.decode("utf8"), tokenizer)


Parsing sentences from training set
Parsing sentences from unlabeled set
/usr/local/lib/python2.7/site-packages/bs4/__init__.py:189: UserWarning: "http://www.happierabroad.com"" looks like a URL. Beautiful Soup is not an HTTP client. You should probably use an HTTP client to get the document behind the URL, and feed that document to Beautiful Soup.
[Several similar BeautifulSoup UserWarnings elided: a handful of reviews contain bare URLs or strings like "." and ".." that look like filenames rather than markup. The warnings are harmless here.]


In [36]:
import logging
from gensim.models import word2vec

logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
    level=logging.INFO)

# Set values for various parameters
num_features = 300    # Word vector dimensionality
min_word_count = 40   # Minimum word count
num_workers = 4       # Number of threads to run in parallel
context = 10          # Context window size
downsampling = 1e-3   # Downsample setting for frequent words


# Initialize and train the model (this will take some time)
print "Training model..."
model = word2vec.Word2Vec(sentences, workers=num_workers,
            size=num_features, min_count=min_word_count,
            window=context, sample=downsampling)

# If you don't plan to train the model any further, calling 
# init_sims will make the model much more memory-efficient.
model.init_sims(replace=True)

# It can be helpful to create a meaningful model name and 
# save the model for later use. You can load it later using Word2Vec.load()
model_name = "300features_40minwords_10context"
model.save(model_name)


Training model...
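
Once training finishes it is worth checking the vocabulary the model actually learned. A hypothetical check using the attribute names this (older) gensim version exposes; newer releases move them under model.wv:

In [ ]:
# syn0 holds one row of num_features floats per vocabulary word
print model.syn0.shape
print model.index2word[:10]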

In [37]:
model.doesnt_match("man woman child kitchen".split())


Out[37]:
'kitchen'
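
doesnt_match averages the word vectors and returns the word least similar to that average. A minimal sketch of the same idea (doesnt_match_sketch is a hypothetical helper; it relies on the vectors being unit length after init_sims(replace=True)):

In [ ]:
import numpy as np

def doesnt_match_sketch(model, words):
    # With unit-length vectors, a dot product against the mean vector
    # acts as a cosine-style similarity score
    vecs = np.array([model[w] for w in words])
    sims = np.dot(vecs, vecs.mean(axis=0))
    return words[np.argmin(sims)]

print doesnt_match_sketch(model, "man woman child kitchen".split())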

In [38]:
model.doesnt_match("france england germany berlin".split())


Out[38]:
'berlin'

In [41]:
model.most_similar("man")


Out[41]:
[(u'woman', 0.6366335153579712),
 (u'lady', 0.4809136390686035),
 (u'boy', 0.4803352355957031),
 (u'guy', 0.4642757773399353),
 (u'girl', 0.4357897937297821),
 (u'person', 0.4342450499534607),
 (u'men', 0.4255311191082001),
 (u'named', 0.40627405047416687),
 (u'doctor', 0.389176607131958),
 (u'he', 0.3876385986804962)]

In [42]:
model.most_similar("queen")


Out[42]:
[(u'princess', 0.49214181303977966),
 (u'latifah', 0.4819111227989197),
 (u'victoria', 0.44896575808525085),
 (u'king', 0.43613988161087036),
 (u'bee', 0.42390328645706177),
 (u'lion', 0.37804609537124634),
 (u'marie', 0.37270569801330566),
 (u'blonde', 0.3542603552341461),
 (u'norma', 0.34560805559158325),
 (u'seductive', 0.3437143862247467)]

In [43]:
model.most_similar("awful")


Out[43]:
[(u'terrible', 0.6609998941421509),
 (u'horrible', 0.634229838848114),
 (u'dreadful', 0.6030151844024658),
 (u'atrocious', 0.5839338302612305),
 (u'horrendous', 0.5305643081665039),
 (u'laughable', 0.5269584655761719),
 (u'embarrassing', 0.5222671031951904),
 (u'abysmal', 0.5205289125442505),
 (u'ridiculous', 0.5062302350997925),
 (u'horrid', 0.5033165812492371)]
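
most_similar also takes positive and negative word lists, which enables the classic vector-arithmetic analogies. A hypothetical example (results will vary on a training set this small):

In [ ]:
# king - man + woman: nearest neighbours to the combined vector
print model.most_similar(positive=["king", "woman"], negative=["man"], topn=5)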

In [45]:
model["flower"]


Out[45]:
array([-0.07484224,  0.03504419,  0.02251819,  0.08772575,  0.00671773,
       -0.00981483,  0.06652873, -0.09863216, -0.0769446 , -0.02300313,
       -0.08410838, -0.05238326, -0.02031626,  0.10492887, -0.02146316,
        0.06370694, -0.02195618, -0.0079751 , -0.04462753, -0.10103639,
       -0.07513167, -0.06514076,  0.02474669,  0.03929214, -0.01931028,
       -0.05430129,  0.06090382,  0.02713084, -0.02405303, -0.02892213,
        0.04481262, -0.07950708, -0.01645481, -0.0349642 ,  0.07431642,
        0.00110375,  0.08031197,  0.01761374, -0.06571191, -0.09526731,
        0.05261568, -0.01210957,  0.07132824, -0.07299084, -0.05902616,
       -0.04291788,  0.03389191,  0.02521493,  0.06037261, -0.0242028 ,
       -0.00796015,  0.04953256,  0.03508716,  0.02069657, -0.02412735,
        0.09052362,  0.0321848 , -0.01509004, -0.02288916, -0.0184294 ,
       -0.03156637,  0.09914596, -0.08912608,  0.10439133,  0.06987341,
       -0.03486204, -0.05194019, -0.01135688, -0.06064216,  0.00186343,
       -0.02069233,  0.08486801, -0.07777306,  0.04096626,  0.00170131,
       -0.04877428, -0.01395539,  0.02985214, -0.0458484 , -0.00810789,
        0.0376326 ,  0.01917978, -0.06918442, -0.02213688, -0.02425287,
        0.02927708,  0.01627481, -0.14395593, -0.0042607 ,  0.01003903,
       -0.0821752 ,  0.03904489, -0.00513161,  0.01201927,  0.06313201,
       -0.06706443,  0.00552539, -0.04346491,  0.08005493, -0.03654433,
        0.03334298, -0.05445556,  0.05570193, -0.1080606 ,  0.05144317,
        0.01098493,  0.01724626,  0.06440686, -0.09471754, -0.09003874,
        0.10845611,  0.0062663 ,  0.01909778,  0.01463693, -0.07901903,
       -0.07276919, -0.06905933,  0.023279  ,  0.05027185,  0.02239144,
       -0.01935119, -0.11879936, -0.00032441,  0.07813853,  0.12226875,
       -0.0720746 , -0.04170701,  0.08594857,  0.03407563, -0.0957265 ,
        0.01752669,  0.0483458 , -0.05584684, -0.03903203, -0.03244067,
        0.04183778, -0.04480976, -0.00499903, -0.06797484, -0.0287832 ,
        0.10014609,  0.0155243 , -0.04857568, -0.04635456,  0.07227517,
        0.00928918, -0.04952579,  0.08451407, -0.02709373,  0.03054571,
        0.11224463,  0.05832675,  0.10296202,  0.01834285, -0.06446715,
       -0.05101402, -0.07451255,  0.01497214,  0.09188299,  0.11657386,
        0.00265196, -0.07897543, -0.1281233 , -0.05831938,  0.01703343,
       -0.05327895, -0.02170705,  0.03098996,  0.03645593,  0.01606574,
       -0.16157173,  0.03447953,  0.09737241, -0.00203218, -0.0124328 ,
        0.06260807,  0.03434644,  0.0308863 ,  0.0119572 ,  0.04354547,
        0.05149814,  0.01611059, -0.06797301,  0.0650012 , -0.01249088,
       -0.05106969,  0.02936865, -0.02717052, -0.0048297 , -0.05748892,
        0.04435412, -0.09664144, -0.05893506,  0.157796  ,  0.03609653,
        0.04354417, -0.07639778,  0.05151582, -0.08621225, -0.05635231,
        0.07599116, -0.03608988,  0.03292482, -0.04143567,  0.02551002,
        0.05091608,  0.03412007,  0.03498705, -0.11189518, -0.03226838,
       -0.05025871,  0.01148122, -0.1032086 , -0.00641592,  0.03651296,
        0.0305886 , -0.01807953, -0.00697855,  0.05030023, -0.05859985,
        0.0087315 , -0.01047443, -0.0437373 ,  0.03516327,  0.07988772,
        0.12116606,  0.00296349,  0.00183105, -0.00913096, -0.01715685,
       -0.02604873, -0.02829699, -0.02461788, -0.04544047,  0.06071329,
       -0.01830816,  0.02464155, -0.03193037,  0.02017371, -0.00341726,
       -0.00453031, -0.01707009, -0.04593393,  0.02392642,  0.02510731,
        0.01974812,  0.09054442,  0.08180364,  0.01476894, -0.06669085,
        0.07059105, -0.06898726, -0.07182646,  0.03815618,  0.10506251,
       -0.1054315 , -0.05843208,  0.05154726, -0.03033077, -0.03199578,
        0.05428182,  0.03709899, -0.02612988, -0.02289922, -0.11013547,
       -0.03191717,  0.01256852,  0.0056342 ,  0.10699045, -0.08045568,
       -0.04192578, -0.0723919 , -0.03428426, -0.04904021, -0.08223958,
       -0.04170647, -0.00083555,  0.01877646, -0.0117913 , -0.02380593,
       -0.09623401,  0.08007528,  0.09653331,  0.09194318, -0.03006949,
       -0.06828462, -0.04271732,  0.02560064,  0.01715249,  0.0567019 ,
        0.03910007,  0.06909036, -0.05664308, -0.05109981, -0.08006199,
        0.02090722, -0.07132164,  0.05525782, -0.11532972,  0.08124672], dtype=float32)
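
Indexing the model returns the raw num_features-dimensional float32 vector for a word. As a final hypothetical sketch, the scores reported by most_similar can be reproduced by hand: after init_sims(replace=True) the vectors are unit length, so cosine similarity is just a dot product.

In [ ]:
import numpy as np

# Dot product of two unit-length vectors equals their cosine similarity,
# matching the scores printed by most_similar above
print np.dot(model["man"], model["woman"])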

In [ ]: