word2vec IMDB data

Training word2vec embeddings on the IMDB review dataset and experimenting with the resulting model.

Reference: Kaggle tutorial "Bag of Words Meets Bags of Popcorn".


In [13]:
from bs4 import BeautifulSoup
import re
from nltk.corpus import stopwords
import nltk.data
import pandas as pd
import gensim

In [7]:
train = pd.read_csv("labeledTrainData.tsv", header=0, delimiter="\t", quoting=3)
test = pd.read_csv("testData.tsv", header=0, delimiter="\t", quoting=3)
unlabeled_train = pd.read_csv("unlabeledTrainData.tsv", header=0, delimiter="\t", quoting=3)
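
A quick sanity check (not part of the original tutorial; it assumes the standard Kaggle column layout of id, sentiment and review) to confirm the three frames loaded as expected:

In [ ]:
# Sanity check: row counts and columns (assumes the standard Kaggle files)
print(train.shape, test.shape, unlabeled_train.shape)
print(train.columns.values)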

In [8]:
def review_to_wordlist( review, remove_stopwords=False ):
    # Function to convert a document to a sequence of words,
    # optionally removing stop words.  Returns a list of words.
    #
    # 1. Remove HTML
    review_text = BeautifulSoup(review, "html5lib").get_text()
    #  
    # 2. Remove non-letters
    review_text = re.sub("[^a-zA-Z]"," ", review_text)
    #
    # 3. Convert words to lower case and split them
    words = review_text.lower().split()
    #
    # 4. Optionally remove stop words (false by default)
    if remove_stopwords:
        stops = set(stopwords.words("english"))
        words = [w for w in words if not w in stops]
    #
    # 5. Return a list of words
    return words
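
A quick check of review_to_wordlist on a made-up snippet (the example text is invented for illustration, not taken from the dataset):

In [ ]:
# Illustrative call; expect lower-cased words with HTML markup and stop words stripped
review_to_wordlist("<b>This movie was great!</b> Really great.", remove_stopwords=True)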

In [9]:
# Load the punkt tokenizer
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')

# Define a function to split a review into parsed sentences
def review_to_sentences( review, tokenizer, remove_stopwords=False ):
    # Function to split a review into parsed sentences. Returns a 
    # list of sentences, where each sentence is a list of words
    #
    # 1. Use the NLTK tokenizer to split the paragraph into sentences
    raw_sentences = tokenizer.tokenize(review.decode('utf-8').strip())
    #
    # 2. Loop over each sentence
    sentences = []
    for raw_sentence in raw_sentences:
        # If a sentence is empty, skip it
        if len(raw_sentence) > 0:
            # Otherwise, call review_to_wordlist to get a list of words
            sentences.append( review_to_wordlist( raw_sentence, \
              remove_stopwords ))
    #
    # Return the list of sentences (each sentence is a list of words,
    # so this returns a list of lists)
    return sentences
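
The same kind of check for review_to_sentences, again with an invented snippet; each sentence comes back as its own list of words:

In [ ]:
# Illustrative call; expect roughly [['great', 'film'], ['the', 'acting', 'was', 'superb']]
review_to_sentences("Great film. The acting was superb!", tokenizer)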

In [10]:
sentences = []  # Initialize an empty list of sentences

print("Parsing sentences from training set")
for review in train["review"]:
    sentences += review_to_sentences(review, tokenizer)

print("Parsing sentences from unlabeled set")
for review in unlabeled_train["review"]:
    sentences += review_to_sentences(review, tokenizer)


Parsing sentences from training set
Parsing sentences from unlabeled set

(BeautifulSoup raises a handful of UserWarnings here because a few reviews consist only of a URL or of text that looks like a file name; these warnings are harmless for our purpose.)
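
Before training it is worth checking how much material the parsing produced (a small sanity check, not part of the original run):

In [ ]:
# How many sentences did we end up with, and what does one look like?
print(len(sentences))
print(sentences[0])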

In [14]:
model = gensim.models.Word2Vec(sentences, min_count=1)
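
min_count=1 keeps every token, so the vocabulary is as large as it can possibly be. The Kaggle tutorial trains with more aggressive settings; the sketch below uses the tutorial's values for size, min_count, window and sample, while workers=4 and the saved file name are assumptions:

In [ ]:
# Tutorial-style hyperparameters (sketch); bound to a separate name so the
# min_count=1 model above is left untouched
model_tuned = gensim.models.Word2Vec(sentences,
                                     workers=4,      # parallel threads (assumed)
                                     size=300,       # embedding dimensionality
                                     min_count=40,   # ignore words seen fewer than 40 times
                                     window=10,      # context window size
                                     sample=1e-3)    # downsample very frequent words
model_tuned.save("300features_40minwords_10context")  # file name is illustrative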

In [27]:
print(model.wv.most_similar(positive=['bad', 'best'], negative=['good']))


[(u'worst', 0.8046547174453735), (u'funniest', 0.6916342973709106), (u'finest', 0.6374659538269043), (u'stupidest', 0.6306242942810059), (u'weakest', 0.6112555265426636), (u'poorest', 0.6031641364097595), (u'lamest', 0.5914826989173889), (u'greatest', 0.5895812511444092), (u'dumbest', 0.5713971853256226), (u'scariest', 0.5700672268867493)]
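
The word-vector interface supports other quick probes as well; a couple of illustrative ones (the words should all be in the vocabulary since min_count=1, and the exact outputs vary between training runs):

In [ ]:
# A few more illustrative queries
print(model.wv.doesnt_match("man woman child kitchen".split()))
print(model.wv.similarity('woman', 'man'))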

In [29]:
acc = model.accuracy('questions-words.txt')

In [36]:
[('section', d['section']) for d in acc]


Out[36]:
[('section', u'capital-common-countries'),
 ('section', u'capital-world'),
 ('section', u'currency'),
 ('section', u'city-in-state'),
 ('section', u'family'),
 ('section', u'gram1-adjective-to-adverb'),
 ('section', u'gram2-opposite'),
 ('section', u'gram3-comparative'),
 ('section', u'gram4-superlative'),
 ('section', u'gram5-present-participle'),
 ('section', u'gram6-nationality-adjective'),
 ('section', u'gram7-past-tense'),
 ('section', u'gram8-plural'),
 ('section', u'gram9-plural-verbs'),
 ('section', 'total')]

In [54]:
for section in acc:
    print(section['section'], len(section['correct']), len(section['incorrect']))


(u'capital-common-countries', 26, 246)
(u'capital-world', 19, 269)
(u'currency', 0, 40)
(u'city-in-state', 6, 851)
(u'family', 241, 179)
(u'gram1-adjective-to-adverb', 47, 883)
(u'gram2-opposite', 42, 608)
(u'gram3-comparative', 643, 689)
(u'gram4-superlative', 217, 539)
(u'gram5-present-participle', 271, 541)
(u'gram6-nationality-adjective', 80, 1149)
(u'gram7-past-tense', 382, 950)
(u'gram8-plural', 273, 657)
(u'gram9-plural-verbs', 352, 350)
('total', 2599, 7951)
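
Summing the last row, the model gets 2599 of 10550 analogies right, roughly 25%, which is unsurprising for a model trained on a fairly small corpus with min_count=1. Per-section accuracy as a fraction can be derived directly from the counts (a small sketch; the guard skips any section with no answered questions):

In [ ]:
# Per-section accuracy = correct / (correct + incorrect)
for section in acc:
    total = len(section['correct']) + len(section['incorrect'])
    if total > 0:
        print(section['section'], float(len(section['correct'])) / total)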

In [ ]: