Training word2vec embeddings on the IMDB movie review dataset and experimenting with the resulting model.
Reference: Kaggle tutorial "Bag of Words Meets Bags of Popcorn"
In [13]:
from bs4 import BeautifulSoup
import re
from nltk.corpus import stopwords
import nltk.data
import pandas as pd
import gensim
In [7]:
train = pd.read_csv("labeledTrainData.tsv", header=0,
                    delimiter="\t", quoting=3)
test = pd.read_csv("testData.tsv", header=0, delimiter="\t", quoting=3)
unlabeled_train = pd.read_csv("unlabeledTrainData.tsv", header=0,
                              delimiter="\t", quoting=3)
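(quoting=3 is csv.QUOTE_NONE, which stops pandas from tripping over the stray double quotes inside the reviews.) As a quick sanity check, not part of the original notebook, the Kaggle files should load as 25,000 labeled reviews, 25,000 test reviews, and 50,000 unlabeled reviews:

# Illustrative check of the loaded frames (shapes per the Kaggle data page)
print(train.shape, test.shape, unlabeled_train.shape)
# (25000, 3) (25000, 2) (50000, 2)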
In [8]:
def review_to_wordlist(review, remove_stopwords=False):
    # Convert a document to a sequence of words, optionally removing
    # stop words. Returns a list of words.
    #
    # 1. Remove HTML (an explicit parser avoids BeautifulSoup's warning)
    review_text = BeautifulSoup(review, "html.parser").get_text()
    #
    # 2. Remove non-letters
    review_text = re.sub("[^a-zA-Z]", " ", review_text)
    #
    # 3. Convert words to lower case and split them
    words = review_text.lower().split()
    #
    # 4. Optionally remove stop words (off by default)
    if remove_stopwords:
        stops = set(stopwords.words("english"))
        words = [w for w in words if w not in stops]
    #
    # 5. Return a list of words
    return words
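A quick illustration (the sample review here is made up, not taken from the dataset):

review_to_wordlist("This movie was <br/>GREAT, 10/10!", remove_stopwords=True)
# -> ['movie', 'great']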
In [9]:
# Load the punkt tokenizer
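# (the punkt sentence model must be installed first, e.g. via nltk.download('punkt'))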
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
# Define a function to split a review into parsed sentences
def review_to_sentences(review, tokenizer, remove_stopwords=False):
    # Split a review into parsed sentences. Returns a list of
    # sentences, where each sentence is a list of words.
    #
    # 1. Use the NLTK tokenizer to split the paragraph into sentences
    #    (pandas already yields unicode strings in Python 3, so no
    #    .decode('utf-8') is needed)
    raw_sentences = tokenizer.tokenize(review.strip())
    #
    # 2. Loop over each sentence
    sentences = []
    for raw_sentence in raw_sentences:
        # If a sentence is empty, skip it; otherwise, call
        # review_to_wordlist to get a list of words
        if len(raw_sentence) > 0:
            sentences.append(review_to_wordlist(raw_sentence,
                                                remove_stopwords))
    #
    # Return the list of sentences (each sentence is a list of words,
    # so this returns a list of lists)
    return sentences
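For illustration, a made-up two-sentence review becomes a list of two word lists:

review_to_sentences("The plot was thin. The acting saved it.", tokenizer)
# -> [['the', 'plot', 'was', 'thin'], ['the', 'acting', 'saved', 'it']]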
In [10]:
sentences = []  # Initialize an empty list of sentences

print("Parsing sentences from training set")
for review in train["review"]:
    sentences += review_to_sentences(review, tokenizer)

print("Parsing sentences from unlabeled set")
for review in unlabeled_train["review"]:
    sentences += review_to_sentences(review, tokenizer)
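Word2Vec expects exactly this format, an iterable of token lists. A quick look at the corpus (exact counts depend on the tokenizer version, so treat these calls as illustrative):

print(len(sentences))   # total sentences pooled from the 75,000 reviews
print(sentences[0])     # the first tokenized sentence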
In [14]:
model = gensim.models.Word2Vec(sentences, min_count=1)
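min_count=1 keeps every token that occurs even once, so the vocabulary includes typos and one-off words; the remaining hyperparameters fall back to gensim's defaults (100-dimensional vectors, window of 5). A sketch of a more deliberate configuration, with values like those used in the Kaggle tutorial (the name model_tuned is just illustrative; gensim 3.x keyword names, gensim 4+ renames size to vector_size):

model_tuned = gensim.models.Word2Vec(
    sentences,
    size=300,       # dimensionality of the word vectors
    window=10,      # context window size
    min_count=40,   # ignore words seen fewer than 40 times
    sample=1e-3,    # downsample very frequent words
    workers=4)      # parallel worker threads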
In [27]:
print(model.wv.most_similar(positive=['bad', 'best'], negative=['good']))
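This query asks which word is to 'bad' as 'best' is to 'good'; with a well-trained model 'worst' typically tops the list. The same vector arithmetic supports other probes (illustrative calls, results depend on the trained model):

print(model.wv.most_similar(positive=['woman', 'king'], negative=['man']))
# 'queen' usually ranks highly in a well-trained model
print(model.wv.doesnt_match(['france', 'england', 'germany', 'movie']))
# -> 'movie'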
In [29]:
acc = model.accuracy('questions-words.txt')
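questions-words.txt is the analogy test set shipped with the original word2vec release (Mikolov et al.). accuracy() returns one dict per section, each holding the section name plus lists of the correctly and incorrectly answered analogies; in gensim 4+ this call was replaced by model.wv.evaluate_word_analogies. The structure can be inspected directly:

print(list(acc[0].keys()))   # ['section', 'correct', 'incorrect']
print(acc[0]['section'])     # e.g. 'capital-common-countries'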
In [36]:
[d['section'] for d in acc]  # list the names of the analogy sections
In [54]:
for section in acc:
    n_right, n_wrong = len(section['correct']), len(section['incorrect'])
    print(section['section'], n_right, n_wrong,
          n_right / (n_right + n_wrong) if n_right + n_wrong else 0.0)
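The final entry aggregates every section, so an overall score falls out directly (assuming at least one analogy was answerable with this vocabulary):

total = acc[-1]  # gensim appends a 'total' section at the end
print(total['section'],
      len(total['correct']) / (len(total['correct']) + len(total['incorrect'])))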