In [1]:
from __future__ import division
import re, time, os, gc
import sys
import string
import numpy as np
import pandas as pd
import gensim
import config
PATH = config.RAW_PATH
# Load the pretrained GoogleNews vectors; gensim keeps the vocabulary sorted
# by corpus frequency, so a word's index doubles as a frequency rank.
model = gensim.models.KeyedVectors.load_word2vec_format(PATH+'GoogleNews-vectors-negative300.bin.gz', binary=True)
words = model.index2word
# Map each word to its rank (0 = most frequent); used below as a probability proxy.
w_rank = {}
for i, word in enumerate(words):
    w_rank[word] = i
WORDS = w_rank
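As a quick sanity check (illustrative only; the exact ranks depend on the embedding file, and `get` returns None for out-of-vocabulary words), a lower rank means a more frequent word:

In [ ]:
# Inspect the rank-based frequency proxy; lower rank = more frequent word.
print(WORDS.get('question'))
print(WORDS.get('questions'))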
In [2]:
import re
from collections import Counter

def words(text): return re.findall(r'\w+', text.lower())
# Note: `words` and `Counter` are leftovers from Norvig's corpus-counting
# version; the dictionary here is the word2vec rank map built above.

def P(word):
    "Score proxy for the probability of `word`; higher is more probable."
    # Negative rank is monotone in frequency: rank 0, the most frequent word,
    # scores highest. Unknown words default to 0, which is harmless because
    # candidates() filters everything through known() first.
    return - WORDS.get(word, 0)
def correction(word):
    "Most probable spelling correction for `word`."
    return max(candidates(word), key=P)

def candidates(word):
    "Generate possible spelling corrections for `word`, preferring fewer edits."
    return (known([word]) or known(edits1(word)) or known(edits2(word)) or [word])

def known(words):
    "The subset of `words` that appear in the dictionary WORDS."
    return set(w for w in words if w in WORDS)
def edits1(word):
    "All edits that are one edit away from `word`."
    letters    = 'abcdefghijklmnopqrstuvwxyz'
    splits     = [(word[:i], word[i:]) for i in range(len(word) + 1)]
    deletes    = [L + R[1:] for L, R in splits if R]
    transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
    replaces   = [L + c + R[1:] for L, R in splits if R for c in letters]
    inserts    = [L + c + R for L, R in splits for c in letters]
    return set(deletes + transposes + replaces + inserts)

def edits2(word):
    "All edits that are two edits away from `word`."
    return (e2 for e1 in edits1(word) for e2 in edits1(e1))
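A quick smoke test of the corrector (the expected result 'spelling' is Norvig's classic example and assumes the word is in the GoogleNews vocabulary):

In [ ]:
# Should print 'spelling' if that word is in the vocabulary.
print(correction('speling'))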
In [3]:
from utils import dist_utils, ngram_utils, nlp_utils, np_utils
token_pattern = " "  # split questions on single spaces

def checker(text):
    "Spell-correct each token of `text` and rejoin with spaces."
    tokens = nlp_utils._tokenize(str(text), token_pattern)
    return " ".join(correction(t) for t in tokens)
In [ ]:
from multiprocessing import Pool
num_partitions = 20  # number of partitions to split the dataframe into
num_cores = 20       # number of worker processes (set to your core count)

def parallelize_dataframe(df, func):
    "Apply `func` to chunks of `df` in parallel and reassemble the result."
    df_split = np.array_split(df, num_partitions)
    pool = Pool(num_cores)
    df = pd.concat(pool.map(func, df_split))
    pool.close()
    pool.join()
    return df

def correct_columns(df):
    "Spell-correct both question columns of a dataframe chunk."
    df['question1'] = df['question1'].apply(checker)
    print('question1 done')  # coarse progress marker per partition
    df['question2'] = df['question2'].apply(checker)
    return df
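Before launching 20 worker processes, a serial dry run on a small slice helps catch errors cheaply (a sketch; the slice size of 100 is arbitrary):

In [ ]:
sample = pd.read_csv(PATH+'test.csv').head(100)
sample['question1'] = sample['question1'].astype(str)
sample['question2'] = sample['question2'].astype(str)
sample = correct_columns(sample)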
In [ ]:
# train = pd.read_csv(PATH+'train.csv')
# Note: this pass corrects the test set; the variable name `train` is reused.
train = pd.read_csv(PATH+'test.csv')
train['question1'] = train['question1'].astype(str)
train['question2'] = train['question2'].astype(str)
train = parallelize_dataframe(train, correct_columns)
In [ ]:
train.to_csv(PATH+'test_check.csv',index=False)
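To verify the export, the corrected file can be read back and spot-checked (optional):

In [ ]:
print(pd.read_csv(PATH+'test_check.csv').head())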