In [1]:
LANG="english"
In [2]:
%%bash
fdate=20170327
fname=enwikinews-$fdate-cirrussearch-content.json.gz
if [ ! -e $fname ]
then
wget "https://dumps.wikimedia.org/other/cirrussearch/$fdate/$fname"
fi
In [ ]:
# iterator over the titles and texts in the dump
import gzip
import json

FDATE = 20170327
FNAME = "enwikinews-%s-cirrussearch-content.json.gz" % FDATE

def iter_texts(fpath=FNAME):
    with gzip.open(fpath, "rt") as f:
        for l in f:
            data = json.loads(l)
            if "title" in data:
                yield data["title"]
                yield data["text"]
In [ ]:
# also prepare nltk
import nltk
nltk.download("punkt")
nltk.download("stopwords")
In [ ]:
# make a custom tokenizer: numbers (with thousands separators) first,
# then words (hyphens allowed); order matters, since digits also match \w
import re
from nltk.tokenize import sent_tokenize
from nltk.tokenize import RegexpTokenizer

tokenizer = RegexpTokenizer(r'\d[\d,]*|\w[\w-]*')
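A quick look at what this tokenizer does on a made-up sentence: hyphenated words and comma-grouped numbers stay in one piece, punctuation is dropped.
In [ ]:
# try the tokenizer on a made-up sentence
tokenizer.tokenize("Some 1,234 people rallied in the north-east district.")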
In [ ]:
# prepare a text
def prepare(txt):
    # lower case
    txt = txt.lower()
    return [tokenizer.tokenize(sent)
            for sent in sent_tokenize(txt, language=LANG)]
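For illustration, calling prepare on a made-up two-sentence text returns one token list per sentence:
In [ ]:
# made-up example: one list of tokens per sentence
prepare("The president of America visited Berlin. He gave a short speech.")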
In [ ]:
# we keep the whole corpus in RAM, it is small enough
corpus = []
for txt in iter_texts():
    corpus.extend(prepare(txt))
In [ ]:
# how many sentences and words ?
words_count = sum(len(s) for s in corpus)
print("Corpus has %d words in %d sentences" % (words_count, len(corpus)))
The Phrases model gives us the possibility of handling common terms, that is, words that appear frequently in a text and mostly serve to link other words together.
While you could simply remove them, you might lose information, since "the president is in america" is not the same as "the president of america".
The common_terms parameter of Phrases helps deal with them in a smarter way: they are kept around, but they no longer crush the frequency statistics.
In [ ]:
from gensim.models.phrases import Phrases
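Before running on the full corpus, here is a minimal made-up illustration of the difference (the toy sentences and the low min_count / threshold values are ours, chosen so the toy phrases pass the scoring): without common_terms only adjacent pairs such as "president of" can be detected, while with common_terms a phrase can span the stop word, yielding something like "president of america".
In [ ]:
# toy illustration (made-up sentences): common_terms lets a phrase span a stop word
toy = [["the", "president", "of", "america", "visited"],
       ["the", "president", "of", "america", "spoke"],
       ["a", "president", "of", "america", "arrived"]] * 5
plain = Phrases(toy, min_count=1, threshold=0.1)
with_ct = Phrases(toy, min_count=1, threshold=0.1, common_terms=["the", "of", "a"])
print({g.decode("utf-8") for g, score in plain.export_phrases(toy)})
print({g.decode("utf-8") for g, score in with_ct.export_phrases(toy)})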
In [ ]:
# the stop words we will use
from nltk.corpus import stopwords
" ".join(stopwords.words(LANG))
In [ ]:
# a version of the corpus without stop words
stop_words = frozenset(stopwords.words(LANG))

def stopwords_filter(txt):
    return [w for w in txt if w not in stop_words]

st_corpus = [stopwords_filter(txt) for txt in corpus]
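A small sanity check (ours, not part of the original tutorial) on how much the stop word filtering removed:
In [ ]:
# how many words are left after stop word removal?
st_words_count = sum(len(s) for s in st_corpus)
print("%d words left out of %d" % (st_words_count, words_count))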
In [ ]:
# standard bigram model, trained on the corpus without stop words
%time bigram = Phrases(st_corpus)
# bigram model with common terms, trained on the full corpus
%time bigram_ct = Phrases(corpus, common_terms=stopwords.words(LANG))
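To see what the trained models do to actual text, we can transform one made-up sentence with each. Note that the plain model was trained without stop words, so they have to be filtered out before applying it; whether a phrase such as president_of_america is actually glued together depends on the corpus statistics.
In [ ]:
# transform a made-up sentence with both models
sent = prepare("The president of America spoke to the press.")[0]
print(bigram[stopwords_filter(sent)])
print(bigram_ct[sent])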
In [ ]:
# grams with more than 2 terms are the ones that contain common terms
ct_ngrams = set((g[1], g[0].decode("utf-8"))
                for g in bigram_ct.export_phrases(corpus)
                if len(g[0].split()) > 2)
ct_ngrams = sorted(list(ct_ngrams))
print(len(ct_ngrams), "grams with common terms found")
# the ones with the highest scores
ct_ngrams[-20:]
In [ ]:
# did we find any bigrams with the same first and last words but different stop words?
import collections

by_terms = collections.defaultdict(set)
for ngram, score in bigram_ct.export_phrases(corpus):
    grams = ngram.split()
    by_terms[(grams[0], grams[-1])].add(ngram)

for k, v in by_terms.items():
    if len(v) > 1:
        print(b"-".join(k).decode("utf-8"), " : ", [w.decode("utf-8") for w in v])
In [ ]: