In [1]:
import logging
import os
import time

from cltk.corpus.utils.formatter import phi5_plaintext_cleanup
from cltk.corpus.utils.formatter import tlg_plaintext_cleanup
from cltk.corpus.utils.formatter import assemble_phi5_author_filepaths
from cltk.stem.latin.j_v import JVReplacer
from cltk.stem.lemma import LemmaReplacer
from cltk.stop.latin.stops import STOPS_LIST as latin_stops
from cltk.tokenize.word import nltk_tokenize_words
from cltk.tokenize.sentence import TokenizeSentence
from cltk.tokenize.word import WordTokenizer
from gensim.models import Word2Vec

In [2]:
# Logging for Gensim
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

In [3]:
def gen_docs(corpus, lemmatize, rm_stops, testing):
    """Open and process files from a corpus. Return a list of sentences for an author. Each sentence 
    is itself a list of tokenized words.
    """
    
    assert corpus in ['phi5', 'tlg']
    
    if corpus == 'phi5':
        language = 'latin'
        filepaths = assemble_phi5_author_filepaths()
        jv_replacer = JVReplacer()
        text_cleaner = phi5_plaintext_cleanup
        word_tokenizer = nltk_tokenize_words
        if rm_stops:
            stops = latin_stops
        else:
            stops = None
    elif corpus == 'tlg':
        language = 'greek'
        filepaths = assemble_tlg_author_filepaths()
        text_cleaner = tlg_plaintext_cleanup
        punkt = PunktLanguageVars()
        word_tokenizer = punkt.word_tokenize

        if rm_stops:
            stops = latin_stops
        else:
            stops = None

    if lemmatize:
        lemmatizer = LemmaReplacer(language)        
    if testing:
        filepaths = filepaths[:20]

    sent_tokenizer = TokenizeSentence(language)

    for filepath in filepaths:
        with open(filepath) as f:
            text = f.read()
        # light first-pass cleanup, before sentence tokenization (which relies on punctuation)
        text = text_cleaner(text, rm_punctuation=False, rm_periods=False)
        sent_tokens = sent_tokenizer.tokenize_sentences(text)
        for sentence in sent_tokens:
            # a second cleanup at sentence-level, to rm all punctuation
            sentence = text_cleaner(sentence, rm_punctuation=True, rm_periods=True)
            sentence = word_tokenizer(sentence)
            sentence = [s.lower() for s in sentence]
            sentence = [w for w in sentence if w]
            if language == 'latin':
                sentence = [w[1:] if w.startswith('-') else w for w in sentence]

            
            if stops:
                sentence = [w for w in sentence if w not in stops]

            sentence = [w for w in sentence if len(w) > 1]  # rm short words

            if sentence:
                sentence = sentence

            if lemmatize:
                sentence = lemmatizer.lemmatize(sentence)
            if sentence and language == 'latin':
                sentence = [jv_replacer.replace(word) for word in sentence]
            if sentence != []:
                yield sentence

In [4]:
def make_model(corpus, lemmatize=False, rm_stops=False, size=100, window=10, min_count=5, workers=4, sg=1, testing=False, save_path=None):
    """Train W2V model."""

    # Step 0: Instantiate empty model ( https://groups.google.com/forum/#!topic/gensim/xXKz-v8brAI )
    model = Word2Vec(sentences=None, size=size, window=window, min_count=min_count, workers=workers, sg=sg)

    # Step 1: Add entire corpus's vocabulary to the model
    docs = gen_docs(corpus, lemmatize=lemmatize, rm_stops=rm_stops, testing=testing)
    vocab_counter = 0
    alert_per_processed = 100
    for sentences in docs:
        vocab_counter += 1
        model.build_vocab(sentences)
        if vocab_counter % alert_per_processed == 0:
            print('Building vocab:', vocab_counter)

    # Step 2: Train model sentence-by-sentence
    docs = gen_docs(corpus, lemmatize=lemmatize, rm_stops=rm_stops, testing=testing)
    train_counter = 0
    for sentences in docs:
        train_counter += 1
        try:
            model.train(sentences)
        except Exception as e:
            pass
            #print(e)
        if train_counter % alert_per_processed == 0:
            print('Training model:', train_counter)

    model.init_sims(replace=True)
    if save_path:
        save_path = os.path.expanduser(save_path)
        model.save(save_path)
    else:
        return model

In [5]:
def make_options():
    """All the options for all permutations of the models."""
    corpus = ['phi5', 'tlg']
    lemmatize = [True, False]
    rm_stops = [True, False]
    window = [20] #  [5, 10, 20]
    size = [500] #  [100, 200, 500, 1000]
    skip_gram = [True, False]

    for c in corpus:
        for l in lemmatize:
            for r in rm_stops:
                for w in window:
                    for s in size:
                        for sg in skip_gram:
                            option = (c, l, r, w, s, sg)
                            yield option

In [6]:
options = make_options()


log_path = os.path.expanduser('/tmp/make_models.log')
with open(log_path, 'w') as file_opened:
    file_opened.write('')

for option in options:
    if option[0] == 'phi5':
        option_lang = ('latin', option)
    elif option[0] == 'tlg':
        option_lang = ('greek', option)
    lang = option_lang[0]
    corpus = option_lang[1][0]
    lemmatize = option_lang[1][1]
    rm_stops = option_lang[1][2]
    window = option_lang[1][3]
    size = option_lang[1][4]
    skip_gram = option_lang[1][5]
    if skip_gram == True:
        sg = 1
    else:
        sg = 0  # ie, use cbow
    model_path = os.path.join('~', 
                              lang + '_word2vec_cltk', 
                              corpus + '_' + 'lemmatize' + str(lemmatize) + '_' + 'rmstops' + str(rm_stops) + '_' + 'window' + str(window) + '_' + 'size' + str(size) + '_' + 'sg' + str(skip_gram) + '.model')
    model_path = os.path.expanduser(model_path)
    print('Commencing to write model at: {}.'.format(model_path))
    start = time.time()
    try:
        make_model(corpus=corpus, lemmatize=lemmatize, rm_stops=rm_stops, size=size, window=window, min_count=5, workers=4, sg=sg, testing=False, save_path=model_path)
        mins = (time.time() - start) / 60
        build_time = '\nBuild time: {} mins.'.format(mins)
        log_text = model_path + '\n' + str(build_time) + '\n'
        print(log_text)
        with open(log_path, 'a') as file_opened:
            file_opened.write(log_text)
    except Exception as e:
        print('Something went wrong with {}.'.format(model_path))
        with open(log_path, 'a') as file_opened:
            file_opened.write('Build failed for: {0}.\n{1}'.format(model_path, e))

    try:
        Word2Vec.load(model_path)
    except:
        with open(log_path, 'a') as file_opened:
            file_opened.write('Loading failed.\n')


Commencing to write model at: /Users/kyle/latin_word2vec_cltk/phi5_lemmatizeTrue_rmstopsTrue_window20_size500_sgTrue.model.
Building vocab: 100
Building vocab: 200
Building vocab: 300
Training model: 100
Training model: 200
Training model: 300
/Users/kyle/latin_word2vec_cltk/phi5_lemmatizeTrue_rmstopsTrue_window20_size500_sgTrue.model

Build time: 6.340995864073435 mins.

Commencing to write model at: /Users/kyle/latin_word2vec_cltk/phi5_lemmatizeTrue_rmstopsTrue_window20_size500_sgFalse.model.
Building vocab: 100
Building vocab: 200
Building vocab: 300
Training model: 100
Training model: 200
Training model: 300
/Users/kyle/latin_word2vec_cltk/phi5_lemmatizeTrue_rmstopsTrue_window20_size500_sgFalse.model

Build time: 6.319399313131968 mins.

Commencing to write model at: /Users/kyle/latin_word2vec_cltk/phi5_lemmatizeTrue_rmstopsFalse_window20_size500_sgTrue.model.
Building vocab: 100
Building vocab: 200
Building vocab: 300
Training model: 100
Training model: 200
Training model: 300
/Users/kyle/latin_word2vec_cltk/phi5_lemmatizeTrue_rmstopsFalse_window20_size500_sgTrue.model

Build time: 5.86022766828537 mins.

Commencing to write model at: /Users/kyle/latin_word2vec_cltk/phi5_lemmatizeTrue_rmstopsFalse_window20_size500_sgFalse.model.
Building vocab: 100
Building vocab: 200
Building vocab: 300
Training model: 100
Training model: 200
Training model: 300
/Users/kyle/latin_word2vec_cltk/phi5_lemmatizeTrue_rmstopsFalse_window20_size500_sgFalse.model

Build time: 25.415699152151742 mins.

Commencing to write model at: /Users/kyle/latin_word2vec_cltk/phi5_lemmatizeFalse_rmstopsTrue_window20_size500_sgTrue.model.
Building vocab: 100
Building vocab: 200
Building vocab: 300
Training model: 100
Training model: 200
Training model: 300
/Users/kyle/latin_word2vec_cltk/phi5_lemmatizeFalse_rmstopsTrue_window20_size500_sgTrue.model

Build time: 5.568328734238943 mins.

Commencing to write model at: /Users/kyle/latin_word2vec_cltk/phi5_lemmatizeFalse_rmstopsTrue_window20_size500_sgFalse.model.
Building vocab: 100
Building vocab: 200
Building vocab: 300
Training model: 100
Training model: 200
Training model: 300
/Users/kyle/latin_word2vec_cltk/phi5_lemmatizeFalse_rmstopsTrue_window20_size500_sgFalse.model

Build time: 5.550594035784403 mins.

Commencing to write model at: /Users/kyle/latin_word2vec_cltk/phi5_lemmatizeFalse_rmstopsFalse_window20_size500_sgTrue.model.
Building vocab: 100
Building vocab: 200
Building vocab: 300
Training model: 100
Training model: 200
Training model: 300
/Users/kyle/latin_word2vec_cltk/phi5_lemmatizeFalse_rmstopsFalse_window20_size500_sgTrue.model

Build time: 425.3358330647151 mins.

Commencing to write model at: /Users/kyle/latin_word2vec_cltk/phi5_lemmatizeFalse_rmstopsFalse_window20_size500_sgFalse.model.
Building vocab: 100
Building vocab: 200
Building vocab: 300
Training model: 100
Training model: 200
Training model: 300
/Users/kyle/latin_word2vec_cltk/phi5_lemmatizeFalse_rmstopsFalse_window20_size500_sgFalse.model

Build time: 75.23516012032827 mins.

Commencing to write model at: /Users/kyle/greek_word2vec_cltk/tlg_lemmatizeTrue_rmstopsTrue_window20_size500_sgTrue.model.
Something went wrong with /Users/kyle/greek_word2vec_cltk/tlg_lemmatizeTrue_rmstopsTrue_window20_size500_sgTrue.model.
Commencing to write model at: /Users/kyle/greek_word2vec_cltk/tlg_lemmatizeTrue_rmstopsTrue_window20_size500_sgFalse.model.
Something went wrong with /Users/kyle/greek_word2vec_cltk/tlg_lemmatizeTrue_rmstopsTrue_window20_size500_sgFalse.model.
Commencing to write model at: /Users/kyle/greek_word2vec_cltk/tlg_lemmatizeTrue_rmstopsFalse_window20_size500_sgTrue.model.
Something went wrong with /Users/kyle/greek_word2vec_cltk/tlg_lemmatizeTrue_rmstopsFalse_window20_size500_sgTrue.model.
Commencing to write model at: /Users/kyle/greek_word2vec_cltk/tlg_lemmatizeTrue_rmstopsFalse_window20_size500_sgFalse.model.
Something went wrong with /Users/kyle/greek_word2vec_cltk/tlg_lemmatizeTrue_rmstopsFalse_window20_size500_sgFalse.model.
Commencing to write model at: /Users/kyle/greek_word2vec_cltk/tlg_lemmatizeFalse_rmstopsTrue_window20_size500_sgTrue.model.
Something went wrong with /Users/kyle/greek_word2vec_cltk/tlg_lemmatizeFalse_rmstopsTrue_window20_size500_sgTrue.model.
Commencing to write model at: /Users/kyle/greek_word2vec_cltk/tlg_lemmatizeFalse_rmstopsTrue_window20_size500_sgFalse.model.
Something went wrong with /Users/kyle/greek_word2vec_cltk/tlg_lemmatizeFalse_rmstopsTrue_window20_size500_sgFalse.model.
Commencing to write model at: /Users/kyle/greek_word2vec_cltk/tlg_lemmatizeFalse_rmstopsFalse_window20_size500_sgTrue.model.
Something went wrong with /Users/kyle/greek_word2vec_cltk/tlg_lemmatizeFalse_rmstopsFalse_window20_size500_sgTrue.model.
Commencing to write model at: /Users/kyle/greek_word2vec_cltk/tlg_lemmatizeFalse_rmstopsFalse_window20_size500_sgFalse.model.
Something went wrong with /Users/kyle/greek_word2vec_cltk/tlg_lemmatizeFalse_rmstopsFalse_window20_size500_sgFalse.model.