deepSimDEF for Prediction of Protein-Protein Interactions (PPI)

Importing the required libraries


In [1]:
import os
import sys
import random
import operator
import numpy as np
import keras.backend as K

from keras import regularizers
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.callbacks import EarlyStopping

from scipy.stats.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score

from deepSimDEF.tools.PPI_data_provider import gene_pair_data_reader, input_data_maker
from deepSimDEF.tools.PPI_model_saver import save_model, save_embeddings
from deepSimDEF.netwroks.PPI_network import PPI_model_builder

np.random.seed(321)


Using TensorFlow backend.

Setting variables, reading GO annotations of genes, and preparing them for the networks


In [2]:
FOLD = 10      # number of cross-validation folds
DROPOUT = 0.3
MAX_POOL = True

PRE_TRAINED = True  # initialize GO-term embeddings from pre-trained vectors
UPDATABLE = True    # allow those embeddings to be fine-tuned during training

ACTIVATION_HIDDEN = 'relu'
ACTIVATION_HIGHWAY = 'sigmoid'
ACTIVATION_OUTPUT = 'sigmoid'

EMBEDDING_DIM = 100
NB_EPOCH = 20
BATCH_SIZE = 256
OPTIMIZER = 'adadelta'

IEA = True   # include GO annotations Inferred from Electronic Annotation (IEA)
SEQ = False  # if True, scan a range of decision thresholds instead of only 0.5

TRANSFER_LEARNING = False

SAVE_MODEL = True
SAVE_EMBEDDINGS = True

SUB_ONTOLOGY = ['BP', 'CC', 'MF']       # sub-ontologies whose annotations are loaded
SUB_ONTOLOGY_work = ['BP', 'CC', 'MF']  # sub-ontologies actually fed to the networks

WITH_HIGH_THROUPUT = False  # optionally add high-throughput PPIs to the training data

SBOs = {}
for sbo in SUB_ONTOLOGY_work:
    if sbo == 'BP':
        SBOs[sbo] = 'Biological Process (BP)'
    elif sbo == 'CC':
        SBOs[sbo] = 'Cellular Component (CC)'
    elif sbo == 'MF':
        SBOs[sbo] = 'Molecular Function (MF)'
    
WE = {}                          # paths to the pre-trained GO-term embedding files
embedding_save = {}              # file names for saving the updated embeddings
MAX_SEQUENCE_LENGTH = {}         # longest annotation list per sub-ontology
MAX_SEQUENCE_LENGTH_INDEX = {}   # index of the gene product with that longest list
sequences = {}                   # tokenized GO annotations per sub-ontology
word_indeces = {}                # Tokenizer word index per sub-ontology
protein_index = {}               # gene/protein ID -> row index per sub-ontology
    
for sbo in SUB_ONTOLOGY:
    WE[sbo] = 'deepSimDEF/embeddings/GO_' + sbo + '_Embeddings_100D.emb'
    embedding_save[sbo] = 'GO_' + sbo + '_Embeddings_100D_Updated'
    MAX_SEQUENCE_LENGTH[sbo] = 0
    MAX_SEQUENCE_LENGTH_INDEX[sbo] = []
    sequences[sbo] = []
    word_indeces[sbo] = []
    protein_index[sbo] = {}
    
    if IEA:
        file_reader = open('deepSimDEF/gene_annotations/gene_product_GO_terms_with_IEA' + '.' + sbo)
    else:
        file_reader = open('deepSimDEF/gene_annotations/gene_product_GO_terms_without_IEA' + '.' + sbo)
    
    index_counter = 1
    texts = []
    for line in file_reader:
        # each line: a gene-product ID followed by its GO-term annotations
        values = line.rstrip().replace(':', '').split()  # 'GO:0006810' -> 'GO0006810'
        protein_index[sbo][values[0]] = index_counter
        if len(values[1:]) > MAX_SEQUENCE_LENGTH[sbo]:
            MAX_SEQUENCE_LENGTH[sbo] = len(values[1:])
            MAX_SEQUENCE_LENGTH_INDEX[sbo] = index_counter
        texts.append(' '.join(values[1:]))
        index_counter += 1

    tokenizer = Tokenizer(lower=False, num_words=0)  # keep GO-ID case; num_words=0 imposes no vocabulary cap
    tokenizer.fit_on_texts(texts)
    sequences[sbo] = tokenizer.texts_to_sequences(texts)

    word_indeces[sbo] = tokenizer.word_index
    
    if sbo == 'BP':
        print "\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Biolobical Process (BP) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
    elif sbo == 'CC':
        print "\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Cellular Component (CC) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
    elif sbo == 'MF':
        print "\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Molecular Function (MF) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
            
    print "Found " + str(len(word_indeces[sbo])) + " unique tokens in " + sbo

    MOST_FREQUENT_LEVEL = 10
    print 'Top', MOST_FREQUENT_LEVEL, 'Most Frequent GO terms annotating sequences in', sbo + ":"
    for GO_ID, indx in sorted(word_indeces[sbo].items(), key=operator.itemgetter(1))[:MOST_FREQUENT_LEVEL]:
        print '  >>>', GO_ID, '   ' ,indx
        
    print "Number of annotated gene products by '" + sbo + "' terms: " + str(len(sequences[sbo]))
    print "Maximum annotation length of one gene product ('" + sbo + "' sub-ontology):", MAX_SEQUENCE_LENGTH[sbo]
    print "Index/line of the gene product with maximum annotations ('" + sbo + "' sub-ontology):", MAX_SEQUENCE_LENGTH_INDEX[sbo]
    print "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n"
    
    file_reader.close()
    
    
fully_annotated_sequences = []   # keep only genes that have annotations in all sub-ontologies listed in SUB_ONTOLOGY
for sbo in SUB_ONTOLOGY:
    fully_annotated_sequences.append(protein_index[sbo].keys())
fully_annotated_sequences = list(set(fully_annotated_sequences[0]).intersection(*fully_annotated_sequences))
print "Number of fully annotated gene products:", len(fully_annotated_sequences)


~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Biological Process (BP) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Found 3054 unique tokens in BP
Top 10 Most Frequent GO terms annotating sequences in BP:
  >>> GO0006810     1
  >>> GO0006351     2
  >>> GO0006355     3
  >>> GO0015031     4
  >>> GO0055114     5
  >>> GO0007049     6
  >>> GO0006414     7
  >>> GO0008152     8
  >>> GO0006412     9
  >>> GO0055085     10
Number of gene products annotated with 'BP' terms: 5680
Maximum annotation length of one gene product ('BP' sub-ontology): 44
Index/line of the gene product with maximum annotations ('BP' sub-ontology): 294
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~


~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Cellular Component (CC) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Found 782 unique tokens in CC
Top 10 Most Frequent GO terms annotating sequences in CC:
  >>> GO0005737     1
  >>> GO0005634     2
  >>> GO0016020     3
  >>> GO0016021     4
  >>> GO0005739     5
  >>> GO0005829     6
  >>> GO0005783     7
  >>> GO0005886     8
  >>> GO0005789     9
  >>> GO0005730     10
Number of gene products annotated with 'CC' terms: 5971
Maximum annotation length of one gene product ('CC' sub-ontology): 17
Index/line of the gene product with maximum annotations ('CC' sub-ontology): 5290
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~


~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Molecular Function (MF) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Found 1966 unique tokens in MF
Top 10 Most Frequent GO terms annotating sequences in MF:
  >>> GO0000166     1
  >>> GO0046872     2
  >>> GO0016740     3
  >>> GO0005524     4
  >>> GO0016787     5
  >>> GO0003677     6
  >>> GO0003723     7
  >>> GO0003824     8
  >>> GO0030533     9
  >>> GO0003676     10
Number of gene products annotated with 'MF' terms: 4856
Maximum annotation length of one gene product ('MF' sub-ontology): 33
Index/line of the gene product with maximum annotations ('MF' sub-ontology): 1179
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Number of fully annotated gene products: 4658
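
As a side note, the annotation parser above expects each line of the annotation files to hold a gene-product identifier followed by its GO terms, with the colons stripped from the term IDs before tokenization. A minimal sketch of that transformation on a made-up line (the gene ID and terms below are illustrative only):

line = "YAL005C GO:0006810 GO:0006457"
values = line.rstrip().replace(':', '').split()
print values[0]   # 'YAL005C'  -- the gene-product identifier
print values[1:]  # ['GO0006810', 'GO0006457'] -- tokens handed to the Tokenizer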

Reading the gene-pair PPIs: manually curated PPIs and high-throughput PPIs (optional)


In [3]:
input_data_dir = 'deepSimDEF/datasets/PPI_data/PPI_FULL_physical_interactions_manually_curated'
annotation_G1_dic_MC, annotation_G2_dic_MC, interaction_pr_list_MC = gene_pair_data_reader(data_dir=input_data_dir, 
                                                                                           SUB_ONTOLOGY_work=SUB_ONTOLOGY_work, 
                                                                                           fully_annotated_sequences=fully_annotated_sequences, 
                                                                                           sequences=sequences, 
                                                                                           protein_index=protein_index,
                                                                                           MAX_SEQUENCE_LENGTH=MAX_SEQUENCE_LENGTH)


VALIDATION_SPLIT = 1.0/FOLD
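# Note: `sbo` here is simply the last sub-ontology left over from the loop in
# the previous cell; every annotation tensor has one row per gene pair, so any
# key gives the same count. The shuffled `indices` and `test_size` are later
# handed to input_data_maker, which presumably uses them to carve out each
# fold's train/validation split.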
indices = np.arange(annotation_G1_dic_MC[sbo].shape[0])
np.random.shuffle(indices)
test_size = int(VALIDATION_SPLIT * annotation_G1_dic_MC[sbo].shape[0])


annotation_G1_dic_HT = []
annotation_G2_dic_HT = []
interaction_pr_list_HT = []

if WITH_HIGH_THROUPUT:
    input_data_dir = 'deepSimDEF/datasets/PPI_data/PPI_FULL_physical_interactions_high_throughput'
    annotation_G1_dic_HT, annotation_G2_dic_HT, interaction_pr_list_HT = gene_pair_data_reader(data_dir=input_data_dir, 
                                                                                               SUB_ONTOLOGY_work=SUB_ONTOLOGY_work, 
                                                                                               fully_annotated_sequences=fully_annotated_sequences, 
                                                                                               sequences=sequences, 
                                                                                               protein_index=protein_index,
                                                                                               MAX_SEQUENCE_LENGTH=MAX_SEQUENCE_LENGTH)


Shape of data tensor 1 (BP): (32956, 44)
Shape of data tensor 2 (BP): (32956, 44)
Shape of similarity tensor (BP): (32956,) 

Shape of data tensor 1 (CC): (32956, 17)
Shape of data tensor 2 (CC): (32956, 17)
Shape of similarity tensor (CC): (32956,) 

Shape of data tensor 1 (MF): (32956, 33)
Shape of data tensor 2 (MF): (32956, 33)
Shape of similarity tensor (MF): (32956,) 

Number of positive classes/interactions: 16478
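
A quick sanity check of the numbers above: exactly half of the 32,956 gene pairs (16,478) are positive interactions, so the dataset is balanced, and with FOLD = 10 each held-out fold comes to int(32956 / 10.0) = 3295 pairs, matching the "validate on 3295 samples" lines in the training logs further down. A minimal illustration of the fold arithmetic (the pair count is taken from the printout above):

test_size = int((1.0 / 10) * 32956)   # 3295 validation pairs per fold
train_pairs = 32956 - test_size       # 29661; the logs report 2 * 29661 = 59322 training samples,
                                      # which suggests each pair is presented in both gene orders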

Printing some information about the network settings and the experiment


In [4]:
for sbo in SUB_ONTOLOGY_work:
    print "@@@ " + SBOs[sbo] + " @@@"

if IEA:
    print "^^^ With IEA ^^^"
else:
    print "^^^ Without IEA ^^^"

print "%%% Optimizer:", OPTIMIZER, "%%%"

if PRE_TRAINED:
    if UPDATABLE:
        print "+++ Pre-trained (updatable) +++"
    else:
        print "+++ Pre-trained (not updatable) +++"
else:
    print "+++ NOT Pre-trained +++"


@@@ Biological Process (BP) @@@
@@@ Cellular Component (CC) @@@
@@@ Molecular Function (MF) @@@
^^^ With IEA ^^^
%%% Optimizer: adadelta %%%
+++ Pre-trained (updatable) +++

Setting up a 10-fold cross-validation experiment


In [5]:
models = []            # one deepSimDEF network per fold
embedding_layers = []  # the embedding layers of each network
bests = []             # best F1-score seen so far, per fold
thresholds = []        # decision threshold achieving that best score, per fold
B = []                 # per-fold {threshold: F1-score} results for the current epoch

for m in range(0, FOLD):
    network = PPI_model_builder(EMBEDDING_DIM, 
                                 model_ind=m, 
                                 MAX_SEQUENCE_LENGTH=MAX_SEQUENCE_LENGTH, 
                                 WORD_EMBEDDINGS=WE,
                                 SUB_ONTOLOGY_work=SUB_ONTOLOGY_work,
                                 word_indeces=word_indeces, 
                                 ACTIVATION_HIDDEN=ACTIVATION_HIDDEN, 
                                 ACTIVATION_HIGHWAY=ACTIVATION_HIGHWAY, 
                                 ACTIVATION_OUTPUT=ACTIVATION_OUTPUT, 
                                 DROPOUT=DROPOUT, 
                                 OPTIMIZER=OPTIMIZER)
    models.append(network[0])            # the compiled Keras model
    embedding_layers.append(network[1])  # its embedding layers
    bests.append(0)
    thresholds.append(0)
    B.append({})


Loaded 29375 word vectors for BP (Model 1)
Loaded 4046 word vectors for CC (Model 1)
Loaded 10541 word vectors for MF (Model 1)
/root/anaconda2/lib/python2.7/site-packages/keras/legacy/layers.py:652: UserWarning: The `Highway` layer is deprecated and will be removed after 06/2017.
  warnings.warn('The `Highway` layer is deprecated '
Model for Fold Number 1 Instantiated!!

Loaded 29375 word vectors for BP (Model 2)
Loaded 4046 word vectors for CC (Model 2)
Loaded 10541 word vectors for MF (Model 2)
Model for Fold Number 2 Instantiated!!

Loaded 29375 word vectors for BP (Model 3)
Loaded 4046 word vectors for CC (Model 3)
Loaded 10541 word vectors for MF (Model 3)
Model for Fold Number 3 Instantiated!!

Loaded 29375 word vectors for BP (Model 4)
Loaded 4046 word vectors for CC (Model 4)
Loaded 10541 word vectors for MF (Model 4)
Model for Fold Number 4 Instantiated!!

Loaded 29375 word vectors for BP (Model 5)
Loaded 4046 word vectors for CC (Model 5)
Loaded 10541 word vectors for MF (Model 5)
Model for Fold Number 5 Instantiated!!

Loaded 29375 word vectors for BP (Model 6)
Loaded 4046 word vectors for CC (Model 6)
Loaded 10541 word vectors for MF (Model 6)
Model for Fold Number 6 Instantiated!!

Loaded 29375 word vectors for BP (Model 7)
Loaded 4046 word vectors for CC (Model 7)
Loaded 10541 word vectors for MF (Model 7)
Model for Fold Number 7 Instantiated!!

Loaded 29375 word vectors for BP (Model 8)
Loaded 4046 word vectors for CC (Model 8)
Loaded 10541 word vectors for MF (Model 8)
Model for Fold Number 8 Instantiated!!

Loaded 29375 word vectors for BP (Model 9)
Loaded 4046 word vectors for CC (Model 9)
Loaded 10541 word vectors for MF (Model 9)
Model for Fold Number 9 Instantiated!!

Loaded 29375 word vectors for BP (Model 10)
Loaded 4046 word vectors for CC (Model 10)
Loaded 10541 word vectors for MF (Model 10)
Model for Fold Number 10 Instantiated!!

Training the deepSimDEF network for the PPI task


In [6]:
RES = {}   # threshold -> F1-score averaged over all folds (current epoch)
best_total_f1 = 0
best_threshold = 0

early_stopping = EarlyStopping(monitor='val_loss', patience=3)  # defined here but never passed to model.fit below

cor = {}   # epoch number -> average F1-score across folds

best_epoch = 0

def pred(A, threshold=0.5):
    # binarize predicted interaction probabilities at the given threshold
    B = []
    for n in A:
        if threshold < n:
            B.append(1)
        else:
            B.append(0)
    return B
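# For reference, a vectorized equivalent of pred() (a sketch, not used below):
#     (np.asarray(A).ravel() > threshold).astype(int).tolist()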

def run_my_model(model_index, seq):
    X_train, y_train, X_test, y_test = input_data_maker(model_id=model_index, 
                                                        test_size=test_size, 
                                                        indices=indices, 
                                                        annotation_G1_dic_MC=annotation_G1_dic_MC, 
                                                        annotation_G2_dic_MC=annotation_G2_dic_MC, 
                                                        interaction_pr_list_MC=interaction_pr_list_MC, 
                                                        annotation_G1_dic_HT=annotation_G1_dic_HT,
                                                        annotation_G2_dic_HT=annotation_G2_dic_HT,
                                                        interaction_pr_list_HT=interaction_pr_list_HT,
                                                        SUB_ONTOLOGY_work=SUB_ONTOLOGY_work,
                                                        WITH_HIGH_THROUPUT=WITH_HIGH_THROUPUT)
    model = models[model_index]
    history = model.fit(X_train, y_train, batch_size=BATCH_SIZE, epochs=1, validation_data=(X_test,y_test))
    p = model.predict(X_test)
    for i in seq:
        predictions = np.asarray(pred(p, i))
        B[model_index][i] = np.round(f1_score(y_test, predictions, average='binary'), 5)
    # best F1-score over the scanned thresholds, and the threshold that achieves it
    thr, pr = max(B[model_index].iteritems(), key=operator.itemgetter(1))
    thresholds[model_index] = thr
    b = bests[model_index]
    if bests[model_index] < pr:
        bests[model_index] = pr
        st = "+ " + str(bests[model_index])
    else:
        st = "- " + str(bests[model_index])
    print ">>> F1-score (" + str(model_index + 1) + "):", pr, "Best (" + str(model_index + 1) + "):", st, "(" + str(thresholds[model_index]) + " : " + str(np.round(pr - b, 5)) + ")" + "\n"

def get_results(epoch_no):
    # average the per-fold F1-scores for every scanned threshold
    for i in seq:
        RES[i] = 0
        for j in range(FOLD):
            RES[i] += B[j][i] / FOLD
    threshold_res, res = max(RES.iteritems(), key=operator.itemgetter(1))
    cor[epoch_no + 1] = res
    total_max = 0
    best_epoch = 0
    threshold_best = threshold_res
    for i, j in sorted(cor.items(), key=operator.itemgetter(1)):
        if total_max < j:
            total_max = j
            best_epoch = i

    print "F1-score for this epoch:", res, "(", threshold_res, ")-- Best F1-score::==>", str(total_max), "(", threshold_best, ")  (for epoch #", str(best_epoch), "of", str(NB_EPOCH), "epochs)" + "\n"

def get_final_result():
    final_max = 0
    best_epoch = 0
    for i, j in sorted(cor.items(), key=operator.itemgetter(1)):
        if final_max < j:
            final_max = j
            best_epoch = i

    print "~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FINAL RESULT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~" + "\n"
    print "For embedding size '" + str(EMBEDDING_DIM) + "' the best number of epochs is '" + str(best_epoch) + "' with an F1-score of: " + str(final_max) + "\n"
    
for e in range(NB_EPOCH):

    print "~~~~~~~~~ " + '/'.join(SUB_ONTOLOGY_work) + " ~~~~~~~~~~~~~~ EPOCH " + str(e + 1) + "/" + str(NB_EPOCH) + " (Embedding dimension: " + str(EMBEDDING_DIM) + ") ~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n"
    seq = [0.5]  # decision threshold(s) to scan; with SEQ=True a 0.11-0.89 sweep is used
    if SEQ:
        seq = np.arange(0.11, 0.9, 0.01)

    for index in range(len(models)):
        run_my_model(index, seq)
    
    get_results(e)

get_final_result()


~~~~~~~~~ BP/CC/MF ~~~~~~~~~~~~~~ EPOCH 1/20 (Embedding dimension: 100) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 4s - loss: 0.5417 - fmeasure: 0.7387 - val_loss: 0.4262 - val_fmeasure: 0.8073
>>> F1-score (1): 0.80753 Best (1): + 0.80753 (0.5 : 0.80753)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 4s - loss: 0.5507 - fmeasure: 0.7328 - val_loss: 0.4151 - val_fmeasure: 0.8243
>>> F1-score (2): 0.8257 Best (2): + 0.8257 (0.5 : 0.8257)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 4s - loss: 0.5635 - fmeasure: 0.7244 - val_loss: 0.4478 - val_fmeasure: 0.7974
>>> F1-score (3): 0.79813 Best (3): + 0.79813 (0.5 : 0.79813)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 4s - loss: 0.5498 - fmeasure: 0.7289 - val_loss: 0.4119 - val_fmeasure: 0.8293
>>> F1-score (4): 0.82992 Best (4): + 0.82992 (0.5 : 0.82992)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 4s - loss: 0.5529 - fmeasure: 0.7309 - val_loss: 0.4357 - val_fmeasure: 0.8065
>>> F1-score (5): 0.80704 Best (5): + 0.80704 (0.5 : 0.80704)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 4s - loss: 0.5491 - fmeasure: 0.7299 - val_loss: 0.4202 - val_fmeasure: 0.8256
>>> F1-score (6): 0.8267 Best (6): + 0.8267 (0.5 : 0.8267)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 4s - loss: 0.5539 - fmeasure: 0.7302 - val_loss: 0.4306 - val_fmeasure: 0.8148
>>> F1-score (7): 0.81512 Best (7): + 0.81512 (0.5 : 0.81512)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 4s - loss: 0.5444 - fmeasure: 0.7339 - val_loss: 0.4284 - val_fmeasure: 0.8118
>>> F1-score (8): 0.81226 Best (8): + 0.81226 (0.5 : 0.81226)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 4s - loss: 0.5545 - fmeasure: 0.7257 - val_loss: 0.4233 - val_fmeasure: 0.8222
>>> F1-score (9): 0.82203 Best (9): + 0.82203 (0.5 : 0.82203)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 4s - loss: 0.5593 - fmeasure: 0.7287 - val_loss: 0.4334 - val_fmeasure: 0.8116
>>> F1-score (10): 0.81185 Best (10): + 0.81185 (0.5 : 0.81185)

F1-score for this epoch: 0.815628 ( 0.5 )-- Best F1-score::==> 0.815628 ( 0.5 )  (for epoch # 1 of 20 epochs)

~~~~~~~~~ BP/CC/MF ~~~~~~~~~~~~~~ EPOCH 2/20 (Embedding dimension: 100) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.4313 - fmeasure: 0.8089 - val_loss: 0.3798 - val_fmeasure: 0.8293
>>> F1-score (1): 0.82934 Best (1): + 0.82934 (0.5 : 0.02181)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.4315 - fmeasure: 0.8073 - val_loss: 0.3759 - val_fmeasure: 0.8391
>>> F1-score (2): 0.84031 Best (2): + 0.84031 (0.5 : 0.01461)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.4369 - fmeasure: 0.8046 - val_loss: 0.4074 - val_fmeasure: 0.8358
>>> F1-score (3): 0.83632 Best (3): + 0.83632 (0.5 : 0.03819)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.4350 - fmeasure: 0.8052 - val_loss: 0.3623 - val_fmeasure: 0.8484
>>> F1-score (4): 0.84885 Best (4): + 0.84885 (0.5 : 0.01893)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.4407 - fmeasure: 0.8021 - val_loss: 0.4067 - val_fmeasure: 0.8269
>>> F1-score (5): 0.82769 Best (5): + 0.82769 (0.5 : 0.02065)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.4356 - fmeasure: 0.8074 - val_loss: 0.3928 - val_fmeasure: 0.8398
>>> F1-score (6): 0.84022 Best (6): + 0.84022 (0.5 : 0.01352)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.4389 - fmeasure: 0.8016 - val_loss: 0.4057 - val_fmeasure: 0.8278
>>> F1-score (7): 0.82829 Best (7): + 0.82829 (0.5 : 0.01317)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.4364 - fmeasure: 0.8031 - val_loss: 0.3938 - val_fmeasure: 0.8259
>>> F1-score (8): 0.82614 Best (8): + 0.82614 (0.5 : 0.01388)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.4374 - fmeasure: 0.8038 - val_loss: 0.3721 - val_fmeasure: 0.8409
>>> F1-score (9): 0.84089 Best (9): + 0.84089 (0.5 : 0.01886)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.4365 - fmeasure: 0.8041 - val_loss: 0.3872 - val_fmeasure: 0.8348
>>> F1-score (10): 0.83491 Best (10): + 0.83491 (0.5 : 0.02306)

F1-score for this epoch: 0.835296 ( 0.5 )-- Best F1-score::==> 0.835296 ( 0.5 )  (for epoch # 2 of 20 epochs)

~~~~~~~~~ BP/CC/MF ~~~~~~~~~~~~~~ EPOCH 3/20 (Embedding dimension: 100) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3869 - fmeasure: 0.8346 - val_loss: 0.3454 - val_fmeasure: 0.8523
>>> F1-score (1): 0.85255 Best (1): + 0.85255 (0.5 : 0.02321)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3879 - fmeasure: 0.8329 - val_loss: 0.3500 - val_fmeasure: 0.8573
>>> F1-score (2): 0.85846 Best (2): + 0.85846 (0.5 : 0.01815)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3895 - fmeasure: 0.8309 - val_loss: 0.3770 - val_fmeasure: 0.8542
>>> F1-score (3): 0.85488 Best (3): + 0.85488 (0.5 : 0.01856)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3923 - fmeasure: 0.8288 - val_loss: 0.3484 - val_fmeasure: 0.8624
>>> F1-score (4): 0.86241 Best (4): + 0.86241 (0.5 : 0.01356)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3923 - fmeasure: 0.8304 - val_loss: 0.3614 - val_fmeasure: 0.8438
>>> F1-score (5): 0.84448 Best (5): + 0.84448 (0.5 : 0.01679)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3905 - fmeasure: 0.8296 - val_loss: 0.3357 - val_fmeasure: 0.8655
>>> F1-score (6): 0.86633 Best (6): + 0.86633 (0.5 : 0.02611)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3932 - fmeasure: 0.8295 - val_loss: 0.3509 - val_fmeasure: 0.8499
>>> F1-score (7): 0.85012 Best (7): + 0.85012 (0.5 : 0.02183)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3886 - fmeasure: 0.8302 - val_loss: 0.3699 - val_fmeasure: 0.8557
>>> F1-score (8): 0.85617 Best (8): + 0.85617 (0.5 : 0.03003)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3927 - fmeasure: 0.8305 - val_loss: 0.3713 - val_fmeasure: 0.8512
>>> F1-score (9): 0.85146 Best (9): + 0.85146 (0.5 : 0.01057)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3929 - fmeasure: 0.8291 - val_loss: 0.3568 - val_fmeasure: 0.8586
>>> F1-score (10): 0.8587 Best (10): + 0.8587 (0.5 : 0.02379)

F1-score for this epoch: 0.855556 ( 0.5 )-- Best F1-score::==> 0.855556 ( 0.5 )  (for epoch # 3 of 20 epochs)

~~~~~~~~~ BP/CC/MF ~~~~~~~~~~~~~~ EPOCH 4/20 (Embedding dimension: 100) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3586 - fmeasure: 0.8486 - val_loss: 0.3418 - val_fmeasure: 0.8588
>>> F1-score (1): 0.8592 Best (1): + 0.8592 (0.5 : 0.00665)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3579 - fmeasure: 0.8491 - val_loss: 0.3273 - val_fmeasure: 0.8584
>>> F1-score (2): 0.85951 Best (2): + 0.85951 (0.5 : 0.00105)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3576 - fmeasure: 0.8489 - val_loss: 0.3505 - val_fmeasure: 0.8599
>>> F1-score (3): 0.86059 Best (3): + 0.86059 (0.5 : 0.00571)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3609 - fmeasure: 0.8473 - val_loss: 0.3143 - val_fmeasure: 0.8755
>>> F1-score (4): 0.87562 Best (4): + 0.87562 (0.5 : 0.01321)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3631 - fmeasure: 0.8452 - val_loss: 0.3429 - val_fmeasure: 0.8582
>>> F1-score (5): 0.85884 Best (5): + 0.85884 (0.5 : 0.01436)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3614 - fmeasure: 0.8465 - val_loss: 0.3163 - val_fmeasure: 0.8733
>>> F1-score (6): 0.87346 Best (6): + 0.87346 (0.5 : 0.00713)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3621 - fmeasure: 0.8480 - val_loss: 0.3299 - val_fmeasure: 0.8630
>>> F1-score (7): 0.86327 Best (7): + 0.86327 (0.5 : 0.01315)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3613 - fmeasure: 0.8469 - val_loss: 0.3325 - val_fmeasure: 0.8660
>>> F1-score (8): 0.86659 Best (8): + 0.86659 (0.5 : 0.01042)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3615 - fmeasure: 0.8465 - val_loss: 0.3359 - val_fmeasure: 0.8687
>>> F1-score (9): 0.86889 Best (9): + 0.86889 (0.5 : 0.01743)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3621 - fmeasure: 0.8450 - val_loss: 0.3395 - val_fmeasure: 0.8664
>>> F1-score (10): 0.86657 Best (10): + 0.86657 (0.5 : 0.00787)

F1-score for this epoch: 0.865254 ( 0.5 )-- Best F1-score::==> 0.865254 ( 0.5 )  (for epoch # 4 of 20 epochs)

~~~~~~~~~ BP/CC/MF ~~~~~~~~~~~~~~ EPOCH 5/20 (Embedding dimension: 100) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3360 - fmeasure: 0.8608 - val_loss: 0.3079 - val_fmeasure: 0.8697
>>> F1-score (1): 0.87036 Best (1): + 0.87036 (0.5 : 0.01116)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3348 - fmeasure: 0.8608 - val_loss: 0.3155 - val_fmeasure: 0.8688
>>> F1-score (2): 0.86982 Best (2): + 0.86982 (0.5 : 0.01031)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3330 - fmeasure: 0.8617 - val_loss: 0.3325 - val_fmeasure: 0.8645
>>> F1-score (3): 0.86503 Best (3): + 0.86503 (0.5 : 0.00444)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3356 - fmeasure: 0.8590 - val_loss: 0.3016 - val_fmeasure: 0.8807
>>> F1-score (4): 0.88044 Best (4): + 0.88044 (0.5 : 0.00482)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3350 - fmeasure: 0.8594 - val_loss: 0.3240 - val_fmeasure: 0.8629
>>> F1-score (5): 0.86341 Best (5): + 0.86341 (0.5 : 0.00457)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3356 - fmeasure: 0.8581 - val_loss: 0.2990 - val_fmeasure: 0.8780
>>> F1-score (6): 0.87881 Best (6): + 0.87881 (0.5 : 0.00535)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3365 - fmeasure: 0.8590 - val_loss: 0.3128 - val_fmeasure: 0.8713
>>> F1-score (7): 0.87156 Best (7): + 0.87156 (0.5 : 0.00829)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3356 - fmeasure: 0.8598 - val_loss: 0.3229 - val_fmeasure: 0.8626
>>> F1-score (8): 0.86327 Best (8): - 0.86659 (0.5 : -0.00332)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3397 - fmeasure: 0.8576 - val_loss: 0.3226 - val_fmeasure: 0.8732
>>> F1-score (9): 0.87352 Best (9): + 0.87352 (0.5 : 0.00463)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3375 - fmeasure: 0.8591 - val_loss: 0.3257 - val_fmeasure: 0.8757
>>> F1-score (10): 0.87601 Best (10): + 0.87601 (0.5 : 0.00944)

F1-score for this epoch: 0.871223 ( 0.5 )-- Best F1-score::==> 0.871223 ( 0.5 )  (for epoch # 5 of 20 epochs)

~~~~~~~~~ BP/CC/MF ~~~~~~~~~~~~~~ EPOCH 6/20 (Embedding dimension: 100) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3169 - fmeasure: 0.8697 - val_loss: 0.2973 - val_fmeasure: 0.8811
>>> F1-score (1): 0.88169 Best (1): + 0.88169 (0.5 : 0.01133)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3141 - fmeasure: 0.8701 - val_loss: 0.3009 - val_fmeasure: 0.8774
>>> F1-score (2): 0.87826 Best (2): + 0.87826 (0.5 : 0.00844)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3155 - fmeasure: 0.8680 - val_loss: 0.3364 - val_fmeasure: 0.8729
>>> F1-score (3): 0.87342 Best (3): + 0.87342 (0.5 : 0.00839)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3166 - fmeasure: 0.8700 - val_loss: 0.2952 - val_fmeasure: 0.8838
>>> F1-score (4): 0.88352 Best (4): + 0.88352 (0.5 : 0.00308)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3164 - fmeasure: 0.8693 - val_loss: 0.3122 - val_fmeasure: 0.8764
>>> F1-score (5): 0.87689 Best (5): + 0.87689 (0.5 : 0.01348)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3162 - fmeasure: 0.8701 - val_loss: 0.2830 - val_fmeasure: 0.8849
>>> F1-score (6): 0.88591 Best (6): + 0.88591 (0.5 : 0.0071)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3157 - fmeasure: 0.8708 - val_loss: 0.2953 - val_fmeasure: 0.8714
>>> F1-score (7): 0.8717 Best (7): + 0.8717 (0.5 : 0.00014)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3174 - fmeasure: 0.8686 - val_loss: 0.3022 - val_fmeasure: 0.8808
>>> F1-score (8): 0.88129 Best (8): + 0.88129 (0.5 : 0.0147)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3185 - fmeasure: 0.8666 - val_loss: 0.3097 - val_fmeasure: 0.8803
>>> F1-score (9): 0.88045 Best (9): + 0.88045 (0.5 : 0.00693)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3186 - fmeasure: 0.8684 - val_loss: 0.3073 - val_fmeasure: 0.8807
>>> F1-score (10): 0.88089 Best (10): + 0.88089 (0.5 : 0.00488)

F1-score for this epoch: 0.879402 ( 0.5 )-- Best F1-score::==> 0.879402 ( 0.5 )  (for epoch # 6 of 20 epochs)

~~~~~~~~~ BP/CC/MF ~~~~~~~~~~~~~~ EPOCH 7/20 (Embedding dimension: 100) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2993 - fmeasure: 0.8783 - val_loss: 0.2884 - val_fmeasure: 0.8759
>>> F1-score (1): 0.87624 Best (1): - 0.88169 (0.5 : -0.00545)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2976 - fmeasure: 0.8786 - val_loss: 0.2932 - val_fmeasure: 0.8769
>>> F1-score (2): 0.87786 Best (2): - 0.87826 (0.5 : -0.0004)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2987 - fmeasure: 0.8782 - val_loss: 0.3174 - val_fmeasure: 0.8761
>>> F1-score (3): 0.87667 Best (3): + 0.87667 (0.5 : 0.00325)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3014 - fmeasure: 0.8768 - val_loss: 0.2659 - val_fmeasure: 0.8972
>>> F1-score (4): 0.89663 Best (4): + 0.89663 (0.5 : 0.01311)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2999 - fmeasure: 0.8756 - val_loss: 0.3078 - val_fmeasure: 0.8785
>>> F1-score (5): 0.879 Best (5): + 0.879 (0.5 : 0.00211)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3003 - fmeasure: 0.8768 - val_loss: 0.2732 - val_fmeasure: 0.8939
>>> F1-score (6): 0.8944 Best (6): + 0.8944 (0.5 : 0.00849)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2984 - fmeasure: 0.8789 - val_loss: 0.2869 - val_fmeasure: 0.8814
>>> F1-score (7): 0.88173 Best (7): + 0.88173 (0.5 : 0.01003)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3020 - fmeasure: 0.8755 - val_loss: 0.2964 - val_fmeasure: 0.8777
>>> F1-score (8): 0.87799 Best (8): - 0.88129 (0.5 : -0.0033)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3000 - fmeasure: 0.8773 - val_loss: 0.2998 - val_fmeasure: 0.8809
>>> F1-score (9): 0.88136 Best (9): + 0.88136 (0.5 : 0.00091)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.3008 - fmeasure: 0.8780 - val_loss: 0.2994 - val_fmeasure: 0.8734
>>> F1-score (10): 0.87332 Best (10): - 0.88089 (0.5 : -0.00757)

F1-score for this epoch: 0.88152 ( 0.5 )-- Best F1-score::==> 0.88152 ( 0.5 )  (for epoch # 7 of 20 epochs)

~~~~~~~~~ BP/CC/MF ~~~~~~~~~~~~~~ EPOCH 8/20 (Embedding dimension: 100) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2839 - fmeasure: 0.8840 - val_loss: 0.2806 - val_fmeasure: 0.8865
>>> F1-score (1): 0.88682 Best (1): + 0.88682 (0.5 : 0.00513)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2841 - fmeasure: 0.8848 - val_loss: 0.2942 - val_fmeasure: 0.8826
>>> F1-score (2): 0.88336 Best (2): + 0.88336 (0.5 : 0.0051)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2840 - fmeasure: 0.8844 - val_loss: 0.3011 - val_fmeasure: 0.8761
>>> F1-score (3): 0.8769 Best (3): + 0.8769 (0.5 : 0.00023)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2855 - fmeasure: 0.8843 - val_loss: 0.2584 - val_fmeasure: 0.8977
>>> F1-score (4): 0.89717 Best (4): + 0.89717 (0.5 : 0.00054)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2833 - fmeasure: 0.8844 - val_loss: 0.2907 - val_fmeasure: 0.8847
>>> F1-score (5): 0.885 Best (5): + 0.885 (0.5 : 0.006)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2871 - fmeasure: 0.8839 - val_loss: 0.2688 - val_fmeasure: 0.8934
>>> F1-score (6): 0.89416 Best (6): - 0.8944 (0.5 : -0.00024)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2865 - fmeasure: 0.8840 - val_loss: 0.2851 - val_fmeasure: 0.8814
>>> F1-score (7): 0.88166 Best (7): - 0.88173 (0.5 : -7e-05)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2867 - fmeasure: 0.8832 - val_loss: 0.2886 - val_fmeasure: 0.8854
>>> F1-score (8): 0.88603 Best (8): + 0.88603 (0.5 : 0.00474)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2859 - fmeasure: 0.8838 - val_loss: 0.2905 - val_fmeasure: 0.8878
>>> F1-score (9): 0.88808 Best (9): + 0.88808 (0.5 : 0.00672)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2847 - fmeasure: 0.8849 - val_loss: 0.2832 - val_fmeasure: 0.8905
>>> F1-score (10): 0.89068 Best (10): + 0.89068 (0.5 : 0.00979)

F1-score for this epoch: 0.886986 ( 0.5 )-- Best F1-score::==> 0.886986 ( 0.5 )  (for epoch # 8 of 20 epochs)

~~~~~~~~~ BP/CC/MF ~~~~~~~~~~~~~~ EPOCH 9/20 (Embedding dimension: 100) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2721 - fmeasure: 0.8912 - val_loss: 0.2709 - val_fmeasure: 0.8947
>>> F1-score (1): 0.89504 Best (1): + 0.89504 (0.5 : 0.00822)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 4s - loss: 0.2732 - fmeasure: 0.8896 - val_loss: 0.2793 - val_fmeasure: 0.8875
>>> F1-score (2): 0.88837 Best (2): + 0.88837 (0.5 : 0.00501)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 4s - loss: 0.2727 - fmeasure: 0.8905 - val_loss: 0.2960 - val_fmeasure: 0.8813
>>> F1-score (3): 0.88179 Best (3): + 0.88179 (0.5 : 0.00489)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2725 - fmeasure: 0.8910 - val_loss: 0.2509 - val_fmeasure: 0.9010
>>> F1-score (4): 0.9006 Best (4): + 0.9006 (0.5 : 0.00343)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2703 - fmeasure: 0.8916 - val_loss: 0.2855 - val_fmeasure: 0.8854
>>> F1-score (5): 0.88579 Best (5): + 0.88579 (0.5 : 0.00079)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2739 - fmeasure: 0.8891 - val_loss: 0.2605 - val_fmeasure: 0.8991
>>> F1-score (6): 0.89976 Best (6): + 0.89976 (0.5 : 0.00536)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2743 - fmeasure: 0.8903 - val_loss: 0.2742 - val_fmeasure: 0.8833
>>> F1-score (7): 0.8834 Best (7): + 0.8834 (0.5 : 0.00167)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2744 - fmeasure: 0.8898 - val_loss: 0.2834 - val_fmeasure: 0.8930
>>> F1-score (8): 0.89358 Best (8): + 0.89358 (0.5 : 0.00755)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2727 - fmeasure: 0.8890 - val_loss: 0.2827 - val_fmeasure: 0.8923
>>> F1-score (9): 0.89247 Best (9): + 0.89247 (0.5 : 0.00439)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2736 - fmeasure: 0.8896 - val_loss: 0.2768 - val_fmeasure: 0.8966
>>> F1-score (10): 0.89692 Best (10): + 0.89692 (0.5 : 0.00624)

F1-score for this epoch: 0.891772 ( 0.5 )-- Best F1-score::==> 0.891772 ( 0.5 )  (for epoch # 9 of 20 epochs)

~~~~~~~~~ BP/CC/MF ~~~~~~~~~~~~~~ EPOCH 10/20 (Embedding dimension: 100) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2609 - fmeasure: 0.8944 - val_loss: 0.2656 - val_fmeasure: 0.8894
>>> F1-score (1): 0.88964 Best (1): - 0.89504 (0.5 : -0.0054)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2609 - fmeasure: 0.8945 - val_loss: 0.2730 - val_fmeasure: 0.8896
>>> F1-score (2): 0.89022 Best (2): + 0.89022 (0.5 : 0.00185)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2593 - fmeasure: 0.8950 - val_loss: 0.3012 - val_fmeasure: 0.8832
>>> F1-score (3): 0.88379 Best (3): + 0.88379 (0.5 : 0.002)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2556 - fmeasure: 0.8985 - val_loss: 0.2461 - val_fmeasure: 0.9040
>>> F1-score (4): 0.90376 Best (4): + 0.90376 (0.5 : 0.00316)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2578 - fmeasure: 0.8960 - val_loss: 0.2877 - val_fmeasure: 0.8896
>>> F1-score (5): 0.88994 Best (5): + 0.88994 (0.5 : 0.00415)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2629 - fmeasure: 0.8944 - val_loss: 0.2640 - val_fmeasure: 0.9029
>>> F1-score (6): 0.90338 Best (6): + 0.90338 (0.5 : 0.00362)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 4s - loss: 0.2608 - fmeasure: 0.8959 - val_loss: 0.2682 - val_fmeasure: 0.8872
>>> F1-score (7): 0.88736 Best (7): + 0.88736 (0.5 : 0.00396)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 4s - loss: 0.2637 - fmeasure: 0.8937 - val_loss: 0.2679 - val_fmeasure: 0.8920
>>> F1-score (8): 0.89244 Best (8): - 0.89358 (0.5 : -0.00114)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2578 - fmeasure: 0.8973 - val_loss: 0.2811 - val_fmeasure: 0.8926
>>> F1-score (9): 0.89262 Best (9): + 0.89262 (0.5 : 0.00015)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 4s - loss: 0.2625 - fmeasure: 0.8955 - val_loss: 0.2785 - val_fmeasure: 0.8924
>>> F1-score (10): 0.89275 Best (10): - 0.89692 (0.5 : -0.00417)

F1-score for this epoch: 0.89259 ( 0.5 )-- Best F1-score::==> 0.89259 ( 0.5 )  (for epoch # 10 of 20 epochs)

~~~~~~~~~ BP/CC/MF ~~~~~~~~~~~~~~ EPOCH 11/20 (Embedding dimension: 100) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 4s - loss: 0.2515 - fmeasure: 0.8995 - val_loss: 0.2641 - val_fmeasure: 0.8975
>>> F1-score (1): 0.89786 Best (1): + 0.89786 (0.5 : 0.00282)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 4s - loss: 0.2501 - fmeasure: 0.9006 - val_loss: 0.2706 - val_fmeasure: 0.8895
>>> F1-score (2): 0.89011 Best (2): - 0.89022 (0.5 : -0.00011)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 4s - loss: 0.2492 - fmeasure: 0.8993 - val_loss: 0.2793 - val_fmeasure: 0.8865
>>> F1-score (3): 0.88682 Best (3): + 0.88682 (0.5 : 0.00303)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 4s - loss: 0.2503 - fmeasure: 0.9008 - val_loss: 0.2377 - val_fmeasure: 0.9080
>>> F1-score (4): 0.9078 Best (4): + 0.9078 (0.5 : 0.00404)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2476 - fmeasure: 0.9019 - val_loss: 0.2755 - val_fmeasure: 0.8947
>>> F1-score (5): 0.89496 Best (5): + 0.89496 (0.5 : 0.00502)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 4s - loss: 0.2505 - fmeasure: 0.9006 - val_loss: 0.2528 - val_fmeasure: 0.8974
>>> F1-score (6): 0.89813 Best (6): - 0.90338 (0.5 : -0.00525)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 4s - loss: 0.2509 - fmeasure: 0.9011 - val_loss: 0.2638 - val_fmeasure: 0.8916
>>> F1-score (7): 0.8919 Best (7): + 0.8919 (0.5 : 0.00454)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 4s - loss: 0.2536 - fmeasure: 0.8982 - val_loss: 0.2698 - val_fmeasure: 0.8978
>>> F1-score (8): 0.89851 Best (8): + 0.89851 (0.5 : 0.00493)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 4s - loss: 0.2496 - fmeasure: 0.8997 - val_loss: 0.2815 - val_fmeasure: 0.8974
>>> F1-score (9): 0.89757 Best (9): + 0.89757 (0.5 : 0.00495)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2490 - fmeasure: 0.9010 - val_loss: 0.2663 - val_fmeasure: 0.8950
>>> F1-score (10): 0.89507 Best (10): - 0.89692 (0.5 : -0.00185)

F1-score for this epoch: 0.895873 ( 0.5 )-- Best F1-score::==> 0.895873 ( 0.5 )  (for epoch # 11 of 20 epochs)

~~~~~~~~~ BP/CC/MF ~~~~~~~~~~~~~~ EPOCH 12/20 (Embedding dimension: 100) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 4s - loss: 0.2429 - fmeasure: 0.9036 - val_loss: 0.2529 - val_fmeasure: 0.8975
>>> F1-score (1): 0.8978 Best (1): - 0.89786 (0.5 : -6e-05)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2395 - fmeasure: 0.9043 - val_loss: 0.2652 - val_fmeasure: 0.8976
>>> F1-score (2): 0.89807 Best (2): + 0.89807 (0.5 : 0.00785)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2387 - fmeasure: 0.9056 - val_loss: 0.2855 - val_fmeasure: 0.8868
>>> F1-score (3): 0.88722 Best (3): + 0.88722 (0.5 : 0.0004)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2399 - fmeasure: 0.9044 - val_loss: 0.2435 - val_fmeasure: 0.9018
>>> F1-score (4): 0.90152 Best (4): - 0.9078 (0.5 : -0.00628)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2398 - fmeasure: 0.9053 - val_loss: 0.2751 - val_fmeasure: 0.8897
>>> F1-score (5): 0.89012 Best (5): - 0.89496 (0.5 : -0.00484)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2412 - fmeasure: 0.9041 - val_loss: 0.2508 - val_fmeasure: 0.9007
>>> F1-score (6): 0.9013 Best (6): - 0.90338 (0.5 : -0.00208)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2382 - fmeasure: 0.9055 - val_loss: 0.2783 - val_fmeasure: 0.8801
>>> F1-score (7): 0.88044 Best (7): - 0.8919 (0.5 : -0.01146)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2419 - fmeasure: 0.9031 - val_loss: 0.2606 - val_fmeasure: 0.9019
>>> F1-score (8): 0.90255 Best (8): + 0.90255 (0.5 : 0.00404)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2406 - fmeasure: 0.9054 - val_loss: 0.2770 - val_fmeasure: 0.8975
>>> F1-score (9): 0.89755 Best (9): - 0.89757 (0.5 : -2e-05)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2389 - fmeasure: 0.9058 - val_loss: 0.2621 - val_fmeasure: 0.8978
>>> F1-score (10): 0.89797 Best (10): + 0.89797 (0.5 : 0.00105)

F1-score for this epoch: 0.895454 ( 0.5 )-- Best F1-score::==> 0.895873 ( 0.5 )  (for epoch # 11 of 20 epochs)

~~~~~~~~~ BP/CC/MF ~~~~~~~~~~~~~~ EPOCH 13/20 (Embedding dimension: 100) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2329 - fmeasure: 0.9079 - val_loss: 0.2565 - val_fmeasure: 0.8951
>>> F1-score (1): 0.89516 Best (1): - 0.89786 (0.5 : -0.0027)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2316 - fmeasure: 0.9093 - val_loss: 0.2628 - val_fmeasure: 0.8986
>>> F1-score (2): 0.89922 Best (2): + 0.89922 (0.5 : 0.00115)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2321 - fmeasure: 0.9076 - val_loss: 0.2763 - val_fmeasure: 0.8937
>>> F1-score (3): 0.89409 Best (3): + 0.89409 (0.5 : 0.00687)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2298 - fmeasure: 0.9097 - val_loss: 0.2360 - val_fmeasure: 0.9082
>>> F1-score (4): 0.90775 Best (4): - 0.9078 (0.5 : -5e-05)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 4s - loss: 0.2321 - fmeasure: 0.9084 - val_loss: 0.2712 - val_fmeasure: 0.8963
>>> F1-score (5): 0.89642 Best (5): + 0.89642 (0.5 : 0.00146)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2329 - fmeasure: 0.9082 - val_loss: 0.2427 - val_fmeasure: 0.9074
>>> F1-score (6): 0.90806 Best (6): + 0.90806 (0.5 : 0.00468)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2333 - fmeasure: 0.9075 - val_loss: 0.2528 - val_fmeasure: 0.8976
>>> F1-score (7): 0.89763 Best (7): + 0.89763 (0.5 : 0.00573)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2336 - fmeasure: 0.9080 - val_loss: 0.2573 - val_fmeasure: 0.9001
>>> F1-score (8): 0.90049 Best (8): - 0.90255 (0.5 : -0.00206)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2312 - fmeasure: 0.9082 - val_loss: 0.2740 - val_fmeasure: 0.8973
>>> F1-score (9): 0.89751 Best (9): - 0.89757 (0.5 : -6e-05)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2322 - fmeasure: 0.9087 - val_loss: 0.2586 - val_fmeasure: 0.9002
>>> F1-score (10): 0.90045 Best (10): + 0.90045 (0.5 : 0.00248)

F1-score for this epoch: 0.899678 ( 0.5 )-- Best F1-score::==> 0.899678 ( 0.5 )  (for epoch # 13 of 20 epochs)

~~~~~~~~~ BP/CC/MF ~~~~~~~~~~~~~~ EPOCH 14/20 (Embedding dimension: 100) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2273 - fmeasure: 0.9106 - val_loss: 0.2454 - val_fmeasure: 0.9027
>>> F1-score (1): 0.90297 Best (1): + 0.90297 (0.5 : 0.00511)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2221 - fmeasure: 0.9118 - val_loss: 0.2727 - val_fmeasure: 0.8915
>>> F1-score (2): 0.89229 Best (2): - 0.89922 (0.5 : -0.00693)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 4s - loss: 0.2243 - fmeasure: 0.9121 - val_loss: 0.2803 - val_fmeasure: 0.8929
>>> F1-score (3): 0.89335 Best (3): - 0.89409 (0.5 : -0.00074)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2227 - fmeasure: 0.9107 - val_loss: 0.2390 - val_fmeasure: 0.9082
>>> F1-score (4): 0.90803 Best (4): + 0.90803 (0.5 : 0.00023)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 4s - loss: 0.2211 - fmeasure: 0.9131 - val_loss: 0.2637 - val_fmeasure: 0.8998
>>> F1-score (5): 0.90003 Best (5): + 0.90003 (0.5 : 0.00361)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2235 - fmeasure: 0.9119 - val_loss: 0.2452 - val_fmeasure: 0.9009
>>> F1-score (6): 0.90194 Best (6): - 0.90806 (0.5 : -0.00612)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2228 - fmeasure: 0.9132 - val_loss: 0.2524 - val_fmeasure: 0.8985
>>> F1-score (7): 0.89864 Best (7): + 0.89864 (0.5 : 0.00101)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2254 - fmeasure: 0.9120 - val_loss: 0.2500 - val_fmeasure: 0.9108
>>> F1-score (8): 0.91146 Best (8): + 0.91146 (0.5 : 0.00891)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2251 - fmeasure: 0.9111 - val_loss: 0.2705 - val_fmeasure: 0.8975
>>> F1-score (9): 0.89762 Best (9): + 0.89762 (0.5 : 5e-05)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2243 - fmeasure: 0.9103 - val_loss: 0.2569 - val_fmeasure: 0.8978
>>> F1-score (10): 0.898 Best (10): - 0.90045 (0.5 : -0.00245)

F1-score for this epoch: 0.900433 ( 0.5 )-- Best F1-score::==> 0.900433 ( 0.5 )  (for epoch # 14 of 20 epochs)

~~~~~~~~~ BP/CC/MF ~~~~~~~~~~~~~~ EPOCH 15/20 (Embedding dimension: 100) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2197 - fmeasure: 0.9133 - val_loss: 0.2466 - val_fmeasure: 0.9033
>>> F1-score (1): 0.9036 Best (1): + 0.9036 (0.5 : 0.00063)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2147 - fmeasure: 0.9147 - val_loss: 0.2541 - val_fmeasure: 0.9062
>>> F1-score (2): 0.90666 Best (2): + 0.90666 (0.5 : 0.00744)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2133 - fmeasure: 0.9162 - val_loss: 0.2772 - val_fmeasure: 0.8993
>>> F1-score (3): 0.89968 Best (3): + 0.89968 (0.5 : 0.00559)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2166 - fmeasure: 0.9144 - val_loss: 0.2307 - val_fmeasure: 0.9155
>>> F1-score (4): 0.91543 Best (4): + 0.91543 (0.5 : 0.0074)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2147 - fmeasure: 0.9153 - val_loss: 0.2709 - val_fmeasure: 0.9027
>>> F1-score (5): 0.90296 Best (5): + 0.90296 (0.5 : 0.00293)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2184 - fmeasure: 0.9130 - val_loss: 0.2447 - val_fmeasure: 0.9092
>>> F1-score (6): 0.90973 Best (6): + 0.90973 (0.5 : 0.00167)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2159 - fmeasure: 0.9165 - val_loss: 0.2489 - val_fmeasure: 0.8993
>>> F1-score (7): 0.8996 Best (7): + 0.8996 (0.5 : 0.00096)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2197 - fmeasure: 0.9145 - val_loss: 0.2527 - val_fmeasure: 0.9088
>>> F1-score (8): 0.90931 Best (8): - 0.91146 (0.5 : -0.00215)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2156 - fmeasure: 0.9151 - val_loss: 0.2717 - val_fmeasure: 0.8996
>>> F1-score (9): 0.89984 Best (9): + 0.89984 (0.5 : 0.00222)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2185 - fmeasure: 0.9142 - val_loss: 0.2550 - val_fmeasure: 0.9010
>>> F1-score (10): 0.90111 Best (10): + 0.90111 (0.5 : 0.00066)

F1-score for this epoch: 0.904792 ( 0.5 )-- Best F1-score::==> 0.904792 ( 0.5 )  (for epoch # 15 of 20 epochs)

~~~~~~~~~ BP/CC/MF ~~~~~~~~~~~~~~ EPOCH 16/20 (Embedding dimension: 100) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2099 - fmeasure: 0.9167 - val_loss: 0.2420 - val_fmeasure: 0.9074
>>> F1-score (1): 0.90756 Best (1): + 0.90756 (0.5 : 0.00396)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2117 - fmeasure: 0.9160 - val_loss: 0.2600 - val_fmeasure: 0.9000
>>> F1-score (2): 0.90058 Best (2): - 0.90666 (0.5 : -0.00608)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2085 - fmeasure: 0.9183 - val_loss: 0.2758 - val_fmeasure: 0.8974
>>> F1-score (3): 0.8979 Best (3): - 0.89968 (0.5 : -0.00178)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2087 - fmeasure: 0.9181 - val_loss: 0.2309 - val_fmeasure: 0.9113
>>> F1-score (4): 0.91115 Best (4): - 0.91543 (0.5 : -0.00428)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 4s - loss: 0.2076 - fmeasure: 0.9174 - val_loss: 0.2698 - val_fmeasure: 0.9021
>>> F1-score (5): 0.90233 Best (5): - 0.90296 (0.5 : -0.00063)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2110 - fmeasure: 0.9165 - val_loss: 0.2385 - val_fmeasure: 0.9043
>>> F1-score (6): 0.90488 Best (6): - 0.90973 (0.5 : -0.00485)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2102 - fmeasure: 0.9168 - val_loss: 0.2514 - val_fmeasure: 0.9001
>>> F1-score (7): 0.90024 Best (7): + 0.90024 (0.5 : 0.00064)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2115 - fmeasure: 0.9173 - val_loss: 0.2630 - val_fmeasure: 0.8949
>>> F1-score (8): 0.89521 Best (8): - 0.91146 (0.5 : -0.01625)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2076 - fmeasure: 0.9188 - val_loss: 0.2676 - val_fmeasure: 0.9047
>>> F1-score (9): 0.9049 Best (9): + 0.9049 (0.5 : 0.00506)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2086 - fmeasure: 0.9178 - val_loss: 0.2573 - val_fmeasure: 0.9043
>>> F1-score (10): 0.90448 Best (10): + 0.90448 (0.5 : 0.00337)

F1-score for this epoch: 0.902923 (0.5) -- Best F1-score ==> 0.904792 (0.5) (for epoch 15 of 20 epochs)

~~~~~~~~~ BP/CC/MF ~~~~~~~~~~~~~~ EPOCH 17/20 (Embedding dimension: 100) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2052 - fmeasure: 0.9200 - val_loss: 0.2360 - val_fmeasure: 0.9072
>>> F1-score (1): 0.9074 Best (1): - 0.90756 (0.5 : -0.00016)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2045 - fmeasure: 0.9200 - val_loss: 0.2488 - val_fmeasure: 0.9072
>>> F1-score (2): 0.90758 Best (2): + 0.90758 (0.5 : 0.00092)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2026 - fmeasure: 0.9202 - val_loss: 0.2756 - val_fmeasure: 0.8978
>>> F1-score (3): 0.89819 Best (3): - 0.89968 (0.5 : -0.00149)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2024 - fmeasure: 0.9220 - val_loss: 0.2298 - val_fmeasure: 0.9139
>>> F1-score (4): 0.91383 Best (4): - 0.91543 (0.5 : -0.0016)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2008 - fmeasure: 0.9219 - val_loss: 0.2613 - val_fmeasure: 0.9011
>>> F1-score (5): 0.90157 Best (5): - 0.90296 (0.5 : -0.00139)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2055 - fmeasure: 0.9190 - val_loss: 0.2347 - val_fmeasure: 0.9121
>>> F1-score (6): 0.9128 Best (6): + 0.9128 (0.5 : 0.00307)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2040 - fmeasure: 0.9191 - val_loss: 0.2454 - val_fmeasure: 0.9010
>>> F1-score (7): 0.90136 Best (7): + 0.90136 (0.5 : 0.00112)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2082 - fmeasure: 0.9169 - val_loss: 0.2547 - val_fmeasure: 0.9063
>>> F1-score (8): 0.90697 Best (8): - 0.91146 (0.5 : -0.00449)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2004 - fmeasure: 0.9204 - val_loss: 0.2667 - val_fmeasure: 0.9024
>>> F1-score (9): 0.90253 Best (9): - 0.9049 (0.5 : -0.00237)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.2031 - fmeasure: 0.9208 - val_loss: 0.2474 - val_fmeasure: 0.9041
>>> F1-score (10): 0.90445 Best (10): - 0.90448 (0.5 : -3e-05)

F1-score for this epoch: 0.905668 (0.5) -- Best F1-score ==> 0.905668 (0.5) (for epoch 17 of 20 epochs)

~~~~~~~~~ BP/CC/MF ~~~~~~~~~~~~~~ EPOCH 18/20 (Embedding dimension: 100) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 4s - loss: 0.1979 - fmeasure: 0.9234 - val_loss: 0.2447 - val_fmeasure: 0.9067
>>> F1-score (1): 0.90711 Best (1): - 0.90756 (0.5 : -0.00045)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 4s - loss: 0.1973 - fmeasure: 0.9225 - val_loss: 0.2671 - val_fmeasure: 0.9052
>>> F1-score (2): 0.90546 Best (2): - 0.90758 (0.5 : -0.00212)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 4s - loss: 0.1961 - fmeasure: 0.9244 - val_loss: 0.2785 - val_fmeasure: 0.9009
>>> F1-score (3): 0.90109 Best (3): + 0.90109 (0.5 : 0.00141)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.1973 - fmeasure: 0.9224 - val_loss: 0.2273 - val_fmeasure: 0.9104
>>> F1-score (4): 0.91076 Best (4): - 0.91543 (0.5 : -0.00467)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.1955 - fmeasure: 0.9238 - val_loss: 0.2620 - val_fmeasure: 0.9011
>>> F1-score (5): 0.90161 Best (5): - 0.90296 (0.5 : -0.00135)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.1977 - fmeasure: 0.9225 - val_loss: 0.2406 - val_fmeasure: 0.9084
>>> F1-score (6): 0.9092 Best (6): - 0.9128 (0.5 : -0.0036)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.1982 - fmeasure: 0.9234 - val_loss: 0.2446 - val_fmeasure: 0.9028
>>> F1-score (7): 0.90315 Best (7): + 0.90315 (0.5 : 0.00179)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.1990 - fmeasure: 0.9217 - val_loss: 0.2397 - val_fmeasure: 0.9108
>>> F1-score (8): 0.91134 Best (8): - 0.91146 (0.5 : -0.00012)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.1960 - fmeasure: 0.9234 - val_loss: 0.2740 - val_fmeasure: 0.9026
>>> F1-score (9): 0.90296 Best (9): - 0.9049 (0.5 : -0.00194)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.1956 - fmeasure: 0.9230 - val_loss: 0.2505 - val_fmeasure: 0.9061
>>> F1-score (10): 0.90616 Best (10): + 0.90616 (0.5 : 0.00168)

F1-score for this epoch: 0.905884 (0.5) -- Best F1-score ==> 0.905884 (0.5) (for epoch 18 of 20 epochs)

~~~~~~~~~ BP/CC/MF ~~~~~~~~~~~~~~ EPOCH 19/20 (Embedding dimension: 100) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.1939 - fmeasure: 0.9244 - val_loss: 0.2317 - val_fmeasure: 0.9081
>>> F1-score (1): 0.90849 Best (1): + 0.90849 (0.5 : 0.00093)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.1921 - fmeasure: 0.9247 - val_loss: 0.2503 - val_fmeasure: 0.9082
>>> F1-score (2): 0.90882 Best (2): + 0.90882 (0.5 : 0.00124)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.1924 - fmeasure: 0.9236 - val_loss: 0.2682 - val_fmeasure: 0.9006
>>> F1-score (3): 0.90107 Best (3): - 0.90109 (0.5 : -2e-05)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.1909 - fmeasure: 0.9247 - val_loss: 0.2337 - val_fmeasure: 0.9107
>>> F1-score (4): 0.9109 Best (4): - 0.91543 (0.5 : -0.00453)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.1897 - fmeasure: 0.9268 - val_loss: 0.2659 - val_fmeasure: 0.9051
>>> F1-score (5): 0.90524 Best (5): + 0.90524 (0.5 : 0.00228)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.1933 - fmeasure: 0.9248 - val_loss: 0.2341 - val_fmeasure: 0.9114
>>> F1-score (6): 0.91194 Best (6): - 0.9128 (0.5 : -0.00086)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.1921 - fmeasure: 0.9254 - val_loss: 0.2490 - val_fmeasure: 0.9019
>>> F1-score (7): 0.90219 Best (7): - 0.90315 (0.5 : -0.00096)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.1937 - fmeasure: 0.9249 - val_loss: 0.2459 - val_fmeasure: 0.9087
>>> F1-score (8): 0.90936 Best (8): - 0.91146 (0.5 : -0.0021)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.1914 - fmeasure: 0.9248 - val_loss: 0.2681 - val_fmeasure: 0.9053
>>> F1-score (9): 0.90564 Best (9): + 0.90564 (0.5 : 0.00074)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.1937 - fmeasure: 0.9245 - val_loss: 0.2504 - val_fmeasure: 0.9010
>>> F1-score (10): 0.90108 Best (10): - 0.90616 (0.5 : -0.00508)

F1-score for this epoch: 0.906473 (0.5) -- Best F1-score ==> 0.906473 (0.5) (for epoch 19 of 20 epochs)

~~~~~~~~~ BP/CC/MF ~~~~~~~~~~~~~~ EPOCH 20/20 (Embedding dimension: 100) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.1878 - fmeasure: 0.9268 - val_loss: 0.2380 - val_fmeasure: 0.9097
>>> F1-score (1): 0.90998 Best (1): + 0.90998 (0.5 : 0.00149)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.1852 - fmeasure: 0.9289 - val_loss: 0.2524 - val_fmeasure: 0.9065
>>> F1-score (2): 0.90698 Best (2): - 0.90882 (0.5 : -0.00184)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.1853 - fmeasure: 0.9286 - val_loss: 0.2806 - val_fmeasure: 0.9003
>>> F1-score (3): 0.90085 Best (3): - 0.90109 (0.5 : -0.00024)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.1842 - fmeasure: 0.9277 - val_loss: 0.2379 - val_fmeasure: 0.9110
>>> F1-score (4): 0.91095 Best (4): - 0.91543 (0.5 : -0.00448)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.1867 - fmeasure: 0.9276 - val_loss: 0.2625 - val_fmeasure: 0.9024
>>> F1-score (5): 0.90309 Best (5): - 0.90524 (0.5 : -0.00215)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.1860 - fmeasure: 0.9274 - val_loss: 0.2459 - val_fmeasure: 0.9125
>>> F1-score (6): 0.91301 Best (6): + 0.91301 (0.5 : 0.00021)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.1855 - fmeasure: 0.9285 - val_loss: 0.2443 - val_fmeasure: 0.9027
>>> F1-score (7): 0.90321 Best (7): + 0.90321 (0.5 : 6e-05)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.1907 - fmeasure: 0.9257 - val_loss: 0.2352 - val_fmeasure: 0.9151
>>> F1-score (8): 0.91546 Best (8): + 0.91546 (0.5 : 0.004)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.1838 - fmeasure: 0.9279 - val_loss: 0.2596 - val_fmeasure: 0.9057
>>> F1-score (9): 0.90602 Best (9): + 0.90602 (0.5 : 0.00038)

Train on 59322 samples, validate on 3295 samples
Epoch 1/1
59322/59322 [==============================] - 3s - loss: 0.1878 - fmeasure: 0.9282 - val_loss: 0.2464 - val_fmeasure: 0.9032
>>> F1-score (10): 0.90336 Best (10): - 0.90616 (0.5 : -0.0028)

F1-score for this epoch: 0.907291 (0.5) -- Best F1-score ==> 0.907291 (0.5) (for epoch 20 of 20 epochs)

~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FINAL RESULT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~

For embedding size '100', the best number of epochs is '20', with an F1-score of 0.907291
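
A note on reading the log above: each ">>> F1-score (k)" line reports fold k of the 10-fold cross-validation (FOLD = 10 in the setup cell). The "+" or "-" flag marks whether that fold improved on its own running best, and the trailing pair gives the decision threshold (0.5) applied to the sigmoid outputs and the change relative to the fold's previous best. The per-epoch score is the mean of the ten current fold scores, and "Best F1-score" is the running maximum of that mean over epochs. The sketch below mirrors this bookkeeping for a single epoch; `folds`, `best_fold_f1`, and `best_epoch_f1` are hypothetical names, since the actual loop variables are not shown in this notebook.

# Hedged sketch of the bookkeeping behind the log above; `folds` (a list of
# (model, X_val, y_val) triples), `best_fold_f1`, and `best_epoch_f1` are
# hypothetical names -- the real loop lives in the training cell.
THRESHOLD = 0.5
best_fold_f1 = {k: 0.0 for k in range(1, FOLD + 1)}  # running best per fold
best_epoch_f1 = 0.0                                  # running best epoch mean

fold_scores = []
for k, (model, X_val, y_val) in enumerate(folds, 1):
    probs = model.predict(X_val, batch_size=BATCH_SIZE)  # sigmoid outputs in [0, 1]
    preds = (probs.ravel() > THRESHOLD).astype(int)      # binarize at the 0.5 threshold
    current = f1_score(y_val, preds)                     # sklearn binary F1
    delta = current - best_fold_f1[k]                    # change vs. the fold's best
    best_fold_f1[k] = max(best_fold_f1[k], current)
    print '>>> F1-score (%d): %g Best (%d): %s %g (%g : %g)' % (
        k, current, k, '+' if delta > 0 else '-', best_fold_f1[k], THRESHOLD, delta)
    fold_scores.append(current)

epoch_f1 = np.mean(fold_scores)               # "F1-score for this epoch"
best_epoch_f1 = max(best_epoch_f1, epoch_f1)  # feeds the "Best F1-score ==>" report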

Saving the trained models and the embeddings


In [7]:
# Persist the ten trained fold models and the fine-tuned GO-term embeddings.
if SAVE_MODEL:
    save_model(FOLD=FOLD, models=models)

if SAVE_EMBEDDINGS:
    save_embeddings(FOLD=FOLD,
                    embedding_layers=embedding_layers,
                    word_indeces=word_indeces,
                    SUB_ONTOLOGY_work=SUB_ONTOLOGY_work,
                    embedding_save=embedding_save)


Saving model 1 to disk ...
The Model and its Weights Are Saved!!

Saving model 2 to disk ...
The Model and its Weights Are Saved!!

Saving model 3 to disk ...
The Model and its Weights Are Saved!!

Saving model 4 to disk ...
The Model and its Weights Are Saved!!

Saving model 5 to disk ...
The Model and its Weights Are Saved!!

Saving model 6 to disk ...
The Model and its Weights Are Saved!!

Saving model 7 to disk ...
The Model and its Weights Are Saved!!

Saving model 8 to disk ...
The Model and its Weights Are Saved!!

Saving model 9 to disk ...
The Model and its Weights Are Saved!!

Saving model 10 to disk ...
The Model and its Weights Are Saved!!

The Word Embeddings Are Saved!!
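
save_model and save_embeddings come from deepSimDEF.tools.PPI_model_saver, whose internals are not shown in this notebook. The sketch below shows what they plausibly do under the hood, assuming standard Keras serialization (to_json for the architecture, save_weights for the parameters, and get_weights() on an embedding layer to dump its fine-tuned GO-term vectors). The sketch_ function names and file paths are illustrative, not the package's actual ones, and the assumed structure of embedding_layers (keyed by sub-ontology) may differ from the real one.

# Hedged sketch of the saving step, assuming plain Keras serialization;
# the real logic lives in deepSimDEF.tools.PPI_model_saver and may differ.
def sketch_save_model(FOLD, models):
    for i in range(FOLD):
        print 'Saving model %d to disk ...' % (i + 1)
        with open('PPI_model_%d.json' % (i + 1), 'w') as f:  # illustrative path
            f.write(models[i].to_json())                     # architecture only
        models[i].save_weights('PPI_model_%d.h5' % (i + 1))  # learned weights

def sketch_save_embeddings(embedding_layers, word_indeces,
                           SUB_ONTOLOGY_work, embedding_save):
    # Assumes one embedding layer per sub-ontology and a "GO_ID v1 ... v100"
    # line layout like the input .emb files read in the setup cell.
    for sbo in SUB_ONTOLOGY_work:
        weights = embedding_layers[sbo].get_weights()[0]     # (vocab + 1, 100) matrix
        with open(embedding_save[sbo] + '.emb', 'w') as f:
            for GO_ID, idx in word_indeces[sbo].items():
                f.write(GO_ID + ' ' + ' '.join(map(str, weights[idx])) + '\n')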
