Importing required packages into python:


In [2]:
# Required dependencies
# 1. NLTK
# 2. Gensim for word2vec
# 3. Keras with tensorflow/theano backend


import numpy as np
np.random.seed(1337)
import json, re, nltk, string, csv, sys, codecs
from nltk.corpus import wordnet
from gensim.models import Word2Vec
from keras.preprocessing import sequence
from keras.models import Model
from keras.layers import Dense, Dropout, Embedding, LSTM, Input, merge
from keras import layers
from keras.optimizers import RMSprop
from keras.utils import np_utils
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics.pairwise import cosine_similarity


# Hack to increase size due to Error: field larger than field limit (131072)
maxInt = sys.maxsize
decrement = True

while decrement:
    # decrease the maxInt value by factor 10
    # as long as the OverflowError occurs.

    decrement = False
    try:
        csv.field_size_limit(maxInt)
    except OverflowError:
        maxInt = int(maxInt / 10)
        decrement = True
        
open_bugs_csv = 'e1_open.csv'
closed_bugs_csv = 'm15_closed.csv'

#========================================================================================
# Initializing Hyper parameter
#========================================================================================
#1. Word2vec parameters
min_word_frequency_word2vec = 5
embed_size_word2vec = 200
context_window_word2vec = 5

#2. Classifier hyperparameters
numCV = 10
max_sentence_len = 50
min_sentence_length = 15
rankK = 10
batch_size = 32

#========================================================================================
# Preprocess the open bugs, extract the vocabulary and learn the word2vec representation
#========================================================================================
with open(open_bugs_csv) as data_file:
    data = csv.reader(data_file, delimiter=';')

    all_data = []
    for item in data:
        #1. Remove \r 
        current_title = unicode(item[1], errors='ignore').replace('\r', ' ')
        #print current_title
        current_desc = unicode(item[3], errors='ignore').replace('\r', ' ')
        #print current_desc
        #2. Remove URLs
        current_desc = re.sub(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', '', current_desc)    
        #3. Remove Stack Trace
        start_loc = current_desc.find("Stack trace:")
        current_desc = current_desc[:start_loc]    
        #4. Remove hex code
        current_desc = re.sub(r'(\w+)0x\w+', '', current_desc)
        current_title= re.sub(r'(\w+)0x\w+', '', current_title)    
        #5. Change to lower case
        current_desc = current_desc.lower()
        current_title = current_title.lower()    
        #6. Tokenize
        current_desc_tokens = nltk.word_tokenize(current_desc)
        current_title_tokens = nltk.word_tokenize(current_title)
        #7. Strip trailing punctuation marks    
        current_desc_filter = [word.strip(string.punctuation) for word in current_desc_tokens]
        current_title_filter = [word.strip(string.punctuation) for word in current_title_tokens]      
        #8. Join the lists
        current_data = current_title_filter + current_desc_filter
        current_data = filter(None, current_data)
        all_data.append(current_data)
        
#print(len(all_data))
# Learn the word2vec model and extract vocabulary
wordvec_model = Word2Vec(all_data, min_count=min_word_frequency_word2vec, size=embed_size_word2vec, window=context_window_word2vec)
vocabulary = wordvec_model.wv.vocab
#print vocabulary
vocab_size = len(vocabulary)

#========================================================================================
# Preprocess the closed bugs, using the extracted the vocabulary
#========================================================================================
with open(closed_bugs_csv) as data_file:
    data = csv.reader(data_file, delimiter=';')

    all_data = []
    all_owner = []    
    for item in data:
        #1. Remove \r 
        current_title = unicode(item[1], errors='ignore').replace('\r', ' ')
        current_desc = unicode(item[3], errors='ignore').replace('\r', ' ')
        #2. Remove URLs
        current_desc = re.sub(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', '', current_desc)
        #3. Remove Stack Trace
        start_loc = current_desc.find("Stack trace:")
        current_desc = current_desc[:start_loc]
        #4. Remove hex code
        current_desc = re.sub(r'(\w+)0x\w+', '', current_desc)
        current_title= re.sub(r'(\w+)0x\w+', '', current_title)
        #5. Change to lower case
        current_desc = current_desc.lower()
        current_title = current_title.lower()
        #6. Tokenize
        current_desc_tokens = nltk.word_tokenize(current_desc)
        current_title_tokens = nltk.word_tokenize(current_title)
        #7. Strip punctuation marks
        current_desc_filter = [word.strip(string.punctuation) for word in current_desc_tokens]
        current_title_filter = [word.strip(string.punctuation) for word in current_title_tokens]       
        #8. Join the lists
        current_data = current_title_filter + current_desc_filter
        current_data = filter(None, current_data)
        all_data.append(current_data)
        all_owner.append(item[4])
        
#========================================================================================
# Split cross validation sets and perform deep learning + softamx based classification
#========================================================================================
totalLength = len(all_data)
splitLength = int(totalLength / (numCV + 1))

for i in range(1, numCV + 1):
    # Split cross validation set
    print("Starting work on cross validation set {0}".format(i))
    train_data = all_data[:i*splitLength-1]
    test_data = all_data[i*splitLength:(i+1)*splitLength-1]
    train_owner = all_owner[:i*splitLength-1]
    test_owner = all_owner[i*splitLength:(i+1)*splitLength-1]
    
    # Remove words outside the vocabulary
    updated_train_data = []    
    updated_train_data_length = []    
    updated_train_owner = []
    final_test_data = []
    final_test_owner = []
    for j, item in enumerate(train_data):
        current_train_filter = [word for word in item if word in vocabulary]
        if len(current_train_filter) >= min_sentence_length:  
          updated_train_data.append(current_train_filter)
          updated_train_owner.append(train_owner[j])  
          
    for j, item in enumerate(test_data):
        current_test_filter = [word for word in item if word in vocabulary]  
        if len(current_test_filter) >= min_sentence_length:
          final_test_data.append(current_test_filter)          
          final_test_owner.append(test_owner[j])          
    
    # Remove data from test set that is not there in train set
    train_owner_unique = set(updated_train_owner)
    test_owner_unique = set(final_test_owner)
    unwanted_owner = list(test_owner_unique - train_owner_unique)
    updated_test_data = []
    updated_test_owner = []
    updated_test_data_length = []
    for j in range(len(final_test_owner)):
        if final_test_owner[j] not in unwanted_owner:
            updated_test_data.append(final_test_data[j])
            updated_test_owner.append(final_test_owner[j])

    unique_train_label = list(set(updated_train_owner))
    classes = np.array(unique_train_label)
    
    # Create train and test data for deep learning + softmax
    X_train = np.empty(shape=[len(updated_train_data), max_sentence_len, embed_size_word2vec], dtype='float32')
    Y_train = np.empty(shape=[len(updated_train_owner), 1], dtype='int32')
    # 1 - start of sentence, # 2 - end of sentence, # 0 - zero padding. Hence, word indices start with 3 
    for j, curr_row in enumerate(updated_train_data):
        sequence_cnt = 0         
        for item in curr_row:
            if item in vocabulary:
                X_train[j, sequence_cnt, :] = wordvec_model[item] 
                sequence_cnt = sequence_cnt + 1                
                if sequence_cnt == max_sentence_len-1:
                          break                
        for k in range(sequence_cnt, max_sentence_len):
            X_train[j, k, :] = np.zeros((1, embed_size_word2vec))        
        Y_train[j, 0] = unique_train_label.index(updated_train_owner[j])
    
    X_test = np.empty(shape=[len(updated_test_data), max_sentence_len, embed_size_word2vec], dtype='float32')
    Y_test = np.empty(shape=[len(updated_test_owner),1], dtype='int32')
    # 1 - start of sentence, # 2 - end of sentence, # 0 - zero padding. Hence, word indices start with 3 
    for j, curr_row in enumerate(updated_test_data):
        sequence_cnt = 0          
        for item in curr_row:
            if item in vocabulary:
                X_test[j, sequence_cnt, :] = wordvec_model[item] 
                sequence_cnt = sequence_cnt + 1                
                if sequence_cnt == max_sentence_len-1:
                          break                
        for k in range(sequence_cnt, max_sentence_len):
            X_test[j, k, :] = np.zeros((1, embed_size_word2vec))        
        Y_test[j, 0] = unique_train_label.index(updated_test_owner[j])
        
    y_train = np_utils.to_categorical(Y_train, len(unique_train_label))
    y_test = np_utils.to_categorical(Y_test, len(unique_train_label))


    # TODO: Add x_train and x_test
    
    # Construct the deep learning model
    print("Creating Model")
    sequence = Input(shape=(max_sentence_len, embed_size_word2vec), dtype='float32')
    forwards_1 = LSTM(1024)(sequence)
    after_dp_forward_4 = Dropout(0.20)(forwards_1) 
    backwards_1 = LSTM(1024, go_backwards=True)(sequence)
    after_dp_backward_4 = Dropout(0.20)(backwards_1)         
    #merged = merge([after_dp_forward_4, after_dp_backward_4], mode='concat', concat_axis=-1)
    merged = layers.concatenate([after_dp_forward_4, after_dp_backward_4], axis=-1)
    after_dp = Dropout(0.5)(merged)
    output = Dense(len(unique_train_label), activation='softmax')(after_dp)                
    model = Model(input=sequence, output=output)            
    rms = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08)
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])    
    hist = model.fit(X_train, y_train, batch_size=batch_size, epochs=20)  # Rename nb_epochs to epochs // Value original: 200
    
    predict = model.predict(X_test)        
    accuracy = []
    sortedIndices = []
    pred_classes = []
    if len(predict) == 0:
        exit(1)  # Avoid divide by zero
    for ll in predict:
          sortedIndices.append(sorted(range(len(ll)), key=lambda ii: ll[ii], reverse=True))
    for k in range(1, rankK + 1):
          id = 0
          trueNum = 0
          for sortedInd in sortedIndices:
            pred_classes.append(classes[sortedInd[:k]])
            if y_test[id] in classes[sortedInd[:k]]:
                  trueNum += 1            
            id += 1
          accuracy.append((float(trueNum) / len(predict)) * 100)
    print("Test accuracy: ", accuracy)       
    
    train_result = hist.history        
    print(train_result)
    del model

    
#========================================================================================
# Split cross validation sets and perform baseline classifiers
#========================================================================================    
    
totalLength = len(all_data)
splitLength = totalLength / (numCV + 1)

for i in range(1, numCV+1):
    # Split cross validation set
    print("Starting cross validation {0}".format(i))
    train_data = all_data[:i*splitLength-1]
    test_data = all_data[i*splitLength:(i+1)*splitLength-1]
    train_owner = all_owner[:i*splitLength-1]
    test_owner = all_owner[i*splitLength:(i+1)*splitLength-1]
    
    # Remove words outside the vocabulary
    updated_train_data = []    
    updated_train_data_length = []    
    updated_train_owner = []
    final_test_data = []
    final_test_owner = []
    for j, item in enumerate(train_data):
        current_train_filter = [word for word in item if word in vocabulary]
        if len(current_train_filter)>=min_sentence_length:  
          updated_train_data.append(current_train_filter)
          updated_train_owner.append(train_owner[j])  
          
    for j, item in enumerate(test_data):
        current_test_filter = [word for word in item if word in vocabulary]  
        if len(current_test_filter)>=min_sentence_length:
          final_test_data.append(current_test_filter)          
          final_test_owner.append(test_owner[j])          
    
    # Remove data from test set that is not there in train set
    train_owner_unique = set(updated_train_owner)
    test_owner_unique = set(final_test_owner)
    unwanted_owner = list(test_owner_unique - train_owner_unique)
    updated_test_data = []
    updated_test_owner = []
    updated_test_data_length = []
    for j in range(len(final_test_owner)):
        if final_test_owner[j] not in unwanted_owner:
            updated_test_data.append(final_test_data[j])
            updated_test_owner.append(final_test_owner[j])  
    
    train_data = []
    for item in updated_train_data:
          train_data.append(' '.join(item))
         
    test_data = []
    for item in updated_test_data:
          test_data.append(' '.join(item))
    
    vocab_data = []
    for item in vocabulary:
          vocab_data.append(item)
    
    # Extract tf based bag of words representation
    tfidf_transformer = TfidfTransformer(use_idf=False)
    count_vect = CountVectorizer(min_df=1, vocabulary= vocab_data,dtype=np.int32)
    
    train_counts = count_vect.fit_transform(train_data)       
    train_feats = tfidf_transformer.fit_transform(train_counts)
    print(train_feats.shape)
    
    test_counts = count_vect.transform(test_data)
    test_feats = tfidf_transformer.transform(test_counts)
    print(test_feats.shape)
    print("=" * 20)
    
    
    
    # perform classifification
    for classifier in range(1,5):
        #classifier = 3 # 1 - Niave Bayes, 2 - Softmax, 3 - cosine distance, 4 - SVM
        print classifier 
        if classifier == 1:            
            classifierModel = MultinomialNB(alpha=0.01)        
            classifierModel = OneVsRestClassifier(classifierModel).fit(train_feats, updated_train_owner)
            predict = classifierModel.predict_proba(test_feats)  
            classes = classifierModel.classes_  
            
            accuracy = []
            sortedIndices = []
            pred_classes = []
            for ll in predict:
                sortedIndices.append(sorted(range(len(ll)), key=lambda ii: ll[ii], reverse=True))
            for k in range(1, rankK+1):
                id = 0
                trueNum = 0
                for sortedInd in sortedIndices:            
                    if updated_test_owner[id] in classes[sortedInd[:k]]:
                        trueNum += 1
                        pred_classes.append(classes[sortedInd[:k]])
                    id += 1
                accuracy.append((float(trueNum) / len(predict)) * 100)
            print accuracy                                    
        elif classifier == 2:            
            classifierModel = LogisticRegression(solver='lbfgs', penalty='l2', tol=0.01)
            classifierModel = OneVsRestClassifier(classifierModel).fit(train_feats, updated_train_owner)
            predict = classifierModel.predict(test_feats)
            classes = classifierModel.classes_ 
            
            accuracy = []
            sortedIndices = []
            pred_classes = []
            for ll in predict:
                sortedIndices.append(sorted(range(len(ll)), key=lambda ii: ll[ii], reverse=True))
            for k in range(1, rankK+1):
                id = 0
                trueNum = 0
                for sortedInd in sortedIndices:            
                    if updated_test_owner[id] in classes[sortedInd[:k]]:
                        trueNum += 1
                        pred_classes.append(classes[sortedInd[:k]])
                    id += 1
                accuracy.append((float(trueNum) / len(predict)) * 100)
            print accuracy                                   
        elif classifier == 3:            
            predict = cosine_similarity(test_feats, train_feats)
            classes = np.array(updated_train_owner)
            classifierModel = []
            
            accuracy = []
            sortedIndices = []
            pred_classes = []
            for ll in predict:
                sortedIndices.append(sorted(range(len(ll)), key=lambda ii: ll[ii], reverse=True))
            for k in range(1, rankK+1):
                id = 0
                trueNum = 0
                for sortedInd in sortedIndices:            
                    if updated_test_owner[id] in classes[sortedInd[:k]]:
                        trueNum += 1
                        pred_classes.append(classes[sortedInd[:k]])
                    id += 1
                accuracy.append((float(trueNum) / len(predict)) * 100)
            print accuracy                        
        elif classifier == 4:            
            classifierModel = svm.SVC(probability=True, verbose=False, decision_function_shape='ovr', random_state=42)
            classifierModel.fit(train_feats, updated_train_owner)
            predict = classifierModel.predict(test_feats)
            classes = classifierModel.classes_ 
        
            accuracy = []
            sortedIndices = []
            pred_classes = []
            for ll in predict:
                sortedIndices.append(sorted(range(len(ll)), key=lambda ii: ll[ii], reverse=True))
            for k in range(1, rankK+1):
                id = 0
                trueNum = 0
                for sortedInd in sortedIndices:            
                    if updated_test_owner[id] in classes[sortedInd[:k]]:
                        trueNum += 1
                        pred_classes.append(classes[sortedInd[:k]])
                    id += 1
                accuracy.append((float(trueNum) / len(predict)) * 100)
            print accuracy


Starting work on cross validation set 1
Creating Model
/home/eruwsil/anaconda3/envs/MLpy27/lib/python2.7/site-packages/ipykernel_launcher.py:192: DeprecationWarning: Call to deprecated `__getitem__` (Method will be removed in 4.0.0, use self.wv.__getitem__() instead).
/home/eruwsil/anaconda3/envs/MLpy27/lib/python2.7/site-packages/ipykernel_launcher.py:207: DeprecationWarning: Call to deprecated `__getitem__` (Method will be removed in 4.0.0, use self.wv.__getitem__() instead).
/home/eruwsil/anaconda3/envs/MLpy27/lib/python2.7/site-packages/ipykernel_launcher.py:232: UserWarning: Update your `Model` call to the Keras 2 API: `Model(outputs=Tensor("de..., inputs=Tensor("in...)`
Epoch 1/20
74/74 [==============================] - 8s 102ms/step - loss: 4.8410 - acc: 0.0541
Epoch 2/20
74/74 [==============================] - 5s 72ms/step - loss: 4.0167 - acc: 0.0946
Epoch 3/20
74/74 [==============================] - 5s 70ms/step - loss: 3.3840 - acc: 0.1081
Epoch 4/20
74/74 [==============================] - 5s 73ms/step - loss: 3.0931 - acc: 0.1622
Epoch 5/20
74/74 [==============================] - 7s 92ms/step - loss: 2.9301 - acc: 0.1892
Epoch 6/20
74/74 [==============================] - 6s 75ms/step - loss: 2.7902 - acc: 0.1892
Epoch 7/20
74/74 [==============================] - 6s 78ms/step - loss: 2.6332 - acc: 0.2027
Epoch 8/20
74/74 [==============================] - 6s 77ms/step - loss: 2.4406 - acc: 0.2973
Epoch 9/20
74/74 [==============================] - 6s 81ms/step - loss: 2.4736 - acc: 0.3378
Epoch 10/20
74/74 [==============================] - 6s 86ms/step - loss: 2.1941 - acc: 0.4189
Epoch 11/20
74/74 [==============================] - 6s 80ms/step - loss: 2.2294 - acc: 0.2838
Epoch 12/20
74/74 [==============================] - 6s 77ms/step - loss: 1.9372 - acc: 0.4459
Epoch 13/20
74/74 [==============================] - 6s 84ms/step - loss: 2.0618 - acc: 0.3919
Epoch 14/20
74/74 [==============================] - 6s 81ms/step - loss: 2.6910 - acc: 0.3784
Epoch 15/20
74/74 [==============================] - 6s 76ms/step - loss: 1.7237 - acc: 0.5811
Epoch 16/20
74/74 [==============================] - 6s 75ms/step - loss: 1.5644 - acc: 0.5676
Epoch 17/20
74/74 [==============================] - 5s 74ms/step - loss: 1.3794 - acc: 0.6892
Epoch 18/20
74/74 [==============================] - 5s 74ms/step - loss: 1.2680 - acc: 0.6622
Epoch 19/20
74/74 [==============================] - 6s 75ms/step - loss: 1.8628 - acc: 0.4865
Epoch 20/20
74/74 [==============================] - 6s 78ms/step - loss: 1.1224 - acc: 0.7162
/home/eruwsil/anaconda3/envs/MLpy27/lib/python2.7/site-packages/ipykernel_launcher.py:250: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison
('Test accuracy: ', [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
{'acc': [0.0540540556649904, 0.09459459499732868, 0.10810810810810811, 0.16216216256489624, 0.18918918999465736, 0.1891891891891892, 0.20270270350817088, 0.29729729749866435, 0.337837838643306, 0.41891892052985524, 0.2837837839851508, 0.445945946147313, 0.3918918935028282, 0.37837837878111247, 0.5810810843029538, 0.5675675659566313, 0.6891891924110619, 0.6621621605512258, 0.48648648487555013, 0.7162162146052798], 'loss': [4.840971998266272, 4.016664157042632, 3.3839768461278967, 3.0930660737527385, 2.930050347302411, 2.790204009494266, 2.6332499014364705, 2.440565895389866, 2.4735961347012907, 2.1940622523024276, 2.2294045461190715, 1.9372276099952492, 2.0618390199300407, 2.6910083487227157, 1.7237013288446374, 1.5644380208608266, 1.379394125294041, 1.2679926221435134, 1.862831473350525, 1.1224404509003099]}
Starting work on cross validation set 2
Creating Model
Epoch 1/20
149/149 [==============================] - 13s 85ms/step - loss: 4.5643 - acc: 0.0403
Epoch 2/20
149/149 [==============================] - 11s 72ms/step - loss: 4.4245 - acc: 0.0268
Epoch 3/20
149/149 [==============================] - 11s 75ms/step - loss: 3.7205 - acc: 0.1074
Epoch 4/20
149/149 [==============================] - 12s 82ms/step - loss: 3.5205 - acc: 0.1141
Epoch 5/20
149/149 [==============================] - 11s 74ms/step - loss: 3.4087 - acc: 0.1678
Epoch 6/20
149/149 [==============================] - 12s 81ms/step - loss: 3.2517 - acc: 0.1544
Epoch 7/20
149/149 [==============================] - 11s 74ms/step - loss: 3.1051 - acc: 0.2013
Epoch 8/20
149/149 [==============================] - 12s 80ms/step - loss: 2.8555 - acc: 0.1946
Epoch 9/20
149/149 [==============================] - 11s 73ms/step - loss: 2.6701 - acc: 0.2550
Epoch 10/20
149/149 [==============================] - 11s 71ms/step - loss: 2.8109 - acc: 0.2617
Epoch 11/20
149/149 [==============================] - 10s 66ms/step - loss: 2.3858 - acc: 0.3557
Epoch 12/20
149/149 [==============================] - 9s 63ms/step - loss: 2.2107 - acc: 0.3893
Epoch 13/20
149/149 [==============================] - 10s 64ms/step - loss: 2.1153 - acc: 0.3826
Epoch 14/20
149/149 [==============================] - 10s 67ms/step - loss: 1.6639 - acc: 0.5705
Epoch 15/20
149/149 [==============================] - 10s 69ms/step - loss: 1.9837 - acc: 0.4966
Epoch 16/20
149/149 [==============================] - 10s 70ms/step - loss: 1.4941 - acc: 0.5436
Epoch 17/20
149/149 [==============================] - 10s 70ms/step - loss: 1.2223 - acc: 0.6980
Epoch 18/20
149/149 [==============================] - 10s 69ms/step - loss: 1.0270 - acc: 0.7181
Epoch 19/20
149/149 [==============================] - 10s 68ms/step - loss: 0.8235 - acc: 0.8389
Epoch 20/20
149/149 [==============================] - 10s 66ms/step - loss: 0.9149 - acc: 0.7718
('Test accuracy: ', [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
{'acc': [0.04026845650084867, 0.026845637583892617, 0.1073825504605802, 0.11409395973154363, 0.16778523514934834, 0.15436241635740203, 0.2013422823792336, 0.1946308733832916, 0.25503355754701884, 0.2617449682430933, 0.35570469808658495, 0.3892617454664819, 0.38255033617051654, 0.5704698026580298, 0.4966442993023252, 0.5436241616738723, 0.6979865811815198, 0.7181208065692211, 0.838926176096769, 0.7718120813369751], 'loss': [4.564301727602146, 4.424455297073262, 3.7204697500139274, 3.520471492869742, 3.408738910751855, 3.2517468401249623, 3.1050563370621442, 2.8555117469505977, 2.670114661223136, 2.810937899071098, 2.385840348749353, 2.2107341529538966, 2.1152648061713917, 1.6638653918400708, 1.9836676624797334, 1.4941492288704687, 1.2222857163256446, 1.0270089179077404, 0.8234918469550626, 0.9149435842597244]}
Starting work on cross validation set 3
Creating Model
Epoch 1/20
225/225 [==============================] - 17s 74ms/step - loss: 4.5615 - acc: 0.0356
Epoch 2/20
225/225 [==============================] - 14s 62ms/step - loss: 4.5656 - acc: 0.0400
Epoch 3/20
225/225 [==============================] - 18s 79ms/step - loss: 3.9934 - acc: 0.0889
Epoch 4/20
225/225 [==============================] - 18s 79ms/step - loss: 3.8936 - acc: 0.0800
Epoch 5/20
225/225 [==============================] - 17s 75ms/step - loss: 3.7600 - acc: 0.0933
Epoch 6/20
225/225 [==============================] - 18s 78ms/step - loss: 3.6282 - acc: 0.1111
Epoch 7/20
225/225 [==============================] - 17s 75ms/step - loss: 3.5333 - acc: 0.1467
Epoch 8/20
225/225 [==============================] - 17s 76ms/step - loss: 3.5448 - acc: 0.1111
Epoch 9/20
225/225 [==============================] - 16s 73ms/step - loss: 3.5455 - acc: 0.1289
Epoch 10/20
225/225 [==============================] - 16s 71ms/step - loss: 3.1820 - acc: 0.1733
Epoch 11/20
225/225 [==============================] - 16s 70ms/step - loss: 3.1659 - acc: 0.1467
Epoch 12/20
225/225 [==============================] - 16s 72ms/step - loss: 2.9858 - acc: 0.2400
Epoch 13/20
225/225 [==============================] - 16s 72ms/step - loss: 2.8272 - acc: 0.2711
Epoch 14/20
225/225 [==============================] - 16s 73ms/step - loss: 2.5913 - acc: 0.3111
Epoch 15/20
225/225 [==============================] - 17s 76ms/step - loss: 2.5940 - acc: 0.3022
Epoch 16/20
225/225 [==============================] - 17s 73ms/step - loss: 2.2239 - acc: 0.3556
Epoch 17/20
225/225 [==============================] - 17s 74ms/step - loss: 2.1052 - acc: 0.4267
Epoch 18/20
225/225 [==============================] - 19s 84ms/step - loss: 2.3612 - acc: 0.3867
Epoch 19/20
225/225 [==============================] - 17s 77ms/step - loss: 2.3775 - acc: 0.4000
Epoch 20/20
225/225 [==============================] - 18s 81ms/step - loss: 1.9799 - acc: 0.4756
('Test accuracy: ', [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
{'acc': [0.035555555555555556, 0.04, 0.08888888888888889, 0.08, 0.09333333333333334, 0.1111111111111111, 0.14666666666666667, 0.1111111111111111, 0.1288888888888889, 0.17333333333333334, 0.14666666666666667, 0.24, 0.27111111111111114, 0.3111111111111111, 0.3022222222222222, 0.35555555555555557, 0.4266666666666667, 0.38666666666666666, 0.4, 0.47555555555555556], 'loss': [4.561522511376275, 4.5655607753329805, 3.9934298759036593, 3.8935755422380236, 3.7600166024102104, 3.6281945027245417, 3.5332802115546333, 3.544843004014757, 3.5455235120985242, 3.182040956285265, 3.1659093878004287, 2.9858461825052895, 2.8272289350297717, 2.591341871155633, 2.594017221331596, 2.223921536339654, 2.105167424413893, 2.3611528688006933, 2.3774549431271024, 1.9798882275157506]}
Starting work on cross validation set 4
Creating Model
Epoch 1/20
301/301 [==============================] - 25s 82ms/step - loss: 4.8892 - acc: 0.0266
Epoch 2/20
301/301 [==============================] - 19s 63ms/step - loss: 4.2606 - acc: 0.0532
Epoch 3/20
301/301 [==============================] - 20s 65ms/step - loss: 4.1051 - acc: 0.0532
Epoch 4/20
301/301 [==============================] - 21s 70ms/step - loss: 3.9127 - acc: 0.1196
Epoch 5/20
301/301 [==============================] - 20s 66ms/step - loss: 3.9369 - acc: 0.1063
Epoch 6/20
301/301 [==============================] - 20s 66ms/step - loss: 3.6524 - acc: 0.1229
Epoch 7/20
301/301 [==============================] - 20s 67ms/step - loss: 3.5093 - acc: 0.1395
Epoch 8/20
301/301 [==============================] - 20s 65ms/step - loss: 3.2904 - acc: 0.1728
Epoch 9/20
301/301 [==============================] - 23s 78ms/step - loss: 3.1186 - acc: 0.2193
Epoch 10/20
301/301 [==============================] - 23s 78ms/step - loss: 2.8487 - acc: 0.2724
Epoch 11/20
301/301 [==============================] - 23s 77ms/step - loss: 2.5760 - acc: 0.3189
Epoch 12/20
301/301 [==============================] - 23s 77ms/step - loss: 2.3266 - acc: 0.3721
Epoch 13/20
301/301 [==============================] - 24s 80ms/step - loss: 1.9432 - acc: 0.5017
Epoch 14/20
301/301 [==============================] - 23s 75ms/step - loss: 1.6971 - acc: 0.5748
Epoch 15/20
301/301 [==============================] - 24s 79ms/step - loss: 1.3645 - acc: 0.6179
Epoch 16/20
301/301 [==============================] - 23s 76ms/step - loss: 1.2376 - acc: 0.6877
Epoch 17/20
301/301 [==============================] - 22s 74ms/step - loss: 0.8851 - acc: 0.7973
Epoch 18/20
301/301 [==============================] - 21s 70ms/step - loss: 0.8464 - acc: 0.7774
Epoch 19/20
301/301 [==============================] - 23s 75ms/step - loss: 0.5824 - acc: 0.8970
Epoch 20/20
301/301 [==============================] - 24s 80ms/step - loss: 0.4603 - acc: 0.9103
('Test accuracy: ', [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
{'acc': [0.026578073089700997, 0.05315614642692959, 0.05315614642692959, 0.11960132902741828, 0.10631229235880399, 0.1229235885349223, 0.13953488377043574, 0.172757475132562, 0.21926910311379702, 0.27242524941696283, 0.3189368771259175, 0.37209302375086917, 0.5016611296671174, 0.5747508308618172, 0.6179402003256586, 0.6877076415920574, 0.7973421934831182, 0.7774086384678204, 0.8970099669754307, 0.9102990037183033], 'loss': [4.8891635099518735, 4.260608612104904, 4.105057171412876, 3.91272412265258, 3.9369022299680996, 3.6524393376321886, 3.509311457409019, 3.290358064182573, 3.1186120470496905, 2.8486707345195783, 2.57597946407787, 2.3265996248619105, 1.9431716231412666, 1.6970903485320334, 1.3644813128880091, 1.2375905420693052, 0.8851172401263468, 0.8464128642383207, 0.5823763041599248, 0.4602829875344058]}
Starting work on cross validation set 5
Creating Model
Epoch 1/20
376/376 [==============================] - 30s 80ms/step - loss: 4.7601 - acc: 0.0266
Epoch 2/20
376/376 [==============================] - 24s 64ms/step - loss: 4.3763 - acc: 0.0479
Epoch 3/20
376/376 [==============================] - 26s 70ms/step - loss: 4.2068 - acc: 0.0665
Epoch 4/20
376/376 [==============================] - 28s 74ms/step - loss: 4.0355 - acc: 0.0745
Epoch 5/20
376/376 [==============================] - 28s 74ms/step - loss: 3.8992 - acc: 0.1011
Epoch 6/20
376/376 [==============================] - 26s 69ms/step - loss: 3.7104 - acc: 0.1144
Epoch 7/20
376/376 [==============================] - 24s 65ms/step - loss: 3.5211 - acc: 0.1250
Epoch 8/20
376/376 [==============================] - 25s 67ms/step - loss: 3.2771 - acc: 0.2074
Epoch 9/20
376/376 [==============================] - 28s 75ms/step - loss: 3.0592 - acc: 0.2314
Epoch 10/20
376/376 [==============================] - 26s 69ms/step - loss: 2.8317 - acc: 0.2952
Epoch 11/20
376/376 [==============================] - 26s 68ms/step - loss: 2.5498 - acc: 0.3165
Epoch 12/20
376/376 [==============================] - 26s 69ms/step - loss: 2.3242 - acc: 0.4016
Epoch 13/20
376/376 [==============================] - 27s 73ms/step - loss: 1.9604 - acc: 0.4814
Epoch 14/20
376/376 [==============================] - 30s 79ms/step - loss: 1.6834 - acc: 0.5745
Epoch 15/20
376/376 [==============================] - 31s 81ms/step - loss: 1.3465 - acc: 0.6622
Epoch 16/20
376/376 [==============================] - 26s 70ms/step - loss: 1.0981 - acc: 0.7340
Epoch 17/20
376/376 [==============================] - 26s 68ms/step - loss: 0.8957 - acc: 0.8165
Epoch 18/20
376/376 [==============================] - 26s 70ms/step - loss: 0.7710 - acc: 0.8378
Epoch 19/20
376/376 [==============================] - 27s 72ms/step - loss: 0.5805 - acc: 0.8963
Epoch 20/20
376/376 [==============================] - 24s 64ms/step - loss: 0.4606 - acc: 0.9309
('Test accuracy: ', [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
{'acc': [0.026595744680851064, 0.04787234058405491, 0.06648936178138916, 0.07446808510638298, 0.10106382994575704, 0.11436170212765957, 0.125, 0.2074468088276843, 0.23138297904045024, 0.2952127662744928, 0.3164893610680357, 0.401595744997897, 0.48138297935749624, 0.574468084472291, 0.6622340438213754, 0.7340425519233055, 0.8164893604339437, 0.8377659587149925, 0.8962765957446809, 0.9308510650979712], 'loss': [4.7600539592986415, 4.376315045864024, 4.20678326424132, 4.03553812554542, 3.899201195290748, 3.7103967869535404, 3.521138906478882, 3.2771159283658293, 3.0592132933596345, 2.8316910723422435, 2.5497772084905748, 2.3242136569733316, 1.9603500061846795, 1.6833818415378003, 1.3465420743252368, 1.098072906757923, 0.8956897613850046, 0.7709934850956531, 0.5804945357302402, 0.46057907571183876]}
Starting work on cross validation set 6
Creating Model
Epoch 1/20
452/452 [==============================] - 33s 73ms/step - loss: 4.7483 - acc: 0.0354
Epoch 2/20
452/452 [==============================] - 33s 72ms/step - loss: 4.3077 - acc: 0.0664
Epoch 3/20
452/452 [==============================] - 31s 68ms/step - loss: 4.1453 - acc: 0.0774
Epoch 4/20
452/452 [==============================] - 32s 71ms/step - loss: 3.9606 - acc: 0.0841
Epoch 5/20
452/452 [==============================] - 35s 78ms/step - loss: 3.9466 - acc: 0.0973
Epoch 6/20
452/452 [==============================] - 36s 79ms/step - loss: 3.7145 - acc: 0.1128
Epoch 7/20
452/452 [==============================] - 34s 74ms/step - loss: 3.6164 - acc: 0.1372
Epoch 8/20
452/452 [==============================] - 31s 69ms/step - loss: 3.3076 - acc: 0.1726
Epoch 9/20
452/452 [==============================] - 34s 74ms/step - loss: 3.0585 - acc: 0.2323
Epoch 10/20
452/452 [==============================] - 33s 74ms/step - loss: 2.9358 - acc: 0.2611
Epoch 11/20
452/452 [==============================] - 31s 68ms/step - loss: 2.7264 - acc: 0.2810
Epoch 12/20
452/452 [==============================] - 34s 75ms/step - loss: 2.3443 - acc: 0.3761
Epoch 13/20
452/452 [==============================] - 34s 75ms/step - loss: 2.1397 - acc: 0.4646
Epoch 14/20
452/452 [==============================] - 33s 73ms/step - loss: 1.7779 - acc: 0.5398
Epoch 15/20
452/452 [==============================] - 32s 72ms/step - loss: 1.4976 - acc: 0.6438
Epoch 16/20
452/452 [==============================] - 34s 76ms/step - loss: 1.2652 - acc: 0.6991
Epoch 17/20
452/452 [==============================] - 37s 82ms/step - loss: 1.0667 - acc: 0.7655
Epoch 18/20
452/452 [==============================] - 36s 80ms/step - loss: 0.9293 - acc: 0.8031
Epoch 19/20
452/452 [==============================] - 31s 69ms/step - loss: 0.6500 - acc: 0.8761
Epoch 20/20
452/452 [==============================] - 32s 71ms/step - loss: 0.5543 - acc: 0.9004
('Test accuracy: ', [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
{'acc': [0.035398230088495575, 0.06637168141592921, 0.07743362831858407, 0.084070796460177, 0.09734513274336283, 0.11283185840707964, 0.13716814159292035, 0.17256637168141592, 0.2323008849557522, 0.2610619469026549, 0.2809734513274336, 0.37610619469026546, 0.4646017699115044, 0.5398230088495575, 0.6438053097345132, 0.6991150442477876, 0.7654867256637168, 0.8030973451327433, 0.8761061946902655, 0.9004424778761062], 'loss': [4.748289762345036, 4.307738979305841, 4.145290374755859, 3.9605516881014395, 3.946607129763713, 3.714482769501948, 3.616358571347937, 3.307562849162954, 3.0585303390975547, 2.935791650704578, 2.726368288023282, 2.3442795508730727, 2.13969166510928, 1.777875073188174, 1.497554521645065, 1.2652210503552868, 1.0667367967884098, 0.929327082844962, 0.6500051886634489, 0.5542748356814933]}
Starting work on cross validation set 7
Creating Model
Epoch 1/20
528/528 [==============================] - 42s 80ms/step - loss: 4.6917 - acc: 0.0417
Epoch 2/20
528/528 [==============================] - 35s 66ms/step - loss: 4.2730 - acc: 0.0644
Epoch 3/20
528/528 [==============================] - 37s 70ms/step - loss: 4.1010 - acc: 0.0568
Epoch 4/20
528/528 [==============================] - 35s 66ms/step - loss: 3.9940 - acc: 0.0814
Epoch 5/20
528/528 [==============================] - 34s 64ms/step - loss: 3.8481 - acc: 0.1004
Epoch 6/20
528/528 [==============================] - 36s 68ms/step - loss: 3.6777 - acc: 0.1004
Epoch 7/20
528/528 [==============================] - 36s 67ms/step - loss: 3.5027 - acc: 0.1345
Epoch 8/20
528/528 [==============================] - 34s 65ms/step - loss: 3.3015 - acc: 0.1610
Epoch 9/20
528/528 [==============================] - 37s 71ms/step - loss: 2.9750 - acc: 0.2576
Epoch 10/20
528/528 [==============================] - 36s 68ms/step - loss: 2.7759 - acc: 0.2689
Epoch 11/20
528/528 [==============================] - 35s 67ms/step - loss: 2.4950 - acc: 0.3523
Epoch 12/20
528/528 [==============================] - 34s 64ms/step - loss: 2.1837 - acc: 0.4394
Epoch 13/20
528/528 [==============================] - 35s 67ms/step - loss: 1.8560 - acc: 0.5246
Epoch 14/20
528/528 [==============================] - 39s 75ms/step - loss: 1.5844 - acc: 0.5947
Epoch 15/20
528/528 [==============================] - 34s 64ms/step - loss: 1.2456 - acc: 0.6951
Epoch 16/20
528/528 [==============================] - 36s 67ms/step - loss: 0.9347 - acc: 0.7879
Epoch 17/20
528/528 [==============================] - 38s 73ms/step - loss: 0.7086 - acc: 0.8409
Epoch 18/20
528/528 [==============================] - 36s 68ms/step - loss: 0.5666 - acc: 0.8826
Epoch 19/20
528/528 [==============================] - 38s 73ms/step - loss: 0.4436 - acc: 0.9091
Epoch 20/20
528/528 [==============================] - 36s 68ms/step - loss: 0.3773 - acc: 0.9356
('Test accuracy: ', [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
{'acc': [0.041666666666666664, 0.06439393939393939, 0.056818181818181816, 0.08143939393939394, 0.10037878787878787, 0.10037878787878787, 0.13446969696969696, 0.16098484848484848, 0.25757575757575757, 0.2689393939393939, 0.3522727272727273, 0.4393939393939394, 0.5246212121212122, 0.5946969696969697, 0.6950757575757576, 0.7878787878787878, 0.8409090909090909, 0.8825757575757576, 0.9090909090909091, 0.9356060606060606], 'loss': [4.691659566127893, 4.273039528817842, 4.100989876371441, 3.994010217262037, 3.8480561429804023, 3.677702860398726, 3.502653945576061, 3.301510529084639, 2.9749556310249097, 2.775935895515211, 2.4949935855287495, 2.1837160226070518, 1.8560212091966108, 1.584432688626376, 1.2456301179799167, 0.9347401640631936, 0.7085631345257615, 0.5665768908731865, 0.44363827416391083, 0.37728766961531207]}
Starting work on cross validation set 8
Creating Model
Epoch 1/20
603/603 [==============================] - 41s 69ms/step - loss: 4.6698 - acc: 0.0315
Epoch 2/20
603/603 [==============================] - 39s 65ms/step - loss: 4.2351 - acc: 0.0564
Epoch 3/20
603/603 [==============================] - 40s 67ms/step - loss: 4.1044 - acc: 0.0614
Epoch 4/20
603/603 [==============================] - 40s 66ms/step - loss: 4.0026 - acc: 0.0962
Epoch 5/20
603/603 [==============================] - 42s 69ms/step - loss: 3.8246 - acc: 0.1028
Epoch 6/20
603/603 [==============================] - 44s 73ms/step - loss: 3.7176 - acc: 0.1277
Epoch 7/20
603/603 [==============================] - 40s 66ms/step - loss: 3.4968 - acc: 0.1443
Epoch 8/20
603/603 [==============================] - 40s 67ms/step - loss: 3.3580 - acc: 0.1658
Epoch 9/20
603/603 [==============================] - 43s 72ms/step - loss: 3.1061 - acc: 0.2139
Epoch 10/20
603/603 [==============================] - 38s 64ms/step - loss: 2.7846 - acc: 0.2753
Epoch 11/20
603/603 [==============================] - 41s 67ms/step - loss: 2.4915 - acc: 0.3433
Epoch 12/20
603/603 [==============================] - 40s 67ms/step - loss: 2.1897 - acc: 0.4295
Epoch 13/20
603/603 [==============================] - 37s 62ms/step - loss: 1.8053 - acc: 0.5257
Epoch 14/20
603/603 [==============================] - 39s 64ms/step - loss: 1.5169 - acc: 0.6269
Epoch 15/20
603/603 [==============================] - 40s 66ms/step - loss: 1.1963 - acc: 0.7015
Epoch 16/20
603/603 [==============================] - 38s 63ms/step - loss: 0.9276 - acc: 0.8010
Epoch 17/20
603/603 [==============================] - 38s 64ms/step - loss: 0.6792 - acc: 0.8756
Epoch 18/20
603/603 [==============================] - 39s 64ms/step - loss: 0.5873 - acc: 0.8773
Epoch 19/20
603/603 [==============================] - 38s 63ms/step - loss: 0.4389 - acc: 0.9204
Epoch 20/20
603/603 [==============================] - 40s 66ms/step - loss: 0.3327 - acc: 0.9519
('Test accuracy: ', [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
{'acc': [0.03150912106135987, 0.056384742964262984, 0.061359867354728294, 0.09618573801385032, 0.10281923719701877, 0.12769485911227776, 0.14427860701459755, 0.16583747902319798, 0.21393034801158936, 0.27529021534160597, 0.3432835818424351, 0.4295190718043503, 0.5257048087432412, 0.6268656728279531, 0.7014925382030543, 0.8009950244802345, 0.8756218918322726, 0.8772802666249757, 0.9203980095548614, 0.9519071310116086], 'loss': [4.669779673935366, 4.235099155906816, 4.104372794750714, 4.002596817997162, 3.8245955490156587, 3.7175952308806615, 3.496755461590009, 3.358038788806542, 3.106081306242429, 2.784575996509634, 2.4915211825426145, 2.189684135601493, 1.80526688363817, 1.5168681103198682, 1.1963234511180896, 0.9276306743843243, 0.6791500727928693, 0.5872696725092519, 0.43892215110769317, 0.3327474698893862]}
Starting work on cross validation set 9
Creating Model
Epoch 1/20
679/679 [==============================] - 49s 71ms/step - loss: 4.6211 - acc: 0.0353
Epoch 2/20
679/679 [==============================] - 42s 62ms/step - loss: 4.2281 - acc: 0.0560
Epoch 3/20
679/679 [==============================] - 47s 69ms/step - loss: 4.0790 - acc: 0.0736
Epoch 4/20
679/679 [==============================] - 44s 65ms/step - loss: 3.9319 - acc: 0.1001
Epoch 5/20
679/679 [==============================] - 47s 70ms/step - loss: 3.8347 - acc: 0.1075
Epoch 6/20
679/679 [==============================] - 47s 69ms/step - loss: 3.6770 - acc: 0.1237
Epoch 7/20
679/679 [==============================] - 44s 65ms/step - loss: 3.5221 - acc: 0.1429
Epoch 8/20
679/679 [==============================] - 46s 68ms/step - loss: 3.3665 - acc: 0.1576
Epoch 9/20
679/679 [==============================] - 46s 67ms/step - loss: 3.1382 - acc: 0.2077
Epoch 10/20
679/679 [==============================] - 44s 65ms/step - loss: 2.9046 - acc: 0.2666
Epoch 11/20
679/679 [==============================] - 51s 75ms/step - loss: 2.6821 - acc: 0.2680
Epoch 12/20
679/679 [==============================] - 46s 68ms/step - loss: 2.3567 - acc: 0.3829
Epoch 13/20
679/679 [==============================] - 45s 67ms/step - loss: 2.0367 - acc: 0.4683
Epoch 14/20
679/679 [==============================] - 48s 71ms/step - loss: 1.7307 - acc: 0.5420
Epoch 15/20
679/679 [==============================] - 46s 68ms/step - loss: 1.4355 - acc: 0.6303
Epoch 16/20
679/679 [==============================] - 49s 72ms/step - loss: 1.1800 - acc: 0.7143
Epoch 17/20
679/679 [==============================] - 46s 67ms/step - loss: 0.8904 - acc: 0.7776
Epoch 18/20
679/679 [==============================] - 46s 68ms/step - loss: 0.7549 - acc: 0.8498
Epoch 19/20
679/679 [==============================] - 46s 68ms/step - loss: 0.5154 - acc: 0.8925
Epoch 20/20
679/679 [==============================] - 47s 69ms/step - loss: 0.4872 - acc: 0.8984
('Test accuracy: ', [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
{'acc': [0.035346097201767304, 0.055964653902798235, 0.07363770250368189, 0.1001472754708446, 0.10751104565537556, 0.12371134027202281, 0.14285714285714285, 0.15758468342371648, 0.20765832106038293, 0.26656848319500287, 0.26804123724507656, 0.38291605328249473, 0.468335787989254, 0.5419734906904476, 0.6303387336948659, 0.7142857144612802, 0.7776141386144467, 0.8497790869802719, 0.8924889545201904, 0.898379970544919], 'loss': [4.621057058820317, 4.228107469949174, 4.078992609422934, 3.93189020746762, 3.8346509132890882, 3.6769705949370395, 3.522086222849468, 3.3664759623986806, 3.1382337487323295, 2.9046428919891896, 2.6821456587542842, 2.3567150531004737, 2.036662376330773, 1.7306505315258156, 1.4354887833827024, 1.1800168436651552, 0.8904028091585162, 0.7548831908446524, 0.5154136480744352, 0.4872376092754864]}
Starting work on cross validation set 10
Creating Model
Epoch 1/20
754/754 [==============================] - 58s 78ms/step - loss: 4.6361 - acc: 0.0451
Epoch 2/20
754/754 [==============================] - 46s 62ms/step - loss: 4.1864 - acc: 0.0809
Epoch 3/20
754/754 [==============================] - 52s 69ms/step - loss: 4.1002 - acc: 0.0676
Epoch 4/20
754/754 [==============================] - 51s 68ms/step - loss: 3.9483 - acc: 0.0928
Epoch 5/20
754/754 [==============================] - 48s 64ms/step - loss: 3.8437 - acc: 0.1141
Epoch 6/20
754/754 [==============================] - 49s 65ms/step - loss: 3.7042 - acc: 0.1114
Epoch 7/20
754/754 [==============================] - 45s 59ms/step - loss: 3.5241 - acc: 0.1618
Epoch 8/20
754/754 [==============================] - 49s 65ms/step - loss: 3.3454 - acc: 0.1910
Epoch 9/20
754/754 [==============================] - 52s 69ms/step - loss: 3.1057 - acc: 0.2321
Epoch 10/20
754/754 [==============================] - 59s 78ms/step - loss: 2.8894 - acc: 0.2626
Epoch 11/20
754/754 [==============================] - 53s 70ms/step - loss: 2.5854 - acc: 0.3302
Epoch 12/20
754/754 [==============================] - 53s 70ms/step - loss: 2.2552 - acc: 0.3966
Epoch 13/20
754/754 [==============================] - 53s 70ms/step - loss: 1.9251 - acc: 0.4814
Epoch 14/20
754/754 [==============================] - 50s 66ms/step - loss: 1.6566 - acc: 0.5623
Epoch 15/20
754/754 [==============================] - 53s 71ms/step - loss: 1.3065 - acc: 0.6790
Epoch 16/20
754/754 [==============================] - 51s 67ms/step - loss: 1.0791 - acc: 0.7427
Epoch 17/20
754/754 [==============================] - 52s 68ms/step - loss: 0.7923 - acc: 0.8236
Epoch 18/20
754/754 [==============================] - 52s 68ms/step - loss: 0.6003 - acc: 0.8727
Epoch 19/20
754/754 [==============================] - 50s 66ms/step - loss: 0.4701 - acc: 0.8992
Epoch 20/20
754/754 [==============================] - 52s 69ms/step - loss: 0.4121 - acc: 0.9218
('Test accuracy: ', [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
{'acc': [0.04509283820616788, 0.08090185677380714, 0.06763925741300658, 0.09283819640504903, 0.1140583554475472, 0.11140583555364798, 0.16180371353773287, 0.19098143238050552, 0.23209549073594318, 0.2625994695059026, 0.3302387271066559, 0.39655172421698226, 0.4814323610589106, 0.5623342177437851, 0.6790450933125038, 0.7427055707660847, 0.8236074265813954, 0.8726790455671457, 0.8992042435575227, 0.9217506631299734], 'loss': [4.636089188350606, 4.1864450135977265, 4.100213534003544, 3.9483115590851883, 3.843667171361908, 3.704173157006423, 3.5241424765447724, 3.3454189167731005, 3.1056937987987814, 2.8894043645428726, 2.5853507834973324, 2.2551933884304463, 1.9250771392245507, 1.6565946185620457, 1.3064601943094472, 1.0790650809791422, 0.7923498153686523, 0.600299005641229, 0.47013033906724155, 0.41210855089068726]}
Starting cross validation 1
(74, 5445)
(31, 5445)
====================
1
[25.806451612903224, 29.03225806451613, 29.03225806451613, 35.483870967741936, 38.70967741935484, 45.16129032258064, 45.16129032258064, 45.16129032258064, 45.16129032258064, 48.38709677419355]
2
[9.67741935483871, 9.67741935483871, 9.67741935483871, 9.67741935483871, 9.67741935483871, 16.129032258064516, 16.129032258064516, 16.129032258064516, 16.129032258064516, 16.129032258064516]
3
[3.225806451612903, 12.903225806451612, 22.58064516129032, 32.25806451612903, 32.25806451612903, 35.483870967741936, 38.70967741935484, 38.70967741935484, 41.935483870967744, 45.16129032258064]
4
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
Starting cross validation 2
(149, 5445)
(54, 5445)
====================
1
[9.25925925925926, 14.814814814814813, 22.22222222222222, 25.925925925925924, 33.33333333333333, 35.18518518518518, 35.18518518518518, 38.88888888888889, 40.74074074074074, 42.592592592592595]
2
[0.0, 5.555555555555555, 5.555555555555555, 5.555555555555555, 5.555555555555555, 7.4074074074074066, 7.4074074074074066, 7.4074074074074066, 7.4074074074074066, 7.4074074074074066]
3
[5.555555555555555, 20.37037037037037, 22.22222222222222, 25.925925925925924, 25.925925925925924, 25.925925925925924, 29.629629629629626, 33.33333333333333, 33.33333333333333, 33.33333333333333]
4
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
Starting cross validation 3
(225, 5445)
(47, 5445)
====================
1
[19.148936170212767, 25.53191489361702, 27.659574468085108, 34.04255319148936, 36.17021276595745, 38.297872340425535, 42.5531914893617, 42.5531914893617, 42.5531914893617, 46.808510638297875]
2
[0.0, 0.0, 0.0, 0.0, 2.127659574468085, 8.51063829787234, 8.51063829787234, 8.51063829787234, 8.51063829787234, 8.51063829787234]
3
[8.51063829787234, 12.76595744680851, 12.76595744680851, 17.02127659574468, 17.02127659574468, 19.148936170212767, 23.404255319148938, 27.659574468085108, 27.659574468085108, 31.914893617021278]
4
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
Starting cross validation 4
(301, 5445)
(61, 5445)
====================
1
[3.278688524590164, 3.278688524590164, 3.278688524590164, 3.278688524590164, 8.19672131147541, 9.836065573770492, 16.39344262295082, 16.39344262295082, 18.0327868852459, 18.0327868852459]
2
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.639344262295082, 1.639344262295082, 1.639344262295082, 1.639344262295082]
3
[6.557377049180328, 8.19672131147541, 9.836065573770492, 9.836065573770492, 13.114754098360656, 18.0327868852459, 19.672131147540984, 21.311475409836063, 22.950819672131146, 22.950819672131146]
4
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
Starting cross validation 5
(376, 5445)
(74, 5445)
====================
1
[6.756756756756757, 9.45945945945946, 16.216216216216218, 21.62162162162162, 25.675675675675674, 28.37837837837838, 29.72972972972973, 32.432432432432435, 35.13513513513514, 35.13513513513514]
2
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
3
[4.054054054054054, 10.81081081081081, 14.864864864864865, 18.91891891891892, 22.972972972972975, 25.675675675675674, 27.027027027027028, 27.027027027027028, 28.37837837837838, 32.432432432432435]
4
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
Starting cross validation 6
(452, 5445)
(75, 5445)
====================
1
[9.333333333333334, 14.666666666666666, 18.666666666666668, 18.666666666666668, 25.333333333333336, 26.666666666666668, 33.33333333333333, 36.0, 38.666666666666664, 44.0]
2
[0.0, 1.3333333333333335, 2.666666666666667, 5.333333333333334, 5.333333333333334, 6.666666666666667, 6.666666666666667, 6.666666666666667, 6.666666666666667, 6.666666666666667]
3
[2.666666666666667, 5.333333333333334, 5.333333333333334, 8.0, 10.666666666666668, 10.666666666666668, 13.333333333333334, 18.666666666666668, 21.333333333333336, 22.666666666666664]
4
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
Starting cross validation 7
(528, 5445)
(72, 5445)
====================
1
[16.666666666666664, 22.22222222222222, 29.166666666666668, 40.27777777777778, 43.05555555555556, 44.44444444444444, 45.83333333333333, 45.83333333333333, 50.0, 52.77777777777778]
2
[0.0, 1.3888888888888888, 1.3888888888888888, 2.7777777777777777, 2.7777777777777777, 2.7777777777777777, 4.166666666666666, 4.166666666666666, 4.166666666666666, 4.166666666666666]
3
[9.722222222222223, 15.277777777777779, 20.833333333333336, 26.38888888888889, 29.166666666666668, 31.944444444444443, 37.5, 41.66666666666667, 44.44444444444444, 45.83333333333333]
4
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
Starting cross validation 8
(603, 5445)
(73, 5445)
====================
1
[13.698630136986301, 24.65753424657534, 32.87671232876712, 38.35616438356164, 39.726027397260275, 43.83561643835616, 46.57534246575342, 47.94520547945205, 53.42465753424658, 54.794520547945204]
2
[0.0, 4.10958904109589, 4.10958904109589, 6.8493150684931505, 6.8493150684931505, 6.8493150684931505, 15.068493150684931, 15.068493150684931, 15.068493150684931, 15.068493150684931]
3
[8.21917808219178, 10.95890410958904, 13.698630136986301, 19.17808219178082, 23.28767123287671, 27.397260273972602, 28.767123287671232, 28.767123287671232, 32.87671232876712, 35.61643835616438]
4
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
Starting cross validation 9
(679, 5445)
(64, 5445)
====================
1
[12.5, 28.125, 31.25, 35.9375, 35.9375, 40.625, 43.75, 45.3125, 45.3125, 48.4375]
2
[0.0, 0.0, 0.0, 3.125, 4.6875, 12.5, 14.0625, 14.0625, 14.0625, 14.0625]
3
[7.8125, 12.5, 17.1875, 20.3125, 21.875, 21.875, 26.5625, 28.125, 31.25, 35.9375]
4
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
Starting cross validation 10
(754, 5445)
(68, 5445)
====================
1
[11.76470588235294, 11.76470588235294, 13.23529411764706, 17.647058823529413, 22.058823529411764, 25.0, 25.0, 27.941176470588236, 30.88235294117647, 32.35294117647059]
2
[0.0, 0.0, 0.0, 2.941176470588235, 4.411764705882353, 5.88235294117647, 7.352941176470589, 7.352941176470589, 7.352941176470589, 7.352941176470589]
3
[2.941176470588235, 4.411764705882353, 10.294117647058822, 11.76470588235294, 11.76470588235294, 14.705882352941178, 17.647058823529413, 17.647058823529413, 20.588235294117645, 20.588235294117645]
4
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]

In [ ]: