In [1]:
from matplotlib import pyplot as plt
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import train_test_split
from keras.preprocessing.text import Tokenizer
from keras.models import Sequential
from keras.layers import Dense, Activation, Embedding, TimeDistributed, Bidirectional
from keras.layers import LSTM, SpatialDropout1D, Conv1D, GlobalMaxPooling1D, MaxPooling1D, Flatten
from keras.layers.core import Dropout
from keras.callbacks import EarlyStopping
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import keras
import codecs
import os

%matplotlib inline


Using TensorFlow backend.

In [2]:
import sys
import csv

csv.field_size_limit(sys.maxsize)


Out[2]:
131072

In [3]:
!ls


datasetCSV.zip		 DetectedAuthorTwitter.ipynb  LICENSE
datasetHabrahabr.csv	 DetectedAuthorVK.ipynb       model
dataset.zip		 echo_posts_100_and_more.csv  posts.csv
DetectedAuthorRNN.ipynb  Habrahabr		      README.md

In [4]:
# Устанавливаем seed для повторяемости результатов
np.random.seed(42)

In [5]:
N = 10
with open("echo_posts_100_and_more.csv") as myfile:
    head = [next(myfile) for x in range(N)]
print(head)


['|Author|Text\n', '0|ААВ-старший|"\n', '\n', 'Я\xa0сожалею, что начало передачи с\xa0Леонидом Волковым было скомкано. \n', '\n', 'Приношу извинения тем слушателям Эха, которые сочли этот инцидент неуместным  \n', '\n', '«Эхо Москвы» готово предложить дебаты между Ксенией Собчак и\xa0Алексеем Навальным.  \n', 'Оригинал "\n', '1|ААВ-старший|"\n']

In [5]:
data = pd.read_csv('echo_posts_100_and_more.csv', sep='|', engine='python', index_col=0)
data.head()


Out[5]:
Author Text
0 ААВ-старший \n\nЯ сожалею, что начало передачи с Леонидом ...
1 ААВ-старший \n\nПонятно, почему Путин рассказывал о высоко...
2 ААВ-старший \n\nСчитал и считаю, что отказ в регистрации А...
3 ААВ-старший \nО странностях жизни. \nПутин благодарит Тра...
4 ААВ-старший \n\nВот интересно – начальник Службы безопасно...

In [6]:
data.info()


<class 'pandas.core.frame.DataFrame'>
Index: 87640 entries, 0 to 87436
Data columns (total 2 columns):
Author    87437 non-null object
Text      80168 non-null object
dtypes: object(2)
memory usage: 2.0+ MB

In [4]:
data = pd.read_csv('posts.csv', names=['Author', 'Text'], sep=',', header=None)
data.head()


Out[4]:
Author Text
0 22211 Надо жить у моря, Мама.\n\nAdMe.ru публикует р...
1 22211 Памятка трейдера.\n\nВыражения и термины приме...
2 22211 Руслан Проводников или "Сибирский Рокки", как ...
3 61454 "Выбирай платья на лето", "готовься к лету вме...
4 61454 "Спят ли мамы?"- я не знала. На такой вопрос о...

In [9]:
#from nltk.corpus import stopwords
#stop = stopwords.words('russian')
#data['Text'].apply(lambda x: ' '.join([item for item in x.split() if item not in stop]))
#print('Stop words have been deleted')
#data['Text'].apply(lambda x: x.replace('\n', ' '));

Подсчет количества слов в корпусе


In [6]:
data.Text = data.Text.apply(str);

In [7]:
data['CountWords'] = data['Text'].map(lambda x: len(x.split()))
print('Количество статей в корпусе:', len(data))


Количество статей в корпусе: 87640

Удаление спам пользователей


In [7]:
spam_list = [301557842, 184330667, 191315693, 111604181, 212846761, 278053852, 281162155, 206300456, 306859957, 19006413, 275267400, 131629784, 276695686, 227968805, 65265920, 215657793, 212066696, 281866527, 172515637, 420657548, 259904924, 103086082, 36309849, 151622711, 304407589, 304407589, 234135027, 330779808, 135431520, 326927985, 22070189, 16497456, 22070189, 16497456, 120772826, 181255568, 24655736, 90966607, 102977537, 317336618, 189704404, 11663845, 57439210, 163452787, 38680483, 121144040, 211289578, 280155968, 198528079, 277512341, 33152119, 314327791, 229101154, 271546215, 4121239, 59729051, 288393060, 17620663, 296431137, 320666372, 153340241, 13309210, 333014765, 24595847, 152263680, 135815313, 27763994, 34902711, 200488318, 393710429, 400853612, 18091069, 232368999, 243123251, 159034443, 101820061, 334397479, 36372315, 18412482, 362494239, 98538332, 94983825, 171941220, 35386477, 179364423, 136668179, 57446172, 288223392, 321431512, 277994902, 190694040, 325021098, 129392296, 289027630, 216216545, 75884932, 208023062, 185965346, 2857932, 2574325, 154034429, 312715518, 286913827, 336049060, 163410643, 309938895, 372204020, 400615680, 235441786, 8391421, 136997064, 174934251, 173179778, 134275738, 93070783, 403039516, 326194938, 26960550, 18090797, 15931375, 64401851, 14665799, 223678239, 1963998, 252135842, 16352364, 386230491, 135103805, 335233013, 226605259, 94018354, 363143703, 278775919, 134780377,498638, 498638, 20403116, 824871, 22259914, 203601767, 386435681, 280519165, 225577927, 336493264, 203836012, 250362235, 135154139, 332671406, 296879545, 191398452, 48411613, 276175277, 77459397, 15155303, 6646818, 174692652, 25755928, 145799445, 210213078, 225680585, 167783297, 159347782, 154637631, 343704548, 276329737, 158954181, 7736592, 52430830, 54738353, 181641220, 249065698, 356566757, 12276180, 146475131, 38034256, 301152537, 181250759, 65169740, 229445819, 360098848, 204181735, 311981137, 9365217, 323032519, 144081373, 166471693, 219977388, 3252582, 92645976, 341675206, 394122042, 8469671, 293599275, 150564713, 315874927, 75299334, 395839270, 256327432, 8758868, 234523373, 234523373] 
print('Count of spam users:', len(spam_list))
lam = lambda row: row['Author'] not in spam_list
data = data[data.apply(lam, axis=1)]


Count of spam users: 201

In [8]:
author_count_news = data.Author.value_counts()
print('Количество авторов в корпусе:', len(author_count_news))
#print(author_count_news[:-8300: -1])
#author_count_news.plot(kind='bar', figsize=(15, 5), title="Number of author's articles");


Количество авторов в корпусе: 290

Количество статей у каждого пользователя


In [13]:
# Попробовать изменить количество статей у автора, у каждого 10 постов, 50, 100

num_classes = 100
skip_person = 9
author_count_news_dubset = author_count_news[skip_person:skip_person+num_classes]

In [14]:
author_count_news_dubset.plot(kind='bar', figsize=(15, 5), title="Number of author's articles");



In [11]:
for i, (user_id, number) in enumerate(author_count_news.items()):
    print('User id', user_id)
    print(data[data.Author == user_id]['Text'][:2])
    print('===========')
    if i == 2:
        break


User id 181880183
56504    ---Жизнь-— Колыбель. Пеленки. Плач. Слово. Шаг...
56505    - ... мать! - Дорогой, ты что-то сказал? - Я с...
Name: Text, dtype: object
===========
User id 97649923
35528    "Югорская звезда" - собрала  сегодня на своём ...
35529    #21деньбезсладкого \nНаконец то этот день наст...
Name: Text, dtype: object
===========
User id 175642079
54323    - А пойдемте гулять, Саша?\n- А пойдемте....😂\...
54324    - Вы думали когда-то о собственном бизнесе?\n-...
Name: Text, dtype: object
===========

Подготовка данных


In [15]:
temp_data = pd.DataFrame()
names_author = author_count_news_dubset.index.values

for author in names_author:
    temp_data = temp_data.append(data[data.Author == author])

data = temp_data
print('Количество статей после удаления:', len(data))


Количество статей после удаления: 41941

In [13]:
# def remove_stopwords(data):
#     from nltk.corpus import stopwords
#     stop = stopwords.words('russian')
#     data['Text'].apply(lambda x: ' '.join([item for item in x.split() if item not in stop]))
#     print('Stop words have been deleted')

In [14]:
# def get_lemmatization_corpus(data):
#     import pymorphy2
#     morph = pymorphy2.MorphAnalyzer()
#     data['TextLem'] = data['Text'].map(lambda x: ' '.join([morph.parse(word)[0].normal_form for word in x.split()]))
#     print('The lemmatization completed')

In [16]:
names = data.Author.value_counts().index.values

lableEnc = LabelEncoder()
lableEnc.fit(names.ravel()) 
lables = lableEnc.transform(names).reshape((num_classes, 1))

oneHotEnc = OneHotEncoder()
oneHotEnc.fit(lables)

for author in names:
    val = lableEnc.transform([author])[0]
    data.Author.replace(to_replace=author, value=val, inplace=True)

data.head()


Out[16]:
Author Text CountWords
22596 68 \n\nРоссия предложила вернуть Украине военные ... 334
22597 68 \n\nРоссийские властные комики продолжают жечь... 457
22598 68 \n\nЕсть такое украинское слово «чорноротый». ... 384
22599 68 \n\nПесков отказался назвать имя того самого у... 455
22600 68 \nТут для начала хочу напомнить, что в Америке... 481

In [16]:
df = data
samplesize = 10  #number of samples that you want
sample_10 = df.groupby('Author', as_index=False).apply(lambda array: array.loc[np.random.choice(array.index, samplesize, False),:])

samplesize = 20  #number of samples that you want
sample_20 = df.groupby('Author', as_index=False).apply(lambda array: array.loc[np.random.choice(array.index, samplesize, False),:])

samplesize = 30  #number of samples that you want
sample_30 = df.groupby('Author', as_index=False).apply(lambda array: array.loc[np.random.choice(array.index, samplesize, False),:])

samplesize = 40  #number of samples that you want
sample_40 = df.groupby('Author', as_index=False).apply(lambda array: array.loc[np.random.choice(array.index, samplesize, False),:])

samplesize = 50  #number of samples that you want
sample_50 = df.groupby('Author', as_index=False).apply(lambda array: array.loc[np.random.choice(array.index, samplesize, False),:])

array_samples = [sample_10, sample_20, sample_30, sample_40, sample_50]

In [17]:
df = data
samplesize = 20  #number of samples that you want
sample_20 = df.groupby('Author', as_index=False).apply(lambda array: array.loc[np.random.choice(array.index, samplesize, False),:])
numberOfword_20 = 1000

samplesize = 40  #number of samples that you want
sample_40 = df.groupby('Author', as_index=False).apply(lambda array: array.loc[np.random.choice(array.index, samplesize, False),:])
numberOfword_20 = 1200

samplesize = 60  #number of samples that you want
sample_60 = df.groupby('Author', as_index=False).apply(lambda array: array.loc[np.random.choice(array.index, samplesize, False),:])
numberOfword_20 = 1000

samplesize = 80  #number of samples that you want
sample_80 = df.groupby('Author', as_index=False).apply(lambda array: array.loc[np.random.choice(array.index, samplesize, False),:])

samplesize = 100  #number of samples that you want
sample_100 = df.groupby('Author', as_index=False).apply(lambda array: array.loc[np.random.choice(array.index, samplesize, False),:])


samplesize = 130  #number of samples that you want
sample_130 = df.groupby('Author', as_index=False).apply(lambda array: array.loc[np.random.choice(array.index, samplesize, False),:])


samplesize = 160  #number of samples that you want
sample_160 = df.groupby('Author', as_index=False).apply(lambda array: array.loc[np.random.choice(array.index, samplesize, False),:])


samplesize = 200  #number of samples that you want
sample_200 = df.groupby('Author', as_index=False).apply(lambda array: array.loc[np.random.choice(array.index, samplesize, False),:])

In [18]:
array_samples = [sample_20, sample_40, sample_60, sample_80, sample_100, sample_130, sample_160, sample_200]

In [36]:
np.mean(sample_200.CountWords)


Out[36]:
526.87735

In [27]:
df.CountWords.plot(kind='bar', figsize=(15, 5), title="Number of worlds in texts");



In [28]:
# Перемешать набор данных и удалить index столбец
data = data.sample(frac=1).reset_index(drop=True)

In [19]:
for x in range(len(array_samples)):
    array_samples[x] = array_samples[x].sample(frac=1).reset_index(drop=True)

Токенизация текста


In [20]:
def get_texts_to_matrix(texts, max_features = 0):
    tokenizer = Tokenizer(split=" ", lower=True)
    if max_features != 0:
        tokenizer = Tokenizer(split=" ", lower=True, num_words=max_features)
    
    tokenizer.fit_on_texts(texts)
    matrix_tfidf = tokenizer.texts_to_matrix(texts=texts, mode='tfidf')
    print('Количество текстов:', matrix_tfidf.shape[0])
    print('Количество токенов:', matrix_tfidf.shape[1])
    return matrix_tfidf

In [21]:
# Убрать LowerCase

def get_texts_to_sequences(text):
    # создаем единый словарь (слово -> число) для преобразования
    tokenizer = Tokenizer(split=" ", lower=True)
    tokenizer.fit_on_texts(text)
    # Преобразуем все описания в числовые последовательности, заменяя слова на числа по словарю.
    text_sequences = tokenizer.texts_to_sequences(text)
    print('В словаре {} слов'.format(len(tokenizer.word_index)))
    return text_sequences

In [22]:
def get_texts_to_gramm_sequences(texts, count_gramm = 3):
    #char_filter = '«»—!–"−#$%&()*…,-./:;<=>?@[\\]^_`{|}~\t\n'
    gramms = {}
    gram_id = 0
    result = []
    for text in texts:
        #for c_filter in char_filter:
        #    text = text.replace(c_filter, '')
        temp_vector = []
        count_grams_for_text = range(len(text) - count_gramm - 1)
        for i in count_grams_for_text:
            gramm = text[i : i + count_gramm]
            if gramm not in gramms:
                gramms[gramm] = gram_id
                gram_id += 1
            temp_vector.append(gramms[gramm])
        result.append(temp_vector)
    print('Количество грамм в корпусе:', len(gramms))
    return result

In [23]:
array_X = []
for x in range(len(array_samples)):
    array_X.append(get_texts_to_gramm_sequences(array_samples[x]['Text'], count_gramm = 3))


Количество грамм в корпусе: 61885
Количество грамм в корпусе: 80403
Количество грамм в корпусе: 91831
Количество грамм в корпусе: 104360
Количество грамм в корпусе: 111833
Количество грамм в корпусе: 123850
Количество грамм в корпусе: 132689
Количество грамм в корпусе: 144023

In [24]:
array_grams = [61885, 80403, 91831, 104360, 111833, 123850, 132689, 144023]

In [19]:
#X = get_texts_to_matrix(data['Text'], 80000)
#X = get_texts_to_gramm_sequences(data['Text'])
#X = get_texts_to_sequences(data['Text'])

In [33]:
#X_gram_3 = get_texts_to_gramm_sequences(data['Text'], count_gramm = 3)
X_gram_4 = get_texts_to_gramm_sequences(data['Text'], count_gramm = 4)
#X_seq = get_texts_to_sequences(data['Text'])


Количество грамм в корпусе: 344915

In [24]:
array_X = [X_gram_3, X_gram_4]

In [26]:
def print_plot_mean(x):
    means = [len(x) for x in x]
    plt.plot(means);

In [28]:
len(array_X[0])
print_plot_mean(array_X[6])
#print_plot_mean(X_gram_4)
#print_plot_mean(X_seq)



In [34]:
X = X_gram_4

In [35]:
#plt.plot(X)
means = [len(x) for x in X]
print(np.mean(means))
plt.plot(means);
#X.plot(kind='bar', figsize=(15, 5), title="Number of worlds in texts");


683.488856858

Разбиваем выборку на тестовую и тренировочную


In [29]:
def get_X_y_for_traning(X, y, num_words):
    #tokenizer = Tokenizer(num_words=num_words)
    #X = tokenizer.sequences_to_matrix(X, mode='binary')

    X = keras.preprocessing.sequence.pad_sequences(X, maxlen=num_words)     
    y = keras.utils.to_categorical(y, num_classes)
    print('Размерность X:', X.shape)
    print('Размерность y:', y.shape)
    return X, y

In [36]:
# Максимальное количество слов в самом длинном тексте
num_words = 4000
array_sets = []
X_full, y_full = get_X_y_for_traning(X_gram_4, data.Author, num_words)
array_sets.append(train_test_split(X_full, y_full, test_size=0.2, random_state=42))


Размерность X: (9019, 4000)
Размерность y: (9019, 100)

In [30]:
num_words = 20000
array_sets = []

for x in range(len(array_samples)):
    X_full, y_full = get_X_y_for_traning(array_X[x], array_samples[x].Author, num_words)
    array_sets.append(train_test_split(X_full, y_full, test_size=0.2, random_state=42))


Размерность X: (2000, 20000)
Размерность y: (2000, 100)
Размерность X: (4000, 20000)
Размерность y: (4000, 100)
Размерность X: (6000, 20000)
Размерность y: (6000, 100)
Размерность X: (8000, 20000)
Размерность y: (8000, 100)
Размерность X: (10000, 20000)
Размерность y: (10000, 100)
Размерность X: (13000, 20000)
Размерность y: (13000, 100)
Размерность X: (16000, 20000)
Размерность y: (16000, 100)
Размерность X: (20000, 20000)
Размерность y: (20000, 100)

In [30]:
# Максимальное количество слов в самом длинном тексте
num_words = 20000
# X_full, y_full = get_X_y_for_traning(X_gram_3, data.Author, num_words)
# X_train, X_test, y_train, y_test = train_test_split(X_full, y_full, test_size=0.2, random_state=42)
array_sets = []
X_full, y_full = get_X_y_for_traning(X_gram_3, data.Author, num_words)
array_sets.append(train_test_split(X_full, y_full, test_size=0.2, random_state=42))

X_full, y_full = get_X_y_for_traning(X_gram_4, data.Author, num_words)
array_sets.append(train_test_split(X_full, y_full, test_size=0.2, random_state=42))

#num_words = 2500
#X_full, y_full = get_X_y_for_traning(X_seq, data.Author, num_words)
#array_sets.append(train_test_split(X_full, y_full, test_size=0.2, random_state=42))

#print('Testing set size:', len(X_test))
#print('Training set size:', len(X_train))


Размерность X: (41941, 20000)
Размерность y: (41941, 100)
Размерность X: (41941, 20000)
Размерность y: (41941, 100)

Создание модели нейронной сети


In [54]:
def get_lstm_model(input_length):
    model = Sequential()
    model.add(Embedding(input_length, 200))
    model.add(SpatialDropout1D(0.2))
    model.add(LSTM(128, dropout=0.3, recurrent_dropout=0.3))
    model.add(Dense(num_classes, activation="sigmoid"))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

In [32]:
def get_bidirectional_lstm():
    model = Sequential()
    model.add(Embedding(80000, 200))
    model.add(SpatialDropout1D(0.2))
    model.add(Bidirectional(LSTM(10, dropout=0.3, recurrent_dropout=0.3)))
    model.add(Dense(num_classes, activation="sigmoid"))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

In [35]:
def get_conv_conv_model():
    model = Sequential()
    model.add(Embedding(160000, 300))
    model.add(SpatialDropout1D(0.2))
    model.add(Conv1D(filters=512, kernel_size=3, activation='relu'))
    model.add(MaxPooling1D())
    model.add(Conv1D(filters=512, kernel_size=3, activation='relu'))
    model.add(GlobalMaxPooling1D())
    model.add(Dense(num_classes, activation="sigmoid"))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

In [36]:
def get_conv_lstm_model():
    model = Sequential()
    #model.add(Dense(102562, activation='relu', input_shape=(8664, 600)))
    model.add(Embedding(100000, 200))
    model.add(SpatialDropout1D(0.3))
    #model.add(TimeDistributed(Conv1D(filters=512, kernel_size=3, activation='relu')))
    #model.add(TimeDistributed(GlobalMaxPooling1D()))
    #model.add(TimeDistributed(Flatten()))
    model.add(Conv1D(filters=512, kernel_size=3, activation='relu'))
    model.add(MaxPooling1D())
    #model.add(Flatten())
    model.add(LSTM(50, dropout=0.3, recurrent_dropout=0.3))
    model.add(Dense(num_classes, activation="sigmoid"))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

In [37]:
def get_lstm_conv_model():
    model = Sequential()
    model.add(Embedding(160000, 300))
    model.add(SpatialDropout1D(0.2))
    model.add(LSTM(50, dropout=0.3, recurrent_dropout=0.3, return_sequences=True)) 
    #model.add(SpatialDropout1D(0.2))
    model.add(Conv1D(filters=512, kernel_size=3, activation='sigmoid'))
    model.add(GlobalMaxPooling1D())
    model.add(Dense(num_classes, activation="sigmoid"))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

In [31]:
def get_conv_model(embeding_size):
    model = Sequential()
    model.add(Embedding(embeding_size, 200))
    model.add(SpatialDropout1D(0.2))
    model.add(Conv1D(filters=512, kernel_size=3, activation='relu'))
    model.add(GlobalMaxPooling1D())
    model.add(Dense(num_classes, activation="sigmoid"))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

In [37]:
# ВК 4 грамма со знаками припинания

for x in range(len(array_sets)):
    X_train, X_test, y_train, y_test = array_sets[x]
    model = get_conv_model(embeding_size=344915)
    
    model.summary()

    BATCH_SIZE = 32
    EPOCHS = 10
    VERBOSE = 2

    history = model.fit(X_train, y_train,
                        batch_size=BATCH_SIZE,
                        epochs=EPOCHS, verbose=VERBOSE,
                        validation_data=(X_test, y_test)
                        #validation_split=0.1, 
                        #callbacks=[EarlyStopping(monitor='val_loss')]
                       )
    #hist_array.append(history)


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_6 (Embedding)      (None, None, 200)         68983000  
_________________________________________________________________
spatial_dropout1d_6 (Spatial (None, None, 200)         0         
_________________________________________________________________
conv1d_6 (Conv1D)            (None, None, 512)         307712    
_________________________________________________________________
global_max_pooling1d_6 (Glob (None, 512)               0         
_________________________________________________________________
dense_6 (Dense)              (None, 100)               51300     
=================================================================
Total params: 69,342,012
Trainable params: 69,342,012
Non-trainable params: 0
_________________________________________________________________
Train on 7215 samples, validate on 1804 samples
Epoch 1/10
74s - loss: 4.1117 - acc: 0.1504 - val_loss: 3.2747 - val_acc: 0.3171
Epoch 2/10
70s - loss: 2.4777 - acc: 0.4678 - val_loss: 2.5340 - val_acc: 0.4429
Epoch 3/10
70s - loss: 1.2770 - acc: 0.7662 - val_loss: 2.3890 - val_acc: 0.4784
Epoch 4/10
70s - loss: 0.3877 - acc: 0.9556 - val_loss: 2.4010 - val_acc: 0.4850
Epoch 5/10
71s - loss: 0.1028 - acc: 0.9856 - val_loss: 2.4106 - val_acc: 0.4945
Epoch 6/10
70s - loss: 0.0698 - acc: 0.9867 - val_loss: 2.4783 - val_acc: 0.4928
Epoch 7/10
71s - loss: 0.0633 - acc: 0.9884 - val_loss: 2.5775 - val_acc: 0.4823
Epoch 8/10
71s - loss: 0.0544 - acc: 0.9899 - val_loss: 2.4975 - val_acc: 0.4928
Epoch 9/10
71s - loss: 0.0503 - acc: 0.9918 - val_loss: 2.5911 - val_acc: 0.4928
Epoch 10/10
71s - loss: 0.0491 - acc: 0.9917 - val_loss: 2.6379 - val_acc: 0.4873

In [35]:
# ВК 4 грамма без знаков припинания

for x in range(len(array_sets)):
    X_train, X_test, y_train, y_test = array_sets[x]
    model = get_conv_model(embeding_size=292945)
    
    model.summary()

    BATCH_SIZE = 32
    EPOCHS = 10
    VERBOSE = 2

    history = model.fit(X_train, y_train,
                        batch_size=BATCH_SIZE,
                        epochs=EPOCHS, verbose=VERBOSE,
                        validation_data=(X_test, y_test)
                        #validation_split=0.1, 
                        #callbacks=[EarlyStopping(monitor='val_loss')]
                       )
    #hist_array.append(history)


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_6 (Embedding)      (None, None, 200)         58589000  
_________________________________________________________________
spatial_dropout1d_6 (Spatial (None, None, 200)         0         
_________________________________________________________________
conv1d_6 (Conv1D)            (None, None, 512)         307712    
_________________________________________________________________
global_max_pooling1d_6 (Glob (None, 512)               0         
_________________________________________________________________
dense_6 (Dense)              (None, 100)               51300     
=================================================================
Total params: 58,948,012
Trainable params: 58,948,012
Non-trainable params: 0
_________________________________________________________________
Train on 7215 samples, validate on 1804 samples
Epoch 1/10
71s - loss: 4.2107 - acc: 0.1149 - val_loss: 3.2867 - val_acc: 0.3115
Epoch 2/10
67s - loss: 2.6183 - acc: 0.4374 - val_loss: 2.5796 - val_acc: 0.4407
Epoch 3/10
68s - loss: 1.4212 - acc: 0.7186 - val_loss: 2.4265 - val_acc: 0.4739
Epoch 4/10
68s - loss: 0.5062 - acc: 0.9310 - val_loss: 2.4371 - val_acc: 0.4728
Epoch 5/10
68s - loss: 0.1446 - acc: 0.9789 - val_loss: 2.4782 - val_acc: 0.4812
Epoch 6/10
68s - loss: 0.0938 - acc: 0.9827 - val_loss: 2.5322 - val_acc: 0.4878
Epoch 7/10
68s - loss: 0.0824 - acc: 0.9839 - val_loss: 2.5873 - val_acc: 0.4823
Epoch 8/10
68s - loss: 0.0740 - acc: 0.9856 - val_loss: 2.6765 - val_acc: 0.4789
Epoch 9/10
68s - loss: 0.0668 - acc: 0.9881 - val_loss: 2.7055 - val_acc: 0.4834
Epoch 10/10
68s - loss: 0.0643 - acc: 0.9897 - val_loss: 2.6895 - val_acc: 0.4806

In [32]:
# ДЛЯ ВК со знаками припинания, изменение количества постов на человека

for x in range(len(array_sets)):
    X_train, X_test, y_train, y_test = array_sets[x]
    model = get_conv_model(embeding_size=array_grams[x])
    
    model.summary()

    BATCH_SIZE = 32
    EPOCHS = 10
    VERBOSE = 2

    history = model.fit(X_train, y_train,
                        batch_size=BATCH_SIZE,
                        epochs=EPOCHS, verbose=VERBOSE,
                        validation_data=(X_test, y_test)
                        #validation_split=0.1, 
                        #callbacks=[EarlyStopping(monitor='val_loss')]
                       )
    #hist_array.append(history)


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_1 (Embedding)      (None, None, 200)         7199800   
_________________________________________________________________
spatial_dropout1d_1 (Spatial (None, None, 200)         0         
_________________________________________________________________
conv1d_1 (Conv1D)            (None, None, 512)         307712    
_________________________________________________________________
global_max_pooling1d_1 (Glob (None, 512)               0         
_________________________________________________________________
dense_1 (Dense)              (None, 100)               51300     
=================================================================
Total params: 7,558,812
Trainable params: 7,558,812
Non-trainable params: 0
_________________________________________________________________
Train on 800 samples, validate on 200 samples
Epoch 1/10
11s - loss: 4.6192 - acc: 0.0138 - val_loss: 4.6120 - val_acc: 0.0150
Epoch 2/10
6s - loss: 4.5425 - acc: 0.1025 - val_loss: 4.6074 - val_acc: 0.0400
Epoch 3/10
6s - loss: 4.4479 - acc: 0.2088 - val_loss: 4.5880 - val_acc: 0.0900
Epoch 4/10
6s - loss: 4.2849 - acc: 0.3688 - val_loss: 4.5208 - val_acc: 0.1150
Epoch 5/10
6s - loss: 3.8866 - acc: 0.5650 - val_loss: 4.2426 - val_acc: 0.1950
Epoch 6/10
6s - loss: 2.9415 - acc: 0.7600 - val_loss: 3.8879 - val_acc: 0.2200
Epoch 7/10
6s - loss: 1.7442 - acc: 0.9300 - val_loss: 3.7116 - val_acc: 0.2300
Epoch 8/10
6s - loss: 0.8249 - acc: 0.9862 - val_loss: 3.6200 - val_acc: 0.2400
Epoch 9/10
6s - loss: 0.3432 - acc: 0.9950 - val_loss: 3.6112 - val_acc: 0.2550
Epoch 10/10
6s - loss: 0.1469 - acc: 0.9937 - val_loss: 3.6127 - val_acc: 0.2450
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_2 (Embedding)      (None, None, 200)         9841600   
_________________________________________________________________
spatial_dropout1d_2 (Spatial (None, None, 200)         0         
_________________________________________________________________
conv1d_2 (Conv1D)            (None, None, 512)         307712    
_________________________________________________________________
global_max_pooling1d_2 (Glob (None, 512)               0         
_________________________________________________________________
dense_2 (Dense)              (None, 100)               51300     
=================================================================
Total params: 10,200,612
Trainable params: 10,200,612
Non-trainable params: 0
_________________________________________________________________
Train on 1600 samples, validate on 400 samples
Epoch 1/10
13s - loss: 4.6071 - acc: 0.0231 - val_loss: 4.5921 - val_acc: 0.0800
Epoch 2/10
12s - loss: 4.4808 - acc: 0.1537 - val_loss: 4.5225 - val_acc: 0.1100
Epoch 3/10
12s - loss: 4.1224 - acc: 0.2669 - val_loss: 4.1420 - val_acc: 0.1800
Epoch 4/10
12s - loss: 2.9963 - acc: 0.4794 - val_loss: 3.6264 - val_acc: 0.2525
Epoch 5/10
12s - loss: 1.8129 - acc: 0.7700 - val_loss: 3.4179 - val_acc: 0.2825
Epoch 6/10
12s - loss: 0.9130 - acc: 0.9419 - val_loss: 3.2664 - val_acc: 0.3075
Epoch 7/10
12s - loss: 0.3759 - acc: 0.9925 - val_loss: 3.3617 - val_acc: 0.3100
Epoch 8/10
12s - loss: 0.1557 - acc: 0.9950 - val_loss: 3.3555 - val_acc: 0.3075
Epoch 9/10
12s - loss: 0.0748 - acc: 0.9975 - val_loss: 3.3779 - val_acc: 0.3125
Epoch 10/10
12s - loss: 0.0483 - acc: 0.9963 - val_loss: 3.4214 - val_acc: 0.3050
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_3 (Embedding)      (None, None, 200)         12133600  
_________________________________________________________________
spatial_dropout1d_3 (Spatial (None, None, 200)         0         
_________________________________________________________________
conv1d_3 (Conv1D)            (None, None, 512)         307712    
_________________________________________________________________
global_max_pooling1d_3 (Glob (None, 512)               0         
_________________________________________________________________
dense_3 (Dense)              (None, 100)               51300     
=================================================================
Total params: 12,492,612
Trainable params: 12,492,612
Non-trainable params: 0
_________________________________________________________________
Train on 2400 samples, validate on 600 samples
Epoch 1/10
21s - loss: 4.5937 - acc: 0.0400 - val_loss: 4.5606 - val_acc: 0.1033
Epoch 2/10
19s - loss: 4.3602 - acc: 0.1713 - val_loss: 4.2180 - val_acc: 0.1933
Epoch 3/10
19s - loss: 3.3278 - acc: 0.3588 - val_loss: 3.4060 - val_acc: 0.2800
Epoch 4/10
19s - loss: 2.0694 - acc: 0.6250 - val_loss: 3.1692 - val_acc: 0.3250
Epoch 5/10
19s - loss: 1.1433 - acc: 0.8600 - val_loss: 3.0252 - val_acc: 0.3700
Epoch 6/10
19s - loss: 0.5047 - acc: 0.9700 - val_loss: 3.0058 - val_acc: 0.3917
Epoch 7/10
19s - loss: 0.1963 - acc: 0.9954 - val_loss: 3.0783 - val_acc: 0.3833
Epoch 8/10
19s - loss: 0.0831 - acc: 0.9983 - val_loss: 3.0997 - val_acc: 0.4000
Epoch 9/10
19s - loss: 0.0470 - acc: 0.9988 - val_loss: 3.0977 - val_acc: 0.4050
Epoch 10/10
19s - loss: 0.0325 - acc: 0.9988 - val_loss: 3.1571 - val_acc: 0.3983
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_4 (Embedding)      (None, None, 200)         14139000  
_________________________________________________________________
spatial_dropout1d_4 (Spatial (None, None, 200)         0         
_________________________________________________________________
conv1d_4 (Conv1D)            (None, None, 512)         307712    
_________________________________________________________________
global_max_pooling1d_4 (Glob (None, 512)               0         
_________________________________________________________________
dense_4 (Dense)              (None, 100)               51300     
=================================================================
Total params: 14,498,012
Trainable params: 14,498,012
Non-trainable params: 0
_________________________________________________________________
Train on 3200 samples, validate on 800 samples
Epoch 1/10
26s - loss: 4.5779 - acc: 0.0522 - val_loss: 4.5018 - val_acc: 0.1375
Epoch 2/10
25s - loss: 4.0564 - acc: 0.2356 - val_loss: 3.5991 - val_acc: 0.2487
Epoch 3/10
25s - loss: 2.7164 - acc: 0.4409 - val_loss: 3.0418 - val_acc: 0.3563
Epoch 4/10
25s - loss: 1.6966 - acc: 0.6897 - val_loss: 2.8141 - val_acc: 0.3862
Epoch 5/10
25s - loss: 0.8835 - acc: 0.8866 - val_loss: 2.8015 - val_acc: 0.3962
Epoch 6/10
25s - loss: 0.3671 - acc: 0.9703 - val_loss: 2.8099 - val_acc: 0.4062
Epoch 7/10
25s - loss: 0.1431 - acc: 0.9938 - val_loss: 2.8167 - val_acc: 0.4037
Epoch 8/10
25s - loss: 0.0679 - acc: 0.9959 - val_loss: 2.8103 - val_acc: 0.4075
Epoch 9/10
25s - loss: 0.0458 - acc: 0.9956 - val_loss: 2.8759 - val_acc: 0.4050
Epoch 10/10
25s - loss: 0.0382 - acc: 0.9963 - val_loss: 2.8921 - val_acc: 0.4100
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_5 (Embedding)      (None, None, 200)         15331800  
_________________________________________________________________
spatial_dropout1d_5 (Spatial (None, None, 200)         0         
_________________________________________________________________
conv1d_5 (Conv1D)            (None, None, 512)         307712    
_________________________________________________________________
global_max_pooling1d_5 (Glob (None, 512)               0         
_________________________________________________________________
dense_5 (Dense)              (None, 100)               51300     
=================================================================
Total params: 15,690,812
Trainable params: 15,690,812
Non-trainable params: 0
_________________________________________________________________
Train on 4000 samples, validate on 1000 samples
Epoch 1/10
32s - loss: 4.5572 - acc: 0.0693 - val_loss: 4.4146 - val_acc: 0.1880
Epoch 2/10
32s - loss: 3.6872 - acc: 0.2765 - val_loss: 3.1968 - val_acc: 0.3080
Epoch 3/10
32s - loss: 2.3573 - acc: 0.4970 - val_loss: 2.8603 - val_acc: 0.3640
Epoch 4/10
32s - loss: 1.4627 - acc: 0.7245 - val_loss: 2.6696 - val_acc: 0.3950
Epoch 5/10
32s - loss: 0.7440 - acc: 0.8888 - val_loss: 2.7099 - val_acc: 0.4010
Epoch 6/10
32s - loss: 0.3140 - acc: 0.9725 - val_loss: 2.6383 - val_acc: 0.4090
Epoch 7/10
32s - loss: 0.1233 - acc: 0.9933 - val_loss: 2.6484 - val_acc: 0.4240
Epoch 8/10
32s - loss: 0.0727 - acc: 0.9922 - val_loss: 2.7007 - val_acc: 0.4140
Epoch 9/10
32s - loss: 0.0462 - acc: 0.9948 - val_loss: 2.7221 - val_acc: 0.4260
Epoch 10/10
32s - loss: 0.0410 - acc: 0.9940 - val_loss: 2.7198 - val_acc: 0.4170

In [26]:
# ДЛЯ ВК без знаков припинания, изменение количества постов на человека

for x in range(len(array_sets)):
    X_train, X_test, y_train, y_test = array_sets[x]
    model = get_conv_model(embeding_size=array_grams[x])
    
    model.summary()

    BATCH_SIZE = 32
    EPOCHS = 10
    VERBOSE = 2

    history = model.fit(X_train, y_train,
                        batch_size=BATCH_SIZE,
                        epochs=EPOCHS, verbose=VERBOSE,
                        validation_data=(X_test, y_test)
                        #validation_split=0.1, 
                        #callbacks=[EarlyStopping(monitor='val_loss')]
                       )
    #hist_array.append(history)


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_1 (Embedding)      (None, None, 200)         6026600   
_________________________________________________________________
spatial_dropout1d_1 (Spatial (None, None, 200)         0         
_________________________________________________________________
conv1d_1 (Conv1D)            (None, None, 512)         307712    
_________________________________________________________________
global_max_pooling1d_1 (Glob (None, 512)               0         
_________________________________________________________________
dense_1 (Dense)              (None, 100)               51300     
=================================================================
Total params: 6,385,612
Trainable params: 6,385,612
Non-trainable params: 0
_________________________________________________________________
Train on 800 samples, validate on 200 samples
Epoch 1/10
11s - loss: 4.6173 - acc: 0.0100 - val_loss: 4.6398 - val_acc: 0.0050
Epoch 2/10
6s - loss: 4.5425 - acc: 0.0600 - val_loss: 4.6514 - val_acc: 0.0150
Epoch 3/10
6s - loss: 4.4520 - acc: 0.1387 - val_loss: 4.6462 - val_acc: 0.0450
Epoch 4/10
6s - loss: 4.3026 - acc: 0.2350 - val_loss: 4.5945 - val_acc: 0.0650
Epoch 5/10
6s - loss: 3.9637 - acc: 0.3950 - val_loss: 4.3601 - val_acc: 0.1100
Epoch 6/10
6s - loss: 3.1251 - acc: 0.7488 - val_loss: 3.9963 - val_acc: 0.1650
Epoch 7/10
6s - loss: 1.9585 - acc: 0.8975 - val_loss: 3.8477 - val_acc: 0.1950
Epoch 8/10
6s - loss: 0.9814 - acc: 0.9825 - val_loss: 3.8010 - val_acc: 0.2200
Epoch 9/10
6s - loss: 0.4079 - acc: 0.9988 - val_loss: 3.8002 - val_acc: 0.2100
Epoch 10/10
6s - loss: 0.1694 - acc: 1.0000 - val_loss: 3.8624 - val_acc: 0.1950
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_2 (Embedding)      (None, None, 200)         8360200   
_________________________________________________________________
spatial_dropout1d_2 (Spatial (None, None, 200)         0         
_________________________________________________________________
conv1d_2 (Conv1D)            (None, None, 512)         307712    
_________________________________________________________________
global_max_pooling1d_2 (Glob (None, 512)               0         
_________________________________________________________________
dense_2 (Dense)              (None, 100)               51300     
=================================================================
Total params: 8,719,212
Trainable params: 8,719,212
Non-trainable params: 0
_________________________________________________________________
Train on 1600 samples, validate on 400 samples
Epoch 1/10
13s - loss: 4.6081 - acc: 0.0163 - val_loss: 4.6000 - val_acc: 0.0375
Epoch 2/10
12s - loss: 4.5034 - acc: 0.1200 - val_loss: 4.5499 - val_acc: 0.0975
Epoch 3/10
12s - loss: 4.2431 - acc: 0.2800 - val_loss: 4.2850 - val_acc: 0.1625
Epoch 4/10
12s - loss: 3.3766 - acc: 0.4250 - val_loss: 3.8230 - val_acc: 0.2000
Epoch 5/10
12s - loss: 2.1591 - acc: 0.7106 - val_loss: 3.5227 - val_acc: 0.2675
Epoch 6/10
12s - loss: 1.1657 - acc: 0.9062 - val_loss: 3.4352 - val_acc: 0.2650
Epoch 7/10
12s - loss: 0.5003 - acc: 0.9850 - val_loss: 3.3775 - val_acc: 0.2850
Epoch 8/10
12s - loss: 0.2005 - acc: 0.9981 - val_loss: 3.3863 - val_acc: 0.2875
Epoch 9/10
12s - loss: 0.0948 - acc: 0.9981 - val_loss: 3.3773 - val_acc: 0.2900
Epoch 10/10
12s - loss: 0.0599 - acc: 0.9981 - val_loss: 3.4123 - val_acc: 0.2975
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_3 (Embedding)      (None, None, 200)         9947200   
_________________________________________________________________
spatial_dropout1d_3 (Spatial (None, None, 200)         0         
_________________________________________________________________
conv1d_3 (Conv1D)            (None, None, 512)         307712    
_________________________________________________________________
global_max_pooling1d_3 (Glob (None, 512)               0         
_________________________________________________________________
dense_3 (Dense)              (None, 100)               51300     
=================================================================
Total params: 10,306,212
Trainable params: 10,306,212
Non-trainable params: 0
_________________________________________________________________
Train on 2400 samples, validate on 600 samples
Epoch 1/10
20s - loss: 4.5972 - acc: 0.0371 - val_loss: 4.5546 - val_acc: 0.0800
Epoch 2/10
18s - loss: 4.3820 - acc: 0.1358 - val_loss: 4.2412 - val_acc: 0.1800
Epoch 3/10
18s - loss: 3.4683 - acc: 0.3417 - val_loss: 3.4673 - val_acc: 0.2650
Epoch 4/10
18s - loss: 2.2788 - acc: 0.5717 - val_loss: 3.2428 - val_acc: 0.3217
Epoch 5/10
18s - loss: 1.3547 - acc: 0.8004 - val_loss: 3.1927 - val_acc: 0.3233
Epoch 6/10
18s - loss: 0.6695 - acc: 0.9346 - val_loss: 3.1129 - val_acc: 0.3283
Epoch 7/10
18s - loss: 0.2798 - acc: 0.9842 - val_loss: 3.2304 - val_acc: 0.3400
Epoch 8/10
18s - loss: 0.1260 - acc: 0.9938 - val_loss: 3.2242 - val_acc: 0.3433
Epoch 9/10
18s - loss: 0.0695 - acc: 0.9954 - val_loss: 3.2521 - val_acc: 0.3433
Epoch 10/10
18s - loss: 0.0539 - acc: 0.9938 - val_loss: 3.2905 - val_acc: 0.3517
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_4 (Embedding)      (None, None, 200)         11712200  
_________________________________________________________________
spatial_dropout1d_4 (Spatial (None, None, 200)         0         
_________________________________________________________________
conv1d_4 (Conv1D)            (None, None, 512)         307712    
_________________________________________________________________
global_max_pooling1d_4 (Glob (None, 512)               0         
_________________________________________________________________
dense_4 (Dense)              (None, 100)               51300     
=================================================================
Total params: 12,071,212
Trainable params: 12,071,212
Non-trainable params: 0
_________________________________________________________________
Train on 3200 samples, validate on 800 samples
Epoch 1/10
25s - loss: 4.5811 - acc: 0.0403 - val_loss: 4.5238 - val_acc: 0.1125
Epoch 2/10
25s - loss: 4.1478 - acc: 0.2234 - val_loss: 3.8398 - val_acc: 0.2350
Epoch 3/10
25s - loss: 2.8638 - acc: 0.4119 - val_loss: 3.3536 - val_acc: 0.3013
Epoch 4/10
25s - loss: 1.8515 - acc: 0.6538 - val_loss: 3.2122 - val_acc: 0.3375
Epoch 5/10
25s - loss: 1.0222 - acc: 0.8512 - val_loss: 3.2217 - val_acc: 0.3425
Epoch 6/10
25s - loss: 0.4845 - acc: 0.9566 - val_loss: 3.3385 - val_acc: 0.3362
Epoch 7/10
25s - loss: 0.1983 - acc: 0.9884 - val_loss: 3.3175 - val_acc: 0.3463
Epoch 8/10
25s - loss: 0.0936 - acc: 0.9941 - val_loss: 3.4165 - val_acc: 0.3500
Epoch 9/10
25s - loss: 0.0577 - acc: 0.9941 - val_loss: 3.4560 - val_acc: 0.3500
Epoch 10/10
25s - loss: 0.0445 - acc: 0.9947 - val_loss: 3.4582 - val_acc: 0.3513
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_5 (Embedding)      (None, None, 200)         13192800  
_________________________________________________________________
spatial_dropout1d_5 (Spatial (None, None, 200)         0         
_________________________________________________________________
conv1d_5 (Conv1D)            (None, None, 512)         307712    
_________________________________________________________________
global_max_pooling1d_5 (Glob (None, 512)               0         
_________________________________________________________________
dense_5 (Dense)              (None, 100)               51300     
=================================================================
Total params: 13,551,812
Trainable params: 13,551,812
Non-trainable params: 0
_________________________________________________________________
Train on 4000 samples, validate on 1000 samples
Epoch 1/10
32s - loss: 4.5662 - acc: 0.0512 - val_loss: 4.4697 - val_acc: 0.1200
Epoch 2/10
31s - loss: 3.8501 - acc: 0.2503 - val_loss: 3.3235 - val_acc: 0.3040
Epoch 3/10
32s - loss: 2.5421 - acc: 0.4617 - val_loss: 2.9993 - val_acc: 0.3590
Epoch 4/10
32s - loss: 1.6396 - acc: 0.6727 - val_loss: 2.8669 - val_acc: 0.3740
Epoch 5/10
31s - loss: 0.9040 - acc: 0.8598 - val_loss: 2.8097 - val_acc: 0.3930
Epoch 6/10
31s - loss: 0.4186 - acc: 0.9527 - val_loss: 2.8888 - val_acc: 0.3960
Epoch 7/10
32s - loss: 0.1754 - acc: 0.9873 - val_loss: 2.9413 - val_acc: 0.4060
Epoch 8/10
31s - loss: 0.0915 - acc: 0.9905 - val_loss: 2.9670 - val_acc: 0.4150
Epoch 9/10
31s - loss: 0.0630 - acc: 0.9922 - val_loss: 2.9828 - val_acc: 0.4160
Epoch 10/10
32s - loss: 0.0513 - acc: 0.9922 - val_loss: 2.9892 - val_acc: 0.4130

In [44]:
# ДЛЯ ЭХО Изменение количества постов  без знаков припинания

for x in range(len(array_sets)):
    X_train, X_test, y_train, y_test = array_sets[x]
    model = get_conv_model(embeding_size=array_grams[x])
    
    model.summary()

    BATCH_SIZE = 32
    EPOCHS = 10
    VERBOSE = 2

    history = model.fit(X_train, y_train,
                        batch_size=BATCH_SIZE,
                        epochs=EPOCHS, verbose=VERBOSE,
                        validation_data=(X_test, y_test)
                        #validation_split=0.1, 
                        #callbacks=[EarlyStopping(monitor='val_loss')]
                       )
    #hist_array.append(history)


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_1 (Embedding)      (None, None, 200)         9540600   
_________________________________________________________________
spatial_dropout1d_1 (Spatial (None, None, 200)         0         
_________________________________________________________________
conv1d_1 (Conv1D)            (None, None, 512)         307712    
_________________________________________________________________
global_max_pooling1d_1 (Glob (None, 512)               0         
_________________________________________________________________
dense_1 (Dense)              (None, 100)               51300     
=================================================================
Total params: 9,899,612
Trainable params: 9,899,612
Non-trainable params: 0
_________________________________________________________________
Train on 1600 samples, validate on 400 samples
Epoch 1/10
99s - loss: 4.6237 - acc: 0.0138 - val_loss: 4.6148 - val_acc: 0.0150
Epoch 2/10
61s - loss: 4.5297 - acc: 0.0306 - val_loss: 4.5624 - val_acc: 0.0225
Epoch 3/10
61s - loss: 4.2342 - acc: 0.1056 - val_loss: 4.1200 - val_acc: 0.1175
Epoch 4/10
61s - loss: 3.2280 - acc: 0.3825 - val_loss: 3.4418 - val_acc: 0.1975
Epoch 5/10
61s - loss: 1.8521 - acc: 0.6831 - val_loss: 2.9610 - val_acc: 0.2875
Epoch 6/10
61s - loss: 0.8098 - acc: 0.8994 - val_loss: 2.7670 - val_acc: 0.3350
Epoch 7/10
61s - loss: 0.3514 - acc: 0.9725 - val_loss: 2.6555 - val_acc: 0.3500
Epoch 8/10
61s - loss: 0.1596 - acc: 0.9919 - val_loss: 2.5679 - val_acc: 0.3700
Epoch 9/10
60s - loss: 0.0917 - acc: 0.9956 - val_loss: 2.5549 - val_acc: 0.3775
Epoch 10/10
60s - loss: 0.0637 - acc: 0.9956 - val_loss: 2.5744 - val_acc: 0.3725
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_2 (Embedding)      (None, None, 200)         12395200  
_________________________________________________________________
spatial_dropout1d_2 (Spatial (None, None, 200)         0         
_________________________________________________________________
conv1d_2 (Conv1D)            (None, None, 512)         307712    
_________________________________________________________________
global_max_pooling1d_2 (Glob (None, 512)               0         
_________________________________________________________________
dense_2 (Dense)              (None, 100)               51300     
=================================================================
Total params: 12,754,212
Trainable params: 12,754,212
Non-trainable params: 0
_________________________________________________________________
Train on 3200 samples, validate on 800 samples
Epoch 1/10
123s - loss: 4.5924 - acc: 0.0191 - val_loss: 4.5073 - val_acc: 0.0375
Epoch 2/10
123s - loss: 4.0430 - acc: 0.1156 - val_loss: 3.6338 - val_acc: 0.2137
Epoch 3/10
123s - loss: 2.5580 - acc: 0.4412 - val_loss: 2.6608 - val_acc: 0.3613
Epoch 4/10
122s - loss: 1.2728 - acc: 0.7191 - val_loss: 2.2977 - val_acc: 0.4437
Epoch 5/10
123s - loss: 0.6157 - acc: 0.8806 - val_loss: 2.1990 - val_acc: 0.4425
Epoch 6/10
122s - loss: 0.2681 - acc: 0.9694 - val_loss: 2.1141 - val_acc: 0.4788
Epoch 7/10
123s - loss: 0.1232 - acc: 0.9919 - val_loss: 2.0736 - val_acc: 0.4863
Epoch 8/10
123s - loss: 0.0636 - acc: 0.9972 - val_loss: 2.0798 - val_acc: 0.5062
Epoch 9/10
123s - loss: 0.0419 - acc: 0.9972 - val_loss: 2.0557 - val_acc: 0.5062
Epoch 10/10
123s - loss: 0.0315 - acc: 0.9972 - val_loss: 2.0580 - val_acc: 0.5100
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_3 (Embedding)      (None, None, 200)         14229800  
_________________________________________________________________
spatial_dropout1d_3 (Spatial (None, None, 200)         0         
_________________________________________________________________
conv1d_3 (Conv1D)            (None, None, 512)         307712    
_________________________________________________________________
global_max_pooling1d_3 (Glob (None, 512)               0         
_________________________________________________________________
dense_3 (Dense)              (None, 100)               51300     
=================================================================
Total params: 14,588,812
Trainable params: 14,588,812
Non-trainable params: 0
_________________________________________________________________
Train on 4800 samples, validate on 1200 samples
Epoch 1/10
186s - loss: 4.5307 - acc: 0.0252 - val_loss: 4.2173 - val_acc: 0.0758
Epoch 2/10
186s - loss: 3.1930 - acc: 0.2948 - val_loss: 2.4727 - val_acc: 0.4092
Epoch 3/10
185s - loss: 1.5517 - acc: 0.6296 - val_loss: 1.9273 - val_acc: 0.5292
Epoch 4/10
186s - loss: 0.7953 - acc: 0.8237 - val_loss: 1.7473 - val_acc: 0.5675
Epoch 5/10
184s - loss: 0.3992 - acc: 0.9221 - val_loss: 1.6681 - val_acc: 0.5875
Epoch 6/10
185s - loss: 0.1892 - acc: 0.9800 - val_loss: 1.6178 - val_acc: 0.5858
Epoch 7/10
184s - loss: 0.0886 - acc: 0.9935 - val_loss: 1.5687 - val_acc: 0.6017
Epoch 8/10
185s - loss: 0.0516 - acc: 0.9962 - val_loss: 1.5503 - val_acc: 0.6225
Epoch 9/10
185s - loss: 0.0365 - acc: 0.9969 - val_loss: 1.5619 - val_acc: 0.6117
Epoch 10/10
184s - loss: 0.0288 - acc: 0.9969 - val_loss: 1.5560 - val_acc: 0.6200
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_4 (Embedding)      (None, None, 200)         16259200  
_________________________________________________________________
spatial_dropout1d_4 (Spatial (None, None, 200)         0         
_________________________________________________________________
conv1d_4 (Conv1D)            (None, None, 512)         307712    
_________________________________________________________________
global_max_pooling1d_4 (Glob (None, 512)               0         
_________________________________________________________________
dense_4 (Dense)              (None, 100)               51300     
=================================================================
Total params: 16,618,212
Trainable params: 16,618,212
Non-trainable params: 0
_________________________________________________________________
Train on 6400 samples, validate on 1600 samples
Epoch 1/10
248s - loss: 4.3646 - acc: 0.0570 - val_loss: 3.6591 - val_acc: 0.1950
Epoch 2/10
247s - loss: 2.4017 - acc: 0.4416 - val_loss: 2.0684 - val_acc: 0.4919
Epoch 3/10
247s - loss: 1.1853 - acc: 0.7084 - val_loss: 1.7039 - val_acc: 0.5637
Epoch 4/10
249s - loss: 0.6341 - acc: 0.8533 - val_loss: 1.5700 - val_acc: 0.6056
Epoch 5/10
247s - loss: 0.3141 - acc: 0.9383 - val_loss: 1.6300 - val_acc: 0.5913
Epoch 6/10
248s - loss: 0.1391 - acc: 0.9831 - val_loss: 1.4773 - val_acc: 0.6356
Epoch 7/10
247s - loss: 0.0697 - acc: 0.9931 - val_loss: 1.4508 - val_acc: 0.6438
Epoch 8/10
247s - loss: 0.0394 - acc: 0.9959 - val_loss: 1.4870 - val_acc: 0.6519
Epoch 9/10
248s - loss: 0.0288 - acc: 0.9959 - val_loss: 1.4753 - val_acc: 0.6488
Epoch 10/10
248s - loss: 0.0234 - acc: 0.9962 - val_loss: 1.4848 - val_acc: 0.6538
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_5 (Embedding)      (None, None, 200)         17486600  
_________________________________________________________________
spatial_dropout1d_5 (Spatial (None, None, 200)         0         
_________________________________________________________________
conv1d_5 (Conv1D)            (None, None, 512)         307712    
_________________________________________________________________
global_max_pooling1d_5 (Glob (None, 512)               0         
_________________________________________________________________
dense_5 (Dense)              (None, 100)               51300     
=================================================================
Total params: 17,845,612
Trainable params: 17,845,612
Non-trainable params: 0
_________________________________________________________________
Train on 8000 samples, validate on 2000 samples
Epoch 1/10
310s - loss: 4.1848 - acc: 0.0943 - val_loss: 3.0952 - val_acc: 0.3015
Epoch 2/10
310s - loss: 2.0345 - acc: 0.5095 - val_loss: 1.7585 - val_acc: 0.5710
Epoch 3/10
310s - loss: 1.0373 - acc: 0.7406 - val_loss: 1.5711 - val_acc: 0.6065
Epoch 4/10
310s - loss: 0.5726 - acc: 0.8601 - val_loss: 1.4230 - val_acc: 0.6415
Epoch 5/10
309s - loss: 0.2820 - acc: 0.9427 - val_loss: 1.5047 - val_acc: 0.6400
Epoch 6/10
309s - loss: 0.1352 - acc: 0.9806 - val_loss: 1.4230 - val_acc: 0.6490
Epoch 7/10
309s - loss: 0.0630 - acc: 0.9945 - val_loss: 1.4037 - val_acc: 0.6470
Epoch 8/10
310s - loss: 0.0364 - acc: 0.9966 - val_loss: 1.3879 - val_acc: 0.6600
Epoch 9/10
309s - loss: 0.0263 - acc: 0.9970 - val_loss: 1.3747 - val_acc: 0.6680
Epoch 10/10
309s - loss: 0.0214 - acc: 0.9973 - val_loss: 1.3962 - val_acc: 0.6680
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_6 (Embedding)      (None, None, 200)         19282200  
_________________________________________________________________
spatial_dropout1d_6 (Spatial (None, None, 200)         0         
_________________________________________________________________
conv1d_6 (Conv1D)            (None, None, 512)         307712    
_________________________________________________________________
global_max_pooling1d_6 (Glob (None, 512)               0         
_________________________________________________________________
dense_6 (Dense)              (None, 100)               51300     
=================================================================
Total params: 19,641,212
Trainable params: 19,641,212
Non-trainable params: 0
_________________________________________________________________
Train on 10400 samples, validate on 2600 samples
Epoch 1/10
409s - loss: 3.8495 - acc: 0.1520 - val_loss: 2.3616 - val_acc: 0.4246
Epoch 2/10
404s - loss: 1.5994 - acc: 0.5982 - val_loss: 1.5304 - val_acc: 0.6042
Epoch 3/10
404s - loss: 0.8362 - acc: 0.7887 - val_loss: 1.2866 - val_acc: 0.6696
Epoch 4/10
403s - loss: 0.4539 - acc: 0.8915 - val_loss: 1.2099 - val_acc: 0.6935
Epoch 5/10
404s - loss: 0.2293 - acc: 0.9573 - val_loss: 1.1895 - val_acc: 0.6996
Epoch 6/10
402s - loss: 0.1031 - acc: 0.9873 - val_loss: 1.1381 - val_acc: 0.7162
Epoch 7/10
403s - loss: 0.0501 - acc: 0.9956 - val_loss: 1.1725 - val_acc: 0.7142
Epoch 8/10
404s - loss: 0.0313 - acc: 0.9966 - val_loss: 1.1484 - val_acc: 0.7265
Epoch 9/10
403s - loss: 0.0250 - acc: 0.9968 - val_loss: 1.1438 - val_acc: 0.7354
Epoch 10/10
404s - loss: 0.0214 - acc: 0.9966 - val_loss: 1.1586 - val_acc: 0.7319
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_7 (Embedding)      (None, None, 200)         20910200  
_________________________________________________________________
spatial_dropout1d_7 (Spatial (None, None, 200)         0         
_________________________________________________________________
conv1d_7 (Conv1D)            (None, None, 512)         307712    
_________________________________________________________________
global_max_pooling1d_7 (Glob (None, 512)               0         
_________________________________________________________________
dense_7 (Dense)              (None, 100)               51300     
=================================================================
Total params: 21,269,212
Trainable params: 21,269,212
Non-trainable params: 0
_________________________________________________________________
Train on 12800 samples, validate on 3200 samples
Epoch 1/10
499s - loss: 3.5690 - acc: 0.2049 - val_loss: 2.1097 - val_acc: 0.4884
Epoch 2/10
497s - loss: 1.4117 - acc: 0.6440 - val_loss: 1.3929 - val_acc: 0.6419
Epoch 3/10
498s - loss: 0.7692 - acc: 0.8020 - val_loss: 1.2368 - val_acc: 0.6772
Epoch 4/10
497s - loss: 0.4216 - acc: 0.8965 - val_loss: 1.1936 - val_acc: 0.6909
Epoch 5/10
498s - loss: 0.2115 - acc: 0.9577 - val_loss: 1.2139 - val_acc: 0.7009
Epoch 6/10
498s - loss: 0.0974 - acc: 0.9866 - val_loss: 1.2631 - val_acc: 0.6922
Epoch 7/10
498s - loss: 0.0447 - acc: 0.9955 - val_loss: 1.1747 - val_acc: 0.7156
Epoch 8/10
498s - loss: 0.0287 - acc: 0.9961 - val_loss: 1.2079 - val_acc: 0.7150
Epoch 9/10
497s - loss: 0.0331 - acc: 0.9941 - val_loss: 1.1992 - val_acc: 0.7212
Epoch 10/10
499s - loss: 0.0159 - acc: 0.9973 - val_loss: 1.1863 - val_acc: 0.7275
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_8 (Embedding)      (None, None, 200)         22533200  
_________________________________________________________________
spatial_dropout1d_8 (Spatial (None, None, 200)         0         
_________________________________________________________________
conv1d_8 (Conv1D)            (None, None, 512)         307712    
_________________________________________________________________
global_max_pooling1d_8 (Glob (None, 512)               0         
_________________________________________________________________
dense_8 (Dense)              (None, 100)               51300     
=================================================================
Total params: 22,892,212
Trainable params: 22,892,212
Non-trainable params: 0
_________________________________________________________________
Train on 16000 samples, validate on 4000 samples
Epoch 1/10
624s - loss: 3.2397 - acc: 0.2634 - val_loss: 1.7119 - val_acc: 0.5800
Epoch 2/10
622s - loss: 1.2322 - acc: 0.6857 - val_loss: 1.2278 - val_acc: 0.6860
Epoch 3/10
623s - loss: 0.6890 - acc: 0.8216 - val_loss: 1.0631 - val_acc: 0.7255
Epoch 4/10
623s - loss: 0.3760 - acc: 0.9065 - val_loss: 1.0777 - val_acc: 0.7268
Epoch 5/10
623s - loss: 0.1923 - acc: 0.9581 - val_loss: 1.0050 - val_acc: 0.7470
Epoch 6/10
624s - loss: 0.0842 - acc: 0.9873 - val_loss: 1.0811 - val_acc: 0.7428
Epoch 7/10
622s - loss: 0.0392 - acc: 0.9957 - val_loss: 1.0138 - val_acc: 0.7590
Epoch 8/10
622s - loss: 0.0247 - acc: 0.9966 - val_loss: 1.0176 - val_acc: 0.7612
Epoch 9/10
623s - loss: 0.0194 - acc: 0.9968 - val_loss: 1.0281 - val_acc: 0.7638
Epoch 10/10
622s - loss: 0.0168 - acc: 0.9969 - val_loss: 1.0438 - val_acc: 0.7648

In [ ]:
# ДЛЯ ЭХО Изменение количества постов со знаками припинания

for x in range(len(array_sets)):
    X_train, X_test, y_train, y_test = array_sets[x]
    model = get_conv_model(embeding_size=array_grams[x])
    
    model.summary()

    BATCH_SIZE = 32
    EPOCHS = 10
    VERBOSE = 2

    history = model.fit(X_train, y_train,
                        batch_size=BATCH_SIZE,
                        epochs=EPOCHS, verbose=VERBOSE,
                        validation_data=(X_test, y_test)
                        #validation_split=0.1, 
                        #callbacks=[EarlyStopping(monitor='val_loss')]
                       )
    #hist_array.append(history)


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_1 (Embedding)      (None, None, 200)         12377000  
_________________________________________________________________
spatial_dropout1d_1 (Spatial (None, None, 200)         0         
_________________________________________________________________
conv1d_1 (Conv1D)            (None, None, 512)         307712    
_________________________________________________________________
global_max_pooling1d_1 (Glob (None, 512)               0         
_________________________________________________________________
dense_1 (Dense)              (None, 100)               51300     
=================================================================
Total params: 12,736,012
Trainable params: 12,736,012
Non-trainable params: 0
_________________________________________________________________
Train on 1600 samples, validate on 400 samples
Epoch 1/10
86s - loss: 4.6205 - acc: 0.0131 - val_loss: 4.6050 - val_acc: 0.0125
Epoch 2/10
61s - loss: 4.4877 - acc: 0.0375 - val_loss: 4.4303 - val_acc: 0.0250
Epoch 3/10
61s - loss: 3.9888 - acc: 0.1550 - val_loss: 3.8495 - val_acc: 0.1625
Epoch 4/10
61s - loss: 2.8737 - acc: 0.4675 - val_loss: 3.1296 - val_acc: 0.2850
Epoch 5/10
62s - loss: 1.4848 - acc: 0.7656 - val_loss: 2.6123 - val_acc: 0.3900
Epoch 6/10
62s - loss: 0.5997 - acc: 0.9313 - val_loss: 2.3663 - val_acc: 0.4325
Epoch 7/10
61s - loss: 0.2558 - acc: 0.9844 - val_loss: 2.1875 - val_acc: 0.4525
Epoch 8/10
61s - loss: 0.1218 - acc: 0.9931 - val_loss: 2.1359 - val_acc: 0.4675
Epoch 9/10
61s - loss: 0.0743 - acc: 0.9969 - val_loss: 2.1025 - val_acc: 0.4675
Epoch 10/10
61s - loss: 0.0539 - acc: 0.9969 - val_loss: 2.0884 - val_acc: 0.4775
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_2 (Embedding)      (None, None, 200)         16080600  
_________________________________________________________________
spatial_dropout1d_2 (Spatial (None, None, 200)         0         
_________________________________________________________________
conv1d_2 (Conv1D)            (None, None, 512)         307712    
_________________________________________________________________
global_max_pooling1d_2 (Glob (None, 512)               0         
_________________________________________________________________
dense_2 (Dense)              (None, 100)               51300     
=================================================================
Total params: 16,439,612
Trainable params: 16,439,612
Non-trainable params: 0
_________________________________________________________________
Train on 3200 samples, validate on 800 samples
Epoch 1/10
123s - loss: 4.5953 - acc: 0.0178 - val_loss: 4.5365 - val_acc: 0.0325
Epoch 2/10
123s - loss: 3.9488 - acc: 0.1544 - val_loss: 3.1831 - val_acc: 0.2963
Epoch 3/10
124s - loss: 2.0213 - acc: 0.5509 - val_loss: 2.2268 - val_acc: 0.4487
Epoch 4/10
123s - loss: 0.9122 - acc: 0.8075 - val_loss: 1.9190 - val_acc: 0.5188
Epoch 5/10
123s - loss: 0.4064 - acc: 0.9334 - val_loss: 1.7904 - val_acc: 0.5563
Epoch 6/10
124s - loss: 0.1658 - acc: 0.9847 - val_loss: 1.7280 - val_acc: 0.5813
Epoch 7/10
124s - loss: 0.0794 - acc: 0.9953 - val_loss: 1.6837 - val_acc: 0.5750
Epoch 8/10
123s - loss: 0.0452 - acc: 0.9969 - val_loss: 1.6995 - val_acc: 0.5687
Epoch 9/10
124s - loss: 0.0326 - acc: 0.9975 - val_loss: 1.6842 - val_acc: 0.5825
Epoch 10/10
123s - loss: 0.0264 - acc: 0.9975 - val_loss: 1.6948 - val_acc: 0.5850
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_3 (Embedding)      (None, None, 200)         18366200  
_________________________________________________________________
spatial_dropout1d_3 (Spatial (None, None, 200)         0         
_________________________________________________________________
conv1d_3 (Conv1D)            (None, None, 512)         307712    
_________________________________________________________________
global_max_pooling1d_3 (Glob (None, 512)               0         
_________________________________________________________________
dense_3 (Dense)              (None, 100)               51300     
=================================================================
Total params: 18,725,212
Trainable params: 18,725,212
Non-trainable params: 0
_________________________________________________________________
Train on 4800 samples, validate on 1200 samples
Epoch 1/10
187s - loss: 4.5108 - acc: 0.0317 - val_loss: 4.0920 - val_acc: 0.1417
Epoch 2/10
187s - loss: 2.8927 - acc: 0.3604 - val_loss: 2.1729 - val_acc: 0.4875
Epoch 3/10
187s - loss: 1.2958 - acc: 0.6927 - val_loss: 1.6346 - val_acc: 0.5858
Epoch 4/10
186s - loss: 0.6270 - acc: 0.8683 - val_loss: 1.4787 - val_acc: 0.6158
Epoch 5/10
187s - loss: 0.2861 - acc: 0.9546 - val_loss: 1.3873 - val_acc: 0.6508
Epoch 6/10
186s - loss: 0.1239 - acc: 0.9898 - val_loss: 1.3447 - val_acc: 0.6483
Epoch 7/10
187s - loss: 0.0615 - acc: 0.9965 - val_loss: 1.2980 - val_acc: 0.6617
Epoch 8/10
187s - loss: 0.0401 - acc: 0.9967 - val_loss: 1.2963 - val_acc: 0.6775
Epoch 9/10
185s - loss: 0.0307 - acc: 0.9969 - val_loss: 1.2925 - val_acc: 0.6775
Epoch 10/10
187s - loss: 0.0246 - acc: 0.9969 - val_loss: 1.2893 - val_acc: 0.6775
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_4 (Embedding)      (None, None, 200)         20872000  
_________________________________________________________________
spatial_dropout1d_4 (Spatial (None, None, 200)         0         
_________________________________________________________________
conv1d_4 (Conv1D)            (None, None, 512)         307712    
_________________________________________________________________
global_max_pooling1d_4 (Glob (None, 512)               0         
_________________________________________________________________
dense_4 (Dense)              (None, 100)               51300     
=================================================================
Total params: 21,231,012
Trainable params: 21,231,012
Non-trainable params: 0
_________________________________________________________________
Train on 6400 samples, validate on 1600 samples
Epoch 1/10
249s - loss: 4.3276 - acc: 0.0634 - val_loss: 3.3900 - val_acc: 0.2456
Epoch 2/10
249s - loss: 2.1027 - acc: 0.5012 - val_loss: 1.8207 - val_acc: 0.5581
Epoch 3/10
249s - loss: 0.9344 - acc: 0.7672 - val_loss: 1.4909 - val_acc: 0.6350
Epoch 4/10
250s - loss: 0.4683 - acc: 0.8927 - val_loss: 1.3781 - val_acc: 0.6631
Epoch 5/10
249s - loss: 0.2129 - acc: 0.9647 - val_loss: 1.3510 - val_acc: 0.6706
Epoch 6/10
249s - loss: 0.0914 - acc: 0.9911 - val_loss: 1.2932 - val_acc: 0.6863
Epoch 7/10
249s - loss: 0.0463 - acc: 0.9962 - val_loss: 1.2659 - val_acc: 0.6894
Epoch 8/10
249s - loss: 0.0304 - acc: 0.9969 - val_loss: 1.2660 - val_acc: 0.6963
Epoch 9/10
248s - loss: 0.0230 - acc: 0.9973 - val_loss: 1.2745 - val_acc: 0.7031
Epoch 10/10
249s - loss: 0.0192 - acc: 0.9970 - val_loss: 1.2825 - val_acc: 0.7025
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_5 (Embedding)      (None, None, 200)         22366600  
_________________________________________________________________
spatial_dropout1d_5 (Spatial (None, None, 200)         0         
_________________________________________________________________
conv1d_5 (Conv1D)            (None, None, 512)         307712    
_________________________________________________________________
global_max_pooling1d_5 (Glob (None, 512)               0         
_________________________________________________________________
dense_5 (Dense)              (None, 100)               51300     
=================================================================
Total params: 22,725,612
Trainable params: 22,725,612
Non-trainable params: 0
_________________________________________________________________
Train on 8000 samples, validate on 2000 samples
Epoch 1/10
312s - loss: 4.0195 - acc: 0.1251 - val_loss: 2.6407 - val_acc: 0.3985
Epoch 2/10
312s - loss: 1.6683 - acc: 0.5946 - val_loss: 1.4636 - val_acc: 0.6320
Epoch 3/10
312s - loss: 0.7962 - acc: 0.7986 - val_loss: 1.2698 - val_acc: 0.6700
Epoch 4/10
312s - loss: 0.3989 - acc: 0.9113 - val_loss: 1.0868 - val_acc: 0.7095
Epoch 5/10
313s - loss: 0.1764 - acc: 0.9723 - val_loss: 1.0923 - val_acc: 0.7130
Epoch 6/10
312s - loss: 0.0797 - acc: 0.9910 - val_loss: 1.0428 - val_acc: 0.7160
Epoch 7/10
311s - loss: 0.0390 - acc: 0.9965 - val_loss: 1.0292 - val_acc: 0.7215
Epoch 8/10
311s - loss: 0.0257 - acc: 0.9974 - val_loss: 1.0199 - val_acc: 0.7240
Epoch 9/10
311s - loss: 0.0198 - acc: 0.9976 - val_loss: 1.0283 - val_acc: 0.7305
Epoch 10/10
312s - loss: 0.0168 - acc: 0.9978 - val_loss: 1.0348 - val_acc: 0.7290
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_6 (Embedding)      (None, None, 200)         24770000  
_________________________________________________________________
spatial_dropout1d_6 (Spatial (None, None, 200)         0         
_________________________________________________________________
conv1d_6 (Conv1D)            (None, None, 512)         307712    
_________________________________________________________________
global_max_pooling1d_6 (Glob (None, 512)               0         
_________________________________________________________________
dense_6 (Dense)              (None, 100)               51300     
=================================================================
Total params: 25,129,012
Trainable params: 25,129,012
Non-trainable params: 0
_________________________________________________________________
Train on 10400 samples, validate on 2600 samples
Epoch 1/10
411s - loss: 3.6439 - acc: 0.2042 - val_loss: 2.0373 - val_acc: 0.4942
Epoch 2/10
407s - loss: 1.2969 - acc: 0.6794 - val_loss: 1.2897 - val_acc: 0.6604
Epoch 3/10
407s - loss: 0.6348 - acc: 0.8445 - val_loss: 1.0819 - val_acc: 0.7208
Epoch 4/10
406s - loss: 0.3126 - acc: 0.9291 - val_loss: 1.0513 - val_acc: 0.7346
Epoch 5/10
407s - loss: 0.1349 - acc: 0.9797 - val_loss: 1.0179 - val_acc: 0.7419
Epoch 6/10
407s - loss: 0.0571 - acc: 0.9950 - val_loss: 0.9960 - val_acc: 0.7550
Epoch 7/10
406s - loss: 0.0322 - acc: 0.9964 - val_loss: 0.9768 - val_acc: 0.7608
Epoch 8/10
406s - loss: 0.0228 - acc: 0.9970 - val_loss: 0.9997 - val_acc: 0.7562
Epoch 9/10
406s - loss: 0.0194 - acc: 0.9969 - val_loss: 0.9858 - val_acc: 0.7658
Epoch 10/10
407s - loss: 0.0172 - acc: 0.9968 - val_loss: 1.0082 - val_acc: 0.7612
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_7 (Embedding)      (None, None, 200)         26537800  
_________________________________________________________________
spatial_dropout1d_7 (Spatial (None, None, 200)         0         
_________________________________________________________________
conv1d_7 (Conv1D)            (None, None, 512)         307712    
_________________________________________________________________
global_max_pooling1d_7 (Glob (None, 512)               0         
_________________________________________________________________
dense_7 (Dense)              (None, 100)               51300     
=================================================================
Total params: 26,896,812
Trainable params: 26,896,812
Non-trainable params: 0
_________________________________________________________________
Train on 12800 samples, validate on 3200 samples
Epoch 1/10
500s - loss: 3.3093 - acc: 0.2598 - val_loss: 1.7701 - val_acc: 0.5634
Epoch 2/10
501s - loss: 1.1316 - acc: 0.7138 - val_loss: 1.1847 - val_acc: 0.6897
Epoch 3/10
502s - loss: 0.5627 - acc: 0.8591 - val_loss: 1.0524 - val_acc: 0.7178
Epoch 4/10
500s - loss: 0.2702 - acc: 0.9370 - val_loss: 1.0060 - val_acc: 0.7300
Epoch 5/10
502s - loss: 0.1147 - acc: 0.9805 - val_loss: 0.9437 - val_acc: 0.7528
Epoch 6/10
500s - loss: 0.0488 - acc: 0.9945 - val_loss: 0.9047 - val_acc: 0.7631
Epoch 7/10
500s - loss: 0.0238 - acc: 0.9973 - val_loss: 0.9118 - val_acc: 0.7672
Epoch 8/10
500s - loss: 0.0173 - acc: 0.9974 - val_loss: 0.9271 - val_acc: 0.7697
Epoch 9/10
500s - loss: 0.0141 - acc: 0.9974 - val_loss: 0.9201 - val_acc: 0.7662
Epoch 10/10
499s - loss: 0.0124 - acc: 0.9977 - val_loss: 0.9377 - val_acc: 0.7694
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_8 (Embedding)      (None, None, 200)         28804600  
_________________________________________________________________
spatial_dropout1d_8 (Spatial (None, None, 200)         0         
_________________________________________________________________
conv1d_8 (Conv1D)            (None, None, 512)         307712    
_________________________________________________________________
global_max_pooling1d_8 (Glob (None, 512)               0         
_________________________________________________________________
dense_8 (Dense)              (None, 100)               51300     
=================================================================
Total params: 29,163,612
Trainable params: 29,163,612
Non-trainable params: 0
_________________________________________________________________
Train on 16000 samples, validate on 4000 samples
Epoch 1/10
628s - loss: 3.0033 - acc: 0.3143 - val_loss: 1.4350 - val_acc: 0.6335
Epoch 2/10
627s - loss: 0.9955 - acc: 0.7432 - val_loss: 0.9721 - val_acc: 0.7412
Epoch 3/10
628s - loss: 0.4991 - acc: 0.8714 - val_loss: 0.8880 - val_acc: 0.7665
Epoch 4/10
627s - loss: 0.2378 - acc: 0.9445 - val_loss: 0.7997 - val_acc: 0.7925
Epoch 5/10
628s - loss: 0.1008 - acc: 0.9820 - val_loss: 0.8052 - val_acc: 0.8007
Epoch 6/10
628s - loss: 0.0426 - acc: 0.9952 - val_loss: 0.7705 - val_acc: 0.8103
Epoch 7/10
627s - loss: 0.0285 - acc: 0.9955 - val_loss: 0.7524 - val_acc: 0.8163
Epoch 8/10
628s - loss: 0.0201 - acc: 0.9966 - val_loss: 0.7560 - val_acc: 0.8165
Epoch 9/10
627s - loss: 0.0154 - acc: 0.9972 - val_loss: 0.7697 - val_acc: 0.8173
Epoch 10/10

In [ ]:
hist_array = []
for i, (X_train, X_test, y_train, y_test) in enumerate(array_sets):
    if i == 0:
        model = get_conv_model(embeding_size=143950) # 53.1 6-epoch
    if i == 1:
        model = get_conv_model(embeding_size=741852) # 53.1 6-epoch
    if i == 2:
        model = get_conv_model(embeding_size=1359198) # 53.1 6-epoch
        
    model.summary()

    BATCH_SIZE = 32
    EPOCHS = 10
    VERBOSE = 2

    history = model.fit(X_train, y_train,
                        batch_size=BATCH_SIZE,
                        epochs=EPOCHS, verbose=VERBOSE,
                        validation_data=(X_test, y_test)
                        #validation_split=0.1, 
                        #callbacks=[EarlyStopping(monitor='val_loss')]
                       )
    #hist_array.append(history)


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_1 (Embedding)      (None, None, 200)         28790000  
_________________________________________________________________
spatial_dropout1d_1 (Spatial (None, None, 200)         0         
_________________________________________________________________
conv1d_1 (Conv1D)            (None, None, 512)         307712    
_________________________________________________________________
global_max_pooling1d_1 (Glob (None, 512)               0         
_________________________________________________________________
dense_1 (Dense)              (None, 100)               51300     
=================================================================
Total params: 29,149,012
Trainable params: 29,149,012
Non-trainable params: 0
_________________________________________________________________
Train on 33552 samples, validate on 8389 samples
Epoch 1/10
1371s - loss: 2.2435 - acc: 0.4698 - val_loss: 1.2052 - val_acc: 0.6814
Epoch 2/10
1323s - loss: 0.8342 - acc: 0.7783 - val_loss: 0.9067 - val_acc: 0.7600
Epoch 3/10
1390s - loss: 0.4833 - acc: 0.8700 - val_loss: 0.8965 - val_acc: 0.7676
Epoch 4/10
1385s - loss: 0.2750 - acc: 0.9281 - val_loss: 0.8238 - val_acc: 0.7907
Epoch 5/10
1412s - loss: 0.1448 - acc: 0.9633 - val_loss: 0.9298 - val_acc: 0.7791
Epoch 6/10
1323s - loss: 0.0879 - acc: 0.9785 - val_loss: 1.0081 - val_acc: 0.7832
Epoch 7/10
1322s - loss: 0.0860 - acc: 0.9765 - val_loss: 1.1037 - val_acc: 0.7758
Epoch 8/10
1322s - loss: 0.0650 - acc: 0.9814 - val_loss: 1.0688 - val_acc: 0.7877
Epoch 9/10
1323s - loss: 0.0590 - acc: 0.9846 - val_loss: 1.1037 - val_acc: 0.7896
Epoch 10/10
1360s - loss: 0.0540 - acc: 0.9849 - val_loss: 1.2377 - val_acc: 0.7833
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_2 (Embedding)      (None, None, 200)         148370400 
_________________________________________________________________
spatial_dropout1d_2 (Spatial (None, None, 200)         0         
_________________________________________________________________
conv1d_2 (Conv1D)            (None, None, 512)         307712    
_________________________________________________________________
global_max_pooling1d_2 (Glob (None, 512)               0         
_________________________________________________________________
dense_2 (Dense)              (None, 100)               51300     
=================================================================
Total params: 148,729,412
Trainable params: 148,729,412
Non-trainable params: 0
_________________________________________________________________
/anaconda/envs/py35/lib/python3.5/site-packages/tensorflow/python/ops/gradients_impl.py:90: UserWarning: Converting sparse IndexedSlices to a dense Tensor with 148370400 elements. This may consume a large amount of memory.
  "This may consume a large amount of memory." % num_elements)
Train on 33552 samples, validate on 8389 samples
Epoch 1/10
1467s - loss: 2.1483 - acc: 0.4987 - val_loss: 1.1091 - val_acc: 0.7116
Epoch 2/10
1513s - loss: 0.7308 - acc: 0.8094 - val_loss: 0.8739 - val_acc: 0.7715
Epoch 3/10
1566s - loss: 0.3297 - acc: 0.9128 - val_loss: 0.8892 - val_acc: 0.7771
Epoch 4/10
1633s - loss: 0.1196 - acc: 0.9724 - val_loss: 0.8766 - val_acc: 0.7870
Epoch 5/10
1634s - loss: 0.0529 - acc: 0.9876 - val_loss: 0.9243 - val_acc: 0.7898
Epoch 6/10
1633s - loss: 0.0532 - acc: 0.9866 - val_loss: 1.0945 - val_acc: 0.7703
Epoch 7/10
1633s - loss: 0.0816 - acc: 0.9751 - val_loss: 1.1049 - val_acc: 0.7883
Epoch 8/10
1633s - loss: 0.0481 - acc: 0.9865 - val_loss: 1.3241 - val_acc: 0.7672
Epoch 9/10

In [26]:
X_train, X_test, y_train, y_test = array_sets[2]

model = get_conv_model(embeding_size=1359198) # 53.1 6-epoch

model.summary()

BATCH_SIZE = 8
EPOCHS = 10
VERBOSE = 2

history = model.fit(X_train, y_train,
                    batch_size=BATCH_SIZE,
                    epochs=EPOCHS, verbose=VERBOSE,
                    validation_data=(X_test, y_test)
                    #validation_split=0.1, 
                    #callbacks=[EarlyStopping(monitor='val_loss')]
                   )


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_1 (Embedding)      (None, None, 200)         271839600 
_________________________________________________________________
spatial_dropout1d_1 (Spatial (None, None, 200)         0         
_________________________________________________________________
conv1d_1 (Conv1D)            (None, None, 512)         307712    
_________________________________________________________________
global_max_pooling1d_1 (Glob (None, 512)               0         
_________________________________________________________________
dense_1 (Dense)              (None, 100)               51300     
=================================================================
Total params: 272,198,612
Trainable params: 272,198,612
Non-trainable params: 0
_________________________________________________________________
/anaconda/envs/py35/lib/python3.5/site-packages/tensorflow/python/ops/gradients_impl.py:90: UserWarning: Converting sparse IndexedSlices to a dense Tensor with 271839600 elements. This may consume a large amount of memory.
  "This may consume a large amount of memory." % num_elements)
Train on 33552 samples, validate on 8389 samples
Epoch 1/10
1313s - loss: 2.0779 - acc: 0.4987 - val_loss: 1.2015 - val_acc: 0.6852
Epoch 2/10
1298s - loss: 0.5818 - acc: 0.8517 - val_loss: 1.0358 - val_acc: 0.7355
Epoch 3/10
1294s - loss: 0.0694 - acc: 0.9855 - val_loss: 1.1773 - val_acc: 0.7192
Epoch 4/10
1289s - loss: 0.0301 - acc: 0.9926 - val_loss: 1.5661 - val_acc: 0.6690
Epoch 5/10
1289s - loss: 0.0271 - acc: 0.9927 - val_loss: 1.4401 - val_acc: 0.7010
Epoch 6/10
1291s - loss: 0.0208 - acc: 0.9940 - val_loss: 1.5282 - val_acc: 0.7052
Epoch 7/10
1289s - loss: 0.0199 - acc: 0.9948 - val_loss: 1.4818 - val_acc: 0.7196
Epoch 8/10
1292s - loss: 0.0159 - acc: 0.9958 - val_loss: 1.5540 - val_acc: 0.7190
Epoch 9/10
1293s - loss: 0.0218 - acc: 0.9941 - val_loss: 1.6801 - val_acc: 0.7101
Epoch 10/10
1275s - loss: 0.0082 - acc: 0.6891 - val_loss: 1.1921e-07 - val_acc: 0.0123

In [1]:
len(hist_array)


---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-1-20cb28469f75> in <module>()
----> 1 len(hist_array)

NameError: name 'hist_array' is not defined

In [ ]:
%%time
#model = get_lstm_model(120000) # 34.10 10-epoch
#model = get_bidirectional_lstm() # 50.86 20-epoch
model = get_conv_model() # 53.1 6-epoch
#model = get_conv_conv_model() # 54.04 7-epoch
#model = get_conv_lstm_model() # 53.17 10-epoch
#model = get_lstm_conv_model() # 52.60 10-epoch

model.summary()

BATCH_SIZE = 32
EPOCHS = 7
VERBOSE = 2

history = model.fit(X_train, y_train, 
                    batch_size=BATCH_SIZE, 
                    epochs=EPOCHS, verbose=VERBOSE, 
                    validation_split=0.1, 
                    #callbacks=[EarlyStopping(monitor='val_loss')]
                   )


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_2 (Embedding)      (None, None, 200)         30192200  
_________________________________________________________________
spatial_dropout1d_2 (Spatial (None, None, 200)         0         
_________________________________________________________________
conv1d_2 (Conv1D)            (None, None, 512)         307712    
_________________________________________________________________
global_max_pooling1d_2 (Glob (None, 512)               0         
_________________________________________________________________
dense_2 (Dense)              (None, 50)                25650     
=================================================================
Total params: 30,525,562
Trainable params: 30,525,562
Non-trainable params: 0
_________________________________________________________________
Train on 21942 samples, validate on 2438 samples
Epoch 1/7
880s - loss: 1.6866 - acc: 0.5625 - val_loss: 0.8000 - val_acc: 0.7834
Epoch 2/7
845s - loss: 0.5167 - acc: 0.8571 - val_loss: 0.6162 - val_acc: 0.8322
Epoch 3/7
839s - loss: 0.2761 - acc: 0.9240 - val_loss: 0.5604 - val_acc: 0.8450
Epoch 4/7
884s - loss: 0.1374 - acc: 0.9642 - val_loss: 0.5371 - val_acc: 0.8548
Epoch 5/7
869s - loss: 0.0684 - acc: 0.9842 - val_loss: 0.5467 - val_acc: 0.8548
Epoch 6/7
906s - loss: 0.0384 - acc: 0.9906 - val_loss: 0.5350 - val_acc: 0.8610
Epoch 7/7
841s - loss: 0.0285 - acc: 0.9919 - val_loss: 0.5093 - val_acc: 0.8745
CPU times: user 50min 53s, sys: 19min 13s, total: 1h 10min 7s
Wall time: 1h 41min 9s

In [59]:
print('Точность модели составляет: {}'.format(model.evaluate(X_test, y_test, batch_size=64, verbose=2)[1] * 100))


Точность модели составляет: 88.3182937009497

In [60]:
from matplotlib import pyplot as plt
print(history.history.keys())
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'valid'], loc='upper left')
plt.show();
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'valid'], loc='upper left')
plt.show();


dict_keys(['val_loss', 'loss', 'val_acc', 'acc'])

In [44]:
def save_model(model, model_name):
    # Генерируем описание модели в формате json
    model_json = model.to_json()
    # Записываем модель в файл
    json_file = open("model/{}_model.json".format(model_name), "w")
    json_file.write(model_json)
    json_file.close()
    model.save_weights("model/{}_weights.h5".format(model_name))
    print('Модель и веса успешно сохранены!')

In [58]:
save_model(model, 'vk_98percent_cnn')


Модель и веса успешно сохранены!

In [ ]: