Sentence Quality Classification

Classifiy sentences into high / low quality. This is used to improve data quality for texts from different sources (scanned PDFs, crawled HTML, parsed Wikipedia, ..). It's mainly used to filter out artifacts from faulty parsing, HTML fragments, navigation elements, non-sentences (references, titles, ..)


In [22]:
%matplotlib inline
%load_ext autoreload
%autoreload 2

import load_config
import json
import nltk

### define a new configuration
config_dict = {
    # training data
    "train_file": "data/sentence-quality-8k.csv",
     
    # model
   
    # chose tokenizer. possible values: "nst" and "sgt"
    ## NonStemmingTokenizer: 'nst'
    # - no stemming, only remove punctuation marks
    # - lowercase letters

    ## SimpleGermanTokenizer: 'sgt'
    # - remove punctuation marks
    # - stemming
    # - lowercase letters
    "tokenizer": "nst",
    
    # where to store the configuration file
    "config_path": "data/configuration-sentence-quality-classifier.json"
}


The autoreload extension is already loaded. To reload it, use:
  %reload_ext autoreload

In [23]:
### save config file (don't change)
config_src = config_dict["config_path"]

with open(config_src, 'w+') as f:
    json.dump(config_dict, f, indent=4)
    
# load config object based on config file (don't change)
config = load_config.Configuration(config_src, True)

In [24]:
from tokenizer import get_tokenizer
tokenizer = get_tokenizer(config_dict['tokenizer'])

In [25]:
# load training data
import pandas as pd 

data = pd.read_csv(config['train_file']) 
data = data.sample(frac=1).reset_index(drop=True)

In [39]:
# set targets
print(len(data.index))
data = data.dropna(subset= ['Klassierung'])
print(len(data.index))
data.groupby('Klassierung').Klassierung.count().plot.bar()

#data['label'] = pd.Series(['high' if a else 'low' for a in data['Klassierung'].isin(['SENTENCE'])], index=data.index)
data['label'] = pd.Series(data['Klassierung'], index=data.index)

# set label / classes
classes = list(data['label'].unique())
data.head()
print(classes)


8007
8005
['SENTENCE', 'LOW_QUALITY', 'TITLE', 'REFERENCE', 'FOREIGN']

In [40]:
# character frequency analysis

high_quality = data.loc[data['Klassierung'].isin(['SENTENCE', 'REFERENCE'])]
low_quality = data.loc[[not a for a in data['Klassierung'].isin(['SENTENCE', 'REFERENCE'])]]

fdc = nltk.FreqDist()
tokens_h = []
for s in high_quality['Satz']:
    tokens_h.extend(tokenizer.tokenize(s))
    fdc.update(s)
    
# character frequencies
fdc.plot(40,cumulative=False)

tokens_l = []
fdc2 = nltk.FreqDist()
for s in low_quality['Satz']:
    tokens_l.extend(tokenizer.tokenize(s))
    fdc2.update(s)
    fdc.update(s)
    
# character frequencies
fdc2.plot(40,cumulative=False)

# word frequencies
fd = nltk.FreqDist(tokens_h)
fd.plot(30,cumulative=False) 
fd = nltk.FreqDist(tokens_l)
fd.plot(30,cumulative=False)



In [41]:
# Keras LSTM input data

# one hot encoded characters
from keras.preprocessing.text import Tokenizer
tokenizer_char = Tokenizer(char_level=True)
tokenizer_char.fit_on_texts(data['Satz'])
char_int_encoded = tokenizer_char.texts_to_sequences(data['Satz'])

In [42]:
# set input data

import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer

tfidf = TfidfVectorizer(sublinear_tf=True, min_df=8, norm='l2', encoding='utf8', ngram_range=(2, 2), stop_words='english')
tfidf = tfidf.fit(data.Satz)

fd = nltk.FreqDist(tokens_h + tokens_l)

voc_chars = list(fdc.keys())[:200]
voc_words = list(fd.keys())[:500]

def convert_sentence_to_input(s):
    tokens = tokenizer.tokenize(s)
    fd_words = nltk.FreqDist(tokens)
    fd_chars = nltk.FreqDist(s)
    
    words = [fd_words.freq(w) for w in voc_words]
    chars = [fd_chars.freq(c) for c in voc_chars]
    
    words_in_vocab = sum(1 for i in words if i > 0.0)
    long_tail = float(words_in_vocab) /  len(tokens)
    
    tfidf_features = list(tfidf.transform([s]).toarray()[0])
    
    return np.array(words + chars + [long_tail/10.0], dtype=np.float32)

data['input'] = pd.Series([[convert_sentence_to_input(s)] for s in data['Satz']], index=data.index)
data.head()


Out[42]:
Satz Tokens Klassierung Bemerkungen label input
0 Durch die Ausnutzung der Schwerkraft kann di... ['durch', 'die', 'ausnutzung', 'der', 'schwerk... SENTENCE NaN SENTENCE [[0.0384615, 0.0769231, 0.0384615, 0.0384615, ...
1 Er ist vor allem mit seinen Arbeiten zum Thema... ['er', 'ist', 'vor', 'allem', 'mit', 'seinen',... SENTENCE NaN SENTENCE [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0588235...
2 An anderen [[Monoamin]]transportern wirken sie... ['an', 'anderen', 'monoamin', 'transportern', ... SENTENCE NaN SENTENCE [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,...
3 Etwa die Hälfte der an einem Schlaganfall-Erkr... ['etwa', 'die', 'hälfte', 'der', 'an', 'einem'... LOW_QUALITY NaN LOW_QUALITY [[0.0, 0.0294118, 0.0, 0.0294118, 0.0, 0.0, 0....
4 Schwerpunktthema: Was ist gesichert in der The... ['schwerpunktthema', 'was', 'ist', 'gesichert'... TITLE NaN TITLE [[0.0, 0.0, 0.0, 0.142857, 0.0, 0.0, 0.0, 0.0,...

In [43]:
# fit scikit learn models

from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer

from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC, SVC
from sklearn.neural_network import MLPClassifier

models = [
    RandomForestClassifier(n_estimators=200, max_depth=4, random_state=0),
    RandomForestClassifier(n_estimators=200, max_depth=8, random_state=0),
    RandomForestClassifier(n_estimators=200, max_depth=16, random_state=0),
    RandomForestClassifier(n_estimators=200, max_depth=32, random_state=0),
    RandomForestClassifier(n_estimators=200, max_depth=64, random_state=0),
    RandomForestClassifier(n_estimators=200, max_depth=128, random_state=0),
    LinearSVC(),
    # MultinomialNB(),
    LogisticRegression(random_state=0),
    SVC(kernel='linear', gamma=2),
    # SVC(kernel='poly', gamma=2),
    SVC(kernel='rbf', gamma=2),
    MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(8), random_state=1),
    MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(16), random_state=1),
    MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(32), random_state=1),
    MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(32, 16), random_state=1),
    MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(32, 16, 8), random_state=1)
]

X_train, X_test, char_int_encoded_train, char_int_encoded_test, y_train, y_test = train_test_split(np.array([e[0] for e in data['input']]), char_int_encoded, data['label'], random_state = 2)

print(X_train.shape)

# count_vect = CountVectorizer()
# X_train_counts = count_vect.fit_transform(X_train)
# tfidf_transformer = TfidfTransformer()
# X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)

models = [model.fit(X_train, y_train) for model in models]


(6003, 701)

In [44]:
# model evaluation

for model in models:
    y_pred = model.predict(X_test)
    from sklearn.metrics import confusion_matrix
    from sklearn.metrics import accuracy_score

    print(model.__class__.__name__ + ": " + str(accuracy_score(y_test, y_pred)))

    conf_mat = confusion_matrix(y_test, y_pred)

    import matplotlib.pyplot as plt
    import seaborn as sns

    fig, ax = plt.subplots(figsize=(10,10))
    sns.heatmap(conf_mat, annot=True, fmt='d',
                xticklabels=model.classes_, yticklabels=model.classes_)
    plt.ylabel('Actual')
    plt.xlabel('Predicted')
    # plt.show()


RandomForestClassifier: 0.665334665335
RandomForestClassifier: 0.743256743257
RandomForestClassifier: 0.775724275724
RandomForestClassifier: 0.792707292707
RandomForestClassifier: 0.789210789211
RandomForestClassifier: 0.79020979021
LinearSVC: 0.733266733267
LogisticRegression: 0.656343656344
SVC: 0.682317682318
SVC: 0.732267732268
MLPClassifier: 0.762737262737
MLPClassifier: 0.746753246753
MLPClassifier: 0.754245754246
MLPClassifier: 0.742257742258
MLPClassifier: 0.729270729271

In [45]:
# Keras MLP
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
from keras.callbacks import EarlyStopping
from sklearn.preprocessing import LabelEncoder
from keras.utils import np_utils

batch_size = 128
num_classes = len(classes)
epochs = 1000


# encode class values as integers
encoder = LabelEncoder()
encoder.fit(y_train)
encoded_y_train = encoder.transform(y_train)
encoded_y_test = encoder.transform(y_test)
# convert integers to dummy variables (i.e. one hot encoded)
dummy_y_train = np_utils.to_categorical(encoded_y_train)
dummy_y_test = np_utils.to_categorical(encoded_y_test)

model = Sequential()
model.add(Dense(128, activation='relu', input_shape=(X_train.shape[1],)))
model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='softmax'))

model.summary()

# early stopping criterion
early_stopping = EarlyStopping(monitor='val_acc', patience=20)

model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(),
              metrics=['accuracy'])

history = model.fit(X_train, dummy_y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(X_test, dummy_y_test),
                    callbacks=[early_stopping])
score = model.evaluate(X_test, dummy_y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense_4 (Dense)              (None, 128)               89856     
_________________________________________________________________
dropout_2 (Dropout)          (None, 128)               0         
_________________________________________________________________
dense_5 (Dense)              (None, 5)                 645       
=================================================================
Total params: 90,501
Trainable params: 90,501
Non-trainable params: 0
_________________________________________________________________
Train on 6003 samples, validate on 2002 samples
Epoch 1/1000
6003/6003 [==============================] - 0s - loss: 1.2974 - acc: 0.5332 - val_loss: 1.1266 - val_acc: 0.5195
Epoch 2/1000
6003/6003 [==============================] - 0s - loss: 1.0379 - acc: 0.5579 - val_loss: 1.0213 - val_acc: 0.5509
Epoch 3/1000
6003/6003 [==============================] - 0s - loss: 0.9490 - acc: 0.5994 - val_loss: 0.9472 - val_acc: 0.6049
Epoch 4/1000
6003/6003 [==============================] - 0s - loss: 0.8797 - acc: 0.6483 - val_loss: 0.8846 - val_acc: 0.6508
Epoch 5/1000
6003/6003 [==============================] - 0s - loss: 0.8245 - acc: 0.6835 - val_loss: 0.8370 - val_acc: 0.6933
Epoch 6/1000
6003/6003 [==============================] - 0s - loss: 0.7818 - acc: 0.7091 - val_loss: 0.7996 - val_acc: 0.7133
Epoch 7/1000
6003/6003 [==============================] - 0s - loss: 0.7511 - acc: 0.7223 - val_loss: 0.7723 - val_acc: 0.7133
Epoch 8/1000
6003/6003 [==============================] - 0s - loss: 0.7244 - acc: 0.7333 - val_loss: 0.7525 - val_acc: 0.7263
Epoch 9/1000
6003/6003 [==============================] - 0s - loss: 0.7051 - acc: 0.7393 - val_loss: 0.7328 - val_acc: 0.7243
Epoch 10/1000
6003/6003 [==============================] - 0s - loss: 0.6902 - acc: 0.7448 - val_loss: 0.7207 - val_acc: 0.7288
Epoch 11/1000
6003/6003 [==============================] - 0s - loss: 0.6747 - acc: 0.7466 - val_loss: 0.7088 - val_acc: 0.7328
Epoch 12/1000
6003/6003 [==============================] - 0s - loss: 0.6635 - acc: 0.7518 - val_loss: 0.6981 - val_acc: 0.7368
Epoch 13/1000
6003/6003 [==============================] - 0s - loss: 0.6510 - acc: 0.7588 - val_loss: 0.6909 - val_acc: 0.7368
Epoch 14/1000
6003/6003 [==============================] - 0s - loss: 0.6407 - acc: 0.7643 - val_loss: 0.6829 - val_acc: 0.7398
Epoch 15/1000
6003/6003 [==============================] - 0s - loss: 0.6324 - acc: 0.7611 - val_loss: 0.6753 - val_acc: 0.7478
Epoch 16/1000
6003/6003 [==============================] - 0s - loss: 0.6227 - acc: 0.7643 - val_loss: 0.6702 - val_acc: 0.7478
Epoch 17/1000
6003/6003 [==============================] - 0s - loss: 0.6157 - acc: 0.7674 - val_loss: 0.6659 - val_acc: 0.7448
Epoch 18/1000
6003/6003 [==============================] - 0s - loss: 0.6065 - acc: 0.7724 - val_loss: 0.6585 - val_acc: 0.7502
Epoch 19/1000
6003/6003 [==============================] - 0s - loss: 0.5985 - acc: 0.7753 - val_loss: 0.6555 - val_acc: 0.7483
Epoch 20/1000
6003/6003 [==============================] - 0s - loss: 0.5931 - acc: 0.7764 - val_loss: 0.6487 - val_acc: 0.7493
Epoch 21/1000
6003/6003 [==============================] - 0s - loss: 0.5881 - acc: 0.7766 - val_loss: 0.6466 - val_acc: 0.7493
Epoch 22/1000
6003/6003 [==============================] - 0s - loss: 0.5810 - acc: 0.7776 - val_loss: 0.6403 - val_acc: 0.7532
Epoch 23/1000
6003/6003 [==============================] - 0s - loss: 0.5739 - acc: 0.7806 - val_loss: 0.6368 - val_acc: 0.7542
Epoch 24/1000
6003/6003 [==============================] - 0s - loss: 0.5670 - acc: 0.7851 - val_loss: 0.6348 - val_acc: 0.7537
Epoch 25/1000
6003/6003 [==============================] - 0s - loss: 0.5629 - acc: 0.7858 - val_loss: 0.6332 - val_acc: 0.7562
Epoch 26/1000
6003/6003 [==============================] - 0s - loss: 0.5586 - acc: 0.7879 - val_loss: 0.6284 - val_acc: 0.7562
Epoch 27/1000
6003/6003 [==============================] - 0s - loss: 0.5533 - acc: 0.7896 - val_loss: 0.6299 - val_acc: 0.7587
Epoch 28/1000
6003/6003 [==============================] - 0s - loss: 0.5471 - acc: 0.7941 - val_loss: 0.6229 - val_acc: 0.7572
Epoch 29/1000
6003/6003 [==============================] - 0s - loss: 0.5442 - acc: 0.7894 - val_loss: 0.6212 - val_acc: 0.7552
Epoch 30/1000
6003/6003 [==============================] - 0s - loss: 0.5380 - acc: 0.7946 - val_loss: 0.6218 - val_acc: 0.7547
Epoch 31/1000
6003/6003 [==============================] - 0s - loss: 0.5353 - acc: 0.7939 - val_loss: 0.6185 - val_acc: 0.7592
Epoch 32/1000
6003/6003 [==============================] - 0s - loss: 0.5339 - acc: 0.7958 - val_loss: 0.6162 - val_acc: 0.7587
Epoch 33/1000
6003/6003 [==============================] - 0s - loss: 0.5297 - acc: 0.7953 - val_loss: 0.6157 - val_acc: 0.7592
Epoch 34/1000
6003/6003 [==============================] - 0s - loss: 0.5244 - acc: 0.7966 - val_loss: 0.6140 - val_acc: 0.7627
Epoch 35/1000
6003/6003 [==============================] - 0s - loss: 0.5213 - acc: 0.8011 - val_loss: 0.6125 - val_acc: 0.7562
Epoch 36/1000
6003/6003 [==============================] - 0s - loss: 0.5183 - acc: 0.8016 - val_loss: 0.6127 - val_acc: 0.7592
Epoch 37/1000
6003/6003 [==============================] - 0s - loss: 0.5150 - acc: 0.8011 - val_loss: 0.6121 - val_acc: 0.7622
Epoch 38/1000
6003/6003 [==============================] - 0s - loss: 0.5104 - acc: 0.8014 - val_loss: 0.6098 - val_acc: 0.7567
Epoch 39/1000
6003/6003 [==============================] - 0s - loss: 0.5110 - acc: 0.8026 - val_loss: 0.6095 - val_acc: 0.7597
Epoch 40/1000
6003/6003 [==============================] - 0s - loss: 0.5079 - acc: 0.8064 - val_loss: 0.6075 - val_acc: 0.7637
Epoch 41/1000
6003/6003 [==============================] - 0s - loss: 0.5037 - acc: 0.8058 - val_loss: 0.6072 - val_acc: 0.7612
Epoch 42/1000
6003/6003 [==============================] - 0s - loss: 0.5017 - acc: 0.8074 - val_loss: 0.6077 - val_acc: 0.7647
Epoch 43/1000
6003/6003 [==============================] - 0s - loss: 0.4997 - acc: 0.8083 - val_loss: 0.6076 - val_acc: 0.7627
Epoch 44/1000
6003/6003 [==============================] - 0s - loss: 0.4958 - acc: 0.8069 - val_loss: 0.6086 - val_acc: 0.7647
Epoch 45/1000
6003/6003 [==============================] - 0s - loss: 0.4953 - acc: 0.8103 - val_loss: 0.6043 - val_acc: 0.7627
Epoch 46/1000
6003/6003 [==============================] - 0s - loss: 0.4917 - acc: 0.8134 - val_loss: 0.6062 - val_acc: 0.7652
Epoch 47/1000
6003/6003 [==============================] - 0s - loss: 0.4900 - acc: 0.8096 - val_loss: 0.6039 - val_acc: 0.7687
Epoch 48/1000
6003/6003 [==============================] - 0s - loss: 0.4898 - acc: 0.8088 - val_loss: 0.6054 - val_acc: 0.7662
Epoch 49/1000
6003/6003 [==============================] - 0s - loss: 0.4855 - acc: 0.8131 - val_loss: 0.6031 - val_acc: 0.7667
Epoch 50/1000
6003/6003 [==============================] - 0s - loss: 0.4858 - acc: 0.8161 - val_loss: 0.6044 - val_acc: 0.7662
Epoch 51/1000
6003/6003 [==============================] - 0s - loss: 0.4822 - acc: 0.8116 - val_loss: 0.6034 - val_acc: 0.7687
Epoch 52/1000
6003/6003 [==============================] - 0s - loss: 0.4811 - acc: 0.8134 - val_loss: 0.6018 - val_acc: 0.7672
Epoch 53/1000
6003/6003 [==============================] - 0s - loss: 0.4805 - acc: 0.8133 - val_loss: 0.6053 - val_acc: 0.7697
Epoch 54/1000
6003/6003 [==============================] - 0s - loss: 0.4757 - acc: 0.8183 - val_loss: 0.6075 - val_acc: 0.7632
Epoch 55/1000
6003/6003 [==============================] - 0s - loss: 0.4767 - acc: 0.8181 - val_loss: 0.6063 - val_acc: 0.7682
Epoch 56/1000
6003/6003 [==============================] - 0s - loss: 0.4754 - acc: 0.8171 - val_loss: 0.6015 - val_acc: 0.7692
Epoch 57/1000
6003/6003 [==============================] - 0s - loss: 0.4706 - acc: 0.8198 - val_loss: 0.6040 - val_acc: 0.7682
Epoch 58/1000
6003/6003 [==============================] - 0s - loss: 0.4697 - acc: 0.8181 - val_loss: 0.6096 - val_acc: 0.7612
Epoch 59/1000
6003/6003 [==============================] - 0s - loss: 0.4666 - acc: 0.8213 - val_loss: 0.6041 - val_acc: 0.7702
Epoch 60/1000
6003/6003 [==============================] - 0s - loss: 0.4670 - acc: 0.8204 - val_loss: 0.6093 - val_acc: 0.7617
Epoch 61/1000
6003/6003 [==============================] - 0s - loss: 0.4643 - acc: 0.8214 - val_loss: 0.6045 - val_acc: 0.7732
Epoch 62/1000
6003/6003 [==============================] - 0s - loss: 0.4623 - acc: 0.8204 - val_loss: 0.6027 - val_acc: 0.7702
Epoch 63/1000
6003/6003 [==============================] - 0s - loss: 0.4600 - acc: 0.8263 - val_loss: 0.6030 - val_acc: 0.7717
Epoch 64/1000
6003/6003 [==============================] - 0s - loss: 0.4590 - acc: 0.8254 - val_loss: 0.6050 - val_acc: 0.7692
Epoch 65/1000
6003/6003 [==============================] - 0s - loss: 0.4576 - acc: 0.8241 - val_loss: 0.6046 - val_acc: 0.7682
Epoch 66/1000
6003/6003 [==============================] - 0s - loss: 0.4582 - acc: 0.8219 - val_loss: 0.6054 - val_acc: 0.7722
Epoch 67/1000
6003/6003 [==============================] - 0s - loss: 0.4555 - acc: 0.8263 - val_loss: 0.6049 - val_acc: 0.7687
Epoch 68/1000
6003/6003 [==============================] - 0s - loss: 0.4517 - acc: 0.8248 - val_loss: 0.6076 - val_acc: 0.7647
Epoch 69/1000
6003/6003 [==============================] - 0s - loss: 0.4489 - acc: 0.8249 - val_loss: 0.6053 - val_acc: 0.7712
Epoch 70/1000
6003/6003 [==============================] - 0s - loss: 0.4497 - acc: 0.8256 - val_loss: 0.6050 - val_acc: 0.7727
Epoch 71/1000
6003/6003 [==============================] - 0s - loss: 0.4466 - acc: 0.8298 - val_loss: 0.6062 - val_acc: 0.7702
Epoch 72/1000
6003/6003 [==============================] - 0s - loss: 0.4458 - acc: 0.8281 - val_loss: 0.6045 - val_acc: 0.7722
Epoch 73/1000
6003/6003 [==============================] - 0s - loss: 0.4434 - acc: 0.8271 - val_loss: 0.6074 - val_acc: 0.7672
Epoch 74/1000
6003/6003 [==============================] - 0s - loss: 0.4448 - acc: 0.8304 - val_loss: 0.6122 - val_acc: 0.7632
Epoch 75/1000
6003/6003 [==============================] - 0s - loss: 0.4419 - acc: 0.8298 - val_loss: 0.6059 - val_acc: 0.7742
Epoch 76/1000
6003/6003 [==============================] - 0s - loss: 0.4378 - acc: 0.8354 - val_loss: 0.6052 - val_acc: 0.7732
Epoch 77/1000
6003/6003 [==============================] - 0s - loss: 0.4373 - acc: 0.8318 - val_loss: 0.6049 - val_acc: 0.7712
Epoch 78/1000
6003/6003 [==============================] - 0s - loss: 0.4388 - acc: 0.8314 - val_loss: 0.6101 - val_acc: 0.7682
Epoch 79/1000
6003/6003 [==============================] - 0s - loss: 0.4356 - acc: 0.8309 - val_loss: 0.6053 - val_acc: 0.7757
Epoch 80/1000
6003/6003 [==============================] - 0s - loss: 0.4369 - acc: 0.8339 - val_loss: 0.6061 - val_acc: 0.7737
Epoch 81/1000
6003/6003 [==============================] - 0s - loss: 0.4323 - acc: 0.8311 - val_loss: 0.6084 - val_acc: 0.7732
Epoch 82/1000
6003/6003 [==============================] - 0s - loss: 0.4308 - acc: 0.8359 - val_loss: 0.6093 - val_acc: 0.7687
Epoch 83/1000
6003/6003 [==============================] - 0s - loss: 0.4332 - acc: 0.8326 - val_loss: 0.6062 - val_acc: 0.7757
Epoch 84/1000
6003/6003 [==============================] - 0s - loss: 0.4337 - acc: 0.8329 - val_loss: 0.6072 - val_acc: 0.7747
Epoch 85/1000
6003/6003 [==============================] - 0s - loss: 0.4302 - acc: 0.8356 - val_loss: 0.6104 - val_acc: 0.7702
Epoch 86/1000
6003/6003 [==============================] - 0s - loss: 0.4294 - acc: 0.8354 - val_loss: 0.6098 - val_acc: 0.7742
Epoch 87/1000
6003/6003 [==============================] - 0s - loss: 0.4265 - acc: 0.8357 - val_loss: 0.6123 - val_acc: 0.7732
Epoch 88/1000
6003/6003 [==============================] - 0s - loss: 0.4239 - acc: 0.8374 - val_loss: 0.6077 - val_acc: 0.7772
Epoch 89/1000
6003/6003 [==============================] - 0s - loss: 0.4224 - acc: 0.8396 - val_loss: 0.6082 - val_acc: 0.7747
Epoch 90/1000
6003/6003 [==============================] - 0s - loss: 0.4235 - acc: 0.8351 - val_loss: 0.6120 - val_acc: 0.7737
Epoch 91/1000
6003/6003 [==============================] - 0s - loss: 0.4225 - acc: 0.8367 - val_loss: 0.6105 - val_acc: 0.7742
Epoch 92/1000
6003/6003 [==============================] - 0s - loss: 0.4214 - acc: 0.8394 - val_loss: 0.6106 - val_acc: 0.7767
Epoch 93/1000
6003/6003 [==============================] - 0s - loss: 0.4185 - acc: 0.8411 - val_loss: 0.6114 - val_acc: 0.7762
Epoch 94/1000
6003/6003 [==============================] - 0s - loss: 0.4184 - acc: 0.8391 - val_loss: 0.6149 - val_acc: 0.7697
Epoch 95/1000
6003/6003 [==============================] - 0s - loss: 0.4188 - acc: 0.8379 - val_loss: 0.6117 - val_acc: 0.7757
Epoch 96/1000
6003/6003 [==============================] - 0s - loss: 0.4181 - acc: 0.8431 - val_loss: 0.6139 - val_acc: 0.7757
Epoch 97/1000
6003/6003 [==============================] - 0s - loss: 0.4118 - acc: 0.8437 - val_loss: 0.6127 - val_acc: 0.7717
Epoch 98/1000
6003/6003 [==============================] - 0s - loss: 0.4105 - acc: 0.8429 - val_loss: 0.6162 - val_acc: 0.7772
Epoch 99/1000
6003/6003 [==============================] - 0s - loss: 0.4127 - acc: 0.8429 - val_loss: 0.6156 - val_acc: 0.7757
Epoch 100/1000
6003/6003 [==============================] - 0s - loss: 0.4102 - acc: 0.8414 - val_loss: 0.6146 - val_acc: 0.7772
Epoch 101/1000
6003/6003 [==============================] - 0s - loss: 0.4085 - acc: 0.8487 - val_loss: 0.6177 - val_acc: 0.7752
Epoch 102/1000
6003/6003 [==============================] - 0s - loss: 0.4109 - acc: 0.8426 - val_loss: 0.6151 - val_acc: 0.7767
Epoch 103/1000
6003/6003 [==============================] - 0s - loss: 0.4076 - acc: 0.8444 - val_loss: 0.6152 - val_acc: 0.7762
Epoch 104/1000
6003/6003 [==============================] - 0s - loss: 0.4072 - acc: 0.8446 - val_loss: 0.6216 - val_acc: 0.7712
Epoch 105/1000
6003/6003 [==============================] - 0s - loss: 0.4031 - acc: 0.8447 - val_loss: 0.6165 - val_acc: 0.7782
Epoch 106/1000
6003/6003 [==============================] - 0s - loss: 0.4041 - acc: 0.8499 - val_loss: 0.6212 - val_acc: 0.7742
Epoch 107/1000
6003/6003 [==============================] - 0s - loss: 0.4050 - acc: 0.8469 - val_loss: 0.6173 - val_acc: 0.7777
Epoch 108/1000
6003/6003 [==============================] - 0s - loss: 0.4007 - acc: 0.8481 - val_loss: 0.6173 - val_acc: 0.7742
Epoch 109/1000
6003/6003 [==============================] - 0s - loss: 0.4008 - acc: 0.8507 - val_loss: 0.6185 - val_acc: 0.7762
Epoch 110/1000
6003/6003 [==============================] - 0s - loss: 0.4014 - acc: 0.8487 - val_loss: 0.6240 - val_acc: 0.7697
Epoch 111/1000
6003/6003 [==============================] - 0s - loss: 0.3957 - acc: 0.8479 - val_loss: 0.6229 - val_acc: 0.7692
Epoch 112/1000
6003/6003 [==============================] - 0s - loss: 0.3943 - acc: 0.8484 - val_loss: 0.6208 - val_acc: 0.7777
Epoch 113/1000
6003/6003 [==============================] - 0s - loss: 0.3971 - acc: 0.8509 - val_loss: 0.6214 - val_acc: 0.7737
Epoch 114/1000
6003/6003 [==============================] - 0s - loss: 0.3959 - acc: 0.8511 - val_loss: 0.6219 - val_acc: 0.7742
Epoch 115/1000
6003/6003 [==============================] - 0s - loss: 0.3919 - acc: 0.8507 - val_loss: 0.6232 - val_acc: 0.7752
Epoch 116/1000
6003/6003 [==============================] - 0s - loss: 0.3946 - acc: 0.8511 - val_loss: 0.6290 - val_acc: 0.7707
Epoch 117/1000
6003/6003 [==============================] - 0s - loss: 0.3911 - acc: 0.8534 - val_loss: 0.6246 - val_acc: 0.7747
Epoch 118/1000
6003/6003 [==============================] - 0s - loss: 0.3936 - acc: 0.8502 - val_loss: 0.6231 - val_acc: 0.7747
Epoch 119/1000
6003/6003 [==============================] - 0s - loss: 0.3910 - acc: 0.8512 - val_loss: 0.6228 - val_acc: 0.7757
Epoch 120/1000
6003/6003 [==============================] - 0s - loss: 0.3889 - acc: 0.8517 - val_loss: 0.6230 - val_acc: 0.7747
Epoch 121/1000
6003/6003 [==============================] - 0s - loss: 0.3852 - acc: 0.8569 - val_loss: 0.6299 - val_acc: 0.7727
Epoch 122/1000
6003/6003 [==============================] - 0s - loss: 0.3862 - acc: 0.8577 - val_loss: 0.6289 - val_acc: 0.7752
Epoch 123/1000
6003/6003 [==============================] - 0s - loss: 0.3880 - acc: 0.8506 - val_loss: 0.6272 - val_acc: 0.7742
Epoch 124/1000
6003/6003 [==============================] - 0s - loss: 0.3839 - acc: 0.8546 - val_loss: 0.6328 - val_acc: 0.7672
Epoch 125/1000
6003/6003 [==============================] - 0s - loss: 0.3866 - acc: 0.8549 - val_loss: 0.6363 - val_acc: 0.7682
Epoch 126/1000
6003/6003 [==============================] - 0s - loss: 0.3853 - acc: 0.8556 - val_loss: 0.6301 - val_acc: 0.7777
Test loss: 0.630098594831
Test accuracy: 0.777722277544

In [12]:
# Keras GRU

from keras.models import Sequential
from keras.layers import *
n_symbols = len(tokenizer_char.word_index) + 1
seq_length = 64
embedding_size = 20
batch_size = 32

char_int_encoded_train = keras.preprocessing.sequence.pad_sequences(
            char_int_encoded_train, maxlen=seq_length, dtype='int', truncating='pre')
char_int_encoded_test = keras.preprocessing.sequence.pad_sequences(
            char_int_encoded_test, maxlen=seq_length, dtype='int', truncating='pre')

model = Sequential()
model.add(Embedding(input_dim=n_symbols,
                          output_dim=n_symbols,
                          input_length=seq_length,
                          mask_zero=False, embeddings_initializer='identity', trainable=False))
model.add(GRU(32, dropout=0.0, recurrent_dropout=0.0))
model.add(Dense(16, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

model.summary()

history = model.fit(char_int_encoded_train, encoded_y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(char_int_encoded_test, encoded_y_test),
                    callbacks=[early_stopping])
score = model.evaluate(char_int_encoded_test, encoded_y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_1 (Embedding)      (None, 64, 294)           86436     
_________________________________________________________________
gru_1 (GRU)                  (None, 32)                31392     
_________________________________________________________________
dense_3 (Dense)              (None, 16)                528       
=================================================================
Total params: 118,356
Trainable params: 31,920
Non-trainable params: 86,436
_________________________________________________________________
Train on 3003 samples, validate on 1001 samples
Epoch 1/1000
3003/3003 [==============================] - 3s - loss: 1.8205 - acc: 0.4276 - val_loss: 1.1439 - val_acc: 0.5075
Epoch 2/1000
3003/3003 [==============================] - 3s - loss: 1.1391 - acc: 0.5135 - val_loss: 1.1232 - val_acc: 0.5075
Epoch 3/1000
3003/3003 [==============================] - 3s - loss: 1.1234 - acc: 0.5198 - val_loss: 1.1061 - val_acc: 0.5155
Epoch 4/1000
3003/3003 [==============================] - 5s - loss: 1.1079 - acc: 0.5405 - val_loss: 1.0871 - val_acc: 0.5524
Epoch 5/1000
3003/3003 [==============================] - 4s - loss: 1.0878 - acc: 0.5541 - val_loss: 1.0643 - val_acc: 0.5684
Epoch 6/1000
3003/3003 [==============================] - 4s - loss: 1.0627 - acc: 0.5778 - val_loss: 1.0436 - val_acc: 0.5744
Epoch 7/1000
3003/3003 [==============================] - 4s - loss: 1.0364 - acc: 0.5818 - val_loss: 1.0249 - val_acc: 0.5914
Epoch 8/1000
3003/3003 [==============================] - 4s - loss: 1.0035 - acc: 0.6021 - val_loss: 1.0029 - val_acc: 0.5904
Epoch 9/1000
3003/3003 [==============================] - 4s - loss: 0.9791 - acc: 0.6277 - val_loss: 0.9904 - val_acc: 0.6004
Epoch 10/1000
3003/3003 [==============================] - 5s - loss: 0.9563 - acc: 0.6347 - val_loss: 0.9787 - val_acc: 0.6044
Epoch 11/1000
3003/3003 [==============================] - 5s - loss: 0.9397 - acc: 0.6424 - val_loss: 0.9834 - val_acc: 0.6094
Epoch 12/1000
3003/3003 [==============================] - 4s - loss: 0.9240 - acc: 0.6507 - val_loss: 0.9586 - val_acc: 0.6204
Epoch 13/1000
3003/3003 [==============================] - 4s - loss: 0.9127 - acc: 0.6573 - val_loss: 0.9566 - val_acc: 0.6264
Epoch 14/1000
3003/3003 [==============================] - 4s - loss: 0.8880 - acc: 0.6673 - val_loss: 0.9487 - val_acc: 0.6294
Epoch 15/1000
3003/3003 [==============================] - 5s - loss: 0.8759 - acc: 0.6687 - val_loss: 0.9512 - val_acc: 0.6314
Epoch 16/1000
3003/3003 [==============================] - 5s - loss: 0.8572 - acc: 0.6773 - val_loss: 0.9450 - val_acc: 0.6274
Epoch 17/1000
3003/3003 [==============================] - 4s - loss: 0.8500 - acc: 0.6820 - val_loss: 0.9372 - val_acc: 0.6354
Epoch 18/1000
3003/3003 [==============================] - 4s - loss: 0.8342 - acc: 0.6923 - val_loss: 0.9333 - val_acc: 0.6454
Epoch 19/1000
3003/3003 [==============================] - 4s - loss: 0.8184 - acc: 0.6936 - val_loss: 0.9434 - val_acc: 0.6384
Epoch 20/1000
3003/3003 [==============================] - 4s - loss: 0.8134 - acc: 0.7003 - val_loss: 0.9419 - val_acc: 0.6214
Epoch 21/1000
3003/3003 [==============================] - 4s - loss: 0.7927 - acc: 0.7070 - val_loss: 0.9415 - val_acc: 0.6464
Epoch 22/1000
3003/3003 [==============================] - 4s - loss: 0.7790 - acc: 0.7106 - val_loss: 0.9557 - val_acc: 0.6494
Epoch 23/1000
3003/3003 [==============================] - 3s - loss: 0.7683 - acc: 0.7169 - val_loss: 0.9377 - val_acc: 0.6314
Epoch 24/1000
3003/3003 [==============================] - 4s - loss: 0.7520 - acc: 0.7189 - val_loss: 0.9358 - val_acc: 0.6344
Epoch 25/1000
3003/3003 [==============================] - 4s - loss: 0.7413 - acc: 0.7219 - val_loss: 0.9399 - val_acc: 0.6414
Epoch 26/1000
3003/3003 [==============================] - 3s - loss: 0.7305 - acc: 0.7253 - val_loss: 0.9465 - val_acc: 0.6334
Epoch 27/1000
3003/3003 [==============================] - 4s - loss: 0.7255 - acc: 0.7346 - val_loss: 0.9390 - val_acc: 0.6444
Epoch 28/1000
3003/3003 [==============================] - 4s - loss: 0.7040 - acc: 0.7393 - val_loss: 0.9566 - val_acc: 0.6344
Epoch 29/1000
3003/3003 [==============================] - 4s - loss: 0.6957 - acc: 0.7436 - val_loss: 0.9665 - val_acc: 0.6264
Epoch 30/1000
3003/3003 [==============================] - 4s - loss: 0.6865 - acc: 0.7486 - val_loss: 0.9560 - val_acc: 0.6354
Epoch 31/1000
3003/3003 [==============================] - 4s - loss: 0.6702 - acc: 0.7586 - val_loss: 0.9688 - val_acc: 0.6364
Epoch 32/1000
3003/3003 [==============================] - 4s - loss: 0.6605 - acc: 0.7572 - val_loss: 0.9812 - val_acc: 0.6354
Epoch 33/1000
3003/3003 [==============================] - 3s - loss: 0.6568 - acc: 0.7579 - val_loss: 0.9795 - val_acc: 0.6364
Epoch 34/1000
3003/3003 [==============================] - 3s - loss: 0.6361 - acc: 0.7716 - val_loss: 1.0014 - val_acc: 0.6354
Epoch 35/1000
3003/3003 [==============================] - 3s - loss: 0.6268 - acc: 0.7756 - val_loss: 1.0121 - val_acc: 0.6334
Epoch 36/1000
3003/3003 [==============================] - 3s - loss: 0.6162 - acc: 0.7819 - val_loss: 1.0134 - val_acc: 0.6254
Epoch 37/1000
3003/3003 [==============================] - 3s - loss: 0.6025 - acc: 0.7822 - val_loss: 1.0243 - val_acc: 0.6324
Epoch 38/1000
3003/3003 [==============================] - 4s - loss: 0.5877 - acc: 0.7922 - val_loss: 1.0566 - val_acc: 0.6124
Epoch 39/1000
3003/3003 [==============================] - 4s - loss: 0.5802 - acc: 0.7939 - val_loss: 1.0556 - val_acc: 0.6224
Epoch 40/1000
3003/3003 [==============================] - 4s - loss: 0.5736 - acc: 0.7982 - val_loss: 1.0675 - val_acc: 0.6144
Epoch 41/1000
3003/3003 [==============================] - 4s - loss: 0.5552 - acc: 0.8049 - val_loss: 1.0784 - val_acc: 0.6284
Epoch 42/1000
3003/3003 [==============================] - 5s - loss: 0.5436 - acc: 0.8125 - val_loss: 1.1219 - val_acc: 0.6114
Epoch 43/1000
3003/3003 [==============================] - 4s - loss: 0.5324 - acc: 0.8182 - val_loss: 1.1466 - val_acc: 0.5894
Test loss: 1.146633152
Test accuracy: 0.58941058944

In [ ]: