Using pretrained GloVe embeddings


In [1]:
# Based on
# https://github.com/fchollet/deep-learning-with-python-notebooks/blob/master/6.1-using-word-embeddings.ipynb
# https://machinelearningmastery.com/develop-word-embeddings-python-gensim/

In [2]:
import warnings
warnings.filterwarnings('ignore')

In [3]:
%matplotlib inline
%pylab inline


Populating the interactive namespace from numpy and matplotlib

In [4]:
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
print(tf.__version__)


1.8.0

Download data:
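
The notebook assumes the raw aclImdb data is already on disk. As a minimal sketch (not run here), the archive can be fetched from its original Stanford location and unpacked with Keras' own download helper; the cache location is up to you:

In [ ]:
# Sketch: fetch and unpack the raw IMDB data. The URL is the dataset's
# original Stanford location; adjust paths for your machine.
from keras.utils import get_file
path = get_file('aclImdb_v1.tar.gz',
                origin='http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz',
                untar=True)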


In [5]:
import os

imdb_dir = 'C:/Users/olive/Development/data/aclImdb'
train_dir = os.path.join(imdb_dir, 'train')

labels = []
texts = []

# Read each review file and record its label (0 = negative, 1 = positive)
for label_type in ['neg', 'pos']:
    dir_name = os.path.join(train_dir, label_type)
    for fname in os.listdir(dir_name):
        if fname.endswith('.txt'):
            with open(os.path.join(dir_name, fname), encoding='UTF-8') as f:
                texts.append(f.read())
            labels.append(0 if label_type == 'neg' else 1)

In [6]:
len(texts)


Out[6]:
25000

In [7]:
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import numpy as np

maxlen = 500  # We will cut reviews after 500 words
max_words = 10000  # We will only consider the top 10,000 words in the dataset

tokenizer = Tokenizer(num_words=max_words)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)

word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))

data = pad_sequences(sequences, maxlen=maxlen)

labels = np.asarray(labels)
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)

# But first, shuffle the data, since we started from data
# where samples are ordered (all negative first, then all positive).
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]


Using TensorFlow backend.
Found 88582 unique tokens.
Shape of data tensor: (25000, 500)
Shape of label tensor: (25000,)
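
To make the preprocessing above concrete, here is a toy illustration (with made-up sentences) of what Tokenizer and pad_sequences produce:

In [ ]:
# Toy example: the tokenizer maps words to integer ids by frequency,
# and pad_sequences left-pads each sequence to a fixed length.
toy = Tokenizer(num_words=10)
toy.fit_on_texts(['the movie was great', 'the movie was bad'])
print(toy.texts_to_sequences(['the movie was great']))
print(pad_sequences(toy.texts_to_sequences(['the movie was great']), maxlen=6))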

In [8]:
from sklearn.model_selection import train_test_split

In [9]:
x_train, x_test, y_train, y_test = train_test_split(data, labels, test_size=0.2, random_state=42, stratify=labels)

In [10]:
x_train.shape


Out[10]:
(20000, 500)

With pre-defined, frozen embeddings we do no better than guessing: the loss in the run below gets stuck at 0.6931 ≈ ln 2, exactly the binary cross-entropy of always predicting 0.5


In [11]:
glove_dir = 'C:/Users/olive/Development/data/glove.6B'

embeddings_index = {}
with open(os.path.join(glove_dir, 'glove.6B.100d.txt'), encoding='UTF-8') as f:
    for line in f:
        values = line.split()
        word = values[0]
        coefs = np.asarray(values[1:], dtype='float32')
        embeddings_index[word] = coefs

print('Found %s word vectors.' % len(embeddings_index))


Found 400000 word vectors.
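
As a quick sanity check of what these vectors encode, here is a small sketch (words chosen arbitrarily) comparing cosine similarities:

In [ ]:
# Sketch: related words should score a higher cosine similarity than unrelated ones.
def cosine(a, b):
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))

print(cosine(embeddings_index['good'], embeddings_index['great']))   # relatively high
print(cosine(embeddings_index['good'], embeddings_index['banana']))  # lower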

In [12]:
embedding_dim = 100

# Rows of embedding_matrix are indexed by the tokenizer's word ids;
# words without a GloVe vector stay all-zeros.
embedding_matrix = np.zeros((max_words, embedding_dim))
for word, i in word_index.items():
    if i < max_words:
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector
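
A quick sketch to check how many of the top max_words tokens actually received a GloVe vector (the rest remain zero rows):

In [ ]:
# Sketch: count non-zero rows in the embedding matrix.
covered = int((np.abs(embedding_matrix).sum(axis=1) > 0).sum())
print('%d of %d words have a GloVe vector' % (covered, max_words))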

In [13]:
from keras.models import Sequential
from keras.layers import Embedding, Flatten, Dense

model = Sequential()
model.add(Embedding(max_words, embedding_dim, input_length=maxlen))
model.add(Flatten())
model.add(Dense(32, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

In [14]:
model.layers[0].set_weights([embedding_matrix])
# Keras reads the trainable flag at compile time, so freezing only takes
# effect when the model is compiled in the next cell.
model.layers[0].trainable = False

In [15]:
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['acc'])
model.summary()


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_1 (Embedding)      (None, 500, 100)          1000000   
_________________________________________________________________
flatten_1 (Flatten)          (None, 50000)             0         
_________________________________________________________________
dense_1 (Dense)              (None, 32)                1600032   
_________________________________________________________________
dense_2 (Dense)              (None, 1)                 33        
=================================================================
Total params: 2,600,065
Trainable params: 1,600,065
Non-trainable params: 1,000,000
_________________________________________________________________

In [18]:
batch_size=1000
model.fit(x_train, y_train,
                    epochs=10,
                    batch_size=batch_size,
                    validation_split=0.2)


Train on 16000 samples, validate on 4000 samples
Epoch 1/10
16000/16000 [==============================] - 1s 48us/step - loss: 0.9644 - acc: 0.5029 - val_loss: 0.6932 - val_acc: 0.4945
Epoch 2/10
16000/16000 [==============================] - 0s 24us/step - loss: 0.6931 - acc: 0.5014 - val_loss: 0.6932 - val_acc: 0.4945
Epoch 3/10
16000/16000 [==============================] - 0s 23us/step - loss: 0.6931 - acc: 0.5014 - val_loss: 0.6932 - val_acc: 0.4945
Epoch 4/10
16000/16000 [==============================] - 0s 23us/step - loss: 0.6931 - acc: 0.5014 - val_loss: 0.6932 - val_acc: 0.4945
Epoch 5/10
16000/16000 [==============================] - 0s 23us/step - loss: 0.6931 - acc: 0.5014 - val_loss: 0.6932 - val_acc: 0.4945
Epoch 6/10
16000/16000 [==============================] - 0s 22us/step - loss: 0.6931 - acc: 0.5014 - val_loss: 0.6932 - val_acc: 0.4945
Epoch 7/10
16000/16000 [==============================] - 0s 23us/step - loss: 0.6931 - acc: 0.5014 - val_loss: 0.6932 - val_acc: 0.4945
Epoch 8/10
16000/16000 [==============================] - 0s 25us/step - loss: 0.6931 - acc: 0.5014 - val_loss: 0.6932 - val_acc: 0.4945
Epoch 9/10
16000/16000 [==============================] - 0s 23us/step - loss: 0.6931 - acc: 0.5014 - val_loss: 0.6932 - val_acc: 0.4945
Epoch 10/10
16000/16000 [==============================] - 0s 23us/step - loss: 0.6931 - acc: 0.5014 - val_loss: 0.6932 - val_acc: 0.4945
Out[18]:
<keras.callbacks.History at 0x21de3d3db00>

Embedding trainable, but still initialized from the pretrained GloVe weights


In [16]:
model.layers[0].trainable = True

In [17]:
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['acc'])
model.summary()


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_1 (Embedding)      (None, 500, 100)          1000000   
_________________________________________________________________
flatten_1 (Flatten)          (None, 50000)             0         
_________________________________________________________________
dense_1 (Dense)              (None, 32)                1600032   
_________________________________________________________________
dense_2 (Dense)              (None, 1)                 33        
=================================================================
Total params: 2,600,065
Trainable params: 2,600,065
Non-trainable params: 0
_________________________________________________________________

In [19]:
batch_size=1000
model.fit(x_train, y_train,
                    epochs=100,
                    batch_size=batch_size,
                    validation_split=0.2)


Train on 16000 samples, validate on 4000 samples
Epoch 1/100
16000/16000 [==============================] - 1s 42us/step - loss: 0.6872 - acc: 0.5368 - val_loss: 0.6789 - val_acc: 0.5817
Epoch 2/100
16000/16000 [==============================] - 1s 38us/step - loss: 0.6610 - acc: 0.6124 - val_loss: 0.6483 - val_acc: 0.6625
Epoch 3/100
16000/16000 [==============================] - 1s 38us/step - loss: 0.6072 - acc: 0.7096 - val_loss: 0.6066 - val_acc: 0.7432
Epoch 4/100
16000/16000 [==============================] - 1s 39us/step - loss: 0.5435 - acc: 0.7864 - val_loss: 0.5704 - val_acc: 0.7510
Epoch 5/100
16000/16000 [==============================] - 1s 39us/step - loss: 0.4832 - acc: 0.8464 - val_loss: 0.5604 - val_acc: 0.7502
Epoch 6/100
16000/16000 [==============================] - 1s 38us/step - loss: 0.4364 - acc: 0.8869 - val_loss: 0.5564 - val_acc: 0.7625
Epoch 7/100
16000/16000 [==============================] - 1s 39us/step - loss: 0.4021 - acc: 0.9178 - val_loss: 0.5374 - val_acc: 0.7835
Epoch 8/100
16000/16000 [==============================] - 1s 39us/step - loss: 0.3759 - acc: 0.9381 - val_loss: 0.5360 - val_acc: 0.7850
Epoch 9/100
16000/16000 [==============================] - 1s 39us/step - loss: 0.3545 - acc: 0.9542 - val_loss: 0.5692 - val_acc: 0.7705
Epoch 10/100
16000/16000 [==============================] - 1s 38us/step - loss: 0.3373 - acc: 0.9657 - val_loss: 0.5695 - val_acc: 0.7745
Epoch 11/100
16000/16000 [==============================] - 1s 38us/step - loss: 0.3234 - acc: 0.9744 - val_loss: 0.5434 - val_acc: 0.7932
Epoch 12/100
16000/16000 [==============================] - 1s 38us/step - loss: 0.3124 - acc: 0.9794 - val_loss: 0.5617 - val_acc: 0.7863
Epoch 13/100
16000/16000 [==============================] - 1s 48us/step - loss: 0.3028 - acc: 0.9818 - val_loss: 0.5695 - val_acc: 0.7833
Epoch 14/100
16000/16000 [==============================] - 1s 39us/step - loss: 0.2945 - acc: 0.9855 - val_loss: 0.5963 - val_acc: 0.7738
Epoch 15/100
16000/16000 [==============================] - 1s 44us/step - loss: 0.2868 - acc: 0.9869 - val_loss: 0.6084 - val_acc: 0.7740
Epoch 16/100
16000/16000 [==============================] - 1s 38us/step - loss: 0.2801 - acc: 0.9880 - val_loss: 0.5977 - val_acc: 0.7805
Epoch 17/100
16000/16000 [==============================] - 1s 44us/step - loss: 0.2743 - acc: 0.9888 - val_loss: 0.6044 - val_acc: 0.7792
Epoch 18/100
16000/16000 [==============================] - 1s 43us/step - loss: 0.2689 - acc: 0.9893 - val_loss: 0.5978 - val_acc: 0.7815
Epoch 19/100
16000/16000 [==============================] - 1s 47us/step - loss: 0.2638 - acc: 0.9896 - val_loss: 0.6237 - val_acc: 0.7785
Epoch 20/100
16000/16000 [==============================] - 1s 39us/step - loss: 0.2592 - acc: 0.9897 - val_loss: 0.6219 - val_acc: 0.7778
Epoch 21/100
16000/16000 [==============================] - 1s 39us/step - loss: 0.2547 - acc: 0.9900 - val_loss: 0.6111 - val_acc: 0.7802
Epoch 22/100
16000/16000 [==============================] - 1s 39us/step - loss: 0.2507 - acc: 0.9902 - val_loss: 0.6323 - val_acc: 0.7755
Epoch 23/100
16000/16000 [==============================] - 1s 39us/step - loss: 0.2469 - acc: 0.9904 - val_loss: 0.6173 - val_acc: 0.7848
Epoch 24/100
16000/16000 [==============================] - 1s 39us/step - loss: 0.2432 - acc: 0.9903 - val_loss: 0.6383 - val_acc: 0.7770
Epoch 25/100
16000/16000 [==============================] - 1s 46us/step - loss: 0.2394 - acc: 0.9905 - val_loss: 0.6247 - val_acc: 0.7840
Epoch 26/100
16000/16000 [==============================] - 1s 41us/step - loss: 0.2358 - acc: 0.9907 - val_loss: 0.6301 - val_acc: 0.7830
Epoch 27/100
16000/16000 [==============================] - 1s 38us/step - loss: 0.2326 - acc: 0.9908 - val_loss: 0.6488 - val_acc: 0.7775
Epoch 28/100
16000/16000 [==============================] - 1s 38us/step - loss: 0.2293 - acc: 0.9909 - val_loss: 0.6674 - val_acc: 0.7778
Epoch 29/100
16000/16000 [==============================] - 1s 38us/step - loss: 0.2260 - acc: 0.9911 - val_loss: 0.6431 - val_acc: 0.7813
Epoch 30/100
16000/16000 [==============================] - 1s 38us/step - loss: 0.2228 - acc: 0.9912 - val_loss: 0.6520 - val_acc: 0.7793
Epoch 31/100
16000/16000 [==============================] - 1s 39us/step - loss: 0.2198 - acc: 0.9913 - val_loss: 0.6335 - val_acc: 0.7862
Epoch 32/100
16000/16000 [==============================] - 1s 38us/step - loss: 0.2170 - acc: 0.9913 - val_loss: 0.6423 - val_acc: 0.7847
Epoch 33/100
16000/16000 [==============================] - 1s 39us/step - loss: 0.2141 - acc: 0.9914 - val_loss: 0.6426 - val_acc: 0.7850
Epoch 34/100
16000/16000 [==============================] - 1s 40us/step - loss: 0.2114 - acc: 0.9914 - val_loss: 0.6552 - val_acc: 0.7815
Epoch 35/100
16000/16000 [==============================] - 1s 46us/step - loss: 0.2087 - acc: 0.9914 - val_loss: 0.6610 - val_acc: 0.7810
Epoch 36/100
16000/16000 [==============================] - 1s 40us/step - loss: 0.2061 - acc: 0.9914 - val_loss: 0.6650 - val_acc: 0.7807
Epoch 37/100
16000/16000 [==============================] - 1s 43us/step - loss: 0.2036 - acc: 0.9915 - val_loss: 0.6651 - val_acc: 0.7815
Epoch 38/100
16000/16000 [==============================] - 1s 48us/step - loss: 0.2012 - acc: 0.9915 - val_loss: 0.6144 - val_acc: 0.7950
Epoch 39/100
16000/16000 [==============================] - 1s 39us/step - loss: 0.1989 - acc: 0.9916 - val_loss: 0.6226 - val_acc: 0.7898
Epoch 40/100
16000/16000 [==============================] - 1s 39us/step - loss: 0.1963 - acc: 0.9917 - val_loss: 0.6475 - val_acc: 0.7880
Epoch 41/100
16000/16000 [==============================] - 1s 38us/step - loss: 0.1939 - acc: 0.9918 - val_loss: 0.6582 - val_acc: 0.7853
Epoch 42/100
16000/16000 [==============================] - 1s 39us/step - loss: 0.1916 - acc: 0.9918 - val_loss: 0.6269 - val_acc: 0.7923
Epoch 43/100
16000/16000 [==============================] - 1s 40us/step - loss: 0.1894 - acc: 0.9918 - val_loss: 0.6391 - val_acc: 0.7890
Epoch 44/100
16000/16000 [==============================] - 1s 39us/step - loss: 0.1872 - acc: 0.9918 - val_loss: 0.6819 - val_acc: 0.7823
Epoch 45/100
16000/16000 [==============================] - 1s 40us/step - loss: 0.1849 - acc: 0.9919 - val_loss: 0.6640 - val_acc: 0.7847
Epoch 46/100
16000/16000 [==============================] - 1s 39us/step - loss: 0.1828 - acc: 0.9919 - val_loss: 0.6694 - val_acc: 0.7850
Epoch 47/100
16000/16000 [==============================] - 1s 39us/step - loss: 0.1807 - acc: 0.9919 - val_loss: 0.6695 - val_acc: 0.7858
Epoch 48/100
16000/16000 [==============================] - 1s 40us/step - loss: 0.1787 - acc: 0.9919 - val_loss: 0.6683 - val_acc: 0.7855
Epoch 49/100
16000/16000 [==============================] - 1s 39us/step - loss: 0.1768 - acc: 0.9919 - val_loss: 0.6715 - val_acc: 0.7862
Epoch 50/100
16000/16000 [==============================] - 1s 40us/step - loss: 0.1748 - acc: 0.9919 - val_loss: 0.6796 - val_acc: 0.7843
Epoch 51/100
16000/16000 [==============================] - 1s 45us/step - loss: 0.1729 - acc: 0.9920 - val_loss: 0.6785 - val_acc: 0.7870
Epoch 52/100
16000/16000 [==============================] - 1s 46us/step - loss: 0.1710 - acc: 0.9921 - val_loss: 0.6753 - val_acc: 0.7863
Epoch 53/100
16000/16000 [==============================] - 1s 47us/step - loss: 0.1692 - acc: 0.9921 - val_loss: 0.6741 - val_acc: 0.7870
Epoch 54/100
16000/16000 [==============================] - 1s 43us/step - loss: 0.1674 - acc: 0.9921 - val_loss: 0.6638 - val_acc: 0.7885
Epoch 55/100
16000/16000 [==============================] - 1s 38us/step - loss: 0.1656 - acc: 0.9921 - val_loss: 0.6882 - val_acc: 0.7842
Epoch 56/100
16000/16000 [==============================] - 1s 39us/step - loss: 0.1639 - acc: 0.9921 - val_loss: 0.6967 - val_acc: 0.7845
Epoch 57/100
16000/16000 [==============================] - 1s 45us/step - loss: 0.1622 - acc: 0.9921 - val_loss: 0.6450 - val_acc: 0.7937
Epoch 58/100
16000/16000 [==============================] - 1s 40us/step - loss: 0.1606 - acc: 0.9921 - val_loss: 0.6689 - val_acc: 0.7903
Epoch 59/100
16000/16000 [==============================] - 1s 51us/step - loss: 0.1589 - acc: 0.9921 - val_loss: 0.6605 - val_acc: 0.7913
Epoch 60/100
16000/16000 [==============================] - 1s 44us/step - loss: 0.1573 - acc: 0.9921 - val_loss: 0.6617 - val_acc: 0.7925
Epoch 61/100
16000/16000 [==============================] - 1s 38us/step - loss: 0.1556 - acc: 0.9921 - val_loss: 0.6859 - val_acc: 0.7860
Epoch 62/100
16000/16000 [==============================] - 1s 39us/step - loss: 0.1541 - acc: 0.9921 - val_loss: 0.6506 - val_acc: 0.7943
Epoch 63/100
16000/16000 [==============================] - 1s 39us/step - loss: 0.1525 - acc: 0.9921 - val_loss: 0.6791 - val_acc: 0.7890
Epoch 64/100
16000/16000 [==============================] - 1s 38us/step - loss: 0.1510 - acc: 0.9921 - val_loss: 0.6697 - val_acc: 0.7910
Epoch 65/100
16000/16000 [==============================] - 1s 39us/step - loss: 0.1496 - acc: 0.9921 - val_loss: 0.6740 - val_acc: 0.7903
Epoch 66/100
16000/16000 [==============================] - 1s 38us/step - loss: 0.1481 - acc: 0.9921 - val_loss: 0.6063 - val_acc: 0.8025
Epoch 67/100
16000/16000 [==============================] - 1s 40us/step - loss: 0.1466 - acc: 0.9923 - val_loss: 0.6194 - val_acc: 0.8043
Epoch 68/100
16000/16000 [==============================] - 1s 39us/step - loss: 0.1451 - acc: 0.9923 - val_loss: 0.6505 - val_acc: 0.7955
Epoch 69/100
16000/16000 [==============================] - 1s 43us/step - loss: 0.1436 - acc: 0.9923 - val_loss: 0.6921 - val_acc: 0.7888
Epoch 70/100
16000/16000 [==============================] - 1s 52us/step - loss: 0.1422 - acc: 0.9923 - val_loss: 0.6612 - val_acc: 0.7955
Epoch 71/100
16000/16000 [==============================] - 1s 44us/step - loss: 0.1409 - acc: 0.9923 - val_loss: 0.6552 - val_acc: 0.7955
Epoch 72/100
16000/16000 [==============================] - 1s 39us/step - loss: 0.1395 - acc: 0.9923 - val_loss: 0.6596 - val_acc: 0.7930
Epoch 73/100
16000/16000 [==============================] - 1s 39us/step - loss: 0.1380 - acc: 0.9923 - val_loss: 0.6735 - val_acc: 0.7933
Epoch 74/100
16000/16000 [==============================] - 1s 40us/step - loss: 0.1347 - acc: 0.9923 - val_loss: 0.6492 - val_acc: 0.7998
Epoch 75/100
16000/16000 [==============================] - 1s 38us/step - loss: 0.0960 - acc: 0.9917 - val_loss: 0.5333 - val_acc: 0.8188
Epoch 76/100
16000/16000 [==============================] - 1s 44us/step - loss: 0.0613 - acc: 0.9923 - val_loss: 0.5085 - val_acc: 0.8255
Epoch 77/100
16000/16000 [==============================] - 1s 45us/step - loss: 0.0388 - acc: 0.9951 - val_loss: 0.5118 - val_acc: 0.8268
Epoch 78/100
16000/16000 [==============================] - 1s 46us/step - loss: 0.0271 - acc: 0.9976 - val_loss: 0.5027 - val_acc: 0.8375
Epoch 79/100
16000/16000 [==============================] - 1s 45us/step - loss: 0.0213 - acc: 0.9979 - val_loss: 0.5071 - val_acc: 0.8413
Epoch 80/100
16000/16000 [==============================] - 1s 44us/step - loss: 0.0177 - acc: 0.9983 - val_loss: 0.4935 - val_acc: 0.8400
Epoch 81/100
16000/16000 [==============================] - 1s 38us/step - loss: 0.0122 - acc: 0.9983 - val_loss: 0.5265 - val_acc: 0.8390
Epoch 82/100
16000/16000 [==============================] - 1s 38us/step - loss: 0.0075 - acc: 0.9988 - val_loss: 0.5365 - val_acc: 0.8403
Epoch 83/100
16000/16000 [==============================] - 1s 39us/step - loss: 0.0038 - acc: 0.9995 - val_loss: 0.5224 - val_acc: 0.8410
Epoch 84/100
16000/16000 [==============================] - 1s 38us/step - loss: 0.0027 - acc: 0.9995 - val_loss: 0.5318 - val_acc: 0.8425
Epoch 85/100
16000/16000 [==============================] - 1s 40us/step - loss: 0.0024 - acc: 0.9995 - val_loss: 0.5392 - val_acc: 0.8427
Epoch 86/100
16000/16000 [==============================] - 1s 52us/step - loss: 0.0022 - acc: 0.9995 - val_loss: 0.5504 - val_acc: 0.8413
Epoch 87/100
16000/16000 [==============================] - 1s 43us/step - loss: 0.0020 - acc: 0.9995 - val_loss: 0.5602 - val_acc: 0.8433
Epoch 88/100
16000/16000 [==============================] - 1s 44us/step - loss: 0.0018 - acc: 0.9995 - val_loss: 0.5617 - val_acc: 0.8430
Epoch 89/100
16000/16000 [==============================] - 1s 42us/step - loss: 0.0016 - acc: 0.9995 - val_loss: 0.5696 - val_acc: 0.8457
Epoch 90/100
16000/16000 [==============================] - 1s 45us/step - loss: 0.0015 - acc: 0.9995 - val_loss: 0.5790 - val_acc: 0.8458
Epoch 91/100
16000/16000 [==============================] - 1s 45us/step - loss: 0.0014 - acc: 0.9995 - val_loss: 0.5847 - val_acc: 0.8462
Epoch 92/100
16000/16000 [==============================] - 1s 44us/step - loss: 0.0014 - acc: 0.9995 - val_loss: 0.5987 - val_acc: 0.8452
Epoch 93/100
16000/16000 [==============================] - 1s 41us/step - loss: 0.0013 - acc: 0.9995 - val_loss: 0.6000 - val_acc: 0.8448
Epoch 94/100
16000/16000 [==============================] - 1s 45us/step - loss: 0.0013 - acc: 0.9995 - val_loss: 0.6159 - val_acc: 0.8443
Epoch 95/100
16000/16000 [==============================] - 1s 42us/step - loss: 0.0012 - acc: 0.9995 - val_loss: 0.6180 - val_acc: 0.8452
Epoch 96/100
16000/16000 [==============================] - 1s 43us/step - loss: 0.0012 - acc: 0.9995 - val_loss: 0.6153 - val_acc: 0.8462
Epoch 97/100
16000/16000 [==============================] - 1s 40us/step - loss: 0.0012 - acc: 0.9995 - val_loss: 0.6228 - val_acc: 0.8450
Epoch 98/100
16000/16000 [==============================] - 1s 39us/step - loss: 0.0011 - acc: 0.9995 - val_loss: 0.6329 - val_acc: 0.8442
Epoch 99/100
16000/16000 [==============================] - 1s 40us/step - loss: 0.0011 - acc: 0.9995 - val_loss: 0.6409 - val_acc: 0.8428
Epoch 100/100
16000/16000 [==============================] - 1s 40us/step - loss: 0.0011 - acc: 0.9995 - val_loss: 0.6398 - val_acc: 0.8438
Out[19]:
<keras.callbacks.History at 0x23800b68cc0>
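
The log above shows classic overfitting: training accuracy saturates near 99% while validation loss drifts upward for long stretches. A minimal sketch of how training could be cut short with a standard Keras callback (the patience value here is chosen arbitrarily):

In [ ]:
# Sketch: stop once val_loss has not improved for a few epochs.
from keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(monitor='val_loss', patience=3)
model.fit(x_train, y_train, epochs=100, batch_size=batch_size,
          validation_split=0.2, callbacks=[early_stopping])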

Embeddings trained from scratch


In [20]:
from keras.models import Sequential
from keras.layers import Embedding, Flatten, Dense

model = Sequential()
model.add(Embedding(max_words, embedding_dim, input_length=maxlen))
model.add(Flatten())
model.add(Dense(32, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['acc'])
model.summary()


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_2 (Embedding)      (None, 500, 100)          1000000   
_________________________________________________________________
flatten_2 (Flatten)          (None, 50000)             0         
_________________________________________________________________
dense_3 (Dense)              (None, 32)                1600032   
_________________________________________________________________
dense_4 (Dense)              (None, 1)                 33        
=================================================================
Total params: 2,600,065
Trainable params: 2,600,065
Non-trainable params: 0
_________________________________________________________________

In [21]:
batch_size=1000
model.fit(x_train, y_train,
                    epochs=10,
                    batch_size=batch_size,
                    validation_split=0.2)


Train on 16000 samples, validate on 4000 samples
Epoch 1/10
16000/16000 [==============================] - 1s 60us/step - loss: 0.7110 - acc: 0.5105 - val_loss: 0.6878 - val_acc: 0.5138
Epoch 2/10
16000/16000 [==============================] - 1s 38us/step - loss: 0.6634 - acc: 0.5683 - val_loss: 0.6735 - val_acc: 0.5337
Epoch 3/10
16000/16000 [==============================] - 1s 39us/step - loss: 0.6034 - acc: 0.6673 - val_loss: 0.5625 - val_acc: 0.7602
Epoch 4/10
16000/16000 [==============================] - 1s 41us/step - loss: 0.4082 - acc: 0.8406 - val_loss: 0.3716 - val_acc: 0.8425
Epoch 5/10
16000/16000 [==============================] - 1s 46us/step - loss: 0.2298 - acc: 0.9159 - val_loss: 0.2989 - val_acc: 0.8720
Epoch 6/10
16000/16000 [==============================] - 1s 38us/step - loss: 0.1364 - acc: 0.9556 - val_loss: 0.2882 - val_acc: 0.8760
Epoch 7/10
16000/16000 [==============================] - 1s 37us/step - loss: 0.0784 - acc: 0.9809 - val_loss: 0.2911 - val_acc: 0.8812
Epoch 8/10
16000/16000 [==============================] - 1s 41us/step - loss: 0.0419 - acc: 0.9928 - val_loss: 0.3028 - val_acc: 0.8802
Epoch 9/10
16000/16000 [==============================] - 1s 38us/step - loss: 0.0197 - acc: 0.9984 - val_loss: 0.3207 - val_acc: 0.8788
Epoch 10/10
16000/16000 [==============================] - 1s 39us/step - loss: 0.0100 - acc: 0.9996 - val_loss: 0.3380 - val_acc: 0.8787
Out[21]:
<keras.callbacks.History at 0x238232c1898>

In [23]:
# Note: x_train here still includes the 4,000 samples Keras held out for validation.
train_loss, train_accuracy = model.evaluate(x_train, y_train, batch_size=batch_size)
train_accuracy


20000/20000 [==============================] - 0s 17us/step
Out[23]:
0.9756000012159347

In [24]:
test_loss, test_accuracy = model.evaluate(x_test, y_test, batch_size=batch_size)
test_accuracy


5000/5000 [==============================] - 0s 16us/step
Out[24]:
0.8834000110626221
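
Interestingly, the embeddings trained from scratch beat the GloVe-initialized variants on this task: roughly 88% test accuracy, versus about 85% validation accuracy after fine-tuning and chance level when frozen. As a final sketch (the review text is made up), here is how the trained model could score a new review, reusing the tokenizer and maxlen from above:

In [ ]:
# Sketch: preprocess a raw review exactly like the training data, then predict.
review = ["a wonderful film with a moving story and great acting"]
seq = pad_sequences(tokenizer.texts_to_sequences(review), maxlen=maxlen)
model.predict(seq)  # close to 1.0 means positive, close to 0.0 means negative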

In [ ]: