In [1]:
import pandas as pd
import numpy as np
import os
%matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('ggplot')
In [3]:
from keras.datasets import imdb
max_features = 20000  # vocabulary size: keep only the 20,000 most frequent words
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
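For a quick sanity check on the encoding, a review can be decoded back to words with imdb.get_word_index(). A minimal sketch, assuming the default index_from=3 offset that load_data applies (indices 0-2 are reserved for padding/start/unknown):
In [ ]:
# Decode the first training review back to text (sanity check, not part of the pipeline).
word_index = imdb.get_word_index()
index_to_word = {i + 3: w for w, i in word_index.items()}
index_to_word.update({0: '<pad>', 1: '<start>', 2: '<unk>'})
print(' '.join(index_to_word.get(i, '<unk>') for i in x_train[0][:30]))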
In [5]:
# cut texts after this number of words (among top max_features most common words)
from keras.preprocessing import sequence
maxlen = 80
batch_size = 32
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
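As an illustrative check (not in the original run): pad_sequences both pads and truncates from the front by default, so each row keeps the last maxlen tokens and short reviews get leading zeros.
In [ ]:
print(x_train[0][:10])        # leading zeros appear here for short reviews
print((x_train == 0).mean())  # overall fraction of positions that are padding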
In [22]:
from keras.models import Sequential
from keras.layers import Dense, Embedding, Bidirectional, LSTM
In [27]:
embedding_size = 150
n_lstm_units = 100

model = Sequential()
# mask_zero=True tells downstream layers to ignore the 0-valued padding positions
model.add(Embedding(max_features + 1, embedding_size, mask_zero=True))
model.add(
    Bidirectional(LSTM(n_lstm_units, dropout=0.2, recurrent_dropout=0.2))
)
model.add(Dense(1, activation='sigmoid'))
model.summary()
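The counts reported by model.summary() can be cross-checked by hand; a sketch using the standard LSTM and Dense parameter formulas:
In [ ]:
# Embedding: one vector per row of the (max_features + 1)-sized vocabulary
emb_params = (max_features + 1) * embedding_size
# LSTM: 4 gates, each with input weights, recurrent weights, and a bias
lstm_params = 4 * ((embedding_size + n_lstm_units) * n_lstm_units + n_lstm_units)
bilstm_params = 2 * lstm_params  # forward and backward directions
# Dense: the bidirectional outputs are concatenated to 2 * n_lstm_units features
dense_params = 2 * n_lstm_units + 1
print(emb_params, bilstm_params, dense_params)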
In [28]:
# try using different optimizers and different optimizer configs
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
history = model.fit(x_train, y_train,
                    batch_size=batch_size,
                    epochs=4,
                    validation_split=0.2)
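With training done, the held-out test set gives an unbiased accuracy estimate, and the history collected above can be plotted with the matplotlib setup from the first cell. A sketch; note that older Keras versions store the metric under 'acc'/'val_acc' rather than 'accuracy'/'val_accuracy'.
In [ ]:
score, acc = model.evaluate(x_test, y_test, batch_size=batch_size)
print('Test loss:', score)
print('Test accuracy:', acc)
In [ ]:
# Plot train vs. validation accuracy per epoch (swap in 'acc'/'val_acc' on older Keras).
fig, ax = plt.subplots()
ax.plot(history.history['accuracy'], label='train')
ax.plot(history.history['val_accuracy'], label='validation')
ax.set_xlabel('epoch')
ax.set_ylabel('accuracy')
ax.legend()
plt.show()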