Bidirectional LSTM for sentiment classification on the IMDB dataset; see https://github.com/fchollet/keras/blob/master/examples/imdb_bidirectional_lstm.py
In [1]:
WEIGHTS_FILEPATH = 'imdb_bidirectional_lstm.hdf5'
MODEL_ARCH_FILEPATH = 'imdb_bidirectional_lstm.json'
In [2]:
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Embedding, LSTM, Input, Bidirectional
from keras.datasets import imdb
from keras.callbacks import EarlyStopping, ModelCheckpoint
import json
In [3]:
max_features = 20000
maxlen = 200  # cut texts after this many words (among the top max_features most common words)
print('Loading data...')
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=max_features)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')
print("Pad sequences (samples x time)")
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
y_train = np.array(y_train)
y_test = np.array(y_test)
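As a quick illustration (not part of the original example), pad_sequences left-pads shorter sequences with zeros and truncates longer ones from the front by default:

demo = sequence.pad_sequences([[1, 2, 3], [4, 5, 6, 7, 8]], maxlen=4)
print(demo)
# [[0 1 2 3]
#  [5 6 7 8]]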
In [6]:
model = Sequential()
model.add(Embedding(max_features, 64, input_length=maxlen))
model.add(Bidirectional(LSTM(32)))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
# try using different optimizers and different optimizer configs
model.compile('adam', 'binary_crossentropy', metrics=['accuracy'])
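A model.summary() call (added here for reference) makes the parameter counts concrete; the arithmetic below follows from the layer sizes chosen above:

model.summary()
# Embedding: 20000 * 64                       = 1,280,000 params
# Bi-LSTM:   2 * 4 * ((64 + 32) * 32 + 32)    = 24,832 params
# Dense:     (32 * 2) + 1                     = 65 params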
In [7]:
# Model saving callback: keep only the weights with the best validation accuracy
# (note: Keras >= 2.3 names this metric 'val_accuracy' rather than 'val_acc')
checkpointer = ModelCheckpoint(filepath=WEIGHTS_FILEPATH, monitor='val_acc', verbose=1, save_best_only=True)
# Early stopping: halt training if validation accuracy fails to improve for 2 consecutive epochs
early_stopping = EarlyStopping(monitor='val_acc', verbose=1, patience=2)
# train
batch_size = 128
epochs = 10
model.fit(X_train, y_train,
          validation_data=(X_test, y_test),
          batch_size=batch_size, epochs=epochs, verbose=2,
          callbacks=[checkpointer, early_stopping])
Out[7]:
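Since ModelCheckpoint with save_best_only=True leaves the best-scoring weights on disk, a reasonable follow-up (not in the original notebook) is to reload them before any final evaluation:

model.load_weights(WEIGHTS_FILEPATH)
score, acc = model.evaluate(X_test, y_test, batch_size=batch_size, verbose=0)
print('Test accuracy with best checkpoint: %.4f' % acc)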
In [8]:
with open(MODEL_ARCH_FILEPATH, 'w') as f:
    f.write(model.to_json())
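To verify the exported architecture round-trips, the JSON can be reloaded with model_from_json (a sanity check, not in the original script):

from keras.models import model_from_json
with open(MODEL_ARCH_FILEPATH) as f:
    reloaded = model_from_json(f.read())
reloaded.load_weights(WEIGHTS_FILEPATH)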
Sample data
In [9]:
word_index = imdb.get_word_index()
In [10]:
word_dict = {idx: word for word, idx in word_index.items()}  # reverse mapping: index -> word
In [11]:
sample = []
for idx in X_train[0]:
    # Keras's IMDB loader offsets word indices by 3: 0 = padding, 1 = start, 2 = OOV
    if idx >= 3:
        sample.append(word_dict[idx - 3])
    elif idx == 2:
        sample.append('-')
' '.join(sample)
Out[11]:
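The same offset logic can be wrapped into a small helper (decode_review is a hypothetical name, not part of the original notebook):

def decode_review(seq):
    # Keras's IMDB loader shifts word indices by 3:
    # 0 = padding, 1 = start-of-sequence, 2 = out-of-vocabulary
    tokens = []
    for idx in seq:
        if idx >= 3:
            tokens.append(word_dict[idx - 3])
        elif idx == 2:
            tokens.append('-')
    return ' '.join(tokens)

decode_review(X_test[0])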
In [12]:
with open('imdb_dataset_word_index_top20000.json', 'w') as f:
    f.write(json.dumps({word: idx for word, idx in word_index.items() if idx < max_features}))
In [13]:
with open('imdb_dataset_word_dict_top20000.json', 'w') as f:
    f.write(json.dumps({idx: word for word, idx in word_index.items() if idx < max_features}))
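One caveat when reading these files back: JSON object keys are always strings, so the integer keys of the word dict need to be cast on load (a usage note, not part of the original notebook):

with open('imdb_dataset_word_dict_top20000.json') as f:
    loaded_word_dict = {int(idx): word for idx, word in json.load(f).items()}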
In [14]:
sample_test_data = []
for i in np.random.choice(range(X_test.shape[0]), size=1000, replace=False):
    sample_test_data.append({'values': X_test[i].tolist(), 'label': y_test[i].tolist()})
with open('imdb_dataset_test.json', 'w') as f:
    f.write(json.dumps(sample_test_data))
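As a quick sanity check on the exported samples (added here, not in the original notebook), the trained model's prediction can be compared against the stored label:

probe = np.array([sample_test_data[0]['values']])
print('predicted: %.3f, label: %d' % (model.predict(probe)[0][0], sample_test_data[0]['label']))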