In [6]:
'''Train an LSTM on the IMDB sentiment classification task.
The dataset is actually too small for an LSTM to offer any advantage
over simpler, much faster methods such as TF-IDF + logistic regression.
Notes:
- RNNs are tricky: the choice of batch size matters, and the choice of
  loss and optimizer is critical. Some configurations won't converge.
- LSTM loss curves during training can look quite different
  from what you see with CNNs/MLPs/etc.
GPU command:
    THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python imdb_lstm.py
'''

from __future__ import print_function
import numpy as np
np.random.seed(1337)  # for reproducibility

from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM
from keras.datasets import imdb

max_features = 20000
maxlen = 100  # cut texts after this number of words (among top max_features most common words)
batch_size = 32

print('Loading data...')
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features,
                                                      test_split=0.2)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')

print("Pad sequences (samples x time)")
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
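# pad_sequences pads/truncates every review to exactly maxlen word indices
# (zero-padded), so the data becomes a fixed-size (samples x time) matrix.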
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)

print('Build model...')
model = Sequential()
model.add(Embedding(max_features, 128, input_length=maxlen))
model.add(LSTM(128))  # try using a GRU instead, for fun
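# A GRU variant would be a one-line swap (sketch, assuming
# keras.layers.recurrent.GRU is available in this Keras version):
#   from keras.layers.recurrent import GRU
#   model.add(GRU(128))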
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))

# try using different optimizers and different optimizer configs
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              class_mode="binary")
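# Sketch of an alternative optimizer config (assumes keras.optimizers.RMSprop
# in this Keras version; the learning rate here is illustrative only):
#   from keras.optimizers import RMSprop
#   model.compile(loss='binary_crossentropy',
#                 optimizer=RMSprop(lr=0.001),
#                 class_mode='binary')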

print("Train...")
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=3,
          validation_data=(X_test, y_test), show_accuracy=True)
score, acc = model.evaluate(X_test, y_test,
                            batch_size=batch_size,
                            show_accuracy=True)
print('Test score:', score)
print('Test accuracy:', acc)

print('Exporting model to YAML:')
yaml_string = model.to_yaml()
print(yaml_string)
print('Exporting model state:')
model.save_weights('my_model_weights.h5')
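# To restore the model later (sketch; assumes model_from_yaml exists in this
# Keras version and that 'my_model_weights.h5' is on disk):
#   from keras.models import model_from_yaml
#   restored = model_from_yaml(yaml_string)
#   restored.load_weights('my_model_weights.h5')
#   # restored must still be compiled before evaluation or further training.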


Loading data...
20000 train sequences
5000 test sequences
Pad sequences (samples x time)
X_train shape: (20000, 100)
X_test shape: (5000, 100)
Build model...
Train...
Train on 20000 samples, validate on 5000 samples
Epoch 1/3
20000/20000 [==============================] - 832s - loss: 0.4704 - acc: 0.7756 - val_loss: 0.4304 - val_acc: 0.8060
Epoch 2/3
20000/20000 [==============================] - 837s - loss: 0.2697 - acc: 0.8921 - val_loss: 0.3656 - val_acc: 0.8292
Epoch 3/3
20000/20000 [==============================] - 860s - loss: 0.1641 - acc: 0.9401 - val_loss: 0.4020 - val_acc: 0.8388
5000/5000 [==============================] - 27s    
Test score: 0.401985421205
Test accuracy: 0.8388
Exporting model to YAML:
class_mode: binary
layers:
- W_constraint: null
  W_regularizer: null
  activity_regularizer: null
  cache_enabled: true
  init: uniform
  input_dim: 20000
  input_length: 100
  input_shape: !!python/tuple [20000]
  mask_zero: false
  name: Embedding
  output_dim: 128
- {activation: tanh, cache_enabled: true, forget_bias_init: one, go_backwards: false,
  init: glorot_uniform, inner_activation: hard_sigmoid, inner_init: orthogonal, input_dim: 128,
  input_length: null, name: LSTM, output_dim: 128, return_sequences: false, stateful: false}
- {cache_enabled: true, name: Dropout, p: 0.5}
- {W_constraint: null, W_regularizer: null, activation: linear, activity_regularizer: null,
  b_constraint: null, b_regularizer: null, cache_enabled: true, init: glorot_uniform,
  input_dim: null, name: Dense, output_dim: 1}
- {activation: sigmoid, cache_enabled: true, name: Activation}
loss: binary_crossentropy
name: Sequential
optimizer: {beta_1: 0.8999999761581421, beta_2: 0.9990000128746033, epsilon: 1.0e-08,
  lr: 0.0010000000474974513, name: Adam}

/usr/local/lib/python2.7/dist-packages/theano/scan_module/scan_perform_ext.py:133: RuntimeWarning: numpy.ndarray size changed, may indicate binary incompatibility
  from scan_perform.scan_perform import *

In [ ]: