In [1]:
# A 1D convolutional network for binary sentiment classification
# on the IMDB movie-review dataset.
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalMaxPooling1D
from keras.datasets import imdb


Using TensorFlow backend.

In [2]:
max_features = 5000    # vocabulary size: keep only the 5,000 most frequent words
maxlen = 400           # pad/truncate every review to 400 word indices
batch_size = 32
embedding_dims = 100   # dimensionality of the learned word vectors
epochs = 2

In [3]:
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
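
The word indices can be mapped back to text with imdb.get_word_index(); Keras reserves indices 0-2 for the padding, sequence-start, and out-of-vocabulary markers, so the stored indices are offset by 3. A minimal sanity check, not part of the original run:

In [ ]:
word_index = imdb.get_word_index()
index_to_word = {index + 3: word for word, index in word_index.items()}
# Show the first 30 tokens of the first training review.
print(' '.join(index_to_word.get(i, '?') for i in x_train[0][:30]))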

In [4]:
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)

x_train.shape, x_test.shape


Out[4]:
((25000, 400), (25000, 400))
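
pad_sequences left-pads shorter reviews with zeros and truncates longer ones from the front (the Keras defaults), so every example becomes a fixed-length vector of 400 word indices. A tiny illustration on a toy input, not part of the original run:

In [ ]:
# [1, 2, 3] becomes [0, 0, 1, 2, 3]: zeros are prepended up to maxlen.
sequence.pad_sequences([[1, 2, 3]], maxlen=5)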

In [5]:
# Labels are binary: 0 = negative review, 1 = positive review.
y_train[0]


Out[5]:
1
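
The training split is balanced between the two classes (12,500 negative and 12,500 positive reviews). A quick check, not in the original transcript:

In [ ]:
import numpy as np
np.bincount(y_train)  # expected: array([12500, 12500])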

In [6]:
model = Sequential()
# Map each of the 5,000 word indices to a dense 100-dimensional vector.
model.add(Embedding(max_features,
                    embedding_dims,
                    input_length=maxlen))
model.add(Dropout(0.2))
# Slide 250 filters of width 3 over the embedded sequence.
model.add(Conv1D(250,
                 3,
                 padding='valid',
                 activation='relu',
                 strides=1))
# Keep only each filter's strongest response across the sequence.
model.add(GlobalMaxPooling1D())
model.add(Dense(250))
model.add(Dropout(0.2))
model.add(Activation('relu'))
# A single sigmoid unit outputs the probability that a review is positive.
model.add(Dense(1))
model.add(Activation('sigmoid'))
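
With padding='valid' and a kernel of width 3, the convolution produces 400 - 3 + 1 = 398 timesteps of 250 channels each, which GlobalMaxPooling1D collapses to a single 250-dimensional vector per review. To confirm the layer shapes and parameter counts (an optional check, not in the original transcript):

In [ ]:
model.summary()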

In [7]:
# Binary cross-entropy with the Adam optimizer; track accuracy during training.
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

In [8]:
# Monitor loss and accuracy on the held-out test split after each epoch.
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          validation_data=(x_test, y_test))


Train on 25000 samples, validate on 25000 samples
Epoch 1/2
25000/25000 [==============================] - 22s - loss: 0.3874 - acc: 0.8150 - val_loss: 0.2778 - val_acc: 0.8827
Epoch 2/2
25000/25000 [==============================] - 20s - loss: 0.2182 - acc: 0.9148 - val_loss: 0.2670 - val_acc: 0.8885
Out[8]:
<keras.callbacks.History at 0x7f6cdaf8c4a8>

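Once trained, the model can be scored on the held-out set and used for prediction. A follow-up sketch, not part of the original run:

In [ ]:
# Held-out loss and accuracy; evaluate returns [loss, acc] because
# the model was compiled with metrics=['accuracy'].
loss, acc = model.evaluate(x_test, y_test, batch_size=batch_size)
print('test loss: %.4f, test accuracy: %.4f' % (loss, acc))

# Probability that the first test review is positive.
model.predict(x_test[:1])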