In [1]:
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalMaxPooling1D
from keras.datasets import imdb
In [2]:
max_features = 5000   # vocabulary size: keep only the 5000 most frequent words
maxlen = 400          # every review is padded/truncated to 400 word indices
batch_size = 32       # mini-batch size for gradient updates
embedding_dims = 100  # dimensionality of the learned word embeddings
epochs = 2            # number of full passes over the training data
In [3]:
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
In [8]:
# Pad/truncate every review to exactly `maxlen` indices so the downstream
# Embedding/Conv1D stack receives fixed-length inputs.
x_train, x_test = (
    sequence.pad_sequences(split, maxlen=maxlen) for split in (x_train, x_test)
)
# Last expression of the cell: display the resulting array shapes.
x_train.shape, x_test.shape
Out[8]:
In [10]:
y_train[0]
Out[10]:
In [5]:
# Text-classification CNN: embed the word indices, run a 1-D convolution over
# the sequence, global-max-pool over time into a fixed-size vector, then a
# dense head ending in a single sigmoid unit for binary prediction.
model = Sequential([
    Embedding(max_features,         # vocabulary size
              embedding_dims,       # embedding dimensionality
              input_length=maxlen), # fixed sequence length after padding
    Dropout(0.2),
    Conv1D(250,                     # number of filters
           3,                       # kernel width: looks at 3-word windows
           padding='valid',
           activation='relu',
           strides=1),
    GlobalMaxPooling1D(),           # max over the time axis -> (batch, 250)
    Dense(250),
    Dropout(0.2),
    Activation('relu'),
    Dense(1),
    Activation('sigmoid'),          # probability of the positive class
])
In [6]:
# Binary cross-entropy matches the single sigmoid output unit; Adam optimizer
# with Keras default hyperparameters; track accuracy during training.
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
In [7]:
# Train for `epochs` passes over the training set; the returned History repr
# is the cell's displayed output.
# NOTE(review): the test split is passed as validation_data, so it guides
# monitoring during training — keep a separate holdout if the final reported
# test accuracy needs to be unbiased.
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
Out[7]:
In [ ]: