In [2]:
import tensorflow as tf
from tensorflow import keras
import numpy as np

print(tf.__version__)


1.13.1

Import the IMDB dataset


In [3]:
imdb = keras.datasets.imdb

(train_data, train_label),(test_data,test_label) = imdb.load_data(num_words=10000)


Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/imdb.npz
17465344/17464789 [==============================] - 3s 0us/step

The argument num_words=10000 keeps the top 10,000 most frequently occurring words in the training data. The rare words are discarded to keep the size of the data manageable.
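
As a quick sanity check (not part of the original notebook), the vocabulary cap can be verified by scanning the encoded reviews for the largest word index, which should stay below 10,000:

In [ ]:
## Sanity check: with num_words=10000, every index in the encoded reviews is below 10000
max_index = max(max(sequence) for sequence in train_data)
print("Largest word index in the training data:", max_index)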


In [10]:
print("Train data shape:",train_data.shape)
print("Test data shape:",test_data.shape)
print("Train label :",len(train_label))

print("First Imdb review: ",train_data[0]) ## review data for the first review

## note that review lengths differ (first train review vs. second test review)
print("length of first and second review:",len(train_data[0])," ",len(test_data[1]))


Train data shape: (25000,)
Test data shape: (25000,)
Train labels: 25000
First Imdb review:  [1, 14, 22, 16, 43, 530, 973, 1622, 1385, 65, 458, 4468, 66, 3941, 4, 173, 36, 256, 5, 25, 100, 43, 838, 112, 50, 670, 2, 9, 35, 480, 284, 5, 150, 4, 172, 112, 167, 2, 336, 385, 39, 4, 172, 4536, 1111, 17, 546, 38, 13, 447, 4, 192, 50, 16, 6, 147, 2025, 19, 14, 22, 4, 1920, 4613, 469, 4, 22, 71, 87, 12, 16, 43, 530, 38, 76, 15, 13, 1247, 4, 22, 17, 515, 17, 12, 16, 626, 18, 2, 5, 62, 386, 12, 8, 316, 8, 106, 5, 4, 2223, 5244, 16, 480, 66, 3785, 33, 4, 130, 12, 16, 38, 619, 5, 25, 124, 51, 36, 135, 48, 25, 1415, 33, 6, 22, 12, 215, 28, 77, 52, 5, 14, 407, 16, 82, 2, 8, 4, 107, 117, 5952, 15, 256, 4, 2, 7, 3766, 5, 723, 36, 71, 43, 530, 476, 26, 400, 317, 46, 7, 4, 2, 1029, 13, 104, 88, 4, 381, 15, 297, 98, 32, 2071, 56, 26, 141, 6, 194, 7486, 18, 4, 226, 22, 21, 134, 476, 26, 480, 5, 144, 30, 5535, 18, 51, 36, 28, 224, 92, 25, 104, 4, 226, 65, 16, 38, 1334, 88, 12, 16, 283, 5, 16, 4472, 113, 103, 32, 15, 16, 5345, 19, 178, 32]
length of first and second review: 218   260

Convert the integers back to words using the word-index dictionary


In [20]:
## A dictionary mapping of a word to a integer index
word_index = imdb.get_word_index()

## Shift all indices by 3 so the first indices can be reserved for special tokens
word_index = {k: (v + 3) for k, v in word_index.items()}

word_index["<PAD>"] = 0
word_index["<START>"] = 1
word_index["<UNK>"] = 2  ## unknown
word_index["<UNUSED>"] = 3

reverse_word_index = dict([(value, key) for (key,value) in word_index.items()])

def decode_review(text):
    return ' '.join([reverse_word_index.get(i,'?') for i in text])

In [21]:
decode_review(train_data[0])


Out[21]:
"<START> this film was just brilliant casting location scenery story direction everyone's really suited the part they played and you could just imagine being there robert <UNK> is an amazing actor and now the same being director <UNK> father came from the same scottish island as myself so i loved the fact there was a real connection with this film the witty remarks throughout the film were great it was just brilliant so much that i bought the film as soon as it was released for <UNK> and would recommend it to everyone to watch and the fly fishing was amazing really cried at the end it was so sad and you know what they say if you cry at a film it must have been good and this definitely was also <UNK> to the two little boy's that played the <UNK> of norman and paul they were just brilliant children are often left out of the <UNK> list i think because the stars that play them all grown up are such a big profile for the whole film but these children are amazing and should be praised for what they have done don't you think the whole story was so lovely because it was true and was someone's life after all that was shared with us all"

Preparing the data

We can pad the arrays so they all have the same length and then create an integer tensor of shape max_length * num_reviews. An embedding layer capable of handling this shape can be used as the first layer of the network.

Since the inputs to the network must all be the same length, we will use the pad_sequences function to standardize the review lengths.


In [22]:
train_data = keras.preprocessing.sequence.pad_sequences(train_data, 
                                                       value = word_index["<PAD>"],
                                                       padding='post',
                                                       maxlen = 256)

test_data = keras.preprocessing.sequence.pad_sequences(test_data,
                                                      value = word_index["<PAD>"],
                                                      padding = 'post',
                                                      maxlen = 256)

In [23]:
print(len(train_data[0])," ",len(test_data[1]))


256   256

In [24]:
print(train_data[0])


[   1   14   22   16   43  530  973 1622 1385   65  458 4468   66 3941
    4  173   36  256    5   25  100   43  838  112   50  670    2    9
   35  480  284    5  150    4  172  112  167    2  336  385   39    4
  172 4536 1111   17  546   38   13  447    4  192   50   16    6  147
 2025   19   14   22    4 1920 4613  469    4   22   71   87   12   16
   43  530   38   76   15   13 1247    4   22   17  515   17   12   16
  626   18    2    5   62  386   12    8  316    8  106    5    4 2223
 5244   16  480   66 3785   33    4  130   12   16   38  619    5   25
  124   51   36  135   48   25 1415   33    6   22   12  215   28   77
   52    5   14  407   16   82    2    8    4  107  117 5952   15  256
    4    2    7 3766    5  723   36   71   43  530  476   26  400  317
   46    7    4    2 1029   13  104   88    4  381   15  297   98   32
 2071   56   26  141    6  194 7486   18    4  226   22   21  134  476
   26  480    5  144   30 5535   18   51   36   28  224   92   25  104
    4  226   65   16   38 1334   88   12   16  283    5   16 4472  113
  103   32   15   16 5345   19  178   32    0    0    0    0    0    0
    0    0    0    0    0    0    0    0    0    0    0    0    0    0
    0    0    0    0    0    0    0    0    0    0    0    0    0    0
    0    0    0    0]

Building the model

The model stacks an embedding layer, a global average pooling layer, and two dense layers; the final single-unit sigmoid layer outputs the probability that a review is positive.


In [27]:
# the Embedding layer's input dimension is the vocabulary size used in the reviews, i.e. word count = 10,000

vocab_size = 10000

model = keras.Sequential()
model.add(keras.layers.Embedding(vocab_size, 16))
model.add(keras.layers.GlobalAveragePooling1D())
model.add(keras.layers.Dense(16, activation = tf.nn.relu))
model.add(keras.layers.Dense(1, activation = tf.nn.sigmoid))

model.summary()


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_2 (Embedding)      (None, None, 16)          160000    
_________________________________________________________________
global_average_pooling1d (Gl (None, 16)                0         
_________________________________________________________________
dense (Dense)                (None, 16)                272       
_________________________________________________________________
dense_1 (Dense)              (None, 1)                 17        
=================================================================
Total params: 160,289
Trainable params: 160,289
Non-trainable params: 0
_________________________________________________________________
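
The parameter counts follow directly from the layer sizes: the embedding contributes 10,000 × 16 = 160,000 weights, the first dense layer 16 × 16 + 16 = 272 (weights plus biases), and the output layer 16 + 1 = 17, for a total of 160,289.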

In [28]:
### adding the loss function and optimizer

model.compile(optimizer = 'adam',
             loss = 'binary_crossentropy',
             metrics = ['acc'])
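
Equivalently, the optimizer can be passed as an object to make the learning rate explicit; a minimal sketch, assuming tf.keras' Adam optimizer with its default rate:

In [ ]:
## Sketch only: same configuration as above, with the learning rate spelled out
model.compile(optimizer = keras.optimizers.Adam(lr=0.001),
             loss = 'binary_crossentropy',
             metrics = ['acc'])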

In [32]:
### creating a validation set to monitor accuracy on data the model is not trained on

x_val = train_data[:10000]
partial_x_train = train_data[10000:]

y_val = train_label[:10000]
partial_y_train = train_label[10000:]

Training the model


In [33]:
history = model.fit(partial_x_train,
                    partial_y_train,
                    epochs=40,
                    batch_size=512,
                    validation_data=(x_val, y_val),
                    verbose=1)


Train on 15000 samples, validate on 10000 samples
WARNING:tensorflow:From /anaconda3/lib/python3.6/site-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.cast instead.
Epoch 1/40
15000/15000 [==============================] - 4s 295us/sample - loss: 0.6917 - acc: 0.5573 - val_loss: 0.6894 - val_acc: 0.6509
Epoch 2/40
15000/15000 [==============================] - 4s 236us/sample - loss: 0.6849 - acc: 0.7289 - val_loss: 0.6798 - val_acc: 0.7407
Epoch 3/40
15000/15000 [==============================] - 4s 235us/sample - loss: 0.6700 - acc: 0.7575 - val_loss: 0.6614 - val_acc: 0.7565
Epoch 4/40
15000/15000 [==============================] - 4s 240us/sample - loss: 0.6449 - acc: 0.7678 - val_loss: 0.6330 - val_acc: 0.7589
Epoch 5/40
15000/15000 [==============================] - 4s 243us/sample - loss: 0.6088 - acc: 0.7948 - val_loss: 0.5958 - val_acc: 0.7875
Epoch 6/40
15000/15000 [==============================] - 4s 244us/sample - loss: 0.5642 - acc: 0.8169 - val_loss: 0.5535 - val_acc: 0.8072
Epoch 7/40
15000/15000 [==============================] - 4s 246us/sample - loss: 0.5155 - acc: 0.8320 - val_loss: 0.5084 - val_acc: 0.8241
Epoch 8/40
15000/15000 [==============================] - 4s 238us/sample - loss: 0.4678 - acc: 0.8499 - val_loss: 0.4676 - val_acc: 0.8366
Epoch 9/40
15000/15000 [==============================] - 4s 238us/sample - loss: 0.4243 - acc: 0.8648 - val_loss: 0.4317 - val_acc: 0.8472
Epoch 10/40
15000/15000 [==============================] - 4s 244us/sample - loss: 0.3864 - acc: 0.8762 - val_loss: 0.4025 - val_acc: 0.8541
Epoch 11/40
15000/15000 [==============================] - 4s 241us/sample - loss: 0.3547 - acc: 0.8846 - val_loss: 0.3787 - val_acc: 0.8608
Epoch 12/40
15000/15000 [==============================] - 4s 239us/sample - loss: 0.3281 - acc: 0.8915 - val_loss: 0.3604 - val_acc: 0.8640
Epoch 13/40
15000/15000 [==============================] - 4s 244us/sample - loss: 0.3063 - acc: 0.8977 - val_loss: 0.3444 - val_acc: 0.8695
Epoch 14/40
15000/15000 [==============================] - 4s 241us/sample - loss: 0.2866 - acc: 0.9029 - val_loss: 0.3326 - val_acc: 0.8730
Epoch 15/40
15000/15000 [==============================] - 4s 243us/sample - loss: 0.2701 - acc: 0.9074 - val_loss: 0.3230 - val_acc: 0.8752
Epoch 16/40
15000/15000 [==============================] - 4s 245us/sample - loss: 0.2553 - acc: 0.9130 - val_loss: 0.3149 - val_acc: 0.8759
Epoch 17/40
15000/15000 [==============================] - 4s 247us/sample - loss: 0.2415 - acc: 0.9169 - val_loss: 0.3082 - val_acc: 0.8785
Epoch 18/40
15000/15000 [==============================] - 4s 243us/sample - loss: 0.2294 - acc: 0.9220 - val_loss: 0.3026 - val_acc: 0.8813
Epoch 19/40
15000/15000 [==============================] - 4s 247us/sample - loss: 0.2183 - acc: 0.9244 - val_loss: 0.2977 - val_acc: 0.8817
Epoch 20/40
15000/15000 [==============================] - 4s 242us/sample - loss: 0.2084 - acc: 0.9283 - val_loss: 0.2945 - val_acc: 0.8819
Epoch 21/40
15000/15000 [==============================] - 4s 242us/sample - loss: 0.1983 - acc: 0.9331 - val_loss: 0.2916 - val_acc: 0.8824
Epoch 22/40
15000/15000 [==============================] - 4s 255us/sample - loss: 0.1898 - acc: 0.9365 - val_loss: 0.2893 - val_acc: 0.8849
Epoch 23/40
15000/15000 [==============================] - 4s 258us/sample - loss: 0.1812 - acc: 0.9408 - val_loss: 0.2885 - val_acc: 0.8843
Epoch 24/40
15000/15000 [==============================] - 4s 235us/sample - loss: 0.1738 - acc: 0.9443 - val_loss: 0.2870 - val_acc: 0.8846
Epoch 25/40
15000/15000 [==============================] - 4s 245us/sample - loss: 0.1662 - acc: 0.9469 - val_loss: 0.2857 - val_acc: 0.8861
Epoch 26/40
15000/15000 [==============================] - 4s 265us/sample - loss: 0.1594 - acc: 0.9497 - val_loss: 0.2864 - val_acc: 0.8836
Epoch 27/40
15000/15000 [==============================] - 4s 245us/sample - loss: 0.1530 - acc: 0.9521 - val_loss: 0.2860 - val_acc: 0.8844
Epoch 28/40
15000/15000 [==============================] - 4s 252us/sample - loss: 0.1468 - acc: 0.9550 - val_loss: 0.2866 - val_acc: 0.8852
Epoch 29/40
15000/15000 [==============================] - 4s 240us/sample - loss: 0.1414 - acc: 0.9579 - val_loss: 0.2884 - val_acc: 0.8844
Epoch 30/40
15000/15000 [==============================] - 4s 240us/sample - loss: 0.1357 - acc: 0.9594 - val_loss: 0.2878 - val_acc: 0.8858
Epoch 31/40
15000/15000 [==============================] - 4s 243us/sample - loss: 0.1300 - acc: 0.9619 - val_loss: 0.2890 - val_acc: 0.8870
Epoch 32/40
15000/15000 [==============================] - 4s 249us/sample - loss: 0.1249 - acc: 0.9651 - val_loss: 0.2906 - val_acc: 0.8859
Epoch 33/40
15000/15000 [==============================] - 4s 245us/sample - loss: 0.1199 - acc: 0.9664 - val_loss: 0.2929 - val_acc: 0.8851
Epoch 34/40
15000/15000 [==============================] - 4s 246us/sample - loss: 0.1154 - acc: 0.9675 - val_loss: 0.2952 - val_acc: 0.8844
Epoch 35/40
15000/15000 [==============================] - 4s 243us/sample - loss: 0.1113 - acc: 0.9685 - val_loss: 0.2978 - val_acc: 0.8844
Epoch 36/40
15000/15000 [==============================] - 4s 246us/sample - loss: 0.1069 - acc: 0.9708 - val_loss: 0.2996 - val_acc: 0.8843
Epoch 37/40
15000/15000 [==============================] - 4s 244us/sample - loss: 0.1026 - acc: 0.9717 - val_loss: 0.3024 - val_acc: 0.8831
Epoch 38/40
15000/15000 [==============================] - 4s 255us/sample - loss: 0.0987 - acc: 0.9734 - val_loss: 0.3060 - val_acc: 0.8818
Epoch 39/40
15000/15000 [==============================] - 4s 247us/sample - loss: 0.0955 - acc: 0.9745 - val_loss: 0.3098 - val_acc: 0.8810
Epoch 40/40
15000/15000 [==============================] - 4s 238us/sample - loss: 0.0914 - acc: 0.9766 - val_loss: 0.3126 - val_acc: 0.8822
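
Note that the validation loss bottoms out around epoch 25 (val_loss ≈ 0.286) and then creeps back up while the training loss keeps falling, a sign of overfitting. One common remedy, not used in this notebook, is an EarlyStopping callback; a minimal sketch:

In [ ]:
## Sketch only: stop training once val_loss has not improved for 3 consecutive epochs
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=3)

## model.fit(..., validation_data=(x_val, y_val), callbacks=[early_stop])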

Evaluate the model


In [34]:
results = model.evaluate(test_data, test_label)

print(results)


25000/25000 [==============================] - 1s 29us/sample - loss: 0.3339 - acc: 0.8714
[0.33389624541282653, 0.8714]
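
To use the trained model on individual reviews, model.predict returns the sigmoid output, i.e. the estimated probability that a review is positive; a minimal sketch using the padded test_data from above:

In [ ]:
## probabilities close to 1 indicate a positive review, close to 0 a negative one
predictions = model.predict(test_data[:3])
for prob, label in zip(predictions, test_label[:3]):
    print("predicted: %.3f   actual: %d" % (prob[0], label))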

Create graphs of loss and accuracy over time


In [36]:
history_dict = history.history

history_dict.keys()


Out[36]:
dict_keys(['loss', 'acc', 'val_loss', 'val_acc'])

In [37]:
import matplotlib.pyplot as plt

acc = history_dict['acc']
val_acc = history_dict['val_acc']
loss = history_dict['loss']
val_loss = history_dict['val_loss']

epochs = range(1, len(acc) + 1)

# "bo" is for "blue dot"
plt.plot(epochs, loss, 'bo', label='Training loss')
# b is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()

plt.show()


[Figure: training and validation loss per epoch]

In [38]:
plt.clf()   # clear figure

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()

plt.show()


