Simple embedding and sentiment analysis

Inspired by: https://machinelearningmastery.com/use-word-embedding-layers-deep-learning-keras/


In [25]:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf
# for TF 1.x (tf < 2), drop the "tensorflow." prefix from the keras imports below
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Activation, Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.utils import plot_model
from tensorflow.keras.layers import Embedding
from tensorflow.keras.preprocessing.text import one_hot
from tensorflow.keras.preprocessing.sequence import pad_sequences

print(tf.__version__)
print(tf.keras.__version__)

docs = ['Well done!',
        'Good work',
        'Great effort',
        'Nice work',
        'Excellent!',
        'Wow!',
        'Weak',
        'Poor effort!',
        'Not good',
        'Poor work',
        'Could have done better.',
        'Very bad!']
# define class labels
labels = np.array([1,1,1,1,1,1,0,0,0,0,0,0])
# integer encode the documents
vocab_size = 50
encoded_docs = [one_hot(d, vocab_size) for d in docs]
print(encoded_docs)
# pad documents to a max length of 4 words
max_length = 4
padded_docs = pad_sequences(encoded_docs, maxlen=max_length, padding='post')
print(padded_docs)


2.0.0-alpha0
2.2.4-tf
[[12, 1], [41, 18], [39, 41], [6, 18], [49], [48], [30], [42, 41], [32, 41], [42, 18], [1, 13, 1, 29], [34, 7]]
[[12  1  0  0]
 [41 18  0  0]
 [39 41  0  0]
 [ 6 18  0  0]
 [49  0  0  0]
 [48  0  0  0]
 [30  0  0  0]
 [42 41  0  0]
 [32 41  0  0]
 [42 18  0  0]
 [ 1 13  1 29]
 [34  7  0  0]]
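
A note on the encoding: one_hot is a thin wrapper around hashing_trick, so each word is hashed into an index in [1, vocab_size) rather than looked up in a learned vocabulary. Two consequences follow: distinct words can collide (in the run above, 'done' and 'could' both map to index 1), and because Python's built-in hash is salted per interpreter session, the indices may change between runs unless PYTHONHASHSEED is fixed. A minimal sketch of a collision-free alternative using the Tokenizer class from the same preprocessing module:

In [ ]:
# Tokenizer builds an explicit word-to-index mapping instead of hashing,
# so distinct words never share an index and the encoding is reproducible.
from tensorflow.keras.preprocessing.text import Tokenizer

tokenizer = Tokenizer(num_words=vocab_size)
tokenizer.fit_on_texts(docs)                         # learn the vocabulary
tokenized_docs = tokenizer.texts_to_sequences(docs)  # one index sequence per doc
print(tokenizer.word_index)                          # word -> index mapping
print(pad_sequences(tokenized_docs, maxlen=max_length, padding='post'))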

In [26]:
# define the model
model = Sequential()
model.add(Embedding(vocab_size, 8, input_length=max_length))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
# compile the model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
# summarize the model; model.summary() prints the table itself and returns None
print(model.summary())


Model: "sequential_2"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_1 (Embedding)      (None, 4, 8)              400       
_________________________________________________________________
flatten_1 (Flatten)          (None, 32)                0         
_________________________________________________________________
dense_1 (Dense)              (None, 1)                 33        
=================================================================
Total params: 433
Trainable params: 433
Non-trainable params: 0
_________________________________________________________________
None
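
The parameter counts follow directly from the layer shapes: the Embedding layer holds one 8-dimensional vector per possible index (50 x 8 = 400), Flatten only reshapes the 4 x 8 embedding output into 32 values, and the Dense layer adds 32 weights plus 1 bias (33). A quick sanity check:

In [ ]:
# Recompute the parameter counts shown in the summary above.
embedding_params = vocab_size * 8      # 400
flatten_units = max_length * 8         # 32, no trainable parameters
dense_params = flatten_units * 1 + 1   # 33 (weights + bias)
print(embedding_params, flatten_units, dense_params)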

In [33]:
# fit the model
model.fit(padded_docs, labels, epochs=200, verbose=0)
# evaluate the model
loss, accuracy = model.evaluate(padded_docs, labels, verbose=0)
print('Accuracy: %f' % (accuracy*100))


Accuracy: 100.000000
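
Perfect accuracy here is expected but not very informative: the model is evaluated on the same 12 sentences it was trained on, so this only confirms it can memorize the training set. The learned embedding matrix itself can be inspected directly; a minimal sketch, assuming the Embedding layer is the first layer of the Sequential model defined above:

In [ ]:
# The Embedding layer stores a single weight matrix of shape (vocab_size, 8).
embedding_matrix = model.layers[0].get_weights()[0]
print(embedding_matrix.shape)   # expected: (50, 8)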

In [39]:
test = ['Great job!']
encoded_test = [one_hot(d, vocab_size) for d in test]
print(encoded_test)
padded_test = pad_sequences(encoded_test, maxlen=max_length, padding='post')


[[39, 14]]

In [40]:
model.predict(padded_test)


Out[40]:
array([[0.823561]], dtype=float32)
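
The model scores this unseen sentence as clearly positive. That works because 'great' hashes to the same index (39 in this run) it received during training in 'Great effort', so its learned embedding is reused, while 'job' gets an index that never appeared in training and whose embedding row received no gradient updates. A small illustrative check:

In [ ]:
# one_hot lowercases by default, so 'Great' and 'great' hash identically
# within a session; the exact indices assume the same session as above.
print(one_hot('great', vocab_size))  # [39] in this run, matching 'Great effort'
print(one_hot('job', vocab_size))    # [14] in this run, absent from encoded_docs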

In [41]:
test = ['This is bad!']
encoded_test = [one_hot(d, vocab_size) for d in test]
print(encoded_test)
padded_test = pad_sequences(encoded_test, maxlen=max_length, padding='post')
model.predict(padded_test)


[[8, 13, 7]]
Out[41]:
array([[0.29199824]], dtype=float32)
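
Here the sigmoid output falls below 0.5, so the sentence is classified as negative. To turn raw probabilities into hard class labels, the usual convention for a binary cross-entropy model is to threshold at 0.5; a minimal sketch:

In [ ]:
# Threshold the sigmoid outputs: 1 = positive sentiment, 0 = negative.
probs = model.predict(padded_test)
predicted_labels = (probs > 0.5).astype(int)
print(probs, predicted_labels)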

In [ ]: