In [1]:
import numpy as np

from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence


Using TensorFlow backend.

In [2]:
class RNN:
    '''
    LSTM-based binary sentiment classifier for the IMDB reviews dataset
    '''
    def __init__(self, train_x, train_y, test_x, test_y,
                 embedding_layer=None, dict_size=5000,
                 example_length=500, embedding_length=32,
                 epochs=15, batch_size=128):
        '''
        Initialize the RNN model.
        :param train_x: training data
        :param train_y: training labels
        :param test_x: test data
        :param test_y: test labels
        :param embedding_layer: optional pre-built Embedding layer (e.g. GloVe)
        :param dict_size: vocabulary size
        :param example_length: fixed length each review is padded/truncated to
        :param embedding_length: dimensionality of a learned embedding
        :param epochs: number of training epochs
        :param batch_size: mini-batch size
        '''
        self.batch_size = batch_size
        self.epochs = epochs
        self.example_len = example_length
        self.dict_size = dict_size
        self.embedding_len = embedding_length

        # preprocess: pad/truncate every review to a fixed length
        self.train_x = sequence.pad_sequences(train_x, maxlen=example_length)
        self.test_x = sequence.pad_sequences(test_x, maxlen=example_length)
        self.train_y = train_y
        self.test_y = test_y

        # build the model
        model = Sequential()
        if embedding_layer is None:
            model.add(Embedding(dict_size, embedding_length, input_length=example_length))
        else:
            model.add(embedding_layer)
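        # one LSTM layer with input and recurrent dropout, then a single
        # sigmoid unit for the binary positive/negative decision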
        model.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2))
        model.add(Dense(1, activation='sigmoid'))
        model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
        print(model.summary())
        self.model = model


    def train(self):
        '''
        Fit the model to the training data.
        :return: None
        '''
        # fit the padded training data, validating on the test split each epoch
        self.model.fit(self.train_x, self.train_y,
                       validation_data=(self.test_x, self.test_y),
                       epochs=self.epochs, batch_size=self.batch_size)


    def evaluate(self):
        '''
        Evaluate the trained model on the test set.
        :return: [test loss, test accuracy]
        '''
        return self.model.evaluate(self.test_x, self.test_y)
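
The constructor pads or truncates every review to example_length tokens. As a quick illustration of what sequence.pad_sequences does (the toy demo list below is made up):

In [ ]:
demo = [[1, 2, 3], [4, 5]]
print(sequence.pad_sequences(demo, maxlen=4))
# [[0 1 2 3]
#  [0 0 4 5]]
# shorter sequences are left-padded with zeros and longer ones are truncated
# from the front, both Keras defaults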

In [3]:
dict_size = 5000
example_length = 500
embedding_length = 100
epochs = 15
batch_size = 128

In [4]:
(train_x, train_y), (test_x, test_y) = imdb.load_data(num_words=dict_size)

In [5]:
import os


# path to the unzipped GloVe 6B vectors (adjust for your machine)
GLOVE_DIR = "/Users/brianmckean/Downloads/glove.6B/"
embeddings_index = {}
with open(os.path.join(GLOVE_DIR, 'glove.6B.100d.txt')) as f:
    for line in f:
        values = line.split()
        word = values[0]
        coefs = np.asarray(values[1:], dtype='float32')
        embeddings_index[word] = coefs

print('Found %s word vectors.' % len(embeddings_index))


Found 400000 word vectors.
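
As a quick sanity check that the 100-dimensional vectors parsed correctly (assuming the common word 'the' is present, as it is in the 6B files):

In [ ]:
print(embeddings_index['the'].shape)   # expect (100,) for glove.6B.100d.txt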

In [6]:
EMBEDDING_DIM = embedding_length  # must match the 100-d GloVe file loaded above

In [7]:
word_index = imdb.get_word_index()

In [12]:
print("Downloading word2vec model...please give it time!")
url = 'http://nlp.stanford.edu/data/'
filename = maybe_download('glove.6B.zip', url, 862182613)
print("Loading vocab into memory, you should have a few GB of spare memory!")
glove_data = read_data(filename)

train_word_index = imdb.get_word_index()

embeddings_index = {}
for datum in glove_data:
    values = datum.split()
    word = values[0]
    coefs = np.asarray(values[1:], dtype='float32')
    embeddings_index[word] = coefs



In [8]:
# build the weight matrix row-by-row from word_index ranks; words without a
# GloVe vector are left as all-zeros
embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))
for word, i in word_index.items():
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        embedding_matrix[i] = embedding_vector
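
One caveat worth flagging: imdb.load_data offsets every word index by 3 by default (index_from=3; indices 0-2 are reserved for padding, start, and unknown), while imdb.get_word_index() returns unshifted ranks, so the rows built above are off by three relative to the padded reviews. A sketch of an offset-aware construction, left as an aside since the recorded outputs below use the unshifted matrix:

In [ ]:
INDEX_FROM = 3  # keras.datasets.imdb default offset for the reserved indices
embedding_matrix_shifted = np.zeros((len(word_index) + INDEX_FROM + 1, EMBEDDING_DIM))
for word, i in word_index.items():
    vec = embeddings_index.get(word)
    if vec is not None:
        embedding_matrix_shifted[i + INDEX_FROM] = vec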

In [9]:
from keras.layers import Embedding

embedding_layer = Embedding(len(word_index) + 1,
                            EMBEDDING_DIM,
                            weights=[embedding_matrix],
                            input_length=example_length,
                            trainable=False)
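
Setting trainable=False freezes the pretrained GloVe weights, so only the LSTM and Dense layers learn; this shows up in the Trainable/Non-trainable split of the model summary below. To fine-tune the vectors instead, the layer could be rebuilt with trainable=True:

In [ ]:
# illustrative alternative: let the GloVe weights be fine-tuned during training
embedding_layer_ft = Embedding(len(word_index) + 1,
                               EMBEDDING_DIM,
                               weights=[embedding_matrix],
                               input_length=example_length,
                               trainable=True)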

In [10]:
(train_x, train_y), (test_x, test_y) = imdb.load_data(num_words=dict_size)

rnn = RNN(train_x, train_y, test_x, test_y, embedding_layer=embedding_layer,
          dict_size=dict_size, embedding_length=embedding_length,
          example_length=example_length, epochs=epochs, batch_size=batch_size)
rnn.train()
rnn.evaluate()


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding_1 (Embedding)      (None, 500, 100)          8858500   
_________________________________________________________________
lstm_1 (LSTM)                (None, 100)               80400     
_________________________________________________________________
dense_1 (Dense)              (None, 1)                 101       
=================================================================
Total params: 8,939,001
Trainable params: 80,501
Non-trainable params: 8,858,500
_________________________________________________________________
None
Train on 25000 samples, validate on 25000 samples
Epoch 1/15
 4736/25000 [====>.........................] - ETA: 1473s - loss: 0.6935 - acc: 0.5158
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-10-e9a891ff4e95> in <module>()
      5 
      6 rnn = RNN(train_x, train_y, test_x, test_y, embedding_layer = embedding_layer,         dict_size=dict_size, embedding_length=embedding_length, example_length=example_length,             epochs=epochs, batch_size=batch_size)
----> 7 rnn.train()
      8 rnn.evaluate()

<ipython-input-2-195c58cfb106> in train(self)
     45         '''
     46         # TODO: fit in data to train your model
---> 47         self.model.fit(self.train_x, self.train_y,                validation_data=(self.test_x, self.test_y),                       epochs=self.epochs, batch_size=self.batch_size)
     48 
     49 

KeyboardInterrupt: 
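
Training was interrupted here; at an estimated ~1473 s per epoch on CPU, 15 epochs is a long run. If training is allowed to finish, evaluate() returns [loss, accuracy] on the test set; a hypothetical follow-up cell (names illustrative):

In [ ]:
loss, acc = rnn.evaluate()
print('test loss %.4f, test accuracy %.4f' % (loss, acc))

# sigmoid outputs are probabilities of the positive class
probs = rnn.model.predict(rnn.test_x[:5])
print((probs > 0.5).astype(int).ravel())   # 1 = predicted positive review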
