In [1]:
from __future__ import division, print_function
%matplotlib inline
from importlib import reload # Python 3
import utils; reload(utils)
from utils import *
In [2]:
from keras.layers import TimeDistributed, Activation
from numpy.random import choice
We haven't really looked at the details of how this works yet - so this is provided for self-study for those who are interested. We'll look at it closely next week.
In [3]:
path = get_file('nietzsche.txt', origin="https://s3.amazonaws.com/text-datasets/nietzsche.txt")
text = open(path).read().lower()
print('corpus length:', len(text))
In [4]:
!tail -n 25 {path}
In [5]:
chars = sorted(list(set(text)))
vocab_size = len(chars) + 1   # +1 leaves room for the "\0" padding character inserted below
print('total chars:', vocab_size)
In [6]:
chars.insert(0, "\0")   # put the padding character at index 0
In [7]:
''.join(chars[1:-6])
Out[7]:
In [8]:
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
In [9]:
idx = [char_indices[c] for c in text]   # encode the whole corpus as a list of character indices
In [10]:
idx[:10]
Out[10]:
In [11]:
''.join(indices_char[i] for i in idx[:70])
Out[11]:
In [12]:
maxlen = 40
sentences = []
next_chars = []
# each target sequence is the input sequence shifted one character ahead
for i in range(0, len(idx) - maxlen + 1):
    sentences.append(idx[i: i + maxlen])
    next_chars.append(idx[i + 1: i + maxlen + 1])
print('nb sequences:', len(sentences))
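To see that each target really is its input shifted one character to the right, it can help to decode the first pair back to text (illustrative only, using the mappings built above):

print(''.join(indices_char[i] for i in sentences[0]))
print(''.join(indices_char[i] for i in next_chars[0]))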
In [13]:
# Stack into 2-D arrays of shape (n, maxlen); the final target sequence runs past the
# end of the corpus and is short, so the last couple of rows are dropped.
sentences = np.concatenate([[np.array(o)] for o in sentences[:-2]])
next_chars = np.concatenate([[np.array(o)] for o in next_chars[:-2]])
In [14]:
sentences.shape, next_chars.shape
Out[14]:
In [15]:
n_fac = 24   # size of the character embedding
In [16]:
model = Sequential([
    Embedding(vocab_size, n_fac, input_length=maxlen),
    LSTM(512, return_sequences=True, dropout=0.2, recurrent_dropout=0.2,
         implementation=2),
    Dropout(0.2),
    LSTM(512, return_sequences=True, dropout=0.2, recurrent_dropout=0.2,
         implementation=2),
    Dropout(0.2),
    TimeDistributed(Dense(vocab_size)),
    Activation('softmax')
])
In [17]:
model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam())
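With sparse_categorical_crossentropy the softmax output has shape (batch, maxlen, vocab_size), while the integer targets need a trailing singleton dimension - that's why the fit calls below pass np.expand_dims(next_chars, -1). A quick sanity check on a few rows (purely illustrative):

print(model.predict(sentences[:4]).shape)         # (4, maxlen, vocab_size)
print(np.expand_dims(next_chars[:4], -1).shape)   # (4, maxlen, 1)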
In [18]:
def print_example():
    seed_string = "ethics is a basic foundation of all that"
    for i in range(320):
        # encode the last 40 chars of the string so far
        x = np.array([char_indices[c] for c in seed_string[-40:]])[np.newaxis, :]
        # take the predicted distribution for the final position only
        preds = model.predict(x, verbose=0)[0][-1]
        preds = preds / np.sum(preds)        # renormalise to guard against rounding error
        next_char = choice(chars, p=preds)   # sample the next character from that distribution
        seed_string = seed_string + next_char
    print(seed_string)
In [19]:
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
Out[19]:
In [20]:
print_example()
In [21]:
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
Out[21]:
In [22]:
print_example()
In [23]:
from keras import backend as K
K.set_value(model.optimizer.lr, 0.001)   # a plain attribute assignment wouldn't reach the compiled training graph
In [24]:
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
Out[24]:
In [25]:
print_example()
In [26]:
K.set_value(model.optimizer.lr, 0.0001)
In [27]:
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
Out[27]:
In [28]:
print_example()
In [29]:
model.save_weights('data/char_rnn.h5')
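If you come back to this notebook later, the saved weights can be restored (after rebuilding the same architecture) with:

model.load_weights('data/char_rnn.h5')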
In [30]:
K.set_value(model.optimizer.lr, 0.00001)
In [31]:
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
Out[31]:
In [32]:
print_example()
In [33]:
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
Out[33]:
In [34]:
print_example()
In [35]:
print_example()
In [36]:
model.save_weights('data/char_rnn.h5')
In [ ]: