In [1]:
import numpy as np
import tensorflow as tf
from tensorflow.python.layers.core import Dense
import collections

In [2]:
def build_dataset(words, n_words):
    count = [['GO', 0], ['PAD', 1], ['EOS', 2], ['UNK', 3]]
    count.extend(collections.Counter(words).most_common(n_words - 1))
    dictionary = dict()
    for word, _ in count:
        dictionary[word] = len(dictionary)
    data = list()
    unk_count = 0
    for word in words:
        # out-of-vocabulary words fall back to the UNK token
        index = dictionary.get(word, dictionary['UNK'])
        if index == dictionary['UNK']:
            unk_count += 1
        data.append(index)
    count[3][1] = unk_count  # record how many tokens mapped to UNK
    reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    return data, count, dictionary, reversed_dictionary
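
As a quick sanity check, here is build_dataset run on a made-up toy corpus (a sketch, assuming the cell above has been run; the extra + 1 keeps every toy word in-vocabulary, since the function only keeps most_common(n_words - 1) real words):

toy_words = 'hai hai apa khabar'.split()
_, _, toy_dict, toy_rev = build_dataset(toy_words, len(set(toy_words)) + 1)
print(toy_dict)    # e.g. {'GO': 0, 'PAD': 1, 'EOS': 2, 'UNK': 3, 'hai': 4, 'apa': 5, 'khabar': 6}
print(toy_rev[4])  # 'hai', the most frequent toy word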

In [3]:
with open('data/from', 'r') as fopen:
    text_from = fopen.read().lower().split('\n')
with open('data/to', 'r') as fopen:
    text_to = fopen.read().lower().split('\n')
print('len from: %d, len to: %d'%(len(text_from), len(text_to)))


len from: 796, len to: 796

In [4]:
concat_from = ' '.join(text_from).split()
vocabulary_size_from = len(list(set(concat_from)))
data_from, count_from, dictionary_from, rev_dictionary_from = build_dataset(concat_from, vocabulary_size_from)
print('vocab from size: %d'%(vocabulary_size_from))
print('Most common words', count_from[3:10])
print('Sample data', data_from[:10], [rev_dictionary_from[i] for i in data_from[:10]])


vocab from size: 944
Most common words [['UNK', 3], ('awak', 188), ('saya', 114), ('yang', 62), ('akan', 50), ('ini', 41), ('dan', 38)]
Sample data [430, 16, 41, 4, 390, 4, 24, 798, 933, 20] ['benarkah?', 'di', 'mana', 'awak', 'florida', 'awak', 'dalam', 'divisyen', 'timurlaut?', 'dengan']

In [5]:
concat_to = ' '.join(text_to).split()
vocabulary_size_to = len(list(set(concat_to)))
data_to, count_to, dictionary_to, rev_dictionary_to = build_dataset(concat_to, vocabulary_size_to)
print('vocab to size: %d'%(vocabulary_size_to))
print('Most common words', count_to[3:10])
print('Sample data', data_to[:10], [rev_dictionary_to[i] for i in data_to[:10]])


vocab to size: 916
Most common words [['UNK', 3], ('saya', 157), ('awak', 103), ('tidak', 53), ('di', 48), ('yang', 48), ('dia', 41)]
Sample data [17, 395, 10, 86, 156, 889, 4, 13, 94, 219] ['sudah', 'jelas?', 'dan', 'sara', 'sebagai', 'bukti', 'saya', 'akan', 'dapatkan', 'tarikh']

In [6]:
GO = dictionary_from['GO']
PAD = dictionary_from['PAD']
EOS = dictionary_from['EOS']
UNK = dictionary_from['UNK']
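
build_dataset reserves the same first four indices in both dictionaries, so taking the special ids from dictionary_from also works for the 'to' side. A quick check (assuming the cells above were run):

print(GO, PAD, EOS, UNK)  # 0 1 2 3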

In [7]:
class Chatbot:
    def __init__(self, size_layer, num_layers, embedded_size, 
                 from_dict_size, to_dict_size, learning_rate, 
                 batch_size, dropout = 0.5, beam_width = 15):
        
        def lstm_cell(reuse=False):
            return tf.nn.rnn_cell.LSTMCell(size_layer, reuse=reuse)
        
        self.X = tf.placeholder(tf.int32, [None, None])
        self.Y = tf.placeholder(tf.int32, [None, None])
        self.X_seq_len = tf.placeholder(tf.int32, [None])
        self.Y_seq_len = tf.placeholder(tf.int32, [None])
        # encoder
        encoder_embeddings = tf.Variable(tf.random_uniform([from_dict_size, embedded_size], -1, 1))
        encoder_embedded = tf.nn.embedding_lookup(encoder_embeddings, self.X)
        encoder_cells = tf.nn.rnn_cell.MultiRNNCell([lstm_cell() for _ in range(num_layers)])
        # wrap the encoder cells with dropout; the constructor's dropout argument is used as the keep probability
        encoder_dropout = tf.contrib.rnn.DropoutWrapper(encoder_cells, output_keep_prob = dropout)
        self.encoder_out, self.encoder_state = tf.nn.dynamic_rnn(cell = encoder_dropout, 
                                                                 inputs = encoder_embedded, 
                                                                 sequence_length = self.X_seq_len,
                                                                 dtype = tf.float32)
        
        # no attention is used: the top encoder layer's final state initialises every decoder layer
        self.encoder_state = tuple(self.encoder_state[-1] for _ in range(num_layers))
        # shift the targets right and prepend GO so the decoder sees the previous token at each step
        main = tf.strided_slice(self.Y, [0, 0], [batch_size, -1], [1, 1])
        decoder_input = tf.concat([tf.fill([batch_size, 1], GO), main], 1)
        # decoder
        decoder_embeddings = tf.Variable(tf.random_uniform([to_dict_size, embedded_size], -1, 1))
        decoder_cells = tf.nn.rnn_cell.MultiRNNCell([lstm_cell() for _ in range(num_layers)])
        dense_layer = Dense(to_dict_size)
        # scheduled sampling: with probability 0.5 the decoder is fed its own previous
        # prediction instead of the ground-truth token during training
        training_helper = tf.contrib.seq2seq.ScheduledEmbeddingTrainingHelper(
                inputs = tf.nn.embedding_lookup(decoder_embeddings, decoder_input),
                sequence_length = self.Y_seq_len,
                embedding = decoder_embeddings,
                sampling_probability = 0.5,
                time_major = False)
        training_decoder = tf.contrib.seq2seq.BasicDecoder(
                cell = decoder_cells,
                helper = training_helper,
                initial_state = self.encoder_state,
                output_layer = dense_layer)
        training_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(
                decoder = training_decoder,
                impute_finished = True,
                maximum_iterations = tf.reduce_max(self.Y_seq_len))
        # beam-search decoder for inference; it reuses the same cells, embeddings
        # and output projection as the training decoder
        predicting_decoder = tf.contrib.seq2seq.BeamSearchDecoder(
                cell = decoder_cells,
                embedding = decoder_embeddings,
                start_tokens = tf.tile(tf.constant([GO], dtype=tf.int32), [batch_size]),
                end_token = EOS,
                initial_state = tf.contrib.seq2seq.tile_batch(self.encoder_state, beam_width),
                beam_width = beam_width,
                output_layer = dense_layer,
                length_penalty_weight = 0.0)
        predicting_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(
                decoder = predicting_decoder,
                impute_finished = False,
                maximum_iterations = 2 * tf.reduce_max(self.X_seq_len))
        self.training_logits = training_decoder_output.rnn_output
        # keep only the best beam from the [batch, time, beam_width] beam-search output
        self.predicting_ids = predicting_decoder_output.predicted_ids[:, :, 0]
        # mask padded positions so they do not contribute to the sequence loss
        masks = tf.sequence_mask(self.Y_seq_len, tf.reduce_max(self.Y_seq_len), dtype=tf.float32)
        self.cost = tf.contrib.seq2seq.sequence_loss(logits = self.training_logits,
                                                     targets = self.Y,
                                                     weights = masks)
        self.optimizer = tf.train.AdamOptimizer(learning_rate).minimize(self.cost)
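
To make the decoder-input construction above concrete, here is a small numpy mirror of the strided_slice / concat step (illustration only; the two padded target rows are invented, and GO, PAD, EOS are assumed to be 0, 1, 2 as defined earlier):

example_y = np.array([[5, 6, 7, EOS],
                      [8, 9, EOS, PAD]])
shifted = example_y[:, :-1]  # drop the last column, as tf.strided_slice with end index -1 does
example_decoder_input = np.concatenate([np.full((2, 1), GO), shifted], axis=1)
print(example_decoder_input)  # [[0 5 6 7] [0 8 9 2]] -- targets shifted right by one, GO prepended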

In [8]:
size_layer = 256
num_layers = 2
embedded_size = 256
learning_rate = 0.001
batch_size = 32
epoch = 50

In [9]:
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Chatbot(size_layer, num_layers, embedded_size, vocabulary_size_from + 4, 
                vocabulary_size_to + 4, learning_rate, batch_size)
sess.run(tf.global_variables_initializer())


/usr/local/lib/python3.5/dist-packages/tensorflow/python/ops/gradients_impl.py:93: UserWarning: Converting sparse IndexedSlices to a dense Tensor of unknown shape. This may consume a large amount of memory.
  "Converting sparse IndexedSlices to a dense Tensor of unknown shape. "

In [10]:
def str_idx(corpus, dic):
    X = []
    for i in corpus:
        ints = []
        for k in i.split():
            try:
                ints.append(dic[k])
            except KeyError as e:
                # out-of-vocabulary word: report it and fall back to UNK
                print(e)
                ints.append(UNK)
        X.append(ints)
    return X
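
For example (a sketch; the sentence is invented and the exact ids depend on this run's dictionary), a word missing from the dictionary is reported and replaced with the UNK id:

print(str_idx(['awak di mana xyzzy'], dictionary_from))
# prints 'xyzzy' (the KeyError), then something like [[4, 16, 41, 3]]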

In [11]:
X = str_idx(text_from, dictionary_from)
Y = str_idx(text_to, dictionary_to)


'minum'
'keberatan,'

In [12]:
def pad_sentence_batch(sentence_batch, pad_int):
    padded_seqs = []
    seq_lens = []
    max_sentence_len = max([len(sentence) for sentence in sentence_batch])
    for sentence in sentence_batch:
        padded_seqs.append(sentence + [pad_int] * (max_sentence_len - len(sentence)))
        seq_lens.append(len(sentence))
    return padded_seqs, seq_lens
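
For example (the id lists are made up), every sentence is right-padded to the longest length in the batch, and the original lengths are returned to feed X_seq_len / Y_seq_len:

example_batch, example_lens = pad_sentence_batch([[5, 6, 7], [8, 9]], PAD)
print(example_batch)  # [[5, 6, 7], [8, 9, 1]] since PAD == 1
print(example_lens)   # [3, 2]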

In [13]:
for i in range(epoch):
    total_loss = 0
    for k in range(0, (len(text_from) // batch_size) * batch_size, batch_size):
        batch_x, seq_x = pad_sentence_batch(X[k: k+batch_size], PAD)
        batch_y, seq_y = pad_sentence_batch(Y[k: k+batch_size], PAD)
        loss, _ = sess.run([model.cost, model.optimizer], feed_dict={model.X:batch_x,
                                                                    model.Y:batch_y,
                                                                    model.X_seq_len:seq_x,
                                                                    model.Y_seq_len:seq_y})
        total_loss += loss
    total_loss /= (len(text_from) // batch_size)
    print('epoch: %d, avg loss: %f'%(i+1, total_loss))


epoch: 1, avg loss: 6.617245
epoch: 2, avg loss: 6.004203
epoch: 3, avg loss: 5.855214
epoch: 4, avg loss: 5.692413
epoch: 5, avg loss: 5.452851
epoch: 6, avg loss: 5.141638
epoch: 7, avg loss: 4.791116
epoch: 8, avg loss: 4.447184
epoch: 9, avg loss: 4.148418
epoch: 10, avg loss: 3.807234
epoch: 11, avg loss: 3.432694
epoch: 12, avg loss: 3.041191
epoch: 13, avg loss: 2.670436
epoch: 14, avg loss: 2.341844
epoch: 15, avg loss: 2.043502
epoch: 16, avg loss: 1.786270
epoch: 17, avg loss: 1.575515
epoch: 18, avg loss: 1.401609
epoch: 19, avg loss: 1.235308
epoch: 20, avg loss: 1.120157
epoch: 21, avg loss: 1.008670
epoch: 22, avg loss: 0.908493
epoch: 23, avg loss: 0.776691
epoch: 24, avg loss: 0.660849
epoch: 25, avg loss: 0.557715
epoch: 26, avg loss: 0.481938
epoch: 27, avg loss: 0.433815
epoch: 28, avg loss: 0.399786
epoch: 29, avg loss: 0.361747
epoch: 30, avg loss: 0.332339
epoch: 31, avg loss: 0.307750
epoch: 32, avg loss: 0.291502
epoch: 33, avg loss: 0.254794
epoch: 34, avg loss: 0.237130
epoch: 35, avg loss: 0.215183
epoch: 36, avg loss: 0.191755
epoch: 37, avg loss: 0.182730
epoch: 38, avg loss: 0.168038
epoch: 39, avg loss: 0.156151
epoch: 40, avg loss: 0.147061
epoch: 41, avg loss: 0.141702
epoch: 42, avg loss: 0.134256
epoch: 43, avg loss: 0.132190
epoch: 44, avg loss: 0.122094
epoch: 45, avg loss: 0.119448
epoch: 46, avg loss: 0.117541
epoch: 47, avg loss: 0.114613
epoch: 48, avg loss: 0.115981
epoch: 49, avg loss: 0.112481
epoch: 50, avg loss: 0.102238

In [14]:
def predict(X, Y, from_dict, to_dict, batch_size): 
    out_indices = sess.run(model.predicting_ids, {model.X: [X] * batch_size,
                                                 model.X_seq_len: [len(X)] * batch_size})[0]
        
    print('FROM')
    print('IN:',[i for i in X])
    print('WORD:', ' '.join([from_dict[i] for i in X]))
    print('\nTO')
    print('OUT:', [i for i in out_indices])
    print('WORD:', ' '.join([to_dict[i] for i in out_indices]))
    print('ACTUAL REPLY:', ' '.join([to_dict[i] for i in Y]))

In [15]:
predict(X[2], Y[2], rev_dictionary_from, rev_dictionary_to, batch_size)


FROM
IN: [4, 24, 798, 933]
WORD: awak dalam divisyen timurlaut?

TO
OUT: [4, 13, 94, 219, 219, 219, 29, 594]
WORD: saya akan dapatkan tarikh tarikh tarikh sini mengetuai,
ACTUAL REPLY: saya akan dapatkan tarikh
