In [1]:
import numpy as np
import tensorflow as tf
import collections


/usr/local/lib/python3.5/dist-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
  from ._conv import register_converters as _register_converters

In [2]:
def build_dataset(words, n_words):
    count = [['GO', 0], ['PAD', 1], ['EOS', 2], ['UNK', 3]]
    count.extend(collections.Counter(words).most_common(n_words - 1))
    dictionary = dict()
    for word, _ in count:
        dictionary[word] = len(dictionary)
    data = list()
    unk_count = 0
    for word in words:
        index = dictionary.get(word, 0)
        if index == 0:
            unk_count += 1
        data.append(index)
    count[0][1] = unk_count
    reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    return data, count, dictionary, reversed_dictionary

In [3]:
with open('data/from', 'r') as fopen:
    text_from = fopen.read().lower().split('\n')
with open('data/to', 'r') as fopen:
    text_to = fopen.read().lower().split('\n')
print('len from: %d, len to: %d'%(len(text_from), len(text_to)))


len from: 796, len to: 796

In [4]:
concat_from = ' '.join(text_from).split()
vocabulary_size_from = len(list(set(concat_from)))
data_from, count_from, dictionary_from, rev_dictionary_from = build_dataset(concat_from, vocabulary_size_from)
print('vocab from size: %d'%(vocabulary_size_from))
print('Most common words', count_from[3:10])
print('Sample data', data_from[:10], [rev_dictionary_from[i] for i in data_from[:10]])


vocab from size: 944
Most common words [['UNK', 3], ('awak', 188), ('saya', 114), ('yang', 62), ('akan', 50), ('ini', 41), ('dan', 38)]
Sample data [580, 16, 42, 4, 609, 4, 24, 600, 660, 20] ['benarkah?', 'di', 'mana', 'awak', 'florida', 'awak', 'dalam', 'divisyen', 'timurlaut?', 'dengan']

In [5]:
concat_to = ' '.join(text_to).split()
vocabulary_size_to = len(list(set(concat_to)))
data_to, count_to, dictionary_to, rev_dictionary_to = build_dataset(concat_to, vocabulary_size_to)
print('vocab to size: %d'%(vocabulary_size_to))
print('Most common words', count_to[3:10])
print('Sample data', data_to[:10], [rev_dictionary_to[i] for i in data_to[:10]])


vocab to size: 916
Most common words [['UNK', 3], ('saya', 157), ('awak', 103), ('tidak', 53), ('yang', 48), ('di', 48), ('dia', 41)]
Sample data [15, 624, 10, 91, 161, 774, 4, 13, 93, 269] ['sudah', 'jelas?', 'dan', 'sara', 'sebagai', 'bukti', 'saya', 'akan', 'dapatkan', 'tarikh']

In [6]:
GO = dictionary_from['GO']
PAD = dictionary_from['PAD']
EOS = dictionary_from['EOS']
UNK = dictionary_from['UNK']

In [7]:
class Chatbot:
    def __init__(self, size_layer, num_layers, embedded_size, 
                 from_dict_size, to_dict_size, batch_size,
                 grad_clip=5.0, beam_width=5, force_teaching_ratio=0.5):
        
        def lstm_cell(size, reuse=False):
            return tf.nn.rnn_cell.LSTMCell(size, initializer=tf.orthogonal_initializer(),reuse=reuse)
        
        self.X = tf.placeholder(tf.int32, [None, None])
        self.Y = tf.placeholder(tf.int32, [None, None])
        self.X_seq_len = tf.placeholder(tf.int32, [None])
        self.Y_seq_len = tf.placeholder(tf.int32, [None])
        
        encoder_embeddings = tf.Variable(tf.random_uniform([from_dict_size, embedded_size], -1, 1))
        decoder_embeddings = tf.Variable(tf.random_uniform([to_dict_size, embedded_size], -1, 1))
        encoder_embedded = tf.nn.embedding_lookup(encoder_embeddings, self.X)
        
        for n in range(num_layers):
            (out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(
                cell_fw = lstm_cell(size_layer // 2),
                cell_bw = lstm_cell(size_layer // 2),
                inputs = encoder_embedded,
                sequence_length = self.X_seq_len,
                dtype = tf.float32,
                scope = 'bidirectional_rnn_%d'%(n))
            encoder_embedded = tf.concat((out_fw, out_bw), 2)
        bi_state_c = tf.concat((state_fw.c, state_bw.c), -1)
        bi_state_h = tf.concat((state_fw.h, state_bw.h), -1)
        bi_lstm_state = tf.nn.rnn_cell.LSTMStateTuple(c=bi_state_c, h=bi_state_h)
        encoder_state = tuple([bi_lstm_state] * num_layers)
        
        with tf.variable_scope('decode'):
            attention_mechanism = tf.contrib.seq2seq.LuongAttention(
            num_units = size_layer, 
            memory = encoder_embedded,
            memory_sequence_length = self.X_seq_len)
            decoder_cell = tf.contrib.seq2seq.AttentionWrapper(
                cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell(size_layer) for _ in range(num_layers)]),
                attention_mechanism = attention_mechanism,
                attention_layer_size = size_layer)
            main = tf.strided_slice(self.Y, [0, 0], [batch_size, -1], [1, 1])
            decoder_input = tf.concat([tf.fill([batch_size, 1], GO), main], 1)
            training_helper = tf.contrib.seq2seq.ScheduledEmbeddingTrainingHelper(
            inputs = tf.nn.embedding_lookup(decoder_embeddings, decoder_input),
                sequence_length = self.Y_seq_len,
                embedding = decoder_embeddings,
                sampling_probability = 1 - force_teaching_ratio,
                time_major = False)
            training_decoder = tf.contrib.seq2seq.BasicDecoder(
                cell = decoder_cell,
                helper = training_helper,
                initial_state = decoder_cell.zero_state(batch_size, tf.float32).clone(cell_state=encoder_state),
                output_layer = tf.layers.Dense(to_dict_size))
            training_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(
                decoder = training_decoder,
                impute_finished = True,
                maximum_iterations = tf.reduce_max(self.Y_seq_len))
            self.logits = training_decoder_output.rnn_output
            
        with tf.variable_scope('decode', reuse=True):
            encoder_out_tiled = tf.contrib.seq2seq.tile_batch(encoder_embedded, beam_width)
            encoder_state_tiled = tf.contrib.seq2seq.tile_batch(encoder_state, beam_width)
            X_seq_len_tiled = tf.contrib.seq2seq.tile_batch(self.X_seq_len, beam_width)
            attention_mechanism = tf.contrib.seq2seq.LuongAttention(
                num_units = size_layer, 
                memory = encoder_out_tiled,
                memory_sequence_length = X_seq_len_tiled)
            decoder_cell = tf.contrib.seq2seq.AttentionWrapper(
                cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell(size_layer, reuse=True) for _ in range(num_layers)]),
                attention_mechanism = attention_mechanism,
                attention_layer_size = size_layer)
            predicting_decoder = tf.contrib.seq2seq.BeamSearchDecoder(
                cell = decoder_cell,
                embedding = decoder_embeddings,
                start_tokens = tf.tile(tf.constant([GO], dtype=tf.int32), [batch_size]),
                end_token = EOS,
                initial_state = decoder_cell.zero_state(batch_size * beam_width, tf.float32).clone(cell_state = encoder_state_tiled),
                beam_width = beam_width,
                output_layer = tf.layers.Dense(to_dict_size, _reuse=True),
                length_penalty_weight = 0.0)
            predicting_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(
                decoder = predicting_decoder,
                impute_finished = False,
                maximum_iterations = 2 * tf.reduce_max(self.X_seq_len))
            self.predicting_ids = predicting_decoder_output.predicted_ids[:, :, 0]
        
        masks = tf.sequence_mask(self.Y_seq_len, tf.reduce_max(self.Y_seq_len), dtype=tf.float32)
        self.cost = tf.contrib.seq2seq.sequence_loss(logits = self.logits,
                                                     targets = self.Y,
                                                     weights = masks)
        params = tf.trainable_variables()
        gradients = tf.gradients(self.cost, params)
        clipped_gradients, _ = tf.clip_by_global_norm(gradients, grad_clip)
        self.optimizer = tf.train.AdamOptimizer().apply_gradients(zip(clipped_gradients, params))

In [8]:
size_layer = 256
num_layers = 2
embedded_size = 256
batch_size = 32
epoch = 50

In [9]:
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Chatbot(size_layer, num_layers, embedded_size, vocabulary_size_from + 4, 
                vocabulary_size_to + 4, batch_size)
sess.run(tf.global_variables_initializer())

In [10]:
def str_idx(corpus, dic):
    X = []
    for i in corpus:
        ints = []
        for k in i.split():
            try:
                ints.append(dic[k])
            except Exception as e:
                print(e)
                ints.append(UNK)
        X.append(ints)
    return X

In [11]:
X = str_idx(text_from, dictionary_from)
Y = str_idx(text_to, dictionary_to)


'besar?'
'cintakan'

In [12]:
def pad_sentence_batch(sentence_batch, pad_int):
    padded_seqs = []
    seq_lens = []
    max_sentence_len = max([len(sentence) for sentence in sentence_batch])
    for sentence in sentence_batch:
        padded_seqs.append(sentence + [pad_int] * (max_sentence_len - len(sentence)))
        seq_lens.append(len(sentence))
    return padded_seqs, seq_lens

def check_accuracy(logits, Y):
    acc = 0
    for i in range(logits.shape[0]):
        internal_acc = 0
        for k in range(len(Y[i])):
            if Y[i][k] == logits[i][k]:
                internal_acc += 1
        acc += (internal_acc /= len(Y[i]))
    return acc / logits.shape[0]

In [13]:
for i in range(epoch):
    total_loss, total_accuracy = 0, 0
    for k in range(0, (len(text_from) // batch_size) * batch_size, batch_size):
        batch_x, seq_x = pad_sentence_batch(X[k: k+batch_size], PAD)
        batch_y, seq_y = pad_sentence_batch(Y[k: k+batch_size], PAD)
        predicted, loss, _ = sess.run([model.predicting_ids, model.cost, model.optimizer], feed_dict={model.X:batch_x,
                                                                    model.Y:batch_y,
                                                                    model.X_seq_len:seq_x,
                                                                    model.Y_seq_len:seq_y})
        total_loss += loss
        total_accuracy += check_accuracy(predicted,batch_y)
    total_loss /= (len(text_from) // batch_size)
    total_accuracy /= (len(text_from) // batch_size)
    print('epoch: %d, avg loss: %f, avg accuracy: %f'%(i+1, total_loss, total_accuracy))


epoch: 1, avg loss: 6.518941, avg accuracy: 0.023171
epoch: 2, avg loss: 6.032852, avg accuracy: 0.027716
epoch: 3, avg loss: 5.831940, avg accuracy: 0.032844
epoch: 4, avg loss: 5.547847, avg accuracy: 0.043707
epoch: 5, avg loss: 5.102346, avg accuracy: 0.058408
epoch: 6, avg loss: 4.445675, avg accuracy: 0.079136
epoch: 7, avg loss: 3.754902, avg accuracy: 0.112624
epoch: 8, avg loss: 3.089773, avg accuracy: 0.160503
epoch: 9, avg loss: 2.539135, avg accuracy: 0.220858
epoch: 10, avg loss: 2.187318, avg accuracy: 0.254960
epoch: 11, avg loss: 1.906706, avg accuracy: 0.287723
epoch: 12, avg loss: 1.579186, avg accuracy: 0.326755
epoch: 13, avg loss: 1.324486, avg accuracy: 0.369048
epoch: 14, avg loss: 1.031026, avg accuracy: 0.417094
epoch: 15, avg loss: 0.824613, avg accuracy: 0.452170
epoch: 16, avg loss: 0.686384, avg accuracy: 0.472997
epoch: 17, avg loss: 0.569146, avg accuracy: 0.494072
epoch: 18, avg loss: 0.479078, avg accuracy: 0.506665
epoch: 19, avg loss: 0.391860, avg accuracy: 0.525676
epoch: 20, avg loss: 0.370656, avg accuracy: 0.526829
epoch: 21, avg loss: 0.325732, avg accuracy: 0.535609
epoch: 22, avg loss: 0.319365, avg accuracy: 0.535807
epoch: 23, avg loss: 0.269184, avg accuracy: 0.544897
epoch: 24, avg loss: 0.222742, avg accuracy: 0.551172
epoch: 25, avg loss: 0.195825, avg accuracy: 0.557025
epoch: 26, avg loss: 0.146952, avg accuracy: 0.562457
epoch: 27, avg loss: 0.125353, avg accuracy: 0.566059
epoch: 28, avg loss: 0.108545, avg accuracy: 0.568062
epoch: 29, avg loss: 0.102070, avg accuracy: 0.568651
epoch: 30, avg loss: 0.099154, avg accuracy: 0.570660
epoch: 31, avg loss: 0.096500, avg accuracy: 0.570474
epoch: 32, avg loss: 0.094787, avg accuracy: 0.570412
epoch: 33, avg loss: 0.090694, avg accuracy: 0.570461
epoch: 34, avg loss: 0.092588, avg accuracy: 0.570120
epoch: 35, avg loss: 0.086194, avg accuracy: 0.571274
epoch: 36, avg loss: 0.085000, avg accuracy: 0.569816
epoch: 37, avg loss: 0.091381, avg accuracy: 0.570753
epoch: 38, avg loss: 0.085481, avg accuracy: 0.568806
epoch: 39, avg loss: 0.090427, avg accuracy: 0.571230
epoch: 40, avg loss: 0.091360, avg accuracy: 0.570294
epoch: 41, avg loss: 0.081988, avg accuracy: 0.570579
epoch: 42, avg loss: 0.081090, avg accuracy: 0.568471
epoch: 43, avg loss: 0.081140, avg accuracy: 0.571503
epoch: 44, avg loss: 0.076972, avg accuracy: 0.568936
epoch: 45, avg loss: 0.082354, avg accuracy: 0.570585
epoch: 46, avg loss: 0.087850, avg accuracy: 0.570939
epoch: 47, avg loss: 0.083670, avg accuracy: 0.570139
epoch: 48, avg loss: 0.078286, avg accuracy: 0.569519
epoch: 49, avg loss: 0.080651, avg accuracy: 0.570226
epoch: 50, avg loss: 0.073352, avg accuracy: 0.569444

In [ ]: