In [1]:
import numpy as np
import tensorflow as tf
from tensorflow.python.layers.core import Dense
import collections


/usr/local/lib/python3.5/dist-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
  from ._conv import register_converters as _register_converters

In [2]:
def build_dataset(words, n_words):
    count = [['GO', 0], ['PAD', 1], ['EOS', 2], ['UNK', 3]]
    count.extend(collections.Counter(words).most_common(n_words - 1))
    dictionary = dict()
    for word, _ in count:
        dictionary[word] = len(dictionary)
    data = list()
    unk_count = 0
    for word in words:
        index = dictionary.get(word, 0)
        if index == 0:
            unk_count += 1
        data.append(index)
    count[0][1] = unk_count
    reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    return data, count, dictionary, reversed_dictionary

In [3]:
with open('data/from', 'r') as fopen:
    text_from = fopen.read().lower().split('\n')
with open('data/to', 'r') as fopen:
    text_to = fopen.read().lower().split('\n')
print('len from: %d, len to: %d'%(len(text_from), len(text_to)))


len from: 796, len to: 796

In [4]:
concat_from = ' '.join(text_from).split()
vocabulary_size_from = len(list(set(concat_from)))
data_from, count_from, dictionary_from, rev_dictionary_from = build_dataset(concat_from, vocabulary_size_from)
print('vocab from size: %d'%(vocabulary_size_from))
print('Most common words', count_from[3:10])
print('Sample data', data_from[:10], [rev_dictionary_from[i] for i in data_from[:10]])


vocab from size: 944
Most common words [['UNK', 3], ('awak', 188), ('saya', 114), ('yang', 62), ('akan', 50), ('ini', 41), ('dan', 38)]
Sample data [933, 16, 41, 4, 401, 4, 24, 674, 672, 20] ['benarkah?', 'di', 'mana', 'awak', 'florida', 'awak', 'dalam', 'divisyen', 'timurlaut?', 'dengan']

In [5]:
concat_to = ' '.join(text_to).split()
vocabulary_size_to = len(list(set(concat_to)))
data_to, count_to, dictionary_to, rev_dictionary_to = build_dataset(concat_to, vocabulary_size_to)
print('vocab to size: %d'%(vocabulary_size_to))
print('Most common words', count_to[3:10])
print('Sample data', data_to[:10], [rev_dictionary_to[i] for i in data_to[:10]])


vocab to size: 916
Most common words [['UNK', 3], ('saya', 157), ('awak', 103), ('tidak', 53), ('yang', 48), ('di', 48), ('dia', 41)]
Sample data [15, 379, 10, 81, 186, 460, 4, 13, 92, 319] ['sudah', 'jelas?', 'dan', 'sara', 'sebagai', 'bukti', 'saya', 'akan', 'dapatkan', 'tarikh']

In [6]:
GO = dictionary_from['GO']
PAD = dictionary_from['PAD']
EOS = dictionary_from['EOS']
UNK = dictionary_from['UNK']

In [7]:
class Chatbot:
    def __init__(self, size_layer, num_layers, embedded_size, 
                 from_dict_size, to_dict_size, learning_rate, 
                 batch_size, dropout = 0.5):
        
        def lstm_cell(reuse=False):
            return tf.nn.rnn_cell.LSTMCell(size_layer, reuse=reuse)
        
        def attention(encoder_out, seq_len, reuse=False):
            attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(num_units = size_layer, 
                                                                    memory = encoder_out,
                                                                    memory_sequence_length = seq_len)
            return tf.contrib.seq2seq.AttentionWrapper(
            cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell(reuse) for _ in range(num_layers)]), 
                attention_mechanism = attention_mechanism,
                attention_layer_size = size_layer)
        
        self.X = tf.placeholder(tf.int32, [None, None])
        self.Y = tf.placeholder(tf.int32, [None, None])
        self.X_seq_len = tf.placeholder(tf.int32, [None])
        self.Y_seq_len = tf.placeholder(tf.int32, [None])
        # encoder
        encoder_embeddings = tf.Variable(tf.random_uniform([from_dict_size, embedded_size], -1, 1))
        encoder_embedded = tf.nn.embedding_lookup(encoder_embeddings, self.X)
        encoder_cells = tf.nn.rnn_cell.MultiRNNCell([lstm_cell() for _ in range(num_layers)])
        encoder_dropout = tf.contrib.rnn.DropoutWrapper(encoder_cells, output_keep_prob = 0.5)
        self.encoder_out, self.encoder_state = tf.nn.dynamic_rnn(cell = encoder_dropout, 
                                                                 inputs = encoder_embedded, 
                                                                 sequence_length = self.X_seq_len,
                                                                 dtype = tf.float32)
        
        self.encoder_state = tuple(self.encoder_state[-1] for _ in range(num_layers))
        main = tf.strided_slice(self.Y, [0, 0], [batch_size, -1], [1, 1])
        decoder_input = tf.concat([tf.fill([batch_size, 1], GO), main], 1)
        # decoder
        decoder_embeddings = tf.Variable(tf.random_uniform([to_dict_size, embedded_size], -1, 1))
        decoder_cell = attention(self.encoder_out, self.X_seq_len)
        dense_layer = Dense(to_dict_size)
        training_helper = tf.contrib.seq2seq.TrainingHelper(
                inputs = tf.nn.embedding_lookup(decoder_embeddings, decoder_input),
                sequence_length = self.Y_seq_len,
                time_major = False)
        training_decoder = tf.contrib.seq2seq.BasicDecoder(
                cell = decoder_cell,
                helper = training_helper,
                initial_state = decoder_cell.zero_state(batch_size, tf.float32).clone(cell_state=self.encoder_state),
                output_layer = dense_layer)
        training_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(
                decoder = training_decoder,
                impute_finished = True,
                maximum_iterations = tf.reduce_max(self.Y_seq_len))
        predicting_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
                embedding = encoder_embeddings,
                start_tokens = tf.tile(tf.constant([GO], dtype=tf.int32), [batch_size]),
                end_token = EOS)
        predicting_decoder = tf.contrib.seq2seq.BasicDecoder(
                cell = decoder_cell,
                helper = predicting_helper,
                initial_state = decoder_cell.zero_state(batch_size, tf.float32).clone(cell_state=self.encoder_state),
                output_layer = dense_layer)
        predicting_decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(
                decoder = predicting_decoder,
                impute_finished = True,
                maximum_iterations = 2 * tf.reduce_max(self.X_seq_len))
        self.training_logits = training_decoder_output.rnn_output
        self.predicting_ids = predicting_decoder_output.sample_id
        masks = tf.sequence_mask(self.Y_seq_len, tf.reduce_max(self.Y_seq_len), dtype=tf.float32)
        self.cost = tf.contrib.seq2seq.sequence_loss(logits = self.training_logits,
                                                     targets = self.Y,
                                                     weights = masks)
        self.optimizer = tf.train.AdamOptimizer(learning_rate).minimize(self.cost)

In [8]:
size_layer = 256
num_layers = 2
embedded_size = 256
learning_rate = 0.001
batch_size = 32
epoch = 50

In [9]:
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Chatbot(size_layer, num_layers, embedded_size, vocabulary_size_from + 4, 
                vocabulary_size_to + 4, learning_rate, batch_size)
sess.run(tf.global_variables_initializer())

In [10]:
def str_idx(corpus, dic):
    X = []
    for i in corpus:
        ints = []
        for k in i.split():
            try:
                ints.append(dic[k])
            except Exception as e:
                print(e)
                ints.append(2)
        X.append(ints)
    return X

In [11]:
X = str_idx(text_from, dictionary_from)
Y = str_idx(text_to, dictionary_to)


'beberapa'
'menulis'

In [13]:
def pad_sentence_batch(sentence_batch, pad_int):
    padded_seqs = []
    seq_lens = []
    max_sentence_len = max([len(sentence) for sentence in sentence_batch])
    for sentence in sentence_batch:
        padded_seqs.append(sentence + [pad_int] * (max_sentence_len - len(sentence)))
        seq_lens.append(len(sentence))
    return padded_seqs, seq_lens

def check_accuracy(logits, Y):
    acc = 0
    for i in range(logits.shape[0]):
        internal_acc = 0
        for k in range(len(Y[i])):
            if Y[i][k] == logits[i][k]:
                internal_acc += 1
        acc += (internal_acc / len(Y[i]))
    return acc / logits.shape[0]

In [14]:
for i in range(epoch):
    total_loss, total_accuracy = 0, 0
    for k in range(0, (len(text_from) // batch_size) * batch_size, batch_size):
        batch_x, seq_x = pad_sentence_batch(X[k: k+batch_size], PAD)
        batch_y, seq_y = pad_sentence_batch(Y[k: k+batch_size], PAD)
        predicted, loss, _ = sess.run([model.predicting_ids, model.cost, model.optimizer], feed_dict={model.X:batch_x,
                                                                    model.Y:batch_y,
                                                                    model.X_seq_len:seq_x,
                                                                    model.Y_seq_len:seq_y})
        total_loss += loss
        total_accuracy += check_accuracy(predicted,batch_y)
    total_loss /= (len(text_from) // batch_size)
    total_accuracy /= (len(text_from) // batch_size)
    print('epoch: %d, avg loss: %f, avg accuracy: %f'%(i+1, total_loss, total_accuracy))


epoch: 1, avg loss: 6.545544, avg accuracy: 0.025484
epoch: 2, avg loss: 6.032529, avg accuracy: 0.035156
epoch: 3, avg loss: 5.819380, avg accuracy: 0.038876
epoch: 4, avg loss: 5.553213, avg accuracy: 0.039348
epoch: 5, avg loss: 5.089363, avg accuracy: 0.044203
epoch: 6, avg loss: 4.492129, avg accuracy: 0.059530
epoch: 7, avg loss: 3.897117, avg accuracy: 0.075818
epoch: 8, avg loss: 3.381940, avg accuracy: 0.092832
epoch: 9, avg loss: 2.809660, avg accuracy: 0.119643
epoch: 10, avg loss: 2.253229, avg accuracy: 0.162091
epoch: 11, avg loss: 1.871990, avg accuracy: 0.178001
epoch: 12, avg loss: 1.569543, avg accuracy: 0.193018
epoch: 13, avg loss: 1.304318, avg accuracy: 0.215203
epoch: 14, avg loss: 1.067986, avg accuracy: 0.237159
epoch: 15, avg loss: 0.824454, avg accuracy: 0.248320
epoch: 16, avg loss: 0.623685, avg accuracy: 0.258879
epoch: 17, avg loss: 0.470644, avg accuracy: 0.278044
epoch: 18, avg loss: 0.376205, avg accuracy: 0.292845
epoch: 19, avg loss: 0.291214, avg accuracy: 0.297123
epoch: 20, avg loss: 0.216652, avg accuracy: 0.308079
epoch: 21, avg loss: 0.161716, avg accuracy: 0.325942
epoch: 22, avg loss: 0.120674, avg accuracy: 0.318800
epoch: 23, avg loss: 0.093227, avg accuracy: 0.330562
epoch: 24, avg loss: 0.078573, avg accuracy: 0.331417
epoch: 25, avg loss: 0.068792, avg accuracy: 0.334480
epoch: 26, avg loss: 0.067120, avg accuracy: 0.329365
epoch: 27, avg loss: 0.063798, avg accuracy: 0.336651
epoch: 28, avg loss: 0.057518, avg accuracy: 0.339125
epoch: 29, avg loss: 0.058529, avg accuracy: 0.334679
epoch: 30, avg loss: 0.054220, avg accuracy: 0.336868
epoch: 31, avg loss: 0.049469, avg accuracy: 0.340916
epoch: 32, avg loss: 0.045394, avg accuracy: 0.341257
epoch: 33, avg loss: 0.046843, avg accuracy: 0.338349
epoch: 34, avg loss: 0.042347, avg accuracy: 0.340966
epoch: 35, avg loss: 0.044255, avg accuracy: 0.340482
epoch: 36, avg loss: 0.046104, avg accuracy: 0.343787
epoch: 37, avg loss: 0.042106, avg accuracy: 0.340699
epoch: 38, avg loss: 0.040359, avg accuracy: 0.340123
epoch: 39, avg loss: 0.038754, avg accuracy: 0.343769
epoch: 40, avg loss: 0.041025, avg accuracy: 0.344327
epoch: 41, avg loss: 0.038279, avg accuracy: 0.342808
epoch: 42, avg loss: 0.038966, avg accuracy: 0.346243
epoch: 43, avg loss: 0.037633, avg accuracy: 0.340606
epoch: 44, avg loss: 0.036607, avg accuracy: 0.344097
epoch: 45, avg loss: 0.037814, avg accuracy: 0.343967
epoch: 46, avg loss: 0.036279, avg accuracy: 0.345145
epoch: 47, avg loss: 0.036285, avg accuracy: 0.344488
epoch: 48, avg loss: 0.035038, avg accuracy: 0.341648
epoch: 49, avg loss: 0.036597, avg accuracy: 0.343353
epoch: 50, avg loss: 0.034884, avg accuracy: 0.341270

In [ ]: