In [1]:
from sklearn.datasets import fetch_20newsgroups
import re
import tensorflow as tf
from sklearn import metrics
import numpy as np
import collections
import time



In [2]:
newsgroups_train = fetch_20newsgroups(subset='train')
newsgroups_test = fetch_20newsgroups(subset='test')

In [3]:
def clearstring(string):
    # the regex also strips '\n', so words that straddle a line break get
    # fused together (visible in the sample tokens below, e.g. 'thingsubject')
    string = re.sub('[^A-Za-z ]+', '', string)
    return ' '.join(string.lower().split())

def build_dataset(words, n_words):
    count = [['GO', 0], ['PAD', 1], ['EOS', 2], ['UNK', 3]]
    count.extend(collections.Counter(words).most_common(n_words - 1))
    dictionary = dict()
    for word, _ in count:
        dictionary[word] = len(dictionary)
    data = list()
    unk_count = 0
    for word in words:
        # out-of-vocabulary words map to UNK (id 3), not GO (id 0)
        index = dictionary.get(word, 3)
        if index == 3:
            unk_count += 1
        data.append(index)
    count[3][1] = unk_count
    reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    return data, count, dictionary, reversed_dictionary

def str_idx(corpus, dic, maxlen, UNK=3):
    # right-align the first `maxlen` tokens of each document; positions
    # before the text stay 0, so sequences are effectively left-padded
    X = np.zeros((len(corpus), maxlen))
    for i in range(len(corpus)):
        for no, k in enumerate(corpus[i].split()[:maxlen][::-1]):
            X[i, -1 - no] = dic.get(k, UNK)
    return X
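
A quick sanity check (an addition, not part of the original run) makes the two helpers concrete: clearstring drops everything except letters and spaces, and str_idx right-aligns token ids with zero left-padding, falling back to UNK for out-of-vocabulary words. The toy dictionary below is hypothetical.

In [ ]:
sample = clearstring('Hello, World! 123')
print(sample)                                # hello world
toy_dic = {'hello': 4, 'world': 5}
print(str_idx([sample + ' foo'], toy_dic, maxlen=5))
# [[0. 0. 4. 5. 3.]]  -- 'foo' is unknown, so it becomes UNK=3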

In [4]:
for i in range(len(newsgroups_train.data)):
    newsgroups_train.data[i] = clearstring(newsgroups_train.data[i])
    
for i in range(len(newsgroups_test.data)):
    newsgroups_test.data[i] = clearstring(newsgroups_test.data[i])

In [5]:
concat = ' '.join(newsgroups_train.data).split()
vocabulary_size = len(list(set(concat)))
data, count, dictionary, rev_dictionary = build_dataset(concat, vocabulary_size)
print('vocabulary size: %d' % vocabulary_size)
print('Most common words', count[4:10])
print('Sample data', data[:10], [rev_dictionary[i] for i in data[:10]])


vocabulary size: 214326
Most common words [('the', 131819), ('to', 67232), ('of', 64127), ('a', 57044), ('and', 51878), ('in', 42822)]
Sample data [16, 67554, 8512, 34, 94803, 37, 245, 10, 17901, 15881] ['from', 'lerxstwamumdedu', 'wheres', 'my', 'thingsubject', 'what', 'car', 'is', 'thisnntppostinghost', 'racwamumdeduorganization']
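
As a small check (an addition, not in the original run), dictionary and rev_dictionary are inverses of each other, with the four special tokens occupying ids 0-3:

In [ ]:
print(dictionary['the'], rev_dictionary[dictionary['the']])  # id of 'the', 'the'
print([rev_dictionary[i] for i in range(4)])                 # ['GO', 'PAD', 'EOS', 'UNK']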

In [6]:
GO = dictionary['GO']
PAD = dictionary['PAD']
EOS = dictionary['EOS']
UNK = dictionary['UNK']

In [7]:
class Model:
    def __init__(self, size_layer, num_layers, embedded_size,
                 dict_size, dimension_output, learning_rate):
        
        def cells(reuse=False):
            return tf.nn.rnn_cell.LSTMCell(size_layer,
                                           initializer=tf.orthogonal_initializer(),
                                           reuse=reuse)
        
        self.X = tf.placeholder(tf.int32, [None, None])
        self.Y = tf.placeholder(tf.int32, [None])
        self.global_step = tf.Variable(0, trainable=False, name='global_step')
        
        with tf.name_scope('layer_embedded'):
            encoder_embeddings = tf.Variable(tf.random_uniform([dict_size, embedded_size], -1, 1))
            encoder_embedded = tf.nn.embedding_lookup(encoder_embeddings, self.X)
            
            tf.summary.histogram("X", self.X)
            tf.summary.histogram("Embedded", encoder_embeddings)
        
        with tf.name_scope('layer_rnn'):
            rnn_cells = tf.nn.rnn_cell.MultiRNNCell([cells() for _ in range(num_layers)])
            outputs, _ = tf.nn.dynamic_rnn(rnn_cells, encoder_embedded, dtype = tf.float32)
            
        with tf.name_scope('layer_logits'):
            W = tf.get_variable('w',
                                shape=(size_layer, dimension_output),
                                initializer=tf.orthogonal_initializer())
            b = tf.get_variable('b',
shape=(dimension_output,),
                                initializer=tf.zeros_initializer())
            self.logits = tf.matmul(outputs[:, -1], W) + b
            
            tf.summary.histogram("Weight", W)
            tf.summary.histogram("logits", self.logits)
            
        with tf.name_scope('optimizer'):
            self.cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits = self.logits, labels = self.Y))
            self.optimizer = tf.train.AdamOptimizer(
                learning_rate = learning_rate).minimize(self.cost,
                                                        global_step=self.global_step)
            tf.summary.scalar('cost', self.cost)
            
        with tf.name_scope('accuracy'):
            correct_pred = tf.equal(tf.argmax(self.logits, 1), tf.cast(self.Y,tf.int64))
            self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
            tf.summary.scalar('accuracy', self.accuracy)

In [8]:
size_layer = 128
num_layers = 2
embedded_size = 128
dimension_output = len(newsgroups_train.target_names)
learning_rate = 1e-3
maxlen = 50
batch_size = 128
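
maxlen=50 is an aggressive cut for newsgroup posts, which are usually much longer after cleaning. A rough look at how much text survives truncation (an added diagnostic, not in the original run):

In [ ]:
lens = [len(s.split()) for s in newsgroups_train.data]
print('mean tokens: %.1f, truncated: %.1f%%' % (np.mean(lens),
      100 * np.mean([l > maxlen for l in lens])))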

In [9]:
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Model(size_layer, num_layers, embedded_size, vocabulary_size + 4, dimension_output, learning_rate)
sess.run(tf.global_variables_initializer())
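
A smoke test (added here, not part of the original run) confirms the graph wires up end to end: a dummy batch of token ids should yield one logit per newsgroup class.

In [ ]:
dummy = np.zeros((2, maxlen), dtype=np.int32)
print(sess.run(model.logits, feed_dict={model.X: dummy}).shape)  # (2, 20)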

In [10]:
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter('./logs', sess.graph)

In [11]:
train_X = newsgroups_train.data
train_Y = newsgroups_train.target
test_X = newsgroups_test.data
test_Y = newsgroups_test.target

In [ ]:
EARLY_STOPPING, CURRENT_CHECKPOINT, CURRENT_ACC, EPOCH = 2, 0, 0, 0
while True:
    lasttime = time.time()
    if CURRENT_CHECKPOINT == EARLY_STOPPING:
        print('break epoch:%d\n'%(EPOCH))
        break
        
    train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0
    for i in range(0, (len(train_X) // batch_size) * batch_size, batch_size):
        batch_x = str_idx(train_X[i:i+batch_size], dictionary, maxlen)
        # fetch the train op, the metrics and the merged summaries in a
        # single run instead of paying a second forward pass per batch
        acc, loss, _, summary = sess.run([model.accuracy, model.cost, model.optimizer, merged],
                                         feed_dict={model.X: batch_x, model.Y: train_Y[i:i+batch_size]})
        train_loss += loss
        train_acc += acc
        writer.add_summary(summary, global_step=sess.run(model.global_step))
    
    for i in range(0, (len(test_X) // batch_size) * batch_size, batch_size):
        batch_x = str_idx(test_X[i:i+batch_size],dictionary,maxlen)
        acc, loss = sess.run([model.accuracy, model.cost], 
                           feed_dict = {model.X : batch_x, model.Y : test_Y[i:i+batch_size]})
        test_loss += loss
        test_acc += acc
    
    train_loss /= (len(train_X) // batch_size)
    train_acc /= (len(train_X) // batch_size)
    test_loss /= (len(test_X) // batch_size)
    test_acc /= (len(test_X) // batch_size)
    
    if test_acc > CURRENT_ACC:
        print('epoch: %d, pass acc: %f, current acc: %f'%(EPOCH,CURRENT_ACC, test_acc))
        CURRENT_ACC = test_acc
        CURRENT_CHECKPOINT = 0
    else:
        CURRENT_CHECKPOINT += 1
        
    print('time taken:', time.time()-lasttime)
    print('epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\n'%(EPOCH,train_loss,
                                                                                          train_acc,test_loss,
                                                                                          test_acc))
    EPOCH += 1


epoch: 0, pass acc: 0.000000, current acc: 0.145474
time taken: 114.65196776390076
epoch: 0, training loss: 2.775419, training acc: 0.131658, valid loss: 2.952247, valid acc: 0.145474

epoch: 1, pass acc: 0.145474, current acc: 0.249731
time taken: 114.50321960449219
epoch: 1, training loss: 2.043835, training acc: 0.353604, valid loss: 2.827415, valid acc: 0.249731

epoch: 2, pass acc: 0.249731, current acc: 0.309671
time taken: 114.58725595474243
epoch: 2, training loss: 1.381938, training acc: 0.555309, valid loss: 2.640666, valid acc: 0.309671

epoch: 3, pass acc: 0.309671, current acc: 0.333648
time taken: 114.40744924545288
epoch: 3, training loss: 0.959115, training acc: 0.701438, valid loss: 2.668871, valid acc: 0.333648

epoch: 4, pass acc: 0.333648, current acc: 0.348464
time taken: 114.28855514526367
epoch: 4, training loss: 0.657708, training acc: 0.796964, valid loss: 2.878420, valid acc: 0.348464

epoch: 5, pass acc: 0.348464, current acc: 0.351428
time taken: 114.43061232566833
epoch: 5, training loss: 0.465186, training acc: 0.856889, valid loss: 3.024994, valid acc: 0.351428

epoch: 6, pass acc: 0.351428, current acc: 0.379310
time taken: 114.12321949005127
epoch: 6, training loss: 0.316311, training acc: 0.907848, valid loss: 2.947812, valid acc: 0.379310

epoch: 7, pass acc: 0.379310, current acc: 0.407732
time taken: 114.2262933254242
epoch: 7, training loss: 0.215957, training acc: 0.939808, valid loss: 2.848780, valid acc: 0.407732

epoch: 8, pass acc: 0.407732, current acc: 0.422953
time taken: 114.1632707118988
epoch: 8, training loss: 0.166599, training acc: 0.953303, valid loss: 2.944798, valid acc: 0.422953

epoch: 9, pass acc: 0.422953, current acc: 0.424165
time taken: 114.53234839439392
epoch: 9, training loss: 0.094133, training acc: 0.974787, valid loss: 3.075085, valid acc: 0.424165

epoch: 10, pass acc: 0.424165, current acc: 0.427128
time taken: 114.63622403144836
epoch: 10, training loss: 0.066248, training acc: 0.984375, valid loss: 3.153322, valid acc: 0.427128

time taken: 114.65429186820984
epoch: 11, training loss: 0.041573, training acc: 0.991477, valid loss: 3.255511, valid acc: 0.424434

time taken: 114.4284360408783
epoch: 12, training loss: 0.036154, training acc: 0.990589, valid loss: 3.344553, valid acc: 0.424030

break epoch:13
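
The sklearn metrics import at the top is never used; a natural follow-up (an addition, not part of the original run) is a per-class report on the full test set with the trained graph:

In [ ]:
test_logits = sess.run(model.logits,
                       feed_dict={model.X: str_idx(test_X, dictionary, maxlen)})
print(metrics.classification_report(test_Y, np.argmax(test_logits, 1),
                                    target_names=newsgroups_test.target_names))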


In [ ]:
!tensorboard --logdir=./logs


TensorBoard 0.4.0 at http://husein-G1-Sniper-H6:6006 (Press CTRL+C to quit)