In [1]:
# Bridge the gap between Python 2 and Python 3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import os
import sys

import tensorflow as tf

Py3 = sys.version_info[0] == 3

In [2]:
def _read_words(filename):
    # Read the whole file, mark line breaks with an end-of-sentence
    # token, and split into whitespace-separated words.
    with tf.gfile.GFile(filename, "r") as f:
        if Py3:
            return f.read().replace("\n", "<eos>").split()
        else:
            return f.read().decode("utf-8").replace("\n", "<eos>").split()

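A quick sanity check of the tokenization above on a made-up string (not PTB data). The PTB files delimit tokens with spaces, so replacing "\n" with "<eos>" before split() yields one end-of-sentence token per line.

In [ ]:
# Hypothetical sample text, not part of the PTB corpus.
sample = "the cat sat \n on the mat \n"
print(sample.replace("\n", "<eos>").split())
# ['the', 'cat', 'sat', '<eos>', 'on', 'the', 'mat', '<eos>']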
In [3]:
def _build_vocab(filename):
    data = _read_words(filename)

    # Rank words by descending frequency, breaking ties alphabetically.
    counter = collections.Counter(data)
    count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))

    # Assign ids 0, 1, 2, ... in that order.
    words, _ = list(zip(*count_pairs))
    word_to_id = dict(zip(words, range(len(words))))

    return word_to_id

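To make the sort key concrete, here is the same construction run on a toy token list (made-up data): words are ranked by descending count, ties broken alphabetically, then numbered from zero.

In [ ]:
# Toy illustration of _build_vocab's ordering (hypothetical tokens).
toy = ["the", "cat", "the", "mat", "cat", "the"]
counter = collections.Counter(toy)
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
print(count_pairs)                # [('the', 3), ('cat', 2), ('mat', 1)]
words, _ = list(zip(*count_pairs))
print(dict(zip(words, range(len(words)))))  # {'the': 0, 'cat': 1, 'mat': 2} (key order may vary)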
In [4]:
def _file_to_word_ids(filename, word_to_id):
    data = _read_words(filename)

    # Words missing from the vocabulary are silently dropped.
    return [word_to_id[word] for word in data if word in word_to_id]

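A tiny example with a hypothetical vocabulary, showing that out-of-vocabulary words are dropped rather than mapped to an unknown id:

In [ ]:
# "dog" is not in the toy vocabulary, so it disappears from the output.
toy_vocab = {"the": 0, "cat": 1, "mat": 2}
print([toy_vocab[w] for w in ["the", "cat", "dog", "mat"] if w in toy_vocab])
# [0, 1, 2]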
In [5]:
def ptb_raw_data(data_path=None):
    
    train_path = os.path.join(data_path, "data/ptb.train.txt")
    valid_path = os.path.join(data_path, "data/ptb.valid.txt")
    test_path = os.path.join(data_path, "data/ptb.test.txt")
    
    # The vocabulary comes from the training set only, so validation and
    # test words outside it are dropped.
    word_to_id = _build_vocab(train_path)
    train_data = _file_to_word_ids(train_path, word_to_id)
    valid_data = _file_to_word_ids(valid_path, word_to_id)
    test_data = _file_to_word_ids(test_path, word_to_id)
    vocabulary = len(word_to_id)
    return train_data, valid_data, test_data, vocabulary

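A usage sketch, assuming the PTB text files have been extracted into a data/ directory under data_path (the path below is hypothetical, so the call is left commented out):

In [ ]:
# train_data, valid_data, test_data, vocab = ptb_raw_data(".")
# print(len(train_data), vocab)  # ~930k training ids and a 10000-word
#                                # vocabulary for the standard PTB files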
In [6]:
def ptb_producer(raw_data, batch_size, num_steps, name=None):
    with tf.name_scope(name, "PTBProducer", [raw_data, batch_size, num_steps]):
        raw_data = tf.convert_to_tensor(raw_data, name="raw_data", dtype=tf.int32)

        # Trim the data so it divides evenly into batch_size rows.
        data_len = tf.size(raw_data)
        batch_len = data_len // batch_size
        data = tf.reshape(raw_data[0:batch_size * batch_len],
                          [batch_size, batch_len])

        epoch_size = (batch_len - 1) // num_steps
        assertion = tf.assert_positive(
            epoch_size,
            message="epoch_size == 0, decrease batch_size or num_steps")

        with tf.control_dependencies([assertion]):
            epoch_size = tf.identity(epoch_size, name="epoch_size")

        # x holds num_steps consecutive word ids per row; y is the same
        # window shifted one position to the right (the next-word targets).
        i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()
        x = tf.strided_slice(data, [0, i * num_steps],
                             [batch_size, (i + 1) * num_steps])
        x.set_shape([batch_size, num_steps])
        y = tf.strided_slice(data, [0, i * num_steps + 1],
                             [batch_size, (i + 1) * num_steps + 1])
        y.set_shape([batch_size, num_steps])
        return x, y

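The queue-based producer is awkward to run interactively (it needs tf.train.start_queue_runners), so here is a pure-NumPy sketch of the same slicing on made-up ids. It shows that y is x shifted one token to the right, i.e. the next-word targets.

In [ ]:
# Pure-NumPy sketch of ptb_producer's slicing (hypothetical toy ids).
import numpy as np
raw = np.arange(20)                        # pretend word ids 0..19
batch_size, num_steps = 2, 4
batch_len = len(raw) // batch_size         # 10
data = raw[:batch_size * batch_len].reshape(batch_size, batch_len)
epoch_size = (batch_len - 1) // num_steps  # (10 - 1) // 4 = 2
for i in range(epoch_size):
    x = data[:, i * num_steps:(i + 1) * num_steps]
    y = data[:, i * num_steps + 1:(i + 1) * num_steps + 1]
    print("x =", x.tolist(), " y =", y.tolist())
# x = [[0, 1, 2, 3], [10, 11, 12, 13]]  y = [[1, 2, 3, 4], [11, 12, 13, 14]]
# x = [[4, 5, 6, 7], [14, 15, 16, 17]]  y = [[5, 6, 7, 8], [15, 16, 17, 18]]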
In [8]:
import numpy as np
#import util
import time

from tensorflow.python.client import device_lib

flags = tf.flags
logging = tf.logging

flags.DEFINE_string("model", "small",
                    "A type of model.Possible options are small, medium, large.")
flags.DEFINE_string("data_path", None,
                    "Where the training/test data is stored.")
flags.DEFINE_string("save_path", None,
                    "Model output directory.")
flags.DEFINE_bool("use_fp16", False,
                  "Train using 16-bit floats instead of 32bit floats")
flags.DEFINE_integer("num_gpus", 1,
                     "If larger than 1, Grappler AutoParallel optimizer "
                     "will create multiple training replicas with each GPU "
                     "running one replica.")
flags.DEFINE_string("rnn_mode", None,
                    "The low level implementation of lstm cell: one of CUDNN, "
                    "BASIC, and BLOCK, representing cudnn_lstm, basic_lstm, "
                    "and lstm_block_cell classes.")

FLAGS = flags.FLAGS
BASIC = "basic"
CUDNN = "cudnn"
BLOCK = "block"

In [9]:
def data_type():
    return tf.float16 if FLAGS.use_fp16 else tf.float32

class PTBInput(object):
    """The input data."""
    
    def __init__(self, config, data, name=None):
        self.batch_size = batch_size = config.batch_size
        self.num_steps = num_steps = config.num_steps
        self.epoch_size = ((len(data) // batch_size) - 1) // num_steps
        self.input_data, self.targets = ptb_producer(
            data, batch_size, num_steps, name=name)

In [ ]:
class PTBModel(object):
    """The PTB model."""
    
    def __init__(self, is_training, config, input_):
        self._is_training = is_training
        self._input = input_
        self._rnn_params = None
        self._cell = None
        self.batch_size = input_.batch_size
        self.num_steps = input_.num_steps
        size = config.hidden_size
        vocab_size = config.vocab_size
        
        with tf.device("/cpu:0"):
            embedding = tf.get_variable("embedding",
                                       [vocab_size, size], dtype=data_type())
            inputs = tf.nn.embedding_lookup(embedding, input_.input_data)
            
        if is_training and config.keep_prob < 1:
            inputs = tf.nn.dropout(inputs, config.keep_prob)
            
        output, state = self._build_rnn_graph(inputs, config, is_training)