Use Tensorflow to Generate Text for Santi

Define some helper functions


In [1]:
import pickle
import os
import re

def load_text(path):
    input_file = os.path.join(path)
    with open(input_file, 'r') as f:
        text_data = f.read()
    return text_data

def preprocess_and_save_data(text, token_lookup, create_lookup_tables):
    token_dict = token_lookup()
    for key, token in token_dict.items():
        text = text.replace(key, '{}'.format(token))
        
    text = list(text)
    vocab_to_in, int_to_vocab = create_lookup_tables(text)
    int_text = [vocab_to_in[word] for word in text]
    pickle.dump((int_text, vocab_to_in, int_to_vocab, token_dict), open('preprocess.p', 'wb'))
    
def load_preprocess():
    return pickle.load(open('preprocess.p', mode='rb'))

def save_params(params):
    pickle.dump(params, open('params.p', 'wb'))
    
def load_params():
    return pickle.load(open('params.p', mode='rb'))

In [2]:
import math
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.rnn_cell import RNNCell

class BNLSTMCell(RNNCell):
    '''Batch normalized LSTM as described in arxiv.org/abs/1603.09025'''
    def __init__(self, num_units, training):
        self.num_units = num_units
        self.training = training

    @property
    def state_size(self):
        return (self.num_units, self.num_units)

    @property
    def output_size(self):
        return self.num_units

    def __call__(self, x, state, scope=None):
        with tf.variable_scope(scope or type(self).__name__):
            c, h = state

            x_size = x.get_shape().as_list()[1]
            W_xh = tf.get_variable('W_xh',
                [x_size, 4 * self.num_units],
                initializer=orthogonal_initializer())
            W_hh = tf.get_variable('W_hh',
                [self.num_units, 4 * self.num_units],
                initializer=bn_lstm_identity_initializer(0.95))
            bias = tf.get_variable('bias', [4 * self.num_units])

            xh = tf.matmul(x, W_xh)
            hh = tf.matmul(h, W_hh)

            bn_xh = batch_norm(xh, 'xh', self.training)
            bn_hh = batch_norm(hh, 'hh', self.training)

            hidden = bn_xh + bn_hh + bias

            i, j, f, o = tf.split(1, 4, hidden)

            new_c = c * tf.sigmoid(f) + tf.sigmoid(i) * tf.tanh(j)
            bn_new_c = batch_norm(new_c, 'c', self.training)

            new_h = tf.tanh(bn_new_c) * tf.sigmoid(o)

            return new_h, (new_c, new_h)

def orthogonal(shape):
    flat_shape = (shape[0], np.prod(shape[1:]))
    a = np.random.normal(0.0, 1.0, flat_shape)
    u, _, v = np.linalg.svd(a, full_matrices=False)
    q = u if u.shape == flat_shape else v
    return q.reshape(shape)

def bn_lstm_identity_initializer(scale):
    def _initializer(shape, dtype=tf.float32, partition_info=None):
        '''Ugly cause LSTM params calculated in one matrix multiply'''
        size = shape[0]
        # gate (j) is identity
        t = np.zeros(shape)
        t[:, size:size * 2] = np.identity(size) * scale
        t[:, :size] = orthogonal([size, size])
        t[:, size * 2:size * 3] = orthogonal([size, size])
        t[:, size * 3:] = orthogonal([size, size])
        return tf.constant(t, dtype)

    return _initializer

def orthogonal_initializer():
    def _initializer(shape, dtype=tf.float32, partition_info=None):
        return tf.constant(orthogonal(shape), dtype)
    return _initializer

def batch_norm(x, name_scope, training, epsilon=1e-3, decay=0.999):
    '''Assume 2d [batch, values] tensor'''

    with tf.variable_scope(name_scope):
        size = x.get_shape().as_list()[1]

        scale = tf.get_variable('scale', [size], initializer=tf.constant_initializer(0.1))
        offset = tf.get_variable('offset', [size])

        pop_mean = tf.get_variable('pop_mean', [size], initializer=tf.zeros_initializer, trainable=False)
        pop_var = tf.get_variable('pop_var', [size], initializer=tf.ones_initializer, trainable=False)
        batch_mean, batch_var = tf.nn.moments(x, [0])

        train_mean_op = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
        train_var_op = tf.assign(pop_var, pop_var * decay + batch_var * (1 - decay))

        def batch_statistics():
            with tf.control_dependencies([train_mean_op, train_var_op]):
                return tf.nn.batch_normalization(x, batch_mean, batch_var, offset, scale, epsilon)

        def population_statistics():
            return tf.nn.batch_normalization(x, pop_mean, pop_var, offset, scale, epsilon)

        return tf.cond(training, batch_statistics, population_statistics)

Read in data & Do some cleaning


In [3]:
data_path = './text/santi.txt'
text = load_text(data_path)
text = text.replace(' ', '')

# reduce 2 or more empty line to 1
text = re.sub(r'\n{2,}', '\n', text)
# replace ...... with 。
text = re.sub(r'\.+', '。', text)
# remove the content in 《》
text = re.sub(r'《.*》', '', text)
# remove ――
text = re.sub(r'――', '', text)
text = re.sub(r'\u3000', '', text)
print(len(text))
num_words_for_training = 500000

text = text[-num_words_for_training:]
lines_of_text = text.split('\n')
print(len(lines_of_text))
print(lines_of_text[:20])


878856
6352
['可是罗辑反复察看,除了那个滑动开关外没有任何东西,他使劲拧杯子底,但底部与杯子是一体化的。', '“不要乱动这里的用品,你们还不了解,会有危险的。”护士看到罗辑的举动后说。', '“我想知道它从哪儿充电。”', '“充……电?”护士生涩地重复着这个她显然第一次听到的词。', '“就是Charge、Recharge。”罗辑提示说,护士仍然迷惑地摇摇头。', '“不是充电式的……那里面的电池用完了怎么办呢?”', '“电池?”', '“就是Battery呀,你们现在没有电池了吗?”看到护士又摇头,罗辑说,“那这杯子里的电从哪儿来?”', '“电?到处都有电啊!”护士很不以为然地说。', '“杯子里的电用不完?”', '“用不完。”护士点点头说。', '“永远用不完?”', '“永远用不完,电怎么会用完呢。”', '护士走后,罗辑仍捧着那个杯子不放。他没注意熊文的嘲笑,只觉得心潮澎湃,知道自己其实是捧着一个人类千古梦想的圣物捧着的是永动机。如果人类真的得到了无尽的能量,那他们几乎可以得到一切了,现在他相信了美丽护士的话:事情可能真的没那么严重。', '当医生来到监护室进行例行检查时,罗辑向他问起了面壁计划。', '“知道,一个古代的笑话。”医生随口答道。', '“那些面壁者都怎么样了?”', '“好像是一个自杀了,另一个被石头砸死了……都是很早的事,快两个世纪了吧。”', '“还有两个呢?”', '“不知道,还在冬眠中吧。”']

In [4]:
print(lines_of_text[-10:])


['现在,小宇宙的太空中,只剩下一艘细长的小飞船和漂浮在船边的三个人。', '智子拿着一个金属盒,那是他们要留在小宇宙中的东西,是要送往新宇宙的漂流瓶。它的主体是一台微型电脑,电脑的量子存储器中存储着小宇宙电脑主机的全部信息,这几乎是三体和地球文明的全部记忆了。当新宇宙诞生时,金属盒会收到门发来的信号,然后用自己的小推进器穿过门,进入新宇宙。它会在新宇宙的高维太空中飘浮,等待着被拾取和解读的那一天。同时,它还会用中微子束把自己存储的信息不断地播放出来,如果新宇宙中也有中微子的话。', '程心和关一帆相信,其他的小宇宙,那些响应回归运动呼吁的小宇宙,也在做着同样的事。如果新宇宙真的诞生,其中会有许多来自旧宇宙的漂流瓶。可以相信,相当一部分漂流瓶中的记忆体里存储的信息可能达到这样的程度:记录了那个文明每一个个体的全部记忆和意识,以及每个个体的全部生物学细节,以至于新宇宙中的文明可以根据这些信息复原那个文明。', '“还可以再留下五公斤吗?”程心问道。她在飞船的另一侧,身穿太空服,手中举着一个发光的透明球体,球体直径约半米,里面飘浮着几个水球,有的里面游动着几条小鱼,有的里面生长着绿藻;还有两块漂浮的微型陆地,上面长着嫩绿的青草。光亮是从球体顶部发出的,那里安装着一个小小的发光体,是这个小世界的太阳。这是一个全封闭的生态球,是程心和智子十多天的工作成果,只要球体内的小太阳还能够发光,这个小小的生态系统就能生存下去。只要有它留在这里,647号宇宙就不是一个没有生命的黑暗世界。', '“当然可以,大宇宙不会因为这五公斤就不坍缩了。”关一帆说,他还有一个没说出来的想法:也许大宇宙真的会因为相差一个原子的质量而由封闭转为开放。大自然的精巧有时超出想象,比如生命的诞生,就需要各项宇宙参数在几亿亿分之一精度上的精确配合。但程心仍然可以留下她的生态球,因为在那无数文明创造的无数小宇宙中,肯定有相当一部分不响应回归运动的号召,所以,大宇宙最终被夺走的质量至少有几亿吨,甚至可能是几亿亿亿吨。', '但愿大宇宙能够忽略这个误差。', '程心和关一帆进入了飞船,智子最后也进来了。她早就不再穿那身华丽的和服了,她现在身着迷彩服,再次成为一名轻捷精悍的战士,她的身上佩带着许多武器和生存装备,最引人注目的是那把插在背后的武士刀。', '“放心,我在,你们就在!”智子对两位人类朋友说。', '聚变发动机启动了,推进器发出幽幽的蓝光,飞船缓缓地穿过了宇宙之门。', '小宇宙中只剩下漂流瓶和生态球。漂流瓶隐没于黑暗里,在一千米见方的宇宙中,只有生态球里的小太阳发出一点光芒。在这个小小的生命世界中,几只清澈的水球在零重力环境中静静地飘浮着,有一条小鱼从一只水球中蹦出,跃入另一只水球,轻盈地穿游于绿藻之间。在一小块陆地上的草丛中,有一滴露珠从一片草叶上脱离,旋转着飘起,向太空中折射出一缕晶莹的阳光。']

Preprocess the text data to suit the model


In [5]:
def create_lookup_tables(input_data):
    vocab = set(input_data)
    vocab_to_int = {word: idx for idx, word in enumerate(vocab)}
    int_to_vocab = dict(enumerate(vocab))
    return vocab_to_int, int_to_vocab

def token_lookup():
    """ Lookup tables for Chinese punctuations
    """
    symbols = set(['。', ',', '“', "”", ';', '!', '?', '(', ')', '\n'])
    tokens = ["P", "C", "Q", "T", "S", "E", "M", "I", "O", "R"]
    return dict(zip(symbols, tokens))

In [6]:
# process and save the processed data
preprocess_and_save_data(''.join(lines_of_text), token_lookup, create_lookup_tables)

In [7]:
int_text, vocab_to_int, int_to_vocab, token_dict = load_preprocess()

Check Tensorflow environment


In [8]:
import warnings
import tensorflow as tf
import numpy as np

# Check TensorFlow Version, need > 1
print('TensorFlow Version: {}'.format(tf.__version__))

# Check for a GPU
if not tf.test.gpu_device_name():
    warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
    print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))


TensorFlow Version: 1.2.1
/home/ubuntu/anaconda2/envs/py36/lib/python3.6/site-packages/ipykernel/__main__.py:10: UserWarning: No GPU found. Please use a GPU to train your neural network.

Build Our RNN!

Set super params


In [9]:
# 训练循环次数
num_epochs = 150

# batch大小
batch_size = 256

# lstm层中包含的unit个数
rnn_size = 512

# embedding layer的大小
embed_dim = 1200

# 训练步长
seq_length = 60

# 学习率
learning_rate = 0.002

# 每多少步打印一次训练信息
show_every_n_batches = 100

# 保存session状态的位置
save_dir = './santi/save'

Create the placeholders for input, targets and learning_Rate


In [10]:
def get_inputs():

    # inputs和targets的类型都是整数的
    inputs = tf.placeholder(tf.int32, [None, None], name='inputs')
    targets = tf.placeholder(tf.int32, [None, None], name='targets')
    learning_rate = tf.placeholder(tf.float32, name='learning_rate')

    return inputs, targets, learning_rate

Create rnn cell,use lstm cell,and create lstm layers. Use dropout,and initialize lstm layer


In [11]:
def get_init_cell(batch_size, rnn_size):
    # lstm层数
    num_layers = 2

    # dropout时的保留概率
    keep_prob = 0.8

    # 创建包含rnn_size个神经元的lstm cell
    # cell = tf.contrib.rnn.BasicLSTMCell(rnn_size)
    cell = tf.contrib.rnn.LayerNormBasicLSTMCell(rnn_size)

    # 使用dropout机制防止overfitting等
    drop = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)

    # 创建2层lstm层
    cell = tf.contrib.rnn.MultiRNNCell([drop for _ in range(num_layers)])

    # 初始化状态为0.0
    init_state = cell.zero_state(batch_size, tf.float32)

    # 使用tf.identify给init_state取个名字,后面生成文字的时候,要使用这个名字来找到缓存的state
    init_state = tf.identity(init_state, name='init_state')

    return cell, init_state

Create embedding layers


In [12]:
def get_embed(input_data, vocab_size, embed_dim):
    # create tf variable based on embedding layer size and vocab size
    embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim)), dtype=tf.float32)
    return tf.nn.embedding_lookup(embedding, input_data)

创建rnn节点,使用dynamic_rnn方法计算出output和final_state


In [13]:
def build_rnn(cell, inputs):
    outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    final_state = tf.identity(final_state, name="final_state")
    return outputs, final_state

In [14]:
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
    embed = get_embed(input_data, vocab_size, rnn_size)
    outputs, final_state = build_rnn(cell, embed)
    logits = tf.contrib.layers.fully_connected(outputs, vocab_size, activation_fn=None,
                                               weights_initializer = tf.truncated_normal_initializer(stddev=0.1),
                                               biases_initializer=tf.zeros_initializer())
    return logits, final_state

Define get_batches to train batch by batch


In [15]:
def get_batches(int_text, batch_size, seq_length):
    # 计算有多少个batch可以创建
    n_batches = (len(int_text) // (batch_size * seq_length))

    # 计算每一步的原始数据,和位移一位之后的数据
    batch_origin = np.array(int_text[: n_batches * batch_size * seq_length])
    batch_shifted = np.array(int_text[1: n_batches * batch_size * seq_length + 1])

    # 将位移之后的数据的最后一位,设置成原始数据的第一位,相当于在做循环
    batch_shifted[-1] = batch_origin[0]

    batch_origin_reshape = np.split(batch_origin.reshape(batch_size, -1), n_batches, 1)
    batch_shifted_reshape = np.split(batch_shifted.reshape(batch_size, -1), n_batches, 1)

    batches = np.array(list(zip(batch_origin_reshape, batch_shifted_reshape)))

    return batches

Now, let's build the whole RNN model!


In [16]:
from tensorflow.contrib import seq2seq

train_graph = tf.Graph()
with train_graph.as_default():
    vocab_size = len(int_to_vocab)
    
    input_text, targets, lr = get_inputs()
    input_data_shape = tf.shape(input_text)
    
    # 创建rnn的cell和初始状态节点,rnn的cell已经包含了lstm,dropout
    # 这里的rnn_size表示每个lstm cell中包含了多少的神经元
    cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
    
    # 创建计算loss和finalstate的节点
    logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)

    # 使用softmax计算最后的预测概率
    probs = tf.nn.softmax(logits, name='probs')

    # 计算loss
    cost = seq2seq.sequence_loss(
        logits,
        targets,
        tf.ones([input_data_shape[0], input_data_shape[1]]))

    # 使用Adam提督下降
    optimizer = tf.train.AdamOptimizer(lr)

    # 裁剪一下Gradient输出,最后的gradient都在[-1, 1]的范围内
    gradients = optimizer.compute_gradients(cost)
    capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
    train_op = optimizer.apply_gradients(capped_gradients)

Now, let's train the model


In [17]:
# 获得训练用的所有batch
batches = get_batches(int_text, batch_size, seq_length)

# 打开session开始训练,将上面创建的graph对象传递给session
with tf.Session(graph=train_graph) as sess:
    sess.run(tf.global_variables_initializer())

    for epoch_i in range(num_epochs):
        state = sess.run(initial_state, {input_text: batches[0][0]})

        for batch_i, (x, y) in enumerate(batches):
            feed = {
                input_text: x,
                targets: y,
                initial_state: state,
                lr: learning_rate}
            train_loss, state, _ = sess.run([cost, final_state, train_op], feed)

            # 打印训练信息
            if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
                print('Epoch {:>3} Batch {:>4}/{}   train_loss = {:.3f}'.format(
                    epoch_i,
                    batch_i,
                    len(batches),
                    train_loss))

    # 保存模型
    saver = tf.train.Saver()
    saver.save(sess, save_dir)
    print('Model Trained and Saved')

save_params((seq_length, save_dir))


Epoch   0 Batch    0/32   train_loss = 8.313
Epoch   3 Batch    4/32   train_loss = 6.277
Epoch   6 Batch    8/32   train_loss = 6.219
Epoch   9 Batch   12/32   train_loss = 4.989
Epoch  12 Batch   16/32   train_loss = 4.233
Epoch  15 Batch   20/32   train_loss = 3.857
Epoch  18 Batch   24/32   train_loss = 3.561
Epoch  21 Batch   28/32   train_loss = 3.414
Epoch  25 Batch    0/32   train_loss = 3.260
Epoch  28 Batch    4/32   train_loss = 3.149
Epoch  31 Batch    8/32   train_loss = 3.003
Epoch  34 Batch   12/32   train_loss = 2.890
Epoch  37 Batch   16/32   train_loss = 2.785
Epoch  40 Batch   20/32   train_loss = 2.667
Epoch  43 Batch   24/32   train_loss = 2.556
Epoch  46 Batch   28/32   train_loss = 2.491
Epoch  50 Batch    0/32   train_loss = 2.467
Epoch  53 Batch    4/32   train_loss = 2.397
Epoch  56 Batch    8/32   train_loss = 2.328
Epoch  59 Batch   12/32   train_loss = 2.278
Epoch  62 Batch   16/32   train_loss = 2.219
Epoch  65 Batch   20/32   train_loss = 2.166
Epoch  68 Batch   24/32   train_loss = 2.083
Epoch  71 Batch   28/32   train_loss = 2.035
Epoch  75 Batch    0/32   train_loss = 2.035
Epoch  78 Batch    4/32   train_loss = 1.988
Epoch  81 Batch    8/32   train_loss = 1.978
Epoch  84 Batch   12/32   train_loss = 1.916
Epoch  87 Batch   16/32   train_loss = 1.870
Epoch  90 Batch   20/32   train_loss = 1.851
Epoch  93 Batch   24/32   train_loss = 1.800
Epoch  96 Batch   28/32   train_loss = 1.755
Epoch 100 Batch    0/32   train_loss = 1.758
Epoch 103 Batch    4/32   train_loss = 1.754
Epoch 106 Batch    8/32   train_loss = 1.728
Epoch 109 Batch   12/32   train_loss = 1.691
Epoch 112 Batch   16/32   train_loss = 1.646
Epoch 115 Batch   20/32   train_loss = 1.639
Epoch 118 Batch   24/32   train_loss = 1.589
Epoch 121 Batch   28/32   train_loss = 1.565
Epoch 125 Batch    0/32   train_loss = 1.588
Epoch 128 Batch    4/32   train_loss = 1.570
Epoch 131 Batch    8/32   train_loss = 1.558
Epoch 134 Batch   12/32   train_loss = 1.508
Epoch 137 Batch   16/32   train_loss = 1.484
Epoch 140 Batch   20/32   train_loss = 1.480
Epoch 143 Batch   24/32   train_loss = 1.434
Epoch 146 Batch   28/32   train_loss = 1.417
Model Trained and Saved

In [27]:
# Save the trained model
save_params((seq_length, save_dir))

Let's Generate Some Text!


In [22]:
import tensorflow as tf
import numpy as np
# %cd santi
_, vocab_to_int, int_to_vocab, token_dict = load_preprocess()
seq_length, load_dir = load_params()
# %cd ..

要使用保存的模型,我们要将保存下来的变量(tensor)通过指定的name获取到


In [23]:
def get_tensors(loaded_graph):
    inputs = loaded_graph.get_tensor_by_name("inputs:0")

    initial_state = loaded_graph.get_tensor_by_name("init_state:0")

    final_state = loaded_graph.get_tensor_by_name("final_state:0")

    probs = loaded_graph.get_tensor_by_name("probs:0")

    return inputs, initial_state, final_state, probs

def pick_word(probabilities, int_to_vocab):
    chances = []
    for idx, prob in enumerate(probabilities):    
        if prob >= 0.05:
            chances.append(int_to_vocab[idx])
    if len(chances) < 1:
        return str(int_to_vocab[np.argmax(probabilities)])
    else:
        rand = np.random.randint(0, len(chances))
        return str(chances[rand])

In [24]:
# 生成文本的长度
gen_length = 500

# 文章开头的字,指定一个即可,这个字必须是在训练词汇列表中的
prime_word = '从'

loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
    # 加载保存过的session
    loader = tf.train.import_meta_graph(load_dir + '.meta')
    loader.restore(sess, load_dir)

    # 通过名称获取缓存的tensor
    input_text, initial_state, final_state, probs = get_tensors(loaded_graph)

    # 准备开始生成文本
    gen_sentences = [prime_word]
    prev_state = sess.run(initial_state, {input_text: np.array([[1]])})

    # 开始生成文本
    for n in range(gen_length):
        dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
        dyn_seq_length = len(dyn_input[0])

        probabilities, prev_state = sess.run(
            [probs, final_state],
            {input_text: dyn_input, initial_state: prev_state})
        # print(probabilities)
        # print(len(probabilities))
        pred_word = pick_word(probabilities[0][dyn_seq_length - 1], int_to_vocab)

        gen_sentences.append(pred_word)

    # 将标点符号还原
    novel = ''.join(gen_sentences)
    for key, token in token_dict.items():
        ending = ' ' if key in ['\n', '(', '“'] else ''
        novel = novel.replace(token.upper(), key)
    novel = novel.replace('\n ', '\n')
    novel = novel.replace('( ', '(')

    print(novel)


INFO:tensorflow:Restoring parameters from ./santi/save
从环区和尾迹飞快地飞过,由于光速的光芒显然和原因各个星系都是不同样,不减速区飞出“更有限光准。比如:这要有一个比例更强约二分之六。这里,二十二天后一千六点,七万年的范围内都不放下来,取确了多少大行?来一个宇宙基本在三维中等话?这个问题就不能成附这个选择了,一个世界在本质的流利监学上并令人不以为光。那些可能拯救人们所为都不可能,但很少有人为这幕判品真正的有意义。现在云舰队的战略情报的思议也在这诡异,严肃地思望有可以在国际上采意识的成功人。对于所有人都说,恐龙是一件很平静。这样平方的建筑不比取由比壳上的低大气,贼而约通一个环层,最粗利的原因都能看明,而上膜的建筑几乎可以做出这把伞面作好的景象。因为这个时代,其粗暴总角有一种大型核核核,在水滴表面就能够在小宇宙飞行器相长或。“魔戒”的尾迹呈蓝色,看上去很小。这里丝毫没有减过,光结很可能没用。这颗小太阳在光后沿,原有零十五分短近一段左1右行走。“小区的落处聚变堆是由摧毁运行。”“不,不要原因有一种让社会太空武器,向外表发现怎么样?”医生问。“从危机纪年后,他已经是面壁计划了,但我仍未给治安。”“给世界比这更毁灭两个世纪了。”“什么地方不呢,‘自然

In [37]:
vocab_size


Out[37]:
2734

In [47]:
len(probabilities)


Out[47]:
1

In [49]:
max(probabilities[0][0])


Out[49]:
0.17373672

In [ ]: