In [1]:
# http://www.wildml.com/2016/08/rnns-in-tensorflow-a-practical-guide-and-undocumented-features/
# http://learningtensorflow.com/index.html
# http://suriyadeepan.github.io/2016-12-31-practical-seq2seq/

import tensorflow as tf
import numpy as np
from tensorflow.contrib import rnn
import pprint
pp = pprint.PrettyPrinter(indent=4)
sess = tf.InteractiveSession()

In [2]:
# One hot encoding for each char in 'hello'
h = [1, 0, 0, 0]
e = [0, 1, 0, 0]
l = [0, 0, 1, 0]
o = [0, 0, 0, 1]
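These four vectors are just the rows of the 4x4 identity matrix, so they can also be built programmatically. A minimal sketch (the idx2char ordering here is an assumption, not part of the original cell):

# Each character maps to one row of the identity matrix.
idx2char = ['h', 'e', 'l', 'o']                  # assumed vocabulary order
one_hot = np.eye(len(idx2char), dtype=np.float32)
h2, e2, l2, o2 = one_hot                         # numerically identical to h, e, l, o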


In [3]:
with tf.variable_scope('one_cell') as scope:
    # One cell RNN input_dim (4) -> output_dim (1), since hidden_size = 1
    hidden_size = 1
    cell = tf.contrib.rnn.BasicRNNCell(num_units=hidden_size)
    print(cell.output_size, cell.state_size)

    x_data = np.array([[h]], dtype=np.float32) # x_data = [[[1,0,0,0]]]
    pp.pprint(x_data)
    outputs, _states = tf.nn.dynamic_rnn(cell, x_data, dtype=tf.float32)

    sess.run(tf.global_variables_initializer())
    pp.pprint(outputs.eval())


1 1
array([[[ 1.,  0.,  0.,  0.]]], dtype=float32)
array([[[-0.68226224]]], dtype=float32)
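Internally, BasicRNNCell computes h_t = tanh(x_t W + h_{t-1} U + b) with a randomly initialized kernel, which is why the -0.68226224 value above is not reproducible across runs. A numpy sketch of one step, with hypothetical weights standing in for the cell's kernel:

# One RNN step by hand; W, U, b are made-up stand-ins for the learned kernel.
W = np.random.randn(4, 1).astype(np.float32)    # input -> hidden
U = np.random.randn(1, 1).astype(np.float32)    # hidden -> hidden
b = np.zeros(1, dtype=np.float32)
x_t = np.array([h], dtype=np.float32)           # one-hot 'h', shape (1, 4)
h_prev = np.zeros((1, 1), dtype=np.float32)     # zero initial state
h_t = np.tanh(x_t.dot(W) + h_prev.dot(U) + b)   # shape (1, 1): one output per step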


In [4]:
with tf.variable_scope('two_sequences') as scope:
    # One cell RNN input_dim (4) -> output_dim (2). sequence: 5
    hidden_size = 2
    cell = tf.contrib.rnn.BasicRNNCell(num_units=hidden_size)
    x_data = np.array([[h, e, l, l, o]], dtype=np.float32)
    print(x_data.shape)
    pp.pprint(x_data)
    outputs, states = tf.nn.dynamic_rnn(cell, x_data, dtype=tf.float32)
    sess.run(tf.global_variables_initializer())
    pp.pprint(outputs.eval())


(1, 5, 4)
array([[[ 1.,  0.,  0.,  0.],
        [ 0.,  1.,  0.,  0.],
        [ 0.,  0.,  1.,  0.],
        [ 0.,  0.,  1.,  0.],
        [ 0.,  0.,  0.,  1.]]], dtype=float32)
array([[[-0.60135782, -0.67066818],
        [-0.76201087,  0.15562601],
        [-0.85076123, -0.12917398],
        [-0.91217172,  0.11725479],
        [ 0.27258348, -0.050533  ]]], dtype=float32)
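Note the output shape: dynamic_rnn emits one hidden vector per time step, so outputs is (batch, sequence_length, hidden_size) = (1, 5, 2) here. A quick check (sketch, run inside the same scope):

# The static shape is already known at graph-construction time.
print(outputs.shape)   # (1, 5, 2)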


In [5]:
with tf.variable_scope('3_batches') as scope:
    # One cell RNN input_dim (4) -> output_dim (2). sequence: 5, batch 3
    # 3 batches 'hello', 'eolll', 'lleel'
    x_data = np.array([[h, e, l, l, o],
                       [e, o, l, l, l],
                       [l, l, e, e, l]], dtype=np.float32)
    pp.pprint(x_data)
    
    hidden_size = 2
    cell = rnn.BasicLSTMCell(num_units=hidden_size, state_is_tuple=True)
    outputs, _states = tf.nn.dynamic_rnn(
        cell, x_data, dtype=tf.float32)
    sess.run(tf.global_variables_initializer())
    pp.pprint(outputs.eval())


array([[[ 1.,  0.,  0.,  0.],
        [ 0.,  1.,  0.,  0.],
        [ 0.,  0.,  1.,  0.],
        [ 0.,  0.,  1.,  0.],
        [ 0.,  0.,  0.,  1.]],

       [[ 0.,  1.,  0.,  0.],
        [ 0.,  0.,  0.,  1.],
        [ 0.,  0.,  1.,  0.],
        [ 0.,  0.,  1.,  0.],
        [ 0.,  0.,  1.,  0.]],

       [[ 0.,  0.,  1.,  0.],
        [ 0.,  0.,  1.,  0.],
        [ 0.,  1.,  0.,  0.],
        [ 0.,  1.,  0.,  0.],
        [ 0.,  0.,  1.,  0.]]], dtype=float32)
array([[[-0.08363709,  0.14027484],
        [ 0.02493201,  0.20462605],
        [-0.06182761,  0.29794976],
        [-0.12991247,  0.30553949],
        [-0.21330857,  0.15022287]],

       [[ 0.068808  ,  0.13457137],
        [-0.06971022,  0.08396979],
        [-0.13594906,  0.1603317 ],
        [-0.1734536 ,  0.18938743],
        [-0.19931419,  0.21245483]],

       [[-0.07762068,  0.06599158],
        [-0.1312525 ,  0.11477097],
        [ 0.0118046 ,  0.1876744 ],
        [ 0.07184076,  0.26778108],
        [-0.01831046,  0.36711457]]], dtype=float32)
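Unlike BasicRNNCell, an LSTM carries two state tensors; with state_is_tuple=True the final state comes back as an LSTMStateTuple(c, h), each shaped (batch, hidden_size). A sketch of inspecting it (assumes the session and _states from the cell above):

# _states.c is the cell state, _states.h the hidden state.
# With no sequence_length given, _states.h equals outputs[:, -1, :].
pp.pprint(sess.run(_states.h))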

In [6]:
with tf.variable_scope('3_batches_dynamic_length') as scope:
    # One cell RNN input_dim (4) -> output_dim (2). sequence: 5, batch 3
    # 3 batches 'hello', 'eolll', 'lleel'
    x_data = np.array([[h, e, l, l, o],
                       [e, o, l, l, l],
                       [l, l, e, e, l]], dtype=np.float32)
    pp.pprint(x_data)
    
    hidden_size = 2
    cell = rnn.BasicLSTMCell(num_units=hidden_size, state_is_tuple=True)
    outputs, _states = tf.nn.dynamic_rnn(
        cell, x_data, sequence_length=[5,3,4], dtype=tf.float32)
    sess.run(tf.global_variables_initializer())
    pp.pprint(outputs.eval())


array([[[ 1.,  0.,  0.,  0.],
        [ 0.,  1.,  0.,  0.],
        [ 0.,  0.,  1.,  0.],
        [ 0.,  0.,  1.,  0.],
        [ 0.,  0.,  0.,  1.]],

       [[ 0.,  1.,  0.,  0.],
        [ 0.,  0.,  0.,  1.],
        [ 0.,  0.,  1.,  0.],
        [ 0.,  0.,  1.,  0.],
        [ 0.,  0.,  1.,  0.]],

       [[ 0.,  0.,  1.,  0.],
        [ 0.,  0.,  1.,  0.],
        [ 0.,  1.,  0.,  0.],
        [ 0.,  1.,  0.,  0.],
        [ 0.,  0.,  1.,  0.]]], dtype=float32)
array([[[ 0.11846812,  0.09838437],
        [ 0.01435947,  0.21795948],
        [-0.1294212 ,  0.1101583 ],
        [-0.24306259,  0.04897844],
        [-0.26552296,  0.09324916]],

       [[-0.07799197,  0.09925572],
        [-0.13092862,  0.11419758],
        [-0.22510643,  0.04204131],
        [ 0.        ,  0.        ],
        [ 0.        ,  0.        ]],

       [[-0.1404013 , -0.06096476],
        [-0.24985476, -0.10693933],
        [-0.22802687, -0.00521357],
        [-0.25779328,  0.11324508],
        [ 0.        ,  0.        ]]], dtype=float32)
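With sequence_length=[5, 3, 4], dynamic_rnn stops updating each sequence at its given length and fills the remaining output steps with zeros, which is exactly the trailing [0., 0.] rows in the second and third batch entries above. The same validity pattern can be built explicitly (a sketch using tf.sequence_mask):

# Boolean mask of valid time steps, one row per batch entry: shape (3, 5).
mask = tf.sequence_mask([5, 3, 4], maxlen=5)
pp.pprint(sess.run(mask))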

In [7]:
with tf.variable_scope('initial_state') as scope:
    batch_size = 3
    x_data = np.array([[h, e, l, l, o],
                      [e, o, l, l, l],
                      [l, l, e, e, l]], dtype=np.float32)
    pp.pprint(x_data)
    
    # One cell RNN input_dim (4) -> output_dim (2). sequence: 5, batch: 3
    hidden_size=2
    cell = rnn.BasicLSTMCell(num_units=hidden_size, state_is_tuple=True)
    initial_state = cell.zero_state(batch_size, tf.float32)
    outputs, _states = tf.nn.dynamic_rnn(cell, x_data,
                                         initial_state=initial_state, dtype=tf.float32)
    sess.run(tf.global_variables_initializer())
    pp.pprint(outputs.eval())


array([[[ 1.,  0.,  0.,  0.],
        [ 0.,  1.,  0.,  0.],
        [ 0.,  0.,  1.,  0.],
        [ 0.,  0.,  1.,  0.],
        [ 0.,  0.,  0.,  1.]],

       [[ 0.,  1.,  0.,  0.],
        [ 0.,  0.,  0.,  1.],
        [ 0.,  0.,  1.,  0.],
        [ 0.,  0.,  1.,  0.],
        [ 0.,  0.,  1.,  0.]],

       [[ 0.,  0.,  1.,  0.],
        [ 0.,  0.,  1.,  0.],
        [ 0.,  1.,  0.,  0.],
        [ 0.,  1.,  0.,  0.],
        [ 0.,  0.,  1.,  0.]]], dtype=float32)
array([[[-0.01510691, -0.09768563],
        [-0.11986788, -0.2062224 ],
        [-0.12150764, -0.09573848],
        [-0.11341213, -0.00686507],
        [-0.11165538,  0.03148442]],

       [[-0.11858094, -0.13462871],
        [-0.12052085, -0.06944887],
        [-0.1357048 ,  0.00758943],
        [-0.12825252,  0.05504551],
        [-0.12495665,  0.08196589]],

       [[-0.03133412,  0.03963752],
        [-0.05609235,  0.06543965],
        [-0.15608624, -0.09140078],
        [-0.20793875, -0.18669473],
        [-0.19900799, -0.07157262]]], dtype=float32)
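zero_state simply builds an LSTMStateTuple of zero tensors shaped (batch_size, hidden_size), so this run is equivalent to the earlier cells that omitted initial_state; its real use is seeding the state explicitly, e.g. when carrying it across segments of a long sequence. A quick look (sketch):

# initial_state is an LSTMStateTuple of two (3, 2) zero tensors: (c, h).
pp.pprint(sess.run(initial_state))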

In [8]:
# Create input data
batch_size=6
sequence_length=1000
input_dim=48

x_data = np.arange(288000, dtype=np.float32).reshape(batch_size, sequence_length, input_dim)
#pp.pprint(x_data)  # batch, sequence_length, input_dim

In [9]:
# with tf.variable_scope('generated_data') as scope:
#     # One cell RNN input_dim (48) -> output_dim (5). sequence: 1000, batch: 6
#     cell = rnn.BasicLSTMCell(num_units=5, state_is_tuple=True)
#     initial_state = cell.zero_state(batch_size, tf.float32)
#     outputs, _states = tf.nn.dynamic_rnn(cell, x_data,
#                                          initial_state=initial_state, dtype=tf.float32)
#     sess.run(tf.global_variables_initializer())
#     pp.pprint(outputs.eval())

In [10]:
# with tf.variable_scope('MultiRNNCell') as scope:
#     # Make rnn
#     # Use a fresh cell per layer: reusing one cell object ([cell] * 3)
#     # ties the layers' weights together and breaks in newer TF versions.
#     cells = [rnn.BasicLSTMCell(num_units=5, state_is_tuple=True) for _ in range(3)]
#     cell = rnn.MultiRNNCell(cells, state_is_tuple=True)  # 3 layers

#     # rnn in/out
#     outputs, _states = tf.nn.dynamic_rnn(cell, x_data, dtype=tf.float32)
#     print("dynamic rnn: ", outputs)
#     sess.run(tf.global_variables_initializer())
#     pp.pprint(outputs.eval())  # batch size, unrolling (time), hidden_size

In [11]:
# with tf.variable_scope('dynamic_rnn') as scope:
#     cell = rnn.BasicLSTMCell(num_units=5, state_is_tuple=True)
#     outputs, _states = tf.nn.dynamic_rnn(cell, x_data, dtype=tf.float32,
#                                          sequence_length=[1, 3, 2, 4, 5, 6])
#     # one valid length per batch entry (batch_size is 6 here):
#     # e.g. length 1 for the first entry, 3 for the second, and so on
    
#     print("dynamic rnn: ", outputs)
#     sess.run(tf.global_variables_initializer())
#     pp.pprint(outputs.eval())  # batch size, unrolling (time), hidden_size

In [12]:
with tf.variable_scope('bi-directional') as scope:
    # bi-directional rnn
    cell_fw = rnn.BasicLSTMCell(num_units=5, state_is_tuple=True)
    cell_bw = rnn.BasicLSTMCell(num_units=5, state_is_tuple=True)

    outputs, states = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, x_data,
                                                      sequence_length=[2, 3, 1, 5, 7, 9],
                                                      dtype=tf.float32)

    sess.run(tf.global_variables_initializer())
    # pp.pprint(sess.run(outputs))
    # pp.pprint(sess.run(states))
    # pp.pprint(sess.run(tf.shape(x_data)))
    # pp.pprint(sess.run(tf.shape(outputs[0])))
    # pp.pprint(sess.run(tf.shape(outputs[1])))
    # X_for_fc = tf.reshape(tf.concat(outputs, 2), [-1, 2 * 5])  # concat fw/bw, flatten
    # pp.pprint(sess.run(tf.shape(X_for_fc)))
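bidirectional_dynamic_rnn returns outputs as a pair (output_fw, output_bw), each shaped (batch, time, num_units) = (6, 1000, 5) here; a downstream fully connected layer usually sees them concatenated on the last axis, which is what the commented X_for_fc line above hints at. A sketch of that step:

# (6, 1000, 5) forward + (6, 1000, 5) backward -> (6, 1000, 10)
outputs_concat = tf.concat(outputs, axis=2)
print(outputs_concat.shape)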

In [13]:
# Flatten-based softmax: collapse (batch, seq, hidden) to 2-D, apply one weight matrix, reshape back
hidden_size=3
sequence_length=5
batch_size=3
num_classes=5

pp.pprint(x_data)  # still the (6, 1000, 48) array created in In [8]
x_data = x_data.reshape(-1, hidden_size)
pp.pprint(x_data)

softmax_w = np.arange(15, dtype=np.float32).reshape(hidden_size, num_classes)
outputs = np.matmul(x_data, softmax_w)
outputs = outputs.reshape(-1, sequence_length, num_classes) # batch, seq, class
pp.pprint(outputs)


array([[[  0.00000000e+00,   1.00000000e+00,   2.00000000e+00, ...,
           4.50000000e+01,   4.60000000e+01,   4.70000000e+01],
        [  4.80000000e+01,   4.90000000e+01,   5.00000000e+01, ...,
           9.30000000e+01,   9.40000000e+01,   9.50000000e+01],
        [  9.60000000e+01,   9.70000000e+01,   9.80000000e+01, ...,
           1.41000000e+02,   1.42000000e+02,   1.43000000e+02],
        ..., 
        [  4.78560000e+04,   4.78570000e+04,   4.78580000e+04, ...,
           4.79010000e+04,   4.79020000e+04,   4.79030000e+04],
        [  4.79040000e+04,   4.79050000e+04,   4.79060000e+04, ...,
           4.79490000e+04,   4.79500000e+04,   4.79510000e+04],
        [  4.79520000e+04,   4.79530000e+04,   4.79540000e+04, ...,
           4.79970000e+04,   4.79980000e+04,   4.79990000e+04]],

       [[  4.80000000e+04,   4.80010000e+04,   4.80020000e+04, ...,
           4.80450000e+04,   4.80460000e+04,   4.80470000e+04],
        [  4.80480000e+04,   4.80490000e+04,   4.80500000e+04, ...,
           4.80930000e+04,   4.80940000e+04,   4.80950000e+04],
        [  4.80960000e+04,   4.80970000e+04,   4.80980000e+04, ...,
           4.81410000e+04,   4.81420000e+04,   4.81430000e+04],
        ..., 
        [  9.58560000e+04,   9.58570000e+04,   9.58580000e+04, ...,
           9.59010000e+04,   9.59020000e+04,   9.59030000e+04],
        [  9.59040000e+04,   9.59050000e+04,   9.59060000e+04, ...,
           9.59490000e+04,   9.59500000e+04,   9.59510000e+04],
        [  9.59520000e+04,   9.59530000e+04,   9.59540000e+04, ...,
           9.59970000e+04,   9.59980000e+04,   9.59990000e+04]],

       [[  9.60000000e+04,   9.60010000e+04,   9.60020000e+04, ...,
           9.60450000e+04,   9.60460000e+04,   9.60470000e+04],
        [  9.60480000e+04,   9.60490000e+04,   9.60500000e+04, ...,
           9.60930000e+04,   9.60940000e+04,   9.60950000e+04],
        [  9.60960000e+04,   9.60970000e+04,   9.60980000e+04, ...,
           9.61410000e+04,   9.61420000e+04,   9.61430000e+04],
        ..., 
        [  1.43856000e+05,   1.43857000e+05,   1.43858000e+05, ...,
           1.43901000e+05,   1.43902000e+05,   1.43903000e+05],
        [  1.43904000e+05,   1.43905000e+05,   1.43906000e+05, ...,
           1.43949000e+05,   1.43950000e+05,   1.43951000e+05],
        [  1.43952000e+05,   1.43953000e+05,   1.43954000e+05, ...,
           1.43997000e+05,   1.43998000e+05,   1.43999000e+05]],

       [[  1.44000000e+05,   1.44001000e+05,   1.44002000e+05, ...,
           1.44045000e+05,   1.44046000e+05,   1.44047000e+05],
        [  1.44048000e+05,   1.44049000e+05,   1.44050000e+05, ...,
           1.44093000e+05,   1.44094000e+05,   1.44095000e+05],
        [  1.44096000e+05,   1.44097000e+05,   1.44098000e+05, ...,
           1.44141000e+05,   1.44142000e+05,   1.44143000e+05],
        ..., 
        [  1.91856000e+05,   1.91857000e+05,   1.91858000e+05, ...,
           1.91901000e+05,   1.91902000e+05,   1.91903000e+05],
        [  1.91904000e+05,   1.91905000e+05,   1.91906000e+05, ...,
           1.91949000e+05,   1.91950000e+05,   1.91951000e+05],
        [  1.91952000e+05,   1.91953000e+05,   1.91954000e+05, ...,
           1.91997000e+05,   1.91998000e+05,   1.91999000e+05]],

       [[  1.92000000e+05,   1.92001000e+05,   1.92002000e+05, ...,
           1.92045000e+05,   1.92046000e+05,   1.92047000e+05],
        [  1.92048000e+05,   1.92049000e+05,   1.92050000e+05, ...,
           1.92093000e+05,   1.92094000e+05,   1.92095000e+05],
        [  1.92096000e+05,   1.92097000e+05,   1.92098000e+05, ...,
           1.92141000e+05,   1.92142000e+05,   1.92143000e+05],
        ..., 
        [  2.39856000e+05,   2.39857000e+05,   2.39858000e+05, ...,
           2.39901000e+05,   2.39902000e+05,   2.39903000e+05],
        [  2.39904000e+05,   2.39905000e+05,   2.39906000e+05, ...,
           2.39949000e+05,   2.39950000e+05,   2.39951000e+05],
        [  2.39952000e+05,   2.39953000e+05,   2.39954000e+05, ...,
           2.39997000e+05,   2.39998000e+05,   2.39999000e+05]],

       [[  2.40000000e+05,   2.40001000e+05,   2.40002000e+05, ...,
           2.40045000e+05,   2.40046000e+05,   2.40047000e+05],
        [  2.40048000e+05,   2.40049000e+05,   2.40050000e+05, ...,
           2.40093000e+05,   2.40094000e+05,   2.40095000e+05],
        [  2.40096000e+05,   2.40097000e+05,   2.40098000e+05, ...,
           2.40141000e+05,   2.40142000e+05,   2.40143000e+05],
        ..., 
        [  2.87856000e+05,   2.87857000e+05,   2.87858000e+05, ...,
           2.87901000e+05,   2.87902000e+05,   2.87903000e+05],
        [  2.87904000e+05,   2.87905000e+05,   2.87906000e+05, ...,
           2.87949000e+05,   2.87950000e+05,   2.87951000e+05],
        [  2.87952000e+05,   2.87953000e+05,   2.87954000e+05, ...,
           2.87997000e+05,   2.87998000e+05,   2.87999000e+05]]], dtype=float32)
array([[  0.00000000e+00,   1.00000000e+00,   2.00000000e+00],
       [  3.00000000e+00,   4.00000000e+00,   5.00000000e+00],
       [  6.00000000e+00,   7.00000000e+00,   8.00000000e+00],
       ..., 
       [  2.87991000e+05,   2.87992000e+05,   2.87993000e+05],
       [  2.87994000e+05,   2.87995000e+05,   2.87996000e+05],
       [  2.87997000e+05,   2.87998000e+05,   2.87999000e+05]], dtype=float32)
array([[[  2.50000000e+01,   2.80000000e+01,   3.10000000e+01,
           3.40000000e+01,   3.70000000e+01],
        [  7.00000000e+01,   8.20000000e+01,   9.40000000e+01,
           1.06000000e+02,   1.18000000e+02],
        [  1.15000000e+02,   1.36000000e+02,   1.57000000e+02,
           1.78000000e+02,   1.99000000e+02],
        [  1.60000000e+02,   1.90000000e+02,   2.20000000e+02,
           2.50000000e+02,   2.80000000e+02],
        [  2.05000000e+02,   2.44000000e+02,   2.83000000e+02,
           3.22000000e+02,   3.61000000e+02]],

       [[  2.50000000e+02,   2.98000000e+02,   3.46000000e+02,
           3.94000000e+02,   4.42000000e+02],
        [  2.95000000e+02,   3.52000000e+02,   4.09000000e+02,
           4.66000000e+02,   5.23000000e+02],
        [  3.40000000e+02,   4.06000000e+02,   4.72000000e+02,
           5.38000000e+02,   6.04000000e+02],
        [  3.85000000e+02,   4.60000000e+02,   5.35000000e+02,
           6.10000000e+02,   6.85000000e+02],
        [  4.30000000e+02,   5.14000000e+02,   5.98000000e+02,
           6.82000000e+02,   7.66000000e+02]],

       [[  4.75000000e+02,   5.68000000e+02,   6.61000000e+02,
           7.54000000e+02,   8.47000000e+02],
        [  5.20000000e+02,   6.22000000e+02,   7.24000000e+02,
           8.26000000e+02,   9.28000000e+02],
        [  5.65000000e+02,   6.76000000e+02,   7.87000000e+02,
           8.98000000e+02,   1.00900000e+03],
        [  6.10000000e+02,   7.30000000e+02,   8.50000000e+02,
           9.70000000e+02,   1.09000000e+03],
        [  6.55000000e+02,   7.84000000e+02,   9.13000000e+02,
           1.04200000e+03,   1.17100000e+03]],

       ..., 
       [[  4.31935000e+06,   5.18321800e+06,   6.04708600e+06,
           6.91095400e+06,   7.77482200e+06],
        [  4.31939500e+06,   5.18327200e+06,   6.04714900e+06,
           6.91102600e+06,   7.77490300e+06],
        [  4.31944000e+06,   5.18332600e+06,   6.04721200e+06,
           6.91109800e+06,   7.77498400e+06],
        [  4.31948500e+06,   5.18338000e+06,   6.04727500e+06,
           6.91117000e+06,   7.77506500e+06],
        [  4.31953000e+06,   5.18343400e+06,   6.04733800e+06,
           6.91124200e+06,   7.77514600e+06]],

       [[  4.31957500e+06,   5.18348800e+06,   6.04740100e+06,
           6.91131400e+06,   7.77522700e+06],
        [  4.31962000e+06,   5.18354200e+06,   6.04746400e+06,
           6.91138600e+06,   7.77530800e+06],
        [  4.31966500e+06,   5.18359600e+06,   6.04752700e+06,
           6.91145800e+06,   7.77538900e+06],
        [  4.31971000e+06,   5.18365000e+06,   6.04759000e+06,
           6.91153000e+06,   7.77547000e+06],
        [  4.31975500e+06,   5.18370400e+06,   6.04765300e+06,
           6.91160200e+06,   7.77555100e+06]],

       [[  4.31980000e+06,   5.18375800e+06,   6.04771600e+06,
           6.91167400e+06,   7.77563200e+06],
        [  4.31984500e+06,   5.18381200e+06,   6.04777900e+06,
           6.91174600e+06,   7.77571300e+06],
        [  4.31989000e+06,   5.18386600e+06,   6.04784200e+06,
           6.91181800e+06,   7.77579400e+06],
        [  4.31993500e+06,   5.18392000e+06,   6.04790500e+06,
           6.91189000e+06,   7.77587500e+06],
        [  4.31998000e+06,   5.18397400e+06,   6.04796800e+06,
           6.91196200e+06,   7.77595600e+06]]], dtype=float32)
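The numpy computation above mirrors the standard TF pattern for putting a softmax layer on top of RNN activations: flatten (batch, seq, hidden) to (batch*seq, hidden), apply one weight matrix, reshape back to (batch, seq, num_classes). The same step expressed with TF ops (a sketch reusing the arrays defined above; x_data is already 2-D at this point):

# TF version of the flatten -> matmul -> unflatten pattern.
logits_flat = tf.matmul(tf.constant(x_data), tf.constant(softmax_w))
logits = tf.reshape(logits_flat, [-1, sequence_length, num_classes])
print(logits.shape)   # (19200, 5, 5), same layout as the numpy outputs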

In [14]:
# [batch_size, sequence_length]
y_data = tf.constant([[1, 1, 1]])

# [batch_size, sequence_length, num_classes]
prediction = tf.constant([[[0.2, 0.7], [0.6, 0.2], [0.2, 0.9]]], dtype=tf.float32)

# [batch_size, sequence_length]
weights = tf.constant([[1, 1, 1]], dtype=tf.float32)

sequence_loss = tf.contrib.seq2seq.sequence_loss(logits=prediction, targets=y_data, weights=weights)
sess.run(tf.global_variables_initializer())
print("Loss: ", sequence_loss.eval())


Loss:  0.596759
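With all weights 1, sequence_loss is just the average of the per-step softmax cross-entropies, so the 0.596759 above can be verified by hand with numpy:

# -log softmax(logits)[target] per step, averaged over the 3 steps.
step_logits = np.array([[0.2, 0.7], [0.6, 0.2], [0.2, 0.9]])
step_probs = np.exp(step_logits) / np.exp(step_logits).sum(axis=1, keepdims=True)
print(-np.log(step_probs[:, 1]).mean())   # ~0.596759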

In [15]:
# [batch_size, sequence_length]
y_data = tf.constant([[1, 1, 1]])

# [batch_size, sequence_length, num_classes]
prediction1 = tf.constant([[[0.3, 0.7], [0.3, 0.7], [0.3, 0.7]]], dtype=tf.float32)
prediction2 = tf.constant([[[0.1, 0.9], [0.1, 0.9], [0.1, 0.9]]], dtype=tf.float32)

prediction3 = tf.constant([[[1, 0], [1, 0], [1, 0]]], dtype=tf.float32)
prediction4 = tf.constant([[[0, 1], [1, 0], [0, 1]]], dtype=tf.float32)

# [batch_size, sequence_length]
weights = tf.constant([[1, 1, 1]], dtype=tf.float32)

sequence_loss1 = tf.contrib.seq2seq.sequence_loss(prediction1, y_data, weights)
sequence_loss2 = tf.contrib.seq2seq.sequence_loss(prediction2, y_data, weights)
sequence_loss3 = tf.contrib.seq2seq.sequence_loss(prediction3, y_data, weights)
sequence_loss4 = tf.contrib.seq2seq.sequence_loss(prediction4, y_data, weights)

sess.run(tf.global_variables_initializer())
print("Loss1: ", sequence_loss1.eval(),
      "Loss2: ", sequence_loss2.eval(),
      "Loss3: ", sequence_loss3.eval(),
      "Loss4: ", sequence_loss4.eval())


Loss1:  0.513015 Loss2:  0.371101 Loss3:  1.31326 Loss4:  0.646595

The ordering is the point: the more confident the correct prediction (prediction2), the lower the loss; a confidently wrong prediction (prediction3) costs the most, and the mixed prediction4 lands in between.