In [ ]:
# Placeholders through which data is fed into the graph
inputs = tf.placeholder(tf.int32, (None, x_seq_length), 'inputs')
outputs = tf.placeholder(tf.int32, (None, None), 'outputs')
targets = tf.placeholder(tf.int32, (None, None), 'targets')
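
Three placeholders are needed because the decoder is trained with teacher forcing: outputs carries the decoder's input sequence (the target shifted right behind a start token), while targets carries the tokens the loss is scored against. A hedged sketch of the intended feed, assuming a hypothetical source_batch/target_batch pair whose target rows begin with a '<GO>' id:

In [ ]:
# Sketch of the intended feed (hypothetical batch arrays, not defined here):
# feed_dict = {inputs:  source_batch,          # (batch, x_seq_length)
#              outputs: target_batch[:, :-1],  # decoder input, starts at '<GO>'
#              targets: target_batch[:, 1:]}   # next-token labels for the loss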

In [ ]:
# Embedding layers: map token ids to dense vectors on each side
input_embedding = tf.Variable(tf.random_uniform((len(char2numX), embed_size),
                                                -1.0, 1.0), name='enc_embedding')
output_embedding = tf.Variable(tf.random_uniform((len(char2numY), embed_size),
                                                 -1.0, 1.0), name='dec_embedding')
date_input_embed = tf.nn.embedding_lookup(input_embedding, inputs)
date_output_embed = tf.nn.embedding_lookup(output_embedding, outputs)

with tf.variable_scope("encoding") as encoding_scope:
    lstm_enc = tf.contrib.rnn.BasicLSTMCell(nodes)
    _, last_state = tf.nn.dynamic_rnn(lstm_enc, inputs=date_input_embed,
                                     dtype=tf.float32)

with tf.variable_scope("decoding") as decoding_scope:
    lstm_dec = tf.contrib.rnn.BasicLSTMCell(nodes)
    dec_ouputs, _ = tf.nn.dynamic_rnn(lstm_dec, inputs=date_output_embed,
                                     initial_state=last_state)
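
The encoder's last_state is an LSTMStateTuple (c, h), each of shape (batch_size, nodes); passing it as the decoder's initial_state is the only link between the two RNNs. A quick static-shape check (a sketch, assuming the cell above has run):

In [ ]:
# Sketch: inspect the state handed from encoder to decoder
print(last_state.c.get_shape())  # (?, nodes)
print(last_state.h.get_shape())  # (?, nodes)
print(dec_outputs.get_shape())   # (?, ?, nodes) -- decoder length is dynamic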

In [ ]:
# Project decoder outputs onto the output vocabulary to get per-step logits
logits = tf.contrib.layers.fully_connected(dec_outputs,
                                           num_outputs=len(char2numY),
                                           activation_fn=None)

with tf.name_scope("optimization"):
    # Sequence loss: average per-step cross-entropy, with uniform weights
    loss = tf.contrib.seq2seq.sequence_loss(logits, targets,
                                            tf.ones([batch_size, y_seq_length]))
    
    # Optimizer
    optimizer = tf.train.RMSPropOptimizer(1e-3).minimize(loss)
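
To make the optimization step concrete, here is a minimal training-loop sketch. The batch_data generator, epochs, X_train, and y_train are assumptions about earlier cells (they do not appear in this section); the essential point is the teacher-forcing split of target_batch into decoder inputs and loss targets:

In [ ]:
# Minimal training-loop sketch (batch_data, epochs, X_train, y_train are
# assumed from earlier cells; target rows start with a '<GO>' token)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for epoch_i in range(epochs):
    for source_batch, target_batch in batch_data(X_train, y_train, batch_size):
        _, batch_loss = sess.run([optimizer, loss],
            feed_dict={inputs: source_batch,
                       outputs: target_batch[:, :-1],  # decoder input
                       targets: target_batch[:, 1:]})  # prediction labels
    print('Epoch %d loss: %.4f' % (epoch_i, batch_loss))

At inference time there is no target to shift, so the decoder would instead be fed a single '<GO>' column and extended one step at a time with its own argmax predictions.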