In [1]:
'''
A Recurrent Neural Network (LSTM) implementation example using TensorFlow.
Next-word prediction after n_input words learned from a text file.
A story is automatically generated if the predicted word is fed back as input.
Author: Rowel Atienza
Project: https://github.com/roatienza/Deep-Learning-Experiments
'''

from __future__ import print_function

import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn
import random
import collections
import time

In [2]:
start_time = time.time()
def elapsed(sec):
    if sec<60:
        return str(sec) + " sec"
    elif sec<(60*60):
        return str(sec/60) + " min"
    else:
        return str(sec/(60*60)) + " hr"
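
In [ ]:
# quick sanity check of elapsed() (not part of the original run):
# durations are formatted in the largest fitting unit
print(elapsed(42))    # 42 sec
print(elapsed(90))    # 1.5 min
print(elapsed(7200))  # 2.0 hr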

In [3]:
# Target log path
logs_path = 'rnn_code'
writer = tf.summary.FileWriter(logs_path)

# Text file containing words for training
training_file = 'inputcode.txt'

In [4]:
def read_data(fname):
    with open(fname) as f:
        content = f.read()
    #print(content)
    #content = [x.strip() for x in content]
    #content = [content[i].split(',') for i in range(len(content)-1)]
    #content = np.array(content)
    #content = np.reshape(content, [-1, ])
    content = content.split(',')
    #print(content)
    return content[:-1]
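
In [ ]:
# minimal sketch (not in the original notebook) of why read_data drops the
# last element: assuming inputcode.txt ends with a trailing comma, split(',')
# leaves a final empty string, which content[:-1] removes
sample = "1,2,3,"
print(sample.split(','))       # ['1', '2', '3', '']
print(sample.split(',')[:-1])  # ['1', '2', '3']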

In [ ]:
# Quick check: read the comma-separated file directly
with open('inputcode.txt', encoding="utf8") as f:
    content = f.read()
data = content.split(',')
print(data)

# earlier dead ends, kept for reference:
#content = list(content)
#content = [content[i].split(',') for i in range(len(content))]
#print(content)

In [5]:
training_data = read_data(training_file)
print("Loaded training data...")
print(training_data)

training_data = list(map(int, training_data))
print(training_data)


Loaded training data...
['1', '2', '3', '2', '2', '3', '2', '1', '1', '3', '3', '1', '2', '3', '1', '2', '3', '2', '2', '1', '2', '3', '2', '1', '2', '3', '2', '1', '2', '3', '3', '2', '3', '2', '1', '2', '1', '1', '2', '3', '2', '1', '2', '3', '2', '1', '2', '3', '2', '2', '3', '1', '1', '2', '3', '2', '1', '2', '3', '2', '1', '2', '3']
[1, 2, 3, 2, 2, 3, 2, 1, 1, 3, 3, 1, 2, 3, 1, 2, 3, 2, 2, 1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3, 3, 2, 3, 2, 1, 2, 1, 1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3, 2, 2, 3, 1, 1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3]

In [6]:
print(training_data[:10])
print(len(training_data))


[1, 2, 3, 2, 2, 3, 2, 1, 1, 3]
63

In [ ]:
def build_dataset(words):
    # assign each distinct symbol an integer id, most frequent first
    count = collections.Counter(words).most_common()
    dictionary = dict()
    for word, _ in count:
        dictionary[word] = len(dictionary)
    reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    return dictionary, reverse_dictionary

In [ ]:
dictionary, reverse_dictionary = build_dataset(training_data)
vocab_size = len(dictionary)

print(vocab_size)
print(dictionary)

In [ ]:
print(training_data)
vocab = list(set(training_data))
print(vocab)
vocab1 = ['1','2','3']
print(vocab1)

In [13]:
# Parameters
learning_rate = 0.001
training_iters = 50000
display_step = 1000
n_input = 3
vocab = [1,2,3]
vocab_size = 3
# number of units in RNN cell
n_hidden = 512
py_flag = False
# tf Graph input: x holds one window of n_input symbol values,
# y holds the one-hot encoding of the next symbol
x = tf.placeholder("float", [1, n_input, 1])
y = tf.placeholder("float", [1, vocab_size])

In [14]:
# RNN output node weights and biases
weights = {
    'out': tf.Variable(tf.random_normal([n_hidden, vocab_size]))
}
biases = {
    'out': tf.Variable(tf.random_normal([vocab_size]))
}
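
In [ ]:
# shape sketch (numpy stand-in, not part of the graph): the last LSTM output,
# of shape [1, n_hidden], is projected to [1, vocab_size] logits by the
# weights and biases above
h = np.zeros([1, n_hidden])
W = np.zeros([n_hidden, vocab_size])
b = np.zeros([vocab_size])
print((h.dot(W) + b).shape)  # (1, 3)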

In [15]:
#tf.reset_default_graph()
#print(x)

In [16]:
print(tf.split(x,n_input,1))


[<tf.Tensor 'split:0' shape=(1, 1, 1) dtype=float32>, <tf.Tensor 'split:1' shape=(1, 1, 1) dtype=float32>, <tf.Tensor 'split:2' shape=(1, 1, 1) dtype=float32>]

In [17]:
def RNN(x, weights, biases):
    #print("1", x)
    # reshape to [1, n_input]
    x = tf.reshape(x, [-1, n_input])
    #print("2", x, x.shape)
    # Generate a n_input-element sequence of inputs
    # (eg. [had] [a] [general] -> [20] [6] [33])
    x = tf.split(x,n_input,1)
    #print("3", x)
    # 1-layer LSTM with n_hidden units.
    
    rnn_cell = rnn.BasicLSTMCell(n_hidden)
    
    # generate prediction
    outputs, states = rnn.static_rnn(rnn_cell, x, dtype=tf.float32)
    print("4", outputs)
    # there are n_input outputs but
    # we only want the last output
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
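
In [ ]:
# numpy sketch (not from the original notebook) of the reshaping inside RNN():
# a window fed as shape [1, n_input, 1] is flattened to [1, n_input] and then
# split into n_input tensors of shape [1, 1] for static_rnn
w = np.array([[[1.], [2.], [3.]]])  # shape (1, 3, 1)
flat = w.reshape(-1, 3)             # shape (1, 3)
steps = np.split(flat, 3, axis=1)   # three arrays of shape (1, 1)
print([s.shape for s in steps])     # [(1, 1), (1, 1), (1, 1)]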

In [18]:
pred = RNN(x, weights, biases)

# Loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate).minimize(cost)

# Model evaluation
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initializing the variables
init = tf.global_variables_initializer()


4 [<tf.Tensor 'rnn_1/rnn/basic_lstm_cell/mul_2:0' shape=(1, 512) dtype=float32>, <tf.Tensor 'rnn_1/rnn/basic_lstm_cell/mul_5:0' shape=(1, 512) dtype=float32>, <tf.Tensor 'rnn_1/rnn/basic_lstm_cell/mul_8:0' shape=(1, 512) dtype=float32>]
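
In [ ]:
# numpy sketch (not in the original notebook) of what the cost above computes
# for a single example: softmax over the 3 logits, then the negative
# log-likelihood of the one-hot target
logits = np.array([2.0, 1.0, 0.1])
target = np.array([1.0, 0.0, 0.0])  # one-hot for class 0
p = np.exp(logits) / np.exp(logits).sum()
print(-np.sum(target * np.log(p)))  # ~0.417
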
In [ ]:
# testing: build one (input window, one-hot target) pair by hand
offset = 8

symbols_out_onehot = np.zeros([vocab_size], dtype=float)
#symbols_out_onehot[dictionary[str(training_data[offset+n_input])]] = 1.0
symbols_out_onehot[int(training_data[offset+n_input]) - 1] = 1.0
print(symbols_out_onehot)
#symbols_out_onehot = np.reshape(symbols_out_onehot,[1,-1])

#symbols_in_keys = [ [dictionary[str(training_data[i])]] for i in range(offset, offset+n_input) ]
#symbols_in_keys = np.reshape(np.array(symbols_in_keys), [-1, n_input, 1])
input_data = [training_data[i] for i in range(offset, offset+n_input)]
input_set = np.reshape(input_data, [-1, n_input, 1])
print(input_set)


In [20]:
# Launch the graph
with tf.Session() as session:
    session.run(init)
    step = 0
    offset = random.randint(0,n_input+1)
    end_offset = n_input + 1
    acc_total = 0
    loss_total = 0

    writer.add_graph(session.graph)

    while step < training_iters: #training_iters=50000
        # Generate a minibatch. Add some randomness on selection process.
        if offset > (len(training_data)-end_offset):
            offset = random.randint(0, n_input+1)

        #symbols_in_keys = [ [dictionary[ str(training_data[i])]] for i in range(offset, offset+n_input) ]
        #symbols_in_keys = np.reshape(np.array(symbols_in_keys), [-1, n_input, 1])
        input_data = [training_data[i] for i in range(offset, offset+n_input)]
        input_set = np.reshape(input_data, [-1, n_input, 1]) 
        #input_set = np.reshape(training_data, [-1, n_input, 1])
        #print(input_set, input_set.shape)
        symbols_out_onehot = np.zeros([vocab_size], dtype=float)
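        # symbol values are 1..vocab_size, so subtract 1 for the 0-based one-hot index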
        symbols_out_onehot[int(training_data[offset+n_input]) - 1] = 1.0
        symbols_out_onehot = np.reshape(symbols_out_onehot,[1,-1])
        
        #print(symbols_out_onehot, symbols_out_onehot.shape)
        
        _, acc, loss, onehot_pred = session.run([optimizer, accuracy, cost, pred], \
                                                feed_dict={x: input_set, y: symbols_out_onehot})
        loss_total += loss
        acc_total += acc
        if (step+1) % display_step == 0:
            print("Iter= " + str(step+1) + ", Average Loss= " + \
                  "{:.6f}".format(loss_total/display_step) + ", Average Accuracy= " + \
                  "{:.2f}%".format(100*acc_total/display_step))
            acc_total = 0
            loss_total = 0
            symbols_in = [training_data[i] for i in range(offset, offset + n_input)]
            symbols_out = training_data[offset + n_input]
            #symbols_out_pred = reverse_dictionary[int(tf.argmax(onehot_pred, 1).eval())]
            # note: argmax gives the 0-based class index, i.e. the predicted symbol minus 1
            symbols_out_pred = int(tf.argmax(onehot_pred, 1).eval())
            print("%s - [%s] vs [%s]" % (symbols_in,symbols_out,symbols_out_pred))
        step += 1
        offset += (n_input+1)
    print("Optimization Finished!")
    print("Elapsed time: ", elapsed(time.time() - start_time))
    print("Run on the command line:")
    print("\ttensorboard --logdir=%s" % (logs_path))
    print("Point your web browser to: http://localhost:6006/")
    '''
    while True:
        prompt = "%s words: " % n_input
        sentence = input(prompt)
        sentence = sentence.strip()
        words = sentence.split(' ')
        if len(words) != n_input:
            continue
        try:
            symbols_in_keys = [dictionary[str(words[i])] for i in range(len(words))]
            for i in range(32):
                keys = np.reshape(np.array(symbols_in_keys), [-1, n_input, 1])
                onehot_pred = session.run(pred, feed_dict={x: keys})
                onehot_pred_index = int(tf.argmax(onehot_pred, 1).eval())
                sentence = "%s %s" % (sentence,reverse_dictionary[onehot_pred_index])
                symbols_in_keys = symbols_in_keys[1:]
                symbols_in_keys.append(onehot_pred_index)
            print(sentence)
        except:
            print("Word not in dictionary")'''


Iter= 1000, Average Loss= 1.060242, Average Accuracy= 54.20%
[2, 1, 2] - [3] vs [2]
Iter= 2000, Average Loss= 0.825732, Average Accuracy= 63.80%
[2, 3, 2] - [2] vs [0]
Iter= 3000, Average Loss= 0.809271, Average Accuracy= 67.50%
[2, 3, 2] - [1] vs [1]
Iter= 4000, Average Loss= 0.797424, Average Accuracy= 69.00%
[2, 3, 1] - [1] vs [1]
Iter= 5000, Average Loss= 0.724709, Average Accuracy= 73.20%
[3, 2, 1] - [2] vs [1]
Iter= 6000, Average Loss= 0.700956, Average Accuracy= 75.30%
[1, 2, 3] - [2] vs [1]
Iter= 7000, Average Loss= 0.732293, Average Accuracy= 73.60%
[2, 3, 2] - [1] vs [0]
Iter= 8000, Average Loss= 0.748283, Average Accuracy= 73.50%
[2, 1, 2] - [3] vs [2]
Iter= 9000, Average Loss= 0.635940, Average Accuracy= 77.60%
[2, 1, 2] - [3] vs [2]
Iter= 10000, Average Loss= 0.714077, Average Accuracy= 74.70%
[2, 1, 1] - [2] vs [1]
Iter= 11000, Average Loss= 0.696289, Average Accuracy= 76.30%
[1, 3, 3] - [1] vs [0]
Iter= 12000, Average Loss= 0.677662, Average Accuracy= 74.10%
[2, 3, 2] - [1] vs [0]
Iter= 13000, Average Loss= 0.674364, Average Accuracy= 73.40%
[3, 1, 1] - [2] vs [1]
Iter= 14000, Average Loss= 0.665561, Average Accuracy= 74.20%
[2, 3, 2] - [1] vs [0]
Iter= 15000, Average Loss= 0.667130, Average Accuracy= 75.20%
[1, 1, 3] - [3] vs [2]
Iter= 16000, Average Loss= 0.662025, Average Accuracy= 75.80%
[3, 2, 1] - [2] vs [1]
Iter= 17000, Average Loss= 0.600265, Average Accuracy= 77.40%
[3, 1, 2] - [3] vs [2]
Iter= 18000, Average Loss= 0.674467, Average Accuracy= 75.90%
[2, 1, 1] - [2] vs [2]
Iter= 19000, Average Loss= 0.586608, Average Accuracy= 78.40%
[3, 3, 2] - [3] vs [0]
Iter= 20000, Average Loss= 0.618051, Average Accuracy= 79.10%
[1, 2, 3] - [2] vs [1]
Iter= 21000, Average Loss= 0.672422, Average Accuracy= 76.90%
[1, 1, 2] - [3] vs [2]
Iter= 22000, Average Loss= 0.643663, Average Accuracy= 77.90%
[3, 2, 2] - [3] vs [0]
Iter= 23000, Average Loss= 0.621040, Average Accuracy= 80.50%
[3, 2, 1] - [2] vs [1]
Iter= 24000, Average Loss= 0.632671, Average Accuracy= 79.60%
[2, 3, 2] - [1] vs [0]
Iter= 25000, Average Loss= 0.631959, Average Accuracy= 78.40%
[2, 1, 2] - [3] vs [2]
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-20-09d702421191> in <module>()
     27         #print(symbols_out_onehot, symbols_out_onehot.shape)
     28 
---> 29         _, acc, loss, onehot_pred = session.run([optimizer, accuracy, cost, pred],                                                 feed_dict={x: input_set, y: symbols_out_onehot})
     30         loss_total += loss
     31         acc_total += acc

KeyboardInterrupt: 

In [ ]: