In [27]:
import tensorflow as tf
import numpy as np

In [28]:
# FIFO queue holding the filenames
filename_queue = tf.train.string_input_producer(["./Data/samplecsv.txt"], shuffle=False)
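
A hedged variant (not part of the original notebook): string_input_producer can also cap how many times the file list is cycled via num_epochs. The epoch counter is kept in a local variable, so it has to be initialized inside the session before the queue runners start; limited_queue and init_local_op are illustrative names.
# Sketch only: a filename queue that stops after cycling through the file list twice
limited_queue = tf.train.string_input_producer(
    ["./Data/samplecsv.txt"], shuffle=False, num_epochs=2)
init_local_op = tf.local_variables_initializer()  # num_epochs is tracked in a local variable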

In [29]:
# Use the reader that corresponds to the input file format and pass the filename_queue to its read method.
# read() returns a key identifying the file and record (filename:line number) and a string value
# holding the raw line.
reader = tf.TextLineReader()

key, value = reader.read(filename_queue)

In [30]:
# Default values used when a column is empty; they also tell TensorFlow the dtype of each column
default_values = [["0"],["0 0"],["0 0"]]
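
A minimal sketch (assumption: the first column of the sample file holds integer labels): the dtype of each default determines the dtype of the decoded column, so an integer default would make tf.decode_csv return an int32 tensor for that column instead of a string. The names numeric_defaults, label_i, c2, c3 are illustrative.
# Hypothetical alternative defaults: int label column, two string feature columns
numeric_defaults = [[0], ["0 0"], ["0 0"]]
label_i, c2, c3 = tf.decode_csv(value, record_defaults=numeric_defaults)  # label_i has dtype tf.int32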

In [31]:
# Decode each CSV line into one tensor per column; record_defaults supplies the fallback values (and dtypes)
col1, col2, col3 = tf.decode_csv(value, record_defaults=default_values)
features = tf.stack([col2, col3])
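
At this point the feature columns are still space-separated strings. A minimal sketch, under that assumption, of the split-and-reshape preprocessing mentioned in the reading loop below (tokens, dense_tokens and numeric_features are illustrative names):
# Split each space-separated string column into tokens, densify, and convert to floats
tokens = tf.string_split(features, delimiter=" ")                    # SparseTensor of string tokens
dense_tokens = tf.sparse_tensor_to_dense(tokens, default_value="0")  # dense [2, 2] string tensor
numeric_features = tf.string_to_number(dense_tokens, out_type=tf.float32)
numeric_features = tf.reshape(numeric_features, [-1])                # flatten to a single feature vector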

In [32]:
with tf.Session() as sess:
    # Coordinator helps coordinate thread termination
    coordinator = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coordinator)
    
    # Reads 10 examples. The sample file contains only 4, so once the reader reaches
    # the end of the file the queue cycles back and the same examples repeat.
    # Without shuffling, they are read in order.
    for i in range(10):
        # This reads a single example at a time
        example, label = sess.run([features, col1])
        # Preprocessing (e.g. the split-and-reshape sketch above) and training can be done here
        print("1 example read")
        print(example)

    # Any thread can request a stop which stops all threads
    coordinator.request_stop()
    # Waits for all threads to terminate
    coordinator.join(threads)


1 example read
['0 1' '1 0']
1 example read
['0 0' '0 1']
1 example read
['1 0' '-1 1']
1 example read
['0 -1' '1 1']
1 example read
['0 1' '1 0']
1 example read
['0 0' '0 1']
1 example read
['1 0' '-1 1']
1 example read
['0 -1' '1 1']
1 example read
['0 1' '1 0']
1 example read
['0 0' '0 1']
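
For completeness, a hedged sketch (not from the original notebook) of reading mini-batches instead of single examples: tf.train.batch adds its own batching queue, which the same queue runners service. The names example_batch and label_batch are illustrative.
example_batch, label_batch = tf.train.batch([features, col1], batch_size=2, capacity=32)

with tf.Session() as sess:
    coordinator = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coordinator)
    # Each run fetches two examples grouped into one batch
    print(sess.run([example_batch, label_batch]))
    coordinator.request_stop()
    coordinator.join(threads)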
