In [15]:
import tensorflow as tf
import numpy as np

In [16]:
def read_my_csv(filename_queue):
    # Set up the reader
    reader = tf.TextLineReader()
    # Grab the values from the file(s)
    key, value = reader.read(filename_queue)
    # Perform the decoding; all-string record_defaults mean every column
    # is returned as a string tensor
    default_values = [["0"], ["0 0"], ["0 0"]]
    col1, col2, col3 = tf.decode_csv(value, record_defaults=default_values)
    features = tf.stack([col1, col2])
    # Perform preprocessing here
    ##
    return features, col3
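
The preprocessing placeholder above is where the string columns would normally be turned into numbers. Because the record_defaults are all strings, col2 and col3 come back as space-separated strings such as "0 1". A minimal sketch of parsing one such column into a numeric vector (an assumed preprocessing step, not part of the original notebook):

def parse_pair(column):
    # Split a string like "0 1" on spaces and convert each piece to a float
    parts = tf.string_split([column], delimiter=" ").values
    return tf.string_to_number(parts, out_type=tf.float32)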

In [17]:
def input_pipeline(filenames, batch_size):
    filename_queue = tf.train.string_input_producer(filenames, shuffle=True)
    example, label = read_my_csv(filename_queue)
    min_after_dequeue = 100
    capacity = min_after_dequeue + 3 * batch_size
    # Create the batches using shuffle_batch which performs random shuffling
    example_batch, label_batch = tf.train.shuffle_batch([example, label], 
                                                        batch_size=batch_size, 
                                                        capacity=capacity, 
                                                        min_after_dequeue=min_after_dequeue)
    return example_batch, label_batch
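
The capacity value follows the usual heuristic for shuffle_batch: keep at least min_after_dequeue elements buffered so the shuffle has enough to sample from, plus headroom for a few extra batches. For comparison, a sketch of the same pipeline without shuffling, using tf.train.batch (this variant is not part of the original notebook):

def ordered_input_pipeline(filenames, batch_size):
    # Read the files in order and batch without random shuffling
    filename_queue = tf.train.string_input_producer(filenames, shuffle=False)
    example, label = read_my_csv(filename_queue)
    example_batch, label_batch = tf.train.batch([example, label],
                                                batch_size=batch_size,
                                                capacity=3 * batch_size)
    return example_batch, label_batch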

In [19]:
example_batch, label_batch = input_pipeline(["./Data/samplecsv.txt"], 2)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    
    coordinator = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coordinator)
    
    for i in range(10):
        print(sess.run(example_batch))
        
    coordinator.request_stop()
    coordinator.join(threads)


[['1' '0 1']
 ['1' '1 0']]
[['1' '0 1']
 ['-1' '0 -1']]
[['-1' '0 0']
 ['-1' '0 -1']]
[['1' '0 1']
 ['-1' '0 -1']]
[['1' '1 0']
 ['-1' '0 0']]
[['-1' '0 -1']
 ['-1' '0 -1']]
[['-1' '0 -1']
 ['1' '1 0']]
[['-1' '0 -1']
 ['1' '0 1']]
[['-1' '0 -1']
 ['-1' '0 -1']]
[['-1' '0 -1']
 ['-1' '0 -1']]
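
For reference, later TensorFlow 1.x releases (1.4+) can express this kind of pipeline with the tf.data API. The sketch below assumes the same file path and the same three string columns as above; it is not part of the original notebook:

def parse_line(line):
    # Decode one CSV line and split it into features and label, as in read_my_csv
    col1, col2, col3 = tf.decode_csv(line, record_defaults=[["0"], ["0 0"], ["0 0"]])
    return tf.stack([col1, col2]), col3

dataset = (tf.data.TextLineDataset(["./Data/samplecsv.txt"])
           .map(parse_line)
           .shuffle(buffer_size=100)
           .batch(2)
           .repeat())
iterator = dataset.make_one_shot_iterator()
next_example_batch, next_label_batch = iterator.get_next()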
