In [1]:
import os
import sys
import tensorflow as tf
In [3]:
tf_records_dir = '/Users/jorge/data/cats_vs_dogs/'
In [4]:
# Build lists of the TFRecord shards: five training and five validation files.
tf_trn = [tf_records_dir + "data_train_00000-of-00005.tfrecord",
          tf_records_dir + "data_train_00001-of-00005.tfrecord",
          tf_records_dir + "data_train_00002-of-00005.tfrecord",
          tf_records_dir + "data_train_00003-of-00005.tfrecord",
          tf_records_dir + "data_train_00004-of-00005.tfrecord"]
tf_val = [tf_records_dir + "data_validation_00000-of-00005.tfrecord",
          tf_records_dir + "data_validation_00001-of-00005.tfrecord",
          tf_records_dir + "data_validation_00002-of-00005.tfrecord",
          tf_records_dir + "data_validation_00003-of-00005.tfrecord",
          tf_records_dir + "data_validation_00004-of-00005.tfrecord"]
In [ ]:
def map_func(serialized_example):
    return serialized_example  # identity placeholder; a real parser is sketched in the next cell
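To train on these shards, map_func has to turn each serialized tf.train.Example back into an (image, label) pair. The sketch below assumes the records were written with 'image/encoded' (JPEG bytes) and 'image/class/label' features and resizes everything to 150x150; adjust the keys and size to match however the cats-vs-dogs shards were actually produced.
In [ ]:
def map_func(serialized_example):
    # Sketch only: the feature keys and image size below are assumptions,
    # not something the shard files above guarantee.
    features = tf.parse_single_example(
        serialized_example,
        features={
            'image/encoded': tf.FixedLenFeature([], tf.string),
            'image/class/label': tf.FixedLenFeature([], tf.int64),
        })
    # Decode the JPEG bytes and bring every image to a common size.
    image = tf.image.decode_jpeg(features['image/encoded'], channels=3)
    image = tf.image.resize_images(image, [150, 150])
    image = tf.cast(image, tf.float32) / 255.0  # scale pixels to [0, 1]
    label = tf.cast(features['image/class/label'], tf.int32)
    return image, label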
In [ ]:
# Use a tf.placeholder(tf.string) to represent the filenames,
# and initialize an iterator from the appropriate filenames
filenames = tf.placeholder(tf.string, shape=[None])
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.map(map_func) # Parse the record into tensors.
dataset = dataset.repeat() # Repeat the input indefinitely.
dataset = dataset.batch(32)
iterator = dataset.make_initializable_iterator()
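The iterator only holds pipeline state; the tensors a model graph would consume come from get_next(). A small sketch, assuming map_func returns (image, label) pairs as in the parsing sketch above:
In [ ]:
# get_next() yields one batch per sess.run() call: under the assumptions above,
# images of shape [32, 150, 150, 3] and labels of shape [32].
next_images, next_labels = iterator.get_next()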
In [ ]:
# You can feed the initializer with the appropriate filenames for the current
# phase of execution, e.g. training vs. validation.
sess = tf.Session()

# Initialize `iterator` with the training shards.
sess.run(iterator.initializer, feed_dict={filenames: tf_trn})

# Later, re-initialize `iterator` with the validation shards.
sess.run(iterator.initializer, feed_dict={filenames: tf_val})
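Putting it together: because the dataset repeats indefinitely, each phase runs for a fixed number of steps rather than waiting for tf.errors.OutOfRangeError. A rough sketch with arbitrary step counts and no model, just to show the alternation:
In [ ]:
# Sketch: alternate training and validation with the one re-initializable pipeline.
num_steps = {"train": 100, "validation": 20}  # arbitrary counts for the sketch
for phase, phase_files in [("train", tf_trn), ("validation", tf_val)]:
    sess.run(iterator.initializer, feed_dict={filenames: phase_files})
    for _ in range(num_steps[phase]):
        images, labels = sess.run([next_images, next_labels])
        # a real pipeline would run a train or eval op on these batches here
    print("finished", phase)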