In [1]:
from __future__ import print_function
import tensorflow as tf
import numpy as np
import re
import matplotlib.pyplot as plt
%matplotlib inline
In [70]:
from datetime import date
date.today()
Out[70]:
In [71]:
author = "kyubyong. https://github.com/Kyubyong/tensorflow-exercises"
In [72]:
tf.__version__
Out[72]:
In [73]:
np.__version__
Out[73]:
NOTE on notation
_x, _y, _z, _X, _Y, _Z, ...: NumPy arrays
x, y, z, X, Y, Z, ...: Tensors
In [74]:
# Make data and save to npz.
_x = np.zeros((100, 10), np.int32)
for i in range(100):
    _x[i] = np.random.permutation(10)
_x, _y = _x[:, :-1], _x[:, -1]
import os
if not os.path.exists('example'): os.mkdir('example')
np.savez('example/example.npz', _x=_x, _y=_y)
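A quick sanity check (an illustrative extra cell, not one of the exercises): each row of _x holds nine distinct digits from 0-9, so the target _y is exactly the digit missing from that row.
In [ ]:
# Illustrative check: _y is the one digit absent from each row of _x.
for i in range(3):
    assert set(range(10)) - set(_x[i]) == {_y[i]}
print("targets are the missing digits")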
In [75]:
# Load data
data = np.load('example/example.npz')
_x, _y = data["_x"], data["_y"]
# Q1. Make a placeholder for x with dtype=int32 and shape=(None, 9).
# Inputs and targets
x_pl = tf.placeholder(tf.int32, [None, 9])
y_hat = 45 - tf.reduce_sum(x_pl, axis=1)  # 0+1+...+9 == 45, so this yields the digit each row is missing.
# Session
with tf.Session() as sess:
    _y_hat = sess.run(y_hat, {x_pl: _x})
    print("y_hat =", _y_hat[:30])
    print("true y =", _y[:30])
In [76]:
tf.reset_default_graph()
# Load data
data = np.load('example/example.npz')
_x, _y = data["_x"], data["_y"]
# Serialize
with tf.python_io.TFRecordWriter("example/tfrecord") as fout:
    for _xx, _yy in zip(_x, _y):
        ex = tf.train.Example()
        # Q2. Add each value to ex.
        ex.features.feature['x'].int64_list.value.extend(_xx)
        ex.features.feature['y'].int64_list.value.append(_yy)
        fout.write(ex.SerializeToString())
def read_and_decode_single_example(fname):
    # Create a string queue
    fname_q = tf.train.string_input_producer([fname], num_epochs=1, shuffle=True)
    # Q3. Create a TFRecordReader
    reader = tf.TFRecordReader()
    # Read the string queue
    _, serialized_example = reader.read(fname_q)
    # Q4. Describe parsing syntax
    features = tf.parse_single_example(
        serialized_example,
        features={'x': tf.FixedLenFeature([9], tf.int64),
                  'y': tf.FixedLenFeature([1], tf.int64)})
    # Output
    x = features['x']
    y = features['y']
    return x, y
# Ops
x, y = read_and_decode_single_example('example/tfrecord')
y_hat = 45 - tf.reduce_sum(x)
# Session
with tf.Session() as sess:
    # Q5. Initialize local variables
    sess.run(tf.local_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        while not coord.should_stop():
            _y, _y_hat = sess.run([y, y_hat])
            print(_y[0], "==", _y_hat, end="; ")
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        # When done, ask the threads to stop.
        coord.request_stop()
        # Wait for threads to finish.
        coord.join(threads)
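As an aside, the serialized file can be inspected without queues or a session. A minimal sketch, assuming TF 1.x's tf.python_io.tf_record_iterator and the protobuf FromString constructor:
In [ ]:
# Decode the first record with plain Python.
for serialized in tf.python_io.tf_record_iterator("example/tfrecord"):
    ex = tf.train.Example.FromString(serialized)
    print(ex.features.feature['x'].int64_list.value,
          ex.features.feature['y'].int64_list.value)
    break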
In [77]:
tf.reset_default_graph()
# Load data
data = np.load('example/example.npz')
_x, _y = data["_x"], data["_y"]
# Hyperparams
batch_size = 10 # We will feed mini-batches of size 10.
num_epochs = 2 # We will feed data for two epochs.
# Convert to tensors
x = tf.convert_to_tensor(_x)
y = tf.convert_to_tensor(_y)
# Q6. Make slice queues
x_q, y_q = tf.train.slice_input_producer([x, y], num_epochs=num_epochs, shuffle=True)
# Batching
x_batch, y_batch = tf.train.batch([x_q, y_q], batch_size=batch_size)
# Targets
y_hat = 45 - tf.reduce_sum(x_batch, axis=1)
# Session
with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    # Q7. Make a tf.train.Coordinator and start queue-runner threads.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        while not coord.should_stop():
            _y_hat, _y_batch = sess.run([y_hat, y_batch])
            print(_y_hat, "==", _y_batch)
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        # When done, ask the threads to stop.
        coord.request_stop()
        # Wait for threads to finish.
        coord.join(threads)
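The same pipeline can also be written with tf.data instead of slice queues. A minimal sketch, assuming TF >= 1.4, where tf.data is in the stable namespace; no queue runners are needed:
In [ ]:
# tf.data equivalent of the slice-queue pipeline above.
dataset = (tf.data.Dataset.from_tensor_slices((_x, _y))
           .shuffle(buffer_size=100)
           .repeat(num_epochs)
           .batch(batch_size))
x_b, y_b = dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
    _xb, _yb = sess.run([x_b, y_b])
    print(45 - _xb.sum(axis=1), "==", _yb)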
In [78]:
tf.reset_default_graph()
# Load data
data = np.load('example/example.npz')
_x, _y = data["_x"], data["_y"]
_x = np.concatenate((_x, np.expand_dims(_y, axis=1)), 1)
# Write to a csv file
_x_str = np.array_str(_x)
_x_str = re.sub("[\[\]]", "", _x_str)
_x_str = re.sub("(?m)^ +", "", _x_str)
_x_str = re.sub("[ ]+", ",", _x_str)
with open('example/example.csv', 'w') as fout:
fout.write(_x_str)
# Hyperparams
batch_size = 10
num_epochs = 2  # used in the loop below; defined here so this cell runs on its own
# Create a string queue
fname_q = tf.train.string_input_producer(["example/example.csv"])
# Q8. Create a TextLineReader
reader = tf.TextLineReader()
# Read the string queue
_, value = reader.read(fname_q)
# Q9. Decode value
record_defaults = [[0]]*10
col1, col2, col3, col4, col5, col6, col7, col8, col9, col10 = tf.decode_csv(
    value, record_defaults=record_defaults)
x = tf.stack([col1, col2, col3, col4, col5, col6, col7, col8, col9])
y = col10
# Batching
x_batch, y_batch = tf.train.shuffle_batch(
    [x, y], batch_size=batch_size, capacity=200, min_after_dequeue=100)
# Ops
y_hat = 45 - tf.reduce_sum(x_batch, axis=1)
with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    for i in range(num_epochs*10):
        _y_hat, _y_batch = sess.run([y_hat, y_batch])
        print(_y_hat, "==", _y_batch)
    coord.request_stop()
    coord.join(threads)
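For comparison, a tf.data version of the CSV pipeline; a minimal sketch, assuming TF >= 1.4. tf.decode_csv is the same op used above, applied inside a map:
In [ ]:
# tf.data equivalent of the TextLineReader pipeline above.
def _parse_line(line):
    cols = tf.decode_csv(line, record_defaults=[[0]] * 10)
    return tf.stack(cols[:9]), cols[9]
dataset = (tf.data.TextLineDataset("example/example.csv")
           .map(_parse_line)
           .shuffle(buffer_size=100)
           .batch(batch_size))
x_b, y_b = dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
    _xb, _yb = sess.run([x_b, y_b])
    print(45 - _xb.sum(axis=1), "==", _yb)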
In [3]:
tf.reset_default_graph()
# Hyperparams
batch_size = 10
num_epochs = 1
# Make fake images and save
for i in range(100):
    _x = np.random.randint(0, 256, size=(10, 10, 4)).astype(np.uint8)  # uint8 so imsave reads values as 0-255
    plt.imsave("example/image_{}.jpg".format(i), _x)
# Import jpg files
images = tf.train.match_filenames_once('example/*.jpg')
# Create a string queue
fname_q = tf.train.string_input_producer(images, num_epochs=num_epochs, shuffle=True)
# Q10. Create a WholeFileReader
reader = tf.WholeFileReader()
# Read the string queue
_, value = reader.read(fname_q)
# Q11. Decode value
img = tf.image.decode_image(value)
# Batching
img_batch = tf.train.batch([img], shapes=[10, 10, 4], batch_size=batch_size)
with tf.Session() as sess:
    sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    num_samples = 0
    try:
        while not coord.should_stop():
            sess.run(img_batch)
            num_samples += batch_size
            print(num_samples, "samples have been seen")
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
        coord.join(threads)
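The file-reading pipeline has a tf.data counterpart too. A minimal sketch, assuming TF >= 1.4 and that tf.data.Dataset.list_files and tf.read_file are available; it simply counts how many images can be decoded:
In [ ]:
# Count the images with tf.data instead of a WholeFileReader.
files = tf.data.Dataset.list_files('example/*.jpg')
imgs = files.map(lambda f: tf.image.decode_image(tf.read_file(f)))
next_img = imgs.make_one_shot_iterator().get_next()
with tf.Session() as sess:
    n = 0
    try:
        while True:
            sess.run(next_img)
            n += 1
    except tf.errors.OutOfRangeError:
        print(n, "images read")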
In [ ]: