In [1]:
from __future__ import print_function
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
%matplotlib inline
In [2]:
from datetime import date
date.today()
In [3]:
author = "kyubyong. https://github.com/Kyubyong/tensorflow-exercises"
In [4]:
tf.__version__
In [5]:
np.__version__
Q1. Apply `l2_normalize` to `x`.
In [20]:
_x = np.arange(1, 11)
epsilon = 1e-12
x = tf.convert_to_tensor(_x, tf.float32)
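One possible answer (a sketch): `tf.nn.l2_normalize` divides each element by the L2 norm along the given dimension.

# Normalize along dimension 0 so the result has unit L2 norm
output = tf.nn.l2_normalize(x, dim=0, epsilon=epsilon)
with tf.Session() as sess:
    print(sess.run(output))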
Q2. Calculate the mean and variance of `x` based on the sufficient statistics.
In [40]:
_x = np.arange(1, 11)
x = tf.convert_to_tensor(_x, tf.float32)
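One possible answer (a sketch): `tf.nn.sufficient_statistics` returns the count, the (possibly shifted) sum, and the sum of squares, which `tf.nn.normalize_moments` turns into the mean and variance.

# count, sum, sum of squares, and the shift (None here, since no shift was given)
counts, sum_x, sum_sq, shift = tf.nn.sufficient_statistics(x, axes=[0])
mean, variance = tf.nn.normalize_moments(counts, sum_x, sum_sq, shift)
with tf.Session() as sess:
    print(sess.run([mean, variance]))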
Q3. Calculate the mean and variance of `x`.
In [39]:
tf.reset_default_graph()
_x = np.arange(1, 11)
x = tf.convert_to_tensor(_x, tf.float32)
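One possible answer (a sketch): `tf.nn.moments` returns the mean and variance in a single call.

mean, variance = tf.nn.moments(x, axes=[0])
with tf.Session() as sess:
    print(sess.run([mean, variance]))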
Q4. Calculate the mean and variance of `x` using `unique_x` and `counts`.
In [63]:
tf.reset_default_graph()
x = tf.constant([1, 1, 2, 2, 2, 3], tf.float32)

# From `x`
mean, variance = tf.nn.moments(x, [0])
with tf.Session() as sess:
    print(sess.run([mean, variance]))

# From unique elements and their counts: one way (a sketch) is
# tf.nn.weighted_moments with the counts as frequency weights.
unique_x, _, counts = tf.unique_with_counts(x)
mean, variance = tf.nn.weighted_moments(unique_x, axes=[0],
                                        frequency_weights=tf.to_float(counts))
with tf.Session() as sess:
    print(sess.run([mean, variance]))
Q5. The code below implements the MNIST classification task. Complete it by adding batch normalization.
In [16]:
# Load data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=False)

# Build graph
class Graph:
    def __init__(self, is_training=False):
        # Inputs and labels
        self.x = tf.placeholder(tf.float32, shape=[None, 784])
        self.y = tf.placeholder(tf.int32, shape=[None])

        # Layer 1
        w1 = tf.get_variable("w1", shape=[784, 100], initializer=tf.truncated_normal_initializer())
        output1 = tf.matmul(self.x, w1)
        # One way to add batch normalization (a sketch):
        # `updates_collections=None` makes the moving-average updates run
        # in place, so no extra update ops need to be attached to train_op.
        output1 = tf.contrib.layers.batch_norm(output1,
                                               center=True, scale=True,
                                               updates_collections=None,
                                               is_training=is_training)

        # Layer 2
        w2 = tf.get_variable("w2", shape=[100, 10], initializer=tf.truncated_normal_initializer())
        logits = tf.matmul(output1, w2)
        preds = tf.to_int32(tf.argmax(logits, axis=1))

        # Training ops
        loss = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.y, logits=logits))
        self.train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
        self.acc = tf.reduce_mean(tf.to_float(tf.equal(self.y, preds)))

# Training
tf.reset_default_graph()
g = Graph(is_training=True)
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init_op)
    saver = tf.train.Saver()
    for i in range(1, 10000+1):
        batch = mnist.train.next_batch(60)
        sess.run(g.train_op, {g.x: batch[0], g.y: batch[1]})

        # Evaluation
        if i % 100 == 0:
            print("training steps=", i, "Acc. =",
                  sess.run(g.acc, {g.x: mnist.test.images, g.y: mnist.test.labels}))
    save_path = saver.save(sess, './my-model')

# Inference
tf.reset_default_graph()
g2 = Graph(is_training=False)
with tf.Session() as sess:
    saver = tf.train.Saver()
    saver.restore(sess, save_path)
    # Accuracy over the first 100 test images, one at a time
    hits = 0
    for i in range(100):
        hits += sess.run(g2.acc, {g2.x: [mnist.test.images[i]], g2.y: [mnist.test.labels[i]]})
    print(hits)
Q6. Compute half the L2 norm of `x` without the sqrt.
In [89]:
tf.reset_default_graph()
x = tf.constant([1, 1, 2, 2, 2, 3], tf.float32)
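One possible answer (a sketch): `tf.nn.l2_loss` computes sum(x**2) / 2, i.e. half the squared L2 norm, with no sqrt involved.

output = tf.nn.l2_loss(x)
with tf.Session() as sess:
    print(sess.run(output))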
Q7. Compute softmax cross entropy between `logits` and `labels`. Note that their ranks are not the same.
In [108]:
tf.reset_default_graph()
logits = tf.random_normal(shape=[2, 5, 10])
labels = tf.convert_to_tensor(np.random.randint(0, 10, size=[2, 5]), tf.int32)
# Sparse version (a sketch): `labels` hold class indices, so their rank
# is one less than that of `logits`.
output = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
with tf.Session() as sess:
    print(sess.run(output))
Q8. Compute softmax cross entropy between `logits` and `labels`.
In [110]:
logits = tf.random_normal(shape=[2, 5, 10])
labels = tf.convert_to_tensor(np.random.randint(0, 10, size=[2, 5]), tf.int32)
labels = tf.one_hot(labels, depth=10)
# Dense version (a sketch): `labels` are one-hot, so logits and labels
# have the same rank.
output = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
with tf.Session() as sess:
    print(sess.run(output))
Q9. Map tensor `x` to the embedding.
In [113]:
tf.reset_default_graph()
x = tf.constant([0, 2, 1, 3, 4], tf.int32)
embedding = tf.constant([0, 0.1, 0.2, 0.3, 0.4], tf.float32)
# Look up the entries of `embedding` at the indices in `x`.
output = tf.nn.embedding_lookup(embedding, x)
with tf.Session() as sess:
    print(sess.run(output))