In [1]:
from __future__ import print_function
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
%matplotlib inline
In [31]:
from datetime import date
date.today()
In [32]:
author = "kyubyong. https://github.com/Kyubyong/tensorflow-exercises"
In [33]:
tf.__version__
In [34]:
np.__version__
Q1. Apply relu, elu, and softplus to x.
In [3]:
_x = np.linspace(-10., 10., 1000)
x = tf.convert_to_tensor(_x)
relu = ...
elu = ...
softplus = ...
with tf.Session() as sess:
    _relu, _elu, _softplus = sess.run([relu, elu, softplus])
plt.plot(_x, _relu, label='relu')
plt.plot(_x, _elu, label='elu')
plt.plot(_x, _softplus, label='softplus')
plt.legend(bbox_to_anchor=(0.5, 1.0))
plt.show()
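One way to fill in the blanks above (a sketch; all three activations live in tf.nn):

relu = tf.nn.relu(x)          # max(x, 0)
elu = tf.nn.elu(x)            # x if x > 0 else exp(x) - 1
softplus = tf.nn.softplus(x)  # log(exp(x) + 1), a smooth approximation of relu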
Q2. Apply sigmoid and tanh to x.
In [28]:
_x = np.linspace(-10., 10., 1000)
x = tf.convert_to_tensor(_x)
sigmoid = ...
tanh = ...
with tf.Session() as sess:
    _sigmoid, _tanh = sess.run([sigmoid, tanh])
plt.plot(_x, _sigmoid, label='sigmoid')
plt.plot(_x, _tanh, label='tanh')
plt.legend(bbox_to_anchor=(0.5, 1.0))
plt.grid()
plt.show()
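A possible answer (tf.sigmoid and tf.tanh are equivalent aliases):

sigmoid = tf.nn.sigmoid(x)  # 1 / (1 + exp(-x)), range (0, 1)
tanh = tf.nn.tanh(x)        # range (-1, 1)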
Q3. Apply softmax to x.
In [5]:
_x = np.array([[1, 2, 4, 8], [2, 4, 6, 8]], dtype=np.float32)
x = tf.convert_to_tensor(_x)
out = ...
with tf.Session() as sess:
    _out = sess.run(out)
print(_out)
assert np.allclose(np.sum(_out, axis=-1), 1)
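A possible answer. tf.nn.softmax normalizes over the last axis by default, which is exactly what the assertion on the row sums checks:

out = tf.nn.softmax(x)  # each row sums to 1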
Q4. Apply dropout with keep_prob=.5 to x.
In [6]:
_x = np.array([[1, 2, 4, 8], [2, 4, 6, 8]], dtype=np.float32)
print("_x =\n" , _x)
x = tf.convert_to_tensor(_x)
out = ...
with tf.Session() as sess:
_out = sess.run(out)
print("_out =\n", _out)
Q5. Apply a fully connected layer to x with 2 outputs and then a sigmoid function.
In [3]:
x = tf.random_normal([8, 10])
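A possible answer, sketched with tf.layers.dense (one of several TF 1.x ways to write a fully connected layer):

out = tf.layers.dense(x, 2, activation=tf.nn.sigmoid)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(out).shape)  # (8, 2)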
Q6. Apply 2 kernels of width-height (2, 2), stride 1, and same padding to x.
In [7]:
tf.reset_default_graph()
In [8]:
x = tf.random_uniform(shape=(2, 3, 3, 3), dtype=tf.float32)
filter = tf.get_variable("filter", shape=..., dtype=tf.float32,
                         initializer=tf.random_uniform_initializer())
out = ...
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    _out = sess.run(out)
    print(_out.shape)
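A possible answer (a sketch, not the notebook's reference solution). For tf.nn.conv2d the filter shape is (height, width, in_channels, out_channels), so 2 kernels of size (2, 2) over 3 input channels give:

filter = tf.get_variable("filter", shape=(2, 2, 3, 2), dtype=tf.float32,
                         initializer=tf.random_uniform_initializer())
out = tf.nn.conv2d(x, filter, strides=[1, 1, 1, 1], padding='SAME')  # -> (2, 3, 3, 2)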
Q7. Apply 3 kernels of width-height (2, 2), stride 1, dilation_rate 2 and valid padding to x.
In [9]:
tf.reset_default_graph()
In [10]:
x = tf.random_uniform(shape=(4, 10, 10, 3), dtype=tf.float32)
filter = tf.get_variable("filter", shape=..., dtype=tf.float32,
                         initializer=tf.random_uniform_initializer())
out = ...
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    _out = sess.run(out)
    print(_out.shape)
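A possible answer. tf.nn.convolution accepts a dilation_rate (tf.nn.atrous_conv2d with rate=2 is an alternative). A (2, 2) kernel dilated by 2 covers an effective 3x3 window, so valid padding over a 10x10 input yields 8x8:

filter = tf.get_variable("filter", shape=(2, 2, 3, 3), dtype=tf.float32,
                         initializer=tf.random_uniform_initializer())
out = tf.nn.convolution(x, filter, padding='VALID', dilation_rate=(2, 2))  # -> (4, 8, 8, 3)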
Q8. Apply 4 kernels of width-height (3, 3), stride 2, and same padding to x.
In [11]:
tf.reset_default_graph()
In [12]:
x = tf.random_uniform(shape=(4, 10, 10, 5), dtype=tf.float32)
filter = tf.get_variable("filter", shape=..., dtype=tf.float32,
                         initializer=tf.random_uniform_initializer())
out = ...
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    _out = sess.run(out)
    print(_out.shape)
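A possible answer. Same padding with stride 2 gives ceil(10 / 2) = 5 on each spatial axis:

filter = tf.get_variable("filter", shape=(3, 3, 5, 4), dtype=tf.float32,
                         initializer=tf.random_uniform_initializer())
out = tf.nn.conv2d(x, filter, strides=[1, 2, 2, 1], padding='SAME')  # -> (4, 5, 5, 4)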
Q9. Apply a depth-wise convolution with channel multiplier 4, kernels of width-height (3, 3), stride 2, and same padding to x.
In [13]:
tf.reset_default_graph()
In [14]:
x = tf.random_uniform(shape=(4, 10, 10, 5), dtype=tf.float32)
filter = tf.get_variable("filter", shape=..., dtype=tf.float32,
                         initializer=tf.random_uniform_initializer())
out = ...
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    _out = sess.run(out)
    print(_out.shape)
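A possible answer. For tf.nn.depthwise_conv2d the filter shape is (height, width, in_channels, channel_multiplier); each of the 5 input channels gets its own 4 kernels, so the output has 5 * 4 = 20 channels:

filter = tf.get_variable("filter", shape=(3, 3, 5, 4), dtype=tf.float32,
                         initializer=tf.random_uniform_initializer())
out = tf.nn.depthwise_conv2d(x, filter, strides=[1, 2, 2, 1], padding='SAME')  # -> (4, 5, 5, 20)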
Q10. Apply 5 kernels of width 3, stride 2, and valid padding to x (a 1-D convolution).
In [15]:
tf.reset_default_graph()
In [16]:
x = tf.random_uniform(shape=(4, 10, 5), dtype=tf.float32)
filter = tf.get_variable("filter", shape=..., dtype=tf.float32,
                         initializer=tf.random_uniform_initializer())
out = ...
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    _out = sess.run(out)
    print(_out.shape)
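A possible answer. x is rank 3, so this calls for tf.nn.conv1d; its filter shape is (filter_width, in_channels, out_channels), and valid padding with stride 2 gives floor((10 - 3) / 2) + 1 = 4 output steps:

filter = tf.get_variable("filter", shape=(3, 5, 5), dtype=tf.float32,
                         initializer=tf.random_uniform_initializer())
out = tf.nn.conv1d(x, filter, stride=2, padding='VALID')  # -> (4, 4, 5)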
Q11. Apply conv2d transpose with 5 kernels of width-height (3, 3), stride 2, and same padding to x.
In [17]:
tf.reset_default_graph()
In [18]:
x = tf.random_uniform(shape=(4, 5, 5, 4), dtype=tf.float32)
filter = tf.get_variable("filter", shape=..., dtype=tf.float32,
                         initializer=tf.random_uniform_initializer())
shp = x.get_shape().as_list()
output_shape = ...
out = ...
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    _out = sess.run(out)
    print(_out.shape)
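A possible answer. For tf.nn.conv2d_transpose the filter shape is (height, width, out_channels, in_channels), and with same padding the output is simply the input size times the stride:

filter = tf.get_variable("filter", shape=(3, 3, 5, 4), dtype=tf.float32,
                         initializer=tf.random_uniform_initializer())
output_shape = [shp[0], shp[1] * 2, shp[2] * 2, 5]  # (4, 10, 10, 5)
out = tf.nn.conv2d_transpose(x, filter, output_shape=output_shape,
                             strides=[1, 2, 2, 1], padding='SAME')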
Q12. Apply conv2d transpose with 5 kernels of width-height (3, 3), stride 2, and valid padding to x.
In [19]:
tf.reset_default_graph()
In [2]:
x = tf.random_uniform(shape=(4, 5, 5, 4), dtype=tf.float32)
filter = tf.get_variable("filter", shape=(3, 3, 5, 4), dtype=tf.float32,
                         initializer=tf.random_uniform_initializer())
shp = x.get_shape().as_list()
output_shape = ...
out = ...
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    _out = sess.run(out)
    print(_out.shape)
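A possible answer. With valid padding a transposed convolution produces (in - 1) * stride + kernel = (5 - 1) * 2 + 3 = 11 on each spatial axis:

output_shape = [shp[0], (shp[1] - 1) * 2 + 3, (shp[2] - 1) * 2 + 3, 5]  # (4, 11, 11, 5)
out = tf.nn.conv2d_transpose(x, filter, output_shape=output_shape,
                             strides=[1, 2, 2, 1], padding='VALID')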
Q13. Apply max pooling and average pooling of window size 2, stride 1, and valid padding to x.
In [35]:
_x = np.zeros((1, 3, 3, 3), dtype=np.float32)
_x[0, :, :, 0] = np.arange(1, 10, dtype=np.float32).reshape(3, 3)
_x[0, :, :, 1] = np.arange(10, 19, dtype=np.float32).reshape(3, 3)
_x[0, :, :, 2] = np.arange(19, 28, dtype=np.float32).reshape(3, 3)
print("1st channel of x =\n", _x[:, :, :, 0])
print("\n2nd channel of x =\n", _x[:, :, :, 1])
print("\n3rd channel of x =\n", _x[:, :, :, 2])
x = tf.constant(_x)
maxpool = ...
avgpool = ...
with tf.Session() as sess:
    _maxpool, _avgpool = sess.run([maxpool, avgpool])
print("\n1st channel of max pooling =\n", _maxpool[:, :, :, 0])
print("\n2nd channel of max pooling =\n", _maxpool[:, :, :, 1])
print("\n3rd channel of max pooling =\n", _maxpool[:, :, :, 2])
print("\n1st channel of avg pooling =\n", _avgpool[:, :, :, 0])
print("\n2nd channel of avg pooling =\n", _avgpool[:, :, :, 1])
print("\n3rd channel of avg pooling =\n", _avgpool[:, :, :, 2])