In [1]:
from __future__ import print_function
import tensorflow as tf
import numpy as np
In [2]:
from datetime import date
date.today()
Out[2]:
In [3]:
author = "kyubyong. https://github.com/Kyubyong/tensorflow-exercises"
In [4]:
tf.__version__
Out[4]:
In [5]:
np.__version__
Out[5]:
In [6]:
sess = tf.InteractiveSession()
NOTE on notation
_x, _y, _z, ...: NumPy 0-d or 1-d arrays
_X, _Y, _Z, ...: NumPy 2-d or higher dimensional arrays
x, y, z, ...: 0-d or 1-d tensors
X, Y, Z, ...: 2-d or higher dimensional tensors
Q1. Let X be a tensor of [["1.1", "2.2"], ["3.3", "4.4"]]. Convert the datatype of X to float32.
In [7]:
_X = np.array([["1.1", "2.2"], ["3.3", "4.4"]])
X = tf.constant(_X)
out = tf.string_to_number(X)
print(out.eval())
assert np.allclose(out.eval(), _X.astype(np.float32))
Q2. Let X be a tensor [[1, 2], [3, 4]] of int32. Convert the data type of X to float64.
In [8]:
_X = np.array([[1, 2], [3, 4]], dtype=np.int32)
X = tf.constant(_X)
out1 = tf.to_double(X)
out2 = tf.cast(X, tf.float64)
assert np.allclose(out1.eval(), out2.eval())
print(out1.eval())
assert np.allclose(out1.eval(), _X.astype(np.float64))
Q3. Let X be a tensor [[1, 2], [3, 4]] of int32. Convert the data type of X to float32.
In [9]:
_X = np.array([[1, 2], [3, 4]], dtype=np.int32)
X = tf.constant(_X)
out1 = tf.to_float(X)
out2 = tf.cast(X, tf.float32)
assert np.allclose(out1.eval(), out2.eval())
print(out1.eval())
assert np.allclose(out1.eval(), _X.astype(np.float32))
Q4. Let X be a tensor [[1, 2], [3, 4]] of float32. Convert the data type of X to int32.
In [10]:
_X = np.array([[1, 2], [3, 4]], dtype=np.float32)
X = tf.constant(_X)
out1 = tf.to_int32(X)
out2 = tf.cast(X, tf.int32)
assert np.allclose(out1.eval(), out2.eval())
print(out1.eval())
assert np.allclose(out1.eval(), _X.astype(np.int32))
Q5. Let X be a tensor [[1, 2], [3, 4]] of float32. Convert the data type of X to int64.
In [11]:
_X = np.array([[1, 2], [3, 4]], dtype=np.float32)
X = tf.constant(_X)
out1 = tf.to_int64(X)
out2 = tf.cast(X, tf.int64)
assert np.allclose(out1.eval(), out2.eval())
print(out1.eval())
assert np.allclose(out1.eval(), _X.astype(np.int64))
Q6. Let X be a tensor of [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]]. Create a tensor representing the shape of X.
In [12]:
_X = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]])
X = tf.constant(_X)
out = tf.shape(X)
print(out.eval())
assert np.allclose(out.eval(), _X.shape) # tf.shape() == np.ndarray.shape
Q7. Let X be a tensor of [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]] and y be a tensor [10, 20]. Create a list of tensors representing the shapes of X and y.
In [13]:
X = tf.constant([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]])
y = tf.constant([10, 20])
out_X, out_y = tf.shape_n([X, y])
print(out_X.eval(), out_y.eval())
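As in Q6, these should agree with NumPy's shape attribute; a quick cross-check (a sketch, assuming the same values as above):
_X = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]])
_y = np.array([10, 20])
assert np.allclose(out_X.eval(), _X.shape)
assert np.allclose(out_y.eval(), _y.shape)
# tf.shape_n([a, b]) == [np.shape(a), np.shape(b)]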
Q8. Let X be a tensor of [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]]. Create a tensor representing the size (=total number of elements) of X.
In [14]:
_X = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]])
X = tf.constant(_X)
out = tf.size(X)
print(out.eval())
assert out.eval() == _X.size # tf.size() == np.ndarray.size
Q9. Let X be a tensor of [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]]. Create a tensor representing the rank (=number of dimensions) of X.
In [15]:
_X = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]])
X = tf.constant(_X)
out = tf.rank(X)
print(out.eval())
assert out.eval() == _X.ndim # tf.rank() == np.ndarray.ndim
Q10. Let X be tf.ones([10, 10, 3]). Reshape X so that the size of the second dimension equals 150.
In [16]:
X = tf.ones([10, 10, 3])
out = tf.reshape(X, [-1, 150])
print(out.eval())
assert np.allclose(out.eval(), np.reshape(np.ones([10, 10, 3]), [-1, 150]))
# tf.reshape(tensor, shape) == np.reshape(array, shape)
Q11. Let X be tf.ones([10, 10, 1, 1]). Remove all the dimensions of size 1 in X.
In [17]:
X = tf.ones([10, 10, 1, 1])
out = tf.squeeze(X)
print(out.eval().shape)
assert np.allclose(out.eval(), np.squeeze(np.ones([10, 10, 1, 1])))
# tf.squeeze(tensor) == np.squeeze(array)
Q12. Let X be tf.ones([10, 10, 1, 1]). Remove only the third dimension in X.
In [18]:
X = tf.ones([10, 10, 1, 1])
out = tf.squeeze(X, [2])
print(out.eval().shape)
assert np.allclose(out.eval(), np.squeeze(np.ones([10, 10, 1, 1]), 2))
# tf.squeeze(tensor, axis) == np.squeeze(array, axis)
Q13. Let X be tf.ones([10, 10]). Add a dimension of 1 at the end of X.
In [19]:
X = tf.ones([10, 10])
out = tf.expand_dims(X, -1)
print(out.eval().shape)
assert np.allclose(out.eval(), np.expand_dims(np.ones([10, 10]), -1))
# tf.expand_dims(tensor, axis) == np.expand_dims(array, axis)
Q14. Let X be a tensor
[[[1, 1, 1], [2, 2, 2]],
[[3, 3, 3], [4, 4, 4]],
[[5, 5, 5], [6, 6, 6]]].
Extract [[[3, 3, 3]], [[5, 5, 5]]] from X.
In [20]:
_X = np.array([[[1, 1, 1],
[2, 2, 2]],
[[3, 3, 3],
[4, 4, 4]],
[[5, 5, 5],
[6, 6, 6]]])
X = tf.constant(_X)
out = tf.slice(X, [1, 0, 0], [2, 1, 3])
print(out.eval())
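The slice can be cross-checked against plain NumPy indexing: begin [1, 0, 0] with size [2, 1, 3] corresponds to the ranges 1:3, 0:1, and 0:3.
assert np.allclose(out.eval(), _X[1:3, 0:1, 0:3])
# tf.slice(tensor, begin, size) == array[begin[0]:begin[0]+size[0], ...]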
Q15. Let X be a tensor of
[[ 1 2]
[ 3 4]
[ 5 6]
[ 7 8]
[ 9 10]].
Extract [[1, 2], [5, 6], [9, 10]] from X.
In [21]:
_X = np.arange(1, 11).reshape([5, 2])
X = tf.convert_to_tensor(_X)
out = tf.strided_slice(X, begin=[0], end=[5], strides=[2])
print(out.eval())
assert np.allclose(out.eval(), _X[[0, 2, 4]])
Q16. Let X be a tensor of
[[ 1 2 3 4 5]
[ 6 7 8 9 10]].
Split X into 5 same-sized tensors along the second dimension.
In [22]:
_X = np.arange(1, 11).reshape([2, 5])
X = tf.convert_to_tensor(_X)
out = tf.split(X, 5, axis=1) # Note that the order of arguments has changed in TensorFlow 1.0
print([each.eval() for each in out])
comp = np.array_split(_X, 5, 1)
# tf.split(tensor, num_or_size_splits, axis) == np.array_split(array, indices_or_sections, axis=0)
assert np.allclose([each.eval() for each in out], comp)
Q17. Let X be a tensor
[[ 1 2 3]
[ 4 5 6]].
Create a tensor looking like
[[ 1 2 3 1 2 3 1 2 3 ]
[ 4 5 6 4 5 6 4 5 6 ]].
In [23]:
_X = np.arange(1, 7).reshape((2, 3))
X = tf.convert_to_tensor(_X)
out = tf.tile(X, [1, 3])
print(out.eval())
assert np.allclose(out.eval(), np.tile(_X, [1, 3]))
# tf.tile(tensor, multiples) == np.tile(array, reps)
Q18. Let X be a tensor
[[ 1 2 3]
[ 4 5 6]].
Pad two 0's before the first dimension and three 0's after the second dimension.
In [24]:
_X = np.arange(1, 7).reshape((2, 3))
X = tf.convert_to_tensor(_X)
out = tf.pad(X, [[2, 0], [0, 3]])
print(out.eval())
assert np.allclose(out.eval(), np.pad(_X, [[2, 0], [0, 3]], 'constant', constant_values=[0, 0]))
Q19. Let X be a tensor
[[ 1 2 3]
[ 4 5 6]]
and Y be a tensor
[[ 7 8 9]
[10 11 12]].
Concatenate X and Y so that the new tensor looks like [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]].
In [25]:
_X = np.array([[1, 2, 3], [4, 5, 6]])
_Y = np.array([[7, 8, 9], [10, 11, 12]])
X = tf.constant(_X)
Y = tf.constant(_Y)
out = tf.concat([X, Y], 1) # Note that the order of arguments has changed in TF 1.0!
print(out.eval())
assert np.allclose(out.eval(), np.concatenate((_X, _Y), 1))
# tf.concat == np.concatenate
Q20. Let x, y, and z be tensors [1, 4], [2, 5], and [3, 6], respectively.
Create a single tensor from these such that it looks [[1, 2, 3], [4, 5, 6]].
In [26]:
x = tf.constant([1, 4])
y = tf.constant([2, 5])
z = tf.constant([3, 6])
out = tf.stack([x, y, z], 1)
print(out.eval())
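NumPy's np.stack behaves the same way; a minimal cross-check, assuming the same input values:
_x, _y, _z = np.array([1, 4]), np.array([2, 5]), np.array([3, 6])
assert np.allclose(out.eval(), np.stack([_x, _y, _z], 1))
# tf.stack(values, axis) == np.stack(arrays, axis)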
Q21. Let X be a tensor [[1, 2, 3], [4, 5, 6]]. Convert X into Y such that Y looks like [[1, 4], [2, 5], [3, 6]].
In [27]:
X = tf.constant([[1, 2, 3], [4, 5, 6]])
Y = tf.unstack(X, axis=1)
print([each.eval() for each in Y])
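There is no single NumPy unstack call, but iterating over the transpose yields the same pieces; a sketch, assuming the same input:
_X = np.array([[1, 2, 3], [4, 5, 6]])
comp = list(_X.T)  # unstacking along axis=1 == iterating over the transpose
assert np.allclose([each.eval() for each in Y], comp)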
Q22. Given X below, reverse the sequence along the second axis except the zero-paddings.
In [28]:
X = tf.constant(
[[[0, 0, 1],
[0, 1, 0],
[0, 0, 0]],
[[0, 0, 1],
[0, 1, 0],
[1, 0, 0]]])
out = tf.reverse_sequence(X, [2, 3], seq_axis=1, batch_axis=0)
out.eval()
Out[28]:
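For reference, the same padding-aware reversal sketched in NumPy, assuming the sequence lengths [2, 3] along the second axis as above:
_X = X.eval()
comp = _X.copy()
for i, length in enumerate([2, 3]):
    comp[i, :length] = _X[i, :length][::-1]  # reverse only the first `length` steps of each batch entry
assert np.allclose(out.eval(), comp)
print(comp)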
Q23. Given X below, reverse the last dimension.
In [29]:
_X = np.arange(1, 1*2*3*4 + 1).reshape((1, 2, 3, 4))
X = tf.convert_to_tensor(_X)
out = tf.reverse(X, [-1]) #Note that tf.reverse has changed its behavior in TF 1.0.
print(out.eval())
assert np.allclose(out.eval(), _X[:, :, :, ::-1])
Q24. Given X below, permute its dimensions such that the new tensor has shape (3, 1, 2).
In [30]:
_X = np.ones((1, 2, 3))
X = tf.convert_to_tensor(_X)
out = tf.transpose(X, [2, 0, 1])
print(out.eval().shape)
assert np.allclose(out.eval(), np.transpose(_X, [2, 0, 1]))
Q25. Given X below, get the first and third rows.
In [31]:
_X = np.arange(1, 10).reshape((3, 3))
X = tf.convert_to_tensor(_X)
out1 = tf.gather(X, [0, 2])
out2 = tf.gather_nd(X, [[0], [2]])
assert np.allclose(out1.eval(), out2.eval())
print(out1.eval())
assert np.allclose(out1.eval(), _X[[0, 2]])
Q26. Given X below, get the elements 5 and 7.
In [32]:
_X = np.arange(1, 10).reshape((3, 3))
X = tf.convert_to_tensor(_X)
out = tf.gather_nd(X, [[1, 1], [2, 0]])
print(out.eval())
assert np.allclose(out.eval(), _X[[1, 2], [1, 0]])
Q27. Let x be a tensor [2, 2, 1, 5, 4, 5, 1, 2, 3]. Get the tensors of unique elements and their counts.
In [33]:
x = tf.constant([2, 2, 1, 5, 4, 5, 1, 2, 3])
out1, _, out2 = tf.unique_with_counts(x)
print(out1.eval(), out2.eval())
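Note that tf.unique_with_counts preserves first-occurrence order while np.unique returns sorted values, so a cross-check needs a sort first; a sketch, assuming the same input:
_x = np.array([2, 2, 1, 5, 4, 5, 1, 2, 3])
vals, counts = np.unique(_x, return_counts=True)  # sorted, unlike tf.unique_with_counts
order = np.argsort(out1.eval())
assert np.allclose(np.sort(out1.eval()), vals)
assert np.allclose(out2.eval()[order], counts)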
Q28. Let x be a tensor [1, 2, 3, 4, 5]. Divide the elements of x into a list of tensors that looks like [[3, 5], [1], [2, 4]].
In [34]:
x = tf.constant([1, 2, 3, 4, 5])
out = tf.dynamic_partition(x, [1, 2, 0, 2, 0], 3)
print([each.eval() for each in out])
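The same partitioning can be sketched in NumPy with one boolean mask per partition id, assuming the partition indices [1, 2, 0, 2, 0] as above:
_x = np.array([1, 2, 3, 4, 5])
partitions = np.array([1, 2, 0, 2, 0])
comp = [_x[partitions == i] for i in range(3)]
assert all(np.allclose(a.eval(), b) for a, b in zip(out, comp))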
Q29. Let X be a tensor [[7, 8], [5, 6]] and Y be a tensor [[1, 2], [3, 4]]. Create a single tensor looking like [[1, 2], [3, 4], [5, 6], [7, 8]].
In [35]:
X = tf.constant([[7, 8], [5, 6]])
Y = tf.constant([[1, 2], [3, 4]])
out = tf.dynamic_stitch([[3, 2], [0, 1]], [X, Y])
print(out.eval())
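Conceptually, tf.dynamic_stitch scatters each row of data to the position given by its index; the same result in NumPy, a sketch using the indices [[3, 2], [0, 1]] above:
_X = np.array([[7, 8], [5, 6]])
_Y = np.array([[1, 2], [3, 4]])
comp = np.zeros((4, 2), dtype=np.int32)
comp[[3, 2]] = _X  # rows of X go to output positions 3 and 2
comp[[0, 1]] = _Y  # rows of Y go to output positions 0 and 1
assert np.allclose(out.eval(), comp)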
Q30. Let x be a tensor [0, 1, 2, 3] and y be a tensor [True, False, False, True].
Apply mask y to x.
In [36]:
_x = np.array([0, 1, 2, 3])
_y = np.array([True, False, False, True])
x = tf.convert_to_tensor(_x)
y = tf.convert_to_tensor(_y)
out = tf.boolean_mask(x, y)
print(out.eval())
assert np.allclose(out.eval(), _x[_y])
Q31. Let X be a tensor [[0, 5, 3], [4, 2, 1]]. Convert X into one-hot.
In [37]:
X = tf.constant([[0, 5, 3], [4, 2, 1]])
out = tf.one_hot(X, 6)
print(out.eval())
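A common NumPy idiom for one-hot encoding is an identity-matrix lookup; a cross-check, assuming depth 6 as above:
_X = np.array([[0, 5, 3], [4, 2, 1]])
assert np.allclose(out.eval(), np.eye(6)[_X])  # row i of eye(6) is the one-hot vector for i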