In [1]:
from __future__ import print_function
import tensorflow as tf
import numpy as np
In [2]:
from datetime import date
date.today()
Out[2]:
In [3]:
author = "kyubyong. https://github.com/Kyubyong/tensorflow-exercises"
In [4]:
tf.__version__
Out[4]:
In [5]:
np.__version__
Out[5]:
In [2]:
sess = tf.InteractiveSession()
NOTE on notation: _x, _X, ... denote NumPy arrays; x, X, ... denote the TensorFlow tensors created from them.
Q1. Create a diagonal tensor with the diagonal values of x.
In [7]:
_x = np.array([1, 2, 3, 4])
x = tf.convert_to_tensor(_x)
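A possible solution (a sketch; this and the sketches below assume the TF 1.x API used elsewhere in this notebook, with the InteractiveSession opened above):
In [ ]:
out = tf.diag(x)  # 4x4 matrix with 1, 2, 3, 4 on the main diagonal
print(out.eval())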
Q2. Extract the diagonal of X.
In [8]:
_X = np.array(
    [[1, 0, 0, 0],
     [0, 2, 0, 0],
     [0, 0, 3, 0],
     [0, 0, 0, 4]])
X = tf.convert_to_tensor(_X)
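A possible solution (sketch):
In [ ]:
out = tf.diag_part(X)  # -> [1 2 3 4]
print(out.eval())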
Q3. Permute the dimensions of X such that the new tensor has shape (3, 4, 2).
In [9]:
_X = np.random.rand(2,3,4)
X = tf.convert_to_tensor(_X)
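A possible solution (sketch):
In [ ]:
out = tf.transpose(X, perm=[1, 2, 0])  # (2, 3, 4) -> (3, 4, 2)
print(out.get_shape())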
Q4. Construct a 3 by 3 identity matrix.
In [10]:
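# A possible solution (sketch): tf.eye builds an identity matrix.
out = tf.eye(3)
print(out.eval())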
Q5. Predict the result of this.
In [3]:
_X = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
X = tf.convert_to_tensor(_X)
diagonal_tensor = tf.matrix_diag(X)
diagonal_part = tf.matrix_diag_part(diagonal_tensor)
print("diagonal_tensor =\n", diagonal_tensor.eval())
print("diagonal_part =\n", diagonal_part.eval())
Q6. Transpose the last two dimensions of X.
In [4]:
_X = np.random.rand(1, 2, 3, 4)
X = tf.convert_to_tensor(_X)
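A possible solution (sketch):
In [ ]:
out = tf.matrix_transpose(X)  # swaps the last two axes: (1, 2, 3, 4) -> (1, 2, 4, 3)
# equivalently: tf.transpose(X, perm=[0, 1, 3, 2])
print(out.get_shape())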
Q7. Multiply X by Y.
In [13]:
_X = np.array([[1, 2, 3], [4, 5, 6]])
_Y = np.array([[1, 1], [2, 2], [3, 3]])
X = tf.convert_to_tensor(_X)
Y = tf.convert_to_tensor(_Y)
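A possible solution (sketch):
In [ ]:
out = tf.matmul(X, Y)  # (2, 3) x (3, 2) -> (2, 2)
print(out.eval())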
Q8. Multiply X and Y. The first axis represents batches.
In [14]:
_X = np.arange(1, 13, dtype=np.int32).reshape((2, 2, 3))
_Y = np.arange(13, 25, dtype=np.int32).reshape((2, 3, 2))
X = tf.convert_to_tensor(_X)
Y = tf.convert_to_tensor(_Y)
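A possible solution (sketch; in TF 1.x, tf.matmul multiplies the last two axes and treats leading axes as batch dimensions):
In [ ]:
out = tf.matmul(X, Y)  # batched: (2, 2, 3) x (2, 3, 2) -> (2, 2, 2)
print(out.eval())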
Q9. Compute the determinant of X.
In [15]:
_X = np.arange(1, 5, dtype=np.float32).reshape((2, 2))
X = tf.convert_to_tensor(_X)
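A possible solution (sketch):
In [ ]:
out = tf.matrix_determinant(X)  # det([[1, 2], [3, 4]]) = -2
print(out.eval())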
Q10. Compute the inverse of X.
In [16]:
_X = np.arange(1, 5, dtype=np.float64).reshape((2, 2))
X = tf.convert_to_tensor(_X)
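A possible solution (sketch):
In [ ]:
out = tf.matrix_inverse(X)  # expected: [[-2, 1], [1.5, -0.5]]
print(out.eval())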
Q11. Get the lower-triangular factor of the Cholesky decomposition of X.
In [17]:
_X = np.array([[4, 12, -16], [12, 37, -43], [-16, -43, 98]], np.float32)
X = tf.convert_to_tensor(_X)
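A possible solution (sketch):
In [ ]:
L = tf.cholesky(X)  # lower-triangular L with L @ L^T = X
print(L.eval())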
Q12. Compute the eigenvalues and eigenvectors of X.
In [18]:
_X = np.diag((1, 2, 3))
X = tf.convert_to_tensor(_X, tf.float32)
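A possible solution (sketch):
In [ ]:
eigenvalues, eigenvectors = tf.self_adjoint_eig(X)
print(eigenvalues.eval())   # [1. 2. 3.]
print(eigenvectors.eval())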
Q13. Compute the singular values of X.
In [19]:
_X = np.array(
    [[1, 0, 0, 0, 2],
     [0, 0, 3, 0, 0],
     [0, 0, 0, 0, 0],
     [0, 2, 0, 0, 0]], dtype=np.float32)
X = tf.convert_to_tensor(_X)
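A possible solution (sketch):
In [ ]:
s = tf.svd(X, compute_uv=False)  # singular values only; roughly [3., 2.24, 2., 0.]
print(s.eval())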
Q14. Predict the results of these.
In [5]:
_X = np.array(
    [[1, 2, 3, 4],
     [5, 6, 7, 8]])
X = tf.convert_to_tensor(_X)
outs = [tf.reduce_sum(X),
        tf.reduce_sum(X, axis=0),
        tf.reduce_sum(X, axis=1, keep_dims=True),
        "",
        tf.reduce_prod(X),
        tf.reduce_prod(X, axis=0),
        tf.reduce_prod(X, axis=1, keep_dims=True),
        "",
        tf.reduce_min(X),
        tf.reduce_min(X, axis=0),
        tf.reduce_min(X, axis=1, keep_dims=True),
        "",
        tf.reduce_max(X),
        tf.reduce_max(X, axis=0),
        tf.reduce_max(X, axis=1, keep_dims=True),
        "",
        tf.reduce_mean(X),
        tf.reduce_mean(X, axis=0),
        tf.reduce_mean(X, axis=1, keep_dims=True)
        ]
for out in outs:
    if out == "":
        print()
    else:
        print("->", out.eval())
# If you remove the common suffix "reduce_", you will get the same
# result in numpy.
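For reference, a sketch of the NumPy counterparts mentioned in the comment above (NumPy spells the keyword keepdims rather than keep_dims):
In [ ]:
print(np.sum(_X), np.sum(_X, axis=0), np.sum(_X, axis=1, keepdims=True))
print(np.prod(_X), np.min(_X), np.max(_X), np.mean(_X))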
Q15. Predict the results of these.
In [6]:
_X = np.array([[True, True],
               [False, False]], np.bool)
X = tf.convert_to_tensor(_X)
outs = [tf.reduce_all(X),
        tf.reduce_all(X, axis=0),
        tf.reduce_all(X, axis=1, keep_dims=True),
        "",
        tf.reduce_any(X),
        tf.reduce_any(X, axis=0),
        tf.reduce_any(X, axis=1, keep_dims=True),
        ]
for out in outs:
    if out == "":
        print()
    else:
        print("->", out.eval())
# If you remove the common suffix "reduce_", you will get the same
# result in numpy.
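If the prediction is unclear: with X = [[True, True], [False, False]], reduce_all should yield False, [False False], and [[ True] [False]], while reduce_any should yield True, [ True  True], and [[ True] [False]].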
Q16. Predict the results of these.
In [7]:
_X = np.array([[0, 1, 0],
               [1, 1, 0]])
X = tf.convert_to_tensor(_X)
outs = [tf.count_nonzero(X),
        tf.count_nonzero(X, axis=0),
        tf.count_nonzero(X, axis=1, keep_dims=True),
        ]
for out in outs:
    print("->", out.eval())
# tf.count_nonzero == np.count_nonzero
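For reference, the expected outputs here are 3, [1 2 0], and [[1] [2]], matching np.count_nonzero applied to _X.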
Q17. Complete the einsum function that would yield the same result as the given function.
In [23]:
_X = np.arange(1, 7).reshape((2, 3))
_Y = np.arange(1, 7).reshape((3, 2))
X = tf.convert_to_tensor(_X)
Y = tf.convert_to_tensor(_Y)
# Matrix multiplication
out1 = tf.matmul(X, Y)
out1_ = tf.einsum(..., X, Y)
assert np.allclose(out1.eval(), out1_.eval())
# Dot product
flattened = tf.reshape(X, [-1])
out2 = tf.reduce_sum(flattened * flattened)
out2_ = tf.einsum(..., flattened, flattened)
assert np.allclose(out2.eval(), out2_.eval())
# Outer product
expanded_a = tf.expand_dims(flattened, 1) # shape: (6, 1)
expanded_b = tf.expand_dims(flattened, 0) # shape: (1, 6)
out3 = tf.matmul(expanded_a, expanded_b)
out3_ = tf.einsum(..., flattened, flattened)
assert np.allclose(out3.eval(), out3_.eval())
# Transpose
out4 = tf.transpose(X) # shape: (3, 2)
out4_ = tf.einsum(..., X)
assert np.allclose(out4.eval(), out4_.eval())
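One possible set of einsum equations (a sketch; these are standard subscripts, not the only valid spellings):
In [ ]:
out1_ = tf.einsum('ij,jk->ik', X, Y)                # matrix multiplication
out2_ = tf.einsum('i,i->', flattened, flattened)    # dot product
out3_ = tf.einsum('i,j->ij', flattened, flattened)  # outer product
out4_ = tf.einsum('ij->ji', X)                      # transpose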
In [ ]: