In [1]:
import tensorflow as tf
import numpy as np
import math

Hint: Use dtype=tf.float64 if you want to have same precision as numpy for testing
Hint: You might want to use tf.InteractiveSession for convenience

1a: Create two random 0-d tensors x and y of any distribution.
Create a TensorFlow object that returns x + y if x > y, and x - y otherwise.
Hint: look up tf.cond()
I do the first problem for you


In [2]:
def task_1a_np(x, y):
    """NumPy reference for 1a: x + y if x > y, otherwise x - y."""
    total = x + y
    difference = x - y
    return np.where(x > y, total, difference)

In [3]:
# Scalar float64 placeholders (float64 matches NumPy's default precision).
X = tf.placeholder(tf.float64)
Y = tf.placeholder(tf.float64)


def _sum_branch():
    return tf.add(X, Y)


def _diff_branch():
    return tf.subtract(X, Y)


# tf.cond evaluates only the branch selected by the predicate at run time.
out = tf.cond(tf.greater(X, Y), _sum_branch, _diff_branch)

In [4]:
with tf.Session() as sess:
    for xx, yy in np.random.uniform(size=(50, 2)):
        actual = sess.run(out, feed_dict={X: xx, Y: yy})
        expected = task_1a_np(xx, yy)
        if actual != expected:
            print('Fail')
            # `break` is required: the for-loop's `else` clause runs
            # whenever the loop finishes without a break, so without it
            # 'Success' was printed even after a 'Fail'.
            break
    else:
        print('Success')


Success

1b: Create two 0-d tensors x and y randomly selected from the range [-1, 1).
Return x + y if x < y, x - y if x > y, 0 otherwise.
Hint: Look up tf.case().


In [5]:
def task_1b_np(x, y):
    """NumPy reference for 1b: x + y if x < y, x - y if x > y, else 0."""
    conditions = [x < y, x > y]
    choices = [x + y, x - y]
    return np.select(conditions, choices, default=0)

In [6]:
X = tf.placeholder(tf.float64)
Y = tf.placeholder(tf.float64)


def _zero():
    # tf.case's default must match the branch dtype (float64).
    return tf.constant(0., dtype=tf.float64)


# First matching predicate wins; falls through to the zero default
# when X == Y.
pred_fn_pairs = [(tf.less(X, Y), lambda: tf.add(X, Y)),
                 (tf.greater(X, Y), lambda: tf.subtract(X, Y))]
out = tf.case(pred_fn_pairs, default=_zero)

In [7]:
with tf.Session() as sess:
    for xx, yy in np.random.uniform(-1, 1, size=(50, 2)):
        actual = sess.run(out, feed_dict={X: xx, Y: yy})
        expected = task_1b_np(xx, yy)
        if actual != expected:
            print('Fail')
            # `break` is required: the for/else `else` clause runs
            # whenever the loop is not broken out of, so without it
            # 'Success' was printed even after a 'Fail'.
            break
    else:
        print('Success')


Success

1c: Create the tensor x of the value [[0, -2, -1], [0, 1, 2]]
and y as a tensor of zeros with the same shape as x.
Return a boolean tensor that yields Trues if x equals y element-wise.
Hint: Look up tf.equal().


In [8]:
def task_1c_np():
    """NumPy reference for 1c: element-wise equality of x against zeros."""
    x = np.array([[0, -2, -1], [0, 1, 2]])
    return np.equal(x, np.zeros_like(x))

In [9]:
# Constant 2x3 tensor and an all-zeros tensor of the same shape/dtype.
X = tf.constant([[0, -2, -1], [0, 1, 2]])
Y = tf.zeros_like(X)

# Element-wise equality -> boolean tensor of shape (2, 3).
out = tf.equal(X, Y)

In [10]:
with tf.Session() as sess:
    actual = sess.run(out)
    expected = task_1c_np()
    # Boolean arrays: compare with np.array_equal, then report.
    if np.array_equal(actual, expected):
        print('Success')
    else:
        print('Fail')


Success

1d:
Get the indices of elements in x whose values are greater than 30.
Hint: Use tf.where().
Then extract elements whose values are greater than 30.
Hint: Use tf.gather().


In [11]:
def task_1d_np(x):
    """NumPy reference for 1d: values of x above 30, as a column vector."""
    mask = x > 30
    return x[mask].reshape(-1, 1)

In [12]:
X = tf.placeholder(tf.float64)

# tf.where(condition) returns the indices of True entries; tf.gather then
# pulls those elements out of X. For a 1-D X this yields a (k, 1) result,
# matching the reshape(-1, 1) in task_1d_np.
out = tf.gather(X, tf.where(X > 30))

In [13]:
with tf.Session() as sess:
    # Sample from [0, 60) so some values actually exceed the 30 threshold;
    # the original [0, 1) samples made this check vacuous (always empty).
    for xx in np.random.uniform(0, 60, size=(50, 6)):
        actual = sess.run(out, feed_dict={X: xx})
        expected = task_1d_np(xx)
        # actual/expected are (k, 1) arrays: `!=` would produce an
        # element-wise array, so compare with np.array_equal instead.
        if not np.array_equal(actual, expected):
            print('Fail')
            # Without `break` the for/else `else` clause always ran and
            # printed 'Success' even after a 'Fail'.
            break
    else:
        print('Success')


Success

1e: Create a diagonal 2-d tensor of size 6 x 6 with the diagonal values of 1,
2, ..., 6
Hint: Use tf.range() and tf.diag().


In [14]:
def task_1e_np():
    """NumPy reference for 1e: 6x6 matrix with 1..6 on the diagonal."""
    matrix = np.zeros((6, 6), dtype=int)
    np.fill_diagonal(matrix, np.arange(1, 7))
    return matrix

In [15]:
out = tf.diag(tf.range(1, 7))

In [16]:
with tf.Session() as sess:
    actual = sess.run(out)
    expected = task_1e_np()
    # Integer matrices: exact element-wise comparison is safe here.
    if np.array_equal(actual, expected):
        print('Success')
    else:
        print('Fail')


Success

1f: Create a random 2-d tensor of size 10 x 10 from any distribution.
Calculate its determinant.
Hint: Look at tf.matrix_determinant().


In [17]:
def task_1f_np(x):
    """NumPy reference for 1f: determinant of a square matrix."""
    determinant = np.linalg.det(x)
    return determinant

In [18]:
# Fixed 10x10 shape: tf.matrix_determinant requires square matrices.
X = tf.placeholder(tf.float64, shape=(10, 10))

out = tf.matrix_determinant(X)

In [19]:
with tf.Session() as sess:
    for xx in np.random.uniform(size=(50, 10, 10)):
        actual = sess.run(out, feed_dict={X: xx})
        expected = task_1f_np(xx)
        # Determinants are floats: compare with a tolerance, not ==.
        if not math.isclose(actual, expected):
            print('Fail')
            # Without `break` the for/else `else` clause always ran and
            # printed 'Success' even after a 'Fail'.
            break
    else:
        print('Success')


Success

1g: Create tensor x with value [5, 2, 3, 5, 10, 6, 2, 3, 4, 2, 1, 1, 0, 9].
Return the unique elements in x
Hint: use tf.unique(). Keep in mind that tf.unique() returns a tuple.


In [20]:
def task_1g_np():
    """NumPy reference for 1g: unique values of x in first-occurrence order."""
    x = [5, 2, 3, 5, 10, 6, 2, 3, 4, 2, 1, 1, 0, 9]
    # dict.fromkeys keeps insertion order, deduplicating on first sight --
    # equivalent to np.unique(..., return_index=True) + sorted indices.
    first_seen = list(dict.fromkeys(x))
    return np.array(first_seen)

In [21]:
X = tf.constant([5, 2, 3, 5, 10, 6, 2, 3, 4, 2, 1, 1, 0, 9])

# tf.unique returns a (y, idx) named tuple: `y` holds the unique values in
# first-occurrence order, `idx` maps each input element to its slot in `y`.
out = tf.unique(X)

In [22]:
with tf.Session() as sess:
    # Show the full (y, idx) tuple that tf.unique produces.
    unique_result = sess.run(out)
    print(unique_result)


Unique(y=array([ 5,  2,  3, 10,  6,  4,  1,  0,  9], dtype=int32), idx=array([0, 1, 2, 0, 3, 4, 1, 2, 5, 1, 6, 6, 7, 8], dtype=int32))

In [23]:
with tf.Session() as sess:
    # Unpack only the unique values; the idx mapping is not needed here.
    actual, _ = sess.run(out)
    expected = task_1g_np()
    if np.array_equal(actual, expected):
        print('Success')
    else:
        print('Fail')


Success

1h: Create two tensors x and y of shape 300 from any normal distribution,
as long as they are from the same distribution.
Use tf.cond() to return:

  • The mean squared error of (x - y) if the average of all elements in (x - y)
    is negative, or
  • The sum of absolute value of all elements in the tensor (x - y) otherwise.
    Hint: see the Huber loss function in the lecture slides 3.

In [24]:
def task_1h_np(x, y):
    """NumPy reference for 1h: MSE of (x - y) when its mean is negative,
    otherwise the sum of absolute differences."""
    diff = x - y
    if np.mean(diff) < 0:
        return np.mean(diff ** 2)
    return np.sum(np.abs(diff))

In [25]:
X = tf.placeholder(tf.float64)
Y = tf.placeholder(tf.float64)


def _mse():
    # Mean squared error of the difference.
    return tf.reduce_mean(tf.square(X - Y))


def _abs_sum():
    # Sum of absolute differences.
    return tf.reduce_sum(tf.abs(X - Y))


# Branch on the sign of mean(X - Y); tf.cond evaluates only the taken branch.
out = tf.cond(tf.less(tf.reduce_mean(X - Y), 0), _mse, _abs_sum)

In [26]:
with tf.Session() as sess:
    for xx, yy in np.random.normal(size=(50, 2, 300)):
        actual = sess.run(out, feed_dict={X: xx, Y: yy})
        expected = task_1h_np(xx, yy)
        # Float reductions: compare with a tolerance, not ==.
        if not math.isclose(actual, expected):
            print('Fail')
            # Without `break` the for/else `else` clause always ran and
            # printed 'Success' even after a 'Fail'.
            break
    else:
        print('Success')


Success