In [2]:
# import and check version
# NOTE: this notebook targets the TensorFlow 1.x API — tf.Session,
# tf.placeholder and tf.logging below were all removed in TF 2.x.
import tensorflow as tf
# tf can be really verbose
tf.logging.set_verbosity(tf.logging.ERROR)
print(tf.__version__)
In [3]:
# a small sanity check, does tf seem to work ok?
# Builds a constant op in the default graph, then evaluates it in a Session.
# The Session is opened and closed manually here on purpose — the context-manager
# form ("with tf.Session() as sess:") is introduced in a later cell.
hello = tf.constant('Hello TF!')
sess = tf.Session()
print(sess.run(hello))
sess.close()
In [4]:
# Graph construction only: these lines build ops, they do not compute anything.
# Printing a Tensor shows its symbolic metadata (name/shape/dtype), not a value —
# values only materialize inside sess.run(...).
a = tf.constant(3.0, dtype=tf.float32) # special type of tensor
b = tf.constant(4.0) # also tf.float32 implicitly
total = a + b
print(a)
print(b)
print(total)
In [0]:
# types need to match
# TF does no implicit casting for elementwise ops: adding a float32 tensor to an
# int32 tensor raises a TypeError already at graph-construction time.
# (Fix: the export stripped the indentation of the try/except bodies; restored.)
try:
    tf.constant(3.0, dtype=tf.float32) + tf.constant(4, dtype=tf.int32)
except TypeError as te:
    print(te)
In [0]:
# https://www.tensorflow.org/api_docs/python/tf/dtypes/cast
# Explicitly cast the float32 constant down to int32 so the addition type-checks.
a = tf.constant(3, dtype=tf.int32)
b = tf.cast(tf.constant(4.0, dtype=tf.float32), tf.int32)
int_total = a + b
# Bare last expression: displayed via the notebook's rich repr (symbolic tensor,
# not the computed value — that requires sess.run).
int_total
Out[0]:
In [5]:
# sessions need to be closed in order not to leak resources, this makes sure close is called in any case
# (Fixes: "ressources" typo, and the with-body indentation lost in the export.)
with tf.Session() as sess:
    print(sess.run(total))
    # print(sess.run(int_total))
In [6]:
# let's see what compute devices we have available, hopefully a GPU
# if you do not see it, switch on under Runtime->Change runtime type
# (Fix: restored the with/for body indentation lost in the export.)
with tf.Session() as sess:
    devices = sess.list_devices()
    for d in devices:
        print(d.name)
In [7]:
# Returns the GPU device name if one is visible to TF, else an empty string.
tf.test.gpu_device_name()
Out[7]:
In [8]:
# GPU requires nvidia cuda
# True when this TF build was compiled with CUDA support (not whether a GPU is present).
tf.test.is_built_with_cuda()
Out[8]:
In [9]:
# Pin the evaluation of `total` to the XLA CPU device to show explicit device
# placement. (Fix: restored the nested with-body indentation lost in the export.)
with tf.device("/device:XLA_CPU:0"):
    with tf.Session() as sess:
        print(sess.run(total))
In [0]:
# Placeholders are graph inputs with no value of their own: each sess.run of `z`
# must supply x and y via feed_dict (demonstrated in the following cells).
x = tf.placeholder(tf.float32)
y = tf.placeholder(tf.float32)
z = x + y
In [0]:
# Running `z` without feeding its placeholders fails at run time with an
# InvalidArgumentError — caught here to display the message as the demo.
# (Fix: restored the with/try/except body indentation lost in the export.)
with tf.Session() as sess:
    try:
        print(sess.run(z))
    except tf.errors.InvalidArgumentError as iae:
        print(iae.message)
In [0]:
# Feeding concrete values for both placeholders lets sess.run evaluate z (= x + y).
# (Fix: restored the with-body indentation lost in the export.)
with tf.Session() as sess:
    print(sess.run(z, feed_dict={x: 3.0, y: 4.5}))