In [1]:
# Attention: Hands-on TensorBoard (TensorFlow Dev Summit 2017) - naming scopes
# Attention: an error is not always raised; sometimes the session just logs:
# Internal: failed initializing StreamExecutor for CUDA device ordinal 0: Internal:
# failed call to cuDevicePrimaryCtxRetain: CUDA_ERROR_OUT_OF_MEMORY;
# total memory reported: 18446744070441271296
# FIXME: how to detect a failed run()?
import tensorflow as tf
# from tensorflow.python.client import device_lib

# TensorFlow 1.x workflow: first build the computation graph, then execute it
# inside a Session. log_device_placement=True makes TF print which device
# (CPU/GPU) every op is assigned to — useful for the CUDA issue noted above.
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))

# Constants are boring — placeholders take their values from the feed_dict at
# run() time. Shape is left unspecified so scalars and vectors can both be fed.
a = tf.placeholder(tf.float32)  # , shape=[None])
b = tf.placeholder(tf.float32)  # , shape=[None])
# Fixed: was a Python 2 print *statement* (SyntaxError under Python 3) —
# every other line in this cell already uses the Python 3 print() call.
print("a:", a, "b:", b)
adder_node = tf.add(a, b, name="adder0")  # explicit op name; same as `a + b`
print(sess.run(adder_node, {a: [31, 9, 9], b: [4.5, 0, 0]}))
print(sess.run(adder_node, {a: [1, 3.], b: [2, 4.]}))

# Collect run metadata so the per-device partition graphs of this single run()
# can be inspected (another way to see where ops actually executed).
options = tf.RunOptions(output_partition_graphs=True)
metadata = tf.RunMetadata()
c_val = sess.run(adder_node, {a: 3, b: 4.5}, options=options, run_metadata=metadata)
# print(metadata.partition_graphs)

# Fresh placeholders; NOTE(review): these rebind the Python names a/b/adder_node,
# so the "adder0" node above is no longer reachable from Python (it still exists
# in the graph).
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
adder_node = a + b  # + provides a shortcut for tf.add(a, b)
print(sess.run(adder_node, {a: 3, b: 4.5}))
print(sess.run(adder_node, {a: [100, 99], b: [2, 6]}))
In [9]:
In [ ]: