Check which compute devices (CPUs/GPUs) are available on this system


In [ ]:
# List every compute device (CPU/GPU) TensorFlow can see on this machine.
from tensorflow.python.client import device_lib
device_lib.list_local_devices()

Tensor initialization with constants


In [ ]:
import tensorflow as tf

# Two scalar float32 constants and their symbolic sum.  Printing a Tensor
# shows its handle (name/shape/dtype), not a value -- concrete values only
# come from Session.run().
a = tf.constant(3.0, dtype=tf.float32, name='a')
b = tf.constant(4.0, name='b')
total = a + b
print(a)
print(b)
print(total)

In [ ]:
# A Session executes the graph; run() fetches concrete tensor values.
sess = tf.Session()
sess.run([a, b])  # fetch both constants in one call -> [3.0, 4.0]
sess.run(total)   # fetch the sum -> 7.0

In [ ]:
# Fetches may be nested structures; note 'a,b' here is a single string key
# whose value is a tuple, so the result dict mirrors that exact shape.
sess.run({'a,b': (a, b), 'total': total})

Tensor values are re-evaluated on each run within a session


In [ ]:
# A new random vector is drawn on EVERY Session.run() call.
vec = tf.random_uniform(shape=(3,))
out1 = vec + 1
out2 = vec + 2
print(sess.run(vec))            # one draw
print(sess.run(vec))            # a different draw
print(sess.run((out1, out2)))   # single run: out1/out2 share the same draw

Write the graph to disk so TensorBoard can visualize it


In [ ]:
# Write the current graph next to the notebook; view with the command below.
tf.summary.FileWriter('./', sess.graph)
# tensorboard --logdir=./

In [ ]:
# Equivalent two-step form: create the writer, then attach the default graph.
writer = tf.summary.FileWriter('.')
writer.add_graph(tf.get_default_graph())

Placeholders and input feeding via feed_dict


In [ ]:
# Placeholders are graph inputs whose values are supplied at run time via
# feed_dict.  NOTE: this rebinds a/b, shadowing the earlier constants.
a = tf.placeholder(tf.float32, name='a')
b = tf.placeholder(tf.float32, name='b')
z = a+b

sess = tf.Session()
print(sess.run(z, {a: 3, b: 4}))                            # scalars -> 7.0
print(sess.run(z, feed_dict={a: [1, 2, 3], b: [4, 5, 6]}))  # elementwise
tf.summary.FileWriter('./', sess.graph)

Datasets (the tf.data API)


In [ ]:
# Build a tiny in-memory dataset and drain it with a one-shot iterator.
data = [[0, 1], [2, 3], [4, 5], [6, 7]]
slices = tf.data.Dataset.from_tensor_slices(data)
next_item = slices.make_one_shot_iterator().get_next()

# Each run() advances the iterator; OutOfRangeError signals exhaustion.
while True:
    try:
        print(sess.run(next_item))
    except tf.errors.OutOfRangeError:
        break

In [ ]:
# BUG FIX: tf.data.Dataset has no get_next() -- that method lives on
# iterators, so an iterator must be created first.  (The original line also
# referenced ``dataset1``, which is only defined in a later cell, so it
# raised NameError on a fresh kernel.)  Demonstrate the correct pattern on
# the ``slices`` dataset from the previous cell:
slices.make_one_shot_iterator().get_next()

In [ ]:
def print_dtype(input_data, print_string="", key=None):
    """Recursively render the dtypes in a (possibly nested) structure.

    Args:
        input_data: an object with a ``.dtype`` attribute (e.g. a tensor or
            numpy array), or a tuple/dict nesting such objects.
        print_string: accumulator string; external callers normally omit it.
        key: dict key of ``input_data`` in the parent structure, if any.

    Returns:
        ``print_string`` with a rendering of the dtypes appended,
        e.g. ``"(x:float32, int32, )"``.
    """
    # isinstance (not ``type(x) == tuple``) so namedtuple subclasses are
    # treated as tuples too.
    if isinstance(input_data, tuple):
        print_string += "("
        for item in input_data:
            print_string = print_dtype(item, print_string)
        print_string += ")"
    elif isinstance(input_data, dict):
        print_string += "("
        for name, item in input_data.items():
            print_string = print_dtype(item, print_string, name)
        print_string += ")"
    else:
        # ``is not None`` (rather than truthiness) so falsy keys such as
        # 0 or "" are still labelled; str() supports non-string keys.
        if key is not None:
            print_string += str(key) + ":" + str(input_data.dtype) + ", "
        else:
            print_string += str(input_data.dtype) + ", "
    return print_string

def print_dimension(input_data, print_string="", key=None):
    """Recursively render the shapes in a (possibly nested) structure.

    Args:
        input_data: an object with a ``.get_shape()`` method (e.g. a TF
            tensor), or a tuple/dict nesting such objects.
        print_string: accumulator string; external callers normally omit it.
        key: dict key of ``input_data`` in the parent structure, if any.

    Returns:
        ``print_string`` with a rendering of the shapes appended,
        e.g. ``"(d:(4, 10), )"``.
    """
    # isinstance (not ``type(x) == tuple``) so namedtuple subclasses are
    # treated as tuples too.
    if isinstance(input_data, tuple):
        print_string += "("
        for item in input_data:
            print_string = print_dimension(item, print_string)
        print_string += ")"
    elif isinstance(input_data, dict):
        print_string += "("
        for name, item in input_data.items():
            print_string = print_dimension(item, print_string, name)
        print_string += ")"
    else:
        # ``is not None`` (rather than truthiness) so falsy keys such as
        # 0 or "" are still labelled; str() supports non-string keys.
        if key is not None:
            print_string += str(key) + ":" + str(input_data.get_shape()) + ", "
        else:
            print_string += str(input_data.get_shape()) + ", "
    return print_string

In [ ]:
# NOTE(review): ``element2`` is only defined in a later cell, so this cell
# fails with NameError under Restart & Run All unless executed out of order.
print_dtype(element2)

In [ ]:
def describe_dataset(name, dataset, element):
    """Print the dtype/shape structure of ``element`` (one iterator output)
    alongside the dataset's declared output types and shapes."""
    print(name + ":")
    print("\t", print_dtype(element))
    print("\t", print_dimension(element))
    print("\t", dataset.output_types)
    print("\t", dataset.output_shapes)

# A dataset of 4 elements, each a length-10 float vector.
dataset1 = tf.data.Dataset.from_tensor_slices(tf.random_uniform([4, 10]))
element1 = dataset1.make_initializable_iterator().get_next()
describe_dataset("dataset1", dataset1, element1)

# A dataset of dicts: scalar 'first' and a length-100 int32 'second'.
dataset2 = tf.data.Dataset.from_tensor_slices(
   {'first': tf.random_uniform([4]),
    'second': tf.random_uniform([4, 100], maxval=100, dtype=tf.int32)})
element2 = dataset2.make_initializable_iterator().get_next()
describe_dataset("dataset2", dataset2, element2)

# zip pairs corresponding elements of dataset1 and dataset2 under these keys.
dataset3 = tf.data.Dataset.zip({'d1': dataset1, 'd2': dataset2})
element3 = dataset3.make_initializable_iterator().get_next()
describe_dataset("dataset3", dataset3, element3)

In [ ]:
# Dataset.range sized by a placeholder: the iterator must be (re)initialized
# with a concrete max_value before elements can be fetched.
max_value = tf.placeholder(tf.int64, shape=[])
dataset = tf.data.Dataset.range(max_value)
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()

# Initialize with max_value=10, then drain exactly the 10 elements 0..9.
sess.run(iterator.initializer, feed_dict={max_value: 10})
for i in range(10):
  value = sess.run(next_element)
  assert i == value

In [ ]:
# two different datasets with same structure
training_dataset = tf.data.Dataset.range(100).map(
    lambda x: x + tf.random_uniform([], -10, 10, tf.int64))
validation_dataset = tf.data.Dataset.range(50)

# define the iterator using the structure property
iterator = tf.data.Iterator.from_structure(training_dataset.output_types,
                                           training_dataset.output_shapes)
next_element = iterator.get_next()

training_init = iterator.make_initializer(training_dataset)
validation_init = iterator.make_initializer(validation_dataset)

# initialize for training set
sess.run(training_init)
sess.run(next_element)

# reinitialize for validation set
sess.run(validation_init)
sess.run(next_element)

Session.run with different fetch structures


In [ ]:
import collections
# NOTE: rebinds a/b yet again -- a holds integer values, b floats.
a = tf.constant([10, 20], name='a')
b = tf.constant([1.0, 2.0], name='b')
v = sess.run(a)
v

In [ ]:
# Fetching a list returns the evaluated values in the same order.
v = sess.run([a, b])
v

In [ ]:
# print() on a Tensor shows the symbolic handle, not its value.
print(b)

In [ ]:
# Fetch structures can mix namedtuples, dicts, and lists; run() preserves
# the structure.  NOTE: ``data`` now shadows the earlier nested list.
data = collections.namedtuple('MyData', ['a', 'b'])
v = sess.run({'k1': data(a, b), 'k2': [b, a]})
print(v)

Layers


In [ ]:
import tensorflow as tf
# A Dense layer with 1 unit applied to batches of 3-feature rows.
x = tf.placeholder(tf.float32, shape=[None, 3], name='x')
linear_model = tf.layers.Dense(units=1, name='dense_layer')
y = linear_model(x)
# Layers create variables, which must be initialized before running y.
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
sess.run(y, {x: [[1, 2, 3],[4, 5, 6]]})

In [ ]:
# Export the graph (now containing the dense layer) for TensorBoard.
tf.summary.FileWriter('./', sess.graph)

Feature Column


In [ ]:
# Raw input features: a numeric column and a categorical string column.
features = {
    'sales' : [[5], [10], [8], [9]],
    'department': ['sports', 'sports', 'gardening', 'gardening']}

# Map department strings to vocabulary indices, then one-hot encode them.
department_column = tf.feature_column.categorical_column_with_vocabulary_list(
        'department', ['sports', 'gardening'])
department_column = tf.feature_column.indicator_column(department_column)

columns = [
    tf.feature_column.numeric_column('sales'),
    department_column
]

# input_layer assembles the feature columns into one dense input tensor.
inputs = tf.feature_column.input_layer(features, columns)

# Categorical lookups use internal tables, so BOTH variable and table
# initializers must run before fetching inputs.
var_init = tf.global_variables_initializer()
table_init = tf.tables_initializer()
sess = tf.Session()
sess.run((var_init, table_init))
print(sess.run(inputs))

Training


In [ ]:
import tensorflow as tf
# define data and labels (targets follow y = 2x)
x = tf.constant([[1], [2], [3], [4]], dtype=tf.float32, name='input')
y_true = tf.constant([[2], [4], [6], [8]], dtype=tf.float32, name='actuals')

# get model output from a single-unit linear layer
linear_model = tf.layers.Dense(units=1, name='dense_layer')
y_pred = linear_model(x)

# initialize session and the layer's variables
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)

# extract model output (predictions from the untrained, random weights)
sess.run(y_pred)

# define loss: mean squared error between targets and predictions
loss = tf.losses.mean_squared_error(
    labels=y_true, 
    predictions=y_pred)

sess.run(loss)

# define optimizer to minimize the loss (learning rate 0.01)
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)

# 100 gradient-descent steps.  NOTE(review): loss_value is computed but
# never used -- print it here to monitor convergence.
for i in range(100):
  _, loss_value = sess.run((train, loss))

# predictions after training (should approach y_true)
sess.run(y_pred)

In [ ]:
# Export the training graph for TensorBoard.
tf.summary.FileWriter('./', sess.graph)

In [ ]: