In [1]:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
In [2]:
# A linear regression example.
#
# Ground truth: y = [0.1, 0.2] . x + 0.3.  We fit a 1x2 weight row and a
# scalar-ish bias with plain gradient descent and watch them converge.

# Generate phony data: 100 random 2-d points and their exact targets.
x_data = np.float32(np.random.rand(2, 100))
y_data = np.dot([0.1, 0.2], x_data) + 0.3

# Build the graph: trainable bias and weights, and the linear model.
bias = tf.Variable(tf.zeros(1))
weights = tf.Variable(tf.random_uniform([1, 2], -1.0, 1.0))
prediction = tf.matmul(weights, x_data) + bias

# Target: mean squared error, minimized by SGD with learning rate 0.5.
loss = tf.reduce_mean(tf.square(prediction - y_data))
train_op = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

# Variables must be initialized before any run() touches them.
init = tf.global_variables_initializer()

# Start up the graph and run 200 training steps, logging every 20th.
with tf.Session() as sess:
    sess.run(init)
    for step in range(1, 201):
        sess.run(train_op)
        if step % 20 == 0:
            print(step, sess.run(weights), sess.run(bias))
In [3]:
# Tensor operation example: multiply a 1x2 row vector by a 2x1 column vector.
row = tf.constant([[3., 3.]], name = 'left')
col = tf.constant([[2.], [2.]], name = 'right')
product = tf.matmul(row, col)

with tf.Session() as sess:
    # Running the op returns the concrete result (a 1x1 numpy array).
    result = sess.run(product)
    print(result)
    # Show the values: eval() uses the session registered as default
    # by the `with` block.
    print(row.eval())
    print(col.eval())
    # Show the tensors themselves — printing a Tensor shows its graph
    # metadata (name, shape, dtype), not its value.
    print(row)
    print(col)
In [4]:
# Variables and constants: get more than one output from a single run().
#
# A mutable counter variable is incremented via tf.assign; at the end,
# two tensors are fetched in one run() call, returning a list.
counter = tf.Variable(0, name = 'counter')
step_size = tf.constant(1)
incremented = tf.add(counter, step_size)
update_op = tf.assign(counter, incremented)
init_op = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init_op)
    # Initial value of the counter.
    print(counter.eval())
    # Each run of update_op bumps the counter by one.
    for _ in range(3):
        sess.run(update_op)
        print(counter.eval())
    # Fetch several tensors at once: run() accepts a list and returns a list.
    result = sess.run([counter, step_size])
    print('result : ', result)
In [5]:
# Usage of placeholder: inputs are left unspecified at graph-build time
# and supplied through feed_dict when the graph is run.
left_in = tf.placeholder(tf.float32)
right_in = tf.placeholder(tf.float32)
output = tf.multiply(left_in, right_in)

with tf.Session() as sess:
    # Feed concrete values for both placeholders; fetching [output]
    # returns a one-element list.
    result = sess.run([output], feed_dict = {left_in: [2.], right_in: [3.]})
    print(result)