In [2]:
import tensorflow as tf
In [31]:
x = tf.constant(5)
y = tf.constant(8)
z = tf.Variable(2)
c = tf.multiply(x, z)   # tf.mul was renamed tf.multiply in TF 1.0
init_op = tf.global_variables_initializer()   # replaces the deprecated tf.initialize_all_variables
In [32]:
sess = tf.Session()
sess.run(init_op)
print(sess.run(x * y))   # ==> 40
print(sess.run(c))       # ==> 10
In [37]:
intermed = x + y
final = tf.multiply(intermed, c)
In [38]:
sess.run([intermed, final])
Out[38]: [13, 130]
In [40]:
input1 = tf.placeholder(tf.float16)
input2 = tf.placeholder(tf.float16)
output = tf.add(input1, input2)
In [44]:
# Always specify which placeholder each fed value belongs to.
sess.run(output, feed_dict={input1: 788, input2: 677})
Out[44]: 1465.0
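Placeholders are not limited to scalars; feed_dict accepts anything convertible to the declared dtype. A minimal sketch feeding vectors through the same graph (same session and tf.float16 dtype as above):
In [ ]:
# Feed two vectors instead of scalars; the add is elementwise.
sess.run(output, feed_dict={input1: [1.0, 2.0], input2: [3.0, 4.0]})
# ==> array([ 4., 6.], dtype=float16)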
In [49]:
sess.close()
In [55]:
sess = tf.InteractiveSession()
x = tf.Variable([1.0, 2.0])
a = tf.constant([3.0, 3.0])
sub = tf.subtract(x, a)   # tf.sub was renamed tf.subtract in TF 1.0
x.initializer.run()       # runs the initializer in the default session
print(sess.run(sub))      # ==> [-2. -1.]
sess.close()
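An InteractiveSession installs itself as the default session, so Tensor.eval() and Operation.run() work without naming the session explicitly. A quick sketch of the same subtraction written with eval():
In [ ]:
sess = tf.InteractiveSession()
x = tf.Variable([1.0, 2.0])
a = tf.constant([3.0, 3.0])
x.initializer.run()               # runs in the default session
print(tf.subtract(x, a).eval())   # ==> [-2. -1.]
sess.close()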
In [63]:
# Create a Variable that will be initialized to the scalar value 10.
state = tf.Variable(10, name="counter")
In [64]:
# Create an op that adds one to `state`.
one = tf.constant(1)
new_value = tf.add(state, one)
update = tf.assign(state, new_value)
In [65]:
# Variables must be initialized by running an `init` Op after having
# launched the graph. We first have to add the `init` Op to the graph.
In [66]:
init_op = tf.global_variables_initializer()
In [67]:
with tf.Session() as sess:
    sess.run(init_op)
    print(sess.run(state))       # ==> 10
    print(sess.run(new_value))   # ==> 11
    print(sess.run(update))      # ==> 11
In [69]:
### Run a for loop
In [70]:
with tf.Session() as sess:
    # Run the `init` op.
    sess.run(init_op)
    # Print the initial value of `state`.
    print(sess.run(state))   # ==> 10
    # Run the op that updates `state` and print `state`.
    for _ in range(3):
        sess.run(update)
        print(sess.run(state))   # ==> 11, then 12, then 13
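The add-then-assign pair can be folded into one op with tf.assign_add. A sketch of the same counter written that way (the names `state2` and `update2` are just for illustration):
In [ ]:
state2 = tf.Variable(0, name="counter2")
update2 = tf.assign_add(state2, tf.constant(1))   # increment in place
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(3):
        print(sess.run(update2))   # ==> 1, 2, 3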
In [76]:
# tensor `a` is [1.8, 2.2], dtype=tf.float32
a = [1.8, 2.2]
n = tf.cast(a, tf.int32)   # ==> [1, 2], dtype=tf.int32
In [77]:
init_op = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init_op)
sess.run(n)
Out[77]: array([1, 2], dtype=int32)
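Note that casting a float to an integer type truncates toward zero rather than rounding; a quick check with a negative value (chosen for illustration):
In [ ]:
sess.run(tf.cast([-1.8, 1.8], tf.int32))   # ==> [-1, 1], not [-2, 2]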
In [86]:
t = [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
print(sess.run(tf.shape(t)))   # ==> [2, 2, 3]
print(sess.run(tf.size(t)))    # ==> 12
sess.run(tf.rank(t))
Out[86]: 3
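tf.shape is evaluated when the graph runs; the shape known at graph-construction time is available without a session via Tensor.get_shape(). A small sketch (the name `const_t` is just for illustration):
In [ ]:
const_t = tf.constant(t)
print(const_t.get_shape())           # ==> (2, 2, 3), the static shape
print(sess.run(tf.shape(const_t)))   # ==> [2 2 3], the runtime shape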
In [ ]:
# tensor `t` is [1, 2, 3, 4, 5, 6, 7, 8, 9]
# tensor `t` has shape [9]
tf.reshape(t, [3, 3]) ==> [[1, 2, 3],
                           [4, 5, 6],
                           [7, 8, 9]]
In [88]:
t = [1, 2, 3, 4, 5, 6, 7, 8, 9]
# tensor `t` has shape [9]
sess.run(tf.reshape(t, [3, 3]))
Out[88]:
array([[1, 2, 3],
       [4, 5, 6],
       [7, 8, 9]], dtype=int32)
In [91]:
# tensor `t` has shape [2, 2, 2]
t = [[[1, 1], [2, 2]],
     [[3, 3], [4, 4]]]
sess.run(tf.reshape(t, [2, 4]))
Out[91]:
array([[1, 1, 2, 2],
       [3, 3, 4, 4]], dtype=int32)
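One dimension of the new shape may be given as -1, in which case it is inferred so that the total number of elements is unchanged:
In [ ]:
sess.run(tf.reshape(t, [4, -1]))   # -1 is inferred to be 2
# ==> [[1, 1], [2, 2], [3, 3], [4, 4]]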
In [96]:
### tf.meshgrid(*args, **kwargs)
In [97]:
x = [1, 2, 3]
y = [4, 5, 6]
sess.run(tf.meshgrid(x, y))
Out[97]:
[array([[1, 2, 3],
        [1, 2, 3],
        [1, 2, 3]], dtype=int32),
 array([[4, 4, 4],
        [5, 5, 5],
        [6, 6, 6]], dtype=int32)]
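tf.meshgrid defaults to Cartesian ('xy') indexing; passing indexing='ij' gives matrix-style output instead:
In [ ]:
X, Y = tf.meshgrid(x, y, indexing='ij')
sess.run(X)   # ==> [[1, 1, 1], [2, 2, 2], [3, 3, 3]]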
In [98]:
## tf.slice(input_, begin, size, name=None) Extracts a slice from a tensor.
In [99]:
inputs = [[[1, 1, 1], [2, 2, 2]],
          [[3, 3, 3], [4, 4, 4]],
          [[5, 5, 5], [6, 6, 6]]]   # renamed from `input` to avoid shadowing the builtin
In [104]:
print(sess.run(tf.slice(inputs, [1, 0, 0], [1, 1, 3])))   # ==> [[[3, 3, 3]]]
In [103]:
sess.run(tf.slice(inputs, [1, 0, 0], [1, 2, 3]))
Out[103]:
array([[[3, 3, 3],
        [4, 4, 4]]], dtype=int32)
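A size of -1 in any dimension means "all remaining elements in that dimension":
In [ ]:
sess.run(tf.slice(inputs, [1, 0, 0], [-1, -1, -1]))   # everything from block 1 onward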
In [116]:
### tf.split(value, num_or_size_splits, axis=0) Splits a tensor into sub-tensors along one dimension.
In [117]:
import numpy as np
values = np.random.rand(5, 30)
# TF 1.0 signature: tf.split(value, num_or_size_splits, axis)
split0, split1, split2 = tf.split(values, 3, axis=1)
In [118]:
sess.run(split0)
Out[118]:
In [119]:
print(sess.run(tf.size(split0)))   # ==> 50
sess.run(tf.shape(split0))
Out[119]: array([ 5, 10], dtype=int32)
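num_or_size_splits can also be a list of sizes, producing unequal splits (the sizes must sum to the dimension being split):
In [ ]:
a, b = tf.split(values, [10, 20], axis=1)
print(sess.run(tf.shape(a)))   # ==> [ 5 10]
print(sess.run(tf.shape(b)))   # ==> [ 5 20]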
In [ ]:
## tf.tile(input, multiples, name=None)
In [124]:
x = ['a', 'b', 'c', 'd']
sess.run(tf.tile(x, [2]))
Out[124]: array([b'a', b'b', b'c', b'd', b'a', b'b', b'c', b'd'], dtype=object)
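multiples has one entry per dimension of the input, so a 2-D tensor can be repeated along both axes at once:
In [ ]:
m = [[1, 2], [3, 4]]
sess.run(tf.tile(m, [2, 3]))   # ==> shape [4, 6]: the 2x2 block twice down, three times across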
Pads a tensor.
In [125]:
## tf.pad(tensor, paddings, mode='CONSTANT', name=None)
t = [[1, 2, 3], [4, 5, 6]]
In [129]:
paddings = [[1, 1], [2, 2]]
sess.run(tf.pad(t, paddings))
Out[129]:
array([[0, 0, 0, 0, 0, 0, 0],
       [0, 0, 1, 2, 3, 0, 0],
       [0, 0, 4, 5, 6, 0, 0],
       [0, 0, 0, 0, 0, 0, 0]], dtype=int32)
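mode='REFLECT' and mode='SYMMETRIC' mirror the tensor's own values instead of padding with zeros (for REFLECT each padding size must be smaller than the corresponding dimension):
In [ ]:
sess.run(tf.pad(t, [[1, 1], [1, 1]], mode='REFLECT'))   # ==> shape [4, 5]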
Concatenates tensors along one dimension.
In [130]:
t1 = [[1, 2, 3], [4, 5, 6]]
t2 = [[7, 8, 9], [10, 11, 12]]
In [131]:
sess.run(tf.concat([t1, t2], 0))   # TF 1.0 signature: tf.concat(values, axis)
Out[131]:
array([[ 1,  2,  3],
       [ 4,  5,  6],
       [ 7,  8,  9],
       [10, 11, 12]], dtype=int32)
In [132]:
sess.run(tf.concat([t1, t2], 1))
Out[132]:
array([[ 1,  2,  3,  7,  8,  9],
       [ 4,  5,  6, 10, 11, 12]], dtype=int32)
In [ ]:
# tensor t3 with shape [2, 3]
# tensor t4 with shape [2, 3]
tf.shape(tf.concat([t3, t4], 0)) ==> [4, 3]
tf.shape(tf.concat([t3, t4], 1)) ==> [2, 6]
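Note that tf.concat never adds a dimension, so joining rank-1 tensors into a matrix requires expanding them first; tf.stack (below) does this in one step. A small sketch:
In [ ]:
a = tf.expand_dims(tf.constant([1, 4]), 0)   # shape [1, 2]
b = tf.expand_dims(tf.constant([2, 5]), 0)
sess.run(tf.concat([a, b], 0))   # ==> [[1, 4], [2, 5]]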
tf.stack(values, axis=0, name='stack')
Stacks a list of rank-R tensors into one rank-(R+1) tensor (tf.pack was renamed tf.stack in TF 1.0).
In [137]:
x = [1, 4]
y = [2, 5]
z = [3, 6]
sess.run(tf.stack([x, y, z]))
Out[137]:
array([[1, 4],
       [2, 5],
       [3, 6]], dtype=int32)
In [139]:
sess.run(tf.stack([x, y, z], axis=1))
Out[139]:
array([[1, 2, 3],
       [4, 5, 6]], dtype=int32)
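tf.unstack is the inverse, unpacking a rank-(R+1) tensor along an axis back into a list of rank-R tensors:
In [ ]:
sess.run(tf.unstack(tf.stack([x, y, z]), axis=0))
# ==> [array([1, 4], dtype=int32), array([2, 5], dtype=int32), array([3, 6], dtype=int32)]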