In [1]:
import tensorflow as tf
import numpy as np
Isess = tf.InteractiveSession() #interactive session: lets Tensor.eval() and Operation.run() be called without passing a session explicitly
The rank of a tensor is the number of indices required to specify an element: a scalar has rank 0, a vector rank 1, and a matrix rank 2.
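A quick check of this with tf.rank, evaluated through the interactive session created above (a minimal sketch; the variable names are only illustrative):
scalar = tf.constant(3.0)                        #rank 0: no indices needed
vector = tf.constant([1.0, 2.0])                 #rank 1: one index
matrix = tf.constant([[1.0, 2.0], [3.0, 4.0]])   #rank 2: two indices
print(tf.rank(scalar).eval(), tf.rank(vector).eval(), tf.rank(matrix).eval())   # 0 1 2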
In [2]:
m1 = [[1.0, 2.0], [3.0, 4.0]] #list
m2 = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32) #numpy ndarray
m3 = tf.constant([[1.0, 2.0], [3.0, 4.0]]) #Tensor constant object
print(type(m1))
print(type(m2))
print(type(m3))
In [3]:
t1 = tf.convert_to_tensor(m1, dtype=tf.float32)
print(type(t1))
In [4]:
m4 = tf.constant([ [1,2], [3,4] ])
print(m4)
In [5]:
tf.ones([3,3]) * 0.5
TensorFlow requires a session to execute an operation and retrieve its computed value. A Session describes the hardware environment in which the code will run: when it starts, it assigns CPU/GPU devices to the nodes of the graph. The Session configuration can change without requiring the machine-learning code itself to change.
In [6]:
x = tf.constant([1.,2.])
print(x)
with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
    result = sess.run(tf.negative(x))   #log_device_placement logs which device runs each op
    print(result)
# the with-block closes the session automatically, so no explicit sess.close() is needed
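To make the last point concrete, the same negation can be pinned to a device while only the Session configuration changes; a minimal sketch, assuming a /cpu:0 device is available:
with tf.device('/cpu:0'):                #pin the op to the CPU; the graph code itself is unchanged
    neg_x = tf.negative(x)
config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)
with tf.Session(config=config) as sess: #only the Session configuration differs
    print(sess.run(neg_x))               # [-1. -2.]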
A session can take placeholders, variables, and constants as inputs.
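Placeholders are not used until the moving-average example further down; as a minimal sketch (the name x_ph is just illustrative), a placeholder receives its concrete value through feed_dict when the session runs:
x_ph = tf.placeholder(tf.float32, shape=[None])   #value is supplied at run time
doubled = x_ph * 2.0
with tf.Session() as sess:
    print(sess.run(doubled, feed_dict={x_ph: [1., 2., 3.]}))   # [2. 4. 6.]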
In [7]:
# sess = tf.InteractiveSession() #start Session in interactive mode
raw_data = [1., 3., -5., -4., 0., 3., 9.]
spike = tf.Variable(False) #create a Boolean tf variable
spike.initializer.run() #all tf variables must be initialized
for i in range(1, len(raw_data)):
    if raw_data[i] - raw_data[i-1] > 3:
        updater = tf.assign(spike, True)   #update variable with assign(varName, value)
        updater.eval()                     #run the assign op so the change takes effect
    else:
        tf.assign(spike, False).eval()
    print("Spike", spike.eval())
# sess.close()
In [3]:
raw_data = [1., 3., -5., -4., 0., 3., 9.]
spikes = tf.Variable([False] * len(raw_data), name='spikes') #create vector of boolean variables
spikes.initializer.run() #and initialize
saver = tf.train.Saver() #saves and restores variables (all by default)
for i in range(1, len(raw_data)):
    if raw_data[i] - raw_data[i-1] > 3:
        spikes_val = spikes.eval()
        spikes_val[i] = True
        updater = tf.assign(spikes, spikes_val)   #update variable with assign(varName, value)
        updater.eval()                            #run the assign op so the change takes effect

save_path = saver.save(Isess, './spikes.ckpt')    #saves variables to disk
print("spikes data saved in file: %s" % save_path)
In [9]:
%ls spike*
saver.save() writes a compact binary snapshot of the variables to .ckpt checkpoint files; they are read back with the saver.restore() function.
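The checkpoint contents can also be inspected without restoring, for example with tf.train.NewCheckpointReader; a minimal sketch, assuming the spikes.ckpt file written above exists:
reader = tf.train.NewCheckpointReader('./spikes.ckpt')   #low-level reader over the checkpoint file
print(reader.get_variable_to_shape_map())                #maps saved variable names to their shapes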
In [6]:
spikes = tf.Variable([False]*7, name='spikes')
# spikes.initializer.run() #don't need to init, they'll be directly loaded
saver = tf.train.Saver()
saver.restore(Isess, "./spikes.ckpt")
print(spikes.eval())
In [3]:
raw_data = np.random.normal(10, 1, 100)
alpha = tf.constant(0.05)
curr_value = tf.placeholder(tf.float32)
prev_avg = tf.Variable(0.)
update_avg = alpha * curr_value + (1 - alpha) * prev_avg   #exponential moving average: weight the new value by alpha, the running average by (1 - alpha)
### Create Summary Nodes for TensorBoard ###
avg_hist = tf.summary.scalar("running_average", update_avg)
value_hist = tf.summary.scalar("incoming_values", curr_value)
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter("./logs")
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for i in range(len(raw_data)):
        summary_str, curr_avg = sess.run([merged, update_avg], feed_dict={curr_value: raw_data[i]})
        sess.run(tf.assign(prev_avg, curr_avg))   #carry the new average forward to the next step
        print(raw_data[i], curr_avg)
        writer.add_summary(summary_str, i)        #write the merged summaries for TensorBoard
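To view the recorded summaries, launch TensorBoard from a terminal with tensorboard --logdir=./logs (assuming TensorBoard was installed alongside TensorFlow) and open the address it prints, by default http://localhost:6006.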