In [4]:
import tensorflow as tf
import numpy as np

Convertir arreglos de numpy en tensores


In [5]:
# Three equivalent 2x2 matrices built from different container types.
m1 = [[1.0, 2.0],
      [3.0, 4.0]]                                # plain Python list of lists
m2 = np.array([[1.0, 2.0],
               [3.0, 4.0]], dtype=np.float32)    # NumPy ndarray
m3 = tf.constant([[1.0, 2.0],
                  [3.0, 4.0]])                   # already a TensorFlow tensor

for matrix in (m1, m2, m3):
    print(type(matrix))

# tf.convert_to_tensor accepts any of the three representations and
# produces a Tensor; passing an existing Tensor is a no-op conversion.
t1, t2, t3 = (tf.convert_to_tensor(m, dtype=tf.float32) for m in (m1, m2, m3))

for tensor in (t1, t2, t3):
    print(type(tensor))


<class 'list'>
<class 'numpy.ndarray'>
<class 'tensorflow.python.framework.ops.Tensor'>
<class 'tensorflow.python.framework.ops.Tensor'>
<class 'tensorflow.python.framework.ops.Tensor'>
<class 'tensorflow.python.framework.ops.Tensor'>

Crear tensores directamente


In [6]:
# Rank-1 float tensor, shape (2,)
m1 = tf.constant([1., 2.])

# Rank-2 int tensor (column vector), shape (2, 1)
m2 = tf.constant([[1], [2]])

# Rank-3 int tensor, shape (2, 3, 2)
m3 = tf.constant([[[1, 2],
                   [3, 4],
                   [5, 6]],
                  [[7, 8],
                   [9, 10],
                   [11, 12]]])

for tensor in (m1, m2, m3):
    print(tensor)


Tensor("Const_3:0", shape=(2,), dtype=float32)
Tensor("Const_4:0", shape=(2, 1), dtype=int32)
Tensor("Const_5:0", shape=(2, 3, 2), dtype=int32)

In [7]:
m_zeros = tf.zeros([1, 2])         # 1x2 matrix of zeros
m_ones = tf.ones([3, 3])           # 3x3 matrix of ones
m_sevens = tf.ones([2, 3, 2]) * 7  # fill with 7 by scaling a ones tensor

for tensor in (m_zeros, m_ones, m_sevens):
    print(tensor)


Tensor("zeros:0", shape=(1, 2), dtype=float32)
Tensor("ones:0", shape=(3, 3), dtype=float32)
Tensor("mul:0", shape=(2, 3, 2), dtype=float32)

Operaciones Básicas

Para ver todas las operaciones --> https://www.tensorflow.org/api_guides/python/math_ops

El código realmente no corre hasta que se ejecuta una sesión. Esta es la manera que tiene tensorflow de desacoplar el código de ML del hardware en el que correrá. Entonces lo que se hace es definir las operaciones que queremos computar y después, en una sesión (totalmente configurable, pero ese es otro tema) se ejecutan efectivamente esas líneas de código.


In [8]:
x = tf.constant([1., 2.])
y = tf.constant([5., 6.])

# Build the graph node for element-wise negation of x.
# Nothing is computed yet: printing shows the symbolic Tensor, not values.
neg_op = tf.negative(x)
print(neg_op)

# Other element-wise operations available:
#tf.add(x, y)      # Add two tensors of the same type, x + y
#tf.subtract(x, y) # Subtract tensors of the same type, x - y
#tf.multiply(x, y) # Multiply two tensors element-wise
#tf.pow(x, y)      # Take the element-wise power of x to y
#tf.exp(x)         # Equivalent to pow(e, x), where e is Euler's number (2.718...)
#tf.sqrt(x)        # Equivalent to pow(x, 0.5)
#tf.div(x, y)      # Take the element-wise division of x and y
#tf.truediv(x, y)  # Same as tf.div, except casts the arguments as a float
#tf.floordiv(x, y) # Same as truediv, except rounds down the final answer into an integer
#tf.mod(x, y)      # Takes the element-wise remainder from division

# Only inside a session does the graph actually execute.
with tf.Session() as sess:
    result = sess.run(neg_op)

# Now result holds the concrete numpy value.
print(result)


Tensor("Neg:0", shape=(2,), dtype=float32)
[-1. -2.]

Hacer una sesión interactiva

El levantar una sesión es computacionalmente costoso, por eso es preferible que se haga una sola vez. Cuando se están haciendo pruebas con código es común levantar una sesión interactiva una sola vez, e invocar a la función eval() que reutiliza la misma sesión sin tener que estar creando sesiones nuevas cada vez que se quieran obtener resultados.


In [9]:
import tensorflow as tf

# InteractiveSession installs itself as the default session, so .eval()
# can be called on tensors without an explicit `with tf.Session()` block.
sess = tf.InteractiveSession()

x = tf.constant([[1., 2.]])
neg_x = tf.negative(x)

# eval() reuses the interactive session instead of creating a new one.
result = neg_x.eval()
print(result)

sess.close()


[[-1. -2.]]

Usando una variable

Un ejemplo sobre-simplificado en donde el booleano spike se activa cuando el dato nuevo excede en 5 unidades al dato anterior. Se imprime en cada iteración el estado del booleano para verificar si hubo un Spike o no.


In [10]:
import tensorflow as tf
sess = tf.InteractiveSession()

raw_data = [1., 2., 8., -1., 0., 5.5, 6., 13]

# Boolean flag: True whenever a value exceeds its predecessor by more than 5.
spike = tf.Variable(False)
spike.initializer.run()

# Define the two assign ops ONCE, outside the loop. The original created
# tf.assign(...) inside the loop, which adds a brand-new node to the graph
# on every iteration — a classic TF1 anti-pattern that bloats the graph
# and slows down long-running loops.
set_spike = tf.assign(spike, True)
clear_spike = tf.assign(spike, False)

for i in range(1, len(raw_data)):
    if raw_data[i] - raw_data[i-1] > 5:
        set_spike.eval()
    else:
        clear_spike.eval()
    print("Spike", spike.eval())

sess.close()


Spike False
Spike True
Spike False
Spike False
Spike True
Spike False
Spike True

Guardar variables


In [11]:
import tensorflow as tf
sess = tf.InteractiveSession()

raw_data = [1., 2., 8., -1., 0., 5.5, 6., 13]

# One boolean per data point; True marks a jump of more than 5 over the
# previous value.
spikes = tf.Variable([False]*len(raw_data), name='spikes')
spikes.initializer.run()

# The Saver maps the checkpoint key "spikes" to this variable.
saver = tf.train.Saver({"spikes": spikes})

# Build the update op ONCE with a placeholder. The original called
# tf.assign(...) inside the loop, adding a new node to the graph on every
# iteration (graph bloat). Feeding the new value through a placeholder
# reuses a single op instead.
new_spikes = tf.placeholder(tf.bool, shape=[len(raw_data)])
updater = tf.assign(spikes, new_spikes)

for i in range(1, len(raw_data)):
    if raw_data[i] - raw_data[i-1] > 5:
        spikes_val = spikes.eval()
        spikes_val[i] = True
        updater.eval(feed_dict={new_spikes: spikes_val})

save_path = saver.save(sess, "./checkpoints/spikes.ckpt")
print("spikes data saved in file: %s" % save_path)

spikes_val = spikes.eval()
print("SPIKES: {}".format(spikes_val))

sess.close()


spikes data saved in file: ./checkpoints/spikes.ckpt
SPIKES: [False False  True False False  True False  True]

Cargar una variable guardada


In [12]:
import tensorflow as tf
sess = tf.InteractiveSession()

# Fresh variable to receive the checkpointed values; its shape must match
# the variable that was saved (8 booleans).
loaded_spikes = tf.Variable([False]*8, name='loaded_spikes')

# Map the checkpoint key "spikes" onto this differently-named variable.
saver = tf.train.Saver({"spikes": loaded_spikes})

# restore() also initializes the variable, so no initializer.run() is needed.
saver.restore(sess, "./checkpoints/spikes.ckpt")
print(loaded_spikes.eval())

sess.close()


[False False  True False False  True False  True]

Visualizando operaciones

Como ejemplo se usara el calculo de promedio ponderado que se calcula como:

$$ Avg_{t} = f(Avg_{t-1}, x_{t}) = (1 - a)Avg_{t-1} + ax_{t} $$

Primero se define el algoritmo y se imprime en consola... En el siguiente bloque se volvera a hacer el mismo algoritmo pero con las adiciones necesarias para poder visualizarlo en tensorboard.


In [13]:
import tensorflow as tf
import numpy as np

# 100 samples drawn from N(10, 1); the running average should approach ~10.
raw_data = np.random.normal(10, 1, 100)

alpha = tf.constant(0.05)                # smoothing factor a
curr_value = tf.placeholder(tf.float32)  # fed one raw sample per step
prev_avg = tf.Variable(0.)               # running-average state, Avg_{t-1}

# Exponential moving average: Avg_t = a*x_t + (1 - a)*Avg_{t-1}
update_avg = alpha * curr_value + (1 - alpha) * prev_avg

# Define the state update ONCE. The original called sess.run(tf.assign(...))
# inside the loop, creating a new graph node on every iteration (graph
# bloat). Running this single op computes the new average AND stores it
# into prev_avg in one sess.run call, returning the assigned value.
update_op = tf.assign(prev_avg, update_avg)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for i in range(len(raw_data)):
        curr_avg = sess.run(update_op, feed_dict={curr_value: raw_data[i]})
        print(raw_data[i], curr_avg)


10.3382926075 0.516915
8.52887848723 0.917513
9.6642939104 1.35485
10.126766871 1.79345
7.11701535033 2.05963
9.41124564083 2.42721
10.1093890835 2.81132
8.56327777153 3.09891
8.38874797906 3.36341
10.8551957839 3.738
8.62029079181 3.98211
10.4508432344 4.30555
9.93974497376 4.58726
12.0180899677 4.9588
9.38689416679 5.1802
9.13666976901 5.37803
9.37485371775 5.57787
11.1112062863 5.85453
9.23940266213 6.02378
11.5276350518 6.29897
10.5416476333 6.5111
9.51607753156 6.66135
9.36216736527 6.79639
10.4916073761 6.98115
10.6339661868 7.16379
6.99211737895 7.15521
10.3960684295 7.31725
9.68004627299 7.43539
11.5696599297 7.64211
10.4287301389 7.78144
9.00748133316 7.84274
9.94291751963 7.94775
9.09383312853 8.00505
9.76241325101 8.09292
10.5949560884 8.21802
9.34056511997 8.27415
11.2522619568 8.42306
9.50205573712 8.47701
8.92113106524 8.49921
10.6037324649 8.60444
9.97352107372 8.67289
9.60031161869 8.71926
9.45286932616 8.75594
10.2550726349 8.8309
8.4537112885 8.81204
11.7913868754 8.96101
10.7651944685 9.05122
10.449036239 9.12111
9.47494644317 9.1388
8.65145939715 9.11443
10.0976996439 9.16359
10.7063639429 9.24073
11.6369996196 9.36055
11.8077429176 9.48291
9.30626215597 9.47407
11.1716548992 9.55895
9.29947617083 9.54598
10.9526519367 9.61631
11.4919166353 9.71009
8.46035262333 9.6476
9.80287259172 9.65537
11.0375042177 9.72447
9.43217071427 9.70986
10.2983898435 9.73928
9.7364049275 9.73914
11.3822889016 9.8213
10.4826778428 9.85437
9.4399257937 9.83364
8.02007017019 9.74297
9.38939215926 9.72529
10.163048234 9.74718
10.304384217 9.77504
10.2068378666 9.79663
9.37310322328 9.77545
10.8770560902 9.83053
10.8216675698 9.88009
10.1377938579 9.89297
12.8470321497 10.0407
9.80723332489 10.029
11.6482647987 10.11
9.68704291523 10.0888
9.50734826353 10.0597
11.8691913927 10.1502
9.59743046771 10.1226
10.2500167875 10.1289
8.32083886932 10.0385
10.3725353837 10.0552
9.56953396084 10.031
12.382878645 10.1486
10.2655541665 10.1544
9.40398185433 10.1169
8.25707216153 10.0239
9.07986360146 9.97669
8.79974446834 9.91784
8.29048160403 9.83647
10.2852251716 9.85891
9.25751708302 9.82884
10.1157887288 9.84319
11.3484420008 9.91845
10.704113198 9.95773

NOTA: Para visualizar se utilizará tensorboard. Si se está usando zsh puede ser que no encuentre el comando tensorboard. Normalmente se arregla instalando tensorboard con pip y reinicializando la terminal zsh (cuando este es el problema, si uno se cambia a la terminal sh ¡el comando sí existe!)

Se visualiza ejecutando en terminal: tensorboard --logdir=./logs

A continuación se utiliza el SummaryWriter para poder visualizar en el tensorboard lo que está pasando con nuestro algoritmo.


In [14]:
import tensorflow as tf
import numpy as np

# Same exponential-moving-average demo as above, instrumented for TensorBoard.
raw_data = np.random.normal(10, 1, 100)

alpha = tf.constant(0.05)                # smoothing factor a
curr_value = tf.placeholder(tf.float32)  # fed one raw sample per step
prev_avg = tf.Variable(0.)               # running-average state, Avg_{t-1}

# EMA: Avg_t = a*x_t + (1 - a)*Avg_{t-1}
update_avg = alpha * curr_value + (1 - alpha) * prev_avg

# Scalar summaries that TensorBoard will plot per step.
avg_hist = tf.summary.scalar("running_average", update_avg)
value_hist = tf.summary.scalar("incoming_values", curr_value)
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter("./logs")

# State update defined ONCE outside the loop; the original created a new
# tf.assign node on every iteration (graph bloat). Running this op computes
# the new average and stores it into prev_avg in the same sess.run call.
update_op = tf.assign(prev_avg, update_avg)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for i in range(len(raw_data)):
        summary_str, curr_avg = sess.run([merged, update_op],
                                         feed_dict={curr_value: raw_data[i]})
        print(raw_data[i], curr_avg)
        writer.add_summary(summary_str, i)

# Close the writer so all pending summary events are flushed to disk;
# the original never closed it, which can leave the log file incomplete.
writer.close()


10.231763994 0.511588
9.37198212617 0.954608
10.2504603133 1.4194
9.46382986946 1.82162
8.92175435848 2.17663
9.62922868386 2.54926
9.59755703166 2.90167
9.15623463119 3.2144
8.31074982362 3.46922
8.83847195022 3.73768
8.64013056275 3.9828
11.3805610616 4.35269
9.27918167655 4.59902
9.6434330225 4.85124
10.4617564026 5.13176
8.09995022147 5.28017
9.12909870571 5.47262
9.93042856443 5.69551
9.31049372779 5.87626
10.2687992308 6.09589
11.4961772279 6.3659
8.06455671028 6.45083
11.6080742943 6.70869
9.9098051131 6.86875
8.72213798681 6.96142
9.21286870751 7.07399
11.8965428716 7.31512
9.92916605554 7.44582
9.24513877046 7.53579
9.72557570751 7.64528
11.0039301315 7.81321
8.43485860082 7.84429
9.37258884516 7.92071
8.87443698397 7.96839
8.74883716458 8.00741
9.43449773293 8.07877
9.73206038269 8.16143
9.0623562391 8.20648
9.80930690993 8.28662
9.32191954517 8.33839
8.77720425988 8.36033
8.48121078791 8.36637
8.13403591371 8.35475
9.42491574469 8.40826
9.00272131735 8.43799
9.38878955536 8.48553
9.34786234594 8.52864
10.0071722762 8.60257
10.4866644551 8.69677
9.19545813547 8.72171
11.2857906169 8.84991
9.82509024362 8.89867
12.4547435126 9.07647
10.7596334627 9.16063
10.2625078095 9.21572
8.8186417438 9.19587
10.3210073826 9.25213
10.9008260465 9.33456
11.5061465194 9.44314
11.2192333611 9.53194
8.47667471802 9.47918
9.19817627534 9.46513
8.63808927387 9.42378
9.79988237413 9.44258
9.65214142781 9.45306
9.24121427022 9.44247
10.9228138193 9.51649
10.8738432036 9.58435
8.62884918076 9.53658
9.17812654218 9.51866
9.8009432671 9.53277
9.89638868436 9.55095
11.3147645305 9.63914
10.4323144522 9.6788
9.91085536212 9.6904
8.4534129184 9.62855
11.1643928409 9.70535
10.5452096097 9.74734
10.6637324773 9.79316
10.7927508447 9.84314
10.7799014819 9.88998
10.4469674737 9.91782
8.49777350255 9.84682
9.32439905452 9.8207
8.14373802128 9.73685
12.5900604996 9.87951
8.15533196566 9.7933
11.1833410113 9.8628
10.683705195 9.90385
9.80928777908 9.89912
9.00339613429 9.85433
8.42657376472 9.78295
9.63680861799 9.77564
9.5871022889 9.76621
10.4585902851 9.80083
11.374141908 9.8795
8.46640173878 9.80884
11.3041328175 9.88361
10.7520373637 9.92703
8.89165847411 9.87526