In [11]:
import numpy as np
#source ~/tensorflow/bin/activate
import tensorflow as tf
import seaborn


:0: FutureWarning: IPython widgets are experimental and may change in the future.

In [4]:
a = np.array([1,2,3])

In [4]:
a


Out[4]:
array([1, 2, 3])

In [5]:
a.shape


Out[5]:
(3,)

In [7]:
b = np.array([[1,2,3],[4,5,6]])

In [8]:
b


Out[8]:
array([[1, 2, 3],
       [4, 5, 6]])

In [9]:
b.shape


Out[9]:
(2, 3)

In [10]:
a = np.zeros((2,2))

In [11]:
a


Out[11]:
array([[ 0.,  0.],
       [ 0.,  0.]])

In [12]:
b = np.ones((1,2))

In [13]:
b


Out[13]:
array([[ 1.,  1.]])

In [14]:
b.shape


Out[14]:
(1, 2)

In [15]:
b = np.array([1,1])

In [16]:
b


Out[16]:
array([1, 1])

In [17]:
b.shape


Out[17]:
(2,)

In [18]:
b = np.array([[1,1]])

In [19]:
b


Out[19]:
array([[1, 1]])

In [20]:
b.shape


Out[20]:
(1, 2)
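
A note on the two shapes above: (2,) is a rank-1 vector, while (1, 2) is a rank-2 row matrix. A minimal sketch of converting between the two with plain NumPy:

v = np.array([1, 1])      # shape (2,), rank 1
row = v[np.newaxis, :]    # shape (1, 2), rank 2
row2 = v.reshape(1, 2)    # same result via reshape
back = row.ravel()        # back to shape (2,)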

In [21]:
a = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])

In [22]:
a


Out[22]:
array([[ 1,  2,  3,  4],
       [ 5,  6,  7,  8],
       [ 9, 10, 11, 12]])

In [23]:
b = a[:2, 1:3]  # rows 0 and 1, columns 1 and 2

In [24]:
b


Out[24]:
array([[2, 3],
       [6, 7]])

In [25]:
a = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])

In [26]:
row_r1 = a[1, :] #indexing reduces the rank

In [27]:
row_r1


Out[27]:
array([5, 6, 7, 8])

In [28]:
row_r2 = a[1:2, :] # slicing keeps the same rank

In [29]:
row_r2


Out[29]:
array([[5, 6, 7, 8]])

In [34]:
col_r1 = a[:, 1] #second col

In [35]:
col_r1


Out[35]:
array([ 2,  6, 10])

In [36]:
col_r1.shape


Out[36]:
(3,)

In [38]:
col_r2 = a[:, 1:2]

In [39]:
col_r2


Out[39]:
array([[ 2],
       [ 6],
       [10]])

In [40]:
col_r2.shape


Out[40]:
(3, 1)
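
To summarize the indexing-versus-slicing distinction demonstrated above, a small sketch using the same 3x4 array a:

row_view = a[1, :]     # integer index: rank drops, shape (4,)
row_keep = a[1:2, :]   # slice: rank preserved, shape (1, 4)
col_keep = a[:, 1:2]   # same idea along columns, shape (3, 1)
assert row_view.shape == (4,) and row_keep.shape == (1, 4)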

In [41]:
a = np.array([[1,2], [3, 4], [5, 6]])

In [42]:
a


Out[42]:
array([[1, 2],
       [3, 4],
       [5, 6]])

In [43]:
a.shape


Out[43]:
(3, 2)

In [47]:
print(a[[0, 1, 2], [0, 1, 0]])  # the first array gives the row indices, the second the column indices


[1 4 5]
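
Fancy indexing with two index arrays pairs them element-wise, so the call above is equivalent to this explicit construction:

np.array([a[0, 0], a[1, 1], a[2, 0]])   # -> array([1, 4, 5])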

In [50]:
x = np.array([[1,2],[3,4]], dtype=np.float64)

In [52]:
x.ndim


Out[52]:
2

In [53]:
a = np.array([0, 1, 2])

In [62]:
a


Out[62]:
array([0, 1, 2])

In [63]:
a.shape


Out[63]:
(3,)

In [64]:
a1 = np.tile(a, 2)

In [65]:
a1.shape


Out[65]:
(6,)

In [66]:
a2 = np.tile(a, (2, 2))

In [67]:
a2


Out[67]:
array([[0, 1, 2, 0, 1, 2],
       [0, 1, 2, 0, 1, 2]])

In [68]:
a2.shape


Out[68]:
(2, 6)

In [70]:
a.ndim


Out[70]:
1

In [71]:
a3 = np.tile(a, (2, 1, 2))

In [72]:
a3


Out[72]:
array([[[0, 1, 2, 0, 1, 2]],

       [[0, 1, 2, 0, 1, 2]]])

In [73]:
a3.ndim


Out[73]:
3

In [74]:
a3.shape


Out[74]:
(2, 1, 6)

In [78]:
b = np.array([[1, 2], [3, 4]])
b


Out[78]:
array([[1, 2],
       [3, 4]])

In [76]:
b.shape


Out[76]:
(2, 2)

In [79]:
b1 = np.tile(b, 2)

In [84]:
b1.shape


Out[84]:
(2, 4)

In [81]:
b2 = np.tile(b, (2, 1))

In [82]:
b2


Out[82]:
array([[1, 2],
       [3, 4],
       [1, 2],
       [3, 4]])

In [83]:
b2.shape


Out[83]:
(4, 2)
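
The reps argument of np.tile is matched against the array's axes from the last one backwards, and extra entries prepend new axes. A short sketch recapping the shapes seen above, with the same a and b:

np.tile(a, 2).shape        # (6,)    repeat along the only axis
np.tile(a, (2, 2)).shape   # (2, 6)  new leading axis, then repeat along it
np.tile(b, (2, 1)).shape   # (4, 2)  stack two copies vertically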

In [85]:
x = np.array([[[1],[2],[3]], [[4],[5],[6]]])

In [86]:
x


Out[86]:
array([[[1],
        [2],
        [3]],

       [[4],
        [5],
        [6]]])

In [87]:
x.shape


Out[87]:
(2, 3, 1)

In [5]:
x1 = np.array([[[1,2,3], [4,5,6]], [[1,2,3], [4,5,6]]])

In [6]:
x1


Out[6]:
array([[[1, 2, 3],
        [4, 5, 6]],

       [[1, 2, 3],
        [4, 5, 6]]])

In [7]:
x1.shape


Out[7]:
(2, 2, 3)

In [96]:
x1.ndim


Out[96]:
3

In [9]:
x1.shape[2]


Out[9]:
3

In [98]:
x1 = np.array([1, 2, 3, 4, 5])
x2 = np.array([5, 4, 3])

In [99]:
x1


Out[99]:
array([1, 2, 3, 4, 5])

In [100]:
x2


Out[100]:
array([5, 4, 3])

In [101]:
x1_new = x1[:, np.newaxis]

In [102]:
x1_new


Out[102]:
array([[1],
       [2],
       [3],
       [4],
       [5]])

In [104]:
x3 = x1_new + x2

In [106]:
x3


Out[106]:
array([[ 6,  5,  4],
       [ 7,  6,  5],
       [ 8,  7,  6],
       [ 9,  8,  7],
       [10,  9,  8]])
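
This is NumPy broadcasting: shapes are aligned from the trailing axis, and any dimension of size 1 is stretched to match, so (5, 1) + (3,) broadcasts to (5, 3). A minimal check of the equivalence with explicit tiling (purely illustrative):

x3_explicit = np.tile(x1_new, (1, 3)) + np.tile(x2, (5, 1))
assert np.array_equal(x3, x3_explicit)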

In [4]:
tf1 = tf.zeros([2, 3])

In [6]:
tf1.shape


Out[6]:
TensorShape([Dimension(2), Dimension(3)])

In [7]:
tf2 = tf.ones([2, 3, 4])

In [8]:
tf2.shape


Out[8]:
TensorShape([Dimension(2), Dimension(3), Dimension(4)])

In [9]:
tf2


Out[9]:
<tf.Tensor 'ones:0' shape=(2, 3, 4) dtype=float32>

In [12]:
tf3 = tf.placeholder(tf.float32, shape=[None,3], name="train_inputs")

In [13]:
tf3


Out[13]:
<tf.Tensor 'train_inputs:0' shape=(?, 3) dtype=float32>
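
None in a placeholder shape leaves that dimension unspecified, so the same graph accepts any batch size at feed time. A sketch in the TF 1.x API used throughout this notebook (the fed arrays are made up for illustration):

doubled = 2 * tf3
with tf.Session() as sess:
    # any leading dimension works as long as the second axis is 3
    print(sess.run(doubled, feed_dict={tf3: np.ones((5, 3))}))
    print(sess.run(doubled, feed_dict={tf3: np.ones((2, 3))}))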

In [9]:
np.zeros([2, 3] +
         [3 * (3 + 1) / 2], dtype=np.float32)  # list concatenation: shape [2, 3, 6]


Out[9]:
array([[[ 0.,  0.,  0.,  0.,  0.,  0.],
        [ 0.,  0.,  0.,  0.,  0.,  0.],
        [ 0.,  0.,  0.,  0.,  0.,  0.]],

       [[ 0.,  0.,  0.,  0.,  0.,  0.],
        [ 0.,  0.,  0.,  0.,  0.,  0.],
        [ 0.,  0.,  0.,  0.,  0.,  0.]]], dtype=float32)

In [16]:
a = np.zeros((2,2))

In [17]:
a


Out[17]:
array([[ 0.,  0.],
       [ 0.,  0.]])

In [18]:
a.shape


Out[18]:
(2, 2)

In [ ]:


In [5]:
b = np.ones((2,2))

In [6]:
b


Out[6]:
array([[ 1.,  1.],
       [ 1.,  1.]])

In [13]:
b1 = np.sum(b, axis=0)

In [14]:
b1


Out[14]:
array([ 2.,  2.])

In [15]:
b1.shape


Out[15]:
(2,)
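
axis=0 collapses the rows (summing down each column); axis=1 would collapse the columns instead. A quick sketch with the same b:

np.sum(b, axis=0)   # column sums -> array([2., 2.]), shape (2,)
np.sum(b, axis=1)   # row sums    -> array([2., 2.]), shape (2,)
np.sum(b)           # grand total -> 4.0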

In [2]:
tf.InteractiveSession()


Out[2]:
<tensorflow.python.client.session.InteractiveSession at 0x7f207c076850>

In [3]:
a = tf.zeros((2,2))

In [4]:
a


Out[4]:
<tf.Tensor 'zeros:0' shape=(2, 2) dtype=float32>

In [5]:
b = tf.ones((2,2))

In [6]:
b


Out[6]:
<tf.Tensor 'ones:0' shape=(2, 2) dtype=float32>

In [7]:
tf.reduce_sum(b, reduction_indices=1).eval()


Out[7]:
array([ 2.,  2.], dtype=float32)
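
reduction_indices is the older name for what later TF 1.x releases call axis; the equivalent modern spelling would be:

tf.reduce_sum(b, axis=1).eval()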

In [9]:
a.get_shape()


Out[9]:
TensorShape([Dimension(2), Dimension(2)])

In [12]:
ta = tf.zeros((2,2))

In [15]:
print(ta)


Out[15]:
<tf.Tensor 'zeros_1:0' shape=(2, 2) dtype=float32>

In [16]:
print(ta.eval())


[[ 0.  0.]
 [ 0.  0.]]

In [29]:
b2 = tf.reduce_sum(b, reduction_indices=1).eval()[0:1]

In [30]:
b2


Out[30]:
array([ 2.], dtype=float32)

In [38]:
a = tf.constant(5.0)
b = tf.constant(6.0)

In [39]:
a


Out[39]:
<tf.Tensor 'Const_2:0' shape=() dtype=float32>

In [32]:
c = a*b

In [33]:
c  # the object has been created but not yet evaluated


Out[33]:
<tf.Tensor 'mul:0' shape=() dtype=float32>

In [37]:
with tf.Session() as sess:
    print(sess.run(c))
    print(c.eval())


30.0
30.0

In [42]:
W1 = tf.ones((2,2))  # this is a constant that still needs to be executed
W2 = tf.Variable(tf.zeros((2,2)), name="weights") 
W1
W2


Out[42]:
<tensorflow.python.ops.variables.Variable at 0x7f205c58b910>

In [43]:
with tf.Session() as sess:
        print(sess.run(W1)) #running the constant
        sess.run(tf.initialize_all_variables())
        print(sess.run(W2)) #running the variable


[[ 1.  1.]
 [ 1.  1.]]
WARNING:tensorflow:From <ipython-input-43-e74ec20ca67b>:3: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.
Instructions for updating:
Use `tf.global_variables_initializer` instead.
[[ 0.  0.]
 [ 0.  0.]]
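
As the warning says, initialize_all_variables is deprecated. The same cell with the replacement op (a sketch, same TF 1.x API):

with tf.Session() as sess:
    print(sess.run(W1))
    sess.run(tf.global_variables_initializer())
    print(sess.run(W2))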

In [2]:
# Regression in tensorflow
X_data = np.arange(100, step=.1)
y_data = X_data + 20 * np.sin(X_data/10)

In [3]:
X_data.shape


Out[3]:
(1000,)

In [4]:
y_data.shape


Out[4]:
(1000,)

In [5]:
n_samples = 1000  # 1000 observations of a one-dimensional variable
batch_size = 1000

In [6]:
# Tensorflow is finicky about shapes, so resize
X_data = np.reshape(X_data, (n_samples,1))
X_data.shape


Out[6]:
(1000, 1)

In [7]:
y_data = np.reshape(y_data, (n_samples,1))

In [8]:
y_data.shape


Out[8]:
(1000, 1)

In [9]:
# Define placeholders for the input: these will hold the data
# fed into the graph, batch_size observations at a time
X = tf.placeholder(tf.float32, shape=(batch_size, 1))
y = tf.placeholder(tf.float32, shape=(batch_size, 1))

In [10]:
X


Out[10]:
<tf.Tensor 'Placeholder:0' shape=(1000, 1) dtype=float32>

In [11]:
y


Out[11]:
<tf.Tensor 'Placeholder_1:0' shape=(1000, 1) dtype=float32>

In [12]:
# Define variables to be learned
with tf.variable_scope("linear-regression"):
    # the slope coefficient
    W = tf.get_variable("weights", (1, 1),
                        initializer=tf.random_normal_initializer())
    # the intercept
    b = tf.get_variable("bias", (1,),
                        initializer=tf.constant_initializer(0.0))
    y_pred = tf.matmul(X, W) + b
    # the loss function to minimize
    loss = tf.reduce_sum((y - y_pred)**2/n_samples)
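
The loss above is the mean squared error written out by hand; since y has exactly n_samples elements, an equivalent and arguably clearer formulation would be (a sketch, not what was run here):

loss = tf.reduce_mean(tf.square(y - y_pred))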

In [13]:
# Optimize over the variables, with the data assigned
# to the placeholders
# choose the optimization method: train with AdamOptimizer
opt = tf.train.AdamOptimizer()

In [14]:
opt_operation = opt.minimize(loss)  # the optimization op

In [31]:
# must be run inside a session
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    #print(sess.run(y_pred, feed_dict={X: X_data}))
    #print(sess.run(loss, feed_dict={X: X_data, y: y_data}))
    for i in range(500):
        sess.run([opt_operation], feed_dict={X: X_data, y: y_data})
    # These calls must stay indented because they are part of the
    # session opened with the with block
    curr_W, curr_b, curr_loss = sess.run([W,b,loss], 
                                         feed_dict={X: X_data, y: y_data})

In [32]:
print("W: %s b: %s loss: %s"%(curr_W, curr_b, curr_loss))


W: [[-0.03080422]] b: [ 0.46191812] loss: 4001.26
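
Since batch_size equals n_samples here, every step is full-batch gradient descent. A sketch of genuine mini-batching, assuming the placeholders had been declared with shape=(None, 1) instead of the fixed (batch_size, 1) used above:

batch = 100
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(500):
        # sample a random mini-batch of rows at each step
        idx = np.random.choice(n_samples, batch, replace=False)
        sess.run(opt_operation, feed_dict={X: X_data[idx], y: y_data[idx]})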

In [23]:
#print(sess.run([W, b, loss]))
opt_operation


Out[23]:
<tf.Operation 'Adam' type=NoOp>

In [8]:
print(opt_operation)


---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-8-02322271820a> in <module>()
----> 1 print(opt_operation)

NameError: name 'opt_operation' is not defined

In [9]:
x1 = tf.sqrt(2.0)

In [10]:
x1


Out[10]:
<tf.Tensor 'Sqrt_4:0' shape=() dtype=float32>

In [15]:
x1 = tf.ones([2, 3] +
         [3 * (3 + 1) / 2], dtype=np.float32)

In [16]:
x1.shape


Out[16]:
TensorShape([Dimension(2), Dimension(3), Dimension(6)])
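
Note that 3 * (3 + 1) / 2 only evaluates to the integer 6 under Python 2 division; under Python 3 it is the float 6.0, which TensorFlow will typically reject as a dimension. Floor division keeps the expression portable:

x1 = tf.ones([2, 3] + [3 * (3 + 1) // 2], dtype=np.float32)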

In [18]:
x1[1, :, :]


Out[18]:
<tf.Tensor 'strided_slice_1:0' shape=(3, 6) dtype=float32>

In [ ]: