In [11]:
import numpy as np
#source ~/tensorflow/bin/activate
import tensorflow as tf
import seaborn
In [4]:
a = np.array([1,2,3])
In [4]:
a
Out[4]:
In [5]:
a.shape
Out[5]:
In [7]:
b = np.array([[1,2,3],[4,5,6]])
In [8]:
b
Out[8]:
In [9]:
b.shape
Out[9]:
In [10]:
a = np.zeros((2,2))
In [11]:
a
Out[11]:
In [12]:
b = np.ones((1,2))
In [13]:
b
Out[13]:
In [14]:
b.shape
Out[14]:
In [15]:
b = np.array([1,1])
In [16]:
b
Out[16]:
In [17]:
b.shape
Out[17]:
In [18]:
b = np.array([[1,1]])
In [19]:
b
Out[19]:
In [20]:
b.shape
Out[20]:
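In [ ]:
# A quick check of the cells above: [1, 1] is a rank-1 vector of shape (2,),
# while [[1, 1]] is a rank-2 row matrix of shape (1, 2) - a common source
# of shape bugs.
print(np.array([1, 1]).shape)    # -> (2,)
print(np.array([[1, 1]]).shape)  # -> (1, 2)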
In [21]:
a = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
In [22]:
a
Out[22]:
In [23]:
b = a[:2, 1:3] # rows 0 and 1, columns 1 and 2
In [24]:
b
Out[24]:
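In [ ]:
# A minimal sketch: a slice such as a[:2, 1:3] is a view into a, not a copy,
# so writing through b also modifies a; use .copy() for an independent array.
a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
b = a[:2, 1:3]
b[0, 0] = 77
print(a[0, 1])          # -> 77: the original array changed
c = a[:2, 1:3].copy()   # an independent copy, safe to modify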
In [25]:
a = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
In [26]:
row_r1 = a[1, :] #indexing reduces the rank
In [27]:
row_r1
Out[27]:
In [28]:
row_r2 = a[1:2, :] #slicing keeps the same rank
In [29]:
row_r2
Out[29]:
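In [ ]:
# Sketch of the rank difference shown above: integer indexing drops an axis,
# slicing preserves it (a still has shape (3, 4) here).
print(a[1, :].shape)    # -> (4,)   rank 1
print(a[1:2, :].shape)  # -> (1, 4) rank 2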
In [34]:
col_r1 = a[:, 1] #second col
In [35]:
col_r1
Out[35]:
In [36]:
col_r1.shape
Out[36]:
In [38]:
col_r2 = a[:, 1:2]
In [39]:
col_r2
Out[39]:
In [40]:
col_r2.shape
Out[40]:
In [41]:
a = np.array([[1,2], [3, 4], [5, 6]])
In [42]:
a
Out[42]:
In [43]:
a.shape
Out[43]:
In [47]:
print(a[[0, 1, 2], [0, 1, 0]]) # the first list gives the row indices, the second the column indices
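In [ ]:
# Sketch: the two index lists are paired element-wise, so the selection
# above is equivalent to picking a[0,0], a[1,1], a[2,0] by hand.
print(np.array([a[0, 0], a[1, 1], a[2, 0]]))  # -> [1 4 5], same as a[[0,1,2],[0,1,0]]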
In [50]:
x = np.array([[1,2],[3,4]], dtype=np.float64)
In [52]:
x.ndim
Out[52]:
In [53]:
a = np.array([0, 1, 2])
In [62]:
a
Out[62]:
In [63]:
a.shape
Out[63]:
In [64]:
a1 = np.tile(a, 2)
In [65]:
a1.shape
Out[65]:
In [66]:
a2 = np.tile(a, (2, 2))
In [67]:
a2
Out[67]:
In [68]:
a2.shape
Out[68]:
In [70]:
a.ndim
Out[70]:
In [71]:
a3 = np.tile(a, (2, 1, 2))
In [72]:
a3
Out[72]:
In [73]:
a3.ndim
Out[73]:
In [74]:
a3.shape
Out[74]:
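In [ ]:
# Sketch of the np.tile shape rule: when reps has more entries than a.ndim,
# a is first promoted by prepending length-1 axes, then each axis is repeated.
print(np.tile(a, 2).shape)          # -> (6,)      (3,) with its only axis doubled
print(np.tile(a, (2, 2)).shape)     # -> (2, 6)    a promoted to (1, 3) first
print(np.tile(a, (2, 1, 2)).shape)  # -> (2, 1, 6) a promoted to (1, 1, 3) first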
In [78]:
b = np.array([[1, 2], [3, 4]])
b
Out[78]:
In [76]:
b.shape
Out[76]:
In [79]:
b1 = np.tile(b, 2)
In [84]:
b1.shape
Out[84]:
In [81]:
b2 = np.tile(b, (2, 1))
In [82]:
b2
Out[82]:
In [83]:
b2.shape
Out[83]:
In [85]:
x = np.array([[[1],[2],[3]], [[4],[5],[6]]])
In [86]:
x
Out[86]:
In [87]:
x.shape
Out[87]:
In [5]:
x1 = np.array([[[1,2,3], [4,5,6]], [[1,2,3], [4,5,6]]])
In [6]:
x1
Out[6]:
In [7]:
x1.shape
Out[7]:
In [96]:
x1.ndim
Out[96]:
In [9]:
x1.shape[2]
Out[9]:
In [98]:
x1 = np.array([1, 2, 3, 4, 5])
x2 = np.array([5, 4, 3])
In [99]:
x1
Out[99]:
In [100]:
x2
Out[100]:
In [101]:
x1_new = x1[:, np.newaxis]
In [102]:
x1_new
Out[102]:
In [104]:
x3 = x1_new + x2
In [106]:
x3
Out[106]:
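In [ ]:
# Sketch: x1 (shape (5,)) and x2 (shape (3,)) do not broadcast directly;
# the new axis turns x1 into a (5, 1) column, and (5, 1) + (3,) broadcasts
# to (5, 3): every element of x1 meets every element of x2.
print(x3.shape)  # -> (5, 3)
print(x3[0])     # -> [6 5 4], i.e. 1 + [5, 4, 3]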
In [4]:
tf1 = tf.zeros([2, 3])
In [6]:
tf1.shape
Out[6]:
In [7]:
tf2 = tf.ones([2, 3,4])
In [8]:
tf2.shape
Out[8]:
In [9]:
tf2
Out[9]:
In [12]:
tf3 = tf.placeholder(tf.float32, shape=[None,3], name="train_inputs")
In [13]:
tf3
Out[13]:
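In [ ]:
# Sketch (TensorFlow 1.x, as used throughout this notebook): None leaves the
# first dimension free, so batches of any length with 3 columns can be fed
# at run time.
doubled = 2.0 * tf3
with tf.Session() as sess:
    print(sess.run(doubled, feed_dict={tf3: np.ones((2, 3))}).shape)  # (2, 3)
    print(sess.run(doubled, feed_dict={tf3: np.ones((5, 3))}).shape)  # (5, 3)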
In [9]:
np.zeros([2, 3] + [3 * (3 + 1) // 2], dtype=np.float32)  # // keeps the size an int: shape (2, 3, 6)
Out[9]:
In [16]:
a = np.zeros((2,2))
In [17]:
a
Out[17]:
In [18]:
a.shape
Out[18]:
In [ ]:
In [5]:
b = np.ones((2,2))
In [6]:
b
Out[6]:
In [13]:
b1 = np.sum(b, axis=0)
In [14]:
b1
Out[14]:
In [15]:
b1.shape
Out[15]:
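In [ ]:
# Sketch: axis=0 sums down the rows (one value per column), axis=1 sums
# across the columns (one value per row); either way one axis disappears.
print(np.sum(b, axis=0))  # -> [2. 2.], shape (2,)
print(np.sum(b, axis=1))  # -> [2. 2.], shape (2,)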
In [2]:
tf.InteractiveSession()
Out[2]:
In [3]:
a = tf.zeros((2,2))
In [4]:
a
Out[4]:
In [5]:
b = tf.ones((2,2))
In [6]:
b
Out[6]:
In [7]:
tf.reduce_sum(b, reduction_indices=1).eval()
Out[7]:
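In [ ]:
# Note: reduction_indices is the older name of this argument; axis is the
# preferred spelling in TF 1.x and gives the same result (.eval() relies on
# the InteractiveSession opened above).
print(tf.reduce_sum(b, axis=1).eval())  # -> [2. 2.]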
In [9]:
a.get_shape()
Out[9]:
In [12]:
ta = tf.zeros((2,2))
In [15]:
print(ta)
Out[15]:
In [16]:
print(ta.eval())
In [29]:
b2 = tf.reduce_sum(b, reduction_indices=1).eval()[0:1]
In [30]:
b2
Out[30]:
In [38]:
a = tf.constant(5.0)
b = tf.constant(6.0)
In [39]:
a
Out[39]:
In [32]:
c = a*b
In [33]:
c # the product node has been created but not yet evaluated
Out[33]:
In [37]:
with tf.Session() as sess:
    print(sess.run(c))
    print(c.eval())
In [42]:
W1 = tf.ones((2,2)) # a constant that still needs to be run in a session
W2 = tf.Variable(tf.zeros((2,2)), name="weights")
W1
W2
Out[42]:
In [43]:
with tf.Session() as sess:
    print(sess.run(W1)) # running the constant
    # initialize_all_variables is the old TF 1.x name; see the note below
    sess.run(tf.initialize_all_variables())
    print(sess.run(W2)) # running the variable
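In [ ]:
# Note: initialize_all_variables() is deprecated in TF 1.x;
# global_variables_initializer() is the replacement and behaves the same,
# assigning each Variable its initial value inside the session.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(W2))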
In [2]:
# Regression in tensorflow
X_data = np.arange(100, step=.1)
y_data = X_data + 20 * np.sin(X_data/10)
In [3]:
X_data.shape
Out[3]:
In [4]:
y_data.shape
Out[4]:
In [5]:
n_samples = 1000 # 1000 observations of a one-dimensional variable
batch_size = 1000
In [6]:
# TensorFlow is finicky about shapes, so reshape
X_data = np.reshape(X_data, (n_samples,1))
X_data.shape
Out[6]:
In [7]:
y_data = np.reshape(y_data, (n_samples,1))
In [8]:
y_data.shape
Out[8]:
In [9]:
# Define placeholders for the input: graph nodes that
# will hold the data fed in at run time
# (here batch_size == n_samples, so each batch is the whole dataset)
X = tf.placeholder(tf.float32, shape=(batch_size, 1))
y = tf.placeholder(tf.float32, shape=(batch_size, 1))
In [10]:
X
Out[10]:
In [11]:
y
Out[11]:
In [12]:
# Define variables to be learned
with tf.variable_scope("linear-regression"):
    # the slope coefficient
    W = tf.get_variable("weights", (1, 1),
                        initializer=tf.random_normal_initializer())
    # the intercept
    b = tf.get_variable("bias", (1,),
                        initializer=tf.constant_initializer(0.0))
    y_pred = tf.matmul(X, W) + b
    # the function to minimize (mean squared error; see the sketch below)
    loss = tf.reduce_sum((y - y_pred)**2 / n_samples)
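In [ ]:
# Sketch: the loss above is the mean squared error
# loss(W, b) = (1/n) * sum_i (y_i - (W * x_i + b))^2.
# The same quantity in NumPy, for a trial W and b:
def mse(W_val, b_val):
    return np.sum((y_data - (X_data * W_val + b_val)) ** 2) / n_samples
print(mse(1.0, 0.0))  # loss of the line y = x on these data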
In [13]:
# Optimization over the variables, with the data assigned
# to the placeholders
# choose the optimization method: train with AdamOptimizer
opt = tf.train.AdamOptimizer()
In [14]:
opt_operation = opt.minimize(loss) # the optimization op
In [31]:
# everything must run inside a session
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    #print(sess.run(y_pred, feed_dict={X: X_data}))
    #print(sess.run(loss, feed_dict={X: X_data, y: y_data}))
    for i in range(500):
        sess.run([opt_operation], feed_dict={X: X_data, y: y_data})
    # these lines must stay indented like this because they are
    # part of the session opened with `with`
    curr_W, curr_b, curr_loss = sess.run([W, b, loss],
                                         feed_dict={X: X_data, y: y_data})
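In [ ]:
# Sketch: since batch_size == n_samples here, every step above trains on the
# full dataset. The same loop with random mini-batches (the batch must still
# match the placeholders' first dimension, batch_size):
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(500):
        indices = np.random.choice(n_samples, batch_size)
        sess.run([opt_operation],
                 feed_dict={X: X_data[indices], y: y_data[indices]})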
In [32]:
print("W: %s b: %s loss: %s"%(curr_W, curr_b, curr_loss))
In [23]:
#print(sess.run([W, b, loss]))
opt_operation
Out[23]:
In [8]:
print(opt_operation)
In [9]:
x1 = tf.sqrt(2.0)
In [10]:
x1
Out[10]:
In [15]:
x1 = tf.ones([2, 3] + [3 * (3 + 1) // 2], dtype=np.float32)  # shape (2, 3, 6)
In [16]:
x1.shape
Out[16]:
In [18]:
x1[1, :, :]
Out[18]:
In [ ]: