In [1]:
import tensorflow as tf
In [2]:
tf.zeros([3, 3])
Out[2]:
In [3]:
tf.ones([3, 3])
Out[3]:
In [4]:
tf.fill([3, 3], 42)
Out[4]:
In [5]:
constant_tsr = tf.constant([1, 2, 3])
constant_tsr
Out[5]:
In [6]:
tf.zeros_like(constant_tsr)
Out[6]:
In [7]:
tf.ones_like(constant_tsr)
Out[7]:
In [8]:
tf.linspace(0.0, 1.0, 3)  # 3 evenly spaced values, both endpoints included: [0.0, 0.5, 1.0]
Out[8]:
In [9]:
tf.range(6, 15, 3)  # start 6, limit 15 (exclusive), delta 3: [6, 9, 12]
Out[9]:
In [10]:
tf.random_uniform([3, 3], minval=0, maxval=1)  # uniform samples in [minval, maxval)
Out[10]:
In [11]:
tf.random_normal([3, 3], mean=0.0, stddev=1.0)
Out[11]:
In [12]:
# like random_normal, but samples more than two standard deviations from the mean are re-drawn
tf.truncated_normal([3, 3], mean=0.0, stddev=1.0)
Out[12]:
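A quick way to see how truncated_normal differs from random_normal is to draw many samples and check their range. A minimal sketch (it creates its own session, since sessions are only introduced a couple of cells below):

import numpy as np
check_sess = tf.Session()
samples = check_sess.run(tf.truncated_normal([10000], mean=0.0, stddev=1.0))
print(np.abs(samples).max())  # stays below 2.0, i.e. within two standard deviations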
In [13]:
# a variable must be explicitly initialized before it can be read
my_var = tf.Variable(tf.zeros([2, 3]))
sess = tf.Session()
initialize_op = tf.global_variables_initializer()
sess.run(initialize_op)
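Once the initializer has run, the variable's value can be read back through the same session. A quick check, reusing sess and my_var from the cell above:

print(sess.run(my_var))  # the 2x3 block of zeros used to initialize the variable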
In [30]:
import numpy as np
sess = tf.Session()
x = tf.placeholder(tf.float32, shape=[2, 2])  # data is supplied at run time via feed_dict
y = tf.identity(x)                            # pass the input through unchanged
x_vals = np.random.rand(2, 2)
sess.run(y, feed_dict={x: x_vals})
Out[30]:
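A placeholder can also leave the leading batch dimension unspecified, so the same graph accepts batches of any size. A small sketch in the same style as the cell above, reusing sess and np:

x_batch = tf.placeholder(tf.float32, shape=[None, 2])  # None leaves the batch size open
sess.run(tf.identity(x_batch), feed_dict={x_batch: np.random.rand(5, 2)})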
In [31]:
import numpy as np
import tensorflow as tf
sess = tf.Session()
In [38]:
identity_matrix = tf.diag([1.0, 1.0, 1.0])
print(identity_matrix)            # prints the symbolic Tensor, not its value
print(sess.run(identity_matrix))  # prints the evaluated 3x3 identity matrix
In [36]:
A = tf.truncated_normal([2, 3])   # 2x3 random matrix
B = tf.fill([2, 3], 5.0)          # 2x3 matrix filled with 5.0
C = tf.random_uniform([3, 2])     # 3x2 random matrix
D = tf.convert_to_tensor(np.array([[1., 2., 3.],
                                   [-3., -7., -1.],
                                   [0., 5., -2.]]))  # 3x3 matrix from a NumPy array
In [46]:
print(sess.run(B))
In [43]:
print(sess.run(A + B))
In [44]:
print(sess.run(B - B))
In [45]:
print(sess.run(tf.matmul(B, identity_matrix)))
In [47]:
sess.run(C)
Out[47]:
In [48]:
sess.run(tf.transpose(C))
Out[48]:
In [49]:
sess.run(tf.matrix_determinant(D))
Out[49]:
In [50]:
sess.run(tf.matrix_inverse(D))
Out[50]:
In [53]:
# D times its inverse should be (numerically close to) the identity matrix
aaa = tf.matmul(D, tf.matrix_inverse(D))
aaa
Out[53]:
In [54]:
sess.run(aaa)
Out[54]:
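The product is only the identity up to floating-point error. A minimal check with NumPy, reusing aaa from above:

print(np.allclose(sess.run(aaa), np.eye(3)))  # should print True if the inversion is numerically sound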
In [55]:
sess.run(tf.cholesky(identity_matrix))
Out[55]:
In [56]:
# returns (eigenvalues, eigenvectors); note that self_adjoint_eig assumes a symmetric (self-adjoint) matrix
sess.run(tf.self_adjoint_eig(D))
Out[56]:
In [57]:
sess.run(tf.div(3, 4))  # integer inputs: truncated division, returns 0
Out[57]:
In [58]:
sess.run(tf.truediv(3, 4))  # casts to floating point first, returns 0.75
Out[58]:
In [59]:
sess.run(tf.div(3.0, 4.0))  # float inputs: ordinary division, returns 0.75
Out[59]:
In [60]:
sess.run(tf.floordiv(3.0, 4.0))  # rounds down to 0.0
Out[60]:
In [61]:
sess.run(tf.mod(22.0, 5.0))  # remainder: 2.0
Out[61]:
In [62]:
def custom_polynomial(value):
    # computes 3x^2 - x + 10
    return tf.subtract(3 * tf.square(value), value) + 10
In [67]:
sess.run(custom_polynomial(11))  # 3*11**2 - 11 + 10 = 362
Out[67]:
In [75]:
sess.run(tf.nn.relu([-3., 3., 10.]))  # max(0, x): [0., 3., 10.]
Out[75]:
In [76]:
sess.run(tf.nn.relu6([-3., 3., 10.]))  # min(max(0, x), 6): [0., 3., 6.]
Out[76]:
In [77]:
sess.run(tf.nn.sigmoid([-1., 0., 1.]))  # 1 / (1 + exp(-x)), output in (0, 1)
Out[77]:
In [78]:
sess.run(tf.nn.tanh([-1., 0., 1.]))  # hyperbolic tangent, output in (-1, 1)
Out[78]:
In [79]:
sess.run(tf.nn.softsign([-1., 0., 1.]))  # x / (|x| + 1): [-0.5, 0., 0.5]
Out[79]:
In [91]:
import matplotlib.pyplot as plt
%matplotlib inline
xlist = np.linspace(-5, 5, 1000)
ss = sess.run(tf.nn.softsign(xlist))  # x / (|x| + 1)
sp = sess.run(tf.nn.softplus(xlist))  # ln(1 + exp(x))
el = sess.run(tf.nn.elu(xlist))       # x for x > 0, exp(x) - 1 otherwise
plt.plot(xlist, ss)
plt.plot(xlist, sp)
plt.plot(xlist, el)
Out[91]:
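The three curves are easier to tell apart with labels. A small variation of the cell above, using matplotlib's standard label/legend mechanism:

plt.plot(xlist, ss, label='softsign')
plt.plot(xlist, sp, label='softplus')
plt.plot(xlist, el, label='elu')
plt.legend(loc='upper left')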
In [87]:
sess.run(tf.nn.softplus([-1., 0., 1.]))  # ln(1 + exp(x)), a smooth approximation of ReLU
Out[87]:
In [93]:
from sklearn import datasets
iris = datasets.load_iris()
In [95]:
len(iris.data)  # 150 samples
Out[95]:
In [96]:
len(iris.target)  # 150 labels, one per sample
Out[96]:
In [98]:
iris.data[0]  # four features: sepal length/width and petal length/width
Out[98]:
In [99]:
iris.target[0]  # class label (0, 1, or 2)
Out[99]:
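The loaded Bunch object also carries human-readable names for the features and classes, which helps when reading individual rows:

print(iris.feature_names)  # sepal length/width and petal length/width, in cm
print(iris.target_names)   # 'setosa', 'versicolor', 'virginica'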
In [100]:
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)
In [101]:
len(mnist.train.images)  # 55,000 training images in this split
Out[101]:
In [102]:
len(mnist.test.images)  # 10,000 test images
Out[102]:
In [103]:
len(mnist.validation.images)  # 5,000 validation images
Out[103]:
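For training loops, the DataSet objects returned by read_data_sets provide a next_batch helper. A minimal sketch of pulling one mini-batch of size 100, reusing mnist from above:

batch_xs, batch_ys = mnist.train.next_batch(100)
print(batch_xs.shape, batch_ys.shape)  # (100, 784) flattened images, (100, 10) one-hot labels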