Linear Regression


In [1]:
import tensorflow as tf



Variables

tf.Variable nodes hold the trainable parameters W and b, while the tf.placeholder x is fed with input data at run time; linear_model is the graph node that computes W * x + b.


In [2]:
W = tf.Variable([0.3], dtype=tf.float32)   # slope, initialised to 0.3
b = tf.Variable([-0.3], dtype=tf.float32)  # intercept, initialised to -0.3
x = tf.placeholder(tf.float32)             # input, fed at run time
linear_model = W * x + b                   # prediction: y_hat = W*x + b

In [3]:
init = tf.global_variables_initializer()

In [4]:
sess = tf.Session()

In [5]:
sess.run(init)
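Nothing is computed while the graph is being built; W and b only receive their initial values once init is run inside the session.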

In [6]:
print(sess.run(linear_model, {x: [1, 2, 3, 4]}))


[0.         0.3        0.6        0.90000004]

Calculate initial loss

The loss used here is the sum of squared errors between the model's predictions and the target values y. Other important classes of metrics include (a short sketch of their textbook definitions follows the list):

  • Accuracy
  • Precision
  • Sensitivity (recall)
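These three are classification metrics rather than regression losses, so they don't apply directly to this example; the following NumPy sketch uses made-up binary labels purely for illustration:

import numpy as np

# Made-up binary labels and predictions, for illustration only.
y_true = np.array([1, 0, 1, 1, 0, 1])
y_pred = np.array([1, 0, 0, 1, 1, 1])

tp = np.sum((y_pred == 1) & (y_true == 1))  # true positives
fp = np.sum((y_pred == 1) & (y_true == 0))  # false positives
fn = np.sum((y_pred == 0) & (y_true == 1))  # false negatives

accuracy    = np.mean(y_pred == y_true)  # fraction predicted correctly
precision   = tp / (tp + fp)             # correct among predicted positives
sensitivity = tp / (tp + fn)             # correct among actual positives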

In [7]:
y = tf.placeholder(tf.float32)

In [8]:
squared_deltas = tf.square(linear_model - y)  # element-wise squared error
loss = tf.reduce_sum(squared_deltas)          # sum of squared errors

In [9]:
print(sess.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))


23.66
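This value checks out by hand: with W = 0.3 and b = -0.3 the predictions are [0, 0.3, 0.6, 0.9], the deltas against y = [0, -1, -2, -3] are [0, 1.3, 2.6, 3.9], and the sum of their squares is 0 + 1.69 + 6.76 + 15.21 = 23.66.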

Assign

A variable's value can be overwritten with tf.assign. For this data the exact solution is y = -x + 1, i.e. W = -1 and b = 1, so after the assignment the loss drops to zero.


In [10]:
fixW = tf.assign(W, [-1.])  # overwrite W with the optimal slope
fixb = tf.assign(b, [1.])   # overwrite b with the optimal intercept
sess.run([fixW, fixb])      # assignments only take effect when run


Out[10]:
[array([-1.], dtype=float32), array([1.], dtype=float32)]

In [11]:
print(sess.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))


0.0

Optimize

In practice we don't guess the optimal parameters; gradient descent finds them by repeatedly nudging W and b in the direction that reduces the loss.


In [12]:
optimizer = tf.train.GradientDescentOptimizer(0.01)  # learning rate 0.01
train = optimizer.minimize(loss)                     # one gradient-descent step per run
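Under the hood, minimize computes gradients and applies the update rule v <- v - lr * d(loss)/dv. As a rough sketch (not part of the original notebook), the train op is approximately equivalent to building the update by hand with tf.gradients:

# Hand-rolled equivalent of optimizer.minimize(loss), for intuition only.
learning_rate = 0.01
grad_W, grad_b = tf.gradients(loss, [W, b])      # d(loss)/dW, d(loss)/db
manual_train = tf.group(
    tf.assign(W, W - learning_rate * grad_W),    # W <- W - lr * dL/dW
    tf.assign(b, b - learning_rate * grad_b))    # b <- b - lr * dL/db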

In [13]:
sess.run(init)  # reset W and b to their initial values before training

In [14]:
for i in range(1000):
    sess.run(train, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]})
    if i % 100 == 0:
        print(sess.run([W, b]))
        print("loss:", sess.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))


[array([-0.21999997], dtype=float32), array([-0.45600003], dtype=float32)]
loss: 4.018144
[array([-0.84270465], dtype=float32), array([0.5375326], dtype=float32)]
loss: 0.14287975
[array([-0.9528499], dtype=float32), array([0.86137295], dtype=float32)]
loss: 0.012838208
[array([-0.98586655], dtype=float32), array([0.95844597], dtype=float32)]
loss: 0.0011535463
[array([-0.9957634], dtype=float32), array([0.98754394], dtype=float32)]
loss: 0.000103651124
[array([-0.9987301], dtype=float32), array([0.9962665], dtype=float32)]
loss: 9.312402e-06
[array([-0.99961936], dtype=float32), array([0.998881], dtype=float32)]
loss: 8.3645574e-07
[array([-0.9998859], dtype=float32), array([0.9996646], dtype=float32)]
loss: 7.514916e-08
[array([-0.9999658], dtype=float32), array([0.99989945], dtype=float32)]
loss: 6.753911e-09
[array([-0.9999897], dtype=float32), array([0.9999697], dtype=float32)]
loss: 6.12733e-10

TensorBoard

After 1000 steps the parameters have converged to W ≈ -1 and b ≈ 1. Histogram summaries and a FileWriter let us inspect the graph in TensorBoard.


In [15]:
tf.summary.histogram("W", W)
tf.summary.histogram("b", b)


Out[15]:
<tf.Tensor 'b:0' shape=() dtype=string>

In [16]:
writer = tf.summary.FileWriter("./my_graph_reg",sess.graph)
writer.close()
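Note that the two histogram summaries above are created but never evaluated, so the event file only contains the graph. A minimal sketch of how the summaries could be recorded during training (hypothetical; not how this notebook was actually run):

# Hypothetical: evaluate and record the merged summaries during training.
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter("./my_graph_reg", sess.graph)
for step in range(1000):
    _, summary = sess.run([train, merged], {x: [1, 2, 3, 4], y: [0, -1, -2, -3]})
    writer.add_summary(summary, step)  # one histogram point per step
writer.close()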

In [17]:
#sess.close()

In [20]:
!tensorboard --logdir=./my_graph_reg


W0401 01:03:26.420831 Reloader tf_logging.py:121] Found more than one graph event per run, or there was a metagraph containing a graph_def, as well as one or more graph events.  Overwriting the graph with the newest event.
W0401 01:03:26.421070 Reloader tf_logging.py:121] Found more than one metagraph event per run. Overwriting the metagraph with the newest event.
W0401 01:03:26.421443 Reloader tf_logging.py:121] Found more than one graph event per run, or there was a metagraph containing a graph_def, as well as one or more graph events.  Overwriting the graph with the newest event.
W0401 01:03:26.421630 Reloader tf_logging.py:121] Found more than one metagraph event per run. Overwriting the metagraph with the newest event.
TensorBoard 1.5.1 at http://Immortal:6006 (Press CTRL+C to quit)
^C

In [19]:
# !rm -rf ./my_graph_reg  # uncomment to clear stale event files and avoid the "more than one graph event" warnings above
