In [2]:
import tensorflow as tf
import math

In [3]:
interactive_session=tf.InteractiveSession()

In [21]:
y=[[5.,4.,1.],[1.,9.,20.],[3.,4.,3.]]

In [22]:
y_=[[1.,0.,0.],[1.,0.,0.],[1.,0.,0.]]

In [23]:
cross_entropy=tf.nn.softmax_cross_entropy_with_logits(logits=y,labels=y_)

In [24]:
interactive_session.run(cross_entropy)


Out[24]:
array([  0.32656264,  19.00001717,   1.55144465], dtype=float32)

In [35]:
softmax=tf.nn.softmax(y)

In [36]:
interactive_session.run(softmax)


Out[36]:
array([[  7.21399188e-01,   2.65387923e-01,   1.32128876e-02],
       [  5.60270275e-09,   1.67014223e-05,   9.99983311e-01],
       [  2.11941570e-01,   5.76116920e-01,   2.11941570e-01]], dtype=float32)

In [38]:
-1*math.log(0.721399188)


Out[38]:
0.3265626361024641
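
A quick cross-check (a minimal NumPy sketch; numpy is not imported above, so the import is included): computing the softmax and the cross entropy by hand reproduces all three values in Out[24].

In [ ]:
import numpy as np

logits=np.array([[5.,4.,1.],[1.,9.,20.],[3.,4.,3.]])
labels=np.array([[1.,0.,0.],[1.,0.,0.],[1.,0.,0.]])

# row-wise softmax, then cross entropy = -sum(label * log(prob)) per row
probs=np.exp(logits)/np.exp(logits).sum(axis=1,keepdims=True)
print(-(labels*np.log(probs)).sum(axis=1))
# expect roughly [ 0.3266  19.      1.5514] -- float64, so the last
# digits differ slightly from the float32 values in Out[24]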

The following builds a small model on top of this softmax machinery. At first the optimizer seemed to take no action; the cause was the loss function. The model output y below is a single logit, and the softmax of a single logit is identically 1, so the softmax-based losses are constant and give a zero gradient (illustrated in the next cell). Since this is really a regression problem, a squared-error loss is used instead.
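
To see the stall concretely, here is a minimal added cell: a softmax over a single logit always returns 1, so any cross-entropy loss built on it is constant.

In [ ]:
# y=tf.matmul(x,weight) below has shape (1,1): one logit. The softmax of
# a single logit is exp(z)/exp(z) = 1 for every z, so -y_*log(softmax(y))
# is 0 no matter what weight is, and the optimizer gets a zero gradient.
single_logit=tf.constant([[5.3]])
interactive_session.run(tf.nn.softmax(single_logit))
# -> array([[ 1.]], dtype=float32)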


In [4]:
x=tf.constant([[3.0,4.0,5.0]])

In [5]:
#weight=tf.Variable(tf.random_normal([3,1],stddev=1,seed=1))
weight=tf.Variable([[0.6],[0.5],[0.3]])

In [6]:
y_=tf.constant([1.0])

In [7]:
y=tf.matmul(x,weight)

In [8]:
y_softmax=tf.nn.softmax(y)

In [14]:
# the softmax-based losses below stall: y is a single logit, so y_softmax is constant
#loss_function=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y,labels=y_))
#loss_function=-tf.reduce_mean(y_*tf.log(tf.clip_by_value(y_softmax,1e-10,1.0)))
loss_function=tf.reduce_mean(tf.square(y_-y))
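
A quick hand check of the starting point (plain Python, nothing TF-specific): the initial weights give a prediction far from the target, so the squared-error loss is comfortably nonzero and Adam has a gradient to follow.

In [ ]:
# with the initial weights, y = 3*0.6 + 4*0.5 + 5*0.3 = 5.3,
# so the loss starts near (1.0 - 5.3)**2 = 18.49; after one Adam
# step the first printed loss below is already down to 9.93
(1.0-(3*0.6+4*0.5+5*0.3))**2
# -> 18.49 (up to float rounding)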

In [15]:
optim = tf.train.AdamOptimizer(0.001)

In [16]:
train_step=optim.minimize(loss_function)

In [17]:
init_op=tf.global_variables_initializer()

In [18]:
interactive_session.run(init_op)

In [20]:
for i in range(1000):
    interactive_session.run(train_step)
    loss=interactive_session.run(loss_function)
    w=interactive_session.run(weight)
    if i%100 == 0:
        print(w)
        print(loss)


[[ 0.50426698]
 [ 0.40426725]
 [ 0.20426714]]
9.9301
[[ 0.4249652 ]
 [ 0.32496548]
 [ 0.12496553]]
4.83817
[[ 0.362335  ]
 [ 0.26233527]
 [ 0.06233538]]
2.09677
[[ 0.3160443 ]
 [ 0.21604462]
 [ 0.01604476]]
0.796619
[[ 0.28435239]
 [ 0.18435271]
 [-0.01564718]]
0.262382
[[ 0.26438984]
 [ 0.16439024]
 [-0.03560972]]
0.0743554
[[ 0.25285834]
 [ 0.15285876]
 [-0.04714119]]
0.0180376
[[ 0.24675588]
 [ 0.14675631]
 [-0.05324365]]
0.00373012
[[ 0.2437984 ]
 [ 0.14379883]
 [-0.05620107]]
0.000654601
[[ 0.24248682]
 [ 0.14248724]
 [-0.05751257]]
9.69549e-05
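
As a final sanity check (a small added cell; the expected numbers are read off the last printed step), the prediction has converged to just above the target y_=1.0.

In [ ]:
# 3*0.2425 + 4*0.1425 + 5*(-0.0575) is roughly 1.0098, and
# (1.0 - 1.0098)**2 is roughly 9.7e-05, matching the final loss above
interactive_session.run(y)
# -> roughly array([[ 1.0098]], dtype=float32)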
