In [54]:
# Imports: this notebook uses the TensorFlow 1.x graph/session API
# (the version print below shows 1.2.0).
import tensorflow as tf
print(tf.__version__)
import numpy as np


1.2.0

In [55]:
# A constant op only builds a graph node; printing it shows the Tensor handle
# (name/shape/dtype), not the string — values require Session.run in TF 1.x.
hello = tf.constant('Hello, World')
print(hello)


Tensor("Const_8:0", shape=(), dtype=string)

In [56]:
# Two scalar constants and an add node; `c` is a symbolic Tensor,
# so printing it shows graph metadata, not the value 30.
a = tf.constant(10)
b = tf.constant(20)
c = a+b
print(c)


Tensor("add_5:0", shape=(), dtype=int32)

In [57]:
# Evaluate the graph node `c` inside a session to obtain its concrete value.
with tf.Session() as sess:
    print(sess.run(c))


30

In [58]:
# tf.add(a, b) builds an equivalent add node explicitly; same result as a+b.
with tf.Session() as sess:
    print(sess.run(tf.add(a,b)))


30

In [59]:
# A single run() call can fetch several tensors at once; it returns their
# values in the same order as the fetch list.
with tf.Session() as sess:
    aa,bb,cc = sess.run([a,b,c])
    print(aa)


10

In [60]:
# Below, a placeholder is created with an undetermined number of rows (None)
# and 3 columns; the row count is fixed only when data is fed at run time.
X = tf.placeholder(tf.float32, [None, 3])
print(X)


Tensor("Placeholder_7:0", shape=(?, 3), dtype=float32)

In [61]:
with tf.Session() as sess:
    # The first X names the graph node to evaluate; the X used as the
    # feed_dict key identifies the placeholder in that graph to fill.
    print(sess.run(X, feed_dict={X:[[1,2,3]]}))


[[ 1.  2.  3.]]

In [62]:
# Sample input: two rows of three features, matching placeholder shape (?, 3).
x_data = [[1,2,3],[4,5,6]]

In [63]:
# tf.random_normal initializes the variables with values drawn at random from
# a normal distribution.  W: (3, 2) weights; b: (2, 1) bias.
# NOTE(review): a (2, 1) bias added to the (rows, 2) matmul output relies on
# broadcasting that only works when rows == 2 (as x_data has) — confirm intent.
W = tf.Variable(tf.random_normal([3,2]))
b = tf.Variable(tf.random_normal([2,1]))

In [64]:
# Affine transform: (rows, 3) x (3, 2) -> (rows, 2), plus bias b.
expr = tf.matmul(X,W) + b

In [65]:
with tf.Session() as sess:
    # Variables must be initialized before they can be read.
    sess.run(tf.global_variables_initializer())
    print(x_data)
    print(sess.run(W))
    print(sess.run(b))
    # Feed the 2x3 input through the affine expression.
    print(sess.run(expr, feed_dict={X:x_data}))


[[1, 2, 3], [4, 5, 6]]
[[-1.74057806 -0.66880399]
 [ 0.02417612 -1.1236856 ]
 [-0.2459085  -0.02459022]]
[[ 0.53283751]
 [ 0.67303425]]
[[-1.89711368 -2.4571085 ]
 [-7.6438489  -7.76815081]]

In [66]:
# Broadcasting demo: a (2, 2) matrix of 2s plus a (2, 1) column vector;
# the column vector is broadcast across both columns of mat_a.
mat_a = tf.constant(value=[2,2,2,2],dtype=tf.float32, shape=[2,2])
mat_b = tf.constant(value=[1,2], dtype=tf.float32, shape=[2,1])
with tf.Session() as sess:
    print(sess.run(mat_a))
    print(sess.run(mat_b))
    print(sess.run(mat_a+mat_b))


[[ 2.  2.]
 [ 2.  2.]]
[[ 1.]
 [ 2.]]
[[ 3.  3.]
 [ 4.  4.]]

In [67]:
# Training data for the linear-regression example below (targets equal inputs).
x_data = [1,2,3]
y_data = [1,2,3]

In [68]:
with tf.Session() as sess:
    # Build the sampling op ONCE.  The previous code created a new
    # tf.random_uniform node inside the loop, adding 10 ops to the default
    # graph every time the cell ran (graph bloat / memory leak in TF 1.x).
    rand_op = tf.random_uniform(shape=[1], minval=0, maxval=9, dtype=tf.int32)
    for i in range(10):
        # Each sess.run re-samples the same op, so the values still vary.
        print(sess.run(rand_op))


[0]
[5]
[2]
[2]
[1]
[5]
[6]
[8]
[7]
[2]

In [69]:
# Model parameters for linear regression, initialized uniformly in [-1, 1).
W = tf.Variable(tf.random_uniform(shape=[1], minval=-1.0, maxval=1.0))
b = tf.Variable(tf.random_uniform(shape=[1], minval=-1.0, maxval=1.0))

In [70]:
# Named scalar placeholders for the input feature and the target value.
# Note: this rebinds the Python name X used by the earlier (?, 3) placeholder.
X = tf.placeholder(dtype=tf.float32, name = "X")
Y = tf.placeholder(dtype=tf.float32, name = "Y")

In [71]:
# Linear model: y_hat = W * x + b.
hypothesis = W * X + b

In [72]:
# Mean squared error between predictions and targets.
cost = tf.reduce_mean(tf.square(hypothesis - Y))

In [73]:
# Plain gradient descent; train_op applies one parameter update per run.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
train_op = optimizer.minimize(cost)

In [74]:
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    for step in range(100):
        # One optimization step; the current cost is fetched in the same call.
        _, cost_val = sess.run([train_op, cost], feed_dict={X:x_data, Y:y_data})

        # NOTE(review): sess.run(W) / sess.run(b) are two extra graph
        # executions per step — harmless for this demo, but they could be
        # appended to the fetch list above.
        print(step, cost_val, sess.run(W), sess.run(b))

    # Predict for an unseen input; with y = x, the ideal output is 5.
    print("X: 5, Y: ", sess.run(hypothesis, feed_dict={X:5}))


0 13.5516 [ 1.08028972] [ 0.24168092]
1 0.166111 [ 0.90868032] [ 0.16122887]
2 0.00601794 [ 0.92942047] [ 0.16551097]
3 0.003914 [ 0.92909032] [ 0.1606406]
4 0.00370636 [ 0.93101639] [ 0.15687634]
5 0.00353005 [ 0.93265057] [ 0.15309452]
6 0.00336236 [ 0.93427223] [ 0.14941539]
7 0.00320265 [ 0.93585199] [ 0.14582342]
8 0.00305052 [ 0.93739408] [ 0.14231794]
9 0.00290562 [ 0.9388991] [ 0.13889672]
10 0.0027676 [ 0.94036788] [ 0.13555773]
11 0.00263614 [ 0.94180143] [ 0.13229904]
12 0.00251093 [ 0.94320053] [ 0.12911868]
13 0.00239165 [ 0.94456589] [ 0.12601474]
14 0.00227804 [ 0.94589847] [ 0.12298543]
15 0.00216984 [ 0.94719911] [ 0.12002896]
16 0.00206677 [ 0.94846833] [ 0.11714352]
17 0.00196859 [ 0.94970709] [ 0.11432746]
18 0.00187509 [ 0.95091611] [ 0.11157912]
19 0.00178601 [ 0.9520961] [ 0.10889684]
20 0.00170118 [ 0.95324767] [ 0.10627904]
21 0.00162037 [ 0.95437151] [ 0.10372414]
22 0.0015434 [ 0.95546842] [ 0.1012307]
23 0.00147009 [ 0.95653898] [ 0.09879719]
24 0.00140026 [ 0.95758373] [ 0.09642217]
25 0.00133374 [ 0.95860338] [ 0.09410425]
26 0.00127039 [ 0.95959848] [ 0.09184204]
27 0.00121005 [ 0.9605698] [ 0.08963425]
28 0.00115257 [ 0.96151763] [ 0.08747949]
29 0.00109782 [ 0.9624427] [ 0.08537653]
30 0.00104568 [ 0.96334553] [ 0.08332413]
31 0.000996005 [ 0.96422666] [ 0.08132108]
32 0.000948695 [ 0.9650867] [ 0.07936621]
33 0.00090363 [ 0.96592599] [ 0.07745828]
34 0.000860705 [ 0.96674508] [ 0.07559622]
35 0.000819823 [ 0.9675445] [ 0.07377894]
36 0.000780882 [ 0.96832472] [ 0.07200536]
37 0.000743788 [ 0.96908617] [ 0.07027441]
38 0.000708457 [ 0.96982932] [ 0.06858507]
39 0.000674804 [ 0.97055459] [ 0.06693631]
40 0.00064275 [ 0.97126245] [ 0.06532723]
41 0.000612219 [ 0.97195327] [ 0.06375681]
42 0.000583142 [ 0.97262758] [ 0.06222416]
43 0.000555442 [ 0.9732855] [ 0.06072829]
44 0.000529054 [ 0.97392774] [ 0.05926843]
45 0.000503926 [ 0.97455448] [ 0.05784364]
46 0.000479988 [ 0.9751662] [ 0.05645313]
47 0.000457189 [ 0.97576314] [ 0.05509602]
48 0.000435471 [ 0.97634584] [ 0.05377156]
49 0.000414787 [ 0.97691441] [ 0.05247891]
50 0.000395083 [ 0.97746938] [ 0.05121735]
51 0.000376319 [ 0.97801101] [ 0.04998613]
52 0.000358443 [ 0.97853965] [ 0.04878451]
53 0.000341417 [ 0.97905552] [ 0.04761174]
54 0.000325196 [ 0.979559] [ 0.04646718]
55 0.00030975 [ 0.98005038] [ 0.04535015]
56 0.000295037 [ 0.98052996] [ 0.04425996]
57 0.000281023 [ 0.98099804] [ 0.04319599]
58 0.000267674 [ 0.98145479] [ 0.04215758]
59 0.000254959 [ 0.98190063] [ 0.04114415]
60 0.00024285 [ 0.98233569] [ 0.04015506]
61 0.000231311 [ 0.98276031] [ 0.03918975]
62 0.000220325 [ 0.9831748] [ 0.03824768]
63 0.000209861 [ 0.98357928] [ 0.03732824]
64 0.000199891 [ 0.98397398] [ 0.03643087]
65 0.000190396 [ 0.98435926] [ 0.03555511]
66 0.000181353 [ 0.98473525] [ 0.03470038]
67 0.000172738 [ 0.98510218] [ 0.0338662]
68 0.000164533 [ 0.98546034] [ 0.0330521]
69 0.000156717 [ 0.9858098] [ 0.03225753]
70 0.000149274 [ 0.98615098] [ 0.03148209]
71 0.000142183 [ 0.98648393] [ 0.0307253]
72 0.000135429 [ 0.98680884] [ 0.02998668]
73 0.000128996 [ 0.98712593] [ 0.02926581]
74 0.000122868 [ 0.9874354] [ 0.02856227]
75 0.000117031 [ 0.98773742] [ 0.02787565]
76 0.000111473 [ 0.98803222] [ 0.02720555]
77 0.000106179 [ 0.98831999] [ 0.02655157]
78 0.000101134 [ 0.98860073] [ 0.02591326]
79 9.63302e-05 [ 0.98887473] [ 0.02529032]
80 9.17547e-05 [ 0.98914218] [ 0.02468236]
81 8.73969e-05 [ 0.98940325] [ 0.02408903]
82 8.32442e-05 [ 0.98965794] [ 0.02350991]
83 7.92899e-05 [ 0.98990655] [ 0.02294475]
84 7.55249e-05 [ 0.9901492] [ 0.02239319]
85 7.19366e-05 [ 0.99038601] [ 0.02185488]
86 6.85198e-05 [ 0.9906171] [ 0.0213295]
87 6.5265e-05 [ 0.9908427] [ 0.02081676]
88 6.21656e-05 [ 0.99106282] [ 0.02031633]
89 5.92118e-05 [ 0.99127769] [ 0.01982794]
90 5.63998e-05 [ 0.99148738] [ 0.01935129]
91 5.37206e-05 [ 0.99169201] [ 0.01888609]
92 5.11694e-05 [ 0.99189168] [ 0.01843207]
93 4.87375e-05 [ 0.99208659] [ 0.01798897]
94 4.64235e-05 [ 0.99227685] [ 0.01755654]
95 4.42181e-05 [ 0.99246252] [ 0.01713451]
96 4.21184e-05 [ 0.99264377] [ 0.01672262]
97 4.01167e-05 [ 0.99282056] [ 0.01632059]
98 3.82109e-05 [ 0.99299312] [ 0.01592823]
99 3.63963e-05 [ 0.99316156] [ 0.01554533]
X: 5, Y:  [ 4.98135328]

In [75]:
# Feature matrix: [wings, fur] per sample — 1 if present, 0 if absent.
x_data = np.array([[0,0],
                   [1,0],
                   [1,1],
                   [0,0],
                   [0,0],
                   [0,1]])

In [76]:
# One-hot labels for three classes.
y_data = np.array([
        [1,0,0],  # other
        [0,1,0],  # mammal
        [0,0,1],  # bird
        [1,0,0],
        [1,0,0],
        [0,0,1]
    ])

In [77]:
# Placeholders for the classifier.  These rebind the Python names X and Y
# used by the regression cells above (new graph nodes, same names).
X = tf.placeholder(dtype=tf.float32)
Y = tf.placeholder(dtype=tf.float32)

In [78]:
# Weights mapping 2 features -> 3 classes, plus a zero-initialized bias.
W = tf.Variable(tf.random_uniform([2,3], -1.,1.))
b = tf.Variable(tf.zeros([3]))

In [79]:
# Peek at the initializer distributions themselves (fresh ops, not W and b).
with tf.Session() as sess:
    print(sess.run(tf.zeros([3])))
    print(sess.run(tf.random_uniform([2,3], -1.,1.)))


[ 0.  0.  0.]
[[-0.72813773  0.73657107  0.38934898]
 [-0.67828202 -0.0703969   0.17200136]]

In [80]:
# Single layer: affine transform followed by ReLU activation.
# L is deliberately rebound to the activated output.
L = tf.add(tf.matmul(X,W), b)
L = tf.nn.relu(L)

In [81]:
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Inspect only the matmul part (no bias, no ReLU) for the sample inputs.
    print(sess.run(tf.matmul(X,W),feed_dict={X:x_data}))


[[ 0.          0.          0.        ]
 [-0.18138576 -0.67487407  0.20770836]
 [-0.58908939  0.08517241  0.47319937]
 [ 0.          0.          0.        ]
 [ 0.          0.          0.        ]
 [-0.40770364  0.76004648  0.26549101]]

In [82]:
# Softmax turns the layer output into a per-class probability distribution.
model = tf.nn.softmax(L)

In [83]:
# Cross-entropy loss, averaged over the batch.
# NOTE(review): tf.log(model) yields -inf when a softmax output reaches 0;
# tf.nn.softmax_cross_entropy_with_logits on the pre-softmax values is the
# numerically stable form — worth switching if training ever diverges.
cost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(model), axis=1))

In [84]:
# IPython line magic: micro-benchmark a trivial expression (notebook-only syntax).
%timeit 3+5


100000000 loops, best of 3: 18.6 ns per loop

In [85]:
%%timeit
# IPython cell magic: times the whole cell body (must be the cell's first line).
a = [1,2,3]
a = [x+1 for x in a]


The slowest run took 7.99 times longer than the fastest. This could mean that an intermediate result is being cached.
1000000 loops, best of 3: 722 ns per loop

In [86]:
# Recreate the optimizer/train op for the classification cost defined above
# (rebinds the names used earlier for the regression example).
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(cost)

In [87]:
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(100):
        # One gradient step per iteration over the full (tiny) dataset.
        sess.run(train_op, feed_dict={X:x_data, Y:y_data})

        # Log the loss every 10 steps.
        if (step + 1) % 10 == 0:
            print(step + 1, sess.run(cost, feed_dict={X:x_data, Y:y_data}))


10 0.944255
20 0.938797
30 0.933455
40 0.92836
50 0.92338
60 0.918519
70 0.913822
80 0.90928
90 0.904872
100 0.900544

In [94]:
# Compare predicted vs. actual class indices and compute overall accuracy.
prediction = tf.argmax(model, axis=1)
target = tf.argmax(Y, axis=1)
with tf.Session() as sess:
    # NOTE(review): this fresh session re-runs the variable initializer, so
    # W and b are re-randomized and the parameters trained in the previous
    # cell are discarded — the accuracy below scores an UNTRAINED model.
    # To score the trained model, run this evaluation inside the training
    # session instead.
    sess.run(tf.global_variables_initializer())
    print('예측값 : ', sess.run(prediction, feed_dict = {X: x_data}))
    print('실제값 : ', sess.run(target, feed_dict = {Y:y_data}))
    is_correct = tf.equal(prediction, target)
    accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
    print('정확도: %.2f' % sess.run(accuracy * 100, feed_dict={X: x_data, Y: y_data}))


예측값 :  [0 2 2 0 0 2]
실제값 :  [0 1 2 0 0 2]
정확도: 83.33

In [ ]: