In [141]:
import numpy as np
from PIL import Image
import tensorflow as tf
import matplotlib.pyplot as plt
import os
%matplotlib inline
In [ ]:
def convert_to_alpha_list(path="../넘파이 연습/temp.png", size=(28, 28)):
    """Load an image, resize it, extract the alpha channel, and scale to [0, 1].

    Parameters
    ----------
    path : str
        Image file to load (default kept for backward compatibility).
    size : tuple of int
        Target (width, height) for the resize; default 28x28 for MNIST.

    Returns
    -------
    np.ndarray of shape (1, width*height), float64 values in [0, 1],
    ready to feed to the MNIST classifier.
    """
    # "raw"/"A" extracts only the alpha band as raw bytes.
    pixels = Image.open(path).resize(size).tobytes("raw", "A")
    # Vectorized replacement for the per-byte Python loop: uint8 -> float in [0, 1].
    scaled = np.frombuffer(pixels, dtype=np.uint8) / 255.0
    # reshape(1, -1) generalizes the old hard-coded (1, 784) to any size.
    return scaled.reshape(1, -1)
In [ ]:
temp = convert_to_alpha_list()
temp.shape
In [ ]:
print(convert_to_alpha_list()[:,3:10])
In [ ]:
def predict_to_number(img_arr):
    """Rebuild the 784-256-256-10 MLP graph, restore trained weights from
    ./chkp_save2/mnist, and return the argmax digit prediction for img_arr.

    img_arr: array of shape (N, 784) with values scaled to [0, 1]
             (the output format of convert_to_alpha_list).
    Returns: np.ndarray of predicted class indices, one per input row.
    """
    # TF1-style input placeholders. Y is declared but never fed or used below
    # (inference only) -- presumably kept to mirror the training script.
    X = tf.placeholder(dtype=tf.float32, shape=[None, 784])
    Y = tf.placeholder(dtype=tf.float32, shape=[None, 10])
    # Hidden layer 1: 784 -> 256, ReLU. Variable names ("w1val" etc.) must
    # match the names used at save time so the Saver can restore them.
    W1 = tf.Variable(tf.random_normal(shape=[784, 256], stddev=0.01), name="w1val")
    L1 = tf.nn.relu(tf.matmul(X, W1))
    # Hidden layer 2: 256 -> 256, ReLU.
    W2 = tf.Variable(tf.random_normal(shape=[256, 256], stddev=0.01), name="w2val")
    L2 = tf.nn.relu(tf.matmul(L1, W2))
    # Output layer: 256 -> 10 raw logits (softmax unnecessary for argmax).
    W3 = tf.Variable(tf.random_normal([256, 10], stddev=0.01), name="w3val")
    model = tf.matmul(L2, W3)
    # Restore only the weight variables; the random initial values above are
    # overwritten by the checkpoint on restore.
    param_list = [W1, W2, W3]
    saver = tf.train.Saver(param_list)
    with tf.Session() as sess:
        saver.restore(sess, "./chkp_save2/mnist")
        predict = sess.run([ model ], feed_dict = {X : img_arr})
        # Fetch list adds a leading axis: predict has shape (1, N, 10).
        predict = np.array(predict)
        result = np.argmax(predict[0], axis=1)
        print("result : ", result)
    return result
In [ ]:
result = predict_to_number(convert_to_alpha_list())
In [ ]:
type(result)
In [ ]:
type(result[0])
In [ ]:
type(str(result[0]))
In [ ]:
str(result[0])
In [ ]:
1014460 / 1024
In [ ]:
os.getcwd()
In [ ]:
img = Image.open("../넘파이 연습/temp28x28.png")
In [ ]:
img
In [ ]:
b = img.tobytes("raw","A")
In [ ]:
bb =np.array([i for i in b])
In [ ]:
len(b)
In [ ]:
bb.shape
In [ ]:
b = bb.reshape(28,28)
In [ ]:
for i in range(b.shape[0]):
for j in range(b.shape[1]):
print(b[i,j], end='')
print()
In [ ]:
a = tf.constant(dtype=tf.int32, value=1)  # input node
In [ ]:
b = tf.constant(dtype=tf.int32, value=2)  # input node
In [ ]:
c = a + b
In [ ]:
with tf.Session() as sess:
    c_val = sess.run(c)
    print(c_val)
In [ ]:
a = tf.placeholder(dtype=tf.int32, shape=[1,])
In [ ]:
b = tf.placeholder(dtype=tf.int32, shape=[1,])
In [ ]:
c = tf.multiply(a,b)
In [ ]:
d = tf.add(a,b)
In [ ]:
e = tf.add(c,d)
In [ ]:
with tf.Session() as sess:
print(sess.run(e, feed_dict={a:[5], b:[3]}))
In [ ]:
k = tf.placeholder(dtype=tf.int32)  # if you omit shape, it seems to just be a scalar
In [ ]:
with tf.Session() as sess:
    print(sess.run(k, feed_dict={k:2}))
In [ ]:
with tf.Session() as sess:
    print(sess.run(tf.add(1,2)))  # case using only an add node, without any input nodes
In [ ]:
a = tf.constant(value=5, dtype=tf.int32, name = "input_a")
b = tf.constant(value=3, dtype=tf.int32, name = "input_b")
In [ ]:
c = tf.multiply(a,b, name="mul_c")
d = tf.add(a,b, name="add_d")
e = tf.add(c,d, name="add_e")
In [ ]:
with tf.Session() as sess:
print(sess.run(e))
writer = tf.summary.FileWriter("./my_graph", sess.graph)
In [ ]:
np.array([2,3]).shape
In [2]:
g = tf.Graph()  # you can also create and use your own graph directly
In [7]:
with g.as_default():
    abc = tf.add(2,3)
In [12]:
with tf.Session(graph=g) as sess:
    print(sess.run(abc))
In [13]:
g2 = tf.Graph()
In [14]:
with g2.as_default():
    temp = tf.add(4,4)
    with tf.Session() as sess:  # this way you don't have to specify the graph explicitly
        print(sess.run(temp))
In [16]:
def step_function(x):
    """Heaviside step function: 1 where x > 0, else 0 (elementwise).

    Uses the builtin ``int`` as the dtype: the ``np.int`` alias was
    deprecated in NumPy 1.20 and removed in 1.24, so the original
    ``y.astype(np.int)`` raises AttributeError on current NumPy.
    """
    y = x > 0
    return y.astype(int)
In [19]:
a = np.array([1,2,3])
In [21]:
aa = a > 2
In [22]:
aa.astype(np.int)
Out[22]:
In [23]:
aa.astype(np.float)
Out[23]:
In [24]:
aa.astype(np.bool)
Out[24]:
In [25]:
np.array([1,0,1]).astype(np.bool)
Out[25]:
In [31]:
def step_function(x):
    """Heaviside step as a one-liner: 1 where x > 0, else 0 (elementwise).

    NOTE: this redefines the earlier step_function in the same notebook.
    The builtin ``int`` replaces ``np.int``, which was removed in NumPy 1.24.
    """
    return np.array(x > 0, dtype=int)
In [34]:
step_function(np.array([1,2,-3]))
Out[34]:
In [35]:
x = np.arange(-5.0,5.0,0.1)
In [36]:
y = step_function(x)
In [37]:
plt.plot(x,y)
plt.ylim(-0.1, 1.1)
plt.show()
In [38]:
type(x)
Out[38]:
In [39]:
type(np.array([1,2,3]))
Out[39]:
In [123]:
def sigmoid(x):
    """Logistic sigmoid 1 / (1 + e^(-x)), applied elementwise.

    Works for scalars and NumPy arrays; output lies in (0, 1).
    Very large negative inputs may emit an overflow warning from np.exp
    but still return the correct limit value 0.0.
    """
    exp_neg = np.exp(-x)
    return 1.0 / (1.0 + exp_neg)
In [41]:
sigmoid(1)
Out[41]:
In [42]:
sigmoid(100)
Out[42]:
In [43]:
sigmoid(1000)
Out[43]:
In [44]:
sigmoid(10)
Out[44]:
In [45]:
sigmoid(20)
Out[45]:
In [46]:
sigmoid(50)
Out[46]:
In [47]:
sigmoid(30)
Out[47]:
In [48]:
sigmoid(40)
Out[48]:
In [49]:
sigmoid(33)
Out[49]:
In [50]:
sigmoid(35)
Out[50]:
In [51]:
sigmoid(38)
Out[51]:
In [52]:
sigmoid(36)
Out[52]:
In [53]:
sigmoid(37)
Out[53]:
In [54]:
sigmoid(-30)
Out[54]:
In [55]:
sigmoid(-40)
Out[55]:
In [62]:
sigmoid(-500)
Out[62]:
In [63]:
np.exp([1,2])
Out[63]:
In [64]:
np.exp(np.array([1,2,3]))
Out[64]:
In [68]:
np.exp(-np.array([1,2]))
Out[68]:
In [69]:
-np.array([1,2])
Out[69]:
In [74]:
np.arange(1,10)
Out[74]:
In [75]:
np.arange(1,10,0.1)
Out[75]:
In [81]:
x = np.arange(-10.0,10.0,0.1)
In [82]:
y = sigmoid(x)
In [83]:
plt.plot(x,y)
plt.ylim(-0.1,1.1)
plt.show()
In [84]:
def relu(x):
    """Rectified linear unit: elementwise max(0, x).

    Accepts scalars, lists, or NumPy arrays; returns a NumPy result.
    """
    rectified = np.maximum(x, 0)
    return rectified
In [85]:
relu([1,2,-1])
Out[85]:
In [88]:
A = np.array([1,2,3,4]) #1차원 텐서
print(A)
In [89]:
np.ndim(A)
Out[89]:
In [90]:
A.shape
Out[90]:
In [92]:
A.shape[0]
Out[92]:
In [93]:
type(A.shape)
Out[93]:
In [96]:
np.ndim(np.array(1)) #0차원 텐서
Out[96]:
In [97]:
A = np.array([[1,2],[3,4]])
A.shape
Out[97]:
In [98]:
B = np.array([[5,6],[7,8]])
B.shape
Out[98]:
In [99]:
np.dot(A,B) #벡터의 내적
Out[99]:
In [101]:
A = np.array([[1,2],[3,4],[5,6]])
A.shape
Out[101]:
In [102]:
B = np.array([7,8])
B.shape
Out[102]:
In [103]:
np.dot(A,B)
Out[103]:
In [104]:
X = np.array([1,2])
X.shape
Out[104]:
In [110]:
W = np.array([[1,3,5],
[2,4,6]])
In [111]:
W.shape
Out[111]:
In [112]:
Y = np.dot(X,W)
In [113]:
Y
Out[113]:
In [114]:
X = np.array([1.0, 0.5])
In [115]:
W1 = np.array([[0.1,0.3,0.5],
[0.2,0.4,0.6]])
In [116]:
B1 = np.array([0.1,0.2,0.3])
In [117]:
print(W1.shape)
In [118]:
print(X.shape)
print(B1.shape)
In [119]:
A1 = np.dot(X, W1) + B1
In [120]:
print(A1)
In [121]:
A1.shape
Out[121]:
In [122]:
np.ndim(A1)
Out[122]:
In [124]:
Z1 = sigmoid(A1)
In [125]:
print(Z1)
In [126]:
W2 = np.array([[0.1,0.4],
[0.2,0.5],
[0.3,0.6]])
In [127]:
B2 = np.array([0.1,0.2])
In [128]:
print(Z1.shape)
print(W2.shape)
print(B2.shape)
In [129]:
A2 = np.dot(Z1,W2) + B2
In [130]:
print(A2)
In [131]:
Z2 = sigmoid(A2)
In [132]:
print(Z2)
In [133]:
W3 = np.array([[0.1,0.3],
[0.2,0.4]])
In [134]:
B3 = np.array([0.1,0.2])
In [135]:
A3 = np.dot(Z2,W3) + B3
In [139]:
Y = A3
In [137]:
print(Y)
In [140]:
a = np.array([0.3,2.9,4.0])
In [142]:
exp_a = np.exp(a)
In [143]:
print(exp_a)
In [144]:
sum_exp_a = np.sum(exp_a)
In [145]:
print(sum_exp_a)
In [146]:
y = exp_a / sum_exp_a
In [147]:
y
Out[147]:
In [167]:
def softmax(a):
    """Softmax of a 1-D array: exp(a_i) / sum_j exp(a_j).

    Subtracting the maximum before exponentiating leaves the result
    unchanged (softmax is shift-invariant) while preventing overflow
    for large inputs.
    """
    shifted = a - np.max(a)      # overflow countermeasure
    exps = np.exp(shifted)
    return exps / np.sum(exps)
In [168]:
softmax(a)
Out[168]:
In [169]:
a = np.array([1010,1000,990])
In [170]:
softmax(a)
Out[170]:
In [153]:
c = np.max(a)
In [154]:
c
Out[154]:
In [156]:
a - c
Out[156]:
In [157]:
# Softmax computed on values each reduced by 1 gives the same result,
# because softmax depends only on the ratios (it is shift-invariant).
b = np.array([3,2,5])
c = np.array([2,1,4])
In [158]:
softmax(b)
Out[158]:
In [159]:
softmax(c)
Out[159]:
In [160]:
softmax(a-c)
Out[160]:
In [161]:
d = a-c
In [162]:
softmax(d)
Out[162]:
In [163]:
c = np.max(a)
In [164]:
c
Out[164]:
In [165]:
d = a-c
In [171]:
softmax(d)
Out[171]:
In [172]:
a = np.array([0.3,2.9,4.0])
In [173]:
y = softmax(a)
In [174]:
print(y)
In [175]:
np.sum(y)
Out[175]:
In [ ]: