In [45]:
#unpool layer를 학습 가능하게 만듬

import tensorflow as tf
from PIL import Image
import numpy as np

#이미지, 상수들
learning_rate=5e-4
W1=192
H1=144
W15=384
H15=288
W2=960
H2=720
path="../04/"
pref1="144p/"
pref2="720p/"
suff1=".jpg"
suff2=".jpg"
train_num=1000
file_num=2#6#30
#batch_num=1000


#가중치 초기화 함수
def weight_variable(shape, name):
  initial = tf.truncated_normal(shape, stddev=0.00001)
  return tf.Variable(initial, name=name)
#절편 초기화 함수
def bias_variable(shape, name):
  initial = tf.constant(10, shape=shape)
  return tf.Variable(initial, name=name)
#2D 컨벌루션 실행
def conv2d(x, W):
  return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def getimage(idx):
    img_1=Image.open(path+pref1+str(idx).zfill(5)+suff1)
    array_1_720=np.array(img_1)[:, :]
    array_1=array_1.astype(np.float32)

    img_1_720 = img_1.resize((W2, H2), Image.BILINEAR)
    array_1_720=np.array(img_1_720)[:, :]
    array_1_720=array_1_720.astype(np.float32)

    img_2=Image.open(path+pref2+str(idx).zfill(5)+suff2)
    array_2=np.array(img_2)[:, :, 0:3]
    array_2=array_2.astype(np.float32)
    return array_1, array_1_720, array_2

def l_relu(x, alpha=0.):
    return tf.nn.relu(x)-alpha*tf.nn.relu(-x)

def asImage(tensor):
    result = tensor[0].astype(np.uint8)
    return Image.fromarray(result, 'RGB')

def showres(index, steps):
    test360, test_res, test720 = getimage(index)
    A=sess.run(y_res, feed_dict={x_image:[test360], x_image_720:[test_res], y_image:[test720]})
    result = A.astype(np.uint8)
#    Image.fromarray(img360, 'RGB').save('results/img360_'+str(steps)+'.jpg')
#    Image.fromarray(img720, 'RGB').save('results/img720_'+str(steps)+'.jpg')
    asImage(result).save('results/result_'+str(steps)+'.jpg')


#학습때 사용하는 변수들
F_1 = [0,-0.25,0,-0.25,1,-0.25,0,-0.25,0]
F = [ [k,k,k] for k in F_1]
Filter = tf.constant([F], shape = [3, 3, 3, 3])
x_image = tf.placeholder(np.float32, shape=[None, H1, W1, 3])
x_new = tf.abs(conv2d(x_image, Filter))
x_image_720 = tf.placeholder(np.float32, shape=[None, H2, W2, 3])

y_image = tf.placeholder(np.float32, shape=[None, H2, W2, 3])
#가중치, 절편, 결과
W_conv = weight_variable([10, 10, 3, 3], name='weight')
W_conv2= weight_variable([10, 10, 3, 3], name='weight')#####
#b_conv = bias_variable([7], name='bias')
#b_conv2= bias_variable([3], name='bias')

y_unpooling1 = l_relu(tf.image.resize_bilinear(x_new, [H15,W15]), alpha=0.5)
y_conv1 = l_relu(conv2d(y_unpooling1, W_conv), alpha=0.05)##
y_unpooling2 = l_relu(tf.image.resize_bilinear(y_conv1, [H2,W2]), alpha=0.5)
y_conv2 = l_relu(conv2d(y_unpooling2, W_conv2), alpha=0.05)##
y_res = tf.nn.relu(tf.reshape(y_conv2, [-1, H2, W2, 3])+x_image_720)

cost = (tf.reduce_sum(tf.square(y_image-y_res)))
train_step = tf.train.AdamOptimizer(learning_rate).minimize(cost)
saver = tf.train.Saver()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
#saver.restore(sess, "01/models.ckpt")

temp =sess.run(y_unpooling1, feed_dict={x_image:[array360], x_image_720:[array360_720],y_image:[array720]})
print(tf.shape(temp))
#asImage(temp).show()
#print(tf.shape(sess.run(temp)))
temp = sess.run(x_new, feed_dict={x_image:[array360], x_image_720:[array360_720],y_image:[array720]})
asImage(temp).save('filtered.jpg')


for steps in range(train_num):
    for index in range(1, file_num):
        array360, array360_720, array720 = getimage(index)
        sess.run(train_step, feed_dict={x_image:[array360], x_image_720:[array360_720], y_image:[array720]})
    print (steps)
    print(sess.run(cost, feed_dict={x_image:[array360], x_image_720:[array360_720],y_image:[array720]}))
    if(steps%5==0):
        showres(index, steps)
print ("끝났")


[[0, 0, 0], [-0.25, -0.25, -0.25], [0, 0, 0], [-0.25, -0.25, -0.25], [1, 1, 1], [-0.25, -0.25, -0.25], [0, 0, 0], [-0.25, -0.25, -0.25], [0, 0, 0]]
Tensor("Shape_20:0", shape=(4,), dtype=int32)
0
9.07873e+08
1
9.07874e+08
2
9.07931e+08
3
9.07981e+08
4
9.07972e+08
5
9.07901e+08
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-45-c81128e16b00> in <module>()
    105     for index in range(1, file_num):
    106         array360, array360_720, array720 = getimage(index)
--> 107         sess.run(train_step, feed_dict={x_image:[array360], x_image_720:[array360_720], y_image:[array720]})
    108     print (steps)
    109     print(sess.run(cost, feed_dict={x_image:[array360], x_image_720:[array360_720],y_image:[array720]}))

/home/alpha/.local/lib/python3.5/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
    776     try:
    777       result = self._run(None, fetches, feed_dict, options_ptr,
--> 778                          run_metadata_ptr)
    779       if run_metadata:
    780         proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

/home/alpha/.local/lib/python3.5/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
    980     if final_fetches or final_targets:
    981       results = self._do_run(handle, final_targets, final_fetches,
--> 982                              feed_dict_string, options, run_metadata)
    983     else:
    984       results = []

/home/alpha/.local/lib/python3.5/site-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
   1030     if handle is None:
   1031       return self._do_call(_run_fn, self._session, feed_dict, fetch_list,
-> 1032                            target_list, options, run_metadata)
   1033     else:
   1034       return self._do_call(_prun_fn, self._session, handle, feed_dict,

/home/alpha/.local/lib/python3.5/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
   1037   def _do_call(self, fn, *args):
   1038     try:
-> 1039       return fn(*args)
   1040     except errors.OpError as e:
   1041       message = compat.as_text(e.message)

/home/alpha/.local/lib/python3.5/site-packages/tensorflow/python/client/session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata)
   1019         return tf_session.TF_Run(session, options,
   1020                                  feed_dict, fetch_list, target_list,
-> 1021                                  status, run_metadata)
   1022 
   1023     def _prun_fn(session, handle, feed_dict, fetch_list):

KeyboardInterrupt: 

In [ ]: