In [29]:
# Implementation following the algorithm from the paper
# "Learning a Deep Convolutional Network for Image Super-Resolution" (SRCNN)
# 2017-07-06

import tensorflow as tf
from PIL import Image
import numpy as np

# Images and constants.
learning_rate=5e-4
# Frame dimensions: 360p source and 720p target.
# NOTE(review): these W*/H* names are reused later in the notebook for other
# purposes — verify before relying on them past the graph definition.
W1=640
H1=360
W2=1280
H2=720
# Dataset layout: <path><pref><index><suff>, e.g. "../06/360p/1_360.jpg".
path="../06/"
pref1="360p/"
pref2="720p/"
suff1="_360.jpg"
suff2="_720.jpg"
# Reduced for a quick smoke run; commented values are the full-run settings.
train_num=1#1000
file_num=2#6#30
#batch_num=1000


# Weight initialization helper.
def weight_variable(shape, name, stddev=1.):
  """Create a trainable weight tensor initialized from a truncated normal.

  Args:
    shape: list of ints, the filter/weight shape.
    name: variable name used in the TF graph (and checkpoints).
    stddev: standard deviation of the truncated-normal initializer.
      Defaults to 1. to preserve the original behavior.
      NOTE(review): the SRCNN paper initializes weights with stddev 0.001;
      1.0 is likely far too large for stable training — confirm.

  Returns:
    A tf.Variable with the requested shape and name.
  """
  initial = tf.truncated_normal(shape, stddev=stddev)
  return tf.Variable(initial, name=name)
# Bias initialization helper.
def bias_variable(shape, name, value=10.):
  """Create a trainable bias tensor initialized to a constant.

  Args:
    shape: list of ints, the bias shape (typically [out_channels]).
    name: variable name used in the TF graph (and checkpoints).
    value: initial constant value. Defaults to 10. to preserve the original
      behavior. NOTE(review): the SRCNN paper initializes biases to 0;
      10.0 is unusual — confirm this is intentional.

  Returns:
    A tf.Variable with the requested shape and name.
  """
  initial = tf.constant(value, shape=shape)
  return tf.Variable(initial, name=name)
# Run a 2D convolution (stride 1, SAME padding) and add the bias.
def conv2d(x, W, B):
  """Return conv2d(x, W) with stride 1 and SAME padding, plus bias B."""
  conv = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
  return conv + B

def getimage(idx):
    """Load one training pair for frame `idx`.

    Returns:
      (array_1_720, array_2): two float32 numpy arrays of shape (720, 1280, 3):
      the 360p frame bilinearly upscaled to 720p (network input), and the
      native 720p frame (ground truth).
    """
    # Network input: the 360p frame, upscaled to the target resolution.
    img_1 = Image.open(path + pref1 + str(idx) + suff1)
    img_1_720 = img_1.resize((1280, 720), Image.BILINEAR)
    # Fix: removed img_1_720.show() — it launched an external image viewer
    # on every call inside the training loop.
    # Keep only the first 3 channels so an RGBA/extra-channel source cannot
    # break the (H, W, 3) placeholder shape (consistent with the GT branch).
    array_1_720 = np.array(img_1_720)[:, :, 0:3]
    array_1_720 = array_1_720.astype(np.float32)

    # Ground truth: the native 720p frame.
    img_2 = Image.open(path + pref2 + str(idx) + suff2)
    array_2 = np.array(img_2)[:, :, 0:3]
    array_2 = array_2.astype(np.float32)
    return array_1_720, array_2

def l_relu(x, alpha=0.):
    """Leaky ReLU: passes positive values through, scales negatives by alpha.

    Implemented as relu(x) - alpha * relu(-x), which equals x for x >= 0
    and alpha * x for x < 0.
    """
    positive_part = tf.nn.relu(x)
    negative_part = tf.nn.relu(-x)
    return positive_part - alpha * negative_part

def asImage(tensor):
    """Convert the first item of a batched (N, H, W, 3) array to a PIL RGB image."""
    first_item = tensor[0]
    pixels = first_item.astype(np.uint8)
    return Image.fromarray(pixels, 'RGB')

def showres(index, steps):
    """Run the network on frame `index` and save the result image to disk.

    Saves 'results/result_06_<steps>.jpg'. Relies on the module-level
    globals sess, y_result, x_image and y_image defined by the graph cell.
    """
    test360, test720 = getimage(index)
    A = sess.run(y_result, feed_dict={x_image: [test360], y_image: [test720]})
    # Fix: clip to the valid pixel range before the uint8 conversion in
    # asImage — casting raw float outputs directly wraps modulo 256 and
    # produces severe artifacts for values outside [0, 255].
    result = np.clip(A, 0., 255.)
    asImage(result).save('results/result_06_' + str(steps) + '.jpg')


# --- Graph definition --------------------------------------------------------
# x_image: the bilinearly upscaled 360p frame (network input).
# y_image: the true 720p frame (regression target).
x_image = tf.placeholder(np.float32, shape=[None, H2, W2, 3])
y_image = tf.placeholder(np.float32, shape=[None, H2, W2, 3])

# Three-layer SRCNN-style network: patch extraction (11x11), non-linear
# mapping (1x1), reconstruction (5x5). The Python names are distinct from
# the W1/W2 image-size constants above (which were previously shadowed
# here); the graph/checkpoint names ('W1'...'B3') are unchanged.
Wc1 = weight_variable([11, 11, 3, 10], name='W1')
Bc1 = bias_variable([10], name='B1')
Wc2 = weight_variable([1, 1, 10, 5], name='W2')
Bc2 = bias_variable([5], name='B2')
Wc3 = weight_variable([5, 5, 5, 3], name='W3')
Bc3 = bias_variable([3], name='B3')

F1 = l_relu(conv2d(x_image, Wc1, Bc1), alpha=0.0)
F2 = l_relu(conv2d(F1, Wc2, Bc2), alpha=0.0)
y_result = l_relu(conv2d(F2, Wc3, Bc3), alpha=0.0)

# Mean-squared-error loss between reconstruction and ground truth.
cost = tf.reduce_mean(tf.square(y_image - y_result))
train_step = tf.train.AdamOptimizer(learning_rate).minimize(cost)

saver = tf.train.Saver()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Uncomment to resume from a saved checkpoint:
#saver.restore(sess, "01/models.ckpt")

# --- Training loop -----------------------------------------------------------
for steps in range(train_num):
    for index in range(1, file_num):
        # array360 is the 360p image already upscaled to 720p by getimage().
        array360, array720 = getimage(index)
        sess.run(train_step, feed_dict={x_image: [array360], y_image: [array720]})
    # Report the cost on the last pair seen this epoch, then save a sample
    # reconstruction for visual inspection.
    print(steps, sess.run(cost, feed_dict={x_image: [array360], y_image: [array720]}))
    showres(index, steps)
print("끝났")


0 6.60446e+07
끝났

In [ ]: