我们使用TensorFlow来实现图像风格迁移,主要用到深度学习中的卷积神经网络,即CNN。先用别人训练好的VGG模型来搞
pip install numpy scipy tensorflow keras
再准备一些风格图片,和一张内容图片
使用 Python 3.6;最新的 Python 3.7 由于 Keras 尚不支持,所以无法使用
为了将风格图的风格和内容图的内容进行融合,所生成的图片,在内容上应当尽可能接近内容图,在风格上应当尽可能接近风格图
因此需要定义内容损失函数和风格损失函数,经过加权后作为总的损失函数
In [9]:
# 按惯例,导入一堆包
import tensorflow as tf
import keras
from keras.applications.vgg19 import VGG19,preprocess_input, decode_predictions
import scipy.io
import scipy.misc
from PIL import Image
import numpy as np
import os.path,sys,time
# 输出靠谱日志必备
import logging
# Timestamped INFO-level log lines for the whole run.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Record the exact package versions this run uses (aids reproducibility).
version_banner = (
    f"本次运行使用的包版本为:keras.__version__ {keras.__version__} "
    f"np.__version__ {np.__version__} "
    f"tf.__version__ {tf.__version__} "
    f"scipy.__version__ {scipy.__version__} "
    f"Python 版本为 {sys.version}"
)
print(version_banner)
In [2]:
# --- Configuration constants ---

## Input images and output directory
CONTENT_IMG = 'content.jpg'                       # content image to preserve
STYLE_IMG = 'style1.jpg'                          # style image to imitate
OUTPUT_DIR = 'neural_style_transfer_tensorflow/'  # where snapshots are written
# makedirs(..., exist_ok=True) is race-free, unlike the exists()+mkdir pair.
os.makedirs(OUTPUT_DIR, exist_ok=True)

## Working image geometry (all images are resized to this)
IMAGE_W = 800
IMAGE_H = 600
COLOR_C = 3  # RGB

NOISE_RATIO = 0.7    # fraction of random noise in the initial image
BETA = 5             # weight of the content loss in the total loss
ALPHA = 100          # weight of the style loss in the total loss
VGG_MODEL = 'imagenet-vgg-verydeep-19.mat'  # pretrained VGG-19 (MatConvNet format)
# Per-channel ImageNet mean (RGB), shaped to broadcast over (1, H, W, 3).
MEAN_VALUES = np.array([123.68, 116.779, 103.939]).reshape((1, 1, 1, 3))
In [3]:
def load_vgg_model(path):
    """Build the VGG-19 graph from the pretrained MatConvNet weights at `path`.

    The original body was a stub that returned an undefined `model`; the rest
    of this file indexes the return value as a dict of tensors
    (`model['input']`, `model['conv4_2']`, ...), so build exactly that.

    Returns a dict mapping layer names ('input', 'conv1_1', ..., 'avgpool5')
    to tensors. Only 'input' is a tf.Variable; all weights are constants, so
    minimizing the loss updates only the input image.
    """
    vgg = scipy.io.loadmat(path)
    vgg_layers = vgg['layers']

    def _weights(layer_idx):
        # Pull (W, b) for a conv layer out of the MatConvNet nested cell array.
        W = vgg_layers[0][layer_idx][0][0][2][0][0]
        b = vgg_layers[0][layer_idx][0][0][2][0][1]
        return W, b

    def _conv2d_relu(prev, layer_idx):
        W, b = _weights(layer_idx)
        W = tf.constant(W)
        b = tf.constant(np.reshape(b, (b.size)))
        return tf.nn.relu(
            tf.nn.conv2d(prev, filter=W, strides=[1, 1, 1, 1], padding='SAME') + b)

    def _avgpool(prev):
        # Average pooling (per Gatys et al.) instead of VGG's max pooling.
        return tf.nn.avg_pool(prev, ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1], padding='SAME')

    model = {}
    model['input'] = tf.Variable(np.zeros((1, IMAGE_H, IMAGE_W, COLOR_C)), dtype='float32')
    model['conv1_1'] = _conv2d_relu(model['input'], 0)
    model['conv1_2'] = _conv2d_relu(model['conv1_1'], 2)
    model['avgpool1'] = _avgpool(model['conv1_2'])
    model['conv2_1'] = _conv2d_relu(model['avgpool1'], 5)
    model['conv2_2'] = _conv2d_relu(model['conv2_1'], 7)
    model['avgpool2'] = _avgpool(model['conv2_2'])
    model['conv3_1'] = _conv2d_relu(model['avgpool2'], 10)
    model['conv3_2'] = _conv2d_relu(model['conv3_1'], 12)
    model['conv3_3'] = _conv2d_relu(model['conv3_2'], 14)
    model['conv3_4'] = _conv2d_relu(model['conv3_3'], 16)
    model['avgpool3'] = _avgpool(model['conv3_4'])
    model['conv4_1'] = _conv2d_relu(model['avgpool3'], 19)
    model['conv4_2'] = _conv2d_relu(model['conv4_1'], 21)
    model['conv4_3'] = _conv2d_relu(model['conv4_2'], 23)
    model['conv4_4'] = _conv2d_relu(model['conv4_3'], 25)
    model['avgpool4'] = _avgpool(model['conv4_4'])
    model['conv5_1'] = _conv2d_relu(model['avgpool4'], 28)
    model['conv5_2'] = _conv2d_relu(model['conv5_1'], 30)
    model['conv5_3'] = _conv2d_relu(model['conv5_2'], 32)
    model['conv5_4'] = _conv2d_relu(model['conv5_3'], 34)
    model['avgpool5'] = _avgpool(model['conv5_4'])
    return model
# Content loss: squared-error distance on the 'conv4_2' activations.
def content_loss_func(sess, model):
    """Return the content loss tensor.

    `sess.run` freezes the current activations as the reference (the content
    image must already be assigned to model['input']); the symbolic tensor
    remains the trainable side of the difference.
    """
    def _content_loss(reference, generated):
        channels = reference.shape[3]
        area = reference.shape[1] * reference.shape[2]
        scale = 1 / (4 * channels * area)
        return scale * tf.reduce_sum(tf.pow(generated - reference, 2))

    layer = model['conv4_2']
    return _content_loss(sess.run(layer), layer)
# Style loss: Gram-matrix distance, summed over several layers with weights.
STYLE_LAYERS = [('conv1_1', 0.5), ('conv2_1', 1.0), ('conv3_1', 1.5), ('conv4_1', 3.0), ('conv5_1', 4.0)]
def style_loss_func(sess, model):
    """Return the weighted style loss tensor over STYLE_LAYERS.

    For each layer, the reference activations come from `sess.run` (the style
    image must already be assigned to model['input']); the symbolic tensor is
    the generated side.
    """
    def _gram(features, channels, area):
        # Flatten spatial dims, then take feature-feature correlations.
        flat = tf.reshape(features, (area, channels))
        return tf.matmul(tf.transpose(flat), flat)

    def _layer_loss(reference, generated):
        channels = reference.shape[3]
        area = reference.shape[1] * reference.shape[2]
        ref_gram = _gram(reference, channels, area)
        gen_gram = _gram(generated, channels, area)
        scale = 1 / (4 * channels ** 2 * area ** 2)
        return scale * tf.reduce_sum(tf.pow(gen_gram - ref_gram, 2))

    total = 0
    for layer_name, weight in STYLE_LAYERS:
        layer = model[layer_name]
        total = total + _layer_loss(sess.run(layer), layer) * weight
    return total
# Build the starting image: mostly noise, blended with a bit of content.
def generate_noise_image(content_image, noise_ratio=NOISE_RATIO):
    """Return a (1, IMAGE_H, IMAGE_W, COLOR_C) blend of uniform noise in
    [-20, 20] and `content_image`, weighted by `noise_ratio`."""
    noise = np.random.uniform(-20, 20, (1, IMAGE_H, IMAGE_W, COLOR_C)).astype('float32')
    blended = noise * noise_ratio + content_image * (1 - noise_ratio)
    return blended
# Load an image into the network's input format.
def load_image(path):
    """Read `path`, resize to (IMAGE_H, IMAGE_W), add a batch dimension and
    subtract the ImageNet channel means.

    Returns a float array of shape (1, IMAGE_H, IMAGE_W, 3).
    """
    # scipy.misc.imread/imresize were deprecated and removed (SciPy >= 1.3);
    # use PIL (already imported in this file) directly instead.
    img = Image.open(path).convert('RGB')              # force 3 channels
    # PIL takes (width, height); BILINEAR matches imresize's old default.
    img = img.resize((IMAGE_W, IMAGE_H), Image.BILINEAR)
    image = np.asarray(img)
    image = np.reshape(image, ((1, ) + image.shape))
    # Mean subtraction matches the VGG preprocessing baked into the weights.
    image = image - MEAN_VALUES
    return image
# Save a generated image back to disk.
def save_image(path, image):
    """Undo the mean subtraction, drop the batch dimension, clip to the
    valid [0, 255] range and write `image` to `path`."""
    image = image + MEAN_VALUES
    image = image[0]
    image = np.clip(image, 0, 255).astype('uint8')
    # scipy.misc.imsave was removed (SciPy >= 1.3); save via PIL instead.
    Image.fromarray(image).save(path)
In [5]:
# Load the Keras pretrained VGG19 (downloads the weights on first use).
# The original log message was a literal placeholder ("time-the_current_time()");
# log something meaningful instead (the formatter already prints the timestamp).
logging.info('loading pretrained VGG19 via Keras (weights=imagenet)')
# NOTE(review): this Keras model is immediately shadowed below by
# `model = load_vgg_model(VGG_MODEL)`; it appears to serve only as a
# weight-cache warm-up — confirm whether it is needed at all.
model = VGG19(weights='imagenet')
In [11]:
with tf.Session() as sess:
    # Load the two input images and build the VGG-19 graph.
    content_image = load_image(CONTENT_IMG)
    style_image = load_image(STYLE_IMG)
    model = load_vgg_model(VGG_MODEL)
    input_image = generate_noise_image(content_image)
    sess.run(tf.global_variables_initializer())

    # Content loss is measured against the content image's activations...
    sess.run(model['input'].assign(content_image))
    content_loss = content_loss_func(sess, model)
    # ...style loss against the style image's activations.
    sess.run(model['input'].assign(style_image))
    style_loss = style_loss_func(sess, model)

    # Weighted total: BETA scales content, ALPHA scales style.
    total_loss = BETA * content_loss + ALPHA * style_loss
    optimizer = tf.train.AdamOptimizer(2.0)
    train = optimizer.minimize(total_loss)

    # Re-initialize, then start optimization from the noisy blend.
    sess.run(tf.global_variables_initializer())
    sess.run(model['input'].assign(input_image))
    ITERATIONS = 1330
    for i in range(ITERATIONS):
        sess.run(train)
        # BUG FIX: the original `if i-1 % 133 == 0` parses as
        # `i - (1 % 133) == 0`, i.e. it fired only at i == 1.
        # Snapshot every 133 iterations (including i == 0) as intended.
        if i % 133 == 0:
            output_image = sess.run(model['input'])
            # Original logged the literal placeholder "time-the_current_time()".
            logging.info('current time: %s', time.ctime())
            logging.info('Iteration %d' % i)
            print('Cost: ', sess.run(total_loss))
            save_image(os.path.join(OUTPUT_DIR, 'output_%d.jpg' % i), output_image)
In [ ]: