Python Deep Learning (9): Style Transfer

Algorithm Overview

The method used in this post is the earliest neural style transfer algorithm, proposed in 2015 by Gatys et al.
At its core it is the essence of deep learning: minimizing a loss value. What makes it interesting is how that loss is defined. The loss has two components: a content loss and a style loss.
Content loss: the content of an image is captured by the representations in the upper layers of a convolutional neural network. We therefore feed the images through a pretrained model (this post uses VGG19 with weights trained on ImageNet); the output of an upper layer represents the image's content, and the content loss is the difference between that output for the generated image and for the original image.
Style loss: style has to be captured at multiple spatial scales, so it is computed from several layers of the pretrained network; within each layer, the correlations between that layer's feature maps represent the style. Concretely, these correlations are computed with the Gram matrix, illustrated in the short sketch below.
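A minimal NumPy sketch (not part of the original code; the array shapes are illustrative assumptions) of how a Gram matrix is computed from one layer's feature maps:

import numpy as np

# a hypothetical activation volume: height x width x channels
features = np.random.rand(4, 4, 8)

# flatten each channel into a row vector: shape (channels, height*width)
flat = features.reshape(-1, features.shape[-1]).T

# Gram matrix: channel-to-channel inner products, shape (channels, channels)
gram = flat @ flat.T
print(gram.shape)  # (8, 8)

This mirrors what gram_matrix in the code below does with Keras backend operations: move the channel axis to the front, flatten, and take the matrix product with the transpose.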

Code

Keras Implementation

from keras.preprocessing.image import load_img, img_to_array
import numpy as np
from keras.applications import vgg19
from keras import backend as K
from scipy.optimize import fmin_l_bfgs_b
from imageio import imwrite as imsave  # scipy.misc.imsave was removed in SciPy >= 1.2
import time

target_image_path = './portrait.jpg' # content (target) image
style_reference_image_path = './transfer_style_reference.jpg' # style reference image

width, height = load_img(target_image_path).size # size of the original image (not used further; the generated size is fixed below)
# dimensions of the generated image
img_height = 400
img_width = 600
# preprocess an image before feeding it into VGG19
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(img_height, img_width))
    img = img_to_array(img) # convert to a NumPy array
    img = np.expand_dims(img, axis=0) # add a batch dimension
    img = vgg19.preprocess_input(img) # zero-center by the ImageNet mean, convert RGB to BGR
    return img
# convert a processed tensor back into a valid image
def deprocess_image(x):
    # add back the ImageNet mean pixel values (undoes the zero-centering done by vgg19.preprocess_input)
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68
    x = x[:, :, ::-1] # convert BGR back to RGB
    x = np.clip(x, 0, 255).astype('uint8')
    return x
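# Quick sanity check (illustrative, not part of the original script):
# preprocessing followed by deprocessing should roughly recover the source image.
# check = preprocess_image(target_image_path)   # shape (1, img_height, img_width, 3)
# restored = deprocess_image(check[0].copy())   # uint8 RGB, shape (img_height, img_width, 3)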

target_image = K.constant(preprocess_image(target_image_path)) # content image (fixed)
style_reference_image = K.constant(preprocess_image(style_reference_image_path)) # style reference image (fixed)

combination_image = K.placeholder((1, img_height, img_width, 3)) # placeholder for the generated image
# combine the three images into a single batch
input_tensor = K.concatenate([target_image,
                              style_reference_image,
                              combination_image], axis=0)
# build the VGG19 network with the batch of three images as input; the ImageNet weights stay frozen (no training happens)
model = vgg19.VGG19(input_tensor=input_tensor,
                    weights='imagenet',
                    include_top=False)
print('Model loaded.')
# content loss
def content_loss(base, combination):
    return K.sum(K.square(combination - base))
# compute the Gram matrix
# the Gram matrix holds the inner products of a layer's feature maps; it can be read as a map of the correlations between that layer's features
def gram_matrix(x):
    features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    gram = K.dot(features, K.transpose(features))
    return gram
# style loss
def style_loss(style, combination):
    S = gram_matrix(style)
    C = gram_matrix(combination)
    channels = 3
    size = img_height * img_width
    return K.sum(K.square(S-C) / (4. * (channels ** 2) * (size ** 2)))
# total variation loss: a regularization term that keeps the generated image spatially smooth
def total_variation_loss(x):
    a = K.square(x[:, :img_height-1, : img_width-1, :] - x[:, 1:, : img_width-1, :])
    b = K.square(x[:, :img_height-1, : img_width-1, :] - x[:, :img_height-1, 1: ,:])
    return K.sum(K.pow(a+b, 1.25))

outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
content_layer = 'block5_conv2' # layer used for the content loss
# layers used for the style loss
style_layers = ['block1_conv1',
                'block2_conv1',
                'block3_conv1',
                'block4_conv1',
                'block5_conv1']
# weights of the loss components in the weighted average
total_variation_weight = 1e-4
style_weight = 1.
content_weight = 0.025
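# These weights are hyperparameters (the values above follow the reference
# implementation this post is based on): raising content_weight makes the result
# resemble the target photo more closely; raising total_variation_weight yields
# a smoother, blurrier image.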

loss = K.variable(0.) # total loss
layer_features = outputs_dict[content_layer]
target_image_features = layer_features[0, :, :, :] # index 0: the target image (order set by input_tensor)
combination_features = layer_features[2, :, :, :] # index 2: the generated image
loss += content_weight * content_loss(target_image_features,
                                      combination_features) # add the content loss
# add a style loss component for each style layer
for layer_name in style_layers:
    layer_features = outputs_dict[layer_name]
    style_reference_features = layer_features[1, :, :, :]
    combination_features = layer_features[2, :, :, :]
    sl = style_loss(style_reference_features, combination_features)
    loss += (style_weight / len(style_layers)) * sl

loss += total_variation_weight * total_variation_loss(combination_image) # add the total variation loss

grads = K.gradients(loss, combination_image)[0] # gradients of the loss with respect to the generated image

fetch_loss_and_grads = K.function([combination_image], [loss, grads]) # function that returns the current loss and gradient values

# computes loss and gradients in one pass and caches them, because SciPy's L-BFGS needs two separate callbacks for loss and gradients
class Evaluator(object):

    def __init__(self):
        self.loss_value = None
        self.grad_values = None

    def loss(self, x):
        assert self.loss_value is None
        x = x.reshape((1, img_height, img_width, 3))
        outs = fetch_loss_and_grads([x])
        loss_value = outs[0]
        grad_values = outs[1].flatten().astype('float64')
        self.loss_value = loss_value
        self.grad_values = grad_values
        return self.loss_value

    def grads(self, x):
        assert self.loss_value is not None
        grad_values = np.copy(self.grad_values)
        self.loss_value = None
        self.grad_values = None
        return grad_values

evaluator = Evaluator()
result_prefix = 'style_transfer_result'
iterations = 20

x = preprocess_image(target_image_path) # initialize the generated image with the content image
x = x.flatten() # L-BFGS works on flat vectors
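# Note (assumption, not in the original post): the Gatys et al. paper also
# initializes from white noise instead of the content image; an illustrative
# alternative would be:
# x = np.random.uniform(0, 255, (1, img_height, img_width, 3)).flatten() - 128.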
for i in range(iterations):
    print('Start of iteration', i)
    start_time = time.time()
    x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x,
                                     fprime=evaluator.grads, maxfun=20) # run L-BFGS to minimize the loss, at most 20 function evaluations per iteration
    print('Current loss value:', min_val)
    img = x.copy().reshape((img_height, img_width, 3))
    img = deprocess_image(img)
    fname = result_prefix + '_at_iteration_%d.png' % i
    imsave(fname, img)
    end_time = time.time()
    print('Image saved as', fname)
    print('Iteration %d completed in %ds' % (i, end_time - start_time))

Results

Original image: (figure)
Style reference image: (figure)
Result: (figure)

Recommended Reading

How style transfer works
The paper "Image Style Transfer Using Convolutional Neural Networks"
Gram matrices
An intuitive explanation of Gram matrices
