Python Deep Learning (9): Style Transfer

Algorithm Overview

This post uses the original neural style transfer algorithm, proposed by Gatys et al. in 2015.
At its core it is the essence of deep learning itself: minimizing a loss value. What is interesting here is how that loss is defined. It splits into two parts: a content loss and a style loss.
Content loss: the content of an image is captured by the representations in the upper layers of a convolutional network. We therefore feed the images through a pretrained model (this post uses VGG19 with weights trained on ImageNet) and take the output of one upper layer as a representation of content; the content loss is the difference between the generated image's and the original image's activations at that layer.
Style loss: style has to be captured across multiple spatial scales, i.e., through the correlations between the outputs of several different layers of the pretrained network. Concretely, these correlations are computed with the Gram matrix.
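Putting the pieces together, the objective minimized below is a weighted sum of the two losses plus a small total variation regularizer that encourages spatial smoothness (a sketch in the notation of the code below, where $\alpha$, $\beta$, $\gamma$ correspond to content_weight, style_weight, and total_variation_weight, and $N$ is the number of style layers):

$$\mathcal{L}_{\text{total}} = \alpha\,\mathcal{L}_{\text{content}} + \frac{\beta}{N}\sum_{l=1}^{N}\mathcal{L}_{\text{style}}^{(l)} + \gamma\,\mathcal{L}_{\text{TV}}$$

The Gram matrix used by the style loss is $G_{ij} = \sum_{k} F_{ik} F_{jk}$: the inner product between the flattened activations of feature channels $i$ and $j$ in a layer.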

Code

Keras Implementation

from keras.preprocessing.image import load_img, img_to_array
import numpy as np
from keras.applications import vgg19
from keras import backend as K
from scipy.optimize import fmin_l_bfgs_b
from imageio import imwrite # scipy.misc.imsave was removed in SciPy 1.2; imageio.imwrite is the usual replacement
import time

target_image_path = './portrait.jpg' # content (target) image
style_reference_image_path = './transfer_style_reference.jpg' # style reference image

width, height = load_img(target_image_path).size # size of the original image
# dimensions of the generated image
img_height = 400
img_width = 600
# preprocess an image before it enters the VGG19 network
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(img_height, img_width))
    img = img_to_array(img) # convert to a numpy array
    img = np.expand_dims(img, axis=0) # add a batch dimension
    img = vgg19.preprocess_input(img)
    return img
# convert a VGG19-processed tensor back into a displayable image
def deprocess_image(x):
    # add back the ImageNet mean pixel values, undoing the zero-centering done by vgg19.preprocess_input
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68
    x = x[:, :, ::-1] # convert BGR back to RGB
    x = np.clip(x, 0, 255).astype('uint8')
    return x
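# Optional sanity check (illustration, not part of the original script):
# preprocessing followed by deprocessing should roughly recover the image.
_check = preprocess_image(target_image_path) # shape (1, 400, 600, 3), zero-centered BGR
_restored = deprocess_image(_check[0].copy()) # shape (400, 600, 3), uint8 RGB; .copy() because deprocess_image mutates its input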

target_image = K.constant(preprocess_image(target_image_path)) # content image
style_reference_image = K.constant(preprocess_image(style_reference_image_path)) # style reference image

combination_image = K.placeholder((1, img_height, img_width, 3)) # placeholder for the generated image
# combine the three images into a single batch: index 0 = content, 1 = style reference, 2 = generated image
input_tensor = K.concatenate([target_image,
                              style_reference_image,
                              combination_image], axis=0)
# build the VGG19 network on this batch; the ImageNet weights stay frozen, only the generated image is optimized
model = vgg19.VGG19(input_tensor=input_tensor,
                    weights='imagenet',
                    include_top=False)
print('Model loaded.')
# content loss
def content_loss(base, combination):
    return K.sum(K.square(combination - base))
# Gram matrix: the inner product of a layer's feature maps,
# which can be read as a map of the correlations between that layer's features
def gram_matrix(x):
    features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    gram = K.dot(features, K.transpose(features))
    return gram
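# Illustration with toy values (not from the original): a 5x6 feature map with
# 4 channels yields a 4x4 Gram matrix, one dot product per pair of channels.
_fmap = np.random.rand(5, 6, 4)
_feats = _fmap.transpose(2, 0, 1).reshape(4, -1) # (channels, h*w), mirrors permute_dimensions + batch_flatten
_gram = _feats @ _feats.T # (4, 4), mirrors K.dot(features, K.transpose(features))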
# style loss
def style_loss(style, combination):
    S = gram_matrix(style)
    C = gram_matrix(combination)
    channels = 3
    size = img_height * img_width
    return K.sum(K.square(S - C)) / (4. * (channels ** 2) * (size ** 2))
# total variation loss: a regularizer that penalizes differences between
# neighbouring pixels, encouraging spatial smoothness in the generated image
def total_variation_loss(x):
    a = K.square(x[:, :img_height-1, :img_width-1, :] - x[:, 1:, :img_width-1, :])
    b = K.square(x[:, :img_height-1, :img_width-1, :] - x[:, :img_height-1, 1:, :])
    return K.sum(K.pow(a + b, 1.25))
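# Plain-numpy analogue (toy illustration, not from the original): the
# regularizer is zero for a flat image and large for random noise.
def _tv_numpy(x):
    a = np.square(x[:, :-1, :-1, :] - x[:, 1:, :-1, :])
    b = np.square(x[:, :-1, :-1, :] - x[:, :-1, 1:, :])
    return np.sum(np.power(a + b, 1.25))
# _tv_numpy(np.zeros((1, 8, 8, 3))) -> 0.0, while random input gives a large value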

outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
content_layer = 'block5_conv2' # layer used for the content loss
# layers used for the style loss
style_layers = ['block1_conv1',
                'block2_conv1',
                'block3_conv1',
                'block4_conv1',
                'block5_conv1']
# weights of the loss components in the weighted sum
total_variation_weight = 1e-4
style_weight = 1.
content_weight = 0.025

loss = K.variable(0.) # total loss
layer_features = outputs_dict[content_layer]
target_image_features = layer_features[0, :, :, :]
combination_features = layer_features[2, :, :, :]
loss += content_weight * content_loss(target_image_features,
                                      combination_features) # add the content loss
# add the style loss component of each target layer
for layer_name in style_layers:
    layer_features = outputs_dict[layer_name]
    style_reference_features = layer_features[1, :, :, :]
    combination_features = layer_features[2, :, :, :]
    sl = style_loss(style_reference_features, combination_features)
    loss += (style_weight / len(style_layers)) * sl

loss += total_variation_weight * total_variation_loss(combination_image) # final total loss

grads = K.gradients(loss, combination_image)[0] # gradient of the loss with respect to the generated image

fetch_loss_and_grads = K.function([combination_image], [loss, grads]) # function returning the current loss and gradient values
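# Optional smoke test (not in the original script): one forward/backward
# pass on the content image confirms the graph evaluates.
_outs = fetch_loss_and_grads([preprocess_image(target_image_path)])
print('Initial loss:', _outs[0])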

# caches loss and gradients from a single joint pass: fmin_l_bfgs_b wants
# separate callables for the loss and its gradient, but computing them
# together through the network is much cheaper, so loss() stores both
# and grads() returns the cached value
class Evaluator(object):

    def __init__(self):
        self.loss_value = None
        self.grad_values = None

    def loss(self, x):
        assert self.loss_value is None
        x = x.reshape((1, img_height, img_width, 3))
        outs = fetch_loss_and_grads([x])
        loss_value = outs[0]
        grad_values = outs[1].flatten().astype('float64')
        self.loss_value = loss_value
        self.grad_values = grad_values
        return self.loss_value

    def grads(self, x):
        assert self.loss_value is not None
        grad_values = np.copy(self.grad_values)
        self.loss_value = None
        self.grad_values = None
        return grad_values

evaluator = Evaluator()
result_prefix = 'style_transfer_result'
iterations = 20

x = preprocess_image(target_image_path) # start from the content image
x = x.flatten() # fmin_l_bfgs_b works on flat vectors
for i in range(iterations):
    print('Start of iteration', i)
    start_time = time.time()
    x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x,
                                     fprime=evaluator.grads, maxfun=20) # run L-BFGS to minimize the loss
    print('Current loss value:', min_val)
    img = x.copy().reshape((img_height, img_width, 3))
    img = deprocess_image(img)
    fname = result_prefix + '_at_iteration_%d.png' % i
    imwrite(fname, img)
    end_time = time.time()
    print('Image saved as', fname)
    print('Iteration %d completed in %ds' % (i, end_time - start_time))
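A practical note (general guidance, not from the original post): if the output keeps too little of the photo, raising content_weight (or lowering style_weight) shifts the balance back toward content; the three weights above are the main knobs to tune in this setup.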

Results

Original image:
[image]
Style reference image:
[image]
Result:
[image]

Recommended Reading

The principle behind style transfer
The paper "Image Style Transfer Using Convolutional Neural Networks"
The Gram matrix
An intuitive explanation of the Gram matrix
