tensorlayer學習日誌18_chapter8_1

第八章主要就是學習生成對抗網絡(GAN),源碼地址在這:https://github.com/tensorlayer/dcgan,一共四個文件:download.py、model.py、main.py 和 utils.py

其中download.py文件有誤如下:

parser.add_argument('-datasets', metavar='N', type=str, nargs='+', choices=['celebA', 'lsun', 'mnist'],help='name of dataset to download [celebA, lsun, mnist]')
# 這行的'datasets'要改爲'-datasets' ,對應的結尾代碼也可改爲如下:

if __name__ == '__main__':
    args = parser.parse_args('-datasets celebA'.split())
    prepare_data_dir()

 然並卵,運行後,報錯如下:requests.exceptions.ConnectionError: HTTPSConnectionPool(host='docs.google.com', port=443): Max retries exceeded with url: /uc?export=download&id=0B7EVK8r0v71pZjFTYXZWM3FlRnM (Caused by NewConnectionError('<urllib3.connection.VerifiedHTTPSConnection object at 0x0000015F269FBB38>: Failed to establish a new connection: [WinError 10060] 由於連接方在一段時間後沒有正確答覆或連接的主機沒有反應,連接嘗試失敗。',))

這是因爲這是個google的下載鏈接,被牆了。所以下載數據要到這裏下載:

http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html,這是香港中文大學的開放數據

頁面上提示 “If the above links are not accessible, you could download the dataset using Baidu Drive.”,我選用這個百度網盤,下載 “img_align_celeba.zip” 這個文件,大小是 1.34G。數據說明看這個博客:

https://blog.csdn.net/Cloudox_/article/details/78432517?locationNum=4&fps=1

 

utils.py文件是關於圖像操作有關的命令,我把它拆解分析如下:

from random import shuffle
import scipy.misc
import numpy as np
import cv2

# Demo: load a sample CelebA image (BGR uint8 ndarray) to exercise the helpers below.
# NOTE(review): assumes 000001.jpg sits in the current working directory — confirm.
image = cv2.imread('000001.jpg')
# Uncomment to preview the untouched image in a window:
# cv2.imshow('000001_original', image)
# cv2.waitKey()
# cv2.destroyAllWindows()

# print('~~~~~~~~~center_crop~~~~~~~~')
def _bilinear_resize(x, out_h, out_w):
    """Bilinearly resample a 2-D (gray) or 3-D (H, W, C) array to
    (out_h, out_w), returning uint8 values in [0, 255].

    Replacement for ``scipy.misc.imresize`` (removed in SciPy 1.3),
    which also interpolated bilinearly and returned uint8.
    """
    x = np.asarray(x, dtype=np.float64)
    in_h, in_w = x.shape[:2]
    # Corner-aligned sample grid over the source image.
    rows = np.linspace(0.0, in_h - 1.0, out_h)
    cols = np.linspace(0.0, in_w - 1.0, out_w)
    r0 = np.floor(rows).astype(int)
    c0 = np.floor(cols).astype(int)
    r1 = np.minimum(r0 + 1, in_h - 1)   # clamp so the last row/col is valid
    c1 = np.minimum(c0 + 1, in_w - 1)
    fr = rows - r0
    fc = cols - c0
    if x.ndim == 3:
        # Broadcast the interpolation fractions over the channel axis too.
        fr = fr[:, None, None]
        fc = fc[None, :, None]
    else:
        fr = fr[:, None]
        fc = fc[None, :]
    top = x[r0][:, c0] * (1.0 - fc) + x[r0][:, c1] * fc
    bottom = x[r1][:, c0] * (1.0 - fc) + x[r1][:, c1] * fc
    out = top * (1.0 - fr) + bottom * fr
    return np.clip(np.rint(out), 0, 255).astype(np.uint8)


def center_crop(x, crop_h, crop_w=None, resize_w=64):
    """Crop the central (crop_h, crop_w) patch of ``x`` and resize it to a
    (resize_w, resize_w) square.

    Parameters
    ----------
    x : np.ndarray
        Image array, (H, W) or (H, W, C).
    crop_h : int
        Height of the centered crop.
    crop_w : int, optional
        Width of the centered crop; defaults to ``crop_h`` (square crop).
    resize_w : int
        Side length of the resized output square.

    Returns
    -------
    np.ndarray of uint8 with shape (resize_w, resize_w[, C]).
    """
    if crop_w is None:
        crop_w = crop_h
    h, w = x.shape[:2]
    j = int(round((h - crop_h) / 2.))
    i = int(round((w - crop_w) / 2.))
    # scipy.misc.imresize was removed in SciPy 1.3; use the local
    # NumPy bilinear resize instead.
    return _bilinear_resize(x[j:j + crop_h, i:i + crop_w], resize_w, resize_w)
# center_crop1 = center_crop(image,crop_h=64,crop_w=None,resize_w=64)
# cv2.imshow('center_crop1', center_crop1)
# cv2.imwrite('center_crop1.jpg', center_crop1)
# cv2.waitKey()
# cv2.destroyAllWindows()

# print("~~~~~~~~~transform~~~~~~~~~~~~")
def transform(image, npx=64, is_crop=True, resize_w=64):
    """Normalize an image into the GAN's input range.

    Optionally center-crops an npx-square patch (resized to resize_w),
    then rescales pixel values from [0, 255] to [-1, 1].
    """
    patch = center_crop(image, npx, resize_w=resize_w) if is_crop else image
    return np.array(patch) / 127.5 - 1.
# transform1 = transform(center_crop1, is_crop=False)
# cv2.imshow('transform', transform1)
# cv2.imwrite('transform1.jpg', transform1)
# transform2 = transform1*255 
# # transform() 裁剪後的圖片是以 float64 的格式存儲的,數值被歸一化了:
# # x/127.5-1 把 (0~255) 映射到 [−1, 1],所以要乘255還原,否則保存transform1是個全黑圖
# cv2.imwrite('transform2.jpg', transform2)
# cv2.waitKey()
# cv2.destroyAllWindows()

# print('~~~~~~~~~inverse~~~~~~~~~~')
def inverse_transform(images):
    """Map images from the generator's [-1, 1] range back to [0, 1]."""
    shifted = images + 1.
    return shifted / 2.
# inverse = inverse_transform(image)
# cv2.imshow('inverse', inverse)
# cv2.imwrite('inverse.jpg', inverse)
# # 變成 [0, 1]
# cv2.waitKey()
# cv2.destroyAllWindows()

# print('~~~~~~~~~還原transform~~~~~~~~~~')
def inverse_transform2(images):
    """Map images from [-1, 1] back to the original [0, 255] pixel range."""
    shifted = images + 1.
    return shifted * 127.5
# inverse_transform = inverse_transform2(transform1)
# cv2.imshow('inverse_transform', inverse_transform)
# cv2.imwrite('inverse_transform.jpg', inverse_transform)
# cv2.waitKey()
# cv2.destroyAllWindows()

# print('~~~~~~~~~~~is_grayscale~~~~~~~~~~')
def imread(path, is_grayscale = False):
    """Read the image file at ``path`` as a float ndarray.

    Parameters
    ----------
    path : str
        Path of the image file.
    is_grayscale : bool
        When True, load flattened to a single grayscale channel.

    Returns
    -------
    np.ndarray of float64 pixel values.
    """
    # NOTE(review): scipy.misc.imread was removed in SciPy 1.2 — on a modern
    # SciPy this call needs imageio.imread (or cv2.imread) instead; confirm
    # the installed SciPy version before relying on it.
    # np.float was removed in NumPy 1.20; the builtin float gives the same
    # float64 dtype.
    if (is_grayscale):
        return scipy.misc.imread(path, flatten = True).astype(float)
    else:
        return scipy.misc.imread(path).astype(float)
# is_grayscale = imread(r'E:\python教材\一朵花開的時間\\4一起玩轉Tensorlayer\000001.jpg', is_grayscale=True)
# cv2.imshow('is_grayscale', is_grayscale)
# cv2.imwrite('is_grayscale.jpg', is_grayscale)
# cv2.waitKey()
# cv2.destroyAllWindows()

# print('~~~~~~get_image~~~~~~~~')
def get_image(image_path, image_size, is_crop=True, resize_w=64, is_grayscale = False):
    """Load the image at ``image_path`` and normalize it to [-1, 1],
    optionally center-cropping an image_size square resized to resize_w."""
    raw = imread(image_path, is_grayscale)
    return transform(raw, image_size, is_crop, resize_w)
# get_image = get_image(r'E:\python教材\一朵花開的時間\\4一起玩轉Tensorlayer\000001.jpg',
#     image_size=90, is_crop =True, resize_w=45, is_grayscale=True)
# cv2.imshow('get_image', get_image)
# cv2.imwrite('get_image.jpg', get_image)
# cv2.waitKey()
# cv2.destroyAllWindows()

# get_image1 = get_image*255
# cv2.imshow('get_image1', get_image1)
# cv2.imwrite('get_image1.jpg', get_image1)
# cv2.waitKey()
# cv2.destroyAllWindows()

print('~~~~~~~~~merge~~~~~~~~~~')
import os
from glob import glob
import tensorflow as tf
# Collect every CelebA jpg under ./data/img_align_celeba.
data_files = glob(os.path.join("./data/img_align_celeba/*.jpg"))
# print(len(data_files))

# Demo: load the first 6 files as 100x100 grayscale patches normalized to [-1, 1].
sample_files = data_files[0:6]
sample = [get_image(sample_file, image_size=150, is_crop=True, resize_w=100, is_grayscale = True) for sample_file in sample_files]
sample_images = np.array(sample).astype(np.float32)

# merge() below operates on one stacked ndarray of images, which is why
# sample_images is built here first.

def merge(images, size):
    """Tile a batch of images into a single size[0] x size[1] grid image.

    Parameters
    ----------
    images : np.ndarray
        Image batch, shape (N, h, w) for grayscale or (N, h, w, c) for color.
    size : sequence of two ints
        (rows, cols) of the grid; rows * cols should be >= N.

    Returns
    -------
    np.ndarray (float64, per np.zeros) holding the tiled grid,
    shape (h * size[0], w * size[1]) or (h * size[0], w * size[1], c).
    """
    h, w = images.shape[1], images.shape[2]
    # Pick the canvas shape from the batch itself instead of hand-toggling
    # between the grayscale and color variants the author kept commented out.
    if images.ndim == 4:
        img = np.zeros((h * size[0], w * size[1], images.shape[3]))
    else:
        img = np.zeros((h * size[0], w * size[1]))
    for idx, image in enumerate(images):
        i = idx % size[1]   # column within the grid
        j = idx // size[1]  # row within the grid
        img[j * h:j * h + h, i * w:i * w + w] = image
    return img

# merge = merge(sample_images,[1,6])
# cv2.imshow('merge', merge)
# cv2.imwrite('merge.jpg', merge)
# merge1 = merge*255
# cv2.imshow('merge1', merge1)
# cv2.imwrite('merge1.jpg', merge1)
# cv2.waitKey()
# cv2.destroyAllWindows()

print('~~~~~~imsave~~~~~~~~')
def imsave(images, size, path):
    """Tile ``images`` into one grid via merge() and write it to ``path``."""
    tiled = merge(images, size)
    return scipy.misc.imsave(path, tiled)
def save_images(images, size, image_path):
    """Save a batch of [-1, 1] images as a single tiled grid at ``image_path``."""
    rescaled = inverse_transform(images)
    return imsave(rescaled, size, image_path)

# Demo: write the 6 sample images as a 6-row x 1-column grid, then read the
# file back with OpenCV and display it.
# NOTE(review): the grid is written to the absolute E:\ path but read back
# from 'merge_imsave.jpg' in the working directory — confirm these are the
# same location, otherwise the imshow shows a stale (or missing) file.
save_images(sample_images,[6,1],image_path=r'E:\python教材\一朵花開的時間\\4一起玩轉Tensorlayer\merge_imsave.jpg')

save_image = cv2.imread('merge_imsave.jpg')
cv2.imshow('merge_imsave', save_image)
cv2.waitKey()
cv2.destroyAllWindows()

輸出的圖如下:

      get_image.jpg           get_image1.jpg  

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章