【tensorflow2.0 圖片數據】tensorflow中準備圖片數據的常用方案

在tensorflow中準備圖片數據的常用方案有兩種,第一種是使用tf.keras中的ImageDataGenerator工具構建圖片數據生成器。

第二種是使用tf.data.Dataset搭配tf.image中的一些圖片處理方法構建數據管道。

第一種方法更爲簡單

# Use tf.keras (consistent with the tf.data example below) rather than the
# standalone `keras` package — this article targets TensorFlow 2.x.
from tensorflow.keras.preprocessing.image import ImageDataGenerator

train_dir = 'cifar2_datasets/train'
test_dir = 'cifar2_datasets/test'

# Apply data augmentation to the training set only: random rotations, shifts,
# shears, zooms and horizontal flips effectively enlarge the small dataset.
train_datagen = ImageDataGenerator(
            rescale=1./255,          # scale pixel values from [0, 255] to [0, 1]
            rotation_range=40,
            width_shift_range=0.2,
            height_shift_range=0.2,
            shear_range=0.2,
            zoom_range=0.2,
            horizontal_flip=True,
            fill_mode='nearest')

# The test set must NOT be augmented — only rescaled, so evaluation reflects
# the real data distribution.
test_datagen = ImageDataGenerator(rescale=1./255)


# Stream batches straight from the directory tree; class labels are inferred
# from the sub-directory names (class_mode='binary' -> scalar 0/1 labels).
train_generator = train_datagen.flow_from_directory(
                    train_dir,
                    target_size=(32, 32),
                    batch_size=32,
                    shuffle=True,            # shuffle training samples each epoch
                    class_mode='binary')

test_generator = test_datagen.flow_from_directory(
                    test_dir,
                    target_size=(32, 32),
                    batch_size=32,
                    shuffle=False,           # keep order stable for evaluation
                    class_mode='binary')


# Use tf.keras for consistency with the rest of this TF 2.x article.
from tensorflow.keras import models, layers, optimizers
from tensorflow.keras import backend as K

K.clear_session()  # reset the global Keras state before building a new graph

# A deliberately tiny baseline: flatten the 32x32x3 image and classify with
# a single hidden layer; sigmoid output for binary classification.
model = models.Sequential()
model.add(layers.Flatten(input_shape=(32, 32, 3)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))

# `lr` is deprecated (and removed in recent Keras); use `learning_rate`.
model.compile(loss='binary_crossentropy',
            optimizer=optimizers.RMSprop(learning_rate=1e-4),
            metrics=['acc'])


# Steps per epoch = ceil(num_samples / batch_size); cast to int because
# np.ceil returns a float and Keras expects integer step counts.
import numpy as np
train_steps_per_epoch = int(np.ceil(10000 / 32))
test_steps_per_epoch = int(np.ceil(2000 / 32))

# `fit_generator` is deprecated in TF 2.x: `fit` accepts generators directly
# and is just as memory-friendly (batches are streamed, not loaded at once).
history = model.fit(
        train_generator,
        steps_per_epoch=train_steps_per_epoch,
        epochs=5,
        validation_data=test_generator,
        validation_steps=test_steps_per_epoch,
        workers=1,                 # number of worker processes reading data
        use_multiprocessing=False  # multiprocess reading is usable on Linux
        )

第二種方式是TensorFlow的原生方法,更加靈活,使用得當的話也可以獲得更好的性能:


import tensorflow as tf
from tensorflow.keras import datasets,layers,models
import datetime
import pandas as pd
import matplotlib.pyplot as plt

# Number of images per batch in the tf.data pipelines below.
BATCH_SIZE = 100

# 自定義導入圖片函數
def load_image(img_path, size=(32, 32)):
    """Load one image file and derive its binary label from the path.

    Args:
        img_path: scalar string tensor, path to a .jpg file. Images under an
            ``/automobile/`` directory are labeled 1, everything else 0.
        size: target (height, width) to resize to.

    Returns:
        (img, label): float32 image scaled to [0, 1] and an int8 scalar label.
    """
    # tf.where works on symbolic tensors under graph tracing, whereas a Python
    # ternary on a tensor condition relies on AutoGraph conversion.
    label = tf.where(tf.strings.regex_full_match(img_path, ".*/automobile/.*"),
                     tf.constant(1, tf.int8),
                     tf.constant(0, tf.int8))
    img = tf.io.read_file(img_path)
    # Note: files are JPEG; channels=3 guarantees an RGB result even if a
    # grayscale image sneaks into the dataset.
    img = tf.image.decode_jpeg(img, channels=3)
    img = tf.image.resize(img, size) / 255.0  # scale pixels to [0, 1]
    return (img, label)




# Build the input pipelines step by step. Parallel map (num_parallel_calls)
# and prefetch overlap preprocessing with training to improve throughput.
AUTOTUNE = tf.data.experimental.AUTOTUNE

ds_train = tf.data.Dataset.list_files("./data/cifar2/train/*/*.jpg")
ds_train = ds_train.map(load_image, num_parallel_calls=AUTOTUNE)
ds_train = ds_train.shuffle(buffer_size=1000)  # shuffle training samples
ds_train = ds_train.batch(BATCH_SIZE)
ds_train = ds_train.prefetch(AUTOTUNE)

# The test pipeline skips shuffling so evaluation order is deterministic.
ds_test = tf.data.Dataset.list_files("./data/cifar2/test/*/*.jpg")
ds_test = ds_test.map(load_image, num_parallel_calls=AUTOTUNE)
ds_test = ds_test.batch(BATCH_SIZE)
ds_test = ds_test.prefetch(AUTOTUNE)

# Reset the global Keras state before building a new model.
tf.keras.backend.clear_session()

# Define a small CNN with the functional API: two conv/pool stages, dropout,
# then a dense head with a sigmoid output for binary classification.
inputs = layers.Input(shape=(32, 32, 3))
h = layers.Conv2D(32, kernel_size=(3, 3))(inputs)
h = layers.MaxPool2D()(h)
h = layers.Conv2D(64, kernel_size=(5, 5))(h)
h = layers.MaxPool2D()(h)
h = layers.Dropout(rate=0.1)(h)
h = layers.Flatten()(h)
h = layers.Dense(32, activation='relu')(h)
outputs = layers.Dense(1, activation='sigmoid')(h)

model = models.Model(inputs=inputs, outputs=outputs)
print(model.summary())


# Log directory for TensorBoard, timestamped so each run is kept separate.
logdir = "./data/keras_model/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=1)

# Compile the model: Adam optimizer, binary cross-entropy loss (matches the
# single sigmoid output), track accuracy.
model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
        loss=tf.keras.losses.binary_crossentropy,
        metrics=["accuracy"]
    )

# Train for 10 epochs, validating on the test pipeline each epoch.
# NOTE(review): `workers` applies to generator/sequence inputs — with a
# tf.data.Dataset it is presumably ignored; confirm against the Keras docs.
history = model.fit(ds_train,epochs= 10,validation_data=ds_test,
                    callbacks = [tensorboard_callback],workers = 4)



# Collect the per-epoch metrics into a DataFrame with a 1-based epoch index.
dfhistory = pd.DataFrame(history.history)
dfhistory.index = pd.RangeIndex(1, len(dfhistory) + 1, name='epoch')



# 可視化
def plot_metric(history, metric):
    """Plot training vs. validation curves for *metric* from a Keras History."""
    train_values = history.history[metric]
    val_values = history.history['val_'+metric]
    xs = range(1, len(train_values) + 1)
    # Blue dashed circles for training, red solid circles for validation.
    plt.plot(xs, train_values, 'bo--')
    plt.plot(xs, val_values, 'ro-')
    plt.title('Training and validation '+ metric)
    plt.xlabel("Epochs")
    plt.ylabel(metric)
    plt.legend(["train_"+metric, 'val_'+metric])
    plt.show()


# Visualize the loss curves, then report final loss/accuracy on the test set.
plot_metric(history,"loss")
val_loss,val_accuracy = model.evaluate(ds_test,workers=4)
print(val_loss,val_accuracy)



發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章