8-15 (Training keras-yolov3 on your own samples)

The test environment is
- Python 2.7
- Keras 2.2.4
- tensorflow 1.10.0

  1. Download keras-yolo3: https://github.com/qqwweee/keras-yolo3
  2. Following its readme, let's study train.py.
    Ignore the hyperparameters and the training code for now and look first at how the samples are handled:
    Line 17: the annotation file, annotation_path = 'train.txt'. Pay attention to the format of this file (example lines are sketched after the snippet below).
    Lines 41-48: split the data into a training set and a validation set:
    val_split = 0.1
    with open(annotation_path) as f:
        lines = f.readlines()
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_val = int(len(lines)*val_split)
    num_train = len(lines) - num_val
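
For reference, each line of train.txt looks like the following (the image paths here are placeholders): one image path, then zero or more space-separated boxes, each written as x_min,y_min,x_max,y_max,class_id with no spaces inside a box:

path/to/img1.jpg 50,100,150,300,0
path/to/img2.jpg 120,300,250,600,2 30,50,200,120,1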

Around line 59, the training call: model.fit_generator(data_generator_wrapper(...))
Line 184: data_generator_wrapper -> line 165 (data_generator)

Line 175: image, box = get_random_data(annotation_lines[i], input_shape, random=True)
Here annotation_lines is a slice of the lines read above (lines[:num_train] for training, lines[num_train:] for validation).
Now let's study get_random_data in yolo3.utils, then come back to the related code in train.py.

def get_random_data(annotation_line, input_shape, random=True, max_boxes=20, jitter=.3, hue=.1, sat=1.5, val=1.5, proc_img=True):
    '''random preprocessing for real-time data augmentation'''

    # split on whitespace; the raw line still ends in \n, which split() discards
    line = annotation_line.split()

    # the first token is the image path
    image = Image.open(line[0])
    iw, ih = image.size
    h, w = input_shape

    # for each remaining token, split on ',' and convert the strings to ints, then stack into an array
    #     path/to/img2.jpg 120,300,250,600,2    Box format: `x_min,y_min,x_max,y_max,class_id` (no space).
    box = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]])

    if not random:
        # resize image
        scale = min(w/iw, h/ih)
        nw = int(iw*scale)
        nh = int(ih*scale)
        dx = (w-nw)//2
        dy = (h-nh)//2
        image_data=0
        if proc_img:
            image = image.resize((nw,nh), Image.BICUBIC)
            new_image = Image.new('RGB', (w,h), (128,128,128))
            new_image.paste(image, (dx, dy))
            image_data = np.array(new_image)/255.

        # correct boxes
        box_data = np.zeros((max_boxes,5))
        if len(box)>0:
            np.random.shuffle(box)
            if len(box)>max_boxes: box = box[:max_boxes]
            box[:, [0,2]] = box[:, [0,2]]*scale + dx
            box[:, [1,3]] = box[:, [1,3]]*scale + dy
            box_data[:len(box)] = box

        return image_data, box_data

    # so the input images do not have to be 416*416; the image is resized and the box labels are corrected below
    # resize image
    new_ar = w/h * rand(1-jitter,1+jitter)/rand(1-jitter,1+jitter)
    scale = rand(.25, 2)
    if new_ar < 1:
        nh = int(scale*h)
        nw = int(nh*new_ar)
    else:
        nw = int(scale*w)
        nh = int(nw/new_ar)
    image = image.resize((nw,nh), Image.BICUBIC)

    # place image
    dx = int(rand(0, w-nw))
    dy = int(rand(0, h-nh))
    new_image = Image.new('RGB', (w,h), (128,128,128))
    new_image.paste(image, (dx, dy))
    image = new_image

    # flip image or not
    flip = rand()<.5
    if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT)

    # distort image
    hue = rand(-hue, hue)
    sat = rand(1, sat) if rand()<.5 else 1/rand(1, sat)
    val = rand(1, val) if rand()<.5 else 1/rand(1, val)
    x = rgb_to_hsv(np.array(image)/255.)
    x[..., 0] += hue
    x[..., 0][x[..., 0]>1] -= 1
    x[..., 0][x[..., 0]<0] += 1
    x[..., 1] *= sat
    x[..., 2] *= val
    x[x>1] = 1
    x[x<0] = 0
    image_data = hsv_to_rgb(x) # numpy array, 0 to 1

    # correct boxes
    box_data = np.zeros((max_boxes,5))
    if len(box)>0:
        np.random.shuffle(box)
        box[:, [0,2]] = box[:, [0,2]]*nw/iw + dx
        box[:, [1,3]] = box[:, [1,3]]*nh/ih + dy
        if flip: box[:, [0,2]] = w - box[:, [2,0]]
        box[:, 0:2][box[:, 0:2]<0] = 0
        box[:, 2][box[:, 2]>w] = w
        box[:, 3][box[:, 3]>h] = h
        box_w = box[:, 2] - box[:, 0]
        box_h = box[:, 3] - box[:, 1]
        box = box[np.logical_and(box_w>1, box_h>1)] # discard invalid box
        if len(box)>max_boxes: box = box[:max_boxes]
        box_data[:len(box)] = box

    return image_data, box_data
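
A minimal usage sketch (the image path is hypothetical), just to check the shapes that data_generator later stacks into a batch:

from yolo3.utils import get_random_data

line = '/path/to/ccpd_base/img1.jpg 154,383,386,473,0'
image_data, box_data = get_random_data(line, (416, 416), random=True)
print(image_data.shape)  # (416, 416, 3), float values in [0, 1]
print(box_data.shape)    # (20, 5): up to max_boxes rows of x_min,y_min,x_max,y_max,class_id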
  3. Having read through this, we know how the annotation lines need to be written (although the readme already spells it out -_-).
  4. The dataset is ccpd_base.
    The code to generate train.txt (a worked example of the filename parsing follows the script):
# -*- coding: utf-8 -*-
import cv2
import os
import sys
import numpy as np
import random

traintxt = 'train.txt'
imgs_path = '/home/jiteng/private_app/dataset/ccpd/ccpd_dataset/ccpd_base/' # 199,998 images

img_names = os.listdir(imgs_path)
start_get = random.randint(0, 180)
start_get *= 1000  # start at a random multiple of 1000
img_names = img_names[start_get:start_get + 10000]  # take 10,000 images
print len(img_names)
f = open(traintxt,'a+')

for imgname in img_names:
    #imgname   --   025-95_113-154&383_386&473-386&473_177&454_154&383_363&402-0_0_22_27_27_33_16-37-15.jpg
    name = imgs_path + imgname
    image = cv2.imread(name)  # read the image (the pixel data is not used below)

    label = imgname.split('-')[2]      #154&383_386&473
    label = label.replace('&',',')
    label = label.replace('_',',')

    save_str = name + ' ' + label + ',0\n'  # append class id 0
    print save_str

    f.write(save_str)

f.close()
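
To make the parsing concrete, this is what the steps above produce for the example filename in the comment:

imgname = '025-95_113-154&383_386&473-386&473_177&454_154&383_363&402-0_0_22_27_27_33_16-37-15.jpg'
label = imgname.split('-')[2]             # '154&383_386&473'  (the bounding-box field)
label = label.replace('&', ',').replace('_', ',')
print(label + ',0')                       # '154,383,386,473,0' -> x_min,y_min,x_max,y_max,class_id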

  5. Next, train by following the steps below, choosing the original pretrained weights (I haven't trained yet; I will record that later):

Make sure you have run python convert.py -w yolov3.cfg yolov3.weights model_data/yolo_weights.h5
The file model_data/yolo_weights.h5 is used to load pretrained weights.
Modify train.py and start training.
python train.py
Use your trained weights or checkpoint weights with command line option --model model_file when using yolo_video.py
Remember to modify class path or anchor path, with --classes class_file and --anchors anchor_file.
If you want to use original pretrained weights for YOLOv3:
1. wget https://pjreddie.com/media/files/darknet53.conv.74
2. rename it as darknet53.weights
3. python convert.py -w darknet53.cfg darknet53.weights model_data/darknet53_weights.h5
4. use model_data/darknet53_weights.h5 in train.py

  6. For training, replace its train.py with the following:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2018. All rights reserved.
Created by C. L. Wang on 2018/7/4
"""
import os
import numpy as np
import tensorflow as tf
import keras.backend as K
from keras.layers import Input, Lambda
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from keras.utils import plot_model
from keras.utils import multi_gpu_model

from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss
from yolo3.utils import get_random_data


def _main():
    import os
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
    from keras import backend as K
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    K.set_session(sess)

    annotation_path = 'train.txt'  # annotation data
    classes_path = 'model_data/voc_classes.txt'  # class names

    log_dir = 'logs/000/'  # log directory

    pretrained_path = 'model_data/yolo_weights.h5'  # pretrained weights
    anchors_path = 'model_data/yolo_anchors.txt'  # anchors

    class_names = get_classes(classes_path)  # list of class names
    num_classes = len(class_names)  # number of classes
    anchors = get_anchors(anchors_path)  # list of anchors

    input_shape = (416, 416)  # input image size, must be a multiple of 32

    model = create_model(input_shape, anchors, num_classes,
                         freeze_body=2,
                         weights_path=pretrained_path)  # make sure you know what you freeze

    logging = TensorBoard(log_dir=log_dir)
    checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
                                 monitor='val_loss', save_weights_only=True,
                                 save_best_only=True, period=3)  # save weights only, checked every 3 epochs
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)  # reduce the learning rate when val_loss stops improving
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)  # stop training when val_loss has not improved for 10 epochs

    val_split = 0.1  # fraction of the data used for validation
    with open(annotation_path) as f:
        lines = f.readlines()
    np.random.seed(47)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_val = int(len(lines) * val_split)  # number of validation samples
    num_train = len(lines) - num_val  # number of training samples

    # model = multi_gpu_model(model, gpus=2)  # use 2 GPUs; this line must come before model.compile
    """
    Treat the targets as extra inputs, so the model becomes multi-input, and write the loss as a layer
    that is the model's final output. When building the model, its output is simply the loss; when
    compiling, set the loss to y_pred (the output already is the loss, so y_pred is the loss) and
    ignore y_true. During training, any array with a compatible shape can be fed in as y_true.
    (A stripped-down standalone sketch of this pattern is given after the full script.)
    """
    if False:  # stage 1: train with the frozen body (disabled here)
        model.compile(optimizer=Adam(lr=1e-3), loss={
            # use the custom yolo_loss Lambda layer
            'yolo_loss': lambda y_true, y_pred: y_pred})  # loss function

        batch_size = 32  # batch size
        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
        model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
                            steps_per_epoch=max(1, num_train // batch_size),
                            validation_data=data_generator_wrapper(
                                lines[num_train:], batch_size, input_shape, anchors, num_classes),
                            validation_steps=max(1, num_val // batch_size),
                            epochs=50,
                            initial_epoch=0,
                            callbacks=[logging, checkpoint])
        model.save_weights(log_dir + 'trained_weights_stage_1.h5')  # save the stage-1 weights; checkpoints are also saved during training via the callback

    if True:  # train all layers (unfreeze everything)
        for i in range(len(model.layers)):
            model.layers[i].trainable = True

        model.compile(optimizer=Adam(lr=1e-4),
                      loss={'yolo_loss': lambda y_true, y_pred: y_pred})  # recompile to apply the change
        print('Unfreeze all of the layers.')

        batch_size = 8  # note that more GPU memory is required after unfreezing the body
        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))

        model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
                            steps_per_epoch=max(1, num_train // batch_size),
                            validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors,
                                                                   num_classes),
                            validation_steps=max(1, num_val // batch_size),
                            epochs=100,
                            initial_epoch=50,
                            callbacks=[logging, checkpoint, reduce_lr, early_stopping])
        model.save_weights(log_dir + 'trained_weights_final.h5')


def get_classes(classes_path):
    '''loads the classes'''
    with open(classes_path) as f:
        class_names = f.readlines()
    class_names = [c.strip() for c in class_names]
    return class_names


def get_anchors(anchors_path):
    '''loads the anchors from a file'''
    with open(anchors_path) as f:
        anchors = f.readline()
    anchors = [float(x) for x in anchors.split(',')]
    return np.array(anchors).reshape(-1, 2)


def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
                 weights_path='model_data/yolo_weights.h5'):
    K.clear_session()  # clear the current session
    h, w = input_shape  # input size
    image_input = Input(shape=(w, h, 3))  # image input format
    num_anchors = len(anchors)  # number of anchors

    # YOLO's three scales; anchors per scale; num_classes + 4 box coords + 1 objectness score
    y_true = [Input(shape=(h // {0: 32, 1: 16, 2: 8}[l], w // {0: 32, 1: 16, 2: 8}[l],
                           num_anchors // 3, num_classes + 5)) for l in range(3)]

    model_body = yolo_body(image_input, num_anchors // 3, num_classes)  # model
    print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))

    if load_pretrained:  # load the pretrained weights
        model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)  # load by name, skip mismatched layers
        print('Load weights {}.'.format(weights_path))
        if freeze_body in [1, 2]:
            # Freeze darknet53 body or freeze all but 3 output layers.
            num = (185, len(model_body.layers) - 3)[freeze_body - 1]
            for i in range(num):
                model_body.layers[i].trainable = False  # freeze these layers
            print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))

    model_loss = Lambda(yolo_loss,
                        output_shape=(1,), name='yolo_loss',
                        arguments={'anchors': anchors,
                                   'num_classes': num_classes,
                                   'ignore_thresh': 0.5}
                        )(model_body.output + y_true)
    model = Model(inputs=[model_body.input] + y_true, outputs=model_loss)  # full model: inputs and outputs
    #plot_model(model, to_file=os.path.join('model_data', 'model.png'), show_shapes=True, show_layer_names=True)
    model.summary()

    return model


def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes):
    '''data generator for fit_generator'''
    n = len(annotation_lines)
    i = 0
    while True:
        image_data = []
        box_data = []
        for b in range(batch_size):
            if i == 0:
                np.random.shuffle(annotation_lines)
            image, box = get_random_data(annotation_lines[i], input_shape, random=True)  # get augmented image and boxes
            image_data.append(image)  # append the image
            box_data.append(box)  # append the boxes
            i = (i + 1) % n
        image_data = np.array(image_data)
        box_data = np.array(box_data)
        y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)  # ground-truth targets
        yield [image_data] + y_true, np.zeros(batch_size)


def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes):
    """
    用於條件檢查
    """
    n = len(annotation_lines)  # 標註圖片的行數
    if n == 0 or batch_size <= 0: return None
    return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes)


if __name__ == '__main__':
    _main()
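
As promised above, here is a stripped-down, self-contained sketch of the "loss as a layer" pattern (the toy model and names are hypothetical, not part of the YOLO code): the model's only output is the loss tensor, compile() just passes y_pred through, and a dummy zeros array is fed as y_true.

import numpy as np
import keras.backend as K
from keras.layers import Input, Dense, Lambda
from keras.models import Model

x_in = Input(shape=(4,))
y_in = Input(shape=(1,))                      # the real target enters the model as an input
pred = Dense(1)(x_in)
loss = Lambda(lambda t: K.mean(K.square(t[0] - t[1]), axis=-1, keepdims=True),
              name='toy_loss')([pred, y_in])  # the loss is computed inside the graph
model = Model([x_in, y_in], loss)
model.compile(optimizer='adam', loss=lambda y_true, y_pred: y_pred)  # y_pred already is the loss

X = np.random.rand(32, 4)
Y = np.random.rand(32, 1)
model.fit([X, Y], np.zeros((32, 1)), epochs=1)  # y_true is a dummy array of matching shape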