Training your own model with YOLO

 

一. Convert the annotated images into a format the training pipeline can read:
    1. Create the folder structure following the VOC layout.
    2. Put the original images in JPEGImages and the XML annotation files in Annotations.
    3. Run create_list.py to split the images into test, val, and train sets and to generate the corresponding three .txt files under ImageSets/Main.
    4. Run my_voc_label.py to generate the label files for the images (see the conversion sketch after this list).
    5. Run: cat 2017_train.txt 2017_val.txt > train.txt to merge the train and val lists into a single file.
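my_voc_label.py is not reproduced in this post; it is presumably a lightly modified copy of darknet's standard voc_label.py, whose core is the box conversion below: each VOC object box (xmin, xmax, ymin, ymax in pixels) is turned into YOLO's normalized (x_center, y_center, width, height), and one "class_id x y w h" line is written per object into the labels file. A minimal sketch of that conversion (the example numbers are only illustrative):

def convert(size, box):
    """Convert a VOC pixel box (xmin, xmax, ymin, ymax) to YOLO's normalized (x, y, w, h)."""
    dw = 1.0 / size[0]            # 1 / image width
    dh = 1.0 / size[1]            # 1 / image height
    x = (box[0] + box[1]) / 2.0   # box center x in pixels
    y = (box[2] + box[3]) / 2.0   # box center y in pixels
    w = box[1] - box[0]           # box width in pixels
    h = box[3] - box[2]           # box height in pixels
    return (x * dw, y * dh, w * dw, h * dh)

# example: a 200x100 box centered at (320, 240) in a 640x480 image
print(convert((640, 480), (220, 420, 190, 290)))   # -> (0.5, 0.5, 0.3125, 0.2083...)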
二. Modify the configuration files
    1. Edits to voc.data
         classes = 20   (change to the number of classes in your dataset)
         train  = <path-to-voc>/train.txt   (absolute path to the training list)
         valid  = <path-to-voc>/2007_test.txt   (absolute path to the validation/test list)
         names = data/voc.names   (file listing the class names)
         backup = backup   (folder where training checkpoints are saved)
    2. voc.names
        Write the class names in this file, one name per line.
    3. yolo-voc.cfg
        Adjust parameters such as batch, subdivisions, learning_rate, and max_batches.
        At the end of the file, change filters in the last [convolutional] layer (the number of kernels of the final convolutional layer). It depends on the number of classes in your own dataset: filters = num × (classes + coords + 1) = 5 × (1 + 4 + 1) = 30 for a single class (see the sketch after this list).
        Change classes to the number of classes in your dataset.
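For reference, after these edits the tail of yolo-voc.cfg for a single-class model would look roughly like the sketch below (only the lines discussed above are shown; every other parameter, including the anchors, keeps its value from the original file):

[convolutional]
size=1
stride=1
pad=1
# filters = num x (classes + coords + 1) = 5 x (1 + 4 + 1) = 30 for one class
filters=30
activation=linear

[region]
# classes = number of classes in your own dataset
classes=1
coords=4
num=5
# anchors and the remaining [region] parameters keep their original values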


Visualizing the training results
    1. First run extract_log.py to process the saved training log and generate separate loss and IOU files (how the log itself can be captured is shown after this list).
    2. Then run train_iou_visualization.py or train_loss_visualization.py to plot the IOU and loss curves.
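The log file that extract_log.py reads (here /home/darknet/log/yolo-voc.txt) is simply the console output of the training run saved to disk; assuming the standard darknet command line and the usual YOLOv2 pretrained weights, it can be captured with something like:

./darknet detector train cfg/voc.data cfg/yolo-voc.cfg darknet19_448.conv.23 2>&1 | tee /home/darknet/log/yolo-voc.txt

extract_log.py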
# coding=utf-8
# This script extracts the training log: it drops lines that cannot be parsed and writes a cleaned, formatted log file for the plotting scripts below.
def extract_log(log_file,new_log_file,key_word):
    f = open(log_file)
    train_log = open(new_log_file, 'w')
    for line in f:
        # skip the multi-GPU synchronization lines
        if 'Syncing' in line:
            continue
        # skip lines containing nan (division-by-zero) values
        if 'nan' in line:
            continue
        if key_word in line:
            train_log.write(line)

    f.close()
    train_log.close()

extract_log('/home/darknet/log/yolo-voc.txt','person_train_log_loss.txt','images')   # person_train_log_loss.txt is used to plot the loss curve
extract_log('/home/darknet/log/yolo-voc.txt','person_train_log_iou.txt','IOU')       # person_train_log_iou.txt is used to plot the Region Avg IOU curve

 

train_iou_visualization.py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline

lines = 143785   # number of lines in the extracted IOU log; adjust to your own log
# rows whose index modulo 10 is 0 or 9 are skipped to subsample the log
# (note: error_bad_lines was removed in pandas 2.0; use on_bad_lines='skip' on newer pandas)
result = pd.read_csv('person_train_log_iou.txt', skiprows=[x for x in range(lines) if (x%10==0 or x%10==9)], error_bad_lines=False, names=['Region Avg IOU', 'Class', 'Obj', 'No Obj', 'Avg Recall', 'count'])
result.head()

# keep only the numeric part after the "name: " prefix in each column
result['Region Avg IOU']=result['Region Avg IOU'].str.split(': ').str.get(1)
result['Class']=result['Class'].str.split(': ').str.get(1)
result['Obj']=result['Obj'].str.split(': ').str.get(1)
result['No Obj']=result['No Obj'].str.split(': ').str.get(1)
result['Avg Recall']=result['Avg Recall'].str.split(': ').str.get(1)
result['count']=result['count'].str.split(': ').str.get(1)
result.head()
result.tail()

#print(result.head())
# print(result.tail())
# print(result.dtypes)
print(result['Region Avg IOU'])

result['Region Avg IOU']=pd.to_numeric(result['Region Avg IOU'])
result['Class']=pd.to_numeric(result['Class'])
result['Obj']=pd.to_numeric(result['Obj'])
result['No Obj']=pd.to_numeric(result['No Obj'])
result['Avg Recall']=pd.to_numeric(result['Avg Recall'])
result['count']=pd.to_numeric(result['count'])
result.dtypes

fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(result['Region Avg IOU'].values,label='Region Avg IOU')
#ax.plot(result['Class'].values,label='Class')
#ax.plot(result['Obj'].values,label='Obj')
#ax.plot(result['No Obj'].values,label='No Obj')
#ax.plot(result['Avg Recall'].values,label='Avg Recall')
#ax.plot(result['count'].values,label='count')
ax.legend(loc='best')
#ax.set_title('The Region Avg IOU curves')
ax.set_title('The Region Avg IOU curves')
ax.set_xlabel('batches')
#fig.savefig('Avg IOU')
fig.savefig('Region Avg IOU')

train_loss_visualization.py

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline

lines = 11744   # number of lines in the extracted loss log; only used by the commented-out subsampled read below
# result = pd.read_csv('person_train_log_loss.txt', skiprows=[x for x in range(lines) if ((x%10!=9) | (x<1000))], error_bad_lines=False, names=['loss', 'avg', 'rate', 'seconds', 'images'])
result = pd.read_csv('person_train_log_loss.txt', error_bad_lines=False, names=['loss', 'avg', 'rate', 'seconds', 'images'])

result.head()

# take the second space-separated token of each comma-separated field, i.e. the numeric value
result['loss']=result['loss'].str.split(' ').str.get(1)
result['avg']=result['avg'].str.split(' ').str.get(1)
result['rate']=result['rate'].str.split(' ').str.get(1)
result['seconds']=result['seconds'].str.split(' ').str.get(1)
result['images']=result['images'].str.split(' ').str.get(1)
result.head()
result.tail()

#print(result.head())
# print(result.tail())
# print(result.dtypes)

print('[1]',result['loss'].values.shape)
print('[2]',result['avg'].values.shape)
print('[3]',result['rate'].values.shape)
print('[4]',result['seconds'].values.shape)
print('[5]',result['images'].values.shape)

result['loss']=pd.to_numeric(result['loss'])
result['avg']=pd.to_numeric(result['avg'])
result['rate']=pd.to_numeric(result['rate'])
result['seconds']=pd.to_numeric(result['seconds'])
result['images']=pd.to_numeric(result['images'])
result.dtypes



fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(result['avg'].values,label='avg_loss')
#ax.plot(result['loss'].values,label='loss')
ax.legend(loc='best')
ax.set_title('The loss curves')
ax.set_xlabel('batches')
fig.savefig('avg_loss')
#fig.savefig('loss')

 

 
