本系列作者:木凌
時間:2016年11月。
文章連接:http://blog.csdn.net/u014540717
QQ交流羣:554590241
本系列文章會持續更新,主要會分以下幾個部分:
1、darknet下的yolo源代碼解讀
2、將yolo移植到mxnet下
3、模型壓縮與加速
白天需要工作,只有晚上時間寫,所以可能更新速度有點慢,還有就是該系列博文不一定會嚴格按照以上三點的順序來寫,也可能移植到caffe下,在caffe下進行壓縮和加速。
一、訓練
我用的是VOC2007的數據集,下載指令如下:
$curl -O http://pjreddie.com/media/files/VOCtrainval_06-Nov-2007.tar
$curl -O http://pjreddie.com/media/files/VOCtest_06-Nov-2007.tar
$tar xf VOCtrainval_06-Nov-2007.tar
$tar xf VOCtest_06-Nov-2007.tar
運行以下代碼,將 .xml 文件轉換成 .txt 文件,以備YOLO訓練時數據解析:
import xml.etree.ElementTree as ET
import pickle
import os
from os import listdir, getcwd
from os.path import join
# Dataset splits to convert: (year, split-name) pairs matching VOCdevkit layout.
sets=[('2007', 'train'), ('2007', 'val')]
# The 20 Pascal VOC object classes; a class's list index becomes its YOLO class id.
classes = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
def convert(size, box):
    """Normalize a VOC bounding box to YOLO (x, y, w, h) format.

    Args:
        size: (width, height) of the image in pixels.
        box: (xmin, xmax, ymin, ymax) corner coordinates in pixels.

    Returns:
        Tuple (x, y, w, h): box center and box size, each scaled to [0, 1]
        by the corresponding image dimension.
    """
    scale_x = 1. / size[0]
    scale_y = 1. / size[1]
    # Corner coordinates -> center point and extents, then rescale.
    center_x = (box[0] + box[1]) / 2.0
    center_y = (box[2] + box[3]) / 2.0
    box_w = box[1] - box[0]
    box_h = box[3] - box[2]
    center_x = center_x * scale_x
    box_w = box_w * scale_x
    center_y = center_y * scale_y
    box_h = box_h * scale_y
    return (center_x, center_y, box_w, box_h)
def convert_annotation(year, image_id):
    """Convert one VOC XML annotation into a YOLO label file.

    Reads VOCdevkit/VOC<year>/Annotations/<image_id>.xml and writes
    VOCdevkit/VOC<year>/labels/<image_id>.txt with one line per kept
    object: "<class_id> <x> <y> <w> <h>", coordinates normalized by
    `convert()`. Objects whose class is not in `classes`, or that are
    flagged difficult, are skipped.
    """
    in_path = 'VOCdevkit/VOC%s/Annotations/%s.xml' % (year, image_id)
    out_path = 'VOCdevkit/VOC%s/labels/%s.txt' % (year, image_id)
    # ET.parse accepts a path directly, so no file handle is left open.
    tree = ET.parse(in_path)
    root = tree.getroot()
    size = root.find('size')
    w = int(size.find('width').text)
    h = int(size.find('height').text)
    # Fix: the original opened both files and never closed them; the output
    # file now lives in a `with` block so it is always flushed and closed.
    with open(out_path, 'w') as out_file:
        for obj in root.iter('object'):
            difficult = obj.find('difficult').text
            cls = obj.find('name').text
            if cls not in classes or int(difficult) == 1:
                continue
            cls_id = classes.index(cls)
            xmlbox = obj.find('bndbox')
            b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text),
                 float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))
            bb = convert((w, h), b)
            out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')
wd = getcwd()

# For each (year, split) pair: ensure the labels directory exists, write a
# list file of absolute .jpg paths for darknet's data loader, and convert
# each image's XML annotation into a YOLO label file.
for year, image_set in sets:
    if not os.path.exists('VOCdevkit/VOC%s/labels/' % (year)):
        os.makedirs('VOCdevkit/VOC%s/labels/' % (year))
    # Fix: the original never closed the ImageSets file handle; read it
    # inside a `with` block instead.
    with open('VOCdevkit/VOC%s/ImageSets/Main/%s.txt' % (year, image_set)) as id_file:
        image_ids = id_file.read().strip().split()
    # Fix: the list file is also managed by `with` (original closed it
    # manually, which leaks on an exception mid-loop).
    with open('%s_%s.txt' % (year, image_set), 'w') as list_file:
        for image_id in image_ids:
            list_file.write('%s/VOCdevkit/VOC%s/JPEGImages/%s.jpg\n' % (wd, year, image_id))
            convert_annotation(year, image_id)
在 yolo.c 中找到以下代碼,並修改:
//指向你剛纔生成的train.txt文件
char *train_images = "/data/voc/train.txt";
//新建個文件夾,然後指向這裏就可以,訓練生成的模型文件會保存在這裏
char *backup_directory = "/home/pjreddie/backup/";
運行以下指令開始訓練:
./darknet yolo train cfg/yolo.train.cfg extraction.conv.weights
二、源碼解讀
1.首先我們看一下訓練的數據流,從main函數開始看,該函數在 darknet.c 文件中:
//darknet.c
// darknet.c — program entry point: parses global GPU flags, then
// dispatches to the sub-command named by argv[1].
int main(int argc, char **argv)
{
    //test_resize("data/bad.jpg");
    //test_box();
    //test_convolutional_layer();
    // Require at least a sub-command name.
    if(argc < 2){
        fprintf(stderr, "usage: %s <function>\n", argv[0]);
        return 0;
    }
    // "-i <n>" selects the GPU index; "-nogpu" forces CPU-only mode.
    gpu_index = find_int_arg(argc, argv, "-i", 0);
    if(find_arg(argc, argv, "-nogpu")) {
        gpu_index = -1;
    }
#ifndef GPU
    // Built without GPU support: always run on the CPU.
    gpu_index = -1;
#else
    if(gpu_index >= 0){
        cuda_set_device(gpu_index);
    }
#endif
    if (0 == strcmp(argv[1], "average")){
        average(argc, argv);
    } else if (0 == strcmp(argv[1], "yolo")){
        // The first argument is "yolo", so control passes to run_yolo.
        run_yolo(argc, argv);
    } else {
        fprintf(stderr, "Not an option: %s\n", argv[1]);
    }
    return 0;
}
//yolo.c
// yolo.c — sub-command dispatcher for "./darknet yolo <mode> ...".
void run_yolo(int argc, char **argv)
{
    char *prefix = find_char_arg(argc, argv, "-prefix", 0);
    float thresh = find_float_arg(argc, argv, "-thresh", .2);
    int cam_index = find_int_arg(argc, argv, "-c", 0);
    int frame_skip = find_int_arg(argc, argv, "-s", 0);
    if(argc < 4){
        fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]);
        return;
    }
    char *cfg = argv[3];
    char *weights = (argc > 4) ? argv[4] : 0;
    char *filename = (argc > 5) ? argv[5]: 0;
    if(0==strcmp(argv[2], "test")) test_yolo(cfg, weights, filename, thresh);
    // The second argument is "train", so control passes to train_yolo.
    else if(0==strcmp(argv[2], "train")) train_yolo(cfg, weights);
    // Fix: the excerpt had a dangling fragment "voc_names, 20, frame_skip,
    // prefix);" — the tail of the upstream "demo" dispatch line that was
    // truncated; restored here so the function compiles.
    else if(0==strcmp(argv[2], "demo")) demo(cfg, weights, thresh, cam_index, filename, voc_names, 20, frame_skip, prefix);
}
// Train a YOLO detector: loads image paths from train_images, streams
// augmented batches on a loader thread, and snapshots weights into
// backup_directory.
void train_yolo(char *cfgfile, char *weightfile)
{
    // Point this at the train.txt list file generated earlier.
    char *train_images = "/data/voc/train.txt";
    // Weight snapshots produced during training are written here.
    char *backup_directory = "/home/pjreddie/backup/";
    /* srand() seeds the pseudo-random number generator used by rand().
       With an unchanged seed rand() reproduces the same sequence every
       run, so seeding with time(0) varies the sequence per run. */
    srand(time(0));
    /* The third command-line argument is `cfg/yolo.train.cfg`; basecfg()
       reduces that path to a base name (the later printf shows "yolo"),
       which is used below to build snapshot file names. */
    char *base = basecfg(cfgfile);
    // Prints the base name, e.g. "yolo".
    printf("%s\n", base);
    float avg_loss = -1;
    // Parse the network architecture from the cfg file (analyzed in detail later).
    network net = parse_network_cfg(cfgfile);
    // Load pretrained weights, if a weight file was supplied.
    if(weightfile){
        load_weights(&net, weightfile);
    }
    printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net.learning_rate, net.momentum, net.decay);
    /* imgs is how many images are loaded into memory at once; if that uses
       too much memory, reduce `subdivisions` or `batch` in the cfg. */
    int imgs = net.batch*net.subdivisions;
    // net.seen counts images already trained on, so i is the iteration count so far.
    int i = *net.seen/imgs;
    data train, buffer;
    layer l = net.layers[net.n - 1];
    // side is the detection grid size S (7 in the YOLO v1 paper).
    int side = l.side;
    int classes = l.classes;
    // jitter controls random crop/translation augmentation of the input images.
    float jitter = l.jitter;
    list *plist = get_paths(train_images);
    //int N = plist->size;
    char **paths = (char **)list_to_array(plist);
    load_args args = {0};
    args.w = net.w;
    args.h = net.h;
    args.paths = paths;
    // n: number of images one loader pass brings into memory.
    args.n = imgs;
    // m: total number of training images available.
    args.m = plist->size;
    args.classes = classes;
    args.jitter = jitter;
    // side x side grid cells (7*7).
    args.num_boxes = side;
    args.d = &buffer;
    args.type = REGION_DATA;
    // Augmentation knobs: rotation angle, exposure, saturation, hue.
    args.angle = net.angle;
    args.exposure = net.exposure;
    args.saturation = net.saturation;
    args.hue = net.hue;
    // Start an asynchronous loader thread that fills `buffer`.
    pthread_t load_thread = load_data_in_thread(args);
    clock_t time;
    //while(i*imgs < N*120){
    while(get_current_batch(net) < net.max_batches){
        i += 1;
        time=clock();
        /* pthread_join() blocks until the loader thread finishes and its
           resources are reclaimed; it returns immediately if the thread
           has already exited. */
        pthread_join(load_thread, 0);
        train = buffer;
        load_thread = load_data_in_thread(args);
        printf("Loaded: %lf seconds\n", sec(clock()-time));
        time=clock();
        // Run one training step on the freshly loaded batch.
        float loss = train_network(net, train);
        if (avg_loss < 0) avg_loss = loss;
        // Exponential moving average of the loss for smoother reporting.
        avg_loss = avg_loss*.9 + loss*.1;
        printf("%d: %f, %f avg, %f rate, %lf seconds, %d images\n", i, loss, avg_loss, get_current_rate(net), sec(clock()-time), i*imgs);
        // Snapshot weights every 1000 iterations (every 100 within the first 1000).
        if(i%1000==0 || (i < 1000 && i%100 == 0)){
            char buff[256];
            sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
            save_weights(net, buff);
        }
        free_data(train);
    }
    char buff[256];
    sprintf(buff, "%s/%s_final.weights", backup_directory, base);
    save_weights(net, buff);
}
看到這裏,我想你已經能可以自己來訓練VOC數據集了。但這只是剛剛開始,我們要想將YOLO移植到mxnet下,就需要了解其具體函數的實現過程,請看下一節內容。
(END)