1.yolo訓練數據準備
darknet訓練數據比較繁瑣,數據準備參考 [yolo訓練數據製作](http://blog.csdn.net/burning_keyboard/article/details/71055557)
2.網絡準備
1.修改 cfg/voc.data:
classes= 10 //類別數量
train = ../your_save_path/trainImagePath.txt //訓練圖片列表文件的路徑(相對路徑設置爲相對 darknet 可執行文件所在目錄)
valid = ../zifu1/validateImagePath.txt //注意:darknet 讀取的鍵名是 valid,不是 validate
names = data/voc.names // voc.names 類別的名稱,這個文件去改一下
backup = backup // 記錄每迭代100次得到的模型 .weights文件
2.修改網絡結構 cfg/yolo-voc.cfg:
(1)最後一個卷積層
[convolutional]
size=1
stride=1
pad=1
filters=125 // filters = num * (classes + coords + 1);此例 125 = 5(anchor box) * (20(class) + 4(coor) + 1(confidence))
... // 若本文的 classes=10,則 filters = 5 * (10 + 4 + 1) = 75,需根據自己的 class 數算好 filters 個數
(2)[region]
anchors = 1.08,1.19, 3.42,4.41, 6.63,11.38, 9.42,5.11, 16.62,10.52
bias_match=1
classes=10 // 改成目標分類數量
coords=4
num=5 // anchor box 數量
...
(3)預訓練文件cfg/darknet19_448.conv.23:在其他數據集上pretrain的模型做爲初值
3.訓練命令
cd darknet
./darknet detector train cfg/voc.data cfg/yolo-voc.cfg cfg/darknet19_448.conv.23
每train一個batch會用validate檢測數據檢測一次,顯示迭代次數,學習率,recall值,已訓練圖片數等,
每100次會在backup裏存一次 weights
4.檢測命令
./darknet detector test cfg/voc.data cfg/yolo-voc.cfg backup/yolo-voc_400.weights testImage/738780.jpg
5.yolo-darknet主函數入口
1.所有腳本在src文件夾。主函數入口在darknet.c中。其中有run_yolo和run_detector都能用於訓練檢測。
2.進入detector.c文件可以找到解析訓練和檢測命令的函數:
/*
 * Dispatcher for the "detector" subcommand (called from darknet.c when
 * argv[1] == "detector").  Parses the shared command-line options, then
 * forwards to train/test/valid/recall/demo.
 *
 * Expected argv layout:
 *   darknet detector <mode> <data cfg> <model cfg> [weights] [input]
 */
void run_detector(int argc, char **argv)
{
    // Optional -prefix option (used by demo mode); defaults to NULL when absent.
    // argv is the array of argument strings.
    char *prefix = find_char_arg(argc, argv, "-prefix", 0);
    // Detection confidence threshold; boxes scoring below it are discarded (default 0.24).
    float thresh = find_float_arg(argc, argv, "-thresh", .24);
    // Hierarchical (softmax-tree) threshold, default 0.5.
    float hier_thresh = find_float_arg(argc, argv, "-hier", .5);
    // Camera index for demo mode.
    int cam_index = find_int_arg(argc, argv, "-c", 0);
    // Number of frames to skip in demo mode.
    int frame_skip = find_int_arg(argc, argv, "-s", 0);

    // Every mode reads argv[3] (data cfg) and argv[4] (model cfg), so we need
    // argc >= 5.  The original check (argc < 4) allowed argc == 4, in which
    // case `cfg = argv[4]` picked up the terminating NULL of argv and crashed
    // later inside parse_network_cfg.  stderr is used so the usage text is not
    // mixed into redirected stdout.
    if(argc < 5){
        fprintf(stderr, "usage: %s %s [train/test/valid] [data cfg] [model cfg] [weights (optional)]\n", argv[0], argv[1]);
        return;
    }

    // Comma-separated GPU list, e.g. "-gpus 0,1,2".
    char *gpu_list = find_char_arg(argc, argv, "-gpus", 0);
    // Output file name for "valid" mode.
    char *outfile = find_char_arg(argc, argv, "-out", 0);
    int *gpus = 0;
    int gpu = 0;
    int ngpus = 0;
    if(gpu_list){
        printf("%s\n", gpu_list);
        int len = strlen(gpu_list);
        // Count entries: one GPU id per comma-separated token.
        ngpus = 1;
        int i;
        for(i = 0; i < len; ++i){
            if (gpu_list[i] == ',') ++ngpus;
        }
        gpus = calloc(ngpus, sizeof(int));
        for(i = 0; i < ngpus; ++i){
            gpus[i] = atoi(gpu_list);
            gpu_list = strchr(gpu_list, ',')+1;
        }
    } else {
        // No -gpus given: fall back to the single global gpu_index.
        gpu = gpu_index;
        gpus = &gpu;
        ngpus = 1;
    }

    // -clear resets net->seen so training restarts its batch counter.
    int clear = find_arg(argc, argv, "-clear");

    char *datacfg = argv[3];                    // path of the .data file (classes/train/names/backup)
    char *cfg = argv[4];                        // path of the network .cfg file
    char *weights = (argc > 5) ? argv[5] : 0;   // optional .weights file
    char *filename = (argc > 6) ? argv[6]: 0;   // optional input image/video path

    // Dispatch on the mode keyword (argv[2]), passing the options parsed above.
    if(0==strcmp(argv[2], "test")) test_detector(datacfg, cfg, weights, filename, thresh, hier_thresh);
    else if(0==strcmp(argv[2], "train")) train_detector(datacfg, cfg, weights, gpus, ngpus, clear);
    else if(0==strcmp(argv[2], "valid")) validate_detector(datacfg, cfg, weights, outfile);
    else if(0==strcmp(argv[2], "recall")) validate_detector_recall(cfg, weights);
    else if(0==strcmp(argv[2], "demo")) {
        list *options = read_data_cfg(datacfg);
        int classes = option_find_int(options, "classes", 20);
        char *name_list = option_find_str(options, "names", "data/names.list");
        char **names = get_labels(name_list);
        demo(cfg, weights, thresh, cam_index, filename, names, classes, frame_skip, prefix, hier_thresh);
    }
    // Release the GPU id array only when we heap-allocated it above
    // (otherwise gpus points at the local variable `gpu`).
    if(gpu_list) free(gpus);
}
/*
 * Run detection on a single image (or interactively prompt for image paths
 * when filename is NULL), draw the boxes, and save/show "predictions".
 *
 * datacfg    - path to the .data file (names list etc.)
 * cfgfile    - network .cfg describing the model structure
 * weightfile - trained weights (may be NULL to run with random init)
 * filename   - image to test; NULL => loop reading paths from stdin
 * thresh     - detection confidence threshold
 * hier_thresh- hierarchical (softmax-tree) threshold
 */
void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filename, float thresh, float hier_thresh)
{
    // options stores the parsed .data file (class labels and other basics).
    list *options = read_data_cfg(datacfg);
    // Extract the label-name list.
    char *name_list = option_find_str(options, "names", "data/names.list");
    char **names = get_labels(name_list);
    // Load the glyph images under data/labels, used to caption the boxes.
    image **alphabet = load_alphabet();
    // Build the model into the custom network struct from network.h
    // (parse_network_cfg lives in parser.c).
    network net = parse_network_cfg(cfgfile);
    // Load the matching weight file, if given.
    if(weightfile){
        load_weights(&net, weightfile);
    }
    // Inference runs one image at a time.
    set_batch_network(&net, 1);
    srand(2222222);
    clock_t time;
    char buff[256];
    char *input = buff;
    int j;
    float nms=.4;
    while(1){
        if(filename){
            // snprintf always NUL-terminates (the original strncpy left
            // `input` unterminated when filename was >= 256 chars).
            snprintf(input, 256, "%s", filename);
        } else {
            printf("Enter Image Path: ");
            fflush(stdout);
            input = fgets(input, 256, stdin);
            if(!input) return;
            strtok(input, "\n");
        }
        image im = load_image_color(input,0,0);
        // Resize the input image to the network's input dimensions.
        image sized = resize_image(im, net.w, net.h);
        layer l = net.layers[net.n-1];
        box *boxes = calloc(l.w*l.h*l.n, sizeof(box));
        float **probs = calloc(l.w*l.h*l.n, sizeof(float *));
        // Per-cell/per-anchor class probabilities: the elements are float, so
        // allocate sizeof(float) — the original sizeof(float *) silently
        // over-allocated (8 bytes per element on 64-bit instead of 4).
        for(j = 0; j < l.w*l.h*l.n; ++j) probs[j] = calloc(l.classes + 1, sizeof(float));
        // X points at the image's pixel data.
        float *X = sized.data;
        time=clock();
        // network_predict runs the forward pass on X.
        network_predict(net, X);
        printf("%s: Predicted in %f seconds.\n", input, sec(clock()-time));
        get_region_boxes(l, 1, 1, thresh, probs, boxes, 0, 0, hier_thresh);
        // Non-maximum suppression: object-based for softmax-tree models,
        // otherwise the standard sorted NMS.
        if (l.softmax_tree && nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
        else if (nms) do_nms_sort(boxes, probs, l.w*l.h*l.n, l.classes, nms);
        draw_detections(im, l.w*l.h*l.n, thresh, boxes, probs, names, alphabet, l.classes);
        save_image(im, "predictions");
        show_image(im, "predictions");
        free_image(im);
        free_image(sized);
        free(boxes);
        free_ptrs((void **)probs, l.w*l.h*l.n);
#ifdef OPENCV
        cvWaitKey(0);
        cvDestroyAllWindows();
#endif
        // A single named file is a one-shot run; stdin mode keeps looping.
        if (filename) break;
    }
}
/*
 * Train a detector, optionally across multiple GPUs.
 *
 * datacfg    - .data file (train list, backup dir, ...)
 * cfgfile    - network .cfg
 * weightfile - pretrained weights to start from (may be NULL)
 * gpus/ngpus - GPU id array and its length
 * clear      - nonzero resets the "seen images" counter
 *
 * NOTE(review): the "error test..." / "hahaha" printfs are the blog author's
 * debug traces, kept as-is.
 */
void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear)
{
// Parse the .data file into the custom options list (read_data_cfg lives in option_list.c).
list *options = read_data_cfg(datacfg);
// Path of the training image list.
char *train_images = option_find_str(options, "train", "data/train.list");
// Directory where weight snapshots are written.
char *backup_directory = option_find_str(options, "backup", "/backup/");
// Seed the RNG from the wall clock.
srand(time(0));
// basecfg (utils.c) returns the cfg file name without its extension,
// used below to name the saved .weights files.
char *base = basecfg(cfgfile);
printf(" hahaha %s\n", base);
float avg_loss = -1;
// One network instance per GPU.
network *nets = calloc(ngpus, sizeof(network));
srand(time(0));
int seed = rand();
int i;
for(i = 0; i < ngpus; ++i){
// Same seed per replica so all GPUs start identically.
srand(seed);
#ifdef GPU
cuda_set_device(gpus[i]);
#endif
nets[i] = parse_network_cfg(cfgfile);
if(weightfile){
load_weights(&nets[i], weightfile);
}
// -clear: restart the batch counter stored in the weights.
if(clear) *nets[i].seen = 0;
// Scale the learning rate by the number of GPUs (larger effective batch).
nets[i].learning_rate *= ngpus;
}
srand(time(0));
// net aliases the first replica; used for scheduling/reporting.
network net = nets[0];
// Images consumed per optimizer step across all GPUs.
int imgs = net.batch * net.subdivisions * ngpus;
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net.learning_rate, net.momentum, net.decay);
data train, buffer;
// l is the final (region) layer: it carries classes, jitter, max_boxes, random.
layer l = net.layers[net.n - 1];
int classes = l.classes;
printf("error test!%d\n",classes);
float jitter = l.jitter;
// Read the training list into an array of image paths.
list *plist = get_paths(train_images);
//int N = plist->size;
char **paths = (char **)list_to_array(plist);
printf("error test!\n");
// Arguments for the asynchronous data-loading threads.
load_args args = {0};
args.w = net.w;
args.h = net.h;
args.paths = paths;
args.n = imgs;
args.m = plist->size;
args.classes = classes;
args.jitter = jitter;
args.num_boxes = l.max_boxes;
args.d = &buffer;
args.type = DETECTION_DATA;
args.threads = 8;
// Data-augmentation parameters taken from the network cfg.
args.angle = net.angle;
args.exposure = net.exposure;
args.saturation = net.saturation;
args.hue = net.hue;
// Kick off the first asynchronous batch load.
pthread_t load_thread = load_data(args);
printf("after load data!\n");
clock_t time;
int count = 0;
//while(i*imgs < N*120){
// Main training loop: run until the cfg's max_batches is reached.
while(get_current_batch(net) < net.max_batches){
printf("error test while!\n");
// Multi-scale training: every 10 batches pick a new input size
// (a multiple of 32), when the region layer has random=1.
if(l.random && count++%10 == 0){
printf("Resizing\n");
int dim = (rand() % 10 + 10) * 32;
// Near the end of training, lock the size to 608.
if (get_current_batch(net)+200 > net.max_batches) dim = 608;
//int dim = (rand() % 4 + 16) * 32;
printf("%d\n", dim);
args.w = dim;
args.h = dim;
// Drop the batch loaded at the old size and reload at the new one.
pthread_join(load_thread, 0);
train = buffer;
free_data(train);
load_thread = load_data(args);
// Resize every GPU replica to the new input dimensions.
for(i = 0; i < ngpus; ++i){
resize_network(nets + i, dim, dim);
}
net = nets[0];
}
printf("error test while1!\n");
time=clock();
// Wait for the in-flight batch, take it, and immediately start loading the next.
pthread_join(load_thread, 0);
printf("error test while!\n");
train = buffer;
load_thread = load_data(args);
/*
int k;
for(k = 0; k < l.max_boxes; ++k){
box b = float_to_box(train.y.vals[10] + 1 + k*5);
if(!b.x) break;
printf("loaded: %f %f %f %f\n", b.x, b.y, b.w, b.h);
}
image im = float_to_image(448, 448, 3, train.X.vals[10]);
int k;
for(k = 0; k < l.max_boxes; ++k){
box b = float_to_box(train.y.vals[10] + 1 + k*5);
printf("%d %d %d %d\n", truth.x, truth.y, truth.w, truth.h);
draw_bbox(im, b, 8, 1,0,0);
}
save_image(im, "truth11");
*/
printf("Loaded: %lf seconds\n", sec(clock()-time));
time=clock();
float loss = 0;
#ifdef GPU
// Single GPU trains directly; multiple GPUs train replicas and sync every 4 steps.
if(ngpus == 1){
loss = train_network(net, train);
} else {
loss = train_networks(nets, ngpus, train, 4);
}
#else
loss = train_network(net, train);
#endif
// Exponential moving average of the loss for smoother reporting.
if (avg_loss < 0) avg_loss = loss;
avg_loss = avg_loss*.9 + loss*.1;
i = get_current_batch(net);
printf("%d: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), loss, avg_loss, get_current_rate(net), sec(clock()-time), i*imgs);
// Snapshot weights every 1000 batches (every 100 during the first 1000).
if(i%1000==0 || (i < 1000 && i%100 == 0)){
#ifdef GPU
if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
save_weights(net, buff);
}
free_data(train);
}
// Training finished: sync replicas and write the final weights.
#ifdef GPU
if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s_final.weights", backup_directory, base);
save_weights(net, buff);
}
[實驗思考]
yolo的官網上對於安裝和訓練介紹的很詳細,並且提供yolo,small-yolo,tiny-yolo,yolo2的模型下載。
在自己的小破筆記本上測試了一下效果,配置gtx960m/i5-6700HQ 使用了cuda和cudnn。yolov2檢測在16fps左右,tiny-yolov2可到40-50fps。漏檢不明顯。我初步實驗是用yolo2檢測字符,樣本字大清晰,效果很好。
出現問題了:
用於檢測細長的柱子(超過400個長得差不多的柱子,檢測目標也是這種柱子),第一次訓練,樣本大小差別較大45×180,25×100,15×50左右,iter大loss過小,過擬合檢測不到目標。迭代1000次左右,loss在零點幾,漏檢多。
考慮到yolo2輸入1920×900大小左右的圖片,目標寬度大概20-30像素,yolo2下采樣32倍,是可以檢測出很多柱子,但是定位精度不好。yolo2輸入會自動縮放到320×320~608×608(多尺度訓練時尺寸均爲32的倍數),最終檢測是在下采樣32倍後的格子上,不像faster-rcnn系列是逐像素檢測。所以目標不能過小,一是保證格子裏還有目標特徵,二是影響anchor box在相對格子的定位。
重新制作樣本,柱子大概寬度在20-30,高度在80-100分佈。效果是好一些,但是同樣大小的柱子,有的檢測出來有的檢測不出來,定位精度還是很低。就考慮放大目標,比如修改輸入圖像resize的倍數:在detector.c line:89左右
int dim = (rand() % 10 + 10) * 32;
if (get_current_batch(net)+200 > net.max_batches) dim = 608;
//int dim = (rand() % 4 + 16) * 32;
重新訓練效果沒有明顯改善,所以懷疑是我目標太窄,網絡把背景也學習了。
老師讓用pvanet檢測一下看效果,結果雖然檢出率和yolo2相當,但是定位很準,且通過剪裁放大檢測圖片,pvanet檢出率明顯提高,老師就說可以中止yolo2的學習了。結果這個窄目標檢測不好的問題我一直沒解決。如果有前輩路過能指點一下就很感激了TAT。
yolo是我上手深度學習的第一個網絡,我覺得它的性能沒有被掌握發揮好(因爲用yolo2官網模型可以檢測出視頻裏很小的罐頭和鳥,可見小目標還是可以檢測的),就停止學習轉向pvanet,挺可惜的。但是darknet這個架構...
之後借pvanet入手caffe,另做總結。