Practical notes on training YOLO on my own data

1. YOLO training data preparation

Preparing training data for darknet is fairly tedious; see [yolo training data preparation](http://blog.csdn.net/burning_keyboard/article/details/71055557) for the details.
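
For reference, each training image gets a .txt label file in which every line is "class x_center y_center width height", with all four coordinates normalized to [0,1] by the image size. Below is a minimal conversion sketch of my own (the function name and the pixel-box layout are assumptions, not taken from the linked post):

#include <stdio.h>

/* Write one darknet label line: "class x_center y_center width height",
 * with all coordinates normalized to [0,1] by the image dimensions. */
void write_darknet_label(FILE *f, int class_id,
                         float xmin, float ymin, float xmax, float ymax,
                         float img_w, float img_h)
{
    float x = (xmin + xmax) / 2.0f / img_w;   /* normalized box center x */
    float y = (ymin + ymax) / 2.0f / img_h;   /* normalized box center y */
    float w = (xmax - xmin) / img_w;          /* normalized box width    */
    float h = (ymax - ymin) / img_h;          /* normalized box height   */
    fprintf(f, "%d %f %f %f %f\n", class_id, x, y, w, h);
}

int main(void)
{
    /* example: a 45x180 column at (100,200) in a 1920x900 image, class 0 */
    write_darknet_label(stdout, 0, 100, 200, 145, 380, 1920, 900);
    return 0;
}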

2. Network preparation

1. Modify cfg/voc.data:
  classes= 10                                     // number of classes
  train  = ../your_save_path/trainImagePath.txt   // list of training image paths (a relative path works)
  valid = ../zifu1/validateImagePath.txt          // list of validation image paths (darknet reads the key "valid")
  names = data/voc.names                          // voc.names lists the class names; edit this file for your classes
  backup = backup                                 // directory where the .weights snapshots saved during training go

 2. Modify the network structure cfg/yolo-voc.cfg:
 (1) the last convolutional layer
     [convolutional]
     size=1
     stride=1
     pad=1
     filters=125 // 125 = 5 (anchor boxes) * (20 (classes) + 4 (coords) + 1 (confidence))
     ...         // recompute filters from your class count: with classes=10 it is 5 * (10 + 4 + 1) = 75 (see the sanity check after this list)
 (2) [region]
      anchors = 1.08,1.19,  3.42,4.41,  6.63,11.38,  9.42,5.11,  16.62,10.52
      bias_match=1
      classes=10       // set to the number of object classes
      coords=4
      num=5            // number of anchor boxes
      ...
 (3) the pre-training file cfg/darknet19_448.conv.23: a model pre-trained on another dataset, used as the initial weights
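
As a quick sanity check on the filters arithmetic above, here is a tiny helper of my own (not part of darknet) that evaluates filters = num * (classes + coords + 1):

#include <stdio.h>

/* filters of the conv layer right before [region]:
 * anchors * (classes + 4 box coordinates + 1 objectness confidence) */
int region_filters(int num, int classes, int coords)
{
    return num * (classes + coords + 1);
}

int main(void)
{
    printf("VOC, 20 classes: %d\n", region_filters(5, 20, 4)); /* 125 */
    printf("10 classes:      %d\n", region_filters(5, 10, 4)); /* 75  */
    return 0;
}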

3. Training command

 cd darknet
 ./darknet detector train cfg/voc.data cfg/yolo-voc.cfg cfg/darknet19_448.conv.23
 After each training batch darknet prints statistics such as the iteration count, loss, learning rate, average recall on the current batch, and the number of images seen so far.
 Every 100 iterations (and every 1000 once past the first 1000) a .weights snapshot is written to backup.

4. Detection command

 ./darknet detector test cfg/voc.data cfg/yolo-voc.cfg backup/yolo-voc_400.weights testImage/738780.jpg

5. yolo-darknet main entry point

 1. All the source files are in the src folder. The main entry point is in darknet.c; both run_yolo and run_detector there can be used for training and detection.

 2. In detector.c you will find the functions that parse the training and detection commands:
void run_detector(int argc, char **argv)//called from darknet.c when the second argument is "detector"
{
    //look for the -prefix option (argv holds the argument strings; default is 0)
    char *prefix = find_char_arg(argc, argv, "-prefix", 0);
    //look for the -thresh option, the detection score threshold; default 0.24
    float thresh = find_float_arg(argc, argv, "-thresh", .24);
    //look for the -hier option (hierarchical threshold); default 0.5
    float hier_thresh = find_float_arg(argc, argv, "-hier", .5);
    //camera index for demo mode; default 0
    int cam_index = find_int_arg(argc, argv, "-c", 0);
    //number of frames to skip in demo mode; default 0
    int frame_skip = find_int_arg(argc, argv, "-s", 0);
    //with fewer than 4 arguments, print the correct usage and return
    //fprintf(stderr, ...) is used instead of printf/stdout; both go to the screen by default, but stderr is the standard error stream
    if(argc < 4){
        fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]);
        return;
    }
    //look for the -gpus option: a comma-separated list of GPU indices
    char *gpu_list = find_char_arg(argc, argv, "-gpus", 0);
    //look for the -out option: output file name used by the valid mode
    char *outfile = find_char_arg(argc, argv, "-out", 0);
    int *gpus = 0;
    int gpu = 0;
    int ngpus = 0;
    if(gpu_list){
        printf("%s\n", gpu_list);
        int len = strlen(gpu_list);
        ngpus = 1;
        int i;
        for(i = 0; i < len; ++i){
            if (gpu_list[i] == ',') ++ngpus;
        }
        gpus = calloc(ngpus, sizeof(int));
        for(i = 0; i < ngpus; ++i){
            gpus[i] = atoi(gpu_list);
            gpu_list = strchr(gpu_list, ',')+1;
        }
    } else {
        gpu = gpu_index;
        gpus = &gpu;
        ngpus = 1;
    }
    //check for the -clear flag (restart the "images seen" counter)
    int clear = find_arg(argc, argv, "-clear");
    //path of the .data file
    char *datacfg = argv[3];
    //path of the .cfg file
    char *cfg = argv[4];
    //path of the .weights file
    char *weights = (argc > 5) ? argv[5] : 0;
    //path of the file to run detection on
    char *filename = (argc > 6) ? argv[6]: 0;
    //dispatch on the third argument, passing along the options parsed above
    if(0==strcmp(argv[2], "test")) test_detector(datacfg, cfg, weights, filename, thresh, hier_thresh);
    else if(0==strcmp(argv[2], "train")) train_detector(datacfg, cfg, weights, gpus, ngpus, clear);
    else if(0==strcmp(argv[2], "valid")) validate_detector(datacfg, cfg, weights, outfile);
    else if(0==strcmp(argv[2], "recall")) validate_detector_recall(cfg, weights);
    else if(0==strcmp(argv[2], "demo")) {
        list *options = read_data_cfg(datacfg);
        int classes = option_find_int(options, "classes", 20);
        char *name_list = option_find_str(options, "names", "data/names.list");
        char **names = get_labels(name_list);
        demo(cfg, weights, thresh, cam_index, filename, names, classes, frame_skip, prefix, hier_thresh);
    }
}
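
To make the option parsing above concrete, here is a minimal sketch of how a find_float_arg-style helper can be written. The real implementations live in utils.c (and also remove the consumed arguments from argv), so treat this as an illustration of the idea, not darknet's actual code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Scan argv for "flag value"; if the flag is present, return the following
 * token parsed as a float, otherwise return the default value. */
float find_float_arg_sketch(int argc, char **argv, char *flag, float def)
{
    int i;
    for(i = 1; i < argc - 1; ++i){
        if(argv[i] && 0 == strcmp(argv[i], flag)){
            return atof(argv[i+1]);
        }
    }
    return def;
}

int main(int argc, char **argv)
{
    /* e.g. "./a.out -thresh 0.4" prints 0.4, otherwise the 0.24 default */
    printf("thresh = %f\n", find_float_arg_sketch(argc, argv, "-thresh", .24));
    return 0;
}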
void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filename, float thresh, float hier_thresh)
{
    //options stores the class labels and other basic dataset information
    list *options = read_data_cfg(datacfg);
    //extract the label names
    char *name_list = option_find_str(options, "names", "data/names.list");
    char **names = get_labels(name_list);
    //load the character images under data/labels, used to draw the box labels
    image **alphabet = load_alphabet();
    //store the model in the network struct defined in network.h; parse_network_cfg is in parser.c
    network net = parse_network_cfg(cfgfile);
    //load the weights that match the parsed structure
    if(weightfile){
        load_weights(&net, weightfile);
    }
    set_batch_network(&net, 1);
    srand(2222222);
    clock_t time;
    char buff[256];
    char *input = buff;
    int j;
    float nms=.4;
    while(1){
        if(filename){
            strncpy(input, filename, 256);
        } else {
            printf("Enter Image Path: ");
            fflush(stdout);
            input = fgets(input, 256, stdin);
            if(!input) return;
            strtok(input, "\n");
        }
        image im = load_image_color(input,0,0);
        //resize the input image to the network input size
        image sized = resize_image(im, net.w, net.h);
        layer l = net.layers[net.n-1];

        box *boxes = calloc(l.w*l.h*l.n, sizeof(box));
        float **probs = calloc(l.w*l.h*l.n, sizeof(float *));
        for(j = 0; j < l.w*l.h*l.n; ++j) probs[j] = calloc(l.classes + 1, sizeof(float *));
        //X points to the image's data field, i.e. the raw pixel values
        float *X = sized.data;
        time=clock();
        //network_predict runs the forward pass on the image data X
        network_predict(net, X);
        printf("%s: Predicted in %f seconds.\n", input, sec(clock()-time));
        get_region_boxes(l, 1, 1, thresh, probs, boxes, 0, 0, hier_thresh);
        if (l.softmax_tree && nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
        else if (nms) do_nms_sort(boxes, probs, l.w*l.h*l.n, l.classes, nms);
        draw_detections(im, l.w*l.h*l.n, thresh, boxes, probs, names, alphabet, l.classes);
        save_image(im, "predictions");
        show_image(im, "predictions");

        free_image(im);
        free_image(sized);
        free(boxes);
        free_ptrs((void **)probs, l.w*l.h*l.n);
#ifdef OPENCV
        cvWaitKey(0);
        cvDestroyAllWindows();
#endif
        if (filename) break;
    }
}
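
A quick back-of-the-envelope check on the l.w*l.h*l.n allocations above, assuming the standard yolo-voc.cfg settings (416x416 input, 32x total downsampling, 5 anchors); the numbers here are my own arithmetic, not darknet output:

#include <stdio.h>

int main(void)
{
    int input  = 416;           /* network input resolution (cfg width/height) */
    int stride = 32;            /* total downsampling of the backbone          */
    int num    = 5;             /* anchor boxes per grid cell ([region] num=5) */
    int lw = input / stride;    /* region layer width:  13 */
    int lh = input / stride;    /* region layer height: 13 */
    /* boxes/probs in test_detector hold one entry per anchor per grid cell */
    printf("%d x %d x %d = %d candidate boxes\n", lw, lh, num, lw * lh * num);
    return 0;
}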
void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear)
{
    //parse the .data file; the custom list "options" stores the basic training-set information (read_data_cfg is in option_list.c)
    list *options = read_data_cfg(datacfg);
    //find the training image list in options
    char *train_images = option_find_str(options, "train", "data/train.list");
    //find the backup directory in options
    char *backup_directory = option_find_str(options, "backup", "/backup/");
    //seed the random number generator
    srand(time(0));
    //basecfg (in utils.c) returns the cfg file name without path or extension
    char *base = basecfg(cfgfile);
    printf(" hahaha %s\n", base);
    float avg_loss = -1;
    network *nets = calloc(ngpus, sizeof(network));

    srand(time(0));
    int seed = rand();
    int i;
    for(i = 0; i < ngpus; ++i){
        srand(seed);
#ifdef GPU
        cuda_set_device(gpus[i]);
#endif
        nets[i] = parse_network_cfg(cfgfile);
        if(weightfile){
            load_weights(&nets[i], weightfile);
        }
        if(clear) *nets[i].seen = 0;
        nets[i].learning_rate *= ngpus;
    }
    srand(time(0));
    network net = nets[0];

    int imgs = net.batch * net.subdivisions * ngpus;
    printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net.learning_rate, net.momentum, net.decay);
    data train, buffer;

    layer l = net.layers[net.n - 1];

    int classes = l.classes;
    printf("error test!%d\n",classes);
    float jitter = l.jitter;

    list *plist = get_paths(train_images);
    //int N = plist->size;
    char **paths = (char **)list_to_array(plist);

    printf("error test!\n");
    load_args args = {0};
    args.w = net.w;
    args.h = net.h;
    args.paths = paths;
    args.n = imgs;
    args.m = plist->size;
    args.classes = classes;
    args.jitter = jitter;
    args.num_boxes = l.max_boxes;
    args.d = &buffer;
    args.type = DETECTION_DATA;
    args.threads = 8;

    args.angle = net.angle;
    args.exposure = net.exposure;
    args.saturation = net.saturation;
    args.hue = net.hue;

    pthread_t load_thread = load_data(args);
    printf("after load data!\n");
    clock_t time;
    int count = 0;
    //while(i*imgs < N*120){
    while(get_current_batch(net) < net.max_batches){
        printf("error test while!\n");
        if(l.random && count++%10 == 0){
            printf("Resizing\n");
            int dim = (rand() % 10 + 10) * 32;
            if (get_current_batch(net)+200 > net.max_batches) dim = 608;
            //int dim = (rand() % 4 + 16) * 32;
            printf("%d\n", dim);
            args.w = dim;
            args.h = dim;

            pthread_join(load_thread, 0);
            train = buffer;
            free_data(train);
            load_thread = load_data(args);

            for(i = 0; i < ngpus; ++i){
                resize_network(nets + i, dim, dim);
            }
            net = nets[0];
        }
        printf("error test while1!\n");
        time=clock();
        pthread_join(load_thread, 0);
        printf("error test while!\n");
        train = buffer;
        load_thread = load_data(args);

        /*
           int k;
           for(k = 0; k < l.max_boxes; ++k){
           box b = float_to_box(train.y.vals[10] + 1 + k*5);
           if(!b.x) break;
           printf("loaded: %f %f %f %f\n", b.x, b.y, b.w, b.h);
           }
           image im = float_to_image(448, 448, 3, train.X.vals[10]);
           int k;
           for(k = 0; k < l.max_boxes; ++k){
           box b = float_to_box(train.y.vals[10] + 1 + k*5);
           printf("%d %d %d %d\n", truth.x, truth.y, truth.w, truth.h);
           draw_bbox(im, b, 8, 1,0,0);
           }
           save_image(im, "truth11");
         */

        printf("Loaded: %lf seconds\n", sec(clock()-time));

        time=clock();
        float loss = 0;
#ifdef GPU
        if(ngpus == 1){
            loss = train_network(net, train);
        } else {
            loss = train_networks(nets, ngpus, train, 4);
        }
#else
        loss = train_network(net, train);
#endif
        if (avg_loss < 0) avg_loss = loss;
        avg_loss = avg_loss*.9 + loss*.1;

        i = get_current_batch(net);
        printf("%d: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), loss, avg_loss, get_current_rate(net), sec(clock()-time), i*imgs);
        if(i%1000==0 || (i < 1000 && i%100 == 0)){
#ifdef GPU
            if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
            char buff[256];
            sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
            save_weights(net, buff);
        }
        free_data(train);
    }
#ifdef GPU
    if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
    char buff[256];
    sprintf(buff, "%s/%s_final.weights", backup_directory, base);
    save_weights(net, buff);
}
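
One detail worth pulling out of train_detector: the snapshot condition i%1000==0 || (i < 1000 && i%100 == 0) means weights are written every 100 iterations up to 1000 and every 1000 after that, plus a final file at the end. A tiny enumeration of my own to confirm the schedule:

#include <stdio.h>

int main(void)
{
    int i;
    /* reproduce train_detector's condition for saving a .weights snapshot */
    for(i = 1; i <= 3000; ++i){
        if(i % 1000 == 0 || (i < 1000 && i % 100 == 0)){
            printf("snapshot at iteration %d\n", i);
        }
    }
    /* prints 100, 200, ..., 900, 1000, 2000, 3000 */
    return 0;
}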

[Experiment notes]

The YOLO website documents installation and training in detail, and offers downloads for the yolo, small-yolo, tiny-yolo, and yolo2 models.
I tried them on my modest laptop (GTX 960M / i5-6700HQ, with CUDA and cuDNN): yolov2 detection runs at roughly 16 fps and tiny-yolov2 reaches 40-50 fps, with few missed detections. My first experiment used yolo2 to detect characters; the characters in the samples were large and clear, and the results were very good.

Then problems appeared:
The task was detecting thin, elongated columns (more than 400 nearly identical columns, and the detection targets are columns of this kind). In the first training run the sample sizes varied widely, roughly 45×180, 25×100, and 15×50 pixels. With a large number of iterations the loss became very small, the model overfit, and nothing was detected; at around 1000 iterations the loss was a few tenths but many targets were still missed.
Consider that the input images are roughly 1920×900 with targets about 20-30 pixels wide, and yolo2 downsamples by a factor of 32: it can find many of the columns, but the localization accuracy is poor. yolo2 automatically rescales its input to between 320×320 and 608×608, and the final detection happens on the grid obtained after 32x downsampling, unlike the faster-rcnn family, which places anchors on a much denser grid. So the targets must not be too small: first, each grid cell must still contain enough target features; second, a tiny target degrades the anchor box's localization relative to its cell.
I remade the samples so the columns were roughly 20-30 pixels wide and 80-100 pixels tall. The results improved somewhat, but among columns of the same size some were detected and some were not, and localization accuracy was still low. So I considered enlarging the targets, for example by changing the multiples used to resize the input image, around line 89 of detector.c (the sketch after this snippet enumerates the resolutions each formula samples):
    int dim = (rand() % 10 + 10) * 32;
    if (get_current_batch(net)+200 > net.max_batches) dim = 608;
    //int dim = (rand() % 4 + 16) * 32;
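
The sketch below enumerates the input resolutions each of these formulas can sample, and shows how wide a roughly 25-pixel column from a 1920-pixel-wide image becomes after the resize (my own arithmetic, assuming the whole image is resized without cropping):

#include <stdio.h>

int main(void)
{
    int r, k;
    int dims[2] = {320, 608};

    /* original multi-scale rule: dim = (rand() % 10 + 10) * 32  ->  320..608 */
    printf("(rand() %% 10 + 10) * 32:");
    for(r = 0; r < 10; ++r) printf(" %d", (r + 10) * 32);
    printf("\n");

    /* commented-out alternative: dim = (rand() % 4 + 16) * 32  ->  512..608 */
    printf("(rand() %% 4 + 16) * 32: ");
    for(r = 0; r < 4; ++r) printf(" %d", (r + 16) * 32);
    printf("\n");

    /* how wide a 25 px column from a 1920 px wide image ends up after resize */
    for(k = 0; k < 2; ++k){
        printf("at dim=%d a 25 px column is ~%.1f px wide (one grid cell = 32 px)\n",
               dims[k], 25.0f * dims[k] / 1920.0f);
    }
    return 0;
}

Even at the largest resolution the column covers only about a quarter of a grid cell in width, which matches the poor localization I observed.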
 Retraining with this change brought no clear improvement, so I suspect my targets are simply too narrow and the network learned the surrounding background along with them.
 My advisor asked me to try pvanet on the same task. Its detection rate turned out to be comparable to yolo2's, but its localization was much more accurate, and after cropping and enlarging the test images pvanet's detection rate improved noticeably, so my advisor told me to stop working on yolo2. As a result, I never solved the problem of detecting these narrow targets well. If anyone passing by can point me in the right direction, I would be very grateful. TAT
 yolo was the first network I picked up when starting deep learning, and I feel I never fully exploited its capabilities (the official yolo2 model can detect very small cans and birds in video, so small targets clearly can be detected); stopping here and switching to pvanet is a bit of a pity. But the darknet framework...
 Next I will use pvanet as my way into caffe, and write that up separately.