YOLOv3計算各類的AP及MAP

首先需要對測試集做批量測試,即需要將每個測試圖像輸入到模型中,得到測試結果。然後統計測試結果;

本文用的是darknet中的valid接口函數。valid可以在訓練時使用驗證集檢測模型的訓練情況;這裏使用valid對訓練好的模型做測試(即用來批量統計輸入測試圖像經過模型得到的結果)。

看下源碼detector.c中run_detector函數中valid接口用法

具體用法:

./darknet detector valid cfg/voc.data cfg/yolov3-voc-6.cfg yolov3-voc-6_final.weights -out ""

注意將voc.data中valid的路徑改成測試集路徑

數據集路徑格式

測試結果默認保存在當前路徑下的./results文件夾下,如果沒有,新建;

輸出測試圖像數

vim bicycle.txt

             008153  0.005640    231.401428  410.399536  375.000000  490.319397

             按列,分別爲:圖像名稱 | 置信度 | xmin,ymin,xmax,ymax

計算各類的MAP

python reval_voc_py3.py --year 2007 --classes data/coco-6.names --image_set test --voc_dir /home/nxt/xxx/darknet/VOCdevkit --output_dir results

部分輸出結果:

Evaluating detections

VOC07 metric? Yes

devkit_path= /home/nxt/xx/darknet/VOCdevkit , year =  2007

!!! cachefile =  /home/nxt/xxx/darknet/VOCdevkit/annotations_cache/annots.pkl

AP for bicycle = 0.8458

!!! cachefile =  /home/nxt/xxx/darknet/VOCdevkit/annotations_cache/annots.pkl

AP for bus = 0.8877

!!! cachefile =  /home/nxt/xxx/darknet/VOCdevkit/annotations_cache/annots.pkl

AP for car = 0.8566

.....

reval_voc_py3.py並非官方計算方法;據google瞭解,官方使用的是MATLAB工具箱的計算方法,需自行了解。此處代碼是從github上找到的,出處一時想不起來,後期想起來再補上地址。

# reval_voc_py3.py
# !/usr/bin/env python

# Adapt from ->
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
# <- Written by Yaping Sun

"""Reval = re-eval. Re-evaluate saved detections."""

import os, sys, argparse
import numpy as np
import _pickle as cPickle
#import cPickle

from voc_eval_py3 import voc_eval

def parse_args():
    """Parse and return the command-line arguments for re-evaluation."""
    parser = argparse.ArgumentParser(description='Re-evaluate results')
    parser.add_argument('output_dir', nargs=1, type=str,
                        help='results directory')
    parser.add_argument('--voc_dir', dest='voc_dir',
                        default='data/VOCdevkit', type=str)
    parser.add_argument('--year', dest='year', default='2017', type=str)
    parser.add_argument('--image_set', dest='image_set',
                        default='test', type=str)
    parser.add_argument('--classes', dest='class_file',
                        default='data/voc.names', type=str)

    # Invoked with no arguments at all: show usage and bail out.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    return parser.parse_args()

def get_voc_results_file_template(image_set, out_dir = 'results'):
    """Return the per-class results path pattern; '{:s}' is the class name.

    Note: the detection files are named plain '<class>.txt', so the
    image_set argument is unused and kept only for signature compatibility.
    """
    return os.path.join(out_dir, '{:s}.txt')

def do_python_eval(devkit_path, year, image_set, classes, output_dir = 'results'):
    """Run voc_eval for every class, print per-class AP and the mean AP.

    Args:
        devkit_path: root of the VOCdevkit tree.
        year: dataset year as a string, e.g. '2007'.
        image_set: image-set name, e.g. 'test'.
        classes: iterable of class names; '__background__' is skipped.
        output_dir: directory that receives the per-class '<cls>_pr.pkl' files.
    """
    # Layout assumes a 'VOC<year>_test' subtree -- TODO confirm this matches
    # how the devkit was unpacked (a stock VOCdevkit uses plain 'VOC<year>').
    annopath = os.path.join(
        devkit_path,
        'VOC' + year + '_test',
        'Annotations',
        '{}.xml')
    imagesetfile = os.path.join(
        devkit_path,
        'VOC' + year + '_test',
        'ImageSets',
        'Main',
        image_set + '.txt')
    cachedir = os.path.join(devkit_path, 'annotations_cache')
    # The PASCAL VOC metric changed in 2010 (11-point interpolation before).
    use_07_metric = int(year) < 2010
    print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
    print('devkit_path=', devkit_path, ', year = ', year)

    # makedirs(exist_ok=True) also creates missing parents; the previous
    # os.mkdir raised FileNotFoundError when output_dir's parent was absent.
    os.makedirs(output_dir, exist_ok=True)

    aps = []
    for cls in classes:
        if cls == '__background__':
            continue
        filename = get_voc_results_file_template(image_set).format(cls)
        rec, prec, ap = voc_eval(
            filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
            use_07_metric=use_07_metric)
        print('rec:', rec.shape)
        print('prec:', prec.shape)
        aps.append(ap)
        print('AP for {} = {:.4f}'.format(cls, ap))
        # NOTE(review): these dump '<cls>' / '<cls>_prec' files (no extension)
        # into the current directory -- kept as-is for compatibility.
        np.savetxt(cls, rec)
        np.savetxt(cls + '_prec', prec)
        with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
            cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
    mean_ap = np.mean(aps)
    print('Mean AP = {:.4f}'.format(mean_ap))
    print('~~~~~~~~')
    print('Results:')
    for ap in aps:
        print('{:.3f}'.format(ap))
    print('{:.3f}'.format(mean_ap))
    print('~~~~~~~~')
    print('')
    print('--------------------------------------------------------------')
    print('Results computed with the **unofficial** Python eval code.')
    print('Results should be very close to the official MATLAB eval code.')
    print('-- Thanks, The Management')
    print('--------------------------------------------------------------')



if __name__ == '__main__':
    args = parse_args()

    # Resolve the results directory to an absolute path.
    out_dir = os.path.abspath(args.output_dir[0])

    # The names file lists one class name per line.
    with open(args.class_file, 'r') as names_file:
        class_names = [line.strip('\n') for line in names_file.readlines()]

    print('Evaluating detections')
    do_python_eval(args.voc_dir, args.year, args.image_set, class_names, out_dir)

voc_eval.py代碼原本是python2版本,這裏我改爲python3版本

#!/usr/bin/env python

# Adapt from ->
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
# <- Written by Yaping Sun

"""Reval = re-eval. Re-evaluate saved detections."""

import os, sys, argparse
import numpy as np
import _pickle as cPickle
#import cPickle

from voc_eval_py3 import voc_eval

def parse_args():
    """Parse and return the command-line arguments for re-evaluation."""
    parser = argparse.ArgumentParser(description='Re-evaluate results')
    parser.add_argument('output_dir', nargs=1, type=str,
                        help='results directory')
    parser.add_argument('--voc_dir', dest='voc_dir',
                        default='data/VOCdevkit', type=str)
    parser.add_argument('--year', dest='year', default='2017', type=str)
    parser.add_argument('--image_set', dest='image_set',
                        default='test', type=str)
    parser.add_argument('--classes', dest='class_file',
                        default='data/voc.names', type=str)

    # Invoked with no arguments at all: show usage and bail out.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    return parser.parse_args()

def get_voc_results_file_template(image_set, out_dir = 'results'):
    """Return the per-class results path pattern; '{:s}' is the class name.

    Note: the detection files are named plain '<class>.txt', so the
    image_set argument is unused and kept only for signature compatibility.
    """
    return os.path.join(out_dir, '{:s}.txt')

def do_python_eval(devkit_path, year, image_set, classes, output_dir = 'results'):
    """Run voc_eval for every class, print per-class AP and the mean AP.

    Args:
        devkit_path: root of the VOCdevkit tree.
        year: dataset year as a string, e.g. '2007'.
        image_set: image-set name, e.g. 'test'.
        classes: iterable of class names; '__background__' is skipped.
        output_dir: directory that receives the per-class '<cls>_pr.pkl' files.
    """
    # Layout assumes a 'VOC<year>_test' subtree -- TODO confirm this matches
    # how the devkit was unpacked (a stock VOCdevkit uses plain 'VOC<year>').
    annopath = os.path.join(
        devkit_path,
        'VOC' + year + '_test',
        'Annotations',
        '{}.xml')
    imagesetfile = os.path.join(
        devkit_path,
        'VOC' + year + '_test',
        'ImageSets',
        'Main',
        image_set + '.txt')
    cachedir = os.path.join(devkit_path, 'annotations_cache')
    # The PASCAL VOC metric changed in 2010 (11-point interpolation before).
    use_07_metric = int(year) < 2010
    print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
    print('devkit_path=', devkit_path, ', year = ', year)

    # makedirs(exist_ok=True) also creates missing parents; the previous
    # os.mkdir raised FileNotFoundError when output_dir's parent was absent.
    os.makedirs(output_dir, exist_ok=True)

    aps = []
    for cls in classes:
        if cls == '__background__':
            continue
        filename = get_voc_results_file_template(image_set).format(cls)
        rec, prec, ap = voc_eval(
            filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
            use_07_metric=use_07_metric)
        print('rec:', rec.shape)
        print('prec:', prec.shape)
        aps.append(ap)
        print('AP for {} = {:.4f}'.format(cls, ap))
        # NOTE(review): these dump '<cls>' / '<cls>_prec' files (no extension)
        # into the current directory -- kept as-is for compatibility.
        np.savetxt(cls, rec)
        np.savetxt(cls + '_prec', prec)
        with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
            cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
    mean_ap = np.mean(aps)
    print('Mean AP = {:.4f}'.format(mean_ap))
    print('~~~~~~~~')
    print('Results:')
    for ap in aps:
        print('{:.3f}'.format(ap))
    print('{:.3f}'.format(mean_ap))
    print('~~~~~~~~')
    print('')
    print('--------------------------------------------------------------')
    print('Results computed with the **unofficial** Python eval code.')
    print('Results should be very close to the official MATLAB eval code.')
    print('-- Thanks, The Management')
    print('--------------------------------------------------------------')



if __name__ == '__main__':
    args = parse_args()

    # Resolve the results directory to an absolute path.
    out_dir = os.path.abspath(args.output_dir[0])

    # The names file lists one class name per line.
    with open(args.class_file, 'r') as names_file:
        class_names = [line.strip('\n') for line in names_file.readlines()]

    print('Evaluating detections')
    do_python_eval(args.voc_dir, args.year, args.image_set, class_names, out_dir)

 

本人略菜,有問題請指出;

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章