Classification and object detection with deep learning models through OpenCV dnn

This article uses OpenCV 3.3 (the dnn module requires OpenCV 3.3 or later).

In my tests, running deep-learning inference through OpenCV dnn was somewhat more efficient than going through Caffe's own C++ interface.

The classification and object-detection code is as follows:
#include <string>
#include <fstream>
#include <iostream>

#include <unistd.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <string.h>

#include <jni.h>
#include <android/log.h>

#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/dnn.hpp>
#include <opencv2/dnn/shape_utils.hpp>
#include <opencv2/core/utils/trace.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/core/utility.hpp>

// LOGE is assumed to be the usual Android NDK logging macro; the tag below is a placeholder.
#define LOG_TAG "dnn_demo"
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__)

using namespace cv;
using namespace cv::dnn;

const size_t width = 300;
const size_t height = 300;

// Find the class with the highest probability
static void getMaxClass(const Mat &probBlob, int *classId, double *classProb)
{
    Mat probMat = probBlob.reshape(1, 1);
    Point classNumber;

    minMaxLoc(probMat, NULL, classProb, NULL, &classNumber);
    *classId = classNumber.x;
}
// Read class names from the label file; the first space separates the id from the name
static std::vector<String> readClassNames(const char *filename = "labels.txt")
{
    std::vector<String> classNames;

    std::ifstream fp(filename);
    if (!fp.is_open())
    {
        std::cerr << "File with classes labels not found: " << filename << std::endl;
        exit(-1);
    }

    std::string name;
    while (std::getline(fp, name))
    {
        if (name.length())
            classNames.push_back(name.substr(name.find(' ') + 1));
    }
    fp.close();
    return classNames;
}
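
// Example of the labels file layout assumed by readClassNames above
// (an ImageNet synset_words.txt style is assumed here; everything up to the first space is dropped):
//   n01440764 tench, Tinca tinca
//   n01443537 goldfish, Carassius auratus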

static std::string classification(String modelTxt, String modelBin, String imageFile, const char *lablesFile, cv::Rect tRect)
{
    // Initialization / tracing
    CV_TRACE_FUNCTION();
    // Model structure (prototxt) and weights (caffemodel) files, e.g.:
    //modelTxt = "caffenet.prototxt";
    //modelBin = "caffenet.caffemodel";
    // Input image, e.g.:
    //imageFile = "test0.jpg";

    // Build the network from the Caffe files
    Net net = dnn::readNetFromCaffe(modelTxt, modelBin);
    // Check that the network was created successfully
    if (net.empty())
    {
        LOGE("Can't load network by using the following files: ");
        return "";
    }
    LOGE("net read successfully");

    // Read the image and check it before cropping
    Mat img1 = imread(imageFile);
    if (img1.empty())
    {
        std::cerr << "Can't read image from the file: " << imageFile << std::endl;
        return "";
    }
    // Crop to the region of interest
    Mat img = img1(cv::Rect(tRect.x, tRect.y, tRect.width, tRect.height));

    //imshow("image", img);
    LOGE("image read successfully");

    /*  Mat inputBlob = blobFromImage(img, 1, Size(224, 224),
                                 Scalar(104, 117, 123)); */

    // Build a blob to feed the network; a raw Mat cannot be passed in directly
    Mat inputBlob = blobFromImage(img, 1, Size(224, 224));

    Mat prob;
    cv::TickMeter t;
    for (int i = 0; i < 10; i++)  // run 10 passes to average the timing; a single pass is enough
    {
        CV_TRACE_REGION("forward");

        // Feed the blob into the network's "data" layer
        net.setInput(inputBlob, "data");
        // start timing
        t.start();
        // forward pass; the output layer name must match the prototxt
        prob = net.forward("softmax");
        // stop timing
        t.stop();
    }

    int classId;
    double classProb;
    // Find the top class: its index goes into classId, its probability into classProb
    getMaxClass(prob, &classId, &classProb);

    // Print the result
    std::vector<String> classNames = readClassNames(lablesFile);
    LOGE("Best class: #%d", classId);
    LOGE("class name %s", classNames.at(classId).c_str());
    LOGE("Probability: %d", (int)(classProb * 100));
    // Print the elapsed time
    LOGE("Time: %f ms", (double)t.getTimeMilli() / t.getCounter());

    return classNames.at(classId).c_str();

}
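
// A minimal usage sketch for classification(); the file names and crop rectangle below are
// placeholders, not values from the original post:
//   cv::Rect roi(0, 0, 224, 224);
//   std::string label = classification("caffenet.prototxt", "caffenet.caffemodel",
//                                       "test0.jpg", "labels.txt", roi);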

// Build a 3-channel mean image (BGR means 104, 117, 123) used by the SSD preprocessing
static Mat getMean(const size_t& imageHeight, const size_t& imageWidth)
{
    Mat mean;

    const int meanValues[3] = {104, 117, 123};
    std::vector<Mat> meanChannels;
    for(int i = 0; i < 3; i++)
    {
        Mat channel((int)imageHeight, (int)imageWidth, CV_32F, Scalar(meanValues[i]));
        meanChannels.push_back(channel);
    }
    cv::merge(meanChannels, mean);
    return mean;
}

static Mat preprocess(const Mat& frame)
{
    Mat preprocessed;
    frame.convertTo(preprocessed, CV_32F);
    resize(preprocessed, preprocessed, Size(width, height)); //SSD accepts 300x300 RGB-images

    Mat mean = getMean(height, width);
    cv::subtract(preprocessed, mean, preprocessed);

    return preprocessed;
}
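
// Note: in OpenCV 3.3 blobFromImage can also do the resize and mean subtraction itself.
// A sketch that does roughly the same job, assuming BGR input and the mean values above
// (exact resize/crop behavior may differ slightly between dnn versions):
//   Mat inputBlob = blobFromImage(frame, 1.0, Size(width, height),
//                                 Scalar(104, 117, 123), /*swapRB=*/false);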

static cv::Rect ssdCheck(String modelTxt, String modelBinary,String imageFile)
{
    cv::Rect dstObject(0, 0, 0, 0);
    // Read the input image
    Mat srcImage = imread(imageFile);
    if (srcImage.empty())
    {
        LOGE("Can't read image from the file: %s", imageFile.c_str());
        return dstObject;
    }

    // Note: the Importer API is deprecated in OpenCV 3.3; readNetFromCaffe (used above) is its replacement
    Ptr<dnn::Importer> importer;

    try {
        importer = dnn::createCaffeImporter(modelTxt, modelBinary);
    }
    catch (const cv::Exception &err) {
        LOGE("import caffe model error");
    }

    if (!importer)
    {
        LOGE("Can't load network by using the following files:");
        LOGE("/mnt/sdcard/ssd.caffemodel /mnt/sdcard/ssd.prototxt");

        return dstObject;
    }

    //! [Initialize network]
    dnn::Net net;
    importer->populateNet(net);
    importer.release();          //We don't need importer anymore
    //! [Initialize network]

    cv::Mat frame = srcImage;

    if (frame.channels() == 4)
        cvtColor(frame, frame, COLOR_BGRA2BGR);
    //! [Prepare blob]
    Mat preprocessedFrame = preprocess(frame);

    Mat inputBlob = blobFromImage(preprocessedFrame); //Convert Mat to batch of images
    //! [Prepare blob]

    //! [Set input blob]
    net.setInput(inputBlob, "data");                //set the network input
    //! [Set input blob]

 

    TickMeter tm;
    tm.start();

    //! [Make forward pass]
    Mat detection = net.forward("detection_out");                                  //compute output
    //! [Make forward pass]

    tm.stop();
    LOGE("process time %f",tm.getTimeMilli());

 

    // detection has shape [1, 1, N, 7]; each of the N rows is
    // [imageId, classId, confidence, xmin, ymin, xmax, ymax] with normalized coordinates
    Mat detectionMat(detection.size[2], detection.size[3], CV_32F, detection.ptr<float>());

    float confidenceThreshold = 0.5;
    for(int i = 0; i < detectionMat.rows; i++)
    {
        float confidence = detectionMat.at<float>(i, 2);

        if(confidence > confidenceThreshold)
        {
            size_t objectClass = (size_t)(detectionMat.at<float>(i, 1));

            float xLeftBottom = detectionMat.at<float>(i, 3) * frame.cols;
            float yLeftBottom = detectionMat.at<float>(i, 4) * frame.rows;
            float xRightTop = detectionMat.at<float>(i, 5) * frame.cols;
            float yRightTop = detectionMat.at<float>(i, 6) * frame.rows;

//            std::cout << " " << xLeftBottom
//            << " " << yLeftBottom
//            << " " << xRightTop
//            << " " << yRightTop << std::endl;
            // Keep the last detection above the threshold; only one rectangle is returned
            dstObject.x = (int)xLeftBottom;
            dstObject.y = (int)yLeftBottom;
            dstObject.width = (int)(xRightTop - xLeftBottom);
            dstObject.height = (int)(yRightTop - yLeftBottom);
    }

    return dstObject;

}
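
// A minimal usage sketch for ssdCheck(); the image path is a placeholder, the model paths
// are the ones hard-coded in the JNI function below:
//   cv::Rect box = ssdCheck("/mnt/sdcard/ssd.prototxt", "/mnt/sdcard/ssd.caffemodel",
//                           "/mnt/sdcard/test.jpg");
//   if (box.area() > 0) { /* an object above the confidence threshold was found */ }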

// JNI entry point: srcImg and dstImg are native Mat addresses passed from Java (Mat.getNativeObjAddr())
extern "C" JNIEXPORT jlong JNICALL Java_com_deepTest
        (JNIEnv *env, jobject obj, jlong srcImg, jlong dstImg) {
    Mat *srcImage = (Mat *) srcImg;
    Mat *dstImage = (Mat *) dstImg;

    String modelConfiguration = "/mnt/sdcard/ssd.prototxt";
    String modelBinary = "/mnt/sdcard/ssd.caffemodel";
    Ptr<dnn::Importer> importer;

    try {
        importer = dnn::createCaffeImporter(modelConfiguration,modelBinary);
    }
    catch (const cv::Exception &err) {
        LOGE("import caffe model error");
    }

    if (!importer)
    {
        LOGE("Can't load network by using the following files:");
        LOGE("/mnt/sdcard/ssd.caffemodel /mnt/sdcard/ssd.prototxt");

        return (jlong)srcImage;
    }

    //! [Initialize network]
    dnn::Net net;
    importer->populateNet(net);
    importer.release();          //We don't need importer anymore
    //! [Initialize network]

    cv::Mat frame = *srcImage;

    if (frame.channels() == 4)
        cvtColor(frame, frame, COLOR_BGRA2BGR);
    //! [Prepare blob]
    Mat preprocessedFrame = preprocess(frame);

    Mat inputBlob = blobFromImage(preprocessedFrame); //Convert Mat to batch of images
    //! [Prepare blob]

    //! [Set input blob]
    net.setInput(inputBlob, "data");                //set the network input
    //! [Set input blob]

 

    TickMeter tm;
    tm.start();

    //! [Make forward pass]
    Mat detection = net.forward("detection_out");                                  //compute output
    //! [Make forward pass]

    tm.stop();
    LOGE("process time %f",tm.getTimeMilli());

 

    Mat detectionMat(detection.size[2], detection.size[3], CV_32F, detection.ptr<float>());

    float confidenceThreshold = 0.5;
    for(int i = 0; i < detectionMat.rows; i++)
    {
        float confidence = detectionMat.at<float>(i, 2);

        if(confidence > confidenceThreshold)
        {
            size_t objectClass = (size_t)(detectionMat.at<float>(i, 1));

            float xLeftBottom = detectionMat.at<float>(i, 3) * frame.cols;
            float yLeftBottom = detectionMat.at<float>(i, 4) * frame.rows;
            float xRightTop = detectionMat.at<float>(i, 5) * frame.cols;
            float yRightTop = detectionMat.at<float>(i, 6) * frame.rows;

//            std::cout << " " << xLeftBottom
//            << " " << yLeftBottom
//            << " " << xRightTop
//            << " " << yRightTop << std::endl;

            Rect object((int)xLeftBottom, (int)yLeftBottom,
                        (int)(xRightTop - xLeftBottom),
                        (int)(yRightTop - yLeftBottom));

            rectangle(frame, object, Scalar(0, 255, 0));
        }
    }

    frame.copyTo(*dstImage);
    return (jlong)dstImage;
}
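
For completeness: the Importer/createCaffeImporter path used in the detection code above is deprecated as of OpenCV 3.3; readNetFromCaffe, which the classification function already uses, is the current way to load a Caffe model. A minimal sketch under that assumption (the helper name loadSsdNet is mine, not from the original code):

static dnn::Net loadSsdNet(const String &prototxt, const String &caffemodel)
{
    // Non-deprecated equivalent of createCaffeImporter + populateNet
    dnn::Net net = dnn::readNetFromCaffe(prototxt, caffemodel);
    if (net.empty())
        LOGE("Can't load network from %s / %s", prototxt.c_str(), caffemodel.c_str());
    return net;
}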

 
