Testing OpenVINO Samples on Linux

I. Testing OpenVINO's own demos

cd /app/intel/openvino/deployment_tools/inference_engine/samples

sh build_samples.sh

This generates a number of sample binaries under /app/inference_engine_samples_build; here we test object_detection_sample_ssd.

cd /app/inference_engine_samples_build/intel64/Release

./object_detection_sample_ssd -i /app/openvino_test/dog.jpg -m /app/openvino_test/ssd_mobilenet_v2_coco_2018_03_29/FP32/frozen_inference_graph.xml -d CPU -l /app/inference_engine_samples_build/intel64/Release/lib/libcpu_extension.so


The detection results are then displayed.

While running the sample, you may encounter the following error:

./object_detection_sample_ssd: error while loading shared libraries: libformat_reader.so: cannot open shared object file: No such file or directory

vi ~/.bashrc

Add the following line at the end, then run source ~/.bashrc to apply it:

export LD_LIBRARY_PATH=/app/inference_engine_samples_build/intel64/Release/lib:$LD_LIBRARY_PATH

II. Testing a project of your own

For example, to test the ssd_mobilenetV2 model:

1. Create ssd_mobilenetV2_openvino.cpp as follows:

#include <inference_engine.hpp>
#include <reshape_ssd_extension.hpp>
#include <ext_list.hpp>
#include <chrono>
#include <iomanip>   // std::setprecision
#include <sstream>   // std::ostringstream
#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace InferenceEngine;
using namespace std;

// Copy a BGR cv::Mat (interleaved HWC layout) into the network's input blob,
// resizing if needed and rearranging the pixels into planar NCHW layout.
template <typename T>
void matU8ToBlob(const cv::Mat& orig_image, InferenceEngine::Blob::Ptr& blob, int batchIndex = 0) {
	InferenceEngine::SizeVector blobSize = blob->getTensorDesc().getDims();
	const size_t width = blobSize[3];
	const size_t height = blobSize[2];
	const size_t channels = blobSize[1];
	T* blob_data = blob->buffer().as<T*>();

	cv::Mat resized_image(orig_image);
	if (width != orig_image.size().width || height != orig_image.size().height) {
		cv::resize(orig_image, resized_image, cv::Size(width, height));
	}

	// Per-pixel HWC -> CHW copy: this loop is expensive!
	for (size_t h = 0; h < height; h++) {
		uchar* curr_row = resized_image.ptr<uchar>(h);
		for (size_t w = 0; w < width; w++) {
			for (size_t c = 0; c < channels; c++) {
				blob_data[c * width * height + h * width + w] = *curr_row++;
			}
		}
	}

}

// Fill the named input blob of the given request with a video frame
void frametoBlob(const Mat &frame, InferRequest::Ptr &inferRequest, const std::string & inputName) {
	Blob::Ptr frameBlob = inferRequest->GetBlob(inputName);
	matU8ToBlob<uint8_t>(frame, frameBlob);
}
int main(int argc, char** argv)
{

	string xml = "/app/openvino_test/ssd_mobilenet_v2_coco_2018_03_29/FP32/frozen_inference_graph.xml";
	string bin = "/app/openvino_test/ssd_mobilenet_v2_coco_2018_03_29/FP32/frozen_inference_graph.bin";
	string input_file = "/app/openvino_test/demo.mp4";

	//namedWindow("frame", WINDOW_NORMAL);
	namedWindow("pedestrian detection", WINDOW_NORMAL);

	typedef std::chrono::duration<double, std::ratio<1, 1000>> ms;



	// Create the Inference Engine Core object
	// --------------------------- 1. Load inference engine -------------------------------------
	Core ie;

	// Register the CPU extension layers required by this model
	IExtensionPtr cpuExtension, inPlaceExtension;
	
	cpuExtension = std::make_shared<Extensions::Cpu::CpuExtensions>();
	inPlaceExtension = std::make_shared<InPlaceExtension>();
	ie.AddExtension(cpuExtension, "CPU");
	ie.AddExtension(inPlaceExtension, "CPU");


	// -----------------------------------------------------------------------------------------------------


	// Load the network topology (.xml) and weights (.bin)
	CNNNetReader network_reader;
	network_reader.ReadNetwork(xml);
	network_reader.ReadWeights(bin);

	// Get the input and output information
	auto network = network_reader.getNetwork();
	InferenceEngine::InputsDataMap input_info(network.getInputsInfo());
	InferenceEngine::OutputsDataMap output_info(network.getOutputsInfo());

	// Configure input and output precision / layout
	for (auto &item : input_info) {
		auto input_data = item.second;
		input_data->setPrecision(Precision::U8);
		input_data->setLayout(Layout::NCHW);
	}
	auto inputName = input_info.begin()->first;

	for (auto &item : output_info) {
		auto output_data = item.second;
		output_data->setPrecision(Precision::FP32);
	}

	// Create the executable network on the CPU device
	auto exec_network = ie.LoadNetwork(network, "CPU");

	// Create two inference requests for the asynchronous pipeline
	auto infer_request_curr = exec_network.CreateInferRequestPtr();
	auto infer_request_next = exec_network.CreateInferRequestPtr();

	VideoCapture capture(input_file);
	Mat curr_frame, next_frame;
	capture.read(curr_frame);
	//image.copyTo(curr_frame);
	int image_width = curr_frame.cols;
	int image_height = curr_frame.rows;
	bool isLastFrame = false;
	bool isFirstFrame = true;
	frametoBlob(curr_frame, infer_request_curr, inputName);
	while (true) {
		//if (!capture.read(next_frame)) {

		//image.copyTo(next_frame);
		if (!capture.read(next_frame))
		{
			if (next_frame.empty())
			{
				isLastFrame = true;
			}
		}
		auto t0 = std::chrono::high_resolution_clock::now();
		if (!isLastFrame) {
			frametoBlob(next_frame, infer_request_next, inputName);
		}

		// Start asynchronous execution of the model
		if (isFirstFrame) {
			infer_request_curr->StartAsync();
			infer_request_next->StartAsync();
			isFirstFrame = false;
		}
		else {
			if (!isLastFrame) {
				infer_request_next->StartAsync();
			}
		}

		// Wait for the current request and read back its results
		if (OK == infer_request_curr->Wait(IInferRequest::WaitMode::RESULT_READY)) {
			auto output_name = output_info.begin()->first;
			auto output = infer_request_curr->GetBlob(output_name);
			// The output blob has shape [1, 1, N, 7]; each row of 7 floats is
			// [image_id, label, confidence, x_min, y_min, x_max, y_max]
			const float* detection = static_cast<PrecisionTrait<Precision::FP32>::value_type*>(output->buffer());
			const SizeVector outputDims = output->getTensorDesc().getDims();
			const int rows = outputDims[2];
			const int object_size = outputDims[3];
			for (int row = 0; row < rows; row++) {
				float label = detection[row*object_size + 1];
				float confidence = detection[row*object_size + 2];
				float x_min = detection[row*object_size + 3] * image_width;
				float y_min = detection[row*object_size + 4] * image_height;
				float x_max = detection[row*object_size + 5] * image_width;
				float y_max = detection[row*object_size + 6] * image_height;
				if (confidence > 0.5) {
				Rect object_box((int)x_min, (int)y_min, (int)(x_max - x_min), (int)(y_max - y_min));
					rectangle(curr_frame, object_box, Scalar(0, 0, 255), 2, 8, 0);
				}
			}

			// Measure the elapsed time and report FPS
			auto t1 = std::chrono::high_resolution_clock::now();
			ms dtime = std::chrono::duration_cast<ms>(t1 - t0);
			ostringstream ss;
			ss << "SSD MobilenetV2 detection fps: " << std::fixed << std::setprecision(2) << 1000 / dtime.count();
			//ss << "detection time : " << std::fixed << std::setprecision(2) << dtime.count() << " ms";
			printf("SSD MobilenetV2 detection fps = %f \n", 1000 / dtime.count());
			putText(curr_frame, ss.str(), Point(50, 50), FONT_HERSHEY_SIMPLEX, 1.0, Scalar(0, 0, 255), 2, 8);
		}
		imshow("pedestrian detection", curr_frame);
		char c = waitKey(2);
		if (c == 27) {
			break;
		}
		if (isLastFrame) {
			break;
		}

		// Swap the frames and the two requests for the next iteration
		next_frame.copyTo(curr_frame);
		infer_request_curr.swap(infer_request_next);
	}

	capture.release();
	destroyAllWindows();
	return 0;
}
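
For reference, the parsing loop above depends on the layout of the SSD DetectionOutput blob: its shape is [1, 1, N, 7], where each row of 7 floats is [image_id, label, confidence, x_min, y_min, x_max, y_max] with coordinates normalized to [0, 1], and a row whose image_id is -1 marks the end of the valid detections. The helper below is a minimal sketch (hypothetical, not part of the program above) showing how that parsing could be factored out:

// Minimal sketch of SSD DetectionOutput parsing (hypothetical helper).
struct Detection {
	int label;
	float confidence;
	cv::Rect box;
};

std::vector<Detection> parseSSDOutput(const float* data, size_t rows,
                                      int img_w, int img_h, float threshold) {
	std::vector<Detection> results;
	for (size_t i = 0; i < rows; i++) {
		const float* row = data + i * 7;
		if (row[0] < 0) break;      // image_id == -1: no more valid detections
		if (row[2] < threshold) continue;
		int x_min = static_cast<int>(row[3] * img_w);
		int y_min = static_cast<int>(row[4] * img_h);
		int x_max = static_cast<int>(row[5] * img_w);
		int y_max = static_cast<int>(row[6] * img_h);
		results.push_back({ static_cast<int>(row[1]), row[2],
		                    cv::Rect(x_min, y_min, x_max - x_min, y_max - y_min) });
	}
	return results;
}

Checking the image_id sentinel avoids reading stale rows when fewer than N objects are detected in a frame.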

2. Create CMakeLists.txt:

cmake_minimum_required(VERSION 3.12)
# Project name
project(ssd_mobilenetV2_demo)
# Use the C++11 standard
set(CMAKE_CXX_STANDARD 11)

# Include paths
include_directories(
         /app/intel/openvino/deployment_tools/inference_engine/external/tbb/include
         /app/intel/openvino/opencv/include/opencv2
         /app/intel/openvino/deployment_tools/inference_engine/include
         /app/intel/openvino/deployment_tools/inference_engine/samples
         /app/intel/openvino/deployment_tools/inference_engine/src/extension
        )

# Library search paths
link_directories(
      /app/intel/openvino/deployment_tools/inference_engine/lib/intel64
      /app/inference_engine_samples_build/intel64/Release/lib
      /usr/local/lib64
      /app/intel/openvino/deployment_tools/inference_engine/external/tbb/lib
     )

# Shared libraries to link
link_libraries(
      /app/intel/openvino/opencv/lib/libopencv_core.so
      /app/intel/openvino/opencv/lib/libopencv_highgui.so
      /app/intel/openvino/opencv/lib/libopencv_imgcodecs.so
      /app/intel/openvino/opencv/lib/libopencv_videoio.so
      /app/intel/openvino/opencv/lib/libopencv_imgproc.so
      /app/intel/openvino/opencv/lib/libopencv_videoio_ffmpeg.so
      /app/inference_engine_samples_build/intel64/Release/lib/libcpu_extension.so
      /app/intel/openvino/deployment_tools/inference_engine/lib/intel64/libinference_engine.so
)

add_executable(ssd_mobilenetV2_demo ssd_mobilenetV2_openvino.cpp)

3. Put CMakeLists.txt and ssd_mobilenetV2_openvino.cpp in the same directory, for example /app/openvino_test/ssd_demo/:

   cd /app/openvino_test/ssd_demo/

   mkdir build

   cd build

   cmake ..

   make

4. If the build succeeds, the ssd_mobilenetV2_demo executable appears under /app/openvino_test/ssd_demo/build.

Run it: ./ssd_mobilenetV2_demo
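
Note that the model and video paths are hard-coded at the top of main(). A small, hypothetical variant (sketch below) would let you pass them on the command line instead:

// Hypothetical variant: read the paths from argv, falling back to the
// hard-coded defaults. Usage: ./ssd_mobilenetV2_demo [xml] [bin] [video]
string xml = (argc > 1) ? argv[1] : "/app/openvino_test/ssd_mobilenet_v2_coco_2018_03_29/FP32/frozen_inference_graph.xml";
string bin = (argc > 2) ? argv[2] : "/app/openvino_test/ssd_mobilenet_v2_coco_2018_03_29/FP32/frozen_inference_graph.bin";
string input_file = (argc > 3) ? argv[3] : "/app/openvino_test/demo.mp4";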

