Face Recognition with dlib (Part 2): Recognizing Faces from a Descriptor File and a Label File
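
This is the second part of the dlib face recognition series. The descriptor and label files produced in Part 1 (assigner.dat and label.dat, under the MakeFaceLabel project) hold one 128-D face descriptor per enrolled person and the matching name. The program below loads dlib's MMOD CNN face detector, the 68-point shape predictor, and the ResNet face recognition model, deserializes the saved descriptors and labels, and then, for every frame of a video, detects and aligns each face, computes its descriptor, and overlays the name of the nearest stored descriptor (or "unknown") on the display window.

For reference, here is a minimal sketch of how such descriptor and label files can be written with dlib's stream serialization; the variable names and the empty enrollment step are placeholders, only the file names and element types are taken from the listing below:

#include <string>
#include <vector>
#include <dlib/matrix.h>
#include <dlib/serialize.h>

int main()
{
	// One 128-D descriptor per enrolled face, plus the matching person name.
	// In practice these would be filled by running the recognition network
	// on aligned 150x150 face chips; they are left empty here for brevity.
	std::vector<dlib::matrix<float, 0, 1>> descriptors;
	std::vector<std::string> labels;

	// dlib's proxy serialization writes each object to the named file.
	dlib::serialize("assigner.dat") << descriptors;
	dlib::serialize("label.dat") << labels;
	return 0;
}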

#include <iostream>
#include <fstream>
#include <cmath>
#include <dlib/dnn.h>
#include <dlib/data_io.h>
#include <dlib/image_processing.h>
#include <dlib/image_processing/frontal_face_detector.h>
#include <dlib/image_processing/render_face_detections.h>
#include <dlib/gui_widgets.h>
#include <dlib/clustering.h>
#include <dlib/string.h>
#include <dlib/image_io.h>
#include <dlib/opencv.h>
#include <opencv2/highgui/highgui.hpp>

#include <Eigen/Dense>
#include "timestramp.h"

using namespace std;
using namespace dlib;
using namespace cv;
using namespace Eigen;

// ----------------------------------------------------------------------------------------

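// Network definition for dlib's MMOD CNN face detector (loaded from mmod_human_face_detector.dat).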
template <long num_filters, typename SUBNET> using con5d = con<num_filters, 5, 5, 2, 2, SUBNET>;
template <long num_filters, typename SUBNET> using con5 = con<num_filters, 5, 5, 1, 1, SUBNET>;

template <typename SUBNET> using downsampler = relu<affine<con5d<32, relu<affine<con5d<32, relu<affine<con5d<16, SUBNET>>>>>>>>>;
template <typename SUBNET> using rcon5 = relu<affine<con5<45, SUBNET>>>;

using net_type = loss_mmod<con<1, 9, 9, 1, 1, rcon5<rcon5<rcon5<downsampler<input_rgb_image_pyramid<pyramid_down<6>>>>>>>>;


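// Network definition for the ResNet-based face recognition model
// (loaded from dlib_face_recognition_resnet_model_v1.dat); it maps a
// 150x150 face chip to a 128-D descriptor used for matching.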
template <template <int, template<typename>class, int, typename> class block, int N, template<typename>class BN, typename SUBNET>
using residual = add_prev1<block<N, BN, 1, tag1<SUBNET>>>;

template <template <int, template<typename>class, int, typename> class block, int N, template<typename>class BN, typename SUBNET>
using residual_down = add_prev2<avg_pool<2, 2, 2, 2, skip1<tag2<block<N, BN, 2, tag1<SUBNET>>>>>>;

template <int N, template <typename> class BN, int stride, typename SUBNET>
using block = BN<con<N, 3, 3, 1, 1, relu<BN<con<N, 3, 3, stride, stride, SUBNET>>>>>;

template <int N, typename SUBNET> using ares = relu<residual<block, N, affine, SUBNET>>;
template <int N, typename SUBNET> using ares_down = relu<residual_down<block, N, affine, SUBNET>>;

template <typename SUBNET> using alevel0 = ares_down<256, SUBNET>;
template <typename SUBNET> using alevel1 = ares<256, ares<256, ares_down<256, SUBNET>>>;
template <typename SUBNET> using alevel2 = ares<128, ares<128, ares_down<128, SUBNET>>>;
template <typename SUBNET> using alevel3 = ares<64, ares<64, ares<64, ares_down<64, SUBNET>>>>;
template <typename SUBNET> using alevel4 = ares<32, ares<32, ares<32, SUBNET>>>;

using anet_type = loss_metric<fc_no_bias<128, avg_pool_everything<
	alevel0<
	alevel1<
	alevel2<
	alevel3<
	alevel4<
	max_pool<3, 3, 2, 2, relu<affine<con<32, 7, 7, 2, 2,
	input_rgb_image_sized<150>
	>>>>>>>>>>>>;

// Distance helper: L1 norm of the 128-D difference vector, scaled by 1/10.
double GetMinDis(matrix<float, 0, 1> srcData)
{
	double dTmp = 0.0;
	for (long i = 0; i < srcData.size(); i++)
		dTmp += std::abs(srcData(i));

	return dTmp / 10;
}

// Returns the label whose stored descriptor is closest to srcData,
// or "unknown" if the smallest distance is not below the threshold.
string FindNearestFaceData(matrix<float, 0, 1> srcData, std::vector<matrix<float, 0, 1>> allData, std::vector<string> arrLabel)
{
	if (allData.empty())
		return "unknown";

	double dMin = 0.0;
	int inx = 0;
	double dThresh = 0.40;
	for (int i = 0; i < (int)allData.size(); i++)
	{
		double dDist = GetMinDis(srcData - allData[i]);
		if (i == 0 || dDist < dMin)
		{
			dMin = dDist;
			inx = i;
		}

		cout << "Matrix:" << dDist << endl;
	}

	if (dMin < dThresh)
	{
		return arrLabel[inx];
	}
	return "unknown";
}

int main(int argc, char* argv[])
{
	try
	{
		// Create the MMOD CNN face detector
		net_type net;
		deserialize("..\\MakeFaceLabel\\mmod_human_face_detector.dat") >> net;

		// Create the 68-point facial landmark predictor
		shape_predictor sp;
		deserialize("./../MakeFaceLabel/shape_predictor_68_face_landmarks.dat") >> sp;

		// Create the face recognition (metric) network
		anet_type facerec;
		deserialize("./../MakeFaceLabel/dlib_face_recognition_resnet_model_v1.dat") >> facerec;

		// Deserialize the saved face descriptors
		std::vector<matrix<float, 0, 1>> arrSerialize;
		deserialize("./../MakeFaceLabel/assigner.dat") >> arrSerialize;

		// Deserialize the label file
		std::vector<string> arrLabel;
		deserialize("./../MakeFaceLabel/label.dat") >> arrLabel;

		 //cv::VideoCapture cap("E:\\code\\alcode\\dlib-19.15\\examples\\Webcam_pose\\video.mp4");
		//cv::VideoCapture cap("D:\\VSCodeProj\\FaceDetector\\video.mp4");
		cv::VideoCapture cap("D:\\CloudMusic\\MV\\lz_bks.mp4");
		//cv::VideoCapture cap(0);
		if (!cap.isOpened())
		{
			cerr << "Unable to connect to camera" << endl;
			return 1;
		}

		cv::Mat temp;
		image_window im_win;
		while (cap.read(temp))
		{
			timestramp ti;
			cv_image<bgr_pixel> cimg(temp);
			matrix<rgb_pixel> img;
			dlib::assign_image(img, cimg);

			auto dets = net(img);
			std::vector<matrix<rgb_pixel>> faces;
			for (auto&& d : dets)
			{
				// get the landmarks for this face
				auto shape = sp(img, d.rect);
				matrix<rgb_pixel> face_chip;
				extract_image_chip(img, get_face_chip_details(shape, 150, 0.25), face_chip);
				faces.push_back(move(face_chip));
			}

			im_win.set_title("image");
			im_win.set_image(img);
			im_win.clear_overlay();

			if (faces.size() == 0)
			{
				cout << "No faces found in image!" << endl;
				continue;
			}

			{
				std::vector<matrix<float, 0, 1>> face_descriptors = facerec(faces);
				for (int i = 0; i < dets.size(); i++)
				{
					string strName = FindNearestFaceData(face_descriptors[i], arrSerialize, arrLabel);
					im_win.add_overlay(dets[i].rect, rgb_pixel(0, 255, 0), strName);
				}
			}
		}
	}
	catch (exception &e)
	{
		cout << e.what() << endl;
	}

	return 0;
}
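
A note on the matching metric: GetMinDis above sums the absolute differences of the 128 descriptor components and divides by 10, and FindNearestFaceData accepts a match when that value is below 0.40. dlib's own face recognition example instead compares descriptors with the Euclidean length of the difference vector and treats distances below 0.6 as the same person. A minimal alternative matcher in that style is sketched here; the 0.6 threshold comes from dlib's dnn_face_recognition_ex example, and FindNearestFaceDataL2 is just an illustrative name:

#include <limits>
#include <string>
#include <vector>
#include <dlib/matrix.h>

// Alternative matcher: Euclidean (L2) distance with dlib's usual 0.6 threshold.
std::string FindNearestFaceDataL2(const dlib::matrix<float, 0, 1>& srcData,
                                  const std::vector<dlib::matrix<float, 0, 1>>& allData,
                                  const std::vector<std::string>& arrLabel)
{
	double dMin = std::numeric_limits<double>::max();
	size_t inx = 0;
	for (size_t i = 0; i < allData.size(); i++)
	{
		// dlib::length() returns the Euclidean norm of a vector.
		double dDist = dlib::length(srcData - allData[i]);
		if (dDist < dMin)
		{
			dMin = dDist;
			inx = i;
		}
	}
	// Two descriptors of the same person are typically less than 0.6 apart.
	if (!allData.empty() && dMin < 0.6)
		return arrLabel[inx];
	return "unknown";
}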