When using neural networks to extract structured data from video, a queue is needed as a buffer once the number of video streams grows large. The code below is my implementation: when running SSD detection against many streams, the streams produce frames far faster than the detector can consume them, so a queue has to sit between the two.
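The post never shows MyQueue or queue_data (they live in the author's my_video_data_layer.h), so here is a minimal sketch of what such a thread-safe blocking queue could look like. The bounded capacity, the drop-oldest policy, and everything except the names MyQueue, queue_data, push and pop are my assumptions, not the author's code:

#include <cstddef>
#include <queue>
#include <boost/thread/mutex.hpp>
#include <boost/thread/condition_variable.hpp>
#include <opencv2/core/core.hpp>

// Hypothetical frame record: which stream the frame came from, plus the frame.
struct queue_data {
    int thread_id;
    cv::Mat img;
};

// Hypothetical thread-safe blocking queue. push() drops the oldest frame when
// the buffer is full so slow detection never blocks the decoders; pop() blocks
// until a frame is available.
template <typename T>
class MyQueue {
public:
    explicit MyQueue(std::size_t capacity = 100) : capacity_(capacity) {}
    void push(const T& item) {
        boost::mutex::scoped_lock lock(mutex_);
        if (queue_.size() >= capacity_) {
            queue_.pop();  // discard the oldest frame instead of blocking the producer
        }
        queue_.push(item);
        cond_.notify_one();
    }
    T pop() {
        boost::mutex::scoped_lock lock(mutex_);
        while (queue_.empty()) {
            cond_.wait(lock);  // sleep until a producer pushes a frame
        }
        T item = queue_.front();
        queue_.pop();
        return item;
    }
private:
    std::queue<T> queue_;
    std::size_t capacity_;
    boost::mutex mutex_;
    boost::condition_variable cond_;
};

The full program follows.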
#include <caffe/caffe.hpp>
#ifdef USE_OPENCV
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#endif
#include <algorithm>
#include <iomanip>
#include <iosfwd>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <iostream>
#include <caffe/layers/my_video_data_layer.h>  //author's custom header: declares Video, MyQueue<T>, queue_data and excludeFixedObjects
#include <caffe/util/bbox_util.hpp>
#include <csignal>
#include <boost/algorithm/string/classification.hpp>
#include <boost/algorithm/string/split.hpp>
#include <sys/stat.h>
#include <dirent.h>
#include <uuid/uuid.h>
#include <time.h>
#include <fstream>
#include <sstream>
#include <map>
#include <list>
#include <boost/thread.hpp>
#include <boost/bind.hpp>
#include <boost/lexical_cast.hpp>
using namespace std;
#ifdef USE_OPENCV
using namespace caffe;
//Per-stream parameters handed to each video-decoding thread
struct thread_data{
int thread_id;
string message;
MyQueue<queue_data> *myQueue;
};
class Detector{
public:
Detector(const string& model_file,
const string& weights_file,
const string& mean_value);
std::vector<vector<float> > Detect(const cv::Mat& img);
private:
void SetMean(const string& mean_value);
void WrapInputLayer(std::vector<cv::Mat>* input_channels);
void Preprocess(const cv::Mat& img,
std::vector<cv::Mat>* input_channels);
shared_ptr<Net<float> > net_;
cv::Size input_geometry_;
int num_channels_;
cv::Mat mean_;
};
DEFINE_string(gpu, "0",
"Optional; run in GPU mode on given device IDs separated by ','."
"Use '-gpu all' to run on all available GPUs. The effective training "
"batch size is multiplied by the number of devices.");
DEFINE_string(mean_value, "104,117,123",
"If specified, can be one value or can be same as image channels"
" - would subtract from the corresponding channel). Separated by ','."
"Either mean_file or mean_value should be provided, not both.");
DEFINE_double(confidence_threshold, 0.3,
"Only store detections with score higher than the threshold.");
DEFINE_string(rtsp_file, "list.txt",
"If provided, store the detection results in the rtsp_file.");
//Get the GPU devices that are available to use
static void get_gpus(vector<int>* gpus){
if (FLAGS_gpu == "all"){
int count = 0;
#ifndef CPU_ONLY
CUDA_CHECK(cudaGetDeviceCount(&count));
#else
NO_GPU;
#endif
for (int i = 0; i < count; ++i){
gpus->push_back(i);
}
}else if (FLAGS_gpu.size()){
vector<string> strings;
boost::split(strings, FLAGS_gpu, boost::is_any_of(","));
for (int i = 0; i < strings.size();++i){
gpus->push_back(boost::lexical_cast<int>(strings[i]));
}
}else{
CHECK_EQ(gpus->size(), 0);
}
}
Detector::Detector(const string &model_file, const string &weights_file,
const string &mean_value) {
//Select the GPU device and the execution mode
vector<int> gpus;
get_gpus(&gpus);
if (gpus.size() != 0){
LOG(INFO) << "Use GPU with device ID " << gpus[0];
#ifndef CPU_ONLY
cudaDeviceProp device_prop;
cudaGetDeviceProperties(&device_prop, gpus[0]);
LOG(INFO) << "GPU device name: " << device_prop.name;
#endif
Caffe::SetDevice(gpus[0]);
Caffe::set_mode(Caffe::GPU);
} else {
LOG(INFO) << "Use CPU.";
Caffe::set_mode(Caffe::CPU);
}
//Load the network
net_.reset(new Net<float >(model_file, TEST));
net_->CopyTrainedLayersFrom(weights_file);
CHECK_EQ(net_->num_inputs(), 1) << "Network should have exactly one input.";
CHECK_EQ(net_->num_outputs(), 1) << "Network should have exactly one output.";
Blob<float>* input_layer = net_->input_blobs()[0];
num_channels_ = input_layer->channels();
CHECK(num_channels_ == 3 || num_channels_ == 1)
<< "Input layer should have 1 or 3 channels.";
input_geometry_ = cv::Size(input_layer->width(), input_layer->height());
SetMean(mean_value);
}
//Run object detection on a single frame
std::vector<vector<float> > Detector::Detect(const cv::Mat &img) {
Blob<float >* input_layer = net_->input_blobs()[0];
input_layer->Reshape(1, num_channels_,
input_geometry_.height, input_geometry_.width);
net_->Reshape();
std::vector<cv::Mat> input_channels;
WrapInputLayer(&input_channels);
Preprocess(img, &input_channels);
net_->Forward();
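//The detection_output layer produces a blob of shape [1, 1, num_det, 7];
//each row is [image_id, label, score, xmin, ymin, xmax, ymax], and rows with
//image_id == -1 are padding that carries no detection.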
Blob<float >* result_blob = net_->output_blobs()[0];
const float* result = result_blob->cpu_data();
const int num_det = result_blob->height();
vector<vector<float> > detections;
for (int k = 0; k < num_det; ++k){
if (result[0] == -1){
result += 7;
continue;
}
vector<float > detection(result, result + 7);
detections.push_back(detection);
result += 7;
}
return detections;
}
//Build the mean image used to normalize the input
void Detector::SetMean(const string &mean_value) {
cv::Scalar channel_mean;
if (!mean_value.empty()){
stringstream ss(mean_value);
vector<float > values;
string item;
while (getline(ss, item, ',')){
float value = std::atof(item.c_str());
values.push_back(value);
}
CHECK(values.size() == 1 || values.size() == num_channels_) <<
"Specify either 1 mean_value or as many as channels: "<< num_channels_;
std::vector<cv::Mat> channels;
for (int i = 0; i < num_channels_; ++i){
//reuse a single mean value for all channels when only one was given
cv::Mat channel(input_geometry_.height, input_geometry_.width, CV_32FC1,
cv::Scalar(values.size() == 1 ? values[0] : values[i]));
channels.push_back(channel);
}
cv::merge(channels, mean_);
}
}
//Wrap the input layer of the network in separate cv::Mat objects (one per channel).
//This way we save one memcpy operation and don't need to rely on cudaMemcpy2D.
//The last preprocessing operation will write the separate channels directly to the input layer.
void Detector::WrapInputLayer(std::vector<cv::Mat> *input_channels) {
Blob<float>* input_layer = net_->input_blobs()[0];
int width = input_layer->width();
int height = input_layer->height();
float* input_data = input_layer->mutable_cpu_data();
for (int i = 0; i < input_layer->channels();++i){
cv::Mat channel(height,width, CV_32FC1, input_data);
input_channels->push_back(channel);
input_data += width * height;
}
}
void Detector::Preprocess(const cv::Mat &img, std::vector<cv::Mat> *input_channels) {
cv::Mat sample;
if (img.channels() == 3 && num_channels_ == 1)
cv::cvtColor(img, sample, cv::COLOR_BGR2GRAY);
else if (img.channels() == 4 && num_channels_ == 1)
cv::cvtColor(img, sample, cv::COLOR_BGRA2GRAY);
else if (img.channels() == 4 && num_channels_ == 3)
cv::cvtColor(img, sample, cv::COLOR_BGRA2BGR);
else if (img.channels() == 1 && num_channels_ == 3)
cv::cvtColor(img, sample, cv::COLOR_GRAY2BGR);
else
sample = img;
cv::Mat sample_resized;
if (sample.size() != input_geometry_)
cv::resize(sample, sample_resized, input_geometry_);
else
sample_resized = sample;
cv::Mat sample_float;
if (num_channels_ == 3)
sample_resized.convertTo(sample_float, CV_32FC3);
else
sample_resized.convertTo(sample_float, CV_32FC1);
cv::Mat sample_normalized;
cv::subtract(sample_float, mean_, sample_normalized);
//This operation writes the separate BGR planes directly to the input layer of the network, since it is wrapped by the cv::Mat objects in input_channels.
cv::split(sample_normalized, *input_channels);
CHECK(reinterpret_cast<float*>(input_channels->at(0).data)
== net_->input_blobs()[0]->cpu_data())
<<"Input channels are not wrapping the input layer of the network.";
}
//Open a video stream and decode its frames into the shared queue
void videoDecode(void *threaddata){
struct thread_data *my_data;
my_data = (struct thread_data *) threaddata;
int result;
Video *video1 = new Video((my_data->message).c_str(),0);
result = video1->Init();
//A non-negative result means initialization succeeded
if (result >= 0 ){
int ret;
queue_data queueData;
cv::Mat img;
while(1){
ret = video1->Decode(img);
//Decode() returning 1 is taken to mean that a frame was produced
if (ret == 1){
queueData.thread_id = my_data->thread_id;
queueData.img = img;
my_data->myQueue->push(queueData);
}
}
}
else {
delete video1;  //release the decoder; calling the destructor directly would leak the object
cout << "Failed to open video stream: " << my_data->message << endl;
return;
}
}
//Start one thread per RTSP stream to initialize the video parameters
void queue_video(MyQueue<queue_data> *myQueue_){
std::ifstream infile(FLAGS_rtsp_file.c_str());
std::string file;
boost::thread_group tg;
list<shared_ptr<thread_data> > list_td;
int i = 0;
while (infile >> file) {
thread_data *td = new thread_data;
//Launch the RTSP threads one by one
td->thread_id = i;
td->message =file;
td->myQueue = myQueue_;
tg.create_thread(boost::bind(&videoDecode,td));
list_td.push_back(shared_ptr<thread_data>(td));
++i;
}
tg.join_all();
}
//Initialize the model parameters and run detection on queued frames
void detectImg(MyQueue<queue_data> *myQueue_){
// Initialize the network.
const string& model_file = "/models/SSD_300x300/deploy.prototxt";
const string& weights_file = "/models/SSD_300x300_ft_video/VGG_VOC0712Plus_SSD_300x300_ft_iter_160000.caffemodel";
const string& mean_value = FLAGS_mean_value;
const float confidence_threshold = FLAGS_confidence_threshold;
Detector detector(model_file, weights_file, mean_value);
cv::Mat img;
vector<vector<float> > previous_detections;
vector<vector<float> > current_detections;
vector<vector<float> > last_detections;
map<int,vector<vector<float> > > mymap;
map<int,vector<vector<float> > >::iterator mapIterator;
const vector<cv::Scalar>& colors = GetColors(21);  //21 = 20 PASCAL VOC classes plus background
char buffer[50];
double scale = 1;
int thickness = 2;
int fontface = cv::FONT_HERSHEY_SIMPLEX;
int baseline = 0;
while(true){
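//pop() is assumed to block until a decoded frame is available in the queue.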
queue_data queueData = myQueue_->pop();
img = queueData.img;
CHECK(!img.empty()) << "Error when reading frame";
std::vector<vector<float> > detections = detector.Detect(img);
vector<vector<float> >().swap(current_detections);
for (int i = 0; i < detections.size(); ++i) {
const vector<float>& d = detections[i];
// Detection format: [image_id, label, score, xmin, ymin, xmax, ymax].
CHECK_EQ(d.size(), 7);
const float score = d[2];
if (score >= confidence_threshold) {
//data format: [label, score, xmin, ymin, xmax, ymax]
//Save the detected object from this frame.
vector<float> data(6);
data[0] = d[1];
data[1] = score;
data[2] = d[3];
data[3] = d[4];
data[4] = d[5];
data[5] = d[6];
current_detections.push_back(data);
}
}
vector<vector<float> >().swap(previous_detections);
mapIterator = mymap.find(queueData.thread_id);
if (mapIterator != mymap.end()){
previous_detections = mapIterator->second;
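//excludeFixedObjects comes from the author's custom header; judging by its name
//and arguments it presumably filters out objects that are unchanged between the
//previous and current detections of a stream, but its implementation is not shown.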
last_detections = excludeFixedObjects(&previous_detections,current_detections);
mymap[queueData.thread_id] = previous_detections;
}
else {
last_detections = current_detections;
mymap[queueData.thread_id] = last_detections;
}
//Loop over the final results and display them
for (int i = 0; i < last_detections.size(); ++i) {
const vector<float>& bboxes = last_detections[i];
const float label = bboxes[0];
const int width = img.cols;
const int height = img.rows;
const cv::Scalar& color = colors[static_cast<int>(label)];  //index colors by class label, not by the score in bboxes[1]
//data format: [label, score, xmin, ymin, xmax, ymax]
NormalizedBBox bbox1;
bbox1.set_xmin(bboxes[2]);
bbox1.set_ymin(bboxes[3]);
bbox1.set_xmax(bboxes[4]);
bbox1.set_ymax(bboxes[5]);
float bbox1_size = BBoxSize(bbox1) * width * height;
//Skip objects whose area is too small
if (bbox1_size < 4000){
continue;
}
int xmin = (bboxes[2]* width);
int ymin = (bboxes[3]* height);
int xmax = (bboxes[4]* width);
int ymax = (bboxes[5]* height);
cv::Point top_left_pt(xmin, ymin);
cv::Point bottom_right_pt(xmax, ymax);
cv::rectangle(img, top_left_pt, bottom_right_pt, color, 4);
cv::Point bottom_left_pt(xmin, ymax);
snprintf(buffer, sizeof(buffer), "%.0f", label);  //draw the class label without trailing decimals
cv::Size text = cv::getTextSize(buffer, fontface, scale, thickness,
&baseline);
cv::rectangle(
img, bottom_left_pt + cv::Point(0, 0),
bottom_left_pt + cv::Point(text.width, -text.height-baseline),
color, CV_FILLED);
cv::putText(img, buffer, bottom_left_pt - cv::Point(0, baseline),
fontface, scale, CV_RGB(0, 0, 0), thickness, 8);
}
stringstream stream;
stream<<queueData.thread_id;
cv::imshow(stream.str(), img);
if (cv::waitKey(1) == 27) {
return;
}
}
}
//Start two threads: one to feed the queue with video data, one to run the network
void startThread(MyQueue<queue_data> &myQueue_){  //pass by reference: the queue guards a mutex and must be shared, not copied
boost::thread thrd(boost::bind(&queue_video,&myQueue_));
boost::thread thrd1(boost::bind(&detectImg,&myQueue_));
thrd.join();
thrd1.join();
}
int main(int argc, char** argv){
gflags::ParseCommandLineFlags(&argc, &argv, true);
//Queue that buffers the decoded frames
MyQueue<queue_data> myQueue_;
//Start the frame-producing thread and the detection thread
startThread(myQueue_);
return 0;
}
#endif // USE_OPENCV
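Assuming the program is built inside the SSD Caffe tree as, say, video_queue_detect (the binary name is hypothetical), it could be launched against a list of stream URLs like this:

./video_queue_detect --gpu=0 --rtsp_file=list.txt --confidence_threshold=0.3 --mean_value=104,117,123

Here list.txt holds one RTSP URL per line; queue_video spawns one decoding thread per URL, while the single detectImg thread drains the shared queue as fast as the GPU allows.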