CPU方案:LibTorch、OpenCV-DNN、OpenVINO、ONNX Runtime(可選擇不同的 execution provider)
GPU方案:TensorRT、OpenCV-DNN(需要重新編譯 OpenCV 並啟用 CUDA 支持)
import os
import torch
from model import U2NET # full size version 173.6 MB
def main():
    """Load a trained U^2-Net checkpoint and export it as a TorchScript
    module (traced on CPU) for consumption from LibTorch/C++.

    Reads ./saved_models/u2net/u2net.pth and writes ./human2-cpu.pt.
    """
    model_name = 'u2net'
    model_dir = os.path.join(os.getcwd(), 'saved_models', model_name, model_name + '.pth')
    print("................................................")
    print(model_dir)
    print("................................................")

    if model_name == 'u2net':
        print("...load U2NET---173.6 MB")
        # 3 input channels (RGB), 1 output channel (saliency mask)
        net = U2NET(3, 1)
    else:
        # Original code fell through here with `net` undefined, which would
        # raise NameError at torch.jit.trace; fail loudly and clearly instead.
        raise ValueError("unsupported model name: %r" % model_name)

    # map_location=cpu so the export also works on machines without a GPU.
    net.load_state_dict(torch.load(model_dir, map_location=torch.device('cpu')))
    net.eval()  # inference mode: freeze dropout / batch-norm statistics before tracing

    # --------- serialize the model ---------
    # example = torch.zeros(1, 3, 512, 512).to(device='cuda')
    example = torch.zeros(1, 3, 512, 512)  # dummy NCHW input used only for tracing
    torch_script_module = torch.jit.trace(net, example)
    torch_script_module.save('human2-cpu.pt')
    print('over')


if __name__ == "__main__":
    main()
// Min-max normalize a tensor: rescale all values linearly into [0, 1].
torch::Tensor normPRED(torch::Tensor d)
{
    const torch::Tensor hi = torch::max(d);
    const torch::Tensor lo = torch::min(d);
    return (d - lo) / (hi - lo);
}
void bgr_u2net(cv::Mat& image_src, cv::Mat& result, torch::jit::Module& model){ //1.模型已經導入 auto device = torch::Device("cpu"); //2.輸入圖片,變換到320 cv::Mat image_src1 = image_src.clone(); cv::resize(image_src, image_src, cv::Size(320, 320)); cv::cvtColor(image_src, image_src, cv::COLOR_BGR2RGB); // 3.圖像轉換爲Tensor torch::Tensor tensor_image_src = torch::from_blob(image_src.data, { image_src.rows, image_src.cols, 3 }, torch::kByte); torch::Tensor tensor_bgr = torch::from_blob(image_src1.data, { image_src1.rows, image_src1.cols,3 }, torch::kByte); tensor_image_src = tensor_image_src.permute({ 2,0,1 }); // RGB -> BGR互換 tensor_image_src = tensor_image_src.toType(torch::kFloat); tensor_image_src = tensor_image_src.div(255); tensor_image_src = tensor_image_src.unsqueeze(0); // 拿掉第一個維度 [3, 320, 320] std::cout << tensor_image_src.sizes() << std::endl; // [1, 3, 320, 320] //同樣方法處理 tensor_bgr = tensor_bgr.permute({ 2,0,1 }); tensor_bgr = tensor_bgr.toType(torch::kFloat); tensor_bgr = tensor_bgr.div(255); tensor_bgr = tensor_bgr.unsqueeze(0); //4.網絡前向計算 auto src = tensor_image_src.to(device); auto outputs = model.forward({ src }).toTuple()->elements(); auto pred = outputs[0].toTensor(); auto res_tensor = (pred * torch::ones_like(src)); std::cout << torch::ones_like(src).sizes() << std::endl; std::cout << src.sizes() << std::endl; res_tensor = normPRED(res_tensor); res_tensor = res_tensor.squeeze(0).detach(); res_tensor = res_tensor.mul(255).clamp(0, 255).to(torch::kU8); //mul函數,表示張量中每個元素乘與一個數,clamp表示夾緊,限制在一個範圍內輸出 res_tensor = res_tensor.to(torch::kCPU); //5.輸出最終結果 cv::Mat resultImg(res_tensor.size(1), res_tensor.size(2), CV_8UC3); std::memcpy((void*)resultImg.data, res_tensor.data_ptr(), sizeof(torch::kU8) * res_tensor.numel()); cv::resize(resultImg, resultImg, cv::Size(image_src1.cols, image_src1.rows), cv::INTER_LINEAR); result = resultImg.clone();}
int main(){ cv::Mat srcImg = cv::imread("e:/template/people2.jpg"); cv::Mat srcImg_; cv::resize(srcImg, srcImg_, cv::Size(512, 512)); if (srcImg_.channels() == 4) cv::cvtColor(srcImg_, srcImg_, cv::COLOR_BGRA2BGR); std::string strModelPath = "e:/template/human2-cpu.pt"; // load model of cpu torch::jit::script::Module styleModule; // load style model auto device_type = at::kCPU; if (torch::cuda::is_available()) { std::cout << "gpu" << std::endl; device_type = at::kCUDA; } try { styleModule = torch::jit::load(strModelPath); styleModule.to(device_type); } catch (const c10::Error& e) { std::cerr << "errir code: -2, error loading the model\n"; return -1; } cv::Mat dstImg; bgr_u2net(srcImg_, dstImg, styleModule); cv::imshow("dstImg", dstImg); cv::waitKey(0); return 1;}
// NOTE(review): the statements below sit at file scope, outside any function —
// they reference `dstImg`/`srcImg` from main() and will not compile as-is.
// This looks like a snippet meant to run inside main() right after
// bgr_u2net() produced dstImg; TODO: move it there.
// Unify sizes to obtain the template (mask at the original image resolution).
cv::resize(dstImg, dstImg, srcImg.size());
cv::Mat backgroundImg, forgroundImg,result, mask;
cv::cvtColor(dstImg, mask, cv::COLOR_BGR2GRAY);
// Binarize the saliency map: pixels > 100 become foreground (255).
cv::threshold(mask, mask,100,255, cv::THRESH_BINARY);
// Separate foreground and background using the mask and its inverse.
srcImg.copyTo(forgroundImg, mask);
cv::bitwise_not(mask, mask);
srcImg.copyTo(backgroundImg, mask);
// Process, then merge: desaturate the background (BGR -> gray -> BGR keeps
// it 3-channel so it can be added to the color foreground).
cv::cvtColor(backgroundImg, backgroundImg, cv::COLOR_BGR2GRAY);
cv::cvtColor(backgroundImg, backgroundImg, cv::COLOR_GRAY2BGR);
result = backgroundImg + forgroundImg;
cv::imshow("mask", mask);
cv::imshow("forgroundImg", forgroundImg);
cv::imshow("backgroundImg", backgroundImg);
cv::imshow("result", result);