TensorFlow C++: fixing errors when loading a checkpoint model (model.meta / model.index) and converting a cv::Mat to a Tensor

In the previous post I already ran several TensorFlow C++ examples successfully, so the TensorFlow C++ build, compilation and runtime setup is known to work.

This time the model to load is not a .pb file but a checkpoint consisting of model.meta / model.index files.

Here is the Python version:

import tensorflow as tf
import numpy as np
from skimage import io, transform
import cv2

sess = tf.Session()
new_saver = tf.train.import_meta_graph(r'D:\CNN_test_result_sizhuangzao_model\v2_101_ziji_9\model\model.meta')
new_saver.restore(sess, tf.train.latest_checkpoint(r'D:\CNN_test_result_sizhuangzao_model\v2_101_ziji_9\model'))

graph = tf.get_default_graph()
input_x = graph.get_tensor_by_name("input:0")
is_training_x = graph.get_tensor_by_name("is_training:0")
#print (sess.run("input:0"))

jpg_path = r"D:\CNN_test_result_sizhuangzao\chaodacesiji1\pos.txt"
file = open(jpg_path)
lines = file.readlines()
num = 0
for line in lines:
    img0 = io.imread(line.strip())
    l = img0.shape
    k = l[0]  # height
    c = l[1]  # width
    num += 1
    print(num)
    if c / k < 5:  # skip images whose width/height ratio is below 5
        continue
    img = transform.resize(img0, (48, 160, 1))  # also rescales pixel values to [0, 1]
    feed_dict = {input_x: np.reshape(img, [-1, 48, 160, 1]), is_training_x: False}

    prob_op = graph.get_operation_by_name('output')  # (unused)
    out_softmax = graph.get_tensor_by_name("output:0")
    img_out_softmax = sess.run(out_softmax, feed_dict)
    prediction_labels = np.argmax(img_out_softmax, 1)

My colleague said it runs and predicts correctly under Python.

Then, on the C++ side, I wrote my version following this example:

https://blog.csdn.net/seniusen/article/details/82972734

#include "tensorflow/core/framework/graph.pb.h"
#include <tensorflow/core/public/session_options.h>
#include <tensorflow/core/protobuf/meta_graph.pb.h>
#include <fstream>
#include <utility>
#include <vector>
#include <Eigen/Core>
#include <Eigen/Dense>

#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/default_device.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/command_line_flags.h"

using namespace std;
using namespace tensorflow;
using namespace tensorflow::ops;
using tensorflow::Flag;
using tensorflow::Tensor;
using tensorflow::Status;
using tensorflow::string;
using tensorflow::int32;

typedef std::vector<std::pair<std::string, tensorflow::Tensor>> tensor_dict;
using tensorflow::Status;

static Status ReadtheFile(tensorflow::Env* env, const string& filename,Tensor* output) {
  tensorflow::uint64 file_size = 0;
  TF_RETURN_IF_ERROR(env->GetFileSize(filename, &file_size));

  string contents;
  contents.resize(file_size);

  std::unique_ptr<tensorflow::RandomAccessFile> file;
  TF_RETURN_IF_ERROR(env->NewRandomAccessFile(filename, &file));

  tensorflow::StringPiece data;
  TF_RETURN_IF_ERROR(file->Read(0, file_size, &data, &(contents)[0]));
  if (data.size() != file_size) {
    return tensorflow::errors::DataLoss("Truncated read of '", filename,
                                        "' expected ", file_size, " got ",
                                        data.size());
  }
//  output->scalar<string>()() = data.ToString();
  output->scalar<string>()() = string(data);
  return Status::OK();
}

Status ReadImageFile(const string& file_name, const int input_height,
                               const int input_width, const float input_mean,
                               const float input_std,
                               std::vector<Tensor>* out_tensors) {
  auto root = tensorflow::Scope::NewRootScope();
  using namespace ::tensorflow::ops;

  string input_name = "file_reader";
  string output_name = "normalized";

  // read file_name into a tensor named input
  Tensor input(tensorflow::DT_STRING, tensorflow::TensorShape());
  TF_RETURN_IF_ERROR(ReadtheFile(tensorflow::Env::Default(), file_name, &input));

  // use a placeholder to read input data
  auto file_reader =Placeholder(root.WithOpName("input"), tensorflow::DataType::DT_STRING);

  std::vector<std::pair<string, tensorflow::Tensor>> inputs = {{"input", input},};

  // Now try to figure out what kind of file it is and decode it.
  const int wanted_channels = 1;
  tensorflow::Output image_reader;
  if (tensorflow::str_util::EndsWith(file_name, ".png")) {
    image_reader = DecodePng(root.WithOpName("png_reader"), file_reader,
                             DecodePng::Channels(wanted_channels));
  } else if (tensorflow::str_util::EndsWith(file_name, ".gif")) {
    // The GIF decoder returns a 4-D tensor; remove the first dim.
    image_reader =
        Squeeze(root.WithOpName("squeeze_first_dim"),
                DecodeGif(root.WithOpName("gif_reader"), file_reader));
  } else if (tensorflow::str_util::EndsWith(file_name, ".bmp")) {
    image_reader = DecodeBmp(root.WithOpName("bmp_reader"), file_reader);
  } else {
    // Assume if it's neither a PNG nor a GIF then it must be a JPEG.
    image_reader = DecodeJpeg(root.WithOpName("jpeg_reader"), file_reader,
                              DecodeJpeg::Channels(wanted_channels));
  }
  // Now cast the image data to float so we can do normal math on it.
  auto float_caster =Cast(root.WithOpName("float_caster"), image_reader, tensorflow::DT_FLOAT);

  auto dims_expander = ExpandDims(root.WithOpName("expand"), float_caster, 0);

  float input_max = 255;
  Div(root.WithOpName("div"),dims_expander,input_max);

  tensorflow::GraphDef graph;
  TF_RETURN_IF_ERROR(root.ToGraphDef(&graph));

  std::unique_ptr<tensorflow::Session> session(
      tensorflow::NewSession(tensorflow::SessionOptions()));
  TF_RETURN_IF_ERROR(session->Create(graph));
//  std::vector<Tensor> out_tensors;
//  TF_RETURN_IF_ERROR(session->Run({}, {output_name + ":0", output_name + ":1"},
//                                    {}, &out_tensors));
  TF_RETURN_IF_ERROR(session->Run({inputs}, {"div"}, {}, out_tensors));
  return Status::OK();
}
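// NOTE: input_height, input_width, input_mean and input_std are accepted by
// ReadImageFile but never actually used; the graph above only divides by 255
// and assumes the decoded image already matches the network size (48x160 here).
// If that assumption does not hold, a resize/normalize step could be added
// before root.ToGraphDef(), along the lines of TensorFlow's label_image
// example (a sketch; the op names "resize", "size" and "sub" are placeholders):
//
//   auto resized = ResizeBilinear(
//       root.WithOpName("resize"), dims_expander,
//       Const(root.WithOpName("size"), {input_height, input_width}));
//   Div(root.WithOpName("div"),
//       Sub(root.WithOpName("sub"), resized, {input_mean}),
//       {input_std});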

int main()
{
  Session* session;
  Status status = NewSession(SessionOptions(), &session);

  const std::string graph_fn = "/media/root/Ubuntu311/projects/Ecology_projects/tensorflowtest/model-0617/model.meta";
  MetaGraphDef graphdef;
  Status status_load = ReadBinaryProto(Env::Default(), graph_fn, &graphdef); // read the graph definition from the .meta file
  if (!status_load.ok()) {
        std::cout << "ERROR: Loading model failed..." << graph_fn << std::endl;
        std::cout << status_load.ToString() << "\n";
        return -1;
  }

  Status status_create = session->Create(graphdef.graph_def()); // import the graph into the session
  if (!status_create.ok()) {
        std::cout << "ERROR: Creating graph in session failed..." << status_create.ToString() << std::endl;
        return -1;
  }
  cout << "Session successfully created.Load model successfully!"<< endl;

  // restore the pre-trained weights from the checkpoint
  const std::string checkpointPath = "/media/root/Ubuntu311/projects/Ecology_projects/tensorflowtest/model-0617/model";
  Tensor checkpointPathTensor(DT_STRING, TensorShape());
  checkpointPathTensor.scalar<std::string>()() = checkpointPath;
  status = session->Run(
		  {{ graphdef.saver_def().filename_tensor_name(), checkpointPathTensor },},
		  {},{graphdef.saver_def().restore_op_name()},nullptr);
  if (!status.ok())
  {
	  throw runtime_error("Error loading checkpoint from " + checkpointPath + ": " + status.ToString());
  }
  cout << "Load weights successfully!"<< endl;


  //read image for prediction...
  string image_path= "/media/root/Ubuntu311/projects/Ecology_projects/copy/cnn-imgs/AABW.jpg";
   int input_height =48;
   int input_width=160;
   int input_mean=0;
   int input_std=1;
   std::vector<Tensor> resized_tensors;
   Status read_tensor_status =
       ReadImageFile(image_path, input_height, input_width, input_mean,
                               input_std, &resized_tensors);
   if (!read_tensor_status.ok()) {
     LOG(ERROR) << read_tensor_status;
     cout<<"resing error"<<endl;
     return -1;
   }

   const Tensor& resized_tensor = resized_tensors[0];
   std::cout <<"Read image successfully: "<< resized_tensor.DebugString()<<endl;

   // NOTE: this is the part that turned out to be wrong: the image tensor is
   // fed to both "input" and "is_training" (see the explanation below).
   std::string Input1Name = "input";
   std::string Input2Name = "is_training";
   vector<std::pair<string, Tensor> > inputs;
   inputs.push_back(std::make_pair(Input1Name, resized_tensor));
   inputs.push_back(std::make_pair(Input2Name, resized_tensor));

   vector<tensorflow::Tensor> outputs;
   string output2="out_softmax";
   Status status_run = session->Run(inputs, {output2}, {}, &outputs);
   if (!status_run.ok()) {
       std::cout << "ERROR: RUN failed..."  << std::endl;
       std::cout << status_run.ToString() << "\n";
       return -1;
   }
   //Fetch output value
   std::cout << "Output tensor size:" << outputs.size() << std::endl;
   for (std::size_t i = 0; i < outputs.size(); i++) {
       std::cout <<"result: "<<i<<" :"<< outputs[i].DebugString()<<endl;
   }
  cout << "Prediction successfully!"<< endl;

  return 0;
}

When run, it reported the following errors:

2019-07-03 14:46:51.826344: I tensorflow/core/platform/cpu_feature_guard.cc:142] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.1 SSE4.2 AVX AVX2 FMA
2019-07-03 14:46:51.857340: I tensorflow/core/platform/profile_utils/cpu_utils.cc:94] CPU Frequency: 3407945000 Hz
2019-07-03 14:46:51.858724: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x109c780 executing computations on platform Host. Devices:
2019-07-03 14:46:51.858753: I tensorflow/compiler/xla/service/service.cc:175]   StreamExecutor device (0): <undefined>, <undefined>
2019-07-03 14:46:52.337448: I tensorflow/core/common_runtime/optimization_registry.cc:35] Running all optimization passes in grouping 0. If you see this a lot, you might be extending the graph too many times (which means you modify the graph many times before execution). Try reducing graph modifications or using SavedModel to avoid any graph modification
2019-07-03 14:46:52.627237: I tensorflow/core/common_runtime/optimization_registry.cc:35] Running all optimization passes in grouping 1. If you see this a lot, you might be extending the graph too many times (which means you modify the graph many times before execution). Try reducing graph modifications or using SavedModel to avoid any graph modification
Session successfully created.Load model successfully!
2019-07-03 14:46:53.305255: I tensorflow/core/common_runtime/optimization_registry.cc:35] Running all optimization passes in grouping 2. If you see this a lot, you might be extending the graph too many times (which means you modify the graph many times before execution). Try reducing graph modifications or using SavedModel to avoid any graph modification
2019-07-03 14:46:53.440792: W tensorflow/compiler/jit/mark_for_compilation_pass.cc:1337] (One-time warning): Not using XLA:CPU for cluster because envvar TF_XLA_FLAGS=--tf_xla_cpu_global_jit was not set.  If you want XLA:CPU, either set that envvar, or use experimental_jit_scope to enable XLA:CPU.  To confirm that XLA is active, pass --vmodule=xla_compilation_cache=1 (as a proper command-line flag, not via TF_XLA_FLAGS) or set the envvar XLA_FLAGS=--xla_hlo_profile.
2019-07-03 14:46:53.459186: I tensorflow/core/common_runtime/optimization_registry.cc:35] Running all optimization passes in grouping 3. If you see this a lot, you might be extending the graph too many times (which means you modify the graph many times before execution). Try reducing graph modifications or using SavedModel to avoid any graph modification
Load weights successfully!
2019-07-03 14:46:54.499491: I tensorflow/core/common_runtime/optimization_registry.cc:35] Running all optimization passes in grouping 0. If you see this a lot, you might be extending the graph too many times (which means you modify the graph many times before execution). Try reducing graph modifications or using SavedModel to avoid any graph modification
2019-07-03 14:46:54.499908: I tensorflow/core/common_runtime/optimization_registry.cc:35] Running all optimization passes in grouping 1. If you see this a lot, you might be extending the graph too many times (which means you modify the graph many times before execution). Try reducing graph modifications or using SavedModel to avoid any graph modification
2019-07-03 14:46:54.505033: I tensorflow/core/common_runtime/optimization_registry.cc:35] Running all optimization passes in grouping 2. If you see this a lot, you might be extending the graph too many times (which means you modify the graph many times before execution). Try reducing graph modifications or using SavedModel to avoid any graph modification
2019-07-03 14:46:54.506574: I tensorflow/core/common_runtime/optimization_registry.cc:35] Running all optimization passes in grouping 3. If you see this a lot, you might be extending the graph too many times (which means you modify the graph many times before execution). Try reducing graph modifications or using SavedModel to avoid any graph modification
Read image successfully: Tensor<type: float shape: [1,48,160,1] values: [[[0.592156887][0.603921592][0.603921592]]]...>
2019-07-03 14:47:02.087086: E tensorflow/core/grappler/optimizers/meta_optimizer.cc:486] model_pruner failed: Invalid argument: Invalid input graph.

I knew this way of writing it was wrong, but neither the official docs nor Google turned up any example that feeds both "input:0" and "is_training:0" at the same time, so I did not know how to translate that part of the Python code into C++.

I finally got the answer by asking on the TensorFlow site: changing the code to the version below works.
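The essential change, pulled out of the full listing below, is that is_training must be fed as a scalar boolean tensor of its own (not the image tensor again), and the fetched node is the one actually called "output" in the graph:

// Key fix: is_training is a scalar DT_BOOL tensor set to false, and the
// fetch name is "output" (there is no node called "out_softmax" in the graph).
Tensor is_training_val(DT_BOOL, TensorShape());
is_training_val.scalar<bool>()() = false;

vector<std::pair<string, Tensor> > inputs = {
    {"input", resized_tensor},
    {"is_training", is_training_val}};

vector<tensorflow::Tensor> outputs;
Status status_run = session->Run(inputs, {"output"}, {}, &outputs);

The complete corrected program: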

int main()
{
  Session* session;
  Status status = NewSession(SessionOptions(), &session);

  const std::string graph_fn = "/media/root/Ubuntu311/projects/Ecology_projects/tensorflowtest/model-0617/model.meta";
  MetaGraphDef graphdef;
  Status status_load = ReadBinaryProto(Env::Default(), graph_fn, &graphdef); // read the graph definition from the .meta file
  if (!status_load.ok()) {
        std::cout << "ERROR: Loading model failed..." << graph_fn << std::endl;
        std::cout << status_load.ToString() << "\n";
        return -1;
  }

  Status status_create = session->Create(graphdef.graph_def()); // import the graph into the session
  if (!status_create.ok()) {
        std::cout << "ERROR: Creating graph in session failed..." << status_create.ToString() << std::endl;
        return -1;
  }
  cout << "Session successfully created.Load model successfully!"<< endl;

  // restore the pre-trained weights from the checkpoint
  const std::string checkpointPath = "/media/root/Ubuntu311/projects/Ecology_projects/tensorflowtest/model-0617/model";
  Tensor checkpointPathTensor(DT_STRING, TensorShape());
  checkpointPathTensor.scalar<std::string>()() = checkpointPath;
  status = session->Run(
		  {{ graphdef.saver_def().filename_tensor_name(), checkpointPathTensor },},
		  {},{graphdef.saver_def().restore_op_name()},nullptr);
  if (!status.ok())
  {
	  throw runtime_error("Error loading checkpoint from " + checkpointPath + ": " + status.ToString());
  }
  cout << "Load weights successfully!"<< endl;


  //read image for prediction...
  string image_path= "/media/root/Ubuntu311/projects/Ecology_projects/copy/cnn-imgs/AABW.jpg";
   int input_height =48;
   int input_width=160;
   int input_mean=0;
   int input_std=1;
   std::vector<Tensor> resized_tensors;
   Status read_tensor_status =ReadImageFile(image_path, input_height, input_width, input_mean,input_std, &resized_tensors);
   if (!read_tensor_status.ok()) {
     LOG(ERROR) << read_tensor_status;
     cout<<"resing error"<<endl;
     return -1;
   }

   const Tensor& resized_tensor = resized_tensors[0];
   std::cout <<"Read image successfully: "<< resized_tensor.DebugString()<<endl;

   vector<std::pair<string, Tensor> > inputs;
   std::string Input1Name = "input";
   inputs.push_back(std::make_pair(Input1Name, resized_tensor));
   Tensor is_training_val(DT_BOOL,TensorShape());
   is_training_val.scalar<bool>()()=false;
   std::string Input2Name = "is_training";
   inputs.push_back(std::make_pair(Input2Name, is_training_val));

//   Tensor is_training(DT_BOOL,TensorShape());
//   is_training.scalar<bool>()()=false;
//   vector<std::pair<string, Tensor> > inputs={{"input",resized_tensor},{"is_training",is_training}};

   vector<tensorflow::Tensor> outputs;
   string output="output";
   Status status_run = session->Run(inputs, {output}, {}, &outputs);
   if (!status_run.ok()) {
       std::cout << "ERROR: RUN failed..."  << std::endl;
       std::cout << status_run.ToString() << "\n";
       return -1;
   }
   //Fetch output value
   std::cout << "Output tensor size:" << outputs.size() << std::endl;
   for (std::size_t i = 0; i < outputs.size(); i++) {
       std::cout <<"result: "<<i<<" :"<< outputs[i].DebugString()<<endl;
   }
  cout << "Prediction successfully!"<< endl;

  Tensor t = outputs[0];                   // Fetch the first tensor
  int ndim2 = t.shape().dims();            // Get the dimension of the tensor
  auto tmap = t.tensor<float, 2>();        // Tensor Shape: [batch_size, target_class_num]
  int output_dim = t.shape().dim_size(1);  // Get the target_class_num from 1st dimension
  std::vector<double> tout;

  // Argmax: Get Final Prediction Label and Probability
  int output_class_id = -1;
  double output_prob = 0.0;
  for (int j = 0; j < output_dim; j++)
  {
    std::cout << "Class " << j << " prob:" << tmap(0, j) << "," << std::endl;
    if (tmap(0, j) >= output_prob) {
      output_class_id = j;
      output_prob = tmap(0, j);
    }
  }

  std::cout << "Final class id: " << output_class_id << std::endl;
  std::cout << "Final class prob: " << output_prob << std::endl;

  return 0;
}

It now runs all the way through.

The probabilities look a bit off, but I was told that is just a display issue on this Linux system and can be ignored; the result itself is correct.

In the end, the whole problem came down to my unfamiliarity with TensorFlow and this third-party library. I later stumbled on https://juejin.im/post/5a14e2c16fb9a0451e3f6efc, where the answer had already been given; I just had not found it earlier.

These links are also worth a look:

https://blog.csdn.net/luoyexuge/article/details/82852023
https://www.cnblogs.com/buyizhiyou/p/10412967.html
https://blog.csdn.net/luoyexuge/article/details/81905886
http://www.tensorfly.cn/bbs/forum.php?mod=forumdisplay&fid=36
http://www.tensorfly.cn/tfdoc/api_docs/cc/ClassStatus.html#string_tensorflow_Status_ToString
https://spockwangs.github.io/blog/2018/01/13/train-using-tensorflow-c-plus-plus-api/
https://blog.csdn.net/fly_time2012/article/details/80841377
https://blog.csdn.net/xinchen1234/article/details/78750079
https://zhuanlan.zhihu.com/p/42187985
https://www.csdn.net/article/2015-12-16/2826496
https://yiyibooks.cn/yiyi/tensorflow_13/tutorials/image_recognition.html

 

Then, following the references above, I also tried reading the image with OpenCV and running the prediction on that:

#include <tensorflow/core/protobuf/meta_graph.pb.h>
#include <fstream>
#include <utility>
#include <vector>
#include <Eigen/Core>
#include <Eigen/Dense>
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/default_device.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/command_line_flags.h"

#include <opencv2/opencv.hpp>

using namespace std;
using namespace tensorflow;
using namespace tensorflow::ops;
using tensorflow::Flag;
using tensorflow::Tensor;
using tensorflow::Status;
using tensorflow::string;
using tensorflow::int32;
using tensorflow::Status;

// Based on https://stackoverflow.com/questions/39379747/import-opencv-mat-into-c-tensorflow-without-copying
void mat_to_tensor(cv::Mat img, Tensor* tensor, int rows, int cols){
    cv::resize(img, img, cv::Size(cols, rows));  // cv::Size takes (width, height), i.e. (cols, rows)
    img = img - 128.0;  // NOTE: differs from the Python preprocessing, which rescales to [0, 1]
    // Wrap the tensor's buffer in a cv::Mat and convert straight into it (no copy).
    float *p = tensor->flat<float>().data();
    cv::Mat imagePixels(rows, cols, CV_32FC1, p);
    img.convertTo(imagePixels, CV_32FC1);
}


int main()
{
  Session* session;
  Status status = NewSession(SessionOptions(), &session);

  const std::string graph_fn = "/media/root/Ubuntu311/projects/Ecology_projects/tensorflowtest/model-0617/model.meta";
  MetaGraphDef graphdef;
  Status status_load = ReadBinaryProto(Env::Default(), graph_fn, &graphdef); // read the graph definition from the .meta file
  if (!status_load.ok()) {
        std::cout << "ERROR: Loading model failed..." << graph_fn << std::endl;
        std::cout << status_load.ToString() << "\n";
        return -1;
  }

  Status status_create = session->Create(graphdef.graph_def()); // import the graph into the session
  if (!status_create.ok()) {
        std::cout << "ERROR: Creating graph in session failed..." << status_create.ToString() << std::endl;
        return -1;
  }
  cout << "Session successfully created.Load model successfully!"<< endl;

  // restore the pre-trained weights from the checkpoint
  const std::string checkpointPath = "/media/root/Ubuntu311/projects/Ecology_projects/tensorflowtest/model-0617/model";
  Tensor checkpointPathTensor(DT_STRING, TensorShape());
  checkpointPathTensor.scalar<std::string>()() = checkpointPath;
  status = session->Run(
		  {{ graphdef.saver_def().filename_tensor_name(), checkpointPathTensor },},
		  {},{graphdef.saver_def().restore_op_name()},nullptr);
  if (!status.ok())
  {
	  throw runtime_error("Error loading checkpoint from " + checkpointPath + ": " + status.ToString());
  }
  cout << "Load weights successfully!"<< endl;


  //read image for prediction...
  cv::Mat inputimg=cv::imread("/media/root/Ubuntu311/projects/Ecology_projects/copy/cnn-imgs/AABW.JPG",0);
  Tensor resized_tensor(tensorflow::DT_FLOAT, tensorflow::TensorShape({1,48, 160,1}));
  mat_to_tensor(inputimg,&resized_tensor,48,160);
   std::cout <<"Read image successfully: "<< resized_tensor.DebugString()<<endl;

   vector<std::pair<string, Tensor> > inputs;
   std::string Input1Name = "input";
   inputs.push_back(std::make_pair(Input1Name, resized_tensor));
   Tensor is_training_val(DT_BOOL,TensorShape());
   is_training_val.scalar<bool>()()=false;
   std::string Input2Name = "is_training";
   inputs.push_back(std::make_pair(Input2Name, is_training_val));

//   Tensor is_training(DT_BOOL,TensorShape());
//   is_training.scalar<bool>()()=false;
//   vector<std::pair<string, Tensor> > inputs={{"input",resized_tensor},{"is_training",is_training}};

   vector<tensorflow::Tensor> outputs;
   string output="output";
   Status status_run = session->Run(inputs, {output}, {}, &outputs);
   if (!status_run.ok()) {
       std::cout << "ERROR: RUN failed..."  << std::endl;
       std::cout << status_run.ToString() << "\n";
       return -1;
   }
   //Fetch output value
   std::cout << "Output tensor size:" << outputs.size() << std::endl;
   for (std::size_t i = 0; i < outputs.size(); i++) {
       std::cout <<"result: "<<i<<" :"<< outputs[i].DebugString()<<endl;
   }
  cout << "Prediction successfully!"<< endl;

  Tensor t = outputs[0];                   // Fetch the first tensor
  int ndim2 = t.shape().dims();             // Get the dimension of the tensor
  auto tmap = t.tensor<float, 2>();        // Tensor Shape: [batch_size, target_class_num]
  int output_dim = t.shape().dim_size(1);  // Get the target_class_num from 1st dimension
  std::vector<double> tout;

  // Argmax: Get Final Prediction Label and Probability
  int output_class_id = -1;
  double output_prob = 0.0;
  for (int j = 0; j < output_dim; j++)
  {
		std::cout << "Class " << j << " prob:" << tmap(0, j) << "," << std::endl;
		if (tmap(0, j) >= output_prob) {
				output_class_id = j;
				output_prob = tmap(0, j);
		}
  }

  std::cout << "Final class id: " << output_class_id << std::endl;
  std::cout << "Final class prob: " << output_prob << std::endl;

  return 0;
}

I was not sure whether the tensor shape should be written that way: in Python it is reshaped to [-1, 48, 160, 1], but a C++ TensorShape cannot contain negative dimensions.
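The -1 in np.reshape simply tells numpy to infer that dimension, and with a single image it resolves to 1, so TensorShape({1, 48, 160, 1}) is the direct C++ equivalent. A small sketch; whether larger batches also work depends on the placeholder's batch dimension having been left unspecified at training time, which I have not verified:

// One image: the batch dimension that Python infers from -1 is simply 1 here.
Tensor one_image(tensorflow::DT_FLOAT, tensorflow::TensorShape({1, 48, 160, 1}));

// A hypothetical batch of N images would spell out N explicitly instead:
int N = 4;  // example batch size
Tensor batch(tensorflow::DT_FLOAT, tensorflow::TensorShape({N, 48, 160, 1}));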

Later I found https://stackoverflow.com/questions/39379747/import-opencv-mat-into-c-tensorflow-without-copying?rq=1 and modified the code as follows:

int main()
{
	  Session* session;
	  Status status = NewSession(SessionOptions(), &session);

	  const std::string graph_fn = "/media/root/Ubuntu311/projects/Ecology_projects/tensorflowtest/model-0708/model.meta";
	  MetaGraphDef graphdef;
	  Status status_load = ReadBinaryProto(Env::Default(), graph_fn, &graphdef); // read the graph definition from the .meta file
	  if (!status_load.ok()) {
	        std::cout << "ERROR: Loading model failed..." << graph_fn << std::endl;
	        std::cout << status_load.ToString() << "\n";
	        return -1;
	  }

	  Status status_create = session->Create(graphdef.graph_def()); // import the graph into the session
	  if (!status_create.ok()) {
	        std::cout << "ERROR: Creating graph in session failed..." << status_create.ToString() << std::endl;
	        return -1;
	  }
	  cout << "Session successfully created.Load model successfully!"<< endl;

	  // restore the pre-trained weights from the checkpoint
	  const std::string checkpointPath = "/media/root/Ubuntu311/projects/Ecology_projects/tensorflowtest/model-0708/model";
	  Tensor checkpointPathTensor(DT_STRING, TensorShape());
	  checkpointPathTensor.scalar<std::string>()() = checkpointPath;
	  status = session->Run(
			  {{ graphdef.saver_def().filename_tensor_name(), checkpointPathTensor },},
			  {},{graphdef.saver_def().restore_op_name()},nullptr);
	  if (!status.ok())
	  {
		  throw runtime_error("Error loading checkpoint from " + checkpointPath + ": " + status.ToString());
	  }
	  cout << "Load weights successfully!"<< endl;


	  // read image for prediction...
	  cv::Mat srcimg= cv::imread("/media/root/Ubuntu311/projects/Ecology_projects/copy/cnn-imgs/AABW22496.jpg",0);
	  //cv::resize(srcimg,srcimg,cv::Size(224,96)); // enable this if the image is not already 96x224
	  Tensor resized_tensor(DT_FLOAT, TensorShape({1,96,224,1}));
	  float *imgdata = resized_tensor.flat<float>().data();
	  // cameraImg shares the tensor's buffer, so convertTo writes directly into the tensor
	  cv::Mat cameraImg(96, 224, CV_32FC1, imgdata);
	  srcimg.convertTo(cameraImg, CV_32FC1);
	  // preprocess the image: rescale pixel values to [0, 1]
	  cameraImg=cameraImg/255;
	  std::cout <<"Read image successfully: "<< resized_tensor.DebugString()<<endl;

	   vector<std::pair<string, Tensor> > inputs;
	   std::string Input1Name = "input";
	   inputs.push_back(std::make_pair(Input1Name, resized_tensor));
	   Tensor is_training_val(DT_BOOL,TensorShape());
	   is_training_val.scalar<bool>()()=false;
	   std::string Input2Name = "is_training";
	   inputs.push_back(std::make_pair(Input2Name, is_training_val));

	   vector<tensorflow::Tensor> outputs;
	   string output="output";
	   Status status_run = session->Run(inputs, {output}, {}, &outputs);
	   if (!status_run.ok()) {
	       std::cout << "ERROR: RUN failed..."  << std::endl;
	       std::cout << status_run.ToString() << "\n";
	       return -1;
	   }
	   //Fetch output value
	   for (std::size_t i = 0; i < outputs.size(); i++) {
	       std::cout <<"result: "<<i<<" :"<< outputs[i].DebugString()<<endl;
	   }
	  cout << "Prediction successfully!"<< endl;

	  Tensor t = outputs[0];                   // Fetch the first tensor
	  int ndim2 = t.shape().dims();             // Get the dimension of the tensor
	  auto tmap = t.tensor<float, 2>();        // Tensor Shape: [batch_size, target_class_num]
	  int output_dim = t.shape().dim_size(1);  // Get the target_class_num from 1st dimension
	  std::vector<double> tout;

	  // Argmax: Get Final Prediction Label and Probability
	  int output_class_id = -1;
	  double output_prob = 0.0;
	  for (int j = 0; j < output_dim; j++)
	  {
			std::cout << "Class " << j << " prob:" << tmap(0, j) << "," << std::endl;
			if (tmap(0, j) >= output_prob) {
					output_class_id = j;
					output_prob = tmap(0, j);
			}
	  }

	  std::cout << "Final class id: " << output_class_id << std::endl;
	  std::cout << "Final class prob: " << output_prob << std::endl;


	return 0;
}

With that change it works.
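For reference, the cv::Mat to Tensor conversion used above can be wrapped into a small helper. This is only a minimal sketch of my own (the helper name is not from any library), assuming a grayscale input image and the divide-by-255 normalization that matches the Python preprocessing:

// Minimal sketch: wrap a grayscale cv::Mat into a DT_FLOAT tensor of shape
// {1, height, width, 1}. The wrapper cv::Mat shares the tensor's buffer, so
// convertTo writes straight into the tensor, and the 1/255 scale factor
// rescales pixel values to [0, 1].
tensorflow::Tensor mat_to_input_tensor(const cv::Mat& src, int height, int width) {
  tensorflow::Tensor t(tensorflow::DT_FLOAT,
                       tensorflow::TensorShape({1, height, width, 1}));
  cv::Mat resized;
  cv::resize(src, resized, cv::Size(width, height));  // cv::Size is (width, height)
  cv::Mat wrapped(height, width, CV_32FC1, t.flat<float>().data());
  resized.convertTo(wrapped, CV_32FC1, 1.0 / 255.0);  // convert and scale in place
  return t;
}

With a helper like this, the program above would just call Tensor resized_tensor = mat_to_input_tensor(srcimg, 96, 224); before assembling inputs.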

Look, the recognition result is correct!

I'm happy, so here is a nice photo of Ergouzi (my dog).
