Linux編譯(cross-compilation)

主機host系統:ubuntu16.04

目標硬件:ARM imx6qsabresd CortexA9

交叉編譯可執行文件格式:ELF 32-bit LSB executable, ARM, EABI5 version 1 (SYSV), dynamically linked, interpreter /lib/ld-linux-armhf.so.3, for GNU/Linux 3.2.0

1.交叉編譯

1.1 安裝編譯環境

sudo apt-get install gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf g++-multilib-arm-linux-gnueabihf pkg-config-arm-linux-gnueabihf

完全不需要虛擬機編譯出一個與ARM板子相同的交叉編譯環境,以上環境能兼容。

1.2 交叉編譯helloworld

// Minimal program used to verify that the cross toolchain produces a
// working ARM executable.
#include <iostream>

int main()
{
    std::cout << "hello world!" << std::endl;
    return 0;
}
arm-linux-gnueabihf-g++ helloworld.cpp -o helloworld

通過以下命令查看兼容性(是否能在ARM板子上執行)

file helloworld

最後上傳或者拷貝到ARM板子中執行驗證即可。

1.3 交叉編譯opencv

參考網站:https://docs.opencv.org/3.4.3/d0/d76/tutorial_arm_crosscompile_with_cmake.html

修改opencv/platforms/linux目錄下的arm-gnueabi.toolchain.cmake中的CMAKE_CXX_COMPILER(arm-linux-gnueabihf-g++),以及CMAKE_CXX_FLAGS(可能用到-march=armv7-a -mfpu=neon -mfloat-abi=hard -mcpu=cortex-a9),CMAKE_EXE_LINKER_FLAGS(opencv需要的-lopencv_core -lopencv_imgproc -lopencv_imgcodecs),示例:

# Toolchain settings for cross-compiling to ARM (hard-float, Cortex-A9).
set(CMAKE_SYSTEM_NAME Linux)
set(CMAKE_SYSTEM_PROCESSOR arm)
set(CMAKE_C_COMPILER arm-linux-gnueabihf-gcc)
set(CMAKE_CXX_COMPILER arm-linux-gnueabihf-g++)

# NOTE(review): removed "-c" from the flags — it is a compile-only driver
# option and must not appear in CMAKE_CXX_FLAGS, where it also leaks into
# link lines and try_compile checks and breaks them.
SET(CMAKE_CXX_FLAGS "-march=armv7-a -mfpu=neon -mfloat-abi=hard -mcpu=cortex-a9 -Wall")
set(CMAKE_EXE_LINKER_FLAGS "-lopencv_core -lopencv_imgproc -lopencv_imgcodecs")

# Search host paths for programs only; search the target sysroot only
# for libraries and headers in the target directories.
SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)

執行:

cmake -DCMAKE_TOOLCHAIN_FILE=../arm-gnueabi.toolchain.cmake ../../..

然後make即可

  • 編譯安裝到指定路徑(目錄)

如果需要編譯(bin/host/lib/shared)到指定目錄(如host)下,添加參數 -D CMAKE_INSTALL_PREFIX=./host即可,然後執行make install,完成之後即可在build/host目錄下找到(bin/host/lib/shared)文件。

#opencv目錄下自建build目錄,並切換到build下
cmake -D CMAKE_INSTALL_PREFIX=./host ..
make install -j8

find_package(OpenCV REQUIRED)找不到OpenCV時,CMakeLists中添加自定義安裝的opencv庫,參考:自定義引入OpenCV版本 

2.編譯動態鏈接.so(cross-compile)

參考網站:https://www.cnblogs.com/52php/p/5681711.html

//....test.c....
#include<stdio.h>
int fun()
{
    return 1;
}
arm-linux-gnueabihf-gcc test.c -I./ -fPIC -shared -o libtest.so

編譯可執行文件若需要調用libtest.so文件,直接採用"-ltest"即可鏈接到libtest.so文件。

3.交叉編譯人臉檢測(Shiguang Shan)

在SeetaFaceEngine-master/FaceDetection目錄下,修改CMakeLists.txt

  • 修改arm兼容的交叉編譯環境
  • 各個庫修改爲arm兼容的庫(最重要的是修改arm的opencv頭文件以及庫文件)
  • OPENMP以及SSE設置爲OFF
  • EXAMPLES根據自己的需要決定
cmake_minimum_required(VERSION 3.1.0)

# Cross compilers must be configured BEFORE project(): project() triggers
# compiler detection, so setting CMAKE_C/CXX_COMPILER afterwards (as the
# original did) has no effect.
set(CMAKE_SYSTEM_NAME Linux)
set(CMAKE_SYSTEM_PROCESSOR arm)
set(CMAKE_C_COMPILER arm-linux-gnueabihf-gcc)
set(CMAKE_CXX_COMPILER arm-linux-gnueabihf-g++)

project(seeta_facedet_lib)

# Target-CPU tuning for i.MX6 (Cortex-A9, NEON, hard-float ABI).
SET(CMAKE_CXX_FLAGS "-march=armv7-a -mfpu=neon -mfloat-abi=hard -mcpu=cortex-a9")

SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
# for libraries and headers in the target directories
SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)

set(CMAKE_BUILD_TYPE Debug)
# Build options
option(BUILD_EXAMPLES  "Set to ON to build examples"  ON)
option(USE_OPENMP      "Set to ON to build use openmp"  OFF)
option(USE_SSE         "Set to ON to build use SSE"  OFF)

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O2")

# Use C++11
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
message(STATUS "C++11 support has been enabled by default.")

# Use SSE (x86-only instruction set; keep OFF when targeting ARM).
if (USE_SSE)
    add_definitions(-DUSE_SSE)
    message(STATUS "Use SSE")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse4.1")
endif()

# Use OpenMP
if (USE_OPENMP)
    find_package(OpenMP QUIET)
    if (OPENMP_FOUND)
        message(STATUS "Use OpenMP")
        add_definitions(-DUSE_OPENMP)
        set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
        set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${OpenMP_EXE_LINKER_FLAGS}")
    endif()
endif()

include_directories(include)

set(src_files 
    src/util/nms.cpp
    src/util/image_pyramid.cpp
    src/io/lab_boost_model_reader.cpp
    src/io/surf_mlp_model_reader.cpp
    src/feat/lab_feature_map.cpp
    src/feat/surf_feature_map.cpp
    src/classifier/lab_boosted_classifier.cpp
    src/classifier/mlp.cpp
    src/classifier/surf_mlp.cpp
    src/face_detection.cpp
    src/fust.cpp
    )

# Build shared library
add_library(seeta_facedet_lib SHARED ${src_files})
set(facedet_required_libs seeta_facedet_lib)

# Build examples
if (BUILD_EXAMPLES)
    message(STATUS "Build with examples.")
    # Cross-built OpenCV: add its header and library directories explicitly.
    # link_directories() must be called before the target is created.
    include_directories("../../../OpenCV_ARM/include")
    link_directories("../../../OpenCV_ARM/lib")
    add_executable(facedet_test src/test/facedetection_test.cpp)
    target_link_libraries(facedet_test ${facedet_required_libs}
        opencv_core opencv_imgproc opencv_imgcodecs opencv_videoio opencv_video)
endif()

4.CMake基礎入門

參考網站:https://cmake.org/cmake/help/v3.14/manual/cmake.1.html

  • 基礎認識:

CMake是用來將一個項目源代碼編譯成一個項目的可執行文件。

  • 使用:
mkdir build
cd build
cmake ../    #編譯CMakeLists.txt中寫好的內容,產生CMakeCache.txt
make

CMakeLists.txt文件規定了編譯目標,以及項目的依賴。

CMakeCache.txt在執行cmake命令後產生,用一個樹狀結構標識項目目錄,存儲固定的信息。

  • CMakeLists.txt編寫
  • 步驟:

示例參考:https://www.cnblogs.com/cv-pr/p/6206921.html

①指定cmake版本

cmake_minimum_required(VERSION 3.2)

②指定項目名稱,一般與項目文件夾名相同即可

PROJECT(Test)

③添加頭文件目錄(它相當於g++選項中的-I參數的作用,也相當於環境變量中增加路徑到CPLUS_INCLUDE_PATH變量的作用)

INCLUDE_DIRECTORIES(
include
)

④添加源文件目錄

AUX_SOURCE_DIRECTORY(src DIR_SRCS)

⑤設置環境變量,編譯用到的源文件全部需要添加

SET(TEST_MATH
${DIR_SRCS}
)

⑥添加可執行文件

ADD_EXECUTABLE(${PROJECT_NAME} ${TEST_MATH})

⑦添加可執行文件所需庫文件

TARGET_LINK_LIBRARIES(${PROJECT_NAME} -lopencv_core)

多種寫法

#比如(以下寫法(包括備註中的)都可以): 
TARGET_LINK_LIBRARIES(myProject hello)   # 連接libhello.so庫
TARGET_LINK_LIBRARIES(myProject libhello.a)
TARGET_LINK_LIBRARIES(myProject libhello.so)

#再如:
TARGET_LINK_LIBRARIES(myProject libeng.so)  #這些庫名寫法都可以。
TARGET_LINK_LIBRARIES(myProject eng)
TARGET_LINK_LIBRARIES(myProject -leng)

以上cmake基本步驟算是完成,還可以添加其他的功能:

LINK_DIRECTORIES(添加需要鏈接的庫文件目錄)

link_directories(directory1 directory2 ...)   #它相當於g++命令的-L選項的作用,也相當於環境變量中增加LD_LIBRARY_PATH的路徑的作用

LINK_LIBRARIES (添加需要鏈接的庫文件路徑,注意這裏是全路徑。還是推薦使用⑦)

LINK_LIBRARIES("/opt/MATLAB/R2012a/bin/glnxa64/libeng.so")

add_subdirectory(NAME)     (添加一個文件夾進行編譯,該文件夾下的CMakeLists.txt 負責編譯該文件夾下的源碼. NAME是相對於調用add_subdirectory的CMakeListst.txt的相對路徑)

5.交叉編譯人臉檢測+人臉對齊(Shiguang Shan)

step1:SeetaFaceEngine-master目錄下創建build文件夾,以及CMakeLists.txt(內容如下)

step2:build目錄下編譯

cmake_minimum_required(VERSION 2.8.4)

# Cross compilers must be set BEFORE project(), which performs compiler
# detection; in the original they were set afterwards and had no effect.
set(CMAKE_SYSTEM_NAME Linux)
set(CMAKE_SYSTEM_PROCESSOR arm)
set(CMAKE_C_COMPILER arm-linux-gnueabihf-gcc)
set(CMAKE_CXX_COMPILER arm-linux-gnueabihf-g++)

project(seeta_fa_lib)

SET(CMAKE_CXX_FLAGS "-march=armv7-a -mfpu=neon -mfloat-abi=hard -mcpu=cortex-a9")

SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
# for libraries and headers in the target directories
SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)

# Build options
option(BUILD_EXAMPLES  "Set to ON to build examples"  ON)

# Use C++11
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
message(STATUS "C++11 support has been enabled by default.")

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O2")

# Short aliases for the two sub-projects; referenced below as ${...}.
set(fd FaceDetection)
set(fa FaceAlignment)
include_directories(${fd}/include)
include_directories(${fa}/include)

set(fd_src_files 
    ${fd}/src/util/nms.cpp
    ${fd}/src/util/image_pyramid.cpp
    ${fd}/src/io/lab_boost_model_reader.cpp
    ${fd}/src/io/surf_mlp_model_reader.cpp
    ${fd}/src/feat/lab_feature_map.cpp
    ${fd}/src/feat/surf_feature_map.cpp
    ${fd}/src/classifier/lab_boosted_classifier.cpp
    ${fd}/src/classifier/mlp.cpp
    ${fd}/src/classifier/surf_mlp.cpp
    ${fd}/src/face_detection.cpp
    ${fd}/src/fust.cpp
    )
add_library(seeta_facedet_lib SHARED ${fd_src_files})
set(facedet_required_libs seeta_facedet_lib)

set(fa_src_files 
    ${fa}/src/cfan.cpp
    ${fa}/src/face_alignment.cpp
    ${fa}/src/sift.cpp
    )

add_library(seeta_fa_lib SHARED ${fa_src_files})
set(fa_required_libs seeta_fa_lib)

if (BUILD_EXAMPLES)
    message(STATUS "Build with examples.")
    # Cross-built OpenCV headers and libraries.
    include_directories("../../OpenCV_ARM/include")
    include_directories("../../OpenCV_ARM/include/opencv")
    include_directories("../../OpenCV_ARM/include/opencv2")
    link_directories("../../OpenCV_ARM/lib")
    add_executable(fa_test ${fa}/src/test/face_alignment_test.cpp)
    target_link_libraries(fa_test ${fa_required_libs} ${facedet_required_libs}
        opencv_core opencv_imgproc opencv_imgcodecs opencv_videoio opencv_video)
endif()

imx6q運行時間:

real    0m3.526s
user    0m3.410s
sys     0m0.110s 

6.交叉編譯人臉檢測+人臉對齊+人臉驗證(Shiguang Shan)

主要遇到的問題:

  • 採用add_subdirectories()方法,找不到自己編譯好的OpenCV_ARM中的頭文件(include)和庫文件(lib),因此我將所有的CMakeLists中的功能寫到一個CMakeLists中;
  • FaceIdentification/src/test/test_face_verification.cpp中包含的OpenCV頭文件可能存在路徑問題,比如我修改了#include "opencv2/highgui/highgui.hpp"
# Basic cross-compilation configuration.
# NOTE(review): the original listing was corrupted by a bad paste — the
# fd_src_files list was garbled, an orphan source list floated after
# aux_source_directory(), and the seeta_facedet_lib / seeta_fa_lib targets
# plus the whole FaceIdentification section appeared twice (duplicate
# add_library() of the same name is a hard CMake error). Restored a single
# coherent file.
cmake_minimum_required(VERSION 2.8.4)

# Cross compilers must be set before project() runs compiler detection.
set(CMAKE_SYSTEM_NAME Linux)
set(CMAKE_SYSTEM_PROCESSOR arm)
set(CMAKE_C_COMPILER arm-linux-gnueabihf-gcc)
set(CMAKE_CXX_COMPILER arm-linux-gnueabihf-g++)

project(seeta_fa_lib)

SET(CMAKE_CXX_FLAGS "-march=armv7-a -mfpu=neon -mfloat-abi=hard -mcpu=cortex-a9")

SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
# for libraries and headers in the target directories
SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)

# Build options
option(BUILD_EXAMPLES  "Set to ON to build examples"  ON)

# Use C++11
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
message(STATUS "C++11 support has been enabled by default.")

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O2")

# Face detection (FaceDetection) + face alignment (FaceAlignment).
set(fd FaceDetection)
set(fa FaceAlignment)
include_directories(${fd}/include)
include_directories(${fa}/include)

set(fd_src_files 
    ${fd}/src/util/nms.cpp
    ${fd}/src/util/image_pyramid.cpp
    ${fd}/src/io/lab_boost_model_reader.cpp
    ${fd}/src/io/surf_mlp_model_reader.cpp
    ${fd}/src/feat/lab_feature_map.cpp
    ${fd}/src/feat/surf_feature_map.cpp
    ${fd}/src/classifier/lab_boosted_classifier.cpp
    ${fd}/src/classifier/mlp.cpp
    ${fd}/src/classifier/surf_mlp.cpp
    ${fd}/src/face_detection.cpp
    ${fd}/src/fust.cpp
    )
add_library(seeta_facedet_lib SHARED ${fd_src_files})
set(facedet_required_libs seeta_facedet_lib)

set(fa_src_files 
    ${fa}/src/cfan.cpp
    ${fa}/src/face_alignment.cpp
    ${fa}/src/sift.cpp
    )

add_library(seeta_fa_lib SHARED ${fa_src_files})
set(fa_required_libs seeta_fa_lib)

# Face identification (FaceIdentification) configuration.
set (VIPLNET_VERSION_MAJOR 4)
set (VIPLNET_VERSION_MINOR 5)

set(CMAKE_BUILD_TYPE "Release")
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS} -std=c++11 -O2 -g -ggdb")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS} -std=c++11 -O2")

MESSAGE(STATUS "other platform: ${CMAKE_SYSTEM_NAME}")

set(fi FaceIdentification)
set(VIPLNET_INCLUDE_DIR ${fi}/include)
set(VIPLNET_SRC_DIR ${fi}/src)
# set __VIPL_LOG__ macro
# add_definitions(-D__VIPL_LOG__)

include_directories(${VIPLNET_INCLUDE_DIR})
include_directories(${VIPLNET_SRC_DIR})

# Collect every source under src/ and tools/ into the viplnet library.
aux_source_directory(${fi}/src SRC_LIST)
aux_source_directory(${fi}/tools TOOLS_LIST)
add_library(viplnet SHARED ${SRC_LIST} ${TOOLS_LIST})
set_target_properties(viplnet PROPERTIES 
  VERSION ${VIPLNET_VERSION_MAJOR}.${VIPLNET_VERSION_MINOR} 
  SOVERSION ${VIPLNET_VERSION_MAJOR}.${VIPLNET_VERSION_MINOR})

if (BUILD_EXAMPLES)
    message(STATUS "Build with examples.")
    # Cross-built OpenCV headers and libraries.
    include_directories("../../OpenCV_ARM/include")
    include_directories("../../OpenCV_ARM/include/opencv")
    include_directories("../../OpenCV_ARM/include/opencv2")
    link_directories("../../OpenCV_ARM/lib")
    add_executable(fi_verification_test ${fi}/src/test/test_face_verification.cpp)
    target_link_libraries(fi_verification_test ${fa_required_libs} ${facedet_required_libs} viplnet
        opencv_core opencv_imgproc opencv_imgcodecs opencv_videoio opencv_video)
endif()

計算同一張圖片的識別效果,即計算同一張圖片的相似度(識別率=1),計算同一人不同圖片(本人一張正裝照和手機拍的照片,識別率=0.643433)。因此,不同設備下,識別精度不會損失,識別時間在arm平臺下會超級慢,識別時間如下:

Freescale i.MX6 Quad/DualLite (Device Tree)運行時間 Intel(R) Core(TM) i7-7800X CPU @ 3.50GHz運行時間
real    0m13.799s real    0m1.007s
user    0m12.870s user    0m1.638s
sys     0m0.970s sys     0m0.330s

7.樹莓派( cross-compilation )

樹莓派連接兩個USB攝像頭,同時獲取2個攝像頭的frame,保存爲圖片。(兩個攝像頭不能接在一個usb hub上)

#include <iostream>

#include "unistd.h"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/videoio.hpp"

using namespace std;

// Grab one frame from each of two USB cameras every 10 seconds and save
// them as JPEG files, forever.
int main(int argc, char *argv[])
{
    cout << "this is a test program!" << endl;

    // Open the two cameras and request a 1024x800 frame size.
    cv::VideoCapture camA(0);
    cv::VideoCapture camB(1);
    camA.set(cv::CAP_PROP_FRAME_WIDTH, 1024);
    camA.set(cv::CAP_PROP_FRAME_HEIGHT, 800);
    camB.set(cv::CAP_PROP_FRAME_WIDTH, 1024);
    camB.set(cv::CAP_PROP_FRAME_HEIGHT, 800);

    if (!camA.isOpened())
    {
        cout << "the camera 0 cannot be opened!" << endl;
        return -1;
    }
    else if (!camB.isOpened())
    {
        cout << "the camera 1 cannot be opened!" << endl;
        return -1;
    }

    cv::Mat imgA;
    cv::Mat imgB;
    for (;;)  // the original loop condition never changed, so loop forever
    {
        camA >> imgA;
        if (!imgA.empty())
        {
            cv::imwrite("0_1.jpg", imgA);
            cout << "0_1.jpg" << endl;
        }
        camB >> imgB;
        if (!imgB.empty())
        {
            cv::imwrite("1_1.jpg", imgB);
            cout << "1_1.jpg" << endl;
        }
        sleep(10);  // POSIX delay, in seconds
    }
    imgA.release();
    imgB.release();
    return 0;
}

樹莓派ARMv7l實現時間延時:使用unistd.h中的sleep()即可。

交叉編譯CMakeLists.txt文件如下:

cmake_minimum_required(VERSION 2.8)
cmake_policy(SET CMP0015 NEW)

# Cross compilers must be chosen BEFORE project() runs compiler detection;
# in the original they were set afterwards and were silently ignored.
set(CMAKE_SYSTEM_NAME Linux)
set(CMAKE_C_COMPILER arm-linux-gnueabihf-gcc)
set(CMAKE_CXX_COMPILER arm-linux-gnueabihf-g++)

project(opencv_camera)

SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
# for libraries and headers in the target directories
SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)

SET(CMAKE_CXX_FLAGS "-march=armv7-a -mfpu=neon -mfloat-abi=hard")
set(CMAKE_BUILD_TYPE Debug)

# Use C++11
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
message(STATUS "C++11 support has been enabled by default.")

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O2")

# OpenCV headers and libraries copied next to this CMakeLists.txt.
include_directories(
    include
    include/opencv
    include/opencv2
)

link_directories(lib)
set(src opencv_camera.cpp)
add_executable(opencv_camera ${src})
target_link_libraries(opencv_camera libopencv_core.so libopencv_imgproc.so libopencv_imgcodecs.so libopencv_videoio.so libopencv_video.so libopencv_highgui.so)

將前文交叉編譯完成的OpenCV庫文件lib,與OpenCV會用到的頭文件include複製到CMakeLists.txt所在目錄下。

注意:複製lib和include之後,如果編譯可執行文件時,還是存在找不到OpenCV相關的庫,或者OpenCV相關文件未定義的錯誤,則需考慮include_directories以及link_directories中包含的目錄不準確,或者存在漏庫/頭文件的情況。

8.librealsense編譯

測試系統:ubuntu16.04

測試硬件:Intel RealSense D435

8.1 編譯SDK 

  • 新建一個項目目錄intelrealsense
  • 下載

終端切換至intelrealsense目錄,git clone https://github.com/IntelRealSense/librealsense.git

  • 編譯

參考網站:https://dev.intelrealsense.com/docs/compiling-librealsense-for-linux-ubuntu-guide

如果需要編譯在arm64(rockchip rk3399 aarch64)上,參考網站編譯,則直接在rockpi終端,librealsense目錄下運行./scripts/libuvc_installation.sh即可,當然也可以加入一些優化選項,比如將scripts/libuvc_installation.sh中cmake ../ -DFORCE_LIBUVC=true -DCMAKE_BUILD_TYPE=release修改爲:cmake ../ -DFORCE_LIBUVC=true -DCMAKE_BUILD_TYPE=release -DBUILD_EXAMPLES=OFF -DBUILD_GRAPHICAL_EXAMPLES=OFF -DBUILD_WITH_OPENMP=OFF -DBUILD_UNIT_TESTS=OFF -DENABLE_ZERO_COPY=ON

8.2 編譯測距示例

參考網站:https://blog.csdn.net/dieju8330/article/details/85420584

  • 網站中的測距程序和CMakeLists.txt放於intelrealsense下,並在intelrealsense目錄下新建一個lib目錄;
  • 將8.1中編譯生成的librealsense2.so.2.21.0複製到lib目錄下;
  • CMakeLists.txt修改爲
# CMakeLists for the RealSense distance-measurement demo.
# NOTE(review): cmake_minimum_required() must be the first command — in the
# original it came after project(), which leaves policies at their
# pre-project defaults. Also merged the two separate
# target_link_libraries() calls into one.
cmake_minimum_required(VERSION 2.8)
project(measure_distance)

cmake_policy(SET CMP0015 NEW)

# librealsense headers from the SDK checkout.
include_directories("librealsense/include/")

set(CMAKE_CXX_FLAGS "-std=c++11")
# Locate the OpenCV installation and add its headers.
find_package(OpenCV REQUIRED)
include_directories(${OpenCV_INCLUDE_DIRS})
link_directories(lib) # holds the copied librealsense2.so

# Enable debug info for the build.
set( CMAKE_BUILD_TYPE Debug )

add_executable(measure_distance measure_distance.cpp)
# Link OpenCV and the RealSense runtime.
target_link_libraries(measure_distance ${OpenCV_LIBS} librealsense2.so)
  • 編譯測距程序,生成可執行文件
mkdir build && cd build && cmake .. && make -j8

8.3 error

terminate called after throwing an instance of 'rs2::invalid_value_error'
  what():  API version mismatch: librealsense.so was compiled with API version 2.21.0 but the application was compiled with 2.19.1! Make sure correct version of the library is installed (make install)

Linux系統下的編譯,編寫CMakeLists.txt時,需要找到程序需要的庫文件、頭文件,但庫/頭文件需要對應到同一版本。如果在CMakeLists.txt文件中使用了link_directories,則編譯器會去系統默認的頭文件位置(其中一個爲/usr/local/include/)查找需要的頭文件。這個錯誤就是,我手動編譯出來的API是2.21.0,但是未包含對應版本的頭文件,編譯過程自動到/usr/local/include/目錄下查找對應API的頭文件,導致兩者版本不匹配。

9.編譯ffmpeg

參考網站:http://www.cnblogs.com/CoderTian/p/6655568.html

下載:

git clone https://github.com/FFmpeg/FFmpeg.git

配置命令:(編譯至host目錄下)

./configure --prefix=host --enable-shared --disable-static --disable-doc 

編譯:make -j8  && make install 

host目錄下編譯產生:

├── bin
│   ├── ffmpeg
│   └── ffprobe
├── include
│   ├── libavcodec
│   │   ├── ac3_parser.h
│   │   ├── adts_parser.h
│   │   ├── avcodec.h
……
│   ├── libavdevice
│   │   ├── avdevice.h
│   │   └── version.h
│   ├── libavfilter
│   │   ├── avfilter.h
│   │   ├── buffersink.h
│   │   ├── buffersrc.h
│   │   └── version.h
│   ├── libavformat
│   │   ├── avformat.h
│   │   ├── avio.h
│   │   └── version.h
│   ├── libavutil
│   │   ├── adler32.h
│   │   ├── aes_ctr.h
│   │   ├── aes.h
……
│   ├── libswresample
│   │   ├── swresample.h
│   │   └── version.h
│   └── libswscale
│       ├── swscale.h
│       └── version.h
├── lib
│   ├── libavcodec.so -> libavcodec.so.58.52.101
│   ├── libavcodec.so.58 -> libavcodec.so.58.52.101
│   ├── libavcodec.so.58.52.101
│   ├── libavdevice.so -> libavdevice.so.58.7.100
│   ├── libavdevice.so.58 -> libavdevice.so.58.7.100
│   ├── libavdevice.so.58.7.100
│   ├── libavfilter.so -> libavfilter.so.7.53.100
│   ├── libavfilter.so.7 -> libavfilter.so.7.53.100
……

編譯muxing.cpp

從doc/examples複製到host目錄下,後綴改爲cpp,並將頭文件改爲:

/* Header section for muxing.cpp: ffmpeg is a C library, so when the file is
 * compiled as C++ its headers must be wrapped in extern "C" to disable C++
 * name mangling, otherwise every ffmpeg symbol fails to link. */
#define __STDC_CONSTANT_MACROS
#include <stdio.h>
#ifdef __cplusplus
extern "C"
{
#endif
#include <stdlib.h>
#include <string.h>
#include <math.h>

#include <libavutil/avassert.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
#include <libavutil/error.h>
#include <libavutil/mathematics.h>
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
#ifdef __cplusplus
};
#endif

host目錄下建立CMakeLists.txt

# CMakeLists for building the ffmpeg muxing example.
# NOTE(review): cmake_minimum_required() must precede project(); the
# original order left policies at pre-project defaults.
cmake_minimum_required(VERSION 2.8)
project(ffmpeg_muxing)
cmake_policy(SET CMP0015 NEW)

# ffmpeg headers copied into ./include.
include_directories(include)
set(CMAKE_CXX_FLAGS "-std=c++11")
# ffmpeg shared libraries copied into ./lib.
link_directories(lib)

# Enable debug info for the build.
set( CMAKE_BUILD_TYPE Debug )

add_executable(push_flow_muxing muxing.cpp)
target_link_libraries(push_flow_muxing libavcodec.so libavformat.so libavutil.so libswresample.so libswscale.so)

編譯muxing.cpp的可執行文件:

mkdir build && cd build && cmake .. && make 

修改bug參考網站 :https://github.com/usc-imi/aeo-light/issues/8

host/include/libavutil/error.h:121:95: error: taking address of temporary array av_make_error_string((char[AV_ERROR_MAX_STRING_SIZE]){0}, AV_ERROR_MAX_STRING_SIZE, errnum)

如果需要將自己寫的代碼編譯成.so文件並鏈接到可執行文件,只需要將CMakeLists.txt改爲以下方式:(編譯完成之後會生成一個liblive_libs.so文件以及一個可執行文件app)

………………
set(src_files
    src/1.cpp
    src/2.cpp
    src/3.cpp
    src/4.cpp
   )
# Build the listed sources into a shared library (liblive_libs.so).
add_library(live_libs SHARED ${src_files})

aux_source_directory(./src DIR_SRCS)
add_executable(app ${DIR_SRCS})
# BUGFIX: link by target name. "live_libs" is a target, not a variable, so
# the original "${live_libs}" expanded to an empty string and "app" was
# never linked against the library at all.
target_link_libraries(app live_libs)

10.Rock Pi 4B(RK3399)配置Paddle-Lite

參考網站1:https://paddlepaddle.github.io/Paddle-Lite/

參考網站2:https://github.com/YunYang1994/tensorflow-yolov3

 

step1 配置基本工具

  • gcc、g++、git、make、wget、python
  • cmake(建議使用3.10或以上版本)

step2 將tensorflow-yolov3模型轉爲Paddle-Lite兼容的模型

轉換工具:X2Paddle

模型來源:模型格式爲(.pb)

  • 按照參考網站2配置完成tensorflow框架下的yolov3,part 1. Quick start。(注意:下載的模型放在checkpoint路徑下;運行過程可能存在一些問題,core/utils.py中的scale = min(iw/w, ih/h)需要改爲scale = min(float(iw)/w, float(ih)/h))
  • 按照X2Paddle轉換(.pb)爲PaddlePaddle深度學習框架產出的模型格式:x2paddle --framework=tensorflow --model=tf_model.pb --save_dir=pd_model(可能需要輸入tensor的shape,以及模型轉換失敗,則參考FAQ
  • 模型優化:我是直接在x86_64普通電腦下轉的,因此下載工具鏈接:./model_optimize_tool.x86_64-linux-gnu --model_dir=inference_model/ --optimize_out_type=naive_buffer --optimize_out=optimize_model_path/(在RK3399上armv8系統架構下載鏈接。)

step3 編譯Paddle-Lite-Demo:(step2其實也不是必須的。step3可能需要編譯opencv交叉編譯手動編譯

$ cd Paddle-Lite-Demo/PaddleLite-armlinux-demo/image_classification_demo
$ ./run.sh armv8 # RK3399

實現的演示效果如下:

分類cat 目標檢測

11.Rock Pi 4B(RK3399)配置TensorFlow-Lite v1.14

參考網站1:https://www.wandouip.com/t5i214530/

參考網站2:https://blog.csdn.net/computerme/article/details/80345065

參考網站3:https://github.com/tensorflow/tensorflow/issues/32073

參考網站4:https://tensorflow.google.cn/lite/guide/build_arm64

硬件內核(uname -a):Linux linaro-alip 4.4.154-90-rockchip-ga14f6502e045 #22 SMP Tue Jul 30 10:32:28 UTC 2019 aarch64 GNU/Linux

交叉編譯(在x86_64主機上編譯,執行直接拷貝至rockpi):

11.1 clone或者下載TensorFlow代碼(最好選定爲r1.14分支)

git clone [email protected]:tensorflow/tensorflow.git

11.2 根據參考網站4配置交叉編譯環境

sudo apt-get install crossbuild-essential-arm64

11.3 下載TensorFlow所需依賴(TensorFlow根目錄下運行,國內服務器可能會斷開連接,則參考網站1,下載後解壓至tensorflow/lite/tools/make/downloads目錄下)

./tensorflow/lite/tools/make/download_dependencies.sh

11.4 編譯(TensorFlow根目錄下運行)

./tensorflow/lite/tools/make/build_aarch64_lib.sh

以上步驟能成功編譯出一個靜態庫:tensorflow/lite/tools/make/gen/aarch64_armv8-a/lib/libtensorflow-lite.a

11.5 編譯label_image分類Demo

參考網站1網站3修改tensorflow/lite/tools/make/Makefile文件,修改後的文件如下:

①有MINIMAL的地方,都仿照添加LABEL_IMAGE信息

②添加tensorflow/lite/tools/evaluation/utils.cc文件(否則會報錯undefined reference:"tflite::evaluation::CreateGPUDelegate(tflite::FlateBufferModel)")

# Make uses /bin/sh by default, which is incompatible with the bashisms seen
# below.
SHELL := /bin/bash

# Find where we're running from, so we can store generated files here.
ifeq ($(origin MAKEFILE_DIR), undefined)
	MAKEFILE_DIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
endif

# Try to figure out the host system
HOST_OS :=
ifeq ($(OS),Windows_NT)
	HOST_OS = windows
else
	UNAME_S := $(shell uname -s)
	ifeq ($(UNAME_S),Linux)
		HOST_OS := linux
	endif
	ifeq ($(UNAME_S),Darwin)
		HOST_OS := osx
	endif
endif

HOST_ARCH := $(shell if uname -m | grep -q i[345678]86; then echo x86_32; else uname -m; fi)

# Override these on the make command line to target a specific architecture. For example:
# make -f tensorflow/lite/tools/make/Makefile TARGET=rpi TARGET_ARCH=armv7l
TARGET := $(HOST_OS)
TARGET_ARCH := $(HOST_ARCH)

INCLUDES := \
-I. \
-I$(MAKEFILE_DIR)/../../../../../ \
-I$(MAKEFILE_DIR)/../../../../../../ \
-I$(MAKEFILE_DIR)/downloads/ \
-I$(MAKEFILE_DIR)/downloads/eigen \
-I$(MAKEFILE_DIR)/downloads/absl \
-I$(MAKEFILE_DIR)/downloads/gemmlowp \
-I$(MAKEFILE_DIR)/downloads/neon_2_sse \
-I$(MAKEFILE_DIR)/downloads/farmhash/src \
-I$(MAKEFILE_DIR)/downloads/flatbuffers/include \
-I$(OBJDIR)
# This is at the end so any globally-installed frameworks like protobuf don't
# override local versions in the source tree.
INCLUDES += -I/usr/local/include

# These are the default libraries needed, but they can be added to or
# overridden by the platform-specific settings in target makefiles.
LIBS := \
-lstdc++ \
-lpthread \
-lm \
-lz

# There are no rules for compiling objects for the host system (since we don't
# generate things like the protobuf compiler that require that), so all of
# these settings are for the target compiler.
CXXFLAGS := -O3 -DNDEBUG -fPIC
CXXFLAGS += $(EXTRA_CXXFLAGS)
CFLAGS := ${CXXFLAGS}
CXXFLAGS += --std=c++11
LDOPTS := -L/usr/local/lib
ARFLAGS := -r
TARGET_TOOLCHAIN_PREFIX :=
CC_PREFIX :=

ifeq ($(HOST_OS),windows)
CXXFLAGS += -fext-numeric-literals -D__LITTLE_ENDIAN__
endif

# This library is the main target for this makefile. It will contain a minimal
# runtime that can be linked in to other programs.
LIB_NAME := libtensorflow-lite.a

# Benchmark static library and binary
BENCHMARK_LIB_NAME := benchmark-lib.a
BENCHMARK_BINARY_NAME := benchmark_model
BENCHMARK_PERF_OPTIONS_BINARY_NAME := benchmark_model_performance_options

# A small example program that shows how to link against the library.
MINIMAL_SRCS := \
	tensorflow/lite/examples/minimal/minimal.cc
LABEL_IMAGE_SRCS := \
        tensorflow/lite/examples/label_image/label_image.cc\
        tensorflow/lite/examples/label_image/bitmap_helpers.cc\
        tensorflow/lite/tools/evaluation/utils.cc
        
# What sources we want to compile, must be kept in sync with the main Bazel
# build files.

PROFILER_SRCS := \
  tensorflow/lite/profiling/memory_info.cc \
	tensorflow/lite/profiling/time.cc

PROFILE_SUMMARIZER_SRCS := \
	tensorflow/lite/profiling/profile_summarizer.cc \
	tensorflow/core/util/stats_calculator.cc

CMD_LINE_TOOLS_SRCS := \
	tensorflow/lite/tools/command_line_flags.cc

CORE_CC_ALL_SRCS := \
$(wildcard tensorflow/lite/*.cc) \
$(wildcard tensorflow/lite/*.c) \
$(wildcard tensorflow/lite/c/*.c) \
$(wildcard tensorflow/lite/core/*.cc) \
$(wildcard tensorflow/lite/core/api/*.cc) \
$(wildcard tensorflow/lite/experimental/resource_variable/*.cc) \
$(wildcard tensorflow/lite/experimental/ruy/*.cc)
ifneq ($(BUILD_TYPE),micro)
CORE_CC_ALL_SRCS += \
$(wildcard tensorflow/lite/kernels/*.cc) \
$(wildcard tensorflow/lite/kernels/internal/*.cc) \
$(wildcard tensorflow/lite/kernels/internal/optimized/*.cc) \
$(wildcard tensorflow/lite/kernels/internal/reference/*.cc) \
$(PROFILER_SRCS) \
tensorflow/lite/tools/make/downloads/farmhash/src/farmhash.cc \
tensorflow/lite/tools/make/downloads/fft2d/fftsg.c \
tensorflow/lite/tools/make/downloads/flatbuffers/src/util.cpp
CORE_CC_ALL_SRCS += \
	$(shell find tensorflow/lite/tools/make/downloads/absl/absl/ \
	             -type f -name \*.cc | grep -v test | grep -v benchmark | grep -v synchronization | grep -v debugging)
endif
# Remove any duplicates.
CORE_CC_ALL_SRCS := $(sort $(CORE_CC_ALL_SRCS))
CORE_CC_EXCLUDE_SRCS := \
$(wildcard tensorflow/lite/*test.cc) \
$(wildcard tensorflow/lite/*/*test.cc) \
$(wildcard tensorflow/lite/*/*/benchmark.cc) \
$(wildcard tensorflow/lite/*/*/example*.cc) \
$(wildcard tensorflow/lite/*/*/test*.cc) \
$(wildcard tensorflow/lite/*/*/*test.cc) \
$(wildcard tensorflow/lite/*/*/*/*test.cc) \
$(wildcard tensorflow/lite/kernels/*test_main.cc) \
$(wildcard tensorflow/lite/kernels/*test_util*.cc) \
$(MINIMAL_SRCS)\
$(LABEL_IMAGE_SRCS)

BUILD_WITH_MMAP ?= true
ifeq ($(BUILD_TYPE),micro)
	BUILD_WITH_MMAP=false
endif
ifeq ($(BUILD_TYPE),windows)
	BUILD_WITH_MMAP=false
endif
ifeq ($(BUILD_WITH_MMAP),true)
	CORE_CC_EXCLUDE_SRCS += tensorflow/lite/mmap_allocation.cc
else
	CORE_CC_EXCLUDE_SRCS += tensorflow/lite/mmap_allocation_disabled.cc
endif

BUILD_WITH_NNAPI ?= true
ifeq ($(BUILD_TYPE),micro)
	BUILD_WITH_NNAPI=false
endif
ifeq ($(TARGET),windows)
	BUILD_WITH_NNAPI=false
endif
ifeq ($(TARGET),ios)
	BUILD_WITH_NNAPI=false
endif
ifeq ($(TARGET),rpi)
	BUILD_WITH_NNAPI=false
endif
ifeq ($(TARGET),generic-aarch64)
	BUILD_WITH_NNAPI=false
endif
ifeq ($(BUILD_WITH_NNAPI),true)
	CORE_CC_ALL_SRCS += tensorflow/lite/delegates/nnapi/nnapi_delegate.cc
  CORE_CC_ALL_SRCS += tensorflow/lite/delegates/nnapi/quant_lstm_sup.cc
	CORE_CC_ALL_SRCS += tensorflow/lite/nnapi/nnapi_implementation.cc
	CORE_CC_ALL_SRCS += tensorflow/lite/nnapi/nnapi_util.cc
	LIBS += -lrt
else
	CORE_CC_ALL_SRCS += tensorflow/lite/delegates/nnapi/nnapi_delegate_disabled.cc
	CORE_CC_ALL_SRCS += tensorflow/lite/nnapi/nnapi_implementation_disabled.cc
endif

ifeq ($(TARGET),ios)
	CORE_CC_EXCLUDE_SRCS += tensorflow/lite/minimal_logging_android.cc
	CORE_CC_EXCLUDE_SRCS += tensorflow/lite/minimal_logging_default.cc
else
	CORE_CC_EXCLUDE_SRCS += tensorflow/lite/minimal_logging_android.cc
	CORE_CC_EXCLUDE_SRCS += tensorflow/lite/minimal_logging_ios.cc
endif


# Filter out all the excluded files.
TF_LITE_CC_SRCS := $(filter-out $(CORE_CC_EXCLUDE_SRCS), $(CORE_CC_ALL_SRCS))

# Benchmark sources
BENCHMARK_SRCS_DIR := tensorflow/lite/tools/benchmark
EVALUATION_UTILS_SRCS := \
  tensorflow/lite/tools/evaluation/utils.cc
BENCHMARK_ALL_SRCS := \
	$(wildcard $(BENCHMARK_SRCS_DIR)/*.cc) \
	$(PROFILE_SUMMARIZER_SRCS) \
	$(CMD_LINE_TOOLS_SRCS) \
	$(EVALUATION_UTILS_SRCS)

BENCHMARK_MAIN_SRC := $(BENCHMARK_SRCS_DIR)/benchmark_main.cc
BENCHMARK_PERF_OPTIONS_SRC := \
	$(BENCHMARK_SRCS_DIR)/benchmark_tflite_performance_options_main.cc
BENCHMARK_LIB_SRCS := $(filter-out \
	$(wildcard $(BENCHMARK_SRCS_DIR)/*_test.cc) \
	$(BENCHMARK_MAIN_SRC) \
	$(BENCHMARK_PERF_OPTIONS_SRC) \
	$(BENCHMARK_SRCS_DIR)/benchmark_plus_flex_main.cc, \
	$(BENCHMARK_ALL_SRCS))

# These target-specific makefiles should modify or replace options like
# CXXFLAGS or LIBS to work for a specific targetted architecture. All logic
# based on platforms or architectures should happen within these files, to
# keep this main makefile focused on the sources and dependencies.
include $(wildcard $(MAKEFILE_DIR)/targets/*_makefile.inc)

ALL_SRCS := \
	$(MINIMAL_SRCS) \
	$(LABEL_IMAGE_SRCS) \
	$(PROFILER_SRCS) \
	$(PROFILER_SUMMARIZER_SRCS) \
	$(TF_LITE_CC_SRCS) \
	$(BENCHMARK_LIB_SRCS) \
  $(CMD_LINE_TOOLS_SRCS)

# Where compiled objects are stored.
GENDIR := $(MAKEFILE_DIR)/gen/$(TARGET)_$(TARGET_ARCH)/
OBJDIR := $(GENDIR)obj/
BINDIR := $(GENDIR)bin/
LIBDIR := $(GENDIR)lib/

LIB_PATH := $(LIBDIR)$(LIB_NAME)
BENCHMARK_LIB := $(LIBDIR)$(BENCHMARK_LIB_NAME)
BENCHMARK_BINARY := $(BINDIR)$(BENCHMARK_BINARY_NAME)
BENCHMARK_PERF_OPTIONS_BINARY := $(BINDIR)$(BENCHMARK_PERF_OPTIONS_BINARY_NAME)
MINIMAL_BINARY := $(BINDIR)minimal
LABEL_IMAGE_BINARY := $(BINDIR)label_image

CXX := $(CC_PREFIX)${TARGET_TOOLCHAIN_PREFIX}g++
CC := $(CC_PREFIX)${TARGET_TOOLCHAIN_PREFIX}gcc
AR := $(CC_PREFIX)${TARGET_TOOLCHAIN_PREFIX}ar

# Object lists: mirror each source path under OBJDIR, replacing the
# .c/.cc (and, for TF Lite core sources, .cpp) extension with .o.
MINIMAL_OBJS := $(addprefix $(OBJDIR), \
$(patsubst %.cc,%.o,$(patsubst %.c,%.o,$(MINIMAL_SRCS))))
LABEL_IMAGE_OBJS := $(addprefix $(OBJDIR), \
$(patsubst %.cc,%.o,$(patsubst %.c,%.o,$(LABEL_IMAGE_SRCS))))

LIB_OBJS := $(addprefix $(OBJDIR), \
$(patsubst %.cc,%.o,$(patsubst %.c,%.o,$(patsubst %.cpp,%.o,$(TF_LITE_CC_SRCS)))))

BENCHMARK_MAIN_OBJ := $(addprefix $(OBJDIR), \
$(patsubst %.cc,%.o,$(patsubst %.c,%.o,$(BENCHMARK_MAIN_SRC))))

BENCHMARK_PERF_OPTIONS_OBJ := $(addprefix $(OBJDIR), \
$(patsubst %.cc,%.o,$(patsubst %.c,%.o,$(BENCHMARK_PERF_OPTIONS_SRC))))

BENCHMARK_LIB_OBJS := $(addprefix $(OBJDIR), \
$(patsubst %.cc,%.o,$(patsubst %.c,%.o,$(BENCHMARK_LIB_SRCS))))

# Pattern rules compiling sources into the mirrored OBJDIR tree.
# For normal manually-created TensorFlow Lite C++ source files.
$(OBJDIR)%.o: %.cc
	@mkdir -p $(dir $@)
	$(CXX) $(CXXFLAGS) $(INCLUDES) -c $< -o $@
# For normal manually-created TensorFlow Lite C source files.
$(OBJDIR)%.o: %.c
	@mkdir -p $(dir $@)
	$(CC) $(CFLAGS) $(INCLUDES) -c $< -o $@
# Same as the .cc rule, for .cpp sources.
$(OBJDIR)%.o: %.cpp
	@mkdir -p $(dir $@)
	$(CXX) $(CXXFLAGS) $(INCLUDES) -c $< -o $@

# The target that's compiled if there's no command-line arguments.
all: $(LIB_PATH)  $(MINIMAL_BINARY) $(BENCHMARK_BINARY) $(BENCHMARK_PERF_OPTIONS_BINARY) $(LABEL_IMAGE_BINARY)

# The target that's compiled for micro-controllers
micro: $(LIB_PATH)

# Hack for generating schema file bypassing flatbuffer parsing
# NOTE(review): this writes into the source tree; cp -u only refreshes
# the header when the .OPENSOURCE copy is newer.
tensorflow/lite/schema/schema_generated.h:
	@cp -u tensorflow/lite/schema/schema_generated.h.OPENSOURCE tensorflow/lite/schema/schema_generated.h

# Gathers together all the objects we've compiled into a single '.a' archive.
$(LIB_PATH): tensorflow/lite/schema/schema_generated.h $(LIB_OBJS)
	@mkdir -p $(dir $@)
	$(AR) $(ARFLAGS) $(LIB_PATH) $(LIB_OBJS)

# Convenience alias for the core static library.
lib: $(LIB_PATH)

# Link the 'minimal' example against the core library.
$(MINIMAL_BINARY): $(MINIMAL_OBJS) $(LIB_PATH)
	@mkdir -p $(dir $@)
	$(CXX) $(CXXFLAGS) $(INCLUDES) \
	-o $(MINIMAL_BINARY) $(MINIMAL_OBJS) \
	$(LIBFLAGS) $(LIB_PATH) $(LDFLAGS) $(LIBS)
# Link the 'label_image' example against the core library.
$(LABEL_IMAGE_BINARY): $(LABEL_IMAGE_OBJS) $(LIB_PATH)
	@mkdir -p $(dir $@)
	$(CXX) $(CXXFLAGS) $(INCLUDES) \
	-o $(LABEL_IMAGE_BINARY) $(LABEL_IMAGE_OBJS) \
	$(LIBFLAGS) $(LIB_PATH) $(LDFLAGS) $(LIBS)

# Convenience aliases for the example binaries.
minimal: $(MINIMAL_BINARY)
label_image: $(LABEL_IMAGE_BINARY)

# Benchmark static library. Note it repacks $(LIB_OBJS) as well, so the
# benchmark binaries below link only this single archive.
$(BENCHMARK_LIB) : $(LIB_PATH) $(BENCHMARK_LIB_OBJS)
	@mkdir -p $(dir $@)
	$(AR) $(ARFLAGS) $(BENCHMARK_LIB) $(LIB_OBJS) $(BENCHMARK_LIB_OBJS)

# Convenience alias for the benchmark library.
benchmark_lib: $(BENCHMARK_LIB)

# Main benchmark binary: benchmark library plus its main().
$(BENCHMARK_BINARY) : $(BENCHMARK_MAIN_OBJ) $(BENCHMARK_LIB)
	@mkdir -p $(dir $@)
	$(CXX) $(CXXFLAGS) $(INCLUDES) \
	-o $(BENCHMARK_BINARY) $(BENCHMARK_MAIN_OBJ) \
	$(LIBFLAGS) $(BENCHMARK_LIB) $(LDFLAGS) $(LIBS)

# Benchmark variant driven by TFLite performance options.
$(BENCHMARK_PERF_OPTIONS_BINARY) : $(BENCHMARK_PERF_OPTIONS_OBJ) $(BENCHMARK_LIB)
	@mkdir -p $(dir $@)
	$(CXX) $(CXXFLAGS) $(INCLUDES) \
	-o $(BENCHMARK_PERF_OPTIONS_BINARY) $(BENCHMARK_PERF_OPTIONS_OBJ) \
	$(LIBFLAGS) $(BENCHMARK_LIB) $(LDFLAGS) $(LIBS)

# Build both benchmark binaries.
benchmark: $(BENCHMARK_BINARY) $(BENCHMARK_PERF_OPTIONS_BINARY)

# Print the library output directory.
libdir:
	@echo $(LIBDIR)

# Gets rid of all generated files.
clean:
	rm -rf $(MAKEFILE_DIR)/gen

# Gets rid of target files only, leaving the host alone. Also leaves the lib
# directory untouched deliberately, so we can persist multiple architectures
# across builds for iOS and Android.
cleantarget:
	rm -rf $(OBJDIR)
	rm -rf $(BINDIR)

# Empty rule so make does not fail when a .d file does not exist yet;
# .PRECIOUS keeps the generated dependency files across runs.
$(DEPDIR)/%.d: ;
.PRECIOUS: $(DEPDIR)/%.d

# Pull in auto-generated header dependencies (silently skipped if absent).
-include $(patsubst %,$(DEPDIR)/%.d,$(basename $(ALL_SRCS)))

按照11.4再次編譯,最終編譯生成:tensorflow/lite/tools/make/gen/aarch64_armv8-a/bin/label_image

TensorFlow根目錄下使用以下命令可查看這個可執行文件的運行內核環境

file tensorflow/lite/tools/make/gen/aarch64_armv8-a/bin/label_image

輸出:ELF 64-bit LSB executable, ARM aarch64, version 1 (GNU/Linux), dynamically linked, interpreter /lib/ld-linux-aarch64.so.1, for GNU/Linux 3.7.0, BuildID[sha1]=41093a674cedba6b014bd47da95a0a09b9aa9aa8, not stripped

11.6 在網站中下載model和labels文件,並拷貝模型和tensorflow/lite/tools/make/gen/aarch64_armv8-a/下的所有文件至rockpi中,運行命令:

./label_image -v 1 -m ./mobilenet_v1_1.0_224_quant.tflite -i ./grace_hopper.bmp -l ./labels_mobilenet_quant_v1_224.txt 

輸出:average time: 113.777 ms
            0.780392: 653 military uniform
            0.105882: 907 Windsor tie
            0.0156863: 458 bow tie
            0.0117647: 466 bulletproof vest
            0.00784314: 835 suit

11.7 測試目標檢測模型

參考網站:https://github.com/tensorflow/examples/tree/master/lite/examples/object_detection/raspberry_pi

①rockpi的內核是aarch64,因此下載網站中的tflite_runtime-1.14.0-cp35-cp35m-linux_aarch64.whl,並拷貝至rockpi中,使用pip install tflite_runtime-1.14.0-cp35-cp35m-linux_aarch64.whl安裝;

②按照網站中的配置過程,下載代碼,若不配置攝像頭,僅測試單張圖片,則修改網站中的detect_picamera.py

# python3
#
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example using TF Lite to detect objects with the Raspberry Pi camera."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import io
import re
import time

from annotation import Annotator

import numpy as np

from PIL import Image,ImageDraw
from tflite_runtime.interpreter import Interpreter

# Output canvas size in pixels for the annotated image; detection boxes
# are scaled to these dimensions in annotate_objects().
CAMERA_WIDTH = 640
CAMERA_HEIGHT = 480


def load_labels(path):
  """Loads the labels file. Supports files with or without index numbers."""
  with open(path, 'r', encoding='utf-8') as f:
    lines = f.readlines()
    labels = {}
    for row_number, content in enumerate(lines):
      pair = re.split(r'[:\s]+', content.strip(), maxsplit=1)
      if len(pair) == 2 and pair[0].strip().isdigit():
        labels[int(pair[0])] = pair[1].strip()
      else:
        labels[row_number] = pair[0].strip()
  return labels


def set_input_tensor(interpreter, image):
  """Copies `image` into batch slot 0 of the interpreter's first input tensor."""
  idx = interpreter.get_input_details()[0]['index']
  interpreter.tensor(idx)()[0][:, :] = image

def get_output_tensor(interpreter, index):
  """Fetches output number `index` from the interpreter, with size-1 dims squeezed out."""
  details = interpreter.get_output_details()[index]
  return np.squeeze(interpreter.get_tensor(details['index']))


def detect_objects(interpreter, image, threshold):
  """Runs one inference pass and returns detections scoring >= threshold.

  Args:
    interpreter: tflite interpreter with tensors already allocated.
    image: input image array matching the model's input shape.
    threshold: minimum score for a detection to be kept.

  Returns:
    List of dicts with keys 'bounding_box' (relative ymin, xmin, ymax,
    xmax), 'class_id' and 'score'.
  """
  set_input_tensor(interpreter, image)
  interpreter.invoke()

  # SSD-style post-processed outputs: boxes, classes, scores, valid count.
  boxes = get_output_tensor(interpreter, 0)
  classes = get_output_tensor(interpreter, 1)
  scores = get_output_tensor(interpreter, 2)
  count = int(get_output_tensor(interpreter, 3))

  return [
      {
          'bounding_box': boxes[i],
          'class_id': classes[i],
          'score': scores[i]
      }
      for i in range(count)
      if scores[i] >= threshold
  ]


def annotate_objects(image, results, labels, out_path='car_out.jpg'):
  """Draws a box and label for each detection and saves the result as JPEG.

  Args:
    image: PIL.Image to annotate (a resized copy is drawn on and saved).
    results: detection dicts from detect_objects(), with relative coords.
    labels: mapping of class_id -> label string.
    out_path: destination file path; defaults to the original hard-coded
      'car_out.jpg' for backward compatibility.
  """
  # Image.ANTIALIAS was removed in Pillow 10; LANCZOS has been an exact
  # alias for the same filter since Pillow 2.7.
  image = image.resize(
      (CAMERA_WIDTH, CAMERA_HEIGHT), Image.LANCZOS)
  draw = ImageDraw.Draw(image)
  for obj in results:
    # Convert the bounding box figures from relative coordinates
    # to absolute coordinates based on the annotated image's resolution.
    ymin, xmin, ymax, xmax = obj['bounding_box']
    xmin = int(xmin * CAMERA_WIDTH)
    xmax = int(xmax * CAMERA_WIDTH)
    ymin = int(ymin * CAMERA_HEIGHT)
    ymax = int(ymax * CAMERA_HEIGHT)

    # Red outline (no fill), then label and score at the box's top-left.
    draw.rectangle((xmin,ymin,xmax,ymax),None,'red')
    draw.text([xmin, ymin],
                   '%s\n%.2f' % (labels[obj['class_id']], obj['score']))
  image.save(out_path,'JPEG')

def main():
  """Parses CLI args, runs one detection pass, and saves the annotated image.

  Loads the model and labels given on the command line, resizes the input
  image to the model's expected input shape, runs detection, reports the
  inference time, and writes the annotated output via annotate_objects().
  """
  parser = argparse.ArgumentParser(
      formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  parser.add_argument(
      '--model', help='File path of .tflite file.', required=True)
  parser.add_argument(
      '--labels', help='File path of labels file.', required=True)
  # New optional flag; default preserves the previously hard-coded path.
  parser.add_argument(
      '--input', help='File path of the image to run detection on.',
      default='car.jpg')
  parser.add_argument(
      '--threshold',
      help='Score threshold for detected objects.',
      required=False,
      type=float,
      default=0.1)
  args = parser.parse_args()

  labels = load_labels(args.labels)
  interpreter = Interpreter(args.model)
  interpreter.allocate_tensors()
  _, input_height, input_width, _ = interpreter.get_input_details()[0]['shape']

  img_d = Image.open(args.input).convert('RGB')
  # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
  image = img_d.resize(
      (input_width, input_height), Image.LANCZOS)
  start_time = time.monotonic()
  results = detect_objects(interpreter, image, args.threshold)
  elapsed_ms = (time.monotonic() - start_time) * 1000
  # Previously computed but never reported; surface the inference time.
  print('Inference time: %.1f ms' % elapsed_ms)
  annotate_objects(img_d, results, labels)

if __name__ == '__main__':
  main()

執行下面的命令即可 

python3 detect_picamera.py --model detect.tflite --labels coco_labels.txt
發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章