Mixed Reality with UE4 + OpenCV (webCamera, mix-reality, blue screen matting)

Overall Effect

       Similar to mixed reality: showing a live, composited view of the player while they play a VR game.


Basic Idea

       Using an OpenCV plugin, capture the player playing the game in front of a blue screen with a webcam; then apply blue-screen matting to the image to extract the player; finally, composite the player over the game scene.

Implementation (UE 4.12, OpenCV 2.4.13)

1. In UE4, open Visual Studio and add a new class, WebcamReader, inheriting from Actor.

2. Using the OpenCV plugin in UE4

     Method 1: reference OpenCV directly from the project, i.e. add the include folder from the OpenCV distribution to the project, and link against the libraries/DLLs in \opencv\build\x64\vc12\lib (for a 64-bit system);

     Method 2: wrap the OpenCV DLLs in a plugin and call them through it. The steps for writing the plugin: ① In the project folder, add a Plugins folder, then create a folder for the plugin inside it, e.g. Plugins\CoolPlugin; CoolPlugin in turn contains the .uplugin descriptor, a Source folder, and a ThirdParty folder, described below.

     

     In the .uplugin file you edit the plugin's metadata, which is later shown in the UE editor's plugin browser; as shown below, it can carry some details about the plugin:

   

{
	"FileVersion": 3,
	"Version": 1,
	"VersionName": "1.0",
	"FriendlyName": "Webcamera",
	"Description": "Adds web camera support",
	"Category": "WebcamReader",
	"CreatedBy": "Milen",
	"CreatedByURL": "http://blog.csdn.net/himilong/article/details/52829231",
	"DocsURL": "http://blog.csdn.net/himilong/article/details/52829231",
	"MarketplaceURL": "",
	"SupportURL": "http://blog.csdn.net/himilong/article/details/52829231",
	"Modules": [
		{
			"Name": "CoolPlugin",
			"Type": "Editor",
			"LoadingPhase": "Default"
		}
	],
	"EnabledByDefault": false,
	"CanContainContent": false,
	"IsBetaVersion": false,
	"Installed": false
}
   Under the plugin's ThirdParty folder, create an OpenCV folder, into which we put the include files we need (OpenCV\Includes) as well as the dll and lib files (OpenCV\Libraries).

    With these preparations done, the required lib and dll files can be loaded in code. In CoolPlugin\Source\CoolPlugin, create the CoolPlugin.Build.cs file:

    

using UnrealBuildTool;
using System.IO;
public class CoolPlugin : ModuleRules
{
    public CoolPlugin(TargetInfo target)
    {
        PrivateDependencyModuleNames.AddRange(
            new string[] {
                "Engine",
                "UnrealEd",
                "InputCore",
                "Core",
                "Slate",
                "SlateCore",
                "EditorStyle",
                "CoreUObject",
                "CoolPlugin"   // add plugin file name 
            });

        PrivateIncludePaths.AddRange(new[] { "CoolPlugin/Private" });

        LoadOpenCV(target);  // load the OpenCV lib/dll files
    }

    private string ModulePath
    {
        get
        {
            RulesAssembly r;
            FileReference CheckProjectFile;
            UProjectInfo.TryGetProjectForTarget("CoolProject", out CheckProjectFile);

            r = RulesCompiler.CreateProjectRulesAssembly(CheckProjectFile);
            FileReference f = r.GetModuleFileName(this.GetType().Name);

            return Path.GetDirectoryName(f.CanonicalName);

        }
        //get { return Path.GetDirectoryName(RulesCompiler.GetModuleFilename(this.GetType().Name)); }
    }
    private string ThirdPartyPath
    {
        get { return Path.GetFullPath(Path.Combine(ModulePath, "../../ThirdParty/")); }
    }

    public bool LoadOpenCV(TargetInfo Target)
    {
        bool isLibrarySupported = false;

        // Create OpenCV Path 
        string OpenCVPath = Path.Combine(ThirdPartyPath, "OpenCV");

        // Get Library Path 
        string LibPath = "";
        bool isdebug = Target.Configuration == UnrealTargetConfiguration.Debug && BuildConfiguration.bDebugBuildsActuallyUseDebugCRT;
        if (Target.Platform == UnrealTargetPlatform.Win64)
        {
            LibPath = Path.Combine(OpenCVPath, "Libraries", "Win64");
            isLibrarySupported = true;
        }
        else if (Target.Platform == UnrealTargetPlatform.Win32)
        {
            LibPath = Path.Combine(OpenCVPath, "Libraries", "Win32");
            isLibrarySupported = true;
        }
        else
        {
            string Err = string.Format("{0} dedicated server is made to depend on {1}. We want to avoid this, please correct module dependencies.", Target.Platform.ToString(), this.ToString()); System.Console.WriteLine(Err);
        }
        if (isLibrarySupported)
        {
            //Add Include path 
            PublicIncludePaths.AddRange(new string[] { Path.Combine(OpenCVPath, "Includes") });

            // Add Library Path 
            PublicLibraryPaths.Add(LibPath);  

            if (!isdebug)
            {
                //Add Static Libraries (Release Version)
                PublicAdditionalLibraries.Add("opencv_imgproc2413.lib");
                //Add Dynamic Libraries (Release Version)
                PublicDelayLoadDLLs.Add("opencv_imgproc2413.dll");

                PublicAdditionalLibraries.Add("opencv_core2413.lib");
                PublicDelayLoadDLLs.Add("opencv_core2413.dll");

                PublicAdditionalLibraries.Add("opencv_highgui2413.lib");
                PublicDelayLoadDLLs.Add("opencv_highgui2413.dll");
            }
            else
            {
                //Add Static Libraries (Debug Version)
                //PublicAdditionalLibraries.Add("opencv_ts300d.lib");
                PublicAdditionalLibraries.Add("opencv_imgproc2413d.lib");

                //Add Dynamic Libraries (Debug Version)
                PublicDelayLoadDLLs.Add("opencv_imgproc2413d.dll");

                PublicAdditionalLibraries.Add("opencv_core2413d.lib");

                //Add Dynamic Libraries (Debug Version)
                PublicDelayLoadDLLs.Add("opencv_core2413d.dll");

                PublicAdditionalLibraries.Add("opencv_highgui2413d.lib");

                //Add Dynamic Libraries (Debug Version)
                PublicDelayLoadDLLs.Add("opencv_highgui2413d.dll");
            }
        }

        Definitions.Add(string.Format("WITH_OPENCV_BINDING={0}", isLibrarySupported ? 1 : 0));

        return isLibrarySupported;
    }
}
      Finally, add the code the plugin itself needs under CoolPlugin\Source\CoolPlugin\Private.
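      At a minimum this means a module class implementing IModuleInterface so the engine can load the plugin. Below is a minimal sketch; the class name FCoolPluginModule and the file name CoolPlugin.cpp are assumptions used here for illustration.

// CoolPlugin\Source\CoolPlugin\Private\CoolPlugin.cpp
// Minimal module implementation so the CoolPlugin module can be loaded by the engine.
#include "ModuleManager.h"

class FCoolPluginModule : public IModuleInterface
{
public:
	virtual void StartupModule() override
	{
		// A place to manually load the delay-loaded OpenCV DLLs if that becomes necessary.
	}

	virtual void ShutdownModule() override
	{
	}
};

IMPLEMENT_MODULE(FCoolPluginModule, CoolPlugin)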


3. In WebcamReader.h, include the OpenCV headers that will be used:

#include "opencv2/core.hpp"
#include "opencv2/highgui.hpp"	
#include "opencv2/imgproc.hpp"
#include "opencv2/video.hpp"
 

Declare the main functions that will be used later: OnNextVideoFrame() is an event fired each time a new video frame has been grabbed, VideoTexture holds the most recently read frame, and UpdateFrame(), as the name suggests, keeps reading images from the camera and updating the current frame.

// Fill out your copyright notice in the Description page of Project Settings.

#pragma once

#include "GameFramework/Actor.h"
#include "WebcamReader.generated.h"

UCLASS()
class MYPROJECT_API AWebcamReader : public AActor
{
	GENERATED_BODY()
	
public:	
	// Sets default values for this actor's properties
	AWebcamReader();

	// deallocates memory for the opencv fields
	~AWebcamReader();

	// Called when the game starts or when spawned
	virtual void BeginPlay() override;
	
	// Called every frame
	virtual void Tick( float DeltaSeconds ) override;

	// The device ID opened by the Video Stream
	UPROPERTY(BlueprintReadWrite, EditAnywhere, Category = Webcam)
		int32 CameraID ;

	//UPROPERTY(interp, Category = CameraSettings, meta = (ShowOnlyInnerProperties)) 
	//	struct FPostProcessSettings PostProcessSettings;

	UPROPERTY(interp, BlueprintReadWrite, Category = CameraSettings)//, meta = (ShowOnlyInnerProperties)) 
		struct FPostProcessSettings PostProcessSettings;


	// If the webcam images should be resized every frame
	UPROPERTY(BlueprintReadWrite, EditAnywhere, Category = Webcam)
		bool ShouldResize;

	// The targeted resize width and height (width, height)
	UPROPERTY(BlueprintReadWrite, EditAnywhere, Category = Webcam)
		FVector2D ResizeDeminsions;

	// The rate at which the color data array and video texture is updated (in frames per second)
	UPROPERTY(BlueprintReadWrite, EditAnywhere, Category = Webcam)
		float RefreshRate;

	// The refresh timer
	UPROPERTY(BlueprintReadWrite, Category = Webcam)
		float RefreshTimer;

	// Blueprint Event called every time the video frame is updated
	UFUNCTION(BlueprintNativeEvent, Category = Webcam)
		void OnNextVideoFrame();

	// OpenCV fields
	cv::Mat* frame;
	cv::VideoCapture* stream;
	cv::Size* size;

	// OpenCV prototypes
	void UpdateFrame();
	void UpdateTexture();

	// If the stream has successfully opened yet
	UPROPERTY(BlueprintReadWrite, Category = Webcam)
		bool isStreamOpen;

	// The video's width and height (width, height)
	UPROPERTY(BlueprintReadWrite, Category = Webcam)
		FVector2D VideoSize;

	// The current video frame's corresponding texture
	UPROPERTY(BlueprintReadWrite, Category = Webcam)
		UTexture2D* VideoTexture;

	// The current data array
	UPROPERTY(BlueprintReadWrite, Category = Webcam)
		TArray<FColor> Data;

protected:

	// Use this function to update the texture rects you want to change:
	// NOTE: There is a method called UpdateTextureRegions in UTexture2D but it is compiled WITH_EDITOR and is not marked as ENGINE_API so it cannot be linked
	// from plugins.
	// FROM: https://wiki.unrealengine.com/Dynamic_Textures
	void UpdateTextureRegions(UTexture2D* Texture, int32 MipIndex, uint32 NumRegions, FUpdateTextureRegion2D* Regions, uint32 SrcPitch, uint32 SrcBpp, uint8* SrcData, bool bFreeData);

	// Pointer to update texture region 2D struct
	FUpdateTextureRegion2D* VideoUpdateTextureRegion;
	
};


4. In the .cpp file, first initialize the class in its constructor: the camera ID, the refresh rate, and the other variables the program uses.

AWebcamReader::AWebcamReader()
{
 	// Set this actor to call Tick() every frame.  You can turn this off to improve performance if you don't need it.
	PrimaryActorTick.bCanEverTick = true;

	// Initialize OpenCV and webcam properties
	CameraID = 0;
	RefreshRate = 15;
	isStreamOpen = false;
	VideoSize = FVector2D(0, 0);
	ShouldResize = false;
	ResizeDeminsions = FVector2D(320, 240);
	stream = new cv::VideoCapture();
	frame = new cv::Mat();
	RefreshTimer = 1000000.0f;
}

In BeginPlay(), open the camera and read the first frame:

void AWebcamReader::BeginPlay()
{
	Super::BeginPlay();
	
	stream->open(CameraID);
	if (stream->isOpened())
	{
		//Initialize stream
		isStreamOpen = true;
		UpdateFrame();
		VideoSize = FVector2D(frame->cols, frame->rows);
		size = new cv::Size(ResizeDeminsions.X, ResizeDeminsions.Y);
		VideoTexture = UTexture2D::CreateTransient(VideoSize.X, VideoSize.Y);
		VideoTexture->UpdateResource();
		VideoUpdateTextureRegion = new FUpdateTextureRegion2D(0, 0, 0, 0, VideoSize.X, VideoSize.Y);

		//Initialize data array
		Data.Init(FColor(0, 0, 0, 255), VideoSize.X * VideoSize.Y);
	}
}

The Tick() function performs the periodic update and keeps calling OnNextVideoFrame(), which is needed later in the Blueprint. UpdateFrame() reads the camera data, and UpdateTexture() updates VideoTexture.

// Called every frame
void AWebcamReader::Tick( float DeltaTime )
{
	Super::Tick( DeltaTime );

	RefreshTimer += DeltaTime;
	if (isStreamOpen && RefreshTimer >= 1.0f / RefreshRate)
	{
		RefreshTimer -= 1.0f / RefreshRate;
		UpdateFrame();
		UpdateTexture();
		OnNextVideoFrame();
	}

}
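
The two OpenCV helpers declared in the header, UpdateFrame() and UpdateTexture(), are not listed above. A sketch consistent with those declarations (and with the wiki article referenced at the end) could look like the following; it assumes the camera delivers ordinary 8-bit BGR frames, and UpdateTextureRegions itself is the helper from the Dynamic Textures wiki page mentioned in the header comment.

void AWebcamReader::UpdateFrame()
{
	if (stream->isOpened())
	{
		// Grab the next frame from the camera and optionally resize it
		stream->read(*frame);
		if (ShouldResize)
		{
			cv::resize(*frame, *frame, *size);
		}
	}
	else
	{
		isStreamOpen = false;
	}
}

void AWebcamReader::UpdateTexture()
{
	if (isStreamOpen && frame->data)
	{
		// Copy the BGR pixels of the cv::Mat into the FColor array
		for (int y = 0; y < VideoSize.Y; y++)
		{
			for (int x = 0; x < VideoSize.X; x++)
			{
				int i = x + (y * VideoSize.X);
				Data[i].B = frame->data[i * 3 + 0];
				Data[i].G = frame->data[i * 3 + 1];
				Data[i].R = frame->data[i * 3 + 2];
			}
		}

		// Upload the pixel data into the transient VideoTexture
		UpdateTextureRegions(VideoTexture, (int32)0, (uint32)1, VideoUpdateTextureRegion,
			(uint32)(4 * VideoSize.X), (uint32)4, (uint8*)Data.GetData(), false);
	}
}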



5. Create a new material to be used for post-process rendering (set the Material Domain to Post Process) and build its graph so that the background around the player is keyed out; the per-pixel idea is sketched in the code below. Note the Texture Parameter you create in this material: its name is used later as the parameter name for SetTextureParameterValue. The main idea of the graph is to branch on the pixel's RGB: background pixels are replaced with the SceneTexture, while the player's pixels are kept unchanged. Here the background is green, and the G component is large not only for the green backdrop but also for colors the player is likely to show, such as skin tones or white clothing, so G alone cannot separate them; instead the channels are added and the sum is used as the A input of the If node. For the background color R + G is relatively small, while for the player's colors it is larger, which is what separates the two.
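
The material itself is built in the material editor rather than in code; to make the keying idea concrete, the per-pixel decision can be written out as plain C++ as below. The threshold value is only an assumption and would be tuned to the actual backdrop and lighting.

// Per-pixel idea behind the post-process keying material, written as plain C++ for clarity.
// CameraColor is the webcam texture sample, SceneColor is the rendered game scene,
// and Threshold plays the role of the B input of the material's If node (value assumed).
struct FLinearRGB { float R, G, B; };

FLinearRGB KeyPixel(const FLinearRGB& CameraColor, const FLinearRGB& SceneColor, float Threshold = 1.2f)
{
	// A = R + G: relatively small for the green backdrop, larger for skin tones or white clothing
	const float A = CameraColor.R + CameraColor.G;

	// If node: keep the player pixel, replace the background pixel with the game scene
	return (A > Threshold) ? CameraColor : SceneColor;
}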



6. After compiling the classes above, return to the UE4 editor and create a Blueprint class based on WebcamReader.


7. Using the material above, create a Dynamic Material Instance and change its parameter values at runtime. The Post Process added as a component is what removes the player's background in post.

Checking Unbound makes the post-process effect apply to the whole screen. These options are covered in the Unreal documentation and are not repeated here.
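
The article wires this step up in Blueprint (Create Dynamic Material Instance, Set Texture Parameter Value, then adding the instance as a blendable on the Post Process component); for reference, an equivalent C++ sketch might look like the following. The function name SetupKeying and the parameter name "WebcamTexture" are illustrative assumptions; the parameter name must match the texture parameter created in the material.

// An equivalent C++ sketch of the Blueprint setup described above (names are assumptions).
#include "Components/PostProcessComponent.h"
#include "Materials/MaterialInstanceDynamic.h"
#include "Engine/Texture2D.h"

void SetupKeying(UObject* Outer, UMaterialInterface* KeyingMaterial,
                 UTexture2D* VideoTexture, UPostProcessComponent* PostProcess)
{
	// Create a dynamic instance so its texture parameter can be updated at runtime
	UMaterialInstanceDynamic* MID = UMaterialInstanceDynamic::Create(KeyingMaterial, Outer);
	MID->SetTextureParameterValue(FName("WebcamTexture"), VideoTexture);

	// Blend the keying material into the post-process chain; Unbound makes it affect the whole screen
	PostProcess->bUnbound = true;
	PostProcess->Settings.AddBlendable(MID, 1.0f);
}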


8. Click Play and you get the effect described at the beginning of this article!


Plugin Download:

http://download.csdn.net/detail/himilong/9681340


References:


uploadvr.com/epic-unreal-engine-4-mixed-reality-support/

opencv-srf.blogspot.com/2013/05/installing-configuring-opencv-with-vs.html

wiki.unrealengine.com/Integrating_OpenCV_Into_Unreal_Engine_4
