Analysis::checkBidir2Nx2N()

/*
	Derives the bidir (bi-prediction) information from the pred2Nx2N prediction information.
	Precondition: the pred2Nx2N analysis must have been performed first.

	Process:
		1. If bi-prediction is restricted, or either of the L0/L1 bestME costs from pred2Nx2N is MAX,
		   skip the bidir analysis and return with the bidir sa8d cost and rd cost set to MAX.
		2. Take the L0/L1 bestME from pred2Nx2N as the L0/L1 bestME of bidir.
		3. Take the L0/L1 ref, mvp and mvpIdx from pred2Nx2N as the L0/L1 ref, mvp and mvpIdx of bidir.
		4. Initialize the bidir costs and set partSize = 2Nx2N, predMode = Inter, dir = 3, mergeFlag = false.
		5. Load the pred2Nx2N L0/L1 MVs as the bidir L0/L1 MVs and compute the mvd.
		6. Perform motion compensation.
		7. Compute the distortion of predYuv.
		8. Compute the bidir bit cost and from it the sa8d cost (see the cost-model sketch after this comment block).
		9. If either of the bidir L0/L1 MVs is non-zero:
			1. Re-set the search window, centred on the zero vector with range merange.
			2. Check whether the MVPs lie inside that window.
		10. If at least one of the L0/L1 MVs is non-zero and, after resetting the window, both MVPs are
		    inside it, run the zero-vector check:
			1. Compute the zero-MV distortion:
				·If m_bChromaSa8d and chroma is present
					1. Set the best motion vectors to the zero MV.
					2. Perform motion compensation.
					3. Accumulate the luma and chroma sa8d.
				·Otherwise
					1. Fetch the co-located pixels from the L0 and L1 reference frames.
					2. Average the co-located L0/L1 reference pixels to form the zero-MV bidir prediction.
					3. Compute the sa8d.
			2. Compute the zero-MV bits and cost.
			3. Re-check the best MVPs based on the zero-MV cost.
			4. Recompute the zero-MV bits and cost with the updated MVPs.
		11. If the zero-MV cost is lower than the bidir cost from step 8, update the cost, mvd, mvpIdx and
		    MVs (to zero), redoing motion compensation if necessary; otherwise restore the MVs to the
		    original pred2Nx2N MVs (only needed when the zero-MV motion compensation overwrote them).
*/
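The sa8d cost used throughout this function is a fast rate-distortion estimate rather than a full RD cost. A minimal sketch of that model, assuming m_rdCost.getCost() applies a lambda-style weighting to the bit estimate (the struct, names and scaling below are illustrative assumptions, not the x265 implementation):

#include <cstdint>

// Illustrative sketch of an SA8D-based mode cost (assumed model, not x265 code):
// the SA8D distortion of the prediction plus a lambda-weighted estimate of the
// bits needed to signal the motion data.
struct SketchRdCost
{
    uint64_t lambda;  // assumed fixed-point Lagrange multiplier

    uint64_t getCost(uint32_t bits) const { return (lambda * bits) >> 8; }  // assumed scaling
};

inline uint64_t sa8dModeCost(uint32_t sa8d, uint32_t bits, const SketchRdCost& rd)
{
    // sa8dCost = distortion + lambda * bits; the candidate (best-MV bidir vs. zero-MV bidir)
    // with the lower value wins
    return (uint64_t)sa8d + rd.getCost(bits);
}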
void Analysis::checkBidir2Nx2N(const Mode& inter2Nx2N, Mode& bidir2Nx2N, const CUGeom& cuGeom)
{
    // Take the CUData of bidir2Nx2N as the output
    CUData& cu = bidir2Nx2N.cu;

    // If bi-prediction is restricted || the 2Nx2N L0 cost is MAX || the 2Nx2N L1 cost is MAX
    if (cu.isBipredRestriction() || inter2Nx2N.bestME[0][0].cost == MAX_UINT || inter2Nx2N.bestME[0][1].cost == MAX_UINT)
    {
        // Abort the bidir analysis, reporting its sa8d cost and rd cost as MAX
        bidir2Nx2N.sa8dCost = MAX_INT64;
        bidir2Nx2N.rdCost = MAX_INT64;
        return;
    }

    // Original (source) YUV
    const Yuv& fencYuv = *bidir2Nx2N.fencYuv;
    MV   mvzero(0, 0);
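    // partEnum selects the square-block entry of the primitives tables that matches this CU size
    // (log2CUSize - 2, e.g. a 16x16 CU maps to the 16x16 entry)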
    int  partEnum = cuGeom.log2CUSize - 2;

    // Take the 2Nx2N best L0 ME as bidir's L0 best, and the best L1 ME as bidir's L1 best
    bidir2Nx2N.bestME[0][0] = inter2Nx2N.bestME[0][0];
    bidir2Nx2N.bestME[0][1] = inter2Nx2N.bestME[0][1];

    // Fetch bidir's L0/L1 best ref, mvp and mvpIdx
    MotionData* bestME = bidir2Nx2N.bestME[0];
    int ref0    = bestME[0].ref;
    MV  mvp0    = bestME[0].mvp;
    int mvpIdx0 = bestME[0].mvpIdx;
    int ref1    = bestME[1].ref;
    MV  mvp1    = bestME[1].mvp;
    int mvpIdx1 = bestME[1].mvpIdx;

    // Initialize the bidir costs
    bidir2Nx2N.initCosts();
    // Set partSize to 2Nx2N
    cu.setPartSizeSubParts(SIZE_2Nx2N);
    // Set predMode to inter
    cu.setPredModeSubParts(MODE_INTER);
    // Set the inter prediction direction to 3, i.e. bi-prediction (L0 + L1)
    cu.setPUInterDir(3, 0, 0);
    // Set the L0/L1 reference indices
    cu.setPURefIdx(0, (int8_t)ref0, 0, 0);
    cu.setPURefIdx(1, (int8_t)ref1, 0, 0);
    // Set the L0/L1 mvpIdx
    cu.m_mvpIdx[0][0] = (uint8_t)mvpIdx0;
    cu.m_mvpIdx[1][0] = (uint8_t)mvpIdx1;
    // Clear mergeFlag
    cu.m_mergeFlag[0] = 0;

    /* Estimate cost of BIDIR using best 2Nx2N L0 and L1 motion vectors */
    // Set the L0 MV and compute its mvd
    cu.setPUMv(0, bestME[0].mv, 0, 0);
    cu.m_mvd[0][0] = bestME[0].mv - mvp0;
    // Set the L1 MV and compute its mvd
    cu.setPUMv(1, bestME[1].mv, 0, 0);
    cu.m_mvd[1][0] = bestME[1].mv - mvp1;
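    // Only the mvd (the MV minus its predictor) is coded in the bitstream, which is why the bit
    // estimates below always measure MVs against their AMVP predictors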

    // Build the PU
    PredictionUnit pu(cu, cuGeom, 0);
    // Motion compensation: generate the averaged L0/L1 prediction into bidir2Nx2N.predYuv
    // (the last argument controls whether chroma is predicted as well)
    motionCompensation(cu, pu, bidir2Nx2N.predYuv, true, m_bChromaSa8d && (m_csp != X265_CSP_I400 && m_frame->m_fencPic->m_picCsp != X265_CSP_I400));

    // Compute the luma distortion of predYuv
    int sa8d = primitives.cu[partEnum].sa8d(fencYuv.m_buf[0], fencYuv.m_size, bidir2Nx2N.predYuv.m_buf[0], bidir2Nx2N.predYuv.m_size);
    // If m_bChromaSa8d and chroma is present, add the chroma distortion
    if (m_bChromaSa8d && (m_csp != X265_CSP_I400 && m_frame->m_fencPic->m_picCsp != X265_CSP_I400))
    {
        /* Add in chroma distortion */
        sa8d += primitives.chroma[m_csp].cu[partEnum].sa8d(fencYuv.m_buf[1], fencYuv.m_csize, bidir2Nx2N.predYuv.m_buf[1], bidir2Nx2N.predYuv.m_csize);
        sa8d += primitives.chroma[m_csp].cu[partEnum].sa8d(fencYuv.m_buf[2], fencYuv.m_csize, bidir2Nx2N.predYuv.m_buf[2], bidir2Nx2N.predYuv.m_csize);
    }

    // Accumulate the bit cost of the bidir motion data
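    // Each unidirectional bestME.bits already includes the bits for signalling its own prediction
    // direction (roughly m_listSelBits[0] / m_listSelBits[1]); those are removed and the bits for
    // signalling bi-prediction (m_listSelBits[2]) are added instead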
    bidir2Nx2N.sa8dBits = bestME[0].bits + bestME[1].bits + m_listSelBits[2] - (m_listSelBits[0] + m_listSelBits[1]);
    
    // Combine the distortion and the lambda-weighted bit cost into the sa8d-based cost
    bidir2Nx2N.sa8dCost = sa8d + m_rdCost.getCost(bidir2Nx2N.sa8dBits);

    // Is either of the L0/L1 MVs non-zero?
    bool bTryZero = bestME[0].mv.notZero() || bestME[1].mv.notZero();
    if (bTryZero)    // if at least one MV is non-zero
    {
        /* Do not try zero MV if unidir motion predictors are beyond
         * valid search area */
        MV mvmin, mvmax;
        int merange = X265_MAX(m_param->sourceWidth, m_param->sourceHeight);
        // Set the search window [mvmin, mvmax] centred on the zero vector with range merange
        setSearchRange(cu, mvzero, merange, mvmin, mvmax);
        mvmax.y += 2; // there is some pad for subpel refine
        // Convert the window to quarter-pel units so it can be compared against the quarter-pel MVPs
        mvmin <<= 2;
        mvmax <<= 2;
        // Only keep trying the zero MV if both MVPs lie inside the search window
        bTryZero &= bestME[0].mvp.checkRange(mvmin, mvmax);
        bTryZero &= bestME[1].mvp.checkRange(mvmin, mvmax);
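        // MV::checkRange is presumably a simple inclusive bounds test on both components, roughly
        // mvp.x >= mvmin.x && mvp.x <= mvmax.x && mvp.y >= mvmin.y && mvp.y <= mvmax.y
        // (assumption about the helper, noted only to clarify the intent of the check)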
    }
    if (bTryZero)    // at least one L0/L1 MV is non-zero and both MVPs lie inside the zero-centred window
    {
        /* Estimate cost of BIDIR using coincident blocks */
        Yuv& tmpPredYuv = m_rqt[cuGeom.depth].tmpPredYuv;

        int zsa8d;

        // If m_bChromaSa8d and chroma is present
        if (m_bChromaSa8d && (m_csp != X265_CSP_I400 && m_frame->m_fencPic->m_picCsp != X265_CSP_I400))
        {
            // Set both the L0 and L1 MVs to the zero vector
            cu.m_mv[0][0] = mvzero;
            cu.m_mv[1][0] = mvzero;

            // Motion compensation for both luma and chroma
            motionCompensation(cu, pu, tmpPredYuv, true, true);
            // Accumulate the luma and chroma sa8d
            zsa8d  = primitives.cu[partEnum].sa8d(fencYuv.m_buf[0], fencYuv.m_size, tmpPredYuv.m_buf[0], tmpPredYuv.m_size);
            zsa8d += primitives.chroma[m_csp].cu[partEnum].sa8d(fencYuv.m_buf[1], fencYuv.m_csize, tmpPredYuv.m_buf[1], tmpPredYuv.m_csize);
            zsa8d += primitives.chroma[m_csp].cu[partEnum].sa8d(fencYuv.m_buf[2], fencYuv.m_csize, tmpPredYuv.m_buf[2], tmpPredYuv.m_csize);

        }
        else    // luma only
        {
            // Fetch the co-located pixels from the L0 and L1 reference frames, i.e. exactly the zero-MV prediction
            pixel *fref0 = m_slice->m_mref[0][ref0].getLumaAddr(pu.ctuAddr, pu.cuAbsPartIdx);
            pixel *fref1 = m_slice->m_mref[1][ref1].getLumaAddr(pu.ctuAddr, pu.cuAbsPartIdx);
            // Reference luma stride
            intptr_t refStride = m_slice->m_mref[0][0].lumaStride;
            // Average the co-located L0/L1 reference pixels; this is the zero-MV bidir prediction
            primitives.pu[partEnum].pixelavg_pp[(tmpPredYuv.m_size % 64 == 0) && (refStride % 64 == 0)](tmpPredYuv.m_buf[0], tmpPredYuv.m_size, fref0, refStride, fref1, refStride, 32);
            // Compute the luma sa8d
            zsa8d = primitives.cu[partEnum].sa8d(fencYuv.m_buf[0], fencYuv.m_size, tmpPredYuv.m_buf[0], tmpPredYuv.m_size);
        }
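        // The pixelavg_pp primitive used in the branch above presumably reduces, in its C reference
        // form, to a rounded average of the two reference blocks, i.e. dst[i] = (fref0[i] + fref1[i] + 1) >> 1,
        // with the trailing 32 acting as a nominal weight argument (assumption, not verified here)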
        // Bit cost of the L0/L1 motion data with the zero MV: replace each best-MV mvd cost with the zero-MV mvd cost
        uint32_t bits0 = bestME[0].bits - m_me.bitcost(bestME[0].mv, mvp0) + m_me.bitcost(mvzero, mvp0);
        uint32_t bits1 = bestME[1].bits - m_me.bitcost(bestME[1].mv, mvp1) + m_me.bitcost(mvzero, mvp1);
        // Compute the zero-MV cost from the sa8d and the bit costs
        uint32_t zcost = zsa8d + m_rdCost.getCost(bits0) + m_rdCost.getCost(bits1);

        /* refine MVP selection for zero mv, updates: mvp, mvpidx, bits, cost */
        // With the zero MV as the target, re-check the best L0/L1 MVPs
        mvp0 = checkBestMVP(inter2Nx2N.amvpCand[0][ref0], mvzero, mvpIdx0, bits0, zcost);
        mvp1 = checkBestMVP(inter2Nx2N.amvpCand[1][ref1], mvzero, mvpIdx1, bits1, zcost);
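        // checkBestMVP presumably re-evaluates both AMVP candidates for the zero MV and keeps the one
        // with the lower mvd bit cost, updating mvpIdx, bits and cost in place and returning the chosen
        // predictor (hedged description of the helper)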

        // Recompute the total bits and the cost with the updated MVPs
        uint32_t zbits = bits0 + bits1 + m_listSelBits[2] - (m_listSelBits[0] + m_listSelBits[1]);
        zcost = zsa8d + m_rdCost.getCost(zbits);

        // If the zero-MV cost is lower than the bidir cost computed above, adopt the zero MV as the best bidir prediction
        if (zcost < bidir2Nx2N.sa8dCost)
        {
            // Update the bits and cost
            bidir2Nx2N.sa8dBits = zbits;
            bidir2Nx2N.sa8dCost = zcost;

            // Update the L0 MV, mvd and mvpIdx
            cu.setPUMv(0, mvzero, 0, 0);
            cu.m_mvd[0][0] = mvzero - mvp0;
            cu.m_mvpIdx[0][0] = (uint8_t)mvpIdx0;

            // Update the L1 MV, mvd and mvpIdx
            cu.setPUMv(1, mvzero, 0, 0);
            cu.m_mvd[1][0] = mvzero - mvp1;
            cu.m_mvpIdx[1][0] = (uint8_t)mvpIdx1;

            // If m_bChromaSa8d, the real zero-MV MC was already performed above, so just copy the prediction
            if (m_bChromaSa8d) /* real MC was already performed */
                bidir2Nx2N.predYuv.copyFromYuv(tmpPredYuv);
            // Otherwise run motion compensation now with the zero MVs
            else
                motionCompensation(cu, pu, bidir2Nx2N.predYuv, true, m_csp != X265_CSP_I400 && m_frame->m_fencPic->m_picCsp != X265_CSP_I400);
        }
        // The zero-MV cost is not lower, so keep the earlier result; if the chroma-SA8D path already ran
        // the zero-MV MC, cu.m_mv was overwritten and must be restored from bestME
        else if (m_bChromaSa8d && (m_csp != X265_CSP_I400 && m_frame->m_fencPic->m_picCsp != X265_CSP_I400))
        {
            /* recover overwritten motion vectors */
            cu.m_mv[0][0] = bestME[0].mv;
            cu.m_mv[1][0] = bestME[1].mv;
        }
    }
}