xIntraCodingTUBlock()函數是幀內預測函數的一個關鍵函數,裏面進行亮度預測模式的具體實現,以及亮度和色度的殘差的變換以及量化,該函數是亮度預測和色度預測都要用的統一函數,今天抽空講一下該函數的代碼細節。
該函數的主要流程如下:
一、 初始化各種參數,定義一些變量。
二、 如果是亮度分量,則進行亮度分量的預測編碼(MIP模式或者傳統的角度預測)
三、 預測結束之後計算殘差(非聯合模式下,Cb和Cr分量預測塊分別進行殘差計算;若爲聯合模式,首先只對Cb分量預測塊進行殘差計算),這裏殘差的計算需要注意一下,首先定義一個piResi,將原始數據複製到piResi中,copyFrom( piOrg );以便後面減去預測值計算殘差用。然後再減去預測值得到殘差值,subtract(tmpPred)。
四、 對殘差進行變換和量化,大致步驟如下:
- 根據不同的顏色分量選擇合適的QP和Lambda,
- 如果當前顏色分量的殘差是單獨編碼,則對該殘差調用scaleSignal()函數進行適當的微調;
- 針對聯合色度模式,再爲Cr分量預測塊單獨進行一些初始化的操作;
- 如果聯合模式啓用,首先獲取Cr塊的殘差,然後用之前求好的Cb的殘差減去Cr的殘差求平均:jointResi = (cbResi - crResi)/2,此處的操作是通過下面的函數實現的,piResi.subtractAndHalve( crResi );這裏有關聯合色度殘差編碼模式的具體技術細節我早之前的博客講過了,鏈接如下:H.266/VVC相關技術學習筆記:色度殘差聯合編碼技術
五、 如果是聯合模式,則爲了確保聯合模式能夠保留單獨模式在兩個分量的色度塊具有相同的殘差,要將Lambda設置爲更加寬鬆一些,就是設置Lambda更小一些;若不是聯合模式,Lambda的值應該設置大一些
六、 調用m_pcTrQuant->transformNxN()函數對殘差信號進行變換和量化,該函數是殘差信號的變換以及量化的主函數入口。
七、 然後調用m_pcTrQuant->invTransformNxN()函數對變換量化編碼之後的殘差信號進行反量化反變換得到一個解碼的殘差。最後調用piReco.reconstruct()函數計算當前塊的重建值,用於計算失真,在外層函數計算RDcost用。這裏需要注意的是若爲傳統模式,則在外層有兩次Cb和Cr的循環,計算各自分量塊的重建值;若爲聯合模式,則在外層函數只對Cb預測塊進行調用,piResi爲聯合殘差,保存在Cb塊內,只計算Cb塊的重建值,Cr塊的重建值需要在後面單獨計算。
八、 這裏判斷是否是聯合色度模式,如果是,則Cr預測塊的解碼殘差和重建值需要單獨計算,Cr的殘差等於Cb殘差的負值(crResi.copyAndNegate( piResi )),因爲這裏已經是聯合色度模式了,所以piResi裏存的就是聯合殘差。然後再調用crReco.reconstruct(crPred, crResi, cs.slice->clpRng( COMPONENT_Cr ))函數計算Cr塊的重建值。
九、 最後更新當前預測塊的色度分量的失真,用於外層計算RDcost。
以下是該函數的所有代碼,重要的地方我都有詳細的備註:
//這是幀內預測函數的一個關鍵函數,裏面進行亮度預測模式的具體實現,以及亮度和色度的殘差的變換以及量化。亮度預測和色度預測都要用的統一函數
void IntraSearch::xIntraCodingTUBlock(TransformUnit &tu, const ComponentID &compID, const bool &checkCrossCPrediction, Distortion& ruiDist, const int &default0Save1Load2, uint32_t* numSig, std::vector<TrMode>* trModes, const bool loadTr)
{
if (!tu.blocks[compID].valid())
{
return;
}
CodingStructure &cs = *tu.cs;
#if JVET_N0671_RDCOST_FIX
m_pcRdCost->setChromaFormat(cs.sps->getChromaFormatIdc());
#endif
const CompArea &area = tu.blocks[compID];
const SPS &sps = *cs.sps;
const PPS &pps = *cs.pps;
const ChannelType chType = toChannelType(compID);
const int bitDepth = sps.getBitDepth(chType);
PelBuf piOrg = cs.getOrgBuf (area);//圖像原始數據
PelBuf piPred = cs.getPredBuf (area);//圖像預測數據
PelBuf piResi = cs.getResiBuf (area);//圖像殘差數據
PelBuf piOrgResi = cs.getOrgResiBuf(area);//圖像原始殘差數據
PelBuf piReco = cs.getRecoBuf (area);//圖像重建數據
const PredictionUnit &pu = *cs.getPU(area.pos(), chType);
const uint32_t uiChFinalMode = PU::getFinalIntraMode(pu, chType);//最終選中的幀內預測模式
const bool bUseCrossCPrediction = pps.getPpsRangeExtension().getCrossComponentPredictionEnabledFlag() && isChroma( compID ) && PU::isChromaIntraModeCrossCheckMode( pu ) && checkCrossCPrediction;
const bool ccUseRecoResi = m_pcEncCfg->getUseReconBasedCrossCPredictionEstimate();
#if INCLUDE_ISP_CFG_FLAG
const bool ispSplitIsAllowed = sps.getUseISP() && CU::canUseISPSplit( *tu.cu, compID );
#else
const bool ispSplitIsAllowed = CU::canUseISPSplit( *tu.cu, compID );
#endif
//===== init availability pattern =====
#if JVET_N0054_JOINT_CHROMA
//定義聯合CbCr殘差編碼,1爲啓用聯合編碼,0爲關閉
bool jointCbCr = tu.jointCbCr && compID == COMPONENT_Cb;
//各種亮度預測編碼
if ( compID == COMPONENT_Y )
{
#endif
PelBuf sharedPredTS( m_pSharedPredTransformSkip[compID], area );
if( default0Save1Load2 != 2 )
{
initIntraPatternChType( *tu.cu, area );
//===== get prediction signal =====
//獲取預測信號,得到對應色度候選模式的預測值
if( compID != COMPONENT_Y && PU::isLMCMode( uiChFinalMode ) )
{
{
xGetLumaRecPixels( pu, area );
}
predIntraChromaLM( compID, piPred, pu, area, uiChFinalMode );
}
else
{
#if JVET_N0217_MATRIX_INTRAPRED
if( PU::isMIP( pu, chType ) )
{
predIntraMip( compID, piPred, pu );
}
else
{
#endif
predIntraAng( compID, piPred, pu );
#if JVET_N0217_MATRIX_INTRAPRED
}
#endif
}
// save prediction
//保存預測塊的信息,包括預測值
if( default0Save1Load2 == 1 )
{
sharedPredTS.copyFrom( piPred );
}
}
else
{
// load prediction
piPred.copyFrom( sharedPredTS );
}
#if JVET_N0054_JOINT_CHROMA
}
#endif
DTRACE( g_trace_ctx, D_PRED, "@(%4d,%4d) [%2dx%2d] IMode=%d\n", tu.lx(), tu.ly(), tu.lwidth(), tu.lheight(), uiChFinalMode );
//DTRACE_PEL_BUF( D_PRED, piPred, tu, tu.cu->predMode, COMPONENT_Y );
const Slice &slice = *cs.slice;
bool flag = slice.getReshapeInfo().getUseSliceReshaper() && (slice.isIntra() || (!slice.isIntra() && m_pcReshape->getCTUFlag()));
//如果色度的Reshape被激活
if (flag && slice.getReshapeInfo().getSliceReshapeChromaAdj() && isChroma(compID))
{
const Area area = tu.Y().valid() ? tu.Y() : Area(recalcPosition(tu.chromaFormat, tu.chType, CHANNEL_TYPE_LUMA, tu.blocks[tu.chType].pos()), recalcSize(tu.chromaFormat, tu.chType, CHANNEL_TYPE_LUMA, tu.blocks[tu.chType].size()));
const CompArea &areaY = CompArea(COMPONENT_Y, tu.chromaFormat, area );
PelBuf piPredY;
piPredY = cs.picture->getPredBuf(areaY);
const Pel avgLuma = piPredY.computeAvg();
//定義adj是色度殘差的規模
int adj = m_pcReshape->calculateChromaAdj(avgLuma);
tu.setChromaAdj(adj);
}
//獲取殘差信號
//===== get residual signal =====
piResi.copyFrom( piOrg );//首先將原始數據複製到piResi中,以便後面減去預測值計算殘差用
//如果是亮度TU
if (slice.getReshapeInfo().getUseSliceReshaper() && m_pcReshape->getCTUFlag() && compID==COMPONENT_Y)
{
CompArea tmpArea(COMPONENT_Y, area.chromaFormat, Position(0, 0), area.size());
PelBuf tmpPred = m_tmpStorageLCU.getBuf(tmpArea);
tmpPred.copyFrom(piPred);
piResi.rspSignal(m_pcReshape->getFwdLUT());
//減去預測值計算殘差
piResi.subtract(tmpPred);
}
else//如果是色度TU
//減去預測值計算殘差
piResi.subtract( piPred );
if (pps.getPpsRangeExtension().getCrossComponentPredictionEnabledFlag() && isLuma(compID))
{
piOrgResi.copyFrom (piResi);//piOrgResi和piResi一樣
}
if (bUseCrossCPrediction)
{
if (xCalcCrossComponentPredictionAlpha(tu, compID, ccUseRecoResi) == 0)
{
return;
}
CrossComponentPrediction::crossComponentPrediction(tu, compID, cs.getResiBuf(tu.Y()), piResi, piResi, false);
}
//對殘差進行變換和量化
//===== transform and quantization =====
//爲RDOQ初始化估計數組
//--- init rate estimation arrays for RDOQ ---
//--- transform and quantization ---
TCoeff uiAbsSum = 0;//變換量化後的殘差信號的總和
const QpParam cQP(tu, compID);//定義色度的量化參數
#if RDOQ_CHROMA_LAMBDA
m_pcTrQuant->selectLambda(compID);//根據不同的顏色分量選擇合適的Lambda
#endif
flag =flag && (tu.blocks[compID].width*tu.blocks[compID].height > 4);
if (flag && isChroma(compID) && slice.getReshapeInfo().getSliceReshapeChromaAdj() )
{
int cResScaleInv = tu.getChromaAdj();
double cResScale = round((double)(1 << CSCALE_FP_PREC) / (double)cResScaleInv);
m_pcTrQuant->setLambda(m_pcTrQuant->getLambda() / (cResScale*cResScale));
#if JVET_N0054_JOINT_CHROMA
//如果聯合色度關閉
if ( !jointCbCr ) // Joint CbCr signal is to be scaled in the case of joint chroma
//聯合色度殘差的情況下,CbCr的殘差將會被縮減
#endif
//縮減殘差信號
piResi.scaleSignal(cResScaleInv, 1, tu.cu->cs->slice->clpRng(compID));
}
#if JVET_N0054_JOINT_CHROMA
//定義Cr預測塊的預測區域、原始Cr值、預測值、殘差值、重建值
const CompArea &crArea = tu.blocks [ COMPONENT_Cr ];
PelBuf crOrg = cs.getOrgBuf ( crArea );
PelBuf crPred = cs.getPredBuf ( crArea );
PelBuf crResi = cs.getResiBuf ( crArea );
PelBuf crReco = cs.getRecoBuf ( crArea );
//聯合色度模式開啓
if ( jointCbCr )
{
// Get Cr prediction and residual、
//獲取Cr的預測值和殘差
crResi.copyFrom( crOrg );
crResi.subtract( crPred );
// Create joint residual and store it for Cb component: jointResi = (cbResi - crResi)/2
//定義CbCr聯合殘差並且將其存儲到Cb分量的預測塊中,jointResi = (cbResi - crResi)/2
//殘差相減並且求平均
piResi.subtractAndHalve( crResi );
// Scale the joint signal
//縮放聯合殘差信號
if ( flag && slice.getReshapeInfo().getSliceReshapeChromaAdj() )
piResi.scaleSignal(tu.getChromaAdj(), 1, tu.cu->cs->slice->clpRng(compID));
// Lambda is loosened for the joint mode with respect to single modes as the same residual is used for both chroma blocks
//爲了確保聯合模式保留單獨模式在兩個分量的色度塊具有相同的殘差這樣的特性,要將Lambda設置爲更加寬鬆一些,就是設置Lambda更小一些
m_pcTrQuant->setLambda( 0.60 * m_pcTrQuant->getLambda() );//計算setLambda的值並返回,該Lambda
}
else if ( isChroma(compID) && tu.cu->cs->slice->getSliceQp() > 18 )//如果不是聯合模式,且是色度分量且QP>18
m_pcTrQuant->setLambda( 1.10 * m_pcTrQuant->getLambda());//計算setLambda的值並返回,Lambda的值設置大一些
#endif
double diagRatio = 0, horVerRatio = 0;
if( trModes )//具體用什麼變換模式
{
//殘差信號的變換以及量化的主函數入口
m_pcTrQuant->transformNxN( tu, compID, cQP, trModes, CU::isIntra( *tu.cu ) ? m_pcEncCfg->getIntraMTSMaxCand() : m_pcEncCfg->getInterMTSMaxCand(), ispSplitIsAllowed ? &diagRatio : nullptr, ispSplitIsAllowed ? &horVerRatio : nullptr );
tu.mtsIdx = trModes->at(0).first;
}
m_pcTrQuant->transformNxN( tu, compID, cQP, uiAbsSum, m_CABACEstimator->getCtx(), loadTr, &diagRatio, &horVerRatio );
#if INCLUDE_ISP_CFG_FLAG
if ( !tu.cu->ispMode && isLuma(compID) && ispSplitIsAllowed && tu.mtsIdx == MTS_DCT2_DCT2 && ispSplitIsAllowed )
#else
if ( !tu.cu->ispMode && isLuma(compID) && ispSplitIsAllowed && tu.mtsIdx == MTS_DCT2_DCT2 )
#endif
{
m_intraModeDiagRatio .push_back(diagRatio);
m_intraModeHorVerRatio .push_back(horVerRatio);
m_intraModeTestedNormalIntra.push_back((int)uiChFinalMode);
}
DTRACE( g_trace_ctx, D_TU_ABS_SUM, "%d: comp=%d, abssum=%d\n", DTRACE_GET_COUNTER( g_trace_ctx, D_TU_ABS_SUM ), compID, uiAbsSum );
//--- inverse transform ---
if (uiAbsSum > 0)//若果變換量化後的信號總和存在,則反量化反變換
{
//反量化反變換主函數
m_pcTrQuant->invTransformNxN(tu, compID, piResi, cQP);
}
else//否則初始化爲0
{
//初始化殘差信號爲0
piResi.fill(0);
}
//===== reconstruction =====
//在解碼器端計算重建值
if (flag && uiAbsSum > 0 && isChroma(compID) && slice.getReshapeInfo().getSliceReshapeChromaAdj() )
{
//對解碼後的殘差信號進行微調
piResi.scaleSignal(tu.getChromaAdj(), 0, tu.cu->cs->slice->clpRng(compID));
}
if (bUseCrossCPrediction)
{
CrossComponentPrediction::crossComponentPrediction(tu, compID, cs.getResiBuf(tu.Y()), piResi, piResi, true);
}
if (slice.getReshapeInfo().getUseSliceReshaper() && m_pcReshape->getCTUFlag() && compID == COMPONENT_Y)
{
CompArea tmpArea(COMPONENT_Y, area.chromaFormat, Position(0,0), area.size());
PelBuf tmpPred = m_tmpStorageLCU.getBuf(tmpArea);
tmpPred.copyFrom(piPred);//拿到預測信號,用於重建信號的計算
//根據compID計算單個色度分量的重建圖像數據(若爲傳統模式,則在外層有兩次Cb和Cr的循環,計算各自分量的重建值;
// 若爲聯合模式,則在外層函數只對Cb預測塊進行調用,piResi爲聯合殘差,保存在Cb塊內,只計算Cb塊的重建值)
piReco.reconstruct(tmpPred, piResi, cs.slice->clpRng(compID));//殘差+預測值
}
else
piReco.reconstruct(piPred, piResi, cs.slice->clpRng( compID ));
#if JVET_N0054_JOINT_CHROMA
if ( jointCbCr )//如果是聯合模式,則Cr預測塊的解碼殘差和重建值需要單獨計算
{
// Cr uses negative of the signalled Cb residual
if (uiAbsSum > 0)//Cr的殘差等於Cb殘差的負值,因爲這裏已經是聯合色度模式了,所以piResi裏存的就是聯合殘差
crResi.copyAndNegate( piResi );
else
crResi.fill(0);
tu.getCoeffs(COMPONENT_Cr).fill(0);
// Set cbf also for Cr
TU::setCbfAtDepth (tu, COMPONENT_Cr, tu.depth, uiAbsSum > 0 ? true : false);
// Cr reconstruction and its contribution to the total error
//計算Cr預測塊的重建值
crReco.reconstruct(crPred, crResi, cs.slice->clpRng( COMPONENT_Cr ));
#if WCG_EXT
if ( m_pcEncCfg->getLumaLevelToDeltaQPMapping().isEnabled() ||
(m_pcEncCfg->getReshaper()
&& slice.getReshapeInfo().getUseSliceReshaper()
&& (m_pcReshape->getCTUFlag() || (isChroma(compID) && m_pcEncCfg->getReshapeIntraCMD()))))
{
const CPelBuf orgLuma = cs.getOrgBuf( cs.area.blocks[COMPONENT_Y] );
ruiDist += m_pcRdCost->getDistPart( crOrg, crReco, bitDepth, COMPONENT_Cr, DF_SSE_WTD, &orgLuma );
}
else
#endif
{
ruiDist += m_pcRdCost->getDistPart( crOrg, crReco, bitDepth, COMPONENT_Cr, DF_SSE );
}
}
#endif
//===== update distortion =====
#if WCG_EXT
if (m_pcEncCfg->getLumaLevelToDeltaQPMapping().isEnabled() || (m_pcEncCfg->getReshaper()
&& slice.getReshapeInfo().getUseSliceReshaper() && (m_pcReshape->getCTUFlag() || (isChroma(compID) && m_pcEncCfg->getReshapeIntraCMD()))))
{
const CPelBuf orgLuma = cs.getOrgBuf( cs.area.blocks[COMPONENT_Y] );
if (compID == COMPONENT_Y && !(m_pcEncCfg->getLumaLevelToDeltaQPMapping().isEnabled()))
{
CompArea tmpArea1(COMPONENT_Y, area.chromaFormat, Position(0, 0), area.size());
PelBuf tmpRecLuma = m_tmpStorageLCU.getBuf(tmpArea1);
tmpRecLuma.copyFrom(piReco);
tmpRecLuma.rspSignal(m_pcReshape->getInvLUT());
ruiDist += m_pcRdCost->getDistPart(piOrg, tmpRecLuma, sps.getBitDepth(toChannelType(compID)), compID, DF_SSE_WTD, &orgLuma);
}
else
ruiDist += m_pcRdCost->getDistPart(piOrg, piReco, bitDepth, compID, DF_SSE_WTD, &orgLuma);
}
else
#endif
{
ruiDist += m_pcRdCost->getDistPart( piOrg, piReco, bitDepth, compID, DF_SSE );
}
}