Analysis of motionCompensation() in x265 (version 2.8)


1. Motion Compensation:


The basic principle of motion compensation is as follows: when the encoder processes the Nth frame of a picture sequence, it uses motion estimation (ME), the core technique of motion compensation, to obtain a predicted frame N′ for that frame. In actual encoding and transmission, the Nth frame itself is not necessarily transmitted; instead, the difference Δ between the Nth frame and its predicted frame N′ is transmitted. When motion estimation works well, the values in Δ are distributed mostly around zero, so Δ has far less energy than the original Nth frame, and far fewer bits are needed to encode and transmit Δ.
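
As a concrete illustration, here is a minimal sketch (my own example, not x265 code) of computing the residual Δ for one block, assuming 8-bit samples, an integer-pel motion vector (mvx, mvy), and a reference pointer that stays inside the picture:

#include <cstdint>

// Hypothetical helper for illustration: the prediction block N' is the reference
// block displaced by the motion vector; only the difference (cur - pred) would be
// transformed, quantized, and transmitted.
void blockResidual(const uint8_t* cur, const uint8_t* ref, int stride,
                   int width, int height, int mvx, int mvy, int16_t* residual)
{
    const uint8_t* pred = ref + mvy * stride + mvx;   // motion-compensated prediction block
    for (int y = 0; y < height; y++)
        for (int x = 0; x < width; x++)
            residual[y * width + x] = (int16_t)cur[y * stride + x] - (int16_t)pred[y * stride + x];
}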

2. Annotated Source Analysis:

/*
 =============Analysed by: yangxin
 =============Date:        2018.10
 =============Function:    motionCompensation()  
*/
void Predict::motionCompensation(const CUData& cu, const PredictionUnit& pu, Yuv& predYuv, bool bLuma, bool bChroma)
{
    int refIdx0 = cu.m_refIdx[0][pu.puAbsPartIdx];    // reference index in list 0 (negative if the list is unused)
    int refIdx1 = cu.m_refIdx[1][pu.puAbsPartIdx];    // reference index in list 1 (negative if the list is unused)

    if (cu.m_slice->isInterP())
    {
        /* P Slice */
        WeightValues wv0[3];

        X265_CHECK(refIdx0 >= 0, "invalid P refidx\n");
        X265_CHECK(refIdx0 < cu.m_slice->m_numRefIdx[0], "P refidx out of range\n");
        const WeightParam *wp0 = cu.m_slice->m_weightPredTable[0][refIdx0];

        MV mv0 = cu.m_mv[0][pu.puAbsPartIdx];
        cu.clipMv(mv0);    // clip the motion vector to the valid picture area

        if (cu.m_slice->m_pps->bUseWeightPred && wp0->bPresentFlag)
        {
            /* weighted uni-prediction: interpolate into a 16-bit ShortYuv buffer, then apply weights */
            for (int plane = 0; plane < (bChroma ? 3 : 1); plane++)
            {
                wv0[plane].w = wp0[plane].inputWeight;
                wv0[plane].offset = wp0[plane].inputOffset * (1 << (X265_DEPTH - 8));
                wv0[plane].shift = wp0[plane].log2WeightDenom;
                wv0[plane].round = wp0[plane].log2WeightDenom >= 1 ? 1 << (wp0[plane].log2WeightDenom - 1) : 0;
            }

            ShortYuv& shortYuv = m_predShortYuv[0];

            if (bLuma)
                predInterLumaShort(pu, shortYuv, *cu.m_slice->m_refReconPicList[0][refIdx0], mv0);
            if (bChroma)
                predInterChromaShort(pu, shortYuv, *cu.m_slice->m_refReconPicList[0][refIdx0], mv0);

            addWeightUni(pu, predYuv, shortYuv, wv0, bLuma, bChroma);
        }
        else
        {
            /* ordinary uni-prediction: interpolate directly into the pixel-domain predYuv */
            if (bLuma)
                predInterLumaPixel(pu, predYuv, *cu.m_slice->m_refReconPicList[0][refIdx0], mv0);
            if (bChroma)
                predInterChromaPixel(pu, predYuv, *cu.m_slice->m_refReconPicList[0][refIdx0], mv0);
        }
    }
    else
    {
        /* B Slice */

        WeightValues wv0[3], wv1[3];
        const WeightParam *pwp0, *pwp1;

        X265_CHECK(refIdx0 < cu.m_slice->m_numRefIdx[0], "bidir refidx0 out of range\n");
        X265_CHECK(refIdx1 < cu.m_slice->m_numRefIdx[1], "bidir refidx1 out of range\n");

        if (cu.m_slice->m_pps->bUseWeightedBiPred)
        {
            pwp0 = refIdx0 >= 0 ? cu.m_slice->m_weightPredTable[0][refIdx0] : NULL;
            pwp1 = refIdx1 >= 0 ? cu.m_slice->m_weightPredTable[1][refIdx1] : NULL;

            if (pwp0 && pwp1 && (pwp0->bPresentFlag || pwp1->bPresentFlag))
            {
                /* biprediction weighting */
                for (int plane = 0; plane < (bChroma ? 3 : 1); plane++)
                {
                    wv0[plane].w = pwp0[plane].inputWeight;
                    wv0[plane].o = pwp0[plane].inputOffset * (1 << (X265_DEPTH - 8));
                    wv0[plane].shift = pwp0[plane].log2WeightDenom;
                    wv0[plane].round = 1 << pwp0[plane].log2WeightDenom;

                    wv1[plane].w = pwp1[plane].inputWeight;
                    wv1[plane].o = pwp1[plane].inputOffset * (1 << (X265_DEPTH - 8));
                    wv1[plane].shift = wv0[plane].shift;
                    wv1[plane].round = wv0[plane].round;
                }
            }
            else
            {
                /* uniprediction weighting, always outputs to wv0 */
                const WeightParam* pwp = (refIdx0 >= 0) ? pwp0 : pwp1;
                for (int plane = 0; plane < (bChroma ? 3 : 1); plane++)
                {
                    wv0[plane].w = pwp[plane].inputWeight;
                    wv0[plane].offset = pwp[plane].inputOffset * (1 << (X265_DEPTH - 8));
                    wv0[plane].shift = pwp[plane].log2WeightDenom;
                    wv0[plane].round = pwp[plane].log2WeightDenom >= 1 ? 1 << (pwp[plane].log2WeightDenom - 1) : 0;
                }
            }
        }
        else
            pwp0 = pwp1 = NULL;

        if (refIdx0 >= 0 && refIdx1 >= 0)
        {
            /* true bi-prediction: both reference lists are used */
            MV mv0 = cu.m_mv[0][pu.puAbsPartIdx];
            MV mv1 = cu.m_mv[1][pu.puAbsPartIdx];
            cu.clipMv(mv0);
            cu.clipMv(mv1);

            if (bLuma)
            {
                predInterLumaShort(pu, m_predShortYuv[0], *cu.m_slice->m_refReconPicList[0][refIdx0], mv0);
                predInterLumaShort(pu, m_predShortYuv[1], *cu.m_slice->m_refReconPicList[1][refIdx1], mv1);
            }
            if (bChroma)
            {
                predInterChromaShort(pu, m_predShortYuv[0], *cu.m_slice->m_refReconPicList[0][refIdx0], mv0);
                predInterChromaShort(pu, m_predShortYuv[1], *cu.m_slice->m_refReconPicList[1][refIdx1], mv1);
            }

            if (pwp0 && pwp1 && (pwp0->bPresentFlag || pwp1->bPresentFlag))
                addWeightBi(pu, predYuv, m_predShortYuv[0], m_predShortYuv[1], wv0, wv1, bLuma, bChroma);
            else
                predYuv.addAvg(m_predShortYuv[0], m_predShortYuv[1], pu.puAbsPartIdx, pu.width, pu.height, bLuma, bChroma);
        }
        else if (refIdx0 >= 0)
        {
            /* uniprediction from list 0 */
            MV mv0 = cu.m_mv[0][pu.puAbsPartIdx];
            cu.clipMv(mv0);

            if (pwp0 && pwp0->bPresentFlag)
            {
                ShortYuv& shortYuv = m_predShortYuv[0];

                if (bLuma)
                    predInterLumaShort(pu, shortYuv, *cu.m_slice->m_refReconPicList[0][refIdx0], mv0);
                if (bChroma)
                    predInterChromaShort(pu, shortYuv, *cu.m_slice->m_refReconPicList[0][refIdx0], mv0);

                addWeightUni(pu, predYuv, shortYuv, wv0, bLuma, bChroma);
            }
            else
            {
                if (bLuma)
                    predInterLumaPixel(pu, predYuv, *cu.m_slice->m_refReconPicList[0][refIdx0], mv0);
                if (bChroma)
                    predInterChromaPixel(pu, predYuv, *cu.m_slice->m_refReconPicList[0][refIdx0], mv0);
            }
        }
        else
        {
            MV mv1 = cu.m_mv[1][pu.puAbsPartIdx];
            cu.clipMv(mv1);

            /* uniprediction to L1 */
            X265_CHECK(refIdx1 >= 0, "refidx1 was not positive\n");

            if (pwp1 && pwp1->bPresentFlag)
            {
                ShortYuv& shortYuv = m_predShortYuv[0];

                if (bLuma)
                    predInterLumaShort(pu, shortYuv, *cu.m_slice->m_refReconPicList[1][refIdx1], mv1);
                if (bChroma)
                    predInterChromaShort(pu, shortYuv, *cu.m_slice->m_refReconPicList[1][refIdx1], mv1);

                addWeightUni(pu, predYuv, shortYuv, wv0, bLuma, bChroma);
            }
            else
            {
                if (bLuma)
                    predInterLumaPixel(pu, predYuv, *cu.m_slice->m_refReconPicList[1][refIdx1], mv1);
                if (bChroma)
                    predInterChromaPixel(pu, predYuv, *cu.m_slice->m_refReconPicList[1][refIdx1], mv1);
            }
        }
    }
}