Commit 00a46387 authored by Frank Bossen

Merge branch 'refactor_mv' into 'master'

Refactor of MV related code

See merge request jvet/VVCSoftware_VTM!530
parents 0a7f3c89 4c9b8420
Pipeline #1506 passed
......@@ -477,6 +477,11 @@ static constexpr int MV_MANTISSA_BITCOUNT = 6;
static constexpr int MV_MANTISSA_UPPER_LIMIT = ((1 << (MV_MANTISSA_BITCOUNT - 1)) - 1);
static constexpr int MV_MANTISSA_LIMIT = (1 << (MV_MANTISSA_BITCOUNT - 1));
static constexpr int MV_EXPONENT_MASK = ((1 << MV_EXPONENT_BITCOUNT) - 1);
static constexpr int MV_BITS = 18;
static constexpr int MV_MAX = (1 << (MV_BITS - 1)) - 1;
static constexpr int MV_MIN = -(1 << (MV_BITS - 1));
static const int PIC_ANALYZE_CW_BINS = 32;
static const int PIC_CODE_CW_BINS = 16;
#if JVET_N0220_LMCS_SIMPLIFICATION
......
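With MV_BITS = 18, the new MV_MAX/MV_MIN constants evaluate to 131071 and -131072, i.e. the literal bounds that the scaleMv hunk further down replaces. A standalone sketch, not part of the patch, that checks this arithmetic and uses the Clip3(min, max, x) convention seen in that hunk:

// Standalone sketch (not from the patch): the 18-bit signed MV storage range.
#include <algorithm>
#include <cstdio>

static constexpr int MV_BITS = 18;
static constexpr int MV_MAX  = (1 << (MV_BITS - 1)) - 1;   //  131071
static constexpr int MV_MIN  = -(1 << (MV_BITS - 1));      // -131072

static_assert(MV_MAX ==  131071, "matches the literal replaced in scaleMv");
static_assert(MV_MIN == -131072, "matches the literal replaced in scaleMv");

// Clip3(min, max, x) as used in the scaleMv hunk: clamp x into [min, max].
template <typename T> T Clip3(T minVal, T maxVal, T x)
{
  return std::min(std::max(x, minVal), maxVal);
}

int main()
{
  std::printf("clip 200000 -> %d\n", Clip3(MV_MIN, MV_MAX, 200000));  // prints 131071
}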
......@@ -41,6 +41,8 @@
#include "Slice.h"
const MvPrecision Mv::m_amvrPrecision[3] = { MV_PRECISION_QUARTER, MV_PRECISION_INT, MV_PRECISION_4PEL }; // for cu.imv=0, 1 and 2
const MvPrecision Mv::m_amvrPrecAffine[3] = { MV_PRECISION_QUARTER, MV_PRECISION_SIXTEENTH, MV_PRECISION_INT }; // for cu.imv=0, 1 and 2
const MvPrecision Mv::m_amvrPrecIbc[3] = { MV_PRECISION_INT, MV_PRECISION_INT, MV_PRECISION_4PEL }; // for cu.imv=0, 1 and 2
void roundAffineMv( int& mvx, int& mvy, int nShift )
{
......
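roundAffineMv appears here only by its signature, but the rounding pattern used throughout this merge request (see the (hor + offset - (hor >= 0)) >> i lines below) is round-to-nearest with ties toward zero. A standalone sketch of that pattern, as an assumption about what roundAffineMv does per component rather than a quote of its body:

// Sketch: round-to-nearest with ties toward zero, the pattern visible in the
// MV rounding hunks of this merge request. Assumed, not quoted, behaviour of
// roundAffineMv for a single component.
#include <cassert>

void roundMvComponent(int &v, int nShift)
{
  const int offset = 1 << (nShift - 1);
  v = (v + offset - (v >= 0)) >> nShift;
}

int main()
{
  int a =  10; roundMvComponent(a, 2); assert(a ==  2);  //  2.5  ->  2 (tie toward zero)
  int b = -10; roundMvComponent(b, 2); assert(b == -2);  // -2.5  -> -2 (tie toward zero)
  int c =  11; roundMvComponent(c, 2); assert(c ==  3);  //  2.75 ->  3
}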
......@@ -53,7 +53,8 @@ enum MvPrecision
MV_PRECISION_INT = 2, // 1-pel, shift 2 bits from 4-pel
MV_PRECISION_HALF = 3, // 1/2-pel
MV_PRECISION_QUARTER = 4, // 1/4-pel (the precision of regular MV difference signaling), shift 4 bits from 4-pel
MV_PRECISION_INTERNAL = 6, // 1/16-pel (the precision of internal MV), shift 6 bits from 4-pel
MV_PRECISION_SIXTEENTH = 6, // 1/16-pel (the precision of internal MV), shift 6 bits from 4-pel
MV_PRECISION_INTERNAL = 2 + MV_FRACTIONAL_BITS_INTERNAL,
};
/// basic motion vector class
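Each MvPrecision value is a bit count relative to 4-pel, so renaming the 1/16-pel entry to MV_PRECISION_SIXTEENTH and defining MV_PRECISION_INTERNAL as 2 + MV_FRACTIONAL_BITS_INTERNAL keeps both at 6 while removing a magic number. Converting between two precisions then amounts to shifting by the difference of the enum values; a minimal sketch under that assumption (the rounding mirrors the pattern shown above):

// Minimal sketch, assuming changePrecision shifts by the difference of the
// MvPrecision enum values ("shift N bits from 4-pel"). Not the VTM body.
#include <cassert>

enum MvPrecisionSketch
{
  PREC_4PEL      = 0,
  PREC_INT       = 2,
  PREC_QUARTER   = 4,
  PREC_SIXTEENTH = 6,  // also the internal precision: 2 + 4 fractional bits
};

int changePrecisionSketch(int v, MvPrecisionSketch src, MvPrecisionSketch dst)
{
  const int shift = static_cast<int>(dst) - static_cast<int>(src);
  if (shift >= 0)
  {
    return v << shift;                             // towards finer precision: scale up
  }
  const int rightShift = -shift;
  const int offset     = 1 << (rightShift - 1);
  return (v + offset - (v >= 0)) >> rightShift;    // towards coarser precision: round
}

int main()
{
  // 5 luma samples are 80 units at 1/16-pel and 20 units at 1/4-pel.
  assert(changePrecisionSketch(80, PREC_SIXTEENTH, PREC_QUARTER) == 20);
  assert(changePrecisionSketch(20, PREC_QUARTER, PREC_SIXTEENTH) == 80);
}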
......@@ -61,9 +62,12 @@ class Mv
{
private:
static const MvPrecision m_amvrPrecision[3];
static const MvPrecision m_amvrPrecAffine[3];
static const MvPrecision m_amvrPrecIbc[3];
#if JVET_N0334_MVCLIPPING
static const int mvClipPeriod = (1 << 18);
static const int halMvClipPeriod = (1 << 17);
static const int mvClipPeriod = (1 << MV_BITS);
static const int halMvClipPeriod = (1 << (MV_BITS - 1));
#endif
public:
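The clip-period constants are now derived from MV_BITS. Their names suggest that mvCliptoStorageBitDepth() wraps a component into the 18-bit range modulo mvClipPeriod rather than saturating it; a hedged sketch of such wrap-around behaviour (an assumption, not the VTM body):

// Hedged sketch: wrap an MV component into [-halMvClipPeriod, halMvClipPeriod)
// using the period constants above; assumed behaviour of mvCliptoStorageBitDepth().
#include <cassert>

static const int MV_BITS_SKETCH  = 18;
static const int mvClipPeriod    = 1 << MV_BITS_SKETCH;        // 262144
static const int halMvClipPeriod = 1 << (MV_BITS_SKETCH - 1);  // 131072

int wrapToStorageRange(int v)
{
  v = (v + mvClipPeriod) & (mvClipPeriod - 1);           // reduce modulo the period
  return (v >= halMvClipPeriod) ? v - mvClipPeriod : v;  // back to the signed range
}

int main()
{
  assert(wrapToStorageRange( 131071) ==  131071);  // MV_MAX is unchanged
  assert(wrapToStorageRange( 131072) == -131072);  // one past MV_MAX wraps to MV_MIN
  assert(wrapToStorageRange(-131073) ==  131071);  // one below MV_MIN wraps to MV_MAX
}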
......@@ -158,7 +162,7 @@ public:
const int offset = (1 << (i - 1));
hor = (hor + offset - (hor >= 0)) >> i;
ver = (ver + offset - (ver >= 0)) >> i;
}
}
#else
hor >>= i;
ver >>= i;
......@@ -189,11 +193,11 @@ public:
const Mv scaleMv( int iScale ) const
{
#if JVET_N0335_N0085_MV_ROUNDING
const int mvx = Clip3(-131072, 131071, (iScale * getHor() + 128 - (iScale * getHor() >= 0)) >> 8);
const int mvy = Clip3(-131072, 131071, (iScale * getVer() + 128 - (iScale * getVer() >= 0)) >> 8);
const int mvx = Clip3(MV_MIN, MV_MAX, (iScale * getHor() + 128 - (iScale * getHor() >= 0)) >> 8);
const int mvy = Clip3(MV_MIN, MV_MAX, (iScale * getVer() + 128 - (iScale * getVer() >= 0)) >> 8);
#else
const int mvx = Clip3( -131072, 131071, (iScale * getHor() + 127 + (iScale * getHor() < 0)) >> 8 );
const int mvy = Clip3( -131072, 131071, (iScale * getVer() + 127 + (iScale * getVer() < 0)) >> 8 );
const int mvx = Clip3(MV_MIN, MV_MAX, (iScale * getHor() + 127 + (iScale * getHor() < 0)) >> 8);
const int mvy = Clip3(MV_MIN, MV_MAX, (iScale * getVer() + 127 + (iScale * getVer() < 0)) >> 8);
#endif
return Mv( mvx, mvy );
}
......@@ -219,22 +223,61 @@ public:
}
}
void changePrecisionAmvr(const int amvr, const MvPrecision& dst)
{
changePrecision(m_amvrPrecision[amvr], dst);
}
void roundToPrecision(const MvPrecision& src, const MvPrecision& dst)
{
changePrecision(src, dst);
changePrecision(dst, src);
}
void roundToAmvrSignalPrecision(const MvPrecision& src, const int amvr)
// translational MV
void changeTransPrecInternal2Amvr(const int amvr)
{
changePrecision(MV_PRECISION_INTERNAL, m_amvrPrecision[amvr]);
}
void changeTransPrecAmvr2Internal(const int amvr)
{
changePrecision(m_amvrPrecision[amvr], MV_PRECISION_INTERNAL);
}
void roundTransPrecInternal2Amvr(const int amvr)
{
roundToPrecision(src, m_amvrPrecision[amvr]);
roundToPrecision(MV_PRECISION_INTERNAL, m_amvrPrecision[amvr]);
}
// affine MV
void changeAffinePrecInternal2Amvr(const int amvr)
{
changePrecision(MV_PRECISION_INTERNAL, m_amvrPrecAffine[amvr]);
}
void changeAffinePrecAmvr2Internal(const int amvr)
{
changePrecision(m_amvrPrecAffine[amvr], MV_PRECISION_INTERNAL);
}
void roundAffinePrecInternal2Amvr(const int amvr)
{
roundToPrecision(MV_PRECISION_INTERNAL, m_amvrPrecAffine[amvr]);
}
// IBC block vector
void changeIbcPrecInternal2Amvr(const int amvr)
{
changePrecision(MV_PRECISION_INTERNAL, m_amvrPrecIbc[amvr]);
}
void changeIbcPrecAmvr2Internal(const int amvr)
{
changePrecision(m_amvrPrecIbc[amvr], MV_PRECISION_INTERNAL);
}
void roundIbcPrecInternal2Amvr(const int amvr)
{
roundToPrecision(MV_PRECISION_INTERNAL, m_amvrPrecIbc[amvr]);
}
Mv getSymmvdMv(const Mv& curMvPred, const Mv& tarMvPred)
{
return Mv(tarMvPred.hor - hor + curMvPred.hor, tarMvPred.ver - ver + curMvPred.ver);
......
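The renamed wrappers encode both the tool (Trans/Affine/Ibc) and the conversion direction (Internal2Amvr vs Amvr2Internal) in the method name, and the round*PrecInternal2Amvr variants snap an internal-precision MV to the AMVR signalling grid while keeping it at internal precision, by changing precision down and then back up. A self-contained sketch of that composition for one component:

// Sketch: quantise an internal 1/16-pel component to a coarser AMVR grid and
// return it still in internal units, i.e. changePrecision(internal -> amvr)
// followed by changePrecision(amvr -> internal). Illustration, not the VTM body.
#include <cassert>

int roundInternalToGrid(int v, int gridShift)  // gridShift = internal precision - AMVR precision
{
  const int offset  = 1 << (gridShift - 1);
  const int rounded = (v + offset - (v >= 0)) >> gridShift;  // down to the coarse grid
  return rounded << gridShift;                               // back up to internal units
}

int main()
{
  // 23 internal units (1.4375 samples) snapped to the integer-pel grid (shift 6 - 2 = 4)
  // become 16, exactly one luma sample; snapped to the quarter-pel grid (shift 2), 24.
  assert(roundInternalToGrid(23, 4) == 16);
  assert(roundInternalToGrid(23, 2) == 24);
}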
......@@ -180,7 +180,6 @@ namespace PU
void getAffineMergeCand( const PredictionUnit &pu, AffineMergeCtx& affMrgCtx, const int mrgCandIdx = -1 );
void setAllAffineMvField ( PredictionUnit &pu, MvField *mvField, RefPicList eRefList );
void setAllAffineMv ( PredictionUnit &pu, Mv affLT, Mv affRT, Mv affLB, RefPicList eRefList
, bool setHighPrec = false
#if JVET_N0334_MVCLIPPING
, bool clipCPMVs = false
#endif
......
......@@ -758,32 +758,28 @@ void DecCu::xDeriveCUMV( CodingUnit &cu )
// Mv mv[3];
CHECK( pu.refIdx[eRefList] < 0, "Unexpected negative refIdx." );
const int imvShift = ( !cu.cs->pcv->isEncoder && pu.cu->imv == 2 ) ? MV_FRACTIONAL_BITS_DIFF : 0;
pu.mvdAffi[eRefList][0] <<= imvShift;
pu.mvdAffi[eRefList][1] <<= imvShift;
if (!cu.cs->pcv->isEncoder)
{
pu.mvdAffi[eRefList][0].changeAffinePrecAmvr2Internal(pu.cu->imv);
pu.mvdAffi[eRefList][1].changeAffinePrecAmvr2Internal(pu.cu->imv);
if (cu.affineType == AFFINEMODEL_6PARAM)
{
pu.mvdAffi[eRefList][2].changeAffinePrecAmvr2Internal(pu.cu->imv);
}
}
Mv mvLT = affineAMVPInfo.mvCandLT[mvp_idx] + pu.mvdAffi[eRefList][0];
Mv mvRT = affineAMVPInfo.mvCandRT[mvp_idx] + pu.mvdAffi[eRefList][1];
mvRT += pu.mvdAffi[eRefList][0];
if ( pu.cu->imv != 1 )
{
mvLT.changePrecision( MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL );
mvRT.changePrecision( MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL );
}
Mv mvLB;
if ( cu.affineType == AFFINEMODEL_6PARAM )
{
pu.mvdAffi[eRefList][2] <<= imvShift;
mvLB = affineAMVPInfo.mvCandLB[mvp_idx] + pu.mvdAffi[eRefList][2];
mvLB += pu.mvdAffi[eRefList][0];
if ( pu.cu->imv != 1 )
{
mvLB.changePrecision( MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL );
}
}
#if JVET_N0334_MVCLIPPING
PU::setAllAffineMv(pu, mvLT, mvRT, mvLB, eRefList, false, true);
PU::setAllAffineMv(pu, mvLT, mvRT, mvLB, eRefList, true);
#else
PU::setAllAffineMv( pu, mvLT, mvRT, mvLB, eRefList );
#endif
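In this hunk the decoder first brings the parsed affine MVDs from the AMVR signalling precision to internal precision, then reconstructs the CPMVs. Note that mvdAffi[0] is added again to mvRT (and to mvLB for the 6-parameter model): the second and third affine MVDs are signalled as differences from the first. A hypothetical one-component sketch of that reconstruction, not the VTM code:

// Hypothetical sketch of the CPMV reconstruction shown above, one component:
// mvLT = mvpLT + mvd0;  mvRT = mvpRT + mvd1 + mvd0;  mvLB = mvpLB + mvd2 + mvd0.
#include <cassert>

int reconstructCpmv(int mvp, int mvdSignalled, int mvd0, bool isFirstCpmv)
{
  return mvp + mvdSignalled + (isFirstCpmv ? 0 : mvd0);
}

int main()
{
  const int mvd0 = 8;
  assert(reconstructCpmv(100, mvd0, mvd0, true)  == 108);  // top-left CPMV
  assert(reconstructCpmv(100, 4,    mvd0, false) == 112);  // top-right CPMV
}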
......@@ -799,9 +795,10 @@ void DecCu::xDeriveCUMV( CodingUnit &cu )
#if REUSE_CU_RESULTS
if (!cu.cs->pcv->isEncoder)
#endif
mvd <<= 2;
{
mvd.changeIbcPrecAmvr2Internal(pu.cu->imv);
}
pu.mv[REF_PIC_LIST_0] = amvpInfo.mvCand[pu.mvpIdx[REF_PIC_LIST_0]] + mvd;
pu.mv[REF_PIC_LIST_0].changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL);
#if JVET_N0334_MVCLIPPING
pu.mv[REF_PIC_LIST_0].mvCliptoStorageBitDepth();
#endif
......@@ -816,8 +813,11 @@ void DecCu::xDeriveCUMV( CodingUnit &cu )
AMVPInfo amvpInfo;
PU::fillMvpCand(pu, eRefList, pu.refIdx[eRefList], amvpInfo);
pu.mvpNum [eRefList] = amvpInfo.numCand;
if (!cu.cs->pcv->isEncoder)
{
pu.mvd[eRefList].changeTransPrecAmvr2Internal(pu.cu->imv);
}
pu.mv[eRefList] = amvpInfo.mvCand[pu.mvpIdx[eRefList]] + pu.mvd[eRefList];
pu.mv[eRefList].changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL);
#if JVET_N0334_MVCLIPPING
pu.mv[eRefList].mvCliptoStorageBitDepth();
#endif
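As in the affine case, the !isEncoder guard means only the decoder converts the parsed MVD from AMVR signalling precision to internal precision; with REUSE_CU_RESULTS the encoder apparently re-enters this path with the MVD already at internal precision. A hedged one-component sketch of the decoder-side flow (the storage-range wrap applied afterwards by mvCliptoStorageBitDepth() is omitted):

// Hedged sketch: parsed MVD at signalling precision -> internal 1/16-pel,
// then add the AMVP candidate, which is already at internal precision.
#include <cassert>

int deriveMvComponentSketch(int mvpInternal, int mvdSignalled, int amvrToInternalShift)
{
  const int mvdInternal = mvdSignalled << amvrToInternalShift;  // e.g. 1/4-pel -> 1/16-pel is << 2
  return mvpInternal + mvdInternal;
}

int main()
{
  // Quarter-pel MVD of 3 (0.75 samples) plus a predictor of 20 internal units
  // (1.25 samples) gives 32 internal units, i.e. 2.0 luma samples.
  assert(deriveMvComponentSketch(20, 3, 2) == 32);
}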
......@@ -843,8 +843,8 @@ void DecCu::xDeriveCUMV( CodingUnit &cu )
const int picWidth = pu.cs->slice->getSPS()->getPicWidthInLumaSamples();
const int picHeight = pu.cs->slice->getSPS()->getPicHeightInLumaSamples();
const unsigned int lcuWidth = pu.cs->slice->getSPS()->getMaxCUWidth();
int xPred = pu.mv[0].getHor()>>4;
int yPred = pu.mv[0].getVer()>>4;
int xPred = pu.mv[0].getHor() >> MV_FRACTIONAL_BITS_INTERNAL;
int yPred = pu.mv[0].getVer() >> MV_FRACTIONAL_BITS_INTERNAL;
CHECK(!PU::isBlockVectorValid(pu, cuPelX, cuPelY, roiWidth, roiHeight, picWidth, picHeight, 0, 0, xPred, yPred, lcuWidth), "invalid block vector for IBC detected.");
}
}
......
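The IBC block vector is stored at internal 1/16-pel precision, so the integer-sample displacement fed to the validity check is obtained by shifting right by MV_FRACTIONAL_BITS_INTERNAL, which this hunk now spells out instead of the literal 4. A tiny sketch (assuming MV_FRACTIONAL_BITS_INTERNAL == 4, as implied by MV_PRECISION_INTERNAL = 2 + MV_FRACTIONAL_BITS_INTERNAL = 6):

// Tiny sketch: internal 1/16-pel component to whole luma samples.
static constexpr int MV_FRACTIONAL_BITS_INTERNAL_SKETCH = 4;

inline int internalToIntegerPel(int v) { return v >> MV_FRACTIONAL_BITS_INTERNAL_SKETCH; }

// internalToIntegerPel(80) == 5: a block vector of 80 internal units points 5 luma samples away.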
......@@ -1565,12 +1565,13 @@ void CABACWriter::prediction_unit( const PredictionUnit& pu )
else if (CU::isIBC(*pu.cu))
{
ref_idx(pu, REF_PIC_LIST_0);
mvd_coding(pu.mvd[REF_PIC_LIST_0], pu.cu->imv);
Mv mvd = pu.mvd[REF_PIC_LIST_0];
mvd.changeIbcPrecInternal2Amvr(pu.cu->imv);
mvd_coding(mvd, 0); // already changed to signaling precision
mvp_flag(pu, REF_PIC_LIST_0);
}
else
{
int8_t affineMvdShift = pu.cu->imv ? ( pu.cu->imv == 1 ? -1 : 1 ) : 0;
inter_pred_idc( pu );
affine_flag ( *pu.cu );
smvd_mode( pu );
......@@ -1579,16 +1580,24 @@ void CABACWriter::prediction_unit( const PredictionUnit& pu )
ref_idx ( pu, REF_PIC_LIST_0 );
if ( pu.cu->affine )
{
mvd_coding( pu.mvdAffi[REF_PIC_LIST_0][0], affineMvdShift );
mvd_coding( pu.mvdAffi[REF_PIC_LIST_0][1], affineMvdShift );
Mv mvd = pu.mvdAffi[REF_PIC_LIST_0][0];
mvd.changeAffinePrecInternal2Amvr(pu.cu->imv);
mvd_coding(mvd, 0); // already changed to signaling precision
mvd = pu.mvdAffi[REF_PIC_LIST_0][1];
mvd.changeAffinePrecInternal2Amvr(pu.cu->imv);
mvd_coding(mvd, 0); // already changed to signaling precision
if ( pu.cu->affineType == AFFINEMODEL_6PARAM )
{
mvd_coding( pu.mvdAffi[REF_PIC_LIST_0][2], affineMvdShift );
mvd = pu.mvdAffi[REF_PIC_LIST_0][2];
mvd.changeAffinePrecInternal2Amvr(pu.cu->imv);
mvd_coding(mvd, 0); // already changed to signaling precision
}
}
else
{
mvd_coding( pu.mvd[REF_PIC_LIST_0], pu.cu->imv );
Mv mvd = pu.mvd[REF_PIC_LIST_0];
mvd.changeTransPrecInternal2Amvr(pu.cu->imv);
mvd_coding(mvd, 0); // already changed to signaling precision
}
mvp_flag ( pu, REF_PIC_LIST_0 );
}
......@@ -1601,16 +1610,24 @@ void CABACWriter::prediction_unit( const PredictionUnit& pu )
{
if ( pu.cu->affine )
{
mvd_coding( pu.mvdAffi[REF_PIC_LIST_1][0], affineMvdShift );
mvd_coding( pu.mvdAffi[REF_PIC_LIST_1][1], affineMvdShift );
Mv mvd = pu.mvdAffi[REF_PIC_LIST_1][0];
mvd.changeAffinePrecInternal2Amvr(pu.cu->imv);
mvd_coding(mvd, 0); // already changed to signaling precision
mvd = pu.mvdAffi[REF_PIC_LIST_1][1];
mvd.changeAffinePrecInternal2Amvr(pu.cu->imv);
mvd_coding(mvd, 0); // already changed to signaling precision
if ( pu.cu->affineType == AFFINEMODEL_6PARAM )
{
mvd_coding( pu.mvdAffi[REF_PIC_LIST_1][2], affineMvdShift );
mvd = pu.mvdAffi[REF_PIC_LIST_1][2];
mvd.changeAffinePrecInternal2Amvr(pu.cu->imv);
mvd_coding(mvd, 0); // already changed to signaling precision
}
}
else
{
mvd_coding( pu.mvd[REF_PIC_LIST_1], pu.cu->imv );
Mv mvd = pu.mvd[REF_PIC_LIST_1];
mvd.changeTransPrecInternal2Amvr(pu.cu->imv);
mvd_coding(mvd, 0); // already changed to signaling precision
}
}
}
......@@ -2433,7 +2450,7 @@ void CABACWriter::mvd_coding( const Mv &rMvd, int8_t imv )
verMvd >>= 2;
if( imv == 2 )//IMV_4PEL
{
CHECK( (horMvd % 4) != 0 && (verMvd % 4) != 0, "IMV: MVD is not a multiple of 8" );
CHECK( (horMvd % 4) != 0 && (verMvd % 4) != 0, "IMV: MVD is not a multiple of 16" );
horMvd >>= 2;
verMvd >>= 2;
}
......
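The corrected CHECK message describes the MVD in its signalled 1/4-pel units: an MVD aligned to the 4-pel grid is a multiple of 4 * 4 = 16 quarter-pel units, not 8 (the % 4 test itself runs after the value has already been shifted down by 2 in the lines above).

// Arithmetic behind the corrected message, as a compile-time check.
static_assert(4 /* pel */ * 4 /* quarter-pel units per pel */ == 16,
              "4-pel alignment is a multiple of 16 in quarter-pel units");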
......@@ -355,7 +355,7 @@ protected:
RefPicList eRefPicList,
int iRefIdx
);
uint32_t xCalcAffineMVBits ( PredictionUnit& pu, Mv mvCand[3], Mv mvPred[3], bool mvHighPrec = false );
uint32_t xCalcAffineMVBits ( PredictionUnit& pu, Mv mvCand[3], Mv mvPred[3] );
void xCopyAMVPInfo ( AMVPInfo* pSrc, AMVPInfo* pDst );
uint32_t xGetMvpIdxBits ( int iIdx, int iNum );
......