diff --git a/source/Lib/CommonLib/CommonDef.h b/source/Lib/CommonLib/CommonDef.h index 65802ad15793bd7ca429043f47e84793ab79bdca..07004135a64c2f60584963d33c5fcc53f5521431 100644 --- a/source/Lib/CommonLib/CommonDef.h +++ b/source/Lib/CommonLib/CommonDef.h @@ -245,9 +245,12 @@ static const int LOG2_MAX_NUM_ROWS_MINUS1 = 7; static const int CABAC_INIT_PRESENT_FLAG = 1; -static const int LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS = 4; -static const int CHROMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS = 8; -static const int VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE = 2; ///< additional precision bit for MV storage +static const int MV_FRACTIONAL_BITS_INTERNAL = 4; +static const int MV_FRACTIONAL_BITS_SIGNAL = 2; +static const int MV_FRACTIONAL_BITS_DIFF = MV_FRACTIONAL_BITS_INTERNAL - MV_FRACTIONAL_BITS_SIGNAL; +static const int LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS_SIGNAL = 1 << MV_FRACTIONAL_BITS_SIGNAL; +static const int LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS = 1 << MV_FRACTIONAL_BITS_INTERNAL; +static const int CHROMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS = 1 << (MV_FRACTIONAL_BITS_INTERNAL + 1); static const int MAX_NUM_LONG_TERM_REF_PICS = 33; static const int NUM_LONG_TERM_REF_PIC_SPS = 0; diff --git a/source/Lib/CommonLib/ContextModelling.cpp b/source/Lib/CommonLib/ContextModelling.cpp index 6acb2c27ceb36c4db1088374374b92fa01d0dad5..bbfac11a65808d1c2c7782669c51452443387a1a 100644 --- a/source/Lib/CommonLib/ContextModelling.cpp +++ b/source/Lib/CommonLib/ContextModelling.cpp @@ -373,7 +373,7 @@ void MergeCtx::setMergeInfo( PredictionUnit& pu, int candIdx ) { pu.cu->cpr = true; pu.bv = pu.mv[REF_PIC_LIST_0]; - pu.bv >>= (2 + VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE); // used for only integer resolution + pu.bv.changePrecision(MV_PRECISION_INTERNAL, MV_PRECISION_INT); // used for only integer resolution } #endif #if JVET_L0646_GBI @@ -385,7 +385,7 @@ void MergeCtx::setMergeInfo( PredictionUnit& pu, int candIdx ) void MergeCtx::setMmvdMergeCandiInfo(PredictionUnit& pu, int candIdx) { const Slice &slice = *pu.cs->slice; - const int mvShift = VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + const int mvShift = MV_FRACTIONAL_BITS_DIFF; const int refMvdCands[8] = { 1 << mvShift , 2 << mvShift , 4 << mvShift , 8 << mvShift , 16 << mvShift , 32 << mvShift, 64 << mvShift , 128 << mvShift }; int fPosGroup = 0; int fPosBaseIdx = 0; diff --git a/source/Lib/CommonLib/InterPrediction.cpp b/source/Lib/CommonLib/InterPrediction.cpp index 7f890813451c923c64a6f821474b81656d059002..41381e2fe17c43799d73fcbbc2bc0d1ba8316826 100644 --- a/source/Lib/CommonLib/InterPrediction.cpp +++ b/source/Lib/CommonLib/InterPrediction.cpp @@ -76,9 +76,9 @@ InterPrediction::InterPrediction() for( uint32_t c = 0; c < MAX_NUM_COMPONENT; c++ ) { - for( uint32_t i = 0; i < LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS; i++ ) + for( uint32_t i = 0; i < LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS_SIGNAL; i++ ) { - for( uint32_t j = 0; j < LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS; j++ ) + for( uint32_t j = 0; j < LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS_SIGNAL; j++ ) { m_filteredBlock[i][j][c] = nullptr; } @@ -107,9 +107,9 @@ void InterPrediction::destroy() for( uint32_t c = 0; c < MAX_NUM_COMPONENT; c++ ) { - for( uint32_t i = 0; i < LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS; i++ ) + for( uint32_t i = 0; i < LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS_SIGNAL; i++ ) { - for( uint32_t j = 0; j < LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS; j++ ) + for( uint32_t j = 0; j < 
LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS_SIGNAL; j++ ) { xFree( m_filteredBlock[i][j][c] ); m_filteredBlock[i][j][c] = nullptr; @@ -163,11 +163,11 @@ void InterPrediction::init( RdCost* pcRdCost, ChromaFormat chromaFormatIDC ) int extWidth = MAX_CU_SIZE + 16; int extHeight = MAX_CU_SIZE + 1; #endif - for( uint32_t i = 0; i < LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS; i++ ) + for( uint32_t i = 0; i < LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS_SIGNAL; i++ ) { m_filteredBlockTmp[i][c] = ( Pel* ) xMalloc( Pel, ( extWidth + 4 ) * ( extHeight + 7 + 4 ) ); - for( uint32_t j = 0; j < LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS; j++ ) + for( uint32_t j = 0; j < LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS_SIGNAL; j++ ) { m_filteredBlock[i][j][c] = ( Pel* ) xMalloc( Pel, extWidth * extHeight ); } @@ -637,12 +637,8 @@ void InterPrediction::xPredInterBlk ( const ComponentID& compID, const Predictio const ChromaFormat chFmt = pu.chromaFormat; const bool rndRes = !bi; - int iAddPrecShift = 0; - - iAddPrecShift = VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - - int shiftHor = 2 + iAddPrecShift + ::getComponentScaleX(compID, chFmt); - int shiftVer = 2 + iAddPrecShift + ::getComponentScaleY(compID, chFmt); + int shiftHor = MV_FRACTIONAL_BITS_INTERNAL + ::getComponentScaleX(compID, chFmt); + int shiftVer = MV_FRACTIONAL_BITS_INTERNAL + ::getComponentScaleY(compID, chFmt); int xFrac = _mv.hor & ((1 << shiftHor) - 1); int yFrac = _mv.ver & ((1 << shiftVer) - 1); @@ -653,8 +649,6 @@ void InterPrediction::xPredInterBlk ( const ComponentID& compID, const Predictio JVET_J0090_SET_CACHE_ENABLE( false ); } #endif - xFrac <<= VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE - iAddPrecShift; - yFrac <<= VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE - iAddPrecShift; PelBuf &dstBuf = dstPic.bufs[compID]; unsigned width = dstBuf.width; @@ -821,7 +815,7 @@ void InterPrediction::xPredAffineBlk( const ComponentID& compID, const Predictio PelBuf tmpBuf = PelBuf(m_filteredBlockTmp[0][compID], pu.blocks[compID]); const int vFilterSize = isLuma(compID) ? 
NTAPS_LUMA : NTAPS_CHROMA; - const int shift = iBit - 4 + VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE + 2; + const int shift = iBit - 4 + MV_FRACTIONAL_BITS_INTERNAL; // get prediction block by block for ( int h = 0; h < cxHeight; h += blockHeight ) diff --git a/source/Lib/CommonLib/InterPrediction.h b/source/Lib/CommonLib/InterPrediction.h index 75e803d2eb6f3806df5e64852bb3fabf8bd8f6ea..e56f3d05b746aa9afc4a591b00a694862bf1f3bb 100644 --- a/source/Lib/CommonLib/InterPrediction.h +++ b/source/Lib/CommonLib/InterPrediction.h @@ -80,8 +80,8 @@ protected: InterpolationFilter m_if; Pel* m_acYuvPred [NUM_REF_PIC_LIST_01][MAX_NUM_COMPONENT]; - Pel* m_filteredBlock [LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS][LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS][MAX_NUM_COMPONENT]; - Pel* m_filteredBlockTmp [LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS][MAX_NUM_COMPONENT]; + Pel* m_filteredBlock [LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS_SIGNAL][LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS_SIGNAL][MAX_NUM_COMPONENT]; + Pel* m_filteredBlockTmp [LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS_SIGNAL][MAX_NUM_COMPONENT]; ChromaFormat m_currChromaFormat; diff --git a/source/Lib/CommonLib/InterpolationFilter.cpp b/source/Lib/CommonLib/InterpolationFilter.cpp index abcef170f7a7675d4930ceaf77218519540b3607..fd91a3550d270b776bc526335b4a8fe5cb609a71 100644 --- a/source/Lib/CommonLib/InterpolationFilter.cpp +++ b/source/Lib/CommonLib/InterpolationFilter.cpp @@ -55,7 +55,7 @@ CacheModel* InterpolationFilter::m_cacheModel; // Tables // ==================================================================================================================== -const TFilterCoeff InterpolationFilter::m_lumaFilter[LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE][NTAPS_LUMA] = +const TFilterCoeff InterpolationFilter::m_lumaFilter[LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS][NTAPS_LUMA] = { { 0, 0, 0, 64, 0, 0, 0, 0 }, { 0, 1, -3, 63, 4, -2, 1, 0 }, @@ -75,7 +75,7 @@ const TFilterCoeff InterpolationFilter::m_lumaFilter[LUMA_INTERPOLATION_FILTER_S { 0, 1, -2, 4, 63, -3, 1, 0 } }; -const TFilterCoeff InterpolationFilter::m_chromaFilter[CHROMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE][NTAPS_CHROMA] = +const TFilterCoeff InterpolationFilter::m_chromaFilter[CHROMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS][NTAPS_CHROMA] = { { 0, 64, 0, 0 }, { -1, 63, 2, 0 }, @@ -112,7 +112,7 @@ const TFilterCoeff InterpolationFilter::m_chromaFilter[CHROMA_INTERPOLATION_FILT }; #if JVET_L0256_BIO -const TFilterCoeff InterpolationFilter::m_bilinearFilter[LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE][NTAPS_BILINEAR] = +const TFilterCoeff InterpolationFilter::m_bilinearFilter[LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS][NTAPS_BILINEAR] = { { 64, 0, }, { 60, 4, }, @@ -477,7 +477,7 @@ void InterpolationFilter::filterHor( const ComponentID compID, Pel const *src, i } else if( isLuma( compID ) ) { - CHECK( frac < 0 || frac >= ( LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE ), "Invalid fraction" ); + CHECK( frac < 0 || frac >= LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS, "Invalid fraction" ); #if JVET_L0256_BIO if( nFilterIdx == 1 ) { @@ -492,7 +492,7 @@ void InterpolationFilter::filterHor( const ComponentID compID, Pel const *src, i else { const uint32_t csx = getComponentScaleX( compID, fmt ); - CHECK( frac < 0 || csx >= 2 || ( frac << ( 1 - csx ) ) >= ( 
CHROMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE ), "Invalid fraction" ); + CHECK( frac < 0 || csx >= 2 || ( frac << ( 1 - csx ) ) >= CHROMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS, "Invalid fraction" ); filterHor<NTAPS_CHROMA>( clpRng, src, srcStride, dst, dstStride, width, height, isLast, m_chromaFilter[frac << ( 1 - csx )] ); } } @@ -526,7 +526,7 @@ void InterpolationFilter::filterVer( const ComponentID compID, Pel const *src, i } else if( isLuma( compID ) ) { - CHECK( frac < 0 || frac >= ( LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE ), "Invalid fraction" ); + CHECK( frac < 0 || frac >= LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS, "Invalid fraction" ); #if JVET_L0256_BIO if (nFilterIdx == 1) { @@ -541,7 +541,7 @@ void InterpolationFilter::filterVer( const ComponentID compID, Pel const *src, i else { const uint32_t csy = getComponentScaleY( compID, fmt ); - CHECK( frac < 0 || csy >= 2 || ( frac << ( 1 - csy ) ) >= ( CHROMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE ), "Invalid fraction" ); + CHECK( frac < 0 || csy >= 2 || ( frac << ( 1 - csy ) ) >= CHROMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS, "Invalid fraction" ); filterVer<NTAPS_CHROMA>( clpRng, src, srcStride, dst, dstStride, width, height, isFirst, isLast, m_chromaFilter[frac << ( 1 - csy )] ); } } diff --git a/source/Lib/CommonLib/InterpolationFilter.h b/source/Lib/CommonLib/InterpolationFilter.h index 4f246d9bed3ff90076ed125212a79a9da8761112..3d42b5649f326672434e35417436c52711e6057a 100644 --- a/source/Lib/CommonLib/InterpolationFilter.h +++ b/source/Lib/CommonLib/InterpolationFilter.h @@ -54,10 +54,10 @@ */ class InterpolationFilter { - static const TFilterCoeff m_lumaFilter[LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE][NTAPS_LUMA]; ///< Luma filter taps - static const TFilterCoeff m_chromaFilter[CHROMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE][NTAPS_CHROMA]; ///< Chroma filter taps + static const TFilterCoeff m_lumaFilter[LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS][NTAPS_LUMA]; ///< Luma filter taps + static const TFilterCoeff m_chromaFilter[CHROMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS][NTAPS_CHROMA]; ///< Chroma filter taps #if JVET_L0256_BIO - static const TFilterCoeff m_bilinearFilter[LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE][NTAPS_BILINEAR]; ///< bilinear filter taps + static const TFilterCoeff m_bilinearFilter[LUMA_INTERPOLATION_FILTER_SUB_SAMPLE_POSITIONS][NTAPS_BILINEAR]; ///< bilinear filter taps #endif public: template<bool isFirst, bool isLast> diff --git a/source/Lib/CommonLib/LoopFilter.cpp b/source/Lib/CommonLib/LoopFilter.cpp index 70c9316b36fef224165dbe53cbe4707b3e4f6f20..76e8601cbe23b223664054bbe6ca8e06263e0ada 100644 --- a/source/Lib/CommonLib/LoopFilter.cpp +++ b/source/Lib/CommonLib/LoopFilter.cpp @@ -485,8 +485,7 @@ unsigned LoopFilter::xGetBoundaryStrengthSingle ( const CodingUnit& cu, const De if( 0 <= miQ.refIdx[0] ) { mvQ0 = miQ.mv[0]; } if( 0 <= miQ.refIdx[1] ) { mvQ1 = miQ.mv[1]; } - int nThreshold = 4; - nThreshold = 4 << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + int nThreshold = 1 << MV_FRACTIONAL_BITS_INTERNAL; unsigned uiBs = 0; //th can be optimized @@ -539,8 +538,7 @@ unsigned LoopFilter::xGetBoundaryStrengthSingle ( const CodingUnit& cu, const De Mv mvP0 = miP.mv[0]; Mv mvQ0 = miQ.mv[0]; - int nThreshold = 4; 
- nThreshold = 4 << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + int nThreshold = 1 << MV_FRACTIONAL_BITS_INTERNAL; return ( ( abs( mvQ0.getHor() - mvP0.getHor() ) >= nThreshold ) || ( abs( mvQ0.getVer() - mvP0.getVer() ) >= nThreshold ) ) ? 1 : 0; } diff --git a/source/Lib/CommonLib/Mv.cpp b/source/Lib/CommonLib/Mv.cpp index 413815b135d60a35ff72122a956d1be22d74d42a..6c513564f91e536bc0b45073680e17685c2ed571 100644 --- a/source/Lib/CommonLib/Mv.cpp +++ b/source/Lib/CommonLib/Mv.cpp @@ -40,18 +40,7 @@ #include "Common.h" #include "Slice.h" -void roundMV( Mv & rMV, unsigned imvShift ) -{ - CHECK( imvShift == 0, "roundMV called for imvShift=0" ); - int offset = 1 << ( imvShift - 1 ); -#if JVET_L0377_AMVR_ROUNDING_ALIGN - rMV.setHor(rMV.getHor() >= 0 ? ((rMV.getHor() + offset) >> imvShift) << imvShift : -(((-rMV.getHor() + offset) >> imvShift)) << imvShift); - rMV.setVer(rMV.getVer() >= 0 ? ((rMV.getVer() + offset) >> imvShift) << imvShift : -(((-rMV.getVer() + offset) >> imvShift)) << imvShift); -#else - rMV.setHor( ( ( rMV.getHor() + offset ) >> imvShift ) << imvShift ); - rMV.setVer( ( ( rMV.getVer() + offset ) >> imvShift ) << imvShift ); -#endif -} +const MvPrecision Mv::m_amvrPrecision[3] = { MV_PRECISION_QUARTER, MV_PRECISION_INT, MV_PRECISION_4PEL }; // for cu.imv=0, 1 and 2 void roundAffineMv( int& mvx, int& mvy, int nShift ) { @@ -66,8 +55,7 @@ void clipMv( Mv& rcMv, const Position& pos, #endif const SPS& sps ) { - int iMvShift = 2; - iMvShift += VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + int iMvShift = MV_FRACTIONAL_BITS_INTERNAL; int iOffset = 8; int iHorMax = ( sps.getPicWidthInLumaSamples() + iOffset - ( int ) pos.x - 1 ) << iMvShift; int iHorMin = ( -( int ) sps.getMaxCUWidth() - iOffset - ( int ) pos.x + 1 ) << iMvShift; diff --git a/source/Lib/CommonLib/Mv.h b/source/Lib/CommonLib/Mv.h index ec0146e4a701cd860ed69bcb565670eedac3a7f1..7c287694da728e6d4d98e7b8ef843d268cdd4483 100644 --- a/source/Lib/CommonLib/Mv.h +++ b/source/Lib/CommonLib/Mv.h @@ -47,9 +47,20 @@ // Class definition // ==================================================================================================================== +enum MvPrecision +{ + MV_PRECISION_4PEL = 0, // 4-pel + MV_PRECISION_INT = 2, // 1-pel, shift 2 bits from 4-pel + MV_PRECISION_QUARTER = 4, // 1/4-pel (the precision of regular MV difference signaling), shift 4 bits from 4-pel + MV_PRECISION_INTERNAL = 6, // 1/16-pel (the precision of internal MV), shift 6 bits from 4-pel +}; + /// basic motion vector class class Mv { +private: + static const MvPrecision m_amvrPrecision[3]; + public: int hor; ///< horizontal component of motion vector int ver; ///< vertical component of motion vector @@ -159,16 +170,39 @@ public: return Mv( mvx, mvy ); } - void roundMV2SignalPrecision() + void changePrecision(const MvPrecision& src, const MvPrecision& dst) + { + const int shift = (int)dst - (int)src; + if (shift >= 0) + { + *this <<= shift; + } + else + { + const int rightShift = -shift; + const int nOffset = 1 << (rightShift - 1); + hor = hor >= 0 ? (hor + nOffset) >> rightShift : -((-hor + nOffset) >> rightShift); + ver = ver >= 0 ? (ver + nOffset) >> rightShift : -((-ver + nOffset) >> rightShift); + } + } + + void changePrecisionAmvr(const int amvr, const MvPrecision& dst) + { + changePrecision(m_amvrPrecision[amvr], dst); + } + + void roundToPrecision(const MvPrecision& src, const MvPrecision& dst) { - const int nShift = VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - const int nOffset = 1 << (nShift - 1); - hor = hor >= 0 ? 
(hor + nOffset) >> nShift : -((-hor + nOffset) >> nShift); - ver = ver >= 0 ? (ver + nOffset) >> nShift : -((-ver + nOffset) >> nShift); - hor = hor >= 0 ? (hor) << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE : -((-hor) << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE); - ver = ver >= 0 ? (ver) << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE : -((-ver) << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE); + changePrecision(src, dst); + changePrecision(dst, src); + } + + void roundToAmvrSignalPrecision(const MvPrecision& src, const int amvr) + { + roundToPrecision(src, m_amvrPrecision[amvr]); } };// END CLASS DEFINITION MV + #if JVET_L0293_CPR namespace std { @@ -182,7 +216,6 @@ namespace std }; }; #endif -void roundMV( Mv& rcMv, unsigned imvShift ); void clipMv ( Mv& rcMv, const struct Position& pos, #if JVET_L0231_WRAPAROUND const struct Size& size, diff --git a/source/Lib/CommonLib/UnitTools.cpp b/source/Lib/CommonLib/UnitTools.cpp index 942d98aa417e7db769dee89a0b8dbb20626a5959..d6fda1c0b6b825f74be5a55337472f2c7eba55dd 100644 --- a/source/Lib/CommonLib/UnitTools.cpp +++ b/source/Lib/CommonLib/UnitTools.cpp @@ -2250,11 +2250,9 @@ void PU::fillMvpCand(PredictionUnit &pu, const RefPicList &eRefPicList, const in if( pu.cu->imv != 0) { - unsigned imvShift = pu.cu->imv << 1; - imvShift += VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; for( int i = 0; i < pInfo->numCand; i++ ) { - roundMV( pInfo->mvCand[i], imvShift ); + pInfo->mvCand[i].roundToAmvrSignalPrecision(MV_PRECISION_INTERNAL, pu.cu->imv); } } @@ -2313,9 +2311,7 @@ void PU::fillMvpCand(PredictionUnit &pu, const RefPicList &eRefPicList, const in #if JVET_L0266_HMVP if (pu.cu->imv != 0) { - unsigned imvShift = pu.cu->imv << 1; - imvShift += VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - roundMV(cColMv, imvShift); + cColMv.roundToAmvrSignalPrecision(MV_PRECISION_INTERNAL, pu.cu->imv); } int i = 0; for (i = 0; i < pInfo->numCand; i++) @@ -2352,20 +2348,10 @@ void PU::fillMvpCand(PredictionUnit &pu, const RefPicList &eRefPicList, const in pInfo->mvCand[pInfo->numCand] = Mv( 0, 0 ); pInfo->numCand++; } - for (Mv &mv : pInfo->mvCand) - { - const int nShift = VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - const int nOffset = 1 << (nShift - 1); - mv.hor = mv.hor >= 0 ? (mv.hor + nOffset) >> nShift : -((-mv.hor + nOffset) >> nShift); - mv.ver = mv.ver >= 0 ? 
(mv.ver + nOffset) >> nShift : -((-mv.ver + nOffset) >> nShift); - } - if (pu.cu->imv != 0) + for (Mv &mv : pInfo->mvCand) { - unsigned imvShift = pu.cu->imv << 1; - for (int i = 0; i < pInfo->numCand; i++) - { - roundMV(pInfo->mvCand[i], imvShift); - } + mv.changePrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER); + mv.roundToAmvrSignalPrecision(MV_PRECISION_QUARTER, pu.cu->imv); } } @@ -2426,13 +2412,13 @@ bool PU::addAffineMVPCandUnscaled( const PredictionUnit &pu, const RefPicList &r xInheritedAffineMv( pu, neibPU, eRefPicListIndex, outputAffineMv ); - outputAffineMv[0].roundMV2SignalPrecision(); - outputAffineMv[1].roundMV2SignalPrecision(); + outputAffineMv[0].roundToPrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER); + outputAffineMv[1].roundToPrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER); affiAMVPInfo.mvCandLT[affiAMVPInfo.numCand] = outputAffineMv[0]; affiAMVPInfo.mvCandRT[affiAMVPInfo.numCand] = outputAffineMv[1]; if ( pu.cu->affineType == AFFINEMODEL_6PARAM ) { - outputAffineMv[2].roundMV2SignalPrecision(); + outputAffineMv[2].roundToPrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER); affiAMVPInfo.mvCandLB[affiAMVPInfo.numCand] = outputAffineMv[2]; } affiAMVPInfo.numCand++; @@ -2599,8 +2585,6 @@ void PU::xInheritedAffineMv( const PredictionUnit &pu, const PredictionUnit* puN void PU::fillAffineMvpCand(PredictionUnit &pu, const RefPicList &eRefPicList, const int &refIdx, AffineAMVPInfo &affiAMVPInfo) { - const int nShift = VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - const int nOffset = 1 << (nShift - 1); affiAMVPInfo.numCand = 0; if (refIdx < 0) @@ -2655,11 +2639,11 @@ void PU::fillAffineMvpCand(PredictionUnit &pu, const RefPicList &eRefPicList, co xInheritedAffineMv( pu, puNeighbour, eTestRefPicList, outputAffineMv ); - outputAffineMv[0].roundMV2SignalPrecision(); - outputAffineMv[1].roundMV2SignalPrecision(); + outputAffineMv[0].roundToPrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER); + outputAffineMv[1].roundToPrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER); if ( pu.cu->affineType == AFFINEMODEL_6PARAM ) { - outputAffineMv[2].roundMV2SignalPrecision(); + outputAffineMv[2].roundToPrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER); } if ( affiAMVPInfo.numCand == 0 @@ -2680,12 +2664,9 @@ void PU::fillAffineMvpCand(PredictionUnit &pu, const RefPicList &eRefPicList, co { for (int i = 0; i < affiAMVPInfo.numCand; i++) { - affiAMVPInfo.mvCandLT[i].hor = affiAMVPInfo.mvCandLT[i].hor >= 0 ? (affiAMVPInfo.mvCandLT[i].hor + nOffset) >> nShift : -((-affiAMVPInfo.mvCandLT[i].hor + nOffset) >> nShift); - affiAMVPInfo.mvCandLT[i].ver = affiAMVPInfo.mvCandLT[i].ver >= 0 ? (affiAMVPInfo.mvCandLT[i].ver + nOffset) >> nShift : -((-affiAMVPInfo.mvCandLT[i].ver + nOffset) >> nShift); - affiAMVPInfo.mvCandRT[i].hor = affiAMVPInfo.mvCandRT[i].hor >= 0 ? (affiAMVPInfo.mvCandRT[i].hor + nOffset) >> nShift : -((-affiAMVPInfo.mvCandRT[i].hor + nOffset) >> nShift); - affiAMVPInfo.mvCandRT[i].ver = affiAMVPInfo.mvCandRT[i].ver >= 0 ? (affiAMVPInfo.mvCandRT[i].ver + nOffset) >> nShift : -((-affiAMVPInfo.mvCandRT[i].ver + nOffset) >> nShift); - affiAMVPInfo.mvCandLB[i].hor = affiAMVPInfo.mvCandLB[i].hor >= 0 ? (affiAMVPInfo.mvCandLB[i].hor + nOffset) >> nShift : -((-affiAMVPInfo.mvCandLB[i].hor + nOffset) >> nShift); - affiAMVPInfo.mvCandLB[i].ver = affiAMVPInfo.mvCandLB[i].ver >= 0 ? 
(affiAMVPInfo.mvCandLB[i].ver + nOffset) >> nShift : -((-affiAMVPInfo.mvCandLB[i].ver + nOffset) >> nShift); + affiAMVPInfo.mvCandLT[i].changePrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER); + affiAMVPInfo.mvCandRT[i].changePrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER); + affiAMVPInfo.mvCandLB[i].changePrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER); } return; } @@ -2743,9 +2724,9 @@ void PU::fillAffineMvpCand(PredictionUnit &pu, const RefPicList &eRefPicList, co outputAffineMv[2] = amvpInfo2.mvCand[0]; - outputAffineMv[0].roundMV2SignalPrecision(); - outputAffineMv[1].roundMV2SignalPrecision(); - outputAffineMv[2].roundMV2SignalPrecision(); + outputAffineMv[0].roundToPrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER); + outputAffineMv[1].roundToPrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER); + outputAffineMv[2].roundToPrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER); #if JVET_L0271_AFFINE_AMVP_SIMPLIFY if ( cornerMVPattern == 7 || (cornerMVPattern == 3 && pu.cu->affineType == AFFINEMODEL_4PARAM) ) @@ -2765,7 +2746,7 @@ void PU::fillAffineMvpCand(PredictionUnit &pu, const RefPicList &eRefPicList, co int vy2 = (outputAffineMv[0].getVer() << shift) + ((outputAffineMv[1].getHor() - outputAffineMv[0].getHor()) << (shift + g_aucLog2[curHeight] - g_aucLog2[curWidth])); roundAffineMv( vx2, vy2, shift ); outputAffineMv[2].set( vx2, vy2 ); - outputAffineMv[2].roundMV2SignalPrecision(); + outputAffineMv[2].roundToPrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER); } if ( cornerMVPattern == 5 ) // V0 V2 are available, derived V1 @@ -2775,7 +2756,7 @@ void PU::fillAffineMvpCand(PredictionUnit &pu, const RefPicList &eRefPicList, co int vy1 = (outputAffineMv[0].getVer() << shift) - ((outputAffineMv[2].getHor() - outputAffineMv[0].getHor()) << (shift + g_aucLog2[curWidth] - g_aucLog2[curHeight])); roundAffineMv( vx1, vy1, shift ); outputAffineMv[1].set( vx1, vy1 ); - outputAffineMv[1].roundMV2SignalPrecision(); + outputAffineMv[1].roundToPrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER); } if ( affiAMVPInfo.numCand == 0 @@ -2850,7 +2831,7 @@ void PU::fillAffineMvpCand(PredictionUnit &pu, const RefPicList &eRefPicList, co if ( (C0Avail && getColocatedMVP( pu, eRefPicList, posC0, cColMv, refIdxCol )) || getColocatedMVP( pu, eRefPicList, posC1, cColMv, refIdxCol ) ) { - cColMv.roundMV2SignalPrecision(); + cColMv.roundToPrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER); affiAMVPInfo.mvCandLT[affiAMVPInfo.numCand] = cColMv; affiAMVPInfo.mvCandRT[affiAMVPInfo.numCand] = cColMv; affiAMVPInfo.mvCandLB[affiAMVPInfo.numCand] = cColMv; @@ -2874,12 +2855,9 @@ void PU::fillAffineMvpCand(PredictionUnit &pu, const RefPicList &eRefPicList, co for (int i = 0; i < affiAMVPInfo.numCand; i++) { - affiAMVPInfo.mvCandLT[i].hor = affiAMVPInfo.mvCandLT[i].hor >= 0 ? (affiAMVPInfo.mvCandLT[i].hor + nOffset) >> nShift : -((-affiAMVPInfo.mvCandLT[i].hor + nOffset) >> nShift); - affiAMVPInfo.mvCandLT[i].ver = affiAMVPInfo.mvCandLT[i].ver >= 0 ? (affiAMVPInfo.mvCandLT[i].ver + nOffset) >> nShift : -((-affiAMVPInfo.mvCandLT[i].ver + nOffset) >> nShift); - affiAMVPInfo.mvCandRT[i].hor = affiAMVPInfo.mvCandRT[i].hor >= 0 ? (affiAMVPInfo.mvCandRT[i].hor + nOffset) >> nShift : -((-affiAMVPInfo.mvCandRT[i].hor + nOffset) >> nShift); - affiAMVPInfo.mvCandRT[i].ver = affiAMVPInfo.mvCandRT[i].ver >= 0 ? (affiAMVPInfo.mvCandRT[i].ver + nOffset) >> nShift : -((-affiAMVPInfo.mvCandRT[i].ver + nOffset) >> nShift); - affiAMVPInfo.mvCandLB[i].hor = affiAMVPInfo.mvCandLB[i].hor >= 0 ? 
(affiAMVPInfo.mvCandLB[i].hor + nOffset) >> nShift : -((-affiAMVPInfo.mvCandLB[i].hor + nOffset) >> nShift); - affiAMVPInfo.mvCandLB[i].ver = affiAMVPInfo.mvCandLB[i].ver >= 0 ? (affiAMVPInfo.mvCandLB[i].ver + nOffset) >> nShift : -((-affiAMVPInfo.mvCandLB[i].ver + nOffset) >> nShift); + affiAMVPInfo.mvCandLT[i].changePrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER); + affiAMVPInfo.mvCandRT[i].changePrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER); + affiAMVPInfo.mvCandLB[i].changePrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER); } @@ -3046,9 +3024,6 @@ void PU::addAMVPHMVPCand(const PredictionUnit &pu, const RefPicList eRefPicList, MotionInfo neibMi; int i = 0; - unsigned imvShift = imv << 1; - imvShift += VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - int num_avai_candInLUT = slice.getAvailableLUTMrgNum(); int num_allowedCand = std::min(MAX_NUM_HMVP_AVMPCANDS, num_avai_candInLUT); @@ -3070,7 +3045,7 @@ void PU::addAMVPHMVPCand(const PredictionUnit &pu, const RefPicList eRefPicList, Mv pmv = neibMi.mv[eRefPicListIndex]; if (imv != 0) { - roundMV(pmv, imvShift); + pmv.roundToAmvrSignalPrecision(MV_PRECISION_INTERNAL, imv); } for (i = 0; i < info.numCand; i++) { @@ -3871,20 +3846,15 @@ void PU::setAllAffineMvField( PredictionUnit &pu, MvField *mvField, RefPicList e pu.refIdx[eRefList] = mvField[0].refIdx; } -void PU::setAllAffineMv( PredictionUnit& pu, Mv affLT, Mv affRT, Mv affLB, RefPicList eRefList - , bool setHighPrec -) +void PU::setAllAffineMv( PredictionUnit& pu, Mv affLT, Mv affRT, Mv affLB, RefPicList eRefList, bool setHighPrec) { int width = pu.Y().width; int shift = MAX_CU_DEPTH; if (setHighPrec) { - affLT.hor = affLT.hor << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - affLT.ver = affLT.ver << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - affRT.hor = affRT.hor << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - affRT.ver = affRT.ver << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - affLB.hor = affLB.hor << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - affLB.ver = affLB.ver << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + affLT.changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL); + affRT.changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL); + affLB.changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL); } int deltaMvHorX, deltaMvHorY, deltaMvVerX, deltaMvVerY; deltaMvHorX = (affRT - affLT).getHor() << (shift - g_aucLog2[width]); @@ -4137,8 +4107,7 @@ bool PU::getInterMergeSubPuMvpCand(const PredictionUnit &pu, MergeCtx& mrgCtx, b /////////////////////////////////////////////////////////////////////// //////// GET Initial Temporal Vector //////// /////////////////////////////////////////////////////////////////////// - int mvPrec = 2; - mvPrec += VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + int mvPrec = MV_FRACTIONAL_BITS_INTERNAL; #if !JVET_L0257_ATMVP_COLBLK_CLIP int mvRndOffs = (1 << mvPrec) >> 1; #endif @@ -4407,21 +4376,16 @@ void PU::applyImv( PredictionUnit& pu, MergeCtx &mrgCtx, InterPrediction *interP { if( !pu.mergeFlag ) { - unsigned imvShift = pu.cu->imv << 1; if( pu.interDir != 2 /* PRED_L1 */ ) { - if (pu.cu->imv) - { - pu.mvd[0] = Mv( pu.mvd[0].hor << imvShift, pu.mvd[0].ver << imvShift ); - } + pu.mvd[0].changePrecisionAmvr( pu.cu->imv, MV_PRECISION_QUARTER); unsigned mvp_idx = pu.mvpIdx[0]; AMVPInfo amvpInfo; PU::fillMvpCand(pu, REF_PIC_LIST_0, pu.refIdx[0], amvpInfo); pu.mvpNum[0] = amvpInfo.numCand; pu.mvpIdx[0] = mvp_idx; pu.mv [0] = amvpInfo.mvCand[mvp_idx] + pu.mvd[0]; - pu.mv[0].hor = pu.mv[0].hor << 
VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - pu.mv[0].ver = pu.mv[0].ver << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + pu.mv[0].changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL); #if JVET_L0293_CPR if (pu.interDir == 1 && pu.cs->slice->getRefPic(REF_PIC_LIST_0, pu.refIdx[REF_PIC_LIST_0])->getPOC() == pu.cs->slice->getPOC()) { @@ -4434,7 +4398,7 @@ void PU::applyImv( PredictionUnit& pu, MergeCtx &mrgCtx, InterPrediction *interP { if( !( pu.cu->cs->slice->getMvdL1ZeroFlag() && pu.interDir == 3 ) && pu.cu->imv )/* PRED_BI */ { - pu.mvd[1] = Mv( pu.mvd[1].hor << imvShift, pu.mvd[1].ver << imvShift ); + pu.mvd[1].changePrecisionAmvr(pu.cu->imv, MV_PRECISION_QUARTER); } unsigned mvp_idx = pu.mvpIdx[1]; AMVPInfo amvpInfo; @@ -4442,8 +4406,7 @@ void PU::applyImv( PredictionUnit& pu, MergeCtx &mrgCtx, InterPrediction *interP pu.mvpNum[1] = amvpInfo.numCand; pu.mvpIdx[1] = mvp_idx; pu.mv [1] = amvpInfo.mvCand[mvp_idx] + pu.mvd[1]; - pu.mv[1].hor = pu.mv[1].hor << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - pu.mv[1].ver = pu.mv[1].ver << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + pu.mv[1].changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL); } } else @@ -5018,7 +4981,6 @@ void CU::resetMVDandMV2Int( CodingUnit& cu, InterPrediction *interPred ) if( !pu.mergeFlag ) { - unsigned imvShift = cu.imv << 1; if( pu.interDir != 2 /* PRED_L1 */ ) { Mv mv = pu.mv[0]; @@ -5028,7 +4990,7 @@ void CU::resetMVDandMV2Int( CodingUnit& cu, InterPrediction *interPred ) pu.mvpNum[0] = amvpInfo.numCand; mvPred = amvpInfo.mvCand[pu.mvpIdx[0]]; - roundMV ( mv, imvShift ); + mv.roundToAmvrSignalPrecision(MV_PRECISION_QUARTER, cu.imv); pu.mv[0] = mv; Mv mvDiff = mv - mvPred; pu.mvd[0] = mvDiff; @@ -5042,7 +5004,7 @@ void CU::resetMVDandMV2Int( CodingUnit& cu, InterPrediction *interPred ) pu.mvpNum[1] = amvpInfo.numCand; mvPred = amvpInfo.mvCand[pu.mvpIdx[1]]; - roundMV ( mv, imvShift ); + mv.roundToAmvrSignalPrecision(MV_PRECISION_QUARTER, cu.imv); Mv mvDiff = mv - mvPred; if( pu.cu->cs->slice->getMvdL1ZeroFlag() && pu.interDir == 3 /* PRED_BI */ ) diff --git a/source/Lib/CommonLib/dtrace_blockstatistics.cpp b/source/Lib/CommonLib/dtrace_blockstatistics.cpp index 310c3e15137c7c4cd6089fc236b78124609b0277..63e2b4f530ce4de285d09f696f8b41a5b13ea38d 100644 --- a/source/Lib/CommonLib/dtrace_blockstatistics.cpp +++ b/source/Lib/CommonLib/dtrace_blockstatistics.cpp @@ -409,7 +409,7 @@ void getAndStoreBlockStatistics(const CodingStructure& cs, const UnitArea& ctuAr void writeAllData(const CodingStructure& cs, const UnitArea& ctuArea) { const int maxNumChannelType = cs.pcv->chrFormat != CHROMA_400 && CS::isDualITree( cs ) ? 2 : 1; - const int nShift = VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + const int nShift = MV_FRACTIONAL_BITS_DIFF; const int nOffset = 1 << (nShift - 1); for( int ch = 0; ch < maxNumChannelType; ch++ ) { @@ -682,7 +682,7 @@ void writeAllData(const CodingStructure& cs, const UnitArea& ctuArea) void writeAllCodedData(const CodingStructure & cs, const UnitArea & ctuArea) { - const int nShift = VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + const int nShift = MV_FRACTIONAL_BITS_DIFF; const int nOffset = 1 << (nShift - 1); const int maxNumChannelType = cs.pcv->chrFormat != CHROMA_400 && CS::isDualITree(cs) ? 
2 : 1; diff --git a/source/Lib/DecoderLib/DecCu.cpp b/source/Lib/DecoderLib/DecCu.cpp index 5de6917ae789c812c51d1982d4c0fd45dcfc80b9..1f411d4e7a42c8f6f2c735f2631731681142e688 100644 --- a/source/Lib/DecoderLib/DecCu.cpp +++ b/source/Lib/DecoderLib/DecCu.cpp @@ -656,18 +656,15 @@ void DecCu::xDeriveCUMV( CodingUnit &cu ) Mv mvLT = affineAMVPInfo.mvCandLT[mvp_idx] + pu.mvdAffi[eRefList][0]; Mv mvRT = affineAMVPInfo.mvCandRT[mvp_idx] + pu.mvdAffi[eRefList][1]; mvRT += pu.mvdAffi[eRefList][0]; - mvLT.hor = mvLT.hor << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - mvLT.ver = mvLT.ver << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - mvRT.hor = mvRT.hor << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - mvRT.ver = mvRT.ver << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + mvLT.changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL); + mvRT.changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL); Mv mvLB; if ( cu.affineType == AFFINEMODEL_6PARAM ) { mvLB = affineAMVPInfo.mvCandLB[mvp_idx] + pu.mvdAffi[eRefList][2]; mvLB += pu.mvdAffi[eRefList][0]; - mvLB.hor = mvLB.hor << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - mvLB.ver = mvLB.ver << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + mvLB.changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL); } PU::setAllAffineMv( pu, mvLT, mvRT, mvLB, eRefList ); } @@ -691,15 +688,13 @@ void DecCu::xDeriveCUMV( CodingUnit &cu ) #if REUSE_CU_RESULTS if (!cu.cs->pcv->isEncoder) #endif - mvd <<= 2; + mvd.changePrecision(MV_PRECISION_INT, MV_PRECISION_QUARTER); } pu.mv [eRefList] = amvpInfo.mvCand[pu.mvpIdx[eRefList]] + mvd; #else pu.mv [eRefList] = amvpInfo.mvCand[pu.mvpIdx [eRefList]] + pu.mvd[eRefList]; #endif - - pu.mv[eRefList].hor = pu.mv[eRefList].hor << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - pu.mv[eRefList].ver = pu.mv[eRefList].ver << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + pu.mv[eRefList].changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL); } } } diff --git a/source/Lib/EncoderLib/InterSearch.cpp b/source/Lib/EncoderLib/InterSearch.cpp index 7d8bbbafb0f6e0119c5690e5f3ff3335e6c1e271..e6cf8673c4cdca186d764e79330b7c0bff687621 100644 --- a/source/Lib/EncoderLib/InterSearch.cpp +++ b/source/Lib/EncoderLib/InterSearch.cpp @@ -768,9 +768,8 @@ int InterSearch::xCPRSearchMVChromaRefine(PredictionUnit& pu, tempSad = sadBestCand[cand]; - Mv mvFullPrecision = cMVCand[cand]; - mvFullPrecision <<= ( 2 + VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE); - pu.mv[0] = mvFullPrecision; + pu.mv[0] = cMVCand[cand]; + pu.mv[0].changePrecision(MV_PRECISION_INT, MV_PRECISION_INTERNAL); pu.interDir = 1; pu.refIdx[0] = pu.cs->slice->getNumRefIdx(REF_PIC_LIST_0) - 1; // last idx in the list @@ -1418,9 +1417,7 @@ bool InterSearch::predCPRSearch(CodingUnit& cu, Partitioner& partitioner, const pu.mvd[REF_PIC_LIST_0] >>= (2); pu.refIdx[REF_PIC_LIST_0] = pu.cs->slice->getNumRefIdx(REF_PIC_LIST_0) - 1; - - pu.mv[REF_PIC_LIST_0].hor = pu.mv[REF_PIC_LIST_0].hor << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - pu.mv[REF_PIC_LIST_0].ver = pu.mv[REF_PIC_LIST_0].ver << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + pu.mv[REF_PIC_LIST_0].changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL); m_ctuRecord[cu.lumaPos()][cu.lumaSize()].bvRecord[pu.bv] = cost; } @@ -1732,8 +1729,7 @@ void InterSearch::predInterSearch(CodingUnit& cu, Partitioner& partitioner) cMvBi [1] = cMvPredBi[1][bestBiPRefIdxL1]; iRefIdxBi[1] = bestBiPRefIdxL1; pu.mv [REF_PIC_LIST_1] = cMvBi[1]; - pu.mv[REF_PIC_LIST_1].hor = pu.mv[REF_PIC_LIST_1].hor << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - 
pu.mv[REF_PIC_LIST_1].ver = pu.mv[REF_PIC_LIST_1].ver << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + pu.mv[REF_PIC_LIST_1].changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL); pu.refIdx[REF_PIC_LIST_1] = iRefIdxBi[1]; pu.mvpIdx[REF_PIC_LIST_1] = bestBiPMvpL1; @@ -1805,8 +1801,7 @@ void InterSearch::predInterSearch(CodingUnit& cu, Partitioner& partitioner) if ( iIter == 0 && !cs.slice->getMvdL1ZeroFlag()) { pu.mv [1 - iRefList] = cMv [1 - iRefList]; - pu.mv[1 - iRefList].hor = pu.mv[1 - iRefList].hor << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - pu.mv[1 - iRefList].ver = pu.mv[1 - iRefList].ver << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + pu.mv[1 - iRefList].changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL); pu.refIdx[1 - iRefList] = iRefIdx[1 - iRefList]; PelUnitBuf predBufTmp = m_tmpPredStorage[1 - iRefList].getBuf( UnitAreaRelative(cu, pu) ); @@ -1877,8 +1872,7 @@ void InterSearch::predInterSearch(CodingUnit& cu, Partitioner& partitioner) { // Set motion pu.mv [eRefPicList] = cMvBi [iRefList]; - pu.mv[eRefPicList].hor = pu.mv[eRefPicList].hor << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - pu.mv[eRefPicList].ver = pu.mv[eRefPicList].ver << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + pu.mv[eRefPicList].changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL); pu.refIdx[eRefPicList] = iRefIdxBi[iRefList]; PelUnitBuf predBufTmp = m_tmpPredStorage[iRefList].getBuf( UnitAreaRelative(cu, pu) ); @@ -1947,10 +1941,8 @@ void InterSearch::predInterSearch(CodingUnit& cu, Partitioner& partitioner) uiLastMode = 2; pu.mv [REF_PIC_LIST_0] = cMvBi[0]; pu.mv [REF_PIC_LIST_1] = cMvBi[1]; - pu.mv[REF_PIC_LIST_0].hor = pu.mv[REF_PIC_LIST_0].hor << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - pu.mv[REF_PIC_LIST_0].ver = pu.mv[REF_PIC_LIST_0].ver << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - pu.mv[REF_PIC_LIST_1].hor = pu.mv[REF_PIC_LIST_1].hor << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - pu.mv[REF_PIC_LIST_1].ver = pu.mv[REF_PIC_LIST_1].ver << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + pu.mv[REF_PIC_LIST_0].changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL); + pu.mv[REF_PIC_LIST_1].changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL); pu.mvd [REF_PIC_LIST_0] = cMvBi[0] - cMvPredBi[0][iRefIdxBi[0]]; pu.mvd [REF_PIC_LIST_1] = cMvBi[1] - cMvPredBi[1][iRefIdxBi[1]]; pu.refIdx[REF_PIC_LIST_0] = iRefIdxBi[0]; @@ -1965,8 +1957,7 @@ void InterSearch::predInterSearch(CodingUnit& cu, Partitioner& partitioner) { uiLastMode = 0; pu.mv [REF_PIC_LIST_0] = cMv[0]; - pu.mv [REF_PIC_LIST_0].hor = pu.mv[REF_PIC_LIST_0].hor << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - pu.mv [REF_PIC_LIST_0].ver = pu.mv[REF_PIC_LIST_0].ver << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + pu.mv [REF_PIC_LIST_0].changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL); pu.mvd [REF_PIC_LIST_0] = cMv[0] - cMvPred[0][iRefIdx[0]]; pu.refIdx[REF_PIC_LIST_0] = iRefIdx[0]; pu.mvpIdx[REF_PIC_LIST_0] = aaiMvpIdx[0][iRefIdx[0]]; @@ -1977,8 +1968,7 @@ void InterSearch::predInterSearch(CodingUnit& cu, Partitioner& partitioner) { uiLastMode = 1; pu.mv [REF_PIC_LIST_1] = cMv[1]; - pu.mv [REF_PIC_LIST_1].hor = pu.mv[REF_PIC_LIST_1].hor << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - pu.mv [REF_PIC_LIST_1].ver = pu.mv[REF_PIC_LIST_1].ver << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + pu.mv [REF_PIC_LIST_1].changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL); pu.mvd [REF_PIC_LIST_1] = cMv[1] - cMvPred[1][iRefIdx[1]]; pu.refIdx[REF_PIC_LIST_1] = iRefIdx[1]; pu.mvpIdx[REF_PIC_LIST_1] = 
aaiMvpIdx[1][iRefIdx[1]]; @@ -2324,8 +2314,7 @@ Distortion InterSearch::xGetTemplateCost( const PredictionUnit& pu, Distortion uiCost = std::numeric_limits<Distortion>::max(); const Picture* picRef = pu.cu->slice->getRefPic( eRefPicList, iRefIdx ); - cMvCand.hor = cMvCand.hor << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - cMvCand.ver = cMvCand.ver << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + cMvCand.changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL); clipMv( cMvCand, pu.cu->lumaPos(), #if JVET_L0231_WRAPAROUND pu.cu->lumaSize(), @@ -2368,12 +2357,10 @@ Distortion InterSearch::xGetAffineTemplateCost( PredictionUnit& pu, PelUnitBuf& // prediction pattern const bool bi = pu.cu->slice->testWeightPred() && pu.cu->slice->getSliceType()==P_SLICE; Mv mv[3]; - mv[0].hor = acMvCand[0].hor << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - mv[0].ver = acMvCand[0].ver << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - mv[1].hor = acMvCand[1].hor << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - mv[1].ver = acMvCand[1].ver << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - mv[2].hor = acMvCand[2].hor << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - mv[2].ver = acMvCand[2].ver << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + memcpy(mv, acMvCand, sizeof(mv)); + mv[0].changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL); + mv[1].changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL); + mv[2].changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL); xPredAffineBlk(COMPONENT_Y, pu, picRef, mv, predBuf, bi, pu.cu->slice->clpRng(COMPONENT_Y)); if( bi ) { @@ -2546,10 +2533,9 @@ void InterSearch::xSetSearchRange ( const PredictionUnit& pu, , IntTZSearchStruct& cStruct ) { - const int iMvShift = 2 + VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + const int iMvShift = MV_FRACTIONAL_BITS_INTERNAL; Mv cFPMvPred = cMvPred; - cFPMvPred.hor = cFPMvPred.hor << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - cFPMvPred.ver = cFPMvPred.ver << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + cFPMvPred.changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL); clipMv( cFPMvPred, pu.cu->lumaPos(), #if JVET_L0231_WRAPAROUND pu.cu->lumaSize(), @@ -2711,15 +2697,13 @@ void InterSearch::xTZSearch( const PredictionUnit& pu, const bool bNewZeroNeighbourhoodTest = bExtendedSettings; int iSearchRange = m_iSearchRange; - rcMv.hor = rcMv.hor << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - rcMv.ver = rcMv.ver << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + rcMv.changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL); clipMv( rcMv, pu.cu->lumaPos(), #if JVET_L0231_WRAPAROUND pu.cu->lumaSize(), #endif *pu.cs->sps ); - rcMv.hor = rcMv.hor >> VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - rcMv.ver = rcMv.ver >> VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + rcMv.changePrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER); rcMv.divideByPowerOf2(2); // init TZSearchStruct @@ -2751,16 +2735,13 @@ void InterSearch::xTZSearch( const PredictionUnit& pu, if (pIntegerMv2Nx2NPred != 0) { Mv integerMv2Nx2NPred = *pIntegerMv2Nx2NPred; - integerMv2Nx2NPred <<= 2; - integerMv2Nx2NPred.hor = integerMv2Nx2NPred.hor << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - integerMv2Nx2NPred.ver = integerMv2Nx2NPred.ver << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + integerMv2Nx2NPred.changePrecision(MV_PRECISION_INT, MV_PRECISION_INTERNAL); clipMv( integerMv2Nx2NPred, pu.cu->lumaPos(), #if JVET_L0231_WRAPAROUND pu.cu->lumaSize(), #endif *pu.cs->sps ); - integerMv2Nx2NPred.hor = integerMv2Nx2NPred.hor >> VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - 
integerMv2Nx2NPred.ver = integerMv2Nx2NPred.ver >> VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + integerMv2Nx2NPred.changePrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER); integerMv2Nx2NPred.divideByPowerOf2(2); if ((rcMv != integerMv2Nx2NPred) && @@ -2986,15 +2967,13 @@ void InterSearch::xTZSearchSelective( const PredictionUnit& pu, int iStartX = 0; int iStartY = 0; int iDist = 0; - rcMv.hor = rcMv.hor << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - rcMv.ver = rcMv.ver << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + rcMv.changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL); clipMv( rcMv, pu.cu->lumaPos(), #if JVET_L0231_WRAPAROUND pu.cu->lumaSize(), #endif *pu.cs->sps ); - rcMv.hor = rcMv.hor >> VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - rcMv.ver = rcMv.ver >> VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + rcMv.changePrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER); rcMv.divideByPowerOf2(2); // init TZSearchStruct @@ -3020,16 +2999,13 @@ void InterSearch::xTZSearchSelective( const PredictionUnit& pu, if ( pIntegerMv2Nx2NPred != 0 ) { Mv integerMv2Nx2NPred = *pIntegerMv2Nx2NPred; - integerMv2Nx2NPred <<= 2; - integerMv2Nx2NPred.hor = integerMv2Nx2NPred.hor << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - integerMv2Nx2NPred.ver = integerMv2Nx2NPred.ver << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + integerMv2Nx2NPred.changePrecision(MV_PRECISION_INT, MV_PRECISION_INTERNAL); clipMv( integerMv2Nx2NPred, pu.cu->lumaPos(), #if JVET_L0231_WRAPAROUND pu.cu->lumaSize(), #endif *pu.cs->sps ); - integerMv2Nx2NPred.hor = integerMv2Nx2NPred.hor >> VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - integerMv2Nx2NPred.ver = integerMv2Nx2NPred.ver >> VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + integerMv2Nx2NPred.changePrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER); integerMv2Nx2NPred.divideByPowerOf2(2); xTZSearchHelp( cStruct, integerMv2Nx2NPred.getHor(), integerMv2Nx2NPred.getVer(), 0, 0); @@ -3150,8 +3126,8 @@ void InterSearch::xPatternSearchIntRefine(PredictionUnit& pu, IntTZSearchStruct& CHECK( (cBaseMvd[0].getHor() & 0x03) != 0 || (cBaseMvd[0].getVer() & 0x03) != 0 , "xPatternSearchIntRefine(): AMVP cand 0 Mvd issue."); CHECK( (cBaseMvd[1].getHor() & 0x03) != 0 || (cBaseMvd[1].getVer() & 0x03) != 0 , "xPatternSearchIntRefine(): AMVP cand 1 Mvd issue."); - roundMV(cBaseMvd[0], cStruct.imvShift); - roundMV(cBaseMvd[1], cStruct.imvShift); + cBaseMvd[0].roundToAmvrSignalPrecision(MV_PRECISION_QUARTER, pu.cu->imv); + cBaseMvd[1].roundToAmvrSignalPrecision(MV_PRECISION_QUARTER, pu.cu->imv); int mvOffset = 1 << cStruct.imvShift; @@ -3169,15 +3145,13 @@ void InterSearch::xPatternSearchIntRefine(PredictionUnit& pu, IntTZSearchStruct& if ( iMVPIdx == 0 || cTestMv[0] != cTestMv[1]) { Mv cTempMV = cTestMv[iMVPIdx]; - cTempMV.hor = cTempMV.hor << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - cTempMV.ver = cTempMV.ver << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + cTempMV.changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL); clipMv(cTempMV, pu.cu->lumaPos(), #if JVET_L0231_WRAPAROUND pu.cu->lumaSize(), #endif sps); - cTempMV.hor = cTempMV.hor >> VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - cTempMV.ver = cTempMV.ver >> VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + cTempMV.changePrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER); m_cDistParam.cur.buf = cStruct.piRefY + cStruct.iRefStride * (cTempMV.getVer() >> 2) + (cTempMV.getHor() >> 2); uiDist = uiSATD = (Distortion) (m_cDistParam.distFunc( m_cDistParam ) * fWeight); } @@ -3427,9 +3401,9 @@ void InterSearch::xPredAffineInterSearch( 
PredictionUnit& pu, int mvScaleHor = nbMv[0].getHor() << shift; int mvScaleVer = nbMv[0].getVer() << shift; Mv dMv = nbMv[1] - nbMv[0]; - mvScaleHor <<= VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - mvScaleVer <<= VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - dMv <<= VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + mvScaleHor <<= MV_FRACTIONAL_BITS_DIFF; + mvScaleVer <<= MV_FRACTIONAL_BITS_DIFF; + dMv <<= MV_FRACTIONAL_BITS_DIFF; dMvHorX = dMv.getHor() << (shift - g_aucLog2[mvInfo->w]); dMvHorY = dMv.getVer() << (shift - g_aucLog2[mvInfo->w]); dMvVerX = -dMvHorY; @@ -3443,7 +3417,7 @@ void InterSearch::xPredAffineInterSearch( PredictionUnit& pu, pu.cu->lumaSize(), #endif *pu.cs->sps); - mvTmp[0].roundMV2SignalPrecision(); + mvTmp[0].roundToPrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER); vx = mvScaleHor + dMvHorX * (pu.Y().x + pu.Y().width - mvInfo->x) + dMvVerX * (pu.Y().y - mvInfo->y); vy = mvScaleVer + dMvHorY * (pu.Y().x + pu.Y().width - mvInfo->x) + dMvVerY * (pu.Y().y - mvInfo->y); roundAffineMv(vx, vy, shift); @@ -3453,11 +3427,9 @@ void InterSearch::xPredAffineInterSearch( PredictionUnit& pu, pu.cu->lumaSize(), #endif *pu.cs->sps); - mvTmp[1].roundMV2SignalPrecision(); - mvTmp[0].hor = mvTmp[0].hor >> VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - mvTmp[0].ver = mvTmp[0].ver >> VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - mvTmp[1].hor = mvTmp[1].hor >> VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - mvTmp[1].ver = mvTmp[1].ver >> VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + mvTmp[1].roundToPrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER); + mvTmp[0].changePrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER); + mvTmp[1].changePrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER); Distortion tmpCost = xGetAffineTemplateCost(pu, origBuf, predBuf, mvTmp, aaiMvpIdx[iRefList][iRefIdxTemp], AMVP_MAX_NUM_CANDS, eRefPicList, iRefIdxTemp); if (tmpCost < uiCandCost) { @@ -3470,16 +3442,12 @@ void InterSearch::xPredAffineInterSearch( PredictionUnit& pu, if ( pu.cu->affineType == AFFINEMODEL_6PARAM ) { Mv mvFour[3]; - mvAffine4Para[iRefList][iRefIdxTemp][0].hor = mvAffine4Para[iRefList][iRefIdxTemp][0].hor << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - mvAffine4Para[iRefList][iRefIdxTemp][0].ver = mvAffine4Para[iRefList][iRefIdxTemp][0].ver << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - mvAffine4Para[iRefList][iRefIdxTemp][1].hor = mvAffine4Para[iRefList][iRefIdxTemp][1].hor << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - mvAffine4Para[iRefList][iRefIdxTemp][1].ver = mvAffine4Para[iRefList][iRefIdxTemp][1].ver << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + mvAffine4Para[iRefList][iRefIdxTemp][0].changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL); + mvAffine4Para[iRefList][iRefIdxTemp][1].changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL); mvFour[0] = mvAffine4Para[iRefList][iRefIdxTemp][0]; mvFour[1] = mvAffine4Para[iRefList][iRefIdxTemp][1]; - mvAffine4Para[iRefList][iRefIdxTemp][0].hor = mvAffine4Para[iRefList][iRefIdxTemp][0].hor >> VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - mvAffine4Para[iRefList][iRefIdxTemp][0].ver = mvAffine4Para[iRefList][iRefIdxTemp][0].ver >> VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - mvAffine4Para[iRefList][iRefIdxTemp][1].hor = mvAffine4Para[iRefList][iRefIdxTemp][1].hor >> VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; - mvAffine4Para[iRefList][iRefIdxTemp][1].ver = mvAffine4Para[iRefList][iRefIdxTemp][1].ver >> VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE; + mvAffine4Para[iRefList][iRefIdxTemp][0].changePrecision(MV_PRECISION_INTERNAL, 
MV_PRECISION_QUARTER);
+        mvAffine4Para[iRefList][iRefIdxTemp][1].changePrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER);
         int shift = MAX_CU_DEPTH;
         int vx2 = (mvFour[0].getHor() << shift) - ((mvFour[1].getVer() - mvFour[0].getVer()) << (shift + g_aucLog2[pu.lheight()] - g_aucLog2[pu.lwidth()]));
@@ -3488,11 +3456,10 @@ void InterSearch::xPredAffineInterSearch( PredictionUnit& pu,
         vy2 >>= shift;
         mvFour[2].hor = vx2;
         mvFour[2].ver = vy2;
-        mvFour[2].roundMV2SignalPrecision();
+        mvFour[2].roundToPrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER);
         for (int i = 0; i < 3; i++)
         {
-          mvFour[i].hor = mvFour[i].hor >> VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE;
-          mvFour[i].ver = mvFour[i].ver >> VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE;
+          mvFour[i].changePrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER);
         }
         Distortion uiCandCostInherit = xGetAffineTemplateCost( pu, origBuf, predBuf, mvFour, aaiMvpIdx[iRefList][iRefIdxTemp], AMVP_MAX_NUM_CANDS, eRefPicList, iRefIdxTemp );
         if ( uiCandCostInherit < uiCandCost )
@@ -4160,12 +4127,9 @@ void InterSearch::xAffineMotionEstimation( PredictionUnit& pu,
   // Set start Mv position, use input mv as started search mv
   Mv acMvTemp[3];
   ::memcpy( acMvTemp, acMv, sizeof(Mv)*3 );
-  acMvTemp[0].hor = acMvTemp[0].hor << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE;
-  acMvTemp[0].ver = acMvTemp[0].ver << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE;
-  acMvTemp[1].hor = acMvTemp[1].hor << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE;
-  acMvTemp[1].ver = acMvTemp[1].ver << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE;
-  acMvTemp[2].hor = acMvTemp[2].hor << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE;
-  acMvTemp[2].ver = acMvTemp[2].ver << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE;
+  acMvTemp[0].changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL);
+  acMvTemp[1].changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL);
+  acMvTemp[2].changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL);

   // Set delta mv
   // malloc buffer
@@ -4221,10 +4185,9 @@ void InterSearch::xAffineMotionEstimation( PredictionUnit& pu,
       DTRACE( g_trace_ctx, D_COMMON, "#mvPredForBits=(%d,%d) \n", acMvPred[i].getHor(), acMvPred[i].getVer() );
       m_pcRdCost->setPredictor( acMvPred[i] );
       DTRACE( g_trace_ctx, D_COMMON, "#mvForBits=(%d,%d) \n", acMvTemp[i].getHor(), acMvTemp[i].getVer() );
-      Mv mv0;
-      mv0.hor = acMvTemp[0].hor >> VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE;
-      mv0.ver = acMvTemp[0].ver >> VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE;
-      const int shift = VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE;
+      Mv mv0 = acMvTemp[0];
+      mv0.changePrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER);
+      const int shift = MV_FRACTIONAL_BITS_DIFF;
       Mv secondPred;
       if ( i != 0 )
       {
@@ -4329,14 +4292,14 @@ void InterSearch::xAffineMotionEstimation( PredictionUnit& pu,
      dDeltaMv[3] = -dAffinePara[3] * width + dAffinePara[2];
    }

-    acDeltaMv[0] = Mv( (int)(dDeltaMv[0] * 4 + SIGN( dDeltaMv[0] ) * 0.5) << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE, (int)(dDeltaMv[2] * 4 + SIGN( dDeltaMv[2] ) * 0.5) << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE
+    acDeltaMv[0] = Mv( (int)(dDeltaMv[0] * 4 + SIGN( dDeltaMv[0] ) * 0.5) << MV_FRACTIONAL_BITS_DIFF, (int)(dDeltaMv[2] * 4 + SIGN( dDeltaMv[2] ) * 0.5) << MV_FRACTIONAL_BITS_DIFF
    );
-    acDeltaMv[1] = Mv( (int)(dDeltaMv[1] * 4 + SIGN( dDeltaMv[1] ) * 0.5) << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE, (int)(dDeltaMv[3] * 4 + SIGN( dDeltaMv[3] ) * 0.5) << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE
+    acDeltaMv[1] = Mv( (int)(dDeltaMv[1] * 4 + SIGN( dDeltaMv[1] ) * 0.5) << MV_FRACTIONAL_BITS_DIFF, (int)(dDeltaMv[3] * 4 + SIGN( dDeltaMv[3] ) * 0.5) << MV_FRACTIONAL_BITS_DIFF
    );

    if ( pu.cu->affineType == AFFINEMODEL_6PARAM )
    {
-      acDeltaMv[2] = Mv( (int)(dDeltaMv[4] * 4 + SIGN( dDeltaMv[4] ) * 0.5) << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE, (int)(dDeltaMv[5] * 4 + SIGN( dDeltaMv[5] ) * 0.5) << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE
+      acDeltaMv[2] = Mv( (int)(dDeltaMv[4] * 4 + SIGN( dDeltaMv[4] ) * 0.5) << MV_FRACTIONAL_BITS_DIFF, (int)(dDeltaMv[5] * 4 + SIGN( dDeltaMv[5] ) * 0.5) << MV_FRACTIONAL_BITS_DIFF
      );
    }
@@ -4360,7 +4323,7 @@ void InterSearch::xAffineMotionEstimation( PredictionUnit& pu,
      acMvTemp[i] += acDeltaMv[i];
      acMvTemp[i].hor = Clip3( -32768, 32767, acMvTemp[i].hor );
      acMvTemp[i].ver = Clip3( -32768, 32767, acMvTemp[i].ver );
-      acMvTemp[i].roundMV2SignalPrecision();
+      acMvTemp[i].roundToPrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER);
      clipMv(acMvTemp[i], pu.cu->lumaPos(),
#if JVET_L0231_WRAPAROUND
             pu.cu->lumaSize(),
@@ -4379,10 +4342,9 @@ void InterSearch::xAffineMotionEstimation( PredictionUnit& pu,
    for ( int i = 0; i < mvNum; i++ )
    {
      m_pcRdCost->setPredictor( acMvPred[i] );
-      Mv mv0;
-      mv0.hor = acMvTemp[0].hor >> VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE;
-      mv0.ver = acMvTemp[0].ver >> VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE;
-      const int shift = VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE;
+      Mv mv0 = acMvTemp[0];
+      mv0.changePrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER);
+      const int shift = MV_FRACTIONAL_BITS_DIFF;
      Mv secondPred;
      if ( i != 0 )
      {
@@ -4416,10 +4378,9 @@ void InterSearch::xAffineMotionEstimation( PredictionUnit& pu,
      for (int i = 0; i < mvNum; i++)
      {
        m_pcRdCost->setPredictor(acMvPred[i]);
-        Mv mv0;
-        mv0.hor = ctrlPtMv[0].hor >> VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE;
-        mv0.ver = ctrlPtMv[0].ver >> VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE;
-        const int shift = VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE;
+        Mv mv0 = ctrlPtMv[0];
+        mv0.changePrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER);
+        const int shift = MV_FRACTIONAL_BITS_DIFF;
        Mv secondPred;
        if (i != 0)
        {
@@ -4442,9 +4403,9 @@ void InterSearch::xAffineMotionEstimation( PredictionUnit& pu,
    if (uiCostBest <= AFFINE_ME_LIST_MVP_TH*m_hevcCost)
    {
      Mv mvPredTmp[3] = { acMvPred[0], acMvPred[1], acMvPred[2] };
-      mvPredTmp[0] <<= VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE;
-      mvPredTmp[1] <<= VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE;
-      mvPredTmp[2] <<= VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE;
+      mvPredTmp[0].changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL);
+      mvPredTmp[1].changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL);
+      mvPredTmp[2].changePrecision(MV_PRECISION_QUARTER, MV_PRECISION_INTERNAL);
      Mv mvME[3];
      ::memcpy(mvME, acMv, sizeof(Mv) * 3);
      Mv dMv = mvME[0] - mvPredTmp[0];
@@ -4497,7 +4458,7 @@ void InterSearch::xAffineMotionEstimation( PredictionUnit& pu,
      acMvTemp[0] = centerMv[0];
      for (int i = 0; i < 4; i++)
      {
-        acMvTemp[1].set(centerMv[1].getHor() + (testPos[i][0] << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE), centerMv[1].getVer() + (testPos[i][1] << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE));
+        acMvTemp[1].set(centerMv[1].getHor() + (testPos[i][0] << MV_FRACTIONAL_BITS_DIFF), centerMv[1].getVer() + (testPos[i][1] << MV_FRACTIONAL_BITS_DIFF));
        checkCPMVRdCost(acMvTemp);
      }
    }
@@ -4505,14 +4466,9 @@ void InterSearch::xAffineMotionEstimation( PredictionUnit& pu,
  }
#endif

-  const int nShift = VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE;
-  const int nOffset = 1 << (nShift - 1);
-  acMv[0].hor = acMv[0].hor >= 0 ? (acMv[0].hor + nOffset) >> nShift : -((-acMv[0].hor + nOffset) >> nShift);
-  acMv[0].ver = acMv[0].ver >= 0 ? (acMv[0].ver + nOffset) >> nShift : -((-acMv[0].ver + nOffset) >> nShift);
-  acMv[1].hor = acMv[1].hor >= 0 ? (acMv[1].hor + nOffset) >> nShift : -((-acMv[1].hor + nOffset) >> nShift);
-  acMv[1].ver = acMv[1].ver >= 0 ? (acMv[1].ver + nOffset) >> nShift : -((-acMv[1].ver + nOffset) >> nShift);
-  acMv[2].hor = acMv[2].hor >= 0 ? (acMv[2].hor + nOffset) >> nShift : -((-acMv[2].hor + nOffset) >> nShift);
-  acMv[2].ver = acMv[2].ver >= 0 ? (acMv[2].ver + nOffset) >> nShift : -((-acMv[2].ver + nOffset) >> nShift);
+  acMv[0].changePrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER);
+  acMv[1].changePrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER);
+  acMv[2].changePrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER);

  // free buffer
  for (int i = 0; i<iParaNum; i++)
@@ -4605,24 +4561,24 @@ void InterSearch::xExtDIFUpSamplingH( CPelBuf* pattern )

  const ChromaFormat chFmt = m_currChromaFormat;

-  m_if.filterHor(COMPONENT_Y, srcPtr, srcStride, m_filteredBlockTmp[0][0], intStride, width + 1, height + filterSize, 0 << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE, false, chFmt, clpRng);
-  m_if.filterHor(COMPONENT_Y, srcPtr, srcStride, m_filteredBlockTmp[2][0], intStride, width + 1, height + filterSize, 2 << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE, false, chFmt, clpRng);
+  m_if.filterHor(COMPONENT_Y, srcPtr, srcStride, m_filteredBlockTmp[0][0], intStride, width + 1, height + filterSize, 0 << MV_FRACTIONAL_BITS_DIFF, false, chFmt, clpRng);
+  m_if.filterHor(COMPONENT_Y, srcPtr, srcStride, m_filteredBlockTmp[2][0], intStride, width + 1, height + filterSize, 2 << MV_FRACTIONAL_BITS_DIFF, false, chFmt, clpRng);

  intPtr = m_filteredBlockTmp[0][0] + halfFilterSize * intStride + 1;
  dstPtr = m_filteredBlock[0][0][0];
-  m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width + 0, height + 0, 0 << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE, false, true, chFmt, clpRng);
+  m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width + 0, height + 0, 0 << MV_FRACTIONAL_BITS_DIFF, false, true, chFmt, clpRng);

  intPtr = m_filteredBlockTmp[0][0] + (halfFilterSize - 1) * intStride + 1;
  dstPtr = m_filteredBlock[2][0][0];
-  m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width + 0, height + 1, 2 << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE, false, true, chFmt, clpRng);
+  m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width + 0, height + 1, 2 << MV_FRACTIONAL_BITS_DIFF, false, true, chFmt, clpRng);

  intPtr = m_filteredBlockTmp[2][0] + halfFilterSize * intStride;
  dstPtr = m_filteredBlock[0][2][0];
-  m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width + 1, height + 0, 0 << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE, false, true, chFmt, clpRng);
+  m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width + 1, height + 0, 0 << MV_FRACTIONAL_BITS_DIFF, false, true, chFmt, clpRng);

  intPtr = m_filteredBlockTmp[2][0] + (halfFilterSize - 1) * intStride;
  dstPtr = m_filteredBlock[2][2][0];
-  m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width + 1, height + 1, 2 << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE, false, true, chFmt, clpRng);
+  m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width + 1, height + 1, 2 << MV_FRACTIONAL_BITS_DIFF, false, true, chFmt, clpRng);
}
@@ -4667,7 +4623,7 @@ void InterSearch::xExtDIFUpSamplingQ( CPelBuf* pattern, Mv halfPelRef )
  {
    srcPtr += 1;
  }
-  m_if.filterHor(COMPONENT_Y, srcPtr, srcStride, intPtr, intStride, width, extHeight, 1 << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE, false, chFmt, clpRng);
+  m_if.filterHor(COMPONENT_Y, srcPtr, srcStride, intPtr, intStride, width, extHeight, 1 << MV_FRACTIONAL_BITS_DIFF, false, chFmt, clpRng);

  // Horizontal filter 3/4
  srcPtr = pattern->buf - halfFilterSize*srcStride - 1;
@@ -4680,7 +4636,7 @@ void InterSearch::xExtDIFUpSamplingQ( CPelBuf* pattern, Mv halfPelRef )
  {
    srcPtr += 1;
  }
-  m_if.filterHor(COMPONENT_Y, srcPtr, srcStride, intPtr, intStride, width, extHeight, 3 << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE, false, chFmt, clpRng);
+  m_if.filterHor(COMPONENT_Y, srcPtr, srcStride, intPtr, intStride, width, extHeight, 3 << MV_FRACTIONAL_BITS_DIFF, false, chFmt, clpRng);

  // Generate @ 1,1
  intPtr = m_filteredBlockTmp[1][0] + (halfFilterSize-1) * intStride;
@@ -4689,12 +4645,12 @@ void InterSearch::xExtDIFUpSamplingQ( CPelBuf* pattern, Mv halfPelRef )
  {
    intPtr += intStride;
  }
-  m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 1 << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE, false, true, chFmt, clpRng);
+  m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 1 << MV_FRACTIONAL_BITS_DIFF, false, true, chFmt, clpRng);

  // Generate @ 3,1
  intPtr = m_filteredBlockTmp[1][0] + (halfFilterSize-1) * intStride;
  dstPtr = m_filteredBlock[3][1][0];
-  m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 3 << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE, false, true, chFmt, clpRng);
+  m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 3 << MV_FRACTIONAL_BITS_DIFF, false, true, chFmt, clpRng);

  if (halfPelRef.getVer() != 0)
  {
@@ -4705,7 +4661,7 @@ void InterSearch::xExtDIFUpSamplingQ( CPelBuf* pattern, Mv halfPelRef )
    {
      intPtr += intStride;
    }
-    m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 2 << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE, false, true, chFmt, clpRng);
+    m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 2 << MV_FRACTIONAL_BITS_DIFF, false, true, chFmt, clpRng);

    // Generate @ 2,3
    intPtr = m_filteredBlockTmp[3][0] + (halfFilterSize - 1) * intStride;
@@ -4714,19 +4670,19 @@ void InterSearch::xExtDIFUpSamplingQ( CPelBuf* pattern, Mv halfPelRef )
    {
      intPtr += intStride;
    }
-    m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 2 << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE, false, true, chFmt, clpRng);
+    m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 2 << MV_FRACTIONAL_BITS_DIFF, false, true, chFmt, clpRng);
  }
  else
  {
    // Generate @ 0,1
    intPtr = m_filteredBlockTmp[1][0] + halfFilterSize * intStride;
    dstPtr = m_filteredBlock[0][1][0];
-    m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 0 << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE, false, true, chFmt, clpRng);
+    m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 0 << MV_FRACTIONAL_BITS_DIFF, false, true, chFmt, clpRng);

    // Generate @ 0,3
    intPtr = m_filteredBlockTmp[3][0] + halfFilterSize * intStride;
    dstPtr = m_filteredBlock[0][3][0];
-    m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 0 << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE, false, true, chFmt, clpRng);
+    m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 0 << MV_FRACTIONAL_BITS_DIFF, false, true, chFmt, clpRng);
  }

  if (halfPelRef.getHor() != 0)
@@ -4742,7 +4698,7 @@ void InterSearch::xExtDIFUpSamplingQ( CPelBuf* pattern, Mv halfPelRef )
    {
      intPtr += intStride;
    }
-    m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 1 << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE, false, true, chFmt, clpRng);
+    m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 1 << MV_FRACTIONAL_BITS_DIFF, false, true, chFmt, clpRng);

    // Generate @ 3,2
    intPtr = m_filteredBlockTmp[2][0] + (halfFilterSize - 1) * intStride;
@@ -4755,7 +4711,7 @@ void InterSearch::xExtDIFUpSamplingQ( CPelBuf* pattern, Mv halfPelRef )
    {
      intPtr += intStride;
    }
-    m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 3 << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE, false, true, chFmt, clpRng);
+    m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 3 << MV_FRACTIONAL_BITS_DIFF, false, true, chFmt, clpRng);
  }
  else
  {
@@ -4766,7 +4722,7 @@ void InterSearch::xExtDIFUpSamplingQ( CPelBuf* pattern, Mv halfPelRef )
    {
      intPtr += intStride;
    }
-    m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 1 << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE, false, true, chFmt, clpRng);
+    m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 1 << MV_FRACTIONAL_BITS_DIFF, false, true, chFmt, clpRng);

    // Generate @ 3,0
    intPtr = m_filteredBlockTmp[0][0] + (halfFilterSize - 1) * intStride + 1;
@@ -4775,7 +4731,7 @@ void InterSearch::xExtDIFUpSamplingQ( CPelBuf* pattern, Mv halfPelRef )
    {
      intPtr += intStride;
    }
-    m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 3 << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE, false, true, chFmt, clpRng);
+    m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 3 << MV_FRACTIONAL_BITS_DIFF, false, true, chFmt, clpRng);
  }

  // Generate @ 1,3
@@ -4785,12 +4741,12 @@ void InterSearch::xExtDIFUpSamplingQ( CPelBuf* pattern, Mv halfPelRef )
  {
    intPtr += intStride;
  }
-  m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 1 << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE, false, true, chFmt, clpRng);
+  m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 1 << MV_FRACTIONAL_BITS_DIFF, false, true, chFmt, clpRng);

  // Generate @ 3,3
  intPtr = m_filteredBlockTmp[3][0] + (halfFilterSize - 1) * intStride;
  dstPtr = m_filteredBlock[3][3][0];
-  m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 3 << VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE, false, true, chFmt, clpRng);
+  m_if.filterVer(COMPONENT_Y, intPtr, intStride, dstPtr, dstStride, width, height, 3 << MV_FRACTIONAL_BITS_DIFF, false, true, chFmt, clpRng);
}
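
Note (illustration only, not part of the patch): the hunks above replace open-coded shifts by VCEG_AZ07_MV_ADD_PRECISION_BIT_FOR_STORE with the Mv helpers changePrecision() and roundToPrecision(). The standalone C++ sketch below shows the conversion those helpers are assumed to perform; the MvPrecision values and helper bodies are hypothetical stand-ins, and only the rounding behaviour of the removed nShift/nOffset code in xAffineMotionEstimation() is taken from the patch itself.

// mv_precision_sketch.cpp -- standalone illustration, not VTM code.

// Hypothetical precision identifiers: the value is taken to be the number of
// fractional MV bits, so only the differences between values matter.
enum MvPrecision
{
  MV_PRECISION_INT      = 0,   // integer-pel
  MV_PRECISION_QUARTER  = 2,   // 1/4-pel, the precision MVs are signalled in
  MV_PRECISION_INTERNAL = 4    // finer internal storage precision (assumed)
};

struct Mv
{
  int hor = 0;
  int ver = 0;

  // Convert from 'src' to 'dst' precision: going finer is a left shift,
  // going coarser rounds half away from zero, matching the removed
  // nShift/nOffset code that this helper replaces in the patch.
  void changePrecision( MvPrecision src, MvPrecision dst )
  {
    const int shift = (int)dst - (int)src;
    if( shift >= 0 )
    {
      hor <<= shift;
      ver <<= shift;
    }
    else
    {
      const int rightShift = -shift;
      const int offset     = 1 << ( rightShift - 1 );
      hor = hor >= 0 ? ( hor + offset ) >> rightShift : -( ( -hor + offset ) >> rightShift );
      ver = ver >= 0 ? ( ver + offset ) >> rightShift : -( ( -ver + offset ) >> rightShift );
    }
  }

  // Round to the coarser 'dst' precision while keeping the vector stored at
  // 'src' precision, the role roundToPrecision() plays where it replaces
  // roundMV2SignalPrecision() above.
  void roundToPrecision( MvPrecision src, MvPrecision dst )
  {
    changePrecision( src, dst );
    changePrecision( dst, src );
  }
};

// Example: with the assumed values, (13, -7) at internal precision becomes
// (3, -2) at quarter precision via
// changePrecision(MV_PRECISION_INTERNAL, MV_PRECISION_QUARTER), the same
// result the former (v + nOffset) >> nShift rounding produced.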