m_CABACEstimator->getCtx() = other->m_CABACEstimator->getCtx();
}
#endif
void EncCu::xCheckModeSplit(CodingStructure *&tempCS, CodingStructure *&bestCS, Partitioner &partitioner, const EncTestMode& encTestMode
, LutMotionCand* &tempMotCandLUTs
, LutMotionCand* &bestMotCandLUTs
, UnitArea parArea
)

{
const int qp = encTestMode.qp;
const PPS &pps = *tempCS->pps;
const Slice &slice = *tempCS->slice;
const bool bIsLosslessMode = false; // False at this level. Next level down may set it to true.
const int oldPrevQp = tempCS->prevQP[partitioner.chType];
const uint32_t currDepth = partitioner.currDepth;
const unsigned wParIdx = gp_sizeIdxInfo->idxFrom(parArea.lwidth());
const unsigned hParIdx = gp_sizeIdxInfo->idxFrom(parArea.lheight());
tempCS->slice->copyMotionLUTs(tempMotCandLUTs, m_pSplitTempMotLUTs[wParIdx][hParIdx]);

const PartSplit split = getPartSplit( encTestMode );
CHECK( split == CU_DONT_SPLIT, "No proper split provided!" );
tempCS->initStructData( qp, bIsLosslessMode );
m_CABACEstimator->getCtx() = m_CurrCtx->start;
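// Snapshot the split-signalling CABAC sub-contexts so they can be restored after the speculative split-bit estimate below.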
const TempCtx ctxStartSP( m_CtxCache, SubCtx( Ctx::SplitFlag, m_CABACEstimator->getCtx() ) );
#if JVET_M0421_SPLIT_SIG
const TempCtx ctxStartQt( m_CtxCache, SubCtx( Ctx::SplitQtFlag, m_CABACEstimator->getCtx() ) );
const TempCtx ctxStartHv( m_CtxCache, SubCtx( Ctx::SplitHvFlag, m_CABACEstimator->getCtx() ) );
const TempCtx ctxStart12( m_CtxCache, SubCtx( Ctx::Split12Flag, m_CABACEstimator->getCtx() ) );
#else
const TempCtx ctxStartBT( m_CtxCache, SubCtx( Ctx::BTSplitFlag, m_CABACEstimator->getCtx() ) );

#endif

m_CABACEstimator->resetBits();

#if JVET_M0421_SPLIT_SIG
m_CABACEstimator->split_cu_mode( split, *tempCS, partitioner );
#else
if( partitioner.getImplicitSplit( *tempCS ) != CU_QUAD_SPLIT )
{
if( partitioner.canSplit( CU_QUAD_SPLIT, *tempCS ) )

{
m_CABACEstimator->split_cu_flag( split == CU_QUAD_SPLIT, *tempCS, partitioner );
}
if( split != CU_QUAD_SPLIT )
{
m_CABACEstimator->split_cu_mode_mt( split, *tempCS, partitioner );

}
}
#endif
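// Early termination: estimate the RD cost of the split-signalling bits plus a slightly discounted copy of the current best cost; if even this optimistic bound exceeds bestCS->cost, the split is not tested.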
const double factor = ( tempCS->currQP[partitioner.chType] > 30 ? 1.1 : 1.075 );
const double cost = m_pcRdCost->calcRdCost( uint64_t( m_CABACEstimator->getEstFracBits() + ( ( bestCS->fracBits ) / factor ) ), Distortion( bestCS->dist / factor ) );

m_CABACEstimator->getCtx() = SubCtx( Ctx::SplitFlag, ctxStartSP );
#if JVET_M0421_SPLIT_SIG
m_CABACEstimator->getCtx() = SubCtx( Ctx::SplitQtFlag, ctxStartQt );
m_CABACEstimator->getCtx() = SubCtx( Ctx::SplitHvFlag, ctxStartHv );
m_CABACEstimator->getCtx() = SubCtx( Ctx::Split12Flag, ctxStart12 );
#else
m_CABACEstimator->getCtx() = SubCtx( Ctx::BTSplitFlag, ctxStartBT );

#endif
if( cost > bestCS->cost )
{
xCheckBestMode( tempCS, bestCS, partitioner, encTestMode );
return;

}
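// The split is worth testing: descend into the sub-partitions, opening a new CABAC context level and clearing the reconstruction buffer.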
partitioner.splitCurrArea( split, *tempCS );
m_CurrCtx++;
tempCS->getRecoBuf().fill( 0 );
AffineMVInfo tmpMVInfo;
bool isAffMVInfoSaved;
m_pcInterSearch->savePrevAffMVInfo(0, tmpMVInfo, isAffMVInfoSaved);

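// Recursively compress each sub-CU; if any sub-CU cannot be coded (cost == MAX_DOUBLE), abandon this split.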
do
{
const auto &subCUArea = partitioner.currArea();
if( tempCS->picture->Y().contains( subCUArea.lumaPos() ) )
{
const unsigned wIdx = gp_sizeIdxInfo->idxFrom( subCUArea.lwidth () );
const unsigned hIdx = gp_sizeIdxInfo->idxFrom( subCUArea.lheight() );
CodingStructure *tempSubCS = m_pTempCS[wIdx][hIdx];
CodingStructure *bestSubCS = m_pBestCS[wIdx][hIdx];
tempCS->initSubStructure( *tempSubCS, partitioner.chType, subCUArea, false );
tempCS->initSubStructure( *bestSubCS, partitioner.chType, subCUArea, false );
LutMotionCand *tempSubMotCandLUTs = m_pTempMotLUTs[wIdx][hIdx];
LutMotionCand *bestSubMotCandLUTs = m_pBestMotLUTs[wIdx][hIdx];
if (tempCS->chType == CHANNEL_TYPE_LUMA)
{
tempCS->slice->copyMotionLUTs(tempMotCandLUTs, tempSubMotCandLUTs);
tempCS->slice->copyMotionLUTs(tempMotCandLUTs, bestSubMotCandLUTs);
}

xCompressCU( tempSubCS, bestSubCS, partitioner
, tempSubMotCandLUTs
, bestSubMotCandLUTs
);

if( bestSubCS->cost == MAX_DOUBLE )
{
CHECK( split == CU_QUAD_SPLIT, "Split decision reusing cannot skip quad split" );
tempCS->cost = MAX_DOUBLE;
m_CurrCtx--;
partitioner.exitCurrSplit();

xCheckBestMode( tempCS, bestCS, partitioner, encTestMode );
{
std::swap(tempMotCandLUTs, bestMotCandLUTs);
}

return;
}
bool keepResi = KEEP_PRED_AND_RESI_SIGNALS;
tempCS->useSubStructure( *bestSubCS, partitioner.chType, CS::getArea( *tempCS, subCUArea, partitioner.chType ), KEEP_PRED_AND_RESI_SIGNALS, true, keepResi, keepResi );
tempCS->slice->copyMotionLUTs(bestSubMotCandLUTs, tempMotCandLUTs);

if(currDepth < pps.getMaxCuDQPDepth())
{
tempCS->prevQP[partitioner.chType] = bestSubCS->prevQP[partitioner.chType];
}
tempSubCS->releaseIntermediateData();
bestSubCS->releaseIntermediateData();
}
} while( partitioner.nextPart( *tempCS ) );
partitioner.exitCurrSplit();
m_CurrCtx--;
// Finally, generate split-signaling bits for RD-cost check
const PartSplit implicitSplit = partitioner.getImplicitSplit( *tempCS );
{
bool enforceQT = implicitSplit == CU_QUAD_SPLIT;
#if HM_QTBT_REPRODUCE_FAST_LCTU_BUG
// LARGE CTU bug

{
unsigned minDepth = 0;
unsigned maxDepth = g_aucLog2[tempCS->sps->getCTUSize()] - g_aucLog2[tempCS->sps->getMinQTSize(slice.getSliceType(), partitioner.chType)];

if( auto ad = dynamic_cast<AdaptiveDepthPartitioner*>( &partitioner ) )
{
ad->setMaxMinDepth( minDepth, maxDepth, *tempCS );
}
if( minDepth > partitioner.currQtDepth )
{
// enforce QT
enforceQT = true;
}
}
#endif
if( !enforceQT )
{
m_CABACEstimator->resetBits();
#if JVET_M0421_SPLIT_SIG
m_CABACEstimator->split_cu_mode( split, *tempCS, partitioner );
#else

if( partitioner.canSplit( CU_QUAD_SPLIT, *tempCS ) )
{
m_CABACEstimator->split_cu_flag( split == CU_QUAD_SPLIT, *tempCS, partitioner );
}
if( split != CU_QUAD_SPLIT )
{
m_CABACEstimator->split_cu_mode_mt( split, *tempCS, partitioner );
}

#endif
tempCS->fracBits += m_CABACEstimator->getEstFracBits(); // split bits
}
}
tempCS->cost = m_pcRdCost->calcRdCost( tempCS->fracBits, tempCS->dist );
// Check Delta QP bits for the split structure
xCheckDQP( *tempCS, partitioner, true );
// If the configuration being tested exceeds the maximum number of bytes for a slice / slice-segment, then
// a proper RD evaluation cannot be performed. Therefore, termination of the
// slice / slice-segment must be made prior to this CTU.
// This can be achieved by forcing the decision to be that of the rpcTempCU.
// The exception is that each slice / slice-segment must contain at least one CTU.
if (bestCS->cost != MAX_DOUBLE)
{
#if HEVC_TILES_WPP
const TileMap& tileMap = *tempCS->picture->tileMap;
#endif
#if HEVC_TILES_WPP || HEVC_DEPENDENT_SLICES
const uint32_t CtuAddr = CU::getCtuAddr( *bestCS->getCU( partitioner.chType ) );
#endif
const bool isEndOfSlice = slice.getSliceMode() == FIXED_NUMBER_OF_BYTES
&& ((slice.getSliceBits() + CS::getEstBits(*bestCS)) > slice.getSliceArgument() << 3)
#if HEVC_TILES_WPP
&& CtuAddr != tileMap.getCtuTsToRsAddrMap(slice.getSliceCurStartCtuTsAddr())
#endif
#if HEVC_DEPENDENT_SLICES
&& CtuAddr != tileMap.getCtuTsToRsAddrMap(slice.getSliceSegmentCurStartCtuTsAddr());
#else
;
#endif
#if HEVC_DEPENDENT_SLICES
const bool isEndOfSliceSegment = slice.getSliceSegmentMode() == FIXED_NUMBER_OF_BYTES
&& ((slice.getSliceSegmentBits() + CS::getEstBits(*bestCS)) > slice.getSliceSegmentArgument() << 3)
&& CtuAddr != tileMap.getCtuTsToRsAddrMap(slice.getSliceSegmentCurStartCtuTsAddr());
// Do not need to check slice condition for slice-segment since a slice-segment is a subset of a slice.
if (isEndOfSlice || isEndOfSliceSegment)
#else
if(isEndOfSlice)
#endif
{
bestCS->cost = MAX_DOUBLE;
}
}
// RD check for sub partitioned coding structure.

xCheckBestMode( tempCS, bestCS, partitioner, encTestMode );
if (isAffMVInfoSaved)
m_pcInterSearch->addAffMVInfo(tmpMVInfo);
if (!slice.isIntra()
&& tempCS->chType == CHANNEL_TYPE_LUMA
)
{
std::swap(tempMotCandLUTs, bestMotCandLUTs);
}
tempCS->slice->copyMotionLUTs(m_pSplitTempMotLUTs[wParIdx][hParIdx], tempMotCandLUTs);
}

tempCS->releaseIntermediateData();
tempCS->prevQP[partitioner.chType] = oldPrevQp;
}
void EncCu::xCheckRDCostIntra( CodingStructure *&tempCS, CodingStructure *&bestCS, Partitioner &partitioner, const EncTestMode& encTestMode )
{

#if !JVET_M0464_UNI_MTS
double bestInterCost = m_modeCtrl->getBestInterCost();
double costSize2Nx2NemtFirstPass = m_modeCtrl->getEmtSize2Nx2NFirstPassCost();
bool skipSecondEmtPass = m_modeCtrl->getSkipSecondEMTPass();
const SPS &sps = *tempCS->sps;
#endif
const PPS &pps = *tempCS->pps;
#if !JVET_M0464_UNI_MTS

const CodingUnit *bestCU = bestCS->getCU( partitioner.chType );
const int maxSizeEMT = EMT_INTRA_MAX_CU_WITH_QTBT;

uint8_t considerEmtSecondPass = ( sps.getSpsNext().getUseIntraEMT() && isLuma( partitioner.chType ) && partitioner.currArea().lwidth() <= maxSizeEMT && partitioner.currArea().lheight() <= maxSizeEMT ) ? 1 : 0;

#endif
Distortion interHad = m_modeCtrl->getInterHad();

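// EMT is tested in up to two passes: the first pass (emtCuFlag == 0) uses DCT-2 only, the second evaluates the explicit multiple-transform set when allowed.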
for( uint8_t emtCuFlag = 0; emtCuFlag <= considerEmtSecondPass; emtCuFlag++ )
{
//Possible early EMT tests interruptions
//2) Second EMT pass. This "if clause" is necessary because of the NSST and PDPC "for loops".
if( emtCuFlag && skipSecondEmtPass )
{
continue;
}
//3) if interHad is 0, only try further modes if some intra mode was already better than inter
if( m_pcEncCfg->getUsePbIntraFast() && !tempCS->slice->isIntra() && bestCU && CU::isInter( *bestCS->getCU( partitioner.chType ) ) && interHad == 0 )
{
continue;
}

tempCS->initStructData( encTestMode.qp, encTestMode.lossless );
CodingUnit &cu = tempCS->addCU( CS::getArea( *tempCS, tempCS->area, partitioner.chType ), partitioner.chType );
partitioner.setCUData( cu );
cu.slice = tempCS->slice;
#if HEVC_TILES_WPP
cu.tileIdx = tempCS->picture->tileMap->getTileIdxMap( tempCS->area.lumaPos() );
#endif
cu.skip = false;
cu.mmvdSkip = false;

cu.predMode = MODE_INTRA;
cu.transQuantBypass = encTestMode.lossless;
cu.chromaQpAdj = cu.transQuantBypass ? 0 : m_cuChromaQpOffsetIdxPlus1;
cu.qp = encTestMode.qp;
//cu.ipcm = false;

cu.emtFlag = emtCuFlag;

CU::addPUs( cu );
tempCS->interHad = interHad;
if( isLuma( partitioner.chType ) )
{
m_pcIntraSearch->estIntraPredLumaQT( cu, partitioner );
if (m_pcEncCfg->getUsePbIntraFast() && tempCS->dist == std::numeric_limits<Distortion>::max()
&& tempCS->interHad == 0)
{
interHad = 0;
// JEM assumes that, from now on, only perfect reconstructions can beat the inter mode
m_modeCtrl->enforceInterHad( 0 );

continue;

}
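// For a shared luma/chroma tree, copy the luma reconstruction into the picture buffer so that it is available to subsequent prediction (e.g. chroma from luma).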
if( !CS::isDualITree( *tempCS ) )
{
cu.cs->picture->getRecoBuf( cu.Y() ).copyFrom( cu.cs->getRecoBuf( COMPONENT_Y ) );
}
}
if( tempCS->area.chromaFormat != CHROMA_400 && ( partitioner.chType == CHANNEL_TYPE_CHROMA || !CS::isDualITree( *tempCS ) ) )
{
m_pcIntraSearch->estIntraPredChromaQT( cu, partitioner );
}
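// Derive rootCbf from the CBFs of all valid components of the first TU.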
cu.rootCbf = false;
for( uint32_t t = 0; t < getNumberValidTBlocks( *cu.cs->pcv ); t++ )
{
cu.rootCbf |= cu.firstTU->cbf[t] != 0;
}
// Get total bits for current mode: encode CU
m_CABACEstimator->resetBits();
if( pps.getTransquantBypassEnabledFlag() )
{
m_CABACEstimator->cu_transquant_bypass_flag( cu );
}
if( !cu.cs->slice->isIntra() )
{
m_CABACEstimator->cu_skip_flag ( cu );
}
m_CABACEstimator->pred_mode ( cu );
m_CABACEstimator->extend_ref_line( cu );

m_CABACEstimator->cu_pred_data ( cu );
m_CABACEstimator->pcm_data ( cu, partitioner );

// Encode Coefficients
CUCtx cuCtx;
cuCtx.isDQPCoded = true;
cuCtx.isChromaQpAdjCoded = true;
m_CABACEstimator->cu_residual( cu, partitioner, cuCtx );
tempCS->fracBits = m_CABACEstimator->getEstFracBits();
tempCS->cost = m_pcRdCost->calcRdCost(tempCS->fracBits, tempCS->dist);
xEncodeDontSplit( *tempCS, partitioner );
xCheckDQP( *tempCS, partitioner );

// we save the cost of the modes for the first EMT pass
if( !emtCuFlag ) static_cast< double& >( costSize2Nx2NemtFirstPass ) = tempCS->cost;

#if WCG_EXT
DTRACE_MODE_COST( *tempCS, m_pcRdCost->getLambda( true ) );
#else
DTRACE_MODE_COST( *tempCS, m_pcRdCost->getLambda() );
#endif
xCheckBestMode( tempCS, bestCS, partitioner, encTestMode );

//now we check whether the second pass of SIZE_2Nx2N and the whole Intra SIZE_NxN should be skipped or not
if( !emtCuFlag && !tempCS->slice->isIntra() && bestCU && bestCU->predMode != MODE_INTRA && m_pcEncCfg->getFastInterEMT() )

{
const double thEmtInterFastSkipIntra = 1.4; // Skip checking Intra if "2Nx2N using DCT2" is worse than best Inter mode
if( costSize2Nx2NemtFirstPass > thEmtInterFastSkipIntra * bestInterCost )
{
skipSecondEmtPass = true;
m_modeCtrl->setSkipSecondEMTPass( true );
break;
}
}

} //for emtCuFlag
}
void EncCu::xCheckIntraPCM(CodingStructure *&tempCS, CodingStructure *&bestCS, Partitioner &partitioner, const EncTestMode& encTestMode )
{
tempCS->initStructData( encTestMode.qp, encTestMode.lossless );
CodingUnit &cu = tempCS->addCU( CS::getArea( *tempCS, tempCS->area, partitioner.chType ), partitioner.chType );

partitioner.setCUData( cu );
cu.slice = tempCS->slice;
#if HEVC_TILES_WPP
cu.tileIdx = tempCS->picture->tileMap->getTileIdxMap( tempCS->area.lumaPos() );
#endif
cu.skip = false;
cu.mmvdSkip = false;

cu.predMode = MODE_INTRA;
cu.transQuantBypass = encTestMode.lossless;
cu.chromaQpAdj = cu.transQuantBypass ? 0 : m_cuChromaQpOffsetIdxPlus1;
cu.qp = encTestMode.qp;
cu.ipcm = true;
tempCS->addPU( CS::getArea( *tempCS, tempCS->area, partitioner.chType ), partitioner.chType );
tempCS->addTU( CS::getArea( *tempCS, tempCS->area, partitioner.chType ), partitioner.chType );

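// Perform the PCM search (samples are coded verbatim), then estimate the bits for the CU header and PCM data.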
m_pcIntraSearch->IPCMSearch(*tempCS, partitioner);
m_CABACEstimator->getCtx() = m_CurrCtx->start;
m_CABACEstimator->resetBits();
if( tempCS->pps->getTransquantBypassEnabledFlag() )
{
m_CABACEstimator->cu_transquant_bypass_flag( cu );
}
if( !cu.cs->slice->isIntra() )
{
m_CABACEstimator->cu_skip_flag ( cu );
}
m_CABACEstimator->pred_mode ( cu );
m_CABACEstimator->pcm_data ( cu, partitioner );

tempCS->fracBits = m_CABACEstimator->getEstFracBits();
tempCS->cost = m_pcRdCost->calcRdCost(tempCS->fracBits, tempCS->dist);
xEncodeDontSplit( *tempCS, partitioner );
xCheckDQP( *tempCS, partitioner );
#if WCG_EXT
DTRACE_MODE_COST( *tempCS, m_pcRdCost->getLambda( true ) );
#else
DTRACE_MODE_COST( *tempCS, m_pcRdCost->getLambda() );
#endif
xCheckBestMode( tempCS, bestCS, partitioner, encTestMode );
}
void EncCu::xCheckDQP( CodingStructure& cs, Partitioner& partitioner, bool bKeepCtx )
{
CHECK( bKeepCtx && cs.cus.size() <= 1 && partitioner.getImplicitSplit( cs ) == CU_DONT_SPLIT, "bKeepCtx should only be set in split case" );
CHECK( !bKeepCtx && cs.cus.size() > 1, "bKeepCtx should never be set for non-split case" );
if( !cs.pps->getUseDQP() )
{
return;
}

if (CS::isDualITree(cs) && isChroma(partitioner.chType))
{
return;
}

if( bKeepCtx && partitioner.currDepth != cs.pps->getMaxCuDQPDepth() )
{
return;
}
if( !bKeepCtx && partitioner.currDepth > cs.pps->getMaxCuDQPDepth() )
{
return;
}
CodingUnit* cuFirst = cs.getCU( partitioner.chType );
CHECK( !cuFirst, "No CU available" );
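// A delta QP is only signalled if at least one CU in the current area has a non-zero residual (rootCbf).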
bool hasResidual = false;
for( const auto &cu : cs.cus )
{
if( cu->rootCbf )
{
hasResidual = true;
break;
}
}
int predQP = CU::predictQP( *cuFirst, cs.prevQP[partitioner.chType] );
if( hasResidual )
{
TempCtx ctxTemp( m_CtxCache );
if( !bKeepCtx ) ctxTemp = SubCtx( Ctx::DeltaQP, m_CABACEstimator->getCtx() );
m_CABACEstimator->resetBits();
m_CABACEstimator->cu_qp_delta( *cuFirst, predQP, cuFirst->qp );
cs.fracBits += m_CABACEstimator->getEstFracBits(); // dQP bits
cs.cost = m_pcRdCost->calcRdCost(cs.fracBits, cs.dist);
if( !bKeepCtx ) m_CABACEstimator->getCtx() = SubCtx( Ctx::DeltaQP, ctxTemp );
// NOTE: reset QPs for CUs without residuals up to first coded CU
for( const auto &cu : cs.cus )
{
if( cu->rootCbf )
{
break;
}
cu->qp = predQP;
}
}
else
{
// No residuals: reset CU QP to predicted value
for( const auto &cu : cs.cus )
{
cu->qp = predQP;
}
}
}
void EncCu::xFillPCMBuffer( CodingUnit &cu )
{
const ChromaFormat format = cu.chromaFormat;
const uint32_t numberValidComponents = getNumberValidComponents(format);
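// Copy the original samples of every TU and component into its PCM buffer; these samples are transmitted directly in PCM mode.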
for( auto &tu : CU::traverseTUs( cu ) )
{
for( uint32_t ch = 0; ch < numberValidComponents; ch++ )
{
const ComponentID compID = ComponentID( ch );
const CompArea &compArea = tu.blocks[ compID ];
const CPelBuf source = tu.cs->getOrgBuf( compArea );
PelBuf destination = tu.getPcmbuf( compID );
destination.copyFrom( source );
}
}
}
void EncCu::xCheckRDCostMerge2Nx2N( CodingStructure *&tempCS, CodingStructure *&bestCS, Partitioner &partitioner, const EncTestMode& encTestMode )
{
const Slice &slice = *tempCS->slice;
CHECK( slice.getSliceType() == I_SLICE, "Merge modes not available for I-slices" );
tempCS->initStructData( encTestMode.qp, encTestMode.lossless );
MergeCtx mergeCtx;
const SPS &sps = *tempCS->sps;
if( sps.getSpsNext().getUseSubPuMvp() )
{
Size bufSize = g_miScaling.scale( tempCS->area.lumaSize() );
mergeCtx.subPuMvpMiBuf = MotionBuf( m_SubPuMiBuf, bufSize );
}
setMergeBestSATDCost( MAX_DOUBLE );

{
// first get merge candidates
CodingUnit cu( tempCS->area );
cu.cs = tempCS;
cu.predMode = MODE_INTER;
cu.slice = tempCS->slice;
#if HEVC_TILES_WPP
cu.tileIdx = tempCS->picture->tileMap->getTileIdxMap(tempCS->area.lumaPos());
#endif
PredictionUnit pu( tempCS->area );
pu.cu = &cu;
pu.cs = tempCS;
PU::getInterMergeCandidates(pu, mergeCtx
, 0
);
PU::restrictBiPredMergeCands(pu, mergeCtx);
PU::getInterMMVDMergeCandidates(pu, mergeCtx);

}
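// candHasNoResidual[] records merge candidates that turned out to have no residual, so the second (no-residual) pass can skip redundant tests.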
bool candHasNoResidual[MRG_MAX_NUM_CANDS + MMVD_ADD_NUM];
for (uint32_t ui = 0; ui < MRG_MAX_NUM_CANDS + MMVD_ADD_NUM; ui++)
{
candHasNoResidual[ui] = false;
}

bool bestIsSkip = false;
bool bestIsMMVDSkip = true;
PelUnitBuf acMergeBuffer[MRG_MAX_NUM_CANDS];
PelUnitBuf acMergeRealBuffer[MMVD_MRG_MAX_RD_BUF_NUM];
PelUnitBuf * acMergeTempBuffer[MMVD_MRG_MAX_RD_NUM];
PelUnitBuf * singleMergeTempBuffer;
int insertPos;
unsigned uiNumMrgSATDCand = mergeCtx.numValidMergeCand + MMVD_ADD_NUM;
static_vector<unsigned, MRG_MAX_NUM_CANDS + MMVD_ADD_NUM> RdModeList;
bool mrgTempBufSet = false;
for (unsigned i = 0; i < MRG_MAX_NUM_CANDS + MMVD_ADD_NUM; i++)
{
RdModeList.push_back(i);
}

const UnitArea localUnitArea(tempCS->area.chromaFormat, Area(0, 0, tempCS->area.Y().width, tempCS->area.Y().height));
for (unsigned i = 0; i < MMVD_MRG_MAX_RD_BUF_NUM; i++)
{
acMergeRealBuffer[i] = m_acMergeBuffer[i].getBuf(localUnitArea);
if (i < MMVD_MRG_MAX_RD_NUM)
{
acMergeTempBuffer[i] = acMergeRealBuffer + i;
}
else
{
singleMergeTempBuffer = acMergeRealBuffer + i;
}
}
static_vector<unsigned, MRG_MAX_NUM_CANDS + MMVD_ADD_NUM> RdModeList2; // store the Intra mode for Intrainter
RdModeList2.clear();
bool isIntrainterEnabled = sps.getSpsNext().getUseMHIntra();
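// Combined intra-inter (MHIntra) is restricted to blocks with at least 64 luma samples and with both dimensions below MAX_CU_SIZE.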
if (bestCS->area.lwidth() * bestCS->area.lheight() < 64 || bestCS->area.lwidth() >= MAX_CU_SIZE || bestCS->area.lheight() >= MAX_CU_SIZE)
{
isIntrainterEnabled = false;
}
bool isTestSkipMerge[MRG_MAX_NUM_CANDS]; // record if the merge candidate has tried skip mode
for (uint32_t idx = 0; idx < MRG_MAX_NUM_CANDS; idx++)
{
isTestSkipMerge[idx] = false;
}
if( m_pcEncCfg->getUseFastMerge() || isIntrainterEnabled)

{
uiNumMrgSATDCand = NUM_MRG_SATD_CAND;
if (isIntrainterEnabled)
{
uiNumMrgSATDCand += 1;
}

bestIsSkip = false;
if( auto blkCache = dynamic_cast< CacheBlkInfoCtrl* >( m_modeCtrl ) )
{
if (slice.getSPS()->getSpsNext().getCPRMode())
{
ComprCUCtx cuECtx = m_modeCtrl->getComprCUCtx();
bestIsSkip = blkCache->isSkip(tempCS->area) && cuECtx.bestCU;
}
else

bestIsSkip = blkCache->isSkip( tempCS->area );
bestIsMMVDSkip = blkCache->isMMVDSkip(tempCS->area);

}
if (isIntrainterEnabled) // always perform low complexity check
{
bestIsSkip = false;
}
static_vector<double, MRG_MAX_NUM_CANDS + MMVD_ADD_NUM> candCostList;

// 1. Pass: get SATD-cost for selected candidates and reduce their count
if( !bestIsSkip )
{
RdModeList.clear();
mrgTempBufSet = true;
const double sqrtLambdaForFirstPass = m_pcRdCost->getMotionLambda( encTestMode.lossless );
CodingUnit &cu = tempCS->addCU( tempCS->area, partitioner.chType );
const double sqrtLambdaForFirstPassIntra = m_pcRdCost->getMotionLambda(cu.transQuantBypass) / double(1 << SCALE_BITS);

partitioner.setCUData( cu );
cu.slice = tempCS->slice;
#if HEVC_TILES_WPP
cu.tileIdx = tempCS->picture->tileMap->getTileIdxMap( tempCS->area.lumaPos() );
#endif
cu.skip = false;
cu.mmvdSkip = false;
cu.triangle = false;

//cu.affine
cu.predMode = MODE_INTER;
//cu.LICFlag
cu.transQuantBypass = encTestMode.lossless;
cu.chromaQpAdj = cu.transQuantBypass ? 0 : m_cuChromaQpOffsetIdxPlus1;
cu.qp = encTestMode.qp;
//cu.emtFlag is set below
PredictionUnit &pu = tempCS->addPU( cu, partitioner.chType );
DistParam distParam;
const bool bUseHadamard= !encTestMode.lossless;
m_pcRdCost->setDistParam (distParam, tempCS->getOrgBuf().Y(), m_acMergeBuffer[0].Y(), sps.getBitDepth (CHANNEL_TYPE_LUMA), COMPONENT_Y, bUseHadamard);
const UnitArea localUnitArea( tempCS->area.chromaFormat, Area( 0, 0, tempCS->area.Y().width, tempCS->area.Y().height) );
uint32_t cprCand = 0;
uint32_t numValidMv = mergeCtx.numValidMergeCand;

for( uint32_t uiMergeCand = 0; uiMergeCand < mergeCtx.numValidMergeCand; uiMergeCand++ )
{
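// Candidates whose list-0 reference is the current picture (CPR/IBC) are counted but excluded from this SATD pass.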
if ((mergeCtx.interDirNeighbours[uiMergeCand] == 1 || mergeCtx.interDirNeighbours[uiMergeCand] == 3) && tempCS->slice->getRefPic(REF_PIC_LIST_0, mergeCtx.mvFieldNeighbours[uiMergeCand << 1].refIdx)->getPOC() == tempCS->slice->getPOC())
{
cprCand++;
numValidMv--;
continue;
}

mergeCtx.setMergeInfo( pu, uiMergeCand );
PU::spanMotionInfo( pu, mergeCtx );
distParam.cur = singleMergeTempBuffer->Y();
m_pcInterSearch->motionCompensation(pu, *singleMergeTempBuffer);
acMergeBuffer[uiMergeCand] = m_acRealMergeBuffer[uiMergeCand].getBuf(localUnitArea);
acMergeBuffer[uiMergeCand].copyFrom(*singleMergeTempBuffer);

if( mergeCtx.interDirNeighbours[uiMergeCand] == 3 && mergeCtx.mrgTypeNeighbours[uiMergeCand] == MRG_TYPE_DEFAULT_N )
{
mergeCtx.mvFieldNeighbours[2*uiMergeCand].mv = pu.mv[0];
mergeCtx.mvFieldNeighbours[2*uiMergeCand+1].mv = pu.mv[1];
}
Distortion uiSad = distParam.distFunc(distParam);
uint32_t uiBitsCand = uiMergeCand + 1;
if( uiMergeCand == tempCS->slice->getMaxNumMergeCand() - 1 )
{
uiBitsCand--;
}
uiBitsCand++; // for mmvd_flag

double cost = (double)uiSad + (double)uiBitsCand * sqrtLambdaForFirstPass;
insertPos = -1;
updateDoubleCandList(uiMergeCand, cost, RdModeList, candCostList, RdModeList2, (uint32_t)NUM_LUMA_MODE, uiNumMrgSATDCand, &insertPos);
if (insertPos != -1)
{
if (insertPos == RdModeList.size() - 1)
{
swap(singleMergeTempBuffer, acMergeTempBuffer[insertPos]);
}
else
{
for (uint32_t i = uint32_t(RdModeList.size()) - 1; i > insertPos; i--)
{
swap(acMergeTempBuffer[i - 1], acMergeTempBuffer[i]);
}
swap(singleMergeTempBuffer, acMergeTempBuffer[insertPos]);
}
}
CHECK(std::min(uiMergeCand + 1 - cprCand, uiNumMrgSATDCand) != RdModeList.size(), "");

}
if (numValidMv < uiNumMrgSATDCand)
uiNumMrgSATDCand = numValidMv;
if (numValidMv == 0)
return;
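// Multi-hypothesis intra-inter (MHIntra): additionally evaluate SATD costs for combining the best merge candidates with up to four intra modes (PLANAR, DC, HOR, VER).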
if (isIntrainterEnabled)
{
int numTestIntraMode = 4;
// prepare for Intra bits calculation
const TempCtx ctxStart(m_CtxCache, m_CABACEstimator->getCtx());
const TempCtx ctxStartIntraMode(m_CtxCache, SubCtx(Ctx::MHIntraPredMode, m_CABACEstimator->getCtx()));
// for Intrainter fast, record the best intra mode during the first round for merge candidate 0
int bestMHIntraMode = -1;
double bestMHIntraCost = MAX_DOUBLE;
// save the to-be-tested merge candidates
uint32_t MHIntraMergeCand[NUM_MRG_SATD_CAND];
for (uint32_t mergeCnt = 0; mergeCnt < std::min(NUM_MRG_SATD_CAND, (const int) uiNumMrgSATDCand); mergeCnt++)
{
MHIntraMergeCand[mergeCnt] = RdModeList[mergeCnt];
}
for (uint32_t mergeCnt = 0; mergeCnt < std::min( std::min(NUM_MRG_SATD_CAND, (const int)uiNumMrgSATDCand), 4); mergeCnt++)
{
uint32_t mergeCand = MHIntraMergeCand[mergeCnt];
acMergeBuffer[mergeCand] = m_acRealMergeBuffer[mergeCand].getBuf(localUnitArea);
// estimate merge bits
uint32_t bitsCand = mergeCand + 1;
if (mergeCand == pu.cs->slice->getMaxNumMergeCand() - 1)
{
bitsCand--;
}
// first round
for (uint32_t intraCnt = 0; intraCnt < numTestIntraMode; intraCnt++)
{
pu.intraDir[0] = (intraCnt < 2) ? intraCnt : ((intraCnt == 2) ? HOR_IDX : VER_IDX);
// fast 2
if (mergeCnt > 0 && bestMHIntraMode != pu.intraDir[0])
{
continue;
}
int narrowCase = PU::getNarrowShape(pu.lwidth(), pu.lheight());
if (narrowCase == 1 && pu.intraDir[0] == HOR_IDX)
{
continue;
}
if (narrowCase == 2 && pu.intraDir[0] == VER_IDX)
{
continue;
}
// generate intrainter Y prediction
if (mergeCnt == 0)
{
bool isUseFilter = IntraPrediction::useFilteredIntraRefSamples(COMPONENT_Y, pu, true, pu);
m_pcIntraSearch->initIntraPatternChType(*pu.cu, pu.Y(), isUseFilter);
m_pcIntraSearch->predIntraAng(COMPONENT_Y, pu.cs->getPredBuf(pu).Y(), pu, isUseFilter);
m_pcIntraSearch->switchBuffer(pu, COMPONENT_Y, pu.cs->getPredBuf(pu).Y(), m_pcIntraSearch->getPredictorPtr2(COMPONENT_Y, intraCnt));
}
pu.cs->getPredBuf(pu).copyFrom(acMergeBuffer[mergeCand]);
m_pcIntraSearch->geneWeightedPred(COMPONENT_Y, pu.cs->getPredBuf(pu).Y(), pu, m_pcIntraSearch->getPredictorPtr2(COMPONENT_Y, intraCnt));
// calculate cost
distParam.cur = pu.cs->getPredBuf(pu).Y();
Distortion sadValue = distParam.distFunc(distParam);
m_CABACEstimator->getCtx() = SubCtx(Ctx::MHIntraPredMode, ctxStartIntraMode);
uint64_t fracModeBits = m_pcIntraSearch->xFracModeBitsIntra(pu, pu.intraDir[0], CHANNEL_TYPE_LUMA);
double cost = (double)sadValue + (double)(bitsCand + 1) * sqrtLambdaForFirstPass + (double)fracModeBits * sqrtLambdaForFirstPassIntra;
insertPos = -1;
updateDoubleCandList(mergeCand + MRG_MAX_NUM_CANDS + MMVD_ADD_NUM, cost, RdModeList, candCostList, RdModeList2, pu.intraDir[0], uiNumMrgSATDCand, &insertPos);
if (insertPos != -1)
{
for (int i = int(RdModeList.size()) - 1; i > insertPos; i--)
{
swap(acMergeTempBuffer[i - 1], acMergeTempBuffer[i]);
}
swap(singleMergeTempBuffer, acMergeTempBuffer[insertPos]);
}
// fast 2
if (mergeCnt == 0 && cost < bestMHIntraCost)
{
bestMHIntraMode = pu.intraDir[0];
bestMHIntraCost = cost;
}
}
}
m_CABACEstimator->getCtx() = ctxStart;
}
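// MMVD (merge with MVD): evaluate SATD costs for merge candidates refined by a signalled base index, step size, and direction.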
cu.mmvdSkip = true;
int tempNum = 0;
tempNum = MMVD_ADD_NUM;
bool allowDirection[4] = { true, true, true, true };
for (uint32_t mergeCand = mergeCtx.numValidMergeCand; mergeCand < mergeCtx.numValidMergeCand + tempNum; mergeCand++)
{
const int mmvdMergeCand = mergeCand - mergeCtx.numValidMergeCand;
int bitsBaseIdx = 0;
int bitsRefineStep = 0;
int bitsDirection = 2;
int bitsCand = 0;
int baseIdx;
int refineStep;
int direction;
baseIdx = mmvdMergeCand / MMVD_MAX_REFINE_NUM;
refineStep = (mmvdMergeCand - (baseIdx * MMVD_MAX_REFINE_NUM)) / 4;
direction = (mmvdMergeCand - baseIdx * MMVD_MAX_REFINE_NUM - refineStep * 4) % 4;
if (refineStep == 0)
{
allowDirection[direction] = true;
}
if (allowDirection[direction] == false)
{
continue;
}
bitsBaseIdx = baseIdx + 1;
if (baseIdx == MMVD_BASE_MV_NUM - 1)
{
bitsBaseIdx--;
}
bitsRefineStep = refineStep + 1;
if (refineStep == MMVD_REFINE_STEP - 1)
{
bitsRefineStep--;
}
bitsCand = bitsBaseIdx + bitsRefineStep + bitsDirection;
bitsCand++; // for mmvd_flag
mergeCtx.setMmvdMergeCandiInfo(pu, mmvdMergeCand);
PU::spanMotionInfo(pu, mergeCtx);
distParam.cur = singleMergeTempBuffer->Y();
m_pcInterSearch->motionCompensation(pu, *singleMergeTempBuffer);
Distortion uiSad = distParam.distFunc(distParam);
double cost = (double)uiSad + (double)bitsCand * sqrtLambdaForFirstPass;
allowDirection[direction] = cost > 1.3 * candCostList[0] ? 0 : 1;
insertPos = -1;
updateDoubleCandList(mergeCand, cost, RdModeList, candCostList, RdModeList2, (uint32_t)NUM_LUMA_MODE, uiNumMrgSATDCand, &insertPos);
if (insertPos != -1)
{
for (int i = int(RdModeList.size()) - 1; i > insertPos; i--)
{
swap(acMergeTempBuffer[i - 1], acMergeTempBuffer[i]);
}
swap(singleMergeTempBuffer, acMergeTempBuffer[insertPos]);
}
}

// Try to limit number of candidates using SATD-costs
for( uint32_t i = 1; i < uiNumMrgSATDCand; i++ )
{
if( candCostList[i] > MRG_FAST_RATIO * candCostList[0] )
{
uiNumMrgSATDCand = i;
break;
}
}
setMergeBestSATDCost( candCostList[0] );
if (isIntrainterEnabled)
{
for (uint32_t mergeCnt = 0; mergeCnt < uiNumMrgSATDCand; mergeCnt++)
{
if (RdModeList[mergeCnt] >= (MRG_MAX_NUM_CANDS + MMVD_ADD_NUM))
{
pu.intraDir[0] = RdModeList2[mergeCnt];
pu.intraDir[1] = DM_CHROMA_IDX;
uint32_t bufIdx = (pu.intraDir[0] > 1) ? (pu.intraDir[0] == HOR_IDX ? 2 : 3) : pu.intraDir[0];
bool isUseFilter = IntraPrediction::useFilteredIntraRefSamples(COMPONENT_Cb, pu, true, pu);
m_pcIntraSearch->initIntraPatternChType(*pu.cu, pu.Cb(), isUseFilter);
m_pcIntraSearch->predIntraAng(COMPONENT_Cb, pu.cs->getPredBuf(pu).Cb(), pu, isUseFilter);
m_pcIntraSearch->switchBuffer(pu, COMPONENT_Cb, pu.cs->getPredBuf(pu).Cb(), m_pcIntraSearch->getPredictorPtr2(COMPONENT_Cb, bufIdx));
isUseFilter = IntraPrediction::useFilteredIntraRefSamples(COMPONENT_Cr, pu, true, pu);
m_pcIntraSearch->initIntraPatternChType(*pu.cu, pu.Cr(), isUseFilter);
m_pcIntraSearch->predIntraAng(COMPONENT_Cr, pu.cs->getPredBuf(pu).Cr(), pu, isUseFilter);
m_pcIntraSearch->switchBuffer(pu, COMPONENT_Cr, pu.cs->getPredBuf(pu).Cr(), m_pcIntraSearch->getPredictorPtr2(COMPONENT_Cr, bufIdx));
}
}

tempCS->initStructData( encTestMode.qp, encTestMode.lossless );
}
else
{
if (bestIsMMVDSkip)
{
uiNumMrgSATDCand = mergeCtx.numValidMergeCand + MMVD_ADD_NUM;
}
else
{
uiNumMrgSATDCand = mergeCtx.numValidMergeCand;
}

}
}
const uint32_t iteration = encTestMode.lossless ? 1 : 2;
// 2. Pass: check candidates using full RD test
for( uint32_t uiNoResidualPass = 0; uiNoResidualPass < iteration; uiNoResidualPass++ )
{
for( uint32_t uiMrgHADIdx = 0; uiMrgHADIdx < uiNumMrgSATDCand; uiMrgHADIdx++ )
{
uint32_t uiMergeCand = RdModeList[uiMrgHADIdx];
if(uiMergeCand < mergeCtx.numValidMergeCand)
if ((mergeCtx.interDirNeighbours[uiMergeCand] == 1 || mergeCtx.interDirNeighbours[uiMergeCand] == 3) && tempCS->slice->getRefPic(REF_PIC_LIST_0, mergeCtx.mvFieldNeighbours[uiMergeCand << 1].refIdx)->getPOC() == tempCS->slice->getPOC())
{
continue;
}
if (uiNoResidualPass != 0 && uiMergeCand >= (MRG_MAX_NUM_CANDS + MMVD_ADD_NUM)) // intrainter does not support skip mode
{
uiMergeCand -= (MRG_MAX_NUM_CANDS + MMVD_ADD_NUM); // for skip, map back to normal merge candidate idx and try RDO
if (isTestSkipMerge[uiMergeCand])
{