Newer
Older

Karsten Suehring
committed
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
}
}
else if( m_pcEncCfg->getLambdaFromQPEnable() )
{
dQPFactor = 0.57*dQPFactor;
}
double dLambda = dQPFactor*pow( 2.0, qp_temp/3.0 );
int depth = slice->getDepth();
if( !m_pcEncCfg->getLambdaFromQPEnable() && depth>0 )
{
int qp_temp_slice = slice->getSliceQp() + bitdepth_luma_qp_scale - SHIFT_QP; // avoid lambda over adjustment, use slice_qp here
dLambda *= Clip3( 2.00, 4.00, (qp_temp_slice / 6.0) ); // (j == B_SLICE && p_cur_frm->layer != 0 )
}
if( !m_pcEncCfg->getUseHADME() && slice->getSliceType( ) != I_SLICE )
{
dLambda *= 0.95;
}
const int temporalId = m_pcEncCfg->getGOPEntry( m_pcSliceEncoder->getGopId() ).m_temporalId;
const std::vector<double> &intraLambdaModifiers = m_pcEncCfg->getIntraLambdaModifier();
double lambdaModifier;
if( slice->getSliceType( ) != I_SLICE || intraLambdaModifiers.empty())
{
lambdaModifier = m_pcEncCfg->getLambdaModifier(temporalId);
}
else
{
lambdaModifier = intraLambdaModifiers[(temporalId < intraLambdaModifiers.size()) ? temporalId : (intraLambdaModifiers.size() - 1)];
}
dLambda *= lambdaModifier;
int qpBDoffset = slice->getSPS()->getQpBDOffset(CHANNEL_TYPE_LUMA);

Christian Helmrich
committed
int iQP = Clip3(-qpBDoffset, MAX_QP, (int)floor((double)dQP + 0.5));

Karsten Suehring
committed
m_pcSliceEncoder->setUpLambda(slice, dLambda, iQP);
#else

Christian Helmrich
committed
int iQP = dQP;

Karsten Suehring
committed
const double oldQP = (double)slice->getSliceQpBase();

Christian Helmrich
committed
#if ENABLE_QPA_SUB_CTU
const double oldLambda = (m_pcEncCfg->getUsePerceptQPA() && !m_pcEncCfg->getUseRateCtrl() && slice->getPPS()->getUseDQP()) ? slice->getLambdas()[0] :
m_pcSliceEncoder->calculateLambda (slice, m_pcSliceEncoder->getGopId(), slice->getDepth(), oldQP, oldQP, iQP);
#else

Karsten Suehring
committed
const double oldLambda = m_pcSliceEncoder->calculateLambda (slice, m_pcSliceEncoder->getGopId(), slice->getDepth(), oldQP, oldQP, iQP);

Christian Helmrich
committed
#endif
const double newLambda = oldLambda * pow (2.0, ((double)dQP - oldQP) / 3.0);

Karsten Suehring
committed
#if RDOQ_CHROMA_LAMBDA
const double chromaLambda = newLambda / m_pcRdCost->getChromaWeight();
const double lambdaArray[MAX_NUM_COMPONENT] = {newLambda, chromaLambda, chromaLambda};
m_pcTrQuant->setLambdas (lambdaArray);
#else
m_pcTrQuant->setLambda (newLambda);
#endif
if (updateRdCostLambda)
{
m_pcRdCost->setLambda (newLambda, slice->getSPS()->getBitDepths());
}

Karsten Suehring
committed
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
#endif
}
#endif
#if ENABLE_SPLIT_PARALLELISM
//#undef DEBUG_PARALLEL_TIMINGS
//#define DEBUG_PARALLEL_TIMINGS 1
// Compresses the current CU area by distributing the candidate split/mode jobs
// across parallel CU encoders (OpenMP), then collects the cheapest result back
// into this encoder's best/temp coding structures.
// tempCS/bestCS are passed by reference-to-pointer: on success they are redirected
// to this encoder's own CS slots for the current block size.
void EncCu::xCompressCUParallel( CodingStructure *&tempCS, CodingStructure *&bestCS, Partitioner &partitioner )
{
// Size-index table slots for the current partition width/height.
const unsigned wIdx = gp_sizeIdxInfo->idxFrom( partitioner.currArea().lwidth() );
const unsigned hIdx = gp_sizeIdxInfo->idxFrom( partitioner.currArea().lheight() );
Picture* picture = tempCS->picture;
// Number of independent jobs the mode controller wants to run for this area.
int numJobs = m_modeCtrl->getNumParallelJobs( *bestCS, partitioner );
// jobUsed[jId] records which job ids actually executed (jobs are 1-based below).
bool jobUsed [NUM_RESERVERD_SPLIT_JOBS];
std::fill( jobUsed, jobUsed + NUM_RESERVERD_SPLIT_JOBS, false );
const UnitArea currArea = CS::getArea( *tempCS, partitioner.currArea(), partitioner.chType );
#if ENABLE_WPP_PARALLELISM
// Remember the WPP thread id so each split worker can re-register it.
const int wppTId = picture->scheduler.getWppThreadId();
#endif
// Parallel execution can be disabled via configuration (forces serial loop).
const bool doParallel = !m_pcEncCfg->getForceSingleSplitThread();
#if _MSC_VER && ENABLE_WPP_PARALLELISM
#pragma omp parallel for schedule(dynamic,1) num_threads(NUM_SPLIT_THREADS_IF_MSVC) if(doParallel)
#else
omp_set_num_threads( m_pcEncCfg->getNumSplitThreads() );
#pragma omp parallel for schedule(dynamic,1) if(doParallel)
#endif
for( int jId = 1; jId <= numJobs; jId++ )
{
// thread start
#if ENABLE_WPP_PARALLELISM
picture->scheduler.setWppThreadId( wppTId );
#endif
picture->scheduler.setSplitThreadId();
picture->scheduler.setSplitJobId( jId );
// Each job gets its own partitioner clone and its own CU encoder instance,
// seeded from this encoder's current state (copyState with isDist == true).
Partitioner* jobPartitioner = PartitionerFactory::get( *tempCS->slice );
EncCu* jobCuEnc = m_pcEncLib->getCuEncoder( picture->scheduler.getSplitDataId( jId ) );
auto* jobBlkCache = dynamic_cast<CacheBlkInfoCtrl*>( jobCuEnc->m_modeCtrl );
jobPartitioner->copyState( partitioner );
jobCuEnc ->copyState( this, *jobPartitioner, currArea, true );
if( jobBlkCache )
{
jobBlkCache->tick();
}
CodingStructure *&jobBest = jobCuEnc->m_pBestCS[wIdx][hIdx];
CodingStructure *&jobTemp = jobCuEnc->m_pTempCS[wIdx][hIdx];
jobUsed[jId] = true;
// Run the actual (recursive) CU compression for this job.
jobCuEnc->xCompressCU( jobTemp, jobBest, *jobPartitioner );
delete jobPartitioner;
picture->scheduler.setSplitJobId( 0 );
// thread stop
}
picture->scheduler.setSplitThreadId( 0 );
// Reduction: find the job whose best CS beats the current best cost.
int bestJId = 0;
double bestCost = bestCS->cost;
for( int jId = 1; jId <= numJobs; jId++ )
{
EncCu* jobCuEnc = m_pcEncLib->getCuEncoder( picture->scheduler.getSplitDataId( jId ) );
if( jobUsed[jId] && jobCuEnc->m_pBestCS[wIdx][hIdx]->cost < bestCost )
{
bestCost = jobCuEnc->m_pBestCS[wIdx][hIdx]->cost;
bestJId = jId;
}
}
if( bestJId > 0 )
{
// Pull the winning job's state back into this encoder (isDist == false)
// and redirect the caller's CS pointers to our own slots.
copyState( m_pcEncLib->getCuEncoder( picture->scheduler.getSplitDataId( bestJId ) ), partitioner, currArea, false );
m_CurrCtx->best = m_CABACEstimator->getCtx();
tempCS = m_pTempCS[wIdx][hIdx];
bestCS = m_pBestCS[wIdx][hIdx];
}
// Sanity check: the reconstruction copied into the picture must match the
// best CS reconstruction bit-exactly (clipped to the picture boundary).
const int bitDepthY = tempCS->sps->getBitDepth( CH_L );
const UnitArea clipdArea = clipArea( currArea, *picture );
CHECK( calcCheckSum( picture->getRecoBuf( clipdArea.Y() ), bitDepthY ) != calcCheckSum( bestCS->getRecoBuf( clipdArea.Y() ), bitDepthY ), "Data copied incorrectly!" );
picture->finishParallelPart( currArea );
// Merge the block-info caches of all non-winning jobs back into our own cache
// so their statistics are not lost for future decisions.
if( auto *blkCache = dynamic_cast<CacheBlkInfoCtrl*>( m_modeCtrl ) )
{
for( int jId = 1; jId <= numJobs; jId++ )
{
if( !jobUsed[jId] || jId == bestJId ) continue;
auto *jobBlkCache = dynamic_cast<CacheBlkInfoCtrl*>( m_pcEncLib->getCuEncoder( picture->scheduler.getSplitDataId( jId ) )->m_modeCtrl );
CHECK( !jobBlkCache, "If own mode controller has blk info cache capability so should all other mode controllers!" );
blkCache->CacheBlkInfoCtrl::copyState( *jobBlkCache, partitioner.currArea() );
}
blkCache->tick();
}
}
// Transfers encoder state between this EncCu and `other` for split-parallel jobs.
// isDist == true  : "distribute" — seed the other encoder's CS slots from ours.
// isDist == false : "collect"    — copy the other encoder's best result into ours.
// In both directions the search/mode-control/RD-cost/quantizer objects and the
// CABAC context are mirrored from `other` into this encoder.
void EncCu::copyState( EncCu* other, Partitioner& partitioner, const UnitArea& currArea, const bool isDist )
{
  // Size-index slots matching the current partition area.
  const unsigned widthIdx  = gp_sizeIdxInfo->idxFrom( partitioner.currArea().lwidth () );
  const unsigned heightIdx = gp_sizeIdxInfo->idxFrom( partitioner.currArea().lheight() );

  if( isDist )
  {
    // Distribution direction: initialize the worker's best/temp structures as
    // sub-structures of ours, and rewind our context pointer to the buffer start.
    other->m_pBestCS[widthIdx][heightIdx]->initSubStructure( *m_pBestCS[widthIdx][heightIdx], partitioner.chType, partitioner.currArea(), false );
    other->m_pTempCS[widthIdx][heightIdx]->initSubStructure( *m_pTempCS[widthIdx][heightIdx], partitioner.chType, partitioner.currArea(), false );
    m_CurrCtx = m_CtxBuffer.data();
  }
  else
  {
    // Collection direction: absorb the worker's best coding structure,
    // including its RD bookkeeping.
    CodingStructure*       dest   = m_pBestCS[widthIdx][heightIdx];
    const CodingStructure* result = other->m_pBestCS[widthIdx][heightIdx];
    bool retainResi = KEEP_PRED_AND_RESI_SIGNALS;

    dest->useSubStructure( *result, partitioner.chType, currArea, KEEP_PRED_AND_RESI_SIGNALS, true, retainResi, retainResi );

    dest->cost     = result->cost;
    dest->dist     = result->dist;
    dest->fracBits = result->fracBits;
    dest->features = result->features;
  }

  // Mirror the remaining per-encoder state from the source encoder.
  m_pcInterSearch->copyState( *other->m_pcInterSearch );
  m_modeCtrl     ->copyState( *other->m_modeCtrl, partitioner.currArea() );
  m_pcRdCost     ->copyState( *other->m_pcRdCost );
  m_pcTrQuant    ->copyState( *other->m_pcTrQuant );
  m_CABACEstimator->getCtx() = other->m_CABACEstimator->getCtx();
}
#endif
void EncCu::xCheckModeSplit(CodingStructure *&tempCS, CodingStructure *&bestCS, Partitioner &partitioner, const EncTestMode& encTestMode
, LutMotionCand* &tempMotCandLUTs
, LutMotionCand* &bestMotCandLUTs
, UnitArea parArea
)

Karsten Suehring
committed
{
const int qp = encTestMode.qp;
const PPS &pps = *tempCS->pps;
const Slice &slice = *tempCS->slice;
const bool bIsLosslessMode = false; // False at this level. Next level down may set it to true.
const int oldPrevQp = tempCS->prevQP[partitioner.chType];
const uint32_t currDepth = partitioner.currDepth;
const unsigned wParIdx = gp_sizeIdxInfo->idxFrom(parArea.lwidth());
const unsigned hParIdx = gp_sizeIdxInfo->idxFrom(parArea.lheight());
tempCS->slice->copyMotionLUTs(tempMotCandLUTs, m_pSplitTempMotLUTs[wParIdx][hParIdx]);

Karsten Suehring
committed
const PartSplit split = getPartSplit( encTestMode );
CHECK( split == CU_DONT_SPLIT, "No proper split provided!" );
tempCS->initStructData( qp, bIsLosslessMode );
m_CABACEstimator->getCtx() = m_CurrCtx->start;
const TempCtx ctxStartSP( m_CtxCache, SubCtx( Ctx::SplitFlag, m_CABACEstimator->getCtx() ) );
#if JVET_M0421_SPLIT_SIG
const TempCtx ctxStartQt( m_CtxCache, SubCtx( Ctx::SplitQtFlag, m_CABACEstimator->getCtx() ) );
const TempCtx ctxStartHv( m_CtxCache, SubCtx( Ctx::SplitHvFlag, m_CABACEstimator->getCtx() ) );
const TempCtx ctxStart12( m_CtxCache, SubCtx( Ctx::Split12Flag, m_CABACEstimator->getCtx() ) );
#else
const TempCtx ctxStartBT( m_CtxCache, SubCtx( Ctx::BTSplitFlag, m_CABACEstimator->getCtx() ) );

Karsten Suehring
committed

Karsten Suehring
committed
#if JVET_M0421_SPLIT_SIG
m_CABACEstimator->split_cu_mode( split, *tempCS, partitioner );
#else
if( partitioner.getImplicitSplit( *tempCS ) != CU_QUAD_SPLIT )
{
if( partitioner.canSplit( CU_QUAD_SPLIT, *tempCS ) )

Karsten Suehring
committed
{
m_CABACEstimator->split_cu_flag( split == CU_QUAD_SPLIT, *tempCS, partitioner );
}
if( split != CU_QUAD_SPLIT )
{
m_CABACEstimator->split_cu_mode_mt( split, *tempCS, partitioner );

Karsten Suehring
committed
}

Karsten Suehring
committed
const double factor = ( tempCS->currQP[partitioner.chType] > 30 ? 1.1 : 1.075 );
#if JVET_M0428_ENC_DB_OPT
tempCS->useDbCost = m_pcEncCfg->getUseEncDbOpt();
if (!tempCS->useDbCost)
CHECK(bestCS->costDbOffset != 0, "error");
const double cost = m_pcRdCost->calcRdCost( uint64_t( m_CABACEstimator->getEstFracBits() + ( ( bestCS->fracBits ) / factor ) ), Distortion( bestCS->dist / factor ) ) + bestCS->costDbOffset / factor;
#else
const double cost = m_pcRdCost->calcRdCost( uint64_t( m_CABACEstimator->getEstFracBits() + ( ( bestCS->fracBits ) / factor ) ), Distortion( bestCS->dist / factor ) );

Karsten Suehring
committed
m_CABACEstimator->getCtx() = SubCtx( Ctx::SplitFlag, ctxStartSP );
#if JVET_M0421_SPLIT_SIG
m_CABACEstimator->getCtx() = SubCtx( Ctx::SplitQtFlag, ctxStartQt );
m_CABACEstimator->getCtx() = SubCtx( Ctx::SplitHvFlag, ctxStartHv );
m_CABACEstimator->getCtx() = SubCtx( Ctx::Split12Flag, ctxStart12 );
#else
m_CABACEstimator->getCtx() = SubCtx( Ctx::BTSplitFlag, ctxStartBT );

Karsten Suehring
committed
if (cost > bestCS->cost + bestCS->costDbOffset

Christian Helmrich
committed
#if ENABLE_QPA_SUB_CTU
|| (m_pcEncCfg->getUsePerceptQPA() && !m_pcEncCfg->getUseRateCtrl() && pps.getUseDQP() && (pps.getMaxCuDQPDepth() > 0) && (split == CU_HORZ_SPLIT || split == CU_VERT_SPLIT) &&
(partitioner.currArea().lwidth() == tempCS->pcv->maxCUWidth) && (partitioner.currArea().lheight() == tempCS->pcv->maxCUHeight)) // force quad-split or no split at CTU level

Christian Helmrich
committed
)
{
xCheckBestMode( tempCS, bestCS, partitioner, encTestMode );
return;

Karsten Suehring
committed
}
#if JVET_M0483_IBC
if ((!slice.isIntra() || slice.getSPS()->getIBCFlag())
#else
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
&& tempCS->chType == CHANNEL_TYPE_LUMA
)
{
tempCS->slice->copyMotionLUTs(tempMotCandLUTs, tempCS->slice->getMotionLUTs());
}
int startShareThisLevel = 0;
const uint32_t uiLPelX = tempCS->area.Y().lumaPos().x;
const uint32_t uiTPelY = tempCS->area.Y().lumaPos().y;
int splitRatio = 1;
CHECK(!(split == CU_QUAD_SPLIT || split == CU_HORZ_SPLIT || split == CU_VERT_SPLIT
|| split == CU_TRIH_SPLIT || split == CU_TRIV_SPLIT), "invalid split type");
splitRatio = (split == CU_HORZ_SPLIT || split == CU_VERT_SPLIT) ? 1 : 2;
bool isOneChildSmall = ((tempCS->area.lwidth())*(tempCS->area.lheight()) >> splitRatio) < MRG_SHARELIST_SHARSIZE;
if ((((tempCS->area.lwidth())*(tempCS->area.lheight())) > (MRG_SHARELIST_SHARSIZE * 1)))
{
m_shareState = NO_SHARE;
}
if (m_shareState == NO_SHARE)//init state
{
if (isOneChildSmall)
{
m_shareState = GEN_ON_SHARED_BOUND;//share start state
startShareThisLevel = 1;
}
}
#if JVET_M0483_IBC
if ((m_shareState == GEN_ON_SHARED_BOUND) && (!slice.isIntra() || slice.getSPS()->getIBCFlag()))
#else
if ((m_shareState == GEN_ON_SHARED_BOUND) && (!slice.isIntra()))
{
#if JVET_M0170_MRG_SHARELIST
tempCS->slice->copyMotionLUTs(tempCS->slice->getMotionLUTs(), tempCS->slice->m_MotionCandLuTsBkup);
m_shareBndPosX = uiLPelX;
m_shareBndPosY = uiTPelY;
m_shareBndSizeW = tempCS->area.lwidth();
m_shareBndSizeH = tempCS->area.lheight();
m_shareState = SHARING;
#endif
}
m_pcInterSearch->setShareState(m_shareState);
setShareStateDec(m_shareState);
#endif

Karsten Suehring
committed
partitioner.splitCurrArea( split, *tempCS );
m_CurrCtx++;
tempCS->getRecoBuf().fill( 0 );
#if JVET_M0427_INLOOP_RESHAPER
tempCS->getPredBuf().fill(0);
#endif
AffineMVInfo tmpMVInfo;
bool isAffMVInfoSaved;
m_pcInterSearch->savePrevAffMVInfo(0, tmpMVInfo, isAffMVInfoSaved);

Karsten Suehring
committed
do
{
const auto &subCUArea = partitioner.currArea();
if( tempCS->picture->Y().contains( subCUArea.lumaPos() ) )
{
const unsigned wIdx = gp_sizeIdxInfo->idxFrom( subCUArea.lwidth () );
const unsigned hIdx = gp_sizeIdxInfo->idxFrom( subCUArea.lheight() );
CodingStructure *tempSubCS = m_pTempCS[wIdx][hIdx];
CodingStructure *bestSubCS = m_pBestCS[wIdx][hIdx];
tempCS->initSubStructure( *tempSubCS, partitioner.chType, subCUArea, false );
tempCS->initSubStructure( *bestSubCS, partitioner.chType, subCUArea, false );
LutMotionCand *tempSubMotCandLUTs = m_pTempMotLUTs[wIdx][hIdx];
LutMotionCand *bestSubMotCandLUTs = m_pBestMotLUTs[wIdx][hIdx];
if (tempCS->chType == CHANNEL_TYPE_LUMA)
{
tempCS->slice->copyMotionLUTs(tempMotCandLUTs, tempSubMotCandLUTs);
tempCS->slice->copyMotionLUTs(tempMotCandLUTs, bestSubMotCandLUTs);
}
#if JVET_M0170_MRG_SHARELIST
tempSubCS->sharedBndPos.x = (m_shareState == SHARING) ? m_shareBndPosX : tempSubCS->area.Y().lumaPos().x;
tempSubCS->sharedBndPos.y = (m_shareState == SHARING) ? m_shareBndPosY : tempSubCS->area.Y().lumaPos().y;
tempSubCS->sharedBndSize.width = (m_shareState == SHARING) ? m_shareBndSizeW : tempSubCS->area.lwidth();
tempSubCS->sharedBndSize.height = (m_shareState == SHARING) ? m_shareBndSizeH : tempSubCS->area.lheight();
bestSubCS->sharedBndPos.x = (m_shareState == SHARING) ? m_shareBndPosX : tempSubCS->area.Y().lumaPos().x;
bestSubCS->sharedBndPos.y = (m_shareState == SHARING) ? m_shareBndPosY : tempSubCS->area.Y().lumaPos().y;
bestSubCS->sharedBndSize.width = (m_shareState == SHARING) ? m_shareBndSizeW : tempSubCS->area.lwidth();
bestSubCS->sharedBndSize.height = (m_shareState == SHARING) ? m_shareBndSizeH : tempSubCS->area.lheight();
#endif
xCompressCU( tempSubCS, bestSubCS, partitioner
, tempSubMotCandLUTs
, bestSubMotCandLUTs
);

Karsten Suehring
committed
if( bestSubCS->cost == MAX_DOUBLE )
{
CHECK( split == CU_QUAD_SPLIT, "Split decision reusing cannot skip quad split" );
tempCS->cost = MAX_DOUBLE;
#if JVET_M0428_ENC_DB_OPT
tempCS->costDbOffset = 0;
tempCS->useDbCost = m_pcEncCfg->getUseEncDbOpt();
#endif

Karsten Suehring
committed
m_CurrCtx--;
partitioner.exitCurrSplit();

Karsten Suehring
committed
xCheckBestMode( tempCS, bestCS, partitioner, encTestMode );
{
std::swap(tempMotCandLUTs, bestMotCandLUTs);
}

Karsten Suehring
committed
return;
}
bool keepResi = KEEP_PRED_AND_RESI_SIGNALS;
tempCS->useSubStructure( *bestSubCS, partitioner.chType, CS::getArea( *tempCS, subCUArea, partitioner.chType ), KEEP_PRED_AND_RESI_SIGNALS, true, keepResi, keepResi );
tempCS->slice->copyMotionLUTs(bestSubMotCandLUTs, tempMotCandLUTs);

Karsten Suehring
committed
if(currDepth < pps.getMaxCuDQPDepth())
{
tempCS->prevQP[partitioner.chType] = bestSubCS->prevQP[partitioner.chType];
}
tempSubCS->releaseIntermediateData();
bestSubCS->releaseIntermediateData();
}
} while( partitioner.nextPart( *tempCS ) );
partitioner.exitCurrSplit();
if (startShareThisLevel == 1)
{
m_shareState = NO_SHARE;
m_pcInterSearch->setShareState(m_shareState);
setShareStateDec(m_shareState);
}
#endif

Karsten Suehring
committed
m_CurrCtx--;
// Finally, generate split-signaling bits for RD-cost check
const PartSplit implicitSplit = partitioner.getImplicitSplit( *tempCS );
{
bool enforceQT = implicitSplit == CU_QUAD_SPLIT;
#if HM_QTBT_REPRODUCE_FAST_LCTU_BUG
// LARGE CTU bug

Karsten Suehring
committed
{
unsigned minDepth = 0;
unsigned maxDepth = g_aucLog2[tempCS->sps->getCTUSize()] - g_aucLog2[tempCS->sps->getMinQTSize(slice.getSliceType(), partitioner.chType)];

Karsten Suehring
committed
if( auto ad = dynamic_cast<AdaptiveDepthPartitioner*>( &partitioner ) )
{
ad->setMaxMinDepth( minDepth, maxDepth, *tempCS );
}
if( minDepth > partitioner.currQtDepth )
{
// enforce QT
enforceQT = true;
}
}
#endif
if( !enforceQT )
{
m_CABACEstimator->resetBits();
#if JVET_M0421_SPLIT_SIG
m_CABACEstimator->split_cu_mode( split, *tempCS, partitioner );
#else

Karsten Suehring
committed
if( partitioner.canSplit( CU_QUAD_SPLIT, *tempCS ) )
{
m_CABACEstimator->split_cu_flag( split == CU_QUAD_SPLIT, *tempCS, partitioner );
}
if( split != CU_QUAD_SPLIT )
{
m_CABACEstimator->split_cu_mode_mt( split, *tempCS, partitioner );
}

Karsten Suehring
committed
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
tempCS->fracBits += m_CABACEstimator->getEstFracBits(); // split bits
}
}
tempCS->cost = m_pcRdCost->calcRdCost( tempCS->fracBits, tempCS->dist );
// Check Delta QP bits for splitted structure
xCheckDQP( *tempCS, partitioner, true );
// If the configuration being tested exceeds the maximum number of bytes for a slice / slice-segment, then
// a proper RD evaluation cannot be performed. Therefore, termination of the
// slice/slice-segment must be made prior to this CTU.
// This can be achieved by forcing the decision to be that of the rpcTempCU.
// The exception is each slice / slice-segment must have at least one CTU.
if (bestCS->cost != MAX_DOUBLE)
{
#if HEVC_TILES_WPP
const TileMap& tileMap = *tempCS->picture->tileMap;
#endif
#if HEVC_TILES_WPP || HEVC_DEPENDENT_SLICES
const uint32_t CtuAddr = CU::getCtuAddr( *bestCS->getCU( partitioner.chType ) );
#endif
const bool isEndOfSlice = slice.getSliceMode() == FIXED_NUMBER_OF_BYTES
&& ((slice.getSliceBits() + CS::getEstBits(*bestCS)) > slice.getSliceArgument() << 3)
#if HEVC_TILES_WPP
&& CtuAddr != tileMap.getCtuTsToRsAddrMap(slice.getSliceCurStartCtuTsAddr())
#endif
#if HEVC_DEPENDENT_SLICES
&& CtuAddr != tileMap.getCtuTsToRsAddrMap(slice.getSliceSegmentCurStartCtuTsAddr());
#else
;
#endif
#if HEVC_DEPENDENT_SLICES
const bool isEndOfSliceSegment = slice.getSliceSegmentMode() == FIXED_NUMBER_OF_BYTES
&& ((slice.getSliceSegmentBits() + CS::getEstBits(*bestCS)) > slice.getSliceSegmentArgument() << 3)
&& CtuAddr != tileMap.getCtuTsToRsAddrMap(slice.getSliceSegmentCurStartCtuTsAddr());
// Do not need to check slice condition for slice-segment since a slice-segment is a subset of a slice.
if (isEndOfSlice || isEndOfSliceSegment)
#else
if(isEndOfSlice)
#endif
{
bestCS->cost = MAX_DOUBLE;

Karsten Suehring
committed
}
}
#if JVET_M0428_ENC_DB_OPT
else
{
bestCS->costDbOffset = 0;
}
tempCS->useDbCost = m_pcEncCfg->getUseEncDbOpt();
#endif

Karsten Suehring
committed
// RD check for sub partitioned coding structure.

Karsten Suehring
committed
xCheckBestMode( tempCS, bestCS, partitioner, encTestMode );
if (isAffMVInfoSaved)
m_pcInterSearch->addAffMVInfo(tmpMVInfo);
if ((!slice.isIntra() || slice.getSPS()->getIBCFlag())
{
std::swap(tempMotCandLUTs, bestMotCandLUTs);
}
tempCS->slice->copyMotionLUTs(m_pSplitTempMotLUTs[wParIdx][hParIdx], tempMotCandLUTs);
}

Karsten Suehring
committed
tempCS->releaseIntermediateData();
tempCS->prevQP[partitioner.chType] = oldPrevQp;
}
void EncCu::xCheckRDCostIntra( CodingStructure *&tempCS, CodingStructure *&bestCS, Partitioner &partitioner, const EncTestMode& encTestMode )
{

Karsten Suehring
committed
double bestInterCost = m_modeCtrl->getBestInterCost();
double costSize2Nx2NemtFirstPass = m_modeCtrl->getEmtSize2Nx2NFirstPassCost();
bool skipSecondEmtPass = m_modeCtrl->getSkipSecondEMTPass();
const SPS &sps = *tempCS->sps;
#endif
const PPS &pps = *tempCS->pps;
#if !JVET_M0464_UNI_MTS

Karsten Suehring
committed
const CodingUnit *bestCU = bestCS->getCU( partitioner.chType );
const int maxSizeEMT = EMT_INTRA_MAX_CU_WITH_QTBT;
uint8_t considerEmtSecondPass = ( sps.getUseIntraEMT() && isLuma( partitioner.chType ) && partitioner.currArea().lwidth() <= maxSizeEMT && partitioner.currArea().lheight() <= maxSizeEMT ) ? 1 : 0;

Karsten Suehring
committed
#if JVET_M0102_INTRA_SUBPARTITIONS
bool useIntraSubPartitions = false;
double maxCostAllowedForChroma = MAX_DOUBLE;
#if JVET_M0464_UNI_MTS
const CodingUnit *bestCU = bestCS->getCU( partitioner.chType );
#endif
#endif

Karsten Suehring
committed
Distortion interHad = m_modeCtrl->getInterHad();

Karsten Suehring
committed
for( uint8_t emtCuFlag = 0; emtCuFlag <= considerEmtSecondPass; emtCuFlag++ )
{
//Possible early EMT tests interruptions
//2) Second EMT pass. This "if clause" is necessary because of the NSST and PDPC "for loops".
if( emtCuFlag && skipSecondEmtPass )
{
continue;
}
//3) if interHad is 0, only try further modes if some intra mode was already better than inter
if( m_pcEncCfg->getUsePbIntraFast() && !tempCS->slice->isIntra() && bestCU && CU::isInter( *bestCS->getCU( partitioner.chType ) ) && interHad == 0 )
{
continue;
}

Karsten Suehring
committed
tempCS->initStructData( encTestMode.qp, encTestMode.lossless );
CodingUnit &cu = tempCS->addCU( CS::getArea( *tempCS, tempCS->area, partitioner.chType ), partitioner.chType );
partitioner.setCUData( cu );
cu.slice = tempCS->slice;
#if HEVC_TILES_WPP
cu.tileIdx = tempCS->picture->tileMap->getTileIdxMap( tempCS->area.lumaPos() );
#endif
cu.skip = false;
cu.mmvdSkip = false;

Karsten Suehring
committed
cu.predMode = MODE_INTRA;
cu.transQuantBypass = encTestMode.lossless;
cu.chromaQpAdj = cu.transQuantBypass ? 0 : m_cuChromaQpOffsetIdxPlus1;
cu.qp = encTestMode.qp;
//cu.ipcm = false;

Karsten Suehring
committed
cu.emtFlag = emtCuFlag;
#if JVET_M0102_INTRA_SUBPARTITIONS
cu.ispMode = NOT_INTRA_SUBPARTITIONS;
#endif

Karsten Suehring
committed
CU::addPUs( cu );
tempCS->interHad = interHad;
#if JVET_M0428_ENC_DB_OPT
m_bestModeUpdated = tempCS->useDbCost = bestCS->useDbCost = false;
#endif

Karsten Suehring
committed
if( isLuma( partitioner.chType ) )
{
#if JVET_M0102_INTRA_SUBPARTITIONS
//the Intra SubPartitions mode uses the value of the best cost so far (luma if it is the fast version) to avoid test non-necessary lines
const double bestCostSoFar = CS::isDualITree( *tempCS ) ? m_modeCtrl->getBestCostWithoutSplitFlags() : bestCU && bestCU->predMode == MODE_INTRA ? bestCS->lumaCost : bestCS->cost;
m_pcIntraSearch->estIntraPredLumaQT( cu, partitioner, bestCostSoFar );
useIntraSubPartitions = cu.ispMode != NOT_INTRA_SUBPARTITIONS;
if( !CS::isDualITree( *tempCS ) )
{
tempCS->lumaCost = m_pcRdCost->calcRdCost( tempCS->fracBits, tempCS->dist );
if( useIntraSubPartitions )
{
//the difference between the best cost so far and the current luma cost is stored to avoid testing the Cr component if the cost of luma + Cb is larger than the best cost
maxCostAllowedForChroma = bestCS->cost < MAX_DOUBLE ? bestCS->cost - tempCS->lumaCost : MAX_DOUBLE;
}
}
#else

Karsten Suehring
committed
m_pcIntraSearch->estIntraPredLumaQT( cu, partitioner );

Karsten Suehring
committed
if (m_pcEncCfg->getUsePbIntraFast() && tempCS->dist == std::numeric_limits<Distortion>::max()
&& tempCS->interHad == 0)
{
interHad = 0;
// JEM assumes only perfect reconstructions can from now on beat the inter mode
m_modeCtrl->enforceInterHad( 0 );

Karsten Suehring
committed
continue;

Karsten Suehring
committed
}
if( !CS::isDualITree( *tempCS ) )
{
cu.cs->picture->getRecoBuf( cu.Y() ).copyFrom( cu.cs->getRecoBuf( COMPONENT_Y ) );
#if JVET_M0427_INLOOP_RESHAPER
cu.cs->picture->getPredBuf(cu.Y()).copyFrom(cu.cs->getPredBuf(COMPONENT_Y));
#endif

Karsten Suehring
committed
}
}
if( tempCS->area.chromaFormat != CHROMA_400 && ( partitioner.chType == CHANNEL_TYPE_CHROMA || !CS::isDualITree( *tempCS ) ) )
{
#if JVET_M0102_INTRA_SUBPARTITIONS
TUIntraSubPartitioner subTuPartitioner( partitioner );
m_pcIntraSearch->estIntraPredChromaQT( cu, ( !useIntraSubPartitions || ( CS::isDualITree( *cu.cs ) && !isLuma( CHANNEL_TYPE_CHROMA ) ) ) ? partitioner : subTuPartitioner, maxCostAllowedForChroma );
if( useIntraSubPartitions && !cu.ispMode )
{
//At this point the temp cost is larger than the best cost. Therefore, we can already skip the remaining calculations
#if JVET_M0464_UNI_MTS
return;
#else
continue;
#endif
}
#else

Karsten Suehring
committed
m_pcIntraSearch->estIntraPredChromaQT( cu, partitioner );

Karsten Suehring
committed
}
cu.rootCbf = false;
for( uint32_t t = 0; t < getNumberValidTBlocks( *cu.cs->pcv ); t++ )
{
cu.rootCbf |= cu.firstTU->cbf[t] != 0;
}
// Get total bits for current mode: encode CU
m_CABACEstimator->resetBits();
if( pps.getTransquantBypassEnabledFlag() )
{
m_CABACEstimator->cu_transquant_bypass_flag( cu );
}
if ((!cu.cs->slice->isIntra() || cu.cs->slice->getSPS()->getIBCFlag())

Karsten Suehring
committed
{
m_CABACEstimator->cu_skip_flag ( cu );
}
m_CABACEstimator->pred_mode ( cu );
m_CABACEstimator->extend_ref_line( cu );
#if JVET_M0102_INTRA_SUBPARTITIONS
m_CABACEstimator->isp_mode ( cu );
#endif

Karsten Suehring
committed
m_CABACEstimator->cu_pred_data ( cu );
m_CABACEstimator->pcm_data ( cu, partitioner );

Karsten Suehring
committed
// Encode Coefficients
CUCtx cuCtx;
cuCtx.isDQPCoded = true;
cuCtx.isChromaQpAdjCoded = true;
m_CABACEstimator->cu_residual( cu, partitioner, cuCtx );
tempCS->fracBits = m_CABACEstimator->getEstFracBits();
tempCS->cost = m_pcRdCost->calcRdCost(tempCS->fracBits, tempCS->dist);
#if JVET_M0102_INTRA_SUBPARTITIONS
#if !JVET_M0464_UNI_MTS
double bestIspCost = cu.ispMode ? CS::isDualITree(*tempCS) ? tempCS->cost : tempCS->lumaCost : MAX_DOUBLE;
#endif
const double tmpCostWithoutSplitFlags = tempCS->cost;
#endif

Karsten Suehring
committed
xEncodeDontSplit( *tempCS, partitioner );
xCheckDQP( *tempCS, partitioner );
#if JVET_M0102_INTRA_SUBPARTITIONS
if( tempCS->cost < bestCS->cost )
{
m_modeCtrl->setBestCostWithoutSplitFlags( tmpCostWithoutSplitFlags );
}
#endif

Karsten Suehring
committed
// we save the cost of the modes for the first EMT pass
if( !emtCuFlag ) static_cast< double& >( costSize2Nx2NemtFirstPass ) = tempCS->cost;

Karsten Suehring
committed
#if WCG_EXT
DTRACE_MODE_COST( *tempCS, m_pcRdCost->getLambda( true ) );
#else
DTRACE_MODE_COST( *tempCS, m_pcRdCost->getLambda() );
#endif
xCheckBestMode( tempCS, bestCS, partitioner, encTestMode );
#if JVET_M0102_INTRA_SUBPARTITIONS
//we decide to skip the second emt pass or not according to the ISP results
if (considerEmtSecondPass && cu.ispMode && !emtCuFlag && tempCS->slice->isIntra())
{
double bestCostDct2NoIsp = m_modeCtrl->getEmtFirstPassNoIspCost();
CHECKD(bestCostDct2NoIsp <= bestIspCost, "wrong cost!");
double nSamples = (double)(cu.lwidth() << g_aucLog2[cu.lheight()]);
double threshold = 1 + 1.4 / sqrt(nSamples);
if (bestCostDct2NoIsp > bestIspCost*threshold)
{
skipSecondEmtPass = true;
m_modeCtrl->setSkipSecondEMTPass(true);
break;
}
}
#endif

Karsten Suehring
committed
//now we check whether the second pass of SIZE_2Nx2N and the whole Intra SIZE_NxN should be skipped or not
if( !emtCuFlag && !tempCS->slice->isIntra() && bestCU && bestCU->predMode != MODE_INTRA && m_pcEncCfg->getFastInterEMT() )

Karsten Suehring
committed
{
const double thEmtInterFastSkipIntra = 1.4; // Skip checking Intra if "2Nx2N using DCT2" is worse than best Inter mode
if( costSize2Nx2NemtFirstPass > thEmtInterFastSkipIntra * bestInterCost )
{
skipSecondEmtPass = true;
m_modeCtrl->setSkipSecondEMTPass( true );
break;
}
}

Karsten Suehring
committed
} //for emtCuFlag
}
// RD-checks the intra PCM (raw-sample) mode for the current CU: builds a
// single-CU/PU/TU structure, runs the PCM search, estimates the signalling
// bits, and submits the candidate to the best-mode comparison.
// NOTE(review): this span still contains interleaved VCS-blame artifact lines
// ("Karsten Suehring" / "committed") from the extraction, and the cu_skip_flag
// guard condition at the `if ((!cu.cs->slice->isIntra() ...` line appears
// truncated (unbalanced parenthesis) — original bytes preserved as-is.
void EncCu::xCheckIntraPCM(CodingStructure *&tempCS, CodingStructure *&bestCS, Partitioner &partitioner, const EncTestMode& encTestMode )
{
// Reset the temp structure for this QP / lossless setting and add the one CU.
tempCS->initStructData( encTestMode.qp, encTestMode.lossless );
CodingUnit &cu = tempCS->addCU( CS::getArea( *tempCS, tempCS->area, partitioner.chType ), partitioner.chType );

Karsten Suehring
committed
partitioner.setCUData( cu );
cu.slice = tempCS->slice;
#if HEVC_TILES_WPP
cu.tileIdx = tempCS->picture->tileMap->getTileIdxMap( tempCS->area.lumaPos() );
#endif
cu.skip = false;
cu.mmvdSkip = false;

Karsten Suehring
committed
// PCM is signalled as an intra CU with the ipcm flag set.
cu.predMode = MODE_INTRA;
cu.transQuantBypass = encTestMode.lossless;
cu.chromaQpAdj = cu.transQuantBypass ? 0 : m_cuChromaQpOffsetIdxPlus1;
cu.qp = encTestMode.qp;
cu.ipcm = true;
tempCS->addPU( CS::getArea( *tempCS, tempCS->area, partitioner.chType ), partitioner.chType );
tempCS->addTU( CS::getArea( *tempCS, tempCS->area, partitioner.chType ), partitioner.chType );

Karsten Suehring
committed
// Fill the PCM buffers / reconstruction for this CU.
m_pcIntraSearch->IPCMSearch(*tempCS, partitioner);
// Rewind the CABAC estimator to the context saved at the start of this level
// before counting the mode's signalling bits.
m_CABACEstimator->getCtx() = m_CurrCtx->start;
m_CABACEstimator->resetBits();
if( tempCS->pps->getTransquantBypassEnabledFlag() )
{
m_CABACEstimator->cu_transquant_bypass_flag( cu );
}
if ((!cu.cs->slice->isIntra() || cu.cs->slice->getSPS()->getIBCFlag())

Karsten Suehring
committed
{
m_CABACEstimator->cu_skip_flag ( cu );
}
m_CABACEstimator->pred_mode ( cu );
m_CABACEstimator->pcm_data ( cu, partitioner );

Karsten Suehring
committed
// Total RD cost = estimated bits + distortion already stored in tempCS.
tempCS->fracBits = m_CABACEstimator->getEstFracBits();
tempCS->cost = m_pcRdCost->calcRdCost(tempCS->fracBits, tempCS->dist);
xEncodeDontSplit( *tempCS, partitioner );
xCheckDQP( *tempCS, partitioner );

Karsten Suehring
committed
#if WCG_EXT
DTRACE_MODE_COST( *tempCS, m_pcRdCost->getLambda( true ) );
#else
DTRACE_MODE_COST( *tempCS, m_pcRdCost->getLambda() );
#endif
// Compare against the current best mode and keep the cheaper one.
xCheckBestMode( tempCS, bestCS, partitioner, encTestMode );
}
void EncCu::xCheckDQP( CodingStructure& cs, Partitioner& partitioner, bool bKeepCtx )
{
CHECK( bKeepCtx && cs.cus.size() <= 1 && partitioner.getImplicitSplit( cs ) == CU_DONT_SPLIT, "bKeepCtx should only be set in split case" );
CHECK( !bKeepCtx && cs.cus.size() > 1, "bKeepCtx should never be set for non-split case" );
if( !cs.pps->getUseDQP() )
{
return;
}

Christian Helmrich
committed
if (CS::isDualITree(cs) && isChroma(partitioner.chType))
{
return;
}

Karsten Suehring
committed
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
if( bKeepCtx && partitioner.currDepth != cs.pps->getMaxCuDQPDepth() )
{
return;
}
if( !bKeepCtx && partitioner.currDepth > cs.pps->getMaxCuDQPDepth() )
{
return;
}
CodingUnit* cuFirst = cs.getCU( partitioner.chType );
CHECK( !cuFirst, "No CU available" );
bool hasResidual = false;
for( const auto &cu : cs.cus )
{
if( cu->rootCbf )
{
hasResidual = true;
break;
}
}
int predQP = CU::predictQP( *cuFirst, cs.prevQP[partitioner.chType] );
if( hasResidual )
{
TempCtx ctxTemp( m_CtxCache );
if( !bKeepCtx ) ctxTemp = SubCtx( Ctx::DeltaQP, m_CABACEstimator->getCtx() );
m_CABACEstimator->resetBits();
m_CABACEstimator->cu_qp_delta( *cuFirst, predQP, cuFirst->qp );
cs.fracBits += m_CABACEstimator->getEstFracBits(); // dQP bits
cs.cost = m_pcRdCost->calcRdCost(cs.fracBits, cs.dist);
if( !bKeepCtx ) m_CABACEstimator->getCtx() = SubCtx( Ctx::DeltaQP, ctxTemp );
// NOTE: reset QPs for CUs without residuals up to first coded CU
for( const auto &cu : cs.cus )
{
if( cu->rootCbf )
{
break;
}
cu->qp = predQP;
}
}
else
{
// No residuals: reset CU QP to predicted value
for( const auto &cu : cs.cus )
{
cu->qp = predQP;
}
}
}
// Copies the original samples of every component of every TU of `cu` into the
// TU's PCM buffer, so they can be transmitted verbatim in PCM mode.
// With the in-loop reshaper active, luma samples are first mapped through the
// forward reshaping LUT via a temporary buffer before being stored.
// NOTE(review): the span contains one interleaved VCS-blame artifact
// ("Karsten Suehring" / "committed") from the extraction; bytes preserved as-is.
void EncCu::xFillPCMBuffer( CodingUnit &cu )
{
const ChromaFormat format = cu.chromaFormat;
const uint32_t numberValidComponents = getNumberValidComponents(format);
for( auto &tu : CU::traverseTUs( cu ) )
{
for( uint32_t ch = 0; ch < numberValidComponents; ch++ )
{
const ComponentID compID = ComponentID( ch );
const CompArea &compArea = tu.blocks[ compID ];
// Source: original picture samples; destination: the TU's PCM sample buffer.
const CPelBuf source = tu.cs->getOrgBuf( compArea );
PelBuf destination = tu.getPcmbuf( compID );
#if JVET_M0427_INLOOP_RESHAPER
if (tu.cs->slice->getReshapeInfo().getUseSliceReshaper() && m_pcReshape->getCTUFlag() && compID == COMPONENT_Y)
{
// Reshaper on: map luma through the forward LUT in a scratch buffer first.
CompArea tmpArea(COMPONENT_Y, compArea.chromaFormat, Position(0, 0), compArea.size());
PelBuf tempOrgBuf = m_tmpStorageLCU->getBuf(tmpArea);
tempOrgBuf.copyFrom(source);
tempOrgBuf.rspSignal(m_pcReshape->getFwdLUT());
destination.copyFrom(tempOrgBuf);
}
else
#endif
destination.copyFrom( source );

Karsten Suehring
committed
}
}
}
#if JVET_M0253_HASH_ME
void EncCu::xCheckRDCostHashInter( CodingStructure *&tempCS, CodingStructure *&bestCS, Partitioner &partitioner, const EncTestMode& encTestMode )
{
bool isPerfectMatch = false;
tempCS->initStructData(encTestMode.qp, encTestMode.lossless);