Commit 9eca0c5d authored by Frank Bossen

Merge branch 'JVET-V0056' into 'master'

JVET-V0056: Changes to MCTF

See merge request jvet/VVCSoftware_VTM!2059
parents 94a796dd 226c3040
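For orientation: the motion-compensated temporal filter (MCTF) modified here replaces each original sample by a weighted average of that sample and its motion-compensated reference samples. Read informally from the bilateralFilter() code in the diff below (the symbols are shorthand, not identifiers from the source), the update is

    \tilde{p} \;=\; \frac{p + \sum_i w_i\, r_i}{1 + \sum_i w_i},
    \qquad
    w_i \;=\; s_{\mathrm{overall}}\, s_{\mathrm{comp}}\, s_{\mathrm{ref}}[\mathrm{row}][\mathrm{idx}_i]\; w_a \exp\!\left(-\frac{\Delta_i^2}{2\, s_w\, \sigma^2}\right)

where p is the original sample, r_i the i-th motion-compensated reference sample, \Delta_i their bit-depth-scaled difference, s_ref the m_refStrengths table indexed by reference-window row and |POC offset|, and \sigma^2 the QP-dependent (luma) or fixed (chroma) sigma. With JVET_V0056_MCTF enabled, w_a and s_w are the new per-block factors derived from the block motion error and noise estimates; without it both are 1.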
@@ -55,7 +55,7 @@
//########### place macros to be removed in next cycle below this line ###############
#define JVET_V0056_MCTF 1 // JVET-V0056: Changes to MCTF
#define JVET_S0078_NOOUTPUTPRIORPICFLAG 0 // JVET-S0078: Handling of NoOutputOfPriorPicsFlag in output process
......
@@ -43,10 +43,14 @@
// Constructor / destructor / initialization / destroy
// ====================================================================================================================
#if JVET_V0056_MCTF
const int EncTemporalFilter::m_range = 4;
#else
const int EncTemporalFilter::m_range = 2;
const double EncTemporalFilter::m_chromaFactor = 0.55;
const double EncTemporalFilter::m_sigmaMultiplier = 9.0;
const double EncTemporalFilter::m_sigmaZeroPoint = 10.0;
#endif
const double EncTemporalFilter::m_chromaFactor = 0.55;
const double EncTemporalFilter::m_sigmaMultiplier = 9.0;
const double EncTemporalFilter::m_sigmaZeroPoint = 10.0;
const int EncTemporalFilter::m_motionVectorFactor = 16;
const int EncTemporalFilter::m_padding = 128;
const int EncTemporalFilter::m_interpolationFilter[16][8] =
@@ -69,6 +73,15 @@ const int EncTemporalFilter::m_interpolationFilter[16][8] =
{ 0, 0, -2, 4, 64, -3, 1, 0 } //15-->-->
};
#if JVET_V0056_MCTF
const double EncTemporalFilter::m_refStrengths[3][4] =
{ // abs(POC offset)
//   1     2     3     4
{0.85, 0.57, 0.41, 0.33}, // m_range * 2
{1.13, 0.97, 0.81, 0.57}, // m_range
{0.30, 0.30, 0.30, 0.30} // otherwise
};
#else
const double EncTemporalFilter::m_refStrengths[3][2] =
{ // abs(POC offset)
// 1, 2
@@ -76,6 +89,7 @@ const double EncTemporalFilter::m_refStrengths[3][2] =
{1.20, 1.00}, // m_range
{0.30, 0.30} // otherwise
};
#endif
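As a side note on the table above: the row and column of m_refStrengths used for a given reference are chosen in bilateralFilter() further down. A minimal standalone sketch of that selection, with refStrength, numRefs, range and origOffset as hypothetical stand-ins for the corresponding members and locals (the past-only case is assumed from the row comments):

#include <algorithm>
#include <cstdlib>

// Hypothetical helper mirroring the selection in bilateralFilter(): the row
// depends on how many reference pictures were actually read, the column on the
// absolute POC offset of the reference (capped at 4 entries with
// JVET_V0056_MCTF, at 2 without it).
double refStrength(const double refStrengths[3][4], int numRefs, int range, int origOffset)
{
  int row = 2;                    // "otherwise" row
  if (numRefs == range * 2)       // full window: past and future references
  {
    row = 0;
  }
  else if (numRefs == range)      // presumably the past-only case ("m_range" row)
  {
    row = 1;
  }
  const int col = std::min(3, std::abs(origOffset) - 1);
  return refStrengths[row][col];
}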
EncTemporalFilter::EncTemporalFilter() :
m_FrameSkip(0),
@@ -105,23 +119,23 @@ void EncTemporalFilter::init(const int frameSkip,
m_FrameSkip = frameSkip;
for (int i = 0; i < MAX_NUM_CHANNEL_TYPE; i++)
{
m_inputBitDepth[i] = inputBitDepth[i];
m_inputBitDepth[i] = inputBitDepth[i];
m_MSBExtendedBitDepth[i] = msbExtendedBitDepth[i];
m_internalBitDepth[i] = internalBitDepth[i];
m_internalBitDepth[i] = internalBitDepth[i];
}
m_sourceWidth = width;
m_sourceWidth = width;
m_sourceHeight = height;
for (int i = 0; i < 2; i++)
{
m_pad[i] = pad[i];
}
m_clipInputVideoToRec709Range = rec709;
m_inputFileName = filename;
m_inputFileName = filename;
m_chromaFormatIDC = inputChromaFormatIDC;
m_inputColourSpaceConvert = colorSpaceConv;
m_area = Area(0, 0, width, height);
m_QP = qp;
m_QP = qp;
m_temporalFilterStrengths = temporalFilterStrengths;
m_gopBasedTemporalFilterFutureReference = gopBasedTemporalFilterFutureReference;
}
@@ -153,11 +167,10 @@ bool EncTemporalFilter::filter(PelStorage *orgPic, int receivedPoc)
yuvFrames.open(m_inputFileName, false, m_inputBitDepth, m_MSBExtendedBitDepth, m_internalBitDepth);
yuvFrames.skipFrames(std::max(offset + receivedPoc - m_range, 0), m_sourceWidth - m_pad[0], m_sourceHeight - m_pad[1], m_chromaFormatIDC);
std::deque<TemporalFilterSourcePicInfo> srcFrameInfo;
int firstFrame = receivedPoc + offset - m_range;
int lastFrame = receivedPoc + offset + m_range;
int lastFrame = receivedPoc + offset + m_range;
if (!m_gopBasedTemporalFilterFutureReference)
{
lastFrame = receivedPoc + offset - 1;
@@ -192,7 +205,7 @@ bool EncTemporalFilter::filter(PelStorage *orgPic, int receivedPoc)
continue;
}
srcFrameInfo.push_back(TemporalFilterSourcePicInfo());
TemporalFilterSourcePicInfo &srcPic=srcFrameInfo.back();
TemporalFilterSourcePicInfo &srcPic = srcFrameInfo.back();
PelStorage dummyPicBufferTO; // Only used temporarily in yuvFrames.read
srcPic.picBuffer.create(m_chromaFormatIDC, m_area, 0, m_padding);
@@ -240,20 +253,20 @@ bool EncTemporalFilter::filter(PelStorage *orgPic, int receivedPoc)
void EncTemporalFilter::subsampleLuma(const PelStorage &input, PelStorage &output, const int factor) const
{
const int newWidth = input.Y().width / factor;
const int newWidth = input.Y().width / factor;
const int newHeight = input.Y().height / factor;
output.create(m_chromaFormatIDC, Area(0, 0, newWidth, newHeight), 0, m_padding);
const Pel* srcRow = input.Y().buf;
const Pel* srcRow = input.Y().buf;
const int srcStride = input.Y().stride;
Pel *dstRow = output.Y().buf;
Pel *dstRow = output.Y().buf;
const int dstStride = output.Y().stride;
for (int y = 0; y < newHeight; y++, srcRow+=factor*srcStride, dstRow+=dstStride)
for (int y = 0; y < newHeight; y++, srcRow += factor * srcStride, dstRow += dstStride)
{
const Pel *inRow = srcRow;
const Pel *inRowBelow = srcRow+srcStride;
Pel *target = dstRow;
const Pel *inRowBelow = srcRow + srcStride;
Pel *target = dstRow;
for (int x = 0; x < newWidth; x++)
{
@@ -275,19 +288,19 @@ int EncTemporalFilter::motionErrorLuma(const PelStorage &orig,
const int besterror = 8 * 8 * 1024 * 1024) const
{
const Pel* origOrigin = orig.Y().buf;
const int origStride = orig.Y().stride;
const Pel *buffOrigin = buffer.Y().buf;
const int buffStride = buffer.Y().stride;
const int origStride = orig.Y().stride;
const Pel* buffOrigin = buffer.Y().buf;
const int buffStride = buffer.Y().stride;
int error = 0;// dx * 10 + dy * 10;
int error = 0;
if (((dx | dy) & 0xF) == 0)
{
dx /= m_motionVectorFactor;
dy /= m_motionVectorFactor;
for (int y1 = 0; y1 < bs; y1++)
{
const Pel* origRowStart = origOrigin + (y+y1)*origStride + x;
const Pel* bufferRowStart = buffOrigin + (y+y1+dy)*buffStride + (x+dx);
const Pel* origRowStart = origOrigin + (y + y1) * origStride + x;
const Pel* bufferRowStart = buffOrigin + (y + y1 + dy) * buffStride + (x + dx);
for (int x1 = 0; x1 < bs; x1 += 2)
{
int diff = origRowStart[x1] - bufferRowStart[x1];
@@ -311,7 +324,7 @@ int EncTemporalFilter::motionErrorLuma(const PelStorage &orig,
for (int y1 = 1; y1 < bs + 7; y1++)
{
const int yOffset = y + y1 + (dy >> 4) - 3;
const Pel *sourceRow = buffOrigin + (yOffset)*buffStride + 0;
const Pel *sourceRow = buffOrigin + yOffset * buffStride + 0;
for (int x1 = 0; x1 < bs; x1++)
{
sum = 0;
@@ -329,10 +342,10 @@ int EncTemporalFilter::motionErrorLuma(const PelStorage &orig,
}
}
const Pel maxSampleValue = (1<<m_internalBitDepth[CHANNEL_TYPE_LUMA])-1;
const Pel maxSampleValue = (1 << m_internalBitDepth[CHANNEL_TYPE_LUMA]) - 1;
for (int y1 = 0; y1 < bs; y1++)
{
const Pel *origRow = origOrigin + (y+y1)*origStride + 0;
const Pel *origRow = origOrigin + (y + y1) * origStride;
for (int x1 = 0; x1 < bs; x1++)
{
sum = 0;
@@ -360,15 +373,25 @@ int EncTemporalFilter::motionErrorLuma(const PelStorage &orig,
void EncTemporalFilter::motionEstimationLuma(Array2D<MotionVector> &mvs, const PelStorage &orig, const PelStorage &buffer, const int blockSize,
const Array2D<MotionVector> *previous, const int factor, const bool doubleRes) const
{
#if JVET_V0056_MCTF
int range = doubleRes ? 0 : 5;
#else
int range = 5;
#endif
const int stepSize = blockSize;
const int origWidth = orig.Y().width;
const int origHeight = orig.Y().height;
#if JVET_V0056_MCTF
for (int blockY = 0; blockY + blockSize <= origHeight; blockY += stepSize)
{
for (int blockX = 0; blockX + blockSize <= origWidth; blockX += stepSize)
#else
for (int blockY = 0; blockY + blockSize < origHeight; blockY += stepSize)
{
for (int blockX = 0; blockX + blockSize < origWidth; blockX += stepSize)
#endif
{
MotionVector best;
@@ -378,10 +401,18 @@ void EncTemporalFilter::motionEstimationLuma(Array2D<MotionVector> &mvs, const P
}
else
{
#if JVET_V0056_MCTF
for (int py = -1; py <= 1; py++)
#else
for (int py = -2; py <= 2; py++)
#endif
{
int testy = blockY / (2 * blockSize) + py;
#if JVET_V0056_MCTF
for (int px = -1; px <= 1; px++)
#else
for (int px = -2; px <= 2; px++)
#endif
{
int testx = blockX / (2 * blockSize) + px;
if ((testx >= 0) && (testx < origWidth / (2 * blockSize)) && (testy >= 0) && (testy < origHeight / (2 * blockSize)))
@@ -395,6 +426,13 @@ void EncTemporalFilter::motionEstimationLuma(Array2D<MotionVector> &mvs, const P
}
}
}
#if JVET_V0056_MCTF
int error = motionErrorLuma(orig, buffer, blockX, blockY, 0, 0, blockSize, best.error);
if (error < best.error)
{
best.set(0, 0, error);
}
#endif
}
MotionVector prevBest = best;
for (int y2 = prevBest.y / m_motionVectorFactor - range; y2 <= prevBest.y / m_motionVectorFactor + range; y2++)
@@ -409,7 +447,7 @@ void EncTemporalFilter::motionEstimationLuma(Array2D<MotionVector> &mvs, const P
}
}
if (doubleRes)
{ // merge into one loop, probably with precision array (here [12, 3] or maybe [4, 1]) with setable number of iterations
{
prevBest = best;
int doubleRange = 3 * 4;
for (int y2 = prevBest.y - doubleRange; y2 <= prevBest.y + doubleRange; y2 += 4)
@@ -421,7 +459,6 @@ void EncTemporalFilter::motionEstimationLuma(Array2D<MotionVector> &mvs, const P
{
best.set(x2, y2, error);
}
}
}
@@ -436,11 +473,53 @@ void EncTemporalFilter::motionEstimationLuma(Array2D<MotionVector> &mvs, const P
{
best.set(x2, y2, error);
}
}
}
}
#if JVET_V0056_MCTF
if (blockY > 0)
{
MotionVector aboveMV = mvs.get(blockX / stepSize, (blockY - stepSize) / stepSize);
int error = motionErrorLuma(orig, buffer, blockX, blockY, aboveMV.x, aboveMV.y, blockSize, best.error);
if (error < best.error)
{
best.set(aboveMV.x, aboveMV.y, error);
}
}
if (blockX > 0)
{
MotionVector leftMV = mvs.get((blockX - stepSize) / stepSize, blockY / stepSize);
int error = motionErrorLuma(orig, buffer, blockX, blockY, leftMV.x, leftMV.y, blockSize, best.error);
if (error < best.error)
{
best.set(leftMV.x, leftMV.y, error);
}
}
// calculate average
double avg = 0.0;
for (int x1 = 0; x1 < blockSize; x1++)
{
for (int y1 = 0; y1 < blockSize; y1++)
{
avg = avg + orig.Y().at(blockX + x1, blockY + y1);
}
}
avg = avg / (blockSize * blockSize);
// calculate variance
double variance = 0;
for (int x1 = 0; x1 < blockSize; x1++)
{
for (int y1 = 0; y1 < blockSize; y1++)
{
int pix = orig.Y().at(blockX + x1, blockY + y1);
variance = variance + (pix - avg) * (pix - avg);
}
}
best.error = (int)(20 * ((best.error + 5.0) / (variance + 5.0)) + (best.error / (blockSize * blockSize)) / 50);
#endif
mvs.get(blockX / stepSize, blockY / stepSize) = best;
}
}
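The JVET_V0056_MCTF block a few lines above rescales the best matching error by the block's pixel variance before storing it in the motion-vector grid. A minimal standalone sketch of that normalisation, assuming the block is handed over as a flat vector of original luma samples (normalizedBlockError, sad and pixels are hypothetical names):

#include <vector>

// Hypothetical sketch of the JVET-V0056 error normalisation: flat, low-variance
// blocks end up with a larger normalised error than textured blocks with the
// same SAD, which later reduces their weight in the bilateral filter.
int normalizedBlockError(int sad, const std::vector<int> &pixels)
{
  double avg = 0.0;
  for (int p : pixels)
  {
    avg += p;
  }
  avg /= pixels.size();

  double variance = 0.0;
  for (int p : pixels)
  {
    variance += (p - avg) * (p - avg);
  }

  // Same expression as in the diff, with blockSize * blockSize == pixels.size().
  return (int) (20 * ((sad + 5.0) / (variance + 5.0)) + (sad / (int) pixels.size()) / 50);
}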
@@ -448,7 +527,7 @@ void EncTemporalFilter::motionEstimationLuma(Array2D<MotionVector> &mvs, const P
void EncTemporalFilter::motionEstimation(Array2D<MotionVector> &mv, const PelStorage &orgPic, const PelStorage &buffer, const PelStorage &origSubsampled2, const PelStorage &origSubsampled4) const
{
const int width = m_sourceWidth;
const int width = m_sourceWidth;
const int height = m_sourceHeight;
Array2D<MotionVector> mv_0(width / 16, height / 16);
Array2D<MotionVector> mv_1(width / 16, height / 16);
@@ -469,25 +548,25 @@ void EncTemporalFilter::motionEstimation(Array2D<MotionVector> &mv, const PelSto
void EncTemporalFilter::applyMotion(const Array2D<MotionVector> &mvs, const PelStorage &input, PelStorage &output) const
{
static const int lumaBlockSize=8;
static const int lumaBlockSize = 8;
for(int c=0; c< getNumberValidComponents(m_chromaFormatIDC); c++)
for(int c = 0; c < getNumberValidComponents(m_chromaFormatIDC); c++)
{
const ComponentID compID=(ComponentID)c;
const int csx=getComponentScaleX(compID, m_chromaFormatIDC);
const int csy=getComponentScaleY(compID, m_chromaFormatIDC);
const int blockSizeX = lumaBlockSize>>csx;
const int blockSizeY = lumaBlockSize>>csy;
const ComponentID compID = (ComponentID)c;
const int csx = getComponentScaleX(compID, m_chromaFormatIDC);
const int csy = getComponentScaleY(compID, m_chromaFormatIDC);
const int blockSizeX = lumaBlockSize >> csx;
const int blockSizeY = lumaBlockSize >> csy;
const int height = input.bufs[c].height;
const int width = input.bufs[c].width;
const Pel maxValue = (1<<m_internalBitDepth[toChannelType(compID)])-1;
const Pel maxValue = (1 << m_internalBitDepth[toChannelType(compID)]) - 1;
const Pel *srcImage = input.bufs[c].buf;
const int srcStride = input.bufs[c].stride;
const int srcStride = input.bufs[c].stride;
Pel *dstImage = output.bufs[c].buf;
int dstStride = output.bufs[c].stride;
int dstStride = output.bufs[c].stride;
for (int y = 0, blockNumY = 0; y + blockSizeY <= height; y += blockSizeY, blockNumY++)
{
@@ -496,23 +575,23 @@ void EncTemporalFilter::applyMotion(const Array2D<MotionVector> &mvs, const PelS
const MotionVector &mv = mvs.get(blockNumX,blockNumY);
const int dx = mv.x >> csx ;
const int dy = mv.y >> csy ;
const int xInt = mv.x >> (4+csx) ;
const int yInt = mv.y >> (4+csy) ;
const int xInt = mv.x >> (4 + csx) ;
const int yInt = mv.y >> (4 + csy) ;
const int *xFilter = m_interpolationFilter[dx & 0xf];
const int *yFilter = m_interpolationFilter[dy & 0xf]; // will add 6 bits.
const int numFilterTaps=7;
const int centreTapOffset=3;
const int numFilterTaps = 7;
const int centerTapOffset = 3;
int tempArray[lumaBlockSize + numFilterTaps][lumaBlockSize];
for (int by = 1; by < blockSizeY + numFilterTaps; by++)
{
const int yOffset = y + by + yInt - centreTapOffset;
const Pel *sourceRow = srcImage+yOffset*srcStride;
const int yOffset = y + by + yInt - centerTapOffset;
const Pel *sourceRow = srcImage + yOffset * srcStride;
for (int bx = 0; bx < blockSizeX; bx++)
{
int base = x + bx + xInt - centreTapOffset;
int base = x + bx + xInt - centerTapOffset;
const Pel *rowStart = sourceRow + base;
int sum = 0;
@@ -527,10 +606,10 @@ void EncTemporalFilter::applyMotion(const Array2D<MotionVector> &mvs, const PelS
}
}
Pel *dstRow = dstImage+y*dstStride;
for (int by = 0; by < blockSizeY; by++, dstRow+=dstStride)
Pel *dstRow = dstImage + y * dstStride;
for (int by = 0; by < blockSizeY; by++, dstRow += dstStride)
{
Pel *dstPel=dstRow+x;
Pel *dstPel = dstRow + x;
for (int bx = 0; bx < blockSizeX; bx++, dstPel++)
{
int sum = 0;
@@ -553,7 +632,11 @@ void EncTemporalFilter::applyMotion(const Array2D<MotionVector> &mvs, const PelS
}
void EncTemporalFilter::bilateralFilter(const PelStorage &orgPic,
#if JVET_V0056_MCTF
std::deque<TemporalFilterSourcePicInfo> &srcFrameInfo,
#else
const std::deque<TemporalFilterSourcePicInfo> &srcFrameInfo,
#endif
PelStorage &newOrgPic,
double overallStrength) const
{
@@ -566,7 +649,7 @@ void EncTemporalFilter::bilateralFilter(const PelStorage &orgPic,
}
int refStrengthRow = 2;
if (numRefs == m_range*2)
if (numRefs == m_range * 2)
{
refStrengthRow = 0;
}
@@ -578,44 +661,101 @@ void EncTemporalFilter::bilateralFilter(const PelStorage &orgPic,
const double lumaSigmaSq = (m_QP - m_sigmaZeroPoint) * (m_QP - m_sigmaZeroPoint) * m_sigmaMultiplier;
const double chromaSigmaSq = 30 * 30;
for(int c=0; c< getNumberValidComponents(m_chromaFormatIDC); c++)
for(int c = 0; c < getNumberValidComponents(m_chromaFormatIDC); c++)
{
const ComponentID compID=(ComponentID)c;
const ComponentID compID = (ComponentID)c;
const int height = orgPic.bufs[c].height;
const int width = orgPic.bufs[c].width;
const Pel *srcPelRow = orgPic.bufs[c].buf;
const int srcStride = orgPic.bufs[c].stride;
Pel *dstPelRow = newOrgPic.bufs[c].buf;
const int dstStride = newOrgPic.bufs[c].stride;
const double sigmaSq = isChroma(compID)? chromaSigmaSq : lumaSigmaSq;
const Pel* srcPelRow = orgPic.bufs[c].buf;
const int srcStride = orgPic.bufs[c].stride;
Pel* dstPelRow = newOrgPic.bufs[c].buf;
const int dstStride = newOrgPic.bufs[c].stride;
const double sigmaSq = isChroma(compID) ? chromaSigmaSq : lumaSigmaSq;
const double weightScaling = overallStrength * (isChroma(compID) ? m_chromaFactor : 0.4);
const Pel maxSampleValue = (1<<m_internalBitDepth[toChannelType(compID)])-1;
const double bitDepthDiffWeighting=1024.0 / (maxSampleValue+1);
for (int y = 0; y < height; y++, srcPelRow+=srcStride, dstPelRow+=dstStride)
const Pel maxSampleValue = (1 << m_internalBitDepth[toChannelType(compID)]) - 1;
const double bitDepthDiffWeighting = 1024.0 / (maxSampleValue + 1);
#if JVET_V0056_MCTF
const int lumaBlockSize = 8;
const int csx = getComponentScaleX(compID, m_chromaFormatIDC);
const int csy = getComponentScaleY(compID, m_chromaFormatIDC);
const int blockSizeX = lumaBlockSize >> csx;
const int blockSizeY = lumaBlockSize >> csy;
#endif
for (int y = 0; y < height; y++, srcPelRow += srcStride, dstPelRow += dstStride)
{
const Pel *srcPel=srcPelRow;
Pel *dstPel=dstPelRow;
const Pel *srcPel = srcPelRow;
Pel *dstPel = dstPelRow;
for (int x = 0; x < width; x++, srcPel++, dstPel++)
{
const int orgVal = (int) *srcPel;
double temporalWeightSum = 1.0;
double newVal = (double) orgVal;
#if JVET_V0056_MCTF
if ((y % blockSizeY == 0) && (x % blockSizeX == 0))
{
for (int i = 0; i < numRefs; i++)
{
double variance = 0, diffsum = 0;
for (int y1 = 0; y1 < blockSizeY - 1; y1++)
{
for (int x1 = 0; x1 < blockSizeX - 1; x1++)
{
int pix = *(srcPel + x1);
int pixR = *(srcPel + x1 + 1);
int pixD = *(srcPel + x1 + srcStride);
int ref = *(correctedPics[i].bufs[c].buf + ((y + y1) * correctedPics[i].bufs[c].stride + x + x1));
int refR = *(correctedPics[i].bufs[c].buf + ((y + y1) * correctedPics[i].bufs[c].stride + x + x1 + 1));
int refD = *(correctedPics[i].bufs[c].buf + ((y + y1 + 1) * correctedPics[i].bufs[c].stride + x + x1));
int diff = pix - ref;
int diffR = pixR - refR;
int diffD = pixD - refD;
variance += diff * diff;
diffsum += (diffR - diff) * (diffR - diff);
diffsum += (diffD - diff) * (diffD - diff);
}
}
srcFrameInfo[i].mvs.get(x / blockSizeX, y / blockSizeY).noise = (int) round((300 * variance + 50) / (10 * diffsum + 50));
}
}
double minError = 9999999;
for (int i = 0; i < numRefs; i++)
{
minError = std::min(minError, (double) srcFrameInfo[i].mvs.get(x / blockSizeX, y / blockSizeY).error);
}
#endif
for (int i = 0; i < numRefs; i++)
{
const Pel *pCorrectedPelPtr=correctedPics[i].bufs[c].buf+(y*correctedPics[i].bufs[c].stride+x);
#if JVET_V0056_MCTF
const int error = srcFrameInfo[i].mvs.get(x / blockSizeX, y / blockSizeY).error;
const int noise = srcFrameInfo[i].mvs.get(x / blockSizeX, y / blockSizeY).noise;
#endif
const Pel *pCorrectedPelPtr = correctedPics[i].bufs[c].buf + (y * correctedPics[i].bufs[c].stride + x);
const int refVal = (int) *pCorrectedPelPtr;
double diff = (double)(refVal - orgVal);
diff *= bitDepthDiffWeighting;
double diffSq = diff * diff;
#if JVET_V0056_MCTF
const int index = std::min(3, std::abs(srcFrameInfo[i].origOffset) - 1);
double ww = 1, sw = 1;
ww *= (noise < 25) ? 1.0 : 0.6;
sw *= (noise < 25) ? 1.0 : 0.8;
ww *= (error < 50) ? 1.2 : ((error > 100) ? 0.6 : 1.0);
sw *= (error < 50) ? 1.0 : 0.8;
ww *= ((minError + 1) / (error + 1));
double weight = weightScaling * m_refStrengths[refStrengthRow][index] * ww * exp(-diffSq / (2 * sw * sigmaSq));
#else
const int index = std::min(1, std::abs(srcFrameInfo[i].origOffset) - 1);
const double weight = weightScaling * m_refStrengths[refStrengthRow][index] * exp(-diffSq / (2 * sigmaSq));
#endif
newVal += weight * refVal;
temporalWeightSum += weight;
}
newVal /= temporalWeightSum;
Pel sampleVal = (Pel)round(newVal);
sampleVal=(sampleVal<0?0 : (sampleVal>maxSampleValue ? maxSampleValue : sampleVal));
sampleVal = (sampleVal < 0 ? 0 : (sampleVal > maxSampleValue ? maxSampleValue : sampleVal));
*dstPel = sampleVal;
}
}
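The inner loop above is where the new per-block statistics change the weights. Factored out as a hypothetical helper (adaptiveWeight and its parameters are stand-ins: baseWeight corresponds to weightScaling * m_refStrengths[refStrengthRow][index], diffSq to the bit-depth-scaled squared sample difference, and error/noise/minError to the per-block values read from the MotionVector grid):

#include <cmath>

// Hypothetical sketch of the JVET-V0056 weight adjustment: references matched
// in noisy or poorly matched blocks get a smaller weight and a wider sigma,
// and the best-matching reference is boosted relative to the others.
double adaptiveWeight(double baseWeight, double sigmaSq, double diffSq,
                      int error, int noise, double minError)
{
  double ww = 1.0, sw = 1.0;
  ww *= (noise < 25) ? 1.0 : 0.6;
  sw *= (noise < 25) ? 1.0 : 0.8;
  ww *= (error < 50) ? 1.2 : ((error > 100) ? 0.6 : 1.0);
  sw *= (error < 50) ? 1.0 : 0.8;
  ww *= (minError + 1) / (error + 1);
  return baseWeight * ww * std::exp(-diffSq / (2 * sw * sigmaSq));
}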
......
@@ -51,7 +51,12 @@ struct MotionVector
{
int x, y;
int error;
#if JVET_V0056_MCTF
int noise;
MotionVector() : x(0), y(0), error(INT_LEAST32_MAX), noise(0) {}
#else
MotionVector() : x(0), y(0), error(INT_LEAST32_MAX) {}
#endif
void set(int vectorX, int vectorY, int errorValue) { x = vectorX; y = vectorY; error = errorValue; }
};
@@ -67,21 +72,21 @@ public:
void allocate(int width, int height, const T& value=T())
{
m_width=width;
m_height=height;
v.resize(std::size_t(m_width*m_height), value);
m_width = width;
m_height = height;
v.resize(std::size_t(m_width * m_height), value);
}
T& get(int x, int y)
{
assert(x<m_width && y<m_height);
return v[y*m_width+x];
assert(x < m_width && y < m_height);
return v[y * m_width + x];
}
const T& get(int x, int y) const
{
assert(x<m_width && y<m_height);
return v[y*m_width+x];
assert(x < m_width && y < m_height);
return v[y * m_width + x];
}
};
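A small usage sketch of the grid above, assuming the declarations in this header are in scope (initMotionGrid, width, height and blockSize are hypothetical names; one MotionVector is kept per block, with x and y in 1/16-pel units):

// Hypothetical helper: allocate one MotionVector per block and seed it with a
// zero vector and zero error/noise as a starting point for refinement.
// allocate() already default-constructs the entries; the explicit set() is
// only for illustration.
void initMotionGrid(Array2D<MotionVector> &mvs, int width, int height, int blockSize)
{
  mvs.allocate(width / blockSize, height / blockSize);
  for (int by = 0; by < height / blockSize; by++)
  {
    for (int bx = 0; bx < width / blockSize; bx++)
    {
      mvs.get(bx, by).set(0, 0, 0);   // x, y, error
#if JVET_V0056_MCTF
      mvs.get(bx, by).noise = 0;      // new JVET-V0056 field
#endif
    }
  }
}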
@@ -129,7 +134,11 @@ private:
static const int m_motionVectorFactor;
static const int m_padding;
static const int m_interpolationFilter[16][8];
#if JVET_V0056_MCTF
static const double m_refStrengths[3][4];
#else
static const double m_refStrengths[3][2];
#endif
// Private member variables
int m_FrameSkip;
@@ -155,7 +164,11 @@ private:
const Array2D<MotionVector> *previous=0, const int factor = 1, const bool doubleRes = false) const;
void motionEstimation(Array2D<MotionVector> &mvs, const PelStorage &orgPic, const PelStorage &buffer, const PelStorage &origSubsampled2, const PelStorage &origSubsampled4) const;
#if JVET_V0056_MCTF
void bilateralFilter(const PelStorage &orgPic, std::deque<TemporalFilterSourcePicInfo> &srcFrameInfo, PelStorage &newOrgPic, double overallStrength) const;
#else
void bilateralFilter(const PelStorage &orgPic, const std::deque<TemporalFilterSourcePicInfo> &srcFrameInfo, PelStorage &newOrgPic, double overallStrength) const;
#endif
void applyMotion(const Array2D<MotionVector> &mvs, const PelStorage &input, PelStorage &output) const;
}; // END CLASS DEFINITION EncTemporalFilter
......