
Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (38)
Showing with 1155 additions and 18 deletions
......@@ -56,6 +56,11 @@ TransformSkipFast : 1 # Fast Transform skipping (0: OFF, 1
TransformSkipLog2MaxSize : 5
SAOLcuBoundary : 0 # SAOLcuBoundary using non-deblocked pixels (0: OFF, 1: ON)
#=========== TemporalFilter =================
TemporalFilter : 0 # Enable/disable GOP Based Temporal Filter
TemporalFilterFutureReference : 0 # Enable/disable reading future frames
TemporalFilterStrengthFrame4 : 0.4 # Enable filter at every 4th frame with strength
#============ Slices ================
SliceMode : 0 # 0: Disable all slice options.
# 1: Enforce maximum number of LCU in an slice,
......
......@@ -56,6 +56,11 @@ TransformSkipFast : 1 # Fast Transform skipping (0: OFF, 1
TransformSkipLog2MaxSize : 5
SAOLcuBoundary : 0 # SAOLcuBoundary using non-deblocked pixels (0: OFF, 1: ON)
#=========== TemporalFilter =================
TemporalFilter : 0 # Enable/disable GOP Based Temporal Filter
TemporalFilterFutureReference : 0 # Enable/disable reading future frames
TemporalFilterStrengthFrame4 : 0.4 # Enable filter at every 4th frame with strength
#============ Slices ================
SliceMode : 0 # 0: Disable all slice options.
# 1: Enforce maximum number of LCU in an slice,
......
......@@ -70,6 +70,12 @@ TransformSkipFast : 1 # Fast Transform skipping (0: OFF, 1
TransformSkipLog2MaxSize : 5
SAOLcuBoundary : 0 # SAOLcuBoundary using non-deblocked pixels (0: OFF, 1: ON)
#=========== TemporalFilter =================
TemporalFilter : 0 # Enable/disable GOP Based Temporal Filter
TemporalFilterFutureReference : 1 # Enable/disable reading future frames
TemporalFilterStrengthFrame8 : 0.95 # Enable filter at every 8th frame with given strength
TemporalFilterStrengthFrame16 : 1.5 # Enable filter at every 16th frame with given strength, longer intervals have higher priority
#============ Slices ================
SliceMode : 0 # 0: Disable all slice options.
# 1: Enforce maximum number of LCU in an slice,
......
......@@ -890,6 +890,32 @@ Picture output options: output upscaled (2), decoded but in full resolution buff
\end{OptionTableNoShorthand}
%%
%% GOP based temporal filter parameters
%%
\begin{OptionTableNoShorthand}{GOP based temporal filter parameters}{tab:gop-based-temporal-filter}
\Option{TemporalFilter} &
%\ShortOption{\None} &
\Default{false} &
Enables or disables GOP based temporal filter.
\\
\Option{TemporalFilterFutureReference} &
%\ShortOption{\None} &
\Default{true} &
Enables or disables referencing of future frames in the GOP based temporal filter. Can be used to disable future referencing for
low delay configurations.
\\
\Option{TemporalFilterStrengthFrame*} &
%\ShortOption{\None} &
\Default{} &
Strength for every * frame in the GOP based temporal filter, where * is an integer. E.g. --TemporalFilterStrengthFrame8 0.95 will
enable the GOP based temporal filter at every 8th frame with strength 0.95. Longer intervals override shorter ones when there are
multiple matches.
\\
\end{OptionTableNoShorthand}
%%
%% profile, level and conformance options
%%
......
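As an illustration of the precedence rule described above (longer intervals override shorter ones), here is a minimal standalone sketch of the strength selection; the helper name strengthForPoc and the sample values are illustrative only, and the actual logic lives in EncTemporalFilter::filter further down in this diff.

#include <iostream>
#include <map>

// Pick the filter strength for a given POC from the configured
// TemporalFilterStrengthFrame* entries. std::map iterates keys in
// ascending order, so the largest matching interval wins.
static double strengthForPoc(const std::map<int, double> &strengths, int poc)
{
  double overallStrength = -1.0;   // negative means: do not filter this frame
  for (std::map<int, double>::const_iterator it = strengths.begin(); it != strengths.end(); ++it)
  {
    if (poc % it->first == 0)
    {
      overallStrength = it->second;
    }
  }
  return overallStrength;
}

int main()
{
  std::map<int, double> strengths;
  strengths[8]  = 0.95;   // --TemporalFilterStrengthFrame8 0.95
  strengths[16] = 1.5;    // --TemporalFilterStrengthFrame16 1.5

  std::cout << strengthForPoc(strengths, 8)  << std::endl;   // 0.95
  std::cout << strengthForPoc(strengths, 16) << std::endl;   // 1.5 (16 overrides 8)
  std::cout << strengthForPoc(strengths, 12) << std::endl;   // -1  (not filtered)
  return 0;
}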
......@@ -48,6 +48,10 @@
#include "AppEncHelper360/TExt360AppEncTop.h"
#endif
#if JVET_O0549_ENCODER_ONLY_FILTER
#include "EncoderLib/EncTemporalFilter.h"
#endif
using namespace std;
//! \ingroup EncoderApp
......@@ -644,6 +648,9 @@ void EncApp::xInitLibCfg()
m_cEncLib.setCropOffsetBottom (m_cropOffsetBottom);
m_cEncLib.setCalculateHdrMetrics (m_calculateHdrMetrics);
#endif
#if JVET_O0549_ENCODER_ONLY_FILTER
m_cEncLib.setGopBasedTemporalFilterEnabled(m_gopBasedTemporalFilterEnabled);
#endif
}
void EncApp::xCreateLib( std::list<PelUnitBuf*>& recBufList
......@@ -744,6 +751,17 @@ void EncApp::encode()
TExt360AppEncTop ext360(*this, m_cEncLib.getGOPEncoder()->getExt360Data(), *(m_cEncLib.getGOPEncoder()), orgPic);
#endif
#if JVET_O0549_ENCODER_ONLY_FILTER
EncTemporalFilter temporalFilter;
if (m_gopBasedTemporalFilterEnabled)
{
temporalFilter.init(m_FrameSkip, m_inputBitDepth, m_MSBExtendedBitDepth, m_internalBitDepth, m_iSourceWidth, m_iSourceHeight,
m_aiPad, m_bClipInputVideoToRec709Range, m_inputFileName, m_chromaFormatIDC,
m_inputColourSpaceConvert, m_iQP, m_gopBasedTemporalFilterStrengths,
m_gopBasedTemporalFilterFutureReference);
}
#endif
while ( !bEos )
{
// read input YUV file
......@@ -760,6 +778,13 @@ void EncApp::encode()
m_cVideoIOYuvInputFile.read( orgPic, trueOrgPic, ipCSC, m_aiPad, m_InputChromaFormatIDC, m_bClipInputVideoToRec709Range );
#endif
#if JVET_O0549_ENCODER_ONLY_FILTER
if (m_gopBasedTemporalFilterEnabled)
{
temporalFilter.filter(&orgPic, m_iFrameRcvd);
}
#endif
// increase number of received frames
m_iFrameRcvd++;
......
......@@ -608,6 +608,27 @@ static inline istream& operator >> (std::istream &in, EncAppCfg::OptionalValue<T
}
#endif
#if JVET_O0549_ENCODER_ONLY_FILTER
template <class T1, class T2>
static inline istream& operator >> (std::istream& in, std::map<T1, T2>& map)
{
T1 key;
T2 value;
try
{
in >> key;
in >> value;
}
catch (...)
{
in.setstate(ios::failbit);
}
map[key] = value;
return in;
}
#endif
static void
automaticallySelectRExtProfile(const bool bUsingGeneralRExtTools,
const bool bUsingChromaQPAdjustment,
......@@ -1374,6 +1395,14 @@ bool EncAppCfg::parseCfg( int argc, char* argv[] )
( "UpscaledOutput", m_upscaledOutput, 0, "Output upscaled (2), decoded but in full resolution buffer (1) or decoded cropped (0, default) picture for RPR" )
;
#if JVET_O0549_ENCODER_ONLY_FILTER
opts.addOptions()
("TemporalFilter", m_gopBasedTemporalFilterEnabled, false, "Enable GOP based temporal filter. Disabled per default")
("TemporalFilterFutureReference", m_gopBasedTemporalFilterFutureReference, true, "Enable referencing of future frames in the GOP based temporal filter. This is typically disabled for Low Delay configurations.")
("TemporalFilterStrengthFrame*", m_gopBasedTemporalFilterStrengths, std::map<int, double>(), "Strength for every * frame in GOP based temporal filter, where * is an integer."
" E.g. --TemporalFilterStrengthFrame8 0.95 will enable GOP based temporal filter at every 8th frame with strength 0.95");
#endif
#if EXTENSION_360_VIDEO
TExt360AppEncCfg::TExt360AppEncCfgContext ext360CfgContext;
m_ext360.addOptions(opts, ext360CfgContext);
......@@ -3368,6 +3397,12 @@ bool EncAppCfg::xCheckParameter()
xConfirmPara( m_decodeBitstreams[0] == m_bitstreamFileName, "Debug bitstream and the output bitstream cannot be equal.\n" );
xConfirmPara( m_decodeBitstreams[1] == m_bitstreamFileName, "Decode2 bitstream and the output bitstream cannot be equal.\n" );
xConfirmPara(unsigned(m_LMChroma) > 1, "LMMode exceeds range (0 to 1)");
#if JVET_O0549_ENCODER_ONLY_FILTER
if (m_gopBasedTemporalFilterEnabled)
{
xConfirmPara(m_temporalSubsampleRatio != 1, "The GOP based temporal filter only supports a temporal sub-sample ratio of 1");
}
#endif
#if EXTENSION_360_VIDEO
check_failed |= m_ext360.verifyParameters();
#endif
......@@ -3690,7 +3725,9 @@ void EncAppCfg::xPrintParameter()
{
msg( VERBOSE, "RPR:%d", 0 );
}
#if JVET_O0549_ENCODER_ONLY_FILTER
msg(VERBOSE, "TemporalFilter:%d ", m_gopBasedTemporalFilterEnabled);
#endif
#if EXTENSION_360_VIDEO
m_ext360.outputConfigurationSummary();
#endif
......
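A minimal standalone sketch of the std::map stream extractor added above; the input string "8 0.95" stands in for what program_options_lite hands over after the TemporalFilterStrengthFrame* prefix match (see the program_options_lite changes later in this diff), and main() exists only for demonstration.

#include <iostream>
#include <map>
#include <sstream>

// Same idea as the operator>> added to EncAppCfg.cpp: read a key and a
// value from the stream and store them in the map.
template <class T1, class T2>
std::istream& operator>>(std::istream &in, std::map<T1, T2> &m)
{
  T1 key;
  T2 value;
  in >> key >> value;
  m[key] = value;
  return in;
}

int main()
{
  std::map<int, double> strengths;
  std::istringstream in("8 0.95");   // "--TemporalFilterStrengthFrame8 0.95" after prefix stripping
  in >> strengths;
  std::cout << strengths[8] << std::endl;   // prints 0.95
  return 0;
}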
......@@ -39,6 +39,14 @@
#define __ENCAPPCFG__
#include "CommonLib/CommonDef.h"
#if JVET_O0549_ENCODER_ONLY_FILTER
#include <map>
template <class T1, class T2>
static inline std::istream& operator >> (std::istream &in, std::map<T1, T2> &map);
#include "Utilities/program_options_lite.h"
#endif
#include "EncoderLib/EncCfg.h"
#if EXTENSION_360_VIDEO
......@@ -48,6 +56,9 @@
#if JVET_O0756_CALCULATE_HDRMETRICS
#include "HDRLib/inc/DistortionMetric.H"
#endif
#if JVET_O0549_ENCODER_ONLY_FILTER
namespace po = df::program_options_lite;
#endif
#include <sstream>
#include <vector>
......@@ -605,6 +616,12 @@ protected:
int m_switchPocPeriod;
int m_upscaledOutput; ////< Output upscaled (2), decoded cropped but in full resolution buffer (1) or decoded cropped (0, default) picture for RPR.
#if JVET_O0549_ENCODER_ONLY_FILTER
bool m_gopBasedTemporalFilterEnabled; ///< GOP-based Temporal Filter enable/disable
bool m_gopBasedTemporalFilterFutureReference; ///< Enable/disable future frame references in the GOP-based Temporal Filter
std::map<int, double> m_gopBasedTemporalFilterStrengths; ///< Filter strength per frame for the GOP-based Temporal Filter
#endif
#if EXTENSION_360_VIDEO
TExt360AppEncCfg m_ext360;
friend class TExt360AppEncCfg;
......
......@@ -118,6 +118,9 @@ struct AreaBuf : public Size
void subtract ( const AreaBuf<const T> &other );
void extendSingleBorderPel();
void extendBorderPel ( unsigned margin );
#if JVET_O0549_ENCODER_ONLY_FILTER
void extendBorderPel(unsigned marginX, unsigned marginY);
#endif
void addWeightedAvg ( const AreaBuf<const T> &other1, const AreaBuf<const T> &other2, const ClpRng& clpRng, const int8_t gbiIdx);
void removeWeightHighFreq ( const AreaBuf<T>& other, const bool bClip, const ClpRng& clpRng, const int8_t iGbiWeight);
void addAvg ( const AreaBuf<const T> &other1, const AreaBuf<const T> &other2, const ClpRng& clpRng );
......@@ -526,6 +529,46 @@ void AreaBuf<T>::updateHistogram( std::vector<int32_t>& hist ) const
}
}
#if JVET_O0549_ENCODER_ONLY_FILTER
template<typename T>
void AreaBuf<T>::extendBorderPel(unsigned marginX, unsigned marginY)
{
T* p = buf;
int h = height;
int w = width;
int s = stride;
CHECK((w + 2 * marginX) > s, "Size of buffer too small to extend");
// do left and right margins
for (int y = 0; y < h; y++)
{
for (int x = 0; x < marginX; x++)
{
*(p - marginX + x) = p[0];
p[w + x] = p[w - 1];
}
p += s;
}
// p is now at (0, height) (bottom left of the image within the bigger picture)
p -= (s + marginX);
// p is now at (-marginX, height-1)
for (int y = 0; y < marginY; y++)
{
::memcpy(p + (y + 1) * s, p, sizeof(T) * (w + (marginX << 1)));
}
// p is still (-marginX, height-1)
p -= ((h - 1) * s);
// p is now (-marginX, 0)
for (int y = 0; y < marginY; y++)
{
::memcpy(p - (y + 1) * s, p, sizeof(T) * (w + (marginX << 1)));
}
}
#endif
template<typename T>
void AreaBuf<T>::extendBorderPel( unsigned margin )
{
......@@ -693,6 +736,9 @@ struct UnitBuf
void addWeightedAvg ( const UnitBuf<const T> &other1, const UnitBuf<const T> &other2, const ClpRngs& clpRngs, const uint8_t gbiIdx = GBI_DEFAULT, const bool chromaOnly = false, const bool lumaOnly = false);
void addAvg ( const UnitBuf<const T> &other1, const UnitBuf<const T> &other2, const ClpRngs& clpRngs, const bool chromaOnly = false, const bool lumaOnly = false);
void extendSingleBorderPel();
#if JVET_O0549_ENCODER_ONLY_FILTER
void extendBorderPel(unsigned marginX, unsigned marginY);
#endif
void extendBorderPel ( unsigned margin );
void removeHighFreq ( const UnitBuf<T>& other, const bool bClip, const ClpRngs& clpRngs
, const int8_t gbiWeight = g_GbiWeights[GBI_DEFAULT]
......@@ -802,6 +848,17 @@ void UnitBuf<T>::extendSingleBorderPel()
}
}
#if JVET_O0549_ENCODER_ONLY_FILTER
template<typename T>
void UnitBuf<T>::extendBorderPel(unsigned marginX, unsigned marginY)
{
for (unsigned i = 0; i < bufs.size(); i++)
{
bufs[i].extendBorderPel(marginX >> getComponentScaleX(ComponentID(i), chromaFormat), marginY >> getComponentScaleY(ComponentID(i), chromaFormat));
}
}
#endif
template<typename T>
void UnitBuf<T>::extendBorderPel( unsigned margin )
{
......
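For reference, a small self-contained sketch of the two-margin border extension that the new AreaBuf<T>::extendBorderPel(marginX, marginY) performs, written against a plain integer buffer; sizes and values are illustrative only.

#include <cstdio>

int main()
{
  const int w = 4, h = 2, mx = 2, my = 1;
  const int stride = w + 2 * mx;
  // Allocation covers (h + 2*my) rows of 'stride' samples; 'img' points
  // at the top-left sample of the interior w x h area.
  int buffer[(h + 2 * my) * stride] = { 0 };
  int *img = buffer + my * stride + mx;

  // fill the interior with some recognizable values
  for (int y = 0; y < h; y++)
    for (int x = 0; x < w; x++)
      img[y * stride + x] = 10 * y + x;

  // left/right margins: replicate the edge samples of each row
  for (int y = 0; y < h; y++)
    for (int x = 0; x < mx; x++)
    {
      img[y * stride - mx + x] = img[y * stride];          // leftmost sample
      img[y * stride + w + x]  = img[y * stride + w - 1];  // rightmost sample
    }

  // top/bottom margins: copy the already-extended first/last rows
  for (int y = 0; y < my; y++)
    for (int x = -mx; x < w + mx; x++)
    {
      img[-(y + 1) * stride + x] = img[x];                     // first row upwards
      img[(h + y) * stride + x]  = img[(h - 1) * stride + x];  // last row downwards
    }

  // print the padded buffer
  for (int y = -my; y < h + my; y++)
  {
    for (int x = -mx; x < w + mx; x++)
      printf("%3d", img[y * stride + x]);
    printf("\n");
  }
  return 0;
}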
......@@ -297,6 +297,9 @@ void InterPrediction::xSubPuMC( PredictionUnit& pu, PelUnitBuf& predBuf, const R
int fstStep = (!verMC ? puHeight : puWidth);
int secStep = (!verMC ? puWidth : puHeight);
pu.refIdx[0] = 0; pu.refIdx[1] = pu.cs->slice->getSliceType() == B_SLICE ? 0 : -1;
bool scaled = !PU::isRefPicSameSize( pu );
m_subPuMC = true;
for (int fstDim = fstStart; fstDim < fstEnd; fstDim += fstStep)
......@@ -313,7 +316,7 @@ void InterPrediction::xSubPuMC( PredictionUnit& pu, PelUnitBuf& predBuf, const R
while (later < secEnd)
{
const MotionInfo &laterMi = !verMC ? pu.getMotionInfo(Position{ later, fstDim }) : pu.getMotionInfo(Position{ fstDim, later });
if (laterMi == curMi)
if (!scaled && laterMi == curMi)
{
length += secStep;
}
......@@ -419,7 +422,10 @@ void InterPrediction::xPredInterUni(const PredictionUnit& pu, const RefPicList&
if( !pu.cu->affine )
{
clipMv( mv[0], pu.cu->lumaPos(), pu.cu->lumaSize(), sps, *pu.cs->pps );
if( pu.cu->slice->getScalingRatio( eRefPicList, iRefIdx ) == SCALE_1X )
{
clipMv( mv[0], pu.cu->lumaPos(), pu.cu->lumaSize(), sps, *pu.cs->pps );
}
}
for( uint32_t comp = COMPONENT_Y; comp < pcYuvPred.bufs.size() && comp <= m_maxCompIDToPred; comp++ )
......@@ -998,8 +1004,11 @@ void InterPrediction::xPredAffineBlk( const ComponentID& compID, const Predictio
{
wrapRef = false;
m_storedMv[h / AFFINE_MIN_BLOCK_SIZE * MVBUFFER_SIZE + w / AFFINE_MIN_BLOCK_SIZE].set(iMvScaleTmpHor, iMvScaleTmpVer);
iMvScaleTmpHor = std::min<int>(iHorMax, std::max<int>(iHorMin, iMvScaleTmpHor));
iMvScaleTmpVer = std::min<int>(iVerMax, std::max<int>(iVerMin, iMvScaleTmpVer));
if( scalingRatio == SCALE_1X )
{
iMvScaleTmpHor = std::min<int>(iHorMax, std::max<int>(iHorMin, iMvScaleTmpHor));
iMvScaleTmpVer = std::min<int>(iVerMax, std::max<int>(iVerMin, iMvScaleTmpVer));
}
}
}
else
......@@ -1014,8 +1023,11 @@ void InterPrediction::xPredAffineBlk( const ComponentID& compID, const Predictio
else
{
wrapRef = false;
curMv.hor = std::min<int>(iHorMax, std::max<int>(iHorMin, curMv.hor));
curMv.ver = std::min<int>(iVerMax, std::max<int>(iVerMin, curMv.ver));
if( scalingRatio == SCALE_1X )
{
curMv.hor = std::min<int>(iHorMax, std::max<int>(iHorMin, curMv.hor));
curMv.ver = std::min<int>(iVerMax, std::max<int>(iVerMin, curMv.ver));
}
}
iMvScaleTmpHor = curMv.hor;
iMvScaleTmpVer = curMv.ver;
......@@ -2384,17 +2396,24 @@ bool InterPrediction::xPredInterBlkRPR( const std::pair<int, int>& scalingRatio,
int offX = 1 << ( posShift - shiftHor - 1 );
int offY = 1 << ( posShift - shiftVer - 1 );
x0Int = ( ( blk.pos().x << ( 4 + ::getComponentScaleX( compID, chFmt ) ) ) + mv.getHor() )* scalingRatio.first;
x0Int = ( ( blk.pos().x << ( 4 + ::getComponentScaleX( compID, chFmt ) ) ) + mv.getHor() )* (int64_t)scalingRatio.first;
x0Int = SIGN( x0Int ) * ( ( llabs( x0Int ) + ( (long long)1 << ( 7 + ::getComponentScaleX( compID, chFmt ) ) ) ) >> ( 8 + ::getComponentScaleX( compID, chFmt ) ) );
y0Int = ( ( blk.pos().y << ( 4 + ::getComponentScaleY( compID, chFmt ) ) ) + mv.getVer() )* scalingRatio.second;
y0Int = ( ( blk.pos().y << ( 4 + ::getComponentScaleY( compID, chFmt ) ) ) + mv.getVer() )* (int64_t)scalingRatio.second;
y0Int = SIGN( y0Int ) * ( ( llabs( y0Int ) + ( (long long)1 << ( 7 + ::getComponentScaleY( compID, chFmt ) ) ) ) >> ( 8 + ::getComponentScaleY( compID, chFmt ) ) );
const int extSize = isLuma( compID ) ? 1 : 2;
int vFilterSize = isLuma( compID ) ? NTAPS_LUMA : NTAPS_CHROMA;
int refHeight = height * scalingRatio.second >> SCALE_RATIO_BITS;
int yInt0 = ( (int32_t)y0Int + offY ) >> posShift;
yInt0 = std::min( std::max( -4, yInt0 ), ( refPicHeight >> ::getComponentScaleY( compID, chFmt ) ) + 4 );
int xInt0 = ( (int32_t)x0Int + offX ) >> posShift;
xInt0 = std::min( std::max( -4, xInt0 ), ( refPicWidth >> ::getComponentScaleX( compID, chFmt ) ) + 4 );
int refHeight = ((((int32_t)y0Int + (height-1) * stepY) + offY ) >> posShift) - ((((int32_t)y0Int + 0 * stepY) + offY ) >> posShift) + 1;
refHeight = std::max<int>( 1, refHeight );
CHECK( MAX_CU_SIZE * MAX_SCALING_RATIO < refHeight + vFilterSize - 1 + extSize, "Buffer size is not enough, increase MAX_SCALING_RATIO" );
......@@ -2403,12 +2422,6 @@ bool InterPrediction::xPredInterBlkRPR( const std::pair<int, int>& scalingRatio,
int tmpStride = width;
int yInt0 = ( (int32_t)y0Int + offY ) >> posShift;
yInt0 = std::min( std::max( 0, yInt0 ), ( refPicHeight >> ::getComponentScaleY( compID, chFmt ) ) );
int xInt0 = ( (int32_t)x0Int + offX ) >> posShift;
xInt0 = std::min( std::max( 0, xInt0 ), ( refPicWidth >> ::getComponentScaleX( compID, chFmt ) ) );
int xInt = 0, yInt = 0;
for( col = 0; col < width; col++ )
......
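The (int64_t) casts in xPredInterBlkRPR above guard the scaled-position computation against 32-bit overflow. A toy computation follows, assuming SCALE_RATIO_BITS is 14 (so a 2:1 ratio is stored as 2 << 14) and an 8K-wide picture; the exact constants are not shown in this diff.

#include <cstdint>
#include <iostream>

int main()
{
  // Illustrative values only: a luma block near the right edge of an
  // 8K picture with a positive MV, and a 2:1 RPR ratio assumed to be
  // stored as 2 << 14.
  const int64_t posTimes16 = int64_t(7680) << 4;   // blk.pos().x << 4
  const int64_t mvHor      = 2048;                 // mv.getHor(), 1/16-pel units
  const int64_t ratio      = 2 << 14;              // scalingRatio.first

  const int64_t product64 = (posTimes16 + mvHor) * ratio;
  std::cout << "64-bit product: " << product64 << std::endl;   // ~4.09e9
  std::cout << "fits in int32_t: "
            << (product64 <= INT32_MAX && product64 >= INT32_MIN) << std::endl;   // 0
  return 0;
}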
......@@ -1749,7 +1749,15 @@ inline uint32_t QuantRDOQ::xGetCodedLevelTSPred(double& rd64CodedCost
double dErr = 0.0;
dErr = double(levelDouble - (Intermediate_Int(absLevel) << qBits));
coeffLevelError[errorInd] = dErr * dErr * errorScale;
#if JVET_P0298_DISABLE_LEVELMAPPING_IN_BYPASS
int modAbsLevel = absLevel;
if (cctx.numCtxBins() >= 4)
{
modAbsLevel = cctx.deriveModCoeff(rightPixel, belowPixel, absLevel, m_bdpcm);
}
#else
int modAbsLevel = cctx.deriveModCoeff(rightPixel, belowPixel, absLevel, m_bdpcm);
#endif
#if JVET_P0072_SIMPLIFIED_TSRC
int numCtxBins = 0;
double dCurrCost = coeffLevelError[errorInd] + xGetICost(xGetICRateTS(modAbsLevel, fracBitsPar, cctx, fracBitsAccess, fracBitsSign, fracBitsGt1, numCtxBins, sign, ricePar, useLimitedPrefixLength, maxLog2TrDynamicRange));
......
......@@ -50,6 +50,9 @@
#include <assert.h>
#include <cassert>
#define JVET_P0298_DISABLE_LEVELMAPPING_IN_BYPASS 1 // JVET-P0298: Disable level mapping in bypass mode
#define JVET_P0325_CHANGE_MERGE_CANDIDATE_ORDER 1 // JVET-P0325: reorder the spatial merge candidates
#define JVET_P0578_MINIMUM_CU_SIZE_CONSTRAINT 1 // JVET-P0578: minimum CU size constraint
......@@ -91,6 +94,8 @@
#define JVET_P0164_ALF_SYNTAX_SIMP 1 // JVET-p0164: simplify alf syntax with method2
#define JVET_O0549_ENCODER_ONLY_FILTER 1 // JVET-O0549: Encoder-only temporal filter, no decoder changes
#define JVET_P0042_FIX_INTER_DIR_CTX 1 // JVET-P0042: Fix overlap in context between the bi-pred flag for 8x8 CUs and the L0/L1 flag for all size CUs
#define JVET_P0111_CHROMA_422_FIX 1 // JVET-P0422: Bug fix of chroma 422 intra mode mapping
......
......@@ -893,6 +893,72 @@ void PU::getInterMergeCandidates( const PredictionUnit &pu, MergeCtx& mrgCtx,
const Position posLB = pu.Y().bottomLeft();
MotionInfo miAbove, miLeft, miAboveLeft, miAboveRight, miBelowLeft;
#if JVET_P0325_CHANGE_MERGE_CANDIDATE_ORDER
// above
const PredictionUnit *puAbove = cs.getPURestricted(posRT.offset(0, -1), pu, pu.chType);
bool isAvailableB1 = puAbove && isDiffMER(pu, *puAbove) && pu.cu != puAbove->cu && CU::isInter(*puAbove->cu);
if (isAvailableB1)
{
miAbove = puAbove->getMotionInfo(posRT.offset(0, -1));
// get Inter Dir
mrgCtx.interDirNeighbours[cnt] = miAbove.interDir;
mrgCtx.useAltHpelIf[cnt] = miAbove.useAltHpelIf;
// get Mv from Above
mrgCtx.GBiIdx[cnt] = (mrgCtx.interDirNeighbours[cnt] == 3) ? puAbove->cu->GBiIdx : GBI_DEFAULT;
mrgCtx.mvFieldNeighbours[cnt << 1].setMvField(miAbove.mv[0], miAbove.refIdx[0]);
if (slice.isInterB())
{
mrgCtx.mvFieldNeighbours[(cnt << 1) + 1].setMvField(miAbove.mv[1], miAbove.refIdx[1]);
}
if (mrgCandIdx == cnt && canFastExit)
{
return;
}
cnt++;
}
// early termination
if (cnt == maxNumMergeCand)
{
return;
}
//left
const PredictionUnit* puLeft = cs.getPURestricted(posLB.offset(-1, 0), pu, pu.chType);
const bool isAvailableA1 = puLeft && isDiffMER(pu, *puLeft) && pu.cu != puLeft->cu && CU::isInter(*puLeft->cu);
if (isAvailableA1)
{
miLeft = puLeft->getMotionInfo(posLB.offset(-1, 0));
if (!isAvailableB1 || (miAbove != miLeft))
{
// get Inter Dir
mrgCtx.interDirNeighbours[cnt] = miLeft.interDir;
mrgCtx.useAltHpelIf[cnt] = miLeft.useAltHpelIf;
mrgCtx.GBiIdx[cnt] = (mrgCtx.interDirNeighbours[cnt] == 3) ? puLeft->cu->GBiIdx : GBI_DEFAULT;
// get Mv from Left
mrgCtx.mvFieldNeighbours[cnt << 1].setMvField(miLeft.mv[0], miLeft.refIdx[0]);
if (slice.isInterB())
{
mrgCtx.mvFieldNeighbours[(cnt << 1) + 1].setMvField(miLeft.mv[1], miLeft.refIdx[1]);
}
if (mrgCandIdx == cnt && canFastExit)
{
return;
}
cnt++;
}
}
#else
//left
const PredictionUnit* puLeft = cs.getPURestricted( posLB.offset( -1, 0 ), pu, pu.chType );
......@@ -960,6 +1026,7 @@ void PU::getInterMergeCandidates( const PredictionUnit &pu, MergeCtx& mrgCtx,
cnt++;
}
}
#endif
// early termination
if( cnt == maxNumMergeCand )
......
......@@ -3590,7 +3590,11 @@ void CABACReader::residual_coding_subblockTS( CoeffCodingContext& cctx, TCoeff*
tcoeff += ( rem << 1 );
#endif
}
#if JVET_P0298_DISABLE_LEVELMAPPING_IN_BYPASS
if (!cctx.bdpcm() && cutoffVal)
#else
if (!cctx.bdpcm())
#endif
{
if (tcoeff > 0)
{
......
......@@ -3290,10 +3290,16 @@ void CABACWriter::residual_coding_subblockTS( CoeffCodingContext& cctx, const TC
{
unsigned absLevel;
cctx.neighTS(rightPixel, belowPixel, scanPos, coeff);
#if JVET_P0298_DISABLE_LEVELMAPPING_IN_BYPASS
cutoffVal = (scanPos <= lastScanPosPass2 ? 10 : (scanPos <= lastScanPosPass1 ? 2 : 0));
absLevel = cctx.deriveModCoeff(rightPixel, belowPixel, abs(coeff[cctx.blockPos(scanPos)]), cctx.bdpcm()||!cutoffVal);
#else
absLevel = cctx.deriveModCoeff(rightPixel, belowPixel, abs(coeff[cctx.blockPos(scanPos)]), cctx.bdpcm());
#if JVET_P0072_SIMPLIFIED_TSRC
cutoffVal = (scanPos <= lastScanPosPass2 ? 10 : (scanPos <= lastScanPosPass1 ? 2 : 0));
#endif
#endif
if( absLevel >= cutoffVal )
{
int rice = cctx.templateAbsSumTS( scanPos, coeff );
......
......@@ -450,6 +450,9 @@ protected:
bool m_bFastUDIUseMPMEnabled;
bool m_bFastMEForGenBLowDelayEnabled;
bool m_bUseBLambdaForNonKeyLowDelayPictures;
#if JVET_O0549_ENCODER_ONLY_FILTER
bool m_gopBasedTemporalFilterEnabled;
#endif
//====== Slice ========
SliceConstraint m_sliceMode;
int m_sliceArgument;
......@@ -1192,6 +1195,10 @@ public:
bool getFastUDIUseMPMEnabled () { return m_bFastUDIUseMPMEnabled; }
bool getFastMEForGenBLowDelayEnabled () { return m_bFastMEForGenBLowDelayEnabled; }
bool getUseBLambdaForNonKeyLowDelayPictures () { return m_bUseBLambdaForNonKeyLowDelayPictures; }
#if JVET_O0549_ENCODER_ONLY_FILTER
void setGopBasedTemporalFilterEnabled(bool flag) { m_gopBasedTemporalFilterEnabled = flag; }
bool getGopBasedTemporalFilterEnabled() { return m_gopBasedTemporalFilterEnabled; }
#endif
bool getCrossComponentPredictionEnabledFlag () const { return m_crossComponentPredictionEnabledFlag; }
void setCrossComponentPredictionEnabledFlag (const bool value) { m_crossComponentPredictionEnabledFlag = value; }
......
......@@ -3645,7 +3645,11 @@ void EncGOP::xCalculateAddPSNR(Picture* pcPic, PelUnitBuf cPicD, const AccessUni
const CPelUnitBuf& pic = cPicD;
CHECK(!(conversion == IPCOLOURSPACE_UNCHANGED), "Unspecified error");
// const CPelUnitBuf& org = (conversion != IPCOLOURSPACE_UNCHANGED) ? pcPic->getPicYuvTrueOrg()->getBuf() : pcPic->getPicYuvOrg()->getBuf();
#if JVET_O0549_ENCODER_ONLY_FILTER
const CPelUnitBuf& org = (sps.getUseReshaper() || m_pcCfg->getGopBasedTemporalFilterEnabled()) ? pcPic->getTrueOrigBuf() : pcPic->getOrigBuf();
#else
const CPelUnitBuf& org = sps.getUseReshaper() ? pcPic->getTrueOrigBuf() : pcPic->getOrigBuf();
#endif
#if ENABLE_QPA
const bool useWPSNR = m_pcEncLib->getUseWPSNR();
#endif
......
/* The copyright in this software is being made available under the BSD
* License, included below. This software may be subject to other third party
* and contributor rights, including patent rights, and no such rights are
* granted under this license.
*
* Copyright (c) 2010-2019, ITU/ISO/IEC
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
/** \file EncTemporalFilter.cpp
\brief EncTemporalFilter class
*/
#include "EncTemporalFilter.h"
#include <math.h>
#if JVET_O0549_ENCODER_ONLY_FILTER
// ====================================================================================================================
// Constructor / destructor / initialization / destroy
// ====================================================================================================================
const int EncTemporalFilter::m_range = 2;
const double EncTemporalFilter::m_chromaFactor = 0.55;
const double EncTemporalFilter::m_sigmaMultiplier = 9.0;
const double EncTemporalFilter::m_sigmaZeroPoint = 10.0;
const int EncTemporalFilter::m_motionVectorFactor = 16;
const int EncTemporalFilter::m_padding = 128;
const int EncTemporalFilter::m_interpolationFilter[16][8] =
{
{ 0, 0, 0, 64, 0, 0, 0, 0 }, //0
{ 0, 1, -3, 64, 4, -2, 0, 0 }, //1 -->-->
{ 0, 1, -6, 62, 9, -3, 1, 0 }, //2 -->
{ 0, 2, -8, 60, 14, -5, 1, 0 }, //3 -->-->
{ 0, 2, -9, 57, 19, -7, 2, 0 }, //4
{ 0, 3, -10, 53, 24, -8, 2, 0 }, //5 -->-->
{ 0, 3, -11, 50, 29, -9, 2, 0 }, //6 -->
{ 0, 3, -11, 44, 35, -10, 3, 0 }, //7 -->-->
{ 0, 1, -7, 38, 38, -7, 1, 0 }, //8
{ 0, 3, -10, 35, 44, -11, 3, 0 }, //9 -->-->
{ 0, 2, -9, 29, 50, -11, 3, 0 }, //10-->
{ 0, 2, -8, 24, 53, -10, 3, 0 }, //11-->-->
{ 0, 2, -7, 19, 57, -9, 2, 0 }, //12
{ 0, 1, -5, 14, 60, -8, 2, 0 }, //13-->-->
{ 0, 1, -3, 9, 62, -6, 1, 0 }, //14-->
{ 0, 0, -2, 4, 64, -3, 1, 0 } //15-->-->
};
const double EncTemporalFilter::m_refStrengths[3][2] =
{ // abs(POC offset)
// 1, 2
{0.85, 0.60}, // m_range * 2
{1.20, 1.00}, // m_range
{0.30, 0.30} // otherwise
};
EncTemporalFilter::EncTemporalFilter() :
m_FrameSkip(0),
m_chromaFormatIDC(NUM_CHROMA_FORMAT),
m_sourceWidth(0),
m_sourceHeight(0),
m_QP(0),
m_clipInputVideoToRec709Range(false),
m_inputColourSpaceConvert(NUMBER_INPUT_COLOUR_SPACE_CONVERSIONS)
{}
void EncTemporalFilter::init(const int frameSkip,
const int inputBitDepth[MAX_NUM_CHANNEL_TYPE],
const int msbExtendedBitDepth[MAX_NUM_CHANNEL_TYPE],
const int internalBitDepth[MAX_NUM_CHANNEL_TYPE],
const int width,
const int height,
const int *pad,
const bool rec709,
const std::string &filename,
const ChromaFormat inputChromaFormatIDC,
const InputColourSpaceConversion colorSpaceConv,
const int qp,
const std::map<int, double> &temporalFilterStrengths,
const bool gopBasedTemporalFilterFutureReference)
{
m_FrameSkip = frameSkip;
for (int i = 0; i < MAX_NUM_CHANNEL_TYPE; i++)
{
m_inputBitDepth[i] = inputBitDepth[i];
m_MSBExtendedBitDepth[i] = msbExtendedBitDepth[i];
m_internalBitDepth[i] = internalBitDepth[i];
}
m_sourceWidth = width;
m_sourceHeight = height;
for (int i = 0; i < 2; i++)
{
m_pad[i] = pad[i];
}
m_clipInputVideoToRec709Range = rec709;
m_inputFileName = filename;
m_chromaFormatIDC = inputChromaFormatIDC;
m_inputColourSpaceConvert = colorSpaceConv;
m_area = Area(0, 0, width, height);
m_QP = qp;
m_temporalFilterStrengths = temporalFilterStrengths;
m_gopBasedTemporalFilterFutureReference = gopBasedTemporalFilterFutureReference;
}
// ====================================================================================================================
// Public member functions
// ====================================================================================================================
bool EncTemporalFilter::filter(PelStorage *orgPic, int receivedPoc)
{
bool isFilterThisFrame = false;
if (m_QP >= 17) // disable filter for QP < 17
{
for (map<int, double>::iterator it = m_temporalFilterStrengths.begin(); it != m_temporalFilterStrengths.end(); ++it)
{
int filteredFrame = it->first;
if (receivedPoc % filteredFrame == 0)
{
isFilterThisFrame = true;
break;
}
}
}
if (isFilterThisFrame)
{
int offset = m_FrameSkip;
VideoIOYuv yuvFrames;
yuvFrames.open(m_inputFileName, false, m_inputBitDepth, m_MSBExtendedBitDepth, m_internalBitDepth);
yuvFrames.skipFrames(std::max(offset + receivedPoc - m_range, 0), m_sourceWidth - m_pad[0], m_sourceHeight - m_pad[1], m_chromaFormatIDC);
std::deque<TemporalFilterSourcePicInfo> srcFrameInfo;
int firstFrame = receivedPoc + offset - m_range;
int lastFrame = receivedPoc + offset + m_range;
if (!m_gopBasedTemporalFilterFutureReference)
{
lastFrame = receivedPoc + offset - 1;
}
int origOffset = -m_range;
// subsample original picture so it only needs to be done once
PelStorage origPadded;
origPadded.create(m_chromaFormatIDC, m_area, 0, m_padding);
origPadded.copyFrom(*orgPic);
origPadded.extendBorderPel(m_padding, m_padding);
PelStorage origSubsampled2;
PelStorage origSubsampled4;
subsampleLuma(origPadded, origSubsampled2);
subsampleLuma(origSubsampled2, origSubsampled4);
// determine motion vectors
for (int poc = firstFrame; poc <= lastFrame; poc++)
{
if (poc < 0)
{
origOffset++;
continue; // frame not available
}
else if (poc == offset + receivedPoc)
{ // hop over frame that will be filtered
yuvFrames.skipFrames(1, m_sourceWidth - m_pad[0], m_sourceHeight - m_pad[1], m_chromaFormatIDC);
origOffset++;
continue;
}
srcFrameInfo.push_back(TemporalFilterSourcePicInfo());
TemporalFilterSourcePicInfo &srcPic=srcFrameInfo.back();
PelStorage dummyPicBufferTO; // Only used temporarily in yuvFrames.read
srcPic.picBuffer.create(m_chromaFormatIDC, m_area, 0, m_padding);
dummyPicBufferTO.create(m_chromaFormatIDC, m_area, 0, m_padding);
if (!yuvFrames.read(srcPic.picBuffer, dummyPicBufferTO, m_inputColourSpaceConvert, m_pad, m_chromaFormatIDC, m_clipInputVideoToRec709Range))
{
return false; // eof or read fail
}
srcPic.picBuffer.extendBorderPel(m_padding, m_padding);
srcPic.mvs.allocate(m_sourceWidth / 4, m_sourceHeight / 4);
motionEstimation(srcPic.mvs, origPadded, srcPic.picBuffer, origSubsampled2, origSubsampled4);
srcPic.origOffset = origOffset;
origOffset++;
}
// filter
PelStorage newOrgPic;
newOrgPic.create(m_chromaFormatIDC, m_area, 0, m_padding);
double overallStrength = -1.0;
for (map<int, double>::iterator it = m_temporalFilterStrengths.begin(); it != m_temporalFilterStrengths.end(); ++it)
{
int frame = it->first;
double strength = it->second;
if (receivedPoc % frame == 0)
{
overallStrength = strength;
}
}
bilateralFilter(origPadded, srcFrameInfo, newOrgPic, overallStrength);
// move filtered to orgPic
orgPic->copyFrom(newOrgPic);
yuvFrames.close();
return true;
}
return false;
}
// ====================================================================================================================
// Private member functions
// ====================================================================================================================
void EncTemporalFilter::subsampleLuma(const PelStorage &input, PelStorage &output, const int factor) const
{
const int newWidth = input.Y().width / factor;
const int newHeight = input.Y().height / factor;
output.create(m_chromaFormatIDC, Area(0, 0, newWidth, newHeight), 0, m_padding);
const Pel* srcRow = input.Y().buf;
const int srcStride = input.Y().stride;
Pel *dstRow = output.Y().buf;
const int dstStride = output.Y().stride;
for (int y = 0; y < newHeight; y++, srcRow+=factor*srcStride, dstRow+=dstStride)
{
const Pel *inRow = srcRow;
const Pel *inRowBelow = srcRow+srcStride;
Pel *target = dstRow;
for (int x = 0; x < newWidth; x++)
{
target[x] = (inRow[0] + inRowBelow[0] + inRow[1] + inRowBelow[1] + 2) >> 2;
inRow += 2;
inRowBelow += 2;
}
}
output.extendBorderPel(m_padding, m_padding);
}
int EncTemporalFilter::motionErrorLuma(const PelStorage &orig,
const PelStorage &buffer,
const int x,
const int y,
int dx,
int dy,
const int bs,
const int besterror = 8 * 8 * 1024 * 1024) const
{
const Pel* origOrigin = orig.Y().buf;
const int origStride = orig.Y().stride;
const Pel *buffOrigin = buffer.Y().buf;
const int buffStride = buffer.Y().stride;
int error = 0;// dx * 10 + dy * 10;
if (((dx | dy) & 0xF) == 0)
{
dx /= m_motionVectorFactor;
dy /= m_motionVectorFactor;
for (int y1 = 0; y1 < bs; y1++)
{
const Pel* origRowStart = origOrigin + (y+y1)*origStride + x;
const Pel* bufferRowStart = buffOrigin + (y+y1+dy)*buffStride + (x+dx);
for (int x1 = 0; x1 < bs; x1 += 2)
{
int diff = origRowStart[x1] - bufferRowStart[x1];
error += diff * diff;
diff = origRowStart[x1 + 1] - bufferRowStart[x1 + 1];
error += diff * diff;
}
if (error > besterror)
{
return error;
}
}
}
else
{
const int *xFilter = m_interpolationFilter[dx & 0xF];
const int *yFilter = m_interpolationFilter[dy & 0xF];
int tempArray[64 + 8][64];
int sum, base;
for (int y1 = 1; y1 < bs + 7; y1++)
{
const int yOffset = y + y1 + (dy >> 4) - 3;
const Pel *sourceRow = buffOrigin + (yOffset)*buffStride + 0;
for (int x1 = 0; x1 < bs; x1++)
{
sum = 0;
base = x + x1 + (dx >> 4) - 3;
const Pel *rowStart = sourceRow + base;
sum += xFilter[1] * rowStart[1];
sum += xFilter[2] * rowStart[2];
sum += xFilter[3] * rowStart[3];
sum += xFilter[4] * rowStart[4];
sum += xFilter[5] * rowStart[5];
sum += xFilter[6] * rowStart[6];
tempArray[y1][x1] = sum;
}
}
const Pel maxSampleValue = (1<<m_internalBitDepth[CHANNEL_TYPE_LUMA])-1;
for (int y1 = 0; y1 < bs; y1++)
{
const Pel *origRow = origOrigin + (y+y1)*origStride + 0;
for (int x1 = 0; x1 < bs; x1++)
{
sum = 0;
sum += yFilter[1] * tempArray[y1 + 1][x1];
sum += yFilter[2] * tempArray[y1 + 2][x1];
sum += yFilter[3] * tempArray[y1 + 3][x1];
sum += yFilter[4] * tempArray[y1 + 4][x1];
sum += yFilter[5] * tempArray[y1 + 5][x1];
sum += yFilter[6] * tempArray[y1 + 6][x1];
sum = (sum + (1 << 11)) >> 12;
sum = sum < 0 ? 0 : (sum > maxSampleValue ? maxSampleValue : sum);
error += (sum - origRow[x + x1]) * (sum - origRow[x + x1]);
}
if (error > besterror)
{
return error;
}
}
}
return error;
}
void EncTemporalFilter::motionEstimationLuma(Array2D<MotionVector> &mvs, const PelStorage &orig, const PelStorage &buffer, const int blockSize,
const Array2D<MotionVector> *previous, const int factor, const bool doubleRes) const
{
int range = 5;
const int stepSize = blockSize;
const int origWidth = orig.Y().width;
const int origHeight = orig.Y().height;
for (int blockY = 0; blockY + blockSize < origHeight; blockY += stepSize)
{
for (int blockX = 0; blockX + blockSize < origWidth; blockX += stepSize)
{
MotionVector best;
if (previous == NULL)
{
range = 8;
}
else
{
for (int py = -2; py <= 2; py++)
{
int testy = blockY / (2 * blockSize) + py;
for (int px = -2; px <= 2; px++)
{
int testx = blockX / (2 * blockSize) + px;
if ((testx >= 0) && (testx < origWidth / (2 * blockSize)) && (testy >= 0) && (testy < origHeight / (2 * blockSize)))
{
MotionVector old = previous->get(testx, testy);
int error = motionErrorLuma(orig, buffer, blockX, blockY, old.x * factor, old.y * factor, blockSize, best.error);
if (error < best.error)
{
best.set(old.x * factor, old.y * factor, error);
}
}
}
}
}
MotionVector prevBest = best;
for (int y2 = prevBest.y / m_motionVectorFactor - range; y2 <= prevBest.y / m_motionVectorFactor + range; y2++)
{
for (int x2 = prevBest.x / m_motionVectorFactor - range; x2 <= prevBest.x / m_motionVectorFactor + range; x2++)
{
int error = motionErrorLuma(orig, buffer, blockX, blockY, x2 * m_motionVectorFactor, y2 * m_motionVectorFactor, blockSize, best.error);
if (error < best.error)
{
best.set(x2 * m_motionVectorFactor, y2 * m_motionVectorFactor, error);
}
}
}
if (doubleRes)
{ // merge into one loop, probably with precision array (here [12, 3] or maybe [4, 1]) with settable number of iterations
prevBest = best;
int doubleRange = 3 * 4;
for (int y2 = prevBest.y - doubleRange; y2 <= prevBest.y + doubleRange; y2 += 4)
{
for (int x2 = prevBest.x - doubleRange; x2 <= prevBest.x + doubleRange; x2 += 4)
{
int error = motionErrorLuma(orig, buffer, blockX, blockY, x2, y2, blockSize, best.error);
if (error < best.error)
{
best.set(x2, y2, error);
}
}
}
prevBest = best;
doubleRange = 3;
for (int y2 = prevBest.y - doubleRange; y2 <= prevBest.y + doubleRange; y2++)
{
for (int x2 = prevBest.x - doubleRange; x2 <= prevBest.x + doubleRange; x2++)
{
int error = motionErrorLuma(orig, buffer, blockX, blockY, x2, y2, blockSize, best.error);
if (error < best.error)
{
best.set(x2, y2, error);
}
}
}
}
mvs.get(blockX / stepSize, blockY / stepSize) = best;
}
}
}
void EncTemporalFilter::motionEstimation(Array2D<MotionVector> &mv, const PelStorage &orgPic, const PelStorage &buffer, const PelStorage &origSubsampled2, const PelStorage &origSubsampled4) const
{
const int width = m_sourceWidth;
const int height = m_sourceHeight;
Array2D<MotionVector> mv_0(width / 16, height / 16);
Array2D<MotionVector> mv_1(width / 16, height / 16);
Array2D<MotionVector> mv_2(width / 16, height / 16);
PelStorage bufferSub2;
PelStorage bufferSub4;
subsampleLuma(buffer, bufferSub2);
subsampleLuma(bufferSub2, bufferSub4);
motionEstimationLuma(mv_0, origSubsampled4, bufferSub4, 16);
motionEstimationLuma(mv_1, origSubsampled2, bufferSub2, 16, &mv_0, 2);
motionEstimationLuma(mv_2, orgPic, buffer, 16, &mv_1, 2);
motionEstimationLuma(mv, orgPic, buffer, 8, &mv_2, 1, true);
}
void EncTemporalFilter::applyMotion(const Array2D<MotionVector> &mvs, const PelStorage &input, PelStorage &output) const
{
static const int lumaBlockSize=8;
for(int c=0; c< getNumberValidComponents(m_chromaFormatIDC); c++)
{
const ComponentID compID=(ComponentID)c;
const int csx=getComponentScaleX(compID, m_chromaFormatIDC);
const int csy=getComponentScaleY(compID, m_chromaFormatIDC);
const int blockSizeX = lumaBlockSize>>csx;
const int blockSizeY = lumaBlockSize>>csy;
const int height = input.bufs[c].height;
const int width = input.bufs[c].width;
const Pel maxValue = (1<<m_internalBitDepth[toChannelType(compID)])-1;
const Pel *srcImage = input.bufs[c].buf;
const int srcStride = input.bufs[c].stride;
Pel *dstImage = output.bufs[c].buf;
int dstStride = output.bufs[c].stride;
for (int y = 0, blockNumY = 0; y + blockSizeY <= height; y += blockSizeY, blockNumY++)
{
for (int x = 0, blockNumX = 0; x + blockSizeX <= width; x += blockSizeX, blockNumX++)
{
const MotionVector &mv = mvs.get(blockNumX,blockNumY);
const int dx = mv.x >> csx ;
const int dy = mv.y >> csy ;
const int xInt = mv.x >> (4+csx) ;
const int yInt = mv.y >> (4+csy) ;
const int *xFilter = m_interpolationFilter[dx & 0xf];
const int *yFilter = m_interpolationFilter[dy & 0xf]; // will add 6 bit.
const int numFilterTaps=7;
const int centreTapOffset=3;
int tempArray[lumaBlockSize + numFilterTaps][lumaBlockSize];
for (int by = 1; by < blockSizeY + numFilterTaps; by++)
{
const int yOffset = y + by + yInt - centreTapOffset;
const Pel *sourceRow = srcImage+yOffset*srcStride;
for (int bx = 0; bx < blockSizeX; bx++)
{
int base = x + bx + xInt - centreTapOffset;
const Pel *rowStart = sourceRow + base;
int sum = 0;
sum += xFilter[1] * rowStart[1];
sum += xFilter[2] * rowStart[2];
sum += xFilter[3] * rowStart[3];
sum += xFilter[4] * rowStart[4];
sum += xFilter[5] * rowStart[5];
sum += xFilter[6] * rowStart[6];
tempArray[by][bx] = sum;
}
}
Pel *dstRow = dstImage+y*dstStride;
for (int by = 0; by < blockSizeY; by++, dstRow+=dstStride)
{
Pel *dstPel=dstRow+x;
for (int bx = 0; bx < blockSizeX; bx++, dstPel++)
{
int sum = 0;
sum += yFilter[1] * tempArray[by + 1][bx];
sum += yFilter[2] * tempArray[by + 2][bx];
sum += yFilter[3] * tempArray[by + 3][bx];
sum += yFilter[4] * tempArray[by + 4][bx];
sum += yFilter[5] * tempArray[by + 5][bx];
sum += yFilter[6] * tempArray[by + 6][bx];
sum = (sum + (1 << 11)) >> 12;
sum = sum < 0 ? 0 : (sum > maxValue ? maxValue : sum);
*dstPel = sum;
}
}
}
}
}
}
void EncTemporalFilter::bilateralFilter(const PelStorage &orgPic,
const std::deque<TemporalFilterSourcePicInfo> &srcFrameInfo,
PelStorage &newOrgPic,
double overallStrength) const
{
const int numRefs = int(srcFrameInfo.size());
std::vector<PelStorage> correctedPics(numRefs);
for (int i = 0; i < numRefs; i++)
{
correctedPics[i].create(m_chromaFormatIDC, m_area, 0, m_padding);
applyMotion(srcFrameInfo[i].mvs, srcFrameInfo[i].picBuffer, correctedPics[i]);
}
int refStrengthRow = 2;
if (numRefs == m_range*2)
{
refStrengthRow = 0;
}
else if (numRefs == m_range)
{
refStrengthRow = 1;
}
const double lumaSigmaSq = (m_QP - m_sigmaZeroPoint) * (m_QP - m_sigmaZeroPoint) * m_sigmaMultiplier;
const double chromaSigmaSq = 30 * 30;
for(int c=0; c< getNumberValidComponents(m_chromaFormatIDC); c++)
{
const ComponentID compID=(ComponentID)c;
const int height = orgPic.bufs[c].height;
const int width = orgPic.bufs[c].width;
const Pel *srcPelRow = orgPic.bufs[c].buf;
const int srcStride = orgPic.bufs[c].stride;
Pel *dstPelRow = newOrgPic.bufs[c].buf;
const int dstStride = newOrgPic.bufs[c].stride;
const double sigmaSq = isChroma(compID)? chromaSigmaSq : lumaSigmaSq;
const double weightScaling = overallStrength * (isChroma(compID) ? m_chromaFactor : 0.4);
const Pel maxSampleValue = (1<<m_internalBitDepth[toChannelType(compID)])-1;
const double bitDepthDiffWeighting=1024.0 / (maxSampleValue+1);
for (int y = 0; y < height; y++, srcPelRow+=srcStride, dstPelRow+=dstStride)
{
const Pel *srcPel=srcPelRow;
Pel *dstPel=dstPelRow;
for (int x = 0; x < width; x++, srcPel++, dstPel++)
{
const int orgVal = (int) *srcPel;
double temporalWeightSum = 1.0;
double newVal = (double) orgVal;
for (int i = 0; i < numRefs; i++)
{
const Pel *pCorrectedPelPtr=correctedPics[i].bufs[c].buf+(y*correctedPics[i].bufs[c].stride+x);
const int refVal = (int) *pCorrectedPelPtr;
double diff = (double)(refVal - orgVal);
diff *= bitDepthDiffWeighting;
double diffSq = diff * diff;
const int index = std::min(1, std::abs(srcFrameInfo[i].origOffset) - 1);
const double weight = weightScaling * m_refStrengths[refStrengthRow][index] * exp(-diffSq / (2 * sigmaSq));
newVal += weight * refVal;
temporalWeightSum += weight;
}
newVal /= temporalWeightSum;
Pel sampleVal = (Pel)round(newVal);
sampleVal=(sampleVal<0?0 : (sampleVal>maxSampleValue ? maxSampleValue : sampleVal));
*dstPel = sampleVal;
}
}
}
}
//! \}
#endif
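For readability, the per-sample weighting implemented in bilateralFilter above can be stated compactly (same quantities as in the code: $s_o$ is overallStrength, $c$ the luma/chroma factor, $s_r(i)$ the m\_refStrengths entry selected by numRefs and the POC offset, and $I_i(x)$ the motion-compensated sample of reference $i$):

\[
  I_{\mathrm{new}}(x) \;=\; \frac{I_{\mathrm{org}}(x) + \sum_{i} w_i(x)\, I_i(x)}{1 + \sum_{i} w_i(x)},
  \qquad
  w_i(x) \;=\; s_o \cdot c \cdot s_r(i) \cdot \exp\!\left(-\frac{\Delta_i(x)^2}{2\sigma^2}\right),
\]
where $\Delta_i(x) = \bigl(I_i(x) - I_{\mathrm{org}}(x)\bigr) \cdot 1024/(\text{maxSampleValue}+1)$, $c = 0.4$ for luma and $0.55$ (m\_chromaFactor) for chroma, and $\sigma^2 = (\mathrm{QP} - 10)^2 \cdot 9$ for luma and $30^2$ for chroma.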
/* The copyright in this software is being made available under the BSD
* License, included below. This software may be subject to other third party
* and contributor rights, including patent rights, and no such rights are
* granted under this license.
*
* Copyright (c) 2010-2019, ITU/ISO/IEC
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
/** \file EncTemporalFilter.h
\brief EncTemporalFilter class (header)
*/
#ifndef __TEMPORAL_FILTER__
#define __TEMPORAL_FILTER__
#include "EncLib.h"
#include "CommonLib/Buffer.h"
#include <sstream>
#include <map>
#include <deque>
#if JVET_O0549_ENCODER_ONLY_FILTER
//! \ingroup EncoderLib
//! \{
struct MotionVector
{
int x, y;
int error;
MotionVector() : x(0), y(0), error(INT_LEAST32_MAX) {}
void set(int vectorX, int vectorY, int errorValue) { x = vectorX; y = vectorY; error = errorValue; }
};
template <class T>
struct Array2D
{
private:
int m_width, m_height;
std::vector< T > v;
public:
Array2D() : m_width(0), m_height(0), v() { }
Array2D(int width, int height, const T& value=T()) : m_width(0), m_height(0), v() { allocate(width, height, value); }
void allocate(int width, int height, const T& value=T())
{
m_width=width;
m_height=height;
v.resize(std::size_t(m_width*m_height), value);
}
T& get(int x, int y)
{
assert(x<m_width && y<m_height);
return v[y*m_width+x];
}
const T& get(int x, int y) const
{
assert(x<m_width && y<m_height);
return v[y*m_width+x];
}
};
struct TemporalFilterSourcePicInfo
{
TemporalFilterSourcePicInfo() : picBuffer(), mvs(), origOffset(0) { }
PelStorage picBuffer;
Array2D<MotionVector> mvs;
int origOffset;
};
// ====================================================================================================================
// Class definition
// ====================================================================================================================
class EncTemporalFilter
{
public:
EncTemporalFilter();
~EncTemporalFilter() {}
void init(const int frameSkip,
const int inputBitDepth[MAX_NUM_CHANNEL_TYPE],
const int msbExtendedBitDepth[MAX_NUM_CHANNEL_TYPE],
const int internalBitDepth[MAX_NUM_CHANNEL_TYPE],
const int width,
const int height,
const int *pad,
const bool rec709,
const std::string &filename,
const ChromaFormat inputChroma,
const InputColourSpaceConversion colorSpaceConv,
const int qp,
const std::map<int, double> &temporalFilterStrengths,
const bool gopBasedTemporalFilterFutureReference);
bool filter(PelStorage *orgPic, int frame);
private:
// Private static member variables
static const int m_range;
static const double m_chromaFactor;
static const double m_sigmaMultiplier;
static const double m_sigmaZeroPoint;
static const int m_motionVectorFactor;
static const int m_padding;
static const int m_interpolationFilter[16][8];
static const double m_refStrengths[3][2];
// Private member variables
int m_FrameSkip;
std::string m_inputFileName;
int m_inputBitDepth[MAX_NUM_CHANNEL_TYPE];
int m_MSBExtendedBitDepth[MAX_NUM_CHANNEL_TYPE];
int m_internalBitDepth[MAX_NUM_CHANNEL_TYPE];
ChromaFormat m_chromaFormatIDC;
int m_sourceWidth;
int m_sourceHeight;
int m_QP;
std::map<int, double> m_temporalFilterStrengths;
int m_pad[2];
bool m_clipInputVideoToRec709Range;
InputColourSpaceConversion m_inputColourSpaceConvert;
Area m_area;
bool m_gopBasedTemporalFilterFutureReference;
// Private functions
void subsampleLuma(const PelStorage &input, PelStorage &output, const int factor = 2) const;
int motionErrorLuma(const PelStorage &orig, const PelStorage &buffer, const int x, const int y, int dx, int dy, const int bs, const int besterror) const;
void motionEstimationLuma(Array2D<MotionVector> &mvs, const PelStorage &orig, const PelStorage &buffer, const int bs,
const Array2D<MotionVector> *previous=0, const int factor = 1, const bool doubleRes = false) const;
void motionEstimation(Array2D<MotionVector> &mvs, const PelStorage &orgPic, const PelStorage &buffer, const PelStorage &origSubsampled2, const PelStorage &origSubsampled4) const;
void bilateralFilter(const PelStorage &orgPic, const std::deque<TemporalFilterSourcePicInfo> &srcFrameInfo, PelStorage &newOrgPic, double overallStrength) const;
void applyMotion(const Array2D<MotionVector> &mvs, const PelStorage &input, PelStorage &output) const;
}; // END CLASS DEFINITION EncTemporalFilter
//! \}
#endif
#endif // __TEMPORAL_FILTER__
......@@ -96,8 +96,22 @@ namespace df
}
else
{
#if JVET_O0549_ENCODER_ONLY_FILTER_POL
if (opt_name.size() > 0 && opt_name.back() == '*')
{
string prefix_name = opt_name.substr(0, opt_name.size() - 1);
names->opt_prefix.push_back(prefix_name);
opt_prefix_map[prefix_name].push_back(names);
}
else
{
names->opt_long.push_back(opt_name);
opt_long_map[opt_name].push_back(names);
}
#else
names->opt_long.push_back(opt_name);
opt_long_map[opt_name].push_back(names);
#endif
}
opt_start += opt_end + 1;
}
......@@ -150,6 +164,12 @@ namespace df
{
out << "--" << entry.opt_long.front();
}
#if JVET_O0549_ENCODER_ONLY_FILTER_POL
else if (!entry.opt_prefix.empty())
{
out << "--" << entry.opt_prefix.front() << "*";
}
#endif
}
/* format the help text */
......@@ -271,6 +291,9 @@ namespace df
bool OptionWriter::storePair(bool allow_long, bool allow_short, const string& name, const string& value)
{
bool found = false;
#if JVET_O0549_ENCODER_ONLY_FILTER_POL
std::string val = value;
#endif
Options::NamesMap::iterator opt_it;
if (allow_long)
{
......@@ -290,15 +313,34 @@ namespace df
found = true;
}
}
#if JVET_O0549_ENCODER_ONLY_FILTER_POL
bool allow_prefix = allow_long;
if (allow_prefix && !found)
{
for (opt_it = opts.opt_prefix_map.begin(); opt_it != opts.opt_prefix_map.end(); opt_it++)
{
std::string name_prefix = name.substr(0, opt_it->first.size());
if (name_prefix == opt_it->first)
{
// prepend value matching *
val = name.substr(name_prefix.size()) + std::string(" ") + val;
found = true;
break;
}
}
}
#endif
if (!found)
{
error_reporter.error(where())
<< "Unknown option `" << name << "' (value:`" << value << "')\n";
return false;
}
#if JVET_O0549_ENCODER_ONLY_FILTER_POL
setOptions((*opt_it).second, val, error_reporter);
#else
setOptions((*opt_it).second, value, error_reporter);
#endif
return true;
}
......
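A hedged standalone sketch of the prefix-option handling added above: an option registered with a trailing '*' matches any name that starts with the stored prefix, and the remainder of the name is prepended to the value before parsing. The literals below are illustrative.

#include <iostream>
#include <string>

int main()
{
  const std::string registeredPrefix = "TemporalFilterStrengthFrame";   // stored without the '*'
  const std::string name  = "TemporalFilterStrengthFrame8";             // from --TemporalFilterStrengthFrame8
  const std::string value = "0.95";

  if (name.compare(0, registeredPrefix.size(), registeredPrefix) == 0)
  {
    // prepend the part of the name matching '*' to the value
    const std::string combined = name.substr(registeredPrefix.size()) + " " + value;
    std::cout << combined << std::endl;   // "8 0.95", later parsed into std::map<int, double>
  }
  return 0;
}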
......@@ -36,6 +36,8 @@
#include <list>
#include <map>
#define JVET_O0549_ENCODER_ONLY_FILTER_POL 1 // JVET-O0549: Encoder-only GOP-based temporal filter. Program Options Lite related changes.
#ifndef __PROGRAM_OPTIONS_LITE__
#define __PROGRAM_OPTIONS_LITE__
......@@ -196,6 +198,9 @@ namespace df
}
std::list<std::string> opt_long;
std::list<std::string> opt_short;
#if JVET_O0549_ENCODER_ONLY_FILTER_POL
std::list<std::string> opt_prefix;
#endif
OptionBase* opt;
};
......@@ -207,6 +212,9 @@ namespace df
typedef std::map<std::string, NamesPtrList> NamesMap;
NamesMap opt_long_map;
NamesMap opt_short_map;
#if JVET_O0549_ENCODER_ONLY_FILTER_POL
NamesMap opt_prefix_map;
#endif
};
/* Class with templated overloaded operator(), for use by Options::addOptions() */
......