Skip to content
Snippets Groups Projects
Commit c4b62706 authored by Franck Galpin's avatar Franck Galpin Committed by Zhihuang Xie
Browse files

More flexible input q

parent a07baec1
No related branches found
No related tags found
No related merge requests found
...@@ -162,7 +162,9 @@ void NNFilterHOP::resizeInputs(int width, int height) ...@@ -162,7 +162,9 @@ void NNFilterHOP::resizeInputs(int width, int height)
cerr << "[ERROR] issue init model NNFilterHOP " << endl; cerr << "[ERROR] issue init model NNFilterHOP " << endl;
exit(-1); exit(-1);
} }
m_input_quantizer = m_model->getInputsTemplate()[0].quantizer; // assume all image inputs have same quantizer assert(nb_inputs==m_inputs.size());
for(int i=0;i<nb_inputs;++i)
m_input_quantizer[i] = m_model->getInputsTemplate()[i].quantizer;
} }
#if NN_HOP_UNIFIED_TEMPORAL_FILTERING #if NN_HOP_UNIFIED_TEMPORAL_FILTERING
...@@ -379,17 +381,17 @@ void NNFilterHOP::filterBlock(Picture &pic, UnitArea inferArea, int extLeft, int ...@@ -379,17 +381,17 @@ void NNFilterHOP::filterBlock(Picture &pic, UnitArea inferArea, int extLeft, int
const double inputScaleIpb = (1 << log2InputIbpScale); const double inputScaleIpb = (1 << log2InputIbpScale);
std::vector<InputData> listInputData; std::vector<InputData> listInputData;
listInputData.push_back({ NN_INPUT_REC, 0, inputScalePred, m_input_quantizer - log2InputBitdepth, true, true }); listInputData.push_back({ NN_INPUT_REC, 0, inputScalePred, m_input_quantizer[0] - log2InputBitdepth, true, true });
listInputData.push_back({ NN_INPUT_PRED, 1, inputScalePred, m_input_quantizer - log2InputBitdepth, true, true }); listInputData.push_back({ NN_INPUT_PRED, 1, inputScalePred, m_input_quantizer[1] - log2InputBitdepth, true, true });
listInputData.push_back({ NN_INPUT_BS, 2, inputScalePred, m_input_quantizer - log2InputBitdepth, true, true }); listInputData.push_back({ NN_INPUT_BS, 2, inputScalePred, m_input_quantizer[2] - log2InputBitdepth, true, true });
listInputData.push_back({ NN_INPUT_GLOBAL_QP, 3, inputScaleQp, m_input_quantizer - log2InputQpScale, true, false }); listInputData.push_back({ NN_INPUT_GLOBAL_QP, 3, inputScaleQp, m_input_quantizer[3] - log2InputQpScale, true, false });
listInputData.push_back({ NN_INPUT_LOCAL_QP, 4, inputScaleQp, m_input_quantizer - log2InputQpScale, true, false }); listInputData.push_back({ NN_INPUT_LOCAL_QP, 4, inputScaleQp, m_input_quantizer[4] - log2InputQpScale, true, false });
#if NN_HOP_UNIFIED_FORCE_USE #if NN_HOP_UNIFIED_FORCE_USE
if (m_forceIntraType) { if (m_forceIntraType) {
listInputData.push_back({ NN_INPUT_ZERO, 5, inputScaleIpb, m_input_quantizer - log2InputIbpScale, true, false }); listInputData.push_back({ NN_INPUT_ZERO, 5, inputScaleIpb, m_input_quantizer[5] - log2InputIbpScale, true, false });
} else } else
#endif #endif
listInputData.push_back({ NN_INPUT_IPB, 5, inputScaleIpb, m_input_quantizer - log2InputIbpScale, true, false }); listInputData.push_back({ NN_INPUT_IPB, 5, inputScaleIpb, m_input_quantizer[5] - log2InputIbpScale, true, false });
NNInference::prepareInputs<TypeSadlHOP>(&pic, inferArea, m_inputs, seqQp, sliceQp, -1 /* sliceType */, listInputData); NNInference::prepareInputs<TypeSadlHOP>(&pic, inferArea, m_inputs, seqQp, sliceQp, -1 /* sliceType */, listInputData);
......
...@@ -54,6 +54,7 @@ public: ...@@ -54,6 +54,7 @@ public:
static constexpr float nnResidueScaleDerivationUpBound = 1.25f; static constexpr float nnResidueScaleDerivationUpBound = 1.25f;
static constexpr float nnResidueScaleDerivationLowBound = 0.0625f; static constexpr float nnResidueScaleDerivationLowBound = 0.0625f;
static constexpr int max_scale=(1<<log2ResidueScale); static constexpr int max_scale=(1<<log2ResidueScale);
static constexpr int nb_inputs=6;
#if NN_HOP_UNIFIED_TEMPORAL_FILTERING #if NN_HOP_UNIFIED_TEMPORAL_FILTERING
static constexpr int minimumTidUseTemporalFiltering = 3; static constexpr int minimumTidUseTemporalFiltering = 3;
#endif #endif
...@@ -130,7 +131,7 @@ public: ...@@ -130,7 +131,7 @@ public:
#endif #endif
private: private:
int m_blocksize[2]; // current inputs size int m_blocksize[2]; // current inputs size
int m_input_quantizer = 0; int m_input_quantizer[nb_inputs] = {};
void resizeInputs(int width, int height); void resizeInputs(int width, int height);
std::unique_ptr<sadl::Model<TypeSadlHOP>> m_model; std::unique_ptr<sadl::Model<TypeSadlHOP>> m_model;
std::vector<sadl::Tensor<TypeSadlHOP>> m_inputs; std::vector<sadl::Tensor<TypeSadlHOP>> m_inputs;
......
0% — Loading.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment