diff --git a/.gitattributes b/.gitattributes
index 3ffacb90d438646e5b09d2feedd4d5ea43fb4e2e..0fb9c9cc6b8c75548425a6bfd36ba9d35b0f419a 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -503,3 +503,7 @@ models/super_resolution/Nnsr_LumaCNNSR_Intra_int16.sadl filter=lfs diff=lfs merg
 *.index filter=lfs diff=lfs merge=lfs -text
 *.pb filter=lfs diff=lfs merge=lfs -text
 *.data-* filter=lfs diff=lfs merge=lfs -text
+models/NnlfSet1_CombinedIntraInter/NnlfSet1_ChromaCNNFilter_float.sadl filter=lfs diff=lfs merge=lfs -text
+models/NnlfSet1_CombinedIntraInter/NnlfSet1_ChromaCNNFilter_int16.sadl filter=lfs diff=lfs merge=lfs -text
+models/NnlfSet1_CombinedIntraInter/NnlfSet1_LumaCNNFilter_float.sadl filter=lfs diff=lfs merge=lfs -text
+models/NnlfSet1_CombinedIntraInter/NnlfSet1_LumaCNNFilter_int16.sadl filter=lfs diff=lfs merge=lfs -text
diff --git a/models/NnlfSet1_CombinedIntraInter/NnlfSet1_ChromaCNNFilter_float.sadl b/models/NnlfSet1_CombinedIntraInter/NnlfSet1_ChromaCNNFilter_float.sadl
new file mode 100644
index 0000000000000000000000000000000000000000..9c97127cab27dd58bcee9656ff0b73186bcf5d30
--- /dev/null
+++ b/models/NnlfSet1_CombinedIntraInter/NnlfSet1_ChromaCNNFilter_float.sadl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4393b9259a5481dbed304ff8a7b540baaa20ff1aa8eb580303d821384fa1ab62
+size 6227994
diff --git a/models/NnlfSet1_CombinedIntraInter/NnlfSet1_ChromaCNNFilter_int16.sadl b/models/NnlfSet1_CombinedIntraInter/NnlfSet1_ChromaCNNFilter_int16.sadl
new file mode 100644
index 0000000000000000000000000000000000000000..54a12de9c05b890d1fe5797fc2349fa12c52daf5
--- /dev/null
+++ b/models/NnlfSet1_CombinedIntraInter/NnlfSet1_ChromaCNNFilter_int16.sadl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9a925c92dfbeb92a399644813eed33ccdf868ad3cf1cab01fcf973534485b04
+size 3118008
diff --git a/models/NnlfSet1_CombinedIntraInter/NnlfSet1_LumaCNNFilter_float.sadl b/models/NnlfSet1_CombinedIntraInter/NnlfSet1_LumaCNNFilter_float.sadl
new file mode 100644
index 0000000000000000000000000000000000000000..6c1a316001d9f1fc65b4f35a84227f72cdca497d
--- /dev/null
+++ b/models/NnlfSet1_CombinedIntraInter/NnlfSet1_LumaCNNFilter_float.sadl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:69dfde3e6d86d0ea1e32709f141e6de28f80cc4d7aa599c12b5a4dce648f5c69
+size 6203784
diff --git a/models/NnlfSet1_CombinedIntraInter/NnlfSet1_LumaCNNFilter_int16.sadl b/models/NnlfSet1_CombinedIntraInter/NnlfSet1_LumaCNNFilter_int16.sadl
new file mode 100644
index 0000000000000000000000000000000000000000..044c31f6adafed016b2c4e678b076916580908bf
--- /dev/null
+++ b/models/NnlfSet1_CombinedIntraInter/NnlfSet1_LumaCNNFilter_int16.sadl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6ab3f657ea1d0ce83b13f2514f166ef24873fa26a1909aaec67e189fe87ea8d
+size 3105902
diff --git a/source/App/EncoderApp/EncAppCfg.cpp b/source/App/EncoderApp/EncAppCfg.cpp
index 3f5d257c121a6017149d44d17a9f3ad721b9552f..ac2a63060bc5a01c0816e274546bfcd15fc0d921 100644
--- a/source/App/EncoderApp/EncAppCfg.cpp
+++ b/source/App/EncoderApp/EncAppCfg.cpp
@@ -1440,10 +1440,10 @@ bool EncAppCfg::parseCfg( int argc, char* argv[] )
   ( "NnlfSet1InferSizeBase",                          m_nnlfSet1InferSizeBase,                   128u, "Base inference size of NN-based loop filter set 1" )
   ( "NnlfSet1InferSizeExtension",                     m_nnlfSet1InferSizeExtension,                8u, "Extension of inference size of NN-based loop filter set 1" )
   ( "NnlfSet1MaxNumParams",                           m_nnlfSet1MaxNumParams,                      3u, "Number of conditional parameters of NN-based loop filter set 1" )
-  ( "NnlfSet1InterLumaModel",                         m_nnlfSet1InterLumaModelName,              string("models/NnlfSet1_LumaCNNFilter_InterSlice_int16.sadl"), "NnlfSet1 inter luma model name")
-  ( "NnlfSet1InterChromaModel",                       m_nnlfSet1InterChromaModelName,            string("models/NnlfSet1_ChromaCNNFilter_InterSlice_int16.sadl"), "NnlfSet1 inter chroma model name")
-  ( "NnlfSet1IntraLumaModel",                         m_nnlfSet1IntraLumaModelName,              string("models/NnlfSet1_LumaCNNFilter_IntraSlice_int16.sadl"), "NnlfSet1 intra luma model name")
-  ( "NnlfSet1IntraChromaModel",                       m_nnlfSet1IntraChromaModelName,            string("models/NnlfSet1_ChromaCNNFilter_IntraSlice_int16.sadl"), "NnlfSet1 intra chroma model name")
+  ( "NnlfSet1InterLumaModel",                         m_nnlfSet1InterLumaModelName,              string("models/NnlfSet1_CombinedIntraInter/NnlfSet1_LumaCNNFilter_int16.sadl"), "NnlfSet1 luma model name")
+  ( "NnlfSet1InterChromaModel",                       m_nnlfSet1InterChromaModelName,            string("models/NnlfSet1_CombinedIntraInter/NnlfSet1_ChromaCNNFilter_int16.sadl"), "NnlfSet1 chroma model name")
+  ( "NnlfSet1IntraLumaModel",                         m_nnlfSet1IntraLumaModelName,              string("models/NnlfSet1_CombinedIntraInter/NnlfSet1_LumaCNNFilter_int16.sadl"), "NnlfSet1 luma model name")
+  ( "NnlfSet1IntraChromaModel",                       m_nnlfSet1IntraChromaModelName,            string("models/NnlfSet1_CombinedIntraInter/NnlfSet1_ChromaCNNFilter_int16.sadl"), "NnlfSet1 chroma model name")
 #if JVET_AC0177_MULTI_FRAME
   ( "NnlfSet1AlternativeInterLumaModel",              m_nnlfSet1AlternativeInterLumaModelName,   string("models/NnlfSet1_LumaCNNFilter_InterSlice_MultiframePrior_Tid345_int16.sadl"), "NnlfSet1 alternative inter luma model name")
   ( "NnlfSet1Multiframe",                             m_nnlfSet1Multiframe,                      false, "Input multiple frames in NN-based loop filter set 1" )
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/README.md b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..094385b56b712831cfdd3ca0bf4ce6ee95ac76fb
--- /dev/null
+++ b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/README.md
@@ -0,0 +1,329 @@
+Datasets that are used:
+- BVIDVC (764 out of 800 sequences)
+- DIV2K (800 images)
+- DIV2K_X2 (800 images, DIV2K images downsampled by 2)
+
+Storage requirement:
+- Original YUVs of BVIDVC, DIV2K, and DIV2K_X2 are about 0.4 TB. 
+- In Stage 1, BVIDVC coded in RA with QP 20,25,...,40 is needed. This is about 6 TB.
+	- The YUVs include picture-before-deblocking, prediction, and bs (boundary strength).
+	- These YUVs can be removed after Stage 1 training.
+- In Stage 2, BVIDVC coded in AI and RA with QP 17,22,...,42 is needed. This is about 7 (AI) + 9 (RA) = 16 TB. In addition, DIV2K and DIV2K_X2 in AI with QP 17,22,...,42 are needed, and this is about 0.2 TB.
+	- The YUVs include picture-before-deblocking, prediction, and bs for AI.
+	- The YUVs include picture-before-deblocking, prediction, bs, and block type IPB for RA.
+
+There are two stages of training. In Stage 1, an inter luma model is trained using VTM-11.0 encoded RA BVIDVC data. In Stage 2, a luma IPB model and a chroma model are trained using both AI and RA data.
+
+The training script uses json files to read data in the data loader. The json file follows NNVC-3.0 style.
+```
+{
+ "suffix_rec_before_dbf": "_pictureBeforeDb.yuv",
+ "suffix_pred": "_mpr.yuv",
+ "suffix_partition_cu_average": "_mpa.yuv",
+ "suffix_bs": "_bs.yuv",
+ "suffix_qp": "_qp.dat",
+ "suffix_slicetype": "_slicetype.dat",
+ "suffix_bpm": "_bpm.yuv",
+ "data": [
+  {
+   "bsname": "0001_2040x1400_25fps_8bit_420_qp42.bin",
+   "qp_base": 42,
+   "basename": "0001_2040x1400_25fps_8bit_420_qp42",
+   "width": 2040,
+   "height": 1400,
+   "data_count": 1,
+   "dirname": "/proj/video_no_backup/videosim/eliudux/training_data/xcheck_yuv2/ai_574d_DIV2K/0001_2040x1400_25fps_8bit_420_qp42/",
+   "original_yuv": "/proj/video_data3/videosim/data/DIV2K/DIV2K_train_HR_yuv/0001_2040x1400_25fps_8bit_420.yuv",
+   "original_temporal_subsample": 1,
+   "original_frame_skip": 0,
+   "original_bitdepth": 8
+  }
+```
+
+Stage 0 Preparing original YUV data
+=================
+To prepare the original YUV data, go to stage0_prepare_yuv and follow Instruction_prepareYUV.md.
+
+The cfg files of BVIDVC, DIV2K, and DIV2K_X2 can be found in VVCSoftware_VTM/training/training_scripts/Nn_Filtering_Set_1/Scripts/LumaIntra/data_extraction_scripts/.
+
+
+Stage 1 Data generation and training
+=================
+
+Stage1_1:
+---------
+Check out and compile the software (VTM-11.0 commit 8bf80e55603674bbbbad165c31e79729cde2579b) in stage1_1_vtm_enc/ to generate the encoder, to be used for encoding.
+```bash
+mkdir stage1_1_vtm_enc
+cd stage1_1_vtm_enc
+git clone https://vcgit.hhi.fraunhofer.de/jvet-ahg-nnvc/VVCSoftware_VTM.git
+cd VVCSoftware_VTM
+git checkout VTM-11.0
+mkdir build
+cd build
+cmake .. -DCMAKE_BUILD_TYPE=Release
+make -j 4
+```
+
+Stage1_2:
+---------
+Prepare libtorch 1.9.1
+```bash
+mkdir libtorch191
+cd libtorch191
+wget https://download.pytorch.org/libtorch/cpu/libtorch-shared-with-deps-1.9.1%2Bcpu.zip
+unzip libtorch-shared-with-deps-1.9.1+cpu.zip
+realpath libtorch
+```
+--> This will show the path to the libtorch. Include this path in the VTM cmake command below.
+(Mine: /proj/video_data4/videosim/eliudux/JVET_NN_EE/ab-ee1-1_5/training_EE115/libtorch191/libtorch)
+
+Compile the software in stage1_2_vtm_dec to generate the decoder, to be used for YUV data extraction. Use the above libtorch directory for '-DCMAKE_PREFIX_PATH='.
+```bash
+cd ../stage1_2_vtm_dec/
+git clone https://vcgit.hhi.fraunhofer.de/jvet-w-ee1/VVCSoftware_VTM.git
+mv VVCSoftware_VTM extractmoreinfovtm
+cd extractmoreinfovtm
+git checkout 015182482556b6c77a089135dd77b1a7867f5820
+git apply --whitespace=fix ../extract_stage1_2.patch
+mkdir build
+cd build
+cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH=/proj/video_data4/videosim/eliudux/JVET_NN_EE/ab-ee1-1_5/training_EE115/libtorch191/libtorch
+make -j 4
+```
+(The software is aligned with the encoder in stage1_1, and no actual NN is used.)
+
+Stage1_3 a):
+---------
+- A script 'stage1_3_data_generation/dec_direct_tile3_574d.py' is provided to encode, decode, and generate data
+	- Modify the directories to your local environment
+		1. Change <software_root_dir> to the directory of the EE1-1.5 training folder
+		2. Choose a suitable output root directory <output_root_dir> for saving YUVs
+		3. Change <path_to_original_xxx_yuvs> to the original YUVs from Stage 0.
+	- If you have already encoded the sequences and now only want to decode, use "onlyDecode = True". Change the binary directory "bindir" (about Line 99) and per-sequence bin directory "bin_dir" (about Line 118) to your bitstream directory.
+	- The output YUVs are saved for training. If you want to remove them after decoding, use "removeYUV = True". (Do not set to True if these are needed in training.)
+	- You may want to change the job submission command "bsub -o /dev/null" (about Line 152 and 154) to fit your cluster. We are using LSF and start a job with " bsub -o /dev/null 'python3 jobtostart.py' ".
+	- Enable the two "break"s at the end for debugging to run only one encoding/decoding job. Remove them when ready to run for all sequences.
+	- If your system cannot cope with that many simultaneous decodings, use something like time.sleep(20) to wait 20 seconds after starting a decoding instead of doing it instantly.
+
+Let your RA YUV data directory have a unique string 'ra_', so the data loader knows it is RA data. Let your AI YUV data directory have a unique string 'ai_', so the data loader knows it is AI data. 
+
+- Generate RA data
+
+Compress BVIDVC with Random Access configuration, with QP 20,25,30,35,40. This data is used in stage-1 training.
+```bash
+python stage1_3_data_generation/dec_direct_tile3_574d.py BVIDVC ra D
+python stage1_3_data_generation/dec_direct_tile3_574d.py BVIDVC ra C
+python stage1_3_data_generation/dec_direct_tile3_574d.py BVIDVC ra B
+python stage1_3_data_generation/dec_direct_tile3_574d.py BVIDVC ra A
+```	
+
+As an example to double check, go to the output YUV folder 'ra_574d_BVIDVC/DAdvertisingMassagesBangkokVidevo_480x272_25fps_10bit_420_qp40' and get the md5sum of the YUVs:
+
+md5sum DAdvertisingMassagesBangkokVidevo_480x272_25fps_10bit_420_qp40_bs.yuv
+--> cd6a055650a0c618da8d904f1b8535ed
+
+md5sum DAdvertisingMassagesBangkokVidevo_480x272_25fps_10bit_420_qp40_mpr.yuv
+--> adf1e30a1c62508f989792f134cb37c1
+
+md5sum DAdvertisingMassagesBangkokVidevo_480x272_25fps_10bit_420_qp40_pictureBeforeDb.yuv
+--> d33210da16d685103cfc5c291a7ee1a9
+
+
+- Generate AI data
+
+Compress BVIDVC, DIV2K, DIV2K_X2 with All Intra configuration, with QP 17,22,27,32,37,42. A configuration file 'cfg/encoder_intra_vtm_subsample1.cfg' is included with TemporalSubsampleRatio = 1, that is, all the 64 frames of BVIDVC are compressed, not every 8th frame. This data is used in stage-2 training.
+```bash
+python stage1_3_data_generation/dec_direct_tile3_574d.py DIV2K ai 0
+python stage1_3_data_generation/dec_direct_tile3_574d.py DIV2K_X2 ai 0
+python stage1_3_data_generation/dec_direct_tile3_574d.py BVIDVC ai A
+python stage1_3_data_generation/dec_direct_tile3_574d.py BVIDVC ai B
+python stage1_3_data_generation/dec_direct_tile3_574d.py BVIDVC ai C
+python stage1_3_data_generation/dec_direct_tile3_574d.py BVIDVC ai D
+```	
+
+As an example to double check, go to the output YUV folder 'ai_574d_BVIDVC/AAdvertisingMassagesBangkokVidevo_3840x2176_25fps_10bit_420_qp42' and get the md5sum of the YUVs:
+
+md5sum AAdvertisingMassagesBangkokVidevo_3840x2176_25fps_10bit_420_qp42_bs.yuv
+--> f7ea68a6cacdf5690948a9128df9225b
+
+md5sum AAdvertisingMassagesBangkokVidevo_3840x2176_25fps_10bit_420_qp42_mpr.yuv
+--> 6868ad6bade436e3f6af80da9894c767 
+
+md5sum AAdvertisingMassagesBangkokVidevo_3840x2176_25fps_10bit_420_qp42_pictureBeforeDb.yuv
+--> e9e482679abe605d33eef64894a8b3ab
+
+Stage1_3 b):
+---------
+Go to create_json/ and follow the README.md file.
+
+Generate a json file containing the whole RA dataset from Stage1_3 a) for training. Similarly, create a json file for the validation dataset. Let the validation set be as simple as possible (e.g., containing only one sequence).
+
+
+Stage1_4:
+---------
+Train the inter luma model using the RA data. 
+
+First, use L1 loss with learning rate 1e-4 for the first 294 epochs. Then, use L1 loss with learning rate 1e-5 for the 295 to 455 epochs.
+
+Go to stage1_interluma/. Run the training script with your GPU environment. "--switch_epochs 295" will switch the learning rate from epoch 295.
+```bash
+python3 train_interluma.py --save_ckp_path './ckp/' --loss_function 'L1' --learning_rate 1e-4 --epochs 455 --switch_lr 1e-5 --switch_epochs 295 --batchsize 64 --num_workers 20 --input_json_train db_stage1.json --input_json_valid db_valid.json --tag InterLuma
+```
+
+The print out shows that the training dataset has a size of 236840 ( = 764 BVIDVC seqs * 62 RA frames * 5 QPs).
+Depending on how many RA frames are used, the total size may vary.
+```
+epoch 1
+ave loss: 1.053876e-02, last loss: 0.010539  [    0/236840]   9 seconds since last print out.
+```
+
+The checkpoints are saved under 'ckp/InterLuma/'.
+
+
+Convert to Pytorch JIT model
+---------
+Go to create_models/.
+
+In generate_models.py, Line 128, modify the checkpoint directory "best_ckp_path2" to the epoch 455 directory you get from the above step.
+
+If you need to change the model output directory, modify "save_model_path2".
+Then, run
+```bash
+python3 generate_models.py
+```
+
+
+Stage 2 Data generation and training
+=================
+
+Stage2_1
+---------
+- Clone the software
+```bash
+cd stage2_1_vtm/
+git clone https://vcgit.hhi.fraunhofer.de/jvet-w-ee1/VVCSoftware_VTM.git
+mv VVCSoftware_VTM extractmoreinfovtm
+cd extractmoreinfovtm
+git checkout 015182482556b6c77a089135dd77b1a7867f5820
+git apply --whitespace=fix ../extract_stage2_1.patch
+```
+
+In the source code, replace the *inter luma model* in
+source/Lib/CommonLib/CNNFilter.cpp, line 59
+@@ void CNNFilter::initBSlice(int qp)
+```
+std::string sLumaModelName = "/proj/video_data3/videosim/eliudux/myCurrentWork/training_data/jvet-ab-ee1-1_5/training_EE115/stage2_1_vtm/extractmoreinfovtm/trained_models/InterY_9b7d_ep455.pt";
+```
+with the model you trained from Stage 1.
+
+Also, change all the model directories (Lines 44, 45, 59, 60) from "/proj/video_data3/videosim/eliudux/myCurrentWork/training_data/jvet-ab-ee1-1_5/" to the model directories.
+
+Reference models for intra luma and inter luma can be found at https://vcgit.hhi.fraunhofer.de/jvet-ab-ee1/VVCSoftware_VTM.git, branch EE1-1.5, VVCSoftware_VTM/training_EE115/stage2_1_vtm/trained_models.
+
+The inter chroma model and the intra chroma model are in stage2_1_vtm/extractmoreinfovtm/models/. 
+
+Compile the code including the libtorch 1.9.1 path
+```
+mkdir build
+cd build
+cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH=/proj/video_data4/videosim/eliudux/JVET_NN_EE/ab-ee1-1_5/training_EE115/libtorch191/libtorch
+make -j 4
+```
+
+The training scripts of the intra luma model can be found at https://vcgit.hhi.fraunhofer.de/jvet-aa-ee1/VVCSoftware_VTM.git, branch EE1-1.2.
+
+The training scripts of the intra chroma and inter chroma models can be found at https://vcgit.hhi.fraunhofer.de/jvet-z-ee1/VVCSoftware_VTM.git, branch EE1-1.6.
+
+
+Stage2_2 a)
+---------
+- Use the encoder and decoder from stage2_1_vtm to encode, decode, and generate the RA data
+- A script 'stage2_2_data_generation/dec_direct_tile3_93dd.py' is provided to encode, decode, and generate data
+	- Modify the directories to your local environment
+		1. Change <software_root_dir> to the current directory of the CombinedIntraInter folder
+		2. Choose a suitable output root directory <output_root_dir> for saving YUVs. Better to have the same <output_root_dir> as in stage1_3_data_generation/dec_direct_tile3_574d.py, so the training sets for both AI and RA locate in the same <output_root_dir>.
+		3. Change <path_to_original_xxx_yuvs> to the original YUVs from Stage 0.
+	- If you have already encoded the sequences and now only want to decode, use "onlyDecode = True". Change the binary directory "bindir" (about Line 57) and per-sequence binary directory "bin_dir" (about Line 75) to your bitstream directory.
+	- The output YUVs are saved for training. If you want to remove them after decoding, use "removeYUV = True". (Do not set to True if these are needed in training.)
+	- You may want to change the job submission command "bsub -o /dev/null " (about Line 110 and 112) to fit your cluster.
+	- Enable the two "break"s at the end for debugging to run only one encoding/decoding job. Remove them when ready to submit jobs for all sequences.
+	- If your system cannot cope with that many simultaneous decodings, use something like time.sleep(20) to wait 20 seconds after starting a decoding instead of doing it instantly.
+```bash
+python stage2_2_data_generation/dec_direct_tile3_93dd.py A
+python stage2_2_data_generation/dec_direct_tile3_93dd.py B
+python stage2_2_data_generation/dec_direct_tile3_93dd.py C
+python stage2_2_data_generation/dec_direct_tile3_93dd.py D
+```	
+
+Stage2_2 b)
+---------
+Go to create_json/ and follow the README.md file.
+
+Generate a json file containing the AI data from Stage1_3 and RA data from Stage2_2 for training. Similarly, create a json file for the validation dataset. Let the validation set be as simple as possible (e.g., containing only one sequence).
+
+Stage2_3
+---------
+Train the luma IPB model with AI and RA data. 
+
+If you have more than one GPU, the training of luma IPB (Stage2_3) and the training of chroma (Stage2_4) can run at the same time.
+
+Go to stage2_3_training_lumaIPB/.
+
+First, use L1 loss with learning rate 1e-4 for the first 310 epochs. Switch to MSE loss and learning rate 1e-5 from epoch 311 and train until epoch 340.
+
+```bash
+python3 train_luma.py --save_ckp_path './ckp/' --loss_function 'L1' --learning_rate 1e-4 --epochs 340 --mse_lr 1e-5 --mse_epochs 311 --batchsize 32 --num_workers 20 --input_json_train db_stage2.json --input_json_valid db_avalid.json --tag Luma
+```
+
+The print out shows that the training data set has a size of 293808 ( = (800 DIV2K + 800 DIV2K_X2 + 764 BVIDVC * 32 AI frames + 764 BVIDVC * 30 RA frames) * 6 QPs ).
+Depending on how many AI and RA frames are used, the total size may vary.
+```
+epoch 1
+ave loss: 9.661742e-01, last loss: 0.966174  [    0/293808]   26 seconds since last print out.
+```
+
+The checkpoints are saved under 'ckp/Luma/'.
+
+Stage2_4
+---------
+Train the chroma model with AI and RA data. 
+
+Go to stage2_4_training_chroma/.
+
+Use the training json file and validation json file same as stage2_3 luma training.
+
+First, use L1 loss with learning rate 1e-4 for the first 300 epochs. Switch to MSE and learning rate 1e-5 from epoch 301 and train until epoch 400.
+```bash
+python3 train_chroma.py --save_ckp_path './ckp/' --loss_function 'L1' --learning_rate 1e-4 --epochs 400 --mse_lr 1e-5 --mse_epochs 301 --batchsize 32 --num_workers 20 --input_json_train db_stage2.json --input_json_valid db_valid.json --tag Chroma
+```
+
+The checkpoints are saved under 'ckp/Chroma/'.
+
+
+Convert to SADL model
+------
+Clone the SADL repository to your local disk and compile SADL by running 
+```bash
+git clone https://vcgit.hhi.fraunhofer.de/jvet-ahg-nnvc/sadl.git
+cd sadl
+sh sample/sample_test.sh
+```
+SADL build directory is sadl/sample_test/. The executable "naive_quantization" is needed for generating integer SADL models.
+
+Go to create_models/.
+
+In generate_models.py, 
+Line 131, comment out the code "savemodel(...)" as this is not needed at this stage.
+
+In Lines 137 and 138, give the right SADL build directory "sadl_build_dir" and the right SADL main.py directory "sadl_main_dir".
+
+To save the luma IPB model, give the right directory of the luma IPB checkpoint "best_ckp_path28d" and output model directory, and enable Lines 148 to 150. 
+
+To save the chroma model, give the right directory of the chroma checkpoint "best_ckp_path33c" and output model directory, and enable Lines 159 to 161.
+
+Then, run
+```bash
+python3 generate_models.py
+```
+to generate the luma IPB and chroma models in both floating point and integer.
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/README.md b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..22601042d903f7fe82450e7e3a50b4b4536e9304
--- /dev/null
+++ b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/README.md
@@ -0,0 +1,31 @@
+Scripts in this folder are used to create .json files and slice qp .dat files to align the data loader in NNVC, when the training data is not generated by NNVC decoder. 
+
+In create_json_sliceqp_per_seq.py, modify the output_root_dir and origin yuv directories accordingly. Run the script to generate .json files and slice qp .dat files for each sequence in the dataset.
+
+Stage1 dataset
+```
+python3 create_json_sliceqp_per_seq.py ra_574d_BVIDVC
+```
+
+Stage2 datasets
+```
+python3 create_json_sliceqp_per_seq.py ai_574d_BVIDVC
+python3 create_json_sliceqp_per_seq.py ra_93dd_BVIDVC
+python3 create_json_sliceqp_per_seq.py DIV2K
+python3 create_json_sliceqp_per_seq.py DIV2K_X2
+```
+
+Then, in steps.py, modify yuv_root_dir. Enable the lines for stage1 (line 18 to 20) or stage 2 (line 31 to 33). Run the script to generate a json file containing the whole set that is needed.
+```
+python3 steps.py
+```
+
+To generate a simple test set or a simple validation set, copy one sequence to a new directory and let this be the full set. Create a .json for this.
+
+Let yuv_root_dir be, e.g., '/proj/video_no_backup/videosim/eliudux/training_data/xcheck_yuv2/'
+```
+cd <yuv_root_dir>
+mkdir ai_574d_DIV2K_valid
+cp -r ai_574d_DIV2K/0001_2040x1400_25fps_8bit_420_qp42/ ai_574d_DIV2K_valid/0001_2040x1400_25fps_8bit_420_qp42/ 
+```
+Enable steps.py line 41 to 43 and run it.
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/concatenate_dataset.py b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/concatenate_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..68b39107c581e734619ff36a15765490c6770ad5
--- /dev/null
+++ b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/concatenate_dataset.py
@@ -0,0 +1,151 @@
+"""
+/* The copyright in this software is being made available under the BSD
+* License, included below. This software may be subject to other third party
+* and contributor rights, including patent rights, and no such rights are
+* granted under this license.
+*
+* Copyright (c) 2010-2022, ITU/ISO/IEC
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+*
+*  * Redistributions of source code must retain the above copyright notice,
+*    this list of conditions and the following disclaimer.
+*  * Redistributions in binary form must reproduce the above copyright notice,
+*    this list of conditions and the following disclaimer in the documentation
+*    and/or other materials provided with the distribution.
+*  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+*    be used to endorse or promote products derived from this software without
+*    specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+* THE POSSIBILITY OF SUCH DAMAGE.
+"""
+
+import argparse
+import glob
+import sys
+import json
+import re
+import os
+
+parser = argparse.ArgumentParser(prog='concatenate dataset', usage='create a global dataset from all the json file in a given directory. ', 
+                                  formatter_class=argparse.RawDescriptionHelpFormatter,
+                                 epilog=
+'''2 modes available:
+   concatenate_dataset.py --input_dir dir1 --input_dir dir2 --output_json pre_dataset.json
+   concatenate_dataset.py --input_json pre_dataset.json --input_dir_encoder direnc1 --input_dir_encoder direnc2 --output_json dataset.json''')
+parser.add_argument("--input_dir_json", action="append", nargs='+', type=str, help="directory containing individual json files. Multiple options possible.")
+parser.add_argument("--dataset_range", type=str, default='', help="train or test dataset range (such as 0-1000), use all data by default")
+parser.add_argument("--input_json", action="store", nargs='?', type=str, help="input json database.")
+parser.add_argument("--input_dir_encoder", action="append", nargs='+', type=str, help="directory containing individual encoder log files or encoder cfg files. Multiple options possible.")
+parser.add_argument("--log_extension", default="log", action="store", nargs='?', type=str, help="encoder log extension")
+parser.add_argument("--output_json", action="store", nargs='?', type=str, help="name of the output file with concatenated files", required=True)
+args=parser.parse_args()
+
+# mode 1: concatenate all indiviual dataset into 1 file, setting the dirname to find the data
+if args.input_dir_json is not None:
+    header={}
+    lastheader=None
+    db=[]
+    flat=[d for d1 in args.input_dir_json for d in d1]
+    for d in flat:
+        files = sorted(glob.glob(d+'*/*.json'))
+        if args.dataset_range:
+            temp = args.dataset_range.split('-')
+            range_start, range_end = list(map(lambda x: int(x), temp))
+            files = files[range_start:range_end]
+        print("Processing directory {}: {} files".format(d,len(files)))
+        for f in files:
+           with open(f, "r") as file:
+               content = file.read()
+               dcontent = json.loads(content)
+               header={}
+               for key in dcontent:
+                   if "suffix_" in key:
+                       header[key]=dcontent[key]
+               #if lastheader is not None and not lastheader == header:
+               #    sys.exit("File {} does not contain the same data as other files".format(f))
+               lastheader = header
+               for data in dcontent['data']:
+                   if 'dirname' not in data: # no dirname yet 
+                      data['dirname']=f[:-len(os.path.basename(f))]
+                      #data['dirname']=d
+                   db.append(data)
+    jout=header
+    jout["data"]=db
+    s = json.dumps(jout,indent=1)
+    with open(args.output_json, "w") as file:
+      file.write(s)
+
+
+# mode 2: consolidate a dataset file by adding information on original yuv from encoder logs information     
+if args.input_json is not None:
+    db_logs={}
+    flat=[d for d1 in args.input_dir_encoder for d in d1]
+    for d in flat:
+        files = glob.glob(d+'*/*.'+args.log_extension)
+        print("Processing directory {}: {} files".format(d,len(files)))
+        for f in files:
+           with open(f, "r") as file:
+              info={"FrameSkip": 0, "TemporalSubsampleRatio": 1} # default              
+              name=None
+              for line in file:
+                  m = re.match("^Input\s*File\s*:\s*([^\s]+)", line)
+                  if m:
+                      info['InputFile']=m.group(1)
+                  m = re.match("^Bitstream\s*File\s*:\s*([^\s]+)", line)
+                  if m:
+                      #name=os.path.basename(m.group(1))
+                      name=m.group(1)
+                  m = re.match("^TemporalSubsampleRatio\s*:\s*([0-9]+)", line)
+                  if m:
+                      info['TemporalSubsampleRatio']=m.group(1)
+#                  m = re.match("^QP\s*:\s*([0-9]+)", line)
+ #                 if m:
+  #                    info['QP']=m.group(1)
+                  m = re.match("^FrameSkip\s*:\s*([0-9]+)", line)
+                  if m:
+                      info['FrameSkip']=m.group(1)
+                  m = re.match("^Input\s+bit\s+depth\s*:\s*\(Y:([0-9]+),", line)
+                  if m:
+                      info['InputBitDepth']=m.group(1)
+                  m = re.match("^InputBitDepth\s*:\s*([0-9]+)", line)
+                  if m:
+                       info['InputBitDepth']=m.group(1)
+              if name is not None:
+                  if len(info) != 4:
+                    sys.exit("Not enough information extracted for bitstream {}".format(name))
+                  db_logs[name]=info        
+    #print(db_logs)
+    with open(args.input_json, "r") as file:
+      content = file.read()
+      dcontent = json.loads(content)
+      for d in dcontent['data']:
+        for kk in db_logs:      
+          if d['bsname'] in kk:
+                #print(kk)    
+                info=db_logs[kk]
+                if ('/ra' in d['dirname'] and '/ra' in kk) or ('/ai' in d['dirname'] and '/i' in kk) :
+                  #print(d['dirname'])
+                  #print(info['FullBsPath'])  
+                  d['original_yuv']=info['InputFile']
+                  d['original_temporal_subsample']=int(info['TemporalSubsampleRatio'])
+                  d['original_frame_skip']=int(info['FrameSkip'])
+    #              d['qp_base']=int(info['QP'])
+                  d['original_bitdepth']=int(info['InputBitDepth'])
+                  break
+                 
+      s = json.dumps(dcontent,indent=1)
+      with open(args.output_json, "w") as file:
+        file.write(s)
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/create_json_sliceqp_per_seq.py b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/create_json_sliceqp_per_seq.py
new file mode 100644
index 0000000000000000000000000000000000000000..b901447af08d79c9e258b02b6bfc217b194327b0
--- /dev/null
+++ b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/create_json_sliceqp_per_seq.py
@@ -0,0 +1,156 @@
+"""
+/* The copyright in this software is being made available under the BSD
+* License, included below. This software may be subject to other third party
+* and contributor rights, including patent rights, and no such rights are
+* granted under this license.
+*
+* Copyright (c) 2010-2022, ITU/ISO/IEC
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+*
+*  * Redistributions of source code must retain the above copyright notice,
+*    this list of conditions and the following disclaimer.
+*  * Redistributions in binary form must reproduce the above copyright notice,
+*    this list of conditions and the following disclaimer in the documentation
+*    and/or other materials provided with the distribution.
+*  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+*    be used to endorse or promote products derived from this software without
+*    specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+* THE POSSIBILITY OF SUCH DAMAGE.
+"""
+import sys
+import os
+import shutil
+
+current_dir = os.getcwd()
+json_template = os.path.join(current_dir, 'template/json_template.json')
+writeJson = True
+writeSliceqp = True
+
+dataset_name = sys.argv[1]
+# Select which dataset to write json files
+# Choose from 'ai_574d_BVIDVC', 'ra_574d_BVIDVC', 'ra_93dd_BVIDVC', 'DIV2K', 'DIV2K_X2'.
+
+# Modify the output_root_dir and origin yuv directories below accordingly.
+path_to_original_DIV2K_yuvs = '/proj/video_data3/videosim/data/DIV2K/DIV2K_train_HR_yuv/'
+path_to_original_DIV2K_X2_yuvs = '/proj/video_data3/videosim/data/DIV2K/NTIRE_2017_Low_Res_Images/DIV2K_train_LR_bicubic/X2_yuv/'
+path_to_original_BVI_DVC_yuvs = '/proj/video_data3/videosim/data/BVI_DVC/'
+
+output_root_dir = '/proj/video_no_backup/videosim/eliudux/training_data/xcheck_yuv2/'
+
+if dataset_name == 'ai_574d_BVIDVC':
+    yuv_dataset_dir = output_root_dir + 'ai_574d_BVIDVC/'
+    origin_yuv_dir = path_to_original_BVI_DVC_yuvs
+elif dataset_name == 'ra_574d_BVIDVC':
+    yuv_dataset_dir = output_root_dir + 'ra_574d_BVIDVC/'
+    origin_yuv_dir = path_to_original_BVI_DVC_yuvs
+elif dataset_name == 'ra_93dd_BVIDVC':
+    yuv_dataset_dir = output_root_dir + 'ra_93dd_BVIDVC/'
+    origin_yuv_dir = path_to_original_BVI_DVC_yuvs
+elif dataset_name ==  'DIV2K':    
+    yuv_dataset_dir = output_root_dir + 'ai_574d_DIV2K/'
+    origin_yuv_dir = path_to_original_DIV2K_yuvs
+elif dataset_name ==  'DIV2K_X2':
+    yuv_dataset_dir = output_root_dir + 'ai_574d_DIV2K_X2/'
+    origin_yuv_dir = path_to_original_DIV2K_X2_yuvs
+else:        
+    print('Exiting. Unknown dataset name', dataset_name)
+    exit()
+      
+def writeJsonandSliceQp( subfolder):    
+    folder_dir = yuv_dataset_dir + subfolder + '/'
+    #print(folder_dir)
+    string1 = subfolder.split('_')
+    size = string1[1].split('x')
+    width = size[0]
+    height = size[1]
+    base_qp = string1[-1][-2:]
+    seq_name = subfolder.split('_qp')[0]
+    origin_yuv = origin_yuv_dir + seq_name + '.yuv'
+    if writeJson:            
+        json_output_file = folder_dir  + subfolder + '.json'
+        file2 = open(json_output_file,"w+")
+        with open(json_template, encoding="utf8") as infile:
+            for loadnumber, line in enumerate(infile):
+                if "suffix_rec_before_dbf" in line:
+                    s1 = ' "suffix_rec_before_dbf": "_pictureBeforeDb.yuv",\n'
+                elif "suffix_pred" in line:
+                    s1 = ' "suffix_pred": "_mpr.yuv",\n'
+                elif "suffix_partition_cu_average" in line:
+                    s1 = ' "suffix_partition_cu_average": "_mpa.yuv",\n'
+                elif "bsname" in line:
+                    s1 = '   "bsname": "' + subfolder + '.bin",\n'
+                elif "qp_base" in line:
+                    s1 = '   "qp_base": ' + base_qp + ',\n'
+                elif "basename" in line:
+                    s1 = '   "basename": "' + subfolder + '",\n'
+                elif "width" in line:
+                    s1 = '   "width": ' + width + ',\n'
+                elif "height" in line:
+                    s1 = '   "height": ' + height + ',\n'
+                elif "data_count" in line:
+                    if 'DIV2K' in origin_yuv_dir:
+                        s1 = '   "data_count": 1,\n'
+                    elif '65frm' in origin_yuv_dir:
+                        s1 = '   "data_count": 65,\n'
+                    else:
+                        s1 = '   "data_count": 64,\n'
+                elif "dirname" in line:
+                    s1 = '   "dirname": "' + folder_dir + '",\n'
+                elif "original_yuv" in line:
+                    s1 = '   "original_yuv": "' +  origin_yuv + '",\n'
+                elif "original_bitdepth" in line:
+                    if 'DIV2K' in dataset_name:
+                        s1 = '   "original_bitdepth": 8\n'
+                    else:
+                        s1 = line
+                else:
+                    s1 = line
+                    
+                file2.write(s1)
+        file2.close()
+    
+    if writeSliceqp:
+        sliceqp_output_file = folder_dir  + subfolder + '_qp.dat'
+        if 'DIV2K' in dataset_name:
+            qptemplate = os.path.join(current_dir, 'template/ai_DIV2K_1frm_qp'+str(base_qp)+'.dat')
+        elif 'ai_574d_BVIDVC' in dataset_name:
+            qptemplate = os.path.join(current_dir, 'template/ai_BVIDVC_64frm_qp'+str(base_qp)+'.dat')
+        elif 'ra' in dataset_name and 'BVIDVC' in dataset_name:
+            qptemplate = os.path.join(current_dir, 'template/ra_BVIDVC_64frm_qp'+str(base_qp)+'.dat')
+        else:
+            print('Wrong dataset name',dataset_name)
+            exit()
+            
+        if not os.path.exists(qptemplate):
+            print('Template of slice qp: ',qptemplate,' for',dataset_name,' does not exist.')
+            exit()
+            
+        if not os.path.exists(sliceqp_output_file):
+            shutil.copy(qptemplate, sliceqp_output_file)
+
+      
+cnt = 0
+for subfolder in os.listdir(yuv_dataset_dir):     
+    writeJsonandSliceQp(subfolder)
+    cnt += 1
+
+    if cnt%1000 == 0:
+        print('\trunning',cnt,'sequences...')
+    
+    
+print('Total', cnt, 'folders.')    
+
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/steps.py b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/steps.py
new file mode 100644
index 0000000000000000000000000000000000000000..d6dd21aca10805a7031e69c14bae806b43173940
--- /dev/null
+++ b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/steps.py
@@ -0,0 +1,77 @@
+"""
+/* The copyright in this software is being made available under the BSD
+* License, included below. This software may be subject to other third party
+* and contributor rights, including patent rights, and no such rights are
+* granted under this license.
+*
+* Copyright (c) 2010-2022, ITU/ISO/IEC
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+*
+*  * Redistributions of source code must retain the above copyright notice,
+*    this list of conditions and the following disclaimer.
+*  * Redistributions in binary form must reproduce the above copyright notice,
+*    this list of conditions and the following disclaimer in the documentation
+*    and/or other materials provided with the distribution.
+*  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+*    be used to endorse or promote products derived from this software without
+*    specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+* THE POSSIBILITY OF SUCH DAMAGE.
+"""
+
+import sys
+import os
+
+
+sw_dir = os.getcwd()
+py_concat_data_dir = os.path.join(sw_dir, "concatenate_dataset.py")
+
+# Name of the json file
+db_json_dir_stage1 = "db_stage1.json" 
+db_json_dir_stage2 = "db_stage2.json" 
+db_json_dir_valid = "db_valid.json" 
+
+yuv_root_dir = '/proj/video_no_backup/videosim/eliudux/training_data/xcheck_yuv2/'
+
+#stage 1 dataset
+yuv_dataset_dir0 = yuv_root_dir + 'ra_574d_BVIDVC/'
+py_concat_cmd = ["python", py_concat_data_dir, "--input_dir_json",yuv_dataset_dir0, "--output_json",db_json_dir_stage1]
+py_concat_cmd = " ".join(py_concat_cmd)
+#print("\n",py_concat_cmd)
+#os.system(py_concat_cmd)  
+#print('Save', db_json_dir_stage1)    
+
+#stage 2 dataset
+yuv_dataset_dir1 = yuv_root_dir + 'ai_574d_BVIDVC/'
+yuv_dataset_dir2 = yuv_root_dir + 'ra_93dd_BVIDVC/'
+yuv_dataset_dir3 = yuv_root_dir + 'ai_574d_DIV2K/'
+yuv_dataset_dir4 = yuv_root_dir + 'ai_574d_DIV2K_X2/'
+
+# process all directories
+py_concat_cmd = ["python", py_concat_data_dir, "--input_dir_json",yuv_dataset_dir1, "--input_dir_json",yuv_dataset_dir2, "--input_dir_json",yuv_dataset_dir3, "--input_dir_json",yuv_dataset_dir4, "--output_json",db_json_dir_stage2]
+py_concat_cmd = " ".join(py_concat_cmd)
+print("\n",py_concat_cmd)
+os.system(py_concat_cmd)
+print('Save', db_json_dir_stage2)
+
+
+# create a simple set   
+yuv_dataset_dir5 = yuv_root_dir + 'ai_574d_DIV2K_valid/'
+py_concat_cmd = ["python", py_concat_data_dir, "--input_dir_json",yuv_dataset_dir5, "--output_json",db_json_dir_valid]
+py_concat_cmd = " ".join(py_concat_cmd)
+#print("\n",py_concat_cmd)
+#os.system(py_concat_cmd)
+#print('Save', db_json_dir_valid)
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_BVIDVC_64frm_qp17.dat b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_BVIDVC_64frm_qp17.dat
new file mode 100644
index 0000000000000000000000000000000000000000..279914a07cb1fca2050a08a504f0ec91b3beb28d
Binary files /dev/null and b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_BVIDVC_64frm_qp17.dat differ
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_BVIDVC_64frm_qp22.dat b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_BVIDVC_64frm_qp22.dat
new file mode 100644
index 0000000000000000000000000000000000000000..43fc660047087308fe9577b3b1dc89e3c089a50d
Binary files /dev/null and b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_BVIDVC_64frm_qp22.dat differ
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_BVIDVC_64frm_qp27.dat b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_BVIDVC_64frm_qp27.dat
new file mode 100644
index 0000000000000000000000000000000000000000..4ec474443d057f5ccfb14ea79f0d1e80409dc25e
Binary files /dev/null and b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_BVIDVC_64frm_qp27.dat differ
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_BVIDVC_64frm_qp32.dat b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_BVIDVC_64frm_qp32.dat
new file mode 100644
index 0000000000000000000000000000000000000000..c3a6a913164038d1513cc1cd1d74285117f78c72
Binary files /dev/null and b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_BVIDVC_64frm_qp32.dat differ
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_BVIDVC_64frm_qp37.dat b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_BVIDVC_64frm_qp37.dat
new file mode 100644
index 0000000000000000000000000000000000000000..053f15eec9b490d7bfb8a3f8dd6e3a3cf7e8cab9
Binary files /dev/null and b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_BVIDVC_64frm_qp37.dat differ
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_BVIDVC_64frm_qp42.dat b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_BVIDVC_64frm_qp42.dat
new file mode 100644
index 0000000000000000000000000000000000000000..4b05baab689cac8c57c45fd261b34e7d8a0d38a1
Binary files /dev/null and b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_BVIDVC_64frm_qp42.dat differ
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_DIV2K_1frm_qp17.dat b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_DIV2K_1frm_qp17.dat
new file mode 100644
index 0000000000000000000000000000000000000000..11fe3cf40d418d1772b2ec0d802b6a9bc2f78f89
Binary files /dev/null and b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_DIV2K_1frm_qp17.dat differ
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_DIV2K_1frm_qp22.dat b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_DIV2K_1frm_qp22.dat
new file mode 100644
index 0000000000000000000000000000000000000000..e5da38c06784692060a8cefeb3c00291e8412607
Binary files /dev/null and b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_DIV2K_1frm_qp22.dat differ
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_DIV2K_1frm_qp27.dat b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_DIV2K_1frm_qp27.dat
new file mode 100644
index 0000000000000000000000000000000000000000..5ab0a696ca945036646d2fa48dcc1cf914050ca2
Binary files /dev/null and b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_DIV2K_1frm_qp27.dat differ
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_DIV2K_1frm_qp32.dat b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_DIV2K_1frm_qp32.dat
new file mode 100644
index 0000000000000000000000000000000000000000..b8690a20fa0ad81ddea3153155f8021a5ff977de
Binary files /dev/null and b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_DIV2K_1frm_qp32.dat differ
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_DIV2K_1frm_qp37.dat b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_DIV2K_1frm_qp37.dat
new file mode 100644
index 0000000000000000000000000000000000000000..c06fa56ca580cc71337364c2165d4c2926656f45
Binary files /dev/null and b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_DIV2K_1frm_qp37.dat differ
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_DIV2K_1frm_qp42.dat b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_DIV2K_1frm_qp42.dat
new file mode 100644
index 0000000000000000000000000000000000000000..5ab4cca8a8c3756794165d5a9520a326d297aecb
Binary files /dev/null and b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ai_DIV2K_1frm_qp42.dat differ
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/json_template.json b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/json_template.json
new file mode 100644
index 0000000000000000000000000000000000000000..16a8a29e0d280b1512bb2891a0e432a3d04493d3
--- /dev/null
+++ b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/json_template.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:12a3ce483f5178dd376ff165f1b4516fb0a268e9ff93fc2d848155b4b8c5c087
+size 903
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ra_BVIDVC_64frm_qp17.dat b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ra_BVIDVC_64frm_qp17.dat
new file mode 100644
index 0000000000000000000000000000000000000000..71a36fe23c15f784cfd9a8a331d5fddd8ea53773
Binary files /dev/null and b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ra_BVIDVC_64frm_qp17.dat differ
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ra_BVIDVC_64frm_qp20.dat b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ra_BVIDVC_64frm_qp20.dat
new file mode 100644
index 0000000000000000000000000000000000000000..648bc940323e67b2d52a954a80c2f5e2cfd4e2f9
Binary files /dev/null and b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ra_BVIDVC_64frm_qp20.dat differ
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ra_BVIDVC_64frm_qp22.dat b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ra_BVIDVC_64frm_qp22.dat
new file mode 100644
index 0000000000000000000000000000000000000000..bc370be95d11e5880a9560fa8e203f814d4b9bfb
Binary files /dev/null and b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ra_BVIDVC_64frm_qp22.dat differ
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ra_BVIDVC_64frm_qp25.dat b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ra_BVIDVC_64frm_qp25.dat
new file mode 100644
index 0000000000000000000000000000000000000000..c16316b34a555edaaaefccdbed7c9e5bb148c27d
Binary files /dev/null and b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ra_BVIDVC_64frm_qp25.dat differ
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ra_BVIDVC_64frm_qp27.dat b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ra_BVIDVC_64frm_qp27.dat
new file mode 100644
index 0000000000000000000000000000000000000000..80964a362b9d9e991754a3f693692c7f4d7c2014
Binary files /dev/null and b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ra_BVIDVC_64frm_qp27.dat differ
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ra_BVIDVC_64frm_qp30.dat b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ra_BVIDVC_64frm_qp30.dat
new file mode 100644
index 0000000000000000000000000000000000000000..2728bd88f22e82a7a29921c8d288457536072e7a
Binary files /dev/null and b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ra_BVIDVC_64frm_qp30.dat differ
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ra_BVIDVC_64frm_qp32.dat b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ra_BVIDVC_64frm_qp32.dat
new file mode 100644
index 0000000000000000000000000000000000000000..b07ed66cf359087f5868ed62bc98bb7c3d943fa5
Binary files /dev/null and b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ra_BVIDVC_64frm_qp32.dat differ
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ra_BVIDVC_64frm_qp35.dat b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ra_BVIDVC_64frm_qp35.dat
new file mode 100644
index 0000000000000000000000000000000000000000..4f2db3ad95722f99cda03cceb8584b984b6daf19
Binary files /dev/null and b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ra_BVIDVC_64frm_qp35.dat differ
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ra_BVIDVC_64frm_qp37.dat b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ra_BVIDVC_64frm_qp37.dat
new file mode 100644
index 0000000000000000000000000000000000000000..248ce1a059be29c772748ce94e52a40fb9dcf24a
Binary files /dev/null and b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ra_BVIDVC_64frm_qp37.dat differ
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ra_BVIDVC_64frm_qp40.dat b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ra_BVIDVC_64frm_qp40.dat
new file mode 100644
index 0000000000000000000000000000000000000000..dbd83276606b2888ab69ba68a6b9df473de38534
Binary files /dev/null and b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ra_BVIDVC_64frm_qp40.dat differ
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ra_BVIDVC_64frm_qp42.dat b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ra_BVIDVC_64frm_qp42.dat
new file mode 100644
index 0000000000000000000000000000000000000000..bd450a70ff73e594ddbbd8baeb83ac6e5841cf08
Binary files /dev/null and b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_json/template/ra_BVIDVC_64frm_qp42.dat differ
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_models/generate_models.py b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_models/generate_models.py
new file mode 100644
index 0000000000000000000000000000000000000000..de238c0677c8f79033ea996f9b5678a142259b61
--- /dev/null
+++ b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_models/generate_models.py
@@ -0,0 +1,161 @@
+"""
+/* The copyright in this software is being made available under the BSD
+* License, included below. This software may be subject to other third party
+* and contributor rights, including patent rights, and no such rights are
+* granted under this license.
+*
+* Copyright (c) 2010-2022, ITU/ISO/IEC
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+*
+*  * Redistributions of source code must retain the above copyright notice,
+*    this list of conditions and the following disclaimer.
+*  * Redistributions in binary form must reproduce the above copyright notice,
+*    this list of conditions and the following disclaimer in the documentation
+*    and/or other materials provided with the distribution.
+*  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+*    be used to endorse or promote products derived from this software without
+*    specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+* THE POSSIBILITY OF SUCH DAMAGE.
+"""
+
+import os
+from sys import platform
+import torch
+import torch.nn as nn
+import subprocess
+import onnx
+
+from models.myinterY import ConditionalNet1 as net_RA
+from models.myinterY_ipb_noAtten_sadl_res import ConditionalNet1 as net_ipb_noAtten_res
+from models.myinterUV_sadl_res import ConditionalNet0 as net_uv_res
+
+
+def savemodel(model0,save_model_path,best_ckp_path):
+    
+    model = torch.jit.script(model0) 
+    checkpoint = torch.load(best_ckp_path, map_location=torch.device('cpu'))
+    model.load_state_dict(checkpoint['model_state_dict'])  
+    torch.jit.save(model, save_model_path)
+
+    print('Load ckp:',best_ckp_path)
+    print('Model saved:',save_model_path)
+
+def saveSADLmodel(sadl_main_dir, model0_res,best_ckp_path, num_input, model_dir, model_name):
+
+    #print('load ckp:', best_ckp_path)  
+    checkpoint = torch.load(best_ckp_path, map_location=torch.device('cpu'))
+    #print(checkpoint.keys())
+    model0_res.load_state_dict(checkpoint['model_state_dict'])      
+
+    w=32
+    h=32
+    dummy_rec = torch.ones(1, 1, h, w, requires_grad=True)
+    
+    inputs_torch = tuple(dummy_rec for _ in range(num_input))
+    #input_names=['rec','input','input0','input_ipb','input_skip','input_qp']    
+    
+    output_onnx_dir = model_dir+model_name+ "_res.onnx"
+    output_sadl_dir = model_dir+model_name+ "_res.sadl"
+    torch.onnx.export(model0_res, inputs_torch, output_onnx_dir)
+    cmdpy = ["python3",sadl_main_dir,"--input_onnx",output_onnx_dir,"--output",output_sadl_dir]
+    cmdpy = " ".join(cmdpy)
+    subprocess.call(cmdpy,shell=True)
+    print(output_sadl_dir)
+
+
+def saveSADLmodelChromaOrig(sadl_main_dir, model0_res,best_ckp_path, num_input, model_dir, model_name):
+    
+    checkpoint = torch.load(best_ckp_path, map_location=torch.device('cpu'))
+    
+    model0_res.load_state_dict(checkpoint['model_state_dict'])      
+
+    w=32
+    h=32
+    dummy_rec = torch.ones(1, 1, h, w, requires_grad=True)
+    dummy_rec_uv = torch.ones(1, 2, h//2, w//2, requires_grad=True)
+    dummy_qp = torch.ones(1, 1, h//2, w//2, requires_grad=True)
+    
+    inputs_torch = (dummy_rec, dummy_rec_uv, dummy_rec_uv, dummy_rec_uv, dummy_qp)  
+    
+    output_onnx_dir = model_dir+model_name+ "_res.onnx"
+    output_sadl_dir = model_dir+model_name+ "_res.sadl"
+    torch.onnx.export(model0_res, inputs_torch, output_onnx_dir)
+    cmdpy = ["python3",sadl_main_dir,"--input_onnx",output_onnx_dir,"--output",output_sadl_dir]
+    cmdpy = " ".join(cmdpy)
+    print(cmdpy)
+    os.system(cmdpy)
+    print(output_sadl_dir)
+    
+def quantizeSADL(sadl_dir,quant_dir, model_name, quantizer_str):  
+    
+    float_sadl_dir = quant_dir+model_name+ "_res.sadl"
+    
+    print('Input float SADL model before quantize: ',float_sadl_dir)
+    
+    # apply quantization
+    naive_quantizer_cpp_dir = sadl_dir + 'naive_quantization'
+    int_sadl_dir = quant_dir+model_name+ "_res_int16.sadl"
+    file = open(quantizer_str)
+    q_str = file.readlines()[0]
+    cmd_quantize = ["echo '", q_str ," ' | ", naive_quantizer_cpp_dir, float_sadl_dir, int_sadl_dir, ";"]
+    cmd_quantize = " ".join(cmd_quantize)
+    print(cmd_quantize)
+    os.system(cmd_quantize)
+    
+    file.close()      
+ 
+#output folder
+folder1 = 'out_models/' 
+os.makedirs(folder1,exist_ok=True) 
+ 
+# stage 1 - inter luma model 
+model_ra = net_RA()
+save_model_path2 = folder1 + "InterY_9b7d_ep455_xcheck.pt"
+best_ckp_path2 = "../stage1_4_training_interluma/checkpoint/ra_L1_lr5_branch294_9b7d/epoch_0455.pt" 
+
+print('\nGenerate inter luma in Pytorch.')
+savemodel(model_ra,save_model_path2,best_ckp_path2)
+
+
+# stage 2 
+
+#SADL dir. Keep the last '/'
+sadl_build_dir = '/proj/video_data3/videosim/eliudux/myCurrentWork/training_data/sadl_master/sadl/sample_test/'
+sadl_main_dir = '/proj/video_data3/videosim/eliudux/myCurrentWork/training_data/sadl_master/sadl/converter/main.py'
+q_str_file = "sadl_q_str.txt"
+
+# luma 
+
+model_ipb_noAtten_res = net_ipb_noAtten_res()
+best_ckp_path28d = "../luma/xcheck_ckp/Luma_nnvc3/epoch_0244.pt"  
+model_sadl_dir28d = "./"+folder1
+model_name28d = "Luma_nnvc3_ep244"
+
+#print('\nGenerate luma IPB in SADL.')
+#saveSADLmodel(sadl_main_dir, model_ipb_noAtten_res,best_ckp_path28d, 5, model_sadl_dir28d, model_name28d)
+#quantizeSADL(sadl_build_dir, folder1, model_name28d, q_str_file)
+
+
+# chroma
+model_uv_res = net_uv_res()
+best_ckp_path33c = "../chroma/xcheck_ckp/Chroma_nnvc3_from125/epoch_0250.pt" 
+model_sadl_dir33c = "./"+folder1
+model_name33c = "Chroma_nnvc3_from125_ep250"
+
+#print('\nGenerate chroma in SADL.')
+#saveSADLmodelChromaOrig(sadl_main_dir, model_uv_res,best_ckp_path33c, 5, model_sadl_dir33c, model_name33c)
+#quantizeSADL(sadl_build_dir, folder1, model_name33c, q_str_file)
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_models/models/myinterUV_sadl_res.py b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_models/models/myinterUV_sadl_res.py
new file mode 100644
index 0000000000000000000000000000000000000000..80dec9ca6434e51b652ca405adc09573a00ab88b
--- /dev/null
+++ b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_models/models/myinterUV_sadl_res.py
@@ -0,0 +1,94 @@
+"""
+/* The copyright in this software is being made available under the BSD
+* License, included below. This software may be subject to other third party
+* and contributor rights, including patent rights, and no such rights are
+* granted under this license.
+*
+* Copyright (c) 2010-2022, ITU/ISO/IEC
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+*
+*  * Redistributions of source code must retain the above copyright notice,
+*    this list of conditions and the following disclaimer.
+*  * Redistributions in binary form must reproduce the above copyright notice,
+*    this list of conditions and the following disclaimer in the documentation
+*    and/or other materials provided with the distribution.
+*  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+*    be used to endorse or promote products derived from this software without
+*    specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+* THE POSSIBILITY OF SUCH DAMAGE.
+"""
+import torch
+import torch.nn as nn
+
+class conv3x3_f(nn.Module):
+    """3x3 convolution followed by a PReLU activation.
+
+    padding=1 preserves spatial size at stride=1; stride=2 halves it.
+    """
+    def __init__(self, in_channels, out_channels, kernel_size = 3,stride = 1):
+        super().__init__()
+        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=1 )
+        self.relu = nn.PReLU()
+    def forward(self,rec):
+        # rec: input feature map (NCHW).
+        y = self.relu(self.conv(rec))
+        return y
+
+class conv1x1_f(nn.Module):
+    """1x1 (pointwise) convolution followed by PReLU; used to fuse
+    concatenated feature branches without changing spatial size."""
+    def __init__(self, in_channels, out_channels, kernel_size = 1):
+        super().__init__()
+        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, padding=0)
+        self.relu = nn.PReLU()
+    def forward(self,input):
+        y = self.relu(self.conv(input))
+        return y
+    
+class ResidualBlock(nn.Module):    
+    """conv -> PReLU -> conv with an identity skip connection
+    (out = x + residual); channel count must be preserved for the add."""
+    def __init__(self, in_channels, out_channels, no_features, kernel_size = 3):
+        super().__init__()
+        self.conv1 = nn.Conv2d(in_channels, no_features, kernel_size, padding=1)
+        self.relu = nn.PReLU()
+        self.conv2 = nn.Conv2d(no_features, out_channels, kernel_size, padding=1)
+    def forward(self,x):
+        res = self.conv2(self.relu(self.conv1(x)))
+        # Skip connection is applied inside this block.
+        out = res + x
+        return out
+
+class ConditionalNet0(nn.Module):
+    def __init__(self, in_channels=1, out_channels=4, no_features=96, kernel_size = 3):
+        super().__init__()
+        self.convLuma = conv3x3_f(1,no_features,kernel_size,stride=2)
+        self.convRec = conv3x3_f(2,no_features,kernel_size,stride=1)
+        self.convPred = conv3x3_f(2,no_features,kernel_size,stride=1)
+        self.convBs = conv3x3_f(2,no_features,kernel_size,stride=1)
+        self.convQp = conv3x3_f(1,no_features,kernel_size,stride=1)
+        self.fuse = conv1x1_f(96*5, no_features)
+        self.transitionH = conv3x3_f(no_features, no_features,stride=2)
+        self.backbone = nn.ModuleList([ResidualBlock(no_features,no_features,no_features) for i in range(8)])
+        self.last_layer = nn.Sequential(nn.Conv2d(no_features,no_features,kernel_size,padding=1),
+                                        nn.PReLU(),
+                                        nn.Conv2d(no_features,out_channels*2,kernel_size,padding=1),
+                                        #nn.PixelShuffle(2),
+                                       )
+    def forward(self,input,rec,input0,input1,qp):
+        input2 = qp
+        input3 = torch.cat([self.convLuma(input),self.convRec(rec), self.convPred(input0), 
+                            self.convBs(input1), self.convQp(input2)], 1) 
+        
+        inputbackbone = self.transitionH(self.fuse(input3 ) ) 
+        
+        for i,backbone in enumerate(self.backbone):
+            outputbackbone = backbone(inputbackbone) 
+            inputbackbone = outputbackbone 
+         
+        x = self.last_layer(inputbackbone) # + rec
+        return x
\ No newline at end of file
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_models/models/myinterY.py b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_models/models/myinterY.py
new file mode 100644
index 0000000000000000000000000000000000000000..b2be3de5701cadb415b72b65801147d548ee8a03
--- /dev/null
+++ b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_models/models/myinterY.py
@@ -0,0 +1,108 @@
+"""
+/* The copyright in this software is being made available under the BSD
+* License, included below. This software may be subject to other third party
+* and contributor rights, including patent rights, and no such rights are
+* granted under this license.
+*
+* Copyright (c) 2010-2022, ITU/ISO/IEC
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+*
+*  * Redistributions of source code must retain the above copyright notice,
+*    this list of conditions and the following disclaimer.
+*  * Redistributions in binary form must reproduce the above copyright notice,
+*    this list of conditions and the following disclaimer in the documentation
+*    and/or other materials provided with the distribution.
+*  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+*    be used to endorse or promote products derived from this software without
+*    specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+* THE POSSIBILITY OF SUCH DAMAGE.
+"""
+
+import torch
+import torch.nn as nn
+
+class conv3x3_f(nn.Module):
+    """3x3 convolution followed by a PReLU activation.
+
+    padding=1 preserves spatial size at stride=1; stride=2 halves it.
+    """
+    def __init__(self, in_channels, out_channels, kernel_size = 3,stride = 1):
+        super().__init__()
+        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=1 )
+        self.relu = nn.PReLU()
+    def forward(self,rec):
+        # rec: input feature map (NCHW).
+        y = self.relu(self.conv(rec))
+        return y
+
+class conv1x1_f(nn.Module):
+    """1x1 (pointwise) convolution followed by PReLU; used to fuse
+    concatenated feature branches without changing spatial size."""
+    def __init__(self, in_channels, out_channels, kernel_size = 1):
+        super().__init__()
+        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, padding=0)
+        self.relu = nn.PReLU()
+    def forward(self,input):
+        y = self.relu(self.conv(input))
+        return y
+    
+class ResidualBlock2(nn.Module):    
+    """conv -> PReLU -> conv that returns ONLY the residual.
+
+    Unlike a classic residual block there is no skip add here -- the
+    caller (ConditionalNet1.forward) adds the result to its input.
+    """
+    def __init__(self, in_channels, out_channels, no_features, kernel_size = 3):
+        super().__init__()
+        self.conv1 = nn.Conv2d(in_channels, no_features, kernel_size, padding=1)
+        self.relu = nn.PReLU()
+        self.conv2 = nn.Conv2d(no_features, out_channels, kernel_size, padding=1)
+    def forward(self,x):
+        res = self.conv2(self.relu(self.conv1(x)))
+        return res
+
+class SpatialGate(nn.Module):
+    """Small conv -> PReLU -> conv network that produces a spatial
+    attention map; stride on the first conv lets its output resolution
+    match a down-sampled backbone feature map."""
+    def __init__(self, in_channels, out_channels, no_features, stride = 1, kernel_size = 3):
+        super().__init__()
+        self.conv1 = nn.Conv2d(in_channels, no_features, kernel_size, stride, padding=1)
+        self.relu = nn.PReLU()
+        self.conv2 = nn.Conv2d(no_features, out_channels, kernel_size, stride=1, padding=1)
+    def forward(self,x):
+        y = self.conv2(self.relu(self.conv1(x)))
+        return y  
+
+class ConditionalNet1(nn.Module):
+    """Luma in-loop filter with spatial attention.
+
+    Four single-channel branches (reconstruction, prediction, boundary
+    strength, broadcast QP) are embedded with 3x3 convs, fused by a
+    1x1 conv and down-sampled.  Eight residual blocks follow, each
+    modulated by a SpatialGate attention mask computed from the raw
+    inputs.  PixelShuffle restores full resolution and the
+    reconstruction is added back (residual learning).
+    in_channels is unused -- each branch is fixed to 1 channel.
+    """
+    def __init__(self, in_channels=1, out_channels=4, no_features=96, kernel_size = 3):
+        super().__init__()
+        self.convRec = conv3x3_f(1,no_features,kernel_size,stride=1)
+        self.convPred = conv3x3_f(1,no_features,kernel_size,stride=1)
+        self.convBs = conv3x3_f(1,no_features,kernel_size,stride=1)
+        self.convQp = conv3x3_f(1,no_features,kernel_size,stride=1)
+        self.fuse = conv1x1_f(4*no_features, no_features)
+        self.transitionH = conv3x3_f(no_features, no_features,stride=2)
+        self.backbone = nn.ModuleList([ResidualBlock2(no_features,no_features,no_features) for i in range(8)])
+        # One 1-channel attention mask per residual block; stride=2
+        # matches the transitionH down-sampling of the backbone.
+        self.mask = nn.ModuleList([SpatialGate(4, 1, 32, stride=2 )for i in range(8)]) 
+        self.last_layer = nn.Sequential(nn.Conv2d(no_features,no_features,kernel_size,padding=1),
+                                        nn.PReLU(),
+                                        nn.Conv2d(no_features,out_channels,kernel_size,padding=1),
+                                        nn.PixelShuffle(2),
+                                       )
+    def forward(self,rec,input,input0,qp):
+        # Broadcast the scalar-per-sample QP to a per-pixel plane;
+        # assumes qp is (N, 1) so it expands to rec's shape -- TODO confirm.
+        qp = torch.unsqueeze(torch.unsqueeze(qp, 2), 3)
+        input1 = qp.expand_as(rec) 
+        input2 = torch.cat([self.convRec(rec), self.convPred(input),  
+                            self.convBs(input0), self.convQp(input1)], 1)
+        
+        inputbackbone = self.transitionH(self.fuse(input2 ) )         
+        # Raw 4-channel input stack shared by every SpatialGate.
+        inputspatial = torch.cat([rec, input, input0, input1], 1) 
+        for backbone, mask in zip(self.backbone,self.mask):
+            outputbackbone = backbone(inputbackbone) 
+            spatialattention = mask(inputspatial)            
+            spatialattention = spatialattention.expand_as(outputbackbone) 
+            # Attention-weighted residual: r * (1 + a), then skip add.
+            y = outputbackbone * spatialattention + outputbackbone 
+            inputbackbone = inputbackbone + y           
+         
+        x = self.last_layer(inputbackbone) + rec
+        return x 
\ No newline at end of file
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_models/models/myinterY_ipb_noAtten_sadl_res.py b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_models/models/myinterY_ipb_noAtten_sadl_res.py
new file mode 100644
index 0000000000000000000000000000000000000000..75d8693863c41046b6270c04f28c4416dd76aa30
--- /dev/null
+++ b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_models/models/myinterY_ipb_noAtten_sadl_res.py
@@ -0,0 +1,104 @@
+"""
+/* The copyright in this software is being made available under the BSD
+* License, included below. This software may be subject to other third party
+* and contributor rights, including patent rights, and no such rights are
+* granted under this license.
+*
+* Copyright (c) 2010-2022, ITU/ISO/IEC
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+*
+*  * Redistributions of source code must retain the above copyright notice,
+*    this list of conditions and the following disclaimer.
+*  * Redistributions in binary form must reproduce the above copyright notice,
+*    this list of conditions and the following disclaimer in the documentation
+*    and/or other materials provided with the distribution.
+*  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+*    be used to endorse or promote products derived from this software without
+*    specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+* THE POSSIBILITY OF SUCH DAMAGE.
+"""
+
+import torch
+import torch.nn as nn
+
+class conv3x3_f(nn.Module):
+    """3x3 convolution followed by a PReLU activation.
+
+    padding=1 preserves spatial size at stride=1; stride=2 halves it.
+    """
+    def __init__(self, in_channels, out_channels, kernel_size = 3,stride = 1):
+        super().__init__()
+        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=1 )
+        self.relu = nn.PReLU()
+    def forward(self,rec):
+        # rec: input feature map (NCHW).
+        y = self.relu(self.conv(rec))
+        return y
+
+class conv1x1_f(nn.Module):
+    """1x1 (pointwise) convolution followed by PReLU; used to fuse
+    concatenated feature branches without changing spatial size."""
+    def __init__(self, in_channels, out_channels, kernel_size = 1):
+        super().__init__()
+        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, padding=0)
+        self.relu = nn.PReLU()
+    def forward(self,input):
+        y = self.relu(self.conv(input))
+        return y
+    
+class ResidualBlock2(nn.Module):    
+    """conv -> PReLU -> conv that returns ONLY the residual.
+
+    Unlike a classic residual block there is no skip add here -- the
+    caller (ConditionalNet1.forward) adds the result to its input.
+    """
+    def __init__(self, in_channels, out_channels, no_features, kernel_size = 3):
+        super().__init__()
+        self.conv1 = nn.Conv2d(in_channels, no_features, kernel_size, padding=1)
+        self.relu = nn.PReLU()
+        self.conv2 = nn.Conv2d(no_features, out_channels, kernel_size, padding=1)
+    def forward(self,x):
+        res = self.conv2(self.relu(self.conv1(x)))
+        return res
+
+class SpatialGate(nn.Module):
+    """Small conv -> PReLU -> conv network producing a spatial
+    attention map.  NOTE(review): defined here but not used by this
+    file's ConditionalNet1 (the no-attention variant); kept so the
+    module mirrors the attention version of the model."""
+    def __init__(self, in_channels, out_channels, no_features, stride = 1, kernel_size = 3):
+        super().__init__()
+        self.conv1 = nn.Conv2d(in_channels, no_features, kernel_size, stride, padding=1)
+        self.relu = nn.PReLU()
+        self.conv2 = nn.Conv2d(no_features, out_channels, kernel_size, stride=1, padding=1)
+    def forward(self,x):
+        y = self.conv2(self.relu(self.conv1(x)))
+        return y  
+
+class ConditionalNet1(nn.Module):
+    """Luma IPB in-loop filter, no-attention SADL-friendly variant.
+
+    Five single-channel branches (reconstruction, prediction, boundary
+    strength, IPB map, QP plane) are embedded with 3x3 convs, fused by
+    a 1x1 conv and down-sampled, then run through 8 residual blocks
+    with external skip adds.  Unlike the attention variant, there are
+    no SpatialGate masks, PixelShuffle is commented out, and the
+    reconstruction is NOT added back here ('#+ rec') -- both are
+    presumably handled outside this model.
+    in_channels is unused -- each branch is fixed to 1 channel.
+    """
+    def __init__(self, in_channels=1, out_channels=4, no_features=96, kernel_size = 3):
+        super().__init__()
+        self.convRec = conv3x3_f(1,no_features,kernel_size,stride=1)
+        self.convPred = conv3x3_f(1,no_features,kernel_size,stride=1)
+        self.convBs = conv3x3_f(1,no_features,kernel_size,stride=1)
+        self.convIpb = conv3x3_f(1,no_features,kernel_size,stride=1)
+        self.convQp = conv3x3_f(1,no_features,kernel_size,stride=1)
+        self.fuse = conv1x1_f(5*no_features, no_features)
+        self.transitionH = conv3x3_f(no_features, no_features,stride=2)
+        self.backbone = nn.ModuleList([ResidualBlock2(no_features,no_features,no_features) for i in range(8)])
+        self.last_layer = nn.Sequential(nn.Conv2d(no_features,no_features,kernel_size,padding=1),
+                                        nn.PReLU(),
+                                        nn.Conv2d(no_features,out_channels,kernel_size,padding=1),
+                                        #nn.PixelShuffle(2),
+                                       )
+
+    def forward(self,rec,input,input0,input_ipb,qp):
+        # qp arrives as a full plane here (no broadcast, unlike the
+        # attention variant) -- TODO confirm against the caller.
+        input1 = qp
+        input2 = torch.cat([self.convRec(rec), self.convPred(input),  
+                            self.convBs(input0), self.convIpb(input_ipb), self.convQp(input1)], 1)
+        
+        inputbackbone = self.transitionH(self.fuse(input2 ) )         
+        for backbone in self.backbone:
+            outputbackbone = backbone(inputbackbone)          
+            # External skip add (ResidualBlock2 returns residual only).
+            inputbackbone = inputbackbone + outputbackbone           
+         
+        x = self.last_layer(inputbackbone) #+ rec
+        return x 
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_models/sadl_q_str.txt b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_models/sadl_q_str.txt
new file mode 100644
index 0000000000000000000000000000000000000000..bd7c7f7850b529ec5fb88058c3cf455a560be0f3
--- /dev/null
+++ b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/create_models/sadl_q_str.txt
@@ -0,0 +1 @@
+0 13   1 13   2 13   3 13   4 13   5 13   6 0   7 13   9 13   11 13   12 0   13 13   15 13   17 13   18 0   19 13   21 13   23 13   24 0   25 13   27 13   29 13   30 0   31 13   33 13   35 0   37 13   38 0   39 13   41 13   43 13   44 0   45 13   47 13   49 13   50 0   51 13   53 13   55 13   56 0   57 13   60 13   61 0   62 13   64 13   66 13   67 0   68 13   71 13   72 0   73 13   75 13   77 13   78 0   79 13   82 13   83 0   84 13   86 13   88 13   89 0   90 13   93 13   94 0   95 13   97 13   99 13   100 0   101 13   104 13   105 0   106 13   108 13   110 13   111 0   112 13   115 13   116 0   117 13   119 13   121 13   122 0   123 13   126 13   127 0   128 13   130 13   132 13   133 0   134 13   137 13   138 0   139 13   141 13   143 13   144 0   145 13   
\ No newline at end of file
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage0_prepare_yuv/Instruction_prepareYUV.md b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage0_prepare_yuv/Instruction_prepareYUV.md
new file mode 100644
index 0000000000000000000000000000000000000000..499ea5f47a5f1ea88f35eb264ca7d9461de1657d
--- /dev/null
+++ b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage0_prepare_yuv/Instruction_prepareYUV.md
@@ -0,0 +1,100 @@
+
+Download ffmpeg:
+================
+
+wget https://johnvansickle.com/ffmpeg/releases/ffmpeg-release-amd64-static.tar.xz
+tar xvf ffmpeg-release-amd64-static.tar.xz
+cd ffmpeg-5.1.1-amd64-static
+pwd
+
+--> This is now your path 'ffmpeg_path' in png2yuv_subsampled.py and png2yuv_fullres.py
+--> It is also your path 'executablefile' in mp42yuv_BVI-DVC.py
+(Mine: /proj/video_data4/videosim/eliudux/JVET_NN_EE/ab-ee1-1_5/training_EE115/stage0_prepare_yuv/ffmpeg-5.1.1-amd64-static)
+
+
+Prepare subsampled yuv-files for DIV2K
+=====================================
+
+Go to https://data.vision.ee.ethz.ch/cvl/DIV2K/
+Download half resolution images from http://data.vision.ee.ethz.ch/cvl/DIV2K/DIV2K_train_LR_bicubic_X2.zip
+unzip DIV2K_train_LR_bicubic_X2.zip
+cd DIV2K_train_LR_bicubic/X2
+pwd
+--> This is now your path 'srcdir' in png2yuv_subsampled.py
+(Mine: /proj/video_no_backup/videosim/ejacstr/extract/DIV2K_test_yuv/DIV2K_train_LR_bicubic/X2)
+cd ..
+mkdir X2_crop
+cd X2_crop
+pwd
+--> This is now your path 'cropimgdir' in png2yuv_subsampled.py
+(Mine: /proj/video_no_backup/videosim/ejacstr/extract/DIV2K_test_yuv/X2_crop)
+cd ..
+mkdir X2_yuv
+cd X2_yuv
+pwd
+--> This is now your path 'destdir' in png2yuv_subsampled.py and 'path_to_original_DIV2K_X2_yuvs' in training scripts.
+
+After changing all the paths in png2yuv_subsampled.py according to the above, run png2yuv_subsampled.py using
+python3 png2yuv_subsampled.py
+
+The subsampled versions of DIV2K images will afterwards reside in 'destdir' of the form
+
+0084x2_1016x680_25fps_8bit_420.yuv
+signifying image 84, downsampled a factor of two (x2), resolution 1016x680, 25 fps, 8bit 420.
+
+To double check, the md5sum of 0261x2_1016x672_25fps_8bit_420.yuv is 4a59410da708867930f01134c4337376.
+
+
+Prepare full resolution yuv-files for DIV2K:
+===========================================
+
+Go to https://data.vision.ee.ethz.ch/cvl/DIV2K/
+Download full resolution images from http://data.vision.ee.ethz.ch/cvl/DIV2K/DIV2K_train_HR.zip 
+unzip DIV2K_train_HR.zip
+cd DIV2K_train_HR
+pwd
+--> This is now your path 'srcdir' in png2yuv_fullres.py
+cd ..
+mkdir DIV2K_train_HR_crop
+cd DIV2K_train_HR_crop
+pwd
+--> This is now your path 'cropimgdir' in png2yuv_fullres.py
+cd ..
+mkdir DIV2K_train_HR_yuv
+cd DIV2K_train_HR_yuv
+pwd
+--> This is now your path 'destdir' in png2yuv_fullres.py and 'path_to_original_DIV2K_yuvs' in training scripts.
+
+After changing all the paths in png2yuv_fullres.py according to the above, run png2yuv_fullres.py using
+python3 png2yuv_fullres.py
+
+The full resolution yuv versions of the DIV2K images will afterwards reside in 'destdir' of the form
+
+0461_1440x2040_25fps_8bit_420.yuv
+signifying image 461, no downsampling (lack of x2), resolution 1440x2040, 25 fps, 8bit 420.
+
+To double check, the md5sum of 0298_2040x1224_25fps_8bit_420.yuv is b980c46e03342bb25f6f6a0f266cdd9d.
+
+
+Prepare yuv-files for BVI-DVC:
+==============================
+
+Download the BVI-DVC dataset as per the instructions in the NNVC-CTC.
+--> The path where you have downloaded all the mp4 files is now your 'srcdir' in mp42yuv_BVI-DVC.py
+
+Find a suitable path to put your .yuv videos
+--> This is now your 'destdir' in mp42yuv_BVI-DVC.py and 'path_to_original_BVI_DVC_yuvs' in training scripts.
+
+--> Change the path 'executablefile' in mp42yuv_BVI-DVC.py to the ffmpeg path you created above.
+
+Run mp42yuv_BVI-DVC.py four times for the different sizes A (3840x2176), B (1920x1088), C (960x544) and D (480x272):
+
+python3 mp42yuv_BVI-DVC.py A
+python3 mp42yuv_BVI-DVC.py B
+python3 mp42yuv_BVI-DVC.py C
+python3 mp42yuv_BVI-DVC.py D
+
+Note that each of these scripts starts 191 simultaneous decodings. If your system cannot cope with that many simultaneous decodings, use something like time.sleep(20) to wait 20 seconds after starting a decoding instead of doing it instantly.  
+
+To double check, the md5sum of DAdvertisingMassagesBangkokVidevo_480x272_25fps_10bit_420.yuv is 01ac6cf75932b103a96555c0dcc0bd9b.
+
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage0_prepare_yuv/csv/training-data.csv b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage0_prepare_yuv/csv/training-data.csv
new file mode 100644
index 0000000000000000000000000000000000000000..281cfd1f97a20b6449367d8c45b821ba924afa68
--- /dev/null
+++ b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage0_prepare_yuv/csv/training-data.csv
@@ -0,0 +1,867 @@
+url,Sequence,md5sum
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/dji/IceAerial_1920x1080_30fps_420_10bit.zip,IceAerial_1920x1080_30fps_420_10bit.yuv,ddf98a14de95c4d3549ed176749e1865
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/dji/IceAerial_3840x2160_30fps_420_10bit.zip,IceAerial_3840x2160_30fps_420_10bit.yuv,b6d6b978fff23a7dbc910bf951c53069
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/dji/IceRiver_1920x1080_30fps_420_10bit.zip,IceRiver_1920x1080_30fps_420_10bit.yuv,97c972fbed73e1d4644b1303b36acb44
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/dji/IceRiver_3840x2160_30fps_420_10bit.zip,IceRiver_3840x2160_30fps_420_10bit.yuv,a6c41da1fb2e18c0f0f6003bb01de25b
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/dji/IceRock_1920x1080_30fps_420_10bit.zip,IceRock_1920x1080_30fps_420_10bit.yuv,f034ad1500d2952cd724f13aa7aa7df8
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/dji/IceRock_3840x2160_30fps_420_10bit.zip,IceRock_3840x2160_30fps_420_10bit.yuv,1354f65b737a59345ad1524c93a4a343
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/dji/IceRock2_1920x1080_30fps_420_10bit.zip,IceRock2_1920x1080_30fps_420_10bit.yuv,1d7470e54eb28e4c374a08bde81fe7ec
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/dji/IceRock2_3840x2160_30fps_420_10bit.zip,IceRock2_3840x2160_30fps_420_10bit.yuv,ed18f5adc265a8552fad269cf7a2333a
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/Geneva_201606/1080p_Netflix/Netflix_Aerial_1920x1080_60fps_10bit_420.yuv.zip,Netflix_Aerial_1920x1080_60fps_10bit_420.yuv,52ccc9b13eb550e9879783e9603f5c88
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/Geneva_201606/1080p_Netflix/Netflix_BarScene_1920x1080_60fps_10bit_420.yuv.zip,Netflix_BarScene_1920x1080_60fps_10bit_420.yuv,89b3195543e8e9a4014d38d425d4c9a3
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/Geneva_201606/1080p_Netflix/Netflix_Crosswalk_1920x1080_60fps_10bit_420.yuv.zip,Netflix_Crosswalk_1920x1080_60fps_10bit_420.yuv,05b39d5afc0d6a6e8e2fcc08cb2ea441
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/Geneva_201606/1080p_Netflix/Netflix_DrivingPOV_1920x1080_60fps_10bit_420.yuv.zip,Netflix_DrivingPOV_1920x1080_60fps_10bit_420.yuv,71aa6ab712e2152c1d4142db8350ca34
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/Geneva_201606/1080p_Netflix/Netflix_PierSeaside_1920x1080_60fps_10bit_420.yuv.zip,Netflix_PierSeaside_1920x1080_60fps_10bit_420.yuv,434781b00cd8698cfe7ebfc6c017a5f1
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/Geneva_201606/1080p_Netflix/Netflix_SquareAndTimelapse_1920x1080_60fps_10bit_420.yuv.zip,Netflix_SquareAndTimelapse_1920x1080_60fps_10bit_420.yuv,a0a46f38a4dc0a691e00e9e9e94d0a42
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/Geneva_201606/1080p_Netflix/Netflix_WindAndNature_1920x1080_60fps_10bit_420.yuv.zip,Netflix_WindAndNature_1920x1080_60fps_10bit_420.yuv,9f229c47ec4141724a875de744f89531
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/Geneva_201606/1080p/Fountains_1920x1080_30fps_10bit_420.zip,Fountains_1920x1080_30fps_10bit_420.yuv,340d57144861ac26541c02ef970fbf54
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/Geneva_201606/1080p/FreeSardines1_1920x1080_120fps_10bit_420.zip,FreeSardines1_1920x1080_120fps_10bit_420.yuv,c043e8f72348a63cf1fec2e8ca6eaeae
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/Geneva_201606/1080p/Metro_1920x1080_60fps_10bit_420.zip,Metro_1920x1080_60fps_10bit_420.yuv,1030673bd52a31705ccbba36a2959f4f
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/Geneva_201606/1080p/Rowing2_1920x1080_120fps_10bit_420.zip,Rowing2_1920x1080_120fps_10bit_420.yuv,34cd44fbee06812a4b334540671995c8
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/Geneva_201606/1080p/Runners_1920x1080_30fps_10bit_420.zip,Runners_1920x1080_30fps_10bit_420.yuv,6e4e93faf1a806b0b0930dc72b716301
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/Geneva_201606/1080p/RushHour_1920x1080_30fps_10bit_420.zip,RushHour_1920x1080_30fps_10bit_420.yuv,af80670d3fd880da7120b9964e936d55
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/Geneva_201606/1080p/SakuraGate_1920x1080_60fps_8bit.zip,SakuraGate_1920x1080_60_8bit.yuv,dd25a0252ff25130597adc775f47e2ed
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/huawei/Affine/AffineTestSeqAndCfg_HW.zip,ShieldsPart_1920x1080_50.yuv,17bf12a47d3d06bc8aa6039f53f40da3
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/huawei/Affine/AffineTestSeqAndCfg_HW.zip,JetsPart_1280x720_25.yuv,49336b6a13ca328feed0431e30006225
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/huawei/Affine/AffineTestSeqAndCfg_HW.zip,TractorPart_1920x1080_25.yuv,be503ae04289c380b02f98f0d272afc1
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/huawei/Affine/AffineTestSeqAndCfg_HW.zip,BlueSkyPart_1920x1080_25.yuv,c8b800542f71239340ff33412b0b49f1
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/huawei/Affine/AffineTestSeqAndCfg_HW.zip,StationKtaPart_1920x1080_25.yuv,dcba35f271c826f767ce78cbf16a87db
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/huawei/Affine/AffineTestSeqAndCfg_HW.zip,SpincalendarPart_1280x720_50.yuv,e48e274b042610b8e923ca4d02c7c3e1
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/JVET-F_drone/BeachMountain_3840x2160_30fps_420_10bit.zip,BeachMountain_3840x2160_30fps_420_10bit.yuv,a68db714004c4821cb933a780e64a7fa
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/JVET-F_drone/DroneTakeOff_3840x2160_30fps_420_10bit.zip,DroneTakeOff_3840x2160_30fps_420_10bit.yuv,b787e95fb3d3e066f4717ccf8875f972
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/JVET-F_drone/MountainBay_3840x2160_30fps_420_10bit.zip,MountainBay_3840x2160_30fps_420_10bit.yuv,8a70de944a56d9409f2a8c6684fc030c
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/JVET-G0096/BeachMountain/YUV/BeachMountain2_3840x2160_30fps_420_10bit.zip,BeachMountain2_3840x2160_30fps_420_10bit.yuv,d5f013f3b24934b3d0bd66bdedad6e80
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/JVET-G0096/MountainBay/YUV/MountainBay2_3840x2160_30fps_420_10bit.zip,MountainBay2_3840x2160_30fps_420_10bit.yuv,f27b6b70244fb083baac546958fcf696
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/JVET-J0060/BuildingEntrance_1920x1080_50_10b_420_BT.709.zip,BuildingEntrance_1920x1080_50_10b_420_BT.709.yuv,b379590c82357bb8dd7a1a06460820f0
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/JVET-J0060/ParkLake_1920x1080_50_10b_420_BT.709.zip,ParkLake_1920x1080_50_10b_420_BT.709.yuv,3232eebf4ddbcd7eea941525e968522f
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/JVET-K0294/TencentConcert1_1920x1080_25_8bit_420.zip,TencentConcert1_1920x1080_25_8bit_420.yuv,8e507327166081cd723e3315554286fa
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/netflix/Chimera/Netflix_Aerial_4096x2160_60fps_10bit_420.zip,Netflix_Aerial_4096x2160_60fps_10bit_420.yuv,3fb6042cce1547edd1605b6f9466f219
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/netflix/Chimera/Netflix_BarScene_4096x2160_60fps_10bit_420.zip,Netflix_BarScene_4096x2160_60fps_10bit_420.yuv,e954a42327971c1beafebea64bf08b68
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/netflix/Chimera/Netflix_Dancers_4096x2160_60fps_10bit_420.zip,Netflix_Dancers_4096x2160_60fps_10bit_420.yuv,e440efb517c01d27ecf072c29e31197b
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/netflix/Chimera/Netflix_DinnerScene_4096x2160_60fps_10bit_420.zip,Netflix_DinnerScene_4096x2160_60fps_10bit_420.yuv,4cf8bb5eb4610ab20266332a638c3c55
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/netflix/Chimera/Netflix_DrivingPOV_4096x2160_60fps_10bit_420.zip,Netflix_DrivingPOV_4096x2160_60fps_10bit_420.yuv,62293c0fdf219e2eecd38aa1f02de29f
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/netflix/Chimera/Netflix_PierSeaside_4096x2160_60fps_10bit_420.zip,Netflix_PierSeaside_4096x2160_60fps_10bit_420.yuv,68202d4a0427a53b406a4267b7f36fe6
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/netflix/Chimera/Netflix_RollerCoaster_4096x2160_60fps_10bit_420.zip,Netflix_RollerCoaster_4096x2160_60fps_10bit_420.yuv,8e99b7f486025ce8fccda4f3e818a186
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/netflix/Chimera/Netflix_ToddlerFountain_4096x2160_60fps_10bit_420.zip,Netflix_ToddlerFountain_4096x2160_60fps_10bit_420.yuv,31eb61104464922d91f42e5d5b49097e
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/netflix/Chimera/Netflix_WindAndNature_4096x2160_60fps_10bit_420.zip,Netflix_WindAndNature_4096x2160_60fps_10bit_420.yuv,91679bb1caca49ce4b77a50491dec9a9
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/netflix/ElFuente/CfEVersions/Netflix_Timelapse_long_1920x1080_60fps_10bit_420_CfE.zip,Netflix_Timelapse_long_1920x1080_60fps_10bit_420_CfE.yuv,3d0c4a356e092b401032a8a0a6b2b48e
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/netflix/ElFuente/LongVersions/Netflix_TimeLapse_long_4096x2160_60fps_10bit_420.zip,Netflix_TimeLapse_long_4096x2160_60fps_10bit_420.yuv,64c06dbacf66985142d6b196abfe0e2f
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/netflix/ElFuente/Netflix_Boat_4096x2160_60fps_10bit_420.zip,Netflix_Boat_4096x2160_60fps_10bit_420.yuv,26c4cf9a168fa186d2bf906e01200af3
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/netflix/ElFuente/Netflix_BoxingPractice_4096x2160_60fps_10bit_420.zip,Netflix_BoxingPractice_4096x2160_60fps_10bit_420.yuv,22c4308457c56f8ae0d07835a40bba79
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/netflix/ElFuente/Netflix_Crosswalk_4096x2160_60fps_10bit_420.zip,Netflix_Crosswalk_4096x2160_60fps_10bit_420.yuv,79125e4db2035e9576f992f3a69e377b
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/netflix/ElFuente/Netflix_Narrator_4096x2160_60fps_10bit_420.zip,Netflix_Narrator_4096x2160_60fps_10bit_420.yuv,1d83eca46991c60c79215e740e40897e
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/netflix/ElFuente/Netflix_SquareAndTimelapse_4096x2160_60fps_10bit_420.zip,Netflix_SquareAndTimelapse_4096x2160_60fps_10bit_420.yuv,a7ffe67fde0a41b313c7cc9317195d44
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/netflix/ElFuente/Netflix_TunnelFlag_4096x2160_60fps_10bit_420.zip,Netflix_TunnelFlag_4096x2160_60fps_10bit_420.yuv,7f13b01feca245e423c70e2c93aeca38
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/sjtu/UHD_YUV_420_8bit/BundNightscape.zip,BundNightscape.yuv,0a131a1e2ed63a050524221393f6240b
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/sjtu/UHD_YUV_420_8bit/ConstructionField.zip,ConstructionField.yuv,272eef3fc3e6e63a71ff2175a0042ae4
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/sjtu/UHD_YUV_420_8bit/Fountains.zip,Fountains.yuv,662ce078ab5bcfa068b6b5f45f5cd930
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/sjtu/UHD_YUV_420_8bit/Library.zip,Library.yuv,02961d0b2513797a27a388d5c7136f12
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/sjtu/UHD_YUV_420_8bit/Marathon.zip,Marathon.yuv,3bc2bcbb48f9555488516a60c889796c
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/sjtu/UHD_YUV_420_8bit/ResidentialBuilding.zip,ResidentialBuilding.yuv,e1b3a496b20a88fa88f7cd2e0261c68a
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/sjtu/UHD_YUV_420_8bit/Runners.zip,Runners.yuv,3dcd6148d13e4fcf1b65a999fecd248a
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/sjtu/UHD_YUV_420_8bit/RushHour.zip,RushHour.yuv,31aa90c9e1e0d21fdc99b139f4f11431
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/sjtu/UHD_YUV_420_8bit/Scarf.zip,Scarf.yuv,38b660a01a4f75187d5c2c999c2ec3bc
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/sjtu/UHD_YUV_420_8bit/TallBuildings.zip,TallBuildings.yuv,93326040e8e591a40847c7440ab0ca19
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/sjtu/UHD_YUV_420_8bit/TrafficAndBuilding.zip,TrafficAndBuilding.yuv,8a0fa155c6c1e68ea9581021c2f56c2c
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/sjtu/UHD_YUV_420_8bit/TrafficFlow.zip,TrafficFlow.yuv,c4c179b95518aa214a8e88a80f1d6510
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/sjtu/UHD_YUV_420_8bit/TreeShade.zip,TreeShade.yuv,6a35af02b3d5a3949bee2372c69c3aa0
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/sjtu/UHD_YUV_420_8bit/Wood.zip,Wood.yuv,d7c6e0954971b4d08a4de724f06f5e44
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/Geneva_201606/Surveillance/BuildingHall_3840x2160_50fps_10bit_420.zip,BuildingHall_3840x2160_50fps_10bit_420.yuv,5ea0e6461feba2f0a39e0b399f3a9dea
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/Geneva_201606/Surveillance/CrossRoad1_3840x2160_50fps_10bit_420.zip,CrossRoad1_3840x2160_50fps_10bit_420.yuv,1c4fcfdfb1bf39fe0b35085668e8db3f
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/Geneva_201606/Surveillance/CrossRoad2_3840x2160_50fps_10bit_420.zip,CrossRoad2_3840x2160_50fps_10bit_420.yuv,76f98e7c3b4f2e45a72fcac967ba143c
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/Geneva_201606/Surveillance/CrossRoad3_3840x2160_50fps_10bit_420.zip,CrossRoad3_3840x2160_50fps_10bit_420.yuv,b2966d415372aa1e2236a708b053e6d7
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/Geneva_201606/Surveillance/DinningHall2_3840x2160_50fps_10bit_420.zip,DinningHall2_3840x2160_50fps_10bit_420.yuv,b8981a9c23fbddea3d1f8a2b0e9f912c
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/Geneva_201606/Surveillance/ResidentialGate1_3840x2160_50fps_10bit_420.zip,ResidentialGate1_3840x2160_50fps_10bit_420.yuv,7727d93b14de1257d10fb97031cbd83a
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/Geneva_201606/Surveillance/ParkLake_3840x2160_50fps_10bit_420.zip,ParkLake_3840x2160_50fps_10bit_420.yuv,e30fa7ec4757dd3adcb3c26d0643c238
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/candidates/TUT/RaceNight_3840x2160_50fps_10bit.zip,RaceNight_3840x2160_50fps_10bit.yuv,c06ed896df6e7dd90b4164df39652d50
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/testset/Drums_3840x2160_100fps_10bit_420_jvet.zip,Drums_3840x2160_100fps_10bit_420_jvet.yuv,c21030a629dd40ad5142de0e27f6284b
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/testset/ToddlerFountain_4096x2160_60fps_10bit_420_jvet.zip,ToddlerFountain_4096x2160_60fps_10bit_420_jvet.yuv,9aaf6e6a55488717513d15a003898523
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/verificationtests/candidates/sdr/hd/Meridian_1920x1080_30fps_8bit_420pf.zip,Meridian_1920x1080_30fps_8bit_420pf.yuv,514bdb1932c11352f024edc63e524d1d
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/verificationtests/candidates/sdr/hd/Rowing_1920x1080_60fps_8bit_420pf.zip,Rowing_1920x1080_60fps_8bit_420pf.yuv,77534c15d75ef495b3f2edb809f2e47b
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/verificationtests/candidates/sdr/hd/Show_1920x1080_25fps_8bit_420pf.zip,Show_1920x1080_25fps_8bit_420pf.yuv,4ea64ce60a5642f41a18276050a62aa1
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/verificationtests/candidates/sdr/hd/Stem_1920x1080_25fps_8bit_420pf.zip,Stem_1920x1080_25fps_8bit_420pf.yuv,9e17f565a8c1beb46a5bc02e680c64ff
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/verificationtests/candidates/sdr/hd/Waterfall_1920x1080_60fps_8bit_420pf.zip,Waterfall_1920x1080_60fps_8bit_420pf.yuv,a463beab434c49031ad75037d1769d41
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/verificationtests/candidates/sdr/hd/Witcher1_1920x1080_30fps_8bit_420pf.zip,Witcher1_1920x1080_30fps_8bit_420pf.yuv,f099b22d6bdf479d0c7d32e821255515
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/verificationtests/candidates/sdr/uhd/BodeMuseum_3840x2160_60fps_10bit_420.zip,BodeMuseum_3840x2160_60fps_10bit_420.yuv,b6a57adf4ad8d992b8c50a2eea5e501f
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/verificationtests/candidates/sdr/uhd/NeptuneFountain2_3840x2160_60fps_10bit_420.zip,NeptuneFountain2_3840x2160_60fps_10bit_420.yuv,a3faa2e56862ebbdfc1259721ff880d5
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/verificationtests/candidates/sdr/uhd/OberbaumSpree_3840x2160_60fps_10bit_420.zip,OberbaumSpree_3840x2160_60fps_10bit_420.yuv,6975b81c9e63c92b3bf4223796102da1
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/verificationtests/candidates/sdr/uhd/QuadrigaTree_3840x2160_60fps_10bit_420.zip,QuadrigaTree_3840x2160_60fps_10bit_420.yuv,6a5f48ac93ed3c210b6d05dbc69a07b2
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/verificationtests/candidates/sdr/uhd/SubwayTree_3840x2160_60fps_10bit_420.zip,SubwayTree_3840x2160_60fps_10bit_420.yuv,bf3b6fdcf7545e2bc729be9b8d3d32e7
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/verificationtests/candidates/sdr/uhd/TallBuildings_3840x2160_30fps_10bit_420pf.zip,TallBuildings_3840x2160_30fps_10bit_420pf.yuv,9a0a3f261d004fa86754751c82fb8b47
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/verificationtests/candidates/sdr/uhd/TiergartenParkway_3840x2160_60fps_10bit_420.zip,TiergartenParkway_3840x2160_60fps_10bit_420.yuv,cc17b64e1fb93879c0873444ca13e290
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/verificationtests/vtsequences/sdr-uhd/DrivingPOV3_3840x2160_60fps_10bit_420pf.zip,DrivingPOV3_3840x2160_60fps_10bit_420pf.yuv,e81b65724c4235128b2749ccb3b0fb4a
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/verificationtests/vtsequences/sdr-uhd/Marathon2_3840x2160_30fps_10bit_420pf.zip,Marathon2_3840x2160_30fps_10bit_420pf.yuv,c065dfb87be3b2e2ab0ce35094fd4eb4
+ftp://jvet@ftp.ient.rwth-aachen.de/ahg/verificationtests/vtsequences/sdr-uhd/NeptuneFountain3_3840x2160_60fps_10bit_420.zip,NeptuneFountain3_3840x2160_60fps_10bit_420.yuv,88fd87ea57df4a36200946025e8618aa
+ftp://jvet@ftp.ient.rwth-aachen.de/ce/JVET-K1031_Deblocking/RedKayak/red_kayak_1-300_1920x1080_30Hz_8bit_420.zip,red_kayak_1-300_1920x1080_30Hz_8bit_420.yuv,33b276220e7ea308d3fdf98e5fb5d491
+ftp://jvet@ftp.ient.rwth-aachen.de/ce/JVET-K1031_Deblocking/RedKayak/red_kayak_1920x1080_30p_420.zip,red_kayak_1920x1080_30p_420.yuv,a1f3075f6bdfc09e4544891e0ecd54ad
+ftp://jvet@ftp.ient.rwth-aachen.de/ce/JVET-O2026_Transforms/DIV2K_yuv420_10b_best100.zip,DIV2K_yuv420_10b_best100.yuv,4710dd5789983127840ae0c9ff49692b
+ftp://jvet@ftp.ient.rwth-aachen.de/ce/JVET-P2025_CCALF/RedKayak/red_kayak_1920x1080_29.97Hz_8bit_420.zip,red_kayak_1920x1080_29.97Hz_8bit_420.yuv,5aa0604dd7a1c30763fcd08f58af85df
+ftp://jvet@ftp.ient.rwth-aachen.de/jvet-cfp/jvet-cfe/SDR/BuildingHall1_3840x2160_50fps_10bit_420.zip,BuildingHall1_3840x2160_50fps_10bit_420.yuv,836a5a0558b24e8dde6b9a256e7aa468
+ftp://jvet@ftp.ient.rwth-aachen.de/jvet-cfp/jvet-cfe/SDR/Crosswalk1_4096x2160_60fps_10bit_420.zip,Crosswalk1_4096x2160_60fps_10bit_420.yuv,978a5dea90fe9125f6bce42aade55b61
+ftp://jvet@ftp.ient.rwth-aachen.de/jvet-cfp/SDR/BuildingHall2_3840x2160_50fps_10bit_420.zip,BuildingHall2_3840x2160_50fps_10bit_420.yuv,2ddae4186a3ef0fa6dae5e340e50ce49
+ftp://jvet@ftp.ient.rwth-aachen.de/jvet-cfp/SDR/RollerCoaster2_3840x2160_60fps_10bit_420.zip,RollerCoaster2_3840x2160_60fps_10bit_420.yuv,9b980d5b960215e3a09347dc2f6ae5b9
+ftp://jvet@ftp.ient.rwth-aachen.de/jvet-cfp/SDR/ToddlerFountain2_3840x2160_60fps_10bit_420.zip,ToddlerFountain2_3840x2160_60fps_10bit_420.yuv,66638fe3ba0446af0784f8170fb5d353
+file://BVI-DVC.zip,AAdvertisingMassagesBangkokVidevo_3840x2176_25fps_10bit_420.mp4,3be35ce524f7d4864a97551b359ce792
+file://BVI-DVC.zip,AAmericanFootballS2Harmonics_3840x2176_60fps_10bit_420.mp4,778f440724ec55351fdcbee2a2d94192
+file://BVI-DVC.zip,AAmericanFootballS3Harmonics_3840x2176_60fps_10bit_420.mp4,35cab64a250135f760952634e5f355e1
+file://BVI-DVC.zip,AAmericanFootballS4Harmonics_3840x2176_60fps_10bit_420.mp4,f72507e5359ae944798b1e15661535d5
+file://BVI-DVC.zip,AAnimalsS11Harmonics_3840x2176_60fps_10bit_420.mp4,800a511f39721f33b1f60240714daed2
+file://BVI-DVC.zip,AAnimalsS1Harmonics_3840x2176_60fps_10bit_420.mp4,d5b16c123653ada9256b4fd90cf0a826
+file://BVI-DVC.zip,ABangkokMarketVidevo_3840x2176_25fps_10bit_420.mp4,57c5f3e3b5d59ff3cd74d18862288624
+file://BVI-DVC.zip,ABasketballGoalScoredS1Videvo_3840x2176_25fps_10bit_420.mp4,3c3b4acc23c757895847fe1e0447b3e6
+file://BVI-DVC.zip,ABasketballGoalScoredS2Videvo_3840x2176_25fps_10bit_420.mp4,2125fd2b9418941bef8a9425dc5fc07d
+file://BVI-DVC.zip,ABasketballS1YonseiUniversity_3840x2176_30fps_10bit_420.mp4,c60ec1d45f323606583627f0653f1576
+file://BVI-DVC.zip,ABasketballS2YonseiUniversity_3840x2176_30fps_10bit_420.mp4,ac252849e626181e073cd0ca4018e49e
+file://BVI-DVC.zip,ABasketballS3YonseiUniversity_3840x2176_30fps_10bit_420.mp4,9ca02b1000f41625125ffb30bc568d31
+file://BVI-DVC.zip,ABoatsChaoPhrayaRiverVidevo_3840x2176_23fps_10bit_420.mp4,25bef7dc222c184ed82666a45e472eab
+file://BVI-DVC.zip,ABobbleheadBVIHFR_3840x2176_120fps_10bit_420.mp4,bba95dde31c25e1d9c630ad401fe3aa7
+file://BVI-DVC.zip,ABookcaseBVITexture_3840x2176_120fps_10bit_420.mp4,433b48f18cfe949084e6531f5fcfb690
+file://BVI-DVC.zip,ABoxingPracticeHarmonics_3840x2176_60fps_10bit_420.mp4,8ecc48f2f9dc478526542b612169c89f
+file://BVI-DVC.zip,ABricksBushesStaticBVITexture_3840x2176_120fps_10bit_420.mp4,22c91e75e59db0f0c5287d93c7333a97
+file://BVI-DVC.zip,ABricksLeavesBVITexture_3840x2176_120fps_10bit_420.mp4,24038e274a7ec48061a330c2cf8cc6c5
+file://BVI-DVC.zip,ABricksTiltingBVITexture_3840x2176_120fps_10bit_420.mp4,4bc1ced4955a728239be3331add7644c
+file://BVI-DVC.zip,ABubblesPitcherS1BVITexture_3840x2176_120fps_10bit_420.mp4,1a406315b337a803315ccadce98c89a6
+file://BVI-DVC.zip,ABuildingRoofS1IRIS_3840x2176_24fps_10bit_420.mp4,c178e3296811ef2ea8345df5fbfbc76c
+file://BVI-DVC.zip,ABuildingRoofS2IRIS_3840x2176_24fps_10bit_420.mp4,e97f7c85972b0f22c125e767a7b89ebf
+file://BVI-DVC.zip,ABuildingRoofS3IRIS_3840x2176_24fps_10bit_420.mp4,bf3eef0f6c127979be56f2cb33a859f3
+file://BVI-DVC.zip,ABuildingRoofS4IRIS_3840x2176_24fps_10bit_420.mp4,ccc00d93d584eff6580229912c0c70ca
+file://BVI-DVC.zip,ABuntingHangingAcrossHongKongVidevo_3840x2176_25fps_10bit_420.mp4,2e9ffbb498f783ec980b6b56edee3ed8
+file://BVI-DVC.zip,ABusyHongKongStreetVidevo_3840x2176_25fps_10bit_420.mp4,8835a1796c8690e4d169312a34f58533
+file://BVI-DVC.zip,ACalmingWaterBVITexture_3840x2176_120fps_10bit_420.mp4,bb19fd80ba858d545371facb85395999
+file://BVI-DVC.zip,ACarpetPanAverageBVITexture_3840x2176_120fps_10bit_420.mp4,e8efcf0878516698fb43105fe0bb1711
+file://BVI-DVC.zip,ACatchBVIHFR_3840x2176_120fps_10bit_420.mp4,0e040ef5722ca43ff33154d5168dfdb8
+file://BVI-DVC.zip,ACeramicsandSpicesMoroccoVidevo_3840x2176_50fps_10bit_420.mp4,a05f85f8498680fee2dd219800693dbb
+file://BVI-DVC.zip,ACharactersYonseiUniversity_3840x2176_30fps_10bit_420.mp4,42903855d5aff111d4eb2568956fc13d
+file://BVI-DVC.zip,AChristmasPresentsIRIS_3840x2176_24fps_10bit_420.mp4,52a70fd3fc8d9de27638f374ed2f8963
+file://BVI-DVC.zip,AChristmasRoomDareful_3840x2176_29fps_10bit_420.mp4,8879782c640dc0502a9034df1ef64aa2
+file://BVI-DVC.zip,AChurchInsideMCLJCV_3840x2176_30fps_10bit_420.mp4,436748af06068c310592828ee972a9d1
+file://BVI-DVC.zip,ACityScapesS1IRIS_3840x2176_24fps_10bit_420.mp4,346cf71a01b76a12771a997e2b7a7ef7
+file://BVI-DVC.zip,ACityScapesS2IRIS_3840x2176_24fps_10bit_420.mp4,2cf541de5d1c3b00c5d59df6b3840959
+file://BVI-DVC.zip,ACityScapesS3IRIS_3840x2176_24fps_10bit_420.mp4,9af748c84e9d3cbbbdcac0e89a108566
+file://BVI-DVC.zip,ACityStreetS1IRIS_3840x2176_24fps_10bit_420.mp4,15f4c01e74700115bc45142de5906efa
+file://BVI-DVC.zip,ACityStreetS3IRIS_3840x2176_24fps_10bit_420.mp4,79d49c9bf3e53f437c9562765f7ac47c
+file://BVI-DVC.zip,ACityStreetS4IRIS_3840x2176_24fps_10bit_420.mp4,dcd8946690c0ae7d32ef18107f4e4d83
+file://BVI-DVC.zip,ACityStreetS5IRIS_3840x2176_24fps_10bit_420.mp4,7210724bc60f00d9ee7e94b02cffd5b7
+file://BVI-DVC.zip,ACityStreetS6IRIS_3840x2176_24fps_10bit_420.mp4,3c8f8a6dc62c6cc454fcd6073d42124c
+file://BVI-DVC.zip,ACityStreetS7IRIS_3840x2176_24fps_10bit_420.mp4,0cdd37caff103ea3431b12bc41aaa3c0
+file://BVI-DVC.zip,ACloseUpBasketballSceneVidevo_3840x2176_25fps_10bit_420.mp4,32c61ab0b00c496ef5febc0f69eb63e0
+file://BVI-DVC.zip,ACloudsStaticBVITexture_3840x2176_120fps_10bit_420.mp4,6dba6b47e8448ee0244fc23597aa96d4
+file://BVI-DVC.zip,AColourfulDecorationWatPhoVidevo_3840x2176_50fps_10bit_420.mp4,12144697277e6b4b958bf3728f98f200
+file://BVI-DVC.zip,AColourfulKoreanLanternsVidevo_3840x2176_50fps_10bit_420.mp4,82332c13b1e69ae7ef2b221f25892639
+file://BVI-DVC.zip,AColourfulPaperLanternsVidevo_3840x2176_50fps_10bit_420.mp4,4cd0e1a9f604bcb54556489f9cf76325
+file://BVI-DVC.zip,AColourfulRugsMoroccoVidevo_3840x2176_50fps_10bit_420.mp4,41475fff59a328c90670077926de7dd5
+file://BVI-DVC.zip,AConstructionS2YonseiUniversity_3840x2176_30fps_10bit_420.mp4,ad8ba4b052d8c61bf2adedd48ec4a5a9
+file://BVI-DVC.zip,ACostaRicaS3Harmonics_3840x2176_60fps_10bit_420.mp4,b26b7b61647d370e0dbc48aae1157415
+file://BVI-DVC.zip,ACrosswalkHarmonics_3840x2176_60fps_10bit_420.mp4,ef0f90cfd5fa6c5e14b829bb667a62b9
+file://BVI-DVC.zip,ACrosswalkHongKong2S1Videvo_3840x2176_25fps_10bit_420.mp4,7c21a02d792184bffbbeb94953e35c35
+file://BVI-DVC.zip,ACrosswalkHongKong2S2Videvo_3840x2176_25fps_10bit_420.mp4,f631610013acce326395251f8d0c0e31
+file://BVI-DVC.zip,ACrosswalkHongKongVidevo_3840x2176_25fps_10bit_420.mp4,13c506c30306dd67079825afdd38c760
+file://BVI-DVC.zip,ACrowdRunMCLV_3840x2176_25fps_10bit_420.mp4,5daa85885b9cfd33692640b115dbef81
+file://BVI-DVC.zip,ACyclistS1BVIHFR_3840x2176_120fps_10bit_420.mp4,a9129f72e7bff9370dbdf43ac56d3b32
+file://BVI-DVC.zip,ACyclistVeniceBeachBoardwalkVidevo_3840x2176_25fps_10bit_420.mp4,c69ab65dbed5112365da14997f5e593e
+file://BVI-DVC.zip,ADollsScene1YonseiUniversity_3840x2176_30fps_10bit_420.mp4,21c5d733691e209a2be6826b9826a165
+file://BVI-DVC.zip,ADollsScene2YonseiUniversity_3840x2176_30fps_10bit_420.mp4,8e7fa4f066dea80c0c2f3a1a7188500d
+file://BVI-DVC.zip,ADowntownHongKongVidevo_3840x2176_25fps_10bit_420.mp4,7ec7b21b06f700d1737deb5e559981b5
+file://BVI-DVC.zip,ADrivingPOVHarmonics_3840x2176_60fps_10bit_420.mp4,7656b950e2b49c8541f04a3f445ff0d7
+file://BVI-DVC.zip,ADropsOnWaterBVITexture_3840x2176_120fps_10bit_420.mp4,88a417d5c5cd5617ff099dc1dbbb30a5
+file://BVI-DVC.zip,AElFuenteMaskLIVENetFlix_3840x2176_24fps_10bit_420.mp4,80a094322e7b700ea29ff863d0c84130
+file://BVI-DVC.zip,AEnteringHongKongStallS1Videvo_3840x2176_25fps_10bit_420.mp4,dd9090a11d11f19296d1a3f30653d4e2
+file://BVI-DVC.zip,AEnteringHongKongStallS2Videvo_3840x2176_25fps_10bit_420.mp4,67176ff1edd5a64fe47fed43b38ccfc4
+file://BVI-DVC.zip,AFerrisWheelTurningVidevo_3840x2176_50fps_10bit_420.mp4,3ff7e3b13a3a7ddf8addfe5871add492
+file://BVI-DVC.zip,AFirewoodS1IRIS_3840x2176_24fps_10bit_420.mp4,c7479133439293198b408cf17ee48588
+file://BVI-DVC.zip,AFirewoodS2IRIS_3840x2176_25fps_10bit_420.mp4,86c792a5ac4887c09beb51843190f150
+file://BVI-DVC.zip,AFitnessIRIS_3840x2176_24fps_10bit_420.mp4,3bf892967425fc2782f68b01f1df34c0
+file://BVI-DVC.zip,AFjordsS1Harmonics_3840x2176_60fps_10bit_420.mp4,8aff73bf2463e9c67d383094faa14753
+file://BVI-DVC.zip,AFlagShootTUMSVT_3840x2176_50fps_10bit_420.mp4,583736762bf35bcce13366d249239b2b
+file://BVI-DVC.zip,AFlowerChapelS1IRIS_3840x2176_24fps_10bit_420.mp4,09473f40c174f9112ac128f8559d84e0
+file://BVI-DVC.zip,AFlowerChapelS2IRIS_3840x2176_24fps_10bit_420.mp4,7c8060d7deb8722c0b5218107ae62297
+file://BVI-DVC.zip,AFlyingCountrysideDareful_3840x2176_29fps_10bit_420.mp4,7f1c839c62ab05e8a63355dc21e58f95
+file://BVI-DVC.zip,AFlyingMountainsDareful_3840x2176_29fps_10bit_420.mp4,f5ca728dfbf98ea54c6dcb66abd09c6b
+file://BVI-DVC.zip,AFlyingThroughLAStreetVidevo_3840x2176_23fps_10bit_420.mp4,cd067128a1f487ed10d7a992248c1171
+file://BVI-DVC.zip,AFungusZoomBVITexture_3840x2176_120fps_10bit_420.mp4,e261200dcf24d75aa394ce2694d57aba
+file://BVI-DVC.zip,AGrassBVITexture_3840x2176_120fps_10bit_420.mp4,987a87e21c9dcbaec8cf206eeedd7f49
+file://BVI-DVC.zip,AGrazTowerIRIS_3840x2176_24fps_10bit_420.mp4,afc4cafa805ff141f9356cd7824e4b06
+file://BVI-DVC.zip,AHamsterBVIHFR_3840x2176_120fps_10bit_420.mp4,4b0a4c92b0020753ef34e11c375ef650
+file://BVI-DVC.zip,AHarleyDavidsonIRIS_3840x2176_24fps_10bit_420.mp4,9e4dabd219d79a03dfa0aad7ea4aad2a
+file://BVI-DVC.zip,AHongKongIslandVidevo_3840x2176_25fps_10bit_420.mp4,b54d2a4781919f8da2f92b943f6717a4
+file://BVI-DVC.zip,AHongKongMarket1Videvo_3840x2176_25fps_10bit_420.mp4,5f2d03a1e483ef46b5f6d9f3cc986628
+file://BVI-DVC.zip,AHongKongMarket2Videvo_3840x2176_25fps_10bit_420.mp4,dab9877b4903d590f975c2115a110413
+file://BVI-DVC.zip,AHongKongMarket3S1Videvo_3840x2176_25fps_10bit_420.mp4,b93444f26a3a74dc0f1d9bed8862f98d
+file://BVI-DVC.zip,AHongKongMarket3S2Videvo_3840x2176_25fps_10bit_420.mp4,297db232897ec7c167bef244431b6944
+file://BVI-DVC.zip,AHongKongMarket4S1Videvo_3840x2176_25fps_10bit_420.mp4,1c7cb5702b6819b9755c5272266010f8
+file://BVI-DVC.zip,AHongKongMarket4S2Videvo_3840x2176_25fps_10bit_420.mp4,2a0942675694bda5bda2b764f7a1e282
+file://BVI-DVC.zip,AHongKongS1Harmonics_3840x2176_60fps_10bit_420.mp4,371c922fffc8c4a217d704db4e558d1f
+file://BVI-DVC.zip,AHongKongS2Harmonics_3840x2176_60fps_10bit_420.mp4,a02137972336e8bceac4cd9fe87fc037
+file://BVI-DVC.zip,AHongKongS3Harmonics_3840x2176_60fps_10bit_420.mp4,8bafe9a5856cd10689ef51cb5db6f182
+file://BVI-DVC.zip,AHorseDrawnCarriagesVidevo_3840x2176_50fps_10bit_420.mp4,b412fda312d1d04df66edbcc7963c4a5
+file://BVI-DVC.zip,AHorseStaringS1Videvo_3840x2176_50fps_10bit_420.mp4,7128c83e4528b531ac7bb6f581cc70ff
+file://BVI-DVC.zip,AHorseStaringS2Videvo_3840x2176_50fps_10bit_420.mp4,0f481041a62ae687dad2c3aecd8e6b26
+file://BVI-DVC.zip,AJockeyHarmonics_3840x2176_120fps_10bit_420.mp4,f7a102a7a77d667c028bd05908c85721
+file://BVI-DVC.zip,AJoggersS1BVIHFR_3840x2176_120fps_10bit_420.mp4,ca54eee0e757318d78d0db3851e044f8
+file://BVI-DVC.zip,AJoggersS2BVIHFR_3840x2176_120fps_10bit_420.mp4,d59b2e025c47992041fe80a00fb6732b
+file://BVI-DVC.zip,AKartingIRIS_3840x2176_24fps_10bit_420.mp4,99331903aa74703059cb0a9911ad71f5
+file://BVI-DVC.zip,AKoraDrumsVidevo_3840x2176_25fps_10bit_420.mp4,c8f36b3e22be8f50dc97998f04bbbd27
+file://BVI-DVC.zip,ALakeYonseiUniversity_3840x2176_30fps_10bit_420.mp4,166b9d5565cdbbbc470b6a9a90dfcc0c
+file://BVI-DVC.zip,ALampLeavesBVITexture_3840x2176_120fps_10bit_420.mp4,bba6eb029d154bbbc84da30f7bd4fd17
+file://BVI-DVC.zip,ALaundryHangingOverHongKongVidevo_3840x2176_25fps_10bit_420.mp4,73d8c986b12988ad87a3b0271776ea92
+file://BVI-DVC.zip,ALeaves1BVITexture_3840x2176_120fps_10bit_420.mp4,1817cf27669948ac6f7c4e6f4aa64703
+file://BVI-DVC.zip,ALeaves3BVITexture_3840x2176_120fps_10bit_420.mp4,e046a01dc0d5f3eb0cf262af45f254eb
+file://BVI-DVC.zip,ALowLevelShotAlongHongKongVidevo_3840x2176_25fps_10bit_420.mp4,f1cbad585ab5d568b82e5c2c455b2ad9
+file://BVI-DVC.zip,ALungshanTempleS1Videvo_3840x2176_50fps_10bit_420.mp4,3b08949a81e6cbd671be8a1186a09e9a
+file://BVI-DVC.zip,ALungshanTempleS2Videvo_3840x2176_50fps_10bit_420.mp4,62317550470239a113bbdde3ea17ff32
+file://BVI-DVC.zip,AManMoTempleVidevo_3840x2176_25fps_10bit_420.mp4,3f243314b8586cd9390bfc685765a367
+file://BVI-DVC.zip,AManStandinginProduceTruckVidevo_3840x2176_25fps_10bit_420.mp4,f89e6140dc1af3021ef3dfce2cd79667
+file://BVI-DVC.zip,AManWalkingThroughBangkokVidevo_3840x2176_25fps_10bit_420.mp4,f183b1ba8d2389c45dbbfb1a90afdcee
+file://BVI-DVC.zip,AMaplesS1YonseiUniversity_3840x2176_30fps_10bit_420.mp4,0809c6458d048957ecf52649d14bf8ab
+file://BVI-DVC.zip,AMaplesS2YonseiUniversity_3840x2176_30fps_10bit_420.mp4,a82d849ba995f7fbeffcf8f55003c028
+file://BVI-DVC.zip,AMirabellParkS1IRIS_3840x2176_24fps_10bit_420.mp4,4b5e0ed016d5e24fee1662f2f2c6ce4c
+file://BVI-DVC.zip,AMirabellParkS2IRIS_3840x2176_24fps_10bit_420.mp4,efacca4e6621b9dbfa5847c7efeeb87d
+file://BVI-DVC.zip,AMobileHarmonics_3840x2176_60fps_10bit_420.mp4,8c7444cb8fd67e79904107d5d0149a3b
+file://BVI-DVC.zip,AMoroccanCeramicsShopVidevo_3840x2176_50fps_10bit_420.mp4,8ede13a7a7ca99382745ecba112506c9
+file://BVI-DVC.zip,AMoroccanSlippersVidevo_3840x2176_50fps_10bit_420.mp4,82af7e122c34840dfac8391d3e7fd7af
+file://BVI-DVC.zip,AMuralPaintingVidevo_3840x2176_25fps_10bit_420.mp4,2ef978e7dff53155555e400243c5e385
+file://BVI-DVC.zip,AMyanmarS4Harmonics_3840x2176_60fps_10bit_420.mp4,fad2558de405aae8479439181efc8a50
+file://BVI-DVC.zip,AMyanmarS6Harmonics_3840x2176_60fps_10bit_420.mp4,a8a101353cab312086bf4c8a2e3ff04b
+file://BVI-DVC.zip,AMyeongDongVidevo_3840x2176_25fps_10bit_420.mp4,3d986933e71b57a3392f6fe6c691d509
+file://BVI-DVC.zip,ANewYorkStreetDareful_3840x2176_30fps_10bit_420.mp4,198644815e3d4fe74fb2132c3ec50e86
+file://BVI-DVC.zip,AOrangeBuntingoverHongKongVidevo_3840x2176_25fps_10bit_420.mp4,470dbb2aa7328c7b69a33cc83a207057
+file://BVI-DVC.zip,APaintingTiltingBVITexture_3840x2176_120fps_10bit_420.mp4,c60ed50f865beadd93b3adc2d7e21e43
+file://BVI-DVC.zip,AParkViolinMCLJCV_3840x2176_25fps_10bit_420.mp4,1444c8a27a3c3dbda8d56e1b1a767bb6
+file://BVI-DVC.zip,APedestriansSeoulatDawnVidevo_3840x2176_25fps_10bit_420.mp4,8dacfc477f4ec7d8159755261adde4dc
+file://BVI-DVC.zip,APeopleWalkingS1IRIS_3840x2176_24fps_10bit_420.mp4,69e6ca7cbb55d0e60d18ee6f3b0d1ecd
+file://BVI-DVC.zip,APersonRunningOutsideVidevo_3840x2176_50fps_10bit_420.mp4,ff040c7f08efc2fac85ea579f8729368
+file://BVI-DVC.zip,APillowsTransBVITexture_3840x2176_120fps_10bit_420.mp4,3cd8a8bcf4ab63e191558b92d7ca9acc
+file://BVI-DVC.zip,APlasmaFreeBVITexture_3840x2176_120fps_10bit_420.mp4,882ecbdc25d3fb7d8e94fa6f161158c4
+file://BVI-DVC.zip,APresentsChristmasTreeDareful_3840x2176_29fps_10bit_420.mp4,cf6dafc6a670852775d13a6e0345cb1b
+file://BVI-DVC.zip,AResidentialBuildingSJTU_3840x2176_60fps_10bit_420.mp4,9dc5145d909fa82bfbec9164a84b6449
+file://BVI-DVC.zip,ARunnersSJTU_3840x2176_60fps_10bit_420.mp4,ea4735f9444acfa9d5512e4ce2b48c34
+file://BVI-DVC.zip,ARuralSetupIRIS_3840x2176_24fps_10bit_420.mp4,3e73404671364155d216042f3935cc10
+file://BVI-DVC.zip,ARuralSetupS2IRIS_3840x2176_24fps_10bit_420.mp4,351223d5a5e594ff59df4b1bb0dc90c3
+file://BVI-DVC.zip,AScarfSJTU_3840x2176_60fps_10bit_420.mp4,a5e6cacdb21e88f61e8267f6c25edcae
+file://BVI-DVC.zip,ASeasideWalkIRIS_3840x2176_24fps_10bit_420.mp4,2ee91d4c1e2fc6586be7c48e508891b5
+file://BVI-DVC.zip,ASeekingMCLV_3840x2176_25fps_10bit_420.mp4,5b669febde882eb5b9063aaa56b3072f
+file://BVI-DVC.zip,ASeoulCanalatDawnVidevo_3840x2176_25fps_10bit_420.mp4,cc3af8f185d78cef78b7c3bfd774be66
+file://BVI-DVC.zip,AShoppingCentreVidevo_3840x2176_25fps_10bit_420.mp4,03ec02ababe89e053e88a0266588e9cb
+file://BVI-DVC.zip,ASignboardBoatLIVENetFlix_3840x2176_30fps_10bit_420.mp4,723925795c36db274af54bef364e0678
+file://BVI-DVC.zip,ASkyscraperBangkokVidevo_3840x2176_23fps_10bit_420.mp4,4c3254fcee7b0116e0e09d1eadaa36da
+file://BVI-DVC.zip,ASmokeClearBVITexture_3840x2176_120fps_10bit_420.mp4,370d74bf4f780805cc0d8f81fd06cd09
+file://BVI-DVC.zip,ASparklerBVIHFR_3840x2176_120fps_10bit_420.mp4,6f4ad44ebb872eb6f05a22013ed96788
+file://BVI-DVC.zip,ASquareAndTimelapseHarmonics_3840x2176_60fps_10bit_420.mp4,3feaf5ab186fd97cddce64847d5863d0
+file://BVI-DVC.zip,ASquareS1IRIS_3840x2176_24fps_10bit_420.mp4,7d63d4a397501e615d4dedc0c7c42a24
+file://BVI-DVC.zip,ASquareS2IRIS_3840x2176_24fps_10bit_420.mp4,1ef79e67bc8baab2baf5d00b4e8f93f0
+file://BVI-DVC.zip,AStreetArtVidevo_3840x2176_30fps_10bit_420.mp4,07c421542051a007c21c90261af531b2
+file://BVI-DVC.zip,AStreetDancerS1IRIS_3840x2176_24fps_10bit_420.mp4,101ab652985f252da84ffba1960429f0
+file://BVI-DVC.zip,AStreetDancerS2IRIS_3840x2176_24fps_10bit_420.mp4,649867893e5496f1c575fa493d51f129
+file://BVI-DVC.zip,AStreetDancerS3IRIS_3840x2176_24fps_10bit_420.mp4,62ff1dae603b6ca28a65da7edf628ebe
+file://BVI-DVC.zip,AStreetDancerS4IRIS_3840x2176_24fps_10bit_420.mp4,8cccb72df839ca75142859822f28662b
+file://BVI-DVC.zip,AStreetDancerS5IRIS_3840x2176_24fps_10bit_420.mp4,64acc8704f9662fe9f2920e65292b3aa
+file://BVI-DVC.zip,AStreetsOfIndiaS1Harmonics_3840x2176_60fps_10bit_420.mp4,131a8fbc9c53d240fc9c2a8c67352ca6
+file://BVI-DVC.zip,AStreetsOfIndiaS2Harmonics_3840x2176_60fps_10bit_420.mp4,9e6e5b052d3542ddf7502272a2b93b4d
+file://BVI-DVC.zip,AStreetsOfIndiaS3Harmonics_3840x2176_60fps_10bit_420.mp4,1847f28e57d633434d0ca3f7f631efd8
+file://BVI-DVC.zip,ATaiChiHongKongS1Videvo_3840x2176_25fps_10bit_420.mp4,5bb254ba25695972cba5cd6bbda85200
+file://BVI-DVC.zip,ATaiChiHongKongS2Videvo_3840x2176_25fps_10bit_420.mp4,d37e025fef204e8bc3fa7e68e950101e
+file://BVI-DVC.zip,ATaipeiCityRooftops8Videvo_3840x2176_25fps_10bit_420.mp4,5de0fb909e5d4017f1c4a92507b16527
+file://BVI-DVC.zip,ATaipeiCityRooftopsS1Videvo_3840x2176_25fps_10bit_420.mp4,bb00b0588f301af9faf3d6051bdd16da
+file://BVI-DVC.zip,ATaipeiCityRooftopsS2Videvo_3840x2176_25fps_10bit_420.mp4,8ba96d64a5b6644f2b50c9325bfa1797
+file://BVI-DVC.zip,ATaksinBridgeVidevo_3840x2176_23fps_10bit_420.mp4,e5314c84fd16af8268c8ef19344ba2f6
+file://BVI-DVC.zip,ATallBuildingsSJTU_3840x2176_60fps_10bit_420.mp4,e4bf5b0ffb53d75d814a504addd033d4
+file://BVI-DVC.zip,ATennisMCLV_3840x2176_24fps_10bit_420.mp4,06d70451c67dedfe9f211c0529ded274
+file://BVI-DVC.zip,ATouristsSatOutsideVidevo_3840x2176_25fps_10bit_420.mp4,4803c7156faea5e38ec9c31c8c181e18
+file://BVI-DVC.zip,AToyCalendarHarmonics_3840x2176_60fps_10bit_420.mp4,3165a4abb880209854e1e8cd3a05fc93
+file://BVI-DVC.zip,ATrackingDownHongKongSideVidevo_3840x2176_25fps_10bit_420.mp4,820e1cc6f21255dc19b8066844d8cee1
+file://BVI-DVC.zip,ATrackingPastRestaurantVidevo_3840x2176_25fps_10bit_420.mp4,2bc753d304a99006fc5b28d6b8f70fc0
+file://BVI-DVC.zip,ATrackingPastStallHongKongVidevo_3840x2176_25fps_10bit_420.mp4,8afc8620dbd567234cc634cea6799386
+file://BVI-DVC.zip,ATraditionalIndonesianKecakVidevo_3840x2176_25fps_10bit_420.mp4,fcb721c4703e78ed5c4a9ea67f3859d1
+file://BVI-DVC.zip,ATrafficandBuildingSJTU_3840x2176_60fps_10bit_420.mp4,b848c4b8fd3e472686cb12c571bb8fb5
+file://BVI-DVC.zip,ATrafficFlowSJTU_3840x2176_60fps_10bit_420.mp4,7085a0bdb0164e5b6ccd8c0e2c3c34d5
+file://BVI-DVC.zip,ATrafficonTasksinBridgeVidevo_3840x2176_25fps_10bit_420.mp4,2f6ca95bc9874e1a86579dd981baae49
+file://BVI-DVC.zip,ATreeWillsBVITexture_3840x2176_120fps_10bit_420.mp4,48e3e63d31d82d257f23345a2238a1c1
+file://BVI-DVC.zip,ATruckIRIS_3840x2176_24fps_10bit_420.mp4,ef6d007bafded5b3bc079744d5f0d9d6
+file://BVI-DVC.zip,ATunnelFlagS1Harmonics_3840x2176_60fps_10bit_420.mp4,d382ee1c6c4467a766fbc3d0d4fa1b20
+file://BVI-DVC.zip,AUnloadingVegetablesVidevo_3840x2176_25fps_10bit_420.mp4,99a1ade435a3b5e290d868e69874b237
+file://BVI-DVC.zip,AVegetableMarketS1LIVENetFlix_3840x2176_30fps_10bit_420.mp4,24184a55cadec0d1cdb5da0a1fe1ad43
+file://BVI-DVC.zip,AVegetableMarketS2LIVENetFlix_3840x2176_30fps_10bit_420.mp4,c6b847739a9288f928fe79d38ba042dd
+file://BVI-DVC.zip,AVegetableMarketS3LIVENetFlix_3840x2176_30fps_10bit_420.mp4,3c459e5975c750bf4af5fa0137bc103d
+file://BVI-DVC.zip,AVegetableMarketS4LIVENetFlix_3840x2176_30fps_10bit_420.mp4,88a1471b5793e8cc28e0a770bd828b72
+file://BVI-DVC.zip,AVeniceS1Harmonics_3840x2176_60fps_10bit_420.mp4,1a3d76cbcdb709be44d6621d3814c6b9
+file://BVI-DVC.zip,AVeniceS2Harmonics_3840x2176_60fps_10bit_420.mp4,60e53afc92c046cd6b9d4e46ccb17f43
+file://BVI-DVC.zip,AVeniceSceneIRIS_3840x2176_24fps_10bit_420.mp4,75834f84caf5a31253641d7f37d7f8c6
+file://BVI-DVC.zip,AWalkingDownKhaoStreetVidevo_3840x2176_25fps_10bit_420.mp4,5b877a8f6bb9e7ffae04905d4513fad1
+file://BVI-DVC.zip,AWalkingDownNorthRodeoVidevo_3840x2176_25fps_10bit_420.mp4,26b20ea09ce37f9beaa8e87f25392997
+file://BVI-DVC.zip,AWalkingThroughFootbridgeVidevo_3840x2176_25fps_10bit_420.mp4,fe0adf0b90aa3940fe4b24cf8ec00c5b
+file://BVI-DVC.zip,AWatPhoTempleVidevo_3840x2176_50fps_10bit_420.mp4,32f66184eb50e84d1db6a65347a0c451
+file://BVI-DVC.zip,AWoodSJTU_3840x2176_60fps_10bit_420.mp4,ab1b4c8c5ede391283b1bbf3e6245fcd
+file://BVI-DVC.zip,AWovenVidevo_3840x2176_25fps_10bit_420.mp4,51613019cfad7551b75011a11e5bcbce
+file://BVI-DVC.zip,BAdvertisingMassagesBangkokVidevo_1920x1088_25fps_10bit_420.mp4,ee84c0a0436607da967e56ad0c2f0324
+file://BVI-DVC.zip,BAmericanFootballS2Harmonics_1920x1088_60fps_10bit_420.mp4,c54e0eac9fa35a605c30a5d5aad399d1
+file://BVI-DVC.zip,BAmericanFootballS3Harmonics_1920x1088_60fps_10bit_420.mp4,e4a7a7e598b1c447b081f1ddc2f5e0e4
+file://BVI-DVC.zip,BAmericanFootballS4Harmonics_1920x1088_60fps_10bit_420.mp4,1a810eb07b91b844b6b514f24cfb7a51
+file://BVI-DVC.zip,BAnimalsS11Harmonics_1920x1088_60fps_10bit_420.mp4,9c11a09b6b0abd839f97fb9221107dd0
+file://BVI-DVC.zip,BAnimalsS1Harmonics_1920x1088_60fps_10bit_420.mp4,789d505188f06c54e28754f739556060
+file://BVI-DVC.zip,BBangkokMarketVidevo_1920x1088_25fps_10bit_420.mp4,f8dbdf0558c3026640c9140a91e5e9ca
+file://BVI-DVC.zip,BBasketballGoalScoredS1Videvo_1920x1088_25fps_10bit_420.mp4,8a4983f0705d0c74423b72f6321a1ef1
+file://BVI-DVC.zip,BBasketballGoalScoredS2Videvo_1920x1088_25fps_10bit_420.mp4,2df6a60841263f7a7b9502e95b64e061
+file://BVI-DVC.zip,BBasketballS1YonseiUniversity_1920x1088_30fps_10bit_420.mp4,63c9e8e5b45506acb0416c50f3bad91f
+file://BVI-DVC.zip,BBasketballS2YonseiUniversity_1920x1088_30fps_10bit_420.mp4,4221d90de731e62d38217fd9b478397b
+file://BVI-DVC.zip,BBasketballS3YonseiUniversity_1920x1088_30fps_10bit_420.mp4,e53a34814db0a0114f790683aec48e6f
+file://BVI-DVC.zip,BBoatsChaoPhrayaRiverVidevo_1920x1088_23fps_10bit_420.mp4,072a116361b08f59a05f54bef3032930
+file://BVI-DVC.zip,BBobbleheadBVIHFR_1920x1088_120fps_10bit_420.mp4,954f80e2df8e88a84a03feef0e86083a
+file://BVI-DVC.zip,BBookcaseBVITexture_1920x1088_120fps_10bit_420.mp4,a90b46a6a8830fc4b817f1cefa56eaf7
+file://BVI-DVC.zip,BBoxingPracticeHarmonics_1920x1088_60fps_10bit_420.mp4,32ff15a42986bf6dec7e5d79c2c3fb38
+file://BVI-DVC.zip,BBricksBushesStaticBVITexture_1920x1088_120fps_10bit_420.mp4,a9de9b42c6a470dc2bd3a15cdf24417e
+file://BVI-DVC.zip,BBricksLeavesBVITexture_1920x1088_120fps_10bit_420.mp4,325af2e06e30d4152af5a6ae63c6d2d8
+file://BVI-DVC.zip,BBricksTiltingBVITexture_1920x1088_120fps_10bit_420.mp4,ac7d9af11b5ed528a7e43f519b6aad01
+file://BVI-DVC.zip,BBubblesPitcherS1BVITexture_1920x1088_120fps_10bit_420.mp4,ba10d088aec94a4aec50b6059854e7ca
+file://BVI-DVC.zip,BBuildingRoofS1IRIS_1920x1088_24fps_10bit_420.mp4,23ed25514facee13b1f0a722645069d4
+file://BVI-DVC.zip,BBuildingRoofS2IRIS_1920x1088_24fps_10bit_420.mp4,33c635c38c6d5de6a48a67c08879ebc8
+file://BVI-DVC.zip,BBuildingRoofS3IRIS_1920x1088_24fps_10bit_420.mp4,c6f88a7bc1d7feff58d8df9532d89b36
+file://BVI-DVC.zip,BBuildingRoofS4IRIS_1920x1088_24fps_10bit_420.mp4,d8350528b3c4a8620c7dd2c87d58120a
+file://BVI-DVC.zip,BBuntingHangingAcrossHongKongVidevo_1920x1088_25fps_10bit_420.mp4,e31ed84d12d13d663076071d9afbc134
+file://BVI-DVC.zip,BBusyHongKongStreetVidevo_1920x1088_25fps_10bit_420.mp4,8437179d830579182b32407b7d3445bd
+file://BVI-DVC.zip,BCalmingWaterBVITexture_1920x1088_120fps_10bit_420.mp4,8eb95a4e8b3755ae60f0ebe56604511a
+file://BVI-DVC.zip,BCarpetPanAverageBVITexture_1920x1088_120fps_10bit_420.mp4,34eff09e814165cb682682cc8ee20db0
+file://BVI-DVC.zip,BCatchBVIHFR_1920x1088_120fps_10bit_420.mp4,ae35edb94424f8e643bf38d66caded2b
+file://BVI-DVC.zip,BCeramicsandSpicesMoroccoVidevo_1920x1088_50fps_10bit_420.mp4,4776abd5f5d72d003c20c995679b17b5
+file://BVI-DVC.zip,BCharactersYonseiUniversity_1920x1088_30fps_10bit_420.mp4,b3d9dfed78087e07fa239318d48119e0
+file://BVI-DVC.zip,BChristmasPresentsIRIS_1920x1088_24fps_10bit_420.mp4,ca8dec7af72a17f7fe37ab41960b9ea2
+file://BVI-DVC.zip,BChristmasRoomDareful_1920x1088_29fps_10bit_420.mp4,fb5014d2ce2e4b545958a3905a4b58b0
+file://BVI-DVC.zip,BChurchInsideMCLJCV_1920x1088_30fps_10bit_420.mp4,9732b3157b2b33c963123da3ea22a0db
+file://BVI-DVC.zip,BCityScapesS1IRIS_1920x1088_24fps_10bit_420.mp4,4f7486ba9dc4318655328f462801644b
+file://BVI-DVC.zip,BCityScapesS2IRIS_1920x1088_24fps_10bit_420.mp4,4fc109df9e1642bc42903c95fa09e30b
+file://BVI-DVC.zip,BCityScapesS3IRIS_1920x1088_24fps_10bit_420.mp4,cc411573734f76f25c005f807e254585
+file://BVI-DVC.zip,BCityStreetS1IRIS_1920x1088_24fps_10bit_420.mp4,7eb073c48a1c7ba965d48d0010065672
+file://BVI-DVC.zip,BCityStreetS3IRIS_1920x1088_24fps_10bit_420.mp4,e0bb656ef3314b91e04697eaa11efc33
+file://BVI-DVC.zip,BCityStreetS4IRIS_1920x1088_24fps_10bit_420.mp4,4daaa5022cd1a095ef8ca4086dfae00b
+file://BVI-DVC.zip,BCityStreetS5IRIS_1920x1088_24fps_10bit_420.mp4,6ccb5bd5d15d83875ffd1a862cb1be29
+file://BVI-DVC.zip,BCityStreetS6IRIS_1920x1088_24fps_10bit_420.mp4,708d670c441053aa1c6444cabf84963e
+file://BVI-DVC.zip,BCityStreetS7IRIS_1920x1088_24fps_10bit_420.mp4,ddd5aa1a8aca47481693b5093c4ba842
+file://BVI-DVC.zip,BCloseUpBasketballSceneVidevo_1920x1088_25fps_10bit_420.mp4,52a45ca4b0b02bcb292e7668cab82f52
+file://BVI-DVC.zip,BCloudsStaticBVITexture_1920x1088_120fps_10bit_420.mp4,0eb6a823a6a32d446017774c06ae27c5
+file://BVI-DVC.zip,BColourfulDecorationWatPhoVidevo_1920x1088_50fps_10bit_420.mp4,80f291ef5724bb80508ee98492e3377c
+file://BVI-DVC.zip,BColourfulKoreanLanternsVidevo_1920x1088_50fps_10bit_420.mp4,5c0c0c6165d9062c52edca97d2429904
+file://BVI-DVC.zip,BColourfulPaperLanternsVidevo_1920x1088_50fps_10bit_420.mp4,e937458fb13d6b0daa3ef6a0a063e267
+file://BVI-DVC.zip,BColourfulRugsMoroccoVidevo_1920x1088_50fps_10bit_420.mp4,1b064c350496b51445af1574e528ee7d
+file://BVI-DVC.zip,BConstructionS2YonseiUniversity_1920x1088_30fps_10bit_420.mp4,39127e93e5fa1c086b25da1a16655f93
+file://BVI-DVC.zip,BCostaRicaS3Harmonics_1920x1088_60fps_10bit_420.mp4,8079421eb9efe4145b92151416704327
+file://BVI-DVC.zip,BCrosswalkHarmonics_1920x1088_60fps_10bit_420.mp4,9abf01dbc9218f2029603e85953a27a2
+file://BVI-DVC.zip,BCrosswalkHongKong2S1Videvo_1920x1088_25fps_10bit_420.mp4,77e11effa7badb15ae97ecf6544bfcd8
+file://BVI-DVC.zip,BCrosswalkHongKong2S2Videvo_1920x1088_25fps_10bit_420.mp4,b93524cff270c74d94f50cd9b0b06bba
+file://BVI-DVC.zip,BCrosswalkHongKongVidevo_1920x1088_25fps_10bit_420.mp4,14b415dea7ffd4a5338999f443db3dd9
+file://BVI-DVC.zip,BCrowdRunMCLV_1920x1088_25fps_10bit_420.mp4,8f17fbf54f3fdb43a4c081b8d261acd6
+file://BVI-DVC.zip,BCyclistS1BVIHFR_1920x1088_120fps_10bit_420.mp4,3524ae2684d8e881325128ece21caefa
+file://BVI-DVC.zip,BCyclistVeniceBeachBoardwalkVidevo_1920x1088_25fps_10bit_420.mp4,e3b3da12b06a7dd634a79556bced4913
+file://BVI-DVC.zip,BDollsScene1YonseiUniversity_1920x1088_30fps_10bit_420.mp4,d3d6508e1b553f53c2e2a39ce69ab31c
+file://BVI-DVC.zip,BDollsScene2YonseiUniversity_1920x1088_30fps_10bit_420.mp4,525560d13a1a9a2713ce9644e6c6a2c2
+file://BVI-DVC.zip,BDowntownHongKongVidevo_1920x1088_25fps_10bit_420.mp4,95c2b7c4506d95bb6e936ee24b74acb9
+file://BVI-DVC.zip,BDrivingPOVHarmonics_1920x1088_60fps_10bit_420.mp4,32967c7b8459582e40753578cbaeaca8
+file://BVI-DVC.zip,BDropsOnWaterBVITexture_1920x1088_120fps_10bit_420.mp4,6d84d2c0bafe1e490e25204f23b4ce28
+file://BVI-DVC.zip,BElFuenteMaskLIVENetFlix_1920x1088_24fps_10bit_420.mp4,26fbcf1988c2b7462ef8311e13292a74
+file://BVI-DVC.zip,BEnteringHongKongStallS1Videvo_1920x1088_25fps_10bit_420.mp4,875807c343ff120cc01f874e86847a2f
+file://BVI-DVC.zip,BEnteringHongKongStallS2Videvo_1920x1088_25fps_10bit_420.mp4,85f144763d2f2e8f5a2e103f4472ce44
+file://BVI-DVC.zip,BFerrisWheelTurningVidevo_1920x1088_50fps_10bit_420.mp4,54e125eb3acb667875d96f767e1a2df3
+file://BVI-DVC.zip,BFirewoodS1IRIS_1920x1088_24fps_10bit_420.mp4,9c83c5bc3ac04dee3381950c7dfb2067
+file://BVI-DVC.zip,BFirewoodS2IRIS_1920x1088_25fps_10bit_420.mp4,5c3402957a6f082edc121dbcceb1e580
+file://BVI-DVC.zip,BFitnessIRIS_1920x1088_24fps_10bit_420.mp4,7c09addf8fb36fd99976aee8970a2154
+file://BVI-DVC.zip,BFjordsS1Harmonics_1920x1088_60fps_10bit_420.mp4,95dde65105714ca41211c2d69e40f9ba
+file://BVI-DVC.zip,BFlagShootTUMSVT_1920x1088_50fps_10bit_420.mp4,ed7931ec226b1280f098ecbeae4b12c2
+file://BVI-DVC.zip,BFlowerChapelS1IRIS_1920x1088_24fps_10bit_420.mp4,16cbf36465d632c7d8c5b602661b2b0e
+file://BVI-DVC.zip,BFlowerChapelS2IRIS_1920x1088_24fps_10bit_420.mp4,88d4681765467c60407b3012b24685c8
+file://BVI-DVC.zip,BFlyingCountrysideDareful_1920x1088_29fps_10bit_420.mp4,d570f55e939eaa2ec93b55043764dded
+file://BVI-DVC.zip,BFlyingMountainsDareful_1920x1088_29fps_10bit_420.mp4,4d1a238cdbcaa2845858828d3a922d07
+file://BVI-DVC.zip,BFlyingThroughLAStreetVidevo_1920x1088_23fps_10bit_420.mp4,9e7aa1e86098ca1777da55c82e32bd46
+file://BVI-DVC.zip,BFungusZoomBVITexture_1920x1088_120fps_10bit_420.mp4,f0b1b8e22bce943949403e8992a030d5
+file://BVI-DVC.zip,BGrassBVITexture_1920x1088_120fps_10bit_420.mp4,1eb25e43478aae5b7eb20b7b0809a59e
+file://BVI-DVC.zip,BGrazTowerIRIS_1920x1088_24fps_10bit_420.mp4,b440732feb8edbeeccaef9885d4d41a9
+file://BVI-DVC.zip,BHamsterBVIHFR_1920x1088_120fps_10bit_420.mp4,f5bcdf7263da61c28e2d173d9d51baee
+file://BVI-DVC.zip,BHarleyDavidsonIRIS_1920x1088_24fps_10bit_420.mp4,f2b62b0c4720320bf6c2aaa8cdad6f21
+file://BVI-DVC.zip,BHongKongIslandVidevo_1920x1088_25fps_10bit_420.mp4,83f4ec9d950d24c1cc1a27cddaf6cebb
+file://BVI-DVC.zip,BHongKongMarket1Videvo_1920x1088_25fps_10bit_420.mp4,8018c26905bd4454fe5920b59605c11c
+file://BVI-DVC.zip,BHongKongMarket2Videvo_1920x1088_25fps_10bit_420.mp4,4b5acf1a0b9b49399d61b7890d15f9df
+file://BVI-DVC.zip,BHongKongMarket3S1Videvo_1920x1088_25fps_10bit_420.mp4,ec988f3cc654507f0242075bdc08b567
+file://BVI-DVC.zip,BHongKongMarket3S2Videvo_1920x1088_25fps_10bit_420.mp4,efdf1d638bc481362f8aaadfc052c193
+file://BVI-DVC.zip,BHongKongMarket4S1Videvo_1920x1088_25fps_10bit_420.mp4,152942235656862de70ab51649ffe9f8
+file://BVI-DVC.zip,BHongKongMarket4S2Videvo_1920x1088_25fps_10bit_420.mp4,23cb5fd74cf99e0f5124bdb2943b2585
+file://BVI-DVC.zip,BHongKongS1Harmonics_1920x1088_60fps_10bit_420.mp4,26cd063930fd0ac36592178af9b3ab00
+file://BVI-DVC.zip,BHongKongS2Harmonics_1920x1088_60fps_10bit_420.mp4,341bc0ae78c5867c243ac05809ff5532
+file://BVI-DVC.zip,BHongKongS3Harmonics_1920x1088_60fps_10bit_420.mp4,941f0f9de5e1314e744ad8ea0b9349b7
+file://BVI-DVC.zip,BHorseDrawnCarriagesVidevo_1920x1088_50fps_10bit_420.mp4,bd99be1ed77c2e00efe1f68a2a60b444
+file://BVI-DVC.zip,BHorseStaringS1Videvo_1920x1088_50fps_10bit_420.mp4,bed872bac9ee91649f787e5b20bd2083
+file://BVI-DVC.zip,BHorseStaringS2Videvo_1920x1088_50fps_10bit_420.mp4,dfc892eb2ef8b0badf32be7664d2736d
+file://BVI-DVC.zip,BJockeyHarmonics_1920x1088_120fps_10bit_420.mp4,5c6f8112d6cca9d3b9d11d9408746088
+file://BVI-DVC.zip,BJoggersS1BVIHFR_1920x1088_120fps_10bit_420.mp4,cd3eb975fbb60f5ed06d9a68893e4fca
+file://BVI-DVC.zip,BJoggersS2BVIHFR_1920x1088_120fps_10bit_420.mp4,ae011d5f527fa6d77b8108dfbc6da6a6
+file://BVI-DVC.zip,BKartingIRIS_1920x1088_24fps_10bit_420.mp4,6df6cce024fcf6ae617ab6466dfe099f
+file://BVI-DVC.zip,BKoraDrumsVidevo_1920x1088_25fps_10bit_420.mp4,3726aaf277743cb61dd6f4a5c6fd61ce
+file://BVI-DVC.zip,BLakeYonseiUniversity_1920x1088_30fps_10bit_420.mp4,841fb28eacbd819ec07d772281fc69ec
+file://BVI-DVC.zip,BLampLeavesBVITexture_1920x1088_120fps_10bit_420.mp4,6b0ca0f4dcf070a1d328199745c6fed6
+file://BVI-DVC.zip,BLaundryHangingOverHongKongVidevo_1920x1088_25fps_10bit_420.mp4,61cea178bc15918b333ac463b4cc9527
+file://BVI-DVC.zip,BLeaves1BVITexture_1920x1088_120fps_10bit_420.mp4,935e6c0064709457cfc93323c3d4b465
+file://BVI-DVC.zip,BLeaves3BVITexture_1920x1088_120fps_10bit_420.mp4,038350b590d3d836e6daf9398a2596fb
+file://BVI-DVC.zip,BLowLevelShotAlongHongKongVidevo_1920x1088_25fps_10bit_420.mp4,a0204b04df016bb3ef2ae45bc2b3f129
+file://BVI-DVC.zip,BLungshanTempleS1Videvo_1920x1088_50fps_10bit_420.mp4,9b55f765c0de7177804684772c001005
+file://BVI-DVC.zip,BLungshanTempleS2Videvo_1920x1088_50fps_10bit_420.mp4,76df4367a49ece560f04c03082b1ccf4
+file://BVI-DVC.zip,BManMoTempleVidevo_1920x1088_25fps_10bit_420.mp4,784a39708daf8e31bca3ea9af5a4bbf8
+file://BVI-DVC.zip,BManStandinginProduceTruckVidevo_1920x1088_25fps_10bit_420.mp4,506873e382d306242454a3e868be91f4
+file://BVI-DVC.zip,BManWalkingThroughBangkokVidevo_1920x1088_25fps_10bit_420.mp4,4abe5e32ebf3162761f04a3d2e01bdfa
+file://BVI-DVC.zip,BMaplesS1YonseiUniversity_1920x1088_30fps_10bit_420.mp4,afdb6104abe64129f660c5ad62229d9d
+file://BVI-DVC.zip,BMaplesS2YonseiUniversity_1920x1088_30fps_10bit_420.mp4,e8d2d51f639484488b7941319ff58f64
+file://BVI-DVC.zip,BMirabellParkS1IRIS_1920x1088_24fps_10bit_420.mp4,5c4dd00a64f4356b3ad655bbeca4742f
+file://BVI-DVC.zip,BMirabellParkS2IRIS_1920x1088_24fps_10bit_420.mp4,444ee81c29bd1b9b9efb59c33dbe5adc
+file://BVI-DVC.zip,BMobileHarmonics_1920x1088_60fps_10bit_420.mp4,c036b134a4d68b67f7e2960eff90e1d9
+file://BVI-DVC.zip,BMoroccanCeramicsShopVidevo_1920x1088_50fps_10bit_420.mp4,37d735e8985f2090de02eb3e2d8eec82
+file://BVI-DVC.zip,BMoroccanSlippersVidevo_1920x1088_50fps_10bit_420.mp4,c828305ff094688e8f6484dba3fb4007
+file://BVI-DVC.zip,BMuralPaintingVidevo_1920x1088_25fps_10bit_420.mp4,2155e0f1983e7ca07e68e624e3269064
+file://BVI-DVC.zip,BMyanmarS4Harmonics_1920x1088_60fps_10bit_420.mp4,ed2b794674fbf699a1164687940177d0
+file://BVI-DVC.zip,BMyanmarS6Harmonics_1920x1088_60fps_10bit_420.mp4,c1f32704c64c6520683205a54b0d08b9
+file://BVI-DVC.zip,BMyeongDongVidevo_1920x1088_25fps_10bit_420.mp4,03e4061e9e907adfaf098571e0fb6dbf
+file://BVI-DVC.zip,BNewYorkStreetDareful_1920x1088_30fps_10bit_420.mp4,8f7d7c8f9143ae708dd4f96fe575a774
+file://BVI-DVC.zip,BOrangeBuntingoverHongKongVidevo_1920x1088_25fps_10bit_420.mp4,c499c53386b439d4cd72d2e4bb9393b0
+file://BVI-DVC.zip,BPaintingTiltingBVITexture_1920x1088_120fps_10bit_420.mp4,d59cc9a53b5d352bca721660eafa3160
+file://BVI-DVC.zip,BParkViolinMCLJCV_1920x1088_25fps_10bit_420.mp4,481d96c18cfb3d67097c8a5b9b6b2f01
+file://BVI-DVC.zip,BPedestriansSeoulatDawnVidevo_1920x1088_25fps_10bit_420.mp4,c5208cccebd2336f0b6364cfca407c9b
+file://BVI-DVC.zip,BPeopleWalkingS1IRIS_1920x1088_24fps_10bit_420.mp4,e861da0ed68c618ba6d0fcaaaf957cf5
+file://BVI-DVC.zip,BPersonRunningOutsideVidevo_1920x1088_50fps_10bit_420.mp4,4b182777d8211df1848a5ea1ccd9da09
+file://BVI-DVC.zip,BPillowsTransBVITexture_1920x1088_120fps_10bit_420.mp4,3701aebfbe0aa09450a9a8de30e65aea
+file://BVI-DVC.zip,BPlasmaFreeBVITexture_1920x1088_120fps_10bit_420.mp4,b065f339bb8a44be890fadb1297b05a5
+file://BVI-DVC.zip,BPresentsChristmasTreeDareful_1920x1088_29fps_10bit_420.mp4,4a5dde5f96b02838cf9f7716a0e41dd5
+file://BVI-DVC.zip,BResidentialBuildingSJTU_1920x1088_60fps_10bit_420.mp4,ec46486b37cb8f6956ac5a93b86bf0db
+file://BVI-DVC.zip,BRunnersSJTU_1920x1088_60fps_10bit_420.mp4,0b7c0e28d3da57aa600f42908ee95cef
+file://BVI-DVC.zip,BRuralSetupIRIS_1920x1088_24fps_10bit_420.mp4,af6b511308e55466528dbf96e0190b7a
+file://BVI-DVC.zip,BRuralSetupS2IRIS_1920x1088_24fps_10bit_420.mp4,441d849825d2710952f45677fb654bd7
+file://BVI-DVC.zip,BScarfSJTU_1920x1088_60fps_10bit_420.mp4,ffcbe7dbbe131e8578100e76efd51726
+file://BVI-DVC.zip,BSeasideWalkIRIS_1920x1088_24fps_10bit_420.mp4,02103c06e7fc61e28346aeafd028767d
+file://BVI-DVC.zip,BSeekingMCLV_1920x1088_25fps_10bit_420.mp4,0ca46879b7131de33cfea56da84f3f4a
+file://BVI-DVC.zip,BSeoulCanalatDawnVidevo_1920x1088_25fps_10bit_420.mp4,c5653da19e889bae425c7375c6f89c24
+file://BVI-DVC.zip,BShoppingCentreVidevo_1920x1088_25fps_10bit_420.mp4,58a1cd487dc4c1f558c67d2ef251b91c
+file://BVI-DVC.zip,BSignboardBoatLIVENetFlix_1920x1088_30fps_10bit_420.mp4,527be9b3a992e1be4c4af7067d82808e
+file://BVI-DVC.zip,BSkyscraperBangkokVidevo_1920x1088_23fps_10bit_420.mp4,f7b9546eda29ca2ae731aeb5f2c5f8a8
+file://BVI-DVC.zip,BSmokeClearBVITexture_1920x1088_120fps_10bit_420.mp4,4b36751fb7a55058a5df8ebab2d8fc79
+file://BVI-DVC.zip,BSparklerBVIHFR_1920x1088_120fps_10bit_420.mp4,8fabaab4dbec1fefa8f6b8552c327a02
+file://BVI-DVC.zip,BSquareAndTimelapseHarmonics_1920x1088_60fps_10bit_420.mp4,d85ac8dc525fedddcfdad4f152e51586
+file://BVI-DVC.zip,BSquareS1IRIS_1920x1088_24fps_10bit_420.mp4,2d1c8d116734a79c476cd3cd3e539f12
+file://BVI-DVC.zip,BSquareS2IRIS_1920x1088_24fps_10bit_420.mp4,a2b17d784d5e9427fcc6a7ceb420ed22
+file://BVI-DVC.zip,BStreetArtVidevo_1920x1088_30fps_10bit_420.mp4,83cec990fed2f0184612f61dad3359a3
+file://BVI-DVC.zip,BStreetDancerS1IRIS_1920x1088_24fps_10bit_420.mp4,d8f9a1dc101bb6ecb50289c41865f1cf
+file://BVI-DVC.zip,BStreetDancerS2IRIS_1920x1088_24fps_10bit_420.mp4,fa511362a966f63909e637490eee3f43
+file://BVI-DVC.zip,BStreetDancerS3IRIS_1920x1088_24fps_10bit_420.mp4,b4cadb25fd12d825d7532ab00288f438
+file://BVI-DVC.zip,BStreetDancerS4IRIS_1920x1088_24fps_10bit_420.mp4,76d25d075f2a1fbfe846a70564b99eb3
+file://BVI-DVC.zip,BStreetDancerS5IRIS_1920x1088_24fps_10bit_420.mp4,b4ea7bf0f2759ad5bd96f4126c9c469c
+file://BVI-DVC.zip,BStreetsOfIndiaS1Harmonics_1920x1088_60fps_10bit_420.mp4,21d75178b5dac273e0c6c55751942a4c
+file://BVI-DVC.zip,BStreetsOfIndiaS2Harmonics_1920x1088_60fps_10bit_420.mp4,c5badf8f0a165d73fbfd77404f7552d8
+file://BVI-DVC.zip,BStreetsOfIndiaS3Harmonics_1920x1088_60fps_10bit_420.mp4,b246a9ad82bcb36217aeeefb997ade63
+file://BVI-DVC.zip,BTaiChiHongKongS1Videvo_1920x1088_25fps_10bit_420.mp4,b1aa7b7613f24ca1cfc22bedca313a0e
+file://BVI-DVC.zip,BTaiChiHongKongS2Videvo_1920x1088_25fps_10bit_420.mp4,05265ca51dd2a2850e3c76e65960ffe5
+file://BVI-DVC.zip,BTaipeiCityRooftops8Videvo_1920x1088_25fps_10bit_420.mp4,84272fbf1cb2ad2b43f87a67c93fed61
+file://BVI-DVC.zip,BTaipeiCityRooftopsS1Videvo_1920x1088_25fps_10bit_420.mp4,39aab204f6861c64fd958a3688bd46d7
+file://BVI-DVC.zip,BTaipeiCityRooftopsS2Videvo_1920x1088_25fps_10bit_420.mp4,b260a63319f7549a9a14ca63a03fcb0b
+file://BVI-DVC.zip,BTaksinBridgeVidevo_1920x1088_23fps_10bit_420.mp4,f23d33d031ac5237c2439032d1a07485
+file://BVI-DVC.zip,BTallBuildingsSJTU_1920x1088_60fps_10bit_420.mp4,e1f855b9859f7f387ba1c45968a0a2f7
+file://BVI-DVC.zip,BTennisMCLV_1920x1088_24fps_10bit_420.mp4,bc523949ec6c0c885beaf7f9dcec444a
+file://BVI-DVC.zip,BTouristsSatOutsideVidevo_1920x1088_25fps_10bit_420.mp4,4bee3c6cbb0c417d20663fa35363e21b
+file://BVI-DVC.zip,BToyCalendarHarmonics_1920x1088_60fps_10bit_420.mp4,959dc1bba45cf7836f206602f0e6df8a
+file://BVI-DVC.zip,BTrackingDownHongKongSideVidevo_1920x1088_25fps_10bit_420.mp4,2228736c58c7de7203562b783eadd5f0
+file://BVI-DVC.zip,BTrackingPastRestaurantVidevo_1920x1088_25fps_10bit_420.mp4,d0a8193795cd7ad996694688e200d3de
+file://BVI-DVC.zip,BTrackingPastStallHongKongVidevo_1920x1088_25fps_10bit_420.mp4,c094df507390282403751ce19e54d744
+file://BVI-DVC.zip,BTraditionalIndonesianKecakVidevo_1920x1088_25fps_10bit_420.mp4,12b91c4c65ca73c26d7da28888c0861c
+file://BVI-DVC.zip,BTrafficandBuildingSJTU_1920x1088_60fps_10bit_420.mp4,a368574e73bc3107c11cf0599b4fdfcf
+file://BVI-DVC.zip,BTrafficFlowSJTU_1920x1088_60fps_10bit_420.mp4,9ab59dd575a408232c3e82d56aba2dc2
+file://BVI-DVC.zip,BTrafficonTasksinBridgeVidevo_1920x1088_25fps_10bit_420.mp4,8bdb28396b258c4bb6deafa1aba4201a
+file://BVI-DVC.zip,BTreeWillsBVITexture_1920x1088_120fps_10bit_420.mp4,407f17e72e70b80120503db55cdb6e38
+file://BVI-DVC.zip,BTruckIRIS_1920x1088_24fps_10bit_420.mp4,59ea0fb07760d7dded24b3eef3a0b7f9
+file://BVI-DVC.zip,BTunnelFlagS1Harmonics_1920x1088_60fps_10bit_420.mp4,95d14cb4c8d7c9aa665b9e5de470d8fa
+file://BVI-DVC.zip,BUnloadingVegetablesVidevo_1920x1088_25fps_10bit_420.mp4,7232645789d79cb6814721340d6ccdaa
+file://BVI-DVC.zip,BVegetableMarketS1LIVENetFlix_1920x1088_30fps_10bit_420.mp4,f319cce3d0ad5adfc9bfdc345efc7187
+file://BVI-DVC.zip,BVegetableMarketS2LIVENetFlix_1920x1088_30fps_10bit_420.mp4,64e21e94d0701848864e0a3d2f8f4221
+file://BVI-DVC.zip,BVegetableMarketS3LIVENetFlix_1920x1088_30fps_10bit_420.mp4,d1f90c010d5fce1c65d17e0ef26ff205
+file://BVI-DVC.zip,BVegetableMarketS4LIVENetFlix_1920x1088_30fps_10bit_420.mp4,7c26eafa27d105a7d1df9c481840b249
+file://BVI-DVC.zip,BVeniceS1Harmonics_1920x1088_60fps_10bit_420.mp4,867291146232b91bfd58dc9441342ce9
+file://BVI-DVC.zip,BVeniceS2Harmonics_1920x1088_60fps_10bit_420.mp4,03e201375c1deaf64f8391852c6c3292
+file://BVI-DVC.zip,BVeniceSceneIRIS_1920x1088_24fps_10bit_420.mp4,321f9b868a6e4714c631f0ca8b123b1b
+file://BVI-DVC.zip,BWalkingDownKhaoStreetVidevo_1920x1088_25fps_10bit_420.mp4,78c6e4cfac0f01fd97b2e2d832d82bac
+file://BVI-DVC.zip,BWalkingDownNorthRodeoVidevo_1920x1088_25fps_10bit_420.mp4,6dba403cc05414b9b3ac6b2d1edf98bd
+file://BVI-DVC.zip,BWalkingThroughFootbridgeVidevo_1920x1088_25fps_10bit_420.mp4,4bcdf75df4dacb6a75d889d910f9e223
+file://BVI-DVC.zip,BWatPhoTempleVidevo_1920x1088_50fps_10bit_420.mp4,0479b5018f52c51ca5ba9e4cd24a0a5f
+file://BVI-DVC.zip,BWoodSJTU_1920x1088_60fps_10bit_420.mp4,5b259a73830376b7f6a2c7f005915b4f
+file://BVI-DVC.zip,BWovenVidevo_1920x1088_25fps_10bit_420.mp4,69b480b03140a4ef6c5e3ea8574c4348
+file://BVI-DVC.zip,CAdvertisingMassagesBangkokVidevo_960x544_25fps_10bit_420.mp4,45401922a8ef8fc2d6ed020c598ed12d
+file://BVI-DVC.zip,CAmericanFootballS2Harmonics_960x544_60fps_10bit_420.mp4,1920d26ef816148c05a548617e0a1f8e
+file://BVI-DVC.zip,CAmericanFootballS3Harmonics_960x544_60fps_10bit_420.mp4,1e596ad72266d8fae9ff64b6d9358e9a
+file://BVI-DVC.zip,CAmericanFootballS4Harmonics_960x544_60fps_10bit_420.mp4,3ff00ad112c4f6fdeac76ac4219ec6f7
+file://BVI-DVC.zip,CAnimalsS11Harmonics_960x544_60fps_10bit_420.mp4,35be8bac3fc9a6c99f863bc9bfbfc915
+file://BVI-DVC.zip,CAnimalsS1Harmonics_960x544_60fps_10bit_420.mp4,ec9273da1ef8ffd6f054349cdd6238e4
+file://BVI-DVC.zip,CBangkokMarketVidevo_960x544_25fps_10bit_420.mp4,fc5e42be028a112cba4f2ec1e4e97d9c
+file://BVI-DVC.zip,CBasketballGoalScoredS1Videvo_960x544_25fps_10bit_420.mp4,0a6e85ca86148c5c616e59e2c7390ef8
+file://BVI-DVC.zip,CBasketballGoalScoredS2Videvo_960x544_25fps_10bit_420.mp4,d89795cff82e0699fa9fd6ce7ff4632d
+file://BVI-DVC.zip,CBasketballS1YonseiUniversity_960x544_30fps_10bit_420.mp4,92e7de7135d61b59d1b36e8523b95d83
+file://BVI-DVC.zip,CBasketballS2YonseiUniversity_960x544_30fps_10bit_420.mp4,4abcf520be44b3ec69047945e8fe8605
+file://BVI-DVC.zip,CBasketballS3YonseiUniversity_960x544_30fps_10bit_420.mp4,f4ffd3ece727426f4788247e4b9dab7d
+file://BVI-DVC.zip,CBoatsChaoPhrayaRiverVidevo_960x544_23fps_10bit_420.mp4,7418091bf4dc5115bfe8e15323f3d7e2
+file://BVI-DVC.zip,CBobbleheadBVIHFR_960x544_120fps_10bit_420.mp4,1ec9fd7027aeda3c2ffb6a88c63f4c98
+file://BVI-DVC.zip,CBookcaseBVITexture_960x544_120fps_10bit_420.mp4,c36b6cab92f1c4c0b09a97fcb6fbd60a
+file://BVI-DVC.zip,CBoxingPracticeHarmonics_960x544_60fps_10bit_420.mp4,6e9f26c876a49eeb9f8681c9b7b881fb
+file://BVI-DVC.zip,CBricksBushesStaticBVITexture_960x544_120fps_10bit_420.mp4,90aa9c5f8482b62a181d5a55362614e5
+file://BVI-DVC.zip,CBricksLeavesBVITexture_960x544_120fps_10bit_420.mp4,0845f059f99eb6f4ad9012701bbb42f7
+file://BVI-DVC.zip,CBricksTiltingBVITexture_960x544_120fps_10bit_420.mp4,17a22b084d5b3baf215df02e2954162f
+file://BVI-DVC.zip,CBubblesPitcherS1BVITexture_960x544_120fps_10bit_420.mp4,847ac0aad3f176986db54647ec5ede33
+file://BVI-DVC.zip,CBuildingRoofS1IRIS_960x544_24fps_10bit_420.mp4,328e394f0e720657cff44fc2521fcbad
+file://BVI-DVC.zip,CBuildingRoofS2IRIS_960x544_24fps_10bit_420.mp4,11bae9fdee3d69a91e436115bc4a0470
+file://BVI-DVC.zip,CBuildingRoofS3IRIS_960x544_24fps_10bit_420.mp4,df362b4848c9c77037b909657e799c7d
+file://BVI-DVC.zip,CBuildingRoofS4IRIS_960x544_24fps_10bit_420.mp4,347cfca55bce999e9ca546f94ac3d6c7
+file://BVI-DVC.zip,CBuntingHangingAcrossHongKongVidevo_960x544_25fps_10bit_420.mp4,d8a349405b750f2623f9907c0d27e4a8
+file://BVI-DVC.zip,CBusyHongKongStreetVidevo_960x544_25fps_10bit_420.mp4,4fbf1289272209b660b0701d2dc62194
+file://BVI-DVC.zip,CCalmingWaterBVITexture_960x544_120fps_10bit_420.mp4,b811bddf182da136a57782a8a8dc89a1
+file://BVI-DVC.zip,CCarpetPanAverageBVITexture_960x544_120fps_10bit_420.mp4,e1649c96d4e69d3aa3a427029ec4206b
+file://BVI-DVC.zip,CCatchBVIHFR_960x544_120fps_10bit_420.mp4,15eb3dbd4ac108db90f80762fdb20600
+file://BVI-DVC.zip,CCeramicsandSpicesMoroccoVidevo_960x544_50fps_10bit_420.mp4,e9bdf001026470e342519e465ee09626
+file://BVI-DVC.zip,CCharactersYonseiUniversity_960x544_30fps_10bit_420.mp4,0dc02e3f931f0a8cae26d87daaa0bd3c
+file://BVI-DVC.zip,CChristmasPresentsIRIS_960x544_24fps_10bit_420.mp4,e0fef963b71f92b0debc0adcf0361822
+file://BVI-DVC.zip,CChristmasRoomDareful_960x544_29fps_10bit_420.mp4,bbf6d6c13a119bbd3fb4a6417c7cf660
+file://BVI-DVC.zip,CChurchInsideMCLJCV_960x544_30fps_10bit_420.mp4,ce6e4ce4adb033970564b4ad2c03dc7b
+file://BVI-DVC.zip,CCityScapesS1IRIS_960x544_24fps_10bit_420.mp4,87c6a477cdc19b640be7da4d5c22fa16
+file://BVI-DVC.zip,CCityScapesS2IRIS_960x544_24fps_10bit_420.mp4,5a548a513337b3d32e9fc39b09febd06
+file://BVI-DVC.zip,CCityScapesS3IRIS_960x544_24fps_10bit_420.mp4,4167b4b91893d197d6e371d51eaab4ae
+file://BVI-DVC.zip,CCityStreetS1IRIS_960x544_24fps_10bit_420.mp4,349055ac68a4aecf09358e99cb9659c2
+file://BVI-DVC.zip,CCityStreetS3IRIS_960x544_24fps_10bit_420.mp4,1c440c4fcc8796a510f2d46be67625b3
+file://BVI-DVC.zip,CCityStreetS4IRIS_960x544_24fps_10bit_420.mp4,c799abc8ed35677cfd4941031d8e120c
+file://BVI-DVC.zip,CCityStreetS5IRIS_960x544_24fps_10bit_420.mp4,9c6daaf3d46d9a3524030c132b6cb7a2
+file://BVI-DVC.zip,CCityStreetS6IRIS_960x544_24fps_10bit_420.mp4,84ca9af0616d6c85a505a12b5baf7eac
+file://BVI-DVC.zip,CCityStreetS7IRIS_960x544_24fps_10bit_420.mp4,3efa84665ef55b876b3699b2e4dbd1e6
+file://BVI-DVC.zip,CCloseUpBasketballSceneVidevo_960x544_25fps_10bit_420.mp4,4660148e3f7ac018acf9745425b0601a
+file://BVI-DVC.zip,CCloudsStaticBVITexture_960x544_120fps_10bit_420.mp4,e36714a9d8ee6e3838b6423d8509fe87
+file://BVI-DVC.zip,CColourfulDecorationWatPhoVidevo_960x544_50fps_10bit_420.mp4,70da97630688416a2b8c5c3efaba0103
+file://BVI-DVC.zip,CColourfulKoreanLanternsVidevo_960x544_50fps_10bit_420.mp4,25599a5ee369161bbfad7d2990a5c764
+file://BVI-DVC.zip,CColourfulPaperLanternsVidevo_960x544_50fps_10bit_420.mp4,86d2ac07439d5f5db9022e3ea3903270
+file://BVI-DVC.zip,CColourfulRugsMoroccoVidevo_960x544_50fps_10bit_420.mp4,ed5bc8e46dbb75fe6715ddc4113a5921
+file://BVI-DVC.zip,CConstructionS2YonseiUniversity_960x544_30fps_10bit_420.mp4,12b57f9cad792742cea308185dd14961
+file://BVI-DVC.zip,CCostaRicaS3Harmonics_960x544_60fps_10bit_420.mp4,6c0207021d5a0a55eb9751d98073fbfa
+file://BVI-DVC.zip,CCrosswalkHarmonics_960x544_60fps_10bit_420.mp4,4c2e314ccc3e6304e9c27d810f923568
+file://BVI-DVC.zip,CCrosswalkHongKong2S1Videvo_960x544_25fps_10bit_420.mp4,ba868ff5c71d50f973452efe08f9b4d3
+file://BVI-DVC.zip,CCrosswalkHongKong2S2Videvo_960x544_25fps_10bit_420.mp4,d370f0354b00626c0f5731dbb0069332
+file://BVI-DVC.zip,CCrosswalkHongKongVidevo_960x544_25fps_10bit_420.mp4,8a0004677e9b54412fe8ed8ba9fd48cb
+file://BVI-DVC.zip,CCrowdRunMCLV_960x544_25fps_10bit_420.mp4,82b70a35232379434076fd059434a1aa
+file://BVI-DVC.zip,CCyclistS1BVIHFR_960x544_120fps_10bit_420.mp4,7691a23f5961c262daa2f80ae67069fc
+file://BVI-DVC.zip,CCyclistVeniceBeachBoardwalkVidevo_960x544_25fps_10bit_420.mp4,cd49b42384cfcd97b68d6e4c79f2da4f
+file://BVI-DVC.zip,CDollsScene1YonseiUniversity_960x544_30fps_10bit_420.mp4,8816a29426581a92f4040a7a3a1fa9d4
+file://BVI-DVC.zip,CDollsScene2YonseiUniversity_960x544_30fps_10bit_420.mp4,cde0f458b4cd9d5236be5f2c126d2746
+file://BVI-DVC.zip,CDowntownHongKongVidevo_960x544_25fps_10bit_420.mp4,3b627286b4e3fb111b11ea12cc9bec73
+file://BVI-DVC.zip,CDrivingPOVHarmonics_960x544_60fps_10bit_420.mp4,0c23ea9a67e7e14bee7fb770068ae20b
+file://BVI-DVC.zip,CDropsOnWaterBVITexture_960x544_120fps_10bit_420.mp4,e262536f3d65253f45e58093993cf816
+file://BVI-DVC.zip,CElFuenteMaskLIVENetFlix_960x544_24fps_10bit_420.mp4,78b99f8bb1267e6f9c2123a336a03730
+file://BVI-DVC.zip,CEnteringHongKongStallS1Videvo_960x544_25fps_10bit_420.mp4,11fff6906b976786154635aac0193ec2
+file://BVI-DVC.zip,CEnteringHongKongStallS2Videvo_960x544_25fps_10bit_420.mp4,9ee7c8460e13a2cb06ccb49c74231935
+file://BVI-DVC.zip,CFerrisWheelTurningVidevo_960x544_50fps_10bit_420.mp4,1c13229e31b883ed659b920e83bd31e1
+file://BVI-DVC.zip,CFirewoodS1IRIS_960x544_24fps_10bit_420.mp4,797f7c258608c23d55b17668dc3b8860
+file://BVI-DVC.zip,CFirewoodS2IRIS_960x544_25fps_10bit_420.mp4,4f772f45c66e278d545c9beaf5cefb2d
+file://BVI-DVC.zip,CFitnessIRIS_960x544_24fps_10bit_420.mp4,0449f99822a7f7c715e8d3054ccd9df3
+file://BVI-DVC.zip,CFjordsS1Harmonics_960x544_60fps_10bit_420.mp4,17958517dc0ee7e574ed35bd3da74743
+file://BVI-DVC.zip,CFlagShootTUMSVT_960x544_50fps_10bit_420.mp4,5a7c03399309fa8940d38cbd5ac603b4
+file://BVI-DVC.zip,CFlowerChapelS1IRIS_960x544_24fps_10bit_420.mp4,2abb88b414fcf307f980e297260ac46f
+file://BVI-DVC.zip,CFlowerChapelS2IRIS_960x544_24fps_10bit_420.mp4,c84fcf4ad39cdd2b05f1c8fd950c3e73
+file://BVI-DVC.zip,CFlyingCountrysideDareful_960x544_29fps_10bit_420.mp4,bc2d557a0fcf63b89f048e712430dab5
+file://BVI-DVC.zip,CFlyingMountainsDareful_960x544_29fps_10bit_420.mp4,02b36a7b0241ab18f39cd61717035ee1
+file://BVI-DVC.zip,CFlyingThroughLAStreetVidevo_960x544_23fps_10bit_420.mp4,a3a12ac8a59aad6623a54bc0b13b1f5e
+file://BVI-DVC.zip,CFungusZoomBVITexture_960x544_120fps_10bit_420.mp4,3e418f1eab7b157c304c5a9b5bd3645d
+file://BVI-DVC.zip,CGrassBVITexture_960x544_120fps_10bit_420.mp4,0f48e20d6935b9ab209f34a7493c1912
+file://BVI-DVC.zip,CGrazTowerIRIS_960x544_24fps_10bit_420.mp4,b621e363a364b660b51e43414bfc8c73
+file://BVI-DVC.zip,CHamsterBVIHFR_960x544_120fps_10bit_420.mp4,02ad6548d5188c2d23ffd1511728fa99
+file://BVI-DVC.zip,CHarleyDavidsonIRIS_960x544_24fps_10bit_420.mp4,586ad1957b28d7003f475315a2eaa61f
+file://BVI-DVC.zip,CHongKongIslandVidevo_960x544_25fps_10bit_420.mp4,12581a06c4bcf5e806a0ac1c7307c179
+file://BVI-DVC.zip,CHongKongMarket1Videvo_960x544_25fps_10bit_420.mp4,a112d27543ab60a9a7fbf64c93263917
+file://BVI-DVC.zip,CHongKongMarket2Videvo_960x544_25fps_10bit_420.mp4,6fd9307b9d36fb13362e7abb0770c8ec
+file://BVI-DVC.zip,CHongKongMarket3S1Videvo_960x544_25fps_10bit_420.mp4,0a2937bfe2b787cb83935927fb7dfae0
+file://BVI-DVC.zip,CHongKongMarket3S2Videvo_960x544_25fps_10bit_420.mp4,71fea001ee43d2025d09c48e58c01cd4
+file://BVI-DVC.zip,CHongKongMarket4S1Videvo_960x544_25fps_10bit_420.mp4,a56cedfde85be457b2e3b540699e92f0
+file://BVI-DVC.zip,CHongKongMarket4S2Videvo_960x544_25fps_10bit_420.mp4,e5d19800929fa817da3c234c55e04d6b
+file://BVI-DVC.zip,CHongKongS1Harmonics_960x544_60fps_10bit_420.mp4,896a74b186d1cee5833673d566956258
+file://BVI-DVC.zip,CHongKongS2Harmonics_960x544_60fps_10bit_420.mp4,f198866e2739dfe0d1b1fa111ae52d04
+file://BVI-DVC.zip,CHongKongS3Harmonics_960x544_60fps_10bit_420.mp4,3c36bb7a9ae0696727779ac03282f8d2
+file://BVI-DVC.zip,CHorseDrawnCarriagesVidevo_960x544_50fps_10bit_420.mp4,bb058bbea047914110bcf4a14327111b
+file://BVI-DVC.zip,CHorseStaringS1Videvo_960x544_50fps_10bit_420.mp4,b7e20daebf46e5f41e101ec7945daecf
+file://BVI-DVC.zip,CHorseStaringS2Videvo_960x544_50fps_10bit_420.mp4,f5d7930b4780014d89ef6a2c0eef1c7a
+file://BVI-DVC.zip,CJockeyHarmonics_960x544_120fps_10bit_420.mp4,683c333ffad98c2f2f96b87370d0aeb2
+file://BVI-DVC.zip,CJoggersS1BVIHFR_960x544_120fps_10bit_420.mp4,7c41a79a40888de423e00433f24a1dfe
+file://BVI-DVC.zip,CJoggersS2BVIHFR_960x544_120fps_10bit_420.mp4,5f38836da33bdfe17cf462c6afad4463
+file://BVI-DVC.zip,CKartingIRIS_960x544_24fps_10bit_420.mp4,2e4bc96c3ed14095b2cdc2d936411016
+file://BVI-DVC.zip,CKoraDrumsVidevo_960x544_25fps_10bit_420.mp4,321188873beff8ad6b33cf42df0dce22
+file://BVI-DVC.zip,CLakeYonseiUniversity_960x544_30fps_10bit_420.mp4,9ba46f725fea1dc9e8c948a967899638
+file://BVI-DVC.zip,CLampLeavesBVITexture_960x544_120fps_10bit_420.mp4,62460839549812f841c5492b34d99ba2
+file://BVI-DVC.zip,CLaundryHangingOverHongKongVidevo_960x544_25fps_10bit_420.mp4,31e39216e5292ea94eb0f0772ced39fc
+file://BVI-DVC.zip,CLeaves1BVITexture_960x544_120fps_10bit_420.mp4,f45f5b6ffb25cf6f5d434de5cd6e12bf
+file://BVI-DVC.zip,CLeaves3BVITexture_960x544_120fps_10bit_420.mp4,902f06172f55bc1f06c237e04fc9725e
+file://BVI-DVC.zip,CLowLevelShotAlongHongKongVidevo_960x544_25fps_10bit_420.mp4,e61c9170c2ded0d1caa167384e4cc0ca
+file://BVI-DVC.zip,CLungshanTempleS1Videvo_960x544_50fps_10bit_420.mp4,8d9cf915b8559c59dcb365d38083b524
+file://BVI-DVC.zip,CLungshanTempleS2Videvo_960x544_50fps_10bit_420.mp4,295a2cde95cfd2696d2713fe83695f9b
+file://BVI-DVC.zip,CManMoTempleVidevo_960x544_25fps_10bit_420.mp4,67647dd10cf0c9d005b7f82354366392
+file://BVI-DVC.zip,CManStandinginProduceTruckVidevo_960x544_25fps_10bit_420.mp4,104b9722233552cda6db3c06d1275522
+file://BVI-DVC.zip,CManWalkingThroughBangkokVidevo_960x544_25fps_10bit_420.mp4,c151bb97eb3141c1e223dd1963a0c533
+file://BVI-DVC.zip,CMaplesS1YonseiUniversity_960x544_30fps_10bit_420.mp4,a97101c99316776ad688e3ebc6e9acf4
+file://BVI-DVC.zip,CMaplesS2YonseiUniversity_960x544_30fps_10bit_420.mp4,d633dc3d6e8e862e185128093a1e76e1
+file://BVI-DVC.zip,CMirabellParkS1IRIS_960x544_24fps_10bit_420.mp4,4c66e9059fb296fd1576e28a7f581ccd
+file://BVI-DVC.zip,CMirabellParkS2IRIS_960x544_24fps_10bit_420.mp4,dc9cbccaf67970245dbf12b8688e4b44
+file://BVI-DVC.zip,CMobileHarmonics_960x544_60fps_10bit_420.mp4,25ff871261bebbf2419f99ff8577eb2a
+file://BVI-DVC.zip,CMoroccanCeramicsShopVidevo_960x544_50fps_10bit_420.mp4,58f134c0198cea648a743694c43fc433
+file://BVI-DVC.zip,CMoroccanSlippersVidevo_960x544_50fps_10bit_420.mp4,506a755a5cecd398ea2e97f2ddbd229a
+file://BVI-DVC.zip,CMuralPaintingVidevo_960x544_25fps_10bit_420.mp4,9aeec0ea9ce338cc9ec6f9ed976509d2
+file://BVI-DVC.zip,CMyanmarS4Harmonics_960x544_60fps_10bit_420.mp4,ca0cb777fb9696549bb5e897ebeda8cb
+file://BVI-DVC.zip,CMyanmarS6Harmonics_960x544_60fps_10bit_420.mp4,e2df98b025cf5651b00b1ee7dbd9d530
+file://BVI-DVC.zip,CMyeongDongVidevo_960x544_25fps_10bit_420.mp4,dac016ce49337388292ed8b07d796077
+file://BVI-DVC.zip,CNewYorkStreetDareful_960x544_30fps_10bit_420.mp4,0306a758cb5519010fd9185dfd6cb9e4
+file://BVI-DVC.zip,COrangeBuntingoverHongKongVidevo_960x544_25fps_10bit_420.mp4,0e9c3088a2ce9e61dd598b89787b81a5
+file://BVI-DVC.zip,CPaintingTiltingBVITexture_960x544_120fps_10bit_420.mp4,6b9cbd5082b749cce739a0b1309eea67
+file://BVI-DVC.zip,CParkViolinMCLJCV_960x544_25fps_10bit_420.mp4,b261c2f6a9929febac8c5baa92c9b2fc
+file://BVI-DVC.zip,CPedestriansSeoulatDawnVidevo_960x544_25fps_10bit_420.mp4,5f36bfa38cbdebb2c0f3fac6d317a272
+file://BVI-DVC.zip,CPeopleWalkingS1IRIS_960x544_24fps_10bit_420.mp4,9451d10d972f38f09a8e9d7d3b64a46f
+file://BVI-DVC.zip,CPersonRunningOutsideVidevo_960x544_50fps_10bit_420.mp4,6c174e366ce2a7ed6a4122a98013cce4
+file://BVI-DVC.zip,CPillowsTransBVITexture_960x544_120fps_10bit_420.mp4,3b7eefd23b39cf053b3f520c83008f27
+file://BVI-DVC.zip,CPlasmaFreeBVITexture_960x544_120fps_10bit_420.mp4,51f0df5610c03c7510db7409f931a36c
+file://BVI-DVC.zip,CPresentsChristmasTreeDareful_960x544_29fps_10bit_420.mp4,13c07477ff1735cd529127a4e9659cc0
+file://BVI-DVC.zip,CResidentialBuildingSJTU_960x544_60fps_10bit_420.mp4,1ab2f8f60f31786c8d2bebe76573e668
+file://BVI-DVC.zip,CRunnersSJTU_960x544_60fps_10bit_420.mp4,bb0e62907bdd525ce0b7db9ad2312a03
+file://BVI-DVC.zip,CRuralSetupIRIS_960x544_24fps_10bit_420.mp4,fcc47c0d4ea692c84f7b3a19b3e1c6b7
+file://BVI-DVC.zip,CRuralSetupS2IRIS_960x544_24fps_10bit_420.mp4,399343ad169a6c669e9d8fb8a76edc64
+file://BVI-DVC.zip,CScarfSJTU_960x544_60fps_10bit_420.mp4,544e84b8e1aae989ced7a7eff1d57d6b
+file://BVI-DVC.zip,CSeasideWalkIRIS_960x544_24fps_10bit_420.mp4,7fb9b55d5372d15190fb8d3efcfbf617
+file://BVI-DVC.zip,CSeekingMCLV_960x544_25fps_10bit_420.mp4,67c4261c6bb128b15cbdb48562a83bac
+file://BVI-DVC.zip,CSeoulCanalatDawnVidevo_960x544_25fps_10bit_420.mp4,153721366548d09a4ba6105ad6b63846
+file://BVI-DVC.zip,CShoppingCentreVidevo_960x544_25fps_10bit_420.mp4,9e5d2234ee6a2fabe5c53e3b2ff627ba
+file://BVI-DVC.zip,CSignboardBoatLIVENetFlix_960x544_30fps_10bit_420.mp4,4b1e7d5f3087ddfd7f685585d74dda32
+file://BVI-DVC.zip,CSkyscraperBangkokVidevo_960x544_23fps_10bit_420.mp4,d3121ef5941af12ac7734e92d1f50033
+file://BVI-DVC.zip,CSmokeClearBVITexture_960x544_120fps_10bit_420.mp4,1590135bdf4fed8bfae67ebad02af8ae
+file://BVI-DVC.zip,CSparklerBVIHFR_960x544_120fps_10bit_420.mp4,99a71f26283975ddf8cfa8cd145fbc24
+file://BVI-DVC.zip,CSquareAndTimelapseHarmonics_960x544_60fps_10bit_420.mp4,256ebbbaed1f6854d9211d6235184fe1
+file://BVI-DVC.zip,CSquareS1IRIS_960x544_24fps_10bit_420.mp4,f34d474a0a555813c7b5005bb8d1f23a
+file://BVI-DVC.zip,CSquareS2IRIS_960x544_24fps_10bit_420.mp4,937a025d9d19d92bbeccb0380b073c80
+file://BVI-DVC.zip,CStreetArtVidevo_960x544_30fps_10bit_420.mp4,ed8c6b2eea48d6055b0b17bb8a8ee60f
+file://BVI-DVC.zip,CStreetDancerS1IRIS_960x544_24fps_10bit_420.mp4,4b2f25dbdb772d2c15b6509859a2de12
+file://BVI-DVC.zip,CStreetDancerS2IRIS_960x544_24fps_10bit_420.mp4,78009b030089f8312e6f5febff5f7440
+file://BVI-DVC.zip,CStreetDancerS3IRIS_960x544_24fps_10bit_420.mp4,51bce9ba8921d3a8d2a9c76d9b0149ef
+file://BVI-DVC.zip,CStreetDancerS4IRIS_960x544_24fps_10bit_420.mp4,874481084d123a746f4340ab0cfc90b7
+file://BVI-DVC.zip,CStreetDancerS5IRIS_960x544_24fps_10bit_420.mp4,b9f909ae1acbb3861f73c0534017e412
+file://BVI-DVC.zip,CStreetsOfIndiaS1Harmonics_960x544_60fps_10bit_420.mp4,201706614a153c58543357d1d9d797bf
+file://BVI-DVC.zip,CStreetsOfIndiaS2Harmonics_960x544_60fps_10bit_420.mp4,905a4cd47d11b405a5dc7242b5ab3c63
+file://BVI-DVC.zip,CStreetsOfIndiaS3Harmonics_960x544_60fps_10bit_420.mp4,e78f399fdb605b0ff82f958d194a7e9d
+file://BVI-DVC.zip,CTaiChiHongKongS1Videvo_960x544_25fps_10bit_420.mp4,ddb656822100e8c87e3b449681f03814
+file://BVI-DVC.zip,CTaiChiHongKongS2Videvo_960x544_25fps_10bit_420.mp4,f82dc96eb32d40b1cb66110fc22be67b
+file://BVI-DVC.zip,CTaipeiCityRooftops8Videvo_960x544_25fps_10bit_420.mp4,4d5c4182d5bd49edf16a9c97b1350868
+file://BVI-DVC.zip,CTaipeiCityRooftopsS1Videvo_960x544_25fps_10bit_420.mp4,5b3716f5d0f57c60a682405e158db65d
+file://BVI-DVC.zip,CTaipeiCityRooftopsS2Videvo_960x544_25fps_10bit_420.mp4,8edd4bea6270caf37bc62cc6606d1e3a
+file://BVI-DVC.zip,CTaksinBridgeVidevo_960x544_23fps_10bit_420.mp4,c1d253be1c993ab230c1ca81e7afc199
+file://BVI-DVC.zip,CTallBuildingsSJTU_960x544_60fps_10bit_420.mp4,12b413a502bf6ee9899d56a5ee333730
+file://BVI-DVC.zip,CTennisMCLV_960x544_24fps_10bit_420.mp4,8ae73fa2c19bd1a9df0f12109f42c1e2
+file://BVI-DVC.zip,CTouristsSatOutsideVidevo_960x544_25fps_10bit_420.mp4,8de834dd1888e77f608626f18bb17983
+file://BVI-DVC.zip,CToyCalendarHarmonics_960x544_60fps_10bit_420.mp4,225f10822efdb011dc3e2a62a95201fc
+file://BVI-DVC.zip,CTrackingDownHongKongSideVidevo_960x544_25fps_10bit_420.mp4,60f69735bce8d9eef26ddd9abd64d7d6
+file://BVI-DVC.zip,CTrackingPastRestaurantVidevo_960x544_25fps_10bit_420.mp4,c4a235cf2cdd2fa59e9c2f5268f74426
+file://BVI-DVC.zip,CTrackingPastStallHongKongVidevo_960x544_25fps_10bit_420.mp4,4d8a4c2b3818ca9397ea1d48f4fcd90c
+file://BVI-DVC.zip,CTraditionalIndonesianKecakVidevo_960x544_25fps_10bit_420.mp4,59b04a5d376c28151919739d8acf65de
+file://BVI-DVC.zip,CTrafficandBuildingSJTU_960x544_60fps_10bit_420.mp4,bc2b76d158a9d4b59ce151470964854e
+file://BVI-DVC.zip,CTrafficFlowSJTU_960x544_60fps_10bit_420.mp4,2a1520254aa314cedd0f618637975eb7
+file://BVI-DVC.zip,CTrafficonTasksinBridgeVidevo_960x544_25fps_10bit_420.mp4,44a1d6ce4bd1e8209cccbc090e4d7382
+file://BVI-DVC.zip,CTreeWillsBVITexture_960x544_120fps_10bit_420.mp4,17238cb03510d43e45c45f5a1e391886
+file://BVI-DVC.zip,CTruckIRIS_960x544_24fps_10bit_420.mp4,4b131f2f3aefde90c684ee4cf20e4e8e
+file://BVI-DVC.zip,CTunnelFlagS1Harmonics_960x544_60fps_10bit_420.mp4,9427bef82707eb71a3d1c48ce7a37a7c
+file://BVI-DVC.zip,CUnloadingVegetablesVidevo_960x544_25fps_10bit_420.mp4,03630aeb05a68b181df3e0633fcd1ff4
+file://BVI-DVC.zip,CVegetableMarketS1LIVENetFlix_960x544_30fps_10bit_420.mp4,7a28d97b02eb50207edb0368320aeccf
+file://BVI-DVC.zip,CVegetableMarketS2LIVENetFlix_960x544_30fps_10bit_420.mp4,3116f763b0ec5a1156ca37827b86b4e8
+file://BVI-DVC.zip,CVegetableMarketS3LIVENetFlix_960x544_30fps_10bit_420.mp4,4117e7890440884b4d7e7cf1666c0dbe
+file://BVI-DVC.zip,CVegetableMarketS4LIVENetFlix_960x544_30fps_10bit_420.mp4,de02948b58ab5eec025cdb38e2f489e2
+file://BVI-DVC.zip,CVeniceS1Harmonics_960x544_60fps_10bit_420.mp4,eb896c75149cea1be060ca7b0d3e605c
+file://BVI-DVC.zip,CVeniceS2Harmonics_960x544_60fps_10bit_420.mp4,e777971333148b7fff9b0b99f88032fb
+file://BVI-DVC.zip,CVeniceSceneIRIS_960x544_24fps_10bit_420.mp4,f25c688079df620361ccf8e14d2a5419
+file://BVI-DVC.zip,CWalkingDownKhaoStreetVidevo_960x544_25fps_10bit_420.mp4,fa7546290ed4cac00ea6cd5cd127cf78
+file://BVI-DVC.zip,CWalkingDownNorthRodeoVidevo_960x544_25fps_10bit_420.mp4,0561996b105026d3ae3a71d1ffd9b416
+file://BVI-DVC.zip,CWalkingThroughFootbridgeVidevo_960x544_25fps_10bit_420.mp4,7a2e5d8c3ac711608d7fdc6593109afa
+file://BVI-DVC.zip,CWatPhoTempleVidevo_960x544_50fps_10bit_420.mp4,4f84f435e21bb557f96257ffd70649c3
+file://BVI-DVC.zip,CWoodSJTU_960x544_60fps_10bit_420.mp4,e74197742c6a0ce632735e292ced20ad
+file://BVI-DVC.zip,CWovenVidevo_960x544_25fps_10bit_420.mp4,f83c66b855ee81279544cd55927ae5fc
+file://BVI-DVC.zip,DAdvertisingMassagesBangkokVidevo_480x272_25fps_10bit_420.mp4,01ac6cf75932b103a96555c0dcc0bd9b
+file://BVI-DVC.zip,DAmericanFootballS2Harmonics_480x272_60fps_10bit_420.mp4,ae52787fc0b6a835423b093d47e7e670
+file://BVI-DVC.zip,DAmericanFootballS3Harmonics_480x272_60fps_10bit_420.mp4,754684f18872afda8ca32f56cee3d04c
+file://BVI-DVC.zip,DAmericanFootballS4Harmonics_480x272_60fps_10bit_420.mp4,599c6ba952506adb3dd05d44a10ed7b1
+file://BVI-DVC.zip,DAnimalsS11Harmonics_480x272_60fps_10bit_420.mp4,d8d5418af5af5ad09ef65ea4b66bd8eb
+file://BVI-DVC.zip,DAnimalsS1Harmonics_480x272_60fps_10bit_420.mp4,e1b2d0c501616b63a233dc104e00e555
+file://BVI-DVC.zip,DBangkokMarketVidevo_480x272_25fps_10bit_420.mp4,13114896340935b8d62c07de092c010a
+file://BVI-DVC.zip,DBasketballGoalScoredS1Videvo_480x272_25fps_10bit_420.mp4,12ecc0a1f072d6853887129f6f3069d2
+file://BVI-DVC.zip,DBasketballGoalScoredS2Videvo_480x272_25fps_10bit_420.mp4,b36708260e9caf6b3835362563e14ac3
+file://BVI-DVC.zip,DBasketballS1YonseiUniversity_480x272_30fps_10bit_420.mp4,c5b2eac563e047b38be164f6d28b26e6
+file://BVI-DVC.zip,DBasketballS2YonseiUniversity_480x272_30fps_10bit_420.mp4,28b578a00f6ea4a5ce46b98b8cd2cb46
+file://BVI-DVC.zip,DBasketballS3YonseiUniversity_480x272_30fps_10bit_420.mp4,551a901748588c21876134eb077b6072
+file://BVI-DVC.zip,DBoatsChaoPhrayaRiverVidevo_480x272_23fps_10bit_420.mp4,2650a00fde306b34768f7e1c01e8ea47
+file://BVI-DVC.zip,DBobbleheadBVIHFR_480x272_120fps_10bit_420.mp4,3157265f945663c93c4cadb07f811def
+file://BVI-DVC.zip,DBookcaseBVITexture_480x272_120fps_10bit_420.mp4,ef5b0975522fefc8fb8a15b2fc0aa519
+file://BVI-DVC.zip,DBoxingPracticeHarmonics_480x272_60fps_10bit_420.mp4,d34abd9d69c36f20b3e6f5c0d710ea37
+file://BVI-DVC.zip,DBricksBushesStaticBVITexture_480x272_120fps_10bit_420.mp4,46dcc73ead679232eabbcb6802705b37
+file://BVI-DVC.zip,DBricksLeavesBVITexture_480x272_120fps_10bit_420.mp4,350eb206f33215ad64d67df7415aaebd
+file://BVI-DVC.zip,DBricksTiltingBVITexture_480x272_120fps_10bit_420.mp4,211306a95b93b9c306c6162fe8761d04
+file://BVI-DVC.zip,DBubblesPitcherS1BVITexture_480x272_120fps_10bit_420.mp4,31da30a7f09383572eeabde6a25b7089
+file://BVI-DVC.zip,DBuildingRoofS1IRIS_480x272_24fps_10bit_420.mp4,d60dfe949580e6f4748ff050bc1b93a0
+file://BVI-DVC.zip,DBuildingRoofS2IRIS_480x272_24fps_10bit_420.mp4,ee67f1070cbb4285989fffa9118d4bdf
+file://BVI-DVC.zip,DBuildingRoofS3IRIS_480x272_24fps_10bit_420.mp4,9485cf888b1174f8195e1a6aefe0443f
+file://BVI-DVC.zip,DBuildingRoofS4IRIS_480x272_24fps_10bit_420.mp4,500d38eb4aaebf525bc7028330da15cf
+file://BVI-DVC.zip,DBuntingHangingAcrossHongKongVidevo_480x272_25fps_10bit_420.mp4,97cc2e992977be4ef2ce127cf80b3e85
+file://BVI-DVC.zip,DBusyHongKongStreetVidevo_480x272_25fps_10bit_420.mp4,2858e873b6764215c5e4fe5fc2890380
+file://BVI-DVC.zip,DCalmingWaterBVITexture_480x272_120fps_10bit_420.mp4,7f7c6df40ec642a527c907f1f80bac5b
+file://BVI-DVC.zip,DCarpetPanAverageBVITexture_480x272_120fps_10bit_420.mp4,84709c83153cb8ca34eaa71f13166a0f
+file://BVI-DVC.zip,DCatchBVIHFR_480x272_120fps_10bit_420.mp4,8d7847d01596500abd5b4ef11f4d5883
+file://BVI-DVC.zip,DCeramicsandSpicesMoroccoVidevo_480x272_50fps_10bit_420.mp4,f348aed7fc2a0d45bab1b51625a57f97
+file://BVI-DVC.zip,DCharactersYonseiUniversity_480x272_30fps_10bit_420.mp4,a03c133ffe4b6060dacdfcf81d869f44
+file://BVI-DVC.zip,DChristmasPresentsIRIS_480x272_24fps_10bit_420.mp4,0a395f546bec64d5f2e216d9e50b21c0
+file://BVI-DVC.zip,DChristmasRoomDareful_480x272_29fps_10bit_420.mp4,24739d0cb4341e24d486ec72f0446fc7
+file://BVI-DVC.zip,DChurchInsideMCLJCV_480x272_30fps_10bit_420.mp4,482c725f5b1dfd9b7dbab9bda400f57b
+file://BVI-DVC.zip,DCityScapesS1IRIS_480x272_24fps_10bit_420.mp4,4bd4d021fbc6d9ff86e85425bd6ec029
+file://BVI-DVC.zip,DCityScapesS2IRIS_480x272_24fps_10bit_420.mp4,c89b6999db2a62573c10b65d7749e722
+file://BVI-DVC.zip,DCityScapesS3IRIS_480x272_24fps_10bit_420.mp4,ebdbf2bd738e4b412ac67ad5c10029ab
+file://BVI-DVC.zip,DCityStreetS1IRIS_480x272_24fps_10bit_420.mp4,62319787ef70eaa0b60205daabdf92c1
+file://BVI-DVC.zip,DCityStreetS3IRIS_480x272_24fps_10bit_420.mp4,0ed29accd5d660f5f434a1c6d239a52c
+file://BVI-DVC.zip,DCityStreetS4IRIS_480x272_24fps_10bit_420.mp4,c0ecc6dc8337af720e8ca20f52cbf640
+file://BVI-DVC.zip,DCityStreetS5IRIS_480x272_24fps_10bit_420.mp4,08a3f81036c901f118c5b46a618cf4ce
+file://BVI-DVC.zip,DCityStreetS6IRIS_480x272_24fps_10bit_420.mp4,c22a8da2aa480dbb4f72b54936bca220
+file://BVI-DVC.zip,DCityStreetS7IRIS_480x272_24fps_10bit_420.mp4,d120dc49e5a544e86d19baf268abcb1a
+file://BVI-DVC.zip,DCloseUpBasketballSceneVidevo_480x272_25fps_10bit_420.mp4,1ce6e247ec9ea42c1c0eca52c9c5955a
+file://BVI-DVC.zip,DCloudsStaticBVITexture_480x272_120fps_10bit_420.mp4,b90c45dcae29eb85a055d04e90422b0e
+file://BVI-DVC.zip,DColourfulDecorationWatPhoVidevo_480x272_50fps_10bit_420.mp4,12693b49a42d993b54fa34f17c347a7b
+file://BVI-DVC.zip,DColourfulKoreanLanternsVidevo_480x272_50fps_10bit_420.mp4,ef74c7e0ed7f6015ec48262492da721f
+file://BVI-DVC.zip,DColourfulPaperLanternsVidevo_480x272_50fps_10bit_420.mp4,32c193d3779ac7306384bc9224ca8ef5
+file://BVI-DVC.zip,DColourfulRugsMoroccoVidevo_480x272_50fps_10bit_420.mp4,c2bbd3b5f4d0bd862688d212e7c96d69
+file://BVI-DVC.zip,DConstructionS2YonseiUniversity_480x272_30fps_10bit_420.mp4,217ff86d952961f3920f8659ac421440
+file://BVI-DVC.zip,DCostaRicaS3Harmonics_480x272_60fps_10bit_420.mp4,a898a2e5a5c3f90c3fb27068729d31e3
+file://BVI-DVC.zip,DCrosswalkHarmonics_480x272_60fps_10bit_420.mp4,8fffd6a076d2f4c58ab9a89e519f419f
+file://BVI-DVC.zip,DCrosswalkHongKong2S1Videvo_480x272_25fps_10bit_420.mp4,685bc0db2dfc43e5670de2ba8cb2b65d
+file://BVI-DVC.zip,DCrosswalkHongKong2S2Videvo_480x272_25fps_10bit_420.mp4,81ef946bb20ed1a181da126c98e5783e
+file://BVI-DVC.zip,DCrosswalkHongKongVidevo_480x272_25fps_10bit_420.mp4,2c3c009e6e50f09b381d91766fa6b46d
+file://BVI-DVC.zip,DCrowdRunMCLV_480x272_25fps_10bit_420.mp4,0deab89f67139b8cc8b8d82e5ca88897
+file://BVI-DVC.zip,DCyclistS1BVIHFR_480x272_120fps_10bit_420.mp4,ea8441c521475eeac827f85efd9a3c89
+file://BVI-DVC.zip,DCyclistVeniceBeachBoardwalkVidevo_480x272_25fps_10bit_420.mp4,289c423a46a0d74f9a8b98d2ebfdcba5
+file://BVI-DVC.zip,DDollsScene1YonseiUniversity_480x272_30fps_10bit_420.mp4,ecff273f7e800dc97b3b107e71e18e46
+file://BVI-DVC.zip,DDollsScene2YonseiUniversity_480x272_30fps_10bit_420.mp4,797084e1ceb7a95b950b90f5611ca4a5
+file://BVI-DVC.zip,DDowntownHongKongVidevo_480x272_25fps_10bit_420.mp4,5c0fb5017a958d43c651987aa6094b23
+file://BVI-DVC.zip,DDrivingPOVHarmonics_480x272_60fps_10bit_420.mp4,141cf811312084cc7a93f406ae2cf94a
+file://BVI-DVC.zip,DDropsOnWaterBVITexture_480x272_120fps_10bit_420.mp4,8d2ed2bbdbc0a2f4f16f7968a4288847
+file://BVI-DVC.zip,DElFuenteMaskLIVENetFlix_480x272_24fps_10bit_420.mp4,f7c016e28af05ca9ca1e187c6b4f34ef
+file://BVI-DVC.zip,DEnteringHongKongStallS1Videvo_480x272_25fps_10bit_420.mp4,45bf5df23d92bb8031560b323f4b4e60
+file://BVI-DVC.zip,DEnteringHongKongStallS2Videvo_480x272_25fps_10bit_420.mp4,c6a528352879c839d9514fbac2141311
+file://BVI-DVC.zip,DFerrisWheelTurningVidevo_480x272_50fps_10bit_420.mp4,d55c9e7d1cdee9f29683a7183b711711
+file://BVI-DVC.zip,DFirewoodS1IRIS_480x272_24fps_10bit_420.mp4,9e6b024833ce499498edc85220c2f6a5
+file://BVI-DVC.zip,DFirewoodS2IRIS_480x272_25fps_10bit_420.mp4,9a6de1014cb66ce7ef1c41c0f7988d51
+file://BVI-DVC.zip,DFitnessIRIS_480x272_24fps_10bit_420.mp4,55c9c135fc5983e9d1f093fc666ad545
+file://BVI-DVC.zip,DFjordsS1Harmonics_480x272_60fps_10bit_420.mp4,f54b8fb253a954a2191bfa2874e1bd7f
+file://BVI-DVC.zip,DFlagShootTUMSVT_480x272_50fps_10bit_420.mp4,5a9660903855c2e864d95f5210b60975
+file://BVI-DVC.zip,DFlowerChapelS1IRIS_480x272_24fps_10bit_420.mp4,e07b76ae5e5b4596b4be934bfabfdc2a
+file://BVI-DVC.zip,DFlowerChapelS2IRIS_480x272_24fps_10bit_420.mp4,936e7874d98273952f9182a9cfcca24b
+file://BVI-DVC.zip,DFlyingCountrysideDareful_480x272_29fps_10bit_420.mp4,68fb51ff70408ca442a1ac2e7ff238ed
+file://BVI-DVC.zip,DFlyingMountainsDareful_480x272_29fps_10bit_420.mp4,13c4a7fdb079da838021c3ee150ec3b3
+file://BVI-DVC.zip,DFlyingThroughLAStreetVidevo_480x272_23fps_10bit_420.mp4,d987269b245f79eb6ad374f254f39c37
+file://BVI-DVC.zip,DFungusZoomBVITexture_480x272_120fps_10bit_420.mp4,0f4b4a93985808a7e67d7731bf570857
+file://BVI-DVC.zip,DGrassBVITexture_480x272_120fps_10bit_420.mp4,8e67d90c595dad87c2fa4047288ebe6a
+file://BVI-DVC.zip,DGrazTowerIRIS_480x272_24fps_10bit_420.mp4,a3021c9bb97ae6bbfafe04816be13e76
+file://BVI-DVC.zip,DHamsterBVIHFR_480x272_120fps_10bit_420.mp4,00915d8092595fb37e003151a8cc95e9
+file://BVI-DVC.zip,DHarleyDavidsonIRIS_480x272_24fps_10bit_420.mp4,58c780511881760462c08a95879e040e
+file://BVI-DVC.zip,DHongKongIslandVidevo_480x272_25fps_10bit_420.mp4,c889e70865e184912195cb10ebe3266c
+file://BVI-DVC.zip,DHongKongMarket1Videvo_480x272_25fps_10bit_420.mp4,13bdaee5f2bfd1d37d844dc07c61e840
+file://BVI-DVC.zip,DHongKongMarket2Videvo_480x272_25fps_10bit_420.mp4,c4d37110b192c482c8dfc9a862fd51d5
+file://BVI-DVC.zip,DHongKongMarket3S1Videvo_480x272_25fps_10bit_420.mp4,fdcd406588cb0d63de8dc9ed82e6494e
+file://BVI-DVC.zip,DHongKongMarket3S2Videvo_480x272_25fps_10bit_420.mp4,f1666193691587982e9c82ea1c3838fd
+file://BVI-DVC.zip,DHongKongMarket4S1Videvo_480x272_25fps_10bit_420.mp4,6629479ebc00c019cf17d53822e73c46
+file://BVI-DVC.zip,DHongKongMarket4S2Videvo_480x272_25fps_10bit_420.mp4,e225356c5332d91cebacbc2d4bfd8bd2
+file://BVI-DVC.zip,DHongKongS1Harmonics_480x272_60fps_10bit_420.mp4,4ebe123b298bc7819da8ebcea918c3a9
+file://BVI-DVC.zip,DHongKongS2Harmonics_480x272_60fps_10bit_420.mp4,34f6f5c135732611c362cedcb2d2452d
+file://BVI-DVC.zip,DHongKongS3Harmonics_480x272_60fps_10bit_420.mp4,e94d2382c583f11e07ee3a69cb5ab960
+file://BVI-DVC.zip,DHorseDrawnCarriagesVidevo_480x272_50fps_10bit_420.mp4,b98f88ae0dd4faed63afb7e6fb2240ce
+file://BVI-DVC.zip,DHorseStaringS1Videvo_480x272_50fps_10bit_420.mp4,87f329fb29fba379fdf3614b179694c7
+file://BVI-DVC.zip,DHorseStaringS2Videvo_480x272_50fps_10bit_420.mp4,fed9db228dbdadc21215600734a45c82
+file://BVI-DVC.zip,DJockeyHarmonics_480x272_120fps_10bit_420.mp4,501bdb94876c3207449df1dd440a60e7
+file://BVI-DVC.zip,DJoggersS1BVIHFR_480x272_120fps_10bit_420.mp4,d5820cf4c29b2265774737da40535169
+file://BVI-DVC.zip,DJoggersS2BVIHFR_480x272_120fps_10bit_420.mp4,f87235d5aa68ae8f6a5bf2bc07561a49
+file://BVI-DVC.zip,DKartingIRIS_480x272_24fps_10bit_420.mp4,3df874861439a7b458072e0d4e121c1f
+file://BVI-DVC.zip,DKoraDrumsVidevo_480x272_25fps_10bit_420.mp4,926cfbb175afbb9f37542258eec139de
+file://BVI-DVC.zip,DLakeYonseiUniversity_480x272_30fps_10bit_420.mp4,f7646ced715fff9b313d07368c6763e3
+file://BVI-DVC.zip,DLampLeavesBVITexture_480x272_120fps_10bit_420.mp4,a6105d1b3952c1bc93b1c4a1ed7ba407
+file://BVI-DVC.zip,DLaundryHangingOverHongKongVidevo_480x272_25fps_10bit_420.mp4,f8bceb5c234097ab411e2c194f59cb20
+file://BVI-DVC.zip,DLeaves1BVITexture_480x272_120fps_10bit_420.mp4,fcfeb47880bddd53c7de4b04524edcb5
+file://BVI-DVC.zip,DLeaves3BVITexture_480x272_120fps_10bit_420.mp4,4a571f75b2ec449022010a6eb9954b1c
+file://BVI-DVC.zip,DLowLevelShotAlongHongKongVidevo_480x272_25fps_10bit_420.mp4,fe76a8a5708b80705dea599c29e34fb5
+file://BVI-DVC.zip,DLungshanTempleS1Videvo_480x272_50fps_10bit_420.mp4,94ed1583e984a1818b5d01f95302da5c
+file://BVI-DVC.zip,DLungshanTempleS2Videvo_480x272_50fps_10bit_420.mp4,75d40cd82e354f5d4206cef42f9de054
+file://BVI-DVC.zip,DManMoTempleVidevo_480x272_25fps_10bit_420.mp4,416f2fcd98d630ef169cc9290499d170
+file://BVI-DVC.zip,DManStandinginProduceTruckVidevo_480x272_25fps_10bit_420.mp4,9fb3235f1b6124f21983cf3f480780ba
+file://BVI-DVC.zip,DManWalkingThroughBangkokVidevo_480x272_25fps_10bit_420.mp4,98aac7aa932b16b1d59548ab072b3c3a
+file://BVI-DVC.zip,DMaplesS1YonseiUniversity_480x272_30fps_10bit_420.mp4,d1c3657b42621f1133f5ac57cc88c8e3
+file://BVI-DVC.zip,DMaplesS2YonseiUniversity_480x272_30fps_10bit_420.mp4,b82b2d3d3efc4deeb23d2b06eb277fe0
+file://BVI-DVC.zip,DMirabellParkS1IRIS_480x272_24fps_10bit_420.mp4,9674fc7240986cd93403594f9d97070e
+file://BVI-DVC.zip,DMirabellParkS2IRIS_480x272_24fps_10bit_420.mp4,d0a4702e9be8e87f6311eaee05798147
+file://BVI-DVC.zip,DMobileHarmonics_480x272_60fps_10bit_420.mp4,84168e01a3fdbcfafc3a2f26e584f33a
+file://BVI-DVC.zip,DMoroccanCeramicsShopVidevo_480x272_50fps_10bit_420.mp4,7e51ed264886210c0013ea1987fbd283
+file://BVI-DVC.zip,DMoroccanSlippersVidevo_480x272_50fps_10bit_420.mp4,adb3f48734562051c85ee3113f462c1d
+file://BVI-DVC.zip,DMuralPaintingVidevo_480x272_25fps_10bit_420.mp4,31da61bf71646ba07d7faa38c117fdd6
+file://BVI-DVC.zip,DMyanmarS4Harmonics_480x272_60fps_10bit_420.mp4,313f14a790d3160262af21f0138e6b18
+file://BVI-DVC.zip,DMyanmarS6Harmonics_480x272_60fps_10bit_420.mp4,4c149cb45de41b5e201c5e6cfd7b5a1a
+file://BVI-DVC.zip,DMyeongDongVidevo_480x272_25fps_10bit_420.mp4,6b83b12ebe1b400e780025bcadce44f5
+file://BVI-DVC.zip,DNewYorkStreetDareful_480x272_30fps_10bit_420.mp4,36d82b4b8cd39c5ce3be4c9fa9631a95
+file://BVI-DVC.zip,DOrangeBuntingoverHongKongVidevo_480x272_25fps_10bit_420.mp4,460adad14ae241c935205113e8764c07
+file://BVI-DVC.zip,DPaintingTiltingBVITexture_480x272_120fps_10bit_420.mp4,63fc9a62f20c341204c50f361cd7e19a
+file://BVI-DVC.zip,DParkViolinMCLJCV_480x272_25fps_10bit_420.mp4,9b0c20d3a5fd1c67bc4ac1aa8f3c7294
+file://BVI-DVC.zip,DPedestriansSeoulatDawnVidevo_480x272_25fps_10bit_420.mp4,2979260fe16f2d19fac4ce1109b65425
+file://BVI-DVC.zip,DPeopleWalkingS1IRIS_480x272_24fps_10bit_420.mp4,22309c5b312a54db7ccd8dde554e39cf
+file://BVI-DVC.zip,DPersonRunningOutsideVidevo_480x272_50fps_10bit_420.mp4,5ced316509c3ac014f9f508c5ebddaa3
+file://BVI-DVC.zip,DPillowsTransBVITexture_480x272_120fps_10bit_420.mp4,f842add29df7be3bd005e60c63619255
+file://BVI-DVC.zip,DPlasmaFreeBVITexture_480x272_120fps_10bit_420.mp4,de77c10d92a86e74cc7192b435ca6205
+file://BVI-DVC.zip,DPresentsChristmasTreeDareful_480x272_29fps_10bit_420.mp4,3b34e9961d3adf99967b7b42e9052693
+file://BVI-DVC.zip,DResidentialBuildingSJTU_480x272_60fps_10bit_420.mp4,237f501f94ca040a7f3497dede47f393
+file://BVI-DVC.zip,DRunnersSJTU_480x272_60fps_10bit_420.mp4,5561d790faac6647e7ae63cd357b48dd
+file://BVI-DVC.zip,DRuralSetupIRIS_480x272_24fps_10bit_420.mp4,7e2d26d31d2a7c695535e8efe059e067
+file://BVI-DVC.zip,DRuralSetupS2IRIS_480x272_24fps_10bit_420.mp4,66dab561dc89844f82c15c7f7d8f07ff
+file://BVI-DVC.zip,DScarfSJTU_480x272_60fps_10bit_420.mp4,0304859a71492fc1800d3e1a78bf13c4
+file://BVI-DVC.zip,DSeasideWalkIRIS_480x272_24fps_10bit_420.mp4,3d1768d08ca111dc48a62814739d14df
+file://BVI-DVC.zip,DSeekingMCLV_480x272_25fps_10bit_420.mp4,3fcd746d8fbf944149b1f33992a40a2b
+file://BVI-DVC.zip,DSeoulCanalatDawnVidevo_480x272_25fps_10bit_420.mp4,3e0f5da7bb8341f8a12b28da65b52143
+file://BVI-DVC.zip,DShoppingCentreVidevo_480x272_25fps_10bit_420.mp4,7f997adc1ccb50a535e7f9971e02f1f4
+file://BVI-DVC.zip,DSignboardBoatLIVENetFlix_480x272_30fps_10bit_420.mp4,e91e29eab18c8e3ee486c3a8bedcbf1a
+file://BVI-DVC.zip,DSkyscraperBangkokVidevo_480x272_23fps_10bit_420.mp4,6bfa47bcc50f9f5ac00d242b16da52f6
+file://BVI-DVC.zip,DSmokeClearBVITexture_480x272_120fps_10bit_420.mp4,807a0ac702dd9f379e120930158bf8e6
+file://BVI-DVC.zip,DSparklerBVIHFR_480x272_120fps_10bit_420.mp4,ce5ab914b2e6ea28ac98429c83288f75
+file://BVI-DVC.zip,DSquareAndTimelapseHarmonics_480x272_60fps_10bit_420.mp4,b27bf9380a7a5a6879b13ec08ec46b34
+file://BVI-DVC.zip,DSquareS1IRIS_480x272_24fps_10bit_420.mp4,42135c7ffd528f229eeed2cab1b2f085
+file://BVI-DVC.zip,DSquareS2IRIS_480x272_24fps_10bit_420.mp4,131f5c736af20dfec5663ed2e09ba113
+file://BVI-DVC.zip,DStreetArtVidevo_480x272_30fps_10bit_420.mp4,d5870b1804cb2970f43c5c0be97dd956
+file://BVI-DVC.zip,DStreetDancerS1IRIS_480x272_24fps_10bit_420.mp4,e6d2f4b3fa4679e4d2d8d5abb2ceffd0
+file://BVI-DVC.zip,DStreetDancerS2IRIS_480x272_24fps_10bit_420.mp4,75253ff3ebf6e6d0daf3a27a72b89dcb
+file://BVI-DVC.zip,DStreetDancerS3IRIS_480x272_24fps_10bit_420.mp4,2c2ae9645959aee045d12eb75c6cdc50
+file://BVI-DVC.zip,DStreetDancerS4IRIS_480x272_24fps_10bit_420.mp4,b26f55b81a23766a6ffccf6013c0a61b
+file://BVI-DVC.zip,DStreetDancerS5IRIS_480x272_24fps_10bit_420.mp4,d920a72822407db5f575ead448b1b493
+file://BVI-DVC.zip,DStreetsOfIndiaS1Harmonics_480x272_60fps_10bit_420.mp4,6019a5be6118ada9182f9fddacd44731
+file://BVI-DVC.zip,DStreetsOfIndiaS2Harmonics_480x272_60fps_10bit_420.mp4,e46740063434eecb2fb0107ded79f697
+file://BVI-DVC.zip,DStreetsOfIndiaS3Harmonics_480x272_60fps_10bit_420.mp4,5e9baa82e926ce04c33145ef1b01acca
+file://BVI-DVC.zip,DTaiChiHongKongS1Videvo_480x272_25fps_10bit_420.mp4,76c58fbe982642c810616bbe2943c62d
+file://BVI-DVC.zip,DTaiChiHongKongS2Videvo_480x272_25fps_10bit_420.mp4,c7b4b67f38edd010a47a9ff8c562cb9d
+file://BVI-DVC.zip,DTaipeiCityRooftops8Videvo_480x272_25fps_10bit_420.mp4,a094f37ab299a8b29b65d8fd885807a7
+file://BVI-DVC.zip,DTaipeiCityRooftopsS1Videvo_480x272_25fps_10bit_420.mp4,d5c8d705f478bf7902a5b65a915bbd01
+file://BVI-DVC.zip,DTaipeiCityRooftopsS2Videvo_480x272_25fps_10bit_420.mp4,4332c23dd83d4709ab8ef5a70cded0fc
+file://BVI-DVC.zip,DTaksinBridgeVidevo_480x272_23fps_10bit_420.mp4,55d5e4901fcd19c5c81db1e6bb472303
+file://BVI-DVC.zip,DTallBuildingsSJTU_480x272_60fps_10bit_420.mp4,d88bb05081b0cd98e1ec6a9c043c95f5
+file://BVI-DVC.zip,DTennisMCLV_480x272_24fps_10bit_420.mp4,bded4df5a7fe573f5baaf67375c77649
+file://BVI-DVC.zip,DTouristsSatOutsideVidevo_480x272_25fps_10bit_420.mp4,a2d9495cee1a346f95c3410a2e9dc987
+file://BVI-DVC.zip,DToyCalendarHarmonics_480x272_60fps_10bit_420.mp4,82b3344b08164f77791310b300e347be
+file://BVI-DVC.zip,DTrackingDownHongKongSideVidevo_480x272_25fps_10bit_420.mp4,4f2c415c1f24e72dffd8e167b0907c53
+file://BVI-DVC.zip,DTrackingPastRestaurantVidevo_480x272_25fps_10bit_420.mp4,f78b675f12fde993dc73756cf0d857d2
+file://BVI-DVC.zip,DTrackingPastStallHongKongVidevo_480x272_25fps_10bit_420.mp4,e7343163eb47e0675079522012b2d308
+file://BVI-DVC.zip,DTraditionalIndonesianKecakVidevo_480x272_25fps_10bit_420.mp4,9e22204a6271c16d5f7f88dc38185219
+file://BVI-DVC.zip,DTrafficandBuildingSJTU_480x272_60fps_10bit_420.mp4,d5020da72b6062f48e651de827deb2c1
+file://BVI-DVC.zip,DTrafficFlowSJTU_480x272_60fps_10bit_420.mp4,4543c18804e932d8b0b5726a71ecdce2
+file://BVI-DVC.zip,DTrafficonTasksinBridgeVidevo_480x272_25fps_10bit_420.mp4,69592826ba3feb96197445b1605ed6f9
+file://BVI-DVC.zip,DTreeWillsBVITexture_480x272_120fps_10bit_420.mp4,cf038ed8c18a3ea4bf97ceb9e0a041f8
+file://BVI-DVC.zip,DTruckIRIS_480x272_24fps_10bit_420.mp4,27dee566423186505286f6f28ca19d1a
+file://BVI-DVC.zip,DTunnelFlagS1Harmonics_480x272_60fps_10bit_420.mp4,74a3eb4703add242c550e181b24e1020
+file://BVI-DVC.zip,DUnloadingVegetablesVidevo_480x272_25fps_10bit_420.mp4,319170fd8758820852a0d9a3dfcca25c
+file://BVI-DVC.zip,DVegetableMarketS1LIVENetFlix_480x272_30fps_10bit_420.mp4,0f3ad7b143e7c6e0112654a85f445542
+file://BVI-DVC.zip,DVegetableMarketS2LIVENetFlix_480x272_30fps_10bit_420.mp4,2ee73e02e748ec20cfff8f27ad5b3eef
+file://BVI-DVC.zip,DVegetableMarketS3LIVENetFlix_480x272_30fps_10bit_420.mp4,1f35095d73382a456f7619d98baf3b2b
+file://BVI-DVC.zip,DVegetableMarketS4LIVENetFlix_480x272_30fps_10bit_420.mp4,3c5784efdf6e0c0d1f1cffaf136222a4
+file://BVI-DVC.zip,DVeniceS1Harmonics_480x272_60fps_10bit_420.mp4,2c44db2099fdbf1b746ff8352fd01ecb
+file://BVI-DVC.zip,DVeniceS2Harmonics_480x272_60fps_10bit_420.mp4,5147e4828f2e26b38766983e844b8557
+file://BVI-DVC.zip,DVeniceSceneIRIS_480x272_24fps_10bit_420.mp4,ace6e45a39d56502bde046b68caa1897
+file://BVI-DVC.zip,DWalkingDownKhaoStreetVidevo_480x272_25fps_10bit_420.mp4,292142b9b9e1f1be587b53db8656cb54
+file://BVI-DVC.zip,DWalkingDownNorthRodeoVidevo_480x272_25fps_10bit_420.mp4,586ceabcf3ecaff7e391a9415240299d
+file://BVI-DVC.zip,DWalkingThroughFootbridgeVidevo_480x272_25fps_10bit_420.mp4,75cb6764c1db37f269930d73c2191f3d
+file://BVI-DVC.zip,DWatPhoTempleVidevo_480x272_50fps_10bit_420.mp4,08b1ee879be3a910725843b96afbbb2c
+file://BVI-DVC.zip,DWoodSJTU_480x272_60fps_10bit_420.mp4,1990305abeb082ab14f71499c4165ae3
+file://BVI-DVC.zip,DWovenVidevo_480x272_25fps_10bit_420.mp4,959b77c31e75297641f7032d17249bbc
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage0_prepare_yuv/mp42yuv_BVI-DVC.py b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage0_prepare_yuv/mp42yuv_BVI-DVC.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e32ac3efdfd682e6b4bdc7d5af044354148a828
--- /dev/null
+++ b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage0_prepare_yuv/mp42yuv_BVI-DVC.py
@@ -0,0 +1,77 @@
+"""
+/* The copyright in this software is being made available under the BSD
+* License, included below. This software may be subject to other third party
+* and contributor rights, including patent rights, and no such rights are
+* granted under this license.
+*
+* Copyright (c) 2010-2022, ITU/ISO/IEC
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+*
+*  * Redistributions of source code must retain the above copyright notice,
+*    this list of conditions and the following disclaimer.
+*  * Redistributions in binary form must reproduce the above copyright notice,
+*    this list of conditions and the following disclaimer in the documentation
+*    and/or other materials provided with the distribution.
+*  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+*    be used to endorse or promote products derived from this software without
+*    specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+* THE POSSIBILITY OF SUCH DAMAGE.
+"""
+
+import csv
+import sys
+import os
+import time
+from sys import platform
+import subprocess
+from subprocess import Popen, PIPE
+
+
+srcdir = '/proj/video_data4/videosim/ejacstr/extract/BVI-DVC_mp4/Videos/'
+destdir = '/proj/video_data4/videosim/ejacstr/extract/BVI-DVC_yuv/Videos/'
+#mycurrentdir = '/proj/video_data3/videosim/ejacstr/JVET/replication/loopNN/our_data_generation/jvet-v0115-training/tools/toshare/'    
+executablefile = "/proj/video_data4/videosim/eliudux/JVET_NN_EE/ab-ee1-1_5/training_EE115/stage0_prepare_yuv/ffmpeg-5.1.1-amd64-static/ffmpeg"
+
+csvdir =  './csv/'
+filecsv = csvdir + 'training-data.csv'
+seqnum = 0
+cnt = 0
+cntfound = 0
+
+with open(filecsv, newline='') as csvfile1:
+    reader = csv.DictReader(csvfile1)
+    
+    for row in reader:
+        videoname = row['Sequence'][0:-4]
+        if videoname[0:len(sys.argv[1])] == sys.argv[1] and row['Sequence'][-3:] == 'mp4':  
+            cnt += 1
+            for image_path in os.listdir(srcdir):                                        
+                input_path = srcdir + image_path
+                dest_path = destdir + videoname + '.yuv'
+                if videoname == image_path[0:-4]:
+                    cntfound +=1
+                    if os.path.exists(dest_path)==False: 
+                        print(dest_path)
+                        p1 = Popen([executablefile,  "-i", input_path, "-pix_fmt","yuv420p10le", dest_path], stdout=PIPE, stderr=PIPE)
+                        # Avoid having all decodings start at the same time (to not overload system).
+                        print('Waiting 20 seconds before starting next. (Remove this if your system can handle it.)')
+                        time.sleep(20)
+                    break
+            
+print('Complete checking {} seqs.'.format(cnt))
+print('We found {} seqs.'.format(cntfound))
+print('{} sequences not found.'.format(cnt - cntfound))
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage0_prepare_yuv/png2yuv_fullres.py b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage0_prepare_yuv/png2yuv_fullres.py
new file mode 100644
index 0000000000000000000000000000000000000000..67f099bee28d55648b322a14d3f34cdc441b1d7c
--- /dev/null
+++ b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage0_prepare_yuv/png2yuv_fullres.py
@@ -0,0 +1,87 @@
+"""
+/* The copyright in this software is being made available under the BSD
+* License, included below. This software may be subject to other third party
+* and contributor rights, including patent rights, and no such rights are
+* granted under this license.
+*
+* Copyright (c) 2010-2022, ITU/ISO/IEC
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+*
+*  * Redistributions of source code must retain the above copyright notice,
+*    this list of conditions and the following disclaimer.
+*  * Redistributions in binary form must reproduce the above copyright notice,
+*    this list of conditions and the following disclaimer in the documentation
+*    and/or other materials provided with the distribution.
+*  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+*    be used to endorse or promote products derived from this software without
+*    specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+* THE POSSIBILITY OF SUCH DAMAGE.
+"""
+
+import csv
+import sys
+import os
+from sys import platform
+import subprocess
+from subprocess import Popen, PIPE
+import PIL
+from PIL import Image
+
+### Please insert paths here: (End all paths with /)
+if 1:
+    srcdir = '/proj/video_no_backup/videosim/ejacstr/extract/DIV2K_test_yuv/DIV2K_train_HR/'
+    destdir = '/proj/video_no_backup/videosim/ejacstr/extract/DIV2K_test_yuv/DIV2K_train_HR_yuv/'
+    cropimgdir = '/proj/video_no_backup/videosim/ejacstr/extract/DIV2K_test_yuv/DIV2K_train_HR_crop/'
+    #executablefile = "/proj/video_data3/videosim/eliudux/myCurrentWork/training_data/script/ffmpeg"
+
+    ffmpeg_path = '/proj/video_data4/videosim/eliudux/JVET_NN_EE/ab-ee1-1_5/training_EE115/stage0_prepare_yuv/ffmpeg-5.1.1-amd64-static'
+    executablefile = ffmpeg_path+'/ffmpeg'
+
+
+seqnum = 0
+cnt = 0
+cntnotfound = 0
+        
+for image_path in os.listdir(srcdir):  
+    if image_path[-3:] == 'png':  
+        cnt += 1            
+        input_path = srcdir + image_path
+        videoname = image_path[0:-4]
+        img = Image.open(input_path)
+        wid, hgt = img.size
+        wid = wid // 8 * 8
+        hgt = hgt // 8 * 8
+        img_crop = img.crop((0,0,wid,hgt))
+        img_crop_path = cropimgdir + image_path
+        print('saving:', img_crop_path)
+        img_crop.save(img_crop_path,format='png')
+        dest_path = destdir + videoname +'_'+str(wid)+'x'+str(hgt)+'_'+'25fps_8bit_420.yuv'
+        #print(wid,hgt)
+        #print(img_crop_path)
+        print(dest_path)
+        #p1 = Popen([executablefile,  "-i", img_crop_path, "-pix_fmt","yuv420p10le", dest_path], stdout=PIPE, stderr=PIPE)
+        cmdffmpeg = [executablefile,  "-i", img_crop_path, "-pix_fmt","yuv420p", dest_path]
+        cmdffmpeg = " ".join(cmdffmpeg)
+        print(cmdffmpeg)
+        subprocess.call(cmdffmpeg, shell=True)        
+        #output, err = p1.communicate()
+        #print(output,err)
+        #break
+            
+
+            
+print('Complete converting {} PNG to YUV.'.format(cnt))
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage0_prepare_yuv/png2yuv_subsampled.py b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage0_prepare_yuv/png2yuv_subsampled.py
new file mode 100644
index 0000000000000000000000000000000000000000..d4f714a64f2e07a0fcd763caa3068c4fb0ac80b6
--- /dev/null
+++ b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage0_prepare_yuv/png2yuv_subsampled.py
@@ -0,0 +1,88 @@
+"""
+/* The copyright in this software is being made available under the BSD
+* License, included below. This software may be subject to other third party
+* and contributor rights, including patent rights, and no such rights are
+* granted under this license.
+*
+* Copyright (c) 2010-2022, ITU/ISO/IEC
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+*
+*  * Redistributions of source code must retain the above copyright notice,
+*    this list of conditions and the following disclaimer.
+*  * Redistributions in binary form must reproduce the above copyright notice,
+*    this list of conditions and the following disclaimer in the documentation
+*    and/or other materials provided with the distribution.
+*  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+*    be used to endorse or promote products derived from this software without
+*    specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+* THE POSSIBILITY OF SUCH DAMAGE.
+"""
+
+import csv
+import sys
+import os
+from sys import platform
+import subprocess
+from subprocess import Popen, PIPE
+import PIL
+from PIL import Image
+
+### Please insert paths here: (End all paths with /)
+if 1:
+    #srcdir = '/proj/video_data3/videosim/data/DIV2K/NTIRE_2017_Low_Res_Images/DIV2K_train_LR_bicubic/X2/'
+    srcdir = '/proj/video_no_backup/videosim/ejacstr/extract/DIV2K_test_yuv/DIV2K_train_LR_bicubic/X2/'
+    destdir = '/proj/video_no_backup/videosim/ejacstr/extract/DIV2K_test_yuv/X2_yuv/'    
+    cropimgdir = '/proj/video_no_backup/videosim/ejacstr/extract/DIV2K_test_yuv/X2_crop/'  
+    #executablefile = "/proj/video_data3/videosim/eliudux/myCurrentWork/training_data/script/ffmpeg"
+
+    ffmpeg_path = '/proj/video_data4/videosim/eliudux/JVET_NN_EE/ab-ee1-1_5/training_EE115/stage0_prepare_yuv/ffmpeg-5.1.1-amd64-static'
+    executablefile = ffmpeg_path+'/ffmpeg'
+
+
+seqnum = 0
+cnt = 0
+cntnotfound = 0
+        
+for image_path in os.listdir(srcdir):  
+    if image_path[-3:] == 'png':  
+        cnt += 1            
+        input_path = srcdir + image_path
+        videoname = image_path[0:-4]
+        img = Image.open(input_path)
+        wid, hgt = img.size
+        wid = wid // 8 * 8
+        hgt = hgt // 8 * 8
+        img_crop = img.crop((0,0,wid,hgt))
+        img_crop_path = cropimgdir + image_path
+        print('saving:', img_crop_path)
+        img_crop.save(img_crop_path,format='png')
+        dest_path = destdir + videoname +'_'+str(wid)+'x'+str(hgt)+'_'+'25fps_8bit_420.yuv'
+        #print(wid,hgt)
+        #print(img_crop_path)
+        print(dest_path)
+        #p1 = Popen([executablefile,  "-i", img_crop_path, "-pix_fmt","yuv420p10le", dest_path], stdout=PIPE, stderr=PIPE)
+        cmdffmpeg = [executablefile,  "-i", img_crop_path, "-pix_fmt","yuv420p", dest_path]
+        cmdffmpeg = " ".join(cmdffmpeg)
+        print(cmdffmpeg)
+        subprocess.call(cmdffmpeg, shell=True)        
+        #output, err = p1.communicate()
+        #print(output,err)
+        #break
+            
+
+            
+print('Complete converting {} PNG to YUV.'.format(cnt))
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage1_2_vtm_dec/extract_stage1_2.patch b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage1_2_vtm_dec/extract_stage1_2.patch
new file mode 100644
index 0000000000000000000000000000000000000000..395b00e360db54e221f748a5bd8f2e60003140eb
--- /dev/null
+++ b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage1_2_vtm_dec/extract_stage1_2.patch
@@ -0,0 +1,799 @@
+diff --git a/source/App/DecoderApp/DecApp.cpp b/source/App/DecoderApp/DecApp.cpp
+index 01ecd9e..e599882 100644
+--- a/source/App/DecoderApp/DecApp.cpp
++++ b/source/App/DecoderApp/DecApp.cpp
+@@ -323,6 +323,37 @@ uint32_t DecApp::decode()
+         if( ( m_cDecLib.getVPS() != nullptr && ( m_cDecLib.getVPS()->getMaxLayers() == 1 || xIsNaluWithinTargetOutputLayerIdSet( &nalu ) ) ) || m_cDecLib.getVPS() == nullptr )
+         {
+           m_cVideoIOYuvReconFile[nalu.m_nuhLayerId].open( reconFileName, true, m_outputBitDepth, m_outputBitDepth, bitDepths.recon ); // write mode
++#if DUMP_MORE_INFO
++          if ( !m_picsBeforeDbFileName.empty() || !m_bsInfoFileName.empty() )
++          {
++            CHECK(m_cDecLib.getVPS()->getMaxLayers() != 1, "dump more info does not 100% work in multi-layer case");
++          }
++
++          if (!m_picsBeforeDbFileName.empty())
++          {
++            m_cVideoIOYuvPicBeforeDb[nalu.m_nuhLayerId].open(m_picsBeforeDbFileName, true, m_outputBitDepth, m_outputBitDepth, bitDepths.recon); // write mode
++          }
++
++          if (!m_picsAfterDbFileName.empty())
++          {
++            m_cVideoIOYuvPicAfterDb[nalu.m_nuhLayerId].open(m_picsAfterDbFileName, true, m_outputBitDepth, m_outputBitDepth, bitDepths.recon); // write mode
++          }
++
++          if (!m_bsInfoFileName.empty())
++          {
++            m_cVideoIOYuvBsInfo[nalu.m_nuhLayerId].open(m_bsInfoFileName, true, m_outputBitDepth, m_outputBitDepth, bitDepths.recon); // write mode
++          }
++
++          if (!m_myPredFileName.empty())
++          {
++            m_cVideoIOYuvMyPred[nalu.m_nuhLayerId].open(m_myPredFileName, true, m_outputBitDepth, m_outputBitDepth, bitDepths.recon); // write mode
++          }
++
++          if (!m_myPartFileName.empty())
++          {
++            m_cVideoIOYuvMyPart[nalu.m_nuhLayerId].open(m_myPartFileName, true, m_outputBitDepth, m_outputBitDepth, bitDepths.recon); // write mode
++          }
++#endif
+         }
+       }
+       // update file bitdepth shift if recon bitdepth changed between sequences
+@@ -535,6 +566,47 @@ void DecApp::xDestroyDecLib()
+     }
+   }
+   
++#if DUMP_MORE_INFO
++  if (!m_picsBeforeDbFileName.empty())
++  {
++    for (auto& recFile : m_cVideoIOYuvPicBeforeDb)
++    {
++      recFile.second.close();
++    }
++  }
++  
++  if (!m_picsAfterDbFileName.empty())
++  {
++    for (auto& recFile : m_cVideoIOYuvPicAfterDb)
++    {
++      recFile.second.close();
++    }
++  }
++  
++  if (!m_bsInfoFileName.empty())
++  {
++    for (auto& recFile : m_cVideoIOYuvBsInfo)
++    {
++      recFile.second.close();
++    }
++  }
++  
++  if (!m_myPredFileName.empty())
++  {
++    for (auto& recFile : m_cVideoIOYuvMyPred)
++    {
++      recFile.second.close();
++    }
++  }
++  
++  if (!m_myPartFileName.empty())
++  {
++    for (auto& recFile : m_cVideoIOYuvMyPart)
++    {
++      recFile.second.close();
++    }
++  }
++#endif
+ 
+   // destroy decoder class
+   m_cDecLib.destroy();
+@@ -692,6 +764,69 @@ void DecApp::xWriteOutput( PicList* pcListPic, uint32_t tId )
+                                         conf.getWindowBottomOffset() * SPS::getWinUnitY( chromaFormatIDC ),
+                                         NUM_CHROMA_FORMAT, m_bClipOutputVideoToRec709Range );
+           }
++
++#if DUMP_MORE_INFO
++          if (!m_picsBeforeDbFileName.empty())
++          {
++            m_cVideoIOYuvPicBeforeDb[pcPic->layerId].write( pcPic->getPicBeforeDbBuf().get( COMPONENT_Y ).width, pcPic->getPicBeforeDbBuf().get( COMPONENT_Y ).height, pcPic->getPicBeforeDbBuf(),
++                                      m_outputColourSpaceConvert,
++                                      m_packedYUVMode,
++                                      conf.getWindowLeftOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                      conf.getWindowRightOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                      conf.getWindowTopOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                      conf.getWindowBottomOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                      NUM_CHROMA_FORMAT, m_bClipOutputVideoToRec709Range );
++          }
++
++          if (!m_picsAfterDbFileName.empty())
++          {
++            m_cVideoIOYuvPicAfterDb[pcPic->layerId].write( pcPic->getPicAfterDbBuf().get( COMPONENT_Y ).width, pcPic->getPicAfterDbBuf().get( COMPONENT_Y ).height, pcPic->getPicAfterDbBuf(),
++                                      m_outputColourSpaceConvert,
++                                      m_packedYUVMode,
++                                      conf.getWindowLeftOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                      conf.getWindowRightOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                      conf.getWindowTopOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                      conf.getWindowBottomOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                      NUM_CHROMA_FORMAT, m_bClipOutputVideoToRec709Range );
++          }
++          
++          if (!m_bsInfoFileName.empty())
++          {
++            m_cVideoIOYuvBsInfo[pcPic->layerId].write( pcPic->getBsMapBuf().get( COMPONENT_Y ).width, pcPic->getBsMapBuf().get( COMPONENT_Y ).height, pcPic->getBsMapBuf(),
++                                      m_outputColourSpaceConvert,
++                                      m_packedYUVMode,
++                                      conf.getWindowLeftOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                      conf.getWindowRightOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                      conf.getWindowTopOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                      conf.getWindowBottomOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                      NUM_CHROMA_FORMAT, m_bClipOutputVideoToRec709Range );
++          }
++          
++          if (!m_myPredFileName.empty())
++          {
++            m_cVideoIOYuvMyPred[pcPic->layerId].write( pcPic->getMyPredBuf().get( COMPONENT_Y ).width, pcPic->getMyPredBuf().get( COMPONENT_Y ).height, pcPic->getMyPredBuf(),
++                                      m_outputColourSpaceConvert,
++                                      m_packedYUVMode,
++                                      conf.getWindowLeftOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                      conf.getWindowRightOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                      conf.getWindowTopOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                      conf.getWindowBottomOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                      NUM_CHROMA_FORMAT, m_bClipOutputVideoToRec709Range );
++          }
++
++          if (!m_myPartFileName.empty())
++          {
++            m_cVideoIOYuvMyPart[pcPic->layerId].write( pcPic->getPartitionBuf().get( COMPONENT_Y ).width, pcPic->getPartitionBuf().get( COMPONENT_Y ).height, pcPic->getPartitionBuf(),
++                                      m_outputColourSpaceConvert,
++                                      m_packedYUVMode,
++                                      conf.getWindowLeftOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                      conf.getWindowRightOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                      conf.getWindowTopOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                      conf.getWindowBottomOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                      NUM_CHROMA_FORMAT, m_bClipOutputVideoToRec709Range );
++          }
++#endif
++
+         }
+         writeLineToOutputLog(pcPic);
+ 
+@@ -839,6 +974,70 @@ void DecApp::xFlushOutput( PicList* pcListPic, const int layerId )
+                                         conf.getWindowBottomOffset() * SPS::getWinUnitY( chromaFormatIDC ),
+                                         NUM_CHROMA_FORMAT, m_bClipOutputVideoToRec709Range );
+             }
++
++#if DUMP_MORE_INFO
++            if (!m_picsBeforeDbFileName.empty())
++            {
++              m_cVideoIOYuvPicBeforeDb[pcPic->layerId].write( pcPic->getPicBeforeDbBuf().get( COMPONENT_Y ).width, pcPic->getPicBeforeDbBuf().get( COMPONENT_Y ).height, pcPic->getPicBeforeDbBuf(),
++                                        m_outputColourSpaceConvert,
++                                        m_packedYUVMode,
++                                        conf.getWindowLeftOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                        conf.getWindowRightOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                        conf.getWindowTopOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                        conf.getWindowBottomOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                        NUM_CHROMA_FORMAT, m_bClipOutputVideoToRec709Range );
++            }
++
++            if (!m_picsAfterDbFileName.empty())
++            {
++              m_cVideoIOYuvPicAfterDb[pcPic->layerId].write( pcPic->getPicAfterDbBuf().get( COMPONENT_Y ).width, pcPic->getPicAfterDbBuf().get( COMPONENT_Y ).height, pcPic->getPicAfterDbBuf(),
++                                        m_outputColourSpaceConvert,
++                                        m_packedYUVMode,
++                                        conf.getWindowLeftOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                        conf.getWindowRightOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                        conf.getWindowTopOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                        conf.getWindowBottomOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                        NUM_CHROMA_FORMAT, m_bClipOutputVideoToRec709Range );
++            }
++            
++            if (!m_bsInfoFileName.empty())
++            {
++              m_cVideoIOYuvBsInfo[pcPic->layerId].write( pcPic->getBsMapBuf().get( COMPONENT_Y ).width, pcPic->getBsMapBuf().get( COMPONENT_Y ).height, pcPic->getBsMapBuf(),
++                                        m_outputColourSpaceConvert,
++                                        m_packedYUVMode,
++                                        conf.getWindowLeftOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                        conf.getWindowRightOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                        conf.getWindowTopOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                        conf.getWindowBottomOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                        NUM_CHROMA_FORMAT, m_bClipOutputVideoToRec709Range );
++            }
++
++            if (!m_myPredFileName.empty())
++            {
++              m_cVideoIOYuvMyPred[pcPic->layerId].write( pcPic->getMyPredBuf().get( COMPONENT_Y ).width, pcPic->getMyPredBuf().get( COMPONENT_Y ).height, pcPic->getMyPredBuf(),
++                                        m_outputColourSpaceConvert,
++                                        m_packedYUVMode,
++                                        conf.getWindowLeftOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                        conf.getWindowRightOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                        conf.getWindowTopOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                        conf.getWindowBottomOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                        NUM_CHROMA_FORMAT, m_bClipOutputVideoToRec709Range );
++            }
++
++            if (!m_myPartFileName.empty())
++            {
++              m_cVideoIOYuvMyPart[pcPic->layerId].write( pcPic->getPartitionBuf().get( COMPONENT_Y ).width, pcPic->getPartitionBuf().get( COMPONENT_Y ).height, pcPic->getPartitionBuf(),
++                                        m_outputColourSpaceConvert,
++                                        m_packedYUVMode,
++                                        conf.getWindowLeftOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                        conf.getWindowRightOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                        conf.getWindowTopOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                        conf.getWindowBottomOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                        NUM_CHROMA_FORMAT, m_bClipOutputVideoToRec709Range );
++            }
++
++#endif
++
+           }
+           writeLineToOutputLog(pcPic);
+ #if JVET_S0078_NOOUTPUTPRIORPICFLAG
+diff --git a/source/App/DecoderApp/DecApp.h b/source/App/DecoderApp/DecApp.h
+index 11f88ed..1715c40 100644
+--- a/source/App/DecoderApp/DecApp.h
++++ b/source/App/DecoderApp/DecApp.h
+@@ -61,6 +61,13 @@ private:
+   // class interface
+   DecLib          m_cDecLib;                     ///< decoder class
+   std::unordered_map<int, VideoIOYuv>      m_cVideoIOYuvReconFile;        ///< reconstruction YUV class
++#if DUMP_MORE_INFO
++  std::unordered_map<int, VideoIOYuv>      m_cVideoIOYuvPicBeforeDb;        ///< pictures before deblocking
++  std::unordered_map<int, VideoIOYuv>      m_cVideoIOYuvPicAfterDb;         ///< pictures after deblocking
++  std::unordered_map<int, VideoIOYuv>      m_cVideoIOYuvBsInfo;             ///< pictures with bs info
++  std::unordered_map<int, VideoIOYuv>      m_cVideoIOYuvMyPred;             ///< prediction pictures
++  std::unordered_map<int, VideoIOYuv>      m_cVideoIOYuvMyPart;             ///< average over partitioning
++#endif
+ 
+   // for output control
+   int             m_iPOCLastDisplay;              ///< last POC in display order
+diff --git a/source/App/DecoderApp/DecAppCfg.cpp b/source/App/DecoderApp/DecAppCfg.cpp
+index d96c204..be42dc2 100644
+--- a/source/App/DecoderApp/DecAppCfg.cpp
++++ b/source/App/DecoderApp/DecAppCfg.cpp
+@@ -77,6 +77,14 @@ bool DecAppCfg::parseCfg( int argc, char* argv[] )
+   ("BitstreamFile,b",           m_bitstreamFileName,                   string(""), "bitstream input file name")
+   ("ReconFile,o",               m_reconFileName,                       string(""), "reconstructed YUV output file name\n")
+ 
++#if DUMP_MORE_INFO
++  ("PicBeforeDb,-pbd",             m_picsBeforeDbFileName,  string(""), "pictures before deblocking filter YUV output file name\n")
++  ("PicAfterDb,-pad",              m_picsAfterDbFileName,   string(""), "pictures after deblocking filter YUV output file name\n")
++  ("BsInfo,-bs",                   m_bsInfoFileName,        string(""), "boundary strength information YUV output file name\n")
++  ("myPred,-mpr",                  m_myPredFileName,        string(""), "prediction pictures YUV output file name\n")
++  ("myPart,-mpa",                  m_myPartFileName,        string(""), "average over partitioning YUV output file name\n")
++#endif
++
+   ("OplFile,-opl",              m_oplFilename ,                        string(""), "opl-file name without extension for conformance testing\n")
+ 
+ #if ENABLE_SIMD_OPT
+@@ -249,6 +257,13 @@ bool DecAppCfg::parseCfg( int argc, char* argv[] )
+ DecAppCfg::DecAppCfg()
+ : m_bitstreamFileName()
+ , m_reconFileName()
++#if DUMP_MORE_INFO
++, m_picsBeforeDbFileName()
++, m_picsAfterDbFileName()
++, m_bsInfoFileName()
++, m_myPredFileName()
++, m_myPartFileName()
++#endif
+ , m_oplFilename()
+ 
+ , m_iSkipFrame(0)
+diff --git a/source/App/DecoderApp/DecAppCfg.h b/source/App/DecoderApp/DecAppCfg.h
+index ba7c033..1bddfb0 100644
+--- a/source/App/DecoderApp/DecAppCfg.h
++++ b/source/App/DecoderApp/DecAppCfg.h
+@@ -58,6 +58,13 @@ class DecAppCfg
+ protected:
+   std::string   m_bitstreamFileName;                    ///< input bitstream file name
+   std::string   m_reconFileName;                        ///< output reconstruction file name
++#if DUMP_MORE_INFO
++  std::string   m_picsBeforeDbFileName;
++  std::string   m_picsAfterDbFileName;
++  std::string   m_bsInfoFileName;
++  std::string   m_myPredFileName;
++  std::string   m_myPartFileName;
++#endif
+ 
+   std::string   m_oplFilename;                        ///< filename to output conformance log.
+ 
+diff --git a/source/Lib/CommonLib/CNNFilter.cpp b/source/Lib/CommonLib/CNNFilter.cpp
+index 9e2b2e5..216820b 100644
+--- a/source/Lib/CommonLib/CNNFilter.cpp
++++ b/source/Lib/CommonLib/CNNFilter.cpp
+@@ -22,6 +22,8 @@
+ //! \ingroup CommonLib
+ //! \{
+ 
++#if CNN_FILTERING
++
+ #define CNNLFCtx(c) SubCtx( Ctx::cnnlfModelIdx, c )
+ 
+ CNNFilter::CNNFilter()
+@@ -37,9 +39,11 @@ void CNNFilter::initISlice(int qp)
+   at::set_num_threads(1);
+   at::set_num_interop_threads(1);
+   
+-  std::string sLumaModelName = "path to models/JVET_W_EE_1.6_LumaCNNFilter_IntraSlice.pt";
+-  std::string sChromaModelName = "path to models/JVET_W_EE_1.6_ChromaCNNFilter_IntraSlice.pt";
+-  
++  //std::string sLumaModelName = "/Users/ejacstr/Documents/Code/loopNN/ExtractMoreInfoVTM/models/JVET_W_EE_1.6_LumaCNNFilter_IntraSlice.pt";
++  //std::string sChromaModelName = "/Users/ejacstr/Documents/Code/loopNN/ExtractMoreInfoVTM/models/JVET_W_EE_1.6_ChromaCNNFilter_IntraSlice.pt";
++  std::string sLumaModelName = "/proj/video_data3/videosim/ejacstr/JVET/replication/loopNN/ExtractMoreInfoVTM/models/JVET_W_EE_1.6_LumaCNNFilter_IntraSlice.pt";
++  std::string sChromaModelName = "/proj/video_data3/videosim/ejacstr/JVET/replication/loopNN/ExtractMoreInfoVTM/models/JVET_W_EE_1.6_ChromaCNNFilter_IntraSlice.pt";
++    
+   m_lumaModuleISlice = torch::jit::load(sLumaModelName);
+   m_chromaModuleISlice = torch::jit::load(sChromaModelName);
+   
+@@ -50,9 +54,10 @@ void CNNFilter::initBSlice(int qp)
+ {
+   if (m_modelInitFlagBSlice)
+     return;
+-  
+-  std::string sLumaModelName = "path to models/JVET_W_EE_1.6_LumaCNNFilter_InterSlice.pt";
+-  std::string sChromaModelName = "path to models/JVET_W_EE_1.6_ChromaCNNFilter_InterSlice.pt";
++  //std::string sLumaModelName = "/Users/ejacstr/Documents/Code/loopNN/ExtractMoreInfoVTM/models/JVET_W_EE_1.6_LumaCNNFilter_InterSlice.pt";
++  //std::string sChromaModelName = "/Users/ejacstr/Documents/Code/loopNN/ExtractMoreInfoVTM/models/JVET_W_EE_1.6_ChromaCNNFilter_InterSlice.pt";
++  std::string sLumaModelName = "/proj/video_data3/videosim/ejacstr/JVET/replication/loopNN/ExtractMoreInfoVTM/models/JVET_W_EE_1.6_LumaCNNFilter_InterSlice.pt";
++  std::string sChromaModelName = "/proj/video_data3/videosim/ejacstr/JVET/replication/loopNN/ExtractMoreInfoVTM/models/JVET_W_EE_1.6_ChromaCNNFilter_InterSlice.pt";
+ 
+   m_lumaModuleBSlice = torch::jit::load(sLumaModelName);
+   m_chromaModuleBSlice = torch::jit::load(sChromaModelName);
+@@ -124,13 +129,13 @@ void CNNFilter::cnnFilterLumaBlockBSlice(Picture* pic, UnitArea ctuArea, int ext
+   input.push_back(bsMapBatch);
+   input.push_back(qpBatch);
+   int idx = 0;
+-  int blockSize = blockSizeVer * blockSizeHor;
++  //int blockSize = blockSizeVer * blockSizeHor;
+   torch::NoGradGuard no_grad_guard;
+   torch::globalContext().setFlushDenormal(true);
+   //at::init_num_threads();
+   
+   int seqQp = pic->slices[0]->getPPS()->getPicInitQPMinus26() + 26;
+-  int sliceQp = pic->slices[0]->getSliceQp();
++  //int sliceQp = pic->slices[0]->getSliceQp();
+   int qp = modelIdx == 2 ? seqQp - 10 : (modelIdx == 1 ? seqQp - 5 : seqQp);
+   if (pic->slices[0]->getTLayer() >= 4 && modelIdx == 2)
+     qp = seqQp + 5;
+@@ -150,7 +155,7 @@ void CNNFilter::cnnFilterLumaBlockBSlice(Picture* pic, UnitArea ctuArea, int ext
+     }
+   }
+   // Execute the model and turn its output into a tensor.
+-
++#if DO_ACTUAL_CNN_FILTERING
+   at::Tensor output;
+   output = m_lumaModuleBSlice.forward(input).toTensor();
+   float *pOutput = output.data_ptr<float>();
+@@ -179,7 +184,7 @@ void CNNFilter::cnnFilterLumaBlockBSlice(Picture* pic, UnitArea ctuArea, int ext
+     pScaledDst[idx]     = Pel(Clip3<int>(0, 1023 << NN_RESIDUE_ADDITIONAL_SHIFT, int(pOutput[pixelIdx] * maxValue * (1 << NN_RESIDUE_ADDITIONAL_SHIFT) + 0.5)));
+ #endif  
+   }
+-
++#endif
+ }
+ 
+ 
+@@ -235,7 +240,7 @@ void CNNFilter::cnnFilterChromaBlockBSlice(Picture* pic, UnitArea ctuArea, int e
+   input.push_back(bsMapBatchC);
+   input.push_back(qpBatch);
+   int idx = 0;
+-  int blockSizeC = blockSizeVerC * blockSizeHorC;
++  //int blockSizeC = blockSizeVerC * blockSizeHorC;
+   torch::NoGradGuard no_grad_guard;
+   torch::globalContext().setFlushDenormal(true);
+   //at::init_num_threads();
+@@ -269,6 +274,7 @@ void CNNFilter::cnnFilterChromaBlockBSlice(Picture* pic, UnitArea ctuArea, int e
+       pBsMapBatchC[blockSizeHorC*blockSizeVerC+yy*blockSizeHorC+xx] = pBsMapCr[idx] / maxValue;
+     }
+   }
++#if DO_ACTUAL_CNN_FILTERING
+   // Execute the model and turn its output into a tensor.
+   at::Tensor output = m_chromaModuleBSlice.forward(input).toTensor();
+   float *pOutput = output.data_ptr<float>();
+@@ -317,7 +323,7 @@ void CNNFilter::cnnFilterChromaBlockBSlice(Picture* pic, UnitArea ctuArea, int e
+     pScaledDstCr[idx] = Pel(Clip3<int>(0, 1023 << NN_RESIDUE_ADDITIONAL_SHIFT, int(pOutput[pixelIdx + blockSizeC] * maxValue * (1 << NN_RESIDUE_ADDITIONAL_SHIFT) + 0.5)));
+ #endif
+   }
+-
++#endif
+ }
+ void CNNFilter::cnnFilterLumaBlockISlice(Picture* pic, UnitArea ctuArea, int extLeft, int extRight, int extTop, int extBottom, int modelIdx)
+ {
+@@ -337,6 +343,7 @@ void CNNFilter::cnnFilterLumaBlockISlice(Picture* pic, UnitArea ctuArea, int ext
+   Pel* pPred = bufPred.buf;
+   int blockSizeHor = ctuArea.lwidth();
+   int blockSizeVer = ctuArea.lheight();
++  
+   double maxValue = 1023;
+   // Deserialize the ScriptModule from a file using torch::jit::load().
+   torch::Tensor imageBatch = torch::ones({1, 1, blockSizeVer, blockSizeHor});
+@@ -357,7 +364,7 @@ void CNNFilter::cnnFilterLumaBlockISlice(Picture* pic, UnitArea ctuArea, int ext
+   input.push_back(bsMapBatch);
+   input.push_back(qpBatch);
+   int idx = 0;
+-  int blockSize = blockSizeVer * blockSizeHor;
++  //int blockSize = blockSizeVer * blockSizeHor;
+   torch::NoGradGuard no_grad_guard;
+   torch::globalContext().setFlushDenormal(true);
+   //at::init_num_threads();
+@@ -379,6 +386,7 @@ void CNNFilter::cnnFilterLumaBlockISlice(Picture* pic, UnitArea ctuArea, int ext
+     }
+   }
+   // Execute the model and turn its output into a tensor.
++#if DO_ACTUAL_CNN_FILTERING
+   at::Tensor output = m_lumaModuleISlice.forward(input).toTensor();
+   float *pOutput = output.data_ptr<float>();
+   //  std::cout << output.slice(/*dim=*/1, /*start=*/240, /*end=*/480) << '\n';
+@@ -406,6 +414,7 @@ void CNNFilter::cnnFilterLumaBlockISlice(Picture* pic, UnitArea ctuArea, int ext
+     pScaledDst[idx] = Pel(Clip3<int>(0, 1023 << NN_RESIDUE_ADDITIONAL_SHIFT, int(pOutput[pixelIdx] * maxValue * (1 << NN_RESIDUE_ADDITIONAL_SHIFT) + 0.5)));
+ #endif  
+   }
++#endif
+ }
+ 
+ 
+@@ -469,7 +478,7 @@ void CNNFilter::cnnFilterChromaBlockISlice(Picture* pic, UnitArea ctuArea, int e
+   input.push_back(bsMapBatchC);
+   input.push_back(qpBatch);
+   int idx = 0;
+-  int blockSizeC = blockSizeVerC * blockSizeHorC;
++  //int blockSizeC = blockSizeVerC * blockSizeHorC;
+   torch::NoGradGuard no_grad_guard;
+   torch::globalContext().setFlushDenormal(true);
+   //at::init_num_threads();
+@@ -502,6 +511,7 @@ void CNNFilter::cnnFilterChromaBlockISlice(Picture* pic, UnitArea ctuArea, int e
+       pPredBatchC[blockSizeHorC*blockSizeVerC+yy*blockSizeHorC+xx] = pPredCr[idx] / maxValue;
+     }
+   }
++#if DO_ACTUAL_CNN_FILTERING
+   // Execute the model and turn its output into a tensor.
+   at::Tensor output = m_chromaModuleISlice.forward(input).toTensor();
+   float *pOutput = output.data_ptr<float>();
+@@ -549,6 +559,7 @@ void CNNFilter::cnnFilterChromaBlockISlice(Picture* pic, UnitArea ctuArea, int e
+     pScaledDstCr[idx] = Pel(Clip3<int>(0, 1023 << NN_RESIDUE_ADDITIONAL_SHIFT, int(pOutput[pixelIdx + blockSizeC] * maxValue * (1 << NN_RESIDUE_ADDITIONAL_SHIFT) + 0.5)));
+ #endif
+   }
++#endif
+ }
+ 
+ void CNNFilter::cnnFilter(Picture* pic)
+@@ -655,14 +666,14 @@ void CNNFilter::cnnFilter(Picture* pic)
+ void CNNFilter::scaleResidualBlock(Picture *pic, UnitArea ctuArea, int modelIdx, ComponentID compID)
+ {
+ 
+-  CodingStructure &cs         = *pic->cs;
+-  Slice *          pcSlice    = cs.slice;
++  //CodingStructure &cs         = *pic->cs;
++  //Slice *          pcSlice    = cs.slice;
+ 
+-  const int scale  = pcSlice->getNnScale(modelIdx, compID);
++  //const int scale  = pcSlice->getNnScale(modelIdx, compID);
+ 
+-  const int shift  = NN_RESIDUE_SCALE_SHIFT + NN_RESIDUE_ADDITIONAL_SHIFT;
++  //const int shift  = NN_RESIDUE_SCALE_SHIFT + NN_RESIDUE_ADDITIONAL_SHIFT;
+ 
+-  const int offset    = (1 << shift) / 2;
++  //const int offset    = (1 << shift) / 2;
+ 
+   PelBuf bufRec    = pic->getRecoBuf(ctuArea).get(compID);
+ 
+@@ -693,9 +704,14 @@ void CNNFilter::scaleResidualBlock(Picture *pic, UnitArea ctuArea, int modelIdx,
+ 
+     idxDst       = yy * strideDst + xx;
+     idxRec       = yy * strideRec + xx;
++#if DO_ACTUAL_CNN_FILTERING
+     pScaledDst[idxDst] = Clip3(0, 1023, pRec[idxRec] + (((pScaledDst[idxDst] - (pRec[idxRec] << NN_RESIDUE_ADDITIONAL_SHIFT)) * scale + offset) >> shift));
++#else
++    pScaledDst[idxDst] = pRec[idxRec];
++#endif
+   }
+ }
+ #endif
+ 
++#endif
+ //! \}
+diff --git a/source/Lib/CommonLib/CodingStructure.h b/source/Lib/CommonLib/CodingStructure.h
+index e0821ce..05c11ab 100644
+--- a/source/Lib/CommonLib/CodingStructure.h
++++ b/source/Lib/CommonLib/CodingStructure.h
+@@ -67,6 +67,12 @@ enum PictureType
+   PIC_BS_MAP,
+   PIC_PREDICTION_CUSTOM,
+ #endif
++#if DUMP_MORE_INFO
++  PIC_BEFORE_DEBLOCK,
++  PIC_AFTER_DEBLOCK,
++  PIC_MY_PRED_BUF,
++//  PIC_BS_INFO,
++#endif
+   NUM_PIC_TYPES
+ };
+ extern XUCache g_globalUnitCache;
+diff --git a/source/Lib/CommonLib/LoopFilter.cpp b/source/Lib/CommonLib/LoopFilter.cpp
+index a317871..d7906cc 100644
+--- a/source/Lib/CommonLib/LoopFilter.cpp
++++ b/source/Lib/CommonLib/LoopFilter.cpp
+@@ -451,21 +451,32 @@ void LoopFilter::xDeblockCU( CodingUnit& cu, const DeblockEdgeDir edgeDir )
+         if(cu.treeType != TREE_C)
+         {
+           bS |= xGetBoundaryStrengthSingle( cu, edgeDir, localPos, CHANNEL_TYPE_LUMA );
+-#if CNN_FILTERING
++#if CNN_FILTERING && !DUMP_MORE_INFO
+           if(cu.blocks[COMPONENT_Y].valid())
+             storeBS(bS, Position(area.x + x, area.y + y), COMPONENT_Y);
+ #endif
++#if CNN_FILTERING && DUMP_MORE_INFO
++          {
++            storeBS(bS, Position(area.x + x, area.y + y), COMPONENT_Y);
++          }
++#endif
+         }
+         if(cu.treeType != TREE_L && cu.chromaFormat != CHROMA_400 && cu.blocks[COMPONENT_Cb].valid())
+         {
+           bS |= xGetBoundaryStrengthSingle( cu, edgeDir, localPos, CHANNEL_TYPE_CHROMA );
+-#if CNN_FILTERING
++#if CNN_FILTERING && !DUMP_MORE_INFO
+           if ( pcv.chrFormat != CHROMA_400 && cu.blocks[COMPONENT_Cb].valid() )
+           {
+             storeBS(bS, Position(area.x + x, area.y + y), COMPONENT_Cb);
+             storeBS(bS, Position(area.x + x, area.y + y), COMPONENT_Cr);
+           }
+ #endif
++#if CNN_FILTERING && DUMP_MORE_INFO
++          {
++            storeBS(bS, Position(area.x + x, area.y + y), COMPONENT_Cb);
++            storeBS(bS, Position(area.x + x, area.y + y), COMPONENT_Cr);
++          }
++#endif
+         }
+         m_aapucBS[edgeDir][rasterIdx] = bS;
+       }
+@@ -473,7 +484,7 @@ void LoopFilter::xDeblockCU( CodingUnit& cu, const DeblockEdgeDir edgeDir )
+   }
+ 
+ 
+-#if !CNN_FILTERING
++#if !CNN_FILTERING || !DO_ACTUAL_CNN_FILTERING
+   std::sort( edgeIdx.begin(), edgeIdx.end() );
+   int prevEdgeIdx = -1;
+   for ( const int& edge : edgeIdx )
+diff --git a/source/Lib/CommonLib/Picture.cpp b/source/Lib/CommonLib/Picture.cpp
+index 749e508..8ba784c 100644
+--- a/source/Lib/CommonLib/Picture.cpp
++++ b/source/Lib/CommonLib/Picture.cpp
+@@ -212,6 +212,13 @@ void Picture::create( const ChromaFormat &_chromaFormat, const Size &size, const
+   M_BUFS( 0, PIC_BS_MAP ).create( _chromaFormat, a, _maxCUSize, margin, MEMORY_ALIGN_DEF_SIZE );
+ #endif
+   
++#if DUMP_MORE_INFO
++  M_BUFS( 0, PIC_BEFORE_DEBLOCK ).create( _chromaFormat, a, _maxCUSize, margin, MEMORY_ALIGN_DEF_SIZE );
++  M_BUFS( 0, PIC_AFTER_DEBLOCK ).create( _chromaFormat, a, _maxCUSize, margin, MEMORY_ALIGN_DEF_SIZE );
++  M_BUFS( 0, PIC_MY_PRED_BUF ).create( _chromaFormat, a, _maxCUSize, margin, MEMORY_ALIGN_DEF_SIZE );
++//  M_BUFS( 0, PIC_BS_INFO        ).create( _chromaFormat, a, _maxCUSize, margin, MEMORY_ALIGN_DEF_SIZE );
++#endif
++  
+   if( !_decoder )
+   {
+     M_BUFS( 0, PIC_ORIGINAL ).    create( _chromaFormat, a );
+@@ -387,6 +394,29 @@ const CPelBuf     Picture::getPredBufCustom(const CompArea &blk)  const { return
+ const CPelUnitBuf Picture::getPredBufCustom(const UnitArea &unit) const { return getBuf(unit, PIC_PREDICTION_CUSTOM);}
+ #endif
+ 
++#if DUMP_MORE_INFO
++         PelUnitBuf Picture::getPicBeforeDbBuf()                                 { return M_BUFS(0, PIC_BEFORE_DEBLOCK); }
++  const CPelUnitBuf Picture::getPicBeforeDbBuf()                           const { return M_BUFS(0, PIC_BEFORE_DEBLOCK); }
++         PelBuf     Picture::getPicBeforeDbBuf(const CompArea &blk)              { return getBuf(blk, PIC_BEFORE_DEBLOCK); }
++  const CPelBuf     Picture::getPicBeforeDbBuf(const CompArea &blk)        const { return getBuf(blk, PIC_BEFORE_DEBLOCK); }
++
++         PelUnitBuf Picture::getPicAfterDbBuf()                                 { return M_BUFS(0, PIC_AFTER_DEBLOCK); }
++  const CPelUnitBuf Picture::getPicAfterDbBuf()                           const { return M_BUFS(0, PIC_AFTER_DEBLOCK); }
++         PelBuf     Picture::getPicAfterDbBuf(const CompArea &blk)              { return getBuf(blk, PIC_AFTER_DEBLOCK); }
++  const CPelBuf     Picture::getPicAfterDbBuf(const CompArea &blk)        const { return getBuf(blk, PIC_AFTER_DEBLOCK); }
++
++         PelUnitBuf Picture::getMyPredBuf()                                 { return M_BUFS(0, PIC_MY_PRED_BUF); }
++  const CPelUnitBuf Picture::getMyPredBuf()                           const { return M_BUFS(0, PIC_MY_PRED_BUF); }
++         PelBuf     Picture::getMyPredBuf(const CompArea &blk)              { return getBuf(blk, PIC_MY_PRED_BUF); }
++  const CPelBuf     Picture::getMyPredBuf(const CompArea &blk)        const { return getBuf(blk, PIC_MY_PRED_BUF); }
++
++
++//         PelUnitBuf Picture::getBsMapBuf()                                      { return M_BUFS(0, PIC_BS_INFO); }
++//  const CPelUnitBuf Picture::getBsMapBuf()                                const { return M_BUFS(0, PIC_BS_INFO); }
++//         PelBuf     Picture::getBsMapBuf(const CompArea &blk)                   { return getBuf(blk, PIC_BS_INFO); }
++//  const CPelBuf     Picture::getBsMapBuf(const CompArea &blk)             const { return getBuf(blk, PIC_BS_INFO); }
++#endif
++
+        PelBuf     Picture::getRecoBuf(const ComponentID compID, bool wrap)       { return getBuf(compID,                    wrap ? PIC_RECON_WRAP : PIC_RECONSTRUCTION); }
+ const CPelBuf     Picture::getRecoBuf(const ComponentID compID, bool wrap) const { return getBuf(compID,                    wrap ? PIC_RECON_WRAP : PIC_RECONSTRUCTION); }
+        PelBuf     Picture::getRecoBuf(const CompArea &blk, bool wrap)            { return getBuf(blk,                       wrap ? PIC_RECON_WRAP : PIC_RECONSTRUCTION); }
+diff --git a/source/Lib/CommonLib/Picture.h b/source/Lib/CommonLib/Picture.h
+index 33b748b..1d60258 100644
+--- a/source/Lib/CommonLib/Picture.h
++++ b/source/Lib/CommonLib/Picture.h
+@@ -155,6 +155,35 @@ const   CPelBuf     getBsMapBuf(const CompArea &blk) const;
+   const CPelUnitBuf getPredBufCustom(const UnitArea &unit) const;
+ #endif
+   
++#if DUMP_MORE_INFO
++         PelUnitBuf getPicBeforeDbBuf();
++  const CPelUnitBuf getPicBeforeDbBuf() const;
++         PelBuf     getPicBeforeDbBuf(const CompArea &blk);
++  const CPelBuf     getPicBeforeDbBuf(const CompArea &blk) const;
++
++         PelUnitBuf getPicAfterDbBuf();
++  const CPelUnitBuf getPicAfterDbBuf() const;
++         PelBuf     getPicAfterDbBuf(const CompArea &blk);
++  const CPelBuf     getPicAfterDbBuf(const CompArea &blk) const;
++
++         PelUnitBuf getMyPredBuf();
++  const CPelUnitBuf getMyPredBuf() const;
++         PelBuf     getMyPredBuf(const CompArea &blk);
++  const CPelBuf     getMyPredBuf(const CompArea &blk) const;
++
++//         PelUnitBuf getMyPartBuf();
++//  const CPelUnitBuf getMyPartBuf() const;
++//         PelBuf     getMyPartBuf(const CompArea &blk);
++//  const CPelBuf     getMyPartBuf(const CompArea &blk) const;
++
++
++//         PelUnitBuf getBsMapBuf();
++//  const CPelUnitBuf getBsMapBuf() const;
++//         PelBuf     getBsMapBuf(const CompArea &blk);
++//  const CPelBuf     getBsMapBuf(const CompArea &blk) const;
++#endif
++
++  
+          PelBuf     getRecoBuf(const ComponentID compID, bool wrap=false);
+   const CPelBuf     getRecoBuf(const ComponentID compID, bool wrap=false) const;
+          PelBuf     getRecoBuf(const CompArea &blk, bool wrap=false);
+diff --git a/source/Lib/CommonLib/TypeDef.h b/source/Lib/CommonLib/TypeDef.h
+index 4cf53f6..cdcd503 100644
+--- a/source/Lib/CommonLib/TypeDef.h
++++ b/source/Lib/CommonLib/TypeDef.h
+@@ -54,6 +54,10 @@
+ // clang-format off
+ 
+ //########### place macros to be removed in next cycle below this line ###############
++#define DUMP_MORE_INFO                                    1 // note: do not use it for interlace or RPR or multi-layer
++                                                            // use together with output of normal reconstruction file i.e. -o
++
++#define DO_ACTUAL_CNN_FILTERING                           0
+ #define CNN_FILTERING                                     1
+ #if CNN_FILTERING
+ #define RESTRICTED_GRANULARITY                            1
+diff --git a/source/Lib/DecoderLib/DecLib.cpp b/source/Lib/DecoderLib/DecLib.cpp
+index 00cef89..2014d38 100644
+--- a/source/Lib/DecoderLib/DecLib.cpp
++++ b/source/Lib/DecoderLib/DecLib.cpp
+@@ -683,13 +683,29 @@ void DecLib::executeLoopFilters()
+ #if CNN_FILTERING
+   m_pcPic->getBsMapBuf().fill(0);
+ #endif
++#if DUMP_MORE_INFO
++  // store the sample values before deblocking
++  m_pcPic->getPicBeforeDbBuf().copyFrom( m_pcPic->getRecoBuf(), false, false );
++
++  // reset bs info for preparation
++  m_pcPic->getBsMapBuf().fill( 0 );
++#endif
+   // deblocking filter
+   m_cLoopFilter.loopFilterPic( cs );
++  // store the sample values after deblocking
++  m_pcPic->getPicAfterDbBuf().copyFrom( m_pcPic->getRecoBuf(), false, false );
++
+   CS::setRefinedMotionField(cs);
+-  //CNN filter
++
+ #if CNN_FILTERING
+   m_pcCNNFilter->cnnFilter(m_pcPic);
+ #endif
++  //CNN filter
++#if DUMP_MORE_INFO
++  // store the sample from the special prediction buffer
++  m_pcPic->getMyPredBuf().copyFrom( m_pcPic->getPredBufCustom(), false, false );
++#endif
++
+   if( cs.sps->getSAOEnabledFlag() )
+   {
+     m_cSAO.SAOProcess( cs, cs.picture->getSAO() );
+diff --git a/source/Lib/DecoderLib/VLCReader.cpp b/source/Lib/DecoderLib/VLCReader.cpp
+index ad80586..2aa1585 100644
+--- a/source/Lib/DecoderLib/VLCReader.cpp
++++ b/source/Lib/DecoderLib/VLCReader.cpp
+@@ -4175,9 +4175,10 @@ void HLSyntaxReader::parseSliceHeader (Slice* pcSlice, PicHeader* picHeader, Par
+     }
+ 
+ #if CNN_FILTERING
+-    READ_UVLC(uiCode, "slice_cnnlf_luma_indicaiton");  pcSlice->setCnnlfSliceIndication(COMPONENT_Y, uiCode);
+-    READ_UVLC(uiCode, "slice_cnnlf_cb_indication");  pcSlice->setCnnlfSliceIndication(COMPONENT_Cb, uiCode);
+-    READ_UVLC(uiCode, "slice_cnnlf_cr_indication");  pcSlice->setCnnlfSliceIndication(COMPONENT_Cr, uiCode);
++#if DO_ACTUAL_CNN_FILTERING
++    READ_UVLC(uiCode, "slice_cnnlf_luma_indicaiton");  pcSlice->setCnnlfSliceIndication(COMPONENT_Y, uiCode); printf("First: %d\n", uiCode);
++    READ_UVLC(uiCode, "slice_cnnlf_cb_indication");  pcSlice->setCnnlfSliceIndication(COMPONENT_Cb, uiCode); printf("Second: %d\n", uiCode);
++    READ_UVLC(uiCode, "slice_cnnlf_cr_indication");  pcSlice->setCnnlfSliceIndication(COMPONENT_Cr, uiCode); printf("Third: %d\n", uiCode);
+ 
+ #if SCALE_NN_RESIDUE
+     for (int comp = 0; comp < 3; comp++)
+@@ -4190,10 +4191,12 @@ void HLSyntaxReader::parseSliceHeader (Slice* pcSlice, PicHeader* picHeader, Par
+           for (int modelIdx = 0; modelIdx < 3; modelIdx++)
+           {
+             READ_FLAG(uiCode, "slice_cnnlf_scale_flag");
++//            printf("slice_cnnlf_scale_flag = %d\n", uiCode);
+             pcSlice->setNnScaleFlag(uiCode != 0, modelIdx, compID);
+             if (uiCode)
+             {
+               READ_SCODE(NN_RESIDUE_SCALE_SHIFT + 1, iCode, "nnScale");
++//              printf("nnScale = %d\n", iCode);
+               pcSlice->setNnScale(iCode + (1 << NN_RESIDUE_SCALE_SHIFT), modelIdx, compID);
+             }
+           }
+@@ -4201,16 +4204,49 @@ void HLSyntaxReader::parseSliceHeader (Slice* pcSlice, PicHeader* picHeader, Par
+         else
+         {
+           READ_FLAG(uiCode, "slice_cnnlf_scale_flag");
++//          printf("slice_cnnlf_scale_flag = %d\n", uiCode);
+           pcSlice->setNnScaleFlag(uiCode != 0, pcSlice->getCnnlfSliceIndication(compID) - 1, compID);
+           if (uiCode)
+           {
+             READ_SCODE(NN_RESIDUE_SCALE_SHIFT + 1, iCode, "nnScale");
+             pcSlice->setNnScale(iCode + (1 << NN_RESIDUE_SCALE_SHIFT), pcSlice->getCnnlfSliceIndication(compID) - 1, compID);
++//            printf("nnScale2 = %d\n", iCode);
+           }
+         }
+       }
+     }
+ #endif
++#else
++  // Fake reading in data
++  pcSlice->setCnnlfSliceIndication(COMPONENT_Y, 1);
++  pcSlice->setCnnlfSliceIndication(COMPONENT_Cb, 1);
++  pcSlice->setCnnlfSliceIndication(COMPONENT_Cr, 1);
++
++  ComponentID compID = ComponentID(0);
++  uiCode = 1;
++//  printf("slice_cnnlf_scale_flag = %d\n", uiCode);
++  pcSlice->setNnScaleFlag(uiCode != 0, pcSlice->getCnnlfSliceIndication(compID) - 1, compID);
++  iCode = -23;
++  pcSlice->setNnScale(iCode + (1 << NN_RESIDUE_SCALE_SHIFT), pcSlice->getCnnlfSliceIndication(compID) - 1, compID);
++//  printf("nnScale2 = %d\n", iCode);
++
++  compID = ComponentID(1);
++  uiCode = 1;
++//  printf("slice_cnnlf_scale_flag = %d\n", uiCode);
++  pcSlice->setNnScaleFlag(uiCode != 0, pcSlice->getCnnlfSliceIndication(compID) - 1, compID);
++  iCode = -44;
++  pcSlice->setNnScale(iCode + (1 << NN_RESIDUE_SCALE_SHIFT), pcSlice->getCnnlfSliceIndication(compID) - 1, compID);
++//  printf("nnScale2 = %d\n", iCode);
++
++  compID = ComponentID(2);
++  uiCode = 1;
++//  printf("slice_cnnlf_scale_flag = %d\n", uiCode);
++  pcSlice->setNnScaleFlag(uiCode != 0, pcSlice->getCnnlfSliceIndication(compID) - 1, compID);
++  iCode = -49;
++  pcSlice->setNnScale(iCode + (1 << NN_RESIDUE_SCALE_SHIFT), pcSlice->getCnnlfSliceIndication(compID) - 1, compID);
++//  printf("nnScale2 = %d\n", iCode);
++  
++#endif
+     int cnnlfInferBlockSizeLuma = 0, cnnlfInferBlockSizeChroma = 0;
+     if (pcSlice->getSliceQp() < 23)
+     {
+diff --git a/source/Lib/EncoderLib/EncCNNFilter.cpp b/source/Lib/EncoderLib/EncCNNFilter.cpp
+index 89838fb..205d83a 100644
+--- a/source/Lib/EncoderLib/EncCNNFilter.cpp
++++ b/source/Lib/EncoderLib/EncCNNFilter.cpp
+@@ -58,6 +58,7 @@ double getDistortion(PelBuf buf1, PelBuf buf2, int width, int height)
+   }
+   return dist;
+ }
++#if CNN_FILTERING
+ #if SCALE_NN_RESIDUE
+ void EncCNNFilter::cnnFilterEncoder(Picture *pic, PelUnitBuf origBuf, const double *lambdas)
+ #else
+@@ -469,6 +470,7 @@ void EncCNNFilter::cnnFilterEncoder(Picture *pic, const double *lambdas)
+     }
+   }
+ }
++#endif
+ 
+ 
+ #if SCALE_NN_RESIDUE
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage1_3_data_generation/dec_direct_tile3_574d.py b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage1_3_data_generation/dec_direct_tile3_574d.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d6b23691d5d1f87bc6665d7948b520dce0e795d
--- /dev/null
+++ b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage1_3_data_generation/dec_direct_tile3_574d.py
@@ -0,0 +1,216 @@
+"""
+/* The copyright in this software is being made available under the BSD
+* License, included below. This software may be subject to other third party
+* and contributor rights, including patent rights, and no such rights are
+* granted under this license.
+*
+* Copyright (c) 2010-2022, ITU/ISO/IEC
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+*
+*  * Redistributions of source code must retain the above copyright notice,
+*    this list of conditions and the following disclaimer.
+*  * Redistributions in binary form must reproduce the above copyright notice,
+*    this list of conditions and the following disclaimer in the documentation
+*    and/or other materials provided with the distribution.
+*  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+*    be used to endorse or promote products derived from this software without
+*    specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+* THE POSSIBILITY OF SUCH DAMAGE.
+"""
+
+import csv
+import sys
+import os
+import subprocess
+from subprocess import Popen, PIPE
+import shutil
+import glob
+import time
+
+# Modify the directories below. Keep '/' at the end of each path.
+software_root_dir = '/proj/video_data3/videosim/eliudux/myCurrentWork/training_data/jvet-ab-ee1-1_5/training_EE115/'
+
+output_root_dir = '/proj/video_no_backup/videosim/eliudux/training_data/'
+# Output YUV directory is in the format of
+# <output_root_dir>/xcheck_yuv/ai_574d_DIV2K/ImageName_qp17/ImageName_qp17_rec.yuv
+# <output_root_dir>/xcheck_yuv/ai_574d_BVIDVC/SequenceName_qp17/SequenceName_qp17_bs.yuv
+
+path_to_original_DIV2K_yuvs = '/proj/video_no_backup/videosim/ejacstr/extract/DIV2K_test_yuv/DIV2K_train_HR_yuv/'
+path_to_original_DIV2K_X2_yuvs = '/proj/video_no_backup/videosim/ejacstr/extract/DIV2K_test_yuv/X2_yuv/'
+path_to_original_BVI_DVC_yuvs = '/proj/video_data3/videosim/data/BVI_DVC/'
+
+# do only decoding, or both encoding and decoding
+onlyDecode = True
+
+# remove YUVs after decoding
+removeYUV = False
+
+dataset = sys.argv[1] # DIV2K or DIV2K_X2 or BVIDVC
+cfgtype = sys.argv[2] # ai or ra
+
+str1 = sys.argv[3]
+# String to control which names from the dataset you want to encode. 
+# E.g., DIV2K images start with 0xxx_xxxxxxx_xxx.yuv, so str1='0'. 
+# BVIDVC class D sequences start with Dxxxx_xxxxx.yuv, so str1='D'.
+# A particular sequence with name CColourfulDecorationWatPhoVidevo_960x544_50fps_10bit_420.yuv, so str1 = 'CColourfulDecorationWatPhoVidevo_960x544_50fps_10bit_420'
+#str1 = 'DAdvertisingMassagesBangkokVidevo_480x272_25fps_10bit_420'
+#str1 = 'AWovenVidevo_3840x2176_25fps_10bit_420'
+
+
+if cfgtype == 'ai':
+    QP = [42,37,32,27,22,17]
+    cfg1 = software_root_dir + 'cfg/encoder_intra_vtm_subsample1.cfg'
+    if dataset == 'DIV2K':
+        datasetname = "DIV2K_1frm"
+        cfgdir = software_root_dir + 'cfg/cfgDIV2K/'
+        inputdir = path_to_original_DIV2K_yuvs
+        encnumfrm = '1'
+        numfrm = '1' 
+        bitdepth = 10
+        bitdepth_orig = 8 # BVIDVC 10bit, DIV2K 8bit        
+        logdir = software_root_dir + 'cfg/'
+    elif dataset == 'DIV2K_X2':
+        datasetname = "DIV2K_1frm" #same as DIV2K
+        cfgdir = software_root_dir + 'cfg/cfgDIV2K_X2/'
+        inputdir = path_to_original_DIV2K_X2_yuvs
+        encnumfrm = '1'
+        numfrm = '1' 
+        bitdepth = 10
+        bitdepth_orig = 8 # BVIDVC 10bit, DIV2K 8bit        
+        logdir = software_root_dir + 'cfg/'    
+    elif dataset == 'BVIDVC':    
+        datasetname = "BVIDVC_64frm"
+        cfgdir = software_root_dir + 'cfg/cfgBVIDVC/'
+        inputdir = path_to_original_BVI_DVC_yuvs
+        encnumfrm = '64'
+        numfrm = '64' #all frames from A
+        bitdepth = 10
+        bitdepth_orig = 10 # BVIDVC 10bit, DIV2K 8bit
+        logdir = software_root_dir + 'cfg/BVIDVC_64frm/'
+    else:
+        raise ValueError('Dateset', dataset,'not defined for AI. Exit.')  
+elif  cfgtype == 'ra':
+    QP = [40,35,30,25,20]
+    cfg1 = software_root_dir + 'stage1_1_vtm_enc/VVCSoftware_VTM/cfg/encoder_randomaccess_vtm.cfg'
+    if dataset == 'BVIDVC':
+        datasetname = "BVIDVC_64frm"
+        cfgdir = software_root_dir + 'cfg/cfgBVIDVC/'
+        inputdir = path_to_original_BVI_DVC_yuvs
+        encnumfrm = '64'
+        numfrm = '64' #all frames from A
+        bitdepth = 10
+        bitdepth_orig = 10 # BVIDVC 10bit, DIV2K 8bit
+        logdir = software_root_dir + 'cfg/BVIDVC_64frm/' #sliceQP log
+    else:
+        raise ValueError('Dateset', dataset,' not defined for RA. Exit.')
+else:
+    raise ValueError('Unknow configuration', cfgtype,'. Exit.')
+
+scriptdir = software_root_dir + 'stage1_3_data_generation/'
+
+excutabledir_enc = software_root_dir + 'stage1_1_vtm_enc/VVCSoftware_VTM/bin/'  
+excutabledir_dec = software_root_dir + 'stage1_2_vtm_dec/extractmoreinfovtm/bin/'
+
+commit_num = '574d'  # Do not change this. It is used to identify the dataset in the training data.
+encoderdir = excutabledir_enc+'EncoderAppStatic' 
+decoderdir = excutabledir_dec+'DecoderAppStatic'
+
+if onlyDecode:
+    bindir = '/proj/video_data3/videosim/eliudux/myCurrentWork/training_data/outputyuv/ra_pbd_bs_BVIDVC/'
+else:
+    bindir = output_root_dir + 'xcheck_bin/'+ cfgtype + '_' + commit_num +'_'+dataset+'/'
+    
+yuvoutputdir = output_root_dir + 'xcheck_yuv/' + cfgtype + '_' + commit_num +'_'+dataset+'/'
+ 
+cnt = 0
+for input_file in os.listdir(inputdir):
+    if input_file[-3:] == 'yuv' and input_file[:len(str1)] == str1:
+        seqname = input_file[:-4]
+        cfg2 = cfgdir + seqname + '.cfg'
+        file_dir = inputdir + input_file
+        for q in range(0,len(QP)):
+            qp = str(QP[q])
+            currnt_dir = yuvoutputdir + seqname + '_qp' + qp + '/' 
+            os.makedirs(currnt_dir,exist_ok = True)  
+            
+            if onlyDecode:
+                bin_dir =  bindir+ seqname + '_qp' + qp + '/' + seqname + '_qp' + qp + '_str.bin'
+                if not os.path.exists(bin_dir):
+                    # do not proceed if binary does not exist
+                    print('bin does not exist',bin_dir)
+                    continue 
+            else:             
+                current_bin_dir = bindir+ seqname + '_qp' + qp + '/'
+                bin_dir =  current_bin_dir + seqname + '_qp' + qp + '_str.bin'
+                os.makedirs(current_bin_dir,exist_ok = True)      
+            
+                
+            tmpout_dir = currnt_dir + seqname + '_qp' + qp + '_tmprec.yuv'
+            out_dir = currnt_dir + seqname + '_qp' + qp + '_rec.yuv'
+            pbd_dir = currnt_dir + seqname + '_qp' + qp + '_pictureBeforeDb.yuv'
+            bs_dir = currnt_dir + seqname + '_qp' + qp + '_bs.yuv'
+            log_dir = currnt_dir + 'dec.log'
+            mpr_dir = currnt_dir + seqname + '_qp' + qp + '_mpr.yuv'
+            mpa_dir = currnt_dir + seqname + '_qp' + qp + '_mpa.yuv'
+            pad_dir = currnt_dir + seqname + '_qp' + qp + '_pad.yuv'
+            bpm_dir = currnt_dir + seqname + '_qp' + qp + '_bpm.yuv'
+            enclog_dir = currnt_dir + 'enc.log'
+            
+                
+            enccmd = [encoderdir, "-c", cfg1, "-c", cfg2, "-i", file_dir, "-o", tmpout_dir,"-b", bin_dir, "-q", qp, "-f",encnumfrm, "--SEIDecodedPictureHash=1",">&",enclog_dir,"; rm -f", tmpout_dir]
+            enccmd = " ".join(enccmd) 
+            
+            deccmd = [decoderdir, "-b", bin_dir, "-o", out_dir, "-pbd", pbd_dir,"-bs", bs_dir,"-mpr",mpr_dir ,">&",log_dir,"; rm -f",out_dir] #,"-pad",pad_dir,"-mpa",mpa_dir 
+            deccmd = " ".join(deccmd)
+            
+            if removeYUV:
+                cmdrm = [ "rm -f",pbd_dir,"; rm -f",bs_dir,"; rm -f",mpr_dir] #,"; rm -f",pad_dir,"; rm -f",mpa_dir
+                cmdrm = " ".join(cmdrm)                
+            
+            
+            if onlyDecode:
+                cmdall = ["bsub -o /dev/null '", deccmd,"'"] 
+            else:
+                cmdall = ["bsub -o /dev/null  '", enccmd,";",deccmd,"'"]
+            
+            cmdall = " ".join(cmdall)
+            
+            if removeYUV:
+                cmdall = [cmdall[:-2],";",cmdrm, "'"]
+                cmdall = " ".join(cmdall)
+            
+            #if os.path.exists(mpr_dir):             
+            #    continue            
+            
+            cnt += 1 
+            print('#',cnt, currnt_dir)  
+            print(cmdall)
+            
+            try: 
+                p1 = Popen(cmdall, shell=True, stdout=PIPE, stderr=PIPE)
+                status = p1.wait()
+                out = p1.stdout.read()
+                err = p1.stderr.read()
+            except Exception:
+                raise jobException("error while executing command '" + cmdall + "' out={} err={}".format(out, err)) 
+                
+            time.sleep(2)
+            
+            break #QP loop
+        break # sequence loop
+
+print('Count',cnt,'dec jobs.')
\ No newline at end of file
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage1_4_training_interluma/data_loader.py b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage1_4_training_interluma/data_loader.py
new file mode 100644
index 0000000000000000000000000000000000000000..11d4f8771c5861fb68267bb6ccfa083432c9bae4
--- /dev/null
+++ b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage1_4_training_interluma/data_loader.py
@@ -0,0 +1,381 @@
+"""
+/* The copyright in this software is being made available under the BSD
+* License, included below. This software may be subject to other third party
+* and contributor rights, including patent rights, and no such rights are
+* granted under this license.
+*
+* Copyright (c) 2010-2022, ITU/ISO/IEC
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+*
+*  * Redistributions of source code must retain the above copyright notice,
+*    this list of conditions and the following disclaimer.
+*  * Redistributions in binary form must reproduce the above copyright notice,
+*    this list of conditions and the following disclaimer in the documentation
+*    and/or other materials provided with the distribution.
+*  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+*    be used to endorse or promote products derived from this software without
+*    specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+* THE POSSIBILITY OF SUCH DAMAGE.
+"""
+
+import json
+import math
+import sys
+from typing import NamedTuple
+import numpy as np
+import struct
+import torch
+from torch.utils.data import Dataset
+
# Address of one training sample: which database entry, which frame of it,
# and the patch origin inside that frame (patch units for generate_type 0,
# unused/zero when sampling whole frames).
PatchInfo = NamedTuple(
    "PatchInfo",
    [
        ("data_index", int),   # index into the database list
        ("frame_index", int),  # frame number within that database entry
        ("patch_x0", int),     # patch origin, horizontal
        ("patch_y0", int),     # patch origin, vertical
    ],
)
+    
+    
def readData(patch_size, border_size, norm, fn, off, nbb, ctype, h, w, x0, y0):
    """Read one (patch + border) window of a raw plane dump.

    `fn` holds the plane as `ctype` samples (`nbb` bytes each); `off` is the
    byte offset of the plane inside the file and (h, w) its dimensions.
    The window is anchored at (x0, y0) and extended by `border_size` on each
    side; any part falling outside the plane stays zero (implicit zero
    padding).  The result is a float32 square of side
    patch_size + 2*border_size, divided by `norm`.
    """
    side = patch_size + 2 * border_size
    out = np.zeros((side, side), dtype='float32')  # zeros give the padding
    # Requested window in plane coordinates (may overhang the plane).
    top, bottom = y0 - border_size, y0 + patch_size + border_size
    left, right = x0 - border_size, x0 + patch_size + border_size
    # Portion of the window actually covered by the plane.
    src_top, src_bottom = max(top, 0), min(bottom, h)
    src_left, src_right = max(left, 0), min(right, w)
    rows = src_bottom - src_top
    with open(fn, "rb") as fh:
        # Skip whole rows above the window, then read `rows` full-width rows.
        plane = np.fromfile(fh, dtype=ctype, count=rows * w,
                            offset=off + src_top * w * nbb).reshape(rows, w)
    # Paste the available columns at their position inside the padded square.
    out[src_top - top:src_bottom - top, src_left - left:src_right - left] = \
        plane[:, src_left:src_right]
    return out.astype('float32') / norm
+         
def readDataBpm(patch_size, border_size, norm, fn, off, nbb, ctype, h, w, x0, y0):
    """Read a window of the block-prediction-mode (IPB) plane and remap it.

    After dropping the raw LSB the samples mean intra=0, IBC=1, uni-pred=2,
    bi-pred=3; they are remapped to intra/IBC=0.0, uni-pred=0.5,
    bi-pred=1.0.  Out-of-plane areas stay zero (implicit padding).  `norm`
    is accepted for signature symmetry with readData() but not applied.
    """
    side = patch_size + 2 * border_size
    out = np.zeros((side, side), dtype='float32')  # zeros give the padding
    top, bottom = y0 - border_size, y0 + patch_size + border_size
    left, right = x0 - border_size, x0 + patch_size + border_size
    src_top, src_bottom = max(top, 0), min(bottom, h)
    src_left, src_right = max(left, 0), min(right, w)
    rows = src_bottom - src_top
    with open(fn, "rb") as fh:
        plane = np.fromfile(fh, dtype=ctype, count=rows * w,
                            offset=off + src_top * w * nbb).reshape(rows, w)
    window = plane[:, src_left:src_right]
    # Drop the LSB: intra=0, IBC=1, uni-pred=2, bi-pred=3.
    modes = np.right_shift(window, 1)
    # Collapse intra and IBC onto the same value (add 1 where zero) ...
    modes = (modes == 0) + modes
    # ... then shift/scale so intra/IBC=0, uni-pred=0.5, bi-pred=1.
    out[src_top - top:src_bottom - top, src_left - left:src_right - left] = \
        np.float32(modes - 1) / 2.0
    return out.astype('float32')
+         
def readOne(patch_size, border_size, norm, fn, off, ctype):
    """Read one scalar (e.g. slice QP) from `fn` at byte offset `off` and
    broadcast it into a constant float32 plane of side
    patch_size + 2*border_size, divided by `norm`.

    Only ctype 'int32' is implemented; anything else aborts the process.
    """
    with open(fn, "rb") as fh:
        if ctype != 'int32':
            sys.exit("readOne todo")
        fh.seek(off)
        (raw,) = struct.unpack("i", fh.read(4))  # native-endian 32-bit int
    value = float(raw) / norm
    side = patch_size + 2 * border_size
    return np.full((side, side), value, dtype='float32')
+    
def getTypeComp(comp):
    """Classify a list of component names as luma or chroma.

    Returns False when no '_U'/'_V' component is present (pure luma or
    neutral names like 'qp_slice'), True when only chroma components are
    present, and None when the list mixes '_Y' with '_U'/'_V' (the caller
    treats that as an error).
    """
    has_luma = any('_Y' in name for name in comp)
    has_chroma = any('_U' in name or '_V' in name for name in comp)
    if has_luma and has_chroma:
        return None
    return has_chroma
+
+
class DataLoader1(Dataset):
    """Dataset over the frames/patches described by a json database file.

    The json file lists, per sequence, the codec dumps available
    (reconstruction, prediction, BS maps, ...) together with the original
    YUV.  With generate_type == 0 one sample is created per interior
    patch of every frame; otherwise one sample per frame is created and
    __getitem__ crops a random, deblocking-aligned patch with random
    transpose/flip augmentation.

    Bug fixes vs the previous version:
    * components/suffix/database/patch_info are now created per instance
      in __init__.  They used to be shared, mutable class attributes, so
      instantiating a second loader appended duplicate component names to
      (and corrupted) every existing loader.
    * normalizer_bpm is set for both generate_type branches (it was only
      set for generate_type != 0, giving AttributeError otherwise).
    """

    # patch_size is expressed in luma samples
    def __init__(self, jsonfile, patch_size, poc_list, generate_type, comps_luma, comps_chroma, qp_filter=-1, slice_type_filter=-1, transform=None, target_transform=None):
        # Per-instance bookkeeping (deliberately NOT class attributes).
        self.components = []   # names of all components available in the db
        self.database = None   # the whole database (list of dicts)
        self.patch_info = []   # PatchInfo address of every sample
        self.suffix = {}       # file suffix of each dumped component

        self.generate_type = generate_type
        self.comps_luma = comps_luma
        self.comps_chroma = comps_chroma
        self.transform = transform
        self.target_transform = target_transform

        # Normalization constants applied when converting raw samples to float.
        if self.generate_type == 0:
            self.normalizer_rec = 1023.0
            self.normalizer_pred = 1023.0
            self.normalizer_bs = 1023.0
            self.normalizer_cu_average = 1023.0
            self.normalizer_org8bits = 255.0
            self.normalizer_org10bits = 1023.0
            self.normalizer_qp = 1023.0
            # Previously missing in this branch (AttributeError for bpm data).
            self.normalizer_bpm = 1
        else:
            self.normalizer_rec = 1024.0
            self.normalizer_pred = 1024.0
            self.normalizer_bs = 1024.0
            self.normalizer_cu_average = 1024.0
            self.normalizer_org8bits = 256.0
            self.normalizer_org10bits = 1024.0
            self.normalizer_qp = 64.0
            self.normalizer_bpm = 1
        self.patch_size = patch_size

        with open(jsonfile, "r") as file:
            dcontent = json.loads(file.read())
            if qp_filter > 0 and 'suffix_qp' not in dcontent:
                sys.exit("Filtering on qp impossible: no qp data in the dataset")
            if slice_type_filter > 0 and 'suffix_slicetype' not in dcontent:
                sys.exit("Filtering on slice type impossible: no slice data in the dataset")
            if qp_filter > 0 or slice_type_filter > 0:
                sys.exit("todo")
            # The original planes are always present.
            self.components.append("org_Y")
            self.components.append("org_U")
            self.components.append("org_V")
            # Optional dumps, advertised by their suffix keys in the json.
            if 'suffix_rec_after_dbf' in dcontent:
                self.suffix['rec_after_dbf'] = dcontent['suffix_rec_after_dbf']
                self.components.append("rec_after_dbf_Y")
                self.components.append("rec_after_dbf_U")
                self.components.append("rec_after_dbf_V")
            if 'suffix_rec_before_dbf' in dcontent:
                self.suffix['rec_before_dbf'] = dcontent['suffix_rec_before_dbf']
                self.components.append("rec_before_dbf_Y")
                self.components.append("rec_before_dbf_U")
                self.components.append("rec_before_dbf_V")
            if 'suffix_pred' in dcontent:
                self.suffix['pred'] = dcontent['suffix_pred']
                self.components.append("pred_Y")
                self.components.append("pred_U")
                self.components.append("pred_V")
            if 'suffix_bs' in dcontent:
                self.suffix['bs'] = dcontent['suffix_bs']
                self.components.append("bs_Y")
                self.components.append("bs_U")
                self.components.append("bs_V")
            if 'suffix_partition_cu_average' in dcontent:
                self.suffix['partition_cu_average'] = dcontent['suffix_partition_cu_average']
                self.components.append("partition_cu_average_Y")
                self.components.append("partition_cu_average_U")
                self.components.append("partition_cu_average_V")
            if 'suffix_qp' in dcontent:
                self.components.append("qp_slice")
                self.suffix['qp_slice'] = dcontent['suffix_qp']
            self.components.append("qp_base")  # always here
            if 'suffix_slicetype' in dcontent:
                self.components.append("slice_type")
                self.suffix['slice_type'] = dcontent['suffix_slicetype']
            if 'suffix_bpm' in dcontent:
                self.suffix['bpm'] = dcontent['suffix_bpm']
                self.components.append("bpm_Y")
                self.components.append("bpm_U")
                self.components.append("bpm_V")

            self.database = dcontent['data']

        # Create the address (PatchInfo) of every sample.
        if self.generate_type == 0:
            # One sample per interior patch of each frame; the one-patch
            # border is excluded (hence the "- 2" and the "1 +" below).
            psize = self.patch_size
            for didx in range(len(self.database)):
                d = self.database[didx]
                w = int(d['width'])
                h = int(d['height'])
                w -= w % psize
                h -= h % psize
                nb_w = int(w // psize - 2)
                nb_h = int(h // psize - 2)

                id_ra = '_T2RA_'
                ra_flag = True if id_ra in d['bsname'] else False
                for fidx in range(int(d['data_count'])):
                    # Skip the intra frames of random-access sequences.
                    if ra_flag and (fidx == 0 or fidx == 32 or fidx == 64):
                        continue
                    for y0 in range(nb_h):
                        for x0 in range(nb_w):
                            self.patch_info.append(PatchInfo(didx, fidx, 1 + x0, 1 + y0))
        else:
            # One sample per frame; __getitem__ picks the patch at random.
            for didx in range(len(self.database)):
                d = self.database[didx]
                frames = range(int(d['data_count'])) if not poc_list else poc_list
                if 'DIV2K' in d['dirname']:
                    frames = [0]  # still images: single frame
                for fidx in frames:
                    if 'ra_' in d['dirname'] and (fidx == 0 or fidx == 32 or fidx == 64):
                        continue  # do not include AI frames in RA data
                    if fidx >= d['data_count']:
                        sys.exit("exceed max number of frames ({})".format(d['data_count']))

                    # save frame info
                    self.patch_info.append(PatchInfo(didx, fidx, 0, 0))

    def __len__(self):
        return len(self.patch_info)

    def getPatchData(self, index, comp, x0, y0, border_size=0):
        """Extract the components `comp` of the patch of sample `index`.

        (x0, y0) is the patch origin (pixels for generate_type != 0,
        patch units for generate_type == 0 callers).  `comp` must be
        all-luma or all-chroma; mixing raises AssertionError.  For chroma
        the patch/border/plane dimensions are halved (4:2:0).  Returns a
        (1, tsize, tsize, len(comp)) float32 array.
        """
        assert(index < len(self.patch_info))
        pinfo = self.patch_info[index]
        d = self.database[pinfo.data_index]
        psize = self.patch_size
        bsize = border_size
        chroma_block = getTypeComp(comp)
        if chroma_block is None:
            raise AssertionError('The second argument of getPatchData contains strings ending with \'_Y\' and strings ending with \'_U\' or \'_V\', which is not allowed.')
        w = int(d['width'])
        h = int(d['height'])
        frame_size_Y = w * h  # luma plane size, drives the yuv420 offsets
        if chroma_block:
            psize //= 2
            bsize //= 2
            w //= 2
            h //= 2
        tsize = bsize + psize + bsize

        t = np.zeros((1, tsize, tsize, len(comp)), dtype='float32')

        for idx, c in enumerate(comp):
            assert(c in self.components)

            if 'org' in c:
                # Original source: read straight from the yuv file.
                fn = d['original_yuv']
                off_frame = d['original_frame_skip'] + pinfo.frame_index
                if d['original_bitdepth'] == 8:  # 8 bits
                    norm = self.normalizer_org8bits
                    b = 'uint8'
                    nbb = 1
                else:  # 10 bits
                    norm = self.normalizer_org10bits
                    b = 'uint16'
                    nbb = 2
                off = off_frame * (frame_size_Y * nbb * 3 // 2)  # yuv420 frame stride
                if c == 'org_U':
                    off += frame_size_Y * nbb
                elif c == 'org_V':
                    off += frame_size_Y * nbb + (frame_size_Y * nbb) // 4
                v = readData(psize, bsize, norm, fn, off, nbb, b, h, w, x0, y0)

            elif 'rec_after_dbf' in c or 'rec_before_dbf' in c or 'pred' in c or 'partition_cu_average' in c or 'bs' in c or 'bpm' in c:
                # Codec dumps are stored as 16-bit yuv420 planes.
                fn = d['dirname'] + '/' + d['basename'] + self.suffix[c[:-2]]
                nbb = 2  # 16 bits data
                off = pinfo.frame_index * (frame_size_Y * nbb * 3 // 2)
                if '_U' in c:
                    off += frame_size_Y * nbb
                elif '_V' in c:
                    off += frame_size_Y * nbb + (frame_size_Y * nbb) // 4
                if 'rec_after_dbf' in c or 'rec_before_dbf' in c:
                    norm = self.normalizer_rec
                elif 'pred' in c:
                    norm = self.normalizer_pred
                elif 'bs' in c:
                    norm = self.normalizer_bs
                elif 'partition_cu_average' in c:
                    norm = self.normalizer_cu_average
                elif 'bpm' in c:
                    norm = self.normalizer_bpm

                if 'bpm' in c:
                    # If the data is all-intra coded, v is all-zeros. There is
                    # no need to extract IPB data.  (Assumes the AI data
                    # directory name contains the unique string 'ai_'.)
                    if 'ai_' in d['dirname']:
                        v = np.float32(np.zeros((tsize, tsize)))
                    else:
                        v = readDataBpm(psize, bsize, norm, fn, off, nbb, 'uint16', h, w, x0, y0)
                else:
                    v = readData(psize, bsize, norm, fn, off, nbb, 'uint16', h, w, x0, y0)

            elif c == 'qp_slice':
                fn = d['dirname'] + '/' + d['basename'] + self.suffix['qp_slice']
                norm = self.normalizer_qp
                off = pinfo.frame_index * 4  # one int32 per frame
                v = readOne(psize, bsize, norm, fn, off, 'int32')

            elif c == 'qp_base':
                # Constant plane filled with the normalized sequence base QP.
                norm = self.normalizer_qp
                f = float(d['qp_base']) / norm
                v = np.full((tsize, tsize), f, dtype='float32')
            elif c == 'slice_type':
                fn = d['dirname'] + '/' + d['basename'] + self.suffix['slice_type']
                norm = 1
                off = pinfo.frame_index * 4
                v = readOne(psize, bsize, norm, fn, off, 'int32')
            else:
                sys.exit("Unkwown component {}".format(c))
            t[0, :, :, idx] = v
        return t

    def __getitem__(self, index):
        """Return (qp, rec, pred, bs, org) luma tensors for one random
        patch of frame `index`, with random transpose/flip augmentation.

        patch_info has one entry per frame, so every frame (and therefore
        every class) is sampled evenly.
        """
        pinfo = self.patch_info[index]
        d = self.database[pinfo.data_index]
        psize = self.patch_size

        # Random patch position rounded down to a multiple of 4 so that the
        # crop lines up with deblocking boundaries.
        w = int(d['width'])
        h = int(d['height'])
        yrand = int(np.random.rand(1) * (h - psize + 1))
        xrand = int(np.random.rand(1) * (w - psize + 1))
        yrand = (yrand // 4) * 4
        xrand = (xrand // 4) * 4

        # Components used in the luma model, e.g.
        # "org_Y,rec_before_dbf_Y,pred_Y,bs_Y,qp_slice"
        t_luma = self.getPatchData(index, self.comps_luma, xrand, yrand)

        origY_tensor = torch.tensor(t_luma[0, :, :, 0])
        recY_tensor = torch.tensor(t_luma[0, :, :, 1])
        predY_tensor = torch.tensor(t_luma[0, :, :, 2])
        bsY_tensor = torch.tensor(t_luma[0, :, :, 3])
        qp_tensor = torch.unsqueeze(torch.tensor(t_luma[0, :, :, 4]), 0)

        # Transpose image randomly.
        if np.random.rand(1) < 0.5:
            recY_tensor = torch.transpose(recY_tensor, 0, 1)
            predY_tensor = torch.transpose(predY_tensor, 0, 1)
            bsY_tensor = torch.transpose(bsY_tensor, 0, 1)
            origY_tensor = torch.transpose(origY_tensor, 0, 1)

        # Flipping in x-direction randomly.
        if np.random.rand(1) < 0.5:
            recY_tensor = torch.flip(recY_tensor, [1])
            predY_tensor = torch.flip(predY_tensor, [1])
            bsY_tensor = torch.flip(bsY_tensor, [1])
            origY_tensor = torch.flip(origY_tensor, [1])

        # Flipping in y-direction randomly.
        if np.random.rand(1) < 0.5:
            recY_tensor = torch.flip(recY_tensor, [0])
            predY_tensor = torch.flip(predY_tensor, [0])
            bsY_tensor = torch.flip(bsY_tensor, [0])
            origY_tensor = torch.flip(origY_tensor, [0])

        recY_tensor = torch.unsqueeze(recY_tensor, 0)
        predY_tensor = torch.unsqueeze(predY_tensor, 0)
        bsY_tensor = torch.unsqueeze(bsY_tensor, 0)
        origY_tensor = torch.unsqueeze(origY_tensor, 0)

        if self.transform:
            recY_tensor = self.transform(recY_tensor)
            predY_tensor = self.transform(predY_tensor)
            bsY_tensor = self.transform(bsY_tensor)
        if self.target_transform:
            origY_tensor = self.target_transform(origY_tensor)

        return qp_tensor, recY_tensor, predY_tensor, bsY_tensor, origY_tensor
\ No newline at end of file
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage1_4_training_interluma/net.py b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage1_4_training_interluma/net.py
new file mode 100644
index 0000000000000000000000000000000000000000..af2b1039ead2bac3060a670eeda8dff325aac996
--- /dev/null
+++ b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage1_4_training_interluma/net.py
@@ -0,0 +1,107 @@
+"""
+/* The copyright in this software is being made available under the BSD
+* License, included below. This software may be subject to other third party
+* and contributor rights, including patent rights, and no such rights are
+* granted under this license.
+*
+* Copyright (c) 2010-2022, ITU/ISO/IEC
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+*
+*  * Redistributions of source code must retain the above copyright notice,
+*    this list of conditions and the following disclaimer.
+*  * Redistributions in binary form must reproduce the above copyright notice,
+*    this list of conditions and the following disclaimer in the documentation
+*    and/or other materials provided with the distribution.
+*  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+*    be used to endorse or promote products derived from this software without
+*    specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+* THE POSSIBILITY OF SUCH DAMAGE.
+"""
+
+import torch
+import torch.nn as nn
+
+class conv3x3_f(nn.Module):
+    def __init__(self, in_channels, out_channels, kernel_size = 3,stride = 1):
+        super().__init__()
+        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=1 )
+        self.relu = nn.PReLU()
+    def forward(self,rec):
+        y = self.relu(self.conv(rec))
+        return y
+
class conv1x1_f(nn.Module):
    """1x1 (pointwise) convolution followed by PReLU.

    Submodule names (conv, relu) are kept for checkpoint compatibility.
    """

    def __init__(self, in_channels, out_channels, kernel_size=1):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, padding=0)
        self.relu = nn.PReLU()

    def forward(self, input):
        return self.relu(self.conv(input))
+    
class ResidualBlock2(nn.Module):
    """Conv-PReLU-Conv branch returning conv2(relu(conv1(x))) only;
    the skip connection is added by the caller.

    Submodule names (conv1, relu, conv2) match the checkpoint layout.
    """

    def __init__(self, in_channels, out_channels, no_features, kernel_size=3):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels, no_features, kernel_size, padding=1)
        self.relu = nn.PReLU()
        self.conv2 = nn.Conv2d(no_features, out_channels, kernel_size, padding=1)

    def forward(self, x):
        hidden = self.relu(self.conv1(x))
        return self.conv2(hidden)
+
class SpatialGate(nn.Module):
    """Two-layer conv head producing an attention map; the first conv may
    downscale via `stride`, the second always runs at stride 1.

    Submodule names (conv1, relu, conv2) match the checkpoint layout.
    """

    def __init__(self, in_channels, out_channels, no_features, stride=1, kernel_size=3):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels, no_features, kernel_size, stride, padding=1)
        self.relu = nn.PReLU()
        self.conv2 = nn.Conv2d(no_features, out_channels, kernel_size, stride=1, padding=1)

    def forward(self, x):
        hidden = self.relu(self.conv1(x))
        return self.conv2(hidden)
+
class ConditionalNet1(nn.Module):
    """Conditional in-loop luma filter.

    The four single-channel inputs (reconstruction, prediction, BS map,
    QP plane) are embedded separately, fused by a 1x1 conv, downscaled by
    2, passed through 8 residual blocks gated by spatial attention masks
    computed from the raw inputs, and upscaled back with PixelShuffle.
    The network predicts a residual that is added to the reconstruction.

    Note: `in_channels` is accepted for API compatibility but unused —
    each input branch is hard-wired to one channel.  Submodule names are
    fixed because they define the checkpoint state_dict layout.
    """

    def __init__(self, in_channels=1, out_channels=4, no_features=96, kernel_size=3):
        super().__init__()
        self.convRec = conv3x3_f(1, no_features, kernel_size, stride=1)
        self.convPred = conv3x3_f(1, no_features, kernel_size, stride=1)
        self.convBs = conv3x3_f(1, no_features, kernel_size, stride=1)
        self.convQp = conv3x3_f(1, no_features, kernel_size, stride=1)
        self.fuse = conv1x1_f(4 * no_features, no_features)
        self.transitionH = conv3x3_f(no_features, no_features, stride=2)
        self.backbone = nn.ModuleList(
            [ResidualBlock2(no_features, no_features, no_features) for _ in range(8)])
        self.mask = nn.ModuleList(
            [SpatialGate(4, 1, 32, stride=2) for _ in range(8)])
        self.last_layer = nn.Sequential(
            nn.Conv2d(no_features, no_features, kernel_size, padding=1),
            nn.PReLU(),
            nn.Conv2d(no_features, out_channels, kernel_size, padding=1),
            nn.PixelShuffle(2),
        )

    def forward(self, rec, input, input0, qp):
        # Per-input embeddings, fused at full resolution then halved.
        embeddings = torch.cat([self.convRec(rec), self.convPred(input),
                                self.convBs(input0), self.convQp(qp)], 1)
        features = self.transitionH(self.fuse(embeddings))
        # The raw (un-embedded) inputs drive the spatial attention masks.
        raw_stack = torch.cat([rec, input, input0, qp], 1)
        for block, gate in zip(self.backbone, self.mask):
            residual = block(features)
            attention = gate(raw_stack).expand_as(residual)
            # Gated residual update: features += residual * (1 + attention).
            features = features + (residual * attention + residual)
        return self.last_layer(features) + rec
\ No newline at end of file
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage1_4_training_interluma/train.sh b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage1_4_training_interluma/train.sh
new file mode 100644
index 0000000000000000000000000000000000000000..af4e41e17d2a846f75a4796ed78335ce54f66288
--- /dev/null
+++ b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage1_4_training_interluma/train.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+python3 train_interluma.py --save_ckp_path './ckp/' --loss_function 'L1' --learning_rate 1e-4 --epochs 455 --switch_lr 1e-5 --switch_epochs 295 --batchsize 64 --num_workers 20 --input_json_train db_stage1.json --input_json_valid db_valid.json --tag InterLuma
\ No newline at end of file
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage1_4_training_interluma/train_interluma.py b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage1_4_training_interluma/train_interluma.py
new file mode 100644
index 0000000000000000000000000000000000000000..26fade6e5d2c17d783d509d050119f7cc7525b36
--- /dev/null
+++ b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage1_4_training_interluma/train_interluma.py
@@ -0,0 +1,261 @@
+"""
+/* The copyright in this software is being made available under the BSD
+* License, included below. This software may be subject to other third party
+* and contributor rights, including patent rights, and no such rights are
+* granted under this license.
+*
+* Copyright (c) 2010-2022, ITU/ISO/IEC
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+*
+*  * Redistributions of source code must retain the above copyright notice,
+*    this list of conditions and the following disclaimer.
+*  * Redistributions in binary form must reproduce the above copyright notice,
+*    this list of conditions and the following disclaimer in the documentation
+*    and/or other materials provided with the distribution.
+*  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+*    be used to endorse or promote products derived from this software without
+*    specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+* THE POSSIBILITY OF SUCH DAMAGE.
+"""
+
+import torch
+import torch.nn as nn
+from torch.utils.data import Dataset
+from torch.utils.data import DataLoader
+import os
+import numpy as np
+import glob
+import argparse
+import sys
+from datetime import datetime
+from time import time
+from net import ConditionalNet1 as ConditionalNet
+import data_loader
+
+# learning policy
def adjust_learning_rate(optimizer, mse_lr):
    """Set the learning rate of every parameter group of `optimizer` to `mse_lr`.

    Used to switch from the initial learning rate to the reduced one once
    --switch_epochs is reached.
    """
    print('Update learning rate to lr', mse_lr)  # fixed typo: "learing"
    for param_group in optimizer.param_groups:
        param_group['lr'] = mse_lr
+     
def train_loop(dataloader, model, loss_fn, optimizer, validation_dataloader, epoch, checkpoint_path):
    """Run one training epoch of the luma filter.

    The loss is computed between the predicted residual (filteredY - recY)
    and the true residual (origY - recY), so either L1 or MSE can act as a
    "projective" residual loss.  A progress line is printed every 200
    batches; during the first epoch only (epoch <= 1) a validation pass is
    run and a checkpoint saved every 1000 batches.
    """
    model.train()
    size = len(dataloader.dataset)
    stop_time = datetime.now()
    loss_sum = 0
    # Tensors are moved batch-by-batch to the GPU when one is available.
    my_device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    
    for batch, (QP, recY, predY, bsY, origY) in enumerate(dataloader):        
        QP = QP.to(my_device)
        recY = recY.to(my_device)
        predY = predY.to(my_device)
        bsY = bsY.to(my_device)
        origY = origY.to(my_device)
        
        # Compute prediction and loss
        filteredY = model(qp=QP, rec=recY, input=predY, input0=bsY)

        # Allow the use of projective loss function
        r_bar = origY-recY
        r_hat = filteredY-recY
        loss = loss_fn(r_hat, r_bar)

        loss_sum += loss.item()

        # Backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        
        if batch % 200 == 0:
            # Progress report: running average loss, last batch loss,
            # sample count, and wall time since the previous report.
            start_time = stop_time
            stop_time = datetime.now()
            duration = stop_time - start_time
            # NOTE: `loss` is rebound from a tensor to a float here.
            loss, current = loss.item(), batch * len(recY)
            ave_loss = loss_sum / (1.0 * (batch+1))
            print(f"ave loss: {ave_loss:>9e}, last loss: {loss:>7f}  [{current:>5d}/{size:>5d}]", " ", duration.seconds, "seconds since last print out.")

        # Mid-epoch checkpoints: only during the first epoch, every 1000
        # batches (batch 0 excluded).
        if (batch % 1000 == 0) and (not (batch==0)):
            if epoch <= 1:
                # Validation result is computed but only printed by test_loop.
                my_loss = test_loop(validation_dataloader, model, loss_fn)
                filename = 'epoch_%04d_' % (epoch) + 'batch%07d.pt' %(batch)
                final_checkpoint_path = os.path.join(checkpoint_path, filename)    
                # NOTE(review): this dict has no 'loss' key, but train()
                # reads checkpoint['loss'] when resuming from a checkpoint —
                # resuming from these mid-epoch files would fail; confirm.
                torch.save({
                    'epoch': epoch,
                    'model_state_dict': model.state_dict(),
                }, final_checkpoint_path)
+
+
+def test_loop(dataloader, model, loss_fn):
+    model.eval()
+    size = len(dataloader.dataset)
+    num_batches = len(dataloader)
+    test_loss = 0
+    nofilter_loss = 0
+
+    with torch.no_grad():
+        for QP, recY, predY, bsY, origY in dataloader:
+            my_device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
+            QP = QP.to(my_device)
+            recY = recY.to(my_device)
+            predY = predY.to(my_device)
+            bsY = bsY.to(my_device)
+            origY = origY.to(my_device)
+            
+            pred = model(qp=QP, rec=recY, input=predY, input0=bsY)
+
+            r_bar = origY - recY
+            r_hat = pred - recY
+            
+            test_loss += loss_fn(r_hat, r_bar).item()
+            nofilter_loss += ((r_bar)**2).mean().item()
+
+    test_loss /= num_batches
+    nofilter_loss /= num_batches
+    print(f"Test Error: \n Avg loss: {test_loss:e}, Nofilter loss: {nofilter_loss:e} \n")
+    return test_loss    
+
+
+def train(args):
+    stop_time = datetime.now()       
+
+    # Start by finding out if we have cuda:
+    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
+
+    # load training and validation data
+    generate_type = 1    
+    poc_list = args.poc_list
+    components_luma = "org_Y,rec_before_dbf_Y,pred_Y,bs_Y,qp_slice" 
+    components_chroma = ""     
+    comps_luma = components_luma.split(",")
+    comps_chroma = components_chroma.split(",")
+    
+    train_dl=data_loader.DataLoader1(args.input_json_train,args.patch_size,poc_list,generate_type,comps_luma,comps_chroma) 
+    train_dataloader = DataLoader(train_dl, batch_size=args.batchsize, shuffle=True, num_workers=args.num_workers, pin_memory=True)
+    
+    poc_list_valid = args.poc_list_valid
+    valid_dl=data_loader.DataLoader1(args.input_json_valid,args.patch_size,poc_list_valid,generate_type,comps_luma,comps_chroma) 
+    validation_dataloader = DataLoader(valid_dl, batch_size=args.batchsize, shuffle=True, num_workers=args.num_workers, pin_memory=True)
+    
+    print("Nb train samples (frames) available: {}".format(len(train_dl)))
+    print("Nb valid samples (frames) available: {}".format(len(valid_dl)))
+    print("Available components: {}".format(train_dl.components))    
+    print("Selected luma components: {}".format(comps_luma))
+    print("Selected Chroma components: {}".format(comps_chroma))
+            
+    # Load model
+    model_loaded = ConditionalNet()
+    print('Use self-defined model')
+
+    model_loaded = model_loaded.train()
+
+    if not args.load_ckp_path == "":
+        # load checkpoint
+        checkpoint = torch.load(args.load_ckp_path, map_location=torch.device('cpu'))
+        # torch load checkpoint
+        model_loaded.load_state_dict(checkpoint['model_state_dict'])
+        read_epoch = checkpoint['epoch']
+        # We should start at the read_epoch + 1.
+        start_epoch = read_epoch+1
+        print('The loaded epoch number was', start_epoch-1, 'so we will start training at epoch number', start_epoch)
+        my_loss = checkpoint['loss']
+        # Not sure we need to do this again but better be safe than sorry
+        model_loaded = model_loaded.train()
+    else:
+        # reset weights
+        with torch.no_grad():
+            for name, param in model_loaded.named_parameters():
+                param.data = 0.025*(torch.rand(param.shape)-0.5)
+        start_epoch = 1
+
+    model_loaded = model_loaded.to(device)
+
+    learning_rate = args.learning_rate
+    print('Using learning rate', learning_rate, 'and beta2 = 0.999')
+        
+    optimizer = torch.optim.Adam(model_loaded.parameters(), lr=learning_rate, betas=(0.9, 0.999))
+
+    if args.loss_function == 'L1':
+        loss_fn = nn.L1Loss()
+        print('Using L1 loss function.')
+    elif args.loss_function == 'MSE':
+        loss_fn = nn.MSELoss()
+        print('Using MSE loss function.')
+    else:
+        print("Loss function is not one of 'L1' or 'MSE' , exiting.")
+        exit(1)
+        
+    checkpoint_path = args.save_ckp_path 
+    checkpoint_path = os.path.join(checkpoint_path, args.tag)
+    os.makedirs(checkpoint_path, exist_ok=True)
+
+    print('before training:')
+    my_loss = test_loop(validation_dataloader, model_loaded, loss_fn)
+    for epoch in range(start_epoch, args.epochs+1):
+        print('epoch', epoch)
+        if epoch < args.switch_epochs :
+            train_loop(train_dataloader, model_loaded, loss_fn, optimizer, validation_dataloader, epoch, checkpoint_path)
+        else:      
+            adjust_learning_rate(optimizer, args.switch_lr)
+            train_loop(train_dataloader, model_loaded, loss_fn, optimizer, validation_dataloader, epoch, checkpoint_path)
+        
+        my_loss = test_loop(validation_dataloader, model_loaded, loss_fn)
+        filename = 'epoch_%04d.pt' % (epoch)
+        final_checkpoint_path = os.path.join(checkpoint_path, filename)    
+        torch.save({
+            'epoch': epoch,
+            'model_state_dict': model_loaded.state_dict(),
+            'optimizer_state_dict': optimizer.state_dict(),
+            'loss': my_loss,
+        }, final_checkpoint_path)
+        
+
+if __name__ == '__main__':
+    """Parses command line arguments."""
+    parser = argparse.ArgumentParser(description='Training script')
+
+    parser.add_argument(
+        "--verbose", "-V", action="store_true",
+        help="Report progress and metrics when training or compressing.")
+        
+    # training/validation data  
+    parser.add_argument("--num_workers", type=int, default=16, help="Number of workers for training data.")  
+    parser.add_argument("--input_json_train", action="store", nargs='?', type=str, help="input json database.")
+    parser.add_argument("--input_json_valid", action="store", nargs='?', type=str, help="input json database.")
+    parser.add_argument('--poc_list', nargs="*", type=int, default=range(0,64), help='pocs of frames for training (e.g. --poc_list 1 3 5), use all frames by default')
+    parser.add_argument('--poc_list_valid', nargs="*", type=int, default=[0], help='pocs of frames for training (e.g. --poc_list 1 3 5), use all frames by default')
+    parser.add_argument("--patch_size", action="store", nargs='?', default=256, type=int, help="patch size to extract")
+    parser.add_argument("--batchsize", type=int, default=32, help="Batch size for training and validation.")
+    
+    # optimizer configuration
+    parser.add_argument("--learning_rate", type=float, default=1e-4, help="learning for training.")
+    parser.add_argument("--loss_function", type=str, default = 'MSE', help="The loss function, 'L1' or 'MSE'.")
+    parser.add_argument("--epochs", type=int, default=400, help="Train up to this number of epochs. ")
+    parser.add_argument("--switch_epochs", type=int, default=300, help="switch to a smaller learning rate. Should be smaller than epoch.")
+    parser.add_argument('--switch_lr', type=float, default=1e-5, help='Learning rate after switching.')
+        
+    # save and load checkpoint    
+    parser.add_argument("--save_ckp_path", default="./checkpoints/", help="Path to save checkpoint.")
+    parser.add_argument("--load_ckp_path", default="", help="Path to load pretrained checkpoint.")
+    
+    # a unique name for the training job
+    parser.add_argument("--tag", default="_mytest",help="Tag for the current training. Checkpoints are saved under the folder save_ckp_path+tag.")  
+
+    args = parser.parse_args()
+    print(args)
+
+    train(args)
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage2_1_vtm/extract_stage2_1.patch b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage2_1_vtm/extract_stage2_1.patch
new file mode 100644
index 0000000000000000000000000000000000000000..6a99f521bbe79b39cab5ddf93921c02e7f8baa35
--- /dev/null
+++ b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage2_1_vtm/extract_stage2_1.patch
@@ -0,0 +1,970 @@
+diff --git a/CMakeLists.txt b/CMakeLists.txt
+index c60921e..be58c92 100644
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -73,7 +73,7 @@ endif()
+ # bb_enable_warnings( gcc -Wno-unused-variable )
+ # bb_enable_warnings( gcc-4.8 warnings-as-errors -Wno-unused-variable )
+ # for gcc 8.2:
+-bb_enable_warnings( gcc warnings-as-errors -Wno-sign-compare -Wno-class-memaccess)
++#bb_enable_warnings( gcc warnings-as-errors -Wno-sign-compare -Wno-class-memaccess)
+ 
+ if( XCODE )
+   bb_enable_warnings( clang warnings-as-errors
+@@ -94,7 +94,7 @@ endif()
+ #bb_enable_warnings( clang warnings-as-errors )
+ 
+ # enable warnings
+-bb_enable_warnings( msvc warnings-as-errors "/wd4996" )
++#bb_enable_warnings( msvc warnings-as-errors "/wd4996" )
+ 
+ # enable sse4.1 build for all source files for gcc and clang
+ if( UNIX OR MINGW )
+diff --git a/source/App/DecoderApp/DecApp.cpp b/source/App/DecoderApp/DecApp.cpp
+index 01ecd9e..640bded 100644
+--- a/source/App/DecoderApp/DecApp.cpp
++++ b/source/App/DecoderApp/DecApp.cpp
+@@ -323,6 +323,44 @@ uint32_t DecApp::decode()
+         if( ( m_cDecLib.getVPS() != nullptr && ( m_cDecLib.getVPS()->getMaxLayers() == 1 || xIsNaluWithinTargetOutputLayerIdSet( &nalu ) ) ) || m_cDecLib.getVPS() == nullptr )
+         {
+           m_cVideoIOYuvReconFile[nalu.m_nuhLayerId].open( reconFileName, true, m_outputBitDepth, m_outputBitDepth, bitDepths.recon ); // write mode
++#if DUMP_MORE_INFO
++          if ( !m_picsBeforeDbFileName.empty() || !m_bsInfoFileName.empty() )
++          {
++            CHECK(m_cDecLib.getVPS()->getMaxLayers() != 1, "dump more info does not 100% work in multi-layer case");
++          }
++
++          if (!m_picsBeforeDbFileName.empty())
++          {
++            m_cVideoIOYuvPicBeforeDb[nalu.m_nuhLayerId].open(m_picsBeforeDbFileName, true, m_outputBitDepth, m_outputBitDepth, bitDepths.recon); // write mode
++          }
++
++          if (!m_picsAfterDbFileName.empty())
++          {
++            m_cVideoIOYuvPicAfterDb[nalu.m_nuhLayerId].open(m_picsAfterDbFileName, true, m_outputBitDepth, m_outputBitDepth, bitDepths.recon); // write mode
++          }
++
++          if (!m_bsInfoFileName.empty())
++          {
++            m_cVideoIOYuvBsInfo[nalu.m_nuhLayerId].open(m_bsInfoFileName, true, m_outputBitDepth, m_outputBitDepth, bitDepths.recon); // write mode
++          }
++
++          if (!m_myPredFileName.empty())
++          {
++            m_cVideoIOYuvMyPred[nalu.m_nuhLayerId].open(m_myPredFileName, true, m_outputBitDepth, m_outputBitDepth, bitDepths.recon); // write mode
++          }
++
++          if (!m_myPartFileName.empty())
++          {
++            m_cVideoIOYuvMyPart[nalu.m_nuhLayerId].open(m_myPartFileName, true, m_outputBitDepth, m_outputBitDepth, bitDepths.recon); // write mode
++          }
++#endif
++#if EXTRACT_BMP_INFO
++          if (!m_myBpmFileName.empty())
++          {
++            m_cVideoIOYuvMyBpm[nalu.m_nuhLayerId].open(m_myBpmFileName, true, m_outputBitDepth, m_outputBitDepth,
++                                                        bitDepths.recon);   // write mode
++          }
++#endif
+         }
+       }
+       // update file bitdepth shift if recon bitdepth changed between sequences
+@@ -535,6 +573,56 @@ void DecApp::xDestroyDecLib()
+     }
+   }
+   
++#if DUMP_MORE_INFO
++  if (!m_picsBeforeDbFileName.empty())
++  {
++    for (auto& recFile : m_cVideoIOYuvPicBeforeDb)
++    {
++      recFile.second.close();
++    }
++  }
++  
++  if (!m_picsAfterDbFileName.empty())
++  {
++    for (auto& recFile : m_cVideoIOYuvPicAfterDb)
++    {
++      recFile.second.close();
++    }
++  }
++  
++  if (!m_bsInfoFileName.empty())
++  {
++    for (auto& recFile : m_cVideoIOYuvBsInfo)
++    {
++      recFile.second.close();
++    }
++  }
++  
++  if (!m_myPredFileName.empty())
++  {
++    for (auto& recFile : m_cVideoIOYuvMyPred)
++    {
++      recFile.second.close();
++    }
++  }
++  
++  if (!m_myPartFileName.empty())
++  {
++    for (auto& recFile : m_cVideoIOYuvMyPart)
++    {
++      recFile.second.close();
++    }
++  }
++#endif
++#if EXTRACT_BMP_INFO
++  if (!m_myBpmFileName.empty())
++  {
++    for (auto &recFile: m_cVideoIOYuvMyBpm)
++    {
++      recFile.second.close();
++    }
++  }
++#endif
+ 
+   // destroy decoder class
+   m_cDecLib.destroy();
+@@ -692,6 +780,81 @@ void DecApp::xWriteOutput( PicList* pcListPic, uint32_t tId )
+                                         conf.getWindowBottomOffset() * SPS::getWinUnitY( chromaFormatIDC ),
+                                         NUM_CHROMA_FORMAT, m_bClipOutputVideoToRec709Range );
+           }
++
++#if DUMP_MORE_INFO
++          if (!m_picsBeforeDbFileName.empty())
++          {
++            m_cVideoIOYuvPicBeforeDb[pcPic->layerId].write( pcPic->getPicBeforeDbBuf().get( COMPONENT_Y ).width, pcPic->getPicBeforeDbBuf().get( COMPONENT_Y ).height, pcPic->getPicBeforeDbBuf(),
++                                      m_outputColourSpaceConvert,
++                                      m_packedYUVMode,
++                                      conf.getWindowLeftOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                      conf.getWindowRightOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                      conf.getWindowTopOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                      conf.getWindowBottomOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                      NUM_CHROMA_FORMAT, m_bClipOutputVideoToRec709Range );
++          }
++
++          if (!m_picsAfterDbFileName.empty())
++          {
++            m_cVideoIOYuvPicAfterDb[pcPic->layerId].write( pcPic->getPicAfterDbBuf().get( COMPONENT_Y ).width, pcPic->getPicAfterDbBuf().get( COMPONENT_Y ).height, pcPic->getPicAfterDbBuf(),
++                                      m_outputColourSpaceConvert,
++                                      m_packedYUVMode,
++                                      conf.getWindowLeftOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                      conf.getWindowRightOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                      conf.getWindowTopOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                      conf.getWindowBottomOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                      NUM_CHROMA_FORMAT, m_bClipOutputVideoToRec709Range );
++          }
++          
++          if (!m_bsInfoFileName.empty())
++          {
++            m_cVideoIOYuvBsInfo[pcPic->layerId].write( pcPic->getBsMapBuf().get( COMPONENT_Y ).width, pcPic->getBsMapBuf().get( COMPONENT_Y ).height, pcPic->getBsMapBuf(),
++                                      m_outputColourSpaceConvert,
++                                      m_packedYUVMode,
++                                      conf.getWindowLeftOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                      conf.getWindowRightOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                      conf.getWindowTopOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                      conf.getWindowBottomOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                      NUM_CHROMA_FORMAT, m_bClipOutputVideoToRec709Range );
++          }
++          
++          if (!m_myPredFileName.empty())
++          {
++            m_cVideoIOYuvMyPred[pcPic->layerId].write( pcPic->getMyPredBuf().get( COMPONENT_Y ).width, pcPic->getMyPredBuf().get( COMPONENT_Y ).height, pcPic->getMyPredBuf(),
++                                      m_outputColourSpaceConvert,
++                                      m_packedYUVMode,
++                                      conf.getWindowLeftOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                      conf.getWindowRightOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                      conf.getWindowTopOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                      conf.getWindowBottomOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                      NUM_CHROMA_FORMAT, m_bClipOutputVideoToRec709Range );
++          }
++
++          if (!m_myPartFileName.empty())
++          {
++            m_cVideoIOYuvMyPart[pcPic->layerId].write( pcPic->getPartitionBuf().get( COMPONENT_Y ).width, pcPic->getPartitionBuf().get( COMPONENT_Y ).height, pcPic->getPartitionBuf(),
++                                      m_outputColourSpaceConvert,
++                                      m_packedYUVMode,
++                                      conf.getWindowLeftOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                      conf.getWindowRightOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                      conf.getWindowTopOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                      conf.getWindowBottomOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                      NUM_CHROMA_FORMAT, m_bClipOutputVideoToRec709Range );
++          }
++#endif
++#if EXTRACT_BMP_INFO
++          if (!m_myBpmFileName.empty())
++          {
++            m_cVideoIOYuvMyBpm[pcPic->layerId].write(
++              pcPic->getBlockPredModeBuf().get(COMPONENT_Y).width, pcPic->getBlockPredModeBuf().get(COMPONENT_Y).height,
++              pcPic->getBlockPredModeBuf(), m_outputColourSpaceConvert, m_packedYUVMode,
++              conf.getWindowLeftOffset() * SPS::getWinUnitX(chromaFormatIDC),
++              conf.getWindowRightOffset() * SPS::getWinUnitX(chromaFormatIDC),
++              conf.getWindowTopOffset() * SPS::getWinUnitY(chromaFormatIDC),
++              conf.getWindowBottomOffset() * SPS::getWinUnitY(chromaFormatIDC), NUM_CHROMA_FORMAT,
++              m_bClipOutputVideoToRec709Range);
++          }
++#endif
+         }
+         writeLineToOutputLog(pcPic);
+ 
+@@ -839,6 +1002,83 @@ void DecApp::xFlushOutput( PicList* pcListPic, const int layerId )
+                                         conf.getWindowBottomOffset() * SPS::getWinUnitY( chromaFormatIDC ),
+                                         NUM_CHROMA_FORMAT, m_bClipOutputVideoToRec709Range );
+             }
++
++#if DUMP_MORE_INFO
++            if (!m_picsBeforeDbFileName.empty())
++            {
++              m_cVideoIOYuvPicBeforeDb[pcPic->layerId].write( pcPic->getPicBeforeDbBuf().get( COMPONENT_Y ).width, pcPic->getPicBeforeDbBuf().get( COMPONENT_Y ).height, pcPic->getPicBeforeDbBuf(),
++                                        m_outputColourSpaceConvert,
++                                        m_packedYUVMode,
++                                        conf.getWindowLeftOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                        conf.getWindowRightOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                        conf.getWindowTopOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                        conf.getWindowBottomOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                        NUM_CHROMA_FORMAT, m_bClipOutputVideoToRec709Range );
++            }
++
++            if (!m_picsAfterDbFileName.empty())
++            {
++              m_cVideoIOYuvPicAfterDb[pcPic->layerId].write( pcPic->getPicAfterDbBuf().get( COMPONENT_Y ).width, pcPic->getPicAfterDbBuf().get( COMPONENT_Y ).height, pcPic->getPicAfterDbBuf(),
++                                        m_outputColourSpaceConvert,
++                                        m_packedYUVMode,
++                                        conf.getWindowLeftOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                        conf.getWindowRightOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                        conf.getWindowTopOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                        conf.getWindowBottomOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                        NUM_CHROMA_FORMAT, m_bClipOutputVideoToRec709Range );
++            }
++            
++            if (!m_bsInfoFileName.empty())
++            {
++              m_cVideoIOYuvBsInfo[pcPic->layerId].write( pcPic->getBsMapBuf().get( COMPONENT_Y ).width, pcPic->getBsMapBuf().get( COMPONENT_Y ).height, pcPic->getBsMapBuf(),
++                                        m_outputColourSpaceConvert,
++                                        m_packedYUVMode,
++                                        conf.getWindowLeftOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                        conf.getWindowRightOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                        conf.getWindowTopOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                        conf.getWindowBottomOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                        NUM_CHROMA_FORMAT, m_bClipOutputVideoToRec709Range );
++            }
++
++            if (!m_myPredFileName.empty())
++            {
++              m_cVideoIOYuvMyPred[pcPic->layerId].write( pcPic->getMyPredBuf().get( COMPONENT_Y ).width, pcPic->getMyPredBuf().get( COMPONENT_Y ).height, pcPic->getMyPredBuf(),
++                                        m_outputColourSpaceConvert,
++                                        m_packedYUVMode,
++                                        conf.getWindowLeftOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                        conf.getWindowRightOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                        conf.getWindowTopOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                        conf.getWindowBottomOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                        NUM_CHROMA_FORMAT, m_bClipOutputVideoToRec709Range );
++            }
++
++            if (!m_myPartFileName.empty())
++            {
++              m_cVideoIOYuvMyPart[pcPic->layerId].write( pcPic->getPartitionBuf().get( COMPONENT_Y ).width, pcPic->getPartitionBuf().get( COMPONENT_Y ).height, pcPic->getPartitionBuf(),
++                                        m_outputColourSpaceConvert,
++                                        m_packedYUVMode,
++                                        conf.getWindowLeftOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                        conf.getWindowRightOffset() * SPS::getWinUnitX( chromaFormatIDC ),
++                                        conf.getWindowTopOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                        conf.getWindowBottomOffset() * SPS::getWinUnitY( chromaFormatIDC ),
++                                        NUM_CHROMA_FORMAT, m_bClipOutputVideoToRec709Range );
++            }
++
++#endif
++#if EXTRACT_BMP_INFO
++            if (!m_myBpmFileName.empty())
++            {
++              m_cVideoIOYuvMyBpm[pcPic->layerId].write(
++                pcPic->getBlockPredModeBuf().get(COMPONENT_Y).width,
++                pcPic->getBlockPredModeBuf().get(COMPONENT_Y).height, pcPic->getBlockPredModeBuf(),
++                m_outputColourSpaceConvert, m_packedYUVMode,
++                conf.getWindowLeftOffset() * SPS::getWinUnitX(chromaFormatIDC),
++                conf.getWindowRightOffset() * SPS::getWinUnitX(chromaFormatIDC),
++                conf.getWindowTopOffset() * SPS::getWinUnitY(chromaFormatIDC),
++                conf.getWindowBottomOffset() * SPS::getWinUnitY(chromaFormatIDC), NUM_CHROMA_FORMAT,
++                m_bClipOutputVideoToRec709Range);
++            }
++#endif
+           }
+           writeLineToOutputLog(pcPic);
+ #if JVET_S0078_NOOUTPUTPRIORPICFLAG
+diff --git a/source/App/DecoderApp/DecApp.h b/source/App/DecoderApp/DecApp.h
+index 11f88ed..65de9bf 100644
+--- a/source/App/DecoderApp/DecApp.h
++++ b/source/App/DecoderApp/DecApp.h
+@@ -61,7 +61,16 @@ private:
+   // class interface
+   DecLib          m_cDecLib;                     ///< decoder class
+   std::unordered_map<int, VideoIOYuv>      m_cVideoIOYuvReconFile;        ///< reconstruction YUV class
+-
++#if DUMP_MORE_INFO
++  std::unordered_map<int, VideoIOYuv>      m_cVideoIOYuvPicBeforeDb;        ///< pictures before deblocking
++  std::unordered_map<int, VideoIOYuv>      m_cVideoIOYuvPicAfterDb;         ///< pictures after deblocking
++  std::unordered_map<int, VideoIOYuv>      m_cVideoIOYuvBsInfo;             ///< pictures with bs info
++  std::unordered_map<int, VideoIOYuv>      m_cVideoIOYuvMyPred;             ///< prediction pictures
++  std::unordered_map<int, VideoIOYuv>      m_cVideoIOYuvMyPart;             ///< average over partitioning
++#endif
++#if EXTRACT_BMP_INFO
++  std::unordered_map<int, VideoIOYuv> m_cVideoIOYuvMyBpm;   ///< Block prediction mode
++#endif
+   // for output control
+   int             m_iPOCLastDisplay;              ///< last POC in display order
+   std::ofstream   m_seiMessageFileStream;         ///< Used for outputing SEI messages.
+diff --git a/source/App/DecoderApp/DecAppCfg.cpp b/source/App/DecoderApp/DecAppCfg.cpp
+index d96c204..a0b3056 100644
+--- a/source/App/DecoderApp/DecAppCfg.cpp
++++ b/source/App/DecoderApp/DecAppCfg.cpp
+@@ -77,6 +77,17 @@ bool DecAppCfg::parseCfg( int argc, char* argv[] )
+   ("BitstreamFile,b",           m_bitstreamFileName,                   string(""), "bitstream input file name")
+   ("ReconFile,o",               m_reconFileName,                       string(""), "reconstructed YUV output file name\n")
+ 
++#if DUMP_MORE_INFO
++  ("PicBeforeDb,-pbd",             m_picsBeforeDbFileName,  string(""), "pictures before deblocking filter YUV output file name\n")
++  ("PicAfterDb,-pad",              m_picsAfterDbFileName,   string(""), "pictures after deblocking filter YUV output file name\n")
++  ("BsInfo,-bs",                   m_bsInfoFileName,        string(""), "boundary strength information YUV output file name\n")
++  ("myPred,-mpr",                  m_myPredFileName,        string(""), "prediction pictures YUV output file name\n")
++  ("myPart,-mpa",                  m_myPartFileName,        string(""), "average over partitioning YUV output file name\n")
++#endif
++#if EXTRACT_BMP_INFO
+  ("myBpm,-bpm",                   m_myBpmFileName,         string(""), "block prediction mode YUV output file name\n")
++#endif
++
+   ("OplFile,-opl",              m_oplFilename ,                        string(""), "opl-file name without extension for conformance testing\n")
+ 
+ #if ENABLE_SIMD_OPT
+@@ -249,6 +260,13 @@ bool DecAppCfg::parseCfg( int argc, char* argv[] )
+ DecAppCfg::DecAppCfg()
+ : m_bitstreamFileName()
+ , m_reconFileName()
++#if DUMP_MORE_INFO
++, m_picsBeforeDbFileName()
++, m_picsAfterDbFileName()
++, m_bsInfoFileName()
++, m_myPredFileName()
++, m_myPartFileName()
++#endif
+ , m_oplFilename()
+ 
+ , m_iSkipFrame(0)
+diff --git a/source/App/DecoderApp/DecAppCfg.h b/source/App/DecoderApp/DecAppCfg.h
+index ba7c033..6262008 100644
+--- a/source/App/DecoderApp/DecAppCfg.h
++++ b/source/App/DecoderApp/DecAppCfg.h
+@@ -58,7 +58,16 @@ class DecAppCfg
+ protected:
+   std::string   m_bitstreamFileName;                    ///< input bitstream file name
+   std::string   m_reconFileName;                        ///< output reconstruction file name
+-
++#if DUMP_MORE_INFO
++  std::string   m_picsBeforeDbFileName;
++  std::string   m_picsAfterDbFileName;
++  std::string   m_bsInfoFileName;
++  std::string   m_myPredFileName;
++  std::string   m_myPartFileName;
++#endif
++#if EXTRACT_BMP_INFO
++  std::string m_myBpmFileName;
++#endif
+   std::string   m_oplFilename;                        ///< filename to output conformance log.
+ 
+   int           m_iSkipFrame;                           ///< counter for frames prior to the random access point to skip
+diff --git a/source/Lib/CommonLib/CNNFilter.cpp b/source/Lib/CommonLib/CNNFilter.cpp
+index 9e2b2e5..4b50387 100644
+--- a/source/Lib/CommonLib/CNNFilter.cpp
++++ b/source/Lib/CommonLib/CNNFilter.cpp
+@@ -22,6 +22,8 @@
+ //! \ingroup CommonLib
+ //! \{
+ 
++#if CNN_FILTERING
++
+ #define CNNLFCtx(c) SubCtx( Ctx::cnnlfModelIdx, c )
+ 
+ CNNFilter::CNNFilter()
+@@ -37,9 +39,11 @@ void CNNFilter::initISlice(int qp)
+   at::set_num_threads(1);
+   at::set_num_interop_threads(1);
+   
+-  std::string sLumaModelName = "path to models/JVET_W_EE_1.6_LumaCNNFilter_IntraSlice.pt";
+-  std::string sChromaModelName = "path to models/JVET_W_EE_1.6_ChromaCNNFilter_IntraSlice.pt";
+-  
++  //std::string sLumaModelName = "/Users/ejacstr/Documents/Code/loopNN/ExtractMoreInfoVTM/models/JVET_W_EE_1.6_LumaCNNFilter_IntraSlice.pt";
++  //std::string sChromaModelName = "/Users/ejacstr/Documents/Code/loopNN/ExtractMoreInfoVTM/models/JVET_W_EE_1.6_ChromaCNNFilter_IntraSlice.pt";
++  std::string sLumaModelName = "/proj/video_data3/videosim/eliudux/myCurrentWork/training_data/jvet-ab-ee1-1_5/training/stage2_1_vtm/trained_models/IntraY_dd6b_jacob_ep378.pt";
++  std::string sChromaModelName = "/proj/video_data3/videosim/eliudux/myCurrentWork/training_data/jvet-ab-ee1-1_5/training/stage2_1_vtm/extractmoreinfovtm/models/JVET_W_EE_1.6_ChromaCNNFilter_IntraSlice.pt";
++    
+   m_lumaModuleISlice = torch::jit::load(sLumaModelName);
+   m_chromaModuleISlice = torch::jit::load(sChromaModelName);
+   
+@@ -50,9 +54,10 @@ void CNNFilter::initBSlice(int qp)
+ {
+   if (m_modelInitFlagBSlice)
+     return;
+-  
+-  std::string sLumaModelName = "path to models/JVET_W_EE_1.6_LumaCNNFilter_InterSlice.pt";
+-  std::string sChromaModelName = "path to models/JVET_W_EE_1.6_ChromaCNNFilter_InterSlice.pt";
++  //std::string sLumaModelName = "/Users/ejacstr/Documents/Code/loopNN/ExtractMoreInfoVTM/models/JVET_W_EE_1.6_LumaCNNFilter_InterSlice.pt";
++  //std::string sChromaModelName = "/Users/ejacstr/Documents/Code/loopNN/ExtractMoreInfoVTM/models/JVET_W_EE_1.6_ChromaCNNFilter_InterSlice.pt";
++  std::string sLumaModelName = "/proj/video_data3/videosim/eliudux/myCurrentWork/training_data/jvet-ab-ee1-1_5/training/stage2_1_vtm/trained_models/InterY_9b7d_jacob_ep455.pt";
++  std::string sChromaModelName = "/proj/video_data3/videosim/eliudux/myCurrentWork/training_data/jvet-ab-ee1-1_5/training/stage2_1_vtm/extractmoreinfovtm/models/JVET_W_EE_1.6_ChromaCNNFilter_InterSlice.pt";
+ 
+   m_lumaModuleBSlice = torch::jit::load(sLumaModelName);
+   m_chromaModuleBSlice = torch::jit::load(sChromaModelName);
+@@ -130,7 +135,7 @@ void CNNFilter::cnnFilterLumaBlockBSlice(Picture* pic, UnitArea ctuArea, int ext
+   //at::init_num_threads();
+   
+   int seqQp = pic->slices[0]->getPPS()->getPicInitQPMinus26() + 26;
+-  int sliceQp = pic->slices[0]->getSliceQp();
++  //int sliceQp = pic->slices[0]->getSliceQp();
+   int qp = modelIdx == 2 ? seqQp - 10 : (modelIdx == 1 ? seqQp - 5 : seqQp);
+   if (pic->slices[0]->getTLayer() >= 4 && modelIdx == 2)
+     qp = seqQp + 5;
+@@ -150,7 +155,7 @@ void CNNFilter::cnnFilterLumaBlockBSlice(Picture* pic, UnitArea ctuArea, int ext
+     }
+   }
+   // Execute the model and turn its output into a tensor.
+-
++#if DO_ACTUAL_CNN_FILTERING
+   at::Tensor output;
+   output = m_lumaModuleBSlice.forward(input).toTensor();
+   float *pOutput = output.data_ptr<float>();
+@@ -179,7 +184,7 @@ void CNNFilter::cnnFilterLumaBlockBSlice(Picture* pic, UnitArea ctuArea, int ext
+     pScaledDst[idx]     = Pel(Clip3<int>(0, 1023 << NN_RESIDUE_ADDITIONAL_SHIFT, int(pOutput[pixelIdx] * maxValue * (1 << NN_RESIDUE_ADDITIONAL_SHIFT) + 0.5)));
+ #endif  
+   }
+-
++#endif
+ }
+ 
+ 
+@@ -269,6 +274,7 @@ void CNNFilter::cnnFilterChromaBlockBSlice(Picture* pic, UnitArea ctuArea, int e
+       pBsMapBatchC[blockSizeHorC*blockSizeVerC+yy*blockSizeHorC+xx] = pBsMapCr[idx] / maxValue;
+     }
+   }
++#if DO_ACTUAL_CNN_FILTERING
+   // Execute the model and turn its output into a tensor.
+   at::Tensor output = m_chromaModuleBSlice.forward(input).toTensor();
+   float *pOutput = output.data_ptr<float>();
+@@ -317,7 +323,7 @@ void CNNFilter::cnnFilterChromaBlockBSlice(Picture* pic, UnitArea ctuArea, int e
+     pScaledDstCr[idx] = Pel(Clip3<int>(0, 1023 << NN_RESIDUE_ADDITIONAL_SHIFT, int(pOutput[pixelIdx + blockSizeC] * maxValue * (1 << NN_RESIDUE_ADDITIONAL_SHIFT) + 0.5)));
+ #endif
+   }
+-
++#endif
+ }
+ void CNNFilter::cnnFilterLumaBlockISlice(Picture* pic, UnitArea ctuArea, int extLeft, int extRight, int extTop, int extBottom, int modelIdx)
+ {
+@@ -337,6 +343,7 @@ void CNNFilter::cnnFilterLumaBlockISlice(Picture* pic, UnitArea ctuArea, int ext
+   Pel* pPred = bufPred.buf;
+   int blockSizeHor = ctuArea.lwidth();
+   int blockSizeVer = ctuArea.lheight();
++  
+   double maxValue = 1023;
+   // Deserialize the ScriptModule from a file using torch::jit::load().
+   torch::Tensor imageBatch = torch::ones({1, 1, blockSizeVer, blockSizeHor});
+@@ -379,6 +386,7 @@ void CNNFilter::cnnFilterLumaBlockISlice(Picture* pic, UnitArea ctuArea, int ext
+     }
+   }
+   // Execute the model and turn its output into a tensor.
++#if DO_ACTUAL_CNN_FILTERING
+   at::Tensor output = m_lumaModuleISlice.forward(input).toTensor();
+   float *pOutput = output.data_ptr<float>();
+   //  std::cout << output.slice(/*dim=*/1, /*start=*/240, /*end=*/480) << '\n';
+@@ -406,6 +414,7 @@ void CNNFilter::cnnFilterLumaBlockISlice(Picture* pic, UnitArea ctuArea, int ext
+     pScaledDst[idx] = Pel(Clip3<int>(0, 1023 << NN_RESIDUE_ADDITIONAL_SHIFT, int(pOutput[pixelIdx] * maxValue * (1 << NN_RESIDUE_ADDITIONAL_SHIFT) + 0.5)));
+ #endif  
+   }
++#endif
+ }
+ 
+ 
+@@ -502,6 +511,7 @@ void CNNFilter::cnnFilterChromaBlockISlice(Picture* pic, UnitArea ctuArea, int e
+       pPredBatchC[blockSizeHorC*blockSizeVerC+yy*blockSizeHorC+xx] = pPredCr[idx] / maxValue;
+     }
+   }
++#if DO_ACTUAL_CNN_FILTERING
+   // Execute the model and turn its output into a tensor.
+   at::Tensor output = m_chromaModuleISlice.forward(input).toTensor();
+   float *pOutput = output.data_ptr<float>();
+@@ -549,6 +559,7 @@ void CNNFilter::cnnFilterChromaBlockISlice(Picture* pic, UnitArea ctuArea, int e
+     pScaledDstCr[idx] = Pel(Clip3<int>(0, 1023 << NN_RESIDUE_ADDITIONAL_SHIFT, int(pOutput[pixelIdx + blockSizeC] * maxValue * (1 << NN_RESIDUE_ADDITIONAL_SHIFT) + 0.5)));
+ #endif
+   }
++#endif
+ }
+ 
+ void CNNFilter::cnnFilter(Picture* pic)
+@@ -693,9 +704,14 @@ void CNNFilter::scaleResidualBlock(Picture *pic, UnitArea ctuArea, int modelIdx,
+ 
+     idxDst       = yy * strideDst + xx;
+     idxRec       = yy * strideRec + xx;
++#if DO_ACTUAL_CNN_FILTERING
+     pScaledDst[idxDst] = Clip3(0, 1023, pRec[idxRec] + (((pScaledDst[idxDst] - (pRec[idxRec] << NN_RESIDUE_ADDITIONAL_SHIFT)) * scale + offset) >> shift));
++#else
++    pScaledDst[idxDst] = pRec[idxRec];
++#endif
+   }
+ }
+ #endif
+ 
++#endif
+ //! \}
+diff --git a/source/Lib/CommonLib/CodingStructure.h b/source/Lib/CommonLib/CodingStructure.h
+index e0821ce..1f1a22a 100644
+--- a/source/Lib/CommonLib/CodingStructure.h
++++ b/source/Lib/CommonLib/CodingStructure.h
+@@ -67,6 +67,15 @@ enum PictureType
+   PIC_BS_MAP,
+   PIC_PREDICTION_CUSTOM,
+ #endif
++#if DUMP_MORE_INFO
++  PIC_BEFORE_DEBLOCK,
++  PIC_AFTER_DEBLOCK,
++  PIC_MY_PRED_BUF,
++//  PIC_BS_INFO,
++#endif
++#if EXTRACT_BMP_INFO
++  PIC_BLOCK_PRED_MODE,
++#endif
+   NUM_PIC_TYPES
+ };
+ extern XUCache g_globalUnitCache;
+@@ -236,6 +245,9 @@ private:
+ #if CNN_FILTERING
+   PelStorage m_predCustom;
+ #endif
++#if EXTRACT_BMP_INFO
++  PelStorage m_block_pred_mode;
++#endif
+   PelStorage m_resi;
+   PelStorage m_reco;
+   PelStorage m_orgr;
+@@ -334,6 +346,13 @@ public:
+          PelUnitBuf   getRecoBuf()                                 { return m_reco; }
+   const CPelUnitBuf   getRecoBuf()                           const { return m_reco; }
+ 
++#if EXTRACT_BMP_INFO
++  // block prediction mode buffer
++  PelBuf            getBlockPredModeBuf(const ComponentID compID) { return m_block_pred_mode.get(compID); }
++  const CPelBuf     getBlockPredModeBuf(const ComponentID compID) const { return m_block_pred_mode.get(compID); }
++  PelUnitBuf        getBlockPredModeBuf() { return m_block_pred_mode; }
++  const CPelUnitBuf getBlockPredModeBuf() const { return m_block_pred_mode; }
++#endif
+ private:
+ 
+   inline        PelBuf       getBuf(const CompArea &blk,  const PictureType &type);
+diff --git a/source/Lib/CommonLib/LoopFilter.cpp b/source/Lib/CommonLib/LoopFilter.cpp
+index a317871..6ea249c 100644
+--- a/source/Lib/CommonLib/LoopFilter.cpp
++++ b/source/Lib/CommonLib/LoopFilter.cpp
+@@ -473,7 +473,7 @@ void LoopFilter::xDeblockCU( CodingUnit& cu, const DeblockEdgeDir edgeDir )
+   }
+ 
+ 
+-#if !CNN_FILTERING
++#if !CNN_FILTERING || !DO_ACTUAL_CNN_FILTERING
+   std::sort( edgeIdx.begin(), edgeIdx.end() );
+   int prevEdgeIdx = -1;
+   for ( const int& edge : edgeIdx )
+diff --git a/source/Lib/CommonLib/Picture.cpp b/source/Lib/CommonLib/Picture.cpp
+index 749e508..bf6a3a8 100644
+--- a/source/Lib/CommonLib/Picture.cpp
++++ b/source/Lib/CommonLib/Picture.cpp
+@@ -39,7 +39,9 @@
+ #include "SEI.h"
+ #include "ChromaFormat.h"
+ #include "CommonLib/InterpolationFilter.h"
+-
++#if EXTRACT_BMP_INFO
++#include "CommonLib/UnitTools.h"
++#endif
+ 
+ #if ENABLE_SPLIT_PARALLELISM
+ 
+@@ -212,6 +214,15 @@ void Picture::create( const ChromaFormat &_chromaFormat, const Size &size, const
+   M_BUFS( 0, PIC_BS_MAP ).create( _chromaFormat, a, _maxCUSize, margin, MEMORY_ALIGN_DEF_SIZE );
+ #endif
+   
++#if DUMP_MORE_INFO
++  M_BUFS( 0, PIC_BEFORE_DEBLOCK ).create( _chromaFormat, a, _maxCUSize, margin, MEMORY_ALIGN_DEF_SIZE );
++  M_BUFS( 0, PIC_AFTER_DEBLOCK ).create( _chromaFormat, a, _maxCUSize, margin, MEMORY_ALIGN_DEF_SIZE );
++  M_BUFS( 0, PIC_MY_PRED_BUF ).create( _chromaFormat, a, _maxCUSize, margin, MEMORY_ALIGN_DEF_SIZE );
++//  M_BUFS( 0, PIC_BS_INFO        ).create( _chromaFormat, a, _maxCUSize, margin, MEMORY_ALIGN_DEF_SIZE );
++#endif
++#if EXTRACT_BMP_INFO
++  M_BUFS(0, PIC_BLOCK_PRED_MODE).create(_chromaFormat, a, _maxCUSize, margin, MEMORY_ALIGN_DEF_SIZE);
++#endif 
+   if( !_decoder )
+   {
+     M_BUFS( 0, PIC_ORIGINAL ).    create( _chromaFormat, a );
+@@ -387,6 +398,39 @@ const CPelBuf     Picture::getPredBufCustom(const CompArea &blk)  const { return
+ const CPelUnitBuf Picture::getPredBufCustom(const UnitArea &unit) const { return getBuf(unit, PIC_PREDICTION_CUSTOM);}
+ #endif
+ 
++#if DUMP_MORE_INFO
++         PelUnitBuf Picture::getPicBeforeDbBuf()                                 { return M_BUFS(0, PIC_BEFORE_DEBLOCK); }
++  const CPelUnitBuf Picture::getPicBeforeDbBuf()                           const { return M_BUFS(0, PIC_BEFORE_DEBLOCK); }
++         PelBuf     Picture::getPicBeforeDbBuf(const CompArea &blk)              { return getBuf(blk, PIC_BEFORE_DEBLOCK); }
++  const CPelBuf     Picture::getPicBeforeDbBuf(const CompArea &blk)        const { return getBuf(blk, PIC_BEFORE_DEBLOCK); }
++
++         PelUnitBuf Picture::getPicAfterDbBuf()                                 { return M_BUFS(0, PIC_AFTER_DEBLOCK); }
++  const CPelUnitBuf Picture::getPicAfterDbBuf()                           const { return M_BUFS(0, PIC_AFTER_DEBLOCK); }
++         PelBuf     Picture::getPicAfterDbBuf(const CompArea &blk)              { return getBuf(blk, PIC_AFTER_DEBLOCK); }
++  const CPelBuf     Picture::getPicAfterDbBuf(const CompArea &blk)        const { return getBuf(blk, PIC_AFTER_DEBLOCK); }
++
++         PelUnitBuf Picture::getMyPredBuf()                                 { return M_BUFS(0, PIC_MY_PRED_BUF); }
++  const CPelUnitBuf Picture::getMyPredBuf()                           const { return M_BUFS(0, PIC_MY_PRED_BUF); }
++         PelBuf     Picture::getMyPredBuf(const CompArea &blk)              { return getBuf(blk, PIC_MY_PRED_BUF); }
++  const CPelBuf     Picture::getMyPredBuf(const CompArea &blk)        const { return getBuf(blk, PIC_MY_PRED_BUF); }
++
++
++//         PelUnitBuf Picture::getBsMapBuf()                                      { return M_BUFS(0, PIC_BS_INFO); }
++//  const CPelUnitBuf Picture::getBsMapBuf()                                const { return M_BUFS(0, PIC_BS_INFO); }
++//         PelBuf     Picture::getBsMapBuf(const CompArea &blk)                   { return getBuf(blk, PIC_BS_INFO); }
++//  const CPelBuf     Picture::getBsMapBuf(const CompArea &blk)             const { return getBuf(blk, PIC_BS_INFO); }
++#endif
++#if EXTRACT_BMP_INFO
++       //PelBuf     Picture::getBlockPredModeBuf(const ComponentID compID, bool /*wrap*/)       { return getBuf(compID,               PIC_BLOCK_PRED_MODE); }
++       //PelUnitBuf Picture::getBlockPredModeBuf(bool /*wrap*/)                                 { return M_BUFS(scheduler.getSplitPicId(), PIC_BLOCK_PRED_MODE); }
++       PelUnitBuf Picture::getBlockPredModeBuf()                                 { return M_BUFS(0, PIC_BLOCK_PRED_MODE); }
++const CPelUnitBuf Picture::getBlockPredModeBuf()                           const { return M_BUFS(0, PIC_BLOCK_PRED_MODE); }
++       PelUnitBuf Picture::getBlockPredModeBuf(const UnitArea &unit)             { return getBuf(unit, PIC_BLOCK_PRED_MODE); }
++const CPelUnitBuf Picture::getBlockPredModeBuf(const UnitArea &unit)       const { return getBuf(unit, PIC_BLOCK_PRED_MODE); }
++       PelBuf     Picture::getBlockPredModeBuf(const CompArea &blk)              { return getBuf(blk, PIC_BLOCK_PRED_MODE); }
++const CPelBuf     Picture::getBlockPredModeBuf(const CompArea &blk)        const { return getBuf(blk, PIC_BLOCK_PRED_MODE); }
++#endif
++
+        PelBuf     Picture::getRecoBuf(const ComponentID compID, bool wrap)       { return getBuf(compID,                    wrap ? PIC_RECON_WRAP : PIC_RECONSTRUCTION); }
+ const CPelBuf     Picture::getRecoBuf(const ComponentID compID, bool wrap) const { return getBuf(compID,                    wrap ? PIC_RECON_WRAP : PIC_RECONSTRUCTION); }
+        PelBuf     Picture::getRecoBuf(const CompArea &blk, bool wrap)            { return getBuf(blk,                       wrap ? PIC_RECON_WRAP : PIC_RECONSTRUCTION); }
+@@ -495,6 +539,98 @@ void Picture::dumpPicPartition()
+   }
+ }
+ #endif
++
++
++#if EXTRACT_BMP_INFO
++void Picture::dumpPicBpmInfo()
++{
++  CodingStructure     &cs  = *(this->cs);
++  const PreCalcValues &pcv = *cs.pcv;
++  // Defining a scanning area that covers the picture
++  const UnitArea PICarea(cs.area.chromaFormat, Area(0, 0, pcv.lumaWidth, pcv.lumaHeight));
++  // Traversing the CUs in the picture and filling a buffer with extracted block pred mode info
++  for (int y = 0; y < pcv.heightInCtus; y++)
++  {
++    for (int x = 0; x < pcv.widthInCtus; x++)
++    {
++      const UnitArea ctuArea(pcv.chrFormat,
++                             Area(x << pcv.maxCUWidthLog2, y << pcv.maxCUHeightLog2, pcv.maxCUWidth, pcv.maxCUWidth));
++      for (auto &currCU: cs.traverseCUs(CS::getArea(cs, ctuArea, CH_L), CH_L))
++      {
++        // Extracting if the current block is an inter coded block, an intra coded block or an IBC block
++        // MODE_INTER   = 0,   ///< inter-prediction mode
++        // MODE_INTRA   = 1,   ///< intra-prediction mode
++        // MODE_IBC     = 2,   ///< ibc-prediction mode
++        bool isInter = (currCU.predMode == MODE_INTER) ? true : false;
++        bool isIntra = (currCU.predMode == MODE_INTRA) ? true : false;
++        bool isIBC   = (currCU.predMode == MODE_IBC) ? true : false;
++
++        // Extracting if the current block is a uni-prediction block
++        // interDir     = 1    /// only L0 is used for the current block
++        // interDir     = 2    /// only L1 is used for the current block
++        // interDir     = 3    /// both L0 and L1 are used for the current block
++        bool isUniPred = ((&currCU)->firstPU->interDir == 1 || (&currCU)->firstPU->interDir == 2) ? true : false;
++        // For some reason the encoder has set currCU.firstPU->interDir to -1 if the mode is GEO.
++        // So we have to check for GEO as well. Every GEO-predicted block is per definition
++        // uni-predicted (see JVET-L0124). In the decoder this is not a problem, but for the
++        // encoder we need the line below.
++        isUniPred = (isUniPred || currCU.geoFlag);
++
++        // Extracting if the current block is a bi-prediction block
++        bool isBiPred = ((&currCU)->firstPU->interDir == 3) ? true : false;
++
++        Pel to_fill = 0;
++
++        if (isIntra == true)   // if an intra block
++        {
++          to_fill = 0;
++        }
++        if (isIBC == true && currCU.skip == false)   // if a non-skipped IBC block
++        {
++          to_fill = 2;
++        }
++        if (isIBC == true && currCU.skip == true)   // if a skipped IBC block
++        {
++          to_fill = 3;
++        }
++        if ((isInter == true && currCU.skip == false) && (isUniPred == true))   // if a non-skipped uni-prediction block
++        {
++          to_fill = 4;
++        }
++        if ((isInter == true && currCU.skip == true) && (isUniPred == true))   // if a skipped uni-prediction block
++        {
++          to_fill = 5;
++        }
++        if ((isInter == true && currCU.skip == false) && (isBiPred == true))   // if a non-skipped bi-prediction block
++        {
++          to_fill = 6;
++        }
++        if ((isInter == true && currCU.skip == true) && (isBiPred == true))   // if a skipped bi-prediction block
++        {
++          to_fill = 7;
++        }
++
++        // Filling all three components of the buffer with extracted value
++        CompArea y_area(COMPONENT_Y, cs.area.chromaFormat, currCU.lx(), currCU.ly(), currCU.lwidth(), currCU.lheight(),
++                        true);
++        auto     target_buff = currCU.slice->getPic()->getBlockPredModeBuf(y_area);
++        target_buff.fill(to_fill);
++
++        CompArea cb_area(COMPONENT_Cb, cs.area.chromaFormat, currCU.lx(), currCU.ly(), currCU.lwidth(),
++                         currCU.lheight(), true);
++        target_buff = currCU.slice->getPic()->getBlockPredModeBuf(cb_area);
++        target_buff.fill(to_fill);
++
++        CompArea cr_area(COMPONENT_Cr, cs.area.chromaFormat, currCU.lx(), currCU.ly(), currCU.lwidth(),
++                         currCU.lheight(), true);
++        target_buff = currCU.slice->getPic()->getBlockPredModeBuf(cr_area);
++        target_buff.fill(to_fill);
++      }
++    }
++  }
++}
++#endif
++
+ void Picture::allocateNewSlice()
+ {
+   slices.push_back(new Slice);
+diff --git a/source/Lib/CommonLib/Picture.h b/source/Lib/CommonLib/Picture.h
+index 33b748b..75acd4f 100644
+--- a/source/Lib/CommonLib/Picture.h
++++ b/source/Lib/CommonLib/Picture.h
+@@ -155,6 +155,43 @@ const   CPelBuf     getBsMapBuf(const CompArea &blk) const;
+   const CPelUnitBuf getPredBufCustom(const UnitArea &unit) const;
+ #endif
+   
++#if DUMP_MORE_INFO
++         PelUnitBuf getPicBeforeDbBuf();
++  const CPelUnitBuf getPicBeforeDbBuf() const;
++         PelBuf     getPicBeforeDbBuf(const CompArea &blk);
++  const CPelBuf     getPicBeforeDbBuf(const CompArea &blk) const;
++
++         PelUnitBuf getPicAfterDbBuf();
++  const CPelUnitBuf getPicAfterDbBuf() const;
++         PelBuf     getPicAfterDbBuf(const CompArea &blk);
++  const CPelBuf     getPicAfterDbBuf(const CompArea &blk) const;
++
++         PelUnitBuf getMyPredBuf();
++  const CPelUnitBuf getMyPredBuf() const;
++         PelBuf     getMyPredBuf(const CompArea &blk);
++  const CPelBuf     getMyPredBuf(const CompArea &blk) const;
++
++//         PelUnitBuf getMyPartBuf();
++//  const CPelUnitBuf getMyPartBuf() const;
++//         PelBuf     getMyPartBuf(const CompArea &blk);
++//  const CPelBuf     getMyPartBuf(const CompArea &blk) const;
++
++
++//         PelUnitBuf getBsMapBuf();
++//  const CPelUnitBuf getBsMapBuf() const;
++//         PelBuf     getBsMapBuf(const CompArea &blk);
++//  const CPelBuf     getBsMapBuf(const CompArea &blk) const;
++#endif
++#if EXTRACT_BMP_INFO
++  PelUnitBuf        getBlockPredModeBuf();
++  const CPelUnitBuf getBlockPredModeBuf() const;
++  PelUnitBuf        getBlockPredModeBuf(const UnitArea &unit);
++  const CPelUnitBuf getBlockPredModeBuf(const UnitArea &unit) const;
++  PelBuf            getBlockPredModeBuf(const CompArea &blk);
++  const CPelBuf     getBlockPredModeBuf(const CompArea &blk) const;
++  void              dumpPicBpmInfo();
++#endif 
++  
+          PelBuf     getRecoBuf(const ComponentID compID, bool wrap=false);
+   const CPelBuf     getRecoBuf(const ComponentID compID, bool wrap=false) const;
+          PelBuf     getRecoBuf(const CompArea &blk, bool wrap=false);
+diff --git a/source/Lib/CommonLib/TypeDef.h b/source/Lib/CommonLib/TypeDef.h
+index 4cf53f6..536a4ed 100644
+--- a/source/Lib/CommonLib/TypeDef.h
++++ b/source/Lib/CommonLib/TypeDef.h
+@@ -54,6 +54,11 @@
+ // clang-format off
+ 
+ //########### place macros to be removed in next cycle below this line ###############
++#define DUMP_MORE_INFO                                    1 // note: do not use it for interlace or RPR or multi-layer
++                                                            // use together with output of normal reconstruction file i.e. -o
++#define EXTRACT_BMP_INFO                                  1
++
++#define DO_ACTUAL_CNN_FILTERING                           1
+ #define CNN_FILTERING                                     1
+ #if CNN_FILTERING
+ #define RESTRICTED_GRANULARITY                            1
+diff --git a/source/Lib/DecoderLib/DecLib.cpp b/source/Lib/DecoderLib/DecLib.cpp
+index 00cef89..2014d38 100644
+--- a/source/Lib/DecoderLib/DecLib.cpp
++++ b/source/Lib/DecoderLib/DecLib.cpp
+@@ -683,13 +683,29 @@ void DecLib::executeLoopFilters()
+ #if CNN_FILTERING
+   m_pcPic->getBsMapBuf().fill(0);
+ #endif
++#if DUMP_MORE_INFO
++  // store the sample values before deblocking
++  m_pcPic->getPicBeforeDbBuf().copyFrom( m_pcPic->getRecoBuf(), false, false );
++
++  // reset bs info for preparation
++  m_pcPic->getBsMapBuf().fill( 0 );
++#endif
+   // deblocking filter
+   m_cLoopFilter.loopFilterPic( cs );
++  // store the sample values after deblocking
++  m_pcPic->getPicAfterDbBuf().copyFrom( m_pcPic->getRecoBuf(), false, false );
++
+   CS::setRefinedMotionField(cs);
+-  //CNN filter
++
+ #if CNN_FILTERING
+   m_pcCNNFilter->cnnFilter(m_pcPic);
+ #endif
++  //CNN filter
++#if DUMP_MORE_INFO
++  // store the sample from the special prediction buffer
++  m_pcPic->getMyPredBuf().copyFrom( m_pcPic->getPredBufCustom(), false, false );
++#endif
++
+   if( cs.sps->getSAOEnabledFlag() )
+   {
+     m_cSAO.SAOProcess( cs, cs.picture->getSAO() );
+diff --git a/source/Lib/DecoderLib/DecSlice.cpp b/source/Lib/DecoderLib/DecSlice.cpp
+index b1a8a35..c8ca665 100644
+--- a/source/Lib/DecoderLib/DecSlice.cpp
++++ b/source/Lib/DecoderLib/DecSlice.cpp
+@@ -300,6 +300,9 @@ void DecSlice::decompressSlice( Slice* slice, InputBitstream* bitstream, int deb
+ #if CNN_FILTERING
+   pic->dumpPicPartition();
+ #endif
++#if EXTRACT_BMP_INFO
++  pic->dumpPicBpmInfo();
++#endif
+   // deallocate all created substreams, including internal buffers.
+   for( auto substr: ppcSubstreams )
+   {
+diff --git a/source/Lib/DecoderLib/VLCReader.cpp b/source/Lib/DecoderLib/VLCReader.cpp
+index ad80586..2aa1585 100644
+--- a/source/Lib/DecoderLib/VLCReader.cpp
++++ b/source/Lib/DecoderLib/VLCReader.cpp
+@@ -4175,9 +4175,10 @@ void HLSyntaxReader::parseSliceHeader (Slice* pcSlice, PicHeader* picHeader, Par
+     }
+ 
+ #if CNN_FILTERING
+-    READ_UVLC(uiCode, "slice_cnnlf_luma_indicaiton");  pcSlice->setCnnlfSliceIndication(COMPONENT_Y, uiCode);
+-    READ_UVLC(uiCode, "slice_cnnlf_cb_indication");  pcSlice->setCnnlfSliceIndication(COMPONENT_Cb, uiCode);
+-    READ_UVLC(uiCode, "slice_cnnlf_cr_indication");  pcSlice->setCnnlfSliceIndication(COMPONENT_Cr, uiCode);
++#if DO_ACTUAL_CNN_FILTERING
++    READ_UVLC(uiCode, "slice_cnnlf_luma_indicaiton");  pcSlice->setCnnlfSliceIndication(COMPONENT_Y, uiCode); printf("First: %d\n", uiCode);
++    READ_UVLC(uiCode, "slice_cnnlf_cb_indication");  pcSlice->setCnnlfSliceIndication(COMPONENT_Cb, uiCode); printf("Second: %d\n", uiCode);
++    READ_UVLC(uiCode, "slice_cnnlf_cr_indication");  pcSlice->setCnnlfSliceIndication(COMPONENT_Cr, uiCode); printf("Third: %d\n", uiCode);
+ 
+ #if SCALE_NN_RESIDUE
+     for (int comp = 0; comp < 3; comp++)
+@@ -4190,10 +4191,12 @@ void HLSyntaxReader::parseSliceHeader (Slice* pcSlice, PicHeader* picHeader, Par
+           for (int modelIdx = 0; modelIdx < 3; modelIdx++)
+           {
+             READ_FLAG(uiCode, "slice_cnnlf_scale_flag");
++//            printf("slice_cnnlf_scale_flag = %d\n", uiCode);
+             pcSlice->setNnScaleFlag(uiCode != 0, modelIdx, compID);
+             if (uiCode)
+             {
+               READ_SCODE(NN_RESIDUE_SCALE_SHIFT + 1, iCode, "nnScale");
++//              printf("nnScale = %d\n", iCode);
+               pcSlice->setNnScale(iCode + (1 << NN_RESIDUE_SCALE_SHIFT), modelIdx, compID);
+             }
+           }
+@@ -4201,16 +4204,49 @@ void HLSyntaxReader::parseSliceHeader (Slice* pcSlice, PicHeader* picHeader, Par
+         else
+         {
+           READ_FLAG(uiCode, "slice_cnnlf_scale_flag");
++//          printf("slice_cnnlf_scale_flag = %d\n", uiCode);
+           pcSlice->setNnScaleFlag(uiCode != 0, pcSlice->getCnnlfSliceIndication(compID) - 1, compID);
+           if (uiCode)
+           {
+             READ_SCODE(NN_RESIDUE_SCALE_SHIFT + 1, iCode, "nnScale");
+             pcSlice->setNnScale(iCode + (1 << NN_RESIDUE_SCALE_SHIFT), pcSlice->getCnnlfSliceIndication(compID) - 1, compID);
++//            printf("nnScale2 = %d\n", iCode);
+           }
+         }
+       }
+     }
+ #endif
++#else
++  // Fake reading in data
++  pcSlice->setCnnlfSliceIndication(COMPONENT_Y, 1);
++  pcSlice->setCnnlfSliceIndication(COMPONENT_Cb, 1);
++  pcSlice->setCnnlfSliceIndication(COMPONENT_Cr, 1);
++
++  ComponentID compID = ComponentID(0);
++  uiCode = 1;
++//  printf("slice_cnnlf_scale_flag = %d\n", uiCode);
++  pcSlice->setNnScaleFlag(uiCode != 0, pcSlice->getCnnlfSliceIndication(compID) - 1, compID);
++  iCode = -23;
++  pcSlice->setNnScale(iCode + (1 << NN_RESIDUE_SCALE_SHIFT), pcSlice->getCnnlfSliceIndication(compID) - 1, compID);
++//  printf("nnScale2 = %d\n", iCode);
++
++  compID = ComponentID(1);
++  uiCode = 1;
++//  printf("slice_cnnlf_scale_flag = %d\n", uiCode);
++  pcSlice->setNnScaleFlag(uiCode != 0, pcSlice->getCnnlfSliceIndication(compID) - 1, compID);
++  iCode = -44;
++  pcSlice->setNnScale(iCode + (1 << NN_RESIDUE_SCALE_SHIFT), pcSlice->getCnnlfSliceIndication(compID) - 1, compID);
++//  printf("nnScale2 = %d\n", iCode);
++
++  compID = ComponentID(2);
++  uiCode = 1;
++//  printf("slice_cnnlf_scale_flag = %d\n", uiCode);
++  pcSlice->setNnScaleFlag(uiCode != 0, pcSlice->getCnnlfSliceIndication(compID) - 1, compID);
++  iCode = -49;
++  pcSlice->setNnScale(iCode + (1 << NN_RESIDUE_SCALE_SHIFT), pcSlice->getCnnlfSliceIndication(compID) - 1, compID);
++//  printf("nnScale2 = %d\n", iCode);
++  
++#endif
+     int cnnlfInferBlockSizeLuma = 0, cnnlfInferBlockSizeChroma = 0;
+     if (pcSlice->getSliceQp() < 23)
+     {
+diff --git a/source/Lib/EncoderLib/EncCNNFilter.cpp b/source/Lib/EncoderLib/EncCNNFilter.cpp
+index 89838fb..205d83a 100644
+--- a/source/Lib/EncoderLib/EncCNNFilter.cpp
++++ b/source/Lib/EncoderLib/EncCNNFilter.cpp
+@@ -58,6 +58,7 @@ double getDistortion(PelBuf buf1, PelBuf buf2, int width, int height)
+   }
+   return dist;
+ }
++#if CNN_FILTERING
+ #if SCALE_NN_RESIDUE
+ void EncCNNFilter::cnnFilterEncoder(Picture *pic, PelUnitBuf origBuf, const double *lambdas)
+ #else
+@@ -469,6 +470,7 @@ void EncCNNFilter::cnnFilterEncoder(Picture *pic, const double *lambdas)
+     }
+   }
+ }
++#endif
+ 
+ 
+ #if SCALE_NN_RESIDUE
+diff --git a/source/Lib/EncoderLib/EncSlice.cpp b/source/Lib/EncoderLib/EncSlice.cpp
+index 87be357..b40b87a 100644
+--- a/source/Lib/EncoderLib/EncSlice.cpp
++++ b/source/Lib/EncoderLib/EncSlice.cpp
+@@ -1806,6 +1806,9 @@ void EncSlice::encodeCtus( Picture* pcPic, const bool bCompressEntireSlice, cons
+ #if CNN_FILTERING
+   pcPic->dumpPicPartition();
+ #endif
++#if EXTRACT_BMP_INFO
++  pcPic->dumpPicBpmInfo();
++#endif 
+   // this is wpp exclusive section
+ 
+ //  m_uiPicTotalBits += actualBits;
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage2_2_data_generation/dec_direct_tile3_93dd.py b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage2_2_data_generation/dec_direct_tile3_93dd.py
new file mode 100644
index 0000000000000000000000000000000000000000..24890589d2ee85fcb235b70994fc7c3114e704ab
--- /dev/null
+++ b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage2_2_data_generation/dec_direct_tile3_93dd.py
@@ -0,0 +1,174 @@
+"""
+/* The copyright in this software is being made available under the BSD
+* License, included below. This software may be subject to other third party
+* and contributor rights, including patent rights, and no such rights are
+* granted under this license.
+*
+* Copyright (c) 2010-2022, ITU/ISO/IEC
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+*
+*  * Redistributions of source code must retain the above copyright notice,
+*    this list of conditions and the following disclaimer.
+*  * Redistributions in binary form must reproduce the above copyright notice,
+*    this list of conditions and the following disclaimer in the documentation
+*    and/or other materials provided with the distribution.
+*  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+*    be used to endorse or promote products derived from this software without
+*    specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+* THE POSSIBILITY OF SUCH DAMAGE.
+"""
+
+import csv
+import sys
+import os
+from sys import platform
+import subprocess
+from subprocess import Popen, PIPE
+import shutil
+import glob
+import time
+
+#  Modify directories
+software_root_dir = '/proj/video_data4/videosim/eliudux/JVET_NN_EE/ab-ee1-1_5/training_EE115/'
+
+output_root_dir = '/proj/video_no_backup/videosim/eliudux/training_data/'
+# Output YUV directory is in the format of 
+# <output_root_dir>/xcheck_yuv/ra_93dd_BVIDVC/SequenceName_qp17/SequenceName_qp17_rec.yuv
+# <output_root_dir>/xcheck_yuv/ra_93dd_BVIDVC/SequenceName_qp17/SequenceName_qp17_bs.yuv
+
+path_to_original_BVI_DVC_yuvs = '/proj/video_data3/videosim/data/BVI_DVC/'
+
+# do only decoding, or both encoding and decoding
+onlyDecode = True
+
+# remove YUVs after decoding
+removeYUV = False
+
+dataset = 'BVIDVC'
+cfgtype = 'ra' 
+QP = [42,37,32,27,22,17]
+
+str1 = sys.argv[1]
+# String to control which names from the dataset you want to encode. 
+# E.g., DIV2K images start with 0xxx_xxxxxxx_xxx.yuv, so str1='0'. 
+# BVIDVC class D sequences start with Dxxxx_xxxxx.yuv, so str1='D'.
+# A particular sequence with name CColourfulDecorationWatPhoVidevo_960x544_50fps_10bit_420.yuv, so str1 = 'CColourfulDecorationWatPhoVidevo_960x544_50fps_10bit_420'
+#str1 = 'DAdvertisingMassagesBangkokVidevo_480x272_25fps_10bit_420'
+#str1 = 'AWovenVidevo_3840x2176_25fps_10bit_420'
+  
+datasetname = "BVIDVC_64frm"
+cfgdir = software_root_dir + 'cfg/cfgBVIDVC/'
+inputdir = path_to_original_BVI_DVC_yuvs
+encnumfrm = '64'
+numfrm = '64' #all frames from A
+bitdepth = 10
+bitdepth_orig = 10 # BVIDVC 10bit, DIV2K 8bit
+logdir = software_root_dir + 'cfg/BVIDVC_64frm/' #sliceQP log
+
+cfg1 = software_root_dir + 'stage2_1_vtm/extractmoreinfovtm/cfg/encoder_randomaccess_vtm.cfg'    
+scriptdir = software_root_dir + 'stage2_2_data_generation/'
+excutabledir = software_root_dir + 'stage2_1_vtm/extractmoreinfovtm/bin/'  
+
+commit_num = '93dd'  # Do not change this. It is used to identify the dataset in the training data.
+encoderdir = excutabledir+'EncoderAppStatic' 
+decoderdir = excutabledir+'DecoderAppStatic' 
+
+if onlyDecode:
+    bindir = '/proj/video_data3/videosim/eliudux/myCurrentWork/JVET_NN_EE/VideoSimScript/Results/enc/ra_he10_RunSimNNVC1.0_x0066_RAtrained_dump_D_all2/'
+else:
+    bindir = output_root_dir + 'xcheck_bin/'+ cfgtype + '_' + commit_num +'_'+dataset+'/'
+    
+yuvoutputdir = output_root_dir + 'xcheck_yuv/' + cfgtype + '_' + commit_num +'_'+dataset+'/'
+ 
+cnt = 0
+for input_file in os.listdir(inputdir):
+    if input_file[-3:] == 'yuv' and input_file[:len(str1)] == str1:
+        seqname = input_file[:-4]
+        cfg2 = cfgdir + seqname + '.cfg'
+        file_dir = inputdir + input_file
+        for q in range(0,len(QP)):
+            qp = str(QP[q])
+            currnt_dir = yuvoutputdir + seqname + '_qp' + qp + '/'   
+            os.makedirs(currnt_dir,exist_ok = True)  
+            
+            if onlyDecode:         
+                bin_dir =  bindir+ seqname + 'qp' + qp + '.bin'
+                if not os.path.exists(bin_dir):
+                    # do not proceed if binary does not exist
+                    print('bin does not exist',bin_dir)
+                    continue 
+            else:    
+                current_bin_dir = bindir+ seqname + '_qp' + qp + '/'
+                bin_dir =  current_bin_dir + seqname + '_qp' + qp + '_str.bin'
+                os.makedirs(current_bin_dir,exist_ok = True)
+                
+            tmpout_dir = currnt_dir + seqname + '_qp' + qp + '_tmprec.yuv'
+            out_dir = currnt_dir + seqname + '_qp' + qp + '_rec.yuv'
+            pbd_dir = currnt_dir + seqname + '_qp' + qp + '_pictureBeforeDb.yuv'
+            bs_dir = currnt_dir + seqname + '_qp' + qp + '_bs.yuv'
+            log_dir = currnt_dir + 'dec.log'
+            mpr_dir = currnt_dir + seqname + '_qp' + qp + '_mpr.yuv'
+            mpa_dir = currnt_dir + seqname + '_qp' + qp + '_mpa.yuv'
+            pad_dir = currnt_dir + seqname + '_qp' + qp + '_pad.yuv'
+            bpm_dir = currnt_dir + seqname + '_qp' + qp + '_bpm.yuv'
+            enclog_dir = currnt_dir + 'enc.log'
+            
+            
+            enccmd = [encoderdir, "-c", cfg1, "-c", cfg2, "-i", file_dir, "-o", tmpout_dir,"-b", bin_dir, "-q", qp, "-f",encnumfrm, "--SEIDecodedPictureHash=1", ">&",enclog_dir,"; rm -f", tmpout_dir]
+            enccmd = " ".join(enccmd) 
+            
+            deccmd = [decoderdir, "-b", bin_dir, "-o", out_dir, "-pbd", pbd_dir,"-bs", bs_dir,"-mpr",mpr_dir,"-bpm",bpm_dir ,">&",log_dir,"; rm -f",out_dir] #,"-pad",pad_dir,"-mpa",mpa_dir
+            deccmd = " ".join(deccmd)
+            
+            
+            if removeYUV:
+                cmdrm = [ "rm -f",pbd_dir,"; rm -f",bs_dir,"; rm -f",mpr_dir,"; rm -f",bpm_dir] #,"; rm -f",pad_dir,"; rm -f",mpa_dir
+                cmdrm = " ".join(cmdrm)                
+            
+            
+            if onlyDecode:
+                cmdall = ["bsub -o /dev/null '", deccmd,"'"]
+            else:
+                cmdall = ["bsub -o /dev/null  '", enccmd,";",deccmd,"'"] 
+            
+            cmdall = " ".join(cmdall)
+            
+            if removeYUV:
+                cmdall = [cmdall[:-2],";",cmdrm, "'"]
+                cmdall = " ".join(cmdall)
+            
+            #if os.path.exists(mpr_dir):             
+            #    continue
+            
+            cnt += 1 
+            print('#',cnt, currnt_dir)
+            print(cmdall)            
+            
+            try: 
+                p1 = Popen(cmdall, shell=True, stdout=PIPE, stderr=PIPE)
+                status = p1.wait()
+                out = p1.stdout.read()
+                err = p1.stderr.read()
+            except Exception:
+                raise RuntimeError("error while executing command '" + cmdall + "'")
+            
+            time.sleep(2)
+            
+            break #QP loop
+        break # sequence loop
+
+print('Count',cnt,'dec jobs.')
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage2_3_training_lumaIPB/data_loader.py b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage2_3_training_lumaIPB/data_loader.py
new file mode 100644
index 0000000000000000000000000000000000000000..d5056f69f6258f96b269b23af69b6e7138ac8097
--- /dev/null
+++ b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage2_3_training_lumaIPB/data_loader.py
@@ -0,0 +1,389 @@
+"""
+/* The copyright in this software is being made available under the BSD
+* License, included below. This software may be subject to other third party
+* and contributor rights, including patent rights, and no such rights are
+* granted under this license.
+*
+* Copyright (c) 2010-2022, ITU/ISO/IEC
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+*
+*  * Redistributions of source code must retain the above copyright notice,
+*    this list of conditions and the following disclaimer.
+*  * Redistributions in binary form must reproduce the above copyright notice,
+*    this list of conditions and the following disclaimer in the documentation
+*    and/or other materials provided with the distribution.
+*  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+*    be used to endorse or promote products derived from this software without
+*    specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+* THE POSSIBILITY OF SUCH DAMAGE.
+"""
+
+import json
+import math
+import sys
+from typing import NamedTuple
+import numpy as np
+import struct
+import torch
+from torch.utils.data import Dataset
+
+class PatchInfo(NamedTuple):
+    """Address of one training patch: which sequence, which frame, and where."""
+    data_index: int   # index of the sequence entry in DataLoader1.database
+    frame_index: int  # frame index within that sequence
+    patch_x0: int     # patch grid column (generate_type 0) or 0 (whole-frame mode)
+    patch_y0: int     # patch grid row (generate_type 0) or 0 (whole-frame mode)
+    
+    
+def readData(patch_size,border_size,norm,fn,off,nbb,ctype,h,w,x0,y0):
+    """Read one (patch_size+2*border_size)^2 window from a raw plane in file fn.
+
+    off is the byte offset of the plane start, nbb the bytes per sample,
+    ctype the numpy dtype string, (h,w) the plane size and (x0,y0) the window
+    origin in samples. Samples falling outside the plane stay zero (implicit
+    zero padding). Returns float32 scaled by 1/norm.
+    """
+    t = np.zeros((patch_size+2*border_size,patch_size+2*border_size),dtype='float32') # implicit zeros padding
+    with open(fn,"rb") as file:
+         # Requested window [y_start, y_end, x_start, x_end]; may overhang the plane.
+         cropc = [ y0-border_size, y0+patch_size+border_size, x0-border_size, x0+patch_size+border_size ]
+         # Source region clipped to the plane bounds.
+         srcc = [max(cropc[0], 0), min(cropc[1], h), max(cropc[2], 0), min(cropc[3], w)]
+         # Destination region inside t corresponding to the clipped source.
+         dstc = [srcc[0] - cropc[0], srcc[1] - cropc[0], srcc[2] - cropc[2], srcc[3] - cropc[2]]
+         data_h = srcc[1] - srcc[0]
+         # Skip the rows above the window, then read full-width rows and crop columns.
+         off += srcc[0]*w*nbb
+         data = np.fromfile(file,dtype=ctype,count=data_h*w,offset=off).reshape(data_h,w)
+         data = data[:, srcc[2]:srcc[3]]
+         t[dstc[0]:dstc[1], dstc[2]:dstc[3]] = data
+         return t.astype('float32')/norm
+         
+def readDataBpm(patch_size,border_size,norm,fn,off,nbb,ctype,h,w,x0,y0):
+    """Read a block-prediction-mode (IPB) window and map codes to {0, 0.5, 1}.
+
+    Same file/window arguments as readData. The raw 16-bit codes are remapped
+    so that intra/IBC -> 0.0, uni-prediction -> 0.5, bi-prediction -> 1.0.
+    Out-of-plane border samples stay zero. Note: norm is accepted only for
+    signature symmetry with readData and is unused here.
+    """
+    t = np.zeros((patch_size+2*border_size,patch_size+2*border_size),dtype='float32') # implicit zeros padding
+    with open(fn,"rb") as file:
+         cropc = [ y0-border_size, y0+patch_size+border_size, x0-border_size, x0+patch_size+border_size ]
+         srcc = [max(cropc[0], 0), min(cropc[1], h), max(cropc[2], 0), min(cropc[3], w)]
+         dstc = [srcc[0] - cropc[0], srcc[1] - cropc[0], srcc[2] - cropc[2], srcc[3] - cropc[2]]
+         data_h = srcc[1] - srcc[0]
+         off += srcc[0]*w*nbb
+         data = np.fromfile(file,dtype=ctype,count=data_h*w,offset=off).reshape(data_h,w)
+         data = data[:, srcc[2]:srcc[3]]
+         # First right shift to make it Ipred=0, IBC=1, uni-pred=2, bi-pred=3
+         ipb_crop2 = np.right_shift(data, 1)
+         # Next add 1 everywhere we have 0 to make it Ipred/IBC = 1, uni-pred=2, bi-pred=3
+         ipb_crop2 = (ipb_crop2==0) + ipb_crop2
+         # Now subtract 1 and divide by 2 to make it Ipred/IBC=0, uni-pred=0.5, bi-pred=1
+         ipb_crop2 = np.float32(ipb_crop2-1)/2.0         
+         t[dstc[0]:dstc[1], dstc[2]:dstc[3]] = ipb_crop2
+         return t.astype('float32')
+         
+def readOne(patch_size,border_size,norm,fn,off,ctype):
+    """Read a single scalar at byte offset off and broadcast it to a window.
+
+    Only ctype == 'int32' is implemented (exits otherwise). The value is
+    divided by norm and returned as a constant float32 plane of side
+    patch_size + 2*border_size.
+    """
+    with open(fn,"rb") as file:
+         if ctype == 'int32':
+             file.seek(off)
+             v = float(struct.unpack("i",file.read(4))[0])/norm
+         else:
+             sys.exit("readOne todo")
+         t = np.full((patch_size+2*border_size,patch_size+2*border_size),v,dtype='float32') 
+         return t
+    
+def getTypeComp(comp):
+    """Classify a component-name list as luma or chroma.
+
+    Returns True if any name contains '_U'/'_V' (and none '_Y'), False if
+    only '_Y' names (or neither kind, e.g. 'qp_slice' alone) appear, and
+    None when both luma and chroma names are mixed (invalid request).
+    """
+    is_y_extracted = False
+    is_u_v_extracted = False
+    for comp_i in comp:
+        if '_Y' in comp_i:
+            is_y_extracted = True
+        if '_U' in comp_i or '_V' in comp_i:
+            is_u_v_extracted = True
+    if is_y_extracted and is_u_v_extracted:
+        return None
+    return is_u_v_extracted
+
+
+class DataLoader1(Dataset):
+    """Dataset yielding (qp, rec, pred, bs, orig, ipb) luma tensors for filter training."""
+    # NOTE(review): these are *class* attributes. __init__ mutates components
+    # (via append) and suffix (via item assignment) without rebinding them, so
+    # their contents are shared by -- and accumulate across -- every
+    # DataLoader1 instance. Consider making them instance attributes.
+    components=[]
+    database=None # contains the whole database
+    patch_info=None # contains address of each patch in the database: dataset index, frame index in the dataset, patch_index in the frame
+    suffix={} # suffix for each file
+    
+    # patch_size in luma sample
+    def __init__(self, jsonfile, patch_size, poc_list, generate_type, comps_luma,comps_chroma, qp_filter=-1, slice_type_filter=-1,transform=None, target_transform=None):
+        """Build the patch/frame address list from a dataset-description JSON.
+
+        Args:
+            jsonfile: JSON with a 'data' list (one dict per coded sequence)
+                plus optional 'suffix_*' keys naming available dump files.
+            patch_size: patch size in luma samples.
+            poc_list: optional frame-index list (used when generate_type != 0).
+            generate_type: 0 -> pre-enumerate interior patches of every frame;
+                otherwise one patch_info entry per frame (random crop later).
+            comps_luma, comps_chroma: component-name lists for getPatchData.
+            qp_filter, slice_type_filter: values > 0 are unimplemented (exit).
+            transform, target_transform: optional tensor transforms.
+        """
+        self.generate_type=generate_type    
+        self.comps_luma=comps_luma    
+        self.comps_chroma=comps_chroma                
+        self.transform = transform
+        self.target_transform = target_transform
+
+        # Normalization constants per component; generate_type 0 scales by the
+        # 10-bit maximum (1023), the other mode by powers of two.
+        if self.generate_type == 0:
+            self.normalizer_rec  = 1023.0
+            self.normalizer_pred = 1023.0
+            self.normalizer_bs   = 1023.0
+            self.normalizer_cu_average = 1023.0
+            self.normalizer_org8bits = 255.0
+            self.normalizer_org10bits = 1023.0
+            self.normalizer_qp   = 1023.0
+            # NOTE(review): normalizer_bpm is not set in this branch, so a
+            # 'bpm_*' component with generate_type 0 raises AttributeError.
+        else:
+            self.normalizer_rec  = 1024.0
+            self.normalizer_pred = 1024.0
+            self.normalizer_bs   = 1024.0
+            self.normalizer_cu_average = 1024.0
+            self.normalizer_org8bits = 256.0
+            self.normalizer_org10bits = 1024.0
+            self.normalizer_qp   = 64.0
+            self.normalizer_bpm = 1
+        self.patch_size=patch_size
+        self.patch_info=[]
+        with open(jsonfile, "r") as file:
+         content = file.read()
+         dcontent = json.loads(content)
+         if qp_filter>0 and 'suffix_qp' not in dcontent:
+             sys.exit("Filtering on qp impossible: no qp data in the dataset")
+         if slice_type_filter>0 and 'suffix_slicetype' not in dcontent:
+             sys.exit("Filtering on slice type impossible: no slice data in the dataset")
+         if qp_filter>0 or slice_type_filter>0:
+             sys.exit("todo")
+         # Original frames are always available; every other component is
+         # registered only when its file suffix appears in the JSON.
+         self.components.append("org_Y")
+         self.components.append("org_U")
+         self.components.append("org_V")
+         if  'suffix_rec_after_dbf' in dcontent: 
+             self.suffix['rec_after_dbf']=dcontent['suffix_rec_after_dbf']
+             self.components.append("rec_after_dbf_Y")
+             self.components.append("rec_after_dbf_U")
+             self.components.append("rec_after_dbf_V")
+         if  'suffix_rec_before_dbf' in dcontent: 
+             self.suffix['rec_before_dbf']=dcontent['suffix_rec_before_dbf']
+             self.components.append("rec_before_dbf_Y")
+             self.components.append("rec_before_dbf_U")
+             self.components.append("rec_before_dbf_V")
+         if  'suffix_pred' in dcontent: 
+             self.suffix['pred']=dcontent['suffix_pred']
+             self.components.append("pred_Y")
+             self.components.append("pred_U")
+             self.components.append("pred_V")
+         if  'suffix_bs' in dcontent: 
+             self.suffix['bs']=dcontent['suffix_bs']             
+             self.components.append("bs_Y")
+             self.components.append("bs_U")
+             self.components.append("bs_V")
+         if  'suffix_partition_cu_average' in dcontent: 
+             self.suffix['partition_cu_average']=dcontent['suffix_partition_cu_average']    
+             self.components.append("partition_cu_average_Y")
+             self.components.append("partition_cu_average_U")
+             self.components.append("partition_cu_average_V")
+         if  'suffix_qp' in dcontent: 
+             self.components.append("qp_slice")
+             self.suffix['qp_slice']=dcontent['suffix_qp']    
+         self.components.append("qp_base") # always here
+         if  'suffix_slicetype' in dcontent: 
+             self.components.append("slice_type")
+             self.suffix['slice_type']=dcontent['suffix_slicetype']   
+         if  'suffix_bpm' in dcontent: 
+             self.suffix['bpm']=dcontent['suffix_bpm']
+             self.components.append("bpm_Y")
+             self.components.append("bpm_U")
+             self.components.append("bpm_V")
+             
+         self.database=dcontent['data']
+         # create array of patches address
+        
+        if self.generate_type == 0:
+            # Pre-enumerate every interior patch of every frame; the outermost
+            # ring of patches (and any partial patch) is excluded (1+x0, 1+y0).
+            psize = self.patch_size
+            for didx in range(len(self.database)):
+                 d=self.database[didx]
+                 w = int(d['width'])
+                 h = int(d['height'])
+                 w -= w % psize
+                 h -= h % psize
+                 nb_w=int(w//psize - 2)
+                 nb_h=int(h//psize - 2)
+                 
+                 id_ra = '_T2RA_'
+                 ra_flag = True if id_ra in d['bsname'] else False
+                 for fidx in range(int(d['data_count'])):
+                    # Skip the intra frames (POC 0/32/64) of RandomAccess streams.
+                    if ra_flag and (fidx == 0 or fidx == 32 or fidx == 64):
+                        continue
+                    for y0 in range(nb_h):
+                        for x0 in range(nb_w):
+                            self.patch_info.append(PatchInfo(didx,fidx,1+x0,1+y0))
+        else:
+            # One patch_info entry per frame; the actual crop position is
+            # drawn at random in __getitem__.
+            for didx in range(len(self.database)):                
+                d=self.database[didx]
+                frames = range(int(d['data_count'])) if not poc_list else poc_list
+                if 'DIV2K' in d['dirname']:    frames = [0]
+                for fidx in frames:
+                    if 'ra_' in d['dirname']: 
+                        if (d['data_count']== 64 and (fidx == 0 or fidx == 32)) or \
+                        (d['data_count']== 65 and (fidx == 0 or fidx == 64)):  
+                            continue # do not include AI frame in RA data
+                    if fidx >= d['data_count']:
+                        sys.exit("exceed max number of frames ({})".format(d['data_count']))
+                    
+                    # save frame info
+                    self.patch_info.append(PatchInfo(didx,fidx,0,0))
+                    
+                     
+    def __len__(self):
+         """Number of addressable patches (generate_type 0) or frames (otherwise)."""
+         return len(self.patch_info)     
+
+    def getPatchData(self,index,comp,x0,y0,border_size=0):
+        """Extract one aligned patch for each requested component.
+
+        Args:
+            index: position in self.patch_info.
+            comp: component names; must be all-luma or all-chroma (mixing
+                '_Y' with '_U'/'_V' raises AssertionError).
+            x0, y0: patch origin in samples (halved for chroma planes).
+            border_size: extra context ring around the patch.
+
+        Returns:
+            float32 array of shape (1, tsize, tsize, len(comp)) where
+            tsize = border + patch + border (all halved for chroma).
+        """
+        assert(index<len(self.patch_info))
+        pinfo=self.patch_info[index]
+        d=self.database[pinfo.data_index]
+        psize=self.patch_size
+        bsize=border_size
+        #print(pinfo,d)
+        chroma_block=getTypeComp(comp)
+        if chroma_block is None:
+            raise AssertionError('The second argument of getPatchData contains strings ending with \'_Y\' and strings ending with \'_U\' or \'_V\', which is not allowed.')
+        w=int(d['width'])
+        h=int(d['height'])
+        frame_size_Y=w*h
+        if chroma_block:
+            # 4:2:0 chroma: every geometry value is halved.
+            psize//=2
+            bsize//=2
+            w//=2
+            h//=2
+        tsize=bsize+psize+bsize        
+        
+        t = np.zeros((1,tsize,tsize,len(comp)),dtype='float32')
+        
+        for idx, c in enumerate(comp):
+            assert(c in self.components)
+                           
+            if 'org' in c:
+                # Original samples come straight from the source YUV (8/10 bit).
+                fn=d['original_yuv']
+                off_frame=d['original_frame_skip']+pinfo.frame_index
+                if d['original_bitdepth'] == 8: # 8bits
+                    norm=self.normalizer_org8bits
+                    b='uint8' 
+                    nbb = 1
+                else: # 10bits
+                    norm=self.normalizer_org10bits
+                    b='uint16'                
+                    nbb = 2
+                # Seek to the frame, then to the U/V plane inside the frame.
+                off = off_frame*(frame_size_Y*nbb*3//2)
+                if c == 'org_U': 
+                    off+=frame_size_Y*nbb                  
+                elif c == 'org_V': 
+                    off+=frame_size_Y*nbb+(frame_size_Y*nbb)//4
+                v = readData(psize,bsize,norm,fn,off,nbb,b,h,w,x0,y0)
+                
+            elif 'rec_after_dbf' in c or 'rec_before_dbf' in c or 'pred' in c or 'partition_cu_average' in c or 'bs' in c or 'bpm' in c:
+                # Codec dumps: 16-bit planar YUV named basename + suffix
+                # (c[:-2] strips the trailing '_Y'/'_U'/'_V').
+                fn=d['dirname']+'/'+d['basename']+self.suffix[c[:-2]]
+                nbb=2 # 16 bits data
+                off=pinfo.frame_index*(frame_size_Y*nbb*3//2)
+                if '_U' in c: 
+                    off+=frame_size_Y*nbb
+                elif '_V' in c: 
+                    off+=frame_size_Y*nbb+(frame_size_Y*nbb)//4
+                if   'rec_after_dbf' in c or 'rec_before_dbf' in c: norm = self.normalizer_rec
+                elif 'pred' in c :          norm = self.normalizer_pred
+                elif 'bs' in c :            norm = self.normalizer_bs
+                elif 'partition_cu_average' in c :     norm = self.normalizer_cu_average
+                elif 'bpm' in c:            norm = self.normalizer_bpm
+                
+                if 'bpm' in c:
+                    # If the data is all-intra coded, v is all-zeros. There is no need to extract IPB data.
+                    # If it's not possible to tell which data is AI coded, you can dump IPB data and read the yuv 
+                    if 'ai_' in d['dirname']: #assume AI data directory has a unique string 'ai_'
+                        v = np.float32(np.zeros((tsize,tsize)))
+                    else:
+                        v = readDataBpm(psize,bsize,norm,fn,off,nbb,'uint16',h,w,x0,y0)
+                    
+                else:
+                    v = readData(psize,bsize,norm,fn,off,nbb,'uint16',h,w,x0,y0)
+                
+            elif c == 'qp_slice':
+                # Per-frame slice QP stored as one int32 per frame.
+                fn=d['dirname']+'/'+d['basename']+self.suffix['qp_slice']
+                norm=self.normalizer_qp
+                off=pinfo.frame_index*4
+                v = readOne(psize,bsize,norm,fn,off,'int32')
+
+            elif c == 'qp_base':
+                # Sequence-level base QP comes directly from the JSON.
+                norm=self.normalizer_qp
+                f = float(d['qp_base'])/norm                
+                v = np.full((tsize,tsize),f,dtype='float32')                 
+            elif c == 'slice_type':
+                fn=d['dirname']+'/'+d['basename']+self.suffix['slice_type']
+                norm=1
+                off=pinfo.frame_index*4
+                v = readOne(psize,bsize,norm,fn,off,'int32')   
+            else:
+                 sys.exit("Unkwown component {}".format(c))
+            t[0,:,:,idx]=v            
+        return t    
+        
+    def __getitem__(self, index):   
+        """Return one randomly-cropped, randomly-augmented luma training sample.
+
+        Yields (qp, recY, predY, bsY, origY, ipbY), each a (1, H, W) float32
+        tensor. NOTE(review): the channel order of self.comps_luma is assumed
+        to be org, rec, pred, bs, ipb, qp (indices 0..5 below) -- confirm
+        against the caller's component list.
+        """
+        # index is the frame index
+        # Length of self.patch_info is the number of total frames, so each frame is sampled equally.
+        # So patches are sampled evenly across different classes.    
+        pinfo=self.patch_info[index]
+        d=self.database[pinfo.data_index]
+        psize=self.patch_size
+        #print(d)
+        
+        # Get a random patch position sampled at every 4th pixel
+        # This ensures that the cropped part lines up with deblocking boundaries.
+        w=int(d['width'])
+        h=int(d['height'])
+        yrand = int(np.random.rand(1)*(h-psize+1))
+        xrand = int(np.random.rand(1)*(w-psize+1))
+        yrand = yrand // 4
+        xrand = xrand // 4
+        yrand = yrand * 4
+        xrand = xrand * 4
+        
+        # Components used in the luma model
+        # "org_Y,rec_before_dbf_Y,pred_Y,bs_Y,bpm_Y,qp_slice,"  
+        
+        t_luma = self.getPatchData(index,self.comps_luma, xrand, yrand)
+        
+        origY_tensor =  torch.tensor(t_luma[0,:,:,0])                  
+        recY_tensor =  torch.tensor(t_luma[0,:,:,1])    
+        predY_tensor =  torch.tensor(t_luma[0,:,:,2])    
+        bsY_tensor =  torch.tensor(t_luma[0,:,:,3])    
+        ipb_tensor =  torch.tensor(t_luma[0,:,:,4])    
+        qp_tensor = torch.unsqueeze(torch.tensor(t_luma[0,:,:,5]),0)
+            
+        # Transpose image randomly.
+        # (The QP plane is a constant fill, so it is exempt from augmentation.)
+        if np.random.rand(1) < 0.5 :
+            recY_tensor =  torch.transpose(recY_tensor, 0, 1)
+            predY_tensor = torch.transpose(predY_tensor,0, 1)
+            bsY_tensor =   torch.transpose(bsY_tensor,  0, 1)
+            origY_tensor = torch.transpose(origY_tensor, 0, 1)
+            ipb_tensor =   torch.transpose(ipb_tensor, 0, 1)
+
+        # Flipping in x-direction randomly.
+        if np.random.rand(1) < 0.5 :
+            recY_tensor =  torch.flip(recY_tensor, [1])
+            predY_tensor = torch.flip(predY_tensor,[1])
+            bsY_tensor =   torch.flip(bsY_tensor,  [1])
+            origY_tensor = torch.flip(origY_tensor, [1])
+            ipb_tensor =   torch.flip(ipb_tensor, [1])
+
+        # Flipping in y-direction randomly.
+        if np.random.rand(1) < 0.5 :
+            recY_tensor =  torch.flip(recY_tensor, [0])
+            predY_tensor = torch.flip(predY_tensor,[0])
+            bsY_tensor =   torch.flip(bsY_tensor,  [0])
+            origY_tensor = torch.flip(origY_tensor, [0])
+            ipb_tensor =   torch.flip(ipb_tensor, [0])
+        
+        # Add the leading channel dimension expected by the model.
+        recY_tensor =  torch.unsqueeze(recY_tensor, 0)
+        predY_tensor = torch.unsqueeze(predY_tensor, 0)
+        bsY_tensor =   torch.unsqueeze(bsY_tensor, 0)
+        ipb_tensor =   torch.unsqueeze(ipb_tensor, 0)
+        origY_tensor = torch.unsqueeze(origY_tensor, 0)
+
+        if self.transform:
+            recY_tensor = self.transform(recY_tensor)
+            predY_tensor = self.transform(predY_tensor)
+            bsY_tensor = self.transform(bsY_tensor)
+            ipb_tensor = self.transform(ipb_tensor)
+        if self.target_transform:
+            origY_tensor = self.target_transform(origY_tensor)
+            
+        return qp_tensor, recY_tensor, predY_tensor, bsY_tensor, origY_tensor, ipb_tensor
\ No newline at end of file
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage2_3_training_lumaIPB/net.py b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage2_3_training_lumaIPB/net.py
new file mode 100644
index 0000000000000000000000000000000000000000..004b5a357a2f30c24962731275ab78a91adbda19
--- /dev/null
+++ b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage2_3_training_lumaIPB/net.py
@@ -0,0 +1,104 @@
+"""
+/* The copyright in this software is being made available under the BSD
+* License, included below. This software may be subject to other third party
+* and contributor rights, including patent rights, and no such rights are
+* granted under this license.
+*
+* Copyright (c) 2010-2022, ITU/ISO/IEC
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+*
+*  * Redistributions of source code must retain the above copyright notice,
+*    this list of conditions and the following disclaimer.
+*  * Redistributions in binary form must reproduce the above copyright notice,
+*    this list of conditions and the following disclaimer in the documentation
+*    and/or other materials provided with the distribution.
+*  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+*    be used to endorse or promote products derived from this software without
+*    specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+* THE POSSIBILITY OF SUCH DAMAGE.
+"""
+
+import torch
+import torch.nn as nn
+
+class conv3x3_f(nn.Module):
+    """3x3 convolution (padding 1, configurable stride) followed by PReLU."""
+    def __init__(self, in_channels, out_channels, kernel_size = 3,stride = 1):
+        super().__init__()
+        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=1 )
+        self.relu = nn.PReLU()
+    def forward(self,rec):
+        y = self.relu(self.conv(rec))
+        return y
+
+class conv1x1_f(nn.Module):
+    """1x1 convolution (no padding) followed by PReLU."""
+    def __init__(self, in_channels, out_channels, kernel_size = 1):
+        super().__init__()
+        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, padding=0)
+        self.relu = nn.PReLU()
+    def forward(self,input):
+        y = self.relu(self.conv(input))
+        return y
+    
+class ResidualBlock2(nn.Module):    
+    """conv3x3 -> PReLU -> conv3x3 residual branch; the skip-add is done by the caller."""
+    def __init__(self, in_channels, out_channels, no_features, kernel_size = 3):
+        super().__init__()
+        self.conv1 = nn.Conv2d(in_channels, no_features, kernel_size, padding=1)
+        self.relu = nn.PReLU()
+        self.conv2 = nn.Conv2d(no_features, out_channels, kernel_size, padding=1)
+    def forward(self,x):
+        res = self.conv2(self.relu(self.conv1(x)))
+        return res
+
+class SpatialGate(nn.Module):
+    """conv3x3 (configurable stride) -> PReLU -> conv3x3 (stride 1)."""
+    def __init__(self, in_channels, out_channels, no_features, stride = 1, kernel_size = 3):
+        super().__init__()
+        self.conv1 = nn.Conv2d(in_channels, no_features, kernel_size, stride, padding=1)
+        self.relu = nn.PReLU()
+        self.conv2 = nn.Conv2d(no_features, out_channels, kernel_size, stride=1, padding=1)
+    def forward(self,x):
+        y = self.conv2(self.relu(self.conv1(x)))
+        return y  
+
+class ConditionalNet1(nn.Module):
+    """Luma in-loop filter conditioned on prediction, BS, IPB and QP planes.
+
+    Each of the five 1-channel inputs is lifted to no_features channels by a
+    3x3+PReLU, the results are concatenated and fused (1x1), downsampled by a
+    stride-2 conv, refined by 8 residual blocks (skip-add per block), then
+    upsampled via PixelShuffle(2) and added to rec as a global residual.
+    NOTE(review): the in_channels constructor argument is unused.
+    """
+    def __init__(self, in_channels=1, out_channels=4, no_features=96, kernel_size = 3):
+        super().__init__()
+        self.convRec = conv3x3_f(1,no_features,kernel_size,stride=1)
+        self.convPred = conv3x3_f(1,no_features,kernel_size,stride=1)
+        self.convBs = conv3x3_f(1,no_features,kernel_size,stride=1)
+        self.convIpb = conv3x3_f(1,no_features,kernel_size,stride=1)
+        self.convQp = conv3x3_f(1,no_features,kernel_size,stride=1)
+        self.fuse = conv1x1_f(5*no_features, no_features)
+        self.transitionH = conv3x3_f(no_features, no_features,stride=2)
+        self.backbone = nn.ModuleList([ResidualBlock2(no_features,no_features,no_features) for i in range(8)])
+        self.last_layer = nn.Sequential(nn.Conv2d(no_features,no_features,kernel_size,padding=1),
+                                        nn.PReLU(),
+                                        nn.Conv2d(no_features,out_channels,kernel_size,padding=1),
+                                        nn.PixelShuffle(2),
+                                       )
+    def forward(self,rec,input,input0,input_ipb,qp):
+        """rec, input, input0, input_ipb, qp: reconstruction, prediction, BS, IPB and QP planes."""
+        input1 = qp
+        input2 = torch.cat([self.convRec(rec), self.convPred(input),  
+                            self.convBs(input0), self.convIpb(input_ipb), self.convQp(input1)], 1)
+        
+        inputbackbone = self.transitionH(self.fuse(input2 ) )         
+        # NOTE(review): inputspatial is computed but never used -- dead code
+        # costing one torch.cat per forward pass; candidate for removal.
+        inputspatial = torch.cat([rec, input, input0, input_ipb, input1], 1) 
+        for backbone in self.backbone:
+            outputbackbone = backbone(inputbackbone)          
+            inputbackbone = inputbackbone + outputbackbone           
+         
+        x = self.last_layer(inputbackbone) + rec
+        return x 
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage2_3_training_lumaIPB/train.sh b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage2_3_training_lumaIPB/train.sh
new file mode 100644
index 0000000000000000000000000000000000000000..0d6690893f43f1749b11f4fd3524f11f8f7907ca
--- /dev/null
+++ b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage2_3_training_lumaIPB/train.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+# Stage 2/3 luma (IPB-aware) filter training: L1 loss at lr 1e-4 for 340
+# epochs, with an MSE-phase switch (--mse_lr/--mse_epochs) handled inside
+# train_luma.py; checkpoints are written under ./ckp/.
+python3 train_luma.py --save_ckp_path './ckp/' --loss_function 'L1' --learning_rate 1e-4 --epochs 340 --mse_lr 1e-5 --mse_epochs 311 --batchsize 32 --num_workers 20 --input_json_train db_stage2.json --input_json_valid db_valid.json --tag Luma
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage2_3_training_lumaIPB/train_luma.py b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage2_3_training_lumaIPB/train_luma.py
new file mode 100644
index 0000000000000000000000000000000000000000..dcd8b53b148b8789cc7445899320a74938d63650
--- /dev/null
+++ b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage2_3_training_lumaIPB/train_luma.py
@@ -0,0 +1,327 @@
+"""
+/* The copyright in this software is being made available under the BSD
+* License, included below. This software may be subject to other third party
+* and contributor rights, including patent rights, and no such rights are
+* granted under this license.
+*
+* Copyright (c) 2010-2022, ITU/ISO/IEC
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+*
+*  * Redistributions of source code must retain the above copyright notice,
+*    this list of conditions and the following disclaimer.
+*  * Redistributions in binary form must reproduce the above copyright notice,
+*    this list of conditions and the following disclaimer in the documentation
+*    and/or other materials provided with the distribution.
+*  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+*    be used to endorse or promote products derived from this software without
+*    specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+* THE POSSIBILITY OF SUCH DAMAGE.
+"""
+
+import torch
+import torch.nn as nn
+from torch.utils.data import Dataset
+from torch.utils.data import DataLoader
+import os
+import numpy as np
+import glob
+import argparse
+import sys
+from datetime import datetime
+from time import time
+from net import ConditionalNet1 as ConditionalNet
+import data_loader
+
+# learning policy
+def adjust_learning_rate(optimizer, mse_lr):
+    """Set the learning rate of every optimizer param group to mse_lr."""
+    print('Update learning rate to ', mse_lr)
+    for param_group in optimizer.param_groups:
+        param_group['lr'] = mse_lr
+     
+def train_loop(dataloader, model, loss_fn, optimizer, validation_dataloader, epoch, checkpoint_path):
+    """Run one training epoch with a residual-normalized ("projective") loss.
+
+    Both the target residual (orig - rec) and the model residual
+    (filtered - rec) are divided by the per-sample mean absolute target
+    residual (+0.002 stabilizer) before loss_fn is applied.
+    """
+    model.train()
+    size = len(dataloader.dataset)
+    stop_time = datetime.now()
+    loss_sum = 0
+    for batch, (QP, recY, predY, bsY, origY, ipbY) in enumerate(dataloader):
+        # NOTE(review): the device is re-resolved every batch; it could be
+        # hoisted out of the loop.
+        my_device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
+        QP = QP.to(my_device)
+        recY = recY.to(my_device)
+        predY = predY.to(my_device)
+        bsY = bsY.to(my_device)
+        ipbY = ipbY.to(my_device)
+        origY = origY.to(my_device)      
+        
+        # Compute prediction and loss
+        filteredY = model(qp=QP, rec=recY, input=predY, input0=bsY, input_ipb=ipbY)
+
+        # Allow the use of projective loss function
+        oloss = origY.detach()-recY.detach()
+        oloss = oloss.abs().mean([2, 3],True)+0.002
+        # Allow the use of projective loss function
+        r_bar = (0.002+origY-recY).div(oloss)
+        r_hat = (0.002+filteredY-recY).div(oloss)
+        loss = (loss_fn(r_hat, r_bar))
+
+        loss_sum += loss.item()
+
+        # Backpropagation
+        optimizer.zero_grad()
+        loss.backward()
+        optimizer.step()
+
+        if batch % 200 == 0:
+            start_time = stop_time
+            stop_time = datetime.now()
+            duration = stop_time - start_time
+            loss, current = loss.item(), batch * len(recY)
+            ave_loss = loss_sum / (1.0 * (batch+1))
+            print(f"ave loss: {ave_loss:>9e}, last loss: {loss:>7f}  [{current:>5d}/{size:>5d}]", " ", duration.seconds, "seconds since last print out.")
+
+        if (batch % 1000 == 0) and (not (batch==0)):
+            # Mid-epoch validation + checkpoint, only during the first epochs
+            # (presumably for early monitoring -- confirm intent).
+            if epoch <= 1:
+                my_loss = test_loop(validation_dataloader, model, loss_fn)
+                filename = 'epoch_%04d_' % (epoch) + 'batch%07d.pt' %(batch)
+                final_checkpoint_path = os.path.join(checkpoint_path, filename)    
+                torch.save({
+                    'epoch': epoch,
+                    'model_state_dict': model.state_dict(),
+                }, final_checkpoint_path)
+            
+def train_loop_MSE(dataloader, model, loss_fn, optimizer, validation_dataloader, epoch, checkpoint_path):
+    """Run one training epoch with a best-of-two-QP normalized MSE loss.
+
+    The model is evaluated at the given QP input and at a lowered QP input;
+    for each sample the smaller normalized squared error of the two is
+    trained on, so the network learns to exploit a QP offset when helpful.
+    """
+    model.train()
+    size = len(dataloader.dataset)
+    stop_time = datetime.now()
+    loss_sum = 0
+    for batch, (QP, recY, predY, bsY, origY, ipbY) in enumerate(dataloader):
+        my_device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
+        # NOTE(review): 'QP-5/63' parses as QP - (5/63). The loader normalizes
+        # qp_slice by 64, so 5/64 would correspond to exactly 5 QP steps --
+        # confirm the intended offset.
+        QP2 = QP-5/63
+        QP2 = QP2.to(my_device)        
+        QP = QP.to(my_device)
+        recY = recY.to(my_device)
+        predY = predY.to(my_device)
+        bsY = bsY.to(my_device)
+        origY = origY.to(my_device)        
+        ipbY = ipbY.to(my_device)       
+        
+        # Compute prediction and loss
+        filteredY = model(qp=QP, rec=recY, input=predY, input0=bsY, input_ipb=ipbY)
+        filtered2 = model(qp=QP2,rec=recY, input=predY, input0=bsY, input_ipb=ipbY)  
+        
+        # Normalize squared errors by the per-sample unfiltered error (+0.002).
+        oloss = origY.detach()-recY.detach()
+        oloss = oloss.square().mean([2, 3],True)+0.002        
+        loss_m00 = torch.mean((((origY.detach()-filteredY).square()+0.002)/oloss), (1, 2, 3)) # take the mean of all dimensions except the batch dimension
+        loss_m05 = torch.mean((((origY.detach()-filtered2).square()+0.002)/oloss), (1, 2, 3)) # take the mean of all dimensions except the batch dimension
+
+        # Put each 64-long loss vector from each model into a 64x3 matrix
+        loss_m00_us = torch.unsqueeze(loss_m00, 1)
+        loss_m05_us = torch.unsqueeze(loss_m05, 1)
+        loss_matrix = torch.cat((loss_m00_us, loss_m05_us), 1)
+        
+        # Calculate the smallest error for each sample
+        min_loss_values, min_loss_indices = torch.min(loss_matrix, 1)
+
+        # Calculate the average error over the batch
+        loss = torch.mean(min_loss_values)
+        loss_sum += loss.item()
+
+        # Backpropagation
+        optimizer.zero_grad()
+        loss.backward()
+        optimizer.step()
+
+        if batch % 200 == 0:
+            start_time = stop_time
+            stop_time = datetime.now()
+            duration = stop_time - start_time
+            loss, current = loss.item(), batch * len(recY)
+            ave_loss = loss_sum / (1.0 * (batch+1))
+            print(f"ave loss: {ave_loss:>9e}, last loss: {loss:>7f}  [{current:>5d}/{size:>5d}]", " ", duration.seconds, "seconds since last print out.")
+
+        if (batch % 1000 == 0) and (not (batch==0)):
+            # Mid-epoch validation + checkpoint, only during the first epochs
+            # (presumably for early monitoring -- confirm intent).
+            if epoch <= 1:
+                my_loss = test_loop(validation_dataloader, model, loss_fn)
+                filename = 'epoch_%04d_' % (epoch) + 'batch%07d.pt' %(batch)
+                final_checkpoint_path = os.path.join(checkpoint_path, filename)    
+                torch.save({
+                    'epoch': epoch,
+                    'model_state_dict': model.state_dict(),
+                }, final_checkpoint_path)            
+
+def test_loop(dataloader, model, loss_fn):
+    model.eval()
+    size = len(dataloader.dataset)
+    num_batches = len(dataloader)
+    test_loss = 0
+    nofilter_loss = 0
+
+    with torch.no_grad():
+        for QP, recY, predY, bsY, origY, ipbY in dataloader:
+            my_device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
+            QP = QP.to(my_device)
+            recY = recY.to(my_device)
+            predY = predY.to(my_device)
+            bsY = bsY.to(my_device)
+            ipbY = ipbY.to(my_device)
+            origY = origY.to(my_device)
+            
+            pred = model(qp=QP, rec=recY, input=predY, input0=bsY, input_ipb=ipbY)
+
+            r_bar = origY - recY
+            r_hat = pred - recY
+            
+            test_loss += loss_fn(r_hat, r_bar).item()
+            nofilter_loss += ((r_bar)**2).mean().item()
+
+    test_loss /= num_batches
+    nofilter_loss /= num_batches
+    print(f"Test Error: \n Avg loss: {test_loss:e}, Nofilter loss: {nofilter_loss:e} \n")
+    return test_loss
+    
+
+
def train(args):
    """End-to-end training driver.

    Builds training/validation loaders, optionally restores a checkpoint,
    then alternates training and validation epochs, saving a full checkpoint
    (model + optimizer + loss) after every epoch.

    Fixes over the previous revision:
      * resuming from a mid-epoch checkpoint (written by train_loop_MSE with
        only 'epoch' and 'model_state_dict') no longer raises KeyError on the
        missing 'loss' key;
      * the saved 'optimizer_state_dict' is now actually restored on resume,
        so Adam's moment estimates survive a restart.
    """
    stop_time = datetime.now()

    # Start by finding out if we have cuda:
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    # load training and validation data
    generate_type = 1
    poc_list = args.poc_list
    components_luma = "org_Y,rec_before_dbf_Y,pred_Y,bs_Y,bpm_Y,qp_slice"
    components_chroma = ""
    comps_luma = components_luma.split(",")
    comps_chroma = components_chroma.split(",")

    train_dl=data_loader.DataLoader1(args.input_json_train,args.patch_size,poc_list,generate_type,comps_luma,comps_chroma)
    train_dataloader = DataLoader(train_dl, batch_size=args.batchsize, shuffle=True, num_workers=args.num_workers, pin_memory=True)

    poc_list_valid = args.poc_list_valid
    valid_dl=data_loader.DataLoader1(args.input_json_valid,args.patch_size,poc_list_valid,generate_type,comps_luma,comps_chroma)
    validation_dataloader = DataLoader(valid_dl, batch_size=args.batchsize, shuffle=True, num_workers=args.num_workers, pin_memory=True)

    print("Nb train samples (frames) available: {}".format(len(train_dl)))
    print("Nb valid samples (frames) available: {}".format(len(valid_dl)))
    print("Available components: {}".format(train_dl.components))
    print("Selected luma components: {}".format(comps_luma))
    print("Selected Chroma components: {}".format(comps_chroma))

    # Load model
    model_loaded = ConditionalNet()
    print('Use self-defined model')

    model_loaded = model_loaded.train()

    checkpoint = None
    if not args.load_ckp_path == "":
        # load checkpoint (on CPU; moved to the training device below)
        checkpoint = torch.load(args.load_ckp_path, map_location=torch.device('cpu'))
        model_loaded.load_state_dict(checkpoint['model_state_dict'])
        read_epoch = checkpoint['epoch']
        # We should start at the read_epoch + 1.
        start_epoch = read_epoch+1
        print('The loaded epoch number was', start_epoch-1, 'so we will start training at epoch number', start_epoch)
        # Mid-epoch checkpoints contain no 'loss' key; .get() returns None
        # instead of raising KeyError so those checkpoints can be resumed too.
        my_loss = checkpoint.get('loss')
        # Not sure we need to do this again but better be safe than sorry
        model_loaded = model_loaded.train()
    else:
        # reset weights to small uniform values around zero
        with torch.no_grad():
            for name, param in model_loaded.named_parameters():
                param.data = 0.025*(torch.rand(param.shape)-0.5)
        start_epoch = 1

    model_loaded = model_loaded.to(device)

    learning_rate = args.learning_rate
    print('Using learning rate', learning_rate, 'and beta2 = 0.999')

    optimizer = torch.optim.Adam(model_loaded.parameters(), lr=learning_rate, betas=(0.9, 0.999))
    # Restore the optimizer state when the checkpoint provides it (end-of-epoch
    # checkpoints do); load_state_dict moves the state to the params' device.
    if checkpoint is not None and 'optimizer_state_dict' in checkpoint:
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])

    if args.loss_function == 'L1':
        loss_fn = nn.L1Loss()
        print('Using L1 loss function.')
    elif args.loss_function == 'MSE':
        loss_fn = nn.MSELoss()
        print('Using MSE loss function.')
    else:
        print("Loss function is not one of 'L1' or 'MSE' , exiting.")
        exit(1)

    checkpoint_path = args.save_ckp_path
    checkpoint_path = os.path.join(checkpoint_path, args.tag)
    os.makedirs(checkpoint_path, exist_ok=True)

    print('before training:')
    my_loss = test_loop(validation_dataloader, model_loaded, loss_fn)
    for epoch in range(start_epoch, args.epochs+1):
        print('epoch', epoch)
        # Early epochs may use L1; from mse_epochs on, switch to MSE and the
        # (lower) MSE learning rate.
        if epoch < args.mse_epochs and args.loss_function == 'L1':
            train_loop(train_dataloader, model_loaded, loss_fn, optimizer, validation_dataloader, epoch, checkpoint_path)
        else:
            print('Use MSE loss function from epoch', args.mse_epochs )
            loss_fn = nn.MSELoss()
            adjust_learning_rate(optimizer, args.mse_lr)
            train_loop_MSE(train_dataloader, model_loaded, loss_fn, optimizer, validation_dataloader, epoch, checkpoint_path)

        my_loss = test_loop(validation_dataloader, model_loaded, loss_fn)
        filename = 'epoch_%04d.pt' % (epoch)
        final_checkpoint_path = os.path.join(checkpoint_path, filename)
        # Full checkpoint: enough to resume both the model and the optimizer.
        torch.save({
            'epoch': epoch,
            'model_state_dict': model_loaded.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'loss': my_loss,
        }, final_checkpoint_path)
+        
+
if __name__ == '__main__':
    # NOTE(review): the string below is a bare expression statement, not a
    # real docstring (it is inside an `if`); kept for its descriptive value.
    """Parses command line arguments."""
    parser = argparse.ArgumentParser(description='Training script')

    parser.add_argument(
        "--verbose", "-V", action="store_true",
        help="Report progress and metrics when training or compressing.")

    # training/validation data
    parser.add_argument("--num_workers", type=int, default=16, help="Number of workers for training data.")
    parser.add_argument("--input_json_train", action="store", nargs='?', type=str, help="input training json database.")
    parser.add_argument("--input_json_valid", action="store", nargs='?', type=str, help="input validation json database.")
    # default poc_list is every second POC in [0, 64), i.e. the even frames
    parser.add_argument('--poc_list', nargs="*", type=int, default=range(0,64,2), help='pocs of frames for training (e.g. --poc_list 1 3 5).')
    parser.add_argument('--poc_list_valid', nargs="*", type=int, default=[0], help='pocs of frames for validation (e.g. --poc_list 1 3 5).')
    parser.add_argument("--patch_size", action="store", nargs='?', default=256, type=int, help="patch size to extract")
    parser.add_argument("--batchsize", type=int, default=32, help="Batch size for training and validation.")

    # optimizer configuration
    parser.add_argument("--learning_rate", type=float, default=1e-4, help="learning for training.")
    parser.add_argument("--loss_function", type=str, default = 'MSE', help="The loss function, 'L1' or 'MSE'.")
    parser.add_argument("--epochs", type=int, default=400, help="Train up to this number of epochs. ")
    parser.add_argument("--mse_epochs", type=int, default=300, help="switch to the mse loss and decrease learning rate at mse_epoch. Should be smaller than epoch.")
    parser.add_argument('--mse_lr', type=float, default=1e-5, help='Learning rate for MSE.')

    # save and load checkpoint
    parser.add_argument("--save_ckp_path", default="./checkpoints/", help="Path to save checkpoint.")
    parser.add_argument("--load_ckp_path", default="", help="Path to load pretrained checkpoint.")

    # a unique name for the training job
    parser.add_argument("--tag", default="_mytest",help="Tag for the current training. Checkpoints are saved under the folder save_ckp_path+tag.")

    args = parser.parse_args()
    print(args)

    train(args)
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage2_4_training_chroma/data_loader.py b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage2_4_training_chroma/data_loader.py
new file mode 100644
index 0000000000000000000000000000000000000000..8fd2250b6ddc210e324f5334ddd3635480416d3b
--- /dev/null
+++ b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage2_4_training_chroma/data_loader.py
@@ -0,0 +1,428 @@
+"""
+/* The copyright in this software is being made available under the BSD
+* License, included below. This software may be subject to other third party
+* and contributor rights, including patent rights, and no such rights are
+* granted under this license.
+*
+* Copyright (c) 2010-2022, ITU/ISO/IEC
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+*
+*  * Redistributions of source code must retain the above copyright notice,
+*    this list of conditions and the following disclaimer.
+*  * Redistributions in binary form must reproduce the above copyright notice,
+*    this list of conditions and the following disclaimer in the documentation
+*    and/or other materials provided with the distribution.
+*  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+*    be used to endorse or promote products derived from this software without
+*    specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+* THE POSSIBILITY OF SUCH DAMAGE.
+"""
+
+import json
+import math
+import sys
+from typing import NamedTuple
+import numpy as np
+import struct
+import torch
+from torch.utils.data import Dataset
+
class PatchInfo(NamedTuple):
    # Address of one training patch/frame in the database.
    data_index: int   # index of the sequence entry in the json 'data' list
    frame_index: int  # frame (POC) index within that sequence
    patch_x0: int     # patch origin x (patch-grid units for generate_type==0; 0 when whole frames are indexed) -- TODO confirm units
    patch_y0: int     # patch origin y, same convention as patch_x0
+    
+    
def readData(patch_size,border_size,norm,fn,off,nbb,ctype,h,w,x0,y0):
    # Read a (patch+2*border)^2 crop of one raw plane from file `fn`,
    # starting `off` bytes in, and divide by `norm`.  Any part of the window
    # that falls outside the h-by-w frame is left zero (implicit padding).
    side = patch_size + 2 * border_size
    out = np.zeros((side, side), dtype='float32')
    with open(fn, "rb") as file:
        # requested window [row0, row1, col0, col1], possibly out of frame
        want = [y0 - border_size, y0 + patch_size + border_size,
                x0 - border_size, x0 + patch_size + border_size]
        # part of the window actually inside the frame
        have = [max(want[0], 0), min(want[1], h), max(want[2], 0), min(want[3], w)]
        # where that part lands inside the output tile
        dst = [have[0] - want[0], have[1] - want[0],
               have[2] - want[2], have[3] - want[2]]
        rows = have[1] - have[0]
        off += have[0] * w * nbb
        raw = np.fromfile(file, dtype=ctype, count=rows * w, offset=off).reshape(rows, w)
        out[dst[0]:dst[1], dst[2]:dst[3]] = raw[:, have[2]:have[3]]
    return out.astype('float32') / norm
+         
def readDataBpm(patch_size,border_size,norm,fn,off,nbb,ctype,h,w,x0,y0):
    # Read a block-prediction-mode (IPB) crop and remap the raw codes to
    # {0, 0.5, 1}: intra/IBC -> 0, uni-prediction -> 0.5, bi-prediction -> 1.
    # Window/padding handling mirrors readData.  `norm` is unused here.
    side = patch_size + 2 * border_size
    out = np.zeros((side, side), dtype='float32')  # implicit zero padding
    with open(fn, "rb") as file:
        want = [y0 - border_size, y0 + patch_size + border_size,
                x0 - border_size, x0 + patch_size + border_size]
        have = [max(want[0], 0), min(want[1], h), max(want[2], 0), min(want[3], w)]
        dst = [have[0] - want[0], have[1] - want[0],
               have[2] - want[2], have[3] - want[2]]
        rows = have[1] - have[0]
        off += have[0] * w * nbb
        raw = np.fromfile(file, dtype=ctype, count=rows * w, offset=off).reshape(rows, w)
        raw = raw[:, have[2]:have[3]]
        # Shift right: Ipred=0, IBC=1, uni-pred=2, bi-pred=3.
        ipb = np.right_shift(raw, 1)
        # Merge Ipred and IBC into one class (0 -> 1, others unchanged).
        ipb = ipb + (ipb == 0)
        # Map {1, 2, 3} -> {0, 0.5, 1}.
        out[dst[0]:dst[1], dst[2]:dst[3]] = np.float32(ipb - 1) / 2.0
    return out.astype('float32')
+         
def readOne(patch_size,border_size,norm,fn,off,ctype):
    # Read one scalar (e.g. the slice QP) from a sidecar file at byte offset
    # `off`, normalize it, and broadcast it to a constant plane of the padded
    # patch size.  Only int32 sidecar files are supported so far.
    with open(fn, "rb") as file:
        if ctype == 'int32':
            file.seek(off)
            value = float(struct.unpack("i", file.read(4))[0]) / norm
        else:
            sys.exit("readOne todo")
        side = patch_size + 2 * border_size
        return np.full((side, side), value, dtype='float32')
+    
def getTypeComp(comp):
    # Classify a component-name list: False -> luma only, True -> chroma only,
    # None -> mixed luma and chroma (callers treat mixed as an error).
    has_luma = any('_Y' in c for c in comp)
    has_chroma = any('_U' in c or '_V' in c for c in comp)
    if has_luma and has_chroma:
        return None
    return has_chroma
+
+
class DataLoader1(Dataset):
    """Frame-level dataset over a raw-YUV database described by a json file.

    Each item addresses one frame (generate_type != 0) or one aligned patch
    (generate_type == 0); pixel data for the requested components is read
    lazily from the raw dump files referenced by the json database.

    Fixes over the previous revision:
      * ``components``/``patch_info``/``suffix`` used to be *class*-level
        mutable attributes, so every instance shared and kept appending to
        the same lists (building a train and a valid loader duplicated every
        component name and merged both patch lists).  They are now
        per-instance state created in ``__init__``.
      * the ``transform``/``target_transform`` branches of ``__getitem__``
        referenced undefined ``predY_tensor``/``bsY_tensor``/``origY_tensor``
        names and raised NameError whenever a transform was supplied; only
        the tensors actually produced are transformed now.
    """

    # patch_size in luma samples
    def __init__(self, jsonfile, patch_size, poc_list, generate_type, comps_luma, comps_chroma, qp_filter=-1, slice_type_filter=-1, transform=None, target_transform=None):
        self.components = []   # names of all components available in this database
        self.database = None   # the whole database (json 'data' list)
        self.patch_info = []   # address of each item (PatchInfo per patch/frame)
        self.suffix = {}       # filename suffix for each dumped component
        self.generate_type = generate_type
        self.comps_luma = comps_luma
        self.comps_chroma = comps_chroma
        self.transform = transform
        self.target_transform = target_transform

        # Normalization constants depend on how the data was generated.
        if self.generate_type == 0:
            self.normalizer_rec = 1023.0
            self.normalizer_pred = 1023.0
            self.normalizer_bs = 1023.0
            self.normalizer_cu_average = 1023.0
            self.normalizer_org8bits = 255.0
            self.normalizer_org10bits = 1023.0
            self.normalizer_qp = 1023.0
        else:
            self.normalizer_rec = 1024.0
            self.normalizer_pred = 1024.0
            self.normalizer_bs = 1024.0
            self.normalizer_cu_average = 1024.0
            self.normalizer_org8bits = 256.0
            self.normalizer_org10bits = 1024.0
            self.normalizer_qp = 64.0
            # NOTE(review): normalizer_bpm is only defined for
            # generate_type != 0; a bpm read with generate_type == 0 would
            # raise AttributeError -- confirm this path cannot occur.
            self.normalizer_bpm = 1
        self.patch_size = patch_size

        with open(jsonfile, "r") as file:
            content = file.read()
            dcontent = json.loads(content)
            if qp_filter > 0 and 'suffix_qp' not in dcontent:
                sys.exit("Filtering on qp impossible: no qp data in the dataset")
            if slice_type_filter > 0 and 'suffix_slicetype' not in dcontent:
                sys.exit("Filtering on slice type impossible: no slice data in the dataset")
            if qp_filter > 0 or slice_type_filter > 0:
                sys.exit("todo")
            # Originals are always present; the other components depend on
            # which suffixes the dumping stage recorded in the json.
            self.components.append("org_Y")
            self.components.append("org_U")
            self.components.append("org_V")
            if 'suffix_rec_after_dbf' in dcontent:
                self.suffix['rec_after_dbf'] = dcontent['suffix_rec_after_dbf']
                self.components.append("rec_after_dbf_Y")
                self.components.append("rec_after_dbf_U")
                self.components.append("rec_after_dbf_V")
            if 'suffix_rec_before_dbf' in dcontent:
                self.suffix['rec_before_dbf'] = dcontent['suffix_rec_before_dbf']
                self.components.append("rec_before_dbf_Y")
                self.components.append("rec_before_dbf_U")
                self.components.append("rec_before_dbf_V")
            if 'suffix_pred' in dcontent:
                self.suffix['pred'] = dcontent['suffix_pred']
                self.components.append("pred_Y")
                self.components.append("pred_U")
                self.components.append("pred_V")
            if 'suffix_bs' in dcontent:
                self.suffix['bs'] = dcontent['suffix_bs']
                self.components.append("bs_Y")
                self.components.append("bs_U")
                self.components.append("bs_V")
            if 'suffix_partition_cu_average' in dcontent:
                self.suffix['partition_cu_average'] = dcontent['suffix_partition_cu_average']
                self.components.append("partition_cu_average_Y")
                self.components.append("partition_cu_average_U")
                self.components.append("partition_cu_average_V")
            if 'suffix_qp' in dcontent:
                self.components.append("qp_slice")
                self.suffix['qp_slice'] = dcontent['suffix_qp']
            self.components.append("qp_base")  # always here
            if 'suffix_slicetype' in dcontent:
                self.components.append("slice_type")
                self.suffix['slice_type'] = dcontent['suffix_slicetype']
            if 'suffix_bpm' in dcontent:
                self.suffix['bpm'] = dcontent['suffix_bpm']
                self.components.append("bpm_Y")
                self.components.append("bpm_U")
                self.components.append("bpm_V")

            self.database = dcontent['data']

        # create array of patch addresses
        if self.generate_type == 0:
            # One entry per interior patch of every frame (the outermost ring
            # of patches is skipped: origins start at 1 and nb_* subtract 2).
            psize = self.patch_size
            for didx in range(len(self.database)):
                d = self.database[didx]
                w = int(d['width'])
                h = int(d['height'])
                w -= w % psize
                h -= h % psize
                nb_w = int(w // psize - 2)
                nb_h = int(h // psize - 2)

                id_ra = '_T2RA_'
                ra_flag = True if id_ra in d['bsname'] else False
                for fidx in range(int(d['data_count'])):
                    # skip the intra frames of random-access bitstreams
                    if ra_flag and (fidx == 0 or fidx == 32 or fidx == 64):
                        continue
                    for y0 in range(nb_h):
                        for x0 in range(nb_w):
                            self.patch_info.append(PatchInfo(didx, fidx, 1 + x0, 1 + y0))
        else:
            # One entry per selected frame; patches are cropped randomly
            # in __getitem__.
            for didx in range(len(self.database)):
                d = self.database[didx]
                frames = range(int(d['data_count'])) if not poc_list else poc_list
                if 'DIV2K' in d['dirname']:
                    frames = [0]  # still images: single frame
                for fidx in frames:
                    if 'ra_' in d['dirname']:
                        if (d['data_count'] == 64 and (fidx == 0 or fidx == 32)) or \
                           (d['data_count'] == 65 and (fidx == 0 or fidx == 64)):
                            continue  # do not include AI frame in RA data
                    if fidx >= d['data_count']:
                        sys.exit("exceed max number of frames ({})".format(d['data_count']))

                    # save frame info (whole frame: patch origin 0,0)
                    self.patch_info.append(PatchInfo(didx, fidx, 0, 0))

    def __len__(self):
        return len(self.patch_info)

    def getPatchData(self, index, comp, x0, y0, border_size=0):
        """Read the components in `comp` for item `index` as one
        (1, size, size, len(comp)) float32 array cropped at (x0, y0).

        `comp` must be all-luma or all-chroma; for chroma the patch/border
        sizes and frame dimensions are halved (4:2:0 layout assumed).
        Raises AssertionError when luma and chroma components are mixed.
        """
        assert (index < len(self.patch_info))
        pinfo = self.patch_info[index]
        d = self.database[pinfo.data_index]
        psize = self.patch_size
        bsize = border_size
        chroma_block = getTypeComp(comp)
        if chroma_block is None:
            raise AssertionError('The second argument of getPatchData contains strings ending with \'_Y\' and strings ending with \'_U\' or \'_V\', which is not allowed.')
        w = int(d['width'])
        h = int(d['height'])
        frame_size_Y = w * h
        if chroma_block:
            psize //= 2
            bsize //= 2
            w //= 2
            h //= 2
        tsize = bsize + psize + bsize

        t = np.zeros((1, tsize, tsize, len(comp)), dtype='float32')

        for idx, c in enumerate(comp):
            assert (c in self.components)

            if 'org' in c:
                # Originals come straight from the source yuv (8 or 10 bit).
                fn = d['original_yuv']
                off_frame = d['original_frame_skip'] + pinfo.frame_index
                if d['original_bitdepth'] == 8:  # 8bits
                    norm = self.normalizer_org8bits
                    b = 'uint8'
                    nbb = 1
                else:  # 10bits
                    norm = self.normalizer_org10bits
                    b = 'uint16'
                    nbb = 2
                off = off_frame * (frame_size_Y * nbb * 3 // 2)
                if c == 'org_U':
                    off += frame_size_Y * nbb
                elif c == 'org_V':
                    off += frame_size_Y * nbb + (frame_size_Y * nbb) // 4
                v = readData(psize, bsize, norm, fn, off, nbb, b, h, w, x0, y0)

            elif 'rec_after_dbf' in c or 'rec_before_dbf' in c or 'pred' in c or 'partition_cu_average' in c or 'bs' in c or 'bpm' in c:
                # Dumped 16-bit planes; strip the trailing '_Y'/'_U'/'_V' of
                # the component name to look up the file suffix.
                fn = d['dirname'] + '/' + d['basename'] + self.suffix[c[:-2]]
                nbb = 2  # 16 bits data
                off = pinfo.frame_index * (frame_size_Y * nbb * 3 // 2)
                if '_U' in c:
                    off += frame_size_Y * nbb
                elif '_V' in c:
                    off += frame_size_Y * nbb + (frame_size_Y * nbb) // 4
                if 'rec_after_dbf' in c or 'rec_before_dbf' in c:
                    norm = self.normalizer_rec
                elif 'pred' in c:
                    norm = self.normalizer_pred
                elif 'bs' in c:
                    norm = self.normalizer_bs
                elif 'partition_cu_average' in c:
                    norm = self.normalizer_cu_average
                elif 'bpm' in c:
                    norm = self.normalizer_bpm

                if 'bpm' in c:
                    # If the data is all-intra coded, v is all-zeros. There is no need to extract IPB data.
                    # If it's not possible to tell which data is AI coded, you can dump IPB data and read the yuv
                    if 'ai_' in d['dirname']:  # assume AI data directory has a unique string 'ai_'
                        v = np.float32(np.zeros((tsize, tsize)))
                    else:
                        v = readDataBpm(psize, bsize, norm, fn, off, nbb, 'uint16', h, w, x0, y0)
                else:
                    v = readData(psize, bsize, norm, fn, off, nbb, 'uint16', h, w, x0, y0)

            elif c == 'qp_slice':
                fn = d['dirname'] + '/' + d['basename'] + self.suffix['qp_slice']
                norm = self.normalizer_qp
                off = pinfo.frame_index * 4
                v = readOne(psize, bsize, norm, fn, off, 'int32')

            elif c == 'qp_base':
                norm = self.normalizer_qp
                f = float(d['qp_base']) / norm
                v = np.full((tsize, tsize), f, dtype='float32')
            elif c == 'slice_type':
                fn = d['dirname'] + '/' + d['basename'] + self.suffix['slice_type']
                norm = 1
                off = pinfo.frame_index * 4
                v = readOne(psize, bsize, norm, fn, off, 'int32')
            else:
                sys.exit("Unkwown component {}".format(c))
            t[0, :, :, idx] = v
        return t

    def __getitem__(self, index):
        # index is the frame index.
        # Length of self.patch_info is the number of total frames, so each
        # frame -- and therefore each content class -- is sampled equally.
        pinfo = self.patch_info[index]
        d = self.database[pinfo.data_index]
        psize = self.patch_size

        # Random patch position snapped to a multiple of 4 so the crop lines
        # up with deblocking boundaries.
        w = int(d['width'])
        h = int(d['height'])
        yrand = int(np.random.rand(1) * (h - psize + 1))
        xrand = int(np.random.rand(1) * (w - psize + 1))
        yrand = (yrand // 4) * 4
        xrand = (xrand // 4) * 4

        # Expected component order (chroma model):
        #   comps_luma:   "rec_before_dbf_Y"
        #   comps_chroma: "org_U,org_V,rec_before_dbf_U,rec_before_dbf_V,
        #                  pred_U,pred_V,bs_U,bs_V,qp_slice"
        # The fixed indices below assume exactly this ordering -- TODO confirm
        # callers always pass it.
        t_luma = self.getPatchData(index, self.comps_luma, xrand, yrand)
        t_chroma = self.getPatchData(index, self.comps_chroma, xrand // 2, yrand // 2)

        recY_tensor = torch.tensor(t_luma[0, :, :, 0])
        origU_tensor = torch.tensor(t_chroma[0, :, :, 0])
        origV_tensor = torch.tensor(t_chroma[0, :, :, 1])
        recU_tensor = torch.tensor(t_chroma[0, :, :, 2])
        recV_tensor = torch.tensor(t_chroma[0, :, :, 3])
        predU_tensor = torch.tensor(t_chroma[0, :, :, 4])
        predV_tensor = torch.tensor(t_chroma[0, :, :, 5])
        bsU_tensor = torch.tensor(t_chroma[0, :, :, 6])
        bsV_tensor = torch.tensor(t_chroma[0, :, :, 7])
        qp_tensor = torch.unsqueeze(torch.tensor(t_chroma[0, :, :, 8]), 0)

        # Data augmentation: random transpose.
        if np.random.rand(1) < 0.5:
            recY_tensor = torch.transpose(recY_tensor, 0, 1)

            recU_tensor = torch.transpose(recU_tensor, 0, 1)
            predU_tensor = torch.transpose(predU_tensor, 0, 1)
            bsU_tensor = torch.transpose(bsU_tensor, 0, 1)
            origU_tensor = torch.transpose(origU_tensor, 0, 1)

            recV_tensor = torch.transpose(recV_tensor, 0, 1)
            predV_tensor = torch.transpose(predV_tensor, 0, 1)
            bsV_tensor = torch.transpose(bsV_tensor, 0, 1)
            origV_tensor = torch.transpose(origV_tensor, 0, 1)

        # Random flip in the x-direction.
        if np.random.rand(1) < 0.5:
            recY_tensor = torch.flip(recY_tensor, [1])

            recU_tensor = torch.flip(recU_tensor, [1])
            predU_tensor = torch.flip(predU_tensor, [1])
            bsU_tensor = torch.flip(bsU_tensor, [1])
            origU_tensor = torch.flip(origU_tensor, [1])

            recV_tensor = torch.flip(recV_tensor, [1])
            predV_tensor = torch.flip(predV_tensor, [1])
            bsV_tensor = torch.flip(bsV_tensor, [1])
            origV_tensor = torch.flip(origV_tensor, [1])

        # Random flip in the y-direction.
        if np.random.rand(1) < 0.5:
            recY_tensor = torch.flip(recY_tensor, [0])

            recU_tensor = torch.flip(recU_tensor, [0])
            predU_tensor = torch.flip(predU_tensor, [0])
            bsU_tensor = torch.flip(bsU_tensor, [0])
            origU_tensor = torch.flip(origU_tensor, [0])

            recV_tensor = torch.flip(recV_tensor, [0])
            predV_tensor = torch.flip(predV_tensor, [0])
            bsV_tensor = torch.flip(bsV_tensor, [0])
            origV_tensor = torch.flip(origV_tensor, [0])

        # Add the channel dimension and interleave U/V into 2-channel tensors.
        recY_tensor = torch.unsqueeze(recY_tensor, 0)

        recU_tensor = torch.unsqueeze(recU_tensor, 0)
        predU_tensor = torch.unsqueeze(predU_tensor, 0)
        bsU_tensor = torch.unsqueeze(bsU_tensor, 0)
        origU_tensor = torch.unsqueeze(origU_tensor, 0)

        recV_tensor = torch.unsqueeze(recV_tensor, 0)
        predV_tensor = torch.unsqueeze(predV_tensor, 0)
        bsV_tensor = torch.unsqueeze(bsV_tensor, 0)
        origV_tensor = torch.unsqueeze(origV_tensor, 0)

        recUV_tensor = torch.cat((recU_tensor, recV_tensor), 0)
        predUV_tensor = torch.cat((predU_tensor, predV_tensor), 0)
        bsUV_tensor = torch.cat((bsU_tensor, bsV_tensor), 0)
        origUV_tensor = torch.cat((origU_tensor, origV_tensor), 0)

        # Optional user transforms, applied only to the tensors that are
        # actually produced/returned (the previous revision referenced
        # undefined predY/bsY/origY names here and raised NameError).
        if self.transform:
            recY_tensor = self.transform(recY_tensor)
            recUV_tensor = self.transform(recUV_tensor)
            predUV_tensor = self.transform(predUV_tensor)
            bsUV_tensor = self.transform(bsUV_tensor)
        if self.target_transform:
            origUV_tensor = self.target_transform(origUV_tensor)

        return qp_tensor, recY_tensor, recUV_tensor, predUV_tensor, bsUV_tensor, origUV_tensor
\ No newline at end of file
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage2_4_training_chroma/net.py b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage2_4_training_chroma/net.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb439a227b5473c3fa331bb9f487432427b56e81
--- /dev/null
+++ b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage2_4_training_chroma/net.py
@@ -0,0 +1,95 @@
+"""
+/* The copyright in this software is being made available under the BSD
+* License, included below. This software may be subject to other third party
+* and contributor rights, including patent rights, and no such rights are
+* granted under this license.
+*
+* Copyright (c) 2010-2022, ITU/ISO/IEC
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+*
+*  * Redistributions of source code must retain the above copyright notice,
+*    this list of conditions and the following disclaimer.
+*  * Redistributions in binary form must reproduce the above copyright notice,
+*    this list of conditions and the following disclaimer in the documentation
+*    and/or other materials provided with the distribution.
+*  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+*    be used to endorse or promote products derived from this software without
+*    specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+* THE POSSIBILITY OF SUCH DAMAGE.
+"""
+
+import torch
+import torch.nn as nn
+
+class conv3x3_f(nn.Module):
+    """3x3 convolution (configurable kernel/stride) followed by a PReLU.
+
+    With the default kernel_size=3 and padding=1, spatial size is preserved
+    for stride=1 and halved for stride=2 (used to bring luma down to chroma
+    resolution in ConditionalNet0).
+    NOTE(review): padding is hard-coded to 1, so any kernel_size other than
+    3 would change the output size -- confirm only 3 is ever passed.
+    """
+    def __init__(self, in_channels, out_channels, kernel_size = 3,stride = 1):
+        super().__init__()
+        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=1 )
+        self.relu = nn.PReLU()
+    def forward(self,rec):
+        # Convolve, then apply the learned (PReLU) activation.
+        y = self.relu(self.conv(rec))
+        return y
+
+class conv1x1_f(nn.Module):
+    """1x1 (pointwise) convolution followed by a PReLU.
+
+    Mixes information across channels without touching the spatial
+    dimensions (padding=0, default kernel_size=1); used as the fusion layer
+    after branch concatenation in ConditionalNet0.
+    """
+    def __init__(self, in_channels, out_channels, kernel_size = 1):
+        super().__init__()
+        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, padding=0)
+        self.relu = nn.PReLU()
+    def forward(self,input):
+        y = self.relu(self.conv(input))
+        return y
+    
+class ResidualBlock(nn.Module):    
+    """conv3x3 -> PReLU -> conv3x3 with an identity skip connection.
+
+    in_channels must equal out_channels for the residual addition to be
+    shape-valid; ConditionalNet0 passes no_features for both.
+    NOTE(review): padding=1 assumes kernel_size=3 -- confirm no other size
+    is used.
+    """
+    def __init__(self, in_channels, out_channels, no_features, kernel_size = 3):
+        super().__init__()
+        self.conv1 = nn.Conv2d(in_channels, no_features, kernel_size, padding=1)
+        self.relu = nn.PReLU()
+        self.conv2 = nn.Conv2d(no_features, out_channels, kernel_size, padding=1)
+    def forward(self,x):
+        # Add the block input back onto the two-conv result (residual learning).
+        res = self.conv2(self.relu(self.conv1(x)))
+        out = res + x
+        return out
+
+class ConditionalNet0(nn.Module):
+    def __init__(self, in_channels=1, out_channels=4, no_features=96, kernel_size = 3):
+        super().__init__()
+        self.convLuma = conv3x3_f(1,no_features,kernel_size,stride=2)
+        self.convRec = conv3x3_f(2,no_features,kernel_size,stride=1)
+        self.convPred = conv3x3_f(2,no_features,kernel_size,stride=1)
+        self.convBs = conv3x3_f(2,no_features,kernel_size,stride=1)
+        self.convQp = conv3x3_f(1,no_features,kernel_size,stride=1)
+        self.fuse = conv1x1_f(96*5, no_features)
+        self.transitionH = conv3x3_f(no_features, no_features,stride=2)
+        self.backbone = nn.ModuleList([ResidualBlock(no_features,no_features,no_features) for i in range(8)])
+        self.last_layer = nn.Sequential(nn.Conv2d(no_features,no_features,kernel_size,padding=1),
+                                        nn.PReLU(),
+                                        nn.Conv2d(no_features,out_channels*2,kernel_size,padding=1),
+                                        nn.PixelShuffle(2),
+                                       )
+    def forward(self,input,rec,input0,input1,qp):
+        input2 = qp
+        input3 = torch.cat([self.convLuma(input),self.convRec(rec), self.convPred(input0), 
+                            self.convBs(input1), self.convQp(input2)], 1) 
+        
+        inputbackbone = self.transitionH(self.fuse(input3 ) ) 
+        
+        for i,backbone in enumerate(self.backbone):
+            outputbackbone = backbone(inputbackbone) 
+            inputbackbone = outputbackbone 
+         
+        x = self.last_layer(inputbackbone) + rec
+        return x
\ No newline at end of file
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage2_4_training_chroma/train.sh b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage2_4_training_chroma/train.sh
new file mode 100644
index 0000000000000000000000000000000000000000..9837745cb7e9d58137544030a0e8e79a8c5d8514
--- /dev/null
+++ b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage2_4_training_chroma/train.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+# Launch stage-2 chroma-filter training: L1 loss with lr=1e-4 until epoch
+# 301, then train_chroma.py switches to MSE with the reduced --mse_lr.
+python3 train_chroma.py --save_ckp_path './ckp/' --loss_function 'L1' --learning_rate 1e-4 --epochs 400 --mse_lr 1e-5 --mse_epochs 301 --batchsize 32 --num_workers 20 --input_json_train db_stage2.json --input_json_valid db_valid.json --tag Chroma
diff --git a/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage2_4_training_chroma/train_chroma.py b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage2_4_training_chroma/train_chroma.py
new file mode 100644
index 0000000000000000000000000000000000000000..11ef1e86d0260a3fb600ab0f2353e16a59c88a3a
--- /dev/null
+++ b/training/training_scripts/Nn_Filtering_Set_1/Scripts/CombinedIntraInter/stage2_4_training_chroma/train_chroma.py
@@ -0,0 +1,331 @@
+"""
+/* The copyright in this software is being made available under the BSD
+* License, included below. This software may be subject to other third party
+* and contributor rights, including patent rights, and no such rights are
+* granted under this license.
+*
+* Copyright (c) 2010-2022, ITU/ISO/IEC
+* All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+*
+*  * Redistributions of source code must retain the above copyright notice,
+*    this list of conditions and the following disclaimer.
+*  * Redistributions in binary form must reproduce the above copyright notice,
+*    this list of conditions and the following disclaimer in the documentation
+*    and/or other materials provided with the distribution.
+*  * Neither the name of the ITU/ISO/IEC nor the names of its contributors may
+*    be used to endorse or promote products derived from this software without
+*    specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+* THE POSSIBILITY OF SUCH DAMAGE.
+"""
+
+import torch
+import torch.nn as nn
+from torch.utils.data import Dataset
+from torch.utils.data import DataLoader
+import os
+import numpy as np
+import glob
+import argparse
+import sys
+from datetime import datetime
+from time import time
+from net import ConditionalNet0 as ConditionalNet
+import data_loader
+
+# learning policy
+def adjust_learning_rate(optimizer, mse_lr):
+    """Set every param group's learning rate to mse_lr.
+
+    Called when training switches from the L1 phase to the MSE phase.
+    """
+    print('Update learning rate to ', mse_lr)
+    for param_group in optimizer.param_groups:
+        param_group['lr'] = mse_lr
+     
+def train_loop(dataloader, model, loss_fn, optimizer, validation_dataloader, epoch, checkpoint_path):
+    model.train()
+    size = len(dataloader.dataset)
+    stop_time = datetime.now()
+    loss_sum = 0
+    for batch, (QP, recY, recUV, predUV, bsUV, origUV) in enumerate(dataloader):
+        my_device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
+        QP = QP.to(my_device)
+        recY = recY.to(my_device)
+        recUV = recUV.to(my_device)
+        predUV = predUV.to(my_device)
+        bsUV = bsUV.to(my_device)
+        origUV = origUV.to(my_device)        
+        
+        # Compute prediction and loss
+        filteredUV = model(input=recY, rec=recUV, input0=predUV, input1=bsUV, qp=QP) 
+
+        # Allow the use of projective loss function
+        oloss = origUV.detach()-recUV.detach()
+        oloss = oloss.abs().mean([2, 3],True)+0.002
+        # Allow the use of projective loss function
+        r_bar = (0.002+origUV-recUV).div(oloss)
+        r_hat = (0.002+filteredUV-recUV).div(oloss)
+        loss = (loss_fn(r_hat, r_bar))
+
+        loss_sum += loss.item()
+
+        # Backpropagation
+        optimizer.zero_grad()
+        loss.backward()
+        optimizer.step()
+
+        if batch % 200 == 0:
+            start_time = stop_time
+            stop_time = datetime.now()
+            duration = stop_time - start_time
+            loss, current = loss.item(), batch * len(recUV)
+            ave_loss = loss_sum / (1.0 * (batch+1))
+            print(f"ave loss: {ave_loss:>9e}, last loss: {loss:>7f}  [{current:>5d}/{size:>5d}]", " ", duration.seconds, "seconds since last print out.")
+
+        if (batch % 1000 == 0) and (not (batch==0)):
+            if epoch <= 1:
+                my_loss = test_loop(validation_dataloader, model, loss_fn)
+                filename = 'epoch_%04d_' % (epoch) + 'batch%07d.pt' %(batch)
+                final_checkpoint_path = os.path.join(checkpoint_path, filename)    
+                torch.save({
+                    'epoch': epoch,
+                    'model_state_dict': model.state_dict(),
+                }, final_checkpoint_path)
+            
+def train_loop_MSE(dataloader, model, loss_fn, optimizer, validation_dataloader, epoch, checkpoint_path):
+    model.train()
+    size = len(dataloader.dataset)
+    stop_time = datetime.now()
+    loss_sum = 0
+    for batch, (QP, recY, recUV, predUV, bsUV, origUV) in enumerate(dataloader):
+        my_device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
+        QP2 = QP-5/63
+        QP2 = QP2.to(my_device)  
+        QP = QP.to(my_device)
+        recY = recY.to(my_device)
+        recUV = recUV.to(my_device)
+        predUV = predUV.to(my_device)
+        bsUV = bsUV.to(my_device)
+        origUV = origUV.to(my_device)        
+        
+        # Compute prediction and loss
+        filteredUV = model(input=recY, rec=recUV, input0=predUV, input1=bsUV, qp=QP)  
+        filteredUV2 = model(input=recY, rec=recUV, input0=predUV, input1=bsUV, qp=QP2)  
+        
+        # Allow the use of projective loss function
+        oloss = origUV.detach()-recUV.detach()
+        oloss = oloss.abs().mean([2, 3],True)+0.002
+        
+        loss_m00 = torch.mean((((origUV.detach()-filteredUV).square()+0.002)/oloss), (1, 2, 3)) # take the mean of all dimensions except the batch dimension
+        loss_m05 = torch.mean((((origUV.detach()-filteredUV2).square()+0.002)/oloss), (1, 2, 3)) # take the mean of all dimensions except the batch dimension
+
+        # Put each 64-long loss vector from each model into a 64x3 matrix
+        loss_m00_us = torch.unsqueeze(loss_m00, 1)
+        loss_m05_us = torch.unsqueeze(loss_m05, 1)
+        loss_matrix = torch.cat((loss_m00_us, loss_m05_us), 1)
+        
+        # Calculate the smallest error for each sample
+        min_loss_values, min_loss_indices = torch.min(loss_matrix, 1)
+
+        # Calculate the average error over the batch
+        loss = torch.mean(min_loss_values) 
+        loss_sum += loss.item()
+
+        # Backpropagation
+        optimizer.zero_grad()
+        loss.backward()
+        optimizer.step()
+
+        if batch % 200 == 0:
+            start_time = stop_time
+            stop_time = datetime.now()
+            duration = stop_time - start_time
+            loss, current = loss.item(), batch * len(recUV)
+            ave_loss = loss_sum / (1.0 * (batch+1))
+            print(f"ave loss: {ave_loss:>9e}, last loss: {loss:>7f}  [{current:>5d}/{size:>5d}]", " ", duration.seconds, "seconds since last print out.")
+
+        if (batch % 1000 == 0) and (not (batch==0)):
+            if epoch <= 1:
+                my_loss = test_loop(validation_dataloader, model, loss_fn)
+                filename = 'epoch_%04d_' % (epoch) + 'batch%07d.pt' %(batch)
+                final_checkpoint_path = os.path.join(checkpoint_path, filename)    
+                torch.save({
+                    'epoch': epoch,
+                    'model_state_dict': model.state_dict(),
+                }, final_checkpoint_path)
+            
+
+def test_loop(dataloader, model, loss_fn):
+    model.eval()
+    size = len(dataloader.dataset)
+    num_batches = len(dataloader)
+    test_loss = 0
+    nofilter_loss = 0
+
+    with torch.no_grad():
+        for QP, recY, recUV, predUV, bsUV, origUV in dataloader:
+            my_device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
+            QP = QP.to(my_device)
+            recY = recY.to(my_device)
+            recUV = recUV.to(my_device)
+            predUV = predUV.to(my_device)
+            bsUV = bsUV.to(my_device)
+            origUV = origUV.to(my_device)
+            
+            # Compute prediction and loss
+            filteredUV = model(input=recY, rec=recUV, input0=predUV, input1=bsUV, qp=QP)  
+
+            r_bar = origUV - recUV
+            r_hat = predUV - recUV
+            
+            test_loss += loss_fn(r_hat, r_bar).item()
+            nofilter_loss += ((r_bar)**2).mean().item()
+
+    test_loss /= num_batches
+    nofilter_loss /= num_batches
+    print(f"Test Error: \n Avg loss: {test_loss:e}, Nofilter loss: {nofilter_loss:e} \n")
+    return test_loss
+    
+
+
+def train(args):
+    """End-to-end training driver: build data loaders, create/restore the
+    model, then run L1-phase epochs followed by MSE-phase epochs, saving a
+    checkpoint after every epoch.
+    """
+    stop_time = datetime.now()       
+
+    # Start by finding out if we have cuda:
+    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
+
+    # load training and validation data
+    generate_type = 1    
+    poc_list = args.poc_list
+    components_luma = "rec_before_dbf_Y" 
+    components_chroma = "org_U,org_V,rec_before_dbf_U,rec_before_dbf_V,pred_U,pred_V,bs_U,bs_V,qp_slice"     
+    comps_luma = components_luma.split(",")
+    comps_chroma = components_chroma.split(",")
+    
+    train_dl=data_loader.DataLoader1(args.input_json_train,args.patch_size,poc_list,generate_type,comps_luma,comps_chroma) 
+    train_dataloader = DataLoader(train_dl, batch_size=args.batchsize, shuffle=True, num_workers=args.num_workers, pin_memory=True)
+    
+    poc_list_valid = args.poc_list_valid
+    valid_dl=data_loader.DataLoader1(args.input_json_valid,args.patch_size,poc_list_valid,generate_type,comps_luma,comps_chroma) 
+    validation_dataloader = DataLoader(valid_dl, batch_size=args.batchsize, shuffle=True, num_workers=args.num_workers, pin_memory=True)
+    
+    print("Nb train samples (frames) available: {}".format(len(train_dl)))
+    print("Nb valid samples (frames) available: {}".format(len(valid_dl)))
+    print("Available components: {}".format(train_dl.components))     
+    print("Selected luma components: {}".format(comps_luma))
+    print("Selected Chroma components: {}".format(comps_chroma))  
+            
+    # Load model
+    model_loaded = ConditionalNet()
+    print('Use self-defined model')
+
+    model_loaded = model_loaded.train()
+
+    if not args.load_ckp_path == "":
+        # load checkpoint
+        checkpoint = torch.load(args.load_ckp_path, map_location=torch.device('cpu'))
+        # torch load checkpoint
+        model_loaded.load_state_dict(checkpoint['model_state_dict'])
+        read_epoch = checkpoint['epoch']
+        # We should start at the read_epoch + 1.
+        start_epoch = read_epoch+1
+        print('The loaded epoch number was', start_epoch-1, 'so we will start training at epoch number', start_epoch)
+        # NOTE(review): intra-epoch checkpoints written by train_loop contain
+        # only 'epoch' and 'model_state_dict', so reading checkpoint['loss']
+        # here raises KeyError for those files -- confirm only end-of-epoch
+        # checkpoints are passed to --load_ckp_path.  Also the optimizer
+        # state is never restored, so Adam moments restart from zero.
+        my_loss = checkpoint['loss']
+        # Not sure we need to do this again but better be safe than sorry
+        model_loaded = model_loaded.train()
+    else:
+        # reset weights
+        # Uniform re-init in [-0.0125, 0.0125) for every parameter
+        # (including PReLU slopes), replacing PyTorch's defaults.
+        with torch.no_grad():
+            for name, param in model_loaded.named_parameters():
+                param.data = 0.025*(torch.rand(param.shape)-0.5)
+        start_epoch = 1
+
+    model_loaded = model_loaded.to(device)
+
+    learning_rate = args.learning_rate
+    print('Using learning rate', learning_rate, 'and beta2 = 0.999')
+        
+    optimizer = torch.optim.Adam(model_loaded.parameters(), lr=learning_rate, betas=(0.9, 0.999))
+
+    if args.loss_function == 'L1':
+        loss_fn = nn.L1Loss()
+        print('Using L1 loss function.')
+    elif args.loss_function == 'MSE':
+        loss_fn = nn.MSELoss()
+        print('Using MSE loss function.')
+    else:
+        print("Loss function is not one of 'L1' or 'MSE' , exiting.")
+        exit(1)
+        
+    checkpoint_path = args.save_ckp_path 
+    checkpoint_path = os.path.join(checkpoint_path, args.tag)
+    os.makedirs(checkpoint_path, exist_ok=True)
+
+    print('before training:')
+    my_loss = test_loop(validation_dataloader, model_loaded, loss_fn)
+    # Epoch loop: L1 phase until mse_epochs, then switch loss to MSE and
+    # drop the learning rate to args.mse_lr.
+    for epoch in range(start_epoch, args.epochs+1):
+        print('epoch', epoch)
+        if epoch < args.mse_epochs and args.loss_function == 'L1':
+            train_loop(train_dataloader, model_loaded, loss_fn, optimizer, validation_dataloader, epoch, checkpoint_path)
+        else:      
+            print('Use MSE loss function from epoch', args.mse_epochs )
+            loss_fn = nn.MSELoss()
+            # NOTE(review): the learning rate is re-set every MSE epoch;
+            # harmless since the value is constant, but redundant.
+            adjust_learning_rate(optimizer, args.mse_lr)
+            train_loop_MSE(train_dataloader, model_loaded, loss_fn, optimizer, validation_dataloader, epoch, checkpoint_path)
+        
+        my_loss = test_loop(validation_dataloader, model_loaded, loss_fn)
+        filename = 'epoch_%04d.pt' % (epoch)
+        final_checkpoint_path = os.path.join(checkpoint_path, filename)    
+        torch.save({
+            'epoch': epoch,
+            'model_state_dict': model_loaded.state_dict(),
+            'optimizer_state_dict': optimizer.state_dict(),
+            'loss': my_loss,
+        }, final_checkpoint_path)
+        
+
+if __name__ == '__main__':
+    """Parses command line arguments."""
+    parser = argparse.ArgumentParser(description='Training script')
+
+    parser.add_argument(
+        "--verbose", "-V", action="store_true",
+        help="Report progress and metrics when training or compressing.")
+        
+    # training/validation data  
+    parser.add_argument("--num_workers", type=int, default=16, help="Number of workers for training data.")  
+    parser.add_argument("--input_json_train", action="store", nargs='?', type=str, help="input training json database.")
+    parser.add_argument("--input_json_valid", action="store", nargs='?', type=str, help="input validation json database.")
+    # Default trains on even POCs 0..62 and validates on POC 0 only.
+    parser.add_argument('--poc_list', nargs="*", type=int, default=range(0,64,2), help='pocs of frames for training (e.g. --poc_list 1 3 5).')
+    parser.add_argument('--poc_list_valid', nargs="*", type=int, default=[0], help='pocs of frames for validation (e.g. --poc_list 1 3 5).')
+    parser.add_argument("--patch_size", action="store", nargs='?', default=256, type=int, help="patch size to extract")
+    parser.add_argument("--batchsize", type=int, default=32, help="Batch size for training and validation.")
+    
+    # optimizer configuration
+    parser.add_argument("--learning_rate", type=float, default=1e-4, help="learning for training.")
+    parser.add_argument("--loss_function", type=str, default = 'MSE', help="The loss function, 'L1' or 'MSE'.")
+    parser.add_argument("--epochs", type=int, default=400, help="Train up to this number of epochs. ")
+    parser.add_argument("--mse_epochs", type=int, default=300, help="switch to the mse loss and decrease learning rate at mse_epoch. Should be smaller than epoch.")
+    parser.add_argument('--mse_lr', type=float, default=1e-5, help='Learning rate for MSE.')
+        
+    # save and load checkpoint    
+    parser.add_argument("--save_ckp_path", default="./checkpoints/", help="Path to save checkpoint.")
+    parser.add_argument("--load_ckp_path", default="", help="Path to load pretrained checkpoint.")
+    
+    # a unique name for the training job
+    parser.add_argument("--tag", default="_mytest",help="Tag for the current training. Checkpoints are saved under the folder save_ckp_path+tag.")  
+
+    args = parser.parse_args()
+    print(args)
+
+    train(args)