diff --git a/training/training_scripts/NN_Filtering_HOP/config.json b/training/training_scripts/NN_Filtering_HOP/config.json
index 3f012ee0bce7a69606f863befc7cea44884bda3c..04540633ce59c62cf18057645fbbd4dfe2f2fe21 100644
--- a/training/training_scripts/NN_Filtering_HOP/config.json
+++ b/training/training_scripts/NN_Filtering_HOP/config.json
@@ -130,7 +130,7 @@
     "enc_dir" : "enc_bvi",
     "dec_dir" : "dec_bvi",
     "cfg_dir" : "cfg_bvi",
-    "qps" : "22 27 32 37",
+    "qps" : "22 27 32 37 42",
     "vtm_enc" : "/path/to/src5.1/bin/EncoderAppStatic",
     "vtm_dec" : "/path/to/src5.1/bin/DecoderAppStatic",
     "vtm_cfg" : "/path/to/src5.1/cfg/encoder_randomaccess_vtm.cfg",
@@ -146,7 +146,7 @@
     "enc_dir" : "enc_bvi_valid",
     "dec_dir" : "dec_bvi_valid",
     "cfg_dir" : "cfg_bvi_valid",
-    "qps" : "22 27 32 37",
+    "qps" : "22 27 32 37 42",
     "vtm_enc" : "/path/to/src5.1/bin/EncoderAppStatic",
     "vtm_dec" : "/path/to/src5.1/bin/DecoderAppStatic",
     "vtm_cfg" : "/path/to/src5.1/cfg/encoder_randomaccess_vtm.cfg",
@@ -162,7 +162,7 @@
     "enc_dir" : "enc_tvd",
     "dec_dir" : "dec_tvd",
     "cfg_dir" : "cfg_tvd",
-    "qps" : "22 27 32 37",
+    "qps" : "22 27 32 37 42",
     "vtm_enc" : "/path/to/src5.1/bin/EncoderAppStatic",
     "vtm_dec" : "/path/to/src5.1/bin/DecoderAppStatic",
     "vtm_cfg" : "/path/to/src5.1/cfg/encoder_randomaccess_vtm.cfg",
@@ -178,7 +178,7 @@
     "enc_dir" : "enc_tvd_valid",
     "dec_dir" : "dec_tvd_valid",
     "cfg_dir" : "cfg_tvd_valid",
-    "qps" : "22 27 32 37",
+    "qps" : "22 27 32 37 42",
     "vtm_enc" : "/path/to/src5.1/bin/EncoderAppStatic",
     "vtm_dec" : "/path/to/src5.1/bin/DecoderAppStatic",
     "vtm_cfg" : "/path/to/src5.1/cfg/encoder_randomaccess_vtm.cfg",
@@ -263,7 +263,7 @@
     "enc_dir" : "enc_bvi",
     "dec_dir" : "dec_bvi",
     "cfg_dir" : "cfg_bvi",
-    "qps" : "22 27 32 37",
+    "qps" : "22 27 32 37 42",
     "vtm_enc" : "/path/to/src5.1/bin/EncoderAppStatic",
     "vtm_dec" : "/path/to/src5.1/bin/DecoderAppStatic",
     "vtm_cfg" : "/path/to/src5.1/cfg/encoder_randomaccess_vtm.cfg",
@@ -279,7 +279,7 @@
     "enc_dir" : "enc_bvi_valid",
     "dec_dir" : "dec_bvi_valid",
     "cfg_dir" : "cfg_bvi_valid",
-    "qps" : "22 27 32 37",
+    "qps" : "22 27 32 37 42",
     "vtm_enc" : "/path/to/src5.1/bin/EncoderAppStatic",
     "vtm_dec" : "/path/to/src5.1/bin/DecoderAppStatic",
     "vtm_cfg" : "/path/to/src5.1/cfg/encoder_randomaccess_vtm.cfg",
@@ -295,7 +295,7 @@
     "enc_dir" : "enc_tvd",
     "dec_dir" : "dec_tvd",
     "cfg_dir" : "cfg_tvd",
-    "qps" : "22 27 32 37",
+    "qps" : "22 27 32 37 42",
     "vtm_enc" : "/path/to/src5.1/bin/EncoderAppStatic",
     "vtm_dec" : "/path/to/src5.1/bin/DecoderAppStatic",
     "vtm_cfg" : "/path/to/src5.1/cfg/encoder_randomaccess_vtm.cfg",
@@ -311,7 +311,7 @@
     "enc_dir" : "enc_tvd_valid",
     "dec_dir" : "dec_tvd_valid",
     "cfg_dir" : "cfg_tvd_valid",
-    "qps" : "22 27 32 37",
+    "qps" : "22 27 32 37 42",
     "vtm_enc" : "/path/to/src5.1/bin/EncoderAppStatic",
     "vtm_dec" : "/path/to/src5.1/bin/DecoderAppStatic",
     "vtm_cfg" : "/path/to/src5.1/cfg/encoder_randomaccess_vtm.cfg",
diff --git a/training/training_scripts/NN_Filtering_HOP/readme.md b/training/training_scripts/NN_Filtering_HOP/readme.md
index 12ec19d271dab947f16c142452e0ac357aee7926..f997a16529222df9829b87dbd4b8a263f4095ac0 100644
--- a/training/training_scripts/NN_Filtering_HOP/readme.md
+++ b/training/training_scripts/NN_Filtering_HOP/readme.md
@@ -10,7 +10,7 @@ Other keys should not be edited except for testing reasons.
 
 ## I- Model Stage I
 
-Total size required for stage1 (without deleting intermediate data is about 3312GB).
+Total size required for stage1 (without deleting intermediate data) is about 3312GB.
 
 ### A- Data extraction for intra from vanilla VTM
 #### 1. Dataset preparation - div2k conversion
@@ -175,6 +175,8 @@ The flag ``NnlfHopDebugOption`` is also needed at decoder since it forces the us
 
 ## II- Model Stage 2
 
+Total size required for stage2 (without deleting intermediate data) is about 5TB.
+
 ### A- Data extraction
 
 #### 1. Dataset preparation - bvi/tvd conversion
@@ -217,22 +219,22 @@ It will generate the cfg files for the dataset and a shell script to encode and
 Loop on all sequences to encode, for example:
 ```sh
 cd stage2/encdec;
-for((i=0;i<N1;i++)); do
+for((i=0;i<90;i++)); do
 ./encode_decode_dataset_tvd.sh $i;
 done
-for((i=0;i<N2;i++)); do
+for((i=0;i<10;i++)); do
 ./encode_decode_dataset_tvd_valid.sh $i;
 done
-for((i=0;i<N3;i++)); do
+for((i=0;i<3025;i++)); do
 ./encode_decode_dataset_bvi.sh $i;
 done
-for((i=0;i<N4;i++)); do
+for((i=0;i<75;i++)); do
 ./encode_decode_dataset_bvi_valid.sh $i;
 done
 ```
-or you can use the script to encode on your cluster. N is the number of sequences (run ./encode_decode_dataset.sh to get the value N).
+or you can use the scripts to encode on your cluster (each loop bound above is the number of sequences in the corresponding dataset).
 
-** Note ** The size requirement is: about 2.6TB for the dumped data.
+**Note:** The size requirement is about 3.3TB for the dumped data.
 
 #### 4. Create consolidated datasets
 
@@ -244,7 +246,7 @@ done
 ```
 It will generate a unique dataset for each dataset in ["stage2"]["encdec"]["path"] from all individual datasets in ["stage2"]["encdec_xxx"]["path"]/["dump_dir"] and encoder logs in ["stage2"]["encdec_xxx"]["enc_dir"].
 
-** Note ** The size requirement is: about 1.3TB for the datasets.
+**Note:** The size requirement is about 1.6TB for the datasets.
 
 #### 5a. Create offline datasets with all batches
 
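
A quick way to sanity-check the config change after applying this patch: the minimal sketch below (it assumes `jq` is installed and is not one of the repo's scripts) prints every `qps` string in config.json together with its JSON path, so the eight data-generation entries extended above can be confirmed to read "22 27 32 37 42".

```sh
# Print the JSON path and value of every "qps" key found at any depth
# in config.json; the eight entries touched by this patch should now
# all end in 42.
jq -r 'paths as $p
       | select($p[-1] == "qps")
       | "\($p | map(tostring) | join(".")) = \(getpath($p))"' \
  training/training_scripts/NN_Filtering_HOP/config.json
```

The exact key paths printed depend on the config's nesting; since the readme refers to these blocks as ["stage2"]["encdec_xxx"], output lines of the shape `stage2.encdec_bvi.qps = 22 27 32 37 42` would be expected.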