diff --git a/README.md b/README.md
index a15db5bf49799c1a17ec0db1c59a0e5ccb7a1bc8..0145c06178e1cb1d7dbf76a7fc51860459c95d32 100644
--- a/README.md
+++ b/README.md
@@ -13,10 +13,12 @@ Content
 ==================
 The package contains the following components:
 - a base codec based on VTM-11.0_nnvc (VTM-11.0 + JVET-V0056 patch)
-- a data dumping feature at the decoder, activated with the macro NNVC\_DUMP\_DATA in TypeDef.h. Encoder log is also slightly changed in order to log some useful information.
+- a data dumping feature at the decoder, activated with the macro NNVC\_DUMP\_DATA in TypeDef.h. The encoder log is also slightly modified to record some useful information
 - a data loading feature in python, compatible with the data dumped by the decoder
-- training scripts xxxx
 - an inference feature based on SADL supporting both float and int16 models
+- a common API supporting input preparation and inference of NN-based models
+- two NN-based loop filter sets, activated with the command-line parameter --NnlfOption (0: disable NN filters; 1: use NN-based loop filter set 0; 2: use NN-based loop filter set 1)
+- training scripts for the two NN-based loop filter sets
 
 Build instructions
 ==================
@@ -323,11 +325,9 @@ patch_size=128
 poc_list=0
 
 # If `generate_type` is equal to 0, the data loading
-# has some specificities, e.g. normalization, for JVET-AA0088
-# (Tencent).
+# has some specificities, e.g. normalization, for NN-based loop filter set 0.
 # If `generate_type` is equal to 1, the data loading
-# has some specificities, e.g. normalization, for JVET-AA0111
-# (Bytedance).
+# has some specificities, e.g. normalization, for NN-based loop filter set 1.
 generate_type=1
 border_size=8
 
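For orientation, these settings map directly onto the arguments of the Python DataLoader modified later in this patch; a minimal sketch, in which the json filename and the exact poc_list format are assumptions:

```python
# Minimal sketch mapping the config above onto the DataLoader constructor in
# training/data_loader/data_loader.py. The json filename and the exact format
# expected for poc_list are assumptions, not part of the patch.
from data_loader import DataLoader

dl = DataLoader(
    "dataset.json",   # json describing the data dumped by the decoder
    128,              # patch_size, in luma samples
    [0],              # poc_list
    1,                # generate_type: 1 -> NN-based loop filter set 1
)
print(dl.nb_patches())
```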
@@ -369,21 +369,28 @@ The loaded data are put into one binary file with all the patches inside. This f
 Finally, a sample program can be used to visualize the resulting dumped patches.
 
 
-NN-filter control
+Common API
 ==================
+source/Lib/CommonLib/NNInference is a common API for unifying the input preparation and inference of NN models.
+The common API supports preparing inputs from dumped information such as reconstruction, prediction, partitioning, boundary strength, base QP, slice QP, and slice type.
+Currently, the input preparation and inference of the two NN-based loop filter sets are based on this common API.
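As a purely conceptual illustration of what such input preparation amounts to, here is a small Python sketch that stacks this kind of information into model input planes; it does not mirror the actual C++ NNInference code, and all names, shapes, and normalization constants are assumptions.

```python
# Conceptual sketch only (NOT the C++ NNInference API): pack per-sample planes
# and per-slice scalars into one multi-channel input block for a model.
# All names, shapes, and normalization constants here are assumptions.
import numpy as np

H, W = 128, 128                              # patch size (made-up value)
rec  = np.random.randint(0, 1024, (H, W))    # reconstruction, 10-bit
pred = np.random.randint(0, 1024, (H, W))    # prediction, 10-bit
bs   = np.random.randint(0, 3, (H, W))       # boundary strength
base_qp, slice_qp = 32, 37                   # per-slice scalars

planes = np.stack([
    rec / 1023.0,                            # sample planes scaled to [0, 1]
    pred / 1023.0,
    bs / 2.0,
    np.full((H, W), base_qp / 63.0),         # scalars broadcast to full planes
    np.full((H, W), slice_qp / 63.0),        # (63 is the maximum QP in VVC)
]).astype(np.float32)

print(planes.shape)                          # (5, H, W): the model input block
```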
 
-NN-filters are disabled by default (--NnlfOption=0).
 
-NN-filter set 0
+NN-based loop filter
+==================
+
+There are two NN-based loop filter sets in the common software, which are both disabled by default (--NnlfOption=0).
+
+NN-based loop filter set 0
 ----------------------------------------------
-To activate NN-filter set 0, use --NnlfOption=1, or equivalently -c cfg/nn-based/NnlfOption\_1.cfg
+To activate NN-based loop filter set 0, use --NnlfOption=1, or equivalently -c cfg/nn-based/NnlfOption\_1.cfg
 
 To specify a model path, use e.g. --ModelPath="models/", or equivalently -mp "models/". Note that model path should be specified at both encoder and decoder.
 
 
-NN-filter set 1
+NN-based loop filter set 1
 ----------------------------------------------
-To activate NN-filter set 1, use --NnlfOption=2, or equivalently -c cfg/nn-based/NnlfOption\_2.cfg
+To activate NN-based loop filter set 1, use --NnlfOption=2, or equivalently -c cfg/nn-based/NnlfOption\_2.cfg
 
 To specify model paths, use e.g. the following command lines. Note that model paths should be specified at both encoder and decoder.
 
diff --git a/training/data_loader/data_loader.py b/training/data_loader/data_loader.py
index 3a1dc555590da821a4f88f34f00af268a1fe2e27..150009859d1df0123f7ed6a26b69d950b40295e1 100644
--- a/training/data_loader/data_loader.py
+++ b/training/data_loader/data_loader.py
@@ -55,7 +55,7 @@ class DataLoader:
     suffix={} # suffix for each file
     
     # patch_size in luma sample
-    def __init__(self, jsonfile, patch_size, poc_list, generate_type = 0, qp_filter=-1, slice_type_filter=-1):
+    def __init__(self, jsonfile, patch_size, poc_list, generate_type, qp_filter=-1, slice_type_filter=-1):
         self.generate_type=generate_type
         if self.generate_type == 0:
             self.normalizer_rec  = 1023.0
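As an aside, a tiny illustration (with made-up sample values) of what the normalizer_rec constant above does for generate_type == 0: 10-bit reconstruction samples are scaled to the [0, 1] range.

```python
# Illustration only: 10-bit samples scaled to [0, 1] via normalizer_rec,
# as in the generate_type == 0 branch above. Sample values are made up.
samples_10bit = [0, 512, 1023]
normalizer_rec = 1023.0
print([s / normalizer_rec for s in samples_10bit])  # [0.0, 0.5004..., 1.0]
```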
diff --git a/training/example/create_unified_dataset.py b/training/example/create_unified_dataset.py
index 18f738f91a184862f0edfd1466ce25eb4f79c80e..148aa2144382d465eb5f9460829205e7c8d8d619 100644
--- a/training/example/create_unified_dataset.py
+++ b/training/example/create_unified_dataset.py
@@ -20,11 +20,11 @@ parser.add_argument("--components", action="store", nargs='?', type=str, help="c
 parser.add_argument("--nb_patches", action="store", default=1, nargs='?', type=int, help="nb patches to extract, nb_patches=-1 means extracting all patches")
 parser.add_argument("--output_file", action="store", nargs='?', type=str, help="output binary file (patches in float format)")
 parser.add_argument('--random_sample', type=int, default=1, help='whether to sample randomly')
-parser.add_argument('--generate_type', type=int, default=0, help='0 ~ using some specifications for the data loading of JVET-AA0088; 1 ~ using some specifications for the data loading of JVET-A0111')
+parser.add_argument('--generate_type', type=int, help='0 ~ use the data loading specificities of NN-based loop filter set 0; 1 ~ use the data loading specificities of NN-based loop filter set 1', required=True)
 args = parser.parse_args()
 
 
-dl=data_loader.DataLoader(args.input_json,args.patch_size,args.poc_list,generate_type=args.generate_type)
+dl=data_loader.DataLoader(args.input_json,args.patch_size,args.poc_list,args.generate_type)
 
 print("Nb samples available: {}".format(dl.nb_patches()))
 print("Available components: {}".format(dl.components))