diff --git a/training/tools/dataset_to_encoding_script.py b/training/tools/dataset_to_encoding_script.py
index 59a99ab0794fa57ac353600331c6af5fc770ef42..042a95b01cc9e0b63c3153f36c8c501fb3a6ca39 100644
--- a/training/tools/dataset_to_encoding_script.py
+++ b/training/tools/dataset_to_encoding_script.py
@@ -212,10 +212,14 @@ for seq in dataset:
     echo "[CACHED] {basename}_QP{qp} already encoded"
  fi
  if [ ! -f ${{DECDIR}}/{basename}_QP{qp}.log ]; then
+    if [ -f ${{ENCDIR}}/{basename}_QP{qp}.bin ] && [ -f ${{ENCDIR}}/{basename}_QP{qp}.log ]; then
     $DEC -b ${{ENCDIR}}/{basename}_QP{qp}.bin --DumpBasename=${{DUMPDIR}}/{basename}_QP{qp} $OPTDEC  > ${{DECDIR}}/{basename}_QP{qp}.log
     echo "[INFO] {basename}_QP{qp} decoded"
  else
-    echo "[CACHED] {basename}_QP{qp} already decoded"
+      echo "[INFO] skipped decoding (no bitstream)";
+    fi;
+ else
+    echo "[CACHED] {basename}_QP{qp} already decoded";
  fi
 fi
 
diff --git a/training/training_scripts/NN_Filtering_HOP/training/trainer.py b/training/training_scripts/NN_Filtering_HOP/training/trainer.py
index 3834ab27d2ddba8a77614b7b6256abca40040bc8..d92c91ccfec202e3643e3c94023244db9216d54c 100644
--- a/training/training_scripts/NN_Filtering_HOP/training/trainer.py
+++ b/training/training_scripts/NN_Filtering_HOP/training/trainer.py
@@ -65,7 +65,7 @@ class Trainer:
         self.device = self.config_training["device"] or (
             torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
         )
-
+        print(f"[INFO] tf32 {torch.backends.cuda.matmul.allow_tf32} {torch.backends.cudnn.allow_tf32}")
         self.base_dir = self.config_training["path"]
         self.save_dir = os.path.join(self.base_dir, self.config_training["ckpt_dir"])
         os.makedirs(self.save_dir, exist_ok=True)