From f3b9b200fce04d7b98e2d31029b06be0d5e74aaa Mon Sep 17 00:00:00 2001
From: Franck Galpin <franck.galpin@interdigital.com>
Date: Fri, 9 Jun 2023 15:40:20 +0200
Subject: [PATCH] fix inverted logic in logger interval check

---
 training/training_scripts/NN_Filtering_HOP/training/logger.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/training/training_scripts/NN_Filtering_HOP/training/logger.py b/training/training_scripts/NN_Filtering_HOP/training/logger.py
index a7e59f97d1..229149eb15 100644
--- a/training/training_scripts/NN_Filtering_HOP/training/logger.py
+++ b/training/training_scripts/NN_Filtering_HOP/training/logger.py
@@ -144,7 +144,7 @@ class PrintLogger(BaseLogger):
     def on_train_iter_end(
         self, epoch: int, iteration: int, train_metrics: Dict[str, Any]
     ) -> None:
-        if self.log_train_interval > 0 and (iteration + 1) % self.log_train_interval:
+        if self.log_train_interval > 0 and (iteration + 1) % self.log_train_interval == 0:
             print(
                 f"Epoch {epoch}, iteration {iteration}: {self.format_metrics(train_metrics)}",
                 file=self.out_file,
@@ -280,7 +280,7 @@ class TensorboardLogger(BaseLogger):
     def on_train_iter_end(
         self, epoch: int, iteration: int, train_metrics: Dict[str, Any]
     ) -> None:
-        if self.log_train_interval > 0 and (iteration + 1) % self.log_train_interval:
+        if self.log_train_interval > 0 and (iteration + 1) % self.log_train_interval == 0:
             self.global_iteration += self.log_train_interval
             for metric, value in train_metrics.items():
                 self.writer.add_scalar(
-- 
GitLab
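
For context on the fix: in Python a non-zero remainder is truthy, so the old
condition fired on every iteration EXCEPT multiples of the interval, which is
the inverse of the intended behavior. A minimal standalone sketch of the two
conditions (the interval value of 10 below is an arbitrary example, not taken
from the patch):

    log_train_interval = 10

    for iteration in range(30):
        # Old condition: a non-zero remainder is truthy, so this was True on
        # every iteration whose (iteration + 1) is NOT a multiple of 10,
        # i.e. logging happened 9 times out of every 10 iterations.
        old = log_train_interval > 0 and (iteration + 1) % log_train_interval
        # Fixed condition: True only when (iteration + 1) is an exact multiple
        # of the interval, i.e. once every log_train_interval iterations.
        new = log_train_interval > 0 and (iteration + 1) % log_train_interval == 0
        if new:
            print(f"logs at iteration {iteration}")  # iterations 9, 19, 29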