From cc7d754687c275b7883840f6ac6a7fe791743af4 Mon Sep 17 00:00:00 2001 From: David Karhi Date: Fri, 19 May 2023 17:27:45 -0500 Subject: [PATCH] STYLE: Fix lightning deprecation warnings I'm fixing a few issues that would prevent upgrading the pytorch-lightning package. Trainer.lr_schedulers is being replaced with Trainer.lr_scheduler_configs. In future versions, using Trainer.logger with multiple loggers will only return the first available logger, so I'm iterating through Trainer.loggers instead. And LightningLoggerBase.close is being replaced with LightningLoggerBase.finalize. Closes #751 --- InnerEye/ML/lightning_base.py | 2 +- InnerEye/ML/model_training.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/InnerEye/ML/lightning_base.py b/InnerEye/ML/lightning_base.py index 93ee23e81..7be71e101 100644 --- a/InnerEye/ML/lightning_base.py +++ b/InnerEye/ML/lightning_base.py @@ -395,5 +395,5 @@ def write_loss(self, is_training: bool, loss: torch.Tensor) -> None: assert isinstance(self.trainer, Trainer) self.log_on_epoch(MetricType.LOSS, loss, is_training) if is_training: - learning_rate = self.trainer.lr_schedulers[0]['scheduler'].get_last_lr()[0] + learning_rate = self.trainer.lr_scheduler_configs[0].scheduler.get_last_lr()[0] # type: ignore self.log_on_epoch(MetricType.LEARNING_RATE, learning_rate, is_training) diff --git a/InnerEye/ML/model_training.py b/InnerEye/ML/model_training.py index ce0b8aca2..f893b9524 100644 --- a/InnerEye/ML/model_training.py +++ b/InnerEye/ML/model_training.py @@ -266,7 +266,8 @@ def model_train(checkpoint_path: Optional[Path], logging.info("Starting training") trainer.fit(lightning_model, datamodule=data_module) - trainer.logger.close() # type: ignore + for logger in trainer.loggers: + logger.finalize("success") world_size = getattr(trainer, "world_size", 0) is_azureml_run = not is_offline_run_context(RUN_CONTEXT)