From 8be6ea5650812bfcdd9c602db5a20cb55fadf279 Mon Sep 17 00:00:00 2001
From: Shawn Carere
Date: Fri, 12 Jul 2024 12:16:11 -0400
Subject: [PATCH] Created new logger to use instead of root logger

---
 .../dataloading/multi_threaded_augmenter.py | 17 +++++++++--------
 .../nondet_multi_threaded_augmenter.py      | 11 ++++++-----
 batchgenerators/utilities/logger.py         |  8 ++++++++
 3 files changed, 23 insertions(+), 13 deletions(-)
 create mode 100644 batchgenerators/utilities/logger.py

diff --git a/batchgenerators/dataloading/multi_threaded_augmenter.py b/batchgenerators/dataloading/multi_threaded_augmenter.py
index 6006fbe..c1cc7c7 100755
--- a/batchgenerators/dataloading/multi_threaded_augmenter.py
+++ b/batchgenerators/dataloading/multi_threaded_augmenter.py
@@ -20,10 +20,11 @@
 from queue import Queue as thrQueue
 import numpy as np
 import sys
-import logging
+from logging import INFO, DEBUG, ERROR
 from multiprocessing import Event
 from time import sleep, time
 from threadpoolctl import threadpool_limits
+from batchgenerators.utilities.logger import log
 
 try:
     import torch
@@ -72,7 +73,7 @@ def results_loop(in_queues: List[Queue], out_queue: thrQueue, abort_event: Event
     do_pin_memory = torch is not None and pin_memory and gpu is not None and torch.cuda.is_available()
 
     if do_pin_memory:
-        print('using pin_memory on device', gpu)
+        log(INFO, f'using pin_memory on device {gpu}')
         torch.cuda.set_device(gpu)
 
     item = None
@@ -208,7 +209,7 @@ def __next__(self):
                 if self._end_ctr == self.num_processes:
                     self._end_ctr = 0
                     self._queue_ctr = 0
-                    logging.debug("MultiThreadedGenerator: finished data generation")
+                    log(DEBUG, "MultiThreadedGenerator: finished data generation")
                     raise StopIteration
 
                 item = self.__get_next_item()
@@ -216,7 +217,7 @@ def __next__(self):
             return item
 
         except KeyboardInterrupt:
-            logging.error("MultiThreadedGenerator: caught exception: {}".format(sys.exc_info()))
+            log(ERROR, "MultiThreadedGenerator: caught exception: {}".format(sys.exc_info()))
             self.abort_event.set()
             self._finish()
             raise KeyboardInterrupt
@@ -226,7 +227,7 @@ def _start(self):
             self._finish()
             self.abort_event.clear()
 
-            logging.debug("starting workers")
+            log(DEBUG, "starting workers")
             self._queue_ctr = 0
             self._end_ctr = 0
 
@@ -258,7 +259,7 @@ def _start(self):
 
             self.was_initialized = True
         else:
-            logging.debug("MultiThreadedGenerator Warning: start() has been called but it has already been "
+            log(DEBUG, "MultiThreadedGenerator Warning: start() has been called but it has already been "
                           "initialized previously")
 
     def _finish(self, timeout=10):
@@ -269,7 +270,7 @@ def _finish(self, timeout=10):
             sleep(0.2)
 
         if len(self._processes) != 0:
-            logging.debug("MultiThreadedGenerator: shutting down workers...")
+            log(DEBUG, "MultiThreadedGenerator: shutting down workers...")
             [i.terminate() for i in self._processes]
 
             for i, p in enumerate(self._processes):
@@ -290,5 +291,5 @@ def restart(self):
         self._start()
 
     def __del__(self):
-        logging.debug("MultiThreadedGenerator: destructor was called")
+        log(DEBUG, "MultiThreadedGenerator: destructor was called")
         self._finish()
diff --git a/batchgenerators/dataloading/nondet_multi_threaded_augmenter.py b/batchgenerators/dataloading/nondet_multi_threaded_augmenter.py
index 530eba1..4cba446 100755
--- a/batchgenerators/dataloading/nondet_multi_threaded_augmenter.py
+++ b/batchgenerators/dataloading/nondet_multi_threaded_augmenter.py
@@ -22,11 +22,12 @@
 from multiprocessing import Queue
 from queue import Queue as thrQueue
 import numpy as np
-import logging
+from logging import DEBUG, INFO
 from multiprocessing import Event
 from time import sleep, time
 from batchgenerators.dataloading.data_loader import DataLoader
+from batchgenerators.utilities.logger import log
 from threadpoolctl import threadpool_limits
 
 try:
     import torch
@@ -87,7 +88,7 @@ def results_loop(in_queue: Queue, out_queue: thrQueue, abort_event: Event,
     do_pin_memory = torch is not None and pin_memory and gpu is not None and torch.cuda.is_available()
 
     if do_pin_memory:
-        print('using pin_memory on device', gpu)
+        log(INFO, f'using pin_memory on device {gpu}')
         torch.cuda.set_device(gpu)
 
     item = None
@@ -204,7 +205,7 @@ def _start(self):
             self.results_loop_queue = thrQueue(self.num_cached)
             self.abort_event = Event()
 
-            logging.debug("starting workers")
+            log(DEBUG, "starting workers")
 
             if isinstance(self.generator, DataLoader):
                 self.generator.was_initialized = False
@@ -237,7 +238,7 @@ def _start(self):
 
             self.initialized = True
         else:
-            logging.debug("MultiThreadedGenerator Warning: start() has been called but workers are already running")
+            log(DEBUG, "MultiThreadedGenerator Warning: start() has been called but workers are already running")
 
     def _finish(self):
         if self.initialized:
@@ -255,7 +256,7 @@ def restart(self):
         self._start()
 
     def __del__(self):
-        logging.debug("MultiThreadedGenerator: destructor was called")
+        log(DEBUG, "MultiThreadedGenerator: destructor was called")
         self._finish()
 
diff --git a/batchgenerators/utilities/logger.py b/batchgenerators/utilities/logger.py
new file mode 100644
index 0000000..18eb353
--- /dev/null
+++ b/batchgenerators/utilities/logger.py
@@ -0,0 +1,8 @@
+import logging
+
+# Create a logger to use instead of the root logger
+logger = logging.getLogger('batchgen')
+handler = logging.StreamHandler()
+logger.setLevel(logging.DEBUG)
+logger.addHandler(handler)
+log = logger.log  # Useful as it can be imported by other files
\ No newline at end of file
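
Usage sketch (assumes the patch above is applied; only the logger name 'batchgen' and the module batchgenerators/utilities/logger.py come from the patch, the variable names below are illustrative): because handlers and levels now hang off a dedicated logger rather than the root logger, calling code can tune batchgenerators' verbosity without touching the rest of the application's logging, for example:

    import logging

    # The dedicated logger created in batchgenerators/utilities/logger.py
    batchgen_logger = logging.getLogger('batchgen')

    # Show INFO and above (e.g. the "using pin_memory on device ..." message)
    # without reconfiguring the root logger.
    batchgen_logger.setLevel(logging.INFO)

    # To silence the library's DEBUG/INFO output instead:
    # batchgen_logger.setLevel(logging.WARNING)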