diff --git a/onmt/inputters/dataloader.py b/onmt/inputters/dataloader.py
index 5f8a99ff..170780e9 100644
--- a/onmt/inputters/dataloader.py
+++ b/onmt/inputters/dataloader.py
@@ -55,7 +55,7 @@ def numel_fn(example_dict):
 class InferenceBatcher():
     """Iterator for inference"""
 
-    def __init__(self, dataset, batch_size, as_iter=False):
+    def __init__(self, dataset, batch_size):
         self.examples_stream = dataset
         self.collate_fn = dataset.collate_fn
         self.batch_size = batch_size
diff --git a/onmt/train_single.py b/onmt/train_single.py
index 628b4128..b396c5f4 100644
--- a/onmt/train_single.py
+++ b/onmt/train_single.py
@@ -106,15 +106,6 @@ def init_distributed(model, task_queue_manager):
         logger.debug(f'{task_queue_manager.node_rank}:{task_queue_manager.local_rank} {name}: {p.flatten()[:10]}')
 
 
-# def iter_on_device(iterator, device_context):
-#     if device_context.is_gpu():
-#         device = torch.device(f'cuda:{device_context.local_rank}')
-#     else:
-#         device = torch.device('cpu')
-#     for batch, meta, comm_batch_id in iterator:
-#         yield batch.to(device), meta, comm_batch_id
-
-
 def main(
     opt,
     vocabs_dict,
@@ -208,8 +199,6 @@ def _train_iter():
     # train_iter = iter_on_device(train_iter, device_context)
     logger.info("Device {} - Valid iter".format(device_context.id))
     valid_iter = _build_valid_iter(opt, vocabs_dict, transforms_cls, task_queue_manager)
-    # if valid_iter is not None:
-    #     valid_iter = iter_on_device(valid_iter, device_context)
 
     if len(opt.gpu_ranks):
         if device_context.is_master():
diff --git a/onmt/trainer.py b/onmt/trainer.py
index e5155cac..f0fa8efd 100644
--- a/onmt/trainer.py
+++ b/onmt/trainer.py
@@ -362,7 +362,7 @@ def train(
                 )
 
                 if step % valid_steps == 0 and valid_iter is not None:
-                    if True or self.gpu_verbose_level > 0:
+                    if self.gpu_verbose_level > 0:
                         logger.info(f'{device_context.node_rank}:{device_context.local_rank} validate step {step}')
                     valid_stats = self.validate(
                         iter_on_device(valid_iter, device_context),