
Commit 9002937

[fix] Be able to display error messages in additional info as it is (automl#225)

* [feat] Be able to display the additional info as it is

* [fix] Fix dict_repr to be able to detect None

* [fix] Fix a phrase
nabenabe0928 authored Jun 30, 2021
1 parent 3f51d8b commit 9002937
Showing 6 changed files with 53 additions and 46 deletions.
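The heart of the change: str(additional_info) flattens the whole dict onto a single log line, while the new dict_repr helper (added to autoPyTorch/utils/common.py at the bottom of this diff) renders one key: value pair per line. A minimal sketch of the difference (the sample dict is made up, but the join expression mirrors the helper below):

    # Hypothetical additional_info, shaped like what a failed run reports.
    additional_info = {
        'exitcode': -6,
        'error': 'MemoryError',
        'traceback': 'Traceback (most recent call last): ...',
    }

    # Before: the dict prints as one long, hard-to-scan line.
    print(str(additional_info))

    # After: one key-value pair per line, so embedded messages keep
    # their line breaks readable in the log.
    print("\n".join("{}: {}".format(k, v) for k, v in additional_info.items()))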
60 changes: 26 additions & 34 deletions autoPyTorch/api/base_task.py
@@ -47,7 +47,7 @@
 from autoPyTorch.pipeline.components.setup.traditional_ml.traditional_learner import get_available_traditional_learners
 from autoPyTorch.pipeline.components.training.metrics.base import autoPyTorchMetric
 from autoPyTorch.pipeline.components.training.metrics.utils import calculate_score, get_metrics
-from autoPyTorch.utils.common import FitRequirement, replace_string_bool_to_bool
+from autoPyTorch.utils.common import FitRequirement, dict_repr, replace_string_bool_to_bool
 from autoPyTorch.utils.hyperparameter_search_space_update import HyperparameterSearchSpaceUpdates
 from autoPyTorch.utils.logging_ import (
     PicklableClientLogger,
@@ -525,39 +525,28 @@ def _do_dummy_prediction(self) -> None:
             all_supported_metrics=self._all_supported_metrics
         )
 
-        status, cost, runtime, additional_info = ta.run(num_run, cutoff=self._time_for_task)
+        status, _, _, additional_info = ta.run(num_run, cutoff=self._time_for_task)
         if status == StatusType.SUCCESS:
             self._logger.info("Finished creating dummy predictions.")
         else:
             if additional_info.get('exitcode') == -6:
-                self._logger.error(
-                    "Dummy prediction failed with run state %s. "
-                    "The error suggests that the provided memory limits were too tight. Please "
-                    "increase the 'ml_memory_limit' and try again. If this does not solve your "
-                    "problem, please open an issue and paste the additional output. "
-                    "Additional output: %s.",
-                    str(status), str(additional_info),
-                )
+                err_msg = "Dummy prediction failed with run state {},\n" \
+                          "because the provided memory limits were too tight.\n" \
+                          "Please increase the 'ml_memory_limit' and try again.\n" \
+                          "If you still get the problem, please open an issue and\n" \
+                          "paste the additional info.\n" \
+                          "Additional info:\n{}.".format(str(status), dict_repr(additional_info))
+                self._logger.error(err_msg)
                 # Fail if dummy prediction fails.
-                raise ValueError(
-                    "Dummy prediction failed with run state %s. "
-                    "The error suggests that the provided memory limits were too tight. Please "
-                    "increase the 'ml_memory_limit' and try again. If this does not solve your "
-                    "problem, please open an issue and paste the additional output. "
-                    "Additional output: %s." %
-                    (str(status), str(additional_info)),
-                )
+                raise ValueError(err_msg)
 
             else:
-                self._logger.error(
-                    "Dummy prediction failed with run state %s and additional output: %s.",
-                    str(status), str(additional_info),
+                err_msg = "Dummy prediction failed with run state {} and additional info:\n{}.".format(
+                    str(status), dict_repr(additional_info)
                 )
+                self._logger.error(err_msg)
                 # Fail if dummy prediction fails.
-                raise ValueError(
-                    "Dummy prediction failed with run state %s and additional output: %s."
-                    % (str(status), str(additional_info))
-                )
+                raise ValueError(err_msg)
 
     def _do_traditional_prediction(self, time_left: int, func_eval_time_limit_secs: int) -> None:
         """
Expand Down Expand Up @@ -652,7 +641,9 @@ def _do_traditional_prediction(self, time_left: int, func_eval_time_limit_secs:
status, cost, runtime, additional_info = future.result()
if status == StatusType.SUCCESS:
self._logger.info(
f"Fitting {cls} took {runtime}s, performance:{cost}/{additional_info}")
"Fitting {} took {} [sec] and got performance: {}.\n"
"additional info:\n{}".format(cls, runtime, cost, dict_repr(additional_info))
)
configuration = additional_info['pipeline_configuration']
origin = additional_info['configuration_origin']
additional_info.pop('pipeline_configuration')
@@ -663,17 +654,18 @@ def _do_traditional_prediction(self, time_left: int, func_eval_time_limit_secs:
             else:
                 if additional_info.get('exitcode') == -6:
                     self._logger.error(
-                        "Traditional prediction for %s failed with run state %s. "
-                        "The error suggests that the provided memory limits were too tight. Please "
-                        "increase the 'ml_memory_limit' and try again. If this does not solve your "
-                        "problem, please open an issue and paste the additional output. "
-                        "Additional output: %s.",
-                        cls, str(status), str(additional_info),
+                        "Traditional prediction for {} failed with run state {},\n"
+                        "because the provided memory limits were too tight.\n"
+                        "Please increase the 'ml_memory_limit' and try again.\n"
+                        "If you still get the problem, please open an issue\n"
+                        "and paste the additional info.\n"
+                        "Additional info:\n{}".format(cls, str(status), dict_repr(additional_info))
                     )
                 else:
                     self._logger.error(
-                        "Traditional prediction for %s failed with run state %s and additional output: %s.",
-                        cls, str(status), str(additional_info),
+                        "Traditional prediction for {} failed with run state {}.\nAdditional info:\n{}".format(
+                            cls, str(status), dict_repr(additional_info)
+                        )
                     )
 
         # In the case of a serial execution, calling submit halts the run for a resource
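Both branches above now build err_msg once and reuse it for the logger call and the ValueError, so the log file and the traceback always carry identical text. A condensed, self-contained sketch of that log-and-raise pattern (fail_with and its arguments are illustrative, not part of the codebase):

    import logging

    logger = logging.getLogger("demo")

    def fail_with(status: str, additional_info: dict) -> None:
        # Format the message once, then emit it twice: once to the log,
        # once in the raised exception, so the two never drift apart.
        err_msg = "Dummy prediction failed with run state {},\nAdditional info:\n{}".format(
            status,
            "\n".join("{}: {}".format(k, v) for k, v in additional_info.items()),
        )
        logger.error(err_msg)
        raise ValueError(err_msg)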
4 changes: 2 additions & 2 deletions autoPyTorch/evaluation/abstract_evaluator.py
@@ -42,7 +42,7 @@
     calculate_loss,
     get_metrics,
 )
-from autoPyTorch.utils.common import subsampler
+from autoPyTorch.utils.common import dict_repr, subsampler
 from autoPyTorch.utils.hyperparameter_search_space_update import HyperparameterSearchSpaceUpdates
 from autoPyTorch.utils.logging_ import PicklableClientLogger, get_named_client_logger
 from autoPyTorch.utils.pipeline import get_dataset_requirements
@@ -537,7 +537,7 @@ def __init__(self, backend: Backend,
         self.Y_actual_train: Optional[np.ndarray] = None
         self.pipelines: Optional[List[BaseEstimator]] = None
         self.pipeline: Optional[BaseEstimator] = None
-        self.logger.debug("Fit dictionary in Abstract evaluator: {}".format(self.fit_dictionary))
+        self.logger.debug("Fit dictionary in Abstract evaluator: {}".format(dict_repr(self.fit_dictionary)))
         self.logger.debug("Search space updates :{}".format(self.search_space_updates))
 
     def _get_pipeline(self) -> BaseEstimator:
13 changes: 10 additions & 3 deletions autoPyTorch/evaluation/tae.py
@@ -26,7 +26,7 @@
 from autoPyTorch.automl_common.common.utils.backend import Backend
 from autoPyTorch.evaluation.utils import empty_queue, extract_learning_curve, read_queue
 from autoPyTorch.pipeline.components.training.metrics.base import autoPyTorchMetric
-from autoPyTorch.utils.common import replace_string_bool_to_bool
+from autoPyTorch.utils.common import dict_repr, replace_string_bool_to_bool
 from autoPyTorch.utils.hyperparameter_search_space_update import HyperparameterSearchSpaceUpdates
 from autoPyTorch.utils.logging_ import PicklableClientLogger, get_named_client_logger
 from autoPyTorch.utils.parallel import preload_modules
@@ -459,7 +459,14 @@ def run(
 
         empty_queue(queue)
         self.logger.debug(
-            'Finished function evaluation %s. Status: %s, Cost: %f, Runtime: %f, Additional %s',
-            str(num_run), status, cost, runtime, additional_run_info,
+            "Finish function evaluation {}.\n"
+            "Status: {}, Cost: {}, Runtime: {},\n"
+            "Additional information:\n{}".format(
+                str(num_run),
+                status,
+                cost,
+                runtime,
+                dict_repr(additional_run_info)
+            )
         )
         return status, cost, runtime, additional_run_info
12 changes: 6 additions & 6 deletions autoPyTorch/evaluation/train_evaluator.py
@@ -19,7 +19,7 @@
     fit_and_suppress_warnings
 )
 from autoPyTorch.pipeline.components.training.metrics.base import autoPyTorchMetric
-from autoPyTorch.utils.common import subsampler
+from autoPyTorch.utils.common import dict_repr, subsampler
 from autoPyTorch.utils.hyperparameter_search_space_update import HyperparameterSearchSpaceUpdates
 
 __all__ = ['TrainEvaluator', 'eval_function']
@@ -172,11 +172,11 @@ def fit_predict_and_loss(self) -> None:
 
             status = StatusType.SUCCESS
 
-            self.logger.debug("In train evaluator fit_predict_and_loss, num_run: {} loss:{},"
-                              " additional run info:{}, status: {}".format(self.num_run,
-                                                                           loss,
-                                                                           additional_run_info,
-                                                                           status))
+            self.logger.debug("In train evaluator.fit_predict_and_loss, num_run: {} loss:{},"
+                              " status: {},\nadditional run info:\n{}".format(self.num_run,
+                                                                              loss,
+                                                                              status,
+                                                                              dict_repr(additional_run_info)))
             self.finish_up(
                 loss=loss,
                 train_loss=train_loss,
2 changes: 1 addition & 1 deletion autoPyTorch/optimizer/smbo.py
@@ -300,7 +300,7 @@ def run_smbo(self, func: typing.Optional[typing.Callable] = None
             pynisher_context=self.pynisher_context,
         )
         ta = ExecuteTaFuncWithQueue
-        self.logger.info("Created TA")
+        self.logger.info("Finish creating Target Algorithm (TA) function")
 
         startup_time = self.watcher.wall_elapsed(self.dataset_name)
         total_walltime_limit = self.total_walltime_limit - startup_time - 5
8 changes: 8 additions & 0 deletions autoPyTorch/utils/common.py
@@ -102,6 +102,14 @@ def custom_collate_fn(batch: List) -> List[Optional[torch.tensor]]:
     return items
 
 
+def dict_repr(d: Optional[Dict[Any, Any]]) -> str:
+    """ Display long message in dict as it is. """
+    if isinstance(d, dict):
+        return "\n".join(["{}: {}".format(k, v) for k, v in d.items()])
+    else:
+        return "None"
+
+
 def replace_string_bool_to_bool(dictionary: Dict[str, Any]) -> Dict[str, Any]:
     """
     Utility function to replace string-type bool to
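The else branch returning "None" is what the second commit-message bullet addresses: additional info is not always a dict, so dict_repr checks isinstance rather than calling .items() unconditionally. A quick check of both branches, reusing the helper exactly as defined above:

    from typing import Any, Dict, Optional

    def dict_repr(d: Optional[Dict[Any, Any]]) -> str:
        """ Display long message in dict as it is. """
        if isinstance(d, dict):
            return "\n".join(["{}: {}".format(k, v) for k, v in d.items()])
        else:
            return "None"

    print(dict_repr({'exitcode': -6, 'error': 'MemoryError'}))
    # exitcode: -6
    # error: MemoryError

    print(dict_repr(None))  # "None" rather than an AttributeError from d.items()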
