[python] added f-string to python-package/lightgbm/callback.py (#4142)
* added f-string

* fixed indent issues

* fixed some linting issues

* trying to fix E225 for 233:65

* Update python-package/lightgbm/callback.py

* first_metric_only log message

* Apply suggestions from code review

Co-authored-by: Nikita Titov <[email protected]>

Co-authored-by: James Lamb <[email protected]>
Co-authored-by: Nikita Titov <[email protected]>
3 people authored May 12, 2021
1 parent 676c95f commit c8482cc
Showing 1 changed file with 13 additions and 13 deletions.
python-package/lightgbm/callback.py (26 changes: 13 additions & 13 deletions)
@@ -39,12 +39,12 @@ def __init__(self, best_iteration: int, best_score: float) -> None:
 def _format_eval_result(value: list, show_stdv: bool = True) -> str:
     """Format metric string."""
     if len(value) == 4:
-        return '%s\'s %s: %g' % (value[0], value[1], value[2])
+        return f"{value[0]}'s {value[1]}: {value[2]:g}"
     elif len(value) == 5:
         if show_stdv:
-            return '%s\'s %s: %g + %g' % (value[0], value[1], value[2], value[4])
+            return f"{value[0]}'s {value[1]}: {value[2]:g} + {value[4]:g}"
         else:
-            return '%s\'s %s: %g' % (value[0], value[1], value[2])
+            return f"{value[0]}'s {value[1]}: {value[2]:g}"
     else:
         raise ValueError("Wrong metric value")

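For reference, a minimal standalone sketch (not part of the diff; the tuple below is a made-up stand-in for an entry of env.evaluation_result_list) showing that the new f-string with the :g format spec renders exactly what the old %g formatting did:

# Illustrative only: a fabricated (dataset, metric, value, is_higher_better) tuple.
value = ('valid_0', 'l2', 0.123456789, True)
old_style = '%s\'s %s: %g' % (value[0], value[1], value[2])
new_style = f"{value[0]}'s {value[1]}: {value[2]:g}"
assert old_style == new_style == "valid_0's l2: 0.123457"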
@@ -67,7 +67,7 @@ def print_evaluation(period: int = 1, show_stdv: bool = True) -> Callable:
     def _callback(env: CallbackEnv) -> None:
         if period > 0 and env.evaluation_result_list and (env.iteration + 1) % period == 0:
             result = '\t'.join([_format_eval_result(x, show_stdv) for x in env.evaluation_result_list])
-            _log_info('[%d]\t%s' % (env.iteration + 1, result))
+            _log_info(f'[{env.iteration + 1}]\t{result}')
     _callback.order = 10  # type: ignore
     return _callback

@@ -129,8 +129,7 @@ def _callback(env: CallbackEnv) -> None:
         for key, value in kwargs.items():
             if isinstance(value, list):
                 if len(value) != env.end_iteration - env.begin_iteration:
-                    raise ValueError("Length of list {} has to equal to 'num_boost_round'."
-                                     .format(repr(key)))
+                    raise ValueError(f"Length of list {repr(key)} has to equal to 'num_boost_round'.")
                 new_param = value[env.iteration - env.begin_iteration]
             else:
                 new_param = value(env.iteration - env.begin_iteration)
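A side note rather than part of the commit: inside an f-string, repr(key) could equally be spelled with the !r conversion; both produce the same message:

key = 'learning_rate'  # hypothetical kwarg name
assert (f"Length of list {repr(key)} has to equal to 'num_boost_round'."
        == f"Length of list {key!r} has to equal to 'num_boost_round'.")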
@@ -187,7 +186,7 @@ def _init(env: CallbackEnv) -> None:
                              'at least one dataset and eval metric is required for evaluation')
 
         if verbose:
-            _log_info("Training until validation scores don't improve for {} rounds".format(stopping_rounds))
+            _log_info(f"Training until validation scores don't improve for {stopping_rounds} rounds")
 
         # split is needed for "<dataset type> <metric>" case (e.g. "train l1")
         first_metric[0] = env.evaluation_result_list[0][1].split(" ")[-1]
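For the str.format() calls replaced here and below, the substitution is mechanical; a small check with an assumed value:

stopping_rounds = 5  # assumed example value
old_msg = "Training until validation scores don't improve for {} rounds".format(stopping_rounds)
new_msg = f"Training until validation scores don't improve for {stopping_rounds} rounds"
assert old_msg == new_msg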
@@ -204,10 +203,11 @@ def _init(env: CallbackEnv) -> None:
     def _final_iteration_check(env: CallbackEnv, eval_name_splitted: List[str], i: int) -> None:
         if env.iteration == env.end_iteration - 1:
             if verbose:
-                _log_info('Did not meet early stopping. Best iteration is:\n[%d]\t%s' % (
-                    best_iter[i] + 1, '\t'.join([_format_eval_result(x) for x in best_score_list[i]])))
+                best_score_str = '\t'.join([_format_eval_result(x) for x in best_score_list[i]])
+                _log_info('Did not meet early stopping. '
+                          f'Best iteration is:\n[{best_iter[i] + 1}]\t{best_score_str}')
                 if first_metric_only:
-                    _log_info("Evaluated only: {}".format(eval_name_splitted[-1]))
+                    _log_info(f"Evaluated only: {eval_name_splitted[-1]}")
             raise EarlyStopException(best_iter[i], best_score_list[i])
 
     def _callback(env: CallbackEnv) -> None:
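The new multi-line message above relies on implicit concatenation of adjacent string literals, where a plain literal and an f-string can be mixed; a standalone sketch with made-up values:

best_iteration_display = 42                   # hypothetical 1-based iteration for display
best_score_str = "valid_0's l2: 0.123457"     # hypothetical formatted score
msg = ('Did not meet early stopping. '
       f'Best iteration is:\n[{best_iteration_display}]\t{best_score_str}')
assert msg == "Did not meet early stopping. Best iteration is:\n[42]\t" + best_score_str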
@@ -231,10 +231,10 @@ def _callback(env: CallbackEnv) -> None:
                 continue  # train data for lgb.cv or sklearn wrapper (underlying lgb.train)
             elif env.iteration - best_iter[i] >= stopping_rounds:
                 if verbose:
-                    _log_info('Early stopping, best iteration is:\n[%d]\t%s' % (
-                        best_iter[i] + 1, '\t'.join([_format_eval_result(x) for x in best_score_list[i]])))
+                    eval_result_str = '\t'.join([_format_eval_result(x) for x in best_score_list[i]])
+                    _log_info(f"Early stopping, best iteration is:\n[{best_iter[i] + 1}]\t{eval_result_str}")
                     if first_metric_only:
-                        _log_info("Evaluated only: {}".format(eval_name_splitted[-1]))
+                        _log_info(f"Evaluated only: {eval_name_splitted[-1]}")
                 raise EarlyStopException(best_iter[i], best_score_list[i])
             _final_iteration_check(env, eval_name_splitted, i)
     _callback.order = 30  # type: ignore
