From 66bb3d3aa599680eb7100f02f1ac7725629744c0 Mon Sep 17 00:00:00 2001 From: nikosbosse Date: Sat, 28 Oct 2023 14:06:03 +0200 Subject: [PATCH] Fix some documentation related warnings --- R/check-input-helpers.R | 20 ++-------- R/summarise_scores.R | 43 ++++++++++++++-------- R/utils.R | 1 + man/add_coverage.Rd | 3 +- man/add_pairwise_comparison.Rd | 43 ++++++++++++++++++++++ man/assign_attributes.Rd | 6 +-- man/check_has_attribute.Rd | 4 +- man/check_summary_params.Rd | 16 -------- man/metrics_sample.Rd | 2 +- man/summarise_scores.Rd | 45 +---------------------- man/test_has_attribute.Rd | 19 ---------- tests/testthat/_snaps/score.md | 17 --------- tests/testthat/_snaps/summarise_scores.md | 8 ---- 13 files changed, 82 insertions(+), 145 deletions(-) create mode 100644 man/add_pairwise_comparison.Rd delete mode 100644 man/test_has_attribute.Rd delete mode 100644 tests/testthat/_snaps/score.md delete mode 100644 tests/testthat/_snaps/summarise_scores.md diff --git a/R/check-input-helpers.R b/R/check-input-helpers.R index 7cae1d837..d1fa62edc 100644 --- a/R/check-input-helpers.R +++ b/R/check-input-helpers.R @@ -394,33 +394,19 @@ check_data_columns <- function(data) { #' Check whether an attribute is present #' @description Checks whether an object has an attribute -#' @param data An object to be checked +#' @param object An object to be checked +#' @param attribute name of an attribute to be checked #' @return Returns TRUE if attribute is there and an error message as #' a string otherwise #' @keywords check-inputs check_has_attribute <- function(object, attribute) { if (is.null(attr(object, attribute))) { return( - paste0("Found no attribue `", attribute, "`") + paste0("Found no attribute `", attribute, "`") ) } else { return(TRUE) } } -#' Test whether an attribute is present -#' @description Tests whether an object has an attribute -#' @param data An object to be checked -#' @return Returns TRUE if attribute is there and FALSE otherwise -#' a string otherwise -#' 
@keywords check-inputs -test_has_attribute <- function(object, attribute) { - check <- check_has_attribute(object, attribute) - if (is.logical(check)) { - return(TRUE) - } else { - return(FALSE) - } -} - diff --git a/R/summarise_scores.R b/R/summarise_scores.R index fd530c845..33c5f76fa 100644 --- a/R/summarise_scores.R +++ b/R/summarise_scores.R @@ -21,21 +21,6 @@ #' be used or inferred internally if also not specified. Only one of `across` #' and `by` may be used at a time. #' @param fun a function used for summarising scores. Default is `mean`. -#' @param relative_skill logical, whether or not to compute relative -#' performance between models based on pairwise comparisons. -#' If `TRUE` (default is `FALSE`), then a column called -#' 'model' must be present in the input data. For more information on -#' the computation of relative skill, see [pairwise_comparison()]. -#' Relative skill will be calculated for the aggregation level specified in -#' `by`. -#' @param relative_skill_metric character with the name of the metric for which -#' a relative skill shall be computed. If equal to 'auto' (the default), then -#' this will be either interval score, CRPS or Brier score (depending on which -#' of these is available in the input data) -#' @param baseline character string with the name of a model. If a baseline is -#' given, then a scaled relative skill with respect to the baseline will be -#' returned. By default (`NULL`), relative skill will not be scaled with -#' respect to a baseline model. #' @param ... additional parameters that can be passed to the summary function #' provided to `fun`. For more information see the documentation of the #' respective function. @@ -167,6 +152,33 @@ summarise_scores <- function(scores, summarize_scores <- summarise_scores + +#' @title Add pairwise comparisons +#' @description Adds a column with relative skills computed by running +#' pairwise comparisons on the scores. 
+#' +#' a column called +#' 'model' must be present in the input data. For more information on +#' the computation of relative skill, see [pairwise_comparison()]. +#' Relative skill will be calculated for the aggregation level specified in +#' `by`. +#' WRITE MORE INFO HERE. +#' +#' +#' @param scores MORE INFO HERE. +#' @param by character vector with column names to summarise scores by. Default +#' is `NULL`, meaning that the only summary that takes place is summarising +#' over samples or quantiles (in case of quantile-based forecasts), such that +#' there is one score per forecast as defined by the *unit of a single forecast* +#' (rather than one score for every sample or quantile). +#' @param relative_skill_metric character with the name of the metric for which +#' a relative skill shall be computed. If equal to 'auto' (the default), then +#' this will be either interval score, CRPS or Brier score (depending on which +#' of these is available in the input data) +#' @param baseline character string with the name of a model. If a baseline is +#' given, then a scaled relative skill with respect to the baseline will be +#' returned. By default (`NULL`), relative skill will not be scaled with +#' respect to a baseline model. #' @export add_pairwise_comparison <- function(scores, by = NULL, relative_skill_metric = "auto", baseline = NULL) { @@ -306,7 +318,6 @@ check_summary_params <- function(scores, #' @description Adds a column with the coverage of central prediction intervals #' to unsummarised scores as produced by [score()] #' -#' @details #' The coverage values that are added are computed according to the values #' specified in `by`. 
If, for example, `by = "model"`, then there will be one #' coverage value for every model and [add_coverage()] will compute the coverage diff --git a/R/utils.R b/R/utils.R index 52be63fd8..1301aa8fe 100644 --- a/R/utils.R +++ b/R/utils.R @@ -134,6 +134,7 @@ filter_function_args <- function(fun, args) { #' @title Assign attributes to an object from a named list #' +#' @description #' Every list item will be made an attribute of the object. #' @param object An object to assign attributes to #' @param attribute_list A named list of attributes to assign to the object. diff --git a/man/add_coverage.Rd b/man/add_coverage.Rd index ad658432e..e0abf8161 100644 --- a/man/add_coverage.Rd +++ b/man/add_coverage.Rd @@ -23,8 +23,7 @@ summary is present according to the value specified in \code{by}. \description{ Adds a column with the coverage of central prediction intervals to unsummarised scores as produced by \code{\link[=score]{score()}} -} -\details{ + The coverage values that are added are computed according to the values specified in \code{by}. If, for example, \code{by = "model"}, then there will be one coverage value for every model and \code{\link[=add_coverage]{add_coverage()}} will compute the coverage diff --git a/man/add_pairwise_comparison.Rd b/man/add_pairwise_comparison.Rd new file mode 100644 index 000000000..31777dbfc --- /dev/null +++ b/man/add_pairwise_comparison.Rd @@ -0,0 +1,43 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/summarise_scores.R +\name{add_pairwise_comparison} +\alias{add_pairwise_comparison} +\title{Add pairwise comparisons} +\usage{ +add_pairwise_comparison( + scores, + by = NULL, + relative_skill_metric = "auto", + baseline = NULL +) +} +\arguments{ +\item{scores}{MORE INFO HERE.} + +\item{by}{character vector with column names to summarise scores by. 
Default +is \code{NULL}, meaning that the only summary that takes place is summarising +over samples or quantiles (in case of quantile-based forecasts), such that +there is one score per forecast as defined by the \emph{unit of a single forecast} +(rather than one score for every sample or quantile).} + +\item{relative_skill_metric}{character with the name of the metric for which +a relative skill shall be computed. If equal to 'auto' (the default), then +this will be either interval score, CRPS or Brier score (depending on which +of these is available in the input data)} + +\item{baseline}{character string with the name of a model. If a baseline is +given, then a scaled relative skill with respect to the baseline will be +returned. By default (\code{NULL}), relative skill will not be scaled with +respect to a baseline model.} +} +\description{ +Adds a column with relative skills computed by running +pairwise comparisons on the scores. + +a column called +'model' must be present in the input data. For more information on +the computation of relative skill, see \code{\link[=pairwise_comparison]{pairwise_comparison()}}. +Relative skill will be calculated for the aggregation level specified in +\code{by}. +WRITE MORE INFO HERE. +} diff --git a/man/assign_attributes.Rd b/man/assign_attributes.Rd index f9423bab9..f6dfdeadb 100644 --- a/man/assign_attributes.Rd +++ b/man/assign_attributes.Rd @@ -2,9 +2,7 @@ % Please edit documentation in R/utils.R \name{assign_attributes} \alias{assign_attributes} -\title{Assign attributes to an object from a named list - -Every list item will be made an attribute of the object.} +\title{Assign attributes to an object from a named list} \usage{ assign_attributes(object, attribute_list) } @@ -18,8 +16,6 @@ The object with new attributes according to the contents of \code{attribute_list} } \description{ -Assign attributes to an object from a named list - Every list item will be made an attribute of the object. 
} \keyword{internal} diff --git a/man/check_has_attribute.Rd b/man/check_has_attribute.Rd index 1372c1645..48b49c208 100644 --- a/man/check_has_attribute.Rd +++ b/man/check_has_attribute.Rd @@ -7,7 +7,9 @@ check_has_attribute(object, attribute) } \arguments{ -\item{data}{An object to be checked} +\item{object}{An object to be checked} + +\item{attribute}{name of an attribute to be checked} } \value{ Returns TRUE if attribute is there and an error message as diff --git a/man/check_summary_params.Rd b/man/check_summary_params.Rd index 042d565e9..9b605f999 100644 --- a/man/check_summary_params.Rd +++ b/man/check_summary_params.Rd @@ -19,22 +19,6 @@ input data that do not correspond to a metric produced by \code{\link[=score]{sc indicate indicate a grouping of forecasts (for example there may be one forecast per day, location and model). Adding additional, unrelated, columns may alter results in an unpredictable way.} - -\item{relative_skill}{logical, whether or not to compute relative -performance between models based on pairwise comparisons. -If \code{TRUE} (default is \code{FALSE}), then a column called -'model' must be present in the input data. For more information on -the computation of relative skill, see \code{\link[=pairwise_comparison]{pairwise_comparison()}}. -Relative skill will be calculated for the aggregation level specified in -\code{by}.} - -\item{baseline}{character string with the name of a model. If a baseline is -given, then a scaled relative skill with respect to the baseline will be -returned. By default (\code{NULL}), relative skill will not be scaled with -respect to a baseline model.} - -\item{metric}{\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}} Deprecated in 1.1.0. 
Use -\code{relative_skill_metric} instead.} } \description{ A helper function to check the input parameters for diff --git a/man/metrics_sample.Rd b/man/metrics_sample.Rd index ab1444158..5231f4ae7 100644 --- a/man/metrics_sample.Rd +++ b/man/metrics_sample.Rd @@ -5,7 +5,7 @@ \alias{metrics_sample} \title{Default metrics for sample-based forecasts.} \format{ -An object of class \code{list} of length 8. +An object of class \code{list} of length 7. } \usage{ metrics_sample diff --git a/man/summarise_scores.Rd b/man/summarise_scores.Rd index ed63cf1af..33e43985d 100644 --- a/man/summarise_scores.Rd +++ b/man/summarise_scores.Rd @@ -5,29 +5,9 @@ \alias{summarize_scores} \title{Summarise scores as produced by \code{\link[=score]{score()}}} \usage{ -summarise_scores( - scores, - by = NULL, - across = NULL, - fun = mean, - relative_skill = FALSE, - relative_skill_metric = "auto", - metric = deprecated(), - baseline = NULL, - ... -) +summarise_scores(scores, by = NULL, across = NULL, fun = mean, ...) -summarize_scores( - scores, - by = NULL, - across = NULL, - fun = mean, - relative_skill = FALSE, - relative_skill_metric = "auto", - metric = deprecated(), - baseline = NULL, - ... -) +summarize_scores(scores, by = NULL, across = NULL, fun = mean, ...) } \arguments{ \item{scores}{A data.table of scores as produced by \code{\link[=score]{score()}}.} @@ -52,27 +32,6 @@ and \code{by} may be used at a time.} \item{fun}{a function used for summarising scores. Default is \code{mean}.} -\item{relative_skill}{logical, whether or not to compute relative -performance between models based on pairwise comparisons. -If \code{TRUE} (default is \code{FALSE}), then a column called -'model' must be present in the input data. For more information on -the computation of relative skill, see \code{\link[=pairwise_comparison]{pairwise_comparison()}}. 
-Relative skill will be calculated for the aggregation level specified in -\code{by}.} - -\item{relative_skill_metric}{character with the name of the metric for which -a relative skill shall be computed. If equal to 'auto' (the default), then -this will be either interval score, CRPS or Brier score (depending on which -of these is available in the input data)} - -\item{metric}{\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}} Deprecated in 1.1.0. Use -\code{relative_skill_metric} instead.} - -\item{baseline}{character string with the name of a model. If a baseline is -given, then a scaled relative skill with respect to the baseline will be -returned. By default (\code{NULL}), relative skill will not be scaled with -respect to a baseline model.} - \item{...}{additional parameters that can be passed to the summary function provided to \code{fun}. For more information see the documentation of the respective function.} diff --git a/man/test_has_attribute.Rd b/man/test_has_attribute.Rd deleted file mode 100644 index 17e96d59b..000000000 --- a/man/test_has_attribute.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/check-input-helpers.R -\name{test_has_attribute} -\alias{test_has_attribute} -\title{Test whether an attribute is present} -\usage{ -test_has_attribute(object, attribute) -} -\arguments{ -\item{data}{An object to be checked} -} -\value{ -Returns TRUE if attribute is there and FALSE otherwise -a string otherwise -} -\description{ -Tests whether an object has an attribute -} -\keyword{check-inputs} diff --git a/tests/testthat/_snaps/score.md b/tests/testthat/_snaps/score.md deleted file mode 100644 index 78c7810cd..000000000 --- a/tests/testthat/_snaps/score.md +++ /dev/null @@ -1,17 +0,0 @@ -# score() can support a sample column when a quantile forecast is used - - Code - 
summarise_scores(summarise_scores(scores, by = "model"), by = "model", fun = signif, - digits = 2) - Output - model interval_score dispersion underprediction - 1: EuroCOVIDhub-baseline 8500 850 0 - 2: EuroCOVIDhub-ensemble NA NA NA - 3: epiforecasts-EpiNow2 13000 4100 0 - 4: UMass-MechBayes 120 77 39 - overprediction coverage_deviation bias ae_median - 1: 7600 -0.081 0.62 13000 - 2: 11000 NA 0.58 21000 - 3: 8600 0.050 0.50 22000 - 4: 0 0.050 -0.50 210 - diff --git a/tests/testthat/_snaps/summarise_scores.md b/tests/testthat/_snaps/summarise_scores.md deleted file mode 100644 index fdc138864..000000000 --- a/tests/testthat/_snaps/summarise_scores.md +++ /dev/null @@ -1,8 +0,0 @@ -# summarise_scores() metric is deprecated - - Code - x <- summarise_scores(scores, by = "model", metric = "auto", relative_skill = TRUE) - Warning - The `metric` argument of `summarise_scores()` is deprecated as of scoringutils 1.1.0. - i Please use the `relative_skill_metric` argument instead. -