diff --git a/R/BenchmarkResult.R b/R/BenchmarkResult.R
index 57d5608f5..9f017bd26 100644
--- a/R/BenchmarkResult.R
+++ b/R/BenchmarkResult.R
@@ -124,7 +124,7 @@ BenchmarkResult = R6Class("BenchmarkResult",
       if (!is.null(bmr)) {
         assert_benchmark_result(bmr)
         if (private$.data$iterations() && self$task_type != bmr$task_type) {
-          stopf("BenchmarkResult is of task type '%s', but must be '%s'", bmr$task_type, self$task_type)
+          error_input("BenchmarkResult is of task type '%s', but must be '%s'", bmr$task_type, self$task_type)
         }
 
         private$.data$combine(get_private(bmr)$.data)
@@ -428,8 +428,8 @@ BenchmarkResult = R6Class("BenchmarkResult",
     resample_result = function(i = NULL, uhash = NULL, task_id = NULL, learner_id = NULL, resampling_id = NULL) {
       uhash = private$.get_uhashes(i, uhash, learner_id, task_id, resampling_id)
-      if (length(uhash) != 1L) {
-        stopf("Method requires selecting exactly one ResampleResult, but got %s",
+      if (length(uhash) != 1) {
+        error_input("Method requires selecting exactly one ResampleResult, but got %s",
           length(uhash))
       }
       ResampleResult$new(private$.data, view = uhash)
@@ -601,7 +601,7 @@ BenchmarkResult = R6Class("BenchmarkResult",
         resampling_ids = resampling_ids), is.null)
       if (sum(!is.null(i), !is.null(uhashes), length(args) > 0L) > 1) {
-        stopf("At most one of `i`, `uhash`, or IDs can be provided.")
+        error_input("At most one of `i`, `uhash`, or IDs can be provided.")
       }
       if (!is.null(i)) {
         uhashes = self$uhashes
@@ -612,7 +612,7 @@ BenchmarkResult = R6Class("BenchmarkResult",
         uhashes = invoke(match.fun("uhashes"), bmr = self, .args = args)
       }
       if (length(uhashes) == 0L) {
-        stopf("No resample results found for the given arguments.")
+        error_input("No resample results found for the given arguments.")
       }
       uhashes
     },
@@ -717,7 +717,7 @@ uhash = function(bmr, learner_id = NULL, task_id = NULL, resampling_id = NULL) {
   assert_string(resampling_id, null.ok = TRUE)
   uhash = uhashes(bmr, learner_id, task_id, resampling_id)
   if (length(uhash) != 1) {
-    stopf("Expected exactly one uhash, got %s", length(uhash))
+    error_input("Expected exactly one uhash, got %s", length(uhash))
   }
   uhash
 }
diff --git a/R/DataBackendCbind.R b/R/DataBackendCbind.R
index e80196d57..13cd9db42 100644
--- a/R/DataBackendCbind.R
+++ b/R/DataBackendCbind.R
@@ -7,7 +7,7 @@ DataBackendCbind = R6Class("DataBackendCbind", inherit = DataBackend, cloneable
       pk = b1$primary_key
       if (pk != b2$primary_key) {
-        stopf("All backends to cbind must have the primary_key '%s'", pk)
+        error_input("All backends to cbind must have the primary_key '%s'", pk)
       }
 
       super$initialize(list(b1 = b1, b2 = b2), pk)
diff --git a/R/DataBackendDataTable.R b/R/DataBackendDataTable.R
index 5601ba487..35dcdbcfd 100644
--- a/R/DataBackendDataTable.R
+++ b/R/DataBackendDataTable.R
@@ -47,7 +47,7 @@ DataBackendDataTable = R6Class("DataBackendDataTable", inherit = DataBackend,
       super$initialize(setkeyv(data, primary_key), primary_key)
       ii = match(primary_key, names(data))
       if (is.na(ii)) {
-        stopf("Primary key '%s' not in 'data'", primary_key)
+        error_input("Primary key '%s' not in 'data'", primary_key)
       }
       private$.cache = set_names(replace(rep(NA, ncol(data)), ii, FALSE), names(data))
     },
diff --git a/R/DataBackendRbind.R b/R/DataBackendRbind.R
index 4ed652ae3..4e699e6fa 100644
--- a/R/DataBackendRbind.R
+++ b/R/DataBackendRbind.R
@@ -7,7 +7,7 @@ DataBackendRbind = R6Class("DataBackendRbind", inherit = DataBackend, cloneable
       pk = b1$primary_key
       if (pk != b2$primary_key) {
-        stopf("All backends to rbind must have the primary_key '%s'", pk)
+        error_input("All backends to rbind must have the primary_key '%s'", pk)
       }
 
       super$initialize(list(b1 = b1, b2 = b2), pk)
diff --git a/R/DataBackendRename.R b/R/DataBackendRename.R
index 1aca9f90f..dce30de51 100644
--- a/R/DataBackendRename.R
+++ b/R/DataBackendRename.R
@@ -16,14 +16,14 @@ DataBackendRename = R6Class("DataBackendRename", inherit = DataBackend, cloneabl
       new = new[ii]
 
       if (self$primary_key %chin% old) {
-        stopf("Renaming the primary key is not supported")
+        error_input("Renaming the primary key is not supported")
       }
 
       resulting_names = map_values(b$colnames, old, new)
       dup = anyDuplicated(resulting_names)
       if (dup > 0L) {
-        stopf("Duplicated column name after rename: %s", resulting_names[dup])
+        error_input("Duplicated column name after rename: %s", resulting_names[dup])
       }
 
       self$old = old
diff --git a/R/HotstartStack.R b/R/HotstartStack.R
index 099119a9d..eccc6b336 100644
--- a/R/HotstartStack.R
+++ b/R/HotstartStack.R
@@ -87,11 +87,11 @@ HotstartStack = R6Class("HotstartStack",
       walk(learners, function(learner) {
         if (!is.null(get0("validate", learner))) {
-          stopf("Hotstart learners that did validation is currently not supported.")
+          error_config("Hotstart learners that did validation are currently not supported.")
         } else if (is.null(learner$model)) {
-          stopf("Learners must be trained before adding them to the hotstart stack.")
+          error_config("Learners must be trained before adding them to the hotstart stack.")
         } else if (is_marshaled_model(learner$model)) {
-          stopf("Learners must be unmarshaled before adding them to the hotstart stack.")
+          error_config("Learners must be unmarshaled before adding them to the hotstart stack.")
         }
       })
diff --git a/R/Learner.R b/R/Learner.R
index fb0722a91..200faf33f 100644
--- a/R/Learner.R
+++ b/R/Learner.R
@@ -375,14 +375,14 @@ Learner = R6Class("Learner",
     predict = function(task, row_ids = NULL) {
       # improve error message for the common mistake of passing a data.frame here
       if (is.data.frame(task)) {
-        stopf("To predict on data.frames, use the method `$predict_newdata()` instead of `$predict()`")
+        error_learner_predict("To predict on data.frames, use the method `$predict_newdata()` instead of `$predict()`")
       }
       task = assert_task(as_task(task))
       assert_predictable(task, self)
       row_ids = assert_row_ids(row_ids, task = task, null.ok = TRUE)
 
       if (is.null(self$state$model) && is.null(self$state$fallback_state$model)) {
-        stopf("Cannot predict, Learner '%s' has not been trained yet", self$id)
+        error_input("Cannot predict, Learner '%s' has not been trained yet", self$id)
       }
 
       # we need to marshal for call-r prediction and parallel prediction, but afterwards we reset the model
@@ -452,7 +452,7 @@ Learner = R6Class("Learner",
     predict_newdata = function(newdata, task = NULL) {
       if (is.null(task)) {
         if (is.null(self$state$train_task)) {
-          stopf("No task stored, and no task provided")
+          error_input("No task stored, and no task provided")
         }
         task = self$state$train_task$clone()
       } else {
@@ -618,7 +618,7 @@ Learner = R6Class("Learner",
             fallback$id, self$id, str_collapse(missing_properties), class = "Mlr3WarningConfigFallbackProperties")
         }
       } else if (method == "none" && !is.null(fallback)) {
-        stopf("Fallback learner must be `NULL` if encapsulation is set to `none`.")
+        error_config("Fallback learner must be `NULL` if encapsulation is set to `none`.")
       }
 
       private$.encapsulation = c(train = method, predict = method)
@@ -665,7 +665,7 @@ Learner = R6Class("Learner",
       for (i in seq_along(new_values)) {
         nn = ndots[[i]]
         if (!exists(nn, envir = self, inherits = FALSE)) {
-          stopf("Cannot set argument '%s' for '%s' (not a parameter, not a field).%s",
+          error_config("Cannot set argument '%s' for '%s' (not a parameter, not a field).%s",
            nn, class(self)[1L], did_you_mean(nn, c(param_ids, setdiff(names(self), ".__enclos_env__")))) # nolint
         }
         self[[nn]] = new_values[[i]]
@@ -681,10 +681,10 @@ Learner = R6Class("Learner",
     #'   If set to `"error"`, an error is thrown, otherwise all features are returned.
     selected_features = function() {
       if (is.null(self$model)) {
-        stopf("No model stored")
+        error_input("No model stored") # TODO error_learner?
       }
       if (private$.selected_features_impute == "error") {
-        stopf("Learner does not support feature selection")
+        error_config("Learner does not support feature selection") # TODO error_learner?
       } else {
         self$state$feature_names
       }
@@ -790,7 +790,7 @@ Learner = R6Class("Learner",
       assert_string(rhs, .var.name = "predict_type")
       if (rhs %nin% self$predict_types) {
-        stopf("Learner '%s' does not support predict type '%s'", self$id, rhs)
+        error_config("Learner '%s' does not support predict type '%s'", self$id, rhs) # TODO error_learner?
       }
       private$.predict_type = rhs
     },
@@ -798,7 +798,7 @@ Learner = R6Class("Learner",
     #' @template field_param_set
     param_set = function(rhs) {
       if (!missing(rhs) && !identical(rhs, private$.param_set)) {
-        stopf("param_set is read-only.")
+        error_config("param_set is read-only.")
       }
       private$.param_set
     },
@@ -866,7 +866,7 @@ Learner = R6Class("Learner",
     # return: Numeric vector of weights or `no_weights_val` (default NULL)
     .get_weights = function(task, no_weights_val = NULL) {
       if ("weights" %nin% self$properties) {
-        stop("private$.get_weights should not be used in Learners that do not have the 'weights' property.")
+        error_mlr3("private$.get_weights should not be used in Learners that do not have the 'weights' property.")
       }
       if (self$use_weights == "use" && "weights_learner" %in% task$properties) {
         task$weights_learner$weight
@@ -916,7 +916,7 @@ default_values.Learner = function(x, search_space, task, ...) { # nolint
   values = default_values(x$param_set)
   if (any(search_space$ids() %nin% names(values))) {
-    stopf("Could not find default values for the following parameters: %s",
+    error_config("Could not find default values for the following parameters: %s",
       str_collapse(setdiff(search_space$ids(), names(values))))
   }
 
diff --git a/R/LearnerClassif.R b/R/LearnerClassif.R
index 2a37eeb2b..8623f2c9d 100644
--- a/R/LearnerClassif.R
+++ b/R/LearnerClassif.R
@@ -77,7 +77,7 @@ LearnerClassif = R6Class("LearnerClassif", inherit = Learner,
     #'
     #' @return `list()` with elements `"response"` or `"prob"` depending on the predict type.
     predict_newdata_fast = function(newdata, task = NULL) {
-      if (is.null(task) && is.null(self$state$train_task)) stopf("No task stored, and no task provided")
+      if (is.null(task) && is.null(self$state$train_task)) error_input("No task stored, and no task provided")
       feature_names = self$state$train_task$feature_names %??% task$feature_names
       class_names = self$state$train_task$class_names %??% task$class_names
 
diff --git a/R/LearnerClassifDebug.R b/R/LearnerClassifDebug.R
index 381b0a2d0..876e78e8f 100644
--- a/R/LearnerClassifDebug.R
+++ b/R/LearnerClassifDebug.R
@@ -113,7 +113,7 @@ LearnerClassifDebug = R6Class("LearnerClassifDebug", inherit = LearnerClassif,
     #' @return Named `numeric()`.
     importance = function() {
       if (is.null(self$model)) {
-        stopf("No model stored")
+        error_input("No model stored")
       }
       fns = self$state$feature_names
       set_names(rep(0, length(fns)), fns)
@@ -124,7 +124,7 @@ LearnerClassifDebug = R6Class("LearnerClassifDebug", inherit = LearnerClassif,
     #' @return `character()`.
     selected_features = function() {
       if (is.null(self$model)) {
-        stopf("No model stored")
+        error_input("No model stored")
       }
       character(0)
     }
@@ -180,10 +180,10 @@ LearnerClassifDebug = R6Class("LearnerClassifDebug", inherit = LearnerClassif,
         message("Message from classif.debug->train()")
       }
       if (roll("warning_train")) {
-        warningf("Warning from classif.debug->train()")
+        warning_mlr3("Warning from classif.debug->train()")
       }
       if (roll("error_train")) {
-        stopf("Error from classif.debug->train()")
+        error_learner_train("Error from classif.debug->train()")
       }
       if (roll("segfault_train")) {
         get("attach")(structure(list(), class = "UserDefinedDatabase"))
@@ -192,7 +192,7 @@ LearnerClassifDebug = R6Class("LearnerClassifDebug", inherit = LearnerClassif,
       valid_truth = if (!is.null(task$internal_valid_task)) task$internal_valid_task$truth()
 
       if (isTRUE(pv$early_stopping) && is.null(valid_truth)) {
-        stopf("Early stopping is only possible when a validation task is present.")
+        error_config("Early stopping is only possible when a validation task is present.")
       }
 
       model = list(
@@ -248,7 +248,7 @@ LearnerClassifDebug = R6Class("LearnerClassifDebug", inherit = LearnerClassif,
     },
     .predict = function(task) {
       if (!is.null(self$model$marshal_pid) && self$model$marshal_pid != Sys.getpid()) {
-        stopf("Model was not unmarshaled correctly")
+        error_input("Model was not unmarshaled correctly") # TODO error_mlr3?
       }
       n = task$nrow
       pv = self$param_set$get_values(tags = "predict")
@@ -265,10 +265,10 @@ LearnerClassifDebug = R6Class("LearnerClassifDebug", inherit = LearnerClassif,
         message("Message from classif.debug->predict()")
       }
       if (roll("warning_predict")) {
-        warningf("Warning from classif.debug->predict()")
+        warning_mlr3("Warning from classif.debug->predict()")
       }
       if (roll("error_predict")) {
-        stopf("Error from classif.debug->predict()")
+        error_learner_predict("Error from classif.debug->predict()")
       }
       if (roll("segfault_predict")) {
         get("attach")(structure(list(), class = "UserDefinedDatabase"))
diff --git a/R/LearnerClassifFeatureless.R b/R/LearnerClassifFeatureless.R
index 4395c367d..52c226aa7 100644
--- a/R/LearnerClassifFeatureless.R
+++ b/R/LearnerClassifFeatureless.R
@@ -54,7 +54,7 @@ LearnerClassifFeatureless = R6Class("LearnerClassifFeatureless", inherit = Learn
     #' @return Named `numeric()`.
     importance = function() {
       if (is.null(self$model)) {
-        stopf("No model stored")
+        error_input("No model stored")
       }
       fn = self$model$features
       named_vector(fn, 0)
@@ -65,7 +65,7 @@ LearnerClassifFeatureless = R6Class("LearnerClassifFeatureless", inherit = Learn
     #' @return `character(0)`.
     selected_features = function() {
       if (is.null(self$model)) {
-        stopf("No model stored")
+        error_input("No model stored")
       }
       character()
     }
diff --git a/R/LearnerClassifRpart.R b/R/LearnerClassifRpart.R
index 42bc13916..2e6b45c0d 100644
--- a/R/LearnerClassifRpart.R
+++ b/R/LearnerClassifRpart.R
@@ -55,7 +55,7 @@ LearnerClassifRpart = R6Class("LearnerClassifRpart", inherit = LearnerClassif,
     #' @return Named `numeric()`.
     importance = function() {
       if (is.null(self$model)) {
-        stopf("No model stored")
+        error_input("No model stored")
       }
       # importance is only present if there is at least on split
       sort(self$model$variable.importance %??% set_names(numeric()), decreasing = TRUE)
@@ -66,7 +66,7 @@ LearnerClassifRpart = R6Class("LearnerClassifRpart", inherit = LearnerClassif,
     #' @return `character()`.
     selected_features = function() {
       if (is.null(self$model)) {
-        stopf("No model stored")
+        error_input("No model stored")
       }
       setdiff(self$model$frame$var, "")
     }
diff --git a/R/LearnerRegr.R b/R/LearnerRegr.R
index ccda6230b..9de6ebd38 100644
--- a/R/LearnerRegr.R
+++ b/R/LearnerRegr.R
@@ -69,7 +69,7 @@ LearnerRegr = R6Class("LearnerRegr", inherit = Learner,
     #'
     #' @return `list()` with elements `"response"`, `"se"` or `"quantiles"` depending on the predict type.
     predict_newdata_fast = function(newdata, task = NULL) {
-      if (is.null(task) && is.null(self$state$train_task)) stopf("No task stored, and no task provided")
+      if (is.null(task) && is.null(self$state$train_task)) error_input("No task stored, and no task provided")
       feature_names = self$state$train_task$feature_names %??% task$feature_names
 
       # add data and most common used meta data
@@ -134,7 +134,7 @@ LearnerRegr = R6Class("LearnerRegr", inherit = Learner,
       }
 
       if ("quantiles" %nin% self$predict_types) {
-        stopf("Learner does not support predicting quantiles")
+        error_learner_predict("Learner does not support predicting quantiles") # TODO error_config/input?
       }
       private$.quantiles = assert_numeric(rhs, lower = 0, upper = 1, any.missing = FALSE, min.len = 1L, sorted = TRUE, .var.name = "quantiles")
@@ -151,7 +151,7 @@ LearnerRegr = R6Class("LearnerRegr", inherit = Learner,
       }
 
       if ("quantiles" %nin% self$predict_types) {
-        stopf("Learner does not support predicting quantiles")
+        error_learner_predict("Learner does not support predicting quantiles") # TODO error_config/input?
       }
       private$.quantile_response = assert_number(rhs, lower = 0, upper = 1, .var.name = "response")
diff --git a/R/LearnerRegrDebug.R b/R/LearnerRegrDebug.R
index e84eb8ac7..bfac670f8 100644
--- a/R/LearnerRegrDebug.R
+++ b/R/LearnerRegrDebug.R
@@ -64,7 +64,7 @@ LearnerRegrDebug = R6Class("LearnerRegrDebug", inherit = LearnerRegr,
     #' @return Named `numeric()`.
     importance = function() {
       if (is.null(self$model)) {
-        stopf("No model stored")
+        error_input("No model stored")
       }
       fns = self$state$feature_names
       set_names(rep(0, length(fns)), fns)
@@ -75,7 +75,7 @@ LearnerRegrDebug = R6Class("LearnerRegrDebug", inherit = LearnerRegr,
     #' @return `character()`.
     selected_features = function() {
       if (is.null(self$model)) {
-        stopf("No model stored")
+        error_input("No model stored")
       }
       character(0)
     }
@@ -88,13 +88,13 @@ LearnerRegrDebug = R6Class("LearnerRegrDebug", inherit = LearnerRegr,
       }
 
       if (roll("message_train")) {
-        message("Message from classif.debug->train()")
+        message("Message from regr.debug->train()")
       }
       if (roll("warning_train")) {
-        warningf("Warning from classif.debug->train()")
+        warning_mlr3("Warning from regr.debug->train()")
       }
       if (roll("error_train")) {
-        stopf("Error from classif.debug->train()")
+        error_learner_train("Error from regr.debug->train()")
       }
       if (roll("segfault_train")) {
         get("attach")(structure(list(), class = "UserDefinedDatabase"))
@@ -131,13 +131,13 @@ LearnerRegrDebug = R6Class("LearnerRegrDebug", inherit = LearnerRegr,
       }
 
       if (roll("message_predict")) {
-        message("Message from classif.debug->predict()")
+        message("Message from regr.debug->predict()")
       }
       if (roll("warning_predict")) {
-        warningf("Warning from classif.debug->predict()")
+        warning_mlr3("Warning from regr.debug->predict()")
       }
       if (roll("error_predict")) {
-        stopf("Error from classif.debug->predict()")
+        error_learner_predict("Error from regr.debug->predict()")
       }
       if (roll("segfault_predict")) {
         get("attach")(structure(list(), class = "UserDefinedDatabase"))
diff --git a/R/LearnerRegrFeatureless.R b/R/LearnerRegrFeatureless.R
index e8523de17..887fd6d92 100644
--- a/R/LearnerRegrFeatureless.R
+++ b/R/LearnerRegrFeatureless.R
@@ -55,7 +55,7 @@ LearnerRegrFeatureless = R6Class("LearnerRegrFeatureless", inherit = LearnerRegr
     #' @return Named `numeric()`.
     importance = function() {
       if (is.null(self$model)) {
-        stopf("No model stored")
+        error_input("No model stored")
       }
       fn = self$model$features
       named_vector(fn, 0)
@@ -66,7 +66,7 @@ LearnerRegrFeatureless = R6Class("LearnerRegrFeatureless", inherit = LearnerRegr
     #' @return `character(0)`.
     selected_features = function() {
       if (is.null(self$model)) {
-        stopf("No model stored")
+        error_input("No model stored")
       }
       character()
     }
diff --git a/R/LearnerRegrRpart.R b/R/LearnerRegrRpart.R
index 243008fe3..f00bee98c 100644
--- a/R/LearnerRegrRpart.R
+++ b/R/LearnerRegrRpart.R
@@ -55,7 +55,7 @@ LearnerRegrRpart = R6Class("LearnerRegrRpart", inherit = LearnerRegr,
     #' @return Named `numeric()`.
     importance = function() {
       if (is.null(self$model)) {
-        stopf("No model stored")
+        error_input("No model stored")
       }
       # importance is only present if there is at least on split
       sort(self$model$variable.importance %??% set_names(numeric()), decreasing = TRUE)
@@ -66,7 +66,7 @@ LearnerRegrRpart = R6Class("LearnerRegrRpart", inherit = LearnerRegr,
     #' @return `character()`.
     selected_features = function() {
       if (is.null(self$model)) {
-        stopf("No model stored")
+        error_input("No model stored")
       }
       setdiff(self$model$frame$var, "")
     }
diff --git a/R/Measure.R b/R/Measure.R
index 1d539320a..17d7f5a3e 100644
--- a/R/Measure.R
+++ b/R/Measure.R
@@ -249,19 +249,19 @@ Measure = R6Class("Measure",
       # except when the checks are superfluous for rr$score() and bmr$score()
       # these checks should be added bellow
       if ("requires_task" %chin% properties && is.null(task)) {
-        stopf("Measure '%s' requires a task", self$id)
+        error_input("Measure '%s' requires a task", self$id)
       }
 
       if ("requires_learner" %chin% properties && is.null(learner)) {
-        stopf("Measure '%s' requires a learner", self$id)
+        error_input("Measure '%s' requires a learner", self$id)
       }
 
       if (!is_scalar_na(self$task_type) && self$task_type != prediction$task_type) {
-        stopf("Measure '%s' incompatible with task type '%s'", self$id, prediction$task_type)
+        error_input("Measure '%s' incompatible with task type '%s'", self$id, prediction$task_type)
       }
 
       if ("requires_train_set" %chin% properties && is.null(train_set)) {
-        stopf("Measure '%s' requires the train_set", self$id)
+        error_input("Measure '%s' requires the train_set", self$id)
       }
 
       score_single_measure(self, task, learner, train_set, prediction)
@@ -306,7 +306,7 @@ Measure = R6Class("Measure",
         "custom" = {
           if (!is.null(get_private(rr$resampling)$.primary_iters) && "primary_iters" %nin% self$properties &&
            !test_permutation(get_private(rr$resampling)$.primary_iters, seq_len(rr$resampling$iters))) {
-            stopf("Resample result has non-NULL primary_iters, but measure '%s' cannot handle them", self$id)
+            error_input("Resample result has non-NULL primary_iters, but measure '%s' cannot handle them", self$id)
           }
           private$.aggregator(rr)
         }
@@ -411,7 +411,7 @@ Measure = R6Class("Measure",
     .aggregator = NULL,
     .use_weights = NULL,
     .score = function(prediction, task, weights, ...) {
{ - stop("abstract method") + error_mlr3("abstract method") } ) ) diff --git a/R/MeasureAIC.R b/R/MeasureAIC.R index 564f8e817..6b055a1c4 100644 --- a/R/MeasureAIC.R +++ b/R/MeasureAIC.R @@ -45,7 +45,7 @@ MeasureAIC = R6Class("MeasureAIC", tryCatch({ return(stats::AIC(stats::logLik(learner$model), k = k)) }, error = function(e) { - warningf("Learner '%s' does not support AIC calculation", learner$id) + warning_config("Learner '%s' does not support AIC calculation", learner$id) return(NA_real_) }) } diff --git a/R/MeasureBIC.R b/R/MeasureBIC.R index b3d48f205..bac6242a9 100644 --- a/R/MeasureBIC.R +++ b/R/MeasureBIC.R @@ -42,7 +42,7 @@ MeasureBIC = R6Class("MeasureBIC", tryCatch({ return(stats::BIC(stats::logLik(learner$model))) }, error = function(e) { - warningf("Learner '%s' does not support BIC calculation", learner$id) + warning_config("Learner '%s' does not support BIC calculation", learner$id) return(NA_real_) }) } diff --git a/R/Prediction.R b/R/Prediction.R index e98232d20..1e6fd61e6 100644 --- a/R/Prediction.R +++ b/R/Prediction.R @@ -165,7 +165,7 @@ c.Prediction = function(..., keep_duplicates = TRUE) { # nolint classes = unique(map_chr(dots, function(x) class(x)[1L])) if (length(classes) > 1L) { - stopf("Cannot combine objects of different type: %s", str_collapse(classes)) + error_input("Cannot combine objects of different type: %s", str_collapse(classes)) } assert_flag(keep_duplicates) diff --git a/R/PredictionClassif.R b/R/PredictionClassif.R index 18889fbfe..38ebb517a 100644 --- a/R/PredictionClassif.R +++ b/R/PredictionClassif.R @@ -172,14 +172,14 @@ as.data.table.PredictionClassif = function(x, ...) { # nolint set_threshold_pdata = function(pdata, threshold, ties_method) { if (!is.matrix(pdata$prob)) { - stopf("Cannot set threshold, no probabilities available") + error_input("Cannot set threshold, no probabilities available") } lvls = levels(pdata$truth) if (length(threshold) == 1L) { assert_number(threshold, lower = 0, upper = 1) if (length(lvls) != 2L) { - stopf("Setting a single threshold only supported for binary classification problems") + error_config("Setting a single threshold only supported for binary classification problems") } prob = cbind(pdata$prob[, 1L], threshold) } else { @@ -195,4 +195,4 @@ set_threshold_pdata = function(pdata, threshold, ties_method) { ind = max.col(prob, ties.method = ties_method) pdata$response = factor(lvls[ind], levels = lvls) pdata -} \ No newline at end of file +} diff --git a/R/PredictionDataClassif.R b/R/PredictionDataClassif.R index 5ecfb8272..2e1b4274f 100644 --- a/R/PredictionDataClassif.R +++ b/R/PredictionDataClassif.R @@ -95,11 +95,11 @@ c.PredictionDataClassif = function(..., keep_duplicates = TRUE) { predict_types = names(mlr_reflections$learner_predict_types$classif) predict_types = map(dots, function(x) intersect(names(x), predict_types)) if (!every(predict_types[-1L], setequal, y = predict_types[[1L]])) { - stopf("Cannot rbind predictions: Different predict types") + error_input("Cannot rbind predictions: Different predict types") } if (length(unique(map_lgl(dots, function(x) is.null(x$weights)))) > 1L) { - stopf("Cannot rbind predictions: Some predictions have weights, others do not") + error_input("Cannot rbind predictions: Some predictions have weights, others do not") } elems = c("row_ids", "truth", intersect(predict_types[[1L]], "response"), if ("weights" %chin% names(dots[[1L]])) "weights") diff --git a/R/PredictionDataRegr.R b/R/PredictionDataRegr.R index 12e2854b6..b0ef72d83 100644 --- a/R/PredictionDataRegr.R 
+++ b/R/PredictionDataRegr.R
@@ -22,11 +22,11 @@ check_prediction_data.PredictionDataRegr = function(pdata, ...) { # nolint
     assert_prediction_count(nrow(quantiles), n, "quantiles")
 
     if (is.null(attr(quantiles, "probs"))) {
-      stopf("No probs attribute stored in 'quantile'")
+      error_config("No probs attribute stored in 'quantile'")
     }
 
     if (is.null(attr(quantiles, "response")) && is.null(pdata$response)) {
-      stopf("No response attribute stored in 'quantile' or response stored in 'pdata'")
+      error_config("No response attribute stored in 'quantile' or response stored in 'pdata'")
     }
 
     colnames(pdata$quantiles) = sprintf("q%g", attr(quantiles, "probs"))
@@ -91,11 +91,11 @@ c.PredictionDataRegr = function(..., keep_duplicates = TRUE) { # nolint
   predict_types = names(mlr_reflections$learner_predict_types$regr)
   predict_types = map(dots, function(x) intersect(names(x), predict_types))
   if (!every(predict_types[-1L], setequal, y = predict_types[[1L]])) {
-    stopf("Cannot combine predictions: Different predict types")
+    error_input("Cannot combine predictions: Different predict types")
   }
 
   if (length(unique(map_lgl(dots, function(x) is.null(x$weights)))) > 1L) {
-    stopf("Cannot combine predictions: Some predictions have weights, others do not")
+    error_input("Cannot combine predictions: Some predictions have weights, others do not")
   }
 
   elems = c("row_ids", "truth", intersect(predict_types[[1L]], c("response", "se")), if ("weights" %chin% names(dots[[1L]])) "weights")
diff --git a/R/ResampleResult.R b/R/ResampleResult.R
index 4b01e1b7d..1d2d0de9c 100644
--- a/R/ResampleResult.R
+++ b/R/ResampleResult.R
@@ -299,7 +299,7 @@ ResampleResult = R6Class("ResampleResult",
     #' rr$set_threshold(0.6)
     set_threshold = function(threshold, ties_method = "random") {
       if (!self$task_type == "classif") {
-        stopf("Can only change the threshold for classification problems, but task type is '%s'.", self$task_type)
+        error_config("Can only change the threshold for classification problems, but task type is '%s'.", self$task_type)
       }
       private$.data$set_threshold(self$uhash, threshold, ties_method)
     }
diff --git a/R/Resampling.R b/R/Resampling.R
index 29867ed06..459d6da84 100644
--- a/R/Resampling.R
+++ b/R/Resampling.R
@@ -277,7 +277,7 @@ Resampling = R6Class("Resampling",
         }
       } else {
         if (!is.null(groups)) {
-          stopf("Cannot combine stratification with grouping")
+          error_config("Cannot combine stratification with grouping")
         }
         private$.combine(lapply(strata$row_id, private$.sample, task = task))
       }
@@ -285,7 +285,7 @@ Resampling = R6Class("Resampling",
     .get_set = function(getter, i) {
       if (!self$is_instantiated) {
-        stopf("Resampling '%s' has not been instantiated yet", self$id)
+        error_input("Resampling '%s' has not been instantiated yet", self$id)
       }
       i = assert_int(i, lower = 1L, upper = self$iters, coerce = TRUE)
       ids = getter(i)
diff --git a/R/ResamplingCustomCV.R b/R/ResamplingCustomCV.R
index f5f6ca286..e517a1963 100644
--- a/R/ResamplingCustomCV.R
+++ b/R/ResamplingCustomCV.R
@@ -62,7 +62,7 @@ ResamplingCustomCV = R6Class("ResamplingCustomCV", inherit = Resampling,
     instantiate = function(task, f = NULL, col = NULL) {
       task = assert_task(as_task(task))
       if (!xor(is.null(f), is.null(col))) {
-        stopf("Either `f` or `col` must be provided")
+        error_config("Either `f` or `col` must be provided")
       }
 
       if (!is.null(col)) {
diff --git a/R/Task.R b/R/Task.R
index 8d0439d2a..068117cd4 100644
--- a/R/Task.R
+++ b/R/Task.R
@@ -131,7 +131,7 @@ Task = R6Class("Task",
       assert_names(cn, "unique", .var.name = "column names")
       if (any(grepl("%", cn, fixed = TRUE))) {
{ - stopf("Column names may not contain special character '%%'") + error_input("Column names may not contain special character '%%'") } self$col_info = col_info(self$backend) @@ -172,7 +172,7 @@ Task = R6Class("Task", private$.hash = NULL if (!xor(is.null(ratio), is.null(ids))) { - stopf("Provide a ratio or ids to create a validation task, but not both (Task '%s').", self$id) + error_config("Provide a ratio or ids to create a validation task, but not both (Task '%s').", self$id) } valid_ids = if (!is.null(ratio)) { @@ -333,12 +333,12 @@ Task = R6Class("Task", data = self$backend$data(rows = rows, cols = query_cols) if (length(query_cols) && nrow(data) != length(rows)) { - stopf("DataBackend did not return the queried rows correctly: %i requested, %i received. - The resampling was probably instantiated on a different task.", length(rows), nrow(data)) + error_mlr3("DataBackend did not return the queried rows correctly: %i requested, %i received. + The resampling was probably instantiated on a different task.", length(rows), nrow(data)) # TODO: more specific error necessary? } if (length(rows) && ncol(data) != length(query_cols)) { - stopf("DataBackend did not return the queried cols correctly: %i requested, %i received", length(cols), ncol(data)) + error_mlr3("DataBackend did not return the queried cols correctly: %i requested, %i received", length(cols), ncol(data)) # TODO: more specific error necessary? } .__i__ = self$col_info[["fix_factor_levels"]] @@ -545,7 +545,7 @@ Task = R6Class("Task", } if (pk_in_backend && any(data$rownames %in% self$backend$rownames)) { - stopf("Cannot rbind data to task '%s', duplicated row ids", self$id) + error_input("Cannot rbind data to task '%s', duplicated row ids", self$id) } # columns with these roles must be present in data @@ -553,7 +553,7 @@ Task = R6Class("Task", mandatory_cols = unlist(private$.col_roles[mandatory_roles], use.names = FALSE) missing_cols = setdiff(mandatory_cols, data$colnames) if (length(missing_cols)) { - stopf("Cannot rbind data to task '%s', missing the following mandatory columns: %s", self$id, str_collapse(missing_cols)) + error_input("Cannot rbind data to task '%s', missing the following mandatory columns: %s", self$id, str_collapse(missing_cols)) } # merge col infos @@ -565,7 +565,7 @@ Task = R6Class("Task", type = type_y = NULL ii = head(tab[type != type_y, which = TRUE], 1L) if (length(ii)) { - stopf("Cannot rbind to task: Types do not match for column: %s (%s != %s)", tab$id[ii], tab$type[ii], tab$type_y[ii]) + error_input("Cannot rbind to task: Types do not match for column: %s (%s != %s)", tab$id[ii], tab$type[ii], tab$type_y[ii]) } } @@ -844,7 +844,7 @@ Task = R6Class("Task", col_types = fget_keys(self$col_info, i = cols, j = "type", key = "id") ii = wf(col_types %nin% c("integer", "numeric")) if (length(ii)) { - stopf("For `add_strata`, all columns must be numeric, but '%s' is not", cols[ii]) + error_config("For `add_strata`, all columns must be numeric, but '%s' is not", cols[ii]) } strata = pmap_dtc(list(self$data(cols = cols), bins), cut, include.lowest = TRUE) @@ -924,7 +924,7 @@ Task = R6Class("Task", self$row_roles$use = train_ids } else { if (!is.null(rhs$internal_valid_task)) { # avoid recursive structures - stopf("Trying to assign task '%s' as a validation task, remove its validation task first.", rhs$id) + error_config("Trying to assign task '%s' as a validation task, remove its validation task first.", rhs$id) } assert_task(rhs, task_type = self$task_type) rhs = rhs$clone(deep = TRUE) @@ -936,16 +936,16 @@ 
Task = R6Class("Task", cols = unlist(self$col_roles[c("target", "feature")], use.names = FALSE) walk(cols, function(.col) { if (.col %nin% ci2$id) { - stopf("Primary task has column '%s' which is not present in the validation task.", .col) + error_input("Primary task has column '%s' which is not present in the validation task.", .col) } if (ci1[get("id") == .col, "type"]$type != ci2[get("id") == .col, "type"]$type) { - stopf("The type of column '%s' from the validation task differs from the type in the primary task.", .col) + error_input("The type of column '%s' from the validation task differs from the type in the primary task.", .col) } }) private$.internal_valid_task = rhs - if (private$.internal_valid_task$nrow == 0L) { - warningf("Internal validation task has 0 observations.") + if (private$.internal_valid_task$nrow == 0) { + warning_input("Internal validation task has 0 observations.") } invisible(private$.internal_valid_task) }, @@ -1056,7 +1056,7 @@ Task = R6Class("Task", assert_has_backend(self) assert_list(rhs, .var.name = "row_roles") if ("test" %chin% names(rhs) || "holdout" %chin% names(rhs)) { - stopf("Setting row roles 'test'/'holdout' is no longer possible.") + error_config("Setting row roles 'test'/'holdout' is no longer possible.") } assert_names(names(rhs), "unique", permutation.of = mlr_reflections$task_row_roles, .var.name = "names of row_roles") rhs = map(rhs, assert_row_ids, .var.name = "elements of row_roles") @@ -1428,12 +1428,12 @@ task_check_col_roles = function(task, new_roles, ...) { #' @export task_check_col_roles.Task = function(task, new_roles, ...) { if ("weight" %in% names(new_roles)) { - stopf("Task role 'weight' is deprecated, use 'weights_learner' instead") + error_config("Task role 'weight' is deprecated, use 'weights_learner' instead") } for (role in c("group", "name", "weights_learner", "weights_measure")) { if (length(new_roles[[role]]) > 1L) { - stopf("There may only be up to one column with role '%s'", role) + error_config("There may only be up to one column with role '%s'", role) } } @@ -1449,19 +1449,19 @@ task_check_col_roles.Task = function(task, new_roles, ...) { if (length(new_roles[["name"]])) { row_names = task$backend$data(task$backend$rownames, cols = new_roles[["name"]]) if (!is.character(row_names[[1L]]) && !is.factor(row_names[[1L]])) { - stopf("Assertion on '%s' failed: Must be of type 'character' or 'factor', not %s", names(row_names), class(row_names[[1]])) + error_config("Assertion on '%s' failed: Must be of type 'character' or 'factor', not %s", names(row_names), class(row_names[[1]])) } } # check offset if (length(new_roles[["offset"]]) && any(fget_keys(task$col_info, new_roles[["offset"]], "type", key = "id") %nin% c("numeric", "integer"))) { - stopf("Offset column(s) %s must be a numeric or integer column", paste0("'", new_roles[["offset"]], "'", collapse = ",")) + error_config("Offset column(s) %s must be a numeric or integer column", paste0("'", new_roles[["offset"]], "'", collapse = ",")) } if (length(new_roles[["offset"]]) && any(task$missings(cols = new_roles[["offset"]]) > 0)) { missings = task$missings(cols = new_roles[["offset"]]) missings = names(missings[missings > 0]) - stopf("Offset column(s) %s contain missing values", paste0("'", missings, "'", collapse = ",")) + error_config("Offset column(s) %s contain missing values", paste0("'", missings, "'", collapse = ",")) } return(new_roles) @@ -1473,15 +1473,15 @@ task_check_col_roles.TaskClassif = function(task, new_roles, ...) 
   # check target
   if (length(new_roles[["target"]]) > 1L) {
-    stopf("There may only be up to one column with role 'target'")
+    error_config("There may only be up to one column with role 'target'")
   }
 
   if (length(new_roles[["target"]]) && any(fget_keys(task$col_info, new_roles[["target"]], "type", key = "id") %nin% c("factor", "ordered"))) {
-    stopf("Target column(s) %s must be a factor or ordered factor", paste0("'", new_roles[["target"]], "'", collapse = ","))
+    error_config("Target column(s) %s must be a factor or ordered factor", paste0("'", new_roles[["target"]], "'", collapse = ","))
   }
 
   if (length(new_roles[["offset"]]) > 1L && length(task$class_names) == 2L) {
-    stopf("There may only be up to one column with role 'offset' for binary classification tasks")
+    error_config("There may only be up to one column with role 'offset' for binary classification tasks")
   }
 
   if (length(new_roles[["offset"]]) > 1L) {
@@ -1497,12 +1497,12 @@ task_check_col_roles.TaskClassif = function(task, new_roles, ...) {
 task_check_col_roles.TaskRegr = function(task, new_roles, ...) {
   for (role in c("target", "offset")) {
     if (length(new_roles[[role]]) > 1L) {
-      stopf("There may only be up to one column with role '%s'", role)
+      error_config("There may only be up to one column with role '%s'", role)
     }
   }
 
   if (length(new_roles[["target"]]) && any(fget_keys(task$col_info, new_roles[["target"]], "type", key = "id") %nin% c("numeric", "integer"))) {
-    stopf("Target column '%s' must be a numeric or integer column", paste0("'", new_roles[["target"]], "'", collapse = ","))
+    error_config("Target column '%s' must be a numeric or integer column", paste0("'", new_roles[["target"]], "'", collapse = ","))
   }
 
   NextMethod()
@@ -1514,7 +1514,7 @@ task_check_col_roles.TaskSupervised = function(task, new_roles, ...) {
 
   # check target
   if (length(new_roles$target) == 0L) {
-    stopf("Supervised tasks need at least one target column")
+    error_config("Supervised tasks need at least one target column")
   }
 
   NextMethod()
@@ -1526,7 +1526,7 @@ task_check_col_roles.TaskUnsupervised = function(task, new_roles, ...) {
   # check target
   if (length(new_roles$target) != 0L) {
-    stopf("Unsupervised tasks may not have a target column")
+    error_config("Unsupervised tasks may not have a target column")
   }
 
   NextMethod()
diff --git a/R/TaskClassif.R b/R/TaskClassif.R
index 674b8934d..39dd0239c 100644
--- a/R/TaskClassif.R
+++ b/R/TaskClassif.R
@@ -102,7 +102,7 @@ TaskClassif = R6Class("TaskClassif",
       }
 
       if (length(lvls) != 2L) {
-        stopf("Setting the positive class is only feasible for binary classification")
+        error_config("Setting the positive class is only feasible for binary classification")
       }
       positive = assert_choice(rhs, lvls)
       negative = setdiff(lvls, rhs)
@@ -139,12 +139,12 @@ TaskClassif = R6Class("TaskClassif",
 update_classif_property = function(self, private) {
   tn = self$target_names
   if (fget_key(self$col_info, tn, "type", key = "id") %nin% c("factor", "ordered")) {
-    stopf("Target column '%s' must be a factor or ordered factor", tn)
+    error_config("Target column '%s' must be a factor or ordered factor", tn)
   }
 
   nlvls = length(self$class_names)
   if (nlvls < 2L) {
-    stopf("Target column '%s' must have at least two levels", tn)
+    error_config("Target column '%s' must have at least two levels", tn)
   }
 
   private$.properties = setdiff(private$.properties, c("twoclass", "multiclass"))
diff --git a/R/TaskRegr.R b/R/TaskRegr.R
index de823a250..53c772025 100644
--- a/R/TaskRegr.R
+++ b/R/TaskRegr.R
@@ -40,7 +40,7 @@ TaskRegr = R6Class("TaskRegr",
       type = fget_key(self$col_info, i = target, j = "type", key = "id")
       if (type %nin% c("integer", "numeric")) {
-        stopf("Target column '%s' must be numeric", target)
+        error_config("Target column '%s' must be numeric", target)
       }
     },
 
diff --git a/R/as_data_backend.R b/R/as_data_backend.R
index 8aa97c1ec..f61d7002d 100644
--- a/R/as_data_backend.R
+++ b/R/as_data_backend.R
@@ -72,7 +72,7 @@ as_data_backend.data.frame = function(data, primary_key = NULL, keep_rownames =
   } else if (is.integer(primary_key)) {
     row_ids = assert_integer(primary_key, len = nrow(data), any.missing = FALSE, unique = TRUE)
   } else {
-    stopf("Argument 'primary_key' must be NULL, a column name or a vector of ids")
+    error_config("Argument 'primary_key' must be NULL, a column name or a vector of ids")
   }
   primary_key = "..row_id"
 
diff --git a/R/as_prediction_classif.R b/R/as_prediction_classif.R
index 2293a86ad..c08fb87a6 100644
--- a/R/as_prediction_classif.R
+++ b/R/as_prediction_classif.R
@@ -46,7 +46,7 @@ as_prediction_classif.data.frame = function(x, ...) { # nolint
   assert_names(names(x), must.include = c("row_ids", "truth", "response"))
   prob_cols = setdiff(names(x), c("row_ids", "truth", "response", "weights"))
   if (!all(startsWith(prob_cols, "prob."))) {
-    stopf("Table may only contain columns 'row_ids', 'truth', 'response', 'weights' as well as columns prefixed with 'prob.' for class probabilities")
for class probabilities") } x = as.data.table(x) diff --git a/R/as_result_data.R b/R/as_result_data.R index ccaa2ecf2..71e20699d 100644 --- a/R/as_result_data.R +++ b/R/as_result_data.R @@ -63,25 +63,25 @@ as_result_data = function( N = length(iterations) if (length(learners) != N) { - stopf("Number of learners (%i) must match the number of resampling iterations (%i)", length(learners), N) + error_input("Number of learners (%i) must match the number of resampling iterations (%i)", length(learners), N) } if (length(predictions) != N) { - stopf("Number of predictions (%i) must match the number of resampling iterations (%i)", length(predictions), N) + error_input("Number of predictions (%i) must match the number of resampling iterations (%i)", length(predictions), N) } if (is.null(learner_states)) { learner_states = map(learners, "state") } else if (length(learner_states) != N) { - stopf("Number of learner_states (%i) must match the number of resampling iterations (%i)", length(learner_states), N) + error_input("Number of learner_states (%i) must match the number of resampling iterations (%i)", length(learner_states), N) } if (resampling$task_hash != task$hash) { - stopf("Resampling '%s' has not been trained on task '%s', hashes do not match", resampling$id, task$id) + error_input("Resampling '%s' has not been trained on task '%s', hashes do not match", resampling$id, task$id) } if (!is.null(data_extra) && length(data_extra) != N) { - stopf("Length of data_extra (%i) must match the number of resampling iterations (%i)", length(data_extra), N) + error_input("Length of data_extra (%i) must match the number of resampling iterations (%i)", length(data_extra), N) } ResultData$new(data.table( diff --git a/R/as_task.R b/R/as_task.R index 3f4d19c0f..de6d67026 100644 --- a/R/as_task.R +++ b/R/as_task.R @@ -24,7 +24,7 @@ as_task = function(x, ...) { #' @export as_task.default = function(x, ...) { - stopf("No method for class '%s'. To create a task from a `data.frame`, use dedicated converters such as `as_task_classif()` or `as_task_regr()`.", class(x)[1L]) + error_input("No method for class '%s'. 
+  error_input("No method for class '%s'. To create a task from a `data.frame`, use dedicated converters such as `as_task_classif()` or `as_task_regr()`.", class(x)[1L])
 }
 
 #' @rdname as_task
diff --git a/R/as_task_classif.R b/R/as_task_classif.R
index 638d62f6b..ccd66d717 100644
--- a/R/as_task_classif.R
+++ b/R/as_task_classif.R
@@ -43,7 +43,7 @@ as_task_classif.data.frame = function(x, target, id = deparse1(substitute(x)), p
   ii = which(map_lgl(keep(x, is.double), anyInfinite))
   if (length(ii)) {
-    warningf("Detected columns with unsupported Inf values in data: %s", str_collapse(names(ii)))
+    warning_input("Detected columns with unsupported Inf values in data: %s", str_collapse(names(ii)))
   }
 
   y = x[[target]]
@@ -93,7 +93,7 @@ as_task_classif.formula = function(x, data, id = deparse1(substitute(data)), pos
   assert_subset(all.vars(x), c(names(data), "."), .var.name = "formula")
   if (!attributes(terms(x, data = data))$response) {
-    stopf("Formula %s is missing a response", format(x))
+    error_config("Formula %s is missing a response", format(x))
   }
   tab = model.frame(x, data, na.action = "na.pass")
   setattr(tab, "terms", NULL)
diff --git a/R/as_task_regr.R b/R/as_task_regr.R
index d6950a404..cfb1b5583 100644
--- a/R/as_task_regr.R
+++ b/R/as_task_regr.R
@@ -41,7 +41,7 @@ as_task_regr.data.frame = function(x, target, id = deparse1(substitute(x)), labe
   ii = which(map_lgl(keep(x, is.double), anyInfinite))
   if (length(ii)) {
-    warningf("Detected columns with unsupported Inf values in data: %s", str_collapse(names(ii)))
+    warning_input("Detected columns with unsupported Inf values in data: %s", str_collapse(names(ii)))
   }
 
   TaskRegr$new(id = id, backend = x, target = target, label = label)
@@ -86,7 +86,7 @@ as_task_regr.formula = function(x, data, id = deparse1(substitute(data)), label
   assert_subset(all.vars(x), c(names(data), "."), .var.name = "formula")
   if (!attributes(terms(x, data = data))$response) {
-    stopf("Formula %s is missing a response", format(x))
+    error_config("Formula %s is missing a response", format(x))
   }
   tab = model.frame(x, data, na.action = "na.pass")
   setattr(tab, "terms", NULL)
diff --git a/R/as_task_unsupervised.R b/R/as_task_unsupervised.R
index d68c4cf13..1de5ade06 100644
--- a/R/as_task_unsupervised.R
+++ b/R/as_task_unsupervised.R
@@ -27,7 +27,7 @@ as_task_unsupervised.data.frame = function(x, id = deparse1(substitute(x)), labe
   ii = which(map_lgl(keep(x, is.double), anyInfinite))
   if (length(ii)) {
-    warningf("Detected columns with unsupported Inf values in data: %s", str_collapse(names(ii)))
+    warning_input("Detected columns with unsupported Inf values in data: %s", str_collapse(names(ii)))
   }
 
   TaskUnsupervised$new(id = id, backend = x, label = label)
diff --git a/R/assertions.R b/R/assertions.R
index 8a4b56347..fdbea00ba 100644
--- a/R/assertions.R
+++ b/R/assertions.R
@@ -36,20 +36,20 @@ assert_task = function(task, task_type = NULL, feature_types = NULL, task_proper
   assert_class(task, "Task", .var.name = .var.name)
 
   if (!is.null(task_type) && task$task_type != task_type) {
-    stopf("Task '%s' must have type '%s'", task$id, task_type)
+    error_input("Task '%s' must have type '%s'", task$id, task_type)
   }
 
   if (!is.null(feature_types)) {
     tmp = setdiff(task$feature_types$type, feature_types)
     if (length(tmp)) {
-      stopf("Task '%s' has the following unsupported feature types: %s", task$id, str_collapse(tmp))
+      error_input("Task '%s' has the following unsupported feature types: %s", task$id, str_collapse(tmp))
    }
  }
 
   if (!is.null(task_properties)) {
     tmp = setdiff(task_properties, task$properties)
     if (length(tmp)) {
-      stopf("Task '%s' is missing the following properties: %s", task$id, str_collapse(tmp))
+      error_input("Task '%s' is missing the following properties: %s", task$id, str_collapse(tmp))
     }
   }
 
@@ -76,13 +76,13 @@ assert_learner = function(learner, task = NULL, task_type = NULL, properties = c
   # check on class(learner) does not work with GraphLearner and AutoTuner
   # check on learner$task_type does not work with TaskUnsupervised
   if (!test_matching_task_type(task_type, learner, "learner")) {
-    stopf("Learner '%s' must have task type '%s'", learner$id, task_type)
+    error_input("Learner '%s' must have task type '%s'", learner$id, task_type)
   }
 
   if (length(properties)) {
     miss = setdiff(properties, learner$properties)
     if (length(miss)) {
-      stopf("Learner '%s' must have the properties: %s", learner$id, str_collapse(miss))
+      error_input("Learner '%s' must have the properties: %s", learner$id, str_collapse(miss))
     }
   }
 
@@ -111,7 +111,7 @@ assert_learners = function(learners, task = NULL, task_type = NULL, properties =
   if (unique_ids) {
     ids = map_chr(learners, "id")
     if (!test_character(ids, unique = TRUE)) {
-      stopf("Learners need to have unique IDs: %s", str_collapse(ids))
+      error_input("Learners need to have unique IDs: %s", str_collapse(ids))
    }
  }
   invisible(lapply(learners, assert_learner, task = task, task_type = NULL, properties = properties, .var.name = .var.name))
@@ -124,31 +124,31 @@ assert_task_learner = function(task, learner, param_values = NULL, cols = NULL)
   # remove pars that are covered by param_values
   pars = pars[names(pars) %nin% names(param_values)]
   if (length(pars) > 0) {
-    stopf("%s cannot be trained with TuneToken present in hyperparameter: %s", learner$format(), str_collapse(names(pars)))
+    error_config("%s cannot be trained with TuneToken present in hyperparameter: %s", format_angle_brackets(learner), str_collapse(names(pars)))
   }
 
   # check on class(learner) does not work with GraphLearner and AutoTuner
   # check on learner$task_type does not work with TaskUnsupervised
   if (!test_matching_task_type(task$task_type, learner, "learner")) {
-    stopf("Type '%s' of %s does not match type '%s' of %s",
-      task$task_type, task$format(), learner$task_type, learner$format())
+    error_input("Type '%s' of %s does not match type '%s' of %s",
+      task$task_type, format_angle_brackets(task), learner$task_type, format_angle_brackets(learner))
   }
 
   tmp = setdiff(task$feature_types$type, learner$feature_types)
   if (length(tmp) > 0) {
-    stopf("%s has the following unsupported feature types: %s", task$format(), str_collapse(tmp))
+    error_input("%s has the following unsupported feature types: %s", format_angle_brackets(task), str_collapse(tmp))
   }
 
   if ("missings" %nin% learner$properties) {
     miss = task$missings(cols = cols) > 0L
     if (any(miss)) {
-      stopf("Task '%s' has missing values in column(s) %s, but learner '%s' does not support this",
+      error_config("Task '%s' has missing values in column(s) %s, but learner '%s' does not support this",
         task$id, str_collapse(names(miss)[miss], quote = "'"), learner$id)
     }
   }
 
   if ("offset" %in% task$properties && "offset" %nin% learner$properties) {
-    warningf("Task '%s' has offset, but learner '%s' does not support this, so it will be ignored",
+    warning_config("Task '%s' has offset, but learner '%s' does not support this, so it will be ignored",
       task$id, learner$id)
   }
 
@@ -156,14 +156,14 @@ assert_task_learner = function(task, learner, param_values = NULL, cols = NULL)
   if (length(tmp)) {
     tmp = setdiff(intersect(task$properties, tmp), learner$properties)
     if (length(tmp)) {
-      stopf("Task '%s' has property '%s', but learner '%s' does not support that",
+      error_input("Task '%s' has property '%s', but learner '%s' does not support that",
         task$id, tmp[1L], learner$id)
     }
   }
 
   validate = get0("validate", learner)
   if (!is.null(task$internal_valid_task) && (is.numeric(validate) || identical(validate, "test"))) {
-    stopf("Parameter 'validate' of Learner '%s' cannot be set to 'test' or a ratio when internal_valid_task is present, remove it first", learner$id)
+    error_config("Parameter 'validate' of Learner '%s' cannot be set to 'test' or a ratio when internal_valid_task is present, remove it first", learner$id)
   }
 }
 
@@ -174,12 +174,12 @@ assert_task_learner = function(task, learner, param_values = NULL, cols = NULL)
 #' @rdname mlr_assertions
 assert_learnable = function(task, learner, param_values = NULL) {
   if (task$task_type == "unsupervised") {
-    stopf("%s cannot be trained with %s", learner$format(), task$format())
+    error_input("%s cannot be trained with %s", format_angle_brackets(learner), format_angle_brackets(task))
   }
   # we only need to check whether the learner wants to error on weights in training,
   # since weights_learner are always ignored during prediction.
   if (learner$use_weights == "error" && "weights_learner" %in% task$properties) {
-    stopf("%s cannot be trained with weights in %s%s", learner$format(), task$format(),
+    error_config("%s cannot be trained with weights in %s%s", format_angle_brackets(learner), format_angle_brackets(task),
       if ("weights" %in% learner$properties) {
         " since 'use_weights' was set to 'error'."
       } else {
@@ -198,7 +198,7 @@ assert_predictable = function(task, learner) {
   cols_predict = task$feature_names
 
   if (!test_permutation(cols_train, cols_predict)) {
-    stopf("Learner '%s' has received tasks with different columns in train and predict.", learner$id)
+    error_input("Learner '%s' has received tasks with different columns in train and predict.", learner$id)
   }
 
   ids = fget_keys(train_task$col_info, i = cols_train, j = "id", key = "id")
@@ -210,7 +210,7 @@ assert_predictable = function(task, learner) {
     ok = all(train_type == predict_type) &&
       all(pmap_lgl(list(x = train_levels, y = predict_levels), identical))
     if (!ok) {
-      stopf("Learner '%s' received task with different column info (feature type or factor level ordering) during train and predict.", learner$id)
+      error_input("Learner '%s' received task with different column info (feature type or factor level ordering) during train and predict.", learner$id)
     }
   }
 
@@ -227,7 +227,7 @@ assert_measure = function(measure, task = NULL, learner = NULL, prediction = NUL
   assert_class(measure, "Measure", .var.name = .var.name)
 
   if (measure$use_weights == "error" && (!is.null(prediction$weights) || "weights_measure" %chin% task$properties)) {
-    stopf("%s cannot be evaluated with weights%s%s", measure$format(), if (!is.null(task)) paste0(" in ", task$format()) else "",
+    error_input("%s cannot be evaluated with weights%s%s", format_angle_brackets(measure), if (!is.null(task)) paste0(" in ", format_angle_brackets(task)) else "",
       if ("weights" %in% measure$properties) {
         " since 'use_weights' was set to 'error'."
       } else {
@@ -239,14 +239,14 @@ assert_measure = function(measure, task = NULL, learner = NULL, prediction = NUL
 
   if (!is.null(task)) {
     if (!is_scalar_na(measure$task_type) && !test_matching_task_type(task$task_type, measure, "measure")) {
-      stopf("Measure '%s' is not compatible with type '%s' of task '%s'",
+      error_input("Measure '%s' is not compatible with type '%s' of task '%s'",
         measure$id, task$task_type, task$id)
     }
 
     if (measure$check_prerequisites != "ignore") {
       miss = setdiff(measure$task_properties, task$properties)
       if (length(miss) > 0) {
-        warningf("Measure '%s' is missing properties %s of task '%s'",
+        warning_config("Measure '%s' is missing properties %s of task '%s'",
           measure$id, str_collapse(miss, quote = "'"), task$id)
       }
     }
@@ -255,14 +255,14 @@ assert_measure = function(measure, task = NULL, learner = NULL, prediction = NUL
 
   if (!is.null(learner)) {
     if (!is_scalar_na(measure$task_type) && measure$task_type != learner$task_type) {
-      stopf("Measure '%s' is not compatible with type '%s' of learner '%s'",
+      error_input("Measure '%s' is not compatible with type '%s' of learner '%s'",
         measure$id, learner$task_type, learner$id)
     }
 
     if (!is_scalar_na(measure$predict_type) && measure$check_prerequisites != "ignore") {
       predict_types = mlr_reflections$learner_predict_types[[learner$task_type]][[learner$predict_type]]
       if (measure$predict_type %nin% predict_types) {
-        warningf("Measure '%s' is missing predict type '%s' of learner '%s'", measure$id, measure$predict_type, learner$id)
+        warning_config("Measure '%s' is missing predict type '%s' of learner '%s'", measure$id, measure$predict_type, learner$id)
       }
     }
 
@@ -270,7 +270,7 @@ assert_measure = function(measure, task = NULL, learner = NULL, prediction = NUL
 
     miss = setdiff(measure$predict_sets, learner$predict_sets)
     if (length(miss) > 0) {
-      warningf("Measure '%s' needs predict sets %s, but learner '%s' only predicted on sets %s",
+      warning_config("Measure '%s' needs predict sets %s, but learner '%s' only predicted on sets %s",
        measure$id, str_collapse(miss, quote = "'"), learner$id, str_collapse(learner$predict_sets, quote = "'"))
     }
   }
@@ -279,7 +279,7 @@ assert_measure = function(measure, task = NULL, learner = NULL, prediction = NUL
   if (!is.null(prediction) && is.null(learner)) {
     # same as above but works without learner e.g. measure$score(prediction)
     if (measure$check_prerequisites != "ignore" && measure$predict_type %nin% prediction$predict_types) {
-      warningf("Measure '%s' is missing predict type '%s' of prediction", measure$id, measure$predict_type)
+      warning_config("Measure '%s' is missing predict type '%s' of prediction", measure$id, measure$predict_type)
     }
   }
 
@@ -292,11 +292,11 @@ assert_measure = function(measure, task = NULL, learner = NULL, prediction = NUL
 #' @rdname mlr_assertions
 assert_scorable = function(measure, task, learner, prediction = NULL, .var.name = vname(measure)) {
   if ("requires_model" %chin% measure$properties && is.null(learner$model)) {
-    stopf("Measure '%s' requires the trained model", measure$id)
+    error_input("Measure '%s' requires the trained model", measure$id)
   }
 
   if ("requires_model" %chin% measure$properties && is_marshaled_model(learner$model)) {
-    stopf("Measure '%s' requires the trained model, but model is in marshaled form", measure$id)
+    error_input("Measure '%s' requires the trained model, but model is in marshaled form", measure$id)
   }
 
   assert_measure(measure, task = task, learner = learner, prediction = prediction, .var.name = .var.name)
@@ -308,7 +308,7 @@ assert_scorable = function(measure, task, learner, prediction = NULL, .var.name
 assert_measures = function(measures, task = NULL, learner = NULL, .var.name = vname(measures)) {
   lapply(measures, assert_measure, task = task, learner = learner, .var.name = .var.name)
   if (anyDuplicated(ids(measures))) {
-    stopf("Measures need to have unique IDs")
+    error_config("Measures need to have unique IDs")
   }
   invisible(measures)
 }
@@ -321,10 +321,10 @@ assert_resampling = function(resampling, instantiated = NULL, .var.name = vname(
 
   if (!is.null(instantiated)) {
     if (instantiated && !resampling$is_instantiated) {
-      stopf("Resampling '%s' must be instantiated", resampling$id)
+      error_input("Resampling '%s' must be instantiated", resampling$id)
    }
     if (!instantiated && resampling$is_instantiated) {
-      stopf("Resampling '%s' may not be instantiated", resampling$id)
+      error_input("Resampling '%s' may not be instantiated", resampling$id)
    }
  }
 
@@ -374,7 +374,7 @@ assert_range = function(range, .var.name = vname(range)) {
   assert_numeric(range, len = 2L, any.missing = FALSE, .var.name = .var.name)
 
   if (diff(range) <= 0) {
-    stopf("Invalid range specified. First value (%f) must be greater than second value (%f)", range[1L], range[2L])
+    error_config("Invalid range specified. Second value (%f) must be greater than first value (%f)", range[2L], range[1L])
   }
 
   invisible(range)
@@ -390,7 +390,7 @@ assert_row_ids = function(row_ids, task = NULL, null.ok = FALSE, .var.name = vna
   row_ids = assert_integerish(row_ids, coerce = TRUE, null.ok = null.ok)
   if (!is.null(task)) {
     if (any(row_ids %nin% task$row_ids)) {
-      stopf("The provided row ids do not exist in task '%s'", task$id)
+      error_input("The provided row ids do not exist in task '%s'", task$id)
     }
   }
   invisible(row_ids)
@@ -401,7 +401,7 @@ assert_row_ids = function(row_ids, task = NULL, null.ok = FALSE, .var.name = vna
 #' @rdname mlr_assertions
 assert_has_backend = function(task) {
   if (is.null(task$backend)) {
-    stopf("The backend of Task '%s' has been removed. Set `store_backends` to `TRUE` during model fitting to conserve it.", task$id)
Set `store_backends` to `TRUE` during model fitting to conserve it.", task$id) } } @@ -409,10 +409,10 @@ assert_has_backend = function(task) { assert_prediction_count = function(actual, expected, type) { if (actual != expected) { if (actual < expected) { - stopf("Predicted %s not complete, %s for %i observations is missing", + error_learner_predict("Predicted %s not complete, %s for %i observations is missing", type, type, expected - actual) } else { - stopf("Predicted %s contains %i additional predictions without matching rows", + error_learner_predict("Predicted %s contains %i additional predictions without matching rows", type, actual - expected) } } @@ -424,12 +424,12 @@ assert_row_sums = function(prob) { n_missing = count_missing(x) if (n_missing > 0L) { if (n_missing < length(x)) { - stopf("Probabilities for observation %i are partly missing", i) + error_config("Probabilities for observation %i are partly missing", i) } } else { s = sum(x) if (abs(s - 1) > 0.001) { - stopf("Probabilities for observation %i do sum up to %f != 1", i, s) + error_config("Probabilities for observation %i do sum up to %f != 1", i, s) # TODO error_input? } } } @@ -445,11 +445,11 @@ assert_row_sums = function(prob) { assert_quantiles = function(learner, quantile_response = FALSE) { if (is.null(learner$quantiles)) { - stopf("Quantiles must be set via `$quantiles`") + error_config("Quantiles must be set via `$quantiles`") } if (quantile_response && is.null(learner$quantile_response)) { - stopf("Quantile response must be set via `$quantile_response`") + error_config("Quantile response must be set via `$quantile_response`") } invisible(learner) @@ -463,7 +463,7 @@ assert_param_values = function(x, n_learners = NULL, .var.name = vname(x)) { }) if (!ok) { - stopf("'%s' must be a three-time nested list and the most inner list must be named", .var.name) + error_config("'%s' must be a three-time nested list and the most inner list must be named", .var.name) } invisible(x) } @@ -484,16 +484,16 @@ assert_empty_ellipsis = function(...) 
{ } names = ...names() if (is.null(names)) { - stopf("Received %i unnamed argument that was not used.", nx) + error_input("Received %i unnamed argument that was not used.", nx) } names2 = names[nzchar(names)] if (length(names2) == length(names)) { - stopf( + error_input( "Received the following named arguments that were unused: %s.", toString(names2) ) } - stopf( + error_input( "Received unused arguments: %i unnamed, as well as named arguments %s.", length(names) - length(names2), toString(names2) ) diff --git a/R/auto_convert.R b/R/auto_convert.R index 5c10a9a90..62bf433e7 100644 --- a/R/auto_convert.R +++ b/R/auto_convert.R @@ -173,7 +173,7 @@ auto_convert = function(value, id, type, levels) { } if (class(value)[1L] != type) { - stopf("Incompatible types during auto-converting column '%s': failed to convert from class '%s' to class '%s'", id, cl, type) + error_input("Incompatible types during auto-converting column '%s': failed to convert from class '%s' to class '%s'", id, cl, type) } value diff --git a/R/benchmark.R b/R/benchmark.R index 45985f9f4..343f88f35 100644 --- a/R/benchmark.R +++ b/R/benchmark.R @@ -105,12 +105,12 @@ benchmark = function(design, store_models = FALSE, store_backends = TRUE, encaps # check for multiple task types task_types = unique(map_chr(design$task, "task_type")) - if (length(task_types) > 1L) { - stopf("Multiple task types detected, but mixing types is not supported: %s", str_collapse(task_types)) + if (length(task_types) > 1) { + error_input("Multiple task types detected, but mixing types is not supported: %s", str_collapse(task_types)) } learner_types = unique(map_chr(design$learner, "task_type")) - if (length(learner_types) > 1L) { - stopf("Multiple learner types detected, but mixing types is not supported: %s", str_collapse(learner_types)) + if (length(learner_types) > 1) { + error_input("Multiple learner types detected, but mixing types is not supported: %s", str_collapse(learner_types)) } setDT(design) @@ -138,7 +138,7 @@ benchmark = function(design, store_models = FALSE, store_backends = TRUE, encaps # check that all row ids of the resampling are present in the task if (resampling$task_row_hash != task$row_hash) { - stopf("Resampling '%s' is not instantiated on task '%s'", resampling$id, task$id) + error_input("Resampling '%s' is not instantiated on task '%s'", resampling$id, task$id) } data.table( diff --git a/R/benchmark_grid.R b/R/benchmark_grid.R index 334920a14..e1848d8e7 100644 --- a/R/benchmark_grid.R +++ b/R/benchmark_grid.R @@ -87,17 +87,17 @@ benchmark_grid = function(tasks, learners, resamplings, param_values = NULL, pai if (assert_flag(paired)) { if (length(tasks) != length(resamplings)) { - stopf("If `paired` is `TRUE`, you need to provide the same number of tasks and instantiated resamplings") + error_input("If `paired` is `TRUE`, you need to provide the same number of tasks and instantiated resamplings") } for (i in seq_along(tasks)) { task = tasks[[i]] resampling = resamplings[[i]] if (!resamplings[[i]]$is_instantiated) { - stopf("Resampling #%i ('%s' for task '%s') is not instantiated", i, resampling$id, task$id) + error_input("Resampling #%i ('%s' for task '%s') is not instantiated", i, resampling$id, task$id) } if (resampling$task_row_hash != task$row_hash) { - stopf("Resampling #%i ('%s' for task '%s') is not instantiated on the corresponding task", i, resampling$id, task$id) + error_input("Resampling #%i ('%s' for task '%s') is not instantiated on the corresponding task", i, resampling$id, task$id) } } @@ -109,13 +109,13 @@ 
benchmark_grid = function(tasks, learners, resamplings, param_values = NULL, pai if (any(is_instantiated) && !all(is_instantiated)) { # prevent that some resamplings are instantiated and others are not - stopf("All resamplings must be instantiated, or none at all") + error_input("All resamplings must be instantiated, or none at all") } else if (all(is_instantiated)) { # check that all row ids of the resamplings are present in the tasks pwalk(grid, function(task, resampling) { if (!is.null(resamplings[[resampling]]$task_row_hash) && resamplings[[resampling]]$task_row_hash != tasks[[task]]$row_hash) { - stopf("Resampling '%s' is not instantiated on task '%s'", resamplings[[resampling]]$id, tasks[[task]]$id) + error_input("Resampling '%s' is not instantiated on task '%s'", resamplings[[resampling]]$id, tasks[[task]]$id) } }) diff --git a/R/default_fallback.R b/R/default_fallback.R index b2b9729fb..afdd41d80 100644 --- a/R/default_fallback.R +++ b/R/default_fallback.R @@ -30,7 +30,7 @@ default_fallback.LearnerClassif = function(learner, ...) { # set predict type if (learner$predict_type %nin% fallback$predict_types) { - stopf("Fallback learner '%s' does not support predict type '%s'.", fallback$id, learner$predict_type) + error_learner_predict("Fallback learner '%s' does not support predict type '%s'.", fallback$id, learner$predict_type) } fallback$predict_type = learner$predict_type @@ -45,7 +45,7 @@ default_fallback.LearnerRegr = function(learner, ...) { # set predict type if (learner$predict_type %nin% fallback$predict_types) { - stopf("Fallback learner '%s' does not support predict type '%s'.", fallback$id, learner$predict_type) + error_learner_predict("Fallback learner '%s' does not support predict type '%s'.", fallback$id, learner$predict_type) } fallback$predict_type = learner$predict_type @@ -54,7 +54,7 @@ default_fallback.LearnerRegr = function(learner, ...) { if (learner$predict_type == "quantiles") { if (is.null(learner$quantiles) || is.null(learner$quantile_response)) { - stopf("Cannot set quantiles for fallback learner. Set `$quantiles` and `$quantile_response` in %s.", learner$id) + error_learner_predict("Cannot set quantiles for fallback learner. Set `$quantiles` and `$quantile_response` in %s.", learner$id) } fallback$quantiles = learner$quantiles diff --git a/R/helper.R b/R/helper.R index 8533914e9..447142a83 100644 --- a/R/helper.R +++ b/R/helper.R @@ -150,3 +150,11 @@ weighted_mean_sd = function(x, weights) { sd = sqrt(sum(weights * (x - mean)^2) / (weights_sum - sum(weights ^2) / weights_sum)) list(mean = mean, sd = sd) } + +# Alternative formatting function for Task / Learner / Measure because mlr3misc::error_* and warning_* fail with input +# that is formated with angle brackets, e.g. "", +# see https://github.com/r-lib/cli/issues/789 +# Replace this with x$format() when the issue is solved. 
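+# Example (illustrative, assuming a task object such as tsk("iris")):
+#   format_angle_brackets(tsk("iris"))
+#   #> "<<TaskClassif:iris>>"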
+format_angle_brackets = function(x) {
+  sprintf("<<%s:%s>>", class(x)[1L], x$id)
+}
diff --git a/R/helper_exec.R b/R/helper_exec.R
index bd09407db..33751f1e4 100644
--- a/R/helper_exec.R
+++ b/R/helper_exec.R
@@ -11,7 +11,7 @@ set_encapsulation = function(learners, encapsulate) {
   lapply(learners, function(learner) {
     fallback = if (encapsulate != "none") default_fallback(learner)
     if (is.null(fallback)) {
-      stopf("Could not find default fallback learner for learner '%s'", learner$id)
+      error_learner("Could not find default fallback learner for learner '%s'", learner$id)
     }
     learner$encapsulate(encapsulate, fallback)
   })
diff --git a/R/partition.R b/R/partition.R
index f3cfa6ff3..bfda899c2 100644
--- a/R/partition.R
+++ b/R/partition.R
@@ -33,7 +33,7 @@ partition = function(task, ratio = 0.67) {
 partition.Task = function(task, ratio = 0.67) {
   task = task$clone(deep = TRUE)
   if (sum(ratio) >= 1) {
-    stopf("Sum of 'ratio' must be smaller than 1")
+    error_config("Sum of 'ratio' must be smaller than 1")
   }
 
   if (length(ratio) == 1L) {
diff --git a/R/predict.R b/R/predict.R
index 9cab54b5b..b36a850a9 100644
--- a/R/predict.R
+++ b/R/predict.R
@@ -42,7 +42,7 @@ predict.Learner = function(object, newdata, predict_type = NULL, ...) {
     predict_pars = names(tags)[map_lgl(tags, is.element, el = "predict")]
     i = which(names(pars) %nin% predict_pars)
     if (length(i)) {
-      stopf("Unknown parameters: %s", str_collapse(names(pars)[i]))
+      error_config("Unknown parameters: %s", str_collapse(names(pars)[i])) # TODO error_learner_predict?
     }
 
     object = object$clone()
@@ -57,7 +57,7 @@ predict.Learner = function(object, newdata, predict_type = NULL, ...) {
   predict_type = predict_type %??% head(names(mlr_reflections$learner_predict_types[[object$task_type]]), 1L)
   if (predict_type %nin% prediction$predict_types) {
-    stopf("Predict type '%s' not available", predict_type)
+    error_learner_predict("Predict type '%s' not available", predict_type) # TODO: error_config?
   }
 
   prediction[[predict_type]]
diff --git a/R/resample.R b/R/resample.R
index 21b8eda39..02c7edea5 100644
--- a/R/resample.R
+++ b/R/resample.R
@@ -93,7 +93,7 @@ resample = function(
   }
 
   if (!is.null(resampling$task_row_hash) && resampling$task_row_hash != task$row_hash) {
-    stopf("Resampling '%s' is not instantiated on task '%s'", resampling$id, task$id)
+    error_input("Resampling '%s' is not instantiated on task '%s'", resampling$id, task$id)
   }
 
   n = resampling$iters
diff --git a/R/set_validate.R b/R/set_validate.R
index c89a61e00..fefe4f0ab 100644
--- a/R/set_validate.R
+++ b/R/set_validate.R
@@ -27,7 +27,7 @@ set_validate = function(learner, validate, ...) {
 #' @export
 set_validate.Learner = function(learner, validate, ...) {
   if (!"validation" %chin% learner$properties) {
-    stopf("Learner '%s' does not support validation.", learner$id)
+    error_input("Learner '%s' does not support validation.", learner$id) # TODO: error_learner?
   }
   learner$validate = validate
   invisible(learner)
diff --git a/R/warn_deprecated.R b/R/warn_deprecated.R
index 3e9b41259..01c0a57b1 100644
--- a/R/warn_deprecated.R
+++ b/R/warn_deprecated.R
@@ -63,7 +63,7 @@ deprecated_binding = function(what, value) {
     ## 'value' could be an expression that gets substituted here, which we only want to evaluate once
     x = value
     if (!missing(rhs) && !identical(rhs, x)) {
-      stop(sprintf("%s read-only.", what))
+      error_mlr3(sprintf("%s is read-only.", what))
     }
     x
   },
diff --git a/R/worker.R b/R/worker.R
index 380617ce9..9ff11650a 100644
--- a/R/worker.R
+++ b/R/worker.R
@@ -5,7 +5,7 @@ learner_train = function(learner, task, train_row_ids = NULL, test_row_ids = NUL
   # and turned into log messages.
   train_wrapper = function(learner, task) {
     if (task$nrow == 0L) {
-      stopf("Cannot %s Learner '%s' on task '%s': No observations", mode, learner$id, task$id)
+      error_input("Cannot %s Learner '%s' on task '%s': No observations", mode, learner$id, task$id)
     }
 
     model = if (mode == "train") {
@@ -15,7 +15,7 @@ learner_train = function(learner, task, train_row_ids = NULL, test_row_ids = NUL
     }
 
     if (is.null(model)) {
-      stopf("Learner '%s' on task '%s' returned NULL during internal %s()", learner$id, task$id, mode)
+      error_learner_train("Learner '%s' on task '%s' returned NULL during internal %s()", learner$id, task$id, mode)
     }
@@ -90,7 +90,7 @@ learner_train = function(learner, task, train_row_ids = NULL, test_row_ids = NUL
   # modifies the task in place
   create_internal_valid_task(validate, task, test_row_ids, prev_valid, learner)
   if (!is.null(task$internal_valid_task) && !task$internal_valid_task$nrow) {
-    stopf("Internal validation task for task '%s' has 0 observations", task$id)
+    error_learner_train("Internal validation task for task '%s' has 0 observations", task$id)
   }
 
   if (mode == "train") learner$state = list()
@@ -118,7 +118,7 @@ learner_train = function(learner, task, train_row_ids = NULL, test_row_ids = NUL
   log = append_log(NULL, "train", result$log$class, result$log$msg, log_error = !err)
 
   if (err) {
-    stop(cond)
+    stop(cond) # TODO: can this be changed at all to error_*?
   }
 
   train_time = result$elapsed
@@ -183,7 +183,7 @@ learner_predict = function(learner, task, row_ids = NULL) {
     # default method does nothing
     learner$model = unmarshal_model(learner$model, inplace = TRUE)
     if (is.null(learner$state$model)) {
-      stopf("No trained model available for learner '%s' on task '%s'", learner$id, task$id)
+      error_learner_train("No trained model available for learner '%s' on task '%s'", learner$id, task$id)
    }

     result = get_private(learner)$.predict(task)
@@ -201,7 +201,7 @@ learner_predict = function(learner, task, row_ids = NULL) {
       v_predict = mlr_reflections$package_version

       if (!is.null(v_train) && v_train != v_predict) {
-        warningf("Detected version mismatch: Learner '%s' has been trained with mlr3 version '%s', not matching currently installed version '%s'",
+        warning_mlr3("Detected version mismatch: Learner '%s' has been trained with mlr3 version '%s', not matching currently installed version '%s'",
           learner$id, v_train, v_predict)
       }
     }
@@ -236,7 +236,7 @@ learner_predict = function(learner, task, row_ids = NULL) {
     learner$state$predict_time = NA_real_
     cond = error_learner_predict("No model stored", signal = FALSE, class = "Mlr3ErrorLearnerNoModel")
     if (learner_will_err(cond, learner, stage = "predict")) {
-      stop(cond)
+      stop(cond) # TODO: can this be changed at all to error_*?
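+      # `cond` was constructed above with `signal = FALSE`, so it is a plain
+      # condition object; stop(cond) re-signals it with its original class
+      # ("Mlr3ErrorLearnerNoModel") and message intact.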
     }
   } else {
     # call predict with encapsulation
@@ -266,7 +266,7 @@ learner_predict = function(learner, task, row_ids = NULL) {
     learner$state$log = append_log(learner$state$log, "predict", result$log$class, result$log$msg, log_error = !err)

     if (err) {
-      stop(cond)
+      stop(cond) # TODO: can this be changed at all to error_*?
     }

     learner$state$predict_time = sum(learner$state$predict_time, result$elapsed)
@@ -331,7 +331,7 @@ workhorse = function(
     pb(sprintf("%s|%s|i:%i", task$id, learner$id, iteration))
   }
   if ("internal_valid" %chin% learner$predict_sets && is.null(task$internal_valid_task) && is.null(get0("validate", learner))) {
-    stopf("Cannot set the predict_type field of learner '%s' to 'internal_valid' if there is no internal validation task configured", learner$id)
+    error_config("Cannot set the predict_sets field of learner '%s' to 'internal_valid' if there is no internal validation task configured", learner$id)
   }

   # restore settings on the workers
@@ -548,26 +548,26 @@ create_internal_valid_task = function(validate, task, test_row_ids, prev_valid,
   # Otherwise, predict_set = "internal_valid" is ambiguous
   if (!is.null(prev_valid) && (is.numeric(validate) || identical(validate, "test"))) {
-    stopf("Parameter 'validate' of Learner '%s' cannot be set to 'test' or a ratio when internal_valid_task is present", learner$id)
+    error_config("Parameter 'validate' of Learner '%s' cannot be set to 'test' or a ratio when internal_valid_task is present", learner$id)
   }

   if (is.character(validate)) {
     if (validate == "predefined") {
       if (is.null(task$internal_valid_task)) {
-        stopf("Parameter 'validate' is set to 'predefined' but no internal validation task is present. This commonly happens in GraphLearners and can be avoided by configuring the validation data for the GraphLearner via `set_validate(, validate = )`. See https://mlr3book.mlr-org.com/chapters/chapter15/predsets_valid_inttune.html for more information.")
+        error_config("Parameter 'validate' is set to 'predefined' but no internal validation task is present. This commonly happens in GraphLearners and can be avoided by configuring the validation data for the GraphLearner via `set_validate(glrn, validate = values)`. See https://mlr3book.mlr-org.com/chapters/chapter15/predsets_valid_inttune.html for more information.")
       }
       if (!identical(task$target_names, task$internal_valid_task$target_names)) {
-        stopf("Internal validation task '%s' has different target names than primary task '%s', did you modify the task after creating the internal validation task?",
-          task$internal_valid_task$id, task$id)
+        error_input("Internal validation task '%s' has different target names than primary task '%s', did you modify the task after creating the internal validation task?",
+          task$internal_valid_task$id, task$id) # TODO error_config?
       }
       if (!test_permutation(task$feature_names, task$internal_valid_task$feature_names)) {
-        stopf("Internal validation task '%s' has different features than primary task '%s', did you modify the task after creating the internal validation task?",
-          task$internal_valid_task$id, task$id)
+        error_input("Internal validation task '%s' has different features than primary task '%s', did you modify the task after creating the internal validation task?",
+          task$internal_valid_task$id, task$id) # TODO error_config?
       }
       return(task)
     } else { # validate is "test"
       if (is.null(test_row_ids)) {
-        stopf("Parameter 'validate' cannot be set to 'test' when calling train manually.")
+        error_config("Parameter 'validate' cannot be set to 'test' when calling train manually.")
       }
       # at this point, the train rows are already set to the train set, i.e. we don't have to remove the test ids
       # from the primary task (this would cause bugs for resamplings with overlapping train and test set)
diff --git a/R/zzz.R b/R/zzz.R
index 01613a494..e38f9ee1e 100644
--- a/R/zzz.R
+++ b/R/zzz.R
@@ -97,7 +97,7 @@ dummy_import = function() {
 }

   register_namespace_callback(pkgname, "mlr", function(...) {
-    warning("Packages 'mlr3' and 'mlr' are conflicting and should not be loaded in the same session")
+    warning_mlr3("Packages 'mlr3' and 'mlr' are conflicting and should not be loaded in the same session")
   })
 } # nocov end
diff --git a/tests/testthat/test_Learner.R b/tests/testthat/test_Learner.R
index 2cd9553aa..e48ea2e7c 100644
--- a/tests/testthat/test_Learner.R
+++ b/tests/testthat/test_Learner.R
@@ -249,7 +249,7 @@ test_that("learner cannot be trained with TuneToken present", {
   task = tsk("california_housing")
   learner = lrn("regr.rpart", cp = paradox::to_tune(0.1, 0.3))
   expect_error(learner$train(task),
-    regexp = " cannot be trained with TuneToken present in hyperparameter: cp",
+    regexp = " cannot be trained with TuneToken present",
     fixed = TRUE)
 })

@@ -263,7 +263,7 @@ test_that("integer<->numeric conversion in newdata (#533)", {
   learner$train(task)
   expect_prediction(learner$predict_newdata(data))
   expect_prediction(learner$predict_newdata(newdata1))
-  expect_error(learner$predict_newdata(newdata2), "failed to convert from class 'numeric'")
+  expect_error(learner$predict_newdata(newdata2), "class 'numeric' to class 'integer'")
 })

 test_that("weights", {
@@ -802,7 +802,7 @@ test_that("weights are used when appropriate", {
   expect_equal(unname(learner$train(iris_weights_learner)$predict(predict_task)$prob), matrix(c(1, 1, 1) / 3, nrow = 1, ncol = 3))

   learner$use_weights = "error"
-  expect_error(learner$train(iris_weights_learner), "'use_weights' was set to 'error'")
+  expect_error(learner$train(iris_weights_learner), "'use_weights' was set to\n 'error'")

   # behaviour of learner that does not support weights
   llclass = R6Class("dummy", inherit = LearnerClassif,
@@ -820,7 +820,7 @@ test_that("weights are used when appropriate", {
   ll = llclass$new()
   # different error message
-  expect_error(ll$train(iris_weights_learner), "Learner does not support weights")
+  expect_error(ll$train(iris_weights_learner), "Learner does not support\n weights")

   ll$train(iris_weights_measure)
   ll$use_weights = "ignore"
diff --git a/tests/testthat/test_Measure.R b/tests/testthat/test_Measure.R
index 5853d7bef..e76dbcbb3 100644
--- a/tests/testthat/test_Measure.R
+++ b/tests/testthat/test_Measure.R
@@ -140,7 +140,7 @@ test_that("scoring fails when measure requires_model, but model is in marshaled
   pred = learner$train(task)$predict(task)
   learner$marshal()
   expect_error(measure$score(pred, learner = learner, task = task),
-    regexp = "is in marshaled form")
+    regexp = "model is in marshaled")
 })

 test_that("measure weights", {
@@ -171,8 +171,8 @@ test_that("measure weights", {
   m$use_weights = "error"
   expect_equal(prediction_no_weights$score(m), c(classif.acc = 0.5))
   expect_equal(prediction_learner_weights$score(m), c(classif.acc = 0.5))
-  expect_error(prediction_measure_weights$score(m), "since 'use_weights' was set to 'error'")
-  expect_error(m$score(prediction_measure_weights), "since 'use_weights' was set to 'error'")
+  expect_error(prediction_measure_weights$score(m), "'use_weights' was set to 'error'")
+  expect_error(m$score(prediction_measure_weights), "'use_weights' was set to 'error'")

   mauc = msr("classif.mauc_au1p")
   prediction_no_weights = learner$predict(tsk("iris"), row_ids = c(1, 2, 51, 52, 101, 102))
@@ -181,8 +181,8 @@ test_that("measure weights", {
   expect_equal(prediction_no_weights$score(mauc), c(classif.mauc_au1p = 0.5))
   expect_equal(prediction_learner_weights$score(mauc), c(classif.mauc_au1p = 0.5))
-  expect_error(prediction_measure_weights$score(mauc), "cannot be evaluated with weights since the Measure does not support weights")
-  expect_error(mauc$score(prediction_measure_weights), "cannot be evaluated with weights since the Measure does not support weights")
+  expect_error(prediction_measure_weights$score(mauc), "cannot be evaluated with weights")
+  expect_error(mauc$score(prediction_measure_weights), "cannot be evaluated with weights")

   mauc$use_weights = "ignore"
   expect_equal(prediction_measure_weights$score(mauc), c(classif.mauc_au1p = 0.5))
@@ -221,7 +221,7 @@ test_that("measure weights", {
   mauc$use_weights = "error"
   expect_equal(rr_no_weights$score(mauc)$classif.mauc_au1p, c(1, 1) / 2)
   expect_equal(rr_learner_weights$score(mauc)$classif.mauc_au1p, c(1, 1) / 2)
-  expect_error(rr_measure_weights$score(mauc), "cannot be evaluated with weights in .*Task.*since the Measure does not support weights")
+  expect_error(rr_measure_weights$score(mauc), "cannot be evaluated with weights in\n .*Task.*since the Measure does not support\n weights")

   # aggregating resampling with weights
   m$use_weights = "use"
@@ -253,8 +253,8 @@ test_that("measure weights", {
   mauc$use_weights = "error"
   expect_equal(rr_no_weights$aggregate(mauc), c(classif.mauc_au1p = 0.5))
   expect_equal(rr_learner_weights$aggregate(mauc), c(classif.mauc_au1p = 0.5))
-  expect_error(rr_measure_weights$aggregate(mauc), "cannot be evaluated with weights in .*Task.*since the Measure does not support weights")
-  expect_error(mauc$aggregate(rr_measure_weights), "cannot be evaluated with weights in .*Task.*since the Measure does not support weights")
+  expect_error(rr_measure_weights$aggregate(mauc), "cannot be evaluated with weights in\n .*Task.*since the Measure does not support\n weights")
+  expect_error(mauc$aggregate(rr_measure_weights), "cannot be evaluated with weights in\n .*Task.*since the Measure does not support\n weights")

   m$use_weights = "use"
   m$average = "macro_weighted"
diff --git a/tests/testthat/test_benchmark.R b/tests/testthat/test_benchmark.R
index 87d0362bf..73723e010 100644
--- a/tests/testthat/test_benchmark.R
+++ b/tests/testthat/test_benchmark.R
@@ -657,7 +657,7 @@ test_that("benchmark allows that param_values overwrites tune token", {
   learner = lrn("classif.rpart", cp = to_tune(0.01, 0.1))
   design = benchmark_grid(tsk("pima"), learner, rsmp("cv", folds = 3))
-  expect_error(benchmark(design), "cannot be trained with TuneToken present in hyperparameter")
+  expect_error(benchmark(design), "cannot be trained with TuneToken present")
 })

 test_that("uhash_table works", {
diff --git a/tests/testthat/test_resample.R b/tests/testthat/test_resample.R
index c23d0f541..26e6ca6a1 100644
--- a/tests/testthat/test_resample.R
+++ b/tests/testthat/test_resample.R
@@ -376,7 +376,7 @@ test_that("can even use internal_valid predict set on learners that don't suppor
   task = tsk("mtcars")
   task$internal_valid_task = 1:10
   rr = resample(task, lrn("regr.debug", predict_sets = "internal_valid"), rsmp("holdout"))
-  expect_warning(rr$score(), "only predicted on sets")
+  expect_warning(rr$score(), "predicted on sets 'internal_valid'")
 })

 test_that("callr during prediction triggers marshaling", {