diff --git a/NEWS.md b/NEWS.md
index 14c09be3c..e0b284f00 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -1,5 +1,6 @@
 # testthat (development version)
 
+* When running a test interactively, testthat now reports the number of successes. The results should also be more useful if you are using nested tests.
 * The hints generated by `expect_snapshot()` and `expect_snapshot_file()` now include the path to the package, if it's not in the current working directory (#1577).
 * `expect_snapshot_file()` now clearly errors if the `path` doesn't exist (#2191).
 * `expect_snapshot_file()` now considers `.json` to be a text file (#1593).
diff --git a/R/local.R b/R/local.R
index f7cc22bc0..996186573 100644
--- a/R/local.R
+++ b/R/local.R
@@ -201,7 +201,6 @@ local_interactive_reporter <- function(.env = parent.frame()) {
   reporter <- StopReporter$new()
   old <- set_reporter(reporter)
   withr::defer(reporter$end_reporter(), envir = .env)
-  withr::defer(reporter$stop_if_needed(), envir = .env)
   withr::defer(set_reporter(old), envir = .env)
 
   reporter
diff --git a/R/praise.R b/R/praise.R
index b73b63e17..a0dc1ee15 100644
--- a/R/praise.R
+++ b/R/praise.R
@@ -43,7 +43,7 @@ praise_emoji <- function() {
     "\U0001f389", # party popper
     "\U0001f38a" # confetti ball
   )
-  sample(emoji, 1)
+  paste0(" ", sample(emoji, 1))
 }
 
 encourage <- function() {
diff --git a/R/reporter-progress.R b/R/reporter-progress.R
index 984b96526..748218226 100644
--- a/R/reporter-progress.R
+++ b/R/reporter-progress.R
@@ -558,7 +558,7 @@ spinner <- function(frames, i) {
   frames[((i - 1) %% length(frames)) + 1]
 }
 
-issue_header <- function(x, pad = FALSE) {
+issue_header <- function(x, pad = FALSE, location = TRUE) {
   type <- expectation_type(x)
   if (has_colour()) {
     type <- colourise(first_upper(type), type)
@@ -569,11 +569,11 @@
     type <- strpad(type, 7)
   }
 
-  paste0(type, expectation_location(x, " (", ")"), ": ", x$test)
+  paste0(type, if (location) expectation_location(x, " (", ")"), ": ", x$test)
 }
 
-issue_summary <- function(x, rule = FALSE) {
-  header <- cli::style_bold(issue_header(x))
+issue_summary <- function(x, rule = FALSE, location = TRUE) {
+  header <- cli::style_bold(issue_header(x, location = location))
   if (rule) {
     # Don't truncate long test names
     width <- max(cli::ansi_nchar(header) + 6, getOption("width"))
diff --git a/R/reporter-stop.R b/R/reporter-stop.R
index 4c9f2a67b..af61c764e 100644
--- a/R/reporter-stop.R
+++ b/R/reporter-stop.R
@@ -2,12 +2,8 @@
 #'
 #' @description
 #' The default reporter used when [expect_that()] is run interactively.
-#' It responds by [stop()]ping on failures and doing nothing otherwise. This
-#' will ensure that a failing test will raise an error.
-#'
-#' This should be used when doing a quick and dirty test, or during the final
-#' automated testing of R CMD check. Otherwise, use a reporter that runs all
-#' tests and gives you more context about the problem.
+#' It responds by displaying a summary of the number of successes and failures
+#' and [stop()]ping if there are any failures.
 #'
 #' @export
 #' @family reporters
@@ -21,18 +17,22 @@ StopReporter <- R6::R6Class(
     n_fail = 0L,
     # Successful expectations
     n_success = 0L,
-    stop_reporter = TRUE,
     praise = TRUE,
+    depth = 0,
 
-    initialize = function(stop_reporter = TRUE, praise = TRUE) {
+    initialize = function(praise = TRUE) {
       super$initialize()
       self$issues <- Stack$new()
       self$praise <- praise
-      self$stop_reporter <- stop_reporter
     },
 
     start_test = function(context, test) {
-      self$issues <- Stack$new()
+      if (self$depth == 0) {
+        self$n_fail <- 0L
+        self$n_success <- 0L
+        self$issues <- Stack$new()
+      }
+      self$depth <- self$depth + 1
     },
 
     add_result = function(context, test, result) {
@@ -45,25 +45,32 @@
         self$n_fail <- self$n_fail + 1
       }
       self$issues$push(result)
-
-      self$local_user_output()
-      self$cat_line(issue_summary(result, rule = TRUE), "\n")
     },
 
-    end_reporter = function(context, test) {
+    end_test = function(context, test) {
+      self$depth <- self$depth - 1
+      if (self$depth > 0) {
+        return()
+      }
+
       self$local_user_output()
 
-      if (self$issues$size() == 0) {
-        if (self$praise && self$n_success > 0) {
-          emoji <- praise_emoji()
-          self$cat_line(colourise("Test passed", "success"), " ", emoji)
-        }
+      for (issue in self$issues$as_list()) {
+        self$cat_line(issue_summary(issue, rule = TRUE, location = FALSE))
+      }
+
+      if (self$praise && self$n_fail == 0 && self$n_success > 0) {
+        emoji <- praise_emoji()
+        self$cat_line(cli::format_inline(
+          "{.strong Test passed with {self$n_success} success{?es}{emoji}}."
+        ))
       }
-    },
 
-    stop_if_needed = function() {
-      if (self$stop_reporter && self$n_fail > 0) {
-        cli::cli_abort("Test failed.", call = NULL)
+      if (self$n_fail > 0) {
+        cli::cli_abort(
+          "Test failed with {self$n_fail} failure{?s} and {self$n_success} success{?es}.",
+          call = NULL
+        )
       }
     }
   )
diff --git a/man/StopReporter.Rd b/man/StopReporter.Rd
index f2cee26c4..012193017 100644
--- a/man/StopReporter.Rd
+++ b/man/StopReporter.Rd
@@ -5,12 +5,8 @@
\title{Error if any test fails}
\description{
The default reporter used when \code{\link[=expect_that]{expect_that()}} is run interactively.
-It responds by \code{\link[=stop]{stop()}}ping on failures and doing nothing otherwise. This
-will ensure that a failing test will raise an error.
-
-This should be used when doing a quick and dirty test, or during the final
-automated testing of R CMD check. Otherwise, use a reporter that runs all
-tests and gives you more context about the problem.
+It responds by displaying a summary of the number of successes and failures
+and \code{\link[=stop]{stop()}}ping if there are any failures.
}
\seealso{
Other reporters:
diff --git a/tests/testthat/_snaps/reporter-stop.md b/tests/testthat/_snaps/reporter-stop.md
index c57289735..4084fce3b 100644
--- a/tests/testthat/_snaps/reporter-stop.md
+++ b/tests/testthat/_snaps/reporter-stop.md
@@ -1,59 +1,46 @@
 # produces useful output
 
-    
-    -- Failure ('reporters/tests.R:13:3'): Failure:1 -------------------------------
-    Expected `x` to be TRUE.
-    Differences:
-    `actual`:   FALSE
-    `expected`: TRUE
-    
-    -- Failure ('reporters/tests.R:17:8'): Failure:2a ------------------------------
-    Expected FALSE to be TRUE.
-    Differences:
-    `actual`:   FALSE
-    `expected`: TRUE
-    
-    Backtrace:
-        x
-     1. \-f()
-     2.   \-testthat::expect_true(FALSE)
-    
-    -- Error ('reporters/tests.R:24:3'): Error:1 -----------------------------------
-    Error in `eval(code, test_env)`: stop
-    
-    -- Error ('reporters/tests.R:30:8'): errors get tracebacks ---------------------
-    Error in `h()`: !
-    Backtrace:
-        x
-     1. \-f()
-     2.   \-g()
-     3.     \-h()
-    
-    -- Skip ('reporters/tests.R:38:3'): explicit skips are reported ----------------
-    Reason: skip
-    
-    -- Skip ('reporters/tests.R:41:1'): empty tests are implicitly skipped ---------
-    Reason: empty test
-    
-    -- Warning ('reporters/tests.R:47:5'): warnings get backtraces -----------------
-    def
-    Backtrace:
-        x
-     1. \-f()
-    
-    -- Skip ('reporters/tests.R:45:1'): warnings get backtraces --------------------
-    Reason: empty test
-    
+    Code
+      with_reporter("stop", run_tests())
+    Output
+      Test passed with 1 success.
+      -- Failure: Failure:1 ----------------------------------------------------------
+      Expected `x` to be TRUE.
+      Differences:
+      `actual`:   FALSE
+      `expected`: TRUE
+      
+    Condition
+      Error:
+      ! Test failed with 1 failure and 0 successes.
 
-# can suppress praise
+# works nicely with nested tests
 
-    
+    Code
+      with_reporter("stop", run_tests())
+    Output
+      Test passed with 2 successes.
+      -- Failure: failed then succeeded / failed-1 -----------------------------------
+      Expected FALSE to be TRUE.
+      Differences:
+      `actual`:   FALSE
+      `expected`: TRUE
+      
+      -- Failure: failed then succeeded / failed-2 -----------------------------------
+      Expected FALSE to be TRUE.
+      Differences:
+      `actual`:   FALSE
+      `expected`: TRUE
+      
+    Condition
+      Error:
+      ! Test failed with 2 failures and 1 success.
 
-# stop if needed errors when needed
+# errors when needed
 
     Code
-      r$stop_if_needed()
+      r$end_test()
     Condition
       Error:
-      ! Test failed.
+      ! Test failed with 1 failure and 0 successes.
 
diff --git a/tests/testthat/reporters/nested.R b/tests/testthat/reporters/nested.R
new file mode 100644
index 000000000..2121b013c
--- /dev/null
+++ b/tests/testthat/reporters/nested.R
@@ -0,0 +1,10 @@
+describe("succeeded", {
+  it("succeeded-1", expect_true(TRUE))
+  it("succeeded-2", expect_true(TRUE))
+})
+
+describe("failed then succeeded", {
+  it("failed-1", expect_true(FALSE))
+  it("failed-2", expect_true(FALSE))
+  it("succeeded", expect_true(TRUE))
+})
diff --git a/tests/testthat/test-reporter-stop.R b/tests/testthat/test-reporter-stop.R
index 2b05e2b3f..9cc923963 100644
--- a/tests/testthat/test-reporter-stop.R
+++ b/tests/testthat/test-reporter-stop.R
@@ -1,19 +1,29 @@
+# We can't use expect_snapshot_reporter() here because it uses test_one_file(),
+# which wraps code in `test_code()`, which turns the error into a test failure.
+# It also only captures the output, but we also want to see the error.
+
 test_that("produces useful output", {
-  expect_snapshot_reporter(StopReporter$new())
+  run_tests <- \() source(test_path("reporters/tests.R"))
+  expect_snapshot(with_reporter("stop", run_tests()), error = TRUE)
 })
 
 test_that("can suppress praise", {
-  expect_snapshot_reporter(
-    StopReporter$new(praise = FALSE),
-    test_path("reporters/successes.R")
-  )
+  run_tests <- \() source(test_path("reporters/successes.R"))
+  expect_silent(with_reporter(StopReporter$new(praise = FALSE), run_tests()))
+})
+
+test_that("works nicely with nested tests", {
+  run_tests <- \() source(test_path("reporters/nested.R"))
+  expect_snapshot(with_reporter("stop", run_tests()), error = TRUE)
 })
 
-test_that("stop if needed errors when needed", {
+test_that("errors when needed", {
   r <- StopReporter$new()
-  expect_no_error(r$stop_if_needed())
+  r$start_test()
+  expect_no_error(r$end_test())
+
+  r$start_test()
   r$n_fail <- 1
-  expect_snapshot(error = TRUE, r$stop_if_needed())
-  r$stop_reporter <- FALSE
-  expect_no_error(r$stop_if_needed())
+  r$n_success <- 0
+  expect_snapshot(error = TRUE, r$end_test())
 })
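
Reviewer note (not part of the patch): a quick way to exercise the new
behaviour is to run a nested test at an interactive console, so the
StopReporter is used. This is a sketch; describe(), it(), and expect_equal()
are existing testthat functions, but the console output shown is abridged
and assumes this patch is installed.

    library(testthat)

    # Interactively, test code runs under the StopReporter. With this patch it
    # tallies every expectation across the whole top-level test, including
    # nested describe()/it() blocks, and reports one summary at the end instead
    # of printing each failure as it happens and erroring with a bare
    # "Test failed.".
    describe("arithmetic", {
      it("adds", expect_equal(1 + 1, 2))
      it("subtracts", expect_equal(3 - 1, 2))
      it("is broken", expect_equal(1 + 1, 3))  # the one failing expectation
    })
    #> -- Failure: arithmetic / is broken ------------------------------------
    #> ... (diff of actual vs expected) ...
    #>
    #> Error:
    #> ! Test failed with 1 failure and 2 successes.

The depth counter is what makes the nested case work: the counts and the
issue stack reset only when the outermost start_test() fires, and the summary
(or error) is emitted only once the matching end_test() returns depth to zero.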