diff --git a/benchmark/http/headers.js b/benchmark/http/headers.js
index 62612d9fda1911..d1dfec6f93b771 100644
--- a/benchmark/http/headers.js
+++ b/benchmark/http/headers.js
@@ -27,7 +27,7 @@ function main({ len, n, duration }) {
     'Transfer-Encoding': 'chunked',
   };
-  const Is = [...Array(n / len).keys()];
+  const Is = [...Array(parseInt(n / len)).keys()];
   const Js = [...Array(len).keys()];
   for (const i of Is) {
diff --git a/benchmark/test_runner/global-concurrent-tests.js b/benchmark/test_runner/global-concurrent-tests.js
index 6e3fb5c8ba5a1b..8c63dab9262f3c 100644
--- a/benchmark/test_runner/global-concurrent-tests.js
+++ b/benchmark/test_runner/global-concurrent-tests.js
@@ -2,13 +2,20 @@
 const common = require('../common');
 const { it } = require('node:test');
-const bench = common.createBenchmark(main, {
-  n: [100, 1000, 1e4],
-  type: ['sync', 'async'],
-}, {
-  // We don't want to test the reporter here
-  flags: ['--test-reporter=./benchmark/fixtures/empty-test-reporter.js'],
-});
+const bench = common.createBenchmark(
+  main,
+  {
+    n: [100, 1000, 1e4],
+    type: ['sync', 'async'],
+  },
+  {
+    // We don't want to test the reporter here
+    flags: [
+      '--test-reporter=./benchmark/fixtures/empty-test-reporter.js',
+      '--test-reporter-destination=stdout',
+    ],
+  },
+);
 async function run(n, type) {
   const promises = new Array(n);
diff --git a/benchmark/test_runner/global-sequential-tests.js b/benchmark/test_runner/global-sequential-tests.js
index 296022717c9b2f..d45108bfcda343 100644
--- a/benchmark/test_runner/global-sequential-tests.js
+++ b/benchmark/test_runner/global-sequential-tests.js
@@ -2,14 +2,20 @@
 const common = require('../common');
 const { it } = require('node:test');
-
-const bench = common.createBenchmark(main, {
-  n: [100, 1000, 1e4],
-  type: ['sync', 'async'],
-}, {
-  // We don't want to test the reporter here
-  flags: ['--test-reporter=./benchmark/fixtures/empty-test-reporter.js'],
-});
+const bench = common.createBenchmark(
+  main,
+  {
+    n: [100, 1000, 1e4],
+    type: ['sync', 'async'],
+  },
+  {
+    // We don't want to test the reporter here
+    flags: [
+      '--test-reporter=./benchmark/fixtures/empty-test-reporter.js',
+      '--test-reporter-destination=stdout',
+    ],
+  },
+);
 async function run(n, type) {
   // eslint-disable-next-line no-unused-vars
diff --git a/benchmark/test_runner/mock-fn.js b/benchmark/test_runner/mock-fn.js
index 6489ccf815e294..e1298a74765d57 100644
--- a/benchmark/test_runner/mock-fn.js
+++ b/benchmark/test_runner/mock-fn.js
@@ -4,13 +4,20 @@
 const common = require('../common');
 const assert = require('node:assert');
 const { test } = require('node:test');
-const bench = common.createBenchmark(main, {
-  n: [1e6],
-  mode: ['define', 'execute'],
-}, {
-  // We don't want to test the reporter here
-  flags: ['--test-reporter=./benchmark/fixtures/empty-test-reporter.js'],
-});
+const bench = common.createBenchmark(
+  main,
+  {
+    n: [1e6],
+    mode: ['define', 'execute'],
+  },
+  {
+    // We don't want to test the reporter here
+    flags: [
+      '--test-reporter=./benchmark/fixtures/empty-test-reporter.js',
+      '--test-reporter-destination=stdout',
+    ],
+  },
+);
 const noop = () => {};
diff --git a/benchmark/test_runner/run-single-test-file.js b/benchmark/test_runner/run-single-test-file.js
index 00e95e088a223e..62a56f1e4a4959 100644
--- a/benchmark/test_runner/run-single-test-file.js
+++ b/benchmark/test_runner/run-single-test-file.js
@@ -31,13 +31,20 @@ function setup(numberOfTestFiles) {
  * Specifically, it compares the performance of running tests in the
 * same process versus creating multiple processes.
 */
-const bench = common.createBenchmark(main, {
-  numberOfTestFiles: [1, 10, 100],
-  isolation: ['none', 'process'],
-}, {
-  // We don't want to test the reporter here
-  flags: ['--test-reporter=./benchmark/fixtures/empty-test-reporter.js'],
-});
+const bench = common.createBenchmark(
+  main,
+  {
+    numberOfTestFiles: [1, 10, 100],
+    isolation: ['none', 'process'],
+  },
+  {
+    // We don't want to test the reporter here
+    flags: [
+      '--test-reporter=./benchmark/fixtures/empty-test-reporter.js',
+      '--test-reporter-destination=stdout',
+    ],
+  },
+);
 async function runBenchmark({ numberOfTestFiles, isolation }) {
   const dirPath = getTestDirPath(numberOfTestFiles);
diff --git a/benchmark/test_runner/suite-tests.js b/benchmark/test_runner/suite-tests.js
index 88b2aa1c74b19e..4de1b71624b944 100644
--- a/benchmark/test_runner/suite-tests.js
+++ b/benchmark/test_runner/suite-tests.js
@@ -6,15 +6,22 @@
 const reporter = require('../fixtures/empty-test-reporter');
 const { describe, it } = require('node:test');
-const bench = common.createBenchmark(main, {
-  numberOfSuites: [10, 100],
-  testsPerSuite: [10, 100, 1000],
-  testType: ['sync', 'async'],
-  concurrency: ['yes', 'no'],
-}, {
-  // We don't want to test the reporter here
-  flags: ['--test-reporter=./benchmark/fixtures/empty-test-reporter.js'],
-});
+const bench = common.createBenchmark(
+  main,
+  {
+    numberOfSuites: [10, 100],
+    testsPerSuite: [10, 100, 1000],
+    testType: ['sync', 'async'],
+    concurrency: ['yes', 'no'],
+  },
+  {
+    // We don't want to test the reporter here
+    flags: [
+      '--test-reporter=./benchmark/fixtures/empty-test-reporter.js',
+      '--test-reporter-destination=stdout',
+    ],
+  },
+);
 async function run({ numberOfSuites, testsPerSuite, testType, concurrency }) {
   concurrency = concurrency === 'yes';
diff --git a/doc/contributing/writing-and-running-benchmarks.md b/doc/contributing/writing-and-running-benchmarks.md
index a60a06b695f242..770fc8b9b57303 100644
--- a/doc/contributing/writing-and-running-benchmarks.md
+++ b/doc/contributing/writing-and-running-benchmarks.md
@@ -700,6 +700,63 @@
 Supported options keys are:
 * `benchmarker` - benchmarker to use, defaults to the first available http benchmarker
+### Creating benchmark tests
+
+It is recommended to create a new test file in `test/benchmark` whenever a new
+benchmark is introduced, so that the new benchmark is covered by the test suite.
+
+When calling `runBenchmark`, provide the benchmark group name
+(which is the folder name in the `benchmark/` folder) as the first parameter,
+and optionally pass environment variables as the second parameter.
+
+```js
+'use strict';
+
+require('../common'); // Import the common module - required for all benchmark files
+
+const runBenchmark = require('../common/benchmark');
+
+runBenchmark('buffers', { NODEJS_BENCHMARK_ZERO_ALLOWED: 1 });
+```
+
+The environment variable `NODEJS_BENCHMARK_ZERO_ALLOWED` is required when tests
+execute so quickly that they may report a rate of zero, which would otherwise be
+treated as an error; setting it instructs the benchmark runner to accept such results.
+
+Test execution behavior depends on the `NODE_RUN_ALL_BENCH_TESTS` environment variable.
+When set to **true**, benchmarks run with minimal iterations (`n=1`, `roundtrips=1`).
+This approach bypasses performance analysis to verify that tests can complete without failures.
+Despite the minimal iterations, execution remains time-consuming
+as all configurations must be tested.
+
+When `NODE_RUN_ALL_BENCH_TESTS` is not set,
+only a single configuration per benchmark executes.
+While this dramatically reduces execution time, it provides limited coverage
+and cannot guarantee that all configurations function properly.
+
+This PR introduces a new environment variable, `NODE_RUN_ALL_BENCH_TESTS`,
+which can be set to run all benchmark configurations in tests and cover more scenarios where benchmarks might fail.
+This PR also documents how to write benchmark tests and provides more details about the environment variables:
+
+* NODE\_RUN\_ALL\_BENCH\_TESTS
+* NODEJS\_BENCHMARK\_ZERO\_ALLOWED
+
+Benchmark tests were added for the following groups:
+
+* abort\_controller
+* error
+* https
+* perf\_hooks
+* permission
+* sqlite
+* test\_runner
+* websocket
+
+Additionally, some inconsistently named test files were renamed:
+
+* test/benchmark/test-benchmark-async-hooks.js → test/benchmark/test-benchmark-async\_hooks.js
+* test/benchmark/test-benchmark-child-process.js → test/benchmark/test-benchmark-child\_process.js
+
 [autocannon]: https://github.com/mcollina/autocannon
 [benchmark-ci]: https://github.com/nodejs/benchmarking/blob/HEAD/docs/core_benchmarks.md
 [git-for-windows]: https://git-scm.com/download/win
diff --git a/test/benchmark/test-benchmark-abort_controller.js b/test/benchmark/test-benchmark-abort_controller.js
new file mode 100644
index 00000000000000..08ac700d6c1464
--- /dev/null
+++ b/test/benchmark/test-benchmark-abort_controller.js
@@ -0,0 +1,10 @@
+'use strict';
+
+require('../common');
+
+// Minimal test for abort_controller benchmarks. This makes sure the benchmarks
+// aren't completely broken but nothing more than that.
+
+const runBenchmark = require('../common/benchmark');
+
+runBenchmark('abort_controller');
diff --git a/test/benchmark/test-benchmark-async-hooks.js b/test/benchmark/test-benchmark-async_hooks.js
similarity index 100%
rename from test/benchmark/test-benchmark-async-hooks.js
rename to test/benchmark/test-benchmark-async_hooks.js
diff --git a/test/benchmark/test-benchmark-child-process.js b/test/benchmark/test-benchmark-child_process.js
similarity index 100%
rename from test/benchmark/test-benchmark-child-process.js
rename to test/benchmark/test-benchmark-child_process.js
diff --git a/test/benchmark/test-benchmark-error.js b/test/benchmark/test-benchmark-error.js
new file mode 100644
index 00000000000000..d48222bb650048
--- /dev/null
+++ b/test/benchmark/test-benchmark-error.js
@@ -0,0 +1,7 @@
+'use strict';
+
+require('../common');
+
+const runBenchmark = require('../common/benchmark');
+
+runBenchmark('error');
diff --git a/test/benchmark/test-benchmark-https.js b/test/benchmark/test-benchmark-https.js
new file mode 100644
index 00000000000000..58764600c49e5d
--- /dev/null
+++ b/test/benchmark/test-benchmark-https.js
@@ -0,0 +1,14 @@
+'use strict';
+
+const common = require('../common');
+
+if (!common.enoughTestMem)
+  common.skip('Insufficient memory for HTTPS benchmark test');
+
+// Because the https benchmarks use hardcoded ports, this should be in sequential
+// rather than parallel to make sure it does not conflict with tests that choose
+// random available ports.
+
+const runBenchmark = require('../common/benchmark');
+
+runBenchmark('https');
diff --git a/test/benchmark/test-benchmark-perf_hooks.js b/test/benchmark/test-benchmark-perf_hooks.js
new file mode 100644
index 00000000000000..2c3bb4a59d4d8b
--- /dev/null
+++ b/test/benchmark/test-benchmark-perf_hooks.js
@@ -0,0 +1,7 @@
+'use strict';
+
+require('../common');
+
+const runBenchmark = require('../common/benchmark');
+
+runBenchmark('perf_hooks');
diff --git a/test/benchmark/test-benchmark-permission.js b/test/benchmark/test-benchmark-permission.js
new file mode 100644
index 00000000000000..dd7ea6feda4269
--- /dev/null
+++ b/test/benchmark/test-benchmark-permission.js
@@ -0,0 +1,7 @@
+'use strict';
+
+require('../common');
+
+const runBenchmark = require('../common/benchmark');
+
+runBenchmark('permission');
diff --git a/test/benchmark/test-bechmark-readline.js b/test/benchmark/test-benchmark-readline.js
similarity index 100%
rename from test/benchmark/test-bechmark-readline.js
rename to test/benchmark/test-benchmark-readline.js
diff --git a/test/benchmark/test-benchmark-source-map.js b/test/benchmark/test-benchmark-source_map.js
similarity index 100%
rename from test/benchmark/test-benchmark-source-map.js
rename to test/benchmark/test-benchmark-source_map.js
diff --git a/test/benchmark/test-benchmark-sqlite.js b/test/benchmark/test-benchmark-sqlite.js
new file mode 100644
index 00000000000000..86be2e2a544818
--- /dev/null
+++ b/test/benchmark/test-benchmark-sqlite.js
@@ -0,0 +1,7 @@
+'use strict';
+
+require('../common');
+
+const runBenchmark = require('../common/benchmark');
+
+runBenchmark('sqlite');
diff --git a/test/benchmark/test-benchmark-test_runner.js b/test/benchmark/test-benchmark-test_runner.js
new file mode 100644
index 00000000000000..7702b69bcb93b7
--- /dev/null
+++ b/test/benchmark/test-benchmark-test_runner.js
@@ -0,0 +1,7 @@
+'use strict';
+
+require('../common');
+
+const runBenchmark = require('../common/benchmark');
+
+runBenchmark('test_runner', { NODEJS_BENCHMARK_ZERO_ALLOWED: 1 });
diff --git a/test/benchmark/test-benchmark-websocket.js b/test/benchmark/test-benchmark-websocket.js
new file mode 100644
index 00000000000000..d02e46a76b4e9e
--- /dev/null
+++ b/test/benchmark/test-benchmark-websocket.js
@@ -0,0 +1,9 @@
+'use strict';
+
+const common = require('../common');
+if (!common.enoughTestMem)
+  common.skip('Insufficient memory for WebSocket benchmark test');
+
+const runBenchmark = require('../common/benchmark');
+
+runBenchmark('websocket', { NODEJS_BENCHMARK_ZERO_ALLOWED: 1 });
diff --git a/test/common/benchmark.js b/test/common/benchmark.js
index 7211ff8703e0e8..03d08b2e7ff3ab 100644
--- a/test/common/benchmark.js
+++ b/test/common/benchmark.js
@@ -7,7 +7,7 @@
 const path = require('path');
 const runjs = path.join(__dirname, '..', '..', 'benchmark', 'run.js');
 function runBenchmark(name, env) {
-  const argv = ['test'];
+  const argv = process.env.NODE_RUN_ALL_BENCH_TESTS ? ['--set', 'n=1', '--set', 'roundtrips=1'] : ['test'];
   argv.push(name);
@@ -28,8 +28,11 @@ function runBenchmark(name, env) {
     assert.strictEqual(code, 0);
     assert.strictEqual(signal, null);
-    // This bit makes sure that each benchmark file is being sent settings such
-    // that the benchmark file runs just one set of options. This helps keep the
+    // If NODE_RUN_ALL_BENCH_TESTS is passed it means all the configs need to be run
+    if (process.env.NODE_RUN_ALL_BENCH_TESTS) return;
+
+    // If NODE_RUN_ALL_BENCH_TESTS isn't passed this bit makes sure that each benchmark file
+    // is being sent settings such that the benchmark file runs just one set of options. This helps keep the
     // benchmark tests from taking a long time to run. Therefore, stdout should be composed as follows:
     // The first and last lines should be empty.
     // Each test should be separated by a blank line.
@@ -37,7 +40,6 @@
     // The second line of each test should contain the configuration for the test.
     // If the test configuration is not a group, there should be exactly two lines.
     // Otherwise, it is possible to have more than two lines.
-
     const splitTests = stdout.split(/\n\s*\n/);
     for (let testIdx = 1; testIdx < splitTests.length - 1; testIdx++) {
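
For reference, the argument selection introduced in `test/common/benchmark.js` above boils down to the following standalone sketch. It is simplified and illustrative only: the `buildArgv` wrapper is not part of the real file, and the real helper goes on to launch `benchmark/run.js` with these arguments and check its exit code and output, which is omitted here.

```js
'use strict';

// Sketch of the argv selection performed by test/common/benchmark.js after
// this change. With NODE_RUN_ALL_BENCH_TESTS set, every benchmark
// configuration runs, clamped to a single iteration; without it, the 'test'
// argument keeps each benchmark file to just one set of options.
function buildArgv(name) {
  const argv = process.env.NODE_RUN_ALL_BENCH_TESTS ?
    ['--set', 'n=1', '--set', 'roundtrips=1'] :
    ['test'];
  argv.push(name);
  return argv;
}

console.log(buildArgv('error'));
// Default:                          [ 'test', 'error' ]
// With NODE_RUN_ALL_BENCH_TESTS=1:  [ '--set', 'n=1', '--set', 'roundtrips=1', 'error' ]
```

Assuming the benchmark tests remain plain Node.js scripts, a contributor could exercise every configuration of a single group with an invocation along the lines of `NODE_RUN_ALL_BENCH_TESTS=1 node test/benchmark/test-benchmark-error.js` (illustrative, not a documented workflow).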