diff --git a/.evergreen/config.yml b/.evergreen/config.yml
index c54c688e46..ee29d65f2b 100644
--- a/.evergreen/config.yml
+++ b/.evergreen/config.yml
@@ -304,17 +304,6 @@ functions:
         args:
           - ${DRIVERS_TOOLS}/.evergreen/atlas/teardown-atlas-cluster.sh
 
-  "run perf tests":
-    - command: subprocess.exec
-      type: test
-      params:
-        working_dir: "src"
-        binary: bash
-        include_expansions_in_env: [SUB_TEST_NAME]
-        args:
-          - .evergreen/scripts/run-with-env.sh
-          - .evergreen/scripts/run-perf-tests.sh
-
   "attach benchmark test results":
     - command: attach.results
       params:
@@ -461,84 +450,6 @@ tasks:
     commands:
       - func: "download and merge coverage"
 
-  - name: "perf-6.0-standalone"
-    tags: ["perf"]
-    commands:
-      - func: "run server"
-        vars:
-          VERSION: "v6.0-perf"
-      - func: "run perf tests"
-        vars:
-          SUB_TEST_NAME: "sync"
-      - func: "attach benchmark test results"
-      - func: "send dashboard data"
-
-  - name: "perf-6.0-standalone-ssl"
-    tags: ["perf"]
-    commands:
-      - func: "run server"
-        vars:
-          VERSION: "v6.0-perf"
-          SSL: "ssl"
-      - func: "run perf tests"
-        vars:
-          SUB_TEST_NAME: "sync"
-      - func: "attach benchmark test results"
-      - func: "send dashboard data"
-
-  - name: "perf-8.0-standalone"
-    tags: ["perf"]
-    commands:
-      - func: "run server"
-        vars:
-          VERSION: "8.0"
-      - func: "run perf tests"
-        vars:
-          SUB_TEST_NAME: "sync"
-      - func: "attach benchmark test results"
-      - func: "send dashboard data"
-
-  - name: "perf-6.0-standalone-async"
-    tags: [ "perf" ]
-    commands:
-      - func: "run server"
-        vars:
-          VERSION: "v6.0-perf"
-          TOPOLOGY: "server"
-      - func: "run perf tests"
-        vars:
-          SUB_TEST_NAME: "async"
-      - func: "attach benchmark test results"
-      - func: "send dashboard data"
-
-  - name: "perf-6.0-standalone-ssl-async"
-    tags: [ "perf" ]
-    commands:
-      - func: "run server"
-        vars:
-          VERSION: "v6.0-perf"
-          TOPOLOGY: "server"
-          SSL: "ssl"
-      - func: "run perf tests"
-        vars:
-          SUB_TEST_NAME: "async"
-      - func: "attach benchmark test results"
-      - func: "send dashboard data"
-
-  - name: "perf-8.0-standalone-async"
-    tags: [ "perf" ]
-    commands:
-      - func: "run server"
-        vars:
-          VERSION: "8.0"
-          TOPOLOGY: "server"
-      - func: "run perf tests"
-        vars:
-          SUB_TEST_NAME: "async"
-      - func: "attach benchmark test results"
-      - func: "send dashboard data"
-
-
   - name: "check-import-time"
     tags: ["pr"]
     commands:
@@ -610,15 +521,3 @@ buildvariants:
     - rhel8.7-small
   tasks:
     - name: "backport-pr"
-
-- name: "perf-tests"
-  display_name: "Performance Benchmarks"
-  batchtime: 10080 # 7 days
-  run_on: rhel90-dbx-perf-large
-  tasks:
-    - name: "perf-6.0-standalone"
-    - name: "perf-6.0-standalone-ssl"
-    - name: "perf-8.0-standalone"
-    - name: "perf-6.0-standalone-async"
-    - name: "perf-6.0-standalone-ssl-async"
-    - name: "perf-8.0-standalone-async"
diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml
index 070b163e90..5b5cf92d68 100644
--- a/.evergreen/generated_configs/tasks.yml
+++ b/.evergreen/generated_configs/tasks.yml
@@ -1151,6 +1151,60 @@ tasks:
           SUB_TEST_NAME: gke
     tags: [auth_oidc, auth_oidc_remote]
 
+  # Perf tests
+  - name: perf-8.0-standalone-ssl
+    commands:
+      - func: run server
+        vars:
+          VERSION: v8.0-perf
+          SSL: ssl
+      - func: run tests
+        vars:
+          TEST_NAME: perf
+          SUB_TEST_NAME: sync
+      - func: attach benchmark test results
+      - func: send dashboard data
+    tags: [perf]
+  - name: perf-8.0-standalone-ssl-async
+    commands:
+      - func: run server
+        vars:
+          VERSION: v8.0-perf
+          SSL: ssl
+      - func: run tests
+        vars:
+          TEST_NAME: perf
+          SUB_TEST_NAME: async
+      - func: attach benchmark test results
+      - func: send dashboard data
+    tags: [perf]
+  - name: perf-8.0-standalone
+    commands:
+      - func: run server
+        vars:
+          VERSION: v8.0-perf
+          SSL: nossl
+      - func: run tests
+        vars:
+          TEST_NAME: perf
+          SUB_TEST_NAME: sync
+      - func: attach benchmark test results
+      - func: send dashboard data
+    tags: [perf]
+  - name: perf-8.0-standalone-async
+    commands:
+      - func: run server
+        vars:
+          VERSION: v8.0-perf
+          SSL: nossl
+      - func: run tests
+        vars:
+          TEST_NAME: perf
+          SUB_TEST_NAME: async
+      - func: attach benchmark test results
+      - func: send dashboard data
+    tags: [perf]
+
   # Server tests
   - name: test-4.0-standalone-auth-ssl-sync
     commands:
diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml
index d70afa2bdd..864b061a15 100644
--- a/.evergreen/generated_configs/variants.yml
+++ b/.evergreen/generated_configs/variants.yml
@@ -928,6 +928,15 @@ buildvariants:
       - windows-64-vsMulti-small
     batchtime: 10080
 
+  # Perf tests
+  - name: performance-benchmarks
+    tasks:
+      - name: .perf
+    display_name: Performance Benchmarks
+    run_on:
+      - rhel90-dbx-perf-large
+    batchtime: 10080
+
   # Pyopenssl tests
   - name: pyopenssl-macos-python3.9
     tasks:
diff --git a/.evergreen/run-perf-tests.sh b/.evergreen/run-perf-tests.sh
deleted file mode 100755
index cf88b93710..0000000000
--- a/.evergreen/run-perf-tests.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-
-set -o xtrace
-set -o errexit
-
-git clone --depth 1 https://github.com/mongodb/specifications.git
-pushd specifications/source/benchmarking/data
-tar xf extended_bson.tgz
-tar xf parallel.tgz
-tar xf single_and_multi_document.tgz
-popd
-
-export TEST_PATH="${PROJECT_DIRECTORY}/specifications/source/benchmarking/data"
-export OUTPUT_FILE="${PROJECT_DIRECTORY}/results.json"
-
-export PYTHON_BINARY=/opt/mongodbtoolchain/v4/bin/python3
-
-bash ./.evergreen/just.sh setup-tests perf "${SUB_TEST_NAME}"
-bash ./.evergreen/just.sh run-tests
diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh
index f9a853f27c..40336c6d2d 100755
--- a/.evergreen/run-tests.sh
+++ b/.evergreen/run-tests.sh
@@ -31,9 +31,10 @@ if [ -f "./secrets-export.sh" ]; then
 fi
 
 # List the packages.
-PIP_QUIET=0 uv run ${UV_ARGS} --with pip pip list
+uv sync ${UV_ARGS} --reinstall
+uv pip list
 
 # Start the test runner.
-uv run ${UV_ARGS} .evergreen/scripts/run_tests.py "$@"
+uv run .evergreen/scripts/run_tests.py "$@"
 
 popd
diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py
index b90a6af437..b9f9377066 100644
--- a/.evergreen/scripts/generate_config.py
+++ b/.evergreen/scripts/generate_config.py
@@ -69,6 +69,7 @@ class Host:
 HOSTS["ubuntu20"] = Host("ubuntu20", "ubuntu2004-small", "Ubuntu-20", dict())
 HOSTS["ubuntu22"] = Host("ubuntu22", "ubuntu2204-small", "Ubuntu-22", dict())
 HOSTS["rhel7"] = Host("rhel7", "rhel79-small", "RHEL7", dict())
+HOSTS["perf"] = Host("perf", "rhel90-dbx-perf-large", "", dict())
 DEFAULT_HOST = HOSTS["rhel8"]
 
 # Other hosts
@@ -722,6 +723,13 @@ def create_atlas_connect_variants():
     ]
 
 
+def create_perf_variants():
+    host = HOSTS["perf"]
+    return [
+        create_variant([".perf"], "Performance Benchmarks", host=host, batchtime=BATCHTIME_WEEK)
+    ]
+
+
 def create_aws_auth_variants():
     variants = []
 
@@ -942,6 +950,26 @@ def create_enterprise_auth_tasks():
     return [EvgTask(name=task_name, tags=tags, commands=[server_func, assume_func, test_func])]
 
 
+def create_perf_tasks():
+    tasks = []
+    for version, ssl, sync in product(["8.0"], ["ssl", "nossl"], ["sync", "async"]):
+        vars = dict(VERSION=f"v{version}-perf", SSL=ssl)
+        server_func = FunctionCall(func="run server", vars=vars)
+        vars = dict(TEST_NAME="perf", SUB_TEST_NAME=sync)
+        test_func = FunctionCall(func="run tests", vars=vars)
+        attach_func = FunctionCall(func="attach benchmark test results")
+        send_func = FunctionCall(func="send dashboard data")
+        task_name = f"perf-{version}-standalone"
+        if ssl == "ssl":
+            task_name += "-ssl"
+        if sync == "async":
+            task_name += "-async"
+        tags = ["perf"]
+        commands = [server_func, test_func, attach_func, send_func]
+        tasks.append(EvgTask(name=task_name, tags=tags, commands=commands))
+    return tasks
+
+
 def create_ocsp_tasks():
     tasks = []
     tests = [
diff --git a/.evergreen/scripts/run-perf-tests.sh b/.evergreen/scripts/run-perf-tests.sh
deleted file mode 100755
index e1c1311d67..0000000000
--- a/.evergreen/scripts/run-perf-tests.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/bash
-
-PROJECT_DIRECTORY=${PROJECT_DIRECTORY}
-SUB_TEST_NAME=${SUB_TEST_NAME} bash "${PROJECT_DIRECTORY}"/.evergreen/run-perf-tests.sh
diff --git a/.evergreen/scripts/run_tests.py b/.evergreen/scripts/run_tests.py
index 13a510475f..2e23a366b1 100644
--- a/.evergreen/scripts/run_tests.py
+++ b/.evergreen/scripts/run_tests.py
@@ -4,7 +4,6 @@
 import logging
 import os
 import platform
-import shutil
 import sys
 from datetime import datetime
@@ -142,10 +141,6 @@ def run() -> None:
     if TEST_PERF:
         handle_perf(start_time)
 
-    # Handle coverage post actions.
-    if os.environ.get("COVERAGE"):
-        shutil.rmtree(".pytest_cache", ignore_errors=True)
-
 
 if __name__ == "__main__":
     run()
diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py
index 59928271c9..6d7c8037c7 100644
--- a/.evergreen/scripts/setup_tests.py
+++ b/.evergreen/scripts/setup_tests.py
@@ -43,6 +43,9 @@
 # Map the test name to test group.
 GROUP_MAP = dict(mockupdb="mockupdb", perf="perf")
 
+# The python version used for perf tests.
+PERF_PYTHON_VERSION = "3.9.13"
+
 
 def is_set(var: str) -> bool:
     value = os.environ.get(var, "")
@@ -362,6 +365,19 @@ def handle_test_env() -> None:
         write_env("DISABLE_CONTEXT")
 
     if test_name == "perf":
+        data_dir = ROOT / "specifications/source/benchmarking/data"
+        if not data_dir.exists():
+            run_command("git clone --depth 1 https://github.com/mongodb/specifications.git")
+            run_command("tar xf extended_bson.tgz", cwd=data_dir)
+            run_command("tar xf parallel.tgz", cwd=data_dir)
+            run_command("tar xf single_and_multi_document.tgz", cwd=data_dir)
+        write_env("TEST_PATH", str(data_dir))
+        write_env("OUTPUT_FILE", str(ROOT / "results.json"))
+        # Overwrite the UV_PYTHON from the env.sh file.
+        write_env("UV_PYTHON", "")
+
+        UV_ARGS.append(f"--python={PERF_PYTHON_VERSION}")
+
         # PYTHON-4769 Run perf_test.py directly otherwise pytest's test collection negatively
         # affects the benchmark results.
         if sub_test_name == "sync":
diff --git a/.evergreen/scripts/teardown_tests.py b/.evergreen/scripts/teardown_tests.py
index 750d2a0652..b081478423 100644
--- a/.evergreen/scripts/teardown_tests.py
+++ b/.evergreen/scripts/teardown_tests.py
@@ -1,9 +1,11 @@
 from __future__ import annotations
 
 import os
+import shutil
 import sys
+from pathlib import Path
 
-from utils import DRIVERS_TOOLS, LOGGER, run_command
+from utils import DRIVERS_TOOLS, LOGGER, ROOT, run_command
 
 TEST_NAME = os.environ.get("TEST_NAME", "unconfigured")
 SUB_TEST_NAME = os.environ.get("SUB_TEST_NAME")
@@ -44,10 +46,19 @@
 elif TEST_NAME == "auth_aws" and sys.platform != "darwin":
     run_command(f"bash {DRIVERS_TOOLS}/.evergreen/auth_aws/teardown.sh")
 
+# Tear down perf if applicable.
+elif TEST_NAME == "perf":
+    shutil.rmtree(ROOT / "specifications", ignore_errors=True)
+    Path(os.environ["OUTPUT_FILE"]).unlink(missing_ok=True)
+
 # Tear down mog_wsgi if applicable.
 elif TEST_NAME == "mod_wsgi":
     from mod_wsgi_tester import teardown_mod_wsgi
 
     teardown_mod_wsgi()
 
+# Tear down coverage if applicable.
+if os.environ.get("COVERAGE"):
+    shutil.rmtree(".pytest_cache", ignore_errors=True)
+
 LOGGER.info(f"Tearing down tests of type '{TEST_NAME}'... done.")
diff --git a/.evergreen/scripts/utils.py b/.evergreen/scripts/utils.py
index cd55410cf6..535e392ea2 100644
--- a/.evergreen/scripts/utils.py
+++ b/.evergreen/scripts/utils.py
@@ -50,7 +50,7 @@ class Distro:
 }
 
 # Tests that require a sub test suite.
-SUB_TEST_REQUIRED = ["auth_aws", "auth_oidc", "kms", "mod_wsgi"]
+SUB_TEST_REQUIRED = ["auth_aws", "auth_oidc", "kms", "mod_wsgi", "perf"]
 
 EXTRA_TESTS = ["mod_wsgi"]
 
diff --git a/.gitignore b/.gitignore
index 8c095c2157..966059e693 100644
--- a/.gitignore
+++ b/.gitignore
@@ -27,6 +27,8 @@ expansion.yml
 *expansions.yml
 .evergreen/scripts/env.sh
 .evergreen/scripts/test-env.sh
+specifications/
+results.json
 
 # Lambda temp files
 test/lambda/.aws-sam
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index d2a833d874..47eb01dbf0 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -297,6 +297,12 @@ The `mode` can be `standalone` or `embedded`. For the `replica_set` version of
 If you are running one of the `no-responder` tests, omit the `run-server` step.
 
+### Perf Tests
+
+- Start the appropriate server, e.g. `just run-server --version=v8.0-perf --ssl`.
+- Set up the tests with `sync` or `async`: `just setup-tests perf sync`.
+- Run the tests: `just run-tests`.
+
 ## Enable Debug Logs
 
 - Use `-o log_cli_level="DEBUG" -o log_cli=1` with `just test` or `pytest`.
 - Add `log_cli_level = "DEBUG"` and `log_cli = 1` to the `tool.pytest.ini_options` section in `pyproject.toml` for Evergreen patches or to enable debug logs by default on your machine.
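
Taken together, this patch moves the perf benchmarks off the deleted `run-perf-tests.sh` scripts and onto the shared `just`/`uv` test harness. A minimal sketch of the local workflow described in the new CONTRIBUTING.md section, assuming a checkout with `just` and `uv` available; the data download and the `TEST_PATH`/`OUTPUT_FILE` variables are handled by `setup_tests.py`, and the final `just teardown-tests` recipe name is an assumption about the justfile rather than something shown in this diff:

```bash
# Start a standalone server built for benchmarking (SSL is optional).
just run-server --version=v8.0-perf --ssl

# Configure the perf suite; setup_tests.py clones mongodb/specifications,
# unpacks the benchmark archives, and writes TEST_PATH/OUTPUT_FILE.
just setup-tests perf sync   # or: just setup-tests perf async

# Run the benchmarks; results are written to results.json at the repo root.
just run-tests

# Clean up the cloned specifications/ directory and results.json
# (assumes the justfile exposes a teardown-tests recipe wrapping teardown_tests.py).
just teardown-tests
```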