18 changes: 0 additions & 18 deletions .github/workflows/docs-check.yml

This file was deleted.

65 changes: 65 additions & 0 deletions .github/workflows/release_check.yml
@@ -0,0 +1,65 @@
name: Release Check

on:
pull_request:
types: [opened, synchronize, labeled]
workflow_dispatch:

jobs:
build-docs:
runs-on: ubuntu-latest
# Only run on release PRs (release-please labels them with 'autorelease: pending')
if: "contains(github.event.pull_request.labels.*.name, 'autorelease: pending')"
steps:
- uses: actions/checkout@v4
- uses: astral-sh/setup-uv@v4
- name: Install dependencies
run: uv sync --group doc
- name: Build docs
run: uv run mkdocs build --clean

performance-tests:
runs-on: ubuntu-latest
if: "contains(github.event.pull_request.labels.*.name, 'autorelease: pending')"
steps:
- uses: actions/checkout@v4
- uses: astral-sh/setup-uv@v4

- name: Set up Python 3.13
uses: actions/setup-python@v5
with:
python-version: 3.13

- name: Install dependencies
run: uv sync --dev

- name: Resolve app versions
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
echo "ROBOCOP_VERSIONS=$(gh release list --limit 4 --json tagName --jq '.[].tagName' | tr '\n' ',')" >> $GITHUB_ENV

- name: Run performance tests
run: uv run nox -s performance > performance.log 2>&1

- name: Merge report and prepare for publishing
run: uv run tests/performance/merge_reports.py

- name: Publish to job summary
run: cat perf_report.md >> "$GITHUB_STEP_SUMMARY"

- name: Publish to PR
if: github.event_name == 'pull_request'
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
gh pr comment \
"${{ github.event.pull_request.number }}" \
--body-file perf_report.md

- name: Upload log artifact
if: always()
uses: actions/upload-artifact@v4
with:
name: performance.log
path: performance.log
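The "Resolve app versions" step above exports the last four release tags as a single comma-joined string. A minimal sketch (with hypothetical tag names) of the value it produces and how `noxfile.py` below turns it into a version matrix:

```python
# Hypothetical value produced by `gh release list ... | tr '\n' ','`; note the trailing comma.
robocop_versions_env = "v7.1.0,v7.0.5,v7.0.4,v7.0.3,"

# noxfile.py drops empty entries and appends "local" for the checked-out source tree.
versions = [tag for tag in robocop_versions_env.split(",") if tag] + ["local"]
print(versions)  # ['v7.1.0', 'v7.0.5', 'v7.0.4', 'v7.0.3', 'local']
```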
2 changes: 1 addition & 1 deletion .github/workflows/tests.yml
@@ -4,7 +4,7 @@ on:
push:
branches: [main]
pull_request:
branches: [ main ]
branches: [main]

jobs:
build:
44 changes: 44 additions & 0 deletions noxfile.py
@@ -4,13 +4,20 @@
> uv run nox -s docs
"""

import os

import nox

nox.options.default_venv_backend = "uv"

PYTHON_VERSIONS = ["3.9", "3.10", "3.11", "3.12", "3.13", "3.14"]

ROBOT_VERSIONS = ["robotframework==4.*", "robotframework==5.*", "robotframework==6.*", "robotframework==7.*"]
ROBOCOP_VERSIONS = (
[*[non_empty for non_empty in os.environ["ROBOCOP_VERSIONS"].split(",") if non_empty], "local"]
if os.environ.get("ROBOCOP_VERSIONS")
else ["v7.1.0", "local"]
)


@nox.session(python=PYTHON_VERSIONS) # , reuse_venv=False
@@ -52,3 +59,40 @@ def docs(session):
# session.run("sphinx-build", "-a", "-E", "-b", "html", "docs", "docs/_build/")
command = ["sphinx-build", "-a", "-E", "--verbose", "-b", "html", "docs/source", "docs/_build/"]
session.run(*command)


@nox.session(python=PYTHON_VERSIONS[-2])
@nox.parametrize("robocop_version", ROBOCOP_VERSIONS)
def performance(session: nox.Session, robocop_version: str) -> None:
"""
Generate performance reports.

Used by the GitHub Workflow: ``.github/workflows/release_check.yml``

ROBOCOP_VERSIONS is built from the environment variable set in the workflow (the latest 4 released tags) plus
"local", which stands for the local installation. The goal is to execute the performance tests in an isolated
environment with a selected past or current Robocop version for comparison.

The reports are designed so that the absolute results do not matter, but the change between versions does. We
re-execute the tests for the past versions to get a baseline benchmark for the current version.
"""
robocop_version = robocop_version.removeprefix("v")
if not robocop_version:
return
if robocop_version == "local":
session.run_install(
"uv",
"sync",
f"--python={session.virtualenv.location}",
env={"UV_PROJECT_ENVIRONMENT": session.virtualenv.location},
)
else:
session.run(
"uv",
"pip",
"install",
f"robotframework-robocop=={robocop_version}",
f"--python={session.virtualenv.location}",
env={"UV_PROJECT_ENVIRONMENT": session.virtualenv.location},
)
session.run("python", "-m", "tests.performance.generate_reports", external=True, silent=False)
2 changes: 2 additions & 0 deletions pyproject.toml
@@ -80,6 +80,8 @@ dev = [
"pytest-xdist>=3.6.1",
"ruff==0.14.8",
"pysonar",
"nox>=2025.11.12",
"packaging>=25.0",
]
doc = [
"mkdocs",
Empty file added tests/performance/__init__.py
Empty file.
104 changes: 65 additions & 39 deletions tests/performance/generate_reports.py
@@ -3,7 +3,8 @@

Reports from previous runs are stored in the reports folder and can be used for comparison.

Each report is run multiple times and calculates a trimmed mean by excluding the bottom and top 10% of values.
Each report is run multiple times and a trimmed mean is calculated by excluding the slowest and fastest runs
(according to the cut_off parameter).
"""

import json
@@ -16,16 +17,25 @@

from robocop import __version__, config
from robocop.formatter.formatters import FORMATTERS
from robocop.linter.utils.version_matching import Version
from robocop.run import check_files, format_files
from tests import working_directory

LINTER_TESTS_DIR = Path(__file__).parent.parent / "linter"
TEST_DATA = Path(__file__).parent / "test_data"
ROBOCOP_VERSION = Version(__version__)
REPORTS = {}


def performance_report(runs: int = 100):
"""Use as decorator to measure performance of a function and store results."""
def performance_report(runs: int = 100, cut_off: int = 0):
"""
Use as a decorator to measure a function's performance and store the results.

Args:
runs: Number of runs to take into account when calculating the average.
cut_off: Number of slowest and fastest runs to exclude from the average.

"""

def decorator(func):
@wraps(func)
@@ -38,18 +48,13 @@ def wrapper(*args, **kwargs):
print(f"Run {run + 1} / {runs} of {func.__name__}")
start = time.perf_counter()
counter = func(*args, **kwargs)
end = time.perf_counter()
time_taken = end - start
time_taken = time.perf_counter() - start
run_times.append(time_taken)
print(f" Execution time: {time_taken:.6f} seconds")
run_times.sort()
cut_off = int(runs * 0.1)
if cut_off + 2 > runs:
cut_off = 0
if len(run_times) > 2:
avg_time = sum(run_times[cut_off:-cut_off]) / (len(run_times) - 2 * cut_off)
else:
avg_time = sum(run_times) / len(run_times)
if cut_off:
run_times = run_times[cut_off:-cut_off]
avg_time = sum(run_times) / len(run_times)
print(f"Mean average execution time over {runs} runs: {avg_time:.6f} seconds")
if report_name:
if func.__name__ not in REPORTS:
@@ -63,7 +68,7 @@ def wrapper(*args, **kwargs):
return decorator


@performance_report(runs=50)
@performance_report(runs=10, cut_off=2)
def project_traversing_report() -> int:
"""
Measure how long it takes to traverse Robocop repository files.
@@ -90,33 +95,41 @@ def project_traversing_report() -> int:
return files_count


@performance_report(runs=50)
def formatter_report(formatter: str, report_name: str, cache: bool = True) -> int: # noqa: ARG001
@performance_report(runs=10, cut_off=2)
def formatter_report(formatter: str, report_name: str, **kwargs) -> int: # noqa: ARG001
"""Measure how long it takes to format test files using a specific formatter."""
main_dir = Path(__file__).parent.parent.parent
formatter_dir = main_dir / "tests" / "formatter" / "formatters" / formatter
with working_directory(formatter_dir):
format_files(["source"], select=[formatter], overwrite=False, return_result=True, silent=True, cache=cache)
format_files(["source"], select=[formatter], overwrite=False, return_result=True, silent=True, **kwargs)
source_dir = formatter_dir / "source"
return len(list(source_dir.iterdir()))


@performance_report(runs=10)
@performance_report(runs=5)
def linter_report(report_name: str, **kwargs) -> int: # noqa: ARG001
"""Measure how long it takes to lint all linter test files."""
main_dir = Path(__file__).parent.parent.parent
linter_dir = main_dir / "tests" / "linter"
with working_directory(linter_dir):
check_files(return_result=True, select=["ALL"], **kwargs)
return len(list(linter_dir.glob("**/*.robot")))


@performance_report(runs=2)
@performance_report(runs=1)
def lint_large_file(report_name: str, lint_dir: Path, **kwargs) -> int: # noqa: ARG001
"""Measure how long it takes to lint a large file."""
with working_directory(lint_dir):
check_files(return_result=True, select=["ALL"], cache=False, **kwargs)
check_files(return_result=True, select=["ALL"], **kwargs)
return 1


def merge_dictionaries(d1: dict, d2: dict) -> dict:
"""
Merge two dictionaries recursively.

This function is used to merge two partial reports generated by different runs.
"""
for key, value in d2.items():
if key in d1 and isinstance(d1[key], dict) and isinstance(value, dict):
merge_dictionaries(d1[key], value)
@@ -126,6 +139,12 @@ def merge_dictionaries(d1: dict, d2: dict) -> dict:


def generate_large_file(template_path: Path, output_dir: Path) -> None:
"""
Generate a large file based on a template.

This function is used to generate a large file for performance testing. Because of the potential size and
complexity, it is easier to use a templated file than a hardcoded one.
"""
env = Environment(loader=FileSystemLoader(template_path.parent), autoescape=True)
template = env.get_template(template_path.name)

@@ -135,30 +154,37 @@ def generate_large_file(template_path: Path, output_dir: Path) -> None:
f.write(rendered_content)


if __name__ == "__main__":
# TODO: prepare i.e. nox script to install external robocops and run this script
# So we can generate reports for multiple past versions. It is important since the actual seconds change depending
# on where we run the script from, but the % change between version should be comparable. Also we can use new tests
# on old versions
linter_report(report_name="with_print_cache", cache=True)
linter_report(report_name="with_print_no_cache", cache=False)
linter_report(report_name="without_print_cache", silent=True, cache=True)
linter_report(report_name="without_print_no_cache", silent=True, cache=False)
def generate_reports() -> None:
"""Entry point for generating performance reports and saving it to global REPORTS variable."""
if Version("7.1.0") > ROBOCOP_VERSION:
disable_cache_option = {}
elif Version("7.1.0") == ROBOCOP_VERSION:
disable_cache_option = {"no_cache": True}
else:
disable_cache_option = {"cache": False}

if disable_cache_option:
linter_report(report_name="with_print_cache")
linter_report(report_name="with_print_no_cache", **disable_cache_option)
if disable_cache_option:
linter_report(report_name="without_print_cache", silent=True)
linter_report(report_name="without_print_no_cache", silent=True, **disable_cache_option)
for formatter in FORMATTERS:
formatter_report(formatter=formatter, report_name=formatter)
formatter_report(formatter=formatter, report_name=f"{formatter}_no_cache", cache=False)
formatter_report(formatter=formatter, report_name=f"{formatter}_no_cache", **disable_cache_option)
project_traversing_report()
with tempfile.TemporaryDirectory() as temp_dir:
temp_dir = Path(temp_dir)
generate_large_file(TEST_DATA / "large_file.robot", temp_dir)
lint_large_file(report_name="large_file_with_print", lint_dir=temp_dir)
lint_large_file(report_name="large_file_without_print", lint_dir=temp_dir, silent=True)
lint_large_file(report_name="large_file_with_print", lint_dir=temp_dir, **disable_cache_option)
lint_large_file(report_name="large_file_without_print", lint_dir=temp_dir, silent=True, **disable_cache_option)

report_path = Path(__file__).parent / "reports" / f"robocop_{__version__.replace('.', '_')}.json"
if report_path.exists():
with open(report_path) as fp:
prev_report = json.load(fp)
REPORTS = merge_dictionaries(prev_report, REPORTS)

with open(report_path, "w") as fp:
json.dump(REPORTS, fp, indent=4)
if __name__ == "__main__":
whole_run_start = time.perf_counter()
report_path = Path(__file__).parent / "reports" / f"robocop_{__version__.replace('.', '_')}.json"
if not report_path.exists(): # additional safeguard in case we rerun on the same version (there was no version bump)
generate_reports()
print(f"Generating report in {report_path}")
with open(report_path, "w") as fp:
json.dump(REPORTS, fp, indent=4)
print(f"Took {time.perf_counter() - whole_run_start:.2f} seconds to generate report.")