4 changes: 3 additions & 1 deletion .github/workflows/build_and_test_on_every_pr.yml
@@ -51,4 +51,6 @@ jobs:
          while read -r target; do
            bazel run --config bl-x86_64-linux "$target"
          done < ci/showcase_targets_run.txt

      - name: Feature Integration Tests
        run: |
          bazel run --config bl-x86_64-linux //feature_integration_tests/python_test_cases:fit
9 changes: 8 additions & 1 deletion .gitignore
@@ -9,4 +9,11 @@ _logs
.ruff_cache
target/

rust-project.json

# Python
.venv
__pycache__/
.pytest_cache/
/.coverage
**/*.egg-info/*
9 changes: 9 additions & 0 deletions MODULE.bazel
@@ -34,6 +34,15 @@ python.toolchain(
)
use_repo(python)

pip = use_extension("@rules_python//python/extensions:pip.bzl", "pip", dev_dependency = True)
pip.parse(
    hub_name = "pip_score_venv_test",
    python_version = PYTHON_VERSION,
    requirements_lock = "//feature_integration_tests/python_test_cases:requirements.txt.lock",
)

use_repo(pip, "pip_score_venv_test")
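# Note: the "pip_score_venv_test" hub declared above is consumed in the Python
# test package's BUILD file via
# load("@pip_score_venv_test//:requirements.bzl", "all_requirements");
# see the BUILD diff below.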

# Special imports for certain modules

# communication module dependencies
45 changes: 45 additions & 0 deletions feature_integration_tests/README.md
@@ -0,0 +1,45 @@
# Feature Integration Tests

This directory contains Feature Integration Tests for the S-CORE project. It includes both Python test cases and Rust test scenarios that verify features work together.

## Structure

- `python_test_cases/` — Python-based integration test cases
- `conftest.py` — Pytest configuration and fixtures
- `fit_scenario.py` — Base scenario class
- `requirements.txt` — Python dependencies
- `BUILD` — Bazel build and test definitions
- `tests/` — Test cases (e.g., orchestration with persistency)
- `rust_test_scenarios/` — Rust-based integration test scenarios
- `src/` — Rust source code for test scenarios
- `BUILD` — Bazel build definitions

## Running Tests

### Python Test Cases

Python tests are managed with Bazel and Pytest. To run the main test target:

```sh
bazel test //feature_integration_tests/python_test_cases:fit
```
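
Additional pytest options defined in `conftest.py` (for example `--traces`) can be forwarded through Bazel's `--test_arg` flag. A sketch, assuming the `score_py_pytest` wrapper passes these arguments on to pytest:

```sh
bazel test //feature_integration_tests/python_test_cases:fit --test_arg=--traces=target
```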

### Rust Test Scenarios

Rust test scenarios are defined in `rust_test_scenarios/src/scenarios`. Build and run them using Bazel:

```sh
bazel build //feature_integration_tests/rust_test_scenarios
```

To list the available scenarios:

```sh
bazel run //feature_integration_tests/rust_test_scenarios -- --list-scenarios
```
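
The Python test cases locate this binary through the options defined in `python_test_cases/conftest.py`. A sketch of a direct pytest invocation (the `bazel-bin` path is an assumption and depends on your build configuration):

```sh
pytest tests --traces=target \
  --rust-target-path=bazel-bin/feature_integration_tests/rust_test_scenarios/rust_test_scenarios
```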

## Updating Python Requirements

To update Python dependencies:

```sh
bazel run //feature_integration_tests/python_test_cases:requirements.update
```
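
## Writing a Test Case

New Python test cases subclass `FitScenario` from `fit_scenario.py`. Below is a minimal, hypothetical sketch: class, test names, and the import path are illustrative, and fixtures such as `command`, `execution_timeout`, and `target_path` are assumed to be supplied by the `testing-utils` scenario machinery and the concrete test setup.

```python
from testing_utils import LogContainer, ScenarioResult

from fit_scenario import FitScenario


class TestExampleScenario(FitScenario):
    def test_command_succeeds(self, results: ScenarioResult) -> None:
        # `results` already validates the expected success/failure mode;
        # here we only check that the scenario did not hang.
        assert not results.hang

    def test_emits_info_traces(self, logs_info_level: LogContainer) -> None:
        # Expect at least one INFO-level trace from the tested target.
        assert any(True for _ in logs_info_level)
```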
45 changes: 45 additions & 0 deletions feature_integration_tests/python_test_cases/BUILD
@@ -0,0 +1,45 @@
load("@pip_score_venv_test//:requirements.bzl", "all_requirements")
load("@rules_python//python:pip.bzl", "compile_pip_requirements")
load("@score_tooling//python_basics:defs.bzl", "score_py_pytest", "score_virtualenv")

# In order to update the requirements, change the `requirements.txt` file and run:
# `bazel run //feature_integration_tests/python_test_cases:requirements.update`.
# This will update the `requirements.txt.lock` file.
# To upgrade all dependencies to their latest versions, run:
# `bazel run //feature_integration_tests/python_test_cases:requirements.update -- --upgrade`.
compile_pip_requirements(
    name = "requirements",
    srcs = [
        "requirements.txt",
        "@score_tooling//python_basics:requirements.txt",
    ],
    requirements_txt = "requirements.txt.lock",
    tags = [
        "manual",
    ],
)

score_virtualenv(
    name = "python_tc_venv",
    reqs = all_requirements,
    venv_name = ".python_tc_venv",
)

# Test targets
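# The Rust scenario binary is made available at runtime through `data`; its
# runfiles path is passed to pytest via `$(rootpath ...)` in `args`.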
score_py_pytest(
    name = "fit",
    srcs = glob(["tests/**/*.py"]) + ["conftest.py", "fit_scenario.py"],
    args = [
        "--traces=all",
        "--rust-target-path=$(rootpath //feature_integration_tests/rust_test_scenarios)",
    ],
    data = [
        ":python_tc_venv",
        "//feature_integration_tests/rust_test_scenarios",
    ],
    env = {
        "RUST_BACKTRACE": "1",
    },
    pytest_ini = ":pytest.ini",
    deps = all_requirements,
)
66 changes: 66 additions & 0 deletions feature_integration_tests/python_test_cases/conftest.py
@@ -0,0 +1,66 @@
from pathlib import Path

import pytest
from testing_utils import BazelTools

FAILED_CONFIGS = []


# Cmdline options
def pytest_addoption(parser):
    parser.addoption(
        "--traces",
        choices=["none", "target", "all"],
        default="none",
        help="Verbosity of traces in output and HTML report. "
        '"none" - show no traces, '
        '"target" - show traces generated by test code, '
        '"all" - show all traces.',
    )

    parser.addoption(
        "--rust-target-name",
        type=str,
        default="//feature_integration_tests/rust_test_scenarios:rust_test_scenarios",
        help="Rust test scenario executable target.",
    )
    parser.addoption(
        "--rust-target-path",
        type=Path,
        help="Path to the Rust test scenario executable.",
    )
    parser.addoption(
        "--build-scenarios",
        action="store_true",
        help="Build test scenario executables.",
    )
    parser.addoption(
        "--build-scenarios-timeout",
        type=float,
        default=180.0,
        help="Build command timeout in seconds. Default: %(default)s",
    )
    parser.addoption(
        "--default-execution-timeout",
        type=float,
        default=5.0,
        help="Default execution timeout in seconds. Default: %(default)s",
    )


# Hooks
@pytest.hookimpl(tryfirst=True)
def pytest_sessionstart(session):
    try:
        # Build scenarios.
        if session.config.getoption("--build-scenarios"):
            build_timeout = session.config.getoption("--build-scenarios-timeout")

            # Build Rust test scenarios.
            print("Building Rust test scenarios executable...")
            cargo_tools = BazelTools(option_prefix="rust", build_timeout=build_timeout)
            rust_target_name = session.config.getoption("--rust-target-name")
            cargo_tools.build(rust_target_name)

    except Exception as e:
        pytest.exit(str(e), returncode=1)
135 changes: 135 additions & 0 deletions feature_integration_tests/python_test_cases/fit_scenario.py
@@ -0,0 +1,135 @@
import shutil
from pathlib import Path
from typing import Generator

import pytest
from testing_utils import (
    BazelTools,
    BuildTools,
    LogContainer,
    Scenario,
    ScenarioResult,
)


def temp_dir_common(
    tmp_path_factory: pytest.TempPathFactory, base_name: str, *args: str
) -> Generator[Path, None, None]:
    """
    Create a temporary directory and remove it after the test.
    Common implementation to be reused by fixtures.

    Returns a generator providing a numbered path to the temporary directory,
    e.g. '<TMP_PATH>/<BASE_NAME>-<ARG1>-<ARG2><NUMBER>/'.

    Parameters
    ----------
    tmp_path_factory : pytest.TempPathFactory
        Factory for temporary directories.
    base_name : str
        Base directory name.
        Using 'self.__class__.__name__' is recommended.
    *args : str
        Other parameters to be included in the directory name.
    """
    parts = [base_name, *args]
    dir_name = "-".join(parts)
    dir_path = tmp_path_factory.mktemp(dir_name, numbered=True)
    yield dir_path
    shutil.rmtree(dir_path)


class FitScenario(Scenario):
"""
CIT test scenario definition.
"""

@pytest.fixture(scope="class")
def build_tools(self) -> BuildTools:
return BazelTools(option_prefix="rust")

def expect_command_failure(self, *args, **kwargs) -> bool:
"""
Expect command failure (e.g., non-zero return code or hang).
"""
return False

    @pytest.fixture(scope="class")
    def results(
        self,
        command: list[str],
        execution_timeout: float,
        *args,
        **kwargs,
    ) -> ScenarioResult:
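        """
        Run the scenario command and validate the outcome against
        expect_command_failure(); raise RuntimeError if the command
        unexpectedly succeeds or fails.
        """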
        result = self._run_command(command, execution_timeout, args, kwargs)
        success = result.return_code == 0 and not result.hang
        if self.expect_command_failure() and success:
            raise RuntimeError(f"Command execution succeeded unexpectedly: {result=}")
        if not self.expect_command_failure() and not success:
            raise RuntimeError(f"Command execution failed unexpectedly: {result=}")
        return result

    @pytest.fixture(scope="class")
    def logs_target(self, target_path: Path, logs: LogContainer) -> LogContainer:
        """
        Logs with messages generated strictly by the tested code.

        Parameters
        ----------
        target_path : Path
            Path to test scenario executable.
        logs : LogContainer
            Unfiltered logs.
        """
        return logs.get_logs(field="target", pattern=f"{target_path.name}.*")

    @pytest.fixture(scope="class")
    def logs_info_level(self, logs_target: LogContainer) -> LogContainer:
        """
        Logs containing INFO-level messages.

        Parameters
        ----------
        logs_target : LogContainer
            Logs with messages generated strictly by the tested code.
        """
        return logs_target.get_logs(field="level", value="INFO")

    @pytest.fixture(autouse=True)
    def print_to_report(
        self,
        request: pytest.FixtureRequest,
        logs: LogContainer,
        logs_target: LogContainer,
    ) -> None:
        """
        Print traces to stdout.

        Allowed "--traces" values:
        - "none" - show no traces.
        - "target" - show traces generated by test code.
        - "all" - show all traces.

        Parameters
        ----------
        request : FixtureRequest
            Test request built-in fixture.
        logs : LogContainer
            Test scenario execution logs.
        logs_target : LogContainer
            Logs with messages generated strictly by the tested code.
        """
        traces_param = request.config.getoption("--traces")
        match traces_param:
            case "all":
                traces = logs
            case "target":
                traces = logs_target
            case "none":
                traces = LogContainer()
            case _:
                raise RuntimeError(f'Invalid "--traces" value: {traces_param}')

        for trace in traces:
            print(trace)
3 changes: 3 additions & 0 deletions feature_integration_tests/python_test_cases/pytest.ini
@@ -0,0 +1,3 @@
[pytest]
addopts = -v
testpaths = tests
1 change: 1 addition & 0 deletions feature_integration_tests/python_test_cases/requirements.txt
@@ -0,0 +1 @@
testing-utils @ git+https://github.com/eclipse-score/[email protected]