
Commit e461d64

Merge pull request #25 from qorix-group/igorostrowskiq_add_feature_integration_test_demo
Feature Integration Tests implementation
2 parents: 0352d6d + bb2ecc3

18 files changed: +711, -66 lines

.github/workflows/build_and_test_on_every_pr.yml

Lines changed: 3 additions & 1 deletion
```diff
@@ -51,4 +51,6 @@ jobs:
           while read -r target; do
             bazel run --config bl-x86_64-linux "$target"
           done < ci/showcase_targets_run.txt
-
+      - name: Feature Integration Tests
+        run: |
+          bazel run --config bl-x86_64-linux //feature_integration_tests/python_test_cases:fit
```

.gitignore

Lines changed: 8 additions & 1 deletion
```diff
@@ -9,4 +9,11 @@ _logs
 .ruff_cache
 target/
 
-rust-project.json
+rust-project.json
+
+# Python
+.venv
+__pycache__/
+.pytest_cache/
+/.coverage
+**/*.egg-info/*
```

MODULE.bazel

Lines changed: 9 additions & 0 deletions
```diff
@@ -34,6 +34,15 @@ python.toolchain(
 )
 use_repo(python)
 
+pip = use_extension("@rules_python//python/extensions:pip.bzl", "pip", dev_dependency = True)
+pip.parse(
+    hub_name = "pip_score_venv_test",
+    python_version = PYTHON_VERSION,
+    requirements_lock = "//feature_integration_tests/python_test_cases:requirements.txt.lock",
+)
+
+use_repo(pip, "pip_score_venv_test")
+
 # Special imports for certain modules
 
 # communication module dependencies
```
feature_integration_tests/README.md

Lines changed: 45 additions & 0 deletions
@@ -0,0 +1,45 @@

# Feature Integration Tests

This directory contains Feature Integration Tests for the S-CORE project. It includes both Python test cases and Rust test scenarios to validate features work together.

## Structure

- `python_test_cases/` — Python-based integration test cases
  - `conftest.py` — Pytest configuration and fixtures
  - `fit_scenario.py` — Base scenario class
  - `requirements.txt` — Python dependencies
  - `BUILD` — Bazel build and test definitions
  - `tests/` — Test cases (e.g., orchestration with persistency)
- `rust_test_scenarios/` — Rust-based integration test scenarios
  - `src/` — Rust source code for test scenarios
  - `BUILD` — Bazel build definitions

## Running Tests

### Python Test Cases

Python tests are managed with Bazel and Pytest. To run the main test target:

```sh
bazel test //feature_integration_tests/python_test_cases:fit
```

### Rust Test Scenarios

Rust test scenarios are defined in `rust_test_scenarios/src/scenarios`. Build and run them using Bazel:

```sh
bazel build //feature_integration_tests/rust_test_scenarios
```

```sh
bazel run //feature_integration_tests/rust_test_scenarios -- --list-scenarios
```

## Updating Python Requirements

To update Python dependencies:

```sh
bazel run //feature_integration_tests/python_test_cases:requirements.update
```
feature_integration_tests/python_test_cases/BUILD

Lines changed: 45 additions & 0 deletions
@@ -0,0 +1,45 @@

```starlark
load("@pip_score_venv_test//:requirements.bzl", "all_requirements")
load("@rules_python//python:pip.bzl", "compile_pip_requirements")
load("@score_tooling//python_basics:defs.bzl", "score_py_pytest", "score_virtualenv")

# In order to update the requirements, change the `requirements.txt` file and run:
# `bazel run //feature_integration_tests/python_test_cases:requirements.update`.
# This will update the `requirements.txt.lock` file.
# To upgrade all dependencies to their latest versions, run:
# `bazel run //feature_integration_tests/python_test_cases:requirements.update -- --upgrade`.
compile_pip_requirements(
    name = "requirements",
    srcs = [
        "requirements.txt",
        "@score_tooling//python_basics:requirements.txt",
    ],
    requirements_txt = "requirements.txt.lock",
    tags = [
        "manual",
    ],
)

score_virtualenv(
    name = "python_tc_venv",
    reqs = all_requirements,
    venv_name = ".python_tc_venv",
)

# Tests targets
score_py_pytest(
    name = "fit",
    srcs = glob(["tests/**/*.py"]) + ["conftest.py", "fit_scenario.py"],
    args = [
        "--traces=all",
        "--rust-target-path=$(rootpath //feature_integration_tests/rust_test_scenarios)",
    ],
    data = [
        ":python_tc_venv",
        "//feature_integration_tests/rust_test_scenarios",
    ],
    env = {
        "RUST_BACKTRACE": "1",
    },
    pytest_ini = ":pytest.ini",
    deps = all_requirements,
)
```
feature_integration_tests/python_test_cases/conftest.py

Lines changed: 66 additions & 0 deletions
@@ -0,0 +1,66 @@

```python
from pathlib import Path

import pytest
from testing_utils import BazelTools

FAILED_CONFIGS = []


# Cmdline options
def pytest_addoption(parser):
    parser.addoption(
        "--traces",
        choices=["none", "target", "all"],
        default="none",
        help="Verbosity of traces in output and HTML report. "
        '"none" - show no traces, '
        '"target" - show traces generated by test code, '
        '"all" - show all traces. ',
    )

    parser.addoption(
        "--rust-target-name",
        type=str,
        default="//feature_integration_tests/rust_test_scenarios:rust_test_scenarios",
        help="Rust test scenario executable target.",
    )
    parser.addoption(
        "--rust-target-path",
        type=Path,
        help="Rust test scenario executable target.",
    )
    parser.addoption(
        "--build-scenarios",
        action="store_true",
        help="Build test scenarios executables.",
    )
    parser.addoption(
        "--build-scenarios-timeout",
        type=float,
        default=180.0,
        help="Build command timeout in seconds. Default: %(default)s",
    )
    parser.addoption(
        "--default-execution-timeout",
        type=float,
        default=5.0,
        help="Default execution timeout in seconds. Default: %(default)s",
    )


# Hooks
@pytest.hookimpl(tryfirst=True)
def pytest_sessionstart(session):
    try:
        # Build scenarios.
        if session.config.getoption("--build-scenarios"):
            build_timeout = session.config.getoption("--build-scenarios-timeout")

            # Build Rust test scenarios.
            print("Building Rust test scenarios executable...")
            cargo_tools = BazelTools(option_prefix="rust", build_timeout=build_timeout)
            rust_target_name = session.config.getoption("--rust-target-name")
            cargo_tools.build(rust_target_name)

    except Exception as e:
        pytest.exit(str(e), returncode=1)
```
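Test code reads these options through `request.config.getoption(...)` (see `fit_scenario.py` below). As a hedged illustration only, the sketch below shows one way such options could be surfaced to tests as fixtures; the fixture names `rust_target_path` and `execution_timeout` are assumptions for this example and are not part of the commit, since the actual plumbing may already live in the `testing_utils` package.

```python
# Hypothetical sketch (not from this commit): exposing conftest options as fixtures.
from pathlib import Path

import pytest


@pytest.fixture(scope="session")
def rust_target_path(request: pytest.FixtureRequest) -> Path:
    # Resolve the scenario executable passed via --rust-target-path
    # (the BUILD target injects it with $(rootpath ...)).
    path = request.config.getoption("--rust-target-path")
    if path is None:
        pytest.exit("--rust-target-path was not provided", returncode=1)
    return path.resolve()


@pytest.fixture(scope="class")
def execution_timeout(request: pytest.FixtureRequest) -> float:
    # Fall back to the configurable default execution timeout.
    return request.config.getoption("--default-execution-timeout")
```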
feature_integration_tests/python_test_cases/fit_scenario.py

Lines changed: 135 additions & 0 deletions
@@ -0,0 +1,135 @@

```python
import shutil
from pathlib import Path
from typing import Generator

import pytest
from testing_utils import (
    BazelTools,
    BuildTools,
    LogContainer,
    Scenario,
    ScenarioResult,
)


def temp_dir_common(
    tmp_path_factory: pytest.TempPathFactory, base_name: str, *args: str
) -> Generator[Path, None, None]:
    """
    Create temporary directory and remove it after test.
    Common implementation to be reused by fixtures.

    Returns generator providing numbered path to temporary directory.
    E.g., '<TMP_PATH>/<BASE_NAME>-<ARG1>-<ARG2><NUMBER>/'.

    Parameters
    ----------
    tmp_path_factory : pytest.TempPathFactory
        Factory for temporary directories.
    base_name : str
        Base directory name.
        'self.__class__.__name__' use is recommended.
    *args : Any
        Other parameters to be included in directory name.
    """
    parts = [base_name, *args]
    dir_name = "-".join(parts)
    dir_path = tmp_path_factory.mktemp(dir_name, numbered=True)
    yield dir_path
    shutil.rmtree(dir_path)


class FitScenario(Scenario):
    """
    CIT test scenario definition.
    """

    @pytest.fixture(scope="class")
    def build_tools(self) -> BuildTools:
        return BazelTools(option_prefix="rust")

    def expect_command_failure(self, *args, **kwargs) -> bool:
        """
        Expect command failure (e.g., non-zero return code or hang).
        """
        return False

    @pytest.fixture(scope="class")
    def results(
        self,
        command: list[str],
        execution_timeout: float,
        *args,
        **kwargs,
    ) -> ScenarioResult:
        result = self._run_command(command, execution_timeout, args, kwargs)
        success = result.return_code == 0 and not result.hang
        if self.expect_command_failure() and success:
            raise RuntimeError(f"Command execution succeeded unexpectedly: {result=}")
        if not self.expect_command_failure() and not success:
            raise RuntimeError(f"Command execution failed unexpectedly: {result=}")
        return result

    @pytest.fixture(scope="class")
    def logs_target(self, target_path: Path, logs: LogContainer) -> LogContainer:
        """
        Logs with messages generated strictly by the tested code.

        Parameters
        ----------
        target_path : Path
            Path to test scenario executable.
        logs : LogContainer
            Unfiltered logs.
        """
        return logs.get_logs(field="target", pattern=f"{target_path.name}.*")

    @pytest.fixture(scope="class")
    def logs_info_level(self, logs_target: LogContainer) -> LogContainer:
        """
        Logs with messages with INFO level.

        Parameters
        ----------
        logs_target : LogContainer
            Logs with messages generated strictly by the tested code.
        """
        return logs_target.get_logs(field="level", value="INFO")

    @pytest.fixture(autouse=True)
    def print_to_report(
        self,
        request: pytest.FixtureRequest,
        logs: LogContainer,
        logs_target: LogContainer,
    ) -> None:
        """
        Print traces to stdout.

        Allowed "--traces" values:
        - "none" - show no traces.
        - "target" - show traces generated by test code.
        - "all" - show all traces.

        Parameters
        ----------
        request : FixtureRequest
            Test request built-in fixture.
        logs : LogContainer
            Test scenario execution logs.
        logs_target : LogContainer
            Logs with messages generated strictly by the tested code.
        """
        traces_param = request.config.getoption("--traces")
        match traces_param:
            case "all":
                traces = logs
            case "target":
                traces = logs_target
            case "none":
                traces = LogContainer()
            case _:
                raise RuntimeError(f'Invalid "--traces" value: {traces_param}')

        for trace in traces:
            print(trace)
```
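The README notes that concrete test cases live under `python_test_cases/tests/` (for example, orchestration with persistency), but none are shown in this excerpt. As a rough, hypothetical sketch only: a test case would subclass `FitScenario` and assert against the `results` and filtered-log fixtures it provides. How the base `Scenario` class from `testing_utils` supplies the `command`, `target_path`, and `logs` fixtures is not visible in this diff, so the class below is illustrative rather than taken from the commit.

```python
# Hypothetical example test case (not part of this commit).
from testing_utils import LogContainer, ScenarioResult

from fit_scenario import FitScenario


class TestExampleScenario(FitScenario):
    def test_scenario_succeeds(self, results: ScenarioResult) -> None:
        # The "results" fixture already raises if the command fails or hangs
        # unexpectedly, so reaching this assertion implies a clean run.
        assert results.return_code == 0
        assert not results.hang

    def test_emits_info_traces(self, logs_info_level: LogContainer) -> None:
        # Expect at least one INFO-level trace from the tested executable.
        assert len(list(logs_info_level)) > 0
```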
feature_integration_tests/python_test_cases/pytest.ini

Lines changed: 3 additions & 0 deletions
@@ -0,0 +1,3 @@

```ini
[pytest]
addopts = -v
testpaths = tests
```
feature_integration_tests/python_test_cases/requirements.txt

Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@

```
testing-utils @ git+https://github.com/eclipse-score/[email protected]
```
