Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
48 changes: 48 additions & 0 deletions docs/usages/help_functions.rst
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
#############
Log Metrics
#############

The ``log_metric`` fixture is a powerful tool for recording performance metrics or other numerical data during your tests. It generates a file in the Prometheus text-based exposition format, which follows the OpenMetrics standard and is widely supported by modern monitoring systems.

**********
Use Case
**********

You can use this fixture to track key metrics from your embedded device, such as boot time, memory usage, or network throughput. By logging these values, you can monitor performance trends over time and catch regressions automatically.

**************
CLI Argument
**************

To enable metric logging, you need to provide the ``--metric-path`` command-line argument. This specifies the file where the metrics will be saved.

.. code:: bash

pytest --metric-path=output/metrics.txt

***************
Fixture Usage
***************

To use the fixture, simply include ``log_metric`` as an argument in your test function. It provides a callable that you can use to log your metrics.

.. code:: python

def test_my_app(log_metric):
# ... test code ...
boot_time = 123.45 # measured boot time
log_metric("boot_time", boot_time, target="esp32", sdk="v5.1")

***************
Output Format
***************

The metrics are written to the file specified by ``--metric-path`` in the Prometheus text-based format. Each line represents a single metric.

Example output in ``output/metrics.txt``:

.. code:: text

boot_time{target="esp32",sdk="v5.1"} 123.45

If ``--metric-path`` is not provided, the ``log_metric`` function will do nothing and issue a ``UserWarning``.
53 changes: 53 additions & 0 deletions pytest-embedded/pytest_embedded/plugin.py
Original file line number Diff line number Diff line change
Expand Up @@ -153,6 +153,10 @@ def pytest_addoption(parser):
base_group.addoption(
'--logfile-extension', default='.log', help='set the extension format of the log files. (Default: ".log")'
)
base_group.addoption(
'--metric-path',
help='Path to openmetrics txt file to log metrics. (Default: None)',
)

serial_group = parser.getgroup('embedded-serial')
serial_group.addoption('--port', help='serial port. (Env: "ESPPORT" if service "esp" specified, Default: "None")')
Expand Down Expand Up @@ -634,6 +638,55 @@ def port_app_cache() -> dict[str, str]:
return {}


@pytest.fixture(scope='session')
def metric_path(request: FixtureRequest) -> str | None:
    """
    Expose the value of the ``--metric-path`` command-line option.

    :param request: pytest request object used to reach the parsed config.
    :return: The path to the metric file, or None when the option was not given.
    """
    configured_path = request.config.getoption('metric_path', None)
    return configured_path


@pytest.fixture(scope='session')
def log_metric(metric_path: str | None) -> t.Callable[..., None]:
    """
    Provides a function to log metrics in OpenMetrics format.

    The metric file is removed once at the beginning of the test session so
    every session starts from a clean file; each call to the returned
    function appends one metric line.

    :param metric_path: Path to the metric file, from the ``--metric-path`` option.
    :return: A function to log metrics, or a no-op function if the path is not provided.
    """
    if not metric_path:

        def no_op(key: str, value: t.Any, **kwargs: t.Any) -> None:  # noqa: ARG001
            # stacklevel=2 attributes the warning to the caller's test code,
            # not to this plugin internals, so the user sees where it came from.
            warnings.warn('`--metric-path` is not specified, `log_metric` does nothing.', stacklevel=2)

        return no_op

    # Ensure the parent directory exists before the first append. A bare
    # filename (e.g. 'metrics.txt') has an empty dirname and needs no makedirs.
    parent_dir = os.path.dirname(metric_path)
    if parent_dir:
        os.makedirs(parent_dir, exist_ok=True)
    # Truncate any leftover file from a previous session.
    if os.path.exists(metric_path):
        os.remove(metric_path)

    def _log_metric_impl(key: str, value: t.Any, **kwargs: t.Any) -> None:
        """Append one metric line; keyword arguments become metric labels."""
        labels = ''
        if kwargs:
            label_str = ','.join(f'{k}="{v}"' for k, v in kwargs.items())
            labels = f'{{{label_str}}}'

        line = f'{key}{labels} {value}\n'

        # A file lock serializes writers so concurrent processes (e.g.
        # pytest-xdist workers) cannot interleave partial lines.
        lock = filelock.FileLock(f'{metric_path}.lock')
        with lock:
            with open(metric_path, 'a') as f:
                f.write(line)

    return _log_metric_impl


@pytest.fixture(scope='session', autouse=True)
def _mp_manager():
manager = MessageQueueManager()
Expand Down
29 changes: 29 additions & 0 deletions pytest-embedded/tests/test_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -792,3 +792,32 @@ def test_example():

result.assert_outcomes(passed=1)
assert 'Unknown pytest.mark.esp32 - is this a typo?' not in result.stdout.str()


def test_log_metric_with_path(pytester):
    """With --metric-path set, log_metric writes one OpenMetrics line per call."""
    metrics_output = pytester.path / 'metrics.txt'
    pytester.makepyfile("""
        def test_metric(log_metric):
            log_metric('my_metric', 123.45, label1='value1', target='esp32')
    """)

    run_result = pytester.runpytest(f'--metric-path={metrics_output}')
    run_result.assert_outcomes(passed=1)

    # Expected shape: name{label="value",...} value
    recorded = metrics_output.read_text()
    assert recorded == 'my_metric{label1="value1",target="esp32"} 123.45\n'


def test_log_metric_without_path(pytester):
    """Without --metric-path, log_metric is a no-op that emits a UserWarning."""
    pytester.makepyfile("""
        import pytest

        def test_metric_no_path(log_metric):
            with pytest.warns(UserWarning, match='`--metric-path` is not specified, `log_metric` does nothing.'):
                log_metric('my_metric', 123.45)
    """)

    run_result = pytester.runpytest()
    run_result.assert_outcomes(passed=1)
Loading