2 changes: 1 addition & 1 deletion Makefile
@@ -52,7 +52,7 @@ format: install $(BIN)/buf $(BIN)/license-header ## Format code

.PHONY: test
test: generate install gettestdata ## Run unit tests
uv run -- python -m unittest
uv run -- pytest

.PHONY: conformance
conformance: $(BIN)/protovalidate-conformance generate install ## Run conformance tests
9 changes: 7 additions & 2 deletions pyproject.toml
@@ -41,6 +41,7 @@ Issues = "https://github.com/bufbuild/protovalidate-python/issues"
dev = [
"google-re2-stubs>=0.1.1",
"mypy>=1.17.1",
"pytest>=8.4.1",
"ruff>=0.12.0",
"types-protobuf>=5.29.1.20250315",
]
@@ -101,8 +102,12 @@ known-first-party = ["protovalidate", "buf"]
ban-relative-imports = "all"

[tool.ruff.lint.per-file-ignores]
# Tests can use magic values, assertions, and relative imports.
"tests/**/*" = ["PLR2004", "S101", "TID252"]
# Tests can use assertions.
"test/**/*" = ["S101"]

[tool.pytest.ini_options]
# restrict testpaths to speed up test discovery
testpaths = ["test"]

[tool.mypy]
mypy_path = "gen"
116 changes: 52 additions & 64 deletions test/test_format.py
@@ -12,12 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
from collections.abc import MutableMapping
from collections.abc import Iterable, MutableMapping
from itertools import chain
from typing import Any, Optional

import celpy
import pytest
from celpy import celtypes
from google.protobuf import text_format

@@ -82,65 +82,53 @@ def get_eval_error_message(test: simple_pb2.SimpleTest) -> Optional[str]:
return None


class TestFormat(unittest.TestCase):
@classmethod
def setUpClass(cls):
# The test data from the cel-spec conformance tests
cel_test_data = load_test_data(f"test/testdata/string_ext_{CEL_SPEC_VERSION}.textproto")
# Our supplemental tests of functionality not in the cel conformance file, but defined in the spec.
supplemental_test_data = load_test_data("test/testdata/string_ext_supplemental.textproto")

# Combine the test data from both files into one
sections = cel_test_data.section
sections.extend(supplemental_test_data.section)

# Find the format tests which test successful formatting
cls._format_tests = chain.from_iterable(x.test for x in sections if x.name == "format")
# Find the format error tests which test errors during formatting
cls._format_error_tests = chain.from_iterable(x.test for x in sections if x.name == "format_errors")

cls._env = celpy.Environment(runner_class=InterpretedRunner)

def test_format_successes(self):
"""
Tests success scenarios for string.format
"""
for test in self._format_tests:
if test.name in skipped_tests:
continue
ast = self._env.compile(test.expr)
prog = self._env.program(ast, functions=extra_func.make_extra_funcs())

bindings = build_variables(test.bindings)
with self.subTest(test.name):
try:
result = prog.evaluate(bindings)
expected = get_expected_result(test)
if expected is not None:
self.assertEqual(result, expected)
else:
self.fail(f"[{test.name}]: expected a success result to be defined")
except celpy.CELEvalError as e:
self.fail(e)

def test_format_errors(self):
"""
Tests error scenarios for string.format
"""
for test in self._format_error_tests:
if test.name in skipped_error_tests:
continue
ast = self._env.compile(test.expr)
prog = self._env.program(ast, functions=extra_func.make_extra_funcs())

bindings = build_variables(test.bindings)
with self.subTest(test.name):
try:
prog.evaluate(bindings)
self.fail(f"[{test.name}]: expected an error to be raised during evaluation")
except celpy.CELEvalError as e:
msg = get_eval_error_message(test)
if msg is not None:
self.assertEqual(str(e), msg)
else:
self.fail(f"[{test.name}]: expected an eval error to be defined")
# The test data from the cel-spec conformance tests
cel_test_data = load_test_data(f"test/testdata/string_ext_{CEL_SPEC_VERSION}.textproto")
# Our supplemental tests of functionality not in the cel conformance file, but defined in the spec.
supplemental_test_data = load_test_data("test/testdata/string_ext_supplemental.textproto")

# Combine the test data from both files into one
sections = cel_test_data.section
sections.extend(supplemental_test_data.section)

# Find the format tests which test successful formatting
_format_tests: Iterable[simple_pb2.SimpleTest] = chain.from_iterable(x.test for x in sections if x.name == "format")
# Find the format error tests which test errors during formatting
_format_error_tests: Iterable[simple_pb2.SimpleTest] = chain.from_iterable(
x.test for x in sections if x.name == "format_errors"
)

env = celpy.Environment(runner_class=InterpretedRunner)


@pytest.mark.parametrize("format_test", _format_tests)
def test_format_successes(format_test):
"""Tests success scenarios for string.format"""
if format_test.name in skipped_tests:
pytest.skip(f"skipped test: {format_test.name}")
ast = env.compile(format_test.expr)
prog = env.program(ast, functions=extra_func.make_extra_funcs())

bindings = build_variables(format_test.bindings)
result = prog.evaluate(bindings)
expected = get_expected_result(format_test)
assert expected is not None, f"[{format_test.name}]: expected a success result to be defined"
assert result == expected


@pytest.mark.parametrize("format_error_test", _format_error_tests)
def test_format_errors(format_error_test):
"""Tests error scenarios for string.format"""
if format_error_test.name in skipped_error_tests:
pytest.skip(f"skipped test: {format_error_test.name}")
ast = env.compile(format_error_test.expr)
prog = env.program(ast, functions=extra_func.make_extra_funcs())

bindings = build_variables(format_error_test.bindings)
try:
prog.evaluate(bindings)
pytest.fail(f"[{format_error_test.name}]: expected an error to be raised during evaluation")
except celpy.CELEvalError as e:
msg = get_eval_error_message(format_error_test)
assert msg is not None, f"[{format_error_test.name}]: expected an eval error to be defined"
assert str(e) == msg
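
A note on the error-path test above (not part of this diff): pytest's raises context manager could replace the explicit try/except plus pytest.fail, since it fails the test automatically when no exception is raised. A minimal sketch of that alternative body for test_format_errors, reusing the helpers already defined in the file:

@pytest.mark.parametrize("format_error_test", _format_error_tests)
def test_format_errors(format_error_test):
    """Tests error scenarios for string.format, using pytest.raises."""
    if format_error_test.name in skipped_error_tests:
        pytest.skip(f"skipped test: {format_error_test.name}")
    ast = env.compile(format_error_test.expr)
    prog = env.program(ast, functions=extra_func.make_extra_funcs())

    bindings = build_variables(format_error_test.bindings)
    # pytest.raises fails the test if evaluate() does not raise a CELEvalError.
    with pytest.raises(celpy.CELEvalError) as excinfo:
        prog.evaluate(bindings)
    msg = get_eval_error_message(format_error_test)
    assert msg is not None, f"[{format_error_test.name}]: expected an eval error to be defined"
    assert str(excinfo.value) == msg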
15 changes: 6 additions & 9 deletions test/test_matches.py
@@ -12,18 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import celpy
from celpy import celtypes

from protovalidate.internal.extra_func import cel_matches


class TestCollectViolations(unittest.TestCase):
def test_function_matches_re2(self):
empty_string = celtypes.StringType("")
# \z is valid re2 syntax for end of text
self.assertTrue(cel_matches(empty_string, "^\\z"))
# \Z is invalid re2 syntax
self.assertIsInstance(cel_matches(empty_string, "^\\Z"), celpy.CELEvalError)
def test_function_matches_re2():
empty_string = celtypes.StringType("")
# \z is valid re2 syntax for end of text
assert cel_matches(empty_string, "^\\z")
# \Z is invalid re2 syntax
assert isinstance(cel_matches(empty_string, "^\\Z"), celpy.CELEvalError)
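
As a further aside (hypothetical, not part of this change), the same check could be parametrized in the style used in test_format.py. Because cel_matches returns a CELEvalError for an invalid pattern rather than raising it, an isinstance assertion is used instead of pytest.raises:

import pytest  # assumed available via the dev dependencies added above


@pytest.mark.parametrize(
    ("pattern", "is_valid_re2"),
    [("^\\z", True), ("^\\Z", False)],  # \z is valid RE2 end-of-text syntax; \Z is not
)
def test_function_matches_re2_parametrized(pattern, is_valid_re2):
    result = cel_matches(celtypes.StringType(""), pattern)
    if is_valid_re2:
        assert result
    else:
        assert isinstance(result, celpy.CELEvalError)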