diff --git a/.gitmodules b/.gitmodules
index a8bef85f..fe96453f 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -4,3 +4,6 @@
 [submodule "providers/openfeature-provider-flagd/test-harness"]
 	path = providers/openfeature-provider-flagd/test-harness
 	url = git@github.com:open-feature/flagd-testbed.git
+[submodule "providers/openfeature-provider-flagd/spec"]
+	path = providers/openfeature-provider-flagd/spec
+	url = https://github.com/open-feature/spec
diff --git a/providers/openfeature-provider-flagd/pyproject.toml b/providers/openfeature-provider-flagd/pyproject.toml
index ff3daaea..5da8c69c 100644
--- a/providers/openfeature-provider-flagd/pyproject.toml
+++ b/providers/openfeature-provider-flagd/pyproject.toml
@@ -37,6 +37,9 @@ dependencies = [
   "coverage[toml]>=6.5",
   "pytest",
   "pytest-bdd",
+  "testcontainers",
+  "asserts",
+  "grpcio-health-checking==1.60.0",
 ]
 post-install-commands = [
   "./scripts/gen_protos.sh"
diff --git a/providers/openfeature-provider-flagd/spec b/providers/openfeature-provider-flagd/spec
new file mode 160000
index 00000000..3c737a6e
--- /dev/null
+++ b/providers/openfeature-provider-flagd/spec
@@ -0,0 +1 @@
+Subproject commit 3c737a6e86ae0aa9bd81fcbfe8b6ada9a33993a7
diff --git a/providers/openfeature-provider-flagd/tests/e2e/conftest.py b/providers/openfeature-provider-flagd/tests/e2e/conftest.py
index 243f5724..2aae58d9 100644
--- a/providers/openfeature-provider-flagd/tests/e2e/conftest.py
+++ b/providers/openfeature-provider-flagd/tests/e2e/conftest.py
@@ -1,208 +1,33 @@
 import typing
 
 import pytest
-from pytest_bdd import given, parsers, then, when
-from tests.e2e.parsers import to_bool
+from testcontainers.core.container import DockerContainer
+from tests.e2e.flagd_container import FlagdContainer
+from tests.e2e.steps import *  # noqa: F403
 
 from openfeature import api
-from openfeature.client import OpenFeatureClient
 from openfeature.contrib.provider.flagd import FlagdProvider
-from openfeature.contrib.provider.flagd.config import ResolverType
-from openfeature.evaluation_context import EvaluationContext
 
 JsonPrimitive = typing.Union[str, bool, float, int]
 
 
-@pytest.fixture
-def evaluation_context() -> EvaluationContext:
-    return EvaluationContext()
-
-
-@given("a flagd provider is set", target_fixture="client")
-def setup_provider(flag_file) -> OpenFeatureClient:
+@pytest.fixture(autouse=True, scope="package")
+def setup(request, port, image, resolver_type):
+    container: DockerContainer = FlagdContainer(
+        image=image,
+        port=port,
+    )
+    # start the flagd container, then point the provider at its mapped port
+    c = container.start()
     api.set_provider(
         FlagdProvider(
-            resolver_type=ResolverType.IN_PROCESS,
-            offline_flag_source_path=flag_file,
-            offline_poll_interval_seconds=0.1,
+            resolver_type=resolver_type,
+            port=int(container.get_exposed_port(port)),
         )
     )
-    return api.get_client()
-
-
-@when(
-    parsers.cfparse(
-        'a zero-value boolean flag with key "{key}" is evaluated with default value "{default:bool}"',
-        extra_types={"bool": to_bool},
-    ),
-    target_fixture="key_and_default",
-)
-@when(
-    parsers.cfparse(
-        'a zero-value string flag with key "{key}" is evaluated with default value "{default}"',
-    ),
-    target_fixture="key_and_default",
-)
-@when(
-    parsers.cfparse(
-        'a string flag with key "{key}" is evaluated with default value "{default}"'
-    ),
-    target_fixture="key_and_default",
-)
-@when(
-    parsers.cfparse(
-        'a zero-value integer flag with key "{key}" is evaluated with default value {default:d}',
-    ),
-    target_fixture="key_and_default",
-)
-@when(
-    parsers.cfparse(
-        'an integer flag with key "{key}" is evaluated with default value {default:d}',
-    ),
-    target_fixture="key_and_default",
-)
-@when(
-    parsers.cfparse(
-        'a zero-value float flag with key "{key}" is evaluated with default value {default:f}',
-    ),
-    target_fixture="key_and_default",
-)
-def setup_key_and_default(
-    key: str, default: JsonPrimitive
-) -> typing.Tuple[str, JsonPrimitive]:
-    return (key, default)
-
-
-@when(
-    parsers.cfparse(
-        'a context containing a targeting key with value "{targeting_key}"'
-    ),
-)
-def assign_targeting_context(evaluation_context: EvaluationContext, targeting_key: str):
-    """a context containing a targeting key with value ."""
-    evaluation_context.targeting_key = targeting_key
-
-
-@when(
-    parsers.cfparse('a context containing a key "{key}", with value "{value}"'),
-)
-@when(
-    parsers.cfparse('a context containing a key "{key}", with value {value:d}'),
-)
-def update_context(
-    evaluation_context: EvaluationContext, key: str, value: JsonPrimitive
-):
-    """a context containing a key and value."""
-    evaluation_context.attributes[key] = value
-
-
-@when(
-    parsers.cfparse(
-        'a context containing a nested property with outer key "{outer}" and inner key "{inner}", with value "{value}"'
-    ),
-)
-@when(
-    parsers.cfparse(
-        'a context containing a nested property with outer key "{outer}" and inner key "{inner}", with value {value:d}'
-    ),
-)
-def update_context_nested(
-    evaluation_context: EvaluationContext,
-    outer: str,
-    inner: str,
-    value: typing.Union[str, int],
-):
-    """a context containing a nested property with outer key, and inner key, and value."""
-    if outer not in evaluation_context.attributes:
-        evaluation_context.attributes[outer] = {}
-    evaluation_context.attributes[outer][inner] = value
-
-
-@then(
-    parsers.cfparse(
-        'the resolved boolean zero-value should be "{expected_value:bool}"',
-        extra_types={"bool": to_bool},
-    )
-)
-def assert_boolean_value(
-    client: OpenFeatureClient,
-    key_and_default: tuple,
-    expected_value: bool,
-    evaluation_context: EvaluationContext,
-):
-    key, default = key_and_default
-    evaluation_result = client.get_boolean_value(key, default, evaluation_context)
-    assert evaluation_result == expected_value
-
-
-@then(
-    parsers.cfparse(
-        "the resolved integer zero-value should be {expected_value:d}",
-    )
-)
-@then(parsers.cfparse("the returned value should be {expected_value:d}"))
-def assert_integer_value(
-    client: OpenFeatureClient,
-    key_and_default: tuple,
-    expected_value: bool,
-    evaluation_context: EvaluationContext,
-):
-    key, default = key_and_default
-    evaluation_result = client.get_integer_value(key, default, evaluation_context)
-    assert evaluation_result == expected_value
-
-
-@then(
-    parsers.cfparse(
-        "the resolved float zero-value should be {expected_value:f}",
-    )
-)
-def assert_float_value(
-    client: OpenFeatureClient,
-    key_and_default: tuple,
-    expected_value: bool,
-    evaluation_context: EvaluationContext,
-):
-    key, default = key_and_default
-    evaluation_result = client.get_float_value(key, default, evaluation_context)
-    assert evaluation_result == expected_value
-
-
-@then(parsers.cfparse('the returned value should be "{expected_value}"'))
-def assert_string_value(
-    client: OpenFeatureClient,
-    key_and_default: tuple,
-    expected_value: bool,
-    evaluation_context: EvaluationContext,
-):
-    key, default = key_and_default
-    evaluation_result = client.get_string_value(key, default, evaluation_context)
-    assert evaluation_result == expected_value
-
-
-@then(
-    parsers.cfparse(
-        'the resolved string zero-value should be ""',
-    )
-)
-def assert_empty_string(
-    client: OpenFeatureClient,
-    key_and_default: tuple,
-    evaluation_context: EvaluationContext,
-):
-    key, default = key_and_default
-    evaluation_result = client.get_string_value(key, default, evaluation_context)
-    assert evaluation_result == ""
+    def fin():
+        c.stop()
 
 
-@then(parsers.cfparse('the returned reason should be "{reason}"'))
-def assert_reason(
-    client: OpenFeatureClient,
-    key_and_default: tuple,
-    evaluation_context: EvaluationContext,
-    reason: str,
-):
-    """the returned reason should be ."""
-    key, default = key_and_default
-    evaluation_result = client.get_string_details(key, default, evaluation_context)
-    assert evaluation_result.reason.value == reason
+    # stop the container once the test package finishes
+    request.addfinalizer(fin)
diff --git a/providers/openfeature-provider-flagd/tests/e2e/flagd_container.py b/providers/openfeature-provider-flagd/tests/e2e/flagd_container.py
new file mode 100644
index 00000000..a9514363
--- /dev/null
+++ b/providers/openfeature-provider-flagd/tests/e2e/flagd_container.py
@@ -0,0 +1,59 @@
+import time
+
+import grpc
+from grpc_health.v1 import health_pb2, health_pb2_grpc
+from testcontainers.core.container import DockerContainer
+from testcontainers.core.waiting_utils import wait_container_is_ready, wait_for_logs
+
+HEALTH_CHECK = 8014
+
+
+class FlagdContainer(DockerContainer):
+    def __init__(
+        self,
+        image: str = "ghcr.io/open-feature/flagd-testbed:v0.5.13",
+        port: int = 8013,
+        **kwargs,
+    ) -> None:
+        super().__init__(image, **kwargs)
+        self.port = port
+        self.with_exposed_ports(self.port, HEALTH_CHECK)
+
+    def start(self) -> "FlagdContainer":
+        super().start()
+        self._checker(self.get_container_host_ip(), self.get_exposed_port(HEALTH_CHECK))
+        return self
+
+    @wait_container_is_ready(ConnectionError)
+    def _checker(self, host: str, port: str) -> None:
+        # first, wait for flagd to report that it is listening
+        wait_for_logs(
+            self,
+            "listening",
+            5,
+        )
+
+        time.sleep(1)
+        # then probe the gRPC health-check endpoint
+        with grpc.insecure_channel(host + ":" + port) as channel:
+            health_stub = health_pb2_grpc.HealthStub(channel)
+
+            def health_check_call(stub: health_pb2_grpc.HealthStub):
+                request = health_pb2.HealthCheckRequest()
+                resp = stub.Check(request)
+                if resp.status == health_pb2.HealthCheckResponse.SERVING:
+                    return True
+                elif resp.status == health_pb2.HealthCheckResponse.NOT_SERVING:
+                    return False
+
+            # poll the health status once per second,
+            # giving flagd up to 30 seconds to report SERVING
+            ok = False
+            for _ in range(30):
+                ok = health_check_call(health_stub)
+                if ok:
+                    break
+                time.sleep(1)
+
+            if not ok:
+                raise ConnectionError("flagd not ready in time")
diff --git a/providers/openfeature-provider-flagd/tests/e2e/parsers.py b/providers/openfeature-provider-flagd/tests/e2e/parsers.py
index 16e89d94..9d9560c6 100644
--- a/providers/openfeature-provider-flagd/tests/e2e/parsers.py
+++ b/providers/openfeature-provider-flagd/tests/e2e/parsers.py
@@ -1,2 +1,7 @@
 def to_bool(s: str) -> bool:
     return s.lower() == "true"
+
+
+def to_list(s: str) -> list:
+    values = s.replace('"', "").split(",")
+    return [v.strip() for v in values]
diff --git a/providers/openfeature-provider-flagd/tests/e2e/steps.py b/providers/openfeature-provider-flagd/tests/e2e/steps.py
new file mode 100644
index 00000000..fe490c5f
--- /dev/null
+++ b/providers/openfeature-provider-flagd/tests/e2e/steps.py
@@ -0,0 +1,553 @@
+import time
+import typing
+
+import pytest
+from asserts import assert_equal, assert_in, assert_not_equal, assert_true
+from pytest_bdd import given, parsers, then, when
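+# to_bool/to_list (tests/e2e/parsers.py) coerce quoted Gherkin values into Python types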
+from tests.e2e.parsers import to_bool, to_list
+
+from openfeature import api
+from openfeature.client import OpenFeatureClient
+from openfeature.evaluation_context import EvaluationContext
+from openfeature.event import EventDetails, ProviderEvent
+from openfeature.flag_evaluation import ErrorCode, FlagEvaluationDetails, Reason
+from openfeature.provider import ProviderStatus
+
+JsonObject = typing.Union[dict, list]
+JsonPrimitive = typing.Union[str, bool, float, int, JsonObject]
+
+
+@pytest.fixture
+def evaluation_context() -> EvaluationContext:
+    return EvaluationContext()
+
+
+@given("a flagd provider is set", target_fixture="client")
+@given("a provider is registered", target_fixture="client")
+def setup_provider() -> OpenFeatureClient:
+    client = api.get_client()
+    wait_for(lambda: client.get_provider_status() == ProviderStatus.READY)
+    return client
+
+
+@when(
+    parsers.cfparse(
+        'a {ignored:s?}boolean flag with key "{key}" is evaluated with {details:s?}default value "{default:bool}"',
+        extra_types={"bool": to_bool, "s": str},
+    ),
+    target_fixture="key_and_default",
+)
+@when(
+    parsers.cfparse(
+        'a {ignored:s?}string flag with key "{key}" is evaluated with {details:s?}default value "{default}"',
+        extra_types={"s": str},
+    ),
+    target_fixture="key_and_default",
+)
+@when(
+    parsers.cfparse(
+        'a{ignored:s?} integer flag with key "{key}" is evaluated with {details:s?}default value {default:d}',
+        extra_types={"s": str},
+    ),
+    target_fixture="key_and_default",
+)
+@when(
+    parsers.cfparse(
+        'a {ignored:s?}float flag with key "{key}" is evaluated with {details:s?}default value {default:f}',
+        extra_types={"s": str},
+    ),
+    target_fixture="key_and_default",
+)
+@when(
+    parsers.cfparse(
+        'a string flag with key "{key}" is evaluated as an integer, with details and a default value {default:d}',
+    ),
+    target_fixture="key_and_default",
+)
+@when(
+    parsers.cfparse(
+        'a flag with key "{key}" is evaluated with default value "{default}"',
+    ),
+    target_fixture="key_and_default",
+)
+def setup_key_and_default(
+    key: str, default: JsonPrimitive
+) -> typing.Tuple[str, JsonPrimitive]:
+    return (key, default)
+
+
+@when(
+    parsers.cfparse(
+        'an object flag with key "{key}" is evaluated with a null default value',
+    ),
+    target_fixture="key_and_default",
+)
+@when(
+    parsers.cfparse(
+        'an object flag with key "{key}" is evaluated with details and a null default value',
+    ),
+    target_fixture="key_and_default",
+)
+def setup_key_and_default_for_object(key: str) -> typing.Tuple[str, JsonObject]:
+    return (key, {})
+
+
+@when(
+    parsers.cfparse(
+        'a context containing a targeting key with value "{targeting_key}"'
+    ),
+)
+def assign_targeting_context(evaluation_context: EvaluationContext, targeting_key: str):
+    """a context containing a targeting key with value ."""
+    evaluation_context.targeting_key = targeting_key
+
+
+@when(
+    parsers.cfparse(
+        'context contains keys {fields:s} with values "{svalue}", "{svalue2}", {ivalue:d}, "{bvalue:bool}"',
+        extra_types={"bool": to_bool, "s": to_list},
+    ),
+)
+def assign_targeting_context_2(
+    evaluation_context: EvaluationContext,
+    fields: list,
+    svalue: str,
+    svalue2: str,
+    ivalue: int,
+    bvalue: bool,
+):
+    evaluation_context.attributes[fields[0]] = svalue
+    evaluation_context.attributes[fields[1]] = svalue2
+    evaluation_context.attributes[fields[2]] = ivalue
+    evaluation_context.attributes[fields[3]] = bvalue
+
+
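+# the @when steps below mutate the shared evaluation_context fixture in place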
+@when(
+    parsers.cfparse('a context containing a key "{key}", with value "{value}"'),
+)
+@when(
+    parsers.cfparse('a context containing a key "{key}", with value {value:d}'),
+)
+def update_context(
+    evaluation_context: EvaluationContext, key: str, value: JsonPrimitive
+):
+    """a context containing a key and value."""
+    evaluation_context.attributes[key] = value
+
+
+@when(
+    parsers.cfparse(
+        'a context containing a nested property with outer key "{outer}" and inner key "{inner}", with value "{value}"'
+    ),
+)
+@when(
+    parsers.cfparse(
+        'a context containing a nested property with outer key "{outer}" and inner key "{inner}", with value {value:d}'
+    ),
+)
+def update_context_nested(
+    evaluation_context: EvaluationContext,
+    outer: str,
+    inner: str,
+    value: typing.Union[str, int],
+):
+    """a context containing a nested property with outer key, and inner key, and value."""
+    if outer not in evaluation_context.attributes:
+        evaluation_context.attributes[outer] = {}
+    evaluation_context.attributes[outer][inner] = value
+
+
+@then(
+    parsers.cfparse(
+        'the resolved boolean value should be "{expected_value:bool}"',
+        extra_types={"bool": to_bool},
+    )
+)
+@then(
+    parsers.cfparse(
+        'the resolved boolean zero-value should be "{expected_value:bool}"',
+        extra_types={"bool": to_bool},
+    )
+)
+def assert_boolean_value(
+    client: OpenFeatureClient,
+    key_and_default: tuple,
+    expected_value: bool,
+    evaluation_context: EvaluationContext,
+):
+    key, default = key_and_default
+    evaluation_result = client.get_boolean_value(key, default, evaluation_context)
+    assert_equal(evaluation_result, expected_value)
+
+
+@then(
+    parsers.cfparse(
+        'the resolved boolean details value should be "{expected_value:bool}", the variant should be "{variant}", and the reason should be "{reason}"',
+        extra_types={"bool": to_bool},
+    )
+)
+def assert_boolean_value_with_details(
+    client: OpenFeatureClient,
+    key_and_default: tuple,
+    expected_value: bool,
+    variant: str,
+    reason: str,
+    evaluation_context: EvaluationContext,
+):
+    key, default = key_and_default
+    evaluation_result = client.get_boolean_details(key, default, evaluation_context)
+    assert_equal(evaluation_result.value, expected_value)
+    assert_equal(evaluation_result.reason, reason)
+    assert_equal(evaluation_result.variant, variant)
+
+
+@then(
+    parsers.cfparse(
+        "the resolved integer {ignored:s?}value should be {expected_value:d}",
+        extra_types={"s": str},
+    )
+)
+@then(parsers.cfparse("the returned value should be {expected_value:d}"))
+def assert_integer_value(
+    client: OpenFeatureClient,
+    key_and_default: tuple,
+    expected_value: int,
+    evaluation_context: EvaluationContext,
+):
+    key, default = key_and_default
+    evaluation_result = client.get_integer_value(key, default, evaluation_context)
+    assert_equal(evaluation_result, expected_value)
+
+
+@then(
+    parsers.cfparse(
+        'the resolved integer details value should be {expected_value:d}, the variant should be "{variant}", and the reason should be "{reason}"',
+    )
+)
+def assert_integer_value_with_details(
+    client: OpenFeatureClient,
+    key_and_default: tuple,
+    expected_value: int,
+    variant: str,
+    reason: str,
+    evaluation_context: EvaluationContext,
+):
+    key, default = key_and_default
+    evaluation_result = client.get_integer_details(key, default, evaluation_context)
+    assert_equal(evaluation_result.value, expected_value)
+    assert_equal(evaluation_result.reason, reason)
+    assert_equal(evaluation_result.variant, variant)
+
+
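+# "{ignored:s?}" makes the "zero-" qualifier optional, so one step matches both phrasings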
+@then(
+    parsers.cfparse(
+        "the resolved float {ignored:s?}value should be {expected_value:f}",
+        extra_types={"s": str},
+    )
+)
+def assert_float_value(
+    client: OpenFeatureClient,
+    key_and_default: tuple,
+    expected_value: float,
+    evaluation_context: EvaluationContext,
+):
+    key, default = key_and_default
+    evaluation_result = client.get_float_value(key, default, evaluation_context)
+    assert_equal(evaluation_result, expected_value)
+
+
+@then(
+    parsers.cfparse(
+        'the resolved float details value should be {expected_value:f}, the variant should be "{variant}", and the reason should be "{reason}"',
+    )
+)
+def assert_float_value_with_details(
+    client: OpenFeatureClient,
+    key_and_default: tuple,
+    expected_value: float,
+    variant: str,
+    reason: str,
+    evaluation_context: EvaluationContext,
+):
+    key, default = key_and_default
+    evaluation_result = client.get_float_details(key, default, evaluation_context)
+    assert_equal(evaluation_result.value, expected_value)
+    assert_equal(evaluation_result.reason, reason)
+    assert_equal(evaluation_result.variant, variant)
+
+
+@then(parsers.cfparse('the returned value should be "{expected_value}"'))
+def assert_string_value(
+    client: OpenFeatureClient,
+    key_and_default: tuple,
+    expected_value: str,
+    evaluation_context: EvaluationContext,
+):
+    key, default = key_and_default
+    evaluation_details = client.get_string_details(key, default, evaluation_context)
+    assert_equal(evaluation_details.value, expected_value)
+
+
+@then(
+    parsers.cfparse(
+        'the resolved string zero-value should be ""',
+    )
+)
+def assert_empty_string(
+    client: OpenFeatureClient,
+    key_and_default: tuple,
+    evaluation_context: EvaluationContext,
+):
+    assert_string(client, key_and_default, evaluation_context, "")
+
+
+@then(
+    parsers.cfparse(
+        'the resolved string value should be "{expected_value}"',
+    )
+)
+def assert_string(
+    client: OpenFeatureClient,
+    key_and_default: tuple,
+    evaluation_context: EvaluationContext,
+    expected_value: str,
+):
+    key, default = key_and_default
+    evaluation_result = client.get_string_value(key, default, evaluation_context)
+    assert_equal(evaluation_result, expected_value)
+
+
+@then(
+    parsers.cfparse(
+        'the resolved string response should be "{expected_value}"',
+    )
+)
+def assert_string_response(
+    client: OpenFeatureClient,
+    key_and_default: tuple,
+    evaluation_context: EvaluationContext,
+    expected_value: str,
+):
+    key, default = key_and_default
+    evaluation_result = client.get_string_value(key, default, evaluation_context)
+    assert_equal(evaluation_result, expected_value)
+
+
+@then(
+    parsers.cfparse(
+        'the resolved flag value is "{expected_value}" when the context is empty',
+    )
+)
+def assert_string_without_context(
+    client: OpenFeatureClient,
+    key_and_default: tuple,
+    evaluation_context: EvaluationContext,
+    expected_value: str,
+):
+    key, default = key_and_default
+    evaluation_result = client.get_string_value(key, default, None)
+    assert_equal(evaluation_result, expected_value)
+
+
+@then(
+    parsers.cfparse(
+        'the resolved object {details:s?}value should be contain fields "{bool_field}", "{string_field}", and "{int_field}", with values "{bvalue:bool}", "{svalue}" and {ivalue:d}, respectively',
+        extra_types={"bool": to_bool, "s": str},
+    ),
+    target_fixture="evaluation_details",
+)
+def assert_object(  # noqa: PLR0913
+    client: OpenFeatureClient,
+    key_and_default: tuple,
+    bool_field: str,
+    string_field: str,
+    int_field: str,
+    bvalue: bool,
+    svalue: str,
+    ivalue: int,
+    details: str,
+) -> FlagEvaluationDetails:
+    key, default = key_and_default
+    if details:
+        evaluation_result = client.get_object_details(key, default)
+        value = evaluation_result.value
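+        # the object payload must contain all three typed fields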
+        assert_in(bool_field, value)
+        assert_in(string_field, value)
+        assert_in(int_field, value)
+        assert_equal(value[bool_field], bvalue)
+        assert_equal(value[string_field], svalue)
+        assert_equal(value[int_field], ivalue)
+        return evaluation_result
+    else:
+        evaluation_result = client.get_object_value(key, default)
+        assert_in(bool_field, evaluation_result)
+        assert_in(string_field, evaluation_result)
+        assert_in(int_field, evaluation_result)
+        assert_equal(evaluation_result[bool_field], bvalue)
+        assert_equal(evaluation_result[string_field], svalue)
+        assert_equal(evaluation_result[int_field], ivalue)
+        assert_not_equal(evaluation_result, None)
+
+
+@then(
+    parsers.cfparse(
+        'the variant should be "{variant}", and the reason should be "{reason}"',
+    )
+)
+def assert_for_variant_and_reason(
+    client: OpenFeatureClient,
+    evaluation_details: FlagEvaluationDetails,
+    variant: str,
+    reason: str,
+):
+    assert_equal(evaluation_details.reason, Reason[reason])
+    assert_equal(evaluation_details.variant, variant)
+
+
+@then(
+    parsers.cfparse(
+        "the default string value should be returned",
+    ),
+    target_fixture="evaluation_details",
+)
+def assert_default_string(
+    client: OpenFeatureClient,
+    key_and_default: tuple,
+    evaluation_context: EvaluationContext,
+) -> FlagEvaluationDetails[str]:
+    key, default = key_and_default
+    evaluation_result = client.get_string_details(key, default, evaluation_context)
+    assert_equal(evaluation_result.value, default)
+    return evaluation_result
+
+
+@then(
+    parsers.cfparse(
+        "the default integer value should be returned",
+    ),
+    target_fixture="evaluation_details",
+)
+def assert_default_integer(
+    client: OpenFeatureClient,
+    key_and_default: tuple,
+    evaluation_context: EvaluationContext,
+) -> FlagEvaluationDetails[int]:
+    key, default = key_and_default
+    evaluation_result = client.get_integer_details(key, default, evaluation_context)
+    assert_equal(evaluation_result.value, default)
+    return evaluation_result
+
+
+@then(
+    parsers.cfparse(
+        'the reason should indicate an error and the error code should indicate a missing flag with "{error}"',
+    )
+)
+@then(
+    parsers.cfparse(
+        'the reason should indicate an error and the error code should indicate a type mismatch with "{error}"',
+    )
+)
+def assert_for_error(
+    client: OpenFeatureClient,
+    evaluation_details: FlagEvaluationDetails,
+    error: str,
+):
+    assert_equal(evaluation_details.error_code, ErrorCode[error])
+    assert_equal(evaluation_details.reason, Reason.ERROR)
+
+
+@then(
+    parsers.cfparse(
+        'the resolved string details value should be "{expected_value}", the variant should be "{variant}", and the reason should be "{reason}"',
+        extra_types={"bool": to_bool},
+    )
+)
+def assert_string_value_with_details(
+    client: OpenFeatureClient,
+    key_and_default: tuple,
+    expected_value: str,
+    variant: str,
+    reason: str,
+    evaluation_context: EvaluationContext,
+):
+    key, default = key_and_default
+    evaluation_result = client.get_string_details(key, default, evaluation_context)
+    assert_equal(evaluation_result.value, expected_value)
+    assert_equal(evaluation_result.reason, reason)
+    assert_equal(evaluation_result.variant, variant)
+
+
+@then(parsers.cfparse('the returned reason should be "{reason}"'))
+def assert_reason(
+    client: OpenFeatureClient,
+    key_and_default: tuple,
+    evaluation_context: EvaluationContext,
+    reason: str,
+):
+    """the returned reason should be ."""
+    key, default = key_and_default
+    evaluation_result = client.get_string_details(key, default, evaluation_context)
+    assert_equal(evaluation_result.reason, reason)
+
+
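+# eventing steps: handlers record their invocations in the per-scenario "context" dict fixture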
+@when(parsers.cfparse("a PROVIDER_READY handler is added"))
+def provider_ready_add(client: OpenFeatureClient, context):
+    def provider_ready_handler(event_details: EventDetails):
+        context["provider_ready_ran"] = True
+
+    client.add_handler(ProviderEvent.PROVIDER_READY, provider_ready_handler)
+
+
+@then(parsers.cfparse("the PROVIDER_READY handler must run"))
+def provider_ready_was_executed(client: OpenFeatureClient, context):
+    assert_true(context["provider_ready_ran"])
+
+
+@when(parsers.cfparse("a PROVIDER_CONFIGURATION_CHANGED handler is added"))
+def provider_changed_add(client: OpenFeatureClient, context):
+    def provider_changed_handler(event_details: EventDetails):
+        context["provider_changed_ran"] = True
+        # record which flags changed so flag_was_changed() can assert on them
+        context["changed_flags"] = event_details.flags_changed
+
+    client.add_handler(
+        ProviderEvent.PROVIDER_CONFIGURATION_CHANGED, provider_changed_handler
+    )
+
+
+@pytest.fixture(scope="function")
+def context():
+    return {}
+
+
+@when(parsers.cfparse('a flag with key "{flag_key}" is modified'))
+def flag_modified(
+    client: OpenFeatureClient,
+    context,
+    flag_key: str,
+):
+    context["flag_key"] = flag_key
+
+
+@then(
+    parsers.cfparse("the PROVIDER_CONFIGURATION_CHANGED handler must run"),
+)
+def provider_changed_was_executed(client: OpenFeatureClient, context):
+    wait_for(lambda: context.get("provider_changed_ran"))
+    assert_equal(context["provider_changed_ran"], True)
+
+
+@then(parsers.cfparse('the event details must indicate "{flag_name}" was altered'))
+def flag_was_changed(
+    flag_name: str,
+    context,
+):
+    wait_for(lambda: flag_name in context.get("changed_flags", []))
+    assert_in(flag_name, context.get("changed_flags"))
+
+
+def wait_for(pred, poll_sec=2, timeout_sec=10):
+    start = time.time()
+    while not (ok := pred()) and (time.time() - start < timeout_sec):
+        time.sleep(poll_sec)
+    assert_true(ok, "timed out waiting for condition")
+    return ok
diff --git a/providers/openfeature-provider-flagd/tests/e2e/test_in-process-file.py b/providers/openfeature-provider-flagd/tests/e2e/test_in-process-file.py
new file mode 100644
index 00000000..2d09ca11
--- /dev/null
+++ b/providers/openfeature-provider-flagd/tests/e2e/test_in-process-file.py
@@ -0,0 +1,72 @@
+import json
+import os
+import tempfile
+from os import listdir
+
+import pytest
+import yaml
+from pytest_bdd import scenario, scenarios
+
+from openfeature import api
+from openfeature.contrib.provider.flagd import FlagdProvider
+from openfeature.contrib.provider.flagd.config import ResolverType
+
+KEY_EVALUATORS = "$evaluators"
+
+KEY_FLAGS = "flags"
+
+MERGED_FILE = "merged_file"
+
+
+@pytest.fixture(params=["json", "yaml"], scope="package")
+def file_name(request):
+    extension = request.param
+    result = {KEY_FLAGS: {}, KEY_EVALUATORS: {}}
+
+    path = os.path.abspath(
+        os.path.join(os.path.dirname(__file__), "../../test-harness/flags/")
+    )
+
+    for f in listdir(path):
+        with open(path + "/" + f, "rb") as infile:
+            loaded_json = json.load(infile)
+            result[KEY_FLAGS] = {**result[KEY_FLAGS], **loaded_json[KEY_FLAGS]}
+            if loaded_json.get(KEY_EVALUATORS):
+                result[KEY_EVALUATORS] = {
+                    **result[KEY_EVALUATORS],
+                    **loaded_json[KEY_EVALUATORS],
+                }
+
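+    # write the merged flag set to a single temp file in the requested format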
+    with tempfile.NamedTemporaryFile(
+        "w", delete=False, suffix="." + extension
+    ) as outfile:
+        if extension == "json":
+            json.dump(result, outfile)
+        else:
+            yaml.dump(result, outfile)
+
+    return outfile
+
+
+@pytest.fixture(autouse=True, scope="package")
+def setup(request, file_name):
+    """Register an in-process provider that reads the merged offline flag file."""
+    api.set_provider(
+        FlagdProvider(
+            resolver_type=ResolverType.IN_PROCESS,
+            offline_flag_source_path=file_name.name,
+        )
+    )
+
+
+@pytest.mark.skip(reason="Eventing not implemented")
+@scenario("../../test-harness/gherkin/flagd.feature", "Flag change event")
+def test_flag_change_event():
+    """not implemented"""
+
+
+scenarios(
+    "../../test-harness/gherkin/flagd.feature",
+    "../../test-harness/gherkin/flagd-json-evaluator.feature",
+    "../../spec/specification/assets/gherkin/evaluation.feature",
+)
diff --git a/providers/openfeature-provider-flagd/tests/e2e/test_inprocess_custom_ops.py b/providers/openfeature-provider-flagd/tests/e2e/test_inprocess_custom_ops.py
deleted file mode 100644
index 70ceb1aa..00000000
--- a/providers/openfeature-provider-flagd/tests/e2e/test_inprocess_custom_ops.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import pytest
-from pytest_bdd import scenario
-from tests.conftest import setup_flag_file
-
-
-@pytest.fixture
-def flag_file(tmp_path):
-    return setup_flag_file(tmp_path, "custom-ops.json")
-
-
-@scenario(
-    "../../test-harness/gherkin/flagd-json-evaluator.feature", "Fractional operator"
-)
-def test_fractional_operator():
-    """Fractional operator."""
-
-
-@scenario(
-    "../../test-harness/gherkin/flagd-json-evaluator.feature",
-    "Semantic version operator numeric comparison",
-)
-def test_semantic_version_operator_numeric_comparison():
-    """Semantic version operator numeric comparison."""
-
-
-@scenario(
-    "../../test-harness/gherkin/flagd-json-evaluator.feature",
-    "Semantic version operator semantic comparison",
-)
-def test_semantic_version_operator_semantic_comparison():
-    """Semantic version operator semantic comparison."""
-
-
-@scenario(
-    "../../test-harness/gherkin/flagd-json-evaluator.feature", "Substring operators"
-)
-def test_substring_operators():
-    """Substring operators."""
diff --git a/providers/openfeature-provider-flagd/tests/e2e/test_inprocess_edge_cases.py b/providers/openfeature-provider-flagd/tests/e2e/test_inprocess_edge_cases.py
deleted file mode 100644
index 0583d8e9..00000000
--- a/providers/openfeature-provider-flagd/tests/e2e/test_inprocess_edge_cases.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import pytest
-from pytest_bdd import scenario
-from tests.conftest import setup_flag_file
-
-
-@pytest.fixture
-def flag_file(tmp_path):
-    return setup_flag_file(tmp_path, "edge-case-flags.json")
-
-
-@scenario(
-    "../../test-harness/gherkin/flagd-json-evaluator.feature", "Errors and edge cases"
-)
-def test_errors_and_edge_cases():
-    """Errors and edge cases."""
diff --git a/providers/openfeature-provider-flagd/tests/e2e/test_inprocess_evaluator_reuse.py b/providers/openfeature-provider-flagd/tests/e2e/test_inprocess_evaluator_reuse.py
deleted file mode 100644
index 5abcddb5..00000000
--- a/providers/openfeature-provider-flagd/tests/e2e/test_inprocess_evaluator_reuse.py
+++ /dev/null
@@ -1,13 +0,0 @@
-import pytest
-from pytest_bdd import scenario
-from tests.conftest import setup_flag_file
-
-
-@pytest.fixture
-def flag_file(tmp_path):
-    return setup_flag_file(tmp_path, "evaluator-refs.json")
-
-
-@scenario("../../test-harness/gherkin/flagd-json-evaluator.feature", "Evaluator reuse")
-def test_evaluator_reuse():
-    """Evaluator reuse."""
diff --git a/providers/openfeature-provider-flagd/tests/e2e/test_inprocess_events.py b/providers/openfeature-provider-flagd/tests/e2e/test_inprocess_events.py
deleted file mode 100644
index e00a4844..00000000
--- a/providers/openfeature-provider-flagd/tests/e2e/test_inprocess_events.py
+++ /dev/null
@@ -1,91 +0,0 @@
-import logging
-import time
-
-import pytest
-from pytest_bdd import parsers, scenario, then, when
-from tests.conftest import setup_flag_file
-
-from openfeature.client import OpenFeatureClient, ProviderEvent
-
-
-@scenario("../../test-harness/gherkin/flagd.feature", "Provider ready event")
-def test_ready_event(caplog):
-    """Provider ready event"""
-    caplog.set_level(logging.DEBUG)
-
-
-@scenario("../../test-harness/gherkin/flagd.feature", "Flag change event")
-def test_change_event():
-    """Flag change event"""
-
-
-@pytest.fixture
-def flag_file(tmp_path):
-    return setup_flag_file(tmp_path, "changing-flag-bar.json")
-
-
-@pytest.fixture
-def handles() -> list:
-    return []
-
-
-@when(
-    parsers.cfparse(
-        "a {event_type:ProviderEvent} handler is added",
-        extra_types={"ProviderEvent": ProviderEvent},
-    ),
-    target_fixture="handles",
-)
-def add_event_handler(
-    client: OpenFeatureClient, event_type: ProviderEvent, handles: list
-):
-    def handler(event):
-        handles.append(
-            {
-                "type": event_type,
-                "event": event,
-            }
-        )
-
-    client.add_handler(event_type, handler)
-    return handles
-
-
-@then(
-    parsers.cfparse(
-        "the {event_type:ProviderEvent} handler must run",
-        extra_types={"ProviderEvent": ProviderEvent},
-    )
-)
-def assert_handler_run(handles, event_type: ProviderEvent):
-    max_wait = 2
-    poll_interval = 0.1
-    while max_wait > 0:
-        if all(h["type"] != event_type for h in handles):
-            max_wait -= poll_interval
-            time.sleep(poll_interval)
-            continue
-        break
-
-    assert any(h["type"] == event_type for h in handles)
-
-
-@when(parsers.cfparse('a flag with key "{key}" is modified'))
-def modify_flag(flag_file, key):
-    time.sleep(0.1)  # guard against race condition
-    with open("test-harness/flags/changing-flag-foo.json") as src_file:
-        contents = src_file.read()
-    with open(flag_file, "w") as f:
-        f.write(contents)
-
-
-@then(parsers.cfparse('the event details must indicate "{key}" was altered'))
-def assert_flag_changed(handles, key):
-    handle = None
-    for h in handles:
-        if h["type"] == ProviderEvent.PROVIDER_CONFIGURATION_CHANGED:
-            handle = h
-            break
-
-    assert handle is not None
-    assert key in handle["event"].flags_changed
diff --git a/providers/openfeature-provider-flagd/tests/e2e/test_inprocess_testing_flags.py b/providers/openfeature-provider-flagd/tests/e2e/test_inprocess_testing_flags.py
deleted file mode 100644
index 4e3bd069..00000000
--- a/providers/openfeature-provider-flagd/tests/e2e/test_inprocess_testing_flags.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import pytest
-from pytest_bdd import scenario
-from tests.conftest import setup_flag_file
-
-
-@pytest.fixture
-def flag_file(tmp_path):
-    return setup_flag_file(tmp_path, "testing-flags.json")
-
-
-@scenario(
-    "../../test-harness/gherkin/flagd-json-evaluator.feature",
-    "Time-based operations",
-)
-def test_timebased_operations():
-    """Time-based operations."""
-
-
-@scenario(
-    "../../test-harness/gherkin/flagd-json-evaluator.feature",
-    "Targeting by targeting key",
-)
-def test_targeting_by_targeting_key():
-    """Targeting by targeting key."""
diff --git a/providers/openfeature-provider-flagd/tests/e2e/test_inprocess_zero_evals.py b/providers/openfeature-provider-flagd/tests/e2e/test_inprocess_zero_evals.py
deleted file mode 100644
index 30de0dc6..00000000
--- a/providers/openfeature-provider-flagd/tests/e2e/test_inprocess_zero_evals.py
+++ /dev/null
@@ -1,28 +0,0 @@
-import pytest
-from pytest_bdd import scenario
-from tests.conftest import setup_flag_file
-
-
-@scenario("../../test-harness/gherkin/flagd.feature", "Resolves boolean zero value")
-def test_eval_boolean():
-    """Resolve boolean zero value"""
-
-
-@scenario("../../test-harness/gherkin/flagd.feature", "Resolves string zero value")
-def test_eval_string():
-    """Resolve string zero value"""
-
-
-@scenario("../../test-harness/gherkin/flagd.feature", "Resolves integer zero value")
-def test_eval_integer():
-    """Resolve integer zero value"""
-
-
-@scenario("../../test-harness/gherkin/flagd.feature", "Resolves float zero value")
-def test_eval_float():
-    """Resolve float zero value"""
-
-
-@pytest.fixture
-def flag_file(tmp_path):
-    return setup_flag_file(tmp_path, "zero-flags.json")
diff --git a/providers/openfeature-provider-flagd/tests/e2e/test_rpc.py b/providers/openfeature-provider-flagd/tests/e2e/test_rpc.py
new file mode 100644
index 00000000..91ef090c
--- /dev/null
+++ b/providers/openfeature-provider-flagd/tests/e2e/test_rpc.py
@@ -0,0 +1,50 @@
+import pytest
+from pytest_bdd import scenario, scenarios
+
+from openfeature.contrib.provider.flagd.config import ResolverType
+
+
+@pytest.fixture(autouse=True, scope="package")
+def resolver_type() -> ResolverType:
+    return ResolverType.GRPC
+
+
+@pytest.fixture(autouse=True, scope="package")
+def port():
+    return 8013
+
+
+@pytest.fixture(autouse=True, scope="package")
+def image():
+    return "ghcr.io/open-feature/flagd-testbed:v0.5.13"
+
+
+@pytest.mark.skip(reason="Eventing not implemented")
+@scenario("../../test-harness/gherkin/flagd.feature", "Flag change event")
+def test_flag_change_event():
+    """not implemented"""
+
+
+@pytest.mark.skip(reason="issue #102")
+@scenario(
+    "../../spec/specification/assets/gherkin/evaluation.feature",
+    "Resolves object value",
+)
+def test_resolves_object_value():
+    """not implemented"""
+
+
+@pytest.mark.skip(reason="issue #102")
+@scenario(
+    "../../spec/specification/assets/gherkin/evaluation.feature",
+    "Resolves object details",
+)
+def test_resolves_object_details():
+    """not implemented"""
+
+
+scenarios(
+    "../../test-harness/gherkin/flagd.feature",
+    "../../test-harness/gherkin/flagd-json-evaluator.feature",
+    "../../spec/specification/assets/gherkin/evaluation.feature",
+)
diff --git a/providers/openfeature-provider-flagd/tests/test_errors.py b/providers/openfeature-provider-flagd/tests/test_errors.py
index 3e576e8a..542a61d1 100644
--- a/providers/openfeature-provider-flagd/tests/test_errors.py
+++ b/providers/openfeature-provider-flagd/tests/test_errors.py
@@ -1,3 +1,5 @@
+import os
+
 import pytest
 
 from openfeature import api
@@ -27,10 +29,11 @@ def create_client(provider: FlagdProvider):
     ],
 )
 def test_file_load_errors(file_name: str):
+    path = os.path.abspath(os.path.join(os.path.dirname(__file__), "./flags/"))
     client = create_client(
         FlagdProvider(
             resolver_type=ResolverType.IN_PROCESS,
-            offline_flag_source_path=f"tests/flags/{file_name}",
+            offline_flag_source_path=f"{path}/{file_name}",
         )
     )
diff --git a/providers/openfeature-provider-flagd/tests/test_file_store.py b/providers/openfeature-provider-flagd/tests/test_file_store.py
index 2ae98ffa..5d07f62e 100644
--- a/providers/openfeature-provider-flagd/tests/test_file_store.py
+++ b/providers/openfeature-provider-flagd/tests/test_file_store.py
@@ -1,3 +1,4 @@
+import os
 from unittest.mock import Mock
 
 import pytest
 
@@ -25,7 +26,8 @@ def create_client(provider: FlagdProvider):
 )
 def test_file_load_errors(file_name: str):
     provider = Mock(spec=AbstractProvider)
-    file_store = FileWatcherFlagStore(f"tests/flags/{file_name}", provider)
+    path = os.path.abspath(os.path.join(os.path.dirname(__file__), "./flags/"))
+    file_store = FileWatcherFlagStore(f"{path}/{file_name}", provider)
 
     flag = file_store.flag_data.get("basic-flag")