Merged · Changes from 9 commits
25 changes: 22 additions & 3 deletions openfeature/client.py
@@ -325,6 +325,7 @@
error_code=ErrorCode.PROVIDER_FATAL,
)

flag_evaluation = None
try:
# https://github.com/open-feature/spec/blob/main/specification/sections/03-evaluation-context.md
# Any resulting evaluation context from a before hook will overwrite
@@ -364,13 +365,14 @@
except OpenFeatureError as err:
error_hooks(flag_type, hook_context, err, reversed_merged_hooks, hook_hints)

return FlagEvaluationDetails(
flag_evaluation = FlagEvaluationDetails(
flag_key=flag_key,
value=default_value,
reason=Reason.ERROR,
error_code=err.error_code,
error_message=err.error_message,
)
return flag_evaluation
# Catch any type of exception here since the user can provide any exception
# in the error hooks
except Exception as err: # pragma: no cover
@@ -381,16 +383,33 @@
error_hooks(flag_type, hook_context, err, reversed_merged_hooks, hook_hints)

error_message = getattr(err, "error_message", str(err))
return FlagEvaluationDetails(
flag_evaluation = FlagEvaluationDetails(
flag_key=flag_key,
value=default_value,
reason=Reason.ERROR,
error_code=ErrorCode.GENERAL,
error_message=error_message,
)
return flag_evaluation

finally:
after_all_hooks(flag_type, hook_context, reversed_merged_hooks, hook_hints)
if flag_evaluation is None:
# should never happen, but keeps the linter happy
flag_evaluation = FlagEvaluationDetails(
flag_key=flag_key,
value=default_value,
reason=Reason.ERROR,
error_code=ErrorCode.GENERAL,
error_message="Unknown error",
)

after_all_hooks(
flag_type,
hook_context,
flag_evaluation,
reversed_merged_hooks,
hook_hints,
)

def _create_provider_evaluation(
self,
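Taken together, the client.py changes guarantee that a FlagEvaluationDetails object is bound before the finally hooks run, whether evaluation succeeded, raised an OpenFeatureError, or raised something unexpected. A simplified sketch of the resulting control flow, not the SDK's actual method; resolve and finally_hooks are hypothetical stand-ins for provider resolution and hook dispatch, and the import paths assume FlagEvaluationDetails and Reason live in openfeature.flag_evaluation:

from openfeature.exception import ErrorCode, OpenFeatureError
from openfeature.flag_evaluation import FlagEvaluationDetails, Reason


def evaluate_with_guaranteed_details(flag_key, default_value, resolve, finally_hooks):
    flag_evaluation = None
    try:
        # resolve() stands in for the provider call plus before/after hooks.
        flag_evaluation = resolve(flag_key, default_value)
        return flag_evaluation
    except OpenFeatureError as err:
        flag_evaluation = FlagEvaluationDetails(
            flag_key=flag_key,
            value=default_value,
            reason=Reason.ERROR,
            error_code=err.error_code,
            error_message=err.error_message,
        )
        return flag_evaluation
    finally:
        if flag_evaluation is None:
            # resolve() raised something other than OpenFeatureError and it
            # escaped; hooks still receive a well-formed fallback details object.
            flag_evaluation = FlagEvaluationDetails(
                flag_key=flag_key,
                value=default_value,
                reason=Reason.ERROR,
                error_code=ErrorCode.GENERAL,
                error_message="Unknown error",
            )
        for hook in finally_hooks:
            hook(flag_evaluation)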
7 changes: 6 additions & 1 deletion openfeature/hook/__init__.py
@@ -109,7 +109,12 @@ def error(
"""
pass

def finally_after(self, hook_context: HookContext, hints: HookHints) -> None:
def finally_after(
self,
hook_context: HookContext,
details: FlagEvaluationDetails[typing.Any],
hints: HookHints,
) -> None:
"""
Run after flag evaluation, including any error processing.
This will always run. Errors will be swallowed.
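With the widened signature, a user-defined hook can inspect the final evaluation result during its finally stage. A minimal sketch of such a hook, assuming the import locations used elsewhere in this diff; the class name and logging behaviour are illustrative, not part of this PR:

import logging
import typing

from openfeature.flag_evaluation import FlagEvaluationDetails
from openfeature.hook import Hook, HookContext, HookHints

logger = logging.getLogger(__name__)


class EvaluationLoggingHook(Hook):
    """Illustrative hook: logs the final details of every flag evaluation."""

    def finally_after(
        self,
        hook_context: HookContext,
        details: FlagEvaluationDetails[typing.Any],
        hints: HookHints,
    ) -> None:
        logger.info(
            "flag %s resolved to %r (reason=%s, error_code=%s)",
            details.flag_key,
            details.value,
            details.reason,
            details.error_code,
        )

Registered via client.add_hooks([EvaluationLoggingHook()]), as the behave step below does with a mock, the log line fires for successful and failed evaluations alike.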
3 changes: 2 additions & 1 deletion openfeature/hook/_hook_support.py
@@ -25,10 +25,11 @@ def error_hooks(
def after_all_hooks(
flag_type: FlagType,
hook_context: HookContext,
details: FlagEvaluationDetails[typing.Any],
hooks: typing.List[Hook],
hints: typing.Optional[HookHints] = None,
) -> None:
kwargs = {"hook_context": hook_context, "hints": hints}
kwargs = {"hook_context": hook_context, "details": details, "hints": hints}
_execute_hooks(
flag_type=flag_type, hooks=hooks, hook_method=HookType.FINALLY_AFTER, **kwargs
)
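Because after_all_hooks forwards these values as keyword arguments, an overriding hook has to keep the exact parameter names hook_context, details, and hints. A small illustration of the failure mode with a mismatched name; the class below is hypothetical, not part of the SDK or this PR:

from openfeature.hook import Hook


class MisnamedHook(Hook):
    # The second parameter is named "evaluation" instead of "details".
    def finally_after(self, hook_context, evaluation, hints) -> None:
        pass


try:
    MisnamedHook().finally_after(hook_context=None, details=None, hints={})
except TypeError as exc:
    print(exc)  # unexpected keyword argument 'details'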
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -49,7 +49,7 @@ cov = [
]
e2e = [
"git submodule add --force https://github.com/open-feature/spec.git spec",
"cp -r spec/specification/assets/gherkin/evaluation.feature tests/features/",
"cp spec/specification/assets/gherkin/* tests/features/",
"behave tests/features/",
"rm tests/features/*.feature",
]
72 changes: 72 additions & 0 deletions tests/features/steps/hooks_steps.py
@@ -0,0 +1,72 @@
from unittest.mock import MagicMock

from behave import then, when

from openfeature.exception import ErrorCode
from openfeature.hook import Hook


@when("a hook is added to the client")
def step_impl_add_hook(context):
hook = MagicMock(spec=Hook)
hook.before = MagicMock()
hook.after = MagicMock()
hook.error = MagicMock()
hook.finally_after = MagicMock()
context.hook = hook
context.client.add_hooks([hook])


@then("error hooks should be called")
def step_impl_call_error(context):
assert context.hook.before.called
assert context.hook.error.called
assert context.hook.finally_after.called


@then("non-error hooks should be called")
def step_impl_call_non_error(context):
assert context.hook.before.called
assert context.hook.after.called
assert context.hook.finally_after.called


def get_hook_from_name(context, hook_name):
if hook_name.lower() == "before":
return context.hook.before
elif hook_name.lower() == "after":
return context.hook.after
elif hook_name.lower() == "error":
return context.hook.error
elif hook_name.lower() == "finally":
return context.hook.finally_after
else:
raise ValueError(str(hook_name) + " is not a valid hook name")


def convert_value_from_flag_type(value, flag_type):
if value == "None":
return None
if flag_type.lower() == "boolean":
return bool(value)
elif flag_type.lower() == "integer":
return int(value)
elif flag_type.lower() == "float":
return float(value)
return value


@then('"{hook_names}" hooks should have evaluation details')
def step_impl_should_have_eval_details(context, hook_names):
for hook_name in hook_names.split(", "):
hook = get_hook_from_name(context, hook_name)
for row in context.table:
flag_type, key, value = row

value = convert_value_from_flag_type(value, flag_type)

actual = hook.call_args[1]["details"].__dict__[key]
if isinstance(actual, ErrorCode):
actual = str(actual)

assert actual == value
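The assertion above relies on MagicMock recording keyword arguments: call_args is an (args, kwargs) pair for the most recent call, so call_args[1]["details"] is the details object passed to the mocked finally hook. A small standalone illustration with made-up values:

from unittest.mock import MagicMock

finally_hook = MagicMock()
finally_hook(hook_context=None, details={"flag_key": "my-flag", "value": True}, hints={})

# call_args[1] is the keyword-argument dictionary of the last call.
assert finally_hook.call_args[1]["details"]["value"] is True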
3 changes: 2 additions & 1 deletion tests/features/steps/steps.py
@@ -43,7 +43,8 @@ def step_impl_provider(context):
'"{default_value}"'
)
def step_impl_evaluated_with_details(context, flag_type, key, default_value):
context.client = get_client()
if context.client is None:
context.client = get_client()
if flag_type == "boolean":
context.boolean_flag_details = context.client.get_boolean_details(
key, default_value
9 changes: 7 additions & 2 deletions tests/hook/test_hook_support.py
@@ -137,12 +137,17 @@ def test_after_hooks_run_after_method(mock_hook):
def test_finally_after_hooks_run_finally_after_method(mock_hook):
# Given
hook_context = HookContext("flag_key", FlagType.BOOLEAN, True, "")
flag_evaluation_details = FlagEvaluationDetails(
hook_context.flag_key, "val", "unknown"
)
hook_hints = MappingProxyType({})
# When
after_all_hooks(FlagType.BOOLEAN, hook_context, [mock_hook], hook_hints)
after_all_hooks(
FlagType.BOOLEAN, hook_context, flag_evaluation_details, [mock_hook], hook_hints
)
# Then
mock_hook.supports_flag_value_type.assert_called_once()
mock_hook.finally_after.assert_called_once()
mock_hook.finally_after.assert_called_with(
hook_context=hook_context, hints=hook_hints
hook_context=hook_context, details=flag_evaluation_details, hints=hook_hints
)