Skip to content
Merged
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
44 changes: 44 additions & 0 deletions src/ssvc/selection.py
Original file line number Diff line number Diff line change
Expand Up @@ -312,7 +312,51 @@ def model_json_schema(cls, **kwargs):
schema = strip_nullable_anyof(schema)

return order_schema(schema)
def _post_process(self, data):
"""
Ensures all Selection.values are lists and removes empty array elements.
"""
def fix_selection(selection):
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I see what this is doing, but I don't understand why we need it? Is there an example of a Selection object that needs this treatment? How would that get created? If so, can we put that into a unit test to demonstrate the need for this method?

# Convert tuple to list and filter out empty items
values = selection.get("values", [])
# Ensure it's a list, filter out empty/falsy items
selection["values"] = [v for v in list(values) if v]
return selection

# If this is a dict with selections, process each selection
if isinstance(data, dict) and "selections" in data:
data["selections"] = [
fix_selection(sel) for sel in data["selections"] if sel
]
# Remove empty array fields from the top level
keys_to_delete = [k for k, v in data.items() if isinstance(v, list) and not v]
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can we eliminate the need for the keys_to_delete loop by using exclude_none=True or exclude_if = lambda: ... in the class definition?

for k in keys_to_delete:
del data[k]
return data

def model_dump(self, *args, **kwargs):
    """Dump to a dict via Pydantic, then normalize it with ``_post_process``."""
    return self._post_process(super().model_dump(*args, **kwargs))

def model_dump_json(self, *args, **kwargs):
    """
    Serialize the model to a JSON string.

    Dumps to a Python dict first (so ``_post_process`` can normalize the
    selections), then renders JSON with :func:`json.dumps`.  The kwargs
    ``indent``, ``sort_keys``, ``separators`` and ``ensure_ascii`` are
    routed to ``json.dumps``; all remaining kwargs are forwarded to
    ``model_dump``.  A ``timestamp`` value still held as a ``datetime``
    is rewritten as a UTC RFC 3339 string at second precision.
    """
    import json
    # Fix: import `datetime` locally as well as `timezone` — the
    # isinstance() check below needs the class and must not depend on a
    # module-level import we cannot see from here.
    from datetime import datetime, timezone

    # Split kwargs: these four belong to json.dumps, the rest to model_dump.
    json_kwargs = {}
    model_dump_kwargs = dict(kwargs)
    for key in ("indent", "sort_keys", "separators", "ensure_ascii"):
        if key in model_dump_kwargs:
            json_kwargs[key] = model_dump_kwargs.pop(key)

    # Get the dict with Pydantic's processing (exclude_none, etc.), then
    # apply our normalization pass.
    data = self._post_process(super().model_dump(*args, **model_dump_kwargs))

    # Format the timestamp as a UTC RFC 3339 string.
    # NOTE(review): astimezone() on a *naive* datetime assumes local time —
    # confirm upstream that timestamps are always timezone-aware.
    ts = data.get("timestamp")
    if isinstance(ts, datetime):
        data["timestamp"] = ts.astimezone(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")

    return json.dumps(data, **json_kwargs)

def main() -> None:
print(
Expand Down
Loading