Skip to content
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 5 additions & 4 deletions infrahub_sdk/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -830,7 +830,7 @@ async def process_batch() -> tuple[list[InfrahubNode], list[InfrahubNode]]:

for page_number in range(1, total_pages + 1):
page_offset = (page_number - 1) * pagination_size
batch_process.add(task=process_page, node=node, page_offset=page_offset, page_number=page_number)
batch_process.add(task=process_page, page_offset=page_offset, page_number=page_number)

async for _, response in batch_process.execute():
nodes.extend(response[1]["nodes"])
Expand Down Expand Up @@ -1946,9 +1946,10 @@ def filters(
"""
branch = branch or self.default_branch
schema = self.schema.get(kind=kind, branch=branch)
node = InfrahubNodeSync(client=self, schema=schema, branch=branch)
if at:
at = Timestamp(at)

node = InfrahubNodeSync(client=self, schema=schema, branch=branch)
filters = kwargs
pagination_size = self.pagination_size

Expand Down Expand Up @@ -1995,7 +1996,7 @@ def process_batch() -> tuple[list[InfrahubNodeSync], list[InfrahubNodeSync]]:

for page_number in range(1, total_pages + 1):
page_offset = (page_number - 1) * pagination_size
batch_process.add(task=process_page, node=node, page_offset=page_offset, page_number=page_number)
batch_process.add(task=process_page, page_offset=page_offset, page_number=page_number)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is the removal of the node parameter here, as well as in the async version, related to this change?

Looking at your change, it doesn't seem like node provided any value previously. However, we're still setting node with:

node = InfrahubNodeSync(client=self, schema=schema, branch=branch)

I wonder if it makes sense to keep this; it's also a bit problematic, as we're shadowing the node variable a bit further down in the method:

        if populate_store:
            for node in nodes:
                if node.id:
                    self.store.set(node=node)
            related_nodes = list(set(related_nodes))
            for node in related_nodes:
                if node.id:
                    self.store.set(node=node)

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I could leave node as is, or remove it in another PR. While debugging, I saw that node was actually set in only two of the four call sites of this function, so from my point of view the function signature did not match the actual usage.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We can perhaps create a follow-up issue to remove the node = ... assignments in these two methods.


for _, response in batch_process.execute():
nodes.extend(response[1]["nodes"])
Expand All @@ -2012,7 +2013,7 @@ def process_non_batch() -> tuple[list[InfrahubNodeSync], list[InfrahubNodeSync]]

while has_remaining_items:
page_offset = (page_number - 1) * pagination_size
response, process_result = process_page(page_offset, page_number)
response, process_result = process_page(page_offset=page_offset, page_number=page_number)

nodes.extend(process_result["nodes"])
related_nodes.extend(process_result["related_nodes"])
Expand Down
4 changes: 2 additions & 2 deletions infrahub_sdk/node/node.py
Original file line number Diff line number Diff line change
Expand Up @@ -402,10 +402,10 @@ def generate_query_data_init(
if order:
data["@filters"]["order"] = order

if offset:
if offset is not None:
data["@filters"]["offset"] = offset

if limit:
if limit is not None:
data["@filters"]["limit"] = limit

if include and exclude:
Expand Down