From d527bffd72df117c6197b82e4295f9451a0bc7b3 Mon Sep 17 00:00:00 2001 From: Bearchitek Date: Mon, 14 Jul 2025 13:23:56 +0200 Subject: [PATCH 1/3] fix signature delta --- infrahub_sdk/client.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/infrahub_sdk/client.py b/infrahub_sdk/client.py index dc1f539f..a72fe16e 100644 --- a/infrahub_sdk/client.py +++ b/infrahub_sdk/client.py @@ -830,7 +830,7 @@ async def process_batch() -> tuple[list[InfrahubNode], list[InfrahubNode]]: for page_number in range(1, total_pages + 1): page_offset = (page_number - 1) * pagination_size - batch_process.add(task=process_page, node=node, page_offset=page_offset, page_number=page_number) + batch_process.add(task=process_page, page_offset=page_offset, page_number=page_number) async for _, response in batch_process.execute(): nodes.extend(response[1]["nodes"]) @@ -1946,9 +1946,10 @@ def filters( """ branch = branch or self.default_branch schema = self.schema.get(kind=kind, branch=branch) - node = InfrahubNodeSync(client=self, schema=schema, branch=branch) if at: at = Timestamp(at) + + node = InfrahubNodeSync(client=self, schema=schema, branch=branch) filters = kwargs pagination_size = self.pagination_size @@ -1995,7 +1996,7 @@ def process_batch() -> tuple[list[InfrahubNodeSync], list[InfrahubNodeSync]]: for page_number in range(1, total_pages + 1): page_offset = (page_number - 1) * pagination_size - batch_process.add(task=process_page, node=node, page_offset=page_offset, page_number=page_number) + batch_process.add(task=process_page, page_offset=page_offset, page_number=page_number) for _, response in batch_process.execute(): nodes.extend(response[1]["nodes"]) @@ -2012,7 +2013,7 @@ def process_non_batch() -> tuple[list[InfrahubNodeSync], list[InfrahubNodeSync]] while has_remaining_items: page_offset = (page_number - 1) * pagination_size - response, process_result = process_page(page_offset, page_number) + response, process_result = 
process_page(page_offset=page_offset, page_number=page_number) nodes.extend(process_result["nodes"]) related_nodes.extend(process_result["related_nodes"]) From cdd64c37ed1c83164948b0cbca77428a2c18bfb5 Mon Sep 17 00:00:00 2001 From: Bearchitek Date: Mon, 14 Jul 2025 13:24:21 +0200 Subject: [PATCH 2/3] do not omit offset when offset=0 --- infrahub_sdk/node/node.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/infrahub_sdk/node/node.py b/infrahub_sdk/node/node.py index bad53aca..f69c6231 100644 --- a/infrahub_sdk/node/node.py +++ b/infrahub_sdk/node/node.py @@ -402,10 +402,10 @@ def generate_query_data_init( if order: data["@filters"]["order"] = order - if offset: + if offset is not None: data["@filters"]["offset"] = offset - if limit: + if limit is not None: data["@filters"]["limit"] = limit if include and exclude: From 0dd2541d8d0a2cf5b8408b02498b8548f7e0b00d Mon Sep 17 00:00:00 2001 From: Bearchitek Date: Mon, 14 Jul 2025 16:26:03 +0200 Subject: [PATCH 3/3] fragment and rollback node change --- changelog/fixed.md | 1 + infrahub_sdk/client.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog/fixed.md diff --git a/changelog/fixed.md b/changelog/fixed.md new file mode 100644 index 00000000..ec53c282 --- /dev/null +++ b/changelog/fixed.md @@ -0,0 +1 @@ +Fixes use of parallel with filters for Infrahub Client Sync diff --git a/infrahub_sdk/client.py b/infrahub_sdk/client.py index a72fe16e..a95ac5a4 100644 --- a/infrahub_sdk/client.py +++ b/infrahub_sdk/client.py @@ -830,7 +830,7 @@ async def process_batch() -> tuple[list[InfrahubNode], list[InfrahubNode]]: for page_number in range(1, total_pages + 1): page_offset = (page_number - 1) * pagination_size - batch_process.add(task=process_page, page_offset=page_offset, page_number=page_number) + batch_process.add(task=process_page, node=node, page_offset=page_offset, page_number=page_number) async for _, response in batch_process.execute():
nodes.extend(response[1]["nodes"]) @@ -1996,7 +1996,7 @@ def process_batch() -> tuple[list[InfrahubNodeSync], list[InfrahubNodeSync]]: for page_number in range(1, total_pages + 1): page_offset = (page_number - 1) * pagination_size - batch_process.add(task=process_page, page_offset=page_offset, page_number=page_number) + batch_process.add(task=process_page, node=node, page_offset=page_offset, page_number=page_number) for _, response in batch_process.execute(): nodes.extend(response[1]["nodes"])