diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b5501ac6..f5fa441a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -54,7 +54,7 @@ jobs: token: ${{ github.token }} filters: .github/file-filters.yml - # ------------------------------------------ All Linter ------------------------------------------ + # ------------------------------------------ All Linter ------------------------------------------ yaml-lint: if: needs.files-changed.outputs.yaml == 'true' @@ -84,7 +84,6 @@ jobs: - name: "Linting: ruff format" run: "ruff format --check --diff ." - markdown-lint: if: | needs.files-changed.outputs.documentation == 'true' || @@ -119,7 +118,6 @@ jobs: env: SHELLCHECK_OPTS: --exclude=SC2086 --exclude=SC2046 --exclude=SC2004 --exclude=SC2129 - documentation: defaults: run: @@ -141,7 +139,7 @@ jobs: uses: actions/setup-node@v4 with: node-version: 20 - cache: 'npm' + cache: "npm" cache-dependency-path: docs/package-lock.json - name: "Install dependencies" run: npm install @@ -180,6 +178,50 @@ jobs: - name: "Validate generated documentation" run: "poetry run invoke docs-validate" + check-api-documentation-obsolescence: + if: | + always() && !cancelled() && + !contains(needs.*.result, 'failure') && + !contains(needs.*.result, 'cancelled') && + (needs.files-changed.outputs.python == 'true') || (needs.files-changed.outputs.documentation_generated == 'true') + needs: ["prepare-environment", "files-changed", "yaml-lint", "python-lint"] + runs-on: "ubuntu-22.04" + env: + DOCS_COMMAND: "poetry run invoke generate-sdk-api-docs" + SDK_API_DOCS_DIR: "docs/docs/python-sdk/sdk_ref" + timeout-minutes: 5 + steps: + - name: "Check out repository code" + uses: "actions/checkout@v4" + with: + submodules: true + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.12" + - name: "Setup Python environment" + run: | + pipx install poetry==${{ needs.prepare-environment.outputs.POETRY_VERSION }} + poetry config 
virtualenvs.create true --local + poetry env use 3.12 + - name: "Install dependencies" + run: "poetry install --no-interaction --no-ansi --extras ctl" + - name: "Setup environment" + run: "pip install invoke toml" + - name: Install Node.js + uses: actions/setup-node@v4 + with: + node-version: 20 + cache: "npm" + cache-dependency-path: "**/package-lock.json" + - name: Install markdown linter + run: npm install -g markdownlint-cli2 + - name: "Generate SDK API documentation" + run: ${{ env.DOCS_COMMAND }} + - name: "Check if SDK API documentation needs to be refreshed" + run: | + git diff --quiet ${SDK_API_DOCS_DIR} + validate-documentation-style: if: | always() && !cancelled() && @@ -203,7 +245,7 @@ jobs: env: VALE_VERSION: ${{ env.VALE_VERSION }} - name: "Validate documentation style" - run: ./vale $(find ./docs -type f \( -name "*.mdx" -o -name "*.md" \) ) + run: ./vale $(find ./docs -type d -name sdk_ref -prune -false -o -type f \( -name "*.mdx" -o -name "*.md" \) ) unit-tests: env: diff --git a/changelog/201.added.md b/changelog/201.added.md new file mode 100644 index 00000000..b64cb2fa --- /dev/null +++ b/changelog/201.added.md @@ -0,0 +1 @@ +Add support for automatic Python SDK API from docstrings in the code. 
\ No newline at end of file diff --git a/docs/docs/python-sdk/sdk_ref/infrahub_sdk/client.mdx b/docs/docs/python-sdk/sdk_ref/infrahub_sdk/client.mdx new file mode 100644 index 00000000..94f25565 --- /dev/null +++ b/docs/docs/python-sdk/sdk_ref/infrahub_sdk/client.mdx @@ -0,0 +1,819 @@ +--- +title: client +sidebarTitle: client +--- + +# `infrahub_sdk.client` + +## Functions + +### `handle_relogin` + +```python +handle_relogin(func: Callable[..., Coroutine[Any, Any, httpx.Response]]) +``` + +### `handle_relogin_sync` + +```python +handle_relogin_sync(func: Callable[..., httpx.Response]) +``` + +## Classes + +### `ProcessRelationsNode` + +### `ProcessRelationsNodeSync` + +### `BaseClient` + +Base class for InfrahubClient and InfrahubClientSync + +**Methods:** + +#### `request_context` + +```python +request_context(self) -> RequestContext | None +``` + +#### `request_context` + +```python +request_context(self, request_context: RequestContext) -> None +``` + +#### `start_tracking` + +```python +start_tracking(self, identifier: str | None = None, params: dict[str, Any] | None = None, delete_unused_nodes: bool = False, group_type: str | None = None, group_params: dict[str, Any] | None = None, branch: str | None = None) -> Self +``` + +#### `set_context_properties` + +```python +set_context_properties(self, identifier: str, params: dict[str, str] | None = None, delete_unused_nodes: bool = True, reset: bool = True, group_type: str | None = None, group_params: dict[str, Any] | None = None, branch: str | None = None) -> None +``` + +### `InfrahubClient` + +GraphQL Client to interact with Infrahub. + +**Methods:** + +#### `get_version` + +```python +get_version(self) -> str +``` + +Return the Infrahub version. 
+ +#### `get_user` + +```python +get_user(self) -> dict +``` + +Return user information + +#### `get_user_permissions` + +```python +get_user_permissions(self) -> dict +``` + +Return user permissions + +#### `create` + +```python +create(self, kind: str, data: dict | None = ..., branch: str | None = ..., **kwargs: Any) -> InfrahubNode +``` + +#### `create` + +```python +create(self, kind: type[SchemaType], data: dict | None = ..., branch: str | None = ..., **kwargs: Any) -> SchemaType +``` + +#### `create` + +```python +create(self, kind: str | type[SchemaType], data: dict | None = None, branch: str | None = None, timeout: int | None = None, **kwargs: Any) -> InfrahubNode | SchemaType +``` + +#### `delete` + +```python +delete(self, kind: str | type[SchemaType], id: str, branch: str | None = None) -> None +``` + +#### `get` + +```python +get(self, kind: type[SchemaType], raise_when_missing: Literal[False], at: Timestamp | None = ..., branch: str | None = ..., timeout: int | None = ..., id: str | None = ..., hfid: list[str] | None = ..., include: list[str] | None = ..., exclude: list[str] | None = ..., populate_store: bool = ..., fragment: bool = ..., prefetch_relationships: bool = ..., property: bool = ..., **kwargs: Any) -> SchemaType | None +``` + +#### `get` + +```python +get(self, kind: type[SchemaType], raise_when_missing: Literal[True], at: Timestamp | None = ..., branch: str | None = ..., timeout: int | None = ..., id: str | None = ..., hfid: list[str] | None = ..., include: list[str] | None = ..., exclude: list[str] | None = ..., populate_store: bool = ..., fragment: bool = ..., prefetch_relationships: bool = ..., property: bool = ..., **kwargs: Any) -> SchemaType +``` + +#### `get` + +```python +get(self, kind: type[SchemaType], raise_when_missing: bool = ..., at: Timestamp | None = ..., branch: str | None = ..., timeout: int | None = ..., id: str | None = ..., hfid: list[str] | None = ..., include: list[str] | None = ..., exclude: list[str] | None = ..., 
populate_store: bool = ..., fragment: bool = ..., prefetch_relationships: bool = ..., property: bool = ..., **kwargs: Any) -> SchemaType +``` + +#### `get` + +```python +get(self, kind: str, raise_when_missing: Literal[False], at: Timestamp | None = ..., branch: str | None = ..., timeout: int | None = ..., id: str | None = ..., hfid: list[str] | None = ..., include: list[str] | None = ..., exclude: list[str] | None = ..., populate_store: bool = ..., fragment: bool = ..., prefetch_relationships: bool = ..., property: bool = ..., **kwargs: Any) -> InfrahubNode | None +``` + +#### `get` + +```python +get(self, kind: str, raise_when_missing: Literal[True], at: Timestamp | None = ..., branch: str | None = ..., timeout: int | None = ..., id: str | None = ..., hfid: list[str] | None = ..., include: list[str] | None = ..., exclude: list[str] | None = ..., populate_store: bool = ..., fragment: bool = ..., prefetch_relationships: bool = ..., property: bool = ..., **kwargs: Any) -> InfrahubNode +``` + +#### `get` + +```python +get(self, kind: str, raise_when_missing: bool = ..., at: Timestamp | None = ..., branch: str | None = ..., timeout: int | None = ..., id: str | None = ..., hfid: list[str] | None = ..., include: list[str] | None = ..., exclude: list[str] | None = ..., populate_store: bool = ..., fragment: bool = ..., prefetch_relationships: bool = ..., property: bool = ..., **kwargs: Any) -> InfrahubNode +``` + +#### `get` + +```python +get(self, kind: str | type[SchemaType], raise_when_missing: bool = True, at: Timestamp | None = None, branch: str | None = None, timeout: int | None = None, id: str | None = None, hfid: list[str] | None = None, include: list[str] | None = None, exclude: list[str] | None = None, populate_store: bool = True, fragment: bool = False, prefetch_relationships: bool = False, property: bool = False, **kwargs: Any) -> InfrahubNode | SchemaType | None +``` + +#### `count` + +```python +count(self, kind: str | type[SchemaType], at: Timestamp | None 
= None, branch: str | None = None, timeout: int | None = None, partial_match: bool = False, **kwargs: Any) -> int +``` + +Return the number of nodes of a given kind. + +#### `all` + +```python +all(self, kind: type[SchemaType], at: Timestamp | None = ..., branch: str | None = ..., timeout: int | None = ..., populate_store: bool = ..., offset: int | None = ..., limit: int | None = ..., include: list[str] | None = ..., exclude: list[str] | None = ..., fragment: bool = ..., prefetch_relationships: bool = ..., property: bool = ..., parallel: bool = ..., order: Order | None = ...) -> list[SchemaType] +``` + +#### `all` + +```python +all(self, kind: str, at: Timestamp | None = ..., branch: str | None = ..., timeout: int | None = ..., populate_store: bool = ..., offset: int | None = ..., limit: int | None = ..., include: list[str] | None = ..., exclude: list[str] | None = ..., fragment: bool = ..., prefetch_relationships: bool = ..., property: bool = ..., parallel: bool = ..., order: Order | None = ...) -> list[InfrahubNode] +``` + +#### `all` + +```python +all(self, kind: str | type[SchemaType], at: Timestamp | None = None, branch: str | None = None, timeout: int | None = None, populate_store: bool = True, offset: int | None = None, limit: int | None = None, include: list[str] | None = None, exclude: list[str] | None = None, fragment: bool = False, prefetch_relationships: bool = False, property: bool = False, parallel: bool = False, order: Order | None = None) -> list[InfrahubNode] | list[SchemaType] +``` + +Retrieve all nodes of a given kind + +**Args:** + +- `kind`: kind of the nodes to query +- `at`: Time of the query. Defaults to Now. +- `branch`: Name of the branch to query from. Defaults to default_branch. +- `populate_store`: Flag to indicate whether to populate the store with the retrieved nodes. +- `timeout`: Overrides default timeout used when querying the GraphQL API. Specified in seconds. +- `offset`: The offset for pagination. 
+- `limit`: The limit for pagination. +- `include`: List of attributes or relationships to include in the query. +- `exclude`: List of attributes or relationships to exclude from the query. +- `fragment`: Flag to use GraphQL fragments for generic schemas. +- `prefetch_relationships`: Flag to indicate whether to prefetch related node data. +- `parallel`: Whether to use parallel processing for the query. +- `order`: Ordering related options. Setting `disable=True` enhances performances. + +**Returns:** + +- list\[InfrahubNode]: List of Nodes + +#### `filters` + +```python +filters(self, kind: type[SchemaType], at: Timestamp | None = ..., branch: str | None = ..., timeout: int | None = ..., populate_store: bool = ..., offset: int | None = ..., limit: int | None = ..., include: list[str] | None = ..., exclude: list[str] | None = ..., fragment: bool = ..., prefetch_relationships: bool = ..., partial_match: bool = ..., property: bool = ..., parallel: bool = ..., order: Order | None = ..., **kwargs: Any) -> list[SchemaType] +``` + +#### `filters` + +```python +filters(self, kind: str, at: Timestamp | None = ..., branch: str | None = ..., timeout: int | None = ..., populate_store: bool = ..., offset: int | None = ..., limit: int | None = ..., include: list[str] | None = ..., exclude: list[str] | None = ..., fragment: bool = ..., prefetch_relationships: bool = ..., partial_match: bool = ..., property: bool = ..., parallel: bool = ..., order: Order | None = ..., **kwargs: Any) -> list[InfrahubNode] +``` + +#### `filters` + +```python +filters(self, kind: str | type[SchemaType], at: Timestamp | None = None, branch: str | None = None, timeout: int | None = None, populate_store: bool = True, offset: int | None = None, limit: int | None = None, include: list[str] | None = None, exclude: list[str] | None = None, fragment: bool = False, prefetch_relationships: bool = False, partial_match: bool = False, property: bool = False, parallel: bool = False, order: Order | None = None, 
**kwargs: Any) -> list[InfrahubNode] | list[SchemaType] +``` + +Retrieve nodes of a given kind based on provided filters. + +**Args:** + +- `kind`: kind of the nodes to query +- `at`: Time of the query. Defaults to Now. +- `branch`: Name of the branch to query from. Defaults to default_branch. +- `timeout`: Overrides default timeout used when querying the GraphQL API. Specified in seconds. +- `populate_store`: Flag to indicate whether to populate the store with the retrieved nodes. +- `offset`: The offset for pagination. +- `limit`: The limit for pagination. +- `include`: List of attributes or relationships to include in the query. +- `exclude`: List of attributes or relationships to exclude from the query. +- `fragment`: Flag to use GraphQL fragments for generic schemas. +- `prefetch_relationships`: Flag to indicate whether to prefetch related node data. +- `partial_match`: Allow partial match of filter criteria for the query. +- `parallel`: Whether to use parallel processing for the query. +- `order`: Ordering related options. Setting `disable=True` enhances performances. +- `**kwargs`: Additional filter criteria for the query. + +**Returns:** + +- list\[InfrahubNodeSync]: List of Nodes that match the given filters. + +#### `clone` + +```python +clone(self, branch: str | None = None) -> InfrahubClient +``` + +Return a cloned version of the client using the same configuration + +#### `execute_graphql` + +```python +execute_graphql(self, query: str, variables: dict | None = None, branch_name: str | None = None, at: str | Timestamp | None = None, timeout: int | None = None, raise_for_error: bool = True, tracker: str | None = None) -> dict +``` + +Execute a GraphQL query (or mutation). +If retry_on_failure is True, the query will retry until the server becomes reacheable. + +**Args:** + +- `query`: GraphQL Query to execute, can be a query or a mutation +- `variables`: Variables to pass along with the GraphQL query. Defaults to None. 
+- `branch_name`: Name of the branch on which the query will be executed. Defaults to None. +- `at`: Time when the query should be executed. Defaults to None. +- `timeout`: Timeout in second for the query. Defaults to None. +- `raise_for_error`: Flag to indicate that we need to raise an exception if the response has some errors. Defaults to True. + +Raises: + GraphQLError: _description_ + +**Returns:** + +- _description_ + +#### `refresh_login` + +```python +refresh_login(self) -> None +``` + +#### `login` + +```python +login(self, refresh: bool = False) -> None +``` + +#### `query_gql_query` + +```python +query_gql_query(self, name: str, variables: dict | None = None, update_group: bool = False, subscribers: list[str] | None = None, params: dict | None = None, branch_name: str | None = None, at: str | None = None, timeout: int | None = None, tracker: str | None = None, raise_for_error: bool = True) -> dict +``` + +#### `get_diff_summary` + +```python +get_diff_summary(self, branch: str, timeout: int | None = None, tracker: str | None = None, raise_for_error: bool = True) -> list[NodeDiff] +``` + +#### `allocate_next_ip_address` + +```python +allocate_next_ip_address(self, resource_pool: CoreNode, kind: type[SchemaType], identifier: str | None = ..., prefix_length: int | None = ..., address_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: Literal[True] = True) -> SchemaType +``` + +#### `allocate_next_ip_address` + +```python +allocate_next_ip_address(self, resource_pool: CoreNode, kind: type[SchemaType], identifier: str | None = ..., prefix_length: int | None = ..., address_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: Literal[False] = False) -> SchemaType | None +``` + +#### `allocate_next_ip_address` + +```python +allocate_next_ip_address(self, 
resource_pool: CoreNode, kind: type[SchemaType], identifier: str | None = ..., prefix_length: int | None = ..., address_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: bool = ...) -> SchemaType +``` + +#### `allocate_next_ip_address` + +```python +allocate_next_ip_address(self, resource_pool: CoreNode, kind: None = ..., identifier: str | None = ..., prefix_length: int | None = ..., address_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: Literal[True] = True) -> CoreNode +``` + +#### `allocate_next_ip_address` + +```python +allocate_next_ip_address(self, resource_pool: CoreNode, kind: None = ..., identifier: str | None = ..., prefix_length: int | None = ..., address_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: Literal[False] = False) -> CoreNode | None +``` + +#### `allocate_next_ip_address` + +```python +allocate_next_ip_address(self, resource_pool: CoreNode, kind: None = ..., identifier: str | None = ..., prefix_length: int | None = ..., address_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: bool = ...) -> CoreNode | None +``` + +#### `allocate_next_ip_address` + +```python +allocate_next_ip_address(self, resource_pool: CoreNode, kind: type[SchemaType] | None = None, identifier: str | None = None, prefix_length: int | None = None, address_type: str | None = None, data: dict[str, Any] | None = None, branch: str | None = None, timeout: int | None = None, tracker: str | None = None, raise_for_error: bool = True) -> CoreNode | SchemaType | None +``` + +Allocate a new IP address by using the provided resource pool. 
+ +**Args:** + +- `resource_pool`: Node corresponding to the pool to allocate resources from. +- `identifier`: Value to perform idempotent allocation, the same resource will be returned for a given identifier. +- `prefix_length`: Length of the prefix to set on the address to allocate. +- `address_type`: Kind of the address to allocate. +- `data`: A key/value map to use to set attributes values on the allocated address. +- `branch`: Name of the branch to allocate from. Defaults to default_branch. +- `timeout`: Flag to indicate whether to populate the store with the retrieved nodes. +- `tracker`: The offset for pagination. +- `raise_for_error`: The limit for pagination. + +Returns: + InfrahubNode: Node corresponding to the allocated resource. + +#### `allocate_next_ip_prefix` + +```python +allocate_next_ip_prefix(self, resource_pool: CoreNode, kind: type[SchemaType], identifier: str | None = ..., prefix_length: int | None = ..., member_type: str | None = ..., prefix_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: Literal[True] = True) -> SchemaType +``` + +#### `allocate_next_ip_prefix` + +```python +allocate_next_ip_prefix(self, resource_pool: CoreNode, kind: type[SchemaType], identifier: str | None = ..., prefix_length: int | None = ..., member_type: str | None = ..., prefix_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: Literal[False] = False) -> SchemaType | None +``` + +#### `allocate_next_ip_prefix` + +```python +allocate_next_ip_prefix(self, resource_pool: CoreNode, kind: type[SchemaType], identifier: str | None = ..., prefix_length: int | None = ..., member_type: str | None = ..., prefix_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: bool 
= ...) -> SchemaType +``` + +#### `allocate_next_ip_prefix` + +```python +allocate_next_ip_prefix(self, resource_pool: CoreNode, kind: None = ..., identifier: str | None = ..., prefix_length: int | None = ..., member_type: str | None = ..., prefix_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: Literal[True] = True) -> CoreNode +``` + +#### `allocate_next_ip_prefix` + +```python +allocate_next_ip_prefix(self, resource_pool: CoreNode, kind: None = ..., identifier: str | None = ..., prefix_length: int | None = ..., member_type: str | None = ..., prefix_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: Literal[False] = False) -> CoreNode | None +``` + +#### `allocate_next_ip_prefix` + +```python +allocate_next_ip_prefix(self, resource_pool: CoreNode, kind: None = ..., identifier: str | None = ..., prefix_length: int | None = ..., member_type: str | None = ..., prefix_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: bool = ...) -> CoreNode | None +``` + +#### `allocate_next_ip_prefix` + +```python +allocate_next_ip_prefix(self, resource_pool: CoreNode, kind: type[SchemaType] | None = None, identifier: str | None = None, prefix_length: int | None = None, member_type: str | None = None, prefix_type: str | None = None, data: dict[str, Any] | None = None, branch: str | None = None, timeout: int | None = None, tracker: str | None = None, raise_for_error: bool = True) -> CoreNode | SchemaType | None +``` + +Allocate a new IP prefix by using the provided resource pool. + +**Args:** + +- `resource_pool`: Node corresponding to the pool to allocate resources from. 
+- `identifier`: Value to perform idempotent allocation, the same resource will be returned for a given identifier. +- `prefix_length`: Length of the prefix to allocate. +- `member_type`: Member type of the prefix to allocate. +- `prefix_type`: Kind of the prefix to allocate. +- `data`: A key/value map to use to set attributes values on the allocated prefix. +- `branch`: Name of the branch to allocate from. Defaults to default_branch. +- `timeout`: Flag to indicate whether to populate the store with the retrieved nodes. +- `tracker`: The offset for pagination. +- `raise_for_error`: The limit for pagination. + +Returns: + InfrahubNode: Node corresponding to the allocated resource. + +#### `create_batch` + +```python +create_batch(self, return_exceptions: bool = False) -> InfrahubBatch +``` + +#### `get_list_repositories` + +```python +get_list_repositories(self, branches: dict[str, BranchData] | None = None, kind: str = 'CoreGenericRepository') -> dict[str, RepositoryData] +``` + +#### `repository_update_commit` + +```python +repository_update_commit(self, branch_name: str, repository_id: str, commit: str, is_read_only: bool = False) -> bool +``` + +### `InfrahubClientSync` + +**Methods:** + +#### `get_version` + +```python +get_version(self) -> str +``` + +Return the Infrahub version. 
+ +#### `get_user` + +```python +get_user(self) -> dict +``` + +Return user information + +#### `get_user_permissions` + +```python +get_user_permissions(self) -> dict +``` + +Return user permissions + +#### `create` + +```python +create(self, kind: str, data: dict | None = ..., branch: str | None = ..., **kwargs: Any) -> InfrahubNodeSync +``` + +#### `create` + +```python +create(self, kind: type[SchemaTypeSync], data: dict | None = ..., branch: str | None = ..., **kwargs: Any) -> SchemaTypeSync +``` + +#### `create` + +```python +create(self, kind: str | type[SchemaTypeSync], data: dict | None = None, branch: str | None = None, timeout: int | None = None, **kwargs: Any) -> InfrahubNodeSync | SchemaTypeSync +``` + +#### `delete` + +```python +delete(self, kind: str | type[SchemaTypeSync], id: str, branch: str | None = None) -> None +``` + +#### `clone` + +```python +clone(self, branch: str | None = None) -> InfrahubClientSync +``` + +Return a cloned version of the client using the same configuration + +#### `execute_graphql` + +```python +execute_graphql(self, query: str, variables: dict | None = None, branch_name: str | None = None, at: str | Timestamp | None = None, timeout: int | None = None, raise_for_error: bool = True, tracker: str | None = None) -> dict +``` + +Execute a GraphQL query (or mutation). +If retry_on_failure is True, the query will retry until the server becomes reacheable. + +**Args:** + +- `query`: GraphQL Query to execute, can be a query or a mutation +- `variables`: Variables to pass along with the GraphQL query. Defaults to None. +- `branch_name`: Name of the branch on which the query will be executed. Defaults to None. +- `at`: Time when the query should be executed. Defaults to None. +- `timeout`: Timeout in second for the query. Defaults to None. +- `raise_for_error`: Flag to indicate that we need to raise an exception if the response has some errors. Defaults to True. 
+ +Raises: + GraphQLError: When an error occurs during the execution of the GraphQL query or mutation. + +**Returns:** + +- The result of the GraphQL query or mutation. + +#### `count` + +```python +count(self, kind: str | type[SchemaType], at: Timestamp | None = None, branch: str | None = None, timeout: int | None = None, partial_match: bool = False, **kwargs: Any) -> int +``` + +Return the number of nodes of a given kind. + +#### `all` + +```python +all(self, kind: type[SchemaTypeSync], at: Timestamp | None = ..., branch: str | None = ..., timeout: int | None = ..., populate_store: bool = ..., offset: int | None = ..., limit: int | None = ..., include: list[str] | None = ..., exclude: list[str] | None = ..., fragment: bool = ..., prefetch_relationships: bool = ..., property: bool = ..., parallel: bool = ..., order: Order | None = ...) -> list[SchemaTypeSync] +``` + +#### `all` + +```python +all(self, kind: str, at: Timestamp | None = ..., branch: str | None = ..., timeout: int | None = ..., populate_store: bool = ..., offset: int | None = ..., limit: int | None = ..., include: list[str] | None = ..., exclude: list[str] | None = ..., fragment: bool = ..., prefetch_relationships: bool = ..., property: bool = ..., parallel: bool = ..., order: Order | None = ...) -> list[InfrahubNodeSync] +``` + +#### `all` + +```python +all(self, kind: str | type[SchemaTypeSync], at: Timestamp | None = None, branch: str | None = None, timeout: int | None = None, populate_store: bool = True, offset: int | None = None, limit: int | None = None, include: list[str] | None = None, exclude: list[str] | None = None, fragment: bool = False, prefetch_relationships: bool = False, property: bool = False, parallel: bool = False, order: Order | None = None) -> list[InfrahubNodeSync] | list[SchemaTypeSync] +``` + +Retrieve all nodes of a given kind + +**Args:** + +- `kind`: kind of the nodes to query +- `at`: Time of the query. Defaults to Now. +- `branch`: Name of the branch to query from. 
Defaults to default_branch. +- `timeout`: Overrides default timeout used when querying the GraphQL API. Specified in seconds. +- `populate_store`: Flag to indicate whether to populate the store with the retrieved nodes. +- `offset`: The offset for pagination. +- `limit`: The limit for pagination. +- `include`: List of attributes or relationships to include in the query. +- `exclude`: List of attributes or relationships to exclude from the query. +- `fragment`: Flag to use GraphQL fragments for generic schemas. +- `prefetch_relationships`: Flag to indicate whether to prefetch related node data. +- `parallel`: Whether to use parallel processing for the query. +- `order`: Ordering related options. Setting `disable=True` enhances performances. + +**Returns:** + +- list\[InfrahubNodeSync]: List of Nodes + +#### `filters` + +```python +filters(self, kind: type[SchemaTypeSync], at: Timestamp | None = ..., branch: str | None = ..., timeout: int | None = ..., populate_store: bool = ..., offset: int | None = ..., limit: int | None = ..., include: list[str] | None = ..., exclude: list[str] | None = ..., fragment: bool = ..., prefetch_relationships: bool = ..., partial_match: bool = ..., property: bool = ..., parallel: bool = ..., order: Order | None = ..., **kwargs: Any) -> list[SchemaTypeSync] +``` + +#### `filters` + +```python +filters(self, kind: str, at: Timestamp | None = ..., branch: str | None = ..., timeout: int | None = ..., populate_store: bool = ..., offset: int | None = ..., limit: int | None = ..., include: list[str] | None = ..., exclude: list[str] | None = ..., fragment: bool = ..., prefetch_relationships: bool = ..., partial_match: bool = ..., property: bool = ..., parallel: bool = ..., order: Order | None = ..., **kwargs: Any) -> list[InfrahubNodeSync] +``` + +#### `filters` + +```python +filters(self, kind: str | type[SchemaTypeSync], at: Timestamp | None = None, branch: str | None = None, timeout: int | None = None, populate_store: bool = True, offset: int 
| None = None, limit: int | None = None, include: list[str] | None = None, exclude: list[str] | None = None, fragment: bool = False, prefetch_relationships: bool = False, partial_match: bool = False, property: bool = False, parallel: bool = False, order: Order | None = None, **kwargs: Any) -> list[InfrahubNodeSync] | list[SchemaTypeSync] +``` + +Retrieve nodes of a given kind based on provided filters. + +**Args:** + +- `kind`: kind of the nodes to query +- `at`: Time of the query. Defaults to Now. +- `branch`: Name of the branch to query from. Defaults to default_branch. +- `timeout`: Overrides default timeout used when querying the GraphQL API. Specified in seconds. +- `populate_store`: Flag to indicate whether to populate the store with the retrieved nodes. +- `offset`: The offset for pagination. +- `limit`: The limit for pagination. +- `include`: List of attributes or relationships to include in the query. +- `exclude`: List of attributes or relationships to exclude from the query. +- `fragment`: Flag to use GraphQL fragments for generic schemas. +- `prefetch_relationships`: Flag to indicate whether to prefetch related node data. +- `partial_match`: Allow partial match of filter criteria for the query. +- `parallel`: Whether to use parallel processing for the query. +- `order`: Ordering related options. Setting `disable=True` enhances performances. +- `**kwargs`: Additional filter criteria for the query. + +**Returns:** + +- list\[InfrahubNodeSync]: List of Nodes that match the given filters. 
+ +#### `get` + +```python +get(self, kind: type[SchemaTypeSync], raise_when_missing: Literal[False], at: Timestamp | None = ..., branch: str | None = ..., timeout: int | None = ..., id: str | None = ..., hfid: list[str] | None = ..., include: list[str] | None = ..., exclude: list[str] | None = ..., populate_store: bool = ..., fragment: bool = ..., prefetch_relationships: bool = ..., property: bool = ..., **kwargs: Any) -> SchemaTypeSync | None +``` + +#### `get` + +```python +get(self, kind: type[SchemaTypeSync], raise_when_missing: Literal[True], at: Timestamp | None = ..., branch: str | None = ..., timeout: int | None = ..., id: str | None = ..., hfid: list[str] | None = ..., include: list[str] | None = ..., exclude: list[str] | None = ..., populate_store: bool = ..., fragment: bool = ..., prefetch_relationships: bool = ..., property: bool = ..., **kwargs: Any) -> SchemaTypeSync +``` + +#### `get` + +```python +get(self, kind: type[SchemaTypeSync], raise_when_missing: bool = ..., at: Timestamp | None = ..., branch: str | None = ..., timeout: int | None = ..., id: str | None = ..., hfid: list[str] | None = ..., include: list[str] | None = ..., exclude: list[str] | None = ..., populate_store: bool = ..., fragment: bool = ..., prefetch_relationships: bool = ..., property: bool = ..., **kwargs: Any) -> SchemaTypeSync +``` + +#### `get` + +```python +get(self, kind: str, raise_when_missing: Literal[False], at: Timestamp | None = ..., branch: str | None = ..., timeout: int | None = ..., id: str | None = ..., hfid: list[str] | None = ..., include: list[str] | None = ..., exclude: list[str] | None = ..., populate_store: bool = ..., fragment: bool = ..., prefetch_relationships: bool = ..., property: bool = ..., **kwargs: Any) -> InfrahubNodeSync | None +``` + +#### `get` + +```python +get(self, kind: str, raise_when_missing: Literal[True], at: Timestamp | None = ..., branch: str | None = ..., timeout: int | None = ..., id: str | None = ..., hfid: list[str] | None = ..., 
+Executing the batch will be performed using a thread pool, meaning it cannot guarantee the execution order. It is not recommended to use such a
+batch to manipulate objects that depend on each other.
+ +#### `get_list_repositories` + +```python +get_list_repositories(self, branches: dict[str, BranchData] | None = None, kind: str = 'CoreGenericRepository') -> dict[str, RepositoryData] +``` + +#### `query_gql_query` + +```python +query_gql_query(self, name: str, variables: dict | None = None, update_group: bool = False, subscribers: list[str] | None = None, params: dict | None = None, branch_name: str | None = None, at: str | None = None, timeout: int | None = None, tracker: str | None = None, raise_for_error: bool = True) -> dict +``` + +#### `get_diff_summary` + +```python +get_diff_summary(self, branch: str, timeout: int | None = None, tracker: str | None = None, raise_for_error: bool = True) -> list[NodeDiff] +``` + +#### `allocate_next_ip_address` + +```python +allocate_next_ip_address(self, resource_pool: CoreNodeSync, kind: type[SchemaTypeSync], identifier: str | None = ..., prefix_length: int | None = ..., address_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: Literal[True] = True) -> SchemaTypeSync +``` + +#### `allocate_next_ip_address` + +```python +allocate_next_ip_address(self, resource_pool: CoreNodeSync, kind: type[SchemaTypeSync], identifier: str | None = ..., prefix_length: int | None = ..., address_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: Literal[False] = False) -> SchemaTypeSync | None +``` + +#### `allocate_next_ip_address` + +```python +allocate_next_ip_address(self, resource_pool: CoreNodeSync, kind: type[SchemaTypeSync], identifier: str | None = ..., prefix_length: int | None = ..., address_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: bool = ...) 
-> SchemaTypeSync +``` + +#### `allocate_next_ip_address` + +```python +allocate_next_ip_address(self, resource_pool: CoreNodeSync, kind: None = ..., identifier: str | None = ..., prefix_length: int | None = ..., address_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: Literal[True] = True) -> CoreNodeSync +``` + +#### `allocate_next_ip_address` + +```python +allocate_next_ip_address(self, resource_pool: CoreNodeSync, kind: None = ..., identifier: str | None = ..., prefix_length: int | None = ..., address_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: Literal[False] = False) -> CoreNodeSync | None +``` + +#### `allocate_next_ip_address` + +```python +allocate_next_ip_address(self, resource_pool: CoreNodeSync, kind: None = ..., identifier: str | None = ..., prefix_length: int | None = ..., address_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: bool = ...) -> CoreNodeSync | None +``` + +#### `allocate_next_ip_address` + +```python +allocate_next_ip_address(self, resource_pool: CoreNodeSync, kind: type[SchemaTypeSync] | None = None, identifier: str | None = None, prefix_length: int | None = None, address_type: str | None = None, data: dict[str, Any] | None = None, branch: str | None = None, timeout: int | None = None, tracker: str | None = None, raise_for_error: bool = True) -> CoreNodeSync | SchemaTypeSync | None +``` + +Allocate a new IP address by using the provided resource pool. + +**Args:** + +- `resource_pool`: Node corresponding to the pool to allocate resources from. +- `identifier`: Value to perform idempotent allocation, the same resource will be returned for a given identifier. 
+- `timeout`: Overrides default timeout used when querying the GraphQL API. Specified in seconds.
+- `tracker`: Tracker value to attach to the mutation for tracing purposes.
+- `raise_for_error`: Flag to indicate whether to raise an exception when the allocation fails.
-> SchemaTypeSync +``` + +#### `allocate_next_ip_prefix` + +```python +allocate_next_ip_prefix(self, resource_pool: CoreNodeSync, kind: None = ..., identifier: str | None = ..., prefix_length: int | None = ..., member_type: str | None = ..., prefix_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: Literal[True] = True) -> CoreNodeSync +``` + +#### `allocate_next_ip_prefix` + +```python +allocate_next_ip_prefix(self, resource_pool: CoreNodeSync, kind: None = ..., identifier: str | None = ..., prefix_length: int | None = ..., member_type: str | None = ..., prefix_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: Literal[False] = False) -> CoreNodeSync | None +``` + +#### `allocate_next_ip_prefix` + +```python +allocate_next_ip_prefix(self, resource_pool: CoreNodeSync, kind: None = ..., identifier: str | None = ..., prefix_length: int | None = ..., member_type: str | None = ..., prefix_type: str | None = ..., data: dict[str, Any] | None = ..., branch: str | None = ..., timeout: int | None = ..., tracker: str | None = ..., raise_for_error: bool = ...) -> CoreNodeSync | None +``` + +#### `allocate_next_ip_prefix` + +```python +allocate_next_ip_prefix(self, resource_pool: CoreNodeSync, kind: type[SchemaTypeSync] | None = None, identifier: str | None = None, prefix_length: int | None = None, member_type: str | None = None, prefix_type: str | None = None, data: dict[str, Any] | None = None, branch: str | None = None, timeout: int | None = None, tracker: str | None = None, raise_for_error: bool = True) -> CoreNodeSync | SchemaTypeSync | None +``` + +Allocate a new IP prefix by using the provided resource pool. + +**Args:** + +- `resource_pool`: Node corresponding to the pool to allocate resources from. 
+- `prefix_length`: Length of the prefix to allocate.
+- `member_type`: Member type of the prefix to allocate.
+- `prefix_type`: Kind of the prefix to allocate.
+- `data`: A key/value map to use to set attributes values on the allocated prefix.
+- `branch`: Name of the branch to allocate from. Defaults to default_branch.
+- `timeout`: Overrides default timeout used when querying the GraphQL API. Specified in seconds.
+- `tracker`: Tracker value to attach to the mutation for tracing purposes.
+- `raise_for_error`: Flag to indicate whether to raise an exception when the allocation fails.
+Represents an Infrahub node in an asynchronous context.
+- `inherited`: Indicates whether the attributes and the relationships inherited from generics should be included as well.
+  Defaults to True.
+Represents an Infrahub node in a synchronous context.
+- `inherited`: Indicates whether the attributes and the relationships inherited from generics should be included as well.
+  Defaults to True.
+ +#### `get_pool_resources_utilization` + +```python +get_pool_resources_utilization(self) -> list[dict[str, Any]] +``` + +Fetch the utilization of each resource for the pool. + +**Returns:** + +- list\[dict\[str, Any]]: A list containing the allocation numbers for each resource of the pool. + +#### `get_flat_value` + +```python +get_flat_value(self, key: str, separator: str = '__') -> Any +``` + +Query recursively a value defined in a flat notation (string), on a hierarchy of objects + +**Examples:** + +name__value +module.object.value + +#### `extract` + +```python +extract(self, params: dict[str, str]) -> dict[str, Any] +``` + +Extract some datapoints defined in a flat notation. diff --git a/docs/package-lock.json b/docs/package-lock.json index b12fb6ab..50d090fb 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -10,6 +10,7 @@ "dependencies": { "@docusaurus/core": "^3.7.0", "@docusaurus/preset-classic": "^3.7.0", + "@iconify/react": "^6.0.0", "@mdx-js/react": "^3.0.0", "clsx": "^2.0.0", "prism-react-renderer": "^2.3.0", @@ -4128,6 +4129,27 @@ "@hapi/hoek": "^9.0.0" } }, + "node_modules/@iconify/react": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/@iconify/react/-/react-6.0.0.tgz", + "integrity": "sha512-eqNscABVZS8eCpZLU/L5F5UokMS9mnCf56iS1nM9YYHdH8ZxqZL9zyjSwW60IOQFsXZkilbBiv+1paMXBhSQnw==", + "license": "MIT", + "dependencies": { + "@iconify/types": "^2.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/cyberalien" + }, + "peerDependencies": { + "react": ">=16" + } + }, + "node_modules/@iconify/types": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@iconify/types/-/types-2.0.0.tgz", + "integrity": "sha512-+wluvCrRhXrhyOmRDJ3q8mux9JkKy5SJ/v8ol2tu4FVjyYvtEzkc/3pK15ET6RKg4b4w4BmTk1+gsCUhf21Ykg==", + "license": "MIT" + }, "node_modules/@jest/schemas": { "version": "29.6.3", "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", diff --git a/docs/package.json 
+  Icon: Icon, // Make the Iconify Icon component available in MDX as `<Icon />`.
- timeout (int, optional): Overrides default timeout used when querying the graphql API. Specified in seconds. + timeout (int, optional): Overrides default timeout used when querying the GraphQL API. Specified in seconds. Returns: ProcessRelationsNodeSync: A TypedDict containing two lists: @@ -666,7 +666,7 @@ async def all( at (Timestamp, optional): Time of the query. Defaults to Now. branch (str, optional): Name of the branch to query from. Defaults to default_branch. populate_store (bool, optional): Flag to indicate whether to populate the store with the retrieved nodes. - timeout (int, optional): Overrides default timeout used when querying the graphql API. Specified in seconds. + timeout (int, optional): Overrides default timeout used when querying the GraphQL API. Specified in seconds. offset (int, optional): The offset for pagination. limit (int, optional): The limit for pagination. include (list[str], optional): List of attributes or relationships to include in the query. @@ -763,7 +763,7 @@ async def filters( kind (str): kind of the nodes to query at (Timestamp, optional): Time of the query. Defaults to Now. branch (str, optional): Name of the branch to query from. Defaults to default_branch. - timeout (int, optional): Overrides default timeout used when querying the graphql API. Specified in seconds. + timeout (int, optional): Overrides default timeout used when querying the GraphQL API. Specified in seconds. populate_store (bool, optional): Flag to indicate whether to populate the store with the retrieved nodes. offset (int, optional): The offset for pagination. limit (int, optional): The limit for pagination. @@ -1794,7 +1794,7 @@ def all( kind (str): kind of the nodes to query at (Timestamp, optional): Time of the query. Defaults to Now. branch (str, optional): Name of the branch to query from. Defaults to default_branch. - timeout (int, optional): Overrides default timeout used when querying the graphql API. Specified in seconds. 
+ timeout (int, optional): Overrides default timeout used when querying the GraphQL API. Specified in seconds. populate_store (bool, optional): Flag to indicate whether to populate the store with the retrieved nodes. offset (int, optional): The offset for pagination. limit (int, optional): The limit for pagination. @@ -1840,7 +1840,7 @@ def _process_nodes_and_relationships( schema_kind (str): The kind of schema being queried. branch (str): The branch name. prefetch_relationships (bool): Flag to indicate whether to prefetch relationship data. - timeout (int, optional): Overrides default timeout used when querying the graphql API. Specified in seconds. + timeout (int, optional): Overrides default timeout used when querying the GraphQL API. Specified in seconds. Returns: ProcessRelationsNodeSync: A TypedDict containing two lists: @@ -1927,7 +1927,7 @@ def filters( kind (str): kind of the nodes to query at (Timestamp, optional): Time of the query. Defaults to Now. branch (str, optional): Name of the branch to query from. Defaults to default_branch. - timeout (int, optional): Overrides default timeout used when querying the graphql API. Specified in seconds. + timeout (int, optional): Overrides default timeout used when querying the GraphQL API. Specified in seconds. populate_store (bool, optional): Flag to indicate whether to populate the store with the retrieved nodes. offset (int, optional): The offset for pagination. limit (int, optional): The limit for pagination. 
diff --git a/infrahub_sdk/ctl/utils.py b/infrahub_sdk/ctl/utils.py index 66f86865..027eab4a 100644 --- a/infrahub_sdk/ctl/utils.py +++ b/infrahub_sdk/ctl/utils.py @@ -51,7 +51,7 @@ def init_logging(debug: bool = False) -> None: def handle_exception(exc: Exception, console: Console, exit_code: int) -> NoReturn: - """Handle exeception in a different fashion based on its type.""" + """Handle exception in a different fashion based on its type.""" if isinstance(exc, Exit): raise typer.Exit(code=exc.exit_code) if isinstance(exc, AuthenticationError): diff --git a/infrahub_sdk/jinja2.py b/infrahub_sdk/jinja2.py index 29afbf06..d64d22c1 100644 --- a/infrahub_sdk/jinja2.py +++ b/infrahub_sdk/jinja2.py @@ -7,7 +7,7 @@ def identify_faulty_jinja_code(traceback: Traceback, nbr_context_lines: int = 3) -> list[tuple[Frame, Syntax]]: """This function identifies the faulty Jinja2 code and beautify it to provide meaningful information to the user. - We use the rich's Traceback to parse the complete stack trace and extract Frames for each expection found in the trace. + We use the rich's Traceback to parse the complete stack trace and extract Frames for each exception found in the trace. """ response = [] diff --git a/infrahub_sdk/pytest_plugin/items/base.py b/infrahub_sdk/pytest_plugin/items/base.py index e00db68a..ce899c3f 100644 --- a/infrahub_sdk/pytest_plugin/items/base.py +++ b/infrahub_sdk/pytest_plugin/items/base.py @@ -75,7 +75,7 @@ def reportinfo(self) -> tuple[Path | str, int | None, str]: def repository_base(self) -> str: """Return the path to the root of the repository - This will be an absolute path if --infrahub-config-path is an absolut path as happens when + This will be an absolute path if --infrahub-config-path is an absolute path as happens when tests are started from within Infrahub server. 
""" config_path: Path = getattr(self.session, _infrahub_config_path_attribute) diff --git a/poetry.lock b/poetry.lock index a1b4c065..bbe5984c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -220,7 +220,7 @@ files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] -markers = {main = "(extra == \"ctl\" or extra == \"all\") and platform_system == \"Windows\" or sys_platform == \"win32\"", dev = "platform_system == \"Windows\" or sys_platform == \"win32\""} +markers = {main = "(extra == \"ctl\" or extra == \"all\") and platform_system == \"Windows\" or sys_platform == \"win32\"", dev = "platform_system == \"Windows\" or sys_platform == \"win32\" or python_version >= \"3.10\""} [[package]] name = "coverage" @@ -537,6 +537,22 @@ files = [ {file = "graphql_core-3.2.4-py3-none-any.whl", hash = "sha256:1604f2042edc5f3114f49cac9d77e25863be51b23a54a61a23245cf32f6476f0"}, ] +[[package]] +name = "griffe" +version = "1.8.0" +description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API." 
+optional = false +python-versions = ">=3.9" +groups = ["dev"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "griffe-1.8.0-py3-none-any.whl", hash = "sha256:110faa744b2c5c84dd432f4fa9aa3b14805dd9519777dd55e8db214320593b02"}, + {file = "griffe-1.8.0.tar.gz", hash = "sha256:0b4658443858465c13b2de07ff5e15a1032bc889cfafad738a476b8b97bb28d7"}, +] + +[package.dependencies] +colorama = ">=0.4" + [[package]] name = "h11" version = "0.14.0" @@ -921,6 +937,22 @@ files = [ ] markers = {main = "extra == \"ctl\" or extra == \"tests\" or extra == \"all\""} +[[package]] +name = "mdxify" +version = "0.2.23" +description = "Generate MDX API documentation from Python modules" +optional = false +python-versions = ">=3.10" +groups = ["dev"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "mdxify-0.2.23-py3-none-any.whl", hash = "sha256:9e920bd01bc34f11f6e250cc7a926ed8ae8a29ea39cb4707a69700972965fc4b"}, + {file = "mdxify-0.2.23.tar.gz", hash = "sha256:283ec938c7e11259f949042fad2ec62c3196c82e66c38e7827efa5532bcbfbeb"}, +] + +[package.dependencies] +griffe = ">=0.36.0" + [[package]] name = "mypy" version = "1.11.2" @@ -2394,4 +2426,4 @@ tests = ["Jinja2", "pytest", "pyyaml", "rich"] [metadata] lock-version = "2.1" python-versions = "^3.9, <3.14" -content-hash = "978a8ed3c6f4f4e46d39b8c33affb767a91275ee2bee532a48a7abc3d224deb8" +content-hash = "3be561b51687bf09fe7a2c6d5954cb158e474eb42b9388a65f3e3bc94cb15b6a" diff --git a/pyproject.toml b/pyproject.toml index 2465500f..c96709f1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -47,6 +47,7 @@ netutils = "^1.0.0" click = { version = "8.1.*", optional = true } [tool.poetry.group.dev.dependencies] +mdxify = {version = "^0.2.23", python = ">=3.10"} pytest = "*" pytest-asyncio = "<0.23" pytest-clarity = "^1.0.1" diff --git a/tasks.py b/tasks.py index e10ca2c1..93cbd2c4 100644 --- a/tasks.py +++ b/tasks.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import asyncio import sys from pathlib import Path @@ 
-254,3 +256,61 @@ def generate_python_sdk(context: Context) -> None: # noqa: ARG001 """Generate documentation for the Python SDK.""" _generate_infrahub_sdk_configuration_documentation() _generate_infrahub_sdk_template_documentation() + + +@task(name="generate-sdk-api-docs") +def generate_sdk_api_docs(context: Context, output: str | None = None) -> None: + """Generate API documentation for the Python SDK.""" + + # This is the list of code modules to generate documentation for. + MODULES_LIST = [ + "infrahub_sdk.client", + "infrahub_sdk.node.node", + ] + + import operator + import shutil + import tempfile + from functools import reduce + + output_dir = Path(output) if output else DOCUMENTATION_DIRECTORY / "docs" / "python-sdk" / "sdk_ref" + + if not is_tool_installed("mdxify"): + print(" - mdxify is not installed, skipping documentation generation") + return + + # Create a temporary directory to store the generated documentation + with tempfile.TemporaryDirectory() as tmp_dir: + # Generate the API documentation using mdxify and get flat file structure + exec_cmd = f"mdxify {' '.join(MODULES_LIST)} --output-dir {tmp_dir}" + context.run(exec_cmd, pty=True) + + # Remove current obsolete documentation file structure + if (output_dir / "infrahub_sdk").exists(): + shutil.rmtree(output_dir / "infrahub_sdk") + + # Get all .mdx files in the generated doc folder and apply filters + filters = ["__init__"] + filtered_files = [ + file + for file in list(Path(tmp_dir).glob("*.mdx")) + if all(filter.lower() not in file.name for filter in filters) + ] + + # Reorganize the generated relevant files into the desired structure + for mdx_file in filtered_files: + target_path = output_dir / reduce(operator.truediv, (Path(part) for part in mdx_file.name.split("-"))) + + # Create the future parent directory if it doesn't exist + target_path.parent.mkdir(parents=True, exist_ok=True) + + # Move the file to the new location + shutil.move(mdx_file, target_path) + + # Fix possible linting issues 
in the generated documentation + if not is_tool_installed("markdownlint-cli2"): + print(" - markdownlint-cli2 is not installed, skipping documentation linting") + return + + exec_cmd = f"markdownlint-cli2 {output_dir}/ --fix --config .markdownlint.yaml" + context.run(exec_cmd, pty=True)