|
| 1 | +import requests |
| 2 | +from uuid import UUID |
| 3 | +from typing import Optional |
| 4 | +from sempy_labs._helper_functions import ( |
| 5 | + get_pbi_token_headers, |
| 6 | + _get_url_prefix, |
| 7 | + get_model_id, |
| 8 | + resolve_item_name_and_id, |
| 9 | + resolve_workspace_name_and_id, |
| 10 | +) |
| 11 | +from sempy.fabric.exceptions import FabricHTTPException |
| 12 | +from sempy._utils._log import log |
| 13 | +import sempy_labs._icons as icons |
| 14 | + |
| 15 | + |
@log
def enable_query_caching(
    dataset: str | UUID, workspace: Optional[str | UUID] = None, enable: bool = True,
):
    """
    Enables or disables `query caching <https://aka.ms/queryCaching>`_ for a semantic model.

    Parameters
    ----------
    dataset : str | uuid.UUID
        Name or ID of the semantic model.
    workspace : str | uuid.UUID, default=None
        The workspace name or ID.
        Defaults to None which resolves to the workspace of the attached lakehouse
        or if no lakehouse attached, resolves to the workspace of the notebook.
    enable : bool, default=True
        Set to True to enable query caching, or False to disable it.

    Raises
    ------
    ValueError
        If the internal model ID cannot be resolved for the semantic model.
    FabricHTTPException
        If the caching-state update request does not succeed.
    """
    prefix = _get_url_prefix()
    headers = get_pbi_token_headers()
    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
    (item_name, item_id) = resolve_item_name_and_id(
        item=dataset, type="SemanticModel", workspace=workspace_id
    )
    # The caching endpoint is keyed on the internal model ID, not the item ID.
    model_id = get_model_id(item_id=item_id, headers=headers, prefix=prefix)
    if model_id is None:
        raise ValueError(f"Failed to retrieve model ID for semantic model '{item_name}'")

    # API state codes: 2 = caching enabled, 1 = caching disabled.
    # A conditional (rather than {True: 2, False: 1}.get(enable)) guarantees any
    # truthy/falsy value maps to a valid state instead of silently sending None.
    payload = {"queryCachingState": 2 if enable else 1}

    response = requests.post(
        f"{prefix}/metadata/models/{model_id}/caching", headers=headers, json=payload
    )

    # Endpoint returns 204 No Content on success; anything else is an error.
    if response.status_code != 204:
        raise FabricHTTPException(response)

    print(
        f"{icons.green_dot} Query caching has been {'enabled' if enable else 'disabled'} for the '{item_name}' semantic model within the '{workspace_name}' workspace."
    )
0 commit comments