diff --git a/sdk/cosmos/azure-cosmos/azure/cosmos/_base.py b/sdk/cosmos/azure-cosmos/azure/cosmos/_base.py
index 5330089bdcba..7e16a64c23ca 100644
--- a/sdk/cosmos/azure-cosmos/azure/cosmos/_base.py
+++ b/sdk/cosmos/azure-cosmos/azure/cosmos/_base.py
@@ -116,6 +116,7 @@ def GetHeaders(  # pylint: disable=too-many-statements,too-many-branches
     path: str,
     resource_id: Optional[str],
     resource_type: str,
+    operation_type: str,
     options: Mapping[str, Any],
     partition_key_range_id: Optional[str] = None,
 ) -> Dict[str, Any]:
@@ -323,6 +324,11 @@ def GetHeaders(  # pylint: disable=too-many-statements,too-many-branches
     if resource_type != 'dbs' and options.get("containerRID"):
         headers[http_constants.HttpHeaders.IntendedCollectionRID] = options["containerRID"]
 
+    if resource_type == "":
+        resource_type = "databaseaccount"
+    headers[http_constants.HttpHeaders.ThinClientProxyResourceType] = resource_type
+    headers[http_constants.HttpHeaders.ThinClientProxyOperationType] = operation_type
+
     return headers
diff --git a/sdk/cosmos/azure-cosmos/azure/cosmos/_cosmos_client_connection.py b/sdk/cosmos/azure-cosmos/azure/cosmos/_cosmos_client_connection.py
index 26e3f294ed56..ba1dcc1993cd 100644
--- a/sdk/cosmos/azure-cosmos/azure/cosmos/_cosmos_client_connection.py
+++ b/sdk/cosmos/azure-cosmos/azure/cosmos/_cosmos_client_connection.py
@@ -2038,7 +2038,8 @@ def PatchItem(
         if options is None:
             options = {}
 
-        headers = base.GetHeaders(self, self.default_headers, "patch", path, document_id, resource_type, options)
+        headers = base.GetHeaders(self, self.default_headers, "patch", path, document_id, resource_type,
+                                  documents._OperationType.Patch, options)
         # Patch will use WriteEndpoint since it uses PUT operation
         request_params = RequestObject(resource_type, documents._OperationType.Patch)
         request_data = {}
@@ -2126,7 +2127,8 @@ def _Batch(
     ) -> Tuple[List[Dict[str, Any]], CaseInsensitiveDict]:
         initial_headers = self.default_headers.copy()
         base._populate_batch_headers(initial_headers)
-        headers = base.GetHeaders(self, initial_headers, "post", path, collection_id, "docs", options)
+        headers = base.GetHeaders(self, initial_headers, "post", path, collection_id, "docs",
+                                  documents._OperationType.Batch, options)
         request_params = RequestObject("docs", documents._OperationType.Batch)
         return cast(
             Tuple[List[Dict[str, Any]], CaseInsensitiveDict],
@@ -2185,7 +2187,8 @@ def DeleteAllItemsByPartitionKey(
         # Specified url to perform background operation to delete all items by partition key
         path = '{}{}/{}'.format(path, "operations", "partitionkeydelete")
         collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)
-        headers = base.GetHeaders(self, self.default_headers, "post", path, collection_id, "partitionkey", options)
+        headers = base.GetHeaders(self, self.default_headers, "post", path, collection_id,
+                                  "partitionkey", documents._OperationType.Delete, options)
         request_params = RequestObject("partitionkey", documents._OperationType.Delete)
         _, last_response_headers = self.__Post(
             path=path,
@@ -2353,7 +2356,8 @@ def ExecuteStoredProcedure(
         path = base.GetPathFromLink(sproc_link)
         sproc_id = base.GetResourceIdOrFullNameFromLink(sproc_link)
 
-        headers = base.GetHeaders(self, initial_headers, "post", path, sproc_id, "sprocs", options)
+        headers = base.GetHeaders(self, initial_headers, "post", path, sproc_id, "sprocs",
+                                  documents._OperationType.ExecuteJavaScript, options)
         # ExecuteStoredProcedure will use WriteEndpoint since it uses POST operation
         request_params = RequestObject("sprocs",
                                        documents._OperationType.ExecuteJavaScript)
@@ -2550,7 +2554,8 @@ def GetDatabaseAccount(
         if url_connection is None:
             url_connection = self.url_connection
 
-        headers = base.GetHeaders(self, self.default_headers, "get", "", "", "", {})
+        headers = base.GetHeaders(self, self.default_headers, "get", "", "", "",
+                                  documents._OperationType.Read,{})
         request_params = RequestObject("databaseaccount", documents._OperationType.Read, url_connection)
         result, last_response_headers = self.__Get("", request_params, headers, **kwargs)
         self.last_response_headers = last_response_headers
@@ -2615,7 +2620,8 @@ def Create(
             options = {}
 
         initial_headers = initial_headers or self.default_headers
-        headers = base.GetHeaders(self, initial_headers, "post", path, id, typ, options)
+        headers = base.GetHeaders(self, initial_headers, "post", path, id, typ, documents._OperationType.Create,
+                                  options)
         # Create will use WriteEndpoint since it uses POST operation
         request_params = RequestObject(typ, documents._OperationType.Create)
 
@@ -2659,7 +2665,8 @@ def Upsert(
             options = {}
 
         initial_headers = initial_headers or self.default_headers
-        headers = base.GetHeaders(self, initial_headers, "post", path, id, typ, options)
+        headers = base.GetHeaders(self, initial_headers, "post", path, id, typ, documents._OperationType.Upsert,
+                                  options)
         headers[http_constants.HttpHeaders.IsUpsert] = True
 
         # Upsert will use WriteEndpoint since it uses POST operation
@@ -2703,7 +2710,8 @@ def Replace(
             options = {}
 
         initial_headers = initial_headers or self.default_headers
-        headers = base.GetHeaders(self, initial_headers, "put", path, id, typ, options)
+        headers = base.GetHeaders(self, initial_headers, "put", path, id, typ, documents._OperationType.Replace,
+                                  options)
         # Replace will use WriteEndpoint since it uses PUT operation
         request_params = RequestObject(typ, documents._OperationType.Replace)
         result, last_response_headers = self.__Put(path, request_params, resource, headers, **kwargs)
@@ -2744,7 +2752,7 @@ def Read(
             options = {}
 
         initial_headers = initial_headers or self.default_headers
-        headers = base.GetHeaders(self, initial_headers, "get", path, id, typ, options)
+        headers = base.GetHeaders(self, initial_headers, "get", path, id, typ, documents._OperationType.Read, options)
         # Read will use ReadEndpoint since it uses GET operation
         request_params = RequestObject(typ, documents._OperationType.Read)
         result, last_response_headers = self.__Get(path, request_params, headers, **kwargs)
@@ -2782,7 +2790,8 @@ def DeleteResource(
             options = {}
 
         initial_headers = initial_headers or self.default_headers
-        headers = base.GetHeaders(self, initial_headers, "delete", path, id, typ, options)
+        headers = base.GetHeaders(self, initial_headers, "delete", path, id, typ, documents._OperationType.Delete,
+                                  options)
         # Delete will use WriteEndpoint since it uses DELETE operation
         request_params = RequestObject(typ, documents._OperationType.Delete)
         result, last_response_headers = self.__Delete(path, request_params, headers, **kwargs)
@@ -3027,6 +3036,7 @@ def __GetBodiesFromQueryResult(result: Dict[str, Any]) -> List[Dict[str, Any]]:
                 path,
                 resource_id,
                 resource_type,
+                request_params.operation_type,
                 options,
                 partition_key_range_id
             )
@@ -3064,6 +3074,7 @@ def __GetBodiesFromQueryResult(result: Dict[str, Any]) -> List[Dict[str, Any]]:
                 path,
                 resource_id,
                 resource_type,
+                documents._OperationType.SqlQuery,
                 options,
                 partition_key_range_id
             )
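
# --- Illustrative sketch, not part of the patch above ---------------------------------
# With the new operation_type argument threaded through, every request built via
# base.GetHeaders() now carries both thin client proxy headers. Assuming `client` is an
# already-constructed azure.cosmos.CosmosClient (so its client_connection can sign the
# request) and the document path / id below are hypothetical, a point read would be
# stamped like this:
from azure.cosmos import documents, http_constants
from azure.cosmos import _base as base

conn = client.client_connection  # assumed: connection of an initialized CosmosClient
headers = base.GetHeaders(conn, conn.default_headers, "get",
                          "dbs/db1/colls/coll1/docs/item1",  # hypothetical document path
                          "item1", "docs", documents._OperationType.Read, {})
assert headers[http_constants.HttpHeaders.ThinClientProxyResourceType] == "docs"
assert headers[http_constants.HttpHeaders.ThinClientProxyOperationType] == "Read"
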
diff --git a/sdk/cosmos/azure-cosmos/azure/cosmos/_retry_utility.py b/sdk/cosmos/azure-cosmos/azure/cosmos/_retry_utility.py
index cf3e35a15170..0a04213c0a37 100644
--- a/sdk/cosmos/azure-cosmos/azure/cosmos/_retry_utility.py
+++ b/sdk/cosmos/azure-cosmos/azure/cosmos/_retry_utility.py
@@ -22,10 +22,11 @@
 """Internal methods for executing functions in the Azure Cosmos database service.
 """
 import json
+from requests.exceptions import ReadTimeout, ConnectTimeout
 import time
 from typing import Optional
 
-from azure.core.exceptions import AzureError, ClientAuthenticationError, ServiceRequestError
+from azure.core.exceptions import AzureError, ClientAuthenticationError, ServiceRequestError, ServiceResponseError
 from azure.core.pipeline import PipelineRequest
 from azure.core.pipeline.policies import RetryPolicy
 from azure.core.pipeline.transport._base import HttpRequest
@@ -38,6 +39,7 @@
 from . import _gone_retry_policy
 from . import _timeout_failover_retry_policy
 from . import _container_recreate_retry_policy
+from . import _service_response_retry_policy
 from .http_constants import HttpHeaders, StatusCodes, SubStatusCodes
@@ -78,6 +80,9 @@ def Execute(client, global_endpoint_manager, function, *args, **kwargs):
     timeout_failover_retry_policy = _timeout_failover_retry_policy._TimeoutFailoverRetryPolicy(
         client.connection_policy, global_endpoint_manager, *args
     )
+    service_response_retry_policy = _service_response_retry_policy.ServiceResponseRetryPolicy(
+        client.connection_policy, global_endpoint_manager, *args,
+    )
     # HttpRequest we would need to modify for Container Recreate Retry Policy
     request: Optional[HttpRequest] = None
     if args and len(args) > 3:
@@ -188,6 +193,15 @@ def Execute(client, global_endpoint_manager, function, *args, **kwargs):
                 if kwargs['timeout'] <= 0:
                     raise exceptions.CosmosClientTimeoutError()
+        except ServiceResponseError as e:
+            if _has_retryable_headers(request.http_request.headers):
+                # we resolve the request endpoint to the next preferred region
+                # once we are out of preferred regions we stop retrying
+                retry_policy = service_response_retry_policy
+                if not retry_policy.ShouldRetry():
+                    if args and args[0].should_clear_session_token_on_session_read_failure and client.session:
+                        client.session.clear_session_token(client.last_response_headers)
+                    raise
 
 
 def ExecuteFunction(function, *args, **kwargs):
     """Stub method so that it can be used for mocking purposes as well.
@@ -198,6 +212,12 @@ def ExecuteFunction(function, *args, **kwargs):
     """
     return function(*args, **kwargs)
 
+def _has_retryable_headers(request_headers):
+    if (request_headers.get(HttpHeaders.ThinClientProxyResourceType) in ["docs"]
+            and request_headers.get(HttpHeaders.ThinClientProxyOperationType) in ["Read", "Query", "QueryPlan",
+                                                                                  "ReadFeed", "SqlQuery"]):
+        return True
+    return False
 
 def _configure_timeout(request: PipelineRequest, absolute: Optional[int], per_request: int) -> None:
     if absolute is not None:
@@ -261,6 +281,9 @@ def send(self, request):
                 timeout_error.history = retry_settings['history']
                 raise
             except ServiceRequestError as err:
+                if _has_retryable_headers(request.http_request.headers):
+                    # raise exception immediately to be dealt with in client retry policies
+                    raise err
                 # the request ran into a socket timeout or failed to establish a new connection
                 # since request wasn't sent, we retry up to however many connection retries are configured (default 3)
                 if retry_settings['connect'] > 0:
@@ -269,13 +292,16 @@ def send(self, request):
                         self.sleep(retry_settings, request.context.transport)
                         continue
                 raise err
-            except AzureError as err:
+            except ServiceResponseError as err:
                 retry_error = err
-                if self._is_method_retryable(retry_settings, request.http_request):
-                    retry_active = self.increment(retry_settings, response=request, error=err)
-                    if retry_active:
-                        self.sleep(retry_settings, request.context.transport)
-                        continue
+                if err.exc_type in [ReadTimeout, ConnectTimeout]:
+                    if _has_retryable_headers(request.http_request.headers):
+                        # raise exception immediately to be dealt with in client retry policies
+                        raise err
+                retry_active = self.increment(retry_settings, response=request, error=err)
+                if retry_active:
+                    self.sleep(retry_settings, request.context.transport)
+                    continue
                 raise err
             finally:
                 end_time = time.time()
diff --git a/sdk/cosmos/azure-cosmos/azure/cosmos/_service_response_retry_policy.py b/sdk/cosmos/azure-cosmos/azure/cosmos/_service_response_retry_policy.py
new file mode 100644
index 000000000000..059b3e744158
--- /dev/null
+++ b/sdk/cosmos/azure-cosmos/azure/cosmos/_service_response_retry_policy.py
@@ -0,0 +1,45 @@
+# The MIT License (MIT)
+# Copyright (c) Microsoft Corporation. All rights reserved.
+
+"""Internal class for service response read errors implementation in the Azure
+Cosmos database service.
+"""
+
+class ServiceResponseRetryPolicy(object):
+
+    def __init__(self, connection_policy, global_endpoint_manager, *args):
+        self.args = args
+        self.global_endpoint_manager = global_endpoint_manager
+        self.total_retries = len(self.global_endpoint_manager.location_cache.read_endpoints)
+        self.failover_retry_count = 0
+        self.connection_policy = connection_policy
+        self.request = args[0] if args else None
+        if self.request:
+            self.location_endpoint = self.global_endpoint_manager.resolve_service_endpoint(self.request)
+
+    def ShouldRetry(self):
+        """Returns true if the request should retry based on preferred regions and retries already done.
+
+        """
+        if not self.connection_policy.EnableEndpointDiscovery:
+            return False
+        if self.args[0].operation_type != 'Read' and self.args[0].resource_type != 'docs':
+            return False
+
+        self.failover_retry_count += 1
+        if self.failover_retry_count > self.total_retries:
+            return False
+
+        if self.request:
+            # clear previous location-based routing directive
+            self.request.clear_route_to_location()
+
+            # set location-based routing directive based on retry count
+            # ensuring usePreferredLocations is set to True for retry
+            self.request.route_to_location_with_preferred_location_flag(self.failover_retry_count, True)
+
+            # Resolve the endpoint for the request and pin the resolution to the resolved endpoint
+            # This enables marking the endpoint unavailability on endpoint failover/unreachability
+            self.location_endpoint = self.global_endpoint_manager.resolve_service_endpoint(self.request)
+            self.request.route_to_location(self.location_endpoint)
+        return True
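
# --- Illustrative sketch, not part of the patch above ---------------------------------
# The stub names below (everything prefixed with _Stub) are hypothetical and exist only to
# show the retry budget of the new policy: ShouldRetry() succeeds once per configured read
# endpoint, re-routing the request to the next preferred region each time, and then gives up.
from azure.cosmos._service_response_retry_policy import ServiceResponseRetryPolicy

class _StubLocationCache:
    read_endpoints = ["https://account-westus.documents.azure.com",
                      "https://account-eastus.documents.azure.com"]

class _StubEndpointManager:
    location_cache = _StubLocationCache()
    def resolve_service_endpoint(self, request):
        # pick an endpoint based on the routing index the policy set on the request
        return self.location_cache.read_endpoints[request.location_index % 2]

class _StubConnectionPolicy:
    EnableEndpointDiscovery = True

class _StubRequest:
    operation_type = "Read"
    resource_type = "docs"
    location_index = 0
    def clear_route_to_location(self): pass
    def route_to_location_with_preferred_location_flag(self, index, use_preferred_locations):
        self.location_index = index
    def route_to_location(self, endpoint): pass

policy = ServiceResponseRetryPolicy(_StubConnectionPolicy(), _StubEndpointManager(), _StubRequest())
print([policy.ShouldRetry() for _ in range(3)])  # [True, True, False] with two read endpoints
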
diff --git a/sdk/cosmos/azure-cosmos/azure/cosmos/aio/_cosmos_client_connection_async.py b/sdk/cosmos/azure-cosmos/azure/cosmos/aio/_cosmos_client_connection_async.py
index c7acfcac6302..16d4da3d2080 100644
--- a/sdk/cosmos/azure-cosmos/azure/cosmos/aio/_cosmos_client_connection_async.py
+++ b/sdk/cosmos/azure-cosmos/azure/cosmos/aio/_cosmos_client_connection_async.py
@@ -409,9 +409,11 @@ async def GetDatabaseAccount(
             url_connection = self.url_connection
 
         initial_headers = dict(self.default_headers)
-        headers = base.GetHeaders(self, initial_headers, "get", "", "", "", {})  # path  # id  # type
+        headers = base.GetHeaders(self, initial_headers, "get", "", "", "",
+                                  documents._OperationType.Read, {})  # path  # id  # type
         request_params = _request_object.RequestObject("databaseaccount", documents._OperationType.Read,
                                                        url_connection)
+
         result, self.last_response_headers = await self.__Get("", request_params, headers, **kwargs)
         database_account = documents.DatabaseAccount()
         database_account.DatabasesLink = "/dbs/"
@@ -698,7 +700,8 @@ async def ExecuteStoredProcedure(
         path = base.GetPathFromLink(sproc_link)
         sproc_id = base.GetResourceIdOrFullNameFromLink(sproc_link)
 
-        headers = base.GetHeaders(self, initial_headers, "post", path, sproc_id, "sprocs", options)
+        headers = base.GetHeaders(self, initial_headers, "post", path, sproc_id, "sprocs",
+                                  documents._OperationType.ExecuteJavaScript, options)
         # ExecuteStoredProcedure will use WriteEndpoint since it uses POST operation
         request_params = _request_object.RequestObject("sprocs",
                                                        documents._OperationType.ExecuteJavaScript)
@@ -735,7 +738,8 @@ async def Create(
             options = {}
 
         initial_headers = initial_headers or self.default_headers
-        headers = base.GetHeaders(self, initial_headers, "post", path, id, typ, options)
+        headers = base.GetHeaders(self, initial_headers, "post", path, id, typ,
+                                  documents._OperationType.Create, options)
         # Create will use WriteEndpoint since it uses POST operation
         request_params = _request_object.RequestObject(typ, documents._OperationType.Create)
 
@@ -871,7 +875,8 @@ async def Upsert(
             options = {}
 
         initial_headers = initial_headers or self.default_headers
-        headers = base.GetHeaders(self, initial_headers, "post", path, id, typ, options)
+        headers = base.GetHeaders(self, initial_headers, "post", path, id, typ, documents._OperationType.Upsert,
+                                  options)
         headers[http_constants.HttpHeaders.IsUpsert] = True
@@ -1174,7 +1179,8 @@ async def Read(
             options = {}
 
         initial_headers = initial_headers or self.default_headers
-        headers = base.GetHeaders(self, initial_headers, "get", path, id, typ, options)
+        headers = base.GetHeaders(self, initial_headers, "get", path, id, typ, documents._OperationType.Read,
+                                  options)
         # Read will use ReadEndpoint since it uses GET operation
         request_params = _request_object.RequestObject(typ, documents._OperationType.Read)
         result, last_response_headers = await self.__Get(path, request_params, headers, **kwargs)
@@ -1431,7 +1437,8 @@ async def PatchItem(
             options = {}
 
         initial_headers = self.default_headers
-        headers = base.GetHeaders(self, initial_headers, "patch", path, document_id, typ, options)
+        headers = base.GetHeaders(self, initial_headers, "patch", path, document_id, typ,
+                                  documents._OperationType.Patch, options)
         # Patch will use WriteEndpoint since it uses PUT operation
         request_params = _request_object.RequestObject(typ, documents._OperationType.Patch)
         request_data = {}
@@ -1534,7 +1541,8 @@ async def Replace(
             options = {}
 
         initial_headers = initial_headers or self.default_headers
-        headers = base.GetHeaders(self, initial_headers, "put", path, id, typ, options)
+        headers = base.GetHeaders(self, initial_headers, "put", path, id, typ, documents._OperationType.Replace,
+                                  options)
         # Replace will use WriteEndpoint since it uses PUT operation
         request_params = _request_object.RequestObject(typ, documents._OperationType.Replace)
         result, last_response_headers = await self.__Put(path, request_params, resource, headers, **kwargs)
@@ -1856,7 +1864,8 @@ async def DeleteResource(
             options = {}
 
         initial_headers = initial_headers or self.default_headers
-        headers = base.GetHeaders(self, initial_headers, "delete", path, id, typ, options)
+        headers = base.GetHeaders(self, initial_headers, "delete", path, id, typ, documents._OperationType.Delete,
+                                  options)
         # Delete will use WriteEndpoint since it uses DELETE operation
         request_params = _request_object.RequestObject(typ, documents._OperationType.Delete)
         result, last_response_headers = await self.__Delete(path, request_params, headers, **kwargs)
@@ -1969,7 +1978,8 @@ async def _Batch(
     ) -> Tuple[List[Dict[str, Any]], CaseInsensitiveDict]:
         initial_headers = self.default_headers.copy()
         base._populate_batch_headers(initial_headers)
-        headers = base.GetHeaders(self, initial_headers, "post", path, collection_id, "docs", options)
+        headers = base.GetHeaders(self, initial_headers, "post", path, collection_id, "docs",
+                                  documents._OperationType.Batch, options)
         request_params = _request_object.RequestObject("docs", documents._OperationType.Batch)
         result = await self.__Post(path, request_params, batch_operations, headers, **kwargs)
         return cast(Tuple[List[Dict[str, Any]], CaseInsensitiveDict], result)
@@ -2825,7 +2835,8 @@ def __GetBodiesFromQueryResult(result: Dict[str, Any]) -> List[Dict[str, Any]]:
                 typ,
                 documents._OperationType.QueryPlan if is_query_plan else documents._OperationType.ReadFeed
             )
-            headers = base.GetHeaders(self, initial_headers, "get", path, id_, typ, options, partition_key_range_id)
+            headers = base.GetHeaders(self, initial_headers, "get", path, id_, typ, request_params.operation_type,
+                                      options, partition_key_range_id)
 
             change_feed_state: Optional[ChangeFeedState] = options.get("changeFeedState")
             if change_feed_state is not None:
@@ -2853,7 +2864,8 @@ def __GetBodiesFromQueryResult(result: Dict[str, Any]) -> List[Dict[str, Any]]:
         # Query operations will use ReadEndpoint even though it uses POST(for regular query operations)
         request_params = _request_object.RequestObject(typ, documents._OperationType.SqlQuery)
-        req_headers = base.GetHeaders(self, initial_headers, "post", path, id_, typ, options, partition_key_range_id)
+        req_headers = base.GetHeaders(self, initial_headers, "post", path, id_, typ, request_params.operation_type,
+                                      options, partition_key_range_id)
 
         # check if query has prefix partition key
         cont_prop = kwargs.pop("containerProperties", None)
@@ -3218,7 +3230,8 @@ async def DeleteAllItemsByPartitionKey(
         path = '{}{}/{}'.format(path, "operations", "partitionkeydelete")
         collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)
         initial_headers = dict(self.default_headers)
-        headers = base.GetHeaders(self, initial_headers, "post", path, collection_id, "partitionkey", options)
+        headers = base.GetHeaders(self, initial_headers, "post", path, collection_id, "partitionkey",
+                                  documents._OperationType.Delete, options)
         request_params = _request_object.RequestObject("partitionkey", documents._OperationType.Delete)
         _, last_response_headers = await self.__Post(path=path, request_params=request_params,
                                                      req_headers=headers, body=None, **kwargs)
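
# --- Illustrative sketch, not part of the patch above ---------------------------------
# The thin client proxy headers stamped by the sync and async connection layers are what the
# transport later uses to decide whether an error should be handed straight back to the
# client-level retry policies. Only document reads and queries qualify:
from azure.cosmos._retry_utility import _has_retryable_headers
from azure.cosmos.http_constants import HttpHeaders

read_headers = {HttpHeaders.ThinClientProxyResourceType: "docs",
                HttpHeaders.ThinClientProxyOperationType: "Read"}
write_headers = {HttpHeaders.ThinClientProxyResourceType: "docs",
                 HttpHeaders.ThinClientProxyOperationType: "Create"}
print(_has_retryable_headers(read_headers))    # True  -> eligible for cross-region retry
print(_has_retryable_headers(write_headers))   # False -> keeps the existing behavior
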
diff --git a/sdk/cosmos/azure-cosmos/azure/cosmos/aio/_retry_utility_async.py b/sdk/cosmos/azure-cosmos/azure/cosmos/aio/_retry_utility_async.py
index f239031c67fb..23baaefafee1 100644
--- a/sdk/cosmos/azure-cosmos/azure/cosmos/aio/_retry_utility_async.py
+++ b/sdk/cosmos/azure-cosmos/azure/cosmos/aio/_retry_utility_async.py
@@ -24,27 +24,28 @@
 import json
 import time
 import asyncio
+from aiohttp.client_exceptions import ConnectionTimeoutError, ServerTimeoutError
 from typing import Optional
 
-from azure.core.exceptions import AzureError, ClientAuthenticationError, ServiceRequestError
+from azure.core.exceptions import AzureError, ClientAuthenticationError, ServiceRequestError, ServiceResponseError
 from azure.core.pipeline.policies import AsyncRetryPolicy
 from azure.core.pipeline.transport._base import HttpRequest
 
 from .. import exceptions
 from ..http_constants import HttpHeaders, StatusCodes, SubStatusCodes
-from .._retry_utility import _configure_timeout
+from .._retry_utility import _configure_timeout, _has_retryable_headers
 from .. import _endpoint_discovery_retry_policy
 from .. import _resource_throttle_retry_policy
 from .. import _default_retry_policy
 from .. import _session_retry_policy
 from .. import _gone_retry_policy
 from .. import _timeout_failover_retry_policy
+from .. import _service_response_retry_policy
 from .._container_recreate_retry_policy import ContainerRecreateRetryPolicy
 
 # pylint: disable=protected-access, disable=too-many-lines, disable=too-many-statements, disable=too-many-branches
-
 async def ExecuteAsync(client, global_endpoint_manager, function, *args, **kwargs):
     """Executes the function with passed parameters applying all retry policies
@@ -77,6 +78,9 @@ async def ExecuteAsync(client, global_endpoint_manager, function, *args, **kwarg
     timeout_failover_retry_policy = _timeout_failover_retry_policy._TimeoutFailoverRetryPolicy(
         client.connection_policy, global_endpoint_manager, *args
     )
+    service_response_retry_policy = _service_response_retry_policy.ServiceResponseRetryPolicy(
+        client.connection_policy, global_endpoint_manager, *args,
+    )
     # HttpRequest we would need to modify for Container Recreate Retry Policy
     request: Optional[HttpRequest] = None
     if args and len(args) > 3:
@@ -189,6 +193,16 @@ async def ExecuteAsync(client, global_endpoint_manager, function, *args, **kwarg
                 if kwargs['timeout'] <= 0:
                     raise exceptions.CosmosClientTimeoutError()
+        except ServiceResponseError as e:
+            if _has_retryable_headers(request.http_request.headers):
+                # we resolve the request endpoint to the next preferred region
+                # once we are out of preferred regions we stop retrying
+                retry_policy = service_response_retry_policy
+                if not retry_policy.ShouldRetry():
+                    if args and args[0].should_clear_session_token_on_session_read_failure and client.session:
+                        client.session.clear_session_token(client.last_response_headers)
+                    raise
+
 
 async def ExecuteFunctionAsync(function, *args, **kwargs):
     """Stub method so that it can be used for mocking purposes as well.
@@ -220,6 +234,11 @@ async def send(self, request):
         """
         absolute_timeout = request.context.options.pop('timeout', None)
         per_request_timeout = request.context.options.pop('connection_timeout', 0)
+        # TODO: remove this once done testing
+        if "docs" in request.http_request.url and request.http_request.method == "GET":
+            per_request_timeout = 0.001
+        else:
+            per_request_timeout = 1
 
         retry_error = None
         retry_active = True
@@ -247,6 +266,9 @@ async def send(self, request):
                 timeout_error.history = retry_settings['history']
                 raise
             except ServiceRequestError as err:
+                if _has_retryable_headers(request.http_request.headers):
+                    # raise exception immediately to be dealt with in client retry policies
+                    raise err
                 # the request ran into a socket timeout or failed to establish a new connection
                 # since request wasn't sent, we retry up to however many connection retries are configured (default 3)
                 if retry_settings['connect'] > 0:
@@ -255,6 +277,17 @@ async def send(self, request):
                     retry_active = self.increment(retry_settings, response=request, error=err)
                     if retry_active:
                         await self.sleep(retry_settings, request.context.transport)
                         continue
                 raise err
+            except ServiceResponseError as err:
+                retry_error = err
+                if err.exc_type in [ConnectionTimeoutError, ServerTimeoutError]:
+                    if _has_retryable_headers(request.http_request.headers):
+                        # raise exception immediately to be dealt with in client retry policies
+                        raise err
+                retry_active = self.increment(retry_settings, response=request, error=err)
+                if retry_active:
+                    await self.sleep(retry_settings, request.context.transport)
+                    continue
+                raise err
             except AzureError as err:
                 retry_error = err
                 if self._is_method_retryable(retry_settings, request.http_request):
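
# --- Illustrative sketch, not part of the patch above ---------------------------------
# Both transports key off AzureError.exc_type. Assuming azure-core populates exc_type from
# the active exception (sys.exc_info()) when the ServiceResponseError is constructed, a
# wrapped requests ReadTimeout / ConnectTimeout (or the aiohttp timeouts in the async path)
# is what triggers the immediate re-raise for retryable requests. A rough check of that
# assumption:
from azure.core.exceptions import ServiceResponseError
from requests.exceptions import ReadTimeout

try:
    raise ReadTimeout("read timed out")
except ReadTimeout as inner:
    wrapped = ServiceResponseError(message=str(inner), error=inner)

print(wrapped.exc_type is ReadTimeout)  # True -> handed to the client retry policies
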
diff --git a/sdk/cosmos/azure-cosmos/azure/cosmos/http_constants.py b/sdk/cosmos/azure-cosmos/azure/cosmos/http_constants.py
index b0b6a87e7ca2..34277cb185e1 100644
--- a/sdk/cosmos/azure-cosmos/azure/cosmos/http_constants.py
+++ b/sdk/cosmos/azure-cosmos/azure/cosmos/http_constants.py
@@ -252,6 +252,10 @@ class HttpHeaders:
     CosmosQuorumAckedLsn = "x-ms-cosmos-quorum-acked-llsn"  # cspell:disable-line
     RequestDurationMs = "x-ms-request-duration-ms"
 
+    # Thin Client headers
+    ThinClientProxyOperationType = "x-ms-thinclient-proxy-operation-type"
+    ThinClientProxyResourceType = "x-ms-thinclient-proxy-resource-type"
+
 
 class HttpHeaderPreferenceTokens:
     """Constants of http header preference tokens.
     """
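
# --- Illustrative sketch, not part of the patch above ---------------------------------
# End-to-end view; the account URL, key, region names, and resource names below are all
# placeholders. With more than one preferred location configured, a document read whose
# response times out in the first region can now be retried against the next preferred
# read region instead of surfacing a ServiceResponseError once the transport gives up.
from azure.cosmos import CosmosClient

client = CosmosClient(
    url="https://myaccount.documents.azure.com:443/",  # placeholder account
    credential="myaccountkey",                         # placeholder key
    preferred_locations=["West US", "East US"],        # read regions the new policy walks through
)
container = client.get_database_client("mydb").get_container_client("mycontainer")
item = container.read_item(item="item-id", partition_key="pk-value")
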