Skip to content

Commit d0eedd2

Browse files
author
Andrew Mathew
committed
Remove the concurrent throughput operations example from the async autoscale sample
1 parent 75e7e8f commit d0eedd2

File tree

1 file changed

+1
-66
lines changed

1 file changed

+1
-66
lines changed

sdk/cosmos/azure-cosmos/samples/autoscale_throughput_management_async.py

Lines changed: 1 addition & 66 deletions
Original file line numberDiff line numberDiff line change
@@ -225,68 +225,6 @@ async def update_autoscale_max_throughput(container, new_max_throughput):
225225
except exceptions.CosmosHttpResponseError as e:
226226
print(f"Error updating autoscale throughput: {e.message}")
227227

228-
async def demonstrate_concurrent_throughput_operations(database):
229-
"""
230-
Demonstrate concurrent throughput operations on multiple containers.
231-
232-
Shows how async operations can efficiently manage throughput settings
233-
for multiple containers simultaneously.
234-
235-
Args:
236-
database: DatabaseProxy instance
237-
"""
238-
print("\nDemonstrate Concurrent Throughput Operations (Async)")
239-
print("=" * 70)
240-
241-
try:
242-
# Create multiple containers with different autoscale settings concurrently
243-
container_configs = [
244-
('container_async_1', 4000),
245-
('container_async_2', 5000),
246-
('container_async_3', 6000)
247-
]
248-
249-
# Create containers concurrently
250-
create_tasks = [
251-
database.create_container(
252-
id=container_id,
253-
partition_key=PartitionKey(path='/id'),
254-
offer_throughput=ThroughputProperties(
255-
auto_scale_max_throughput=max_throughput,
256-
auto_scale_increment_percent=0
257-
)
258-
)
259-
for container_id, max_throughput in container_configs
260-
]
261-
262-
containers = await asyncio.gather(*create_tasks, return_exceptions=True)
263-
264-
print(f"Created {len([c for c in containers if not isinstance(c, Exception)])} containers concurrently")
265-
266-
# Read throughput settings concurrently
267-
valid_containers = [c for c in containers if not isinstance(c, Exception)]
268-
read_tasks = [container.get_throughput() for container in valid_containers]
269-
270-
offers = await asyncio.gather(*read_tasks, return_exceptions=True)
271-
272-
print("\nThroughput settings for all containers:")
273-
for container, offer in zip(valid_containers, offers):
274-
if not isinstance(offer, Exception):
275-
autopilot_settings = offer.properties.get('content', {}).get('offerAutopilotSettings')
276-
if autopilot_settings:
277-
max_throughput = autopilot_settings.get('maxThroughput')
278-
print(f" - {container.id}: {max_throughput} RU/s (autoscale)")
279-
280-
# Cleanup containers concurrently
281-
delete_tasks = [database.delete_container(container.id) for container in valid_containers]
282-
await asyncio.gather(*delete_tasks, return_exceptions=True)
283-
284-
print(f"\nCleaned up {len(valid_containers)} containers concurrently")
285-
286-
except Exception as e:
287-
print(f"Error in concurrent operations: {str(e)}")
288-
289-
290228
async def run_sample():
291229
"""
292230
Run the async autoscale throughput management sample.
@@ -312,10 +250,7 @@ async def run_sample():
312250

313251
# 5. Read updated settings
314252
await read_autoscale_throughput(database, container)
315-
316-
# 6. Demonstrate concurrent operations
317-
await demonstrate_concurrent_throughput_operations(database)
318-
253+
319254
# Cleanup
320255
print("\n" + "=" * 70)
321256
print("Cleaning up resources...")

0 commit comments

Comments (0)