diff --git a/scripts/maintenance/computational-clusters/autoscaled_monitor/cli.py b/scripts/maintenance/computational-clusters/autoscaled_monitor/cli.py index 1f7ac0b2c218..e9f936db19f3 100644 --- a/scripts/maintenance/computational-clusters/autoscaled_monitor/cli.py +++ b/scripts/maintenance/computational-clusters/autoscaled_monitor/cli.py @@ -129,6 +129,7 @@ def main( def summary( user_id: Annotated[int, typer.Option(help="filters by the user ID")] = 0, wallet_id: Annotated[int, typer.Option(help="filters by the wallet ID")] = 0, + as_json: Annotated[bool, typer.Option(help="outputs as json")] = False, ) -> None: """Show a summary of the current situation of autoscaled EC2 instances. @@ -140,7 +141,9 @@ def summary( """ - if not asyncio.run(api.summary(state, user_id or None, wallet_id or None)): + if not asyncio.run( + api.summary(state, user_id or None, wallet_id or None, output_json=as_json) + ): raise typer.Exit(1) @@ -152,7 +155,7 @@ def cancel_jobs( typer.Option(help="the wallet ID"), ] = None, *, - force: Annotated[ + abort_in_db: Annotated[ bool, typer.Option( help="will also force the job to abort in the database (use only if job is in WAITING FOR CLUSTER/WAITING FOR RESOURCE)" @@ -166,14 +169,16 @@ def cancel_jobs( Keyword Arguments: user_id -- the user ID wallet_id -- the wallet ID + abort_in_db -- will also force the job to abort in the database (use only if job is in WAITING FOR CLUSTER/WAITING FOR RESOURCE) """ - asyncio.run(api.cancel_jobs(state, user_id, wallet_id, force=force)) + asyncio.run(api.cancel_jobs(state, user_id, wallet_id, abort_in_db=abort_in_db)) @app.command() def trigger_cluster_termination( user_id: Annotated[int, typer.Option(help="the user ID")], wallet_id: Annotated[int, typer.Option(help="the wallet ID")], + force: Annotated[bool, typer.Option(help="will not ask for confirmation")] = False, ) -> None: """this will set the Heartbeat tag on the primary machine to 1 hour, thus ensuring the clusters-keeper will properly terminate that cluster. @@ -181,8 +186,9 @@ def trigger_cluster_termination( Keyword Arguments: user_id -- the user ID wallet_id -- the wallet ID + force -- will not ask for confirmation (VERY RISKY! USE WITH CAUTION!) 
""" - asyncio.run(api.trigger_cluster_termination(state, user_id, wallet_id)) + asyncio.run(api.trigger_cluster_termination(state, user_id, wallet_id, force=force)) @app.command() diff --git a/scripts/maintenance/computational-clusters/autoscaled_monitor/core.py b/scripts/maintenance/computational-clusters/autoscaled_monitor/core.py index 9801b3998697..b6be4b74f612 100755 --- a/scripts/maintenance/computational-clusters/autoscaled_monitor/core.py +++ b/scripts/maintenance/computational-clusters/autoscaled_monitor/core.py @@ -413,7 +413,61 @@ async def _parse_dynamic_instances( return dynamic_instances -async def summary(state: AppState, user_id: int | None, wallet_id: int | None) -> bool: +def _print_summary_as_json( + dynamic_instances: list[DynamicInstance], + computational_clusters: list[ComputationalCluster], +) -> None: + result = { + "dynamic_instances": [ + { + "name": instance.name, + "ec2_instance_id": instance.ec2_instance.instance_id, + "running_services": [ + { + "user_id": service.user_id, + "project_id": service.project_id, + "node_id": service.node_id, + "service_name": service.service_name, + "service_version": service.service_version, + "created_at": service.created_at.isoformat(), + "needs_manual_intervention": service.needs_manual_intervention, + } + for service in instance.running_services + ], + "disk_space": instance.disk_space.human_readable(), + } + for instance in dynamic_instances + ], + "computational_clusters": [ + { + "primary": { + "name": cluster.primary.name, + "ec2_instance_id": cluster.primary.ec2_instance.instance_id, + "user_id": cluster.primary.user_id, + "wallet_id": cluster.primary.wallet_id, + "disk_space": cluster.primary.disk_space.human_readable(), + "last_heartbeat": cluster.primary.last_heartbeat.isoformat(), + }, + "workers": [ + { + "name": worker.name, + "ec2_instance_id": worker.ec2_instance.instance_id, + "disk_space": worker.disk_space.human_readable(), + } + for worker in cluster.workers + ], + "datasets": cluster.datasets, + "tasks": cluster.task_states_to_tasks, + } + for cluster in computational_clusters + ], + } + rich.print_json(json.dumps(result)) + + +async def summary( + state: AppState, user_id: int | None, wallet_id: int | None, *, output_json: bool +) -> bool: # get all the running instances assert state.ec2_resource_autoscaling dynamic_instances = await ec2.list_dynamic_instances_from_ec2( @@ -422,19 +476,6 @@ async def summary(state: AppState, user_id: int | None, wallet_id: int | None) - dynamic_autoscaled_instances = await _parse_dynamic_instances( state, dynamic_instances, state.ssh_key_path, user_id, wallet_id ) - _print_dynamic_instances( - dynamic_autoscaled_instances, - state.environment, - state.ec2_resource_autoscaling.meta.client.meta.region_name, - ) - - time_threshold = arrow.utcnow().shift(minutes=-30).datetime - - dynamic_services_in_error = any( - service.needs_manual_intervention and service.created_at < time_threshold - for instance in dynamic_autoscaled_instances - for service in instance.running_services - ) assert state.ec2_resource_clusters_keeper computational_instances = await ec2.list_computational_instances_from_ec2( @@ -443,10 +484,27 @@ async def summary(state: AppState, user_id: int | None, wallet_id: int | None) - computational_clusters = await _parse_computational_clusters( state, computational_instances, state.ssh_key_path, user_id, wallet_id ) - _print_computational_clusters( - computational_clusters, - state.environment, - state.ec2_resource_clusters_keeper.meta.client.meta.region_name, + + if 
output_json: + _print_summary_as_json(dynamic_autoscaled_instances, computational_clusters) + + if not output_json: + _print_dynamic_instances( + dynamic_autoscaled_instances, + state.environment, + state.ec2_resource_autoscaling.meta.client.meta.region_name, + ) + _print_computational_clusters( + computational_clusters, + state.environment, + state.ec2_resource_clusters_keeper.meta.client.meta.region_name, + ) + + time_threshold = arrow.utcnow().shift(minutes=-30).datetime + dynamic_services_in_error = any( + service.needs_manual_intervention and service.created_at < time_threshold + for instance in dynamic_autoscaled_instances + for service in instance.running_services ) return not dynamic_services_in_error @@ -504,29 +562,48 @@ async def _list_computational_clusters( ) -async def cancel_jobs( # noqa: C901, PLR0912 - state: AppState, user_id: int, wallet_id: int | None, *, force: bool +async def _cancel_all_jobs( + state: AppState, + the_cluster: ComputationalCluster, + *, + task_to_dask_job: list[tuple[ComputationalTask | None, DaskTask | None]], + abort_in_db: bool, ) -> None: - # get the theory - computational_tasks = await db.list_computational_tasks_from_db(state, user_id) + rich.print("cancelling all tasks") + for comp_task, dask_task in task_to_dask_job: + if dask_task is not None and dask_task.state != "unknown": + await dask.trigger_job_cancellation_in_scheduler( + state, + the_cluster, + dask_task.job_id, + ) + if comp_task is None: + # we need to clear it of the cluster + await dask.remove_job_from_scheduler( + state, + the_cluster, + dask_task.job_id, + ) + if comp_task is not None and abort_in_db: + await db.abort_job_in_db(state, comp_task.project_id, comp_task.node_id) - # get the reality - computational_clusters = await _list_computational_clusters( - state, user_id, wallet_id - ) - job_id_to_dask_state: dict[TaskId, TaskState] = {} - if computational_clusters: - assert ( - len(computational_clusters) == 1 - ), "too many clusters found! 
TIP: fix this code or something weird is playing out" + rich.print("cancelled all tasks") - the_cluster = computational_clusters[0] - rich.print(f"{the_cluster.task_states_to_tasks=}") - for job_state, job_ids in the_cluster.task_states_to_tasks.items(): - for job_id in job_ids: - job_id_to_dask_state[job_id] = job_state +async def _get_job_id_to_dask_state_from_cluster( + cluster: ComputationalCluster, +) -> dict[TaskId, TaskState]: + job_id_to_dask_state: dict[TaskId, TaskState] = {} + for job_state, job_ids in cluster.task_states_to_tasks.items(): + for job_id in job_ids: + job_id_to_dask_state[job_id] = job_state + return job_id_to_dask_state + +async def _get_db_task_to_dask_job( + computational_tasks: list[ComputationalTask], + job_id_to_dask_state: dict[TaskId, TaskState], +) -> list[tuple[ComputationalTask | None, DaskTask | None]]: task_to_dask_job: list[tuple[ComputationalTask | None, DaskTask | None]] = [] for task in computational_tasks: dask_task = None @@ -539,6 +616,32 @@ async def cancel_jobs( # noqa: C901, PLR0912 # keep the jobs still in the cluster for job_id, dask_state in job_id_to_dask_state.items(): task_to_dask_job.append((None, DaskTask(job_id=job_id, state=dask_state))) + return task_to_dask_job + + +async def cancel_jobs( # noqa: C901, PLR0912 + state: AppState, user_id: int, wallet_id: int | None, *, abort_in_db: bool +) -> None: + # get the theory + computational_tasks = await db.list_computational_tasks_from_db(state, user_id) + + # get the reality + computational_clusters = await _list_computational_clusters( + state, user_id, wallet_id + ) + + if computational_clusters: + assert ( + len(computational_clusters) == 1 + ), "too many clusters found! TIP: fix this code or something weird is playing out" + + the_cluster = computational_clusters[0] + rich.print(f"{the_cluster.task_states_to_tasks=}") + + job_id_to_dask_state = await _get_job_id_to_dask_state_from_cluster(the_cluster) + task_to_dask_job: list[tuple[ComputationalTask | None, DaskTask | None]] = ( + await _get_db_task_to_dask_job(computational_tasks, job_id_to_dask_state) + ) if not task_to_dask_job: rich.print("[red]nothing found![/red]") @@ -554,27 +657,12 @@ async def cancel_jobs( # noqa: C901, PLR0912 if response == "none": rich.print("[yellow]not cancelling anything[/yellow]") elif response == "all": - rich.print("cancelling all tasks") - for comp_task, dask_task in task_to_dask_job: - if dask_task is not None and dask_task.state != "unknown": - await dask.trigger_job_cancellation_in_scheduler( - state, - the_cluster, - dask_task.job_id, - ) - if comp_task is None: - # we need to clear it of the cluster - await dask.remove_job_from_scheduler( - state, - the_cluster, - dask_task.job_id, - ) - if comp_task is not None and force: - await db.abort_job_in_db( - state, comp_task.project_id, comp_task.node_id - ) - - rich.print("cancelled all tasks") + await _cancel_all_jobs( + state, + the_cluster, + task_to_dask_job=task_to_dask_job, + abort_in_db=abort_in_db, + ) else: try: # Split the response and handle ranges @@ -597,7 +685,7 @@ async def cancel_jobs( # noqa: C901, PLR0912 state, the_cluster, dask_task.job_id ) - if comp_task is not None and force: + if comp_task is not None and abort_in_db: await db.abort_job_in_db( state, comp_task.project_id, comp_task.node_id ) @@ -616,7 +704,7 @@ async def cancel_jobs( # noqa: C901, PLR0912 async def trigger_cluster_termination( - state: AppState, user_id: int, wallet_id: int + state: AppState, user_id: int, wallet_id: int, *, force: bool ) -> None: assert 
state.ec2_resource_clusters_keeper computational_instances = await ec2.list_computational_instances_from_ec2( @@ -635,8 +723,20 @@ async def trigger_cluster_termination( state.environment, state.ec2_resource_clusters_keeper.meta.client.meta.region_name, ) - if typer.confirm("Are you sure you want to trigger termination of that cluster?"): + if (force is True) or typer.confirm( + "Are you sure you want to trigger termination of that cluster?" + ): the_cluster = computational_clusters[0] + + computational_tasks = await db.list_computational_tasks_from_db(state, user_id) + job_id_to_dask_state = await _get_job_id_to_dask_state_from_cluster(the_cluster) + task_to_dask_job: list[tuple[ComputationalTask | None, DaskTask | None]] = ( + await _get_db_task_to_dask_job(computational_tasks, job_id_to_dask_state) + ) + await _cancel_all_jobs( + state, the_cluster, task_to_dask_job=task_to_dask_job, abort_in_db=force + ) + new_heartbeat_tag: TagTypeDef = { "Key": "last_heartbeat", "Value": f"{arrow.utcnow().datetime - datetime.timedelta(hours=1)}", diff --git a/scripts/maintenance/computational-clusters/autoscaled_monitor/ssh.py b/scripts/maintenance/computational-clusters/autoscaled_monitor/ssh.py index 3caee6e3cb0a..0d3159f818fc 100644 --- a/scripts/maintenance/computational-clusters/autoscaled_monitor/ssh.py +++ b/scripts/maintenance/computational-clusters/autoscaled_monitor/ssh.py @@ -151,27 +151,48 @@ async def get_available_disk_space( async def get_dask_ip( state: AppState, instance: Instance, username: str, private_key_path: Path ) -> str: - try: async with ssh_instance( instance, state=state, username=username, private_key_path=private_key_path ) as ssh_client: - dask_ip_command = "docker inspect -f '{{.NetworkSettings.Networks.dask_stack_cluster.IPAddress}}' $(docker ps --filter 'name=dask-sidecar|dask-scheduler' --format '{{.ID}}')" + # First, get the container IDs for dask-sidecar or dask-scheduler + list_containers_command = "docker ps --filter 'name=dask-sidecar|dask-scheduler' --format '{{.ID}}'" + _, stdout, stderr = ssh_client.exec_command(list_containers_command) + container_ids = stdout.read().decode("utf-8").strip() + exit_status = stdout.channel.recv_exit_status() - # Run the command on the remote machine + if exit_status != 0 or not container_ids: + error_message = stderr.read().decode().strip() + _logger.warning( + "No matching containers found or command failed with exit status %s: %s", + exit_status, + error_message, + ) + return "No Containers Found / Not Ready" + + # If containers are found, inspect their IP addresses + dask_ip_command = ( + "docker inspect -f '{{.NetworkSettings.Networks.dask_stack_cluster.IPAddress}}' " + f"{container_ids}" + ) _, stdout, stderr = ssh_client.exec_command(dask_ip_command) exit_status = stdout.channel.recv_exit_status() + if exit_status != 0: error_message = stderr.read().decode().strip() _logger.error( - "Inspecting dask IP Command failed with exit status %s: %s", + "Inspecting Dask IP command failed with exit status %s: %s", exit_status, error_message, ) - return "Not Found / Drained / Not Ready" + return "No docker network found / Drained / Not Ready" - # Available disk space will be captured here - return stdout.read().decode("utf-8").strip() + ip_address = stdout.read().decode("utf-8").strip() + if not ip_address: + _logger.error("Dask IP address not found in the output") + return "No IP found / Drained / Not Ready" + + return ip_address except ( paramiko.AuthenticationException, paramiko.SSHException, @@ -209,7 +230,7 @@ async def
list_running_dyn_services( arrow.get( match["created_at"], "YYYY-MM-DD HH:mm:ss", - tzinfo=datetime.timezone.utc, + tzinfo=datetime.UTC, ).datetime, container, ( diff --git a/scripts/maintenance/computational-clusters/pyproject.toml b/scripts/maintenance/computational-clusters/pyproject.toml index acd2a5099751..55fe924fb1bf 100644 --- a/scripts/maintenance/computational-clusters/pyproject.toml +++ b/scripts/maintenance/computational-clusters/pyproject.toml @@ -20,6 +20,7 @@ dependencies = [ "sqlalchemy[asyncio]", "sshtunnel", "ansible>=10.7.0", + "lz4==4.3.3", ] name = "autoscaled-monitor" version = "1.0.0" diff --git a/scripts/maintenance/computational-clusters/uv.lock b/scripts/maintenance/computational-clusters/uv.lock index d2ac6d7f1733..f5c49567b8a9 100644 --- a/scripts/maintenance/computational-clusters/uv.lock +++ b/scripts/maintenance/computational-clusters/uv.lock @@ -186,6 +186,7 @@ dependencies = [ { name = "boto3" }, { name = "cloudpickle" }, { name = "dask", extra = ["distributed"] }, + { name = "lz4" }, { name = "mypy-boto3-ec2" }, { name = "paramiko" }, { name = "parse" }, @@ -209,6 +210,7 @@ requires-dist = [ { name = "boto3" }, { name = "cloudpickle", specifier = "==3.1.0" }, { name = "dask", extras = ["distributed"], specifier = "==2024.12.0" }, + { name = "lz4", specifier = "==4.3.3" }, { name = "mypy-boto3-ec2" }, { name = "paramiko" }, { name = "parse" }, @@ -687,6 +689,35 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/db/bc/83e112abc66cd466c6b83f99118035867cecd41802f8d044638aa78a106e/locket-1.0.0-py2.py3-none-any.whl", hash = "sha256:b6c819a722f7b6bd955b80781788e4a66a55628b858d347536b7e81325a3a5e3", size = 4398 }, ] +[[package]] +name = "lz4" +version = "4.3.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a4/31/ec1259ca8ad11568abaf090a7da719616ca96b60d097ccc5799cd0ff599c/lz4-4.3.3.tar.gz", hash = "sha256:01fe674ef2889dbb9899d8a67361e0c4a2c833af5aeb37dd505727cf5d2a131e", size = 171509 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ff/53/61258b5effac76dea5768b07042b2c3c56e15a91194cef92284a0dc0f5e7/lz4-4.3.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b891880c187e96339474af2a3b2bfb11a8e4732ff5034be919aa9029484cd201", size = 254266 }, + { url = "https://files.pythonhosted.org/packages/92/84/c243a5515950d72ff04220fd49903801825e4ac23691e19e7082d9d9f94b/lz4-4.3.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:222a7e35137d7539c9c33bb53fcbb26510c5748779364014235afc62b0ec797f", size = 212359 }, + { url = "https://files.pythonhosted.org/packages/10/26/5287564a909d069fdd6c25f2f420c58c5758993fa3ad2e064a7b610e6e5f/lz4-4.3.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f76176492ff082657ada0d0f10c794b6da5800249ef1692b35cf49b1e93e8ef7", size = 1237799 }, + { url = "https://files.pythonhosted.org/packages/cf/50/75c8f966dbcc524e7253f99b8e04c6cad7328f517eb0323abf8b4068f5bb/lz4-4.3.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1d18718f9d78182c6b60f568c9a9cec8a7204d7cb6fad4e511a2ef279e4cb05", size = 1263957 }, + { url = "https://files.pythonhosted.org/packages/91/54/0f61c77a9599beb14ac5b828e8da20a04c6eaadb4f3fdbd79a817c66eb74/lz4-4.3.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6cdc60e21ec70266947a48839b437d46025076eb4b12c76bd47f8e5eb8a75dcc", size = 1184035 }, + { url = 
"https://files.pythonhosted.org/packages/8e/84/3be7fad87d84b67cd43174d67fc567e0aa3be154f8b0a1c2c0ff8df30854/lz4-4.3.3-cp310-cp310-win32.whl", hash = "sha256:c81703b12475da73a5d66618856d04b1307e43428a7e59d98cfe5a5d608a74c6", size = 87235 }, + { url = "https://files.pythonhosted.org/packages/21/08/dc4714eb771b502deec8a714e40e5fbd2242bacd5fe55dcd29a0cb35c567/lz4-4.3.3-cp310-cp310-win_amd64.whl", hash = "sha256:43cf03059c0f941b772c8aeb42a0813d68d7081c009542301637e5782f8a33e2", size = 99781 }, + { url = "https://files.pythonhosted.org/packages/f9/f7/cfb942edd53c8a6aba168720ccf3d6a0cac3e891a7feba97d5823b5dd047/lz4-4.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:30e8c20b8857adef7be045c65f47ab1e2c4fabba86a9fa9a997d7674a31ea6b6", size = 254267 }, + { url = "https://files.pythonhosted.org/packages/71/ca/046bd7e7e1ed4639eb398192374bc3fbf5010d3c168361fec161b63e8bfa/lz4-4.3.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2f7b1839f795315e480fb87d9bc60b186a98e3e5d17203c6e757611ef7dcef61", size = 212353 }, + { url = "https://files.pythonhosted.org/packages/0c/c2/5beb6a7bb7fd27cd5fe5bb93c15636d30987794b161e4609fbf20dc3b5c7/lz4-4.3.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edfd858985c23523f4e5a7526ca6ee65ff930207a7ec8a8f57a01eae506aaee7", size = 1239095 }, + { url = "https://files.pythonhosted.org/packages/cf/d4/12915eb3083dfd1746d50b71b73334030b129cd25abbed9133dd2d413c21/lz4-4.3.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e9c410b11a31dbdc94c05ac3c480cb4b222460faf9231f12538d0074e56c563", size = 1265760 }, + { url = "https://files.pythonhosted.org/packages/94/7b/5e72b7504d7675b484812bfc65fe958f7649a64e0d6fe35c11812511f0b5/lz4-4.3.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d2507ee9c99dbddd191c86f0e0c8b724c76d26b0602db9ea23232304382e1f21", size = 1185451 }, + { url = "https://files.pythonhosted.org/packages/2f/b5/3726a678b3a0c64d24e71179e35e7ff8e3553da9d32c2fddce879d042b63/lz4-4.3.3-cp311-cp311-win32.whl", hash = "sha256:f180904f33bdd1e92967923a43c22899e303906d19b2cf8bb547db6653ea6e7d", size = 87232 }, + { url = "https://files.pythonhosted.org/packages/55/f9/69ed96043dae4d982286a4dda2feb473f49e95e4c90a928ec583d93769a2/lz4-4.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:b14d948e6dce389f9a7afc666d60dd1e35fa2138a8ec5306d30cd2e30d36b40c", size = 99794 }, + { url = "https://files.pythonhosted.org/packages/4d/6f/081811b17ccaec5f06b3030756af2737841447849118a6e1078481a78c6c/lz4-4.3.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e36cd7b9d4d920d3bfc2369840da506fa68258f7bb176b8743189793c055e43d", size = 254213 }, + { url = "https://files.pythonhosted.org/packages/53/4d/8e04ef75feff8848ba3c624ce81c7732bdcea5f8f994758afa88cd3d7764/lz4-4.3.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:31ea4be9d0059c00b2572d700bf2c1bc82f241f2c3282034a759c9a4d6ca4dc2", size = 212354 }, + { url = "https://files.pythonhosted.org/packages/a3/04/257a72d6a879dbc8c669018989f776fcdd5b4bf3c2c51c09a54f1ca31721/lz4-4.3.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33c9a6fd20767ccaf70649982f8f3eeb0884035c150c0b818ea660152cf3c809", size = 1238643 }, + { url = "https://files.pythonhosted.org/packages/d9/93/4a7e489156fa7ded03ba9cde4a8ca7f373672b5787cac9a0391befa752a1/lz4-4.3.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bca8fccc15e3add173da91be8f34121578dc777711ffd98d399be35487c934bf", size = 1265014 }, + { url = 
"https://files.pythonhosted.org/packages/fd/a4/f84ebc23bc7602623b1b003b4e1120cbf86fb03a35c595c226be1985449b/lz4-4.3.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7d84b479ddf39fe3ea05387f10b779155fc0990125f4fb35d636114e1c63a2e", size = 1184881 }, + { url = "https://files.pythonhosted.org/packages/de/3d/8ba48305378e84908221de143a21ba0c0ce52778893865cf85b66b1068da/lz4-4.3.3-cp312-cp312-win32.whl", hash = "sha256:337cb94488a1b060ef1685187d6ad4ba8bc61d26d631d7ba909ee984ea736be1", size = 87241 }, + { url = "https://files.pythonhosted.org/packages/c4/5d/7b70965a0692de29af2af1007fe837f46fd456bbe2aa8f838a8543a3b5cb/lz4-4.3.3-cp312-cp312-win_amd64.whl", hash = "sha256:5d35533bf2cee56f38ced91f766cd0038b6abf46f438a80d50c52750088be93f", size = 99776 }, +] + [[package]] name = "markdown-it-py" version = "3.0.0"