Skip to content

Commit 6d56404

Browse files
committed: remove parse_obj
1 parent 98ca8ce · commit 6d56404

14 files changed: +129 additions, −116 deletions

services/autoscaling/src/simcore_service_autoscaling/modules/auto_scaling_mode_computational.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -145,15 +145,15 @@ async def compute_node_used_resources(
145145
async def compute_cluster_used_resources(
146146
app: FastAPI, instances: list[AssociatedInstance]
147147
) -> Resources:
148-
list_of_used_resources = await logged_gather(
148+
list_of_used_resources: list[Resources] = await logged_gather(
149149
*(
150150
ComputationalAutoscaling.compute_node_used_resources(app, i)
151151
for i in instances
152152
)
153153
)
154154
counter = collections.Counter({k: 0 for k in Resources.model_fields})
155155
for result in list_of_used_resources:
156-
counter.update(result.dict())
156+
counter.update(result.model_dump())
157157
return Resources.model_validate(dict(counter))
158158

159159
@staticmethod

services/autoscaling/src/simcore_service_autoscaling/modules/dask.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
from dask_task_models_library.resource_constraints import DaskTaskResources
1414
from distributed.core import Status
1515
from models_library.clusters import InternalClusterAuthentication, TLSAuthentication
16-
from pydantic import AnyUrl, ByteSize, parse_obj_as
16+
from pydantic import AnyUrl, ByteSize, TypeAdapter
1717

1818
from ..core.errors import (
1919
DaskNoWorkersError,
@@ -291,7 +291,9 @@ def _list_processing_tasks_on_worker(
291291
_logger.debug("found %s for %s", f"{total_resources_used=}", f"{worker_url=}")
292292
return Resources(
293293
cpus=total_resources_used.get("CPU", 0),
294-
ram=parse_obj_as(ByteSize, total_resources_used.get("RAM", 0)),
294+
ram=TypeAdapter(ByteSize).validate_python(
295+
total_resources_used.get("RAM", 0)
296+
),
295297
)
296298

297299

services/autoscaling/src/simcore_service_autoscaling/utils/rabbitmq.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -86,8 +86,8 @@ async def create_autoscaling_status_message(
8686
+ len(cluster.buffer_drained_nodes),
8787
nodes_active=len(cluster.active_nodes),
8888
nodes_drained=len(cluster.drained_nodes) + len(cluster.buffer_drained_nodes),
89-
cluster_total_resources=cluster_total_resources.dict(),
90-
cluster_used_resources=cluster_used_resources.dict(),
89+
cluster_total_resources=cluster_total_resources.model_dump(),
90+
cluster_used_resources=cluster_used_resources.model_dump(),
9191
instances_pending=len(cluster.pending_ec2s),
9292
instances_running=len(cluster.active_nodes)
9393
+ len(cluster.drained_nodes)

services/autoscaling/src/simcore_service_autoscaling/utils/utils_docker.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -358,7 +358,9 @@ async def compute_node_used_resources(
358358
and task.spec.resources
359359
and task.spec.resources.reservations
360360
):
361-
task_reservations = task.spec.resources.reservations.dict(exclude_none=True)
361+
task_reservations = task.spec.resources.reservations.model_dump(
362+
exclude_none=True
363+
)
362364
cluster_resources_counter.update(
363365
{
364366
"ram": task_reservations.get("MemoryBytes", 0),
@@ -377,7 +379,7 @@ async def compute_cluster_used_resources(
377379
)
378380
counter = collections.Counter({k: 0 for k in Resources.model_fields})
379381
for result in list_of_used_resources:
380-
counter.update(result.dict())
382+
counter.update(result.model_dump())
381383

382384
return Resources.model_validate(dict(counter))
383385

services/autoscaling/tests/unit/conftest.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -119,7 +119,7 @@ def mocked_ec2_server_envs(
119119
# NOTE: overrides the EC2Settings with what autoscaling expects
120120
changed_envs: EnvVarsDict = {
121121
f"{AUTOSCALING_ENV_PREFIX}{k}": v
122-
for k, v in mocked_ec2_server_settings.dict().items()
122+
for k, v in mocked_ec2_server_settings.model_dump().items()
123123
}
124124
return setenvs_from_dict(monkeypatch, changed_envs) # type: ignore
125125

services/autoscaling/tests/unit/test_models.py

Lines changed: 5 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
1010
import pytest
1111
from models_library.docker import DockerLabelKey, StandardSimcoreDockerLabels
1212
from models_library.generated_models.docker_rest_api import Service, Task
13-
from pydantic import ValidationError, parse_obj_as
13+
from pydantic import TypeAdapter, ValidationError
1414

1515

1616
async def test_get_simcore_service_docker_labels_from_task_with_missing_labels_raises(
@@ -20,11 +20,10 @@ async def test_get_simcore_service_docker_labels_from_task_with_missing_labels_r
2020
):
2121
service_missing_osparc_labels = await create_service(task_template, {}, "running")
2222
assert service_missing_osparc_labels.spec
23-
service_tasks = parse_obj_as(
24-
list[Task],
23+
service_tasks = TypeAdapter(list[Task]).validate_python(
2524
await async_docker_client.tasks.list(
2625
filters={"service": service_missing_osparc_labels.spec.name}
27-
),
26+
)
2827
)
2928
assert service_tasks
3029
assert len(service_tasks) == 1
@@ -46,11 +45,10 @@ async def test_get_simcore_service_docker_labels(
4645
"running",
4746
)
4847
assert service_with_labels.spec
49-
service_tasks = parse_obj_as(
50-
list[Task],
48+
service_tasks = TypeAdapter(list[Task]).validate_python(
5149
await async_docker_client.tasks.list(
5250
filters={"service": service_with_labels.spec.name}
53-
),
51+
)
5452
)
5553
assert service_tasks
5654
assert len(service_tasks) == 1

services/autoscaling/tests/unit/test_modules_auto_scaling_computational.py

Lines changed: 18 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@
3131
from models_library.generated_models.docker_rest_api import Node as DockerNode
3232
from models_library.generated_models.docker_rest_api import NodeState, NodeStatus
3333
from models_library.rabbitmq_messages import RabbitAutoscalingStatusMessage
34-
from pydantic import ByteSize, parse_obj_as
34+
from pydantic import ByteSize, TypeAdapter
3535
from pytest_mock import MockerFixture
3636
from pytest_simcore.helpers.aws_ec2 import assert_autoscaled_computational_ec2_instances
3737
from pytest_simcore.helpers.monkeypatch_envs import EnvVarsDict, setenvs_from_dict
@@ -109,12 +109,12 @@ def _assert_rabbit_autoscaling_message_sent(
109109
nodes_total=0,
110110
nodes_active=0,
111111
nodes_drained=0,
112-
cluster_total_resources=Resources.create_as_empty().dict(),
113-
cluster_used_resources=Resources.create_as_empty().dict(),
112+
cluster_total_resources=Resources.create_as_empty().model_dump(),
113+
cluster_used_resources=Resources.create_as_empty().model_dump(),
114114
instances_pending=0,
115115
instances_running=0,
116116
)
117-
expected_message = default_message.copy(update=message_update_kwargs)
117+
expected_message = default_message.model_copy(update=message_update_kwargs)
118118
mock_rabbitmq_post_message.assert_called_once_with(
119119
app,
120120
expected_message,
@@ -241,7 +241,9 @@ async def test_cluster_scaling_with_task_with_too_much_resources_starts_nothing(
241241
dask_spec_local_cluster: distributed.SpecCluster,
242242
):
243243
# create a task that needs too much power
244-
dask_future = create_dask_task({"RAM": int(parse_obj_as(ByteSize, "12800GiB"))})
244+
dask_future = create_dask_task(
245+
{"RAM": int(TypeAdapter(ByteSize).validate_python("12800GiB"))}
246+
)
245247
assert dask_future
246248

247249
await auto_scale_cluster(
@@ -317,8 +319,7 @@ async def _create_task_with_resources(
317319
assert instance_types["InstanceTypes"]
318320
assert "MemoryInfo" in instance_types["InstanceTypes"][0]
319321
assert "SizeInMiB" in instance_types["InstanceTypes"][0]["MemoryInfo"]
320-
dask_ram = parse_obj_as(
321-
ByteSize,
322+
dask_ram = TypeAdapter(ByteSize).validate_python(
322323
f"{instance_types['InstanceTypes'][0]['MemoryInfo']['SizeInMiB']}MiB",
323324
)
324325
dask_task_resources = create_dask_task_resources(
@@ -335,7 +336,7 @@ async def _create_task_with_resources(
335336
[
336337
pytest.param(
337338
None,
338-
parse_obj_as(ByteSize, "128Gib"),
339+
TypeAdapter(ByteSize).validate_python("128Gib"),
339340
"r5n.4xlarge",
340341
id="No explicit instance defined",
341342
),
@@ -347,7 +348,7 @@ async def _create_task_with_resources(
347348
),
348349
pytest.param(
349350
"r5n.8xlarge",
350-
parse_obj_as(ByteSize, "116Gib"),
351+
TypeAdapter(ByteSize).validate_python("116Gib"),
351352
"r5n.8xlarge",
352353
id="Explicitely ask for r5n.8xlarge and set the resources",
353354
),
@@ -751,7 +752,7 @@ async def test_cluster_does_not_scale_up_if_defined_instance_is_not_allowed(
751752

752753
# create a task that needs more power
753754
dask_task_resources = create_dask_task_resources(
754-
faker.pystr(), parse_obj_as(ByteSize, "128GiB")
755+
faker.pystr(), TypeAdapter(ByteSize).validate_python("128GiB")
755756
)
756757
dask_future = create_dask_task(dask_task_resources)
757758
assert dask_future
@@ -787,7 +788,7 @@ async def test_cluster_does_not_scale_up_if_defined_instance_is_not_fitting_reso
787788

788789
# create a task that needs more power
789790
dask_task_resources = create_dask_task_resources(
790-
"t2.xlarge", parse_obj_as(ByteSize, "128GiB")
791+
"t2.xlarge", TypeAdapter(ByteSize).validate_python("128GiB")
791792
)
792793
dask_future = create_dask_task(dask_task_resources)
793794
assert dask_future
@@ -817,7 +818,8 @@ class _ScaleUpParams:
817818

818819
def _dask_task_resources_from_resources(resources: Resources) -> DaskTaskResources:
819820
return {
820-
res_key.upper(): res_value for res_key, res_value in resources.dict().items()
821+
res_key.upper(): res_value
822+
for res_key, res_value in resources.model_dump().items()
821823
}
822824

823825

@@ -847,7 +849,9 @@ async def _change_parameters(*args, **kwargs) -> list[EC2InstanceData]:
847849
[
848850
pytest.param(
849851
_ScaleUpParams(
850-
task_resources=Resources(cpus=5, ram=parse_obj_as(ByteSize, "36Gib")),
852+
task_resources=Resources(
853+
cpus=5, ram=TypeAdapter(ByteSize).validate_python("36Gib")
854+
),
851855
num_tasks=10,
852856
expected_instance_type="g3.4xlarge",
853857
expected_num_instances=4,
@@ -1106,7 +1110,7 @@ async def test_cluster_scaling_up_more_than_allowed_with_multiple_types_max_star
11061110
[
11071111
pytest.param(
11081112
None,
1109-
parse_obj_as(ByteSize, "128Gib"),
1113+
TypeAdapter(ByteSize).validate_python("128Gib"),
11101114
"r5n.4xlarge",
11111115
id="No explicit instance defined",
11121116
),

services/autoscaling/tests/unit/test_modules_auto_scaling_dynamic.py

Lines changed: 22 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@
3535
Task,
3636
)
3737
from models_library.rabbitmq_messages import RabbitAutoscalingStatusMessage
38-
from pydantic import ByteSize, parse_obj_as
38+
from pydantic import ByteSize, TypeAdapter
3939
from pytest_mock import MockType
4040
from pytest_mock.plugin import MockerFixture
4141
from pytest_simcore.helpers.aws_ec2 import assert_autoscaled_dynamic_ec2_instances
@@ -130,7 +130,7 @@ def with_valid_time_before_termination(
130130
) -> datetime.timedelta:
131131
time = "00:11:00"
132132
monkeypatch.setenv("EC2_INSTANCES_TIME_BEFORE_TERMINATION", time)
133-
return parse_obj_as(datetime.timedelta, time)
133+
return TypeAdapter(datetime.timedelta).validate_python(time)
134134

135135

136136
@pytest.fixture
@@ -154,14 +154,14 @@ async def drained_host_node(
154154
"Role": host_node.spec.role.value,
155155
},
156156
)
157-
drained_node = parse_obj_as(
158-
Node, await async_docker_client.nodes.inspect(node_id=host_node.id)
157+
drained_node = TypeAdapter(Node).validate_python(
158+
await async_docker_client.nodes.inspect(node_id=host_node.id)
159159
)
160160
yield drained_node
161161
# revert
162162
# NOTE: getting the node again as the version might have changed
163-
drained_node = parse_obj_as(
164-
Node, await async_docker_client.nodes.inspect(node_id=host_node.id)
163+
drained_node = TypeAdapter(Node).validate_python(
164+
await async_docker_client.nodes.inspect(node_id=host_node.id)
165165
)
166166
assert drained_node.id
167167
assert drained_node.version
@@ -208,12 +208,12 @@ def _assert_rabbit_autoscaling_message_sent(
208208
nodes_total=0,
209209
nodes_active=0,
210210
nodes_drained=0,
211-
cluster_total_resources=Resources.create_as_empty().dict(),
212-
cluster_used_resources=Resources.create_as_empty().dict(),
211+
cluster_total_resources=Resources.create_as_empty().model_dump(),
212+
cluster_used_resources=Resources.create_as_empty().model_dump(),
213213
instances_pending=0,
214214
instances_running=0,
215215
)
216-
expected_message = default_message.copy(update=message_update_kwargs)
216+
expected_message = default_message.model_copy(update=message_update_kwargs)
217217
assert mock_rabbitmq_post_message.call_args == mock.call(app, expected_message)
218218

219219

@@ -907,7 +907,7 @@ async def _assert_wait_for_ec2_instances_terminated() -> None:
907907
_ScaleUpParams(
908908
imposed_instance_type=None,
909909
service_resources=Resources(
910-
cpus=4, ram=parse_obj_as(ByteSize, "128Gib")
910+
cpus=4, ram=TypeAdapter(ByteSize).validate_python("128Gib")
911911
),
912912
num_services=1,
913913
expected_instance_type="r5n.4xlarge",
@@ -918,7 +918,9 @@ async def _assert_wait_for_ec2_instances_terminated() -> None:
918918
pytest.param(
919919
_ScaleUpParams(
920920
imposed_instance_type="t2.xlarge",
921-
service_resources=Resources(cpus=4, ram=parse_obj_as(ByteSize, "4Gib")),
921+
service_resources=Resources(
922+
cpus=4, ram=TypeAdapter(ByteSize).validate_python("4Gib")
923+
),
922924
num_services=1,
923925
expected_instance_type="t2.xlarge",
924926
expected_num_instances=1,
@@ -929,7 +931,7 @@ async def _assert_wait_for_ec2_instances_terminated() -> None:
929931
_ScaleUpParams(
930932
imposed_instance_type="r5n.8xlarge",
931933
service_resources=Resources(
932-
cpus=4, ram=parse_obj_as(ByteSize, "128Gib")
934+
cpus=4, ram=TypeAdapter(ByteSize).validate_python("128Gib")
933935
),
934936
num_services=1,
935937
expected_instance_type="r5n.8xlarge",
@@ -998,7 +1000,7 @@ async def test_cluster_scaling_up_and_down(
9981000
_ScaleUpParams(
9991001
imposed_instance_type=None,
10001002
service_resources=Resources(
1001-
cpus=4, ram=parse_obj_as(ByteSize, "62Gib")
1003+
cpus=4, ram=TypeAdapter(ByteSize).validate_python("62Gib")
10021004
),
10031005
num_services=1,
10041006
expected_instance_type="r6a.2xlarge",
@@ -1084,7 +1086,7 @@ async def test_cluster_scaling_up_and_down_against_aws(
10841086
_ScaleUpParams(
10851087
imposed_instance_type=None,
10861088
service_resources=Resources(
1087-
cpus=5, ram=parse_obj_as(ByteSize, "36Gib")
1089+
cpus=5, ram=TypeAdapter(ByteSize).validate_python("36Gib")
10881090
),
10891091
num_services=10,
10901092
expected_instance_type="g3.4xlarge", # 1 GPU, 16 CPUs, 122GiB
@@ -1096,7 +1098,7 @@ async def test_cluster_scaling_up_and_down_against_aws(
10961098
_ScaleUpParams(
10971099
imposed_instance_type="g4dn.8xlarge",
10981100
service_resources=Resources(
1099-
cpus=5, ram=parse_obj_as(ByteSize, "20480MB")
1101+
cpus=5, ram=TypeAdapter(ByteSize).validate_python("20480MB")
11001102
),
11011103
num_services=7,
11021104
expected_instance_type="g4dn.8xlarge", # 1 GPU, 32 CPUs, 128GiB
@@ -1190,7 +1192,7 @@ async def test_cluster_scaling_up_starts_multiple_instances(
11901192
[
11911193
pytest.param(
11921194
None,
1193-
parse_obj_as(ByteSize, "128Gib"),
1195+
TypeAdapter(ByteSize).validate_python("128Gib"),
11941196
"r5n.4xlarge",
11951197
id="No explicit instance defined",
11961198
),
@@ -1452,11 +1454,10 @@ async def test__activate_drained_nodes_with_no_drained_nodes(
14521454
task_template_that_runs, {}, "running"
14531455
)
14541456
assert service_with_no_reservations.spec
1455-
service_tasks = parse_obj_as(
1456-
list[Task],
1457+
service_tasks = TypeAdapter(list[Task]).validate_python(
14571458
await autoscaling_docker.tasks.list(
14581459
filters={"service": service_with_no_reservations.spec.name}
1459-
),
1460+
)
14601461
)
14611462
assert service_tasks
14621463
assert len(service_tasks) == 1
@@ -1496,11 +1497,10 @@ async def test__activate_drained_nodes_with_drained_node(
14961497
task_template_that_runs, {}, "pending"
14971498
)
14981499
assert service_with_no_reservations.spec
1499-
service_tasks = parse_obj_as(
1500-
list[Task],
1500+
service_tasks = TypeAdapter(list[Task]).validate_python(
15011501
await autoscaling_docker.tasks.list(
15021502
filters={"service": service_with_no_reservations.spec.name}
1503-
),
1503+
)
15041504
)
15051505
assert service_tasks
15061506
assert len(service_tasks) == 1

0 commit comments

Comments (0)