 from models_library.services import ServiceMetaDataPublished
 from models_library.services_resources import BootMode
 from packaging import version
-from pydantic import AnyUrl, SecretStr, parse_obj_as
+from pydantic import AnyUrl, SecretStr, TypeAdapter
 from pytest_mock.plugin import MockerFixture
 from pytest_simcore.helpers.typing_env import EnvVarsDict
 from settings_library.s3 import S3Settings
@@ -178,7 +178,7 @@ def integration_version(request: pytest.FixtureRequest) -> version.Version:
 
 @pytest.fixture
 def additional_envs(faker: Faker) -> dict[EnvVarKey, str]:
-    return parse_obj_as(dict[EnvVarKey, str], faker.pydict(allowed_types=(str,)))
+    return TypeAdapter(dict[EnvVarKey, str]).validate_python(faker.pydict(allowed_types=(str,)))
 
 
 @pytest.fixture
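For reference, this is the core Pydantic v1 → v2 pattern applied throughout this file: the removed `parse_obj_as` helper becomes a `TypeAdapter` that validates plain Python objects. A minimal sketch, with illustrative types and values rather than the fixture's actual data:

```python
# Minimal sketch of the parse_obj_as -> TypeAdapter migration (illustrative
# types and values only, not taken from the test suite).
from pydantic import TypeAdapter

adapter = TypeAdapter(dict[str, str])

# Pydantic v1: parse_obj_as(dict[str, str], raw_envs)
# Pydantic v2: the adapter validates plain Python objects instead.
validated = adapter.validate_python({"MY_ENV": "some-value"})
assert validated == {"MY_ENV": "some-value"}
```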
@@ -198,7 +198,7 @@ def sleeper_task(
     list_of_files = [file_on_s3_server() for _ in range(NUM_FILES)]
 
     # defines the inputs of the task
-    input_data = TaskInputData.parse_obj(
+    input_data = TaskInputData.model_validate(
         {
             "input_1": 23,
             "input_23": "a string input",
@@ -276,7 +276,7 @@ def sleeper_task(
         "pytest_bool": False,
     }
     output_file_url = s3_remote_file_url(file_path="output_file")
-    expected_output_keys = TaskOutputDataSchema.parse_obj(
+    expected_output_keys = TaskOutputDataSchema.model_validate(
         {
             **(
                 {k: {"required": True} for k in jsonable_outputs}
@@ -295,7 +295,7 @@ def sleeper_task(
             ),
         }
     )
-    expected_output_data = TaskOutputData.parse_obj(
+    expected_output_data = TaskOutputData.model_validate(
         {
             **(
                 jsonable_outputs
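The same rename applies to model classmethods: `BaseModel.parse_obj()` is `model_validate()` in Pydantic v2. A small sketch with a hypothetical model, not the actual `TaskInputData`/`TaskOutputDataSchema` classes:

```python
# Sketch of the parse_obj -> model_validate rename (hypothetical model, not
# the TaskInputData/TaskOutputDataSchema classes used by the fixture).
from pydantic import BaseModel


class ExampleInput(BaseModel):
    input_1: int
    input_23: str


# Pydantic v1: ExampleInput.parse_obj({...})
data = ExampleInput.model_validate({"input_1": 23, "input_23": "a string input"})
assert data.input_1 == 23
```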
@@ -395,10 +395,10 @@ def _creator(command: list[str] | None = None) -> ServiceExampleParam:
         service_version="latest",
         command=command
         or ["/bin/bash", "-c", "echo 'hello I'm an empty ubuntu task!"],
-        input_data=TaskInputData.parse_obj({}),
-        output_data_keys=TaskOutputDataSchema.parse_obj({}),
+        input_data=TaskInputData.model_validate({}),
+        output_data_keys=TaskOutputDataSchema.model_validate({}),
         log_file_url=s3_remote_file_url(file_path="log.dat"),
-        expected_output_data=TaskOutputData.parse_obj({}),
+        expected_output_data=TaskOutputData.model_validate({}),
         expected_logs=[],
         integration_version=integration_version,
         task_envs={},
@@ -437,8 +437,8 @@ def caplog_info_level(
 def mocked_get_image_labels(
     integration_version: version.Version, mocker: MockerFixture
 ) -> mock.Mock:
-    labels: ImageLabels = parse_obj_as(
-        ImageLabels, ServiceMetaDataPublished.Config.schema_extra["examples"][0]
+    labels: ImageLabels = TypeAdapter(ImageLabels).validate_python(
+        ServiceMetaDataPublished.model_config["json_schema_extra"]["examples"][0],
     )
     labels.integration_version = f"{integration_version}"
     return mocker.patch(
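Two v2 changes meet in this hunk: class-level `Config.schema_extra` moves to `model_config["json_schema_extra"]`, and `parse_obj_as` is again replaced by a `TypeAdapter`. A sketch with a hypothetical model standing in for `ServiceMetaDataPublished`:

```python
# Sketch of reading schema examples under Pydantic v2 (hypothetical model; the
# test uses ServiceMetaDataPublished and ImageLabels instead).
from pydantic import BaseModel, ConfigDict


class ExampleMeta(BaseModel):
    model_config = ConfigDict(
        json_schema_extra={"examples": [{"name": "sleeper", "version": "1.0.0"}]}
    )

    name: str
    version: str


# Pydantic v1: ExampleMeta.Config.schema_extra["examples"][0]
first_example = ExampleMeta.model_config["json_schema_extra"]["examples"][0]
meta = ExampleMeta.model_validate(first_example)
assert meta.name == "sleeper"
```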
@@ -580,15 +580,15 @@ async def test_run_computational_sidecar_dask(
 
     # check that the task produces expected logs
     worker_progresses = [
-        TaskProgressEvent.parse_raw(msg).progress for msg in progress_sub.buffer
+        TaskProgressEvent.model_validate_json(msg).progress for msg in progress_sub.buffer
     ]
     # check ordering
     assert worker_progresses == sorted(
         set(worker_progresses)
     ), "ordering of progress values incorrectly sorted!"
     assert worker_progresses[0] == 0, "missing/incorrect initial progress value"
     assert worker_progresses[-1] == 1, "missing/incorrect final progress value"
-    worker_logs = [TaskLogEvent.parse_raw(msg).log for msg in log_sub.buffer]
+    worker_logs = [TaskLogEvent.model_validate_json(msg).log for msg in log_sub.buffer]
     print(f"<-- we got {len(worker_logs)} lines of logs")
 
     for log in sleeper_task.expected_logs:
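These assertions rely on the last v2 rename in this diff: `parse_raw()` becomes `model_validate_json()` for deserializing the JSON messages pulled off the pub/sub buffers. A sketch using a hypothetical event model rather than the real `TaskProgressEvent`:

```python
# Sketch of the parse_raw -> model_validate_json rename (hypothetical event
# model, not the real TaskProgressEvent/TaskLogEvent classes).
from pydantic import BaseModel


class ExampleProgressEvent(BaseModel):
    job_id: str
    progress: float


raw_msg = '{"job_id": "job-1", "progress": 0.5}'

# Pydantic v1: ExampleProgressEvent.parse_raw(raw_msg)
event = ExampleProgressEvent.model_validate_json(raw_msg)
assert event.progress == 0.5
```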
@@ -649,7 +649,7 @@ async def test_run_computational_sidecar_dask_does_not_lose_messages_with_pubsub
 
     # check that the task produces expected logs
     worker_progresses = [
-        TaskProgressEvent.parse_raw(msg).progress for msg in progress_sub.buffer
+        TaskProgressEvent.model_validate_json(msg).progress for msg in progress_sub.buffer
     ]
     # check length
     assert len(worker_progresses) == len(
@@ -659,7 +659,7 @@ async def test_run_computational_sidecar_dask_does_not_lose_messages_with_pubsub
     assert worker_progresses[0] == 0, "missing/incorrect initial progress value"
     assert worker_progresses[-1] == 1, "missing/incorrect final progress value"
 
-    worker_logs = [TaskLogEvent.parse_raw(msg).log for msg in log_sub.buffer]
+    worker_logs = [TaskLogEvent.model_validate_json(msg).log for msg in log_sub.buffer]
     # check all the awaited logs are in there
     filtered_worker_logs = filter(lambda log: "This is iteration" in log, worker_logs)
     assert len(list(filtered_worker_logs)) == NUMBER_OF_LOGS