Skip to content

Commit 92477ff

Browse files
committed
use jumpstart deployment config image as default optimization image
1 parent a58654e commit 92477ff

File tree

4 files changed

+311
-5
lines changed

4 files changed

+311
-5
lines changed

src/sagemaker/serve/builder/jumpstart_builder.py

Lines changed: 108 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@
1717
import re
1818
from abc import ABC, abstractmethod
1919
from datetime import datetime, timedelta
20-
from typing import Type, Any, List, Dict, Optional
20+
from typing import Type, Any, List, Dict, Optional, Tuple
2121
import logging
2222

2323
from botocore.exceptions import ClientError
@@ -829,7 +829,13 @@ def _optimize_for_jumpstart(
829829
self.pysdk_model._enable_network_isolation = False
830830

831831
if quantization_config or sharding_config or is_compilation:
832-
return create_optimization_job_args
832+
# only apply default image for vLLM usecases.
833+
# vLLM does not support compilation for now so skip on compilation
834+
return (
835+
create_optimization_job_args
836+
if is_compilation
837+
else self._set_optimization_image_default(create_optimization_job_args)
838+
)
833839
return None
834840

835841
def _is_gated_model(self, model=None) -> bool:
@@ -986,3 +992,103 @@ def _get_neuron_model_env_vars(
986992
)
987993
return job_model.env
988994
return None
995+
996+
def _set_optimization_image_default(
    self, create_optimization_job_args: Dict[str, Any]
) -> Dict[str, Any]:
    """Defaults the optimization image to the JumpStart deployment config default.

    Scans the quantization and sharding configs for any user-provided image,
    keeps the newest LMI image seen (starting from the JumpStart deployment
    config default), then applies that single image to every config so the
    optimization job runs with one consistent image.

    Args:
        create_optimization_job_args (Dict[str, Any]): create optimization job request

    Returns:
        Dict[str, Any]: create optimization job request with image uri default
    """
    default_image = self._get_default_vllm_image(self.pysdk_model.init_kwargs["image_uri"])

    # Each optimization config entry carries at most one of these keys, and
    # both carry an optional "Image" — treat them uniformly.
    config_keys = ("ModelQuantizationConfig", "ModelShardingConfig")

    # Find the latest vLLM image version among any user-provided images.
    for optimization_config in create_optimization_job_args.get("OptimizationConfigs"):
        for config_key in config_keys:
            inner_config = optimization_config.get(config_key)
            if inner_config:
                provided_image = inner_config.get("Image")
                if provided_image and self._compare_lmi_versions(
                    default_image, provided_image
                ):
                    default_image = provided_image
                # mirror the original if/elif: stop at the first key present
                break

    # Default every config to the latest vLLM version found above.
    for optimization_config in create_optimization_job_args.get("OptimizationConfigs"):
        for config_key in config_keys:
            if optimization_config.get(config_key) is not None:
                optimization_config[config_key]["Image"] = default_image
                break

    # Lazy %-style args avoid formatting the (potentially large) request dict
    # when INFO logging is disabled.
    logger.info(
        "Defaulting to %s image for optimization %s",
        default_image,
        create_optimization_job_args,
    )

    return create_optimization_job_args
1034+
1035+
def _get_default_vllm_image(self, image: str) -> str:
    """Ensures the minimum working image version for vLLM enabled optimization techniques.

    Args:
        image (str): JumpStart provided default image

    Returns:
        str: the given image if it is at least LMI v13, otherwise the minimum
            working image version
    """
    dlc_name, _ = image.split(":")
    # _parse_lmi_version already returns ints — no extra int() conversion needed.
    major_version_number, _, _ = self._parse_lmi_version(image)

    # LMI versions below 13 predate the vLLM support this optimization path needs.
    if major_version_number < 13:
        return f"{dlc_name}:0.31.0-lmi13.0.0-cu124"
    return image
1051+
1052+
def _compare_lmi_versions(self, version: str, version_to_compare: str) -> bool:
    """LMI version comparator.

    Args:
        version (str): current version
        version_to_compare (str): version to compare to

    Returns:
        bool: True if version_to_compare is larger than or equal to version
    """
    # Tuple comparison is lexicographic over (major, minor, patch), which is
    # exactly the nested major/minor/patch cascade this replaces:
    # higher major wins; equal major defers to minor; equal minor defers to
    # patch with >=.
    return self._parse_lmi_version(version_to_compare) >= self._parse_lmi_version(version)
1079+
1080+
def _parse_lmi_version(self, image: str) -> Tuple[int, int, int]:
1081+
"""Parse out LMI version
1082+
1083+
Args:
1084+
image (str): image to parse version out of
1085+
1086+
Returns:
1087+
Tuple[int, int, it]: LMI version split into major, minor, patch
1088+
"""
1089+
dlc_name, dlc_tag = image.split(":")
1090+
_, lmi_version, _ = dlc_tag.split("-")
1091+
major_version, minor_version, patch_version = lmi_version.split(".")
1092+
major_version_number = major_version[3:]
1093+
1094+
return (int(major_version_number), int(minor_version), int(patch_version))

tests/integ/sagemaker/serve/test_serve_js_deep_unit_tests.py

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,8 @@ def test_js_model_with_optimize_speculative_decoding_config_gated_requests_are_e
3232
iam_client = sagemaker_session.boto_session.client("iam")
3333
role_arn = iam_client.get_role(RoleName=ROLE_NAME)["Role"]["Arn"]
3434

35+
sagemaker_session.sagemaker_client.create_optimization_job = MagicMock()
36+
3537
schema_builder = SchemaBuilder("test", "test")
3638
model_builder = ModelBuilder(
3739
model="meta-textgeneration-llama-3-1-8b-instruct",
@@ -50,6 +52,8 @@ def test_js_model_with_optimize_speculative_decoding_config_gated_requests_are_e
5052
accept_eula=True,
5153
)
5254

55+
assert not sagemaker_session.sagemaker_client.create_optimization_job.called
56+
5357
optimized_model.deploy()
5458

5559
mock_create_model.assert_called_once_with(
@@ -126,6 +130,13 @@ def test_js_model_with_optimize_sharding_and_resource_requirements_requests_are_
126130
accept_eula=True,
127131
)
128132

133+
assert (
134+
sagemaker_session.sagemaker_client.create_optimization_job.call_args_list[0][1][
135+
"OptimizationConfigs"
136+
][0]["ModelShardingConfig"]["Image"]
137+
is not None
138+
)
139+
129140
optimized_model.deploy(
130141
resources=ResourceRequirements(requests={"memory": 196608, "num_accelerators": 8})
131142
)
@@ -206,6 +217,13 @@ def test_js_model_with_optimize_quantization_on_pre_optimized_model_requests_are
206217
accept_eula=True,
207218
)
208219

220+
assert (
221+
sagemaker_session.sagemaker_client.create_optimization_job.call_args_list[0][1][
222+
"OptimizationConfigs"
223+
][0]["ModelQuantizationConfig"]["Image"]
224+
is not None
225+
)
226+
209227
optimized_model.deploy()
210228

211229
mock_create_model.assert_called_once_with(

0 commit comments

Comments
 (0)