
Commit 5608040

Additional bug fixes for tests
1 parent fef17bd commit 5608040

File tree

3 files changed: 14 additions, 12 deletions

sagemaker-core/pyproject.toml
sagemaker-serve/tests/integ/test_model_customization_deployment.py
sagemaker-train/src/sagemaker/train/evaluate/benchmark_evaluator.py

sagemaker-core/pyproject.toml

Lines changed: 1 addition & 0 deletions

@@ -37,6 +37,7 @@ dependencies = [
     # Remote function dependencies
     "cloudpickle>=2.0.0",
     "paramiko>=2.11.0",
+    "tblib>=1.7.0",
 ]
 requires-python = ">=3.9"
 classifiers = [
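
On the new tblib dependency: tblib's usual job in remote-execution stacks is making traceback objects picklable, so a worker process can ship an exception back to the client with its original stack attached. That would explain its placement under the remote-function dependencies, though the commit message doesn't say so; treat the rationale as an assumption. A minimal sketch of the mechanism, using tblib's documented pickling_support API:

    import pickle
    import sys

    import tblib.pickling_support

    # Register pickle support for traceback objects. Plain tracebacks are
    # not picklable, so without this an exception cannot cross a process
    # boundary with its stack intact.
    tblib.pickling_support.install()

    def run_remotely():
        """Stand-in for a function executed on a remote worker."""
        try:
            raise ValueError("failure inside the remote function")
        except ValueError:
            # With pickling_support installed, the whole exc_info triple
            # (type, value, traceback) round-trips through pickle.
            return pickle.dumps(sys.exc_info())

    exc_type, exc_value, exc_tb = pickle.loads(run_remotely())
    # The client side can now re-raise with the remote stack attached:
    # raise exc_value.with_traceback(exc_tb)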

sagemaker-serve/tests/integ/test_model_customization_deployment.py

Lines changed: 2 additions & 0 deletions

@@ -577,13 +577,15 @@ def training_job(self, setup_region):
             session=session,
             region="us-east-1")

+    @pytest.mark.skip(reason="Bedrock Nova deployment test skipped per team decision")
     def test_bedrock_model_builder_creation(self, training_job):
         """Test BedrockModelBuilder creation with Nova model."""
         bedrock_builder = BedrockModelBuilder(model=training_job)
         assert bedrock_builder is not None
         assert bedrock_builder.model == training_job
         assert bedrock_builder.s3_model_artifacts is not None

+    @pytest.mark.skip(reason="Bedrock Nova deployment test skipped per team decision")
     @pytest.mark.slow
     def test_nova_model_deployment(self, training_job):
         """Test Nova model deployment to Bedrock."""

sagemaker-train/src/sagemaker/train/evaluate/benchmark_evaluator.py

Lines changed: 11 additions & 12 deletions

@@ -360,13 +360,7 @@ def _validate_subtasks(cls, v, values):
                     f"Subtask list cannot be empty for benchmark '{benchmark.value}'. "
                     f"Provide at least one subtask or use 'ALL'."
                 )
-            if len(v) > 1 :
-                raise ValueError(
-                    f"Currently only one subtask is supported for benchmark '{benchmark.value}'. "
-                    f"Provide only one subtask or use 'ALL'."
-                )

-            # TODO : Should support list of subtasks.
             # Validate each subtask in the list
             for subtask in v:
                 if not isinstance(subtask, str):
@@ -509,7 +503,7 @@ def _resolve_subtask_for_evaluation(self, subtask: Optional[Union[str, List[str]
         # Use provided subtask or fall back to constructor subtasks
         eval_subtask = subtask if subtask is not None else self.subtasks

-        if eval_subtask is None or eval_subtask.upper() == "ALL":
+        if eval_subtask is None or (isinstance(eval_subtask, str) and eval_subtask.upper() == "ALL"):
             #TODO : Check All Vs None subtask for evaluation
             return None

@@ -528,11 +522,13 @@ def _resolve_subtask_for_evaluation(self, subtask: Optional[Union[str, List[str]
                     f"Subtask list cannot be empty for benchmark '{self.benchmark.value}'. "
                     f"Provide at least one subtask or use 'ALL'."
                 )
-            if len(eval_subtask) > 1:
-                raise ValueError(
-                    f"Currently only one subtask is supported for benchmark '{self.benchmark.value}'. "
-                    f"Provide only one subtask or use 'ALL'."
-                )
+            # Validate each subtask in the list
+            for st in eval_subtask:
+                if config.get("subtasks") and st not in config["subtasks"]:
+                    raise ValueError(
+                        f"Invalid subtask '{st}' for benchmark '{self.benchmark.value}'. "
+                        f"Available subtasks: {', '.join(config['subtasks'])}"
+                    )


         return eval_subtask
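
Worth spelling out why the isinstance guard in the first hunk matters: eval_subtask can be either a str or a List[str], and list has no .upper() method, so the old check raised AttributeError whenever subtasks arrived as a list. A standalone before/after sketch (function names are illustrative, not from the evaluator):

    def resolve_old(eval_subtask):
        # Pre-fix logic: silently assumes eval_subtask is a string.
        if eval_subtask is None or eval_subtask.upper() == "ALL":
            return None
        return eval_subtask

    def resolve_new(eval_subtask):
        # Post-fix logic: only calls .upper() on actual strings.
        if eval_subtask is None or (isinstance(eval_subtask, str) and eval_subtask.upper() == "ALL"):
            return None
        return eval_subtask

    print(resolve_new("all"))                     # None (case-insensitive 'ALL')
    print(resolve_new(["anatomy", "astronomy"]))  # ['anatomy', 'astronomy']
    # resolve_old(["anatomy"]) raises AttributeError:
    # 'list' object has no attribute 'upper'

The replacement of the single-subtask restriction with a per-item membership check in the second hunk is what makes the list path reachable at all; together the two hunks lift the one-subtask limit.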
@@ -568,6 +564,9 @@ def _get_benchmark_template_additions(self, eval_subtask: Optional[Union[str, Li

         if isinstance(eval_subtask, str):
             benchmark_context['subtask'] = eval_subtask
+        elif isinstance(eval_subtask, list):
+            # Convert list to comma-separated string
+            benchmark_context['subtask'] = ','.join(eval_subtask)

         # Add all configured hyperparameters
         for key in configured_params.keys():
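
The new elif mirrors the resolver change: once a subtask list survives validation, it is flattened into a comma-separated string for the template context. A quick standalone sketch of the resulting values (the helper and subtask names are made up for illustration):

    def build_subtask_context(eval_subtask):
        # Mirrors the template-additions branch in the diff above.
        benchmark_context = {}
        if isinstance(eval_subtask, str):
            benchmark_context['subtask'] = eval_subtask
        elif isinstance(eval_subtask, list):
            # Convert list to comma-separated string
            benchmark_context['subtask'] = ','.join(eval_subtask)
        return benchmark_context

    print(build_subtask_context("abstract_algebra"))
    # {'subtask': 'abstract_algebra'}
    print(build_subtask_context(["anatomy", "astronomy"]))
    # {'subtask': 'anatomy,astronomy'}
    print(build_subtask_context(None))
    # {} - the ALL/None case adds no subtask key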
