diff --git a/src/sagemaker/processing.py b/src/sagemaker/processing.py
index 7beef2e5bd..eda4ffc01e 100644
--- a/src/sagemaker/processing.py
+++ b/src/sagemaker/processing.py
@@ -314,6 +314,7 @@ def _normalize_args(
                 "code argument has to be a valid S3 URI or local file path "
                 + "rather than a pipeline variable"
             )
+
         if arguments is not None:
             processed_arguments = []
             for arg in arguments:
@@ -321,7 +322,7 @@
                     processed_value = json.dumps(arg.expr)
                     processed_arguments.append(processed_value)
                 else:
-                    processed_arguments.append(str(arg))
+                    processed_arguments.append(arg)
             arguments = processed_arguments

         self._current_job_name = self._generate_current_job_name(job_name=job_name)
diff --git a/tests/unit/sagemaker/workflow/test_processing_step.py b/tests/unit/sagemaker/workflow/test_processing_step.py
index f94e0791cb..9ee8242a45 100644
--- a/tests/unit/sagemaker/workflow/test_processing_step.py
+++ b/tests/unit/sagemaker/workflow/test_processing_step.py
@@ -824,12 +824,14 @@ def test_spark_processor(spark_processor, processing_input, pipeline_session):
     processor, run_inputs = spark_processor
     processor.sagemaker_session = pipeline_session
     processor.role = ROLE
+
     arguments_output = [
         "--input",
         "input-data-uri",
         "--output",
         '{"Get": "Parameters.MyArgOutput"}',
     ]
+
     run_inputs["inputs"] = processing_input

     step_args = processor.run(**run_inputs)
@@ -1024,6 +1026,7 @@ def test_spark_processor_local_code(spark_processor, processing_input, pipeline_
     processor, run_inputs = spark_processor
     processor.sagemaker_session = pipeline_session
     processor.role = ROLE
+
     arguments_output = [
         "--input",
         "input-data-uri",