Skip to content

Commit 8097312

Browse files
committed
Refactor logging statements and improve error handling in operators
- Updated logging statements in Llama3VILAInferenceOperator, MonaiBundleInferenceOperator, and VLMResultsWriterOperator to use more consistent formatting with repr for better clarity.
- Enhanced error handling in the pipeline generator by raising exceptions with context to improve debugging.

Signed-off-by: Victor Chang <[email protected]>
1 parent 0218aaf commit 8097312

File tree

5 files changed

+16
-15
lines changed

5 files changed

+16
-15
lines changed

monai/deploy/operators/llama3_vila_inference_operator.py

Lines changed: 7 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -159,10 +159,10 @@ def _generate_response(self, image_tensor: torch.Tensor, prompt: str, generation
159159
if self._mock_mode:
160160
# Mock response based on common medical VQA patterns
161161
mock_responses = {
162-
"what is this image showing": "This medical image shows anatomical structures with various tissue densities and contrast patterns.",
163-
"summarize key findings": "Key findings include: 1) Normal anatomical structures visible, 2) No obvious pathological changes detected, 3) Image quality is adequate for assessment.",
164-
"is there a focal lesion": "No focal lesion is identified in the visible field of view.",
165-
"describe the image": "This appears to be a medical imaging study showing cross-sectional anatomy with good tissue contrast.",
162+
"what is this image showing": "This medical image shows anatomical structures with various tissue densities and contrast patterns.", # noqa: B950
163+
"summarize key findings": "Key findings include: 1) Normal anatomical structures visible, 2) No obvious pathological changes detected, 3) Image quality is adequate for assessment.", # noqa: B950
164+
"is there a focal lesion": "No focal lesion is identified in the visible field of view.", # noqa: B950
165+
"describe the image": "This appears to be a medical imaging study showing cross-sectional anatomy with good tissue contrast.", # noqa: B950
166166
}
167167

168168
# Find best matching response
@@ -172,7 +172,7 @@ def _generate_response(self, image_tensor: torch.Tensor, prompt: str, generation
172172
return response
173173

174174
# Default response
175-
return f"Analysis of the medical image based on the prompt: '{prompt}'. [Mock response - actual model not loaded]"
175+
return f"Analysis of the medical image based on the prompt: {prompt!r}. [Mock response - actual model not loaded]"
176176

177177
# In a real implementation, you would:
178178
# 1. Tokenize the prompt
@@ -272,7 +272,7 @@ def compute(self, op_input, op_output, context):
272272
request_id = op_input.receive("request_id")
273273
generation_params = op_input.receive("generation_params")
274274

275-
self._logger.info(f"Processing request {request_id} with output type '{output_type}'")
275+
self._logger.info(f"Processing request {request_id} with output type {output_type!r}")
276276

277277
try:
278278
# Preprocess image
@@ -317,3 +317,4 @@ def compute(self, op_input, op_output, context):
317317
op_output.emit(error_result, "result")
318318
op_output.emit(output_type, "output_type")
319319
op_output.emit(request_id, "request_id")
320+
raise e from None

monai/deploy/operators/monai_bundle_inference_operator.py

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -836,7 +836,7 @@ def _receive_input(self, name: str, op_input, context):
836836
# No channel present (W, H, D)
837837
if expected_channels is not None and expected_channels > 1:
838838
raise ValueError(
839-
f"Input for '{name}' has no channel dimension but bundle expects {expected_channels} channels. "
839+
f"Input for '{name!r}' has no channel dimension but bundle expects {expected_channels} channels. "
840840
"Provide multi-channel input or add a transform to stack channels before inference."
841841
)
842842
# else expected 1 or unknown -> proceed without channel
@@ -854,12 +854,12 @@ def _receive_input(self, name: str, op_input, context):
854854
ndims = 3
855855
else:
856856
raise ValueError(
857-
f"Input for '{name}' has {actual_channels} channels but bundle expects {expected_channels}."
857+
f"Input for '{name!r}' has {actual_channels} channels but bundle expects {expected_channels}."
858858
)
859859
# else exact match or unknown -> keep as-is
860860
else:
861861
# Unsupported rank for medical image input
862-
raise ValueError(f"Unsupported input rank {ndims} for '{name}'. Expected 3D (W,H,D) or 4D (W,H,D,C).")
862+
raise ValueError(f"Unsupported input rank {ndims} for '{name!r}'. Expected 3D (W,H,D) or 4D (W,H,D,C).")
863863
value = torch.from_numpy(value).to(self._device)
864864
if metadata is None:
865865
metadata = {}

monai/deploy/operators/vlm_results_writer_operator.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -117,7 +117,7 @@ def compute(self, op_input, op_output, context):
117117
output_type = op_input.receive("output_type")
118118
request_id = op_input.receive("request_id")
119119

120-
self._logger.info(f"Writing result for request {request_id} with output type '{output_type}'")
120+
self._logger.info(f"Writing result for request {request_id} with output type {output_type!r}")
121121

122122
try:
123123
if output_type == "json":

tools/pipeline-generator/pipeline_generator/cli/run.py

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -134,7 +134,7 @@ def run(
134134
progress.update(task, description="[green]Virtual environment created")
135135
except subprocess.CalledProcessError as e:
136136
console.print(f"[red]Error creating virtual environment: {e.stderr}[/red]")
137-
raise click.Abort()
137+
raise click.Abort() from e
138138
else:
139139
console.print(f"[dim]Using existing virtual environment: {venv_name}[/dim]")
140140

@@ -251,7 +251,7 @@ def run(
251251
progress.update(task, description="[green]Dependencies installed")
252252
except subprocess.CalledProcessError as e:
253253
console.print(f"[red]Error installing dependencies: {e.stderr}[/red]")
254-
raise click.Abort()
254+
raise click.Abort() from e
255255

256256
# Step 3: Run the application
257257
console.print("\n[green]Starting application...[/green]\n")
@@ -308,7 +308,7 @@ def run(
308308
raise click.Abort()
309309
except Exception as e:
310310
console.print(f"[red]Error running application: {e}[/red]")
311-
raise click.Abort()
311+
raise click.Abort() from e
312312

313313

314314
if __name__ == "__main__":

tools/pipeline-generator/tests/test_security.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -53,7 +53,7 @@ def test_model_id_validation(self):
5353
generator.generate_app(model_id, output_dir)
5454
except ValueError as e:
5555
if "Invalid model_id" in str(e):
56-
pytest.fail(f"Valid model_id '{model_id}' was rejected: {e}")
56+
pytest.fail(f"Valid model_id '{model_id!r}' was rejected: {e}")
5757
# Other errors are fine (e.g., download failures)
5858

5959
# Test invalid IDs (should raise ValueError)
@@ -82,7 +82,7 @@ def test_app_name_sanitization(self):
8282
result_with_app = f"{sanitized}App"
8383
assert (
8484
result_with_app == expected_class_name
85-
), f"Failed for '{input_name}': got '{result_with_app}', expected '{expected_class_name}'"
85+
), f"Failed for '{input_name!r}': got '{result_with_app!r}', expected '{expected_class_name!r}'"
8686

8787
def test_sanitize_for_python_identifier(self):
8888
"""Test the Python identifier sanitization method."""

0 commit comments

Comments (0)