
Commit 46f921c

Refactor code formatting and improve readability across multiple operators
- Consolidated and simplified code formatting in various operator files, including ImageOverlayWriter, JSONResultsWriter, and Llama3VILAInferenceOperator.
- Removed unnecessary line breaks and improved the consistency of function definitions and logging statements.
- Enhanced overall code clarity by ensuring uniform indentation and spacing.

Signed-off-by: Victor Chang <[email protected]>
1 parent 83671d3 commit 46f921c

25 files changed: +216 -663 lines

monai/deploy/operators/image_overlay_writer_operator.py

Lines changed: 3 additions & 8 deletions
@@ -78,9 +78,7 @@ def _to_hwc_uint8(self, image) -> np.ndarray:
         else:
             arr = np.asarray(image)
         if arr.ndim != 3 or arr.shape[2] not in (3, 4):
-            raise ValueError(
-                f"Expected HWC image with 3 or 4 channels, got shape {arr.shape}"
-            )
+            raise ValueError(f"Expected HWC image with 3 or 4 channels, got shape {arr.shape}")
         # Drop alpha if present
         if arr.shape[2] == 4:
             arr = arr[..., :3]
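
The collapsed raise keeps the operator's HWC contract in one line: input must be 3-D with channels last, either RGB (3) or RGBA (4), and any alpha channel is dropped afterwards. A minimal standalone sketch of the same check, with hypothetical input shapes (not the operator's code):

import numpy as np

def to_rgb_hwc(arr: np.ndarray) -> np.ndarray:
    # Reject anything that is not height x width x {3, 4},
    # e.g. a CHW tensor of shape (3, 512, 512) fails here.
    if arr.ndim != 3 or arr.shape[2] not in (3, 4):
        raise ValueError(f"Expected HWC image with 3 or 4 channels, got shape {arr.shape}")
    # Drop alpha if present, keeping the RGB planes.
    return arr[..., :3] if arr.shape[2] == 4 else arr

print(to_rgb_hwc(np.zeros((512, 512, 4), dtype=np.uint8)).shape)  # (512, 512, 3)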
@@ -105,17 +103,14 @@ def _to_mask_uint8(self, pred) -> np.ndarray:
         return arr
 
     @staticmethod
-    def _blend_overlay(
-        img: np.ndarray, mask_u8: np.ndarray, alpha: float, color: Tuple[int, int, int]
-    ) -> np.ndarray:
+    def _blend_overlay(img: np.ndarray, mask_u8: np.ndarray, alpha: float, color: Tuple[int, int, int]) -> np.ndarray:
         # img: HWC uint8, mask_u8: HW uint8
         mask = (mask_u8 > 0).astype(np.float32)[..., None]
         color_img = np.zeros_like(img, dtype=np.uint8)
         color_img[..., 0] = color[0]
         color_img[..., 1] = color[1]
         color_img[..., 2] = color[2]
         blended = (
-            img.astype(np.float32) * (1.0 - alpha * mask)
-            + color_img.astype(np.float32) * (alpha * mask)
+            img.astype(np.float32) * (1.0 - alpha * mask) + color_img.astype(np.float32) * (alpha * mask)
         ).astype(np.uint8)
         return blended
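
The joined blended expression is a per-pixel convex blend: under the mask, each pixel moves alpha of the way toward the overlay color; elsewhere it is untouched. A standalone sketch of the same formula, with hypothetical function name and data (not the operator's API):

import numpy as np

def blend(img: np.ndarray, mask_u8: np.ndarray, alpha: float, color=(255, 0, 0)) -> np.ndarray:
    # Binarize the mask and add a trailing axis so it broadcasts over the HWC image.
    mask = (mask_u8 > 0).astype(np.float32)[..., None]
    color_img = np.broadcast_to(np.asarray(color, dtype=np.float32), img.shape)
    # Convex combination: masked pixels shift alpha of the way to the color.
    out = img.astype(np.float32) * (1.0 - alpha * mask) + color_img * (alpha * mask)
    return out.astype(np.uint8)

img = np.zeros((4, 4, 3), dtype=np.uint8)   # all-black RGB image
mask = np.zeros((4, 4), dtype=np.uint8)
mask[1:3, 1:3] = 255                        # 2x2 foreground patch
print(blend(img, mask, alpha=0.5)[1, 1])    # [127 0 0]: half-way to red
print(blend(img, mask, alpha=0.5)[0, 0])    # [0 0 0]: background untouched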

monai/deploy/operators/json_results_writer_operator.py

Lines changed: 3 additions & 9 deletions
@@ -128,15 +128,11 @@ def _process_prediction(self, pred: Any, filename: str) -> Dict[str, Any]:
                 }
             else:
                 # Generic classification
-                result["probabilities"] = {
-                    f"class_{i}": float(pred_data[i]) for i in range(len(pred_data))
-                }
+                result["probabilities"] = {f"class_{i}": float(pred_data[i]) for i in range(len(pred_data))}
 
             # Add predicted class
             max_idx = int(np.argmax(pred_data))
-            result["predicted_class"] = list(result["probabilities"].keys())[
-                max_idx
-            ]
+            result["predicted_class"] = list(result["probabilities"].keys())[max_idx]
             result["confidence"] = float(pred_data[max_idx])
 
         elif pred_data.ndim == 2:  # 2D array (batch of predictions)
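
The one-line dict comprehension and the direct [max_idx] index preserve the original logic: since Python dicts keep insertion order, the key at position max_idx in result["probabilities"] is exactly f"class_{max_idx}". A small sketch with a hypothetical prediction vector:

import numpy as np

pred_data = np.array([0.05, 0.85, 0.10])  # e.g. softmax scores for 3 classes

probabilities = {f"class_{i}": float(pred_data[i]) for i in range(len(pred_data))}
max_idx = int(np.argmax(pred_data))
predicted_class = list(probabilities.keys())[max_idx]  # relies on dict insertion order
confidence = float(pred_data[max_idx])
print(predicted_class, confidence)  # class_1 0.85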
@@ -172,9 +168,7 @@ def _print_classification_summary(self, result: Dict[str, Any]):
             for class_name, prob in probs.items():
                 print(f" {class_name}: {prob:.4f}")
         if "predicted_class" in result:
-            print(
-                f" Predicted: {result['predicted_class']} (confidence: {result['confidence']:.4f})"
-            )
+            print(f" Predicted: {result['predicted_class']} (confidence: {result['confidence']:.4f})")
 
 
 def test():

monai/deploy/operators/llama3_vila_inference_operator.py

Lines changed: 8 additions & 26 deletions
@@ -116,9 +116,7 @@ def _load_model(self):
         config = AutoConfig.from_pretrained(self.model_path)
 
         # Load tokenizer
-        self.tokenizer = AutoTokenizer.from_pretrained(
-            self.model_path / "llm", use_fast=False
-        )
+        self.tokenizer = AutoTokenizer.from_pretrained(self.model_path / "llm", use_fast=False)
 
         # For LLaVA-style models, we typically need to handle image processing
         # and model loading in a specific way. For now, we'll create a simplified
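
AutoTokenizer.from_pretrained accepts a local directory as well as a hub ID, so pointing it at the llm subfolder of the model bundle works as long as the tokenizer files live there; use_fast=False selects the slow, pure-Python tokenizer implementation. A usage sketch (the bundle path is illustrative, not from this repo):

from pathlib import Path
from transformers import AutoTokenizer  # requires the transformers package

model_path = Path("/opt/models/llama3-vila")  # hypothetical model bundle root
tokenizer = AutoTokenizer.from_pretrained(model_path / "llm", use_fast=False)
ids = tokenizer("Describe the key finding in this image.")["input_ids"]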
@@ -156,9 +154,7 @@ def _preprocess_image(self, image: Image) -> torch.Tensor:
         # For now, we'll just convert to tensor
         return torch.from_numpy(image_array).float()
 
-    def _generate_response(
-        self, image_tensor: torch.Tensor, prompt: str, generation_params: Dict[str, Any]
-    ) -> str:
+    def _generate_response(self, image_tensor: torch.Tensor, prompt: str, generation_params: Dict[str, Any]) -> str:
         """Generate text response from the model."""
         if self._mock_mode:
             # Mock response based on common medical VQA patterns
@@ -276,44 +272,30 @@ def compute(self, op_input, op_output, context):
         request_id = op_input.receive("request_id")
         generation_params = op_input.receive("generation_params")
 
-        self._logger.info(
-            f"Processing request {request_id} with output type '{output_type}'"
-        )
+        self._logger.info(f"Processing request {request_id} with output type '{output_type}'")
 
         try:
             # Preprocess image
             image_tensor = self._preprocess_image(image)
 
             # Generate text response
-            text_response = self._generate_response(
-                image_tensor, prompt, generation_params
-            )
+            text_response = self._generate_response(image_tensor, prompt, generation_params)
 
             # Get image metadata if available
-            image_metadata = (
-                image.metadata()
-                if hasattr(image, "metadata") and callable(image.metadata)
-                else None
-            )
+            image_metadata = image.metadata() if hasattr(image, "metadata") and callable(image.metadata) else None
 
             # Create result based on output type
             if output_type == "json":
-                result = self._create_json_result(
-                    text_response, request_id, prompt, image_metadata
-                )
+                result = self._create_json_result(text_response, request_id, prompt, image_metadata)
             elif output_type == "image":
                 # For now, just return the original image
                 # In future, this could generate new images
                 result = image
             elif output_type == "image_overlay":
                 result = self._create_image_overlay(image, text_response)
             else:
-                self._logger.warning(
-                    f"Unknown output type: {output_type}, defaulting to json"
-                )
-                result = self._create_json_result(
-                    text_response, request_id, prompt, image_metadata
-                )
+                self._logger.warning(f"Unknown output type: {output_type}, defaulting to json")
+                result = self._create_json_result(text_response, request_id, prompt, image_metadata)
 
             # Emit outputs
             op_output.emit(result, "result")
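
Collapsing the logger and _create_json_result calls leaves the if/elif dispatch on output_type easier to scan; the unknown case now reads as two lines: warn, then fall back to json. An equivalent table-driven variant, shown only as a sketch of the pattern (not the operator's actual code):

def make_result(output_type, op, image, text_response, request_id, prompt, image_metadata):
    # Map each supported output type to a thunk; unknown types fall back to json.
    handlers = {
        "json": lambda: op._create_json_result(text_response, request_id, prompt, image_metadata),
        "image": lambda: image,  # pass-through for now
        "image_overlay": lambda: op._create_image_overlay(image, text_response),
    }
    handler = handlers.get(output_type)
    if handler is None:
        op._logger.warning(f"Unknown output type: {output_type}, defaulting to json")
        handler = handlers["json"]
    return handler()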
