
Commit b3bcce4

Merge pull request #73 from AllenNeuralDynamics/add-camera-pixel-saturation

Add QC visualizer for image saturation

2 parents 53ff9d0 + 9ae3f59

1 file changed: +62 −28 lines changed

src/contraqctor/qc/camera.py

Lines changed: 62 additions & 28 deletions
@@ -60,6 +60,7 @@ def __init__(
         clock_jitter_s: float = 1e-4,
         start_time_s: t.Optional[float] = None,
         stop_time_s: t.Optional[float] = None,
+        saturation_bounds: tuple[t.Optional[int], t.Optional[int]] = (5, 250),
     ):
         """Initialize the camera test suite.

@@ -69,12 +70,14 @@ def __init__(
             clock_jitter_s: Maximum allowed time difference between frame timestamps, in seconds.
             start_time_s: Optional expected start time for validation, in seconds.
             stop_time_s: Optional expected stop time for validation, in seconds.
+            saturation_bounds: Pixel intensity bounds to check for saturation (min, max).
         """
         self.data_stream: Camera = data_stream
         self.expected_fps = expected_fps
         self.clock_jitter_s = clock_jitter_s
         self.start_time_s = start_time_s
         self.stop_time_s = stop_time_s
+        self.saturation_bounds = saturation_bounds

     def test_metadata_shape(self):
         """
@@ -201,34 +204,65 @@ def test_histogram_and_create_asset(self):
                 return self.fail_test(None, "Failed to read a frame from the video")
             max_d = 2 ** (frame.dtype.itemsize * 8)

-            if frame.shape[2] == 1:
-                frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
-            elif frame.shape[2] == 3:
-                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-            else:
-                return self.fail_test(None, f"Frame has unexpected number of channels({frame.shape[2]}).")
-
-            hist_r = cv2.calcHist([frame], [0], None, [max_d], [0, max_d])
-            hist_g = cv2.calcHist([frame], [1], None, [max_d], [0, max_d])
-            hist_b = cv2.calcHist([frame], [2], None, [max_d], [0, max_d])
-
-            hist_r /= hist_r.sum()
-            hist_g /= hist_g.sum()
-            hist_b /= hist_b.sum()
-
-            fig, ax = plt.subplots(1, 2, figsize=(15, 5))
-
-            ax[0].imshow(frame)
-            ax[0].axis("off")
-            ax[0].set_title("Frame from video")
-            ax[1].plot(hist_r, color="red", label="Red")
-            ax[1].plot(hist_g, color="green", label="Green")
-            ax[1].plot(hist_b, color="blue", label="Blue")
-            ax[1].set_xlim([0, max_d])
-            ax[1].set_xlabel("Pixel Value")
-            ax[1].set_ylabel("Normalized Frequency")
-            ax[1].set_title("Color Histogram")
-            ax[1].legend()
+            fig, ax = plt.subplots(2, frame.shape[2], figsize=(15, 7))
+
+            for channel in range(frame.shape[2]):
+                hist = cv2.calcHist([frame], [channel], None, [max_d], [0, max_d])
+                hist /= hist.sum()
+                ax[0, channel].imshow(frame[:, :, channel], cmap="gray")
+                ax[0, channel].axis("off")
+                ax[1, channel].plot(hist, color="k", label=f"Channel-{channel}")
+                ax[1, channel].set_xlim([0, max_d])
+                ax[1, channel].set_xlabel("Pixel Value")
+                ax[1, channel].set_ylabel("Normalized Frequency")
+                ax[1, channel].set_title(f"Histogram channel-{channel}")
+            fig.subplots_adjust(top=0.9)  # Leave space for suptitle
+            fig.suptitle("Pixel value histogram")
+            fig.tight_layout()
+
+            return self.pass_test(
+                None, "Histogram and asset created successfully.", context=ContextExportableObj.as_context(fig)
+            )
+
+    def test_create_pixel_saturation_visualizer(self):
+        """Creates a visualization highlighting saturated and underexposed pixels in the video frame."""
+        data = self.data_stream.data
+        if not data.has_video:
+            return self.skip_test("No video data available. Skipping test.")
+
+        with data.as_video_capture() as video:
+            video.set(cv2.CAP_PROP_POS_FRAMES, video.get(cv2.CAP_PROP_FRAME_COUNT) // 2)
+            ret, frame = video.read()
+
+            if not ret:
+                return self.fail_test(None, "Failed to read a frame from the video")
+
+            lower_bound, upper_bound = self.saturation_bounds
+
+            fig, ax = plt.subplots(1, frame.shape[2], figsize=(15, 5))
+
+            for channel in range(frame.shape[2]):
+                channel_data = frame[:, :, channel]
+
+                channel_saturated = np.zeros(frame.shape[:2], dtype=bool)
+                channel_underexposed = np.zeros(frame.shape[:2], dtype=bool)
+
+                if upper_bound is not None:
+                    channel_saturated = channel_data >= upper_bound
+                if lower_bound is not None:
+                    channel_underexposed = channel_data <= lower_bound
+
+                # Create RGB image: grayscale with saturated pixels in red and underexposed in blue
+                colored_frame = np.stack([channel_data, channel_data, channel_data], axis=-1)
+                colored_frame[channel_saturated] = [255, 0, 0]  # Red for saturated
+                colored_frame[channel_underexposed] = [0, 0, 255]  # Blue for underexposed
+
+                ax[channel].imshow(colored_frame)
+                ax[channel].axis("off")
+                ax[channel].set_title(f"Channel-{channel}")
+
+            fig.subplots_adjust(top=0.9)  # Leave space for suptitle
+            fig.suptitle("Pixel Saturation Visualization (bounds: {})".format(self.saturation_bounds))
             fig.tight_layout()

             return self.pass_test(
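
The core of the new test_create_pixel_saturation_visualizer is a boolean-mask overlay: pixels at or above the upper bound are painted red, pixels at or below the lower bound are painted blue, and everything else is shown as grayscale. The following is a minimal, self-contained sketch of that idea using only numpy and matplotlib; the function and variable names are illustrative and are not part of the contraqctor API.

from typing import Optional

import matplotlib.pyplot as plt
import numpy as np


# Minimal sketch of the overlay idea used by the new test; the names here are
# illustrative only and are not part of the contraqctor API.
def overlay_saturation(channel_data: np.ndarray, lower: Optional[int], upper: Optional[int]) -> np.ndarray:
    """Return an RGB image: grayscale, with saturated pixels red and underexposed pixels blue."""
    saturated = np.zeros(channel_data.shape, dtype=bool)
    underexposed = np.zeros(channel_data.shape, dtype=bool)
    if upper is not None:
        saturated = channel_data >= upper
    if lower is not None:
        underexposed = channel_data <= lower

    rgb = np.stack([channel_data, channel_data, channel_data], axis=-1)
    rgb[saturated] = [255, 0, 0]  # red for saturated
    rgb[underexposed] = [0, 0, 255]  # blue for underexposed
    return rgb


if __name__ == "__main__":
    # Synthetic 8-bit frame with one bright and one dark patch to exercise both bounds.
    frame = np.full((64, 64), 128, dtype=np.uint8)
    frame[:16, :16] = 255  # flagged as saturated
    frame[-16:, -16:] = 0  # flagged as underexposed
    plt.imshow(overlay_saturation(frame, lower=5, upper=250))
    plt.axis("off")
    plt.title("Saturation overlay, bounds (5, 250)")
    plt.show()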

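The new saturation_bounds argument is a (min, max) pair of pixel intensities; passing None for either side skips that check. A possible call site is sketched below; the suite class name CameraTestSuite and the my_camera_stream object are assumptions made for illustration, so consult contraqctor.qc.camera for the actual exported names.

from contraqctor.qc import camera

# Hypothetical usage sketch: the suite class name and the camera stream object
# are assumptions; the keyword arguments match the constructor shown in the diff.
suite = camera.CameraTestSuite(
    my_camera_stream,            # a loaded Camera data stream (assumed to exist)
    expected_fps=120,
    saturation_bounds=(5, 250),  # <= 5 flagged as underexposed, >= 250 as saturated
)
result = suite.test_create_pixel_saturation_visualizer()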