
Commit 9983fe0

Improved PerformanceMonitor tests
Signed-off-by: Álvaro Bacca Peña <[email protected]>
1 parent 796ec10 commit 9983fe0

2 files changed: 38 additions & 10 deletions

art/performance_monitor.py

Lines changed: 3 additions & 5 deletions
@@ -189,10 +189,7 @@ def plot_results(self, title: str | None = None) -> Any | None:
         fig.suptitle(title or "Resource Usage During Execution", fontsize=16)
 
         # Flatten axes array for easier indexing
-        if n_plots > 2:
-            axes = axes.flatten()
-        else:
-            axes = [axes]
+        axes = axes.flatten()
 
         # CPU usage plot
         axes[0].plot(data["time"], data["cpu_percent"], "b-")
@@ -208,7 +205,8 @@ def plot_results(self, title: str | None = None) -> Any | None:
         axes[1].set_ylabel("Memory (MB)")
         axes[1].grid(True)
 
-        if self.has_gpu and len(axes) > 2:
+        # the axes length check is redundant here; has_gpu already validates this
+        if self.has_gpu:
             # GPU usage plot
             axes[2].plot(data["time"], data["gpu_percent"], "g-")
             axes[2].set_title("GPU Usage (%)")
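
Note on the simplified flatten: dropping the n_plots branch relies on plt.subplots returning a NumPy array of Axes whenever the figure has at least two rows, so .flatten() works for both the 2-panel (CPU, memory) and 3-panel (plus GPU) layouts. A minimal standalone sketch of that assumption; the plt.subplots(n_plots, 1, ...) call below is illustrative, not necessarily the exact call PerformanceMonitor makes:

    import matplotlib
    matplotlib.use("Agg")  # headless backend so the sketch runs without a display
    import matplotlib.pyplot as plt
    import numpy as np

    for n_plots in (2, 3):  # CPU+memory, or CPU+memory+GPU
        fig, axes = plt.subplots(n_plots, 1, figsize=(8, 3 * n_plots))
        assert isinstance(axes, np.ndarray)     # an array of Axes, never a bare Axes
        assert len(axes.flatten()) == n_plots   # flatten() is valid for any panel count >= 2
        plt.close(fig)

With a single panel, plt.subplots would return a bare Axes object and .flatten() would fail, so the simplification implicitly assumes the monitor always draws at least the CPU and memory panels.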

tests/test_performance_monitor.py

Lines changed: 35 additions & 5 deletions
@@ -1,6 +1,9 @@
 import unittest
 import time
 import numpy as np
+import pytest
+
+from art import performance_monitor
 from art.performance_monitor import ResourceMonitor, PerformanceTimer, HAS_TENSORFLOW, HAS_TORCH
 
 
@@ -42,12 +45,39 @@ def test_performance_timer(self):
         self.assertGreater(len(data["cpu_percent"]), 0)
 
 
+@pytest.mark.parametrize(
+    "has_nvml, gpu_count, expected_has_gpu",
+    [
+        # Scenario 1: No NVML, regardless of GPU count -> No GPU detected
+        (False, 0, False),
+        (False, 1, False),
+        (False, 2, False),
+
+        # Scenario 2: NVML available, but no GPUs -> No GPU detected
+        (True, 0, False),
+
+        # Scenario 3: NVML available and GPUs present -> GPU detected
+        (True, 1, True),
+        (True, 2, True),
+    ]
+)
+def test_gpu_detection(monkeypatch, has_nvml: bool, gpu_count: int, expected_has_gpu: bool):
+    """
+    Test that GPU detection works correctly based on HAS_NVML and GPU_COUNT.
+
+    This test uses parametrization to cover various combinations of NVML
+    availability and detected GPU count.
+    """
+    # Initialize the ResourceMonitor with the current parameters
+    monkeypatch.setattr(performance_monitor, 'HAS_NVML', has_nvml)
+    monkeypatch.setattr(performance_monitor, 'GPU_COUNT', gpu_count)
+    monitor = ResourceMonitor()
+
+    # Assert that the monitor's detected GPU status matches the expected value
+    assert monitor.has_gpu == expected_has_gpu
+
+
 class TestGPUMonitoring(unittest.TestCase):
-    def test_gpu_detection(self):
-        """Test that GPU detection works correctly."""
-        monitor = ResourceMonitor()
-        # Check if has_gpu is correctly set based on available libraries
-        self.assertEqual(monitor.has_gpu, (HAS_TENSORFLOW or HAS_TORCH))
 
     def test_gpu_data_collection(self):
         """Test GPU data is collected when available."""

0 commit comments
