@@ -1,7 +1,7 @@
 import unittest
 import time
 import numpy as np
-from art.performance_monitor import ResourceMonitor, PerformanceTimer, HAS_TENSORFLOW, HAS_TORCH, HAS_GPUTIL
+from art.performance_monitor import ResourceMonitor, PerformanceTimer, HAS_TENSORFLOW, HAS_TORCH
 
 
 class TestPerformanceMonitoring(unittest.TestCase):
@@ -49,13 +49,16 @@ def test_gpu_detection(self):
         """Test that GPU detection works correctly."""
         monitor = ResourceMonitor()
         # Check if has_gpu is correctly set based on available libraries
-        self.assertEqual(monitor.has_gpu, (HAS_GPUTIL or HAS_TENSORFLOW or HAS_TORCH))
+        self.assertEqual(monitor.has_gpu, (HAS_TENSORFLOW or HAS_TORCH))
 
     def test_gpu_data_collection(self):
         """Test GPU data is collected when available."""
         monitor = ResourceMonitor()
         monitor.start()
 
+        tf = None
+        torch = None
+
         # Create a workload that might use GPU if available
         if HAS_TENSORFLOW:
             import tensorflow as tf
@@ -115,6 +118,7 @@ def test_performance_timer_with_gpu(self):
             b = tf.random.normal([5000, 5000])
             c = tf.matmul(a, b)
             result = c.numpy()
+            self.assertIsNotNone(result)  # not needed, but avoids false warnings
         elif HAS_TORCH:
             import torch
             if torch.cuda.is_available():
@@ -123,6 +127,7 @@ def test_performance_timer_with_gpu(self):
                 b = torch.randn(5000, 5000, device=device)
                 c = torch.matmul(a, b)
                 torch.cuda.synchronize()
+                self.assertIsNotNone(c)  # not needed, but avoids false warnings
 
         time.sleep(1)
@@ -152,10 +157,6 @@ def test_multiple_gpus(self):
             import torch
             if torch.cuda.is_available():
                 multi_gpu = torch.cuda.device_count() > 1
-        elif HAS_GPUTIL:
-            import GPUtil
-            gpus = GPUtil.getGPUs()
-            multi_gpu = len(gpus) > 1
 
         if not multi_gpu:
             self.skipTest("Multiple GPUs not available")
@@ -173,6 +174,7 @@ def test_multiple_gpus(self):
             b = tf.random.normal([3000, 3000])
             c = tf.matmul(a, b)
             result = c.numpy()
+            self.assertIsNotNone(result)  # not needed, but avoids false warnings
         elif HAS_TORCH:
             import torch
             # Use first two GPUs
@@ -182,6 +184,7 @@ def test_multiple_gpus(self):
                 b = torch.randn(3000, 3000, device=device)
                 c = torch.matmul(a, b)
                 torch.cuda.synchronize(device)
+                self.assertIsNotNone(c)  # not needed, but avoids false warnings
 
         time.sleep(2)
         monitor.stop()