Commit 96e543e

Various fixes proposed by CodeCov
Restored utils.py, removed GPUtil from performance_monitor.py, initialized variables in tests.

Signed-off-by: Álvaro Bacca Peña <[email protected]>
1 parent c7fbc28 commit 96e543e

File tree: 3 files changed, +10 -20 lines changed

art/performance_monitor.py

Lines changed: 0 additions & 10 deletions

@@ -12,14 +12,6 @@
 import psutil
 from matplotlib import pyplot as plt
 
-# GPU monitoring support
-try:
-    import gputil
-
-    HAS_GPUTIL = True
-except ImportError:
-    HAS_GPUTIL = False
-
 try:
     import tensorflow as tf
 
@@ -43,11 +35,9 @@
 except ImportError as e:
     HAS_NVML = False
     GPU_COUNT = 0
-    print(f"Warning: pynvml not installed. GPU monitoring will be disabled. Error: {e}")
 except Exception as e:
     HAS_NVML = False
     GPU_COUNT = 0
-    print(f"Warning: Error initializing NVML. GPU monitoring might be unavailable. Error: {e}")
 
 
 class ResourceMonitor:
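
With GPUtil removed, GPU detection in this module rests on pynvml (with TensorFlow and PyTorch probed separately elsewhere in the file). For context, a minimal sketch of an NVML probe consistent with the HAS_NVML and GPU_COUNT flags visible in the hunk above; this is an illustration assuming only that the pynvml package may be installed, not the module's actual code, which is outside this diff:

    try:
        import pynvml

        # nvmlInit raises if no NVIDIA driver is available.
        pynvml.nvmlInit()
        GPU_COUNT = pynvml.nvmlDeviceGetCount()
        HAS_NVML = True
    except ImportError:
        # pynvml not installed; GPU monitoring via NVML is disabled.
        HAS_NVML = False
        GPU_COUNT = 0
    except Exception:
        # pynvml installed, but NVML could not be initialized.
        HAS_NVML = False
        GPU_COUNT = 0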

art/utils.py

Lines changed: 1 addition & 4 deletions

@@ -21,25 +21,22 @@
 
 from __future__ import absolute_import, division, print_function, unicode_literals, annotations
 
+from collections.abc import Callable
 import logging
 import math
 import os
-import re
 import shutil
 import sys
 import tarfile
 import warnings
 import zipfile
-from collections.abc import Callable
 from functools import wraps
 from inspect import signature
 from typing import TYPE_CHECKING, Optional, Tuple, Union
 
 import numpy as np
-import pandas as pd
 import six
 from scipy.special import gammainc
-from sklearn.model_selection import train_test_split
 from tqdm.auto import tqdm
 
 from art import config

tests/test_performance_monitor.py

Lines changed: 9 additions & 6 deletions

@@ -1,7 +1,7 @@
 import unittest
 import time
 import numpy as np
-from art.performance_monitor import ResourceMonitor, PerformanceTimer, HAS_TENSORFLOW, HAS_TORCH, HAS_GPUTIL
+from art.performance_monitor import ResourceMonitor, PerformanceTimer, HAS_TENSORFLOW, HAS_TORCH
 
 
 class TestPerformanceMonitoring(unittest.TestCase):
@@ -49,13 +49,16 @@ def test_gpu_detection(self):
         """Test that GPU detection works correctly."""
         monitor = ResourceMonitor()
         # Check if has_gpu is correctly set based on available libraries
-        self.assertEqual(monitor.has_gpu, (HAS_GPUTIL or HAS_TENSORFLOW or HAS_TORCH))
+        self.assertEqual(monitor.has_gpu, (HAS_TENSORFLOW or HAS_TORCH))
 
     def test_gpu_data_collection(self):
         """Test GPU data is collected when available."""
         monitor = ResourceMonitor()
         monitor.start()
 
+        tf = None
+        torch = None
+
         # Create a workload that might use GPU if available
         if HAS_TENSORFLOW:
             import tensorflow as tf
@@ -115,6 +118,7 @@ def test_performance_timer_with_gpu(self):
             b = tf.random.normal([5000, 5000])
             c = tf.matmul(a, b)
             result = c.numpy()
+            self.assertIsNotNone(result)  # not needed, but avoids false warnings
         elif HAS_TORCH:
             import torch
             if torch.cuda.is_available():
@@ -123,6 +127,7 @@ def test_performance_timer_with_gpu(self):
                 b = torch.randn(5000, 5000, device=device)
                 c = torch.matmul(a, b)
                 torch.cuda.synchronize()
+                self.assertIsNotNone(c)  # not needed, but avoids false warnings
 
         time.sleep(1)
 
@@ -152,10 +157,6 @@ def test_multiple_gpus(self):
             import torch
             if torch.cuda.is_available():
                 multi_gpu = torch.cuda.device_count() > 1
-        elif HAS_GPUTIL:
-            import GPUtil
-            gpus = GPUtil.getGPUs()
-            multi_gpu = len(gpus) > 1
 
         if not multi_gpu:
             self.skipTest("Multiple GPUs not available")
@@ -173,6 +174,7 @@ def test_multiple_gpus(self):
                 b = tf.random.normal([3000, 3000])
                 c = tf.matmul(a, b)
                 result = c.numpy()
+                self.assertIsNotNone(result)  # not needed, but avoids false warnings
         elif HAS_TORCH:
             import torch
             # Use first two GPUs
@@ -182,6 +184,7 @@ def test_multiple_gpus(self):
                 b = torch.randn(3000, 3000, device=device)
                 c = torch.matmul(a, b)
                 torch.cuda.synchronize(device)
+                self.assertIsNotNone(c)  # not needed, but avoids false warnings
 
         time.sleep(2)
         monitor.stop()
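
The tf = None / torch = None bindings and the assertIsNotNone calls change no test behavior; as the inline comments note, they exist so static analyzers stop flagging the conditionally imported names as possibly unbound and the computed tensors as unused. A minimal standalone sketch of the pattern, with illustrative names not taken from the test file, which runs whether or not TensorFlow is installed:

    import unittest

    try:
        import tensorflow  # noqa: F401
        HAS_TENSORFLOW = True
    except ImportError:
        HAS_TENSORFLOW = False


    class ConditionalBackendExample(unittest.TestCase):
        def test_optional_workload(self):
            tf = None  # bind the name up front so it is never possibly-unbound
            if HAS_TENSORFLOW:
                import tensorflow as tf
            if tf is not None:
                result = tf.constant([1.0]).numpy()
                # Asserting on the result keeps linters from reporting
                # an unused variable.
                self.assertIsNotNone(result)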
