Commit f543f75

Disable tests that need Nvidia GPU and cuda
To enable tests to run for now, this disables the tests that require an Nvidia GPU and CUDA.

Signed-off-by: Martin Hickey <[email protected]>
Parent: f04d3af

4 files changed: 41 additions & 38 deletions

.gitignore

Lines changed: 3 additions & 0 deletions

@@ -37,3 +37,6 @@ venv/
 # generated by setuptools_scm
 /fms_mo/_version.py
 
+#Generated by tests
+qcfg.json
+

tests/models/conftest.py

Lines changed: 37 additions & 35 deletions

@@ -505,52 +505,54 @@ def model_config_fp16():
     return deepcopy(ToyModel4().half())
 
 
-class ToyModelQuantized(torch.nn.Module):
-    """
-    Three layer Linear model that has a quantized layer
+# QLinear class requires Nvidia GPU and cuda
+if torch.cuda.is_available():
+    class ToyModelQuantized(torch.nn.Module):
+        """
+        Three layer Linear model that has a quantized layer
 
-    Extends:
-        torch.nn.Module
-    """
+        Extends:
+            torch.nn.Module
+        """
 
-    def __init__(self):
-        super().__init__()
-        kwargs = {"qcfg": qconfig_init()}  # QLinear requires qconfig to work
-        self.first_layer = torch.nn.Linear(3, 3, bias=True)
-        self.second_layer = QLinear(3, 3, bias=True, **kwargs)
-        self.third_layer = torch.nn.Linear(3, 3, bias=True)
+        def __init__(self):
+            super().__init__()
+            kwargs = {"qcfg": qconfig_init()}  # QLinear requires qconfig to work
+            self.first_layer = torch.nn.Linear(3, 3, bias=True)
+            self.second_layer = QLinear(3, 3, bias=True, **kwargs)
+            self.third_layer = torch.nn.Linear(3, 3, bias=True)
 
-    def forward(self, input_tensor):
-        """
-        Forward func for Toy Model
+        def forward(self, input_tensor):
+            """
+            Forward func for Toy Model
 
-        Args:
-            input_tensor (torch.FloatTensor): Tensor to operate on
+            Args:
+                input_tensor (torch.FloatTensor): Tensor to operate on
 
-        Returns:
-            torch.FloatTensor:
-        """
-        out = self.first_layer(input_tensor)
-        out = self.second_layer(out)
-        out = self.third_layer(out)
-        return out
+            Returns:
+                torch.FloatTensor:
+            """
+            out = self.first_layer(input_tensor)
+            out = self.second_layer(out)
+            out = self.third_layer(out)
+            return out
 
 
-model_quantized_params = [ToyModelQuantized()]
+    model_quantized_params = [ToyModelQuantized()]
 
 
-@pytest.fixture(scope="function", params=model_quantized_params)
-def model_quantized(request):
-    """
-    Toy Model that has quantized layer
+    @pytest.fixture(scope="function", params=model_quantized_params)
+    def model_quantized(request):
+        """
+        Toy Model that has quantized layer
 
-    Args:
-        request (torch.nn.Module): Toy Model
+        Args:
+            request (torch.nn.Module): Toy Model
 
-    Returns:
-        torch.nn.Module: Toy Model
-    """
-    return deepcopy(request.param)
+        Returns:
+            torch.nn.Module: Toy Model
+        """
+        return deepcopy(request.param)
 
 
 # Get a model to test layer uniqueness
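
Aside (not part of this commit): the same CUDA gating could be written with pytest's skipif marker instead of nesting the class and fixture inside an if block, which keeps the module importable everywhere and surfaces the skip reason in test reports. A minimal sketch, assuming the model_quantized and sample_input_fp32 fixtures from conftest.py; the test body is illustrative:

import pytest
import torch

# Reusable marker: skips a test when no CUDA device is present.
cuda_only = pytest.mark.skipif(
    not torch.cuda.is_available(),
    reason="QLinear requires an Nvidia GPU and CUDA",
)


@cuda_only
def test_model_quantized_sketch(model_quantized, sample_input_fp32):
    # ToyModelQuantized stacks Linear(3, 3) layers, so the output
    # shape matches the input shape.
    out = model_quantized(sample_input_fp32)
    assert out.shape == sample_input_fp32.shape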

tests/models/test_qmodelprep.py

Lines changed: 1 addition & 0 deletions

@@ -32,6 +32,7 @@
 ################
 # Qmodel tests #
 ################
+@pytest.mark.skip(reason="needs Nvidia GPU cuda for testing this")
 def test_model_quantized(
     model_quantized: torch.nn.Module,
     sample_input_fp32: torch.FloatTensor,
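
Aside (illustrative, not from this commit): @pytest.mark.skip disables the test unconditionally, so it stays skipped even on machines that do have a GPU. If every test in the module needs CUDA, a module-level pytestmark would skip them only where CUDA is absent; a sketch:

import pytest
import torch

# pytest applies this marker to every test collected from the module.
pytestmark = pytest.mark.skipif(
    not torch.cuda.is_available(),
    reason="needs Nvidia GPU and CUDA",
)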

tox.ini

Lines changed: 0 additions & 3 deletions

@@ -4,9 +4,6 @@ minversion = 4.4
 
 [testenv]
 description = run tests (unit)
-# Use PyTorch CPU build instead of CUDA build in test envs
-setenv =
-    PIP_EXTRA_INDEX_URL=https://download.pytorch.org/whl/cpu
 extras =
     dev
 package = wheel
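
Aside (illustrative): with the PIP_EXTRA_INDEX_URL override removed, tox environments resolve PyTorch from the default index, which may install a CUDA build. A quick sketch to check which build a test environment actually got; torch.version.cuda is None for CPU-only wheels:

import torch

# CPU-only wheels report no CUDA toolchain version.
print("torch version:", torch.__version__)
print("built with CUDA:", torch.version.cuda)
print("CUDA available at runtime:", torch.cuda.is_available())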
