
Commit 2792a04

Author: Anurag Dixit

Added test case

Signed-off-by: Anurag Dixit <[email protected]>

1 parent 00cf1d5 · commit 2792a04

File tree: 2 files changed (+30, -9 lines)


tests/py/multi_gpu_test_case.py

Lines changed: 21 additions & 0 deletions
@@ -0,0 +1,21 @@
+import unittest
+import trtorch
+import torch
+import torchvision.models as models
+
+
+class MultiGpuTestCase(unittest.TestCase):
+
+    def __init__(self, methodName='runTest', model=None):
+        super(MultiGpuTestCase, self).__init__(methodName)
+        self.model = model
+        self.model.eval().to("cuda")
+
+    @staticmethod
+    def parametrize(testcase_class, model=None):
+        testloader = unittest.TestLoader()
+        testnames = testloader.getTestCaseNames(testcase_class)
+        suite = unittest.TestSuite()
+        for name in testnames:
+            suite.addTest(testcase_class(name, model=model))
+        return suite
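
For reference, a minimal usage sketch of the parametrize() helper added above (not part of the commit): the test class, the assertion, and the choice of torchvision model are illustrative assumptions.

import unittest
import torchvision.models as models

from multi_gpu_test_case import MultiGpuTestCase


class ExampleTest(MultiGpuTestCase):
    # Hypothetical test: MultiGpuTestCase.__init__ has already put
    # self.model in eval mode on the current CUDA device.
    def test_model_is_on_cuda(self):
        self.assertTrue(next(self.model.parameters()).is_cuda)


if __name__ == "__main__":
    # parametrize() builds one test instance per test method and injects
    # the same model into each instance via the constructor.
    suite = MultiGpuTestCase.parametrize(ExampleTest, model=models.resnet18(pretrained=True))
    unittest.TextTestRunner().run(suite)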

tests/py/test_api_multi_gpu.py

Lines changed: 9 additions & 9 deletions
@@ -3,17 +3,12 @@
 import torch
 import torchvision.models as models

-from model_test_case import ModelTestCase
+from multi_gpu_test_case import MultiGpuTestCase

+gpu_id = 1
 class TestCompile(MultiGpuTestCase):

     def setUp(self):
-        if not torch.cuda.device_count() > 1:
-            raise ValueError("This test case is applicable for multi-gpu configurations only")
-
-        self.gpu_id = 1
-        # Setting it up here so that all CUDA allocations are done on correct device
-        trtorch.set_device(self.gpu_id)
         self.input = torch.randn((1, 3, 224, 224)).to("cuda")
         self.traced_model = torch.jit.trace(self.model, [self.input])
         self.scripted_model = torch.jit.script(self.model)
@@ -23,7 +18,7 @@ def test_compile_traced(self):
             "input_shapes": [self.input.shape],
             "device": {
                 "device_type": trtorch.DeviceType.GPU,
-                "gpu_id": self.gpu_id,
+                "gpu_id": gpu_id,
                 "dla_core": 0,
                 "allow_gpu_fallback": False,
                 "disable_tf32": False
@@ -39,7 +34,7 @@ def test_compile_script(self):
             "input_shapes": [self.input.shape],
             "device": {
                 "device_type": trtorch.DeviceType.GPU,
-                "gpu_id": self.gpu_id,
+                "gpu_id": gpu_id,
                 "dla_core": 0,
                 "allow_gpu_fallback": False,
                 "disable_tf32": False
@@ -58,6 +53,11 @@ def test_suite():

     return suite

+if not torch.cuda.device_count() > 1:
+    raise ValueError("This test case is applicable for multi-gpu configurations only")
+
+# Setting it up here so that all CUDA allocations are done on correct device
+trtorch.set_device(gpu_id)
 suite = test_suite()

 runner = unittest.TextTestRunner()
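
A note on the device handling above: tensors created with .to("cuda") are allocated on whichever GPU is current at allocation time, which is why this change moves the device selection to module scope, before setUp() allocates the model and inputs (see the comment carried over from the old setUp()). Below is a small sketch of that behaviour in plain PyTorch; trtorch.set_device(gpu_id) in the test plays the analogous role of selecting the target GPU up front.

import torch

# Mirrors the multi-GPU guard in test_api_multi_gpu.py.
if not torch.cuda.device_count() > 1:
    raise ValueError("This sketch needs at least two GPUs")

torch.cuda.set_device(1)              # make cuda:1 the current device
x = torch.randn(2, 2).to("cuda")      # allocated on the current device
print(x.device)                       # prints "cuda:1"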
