Skip to content

Commit 8aec25b

Browse files
committed
rm module_tests import
1 parent 00c9370 commit 8aec25b

File tree

1 file changed

+0
-18
lines changed

1 file changed

+0
-18
lines changed

torch2trt/converters/plugin_converters.py

Lines changed: 0 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
import torch
22
import torch.nn as nn
33
from torch2trt.torch2trt import *
4-
from torch2trt.module_test import add_module_test
54
import numpy as np
65
import ctypes
76

@@ -53,22 +52,5 @@ def convert_reflection_pad(ctx):
5352
layer = ctx.network.add_plugin_v2([input_trt], plugin)
5453
output._trt = layer.get_output(0)
5554

56-
57-
@add_module_test(torch.float32, torch.device("cuda"), [(1, 1, 3, 3)])
58-
@add_module_test(torch.float32, torch.device("cuda"), [(1, 2, 3, 3)])
59-
def test_reflection_pad_2d_simple():
60-
return nn.ReflectionPad2d(1)
61-
62-
63-
@add_module_test(torch.float32, torch.device("cuda"), [(1, 1, 3, 3)])
64-
@add_module_test(torch.float32, torch.device("cuda"), [(1, 2, 3, 3)])
65-
def test_reflection_pad_2d_simple():
66-
return nn.ReflectionPad2d(2)
67-
68-
69-
@add_module_test(torch.float32, torch.device("cuda"), [(1, 1, 3, 3)])
70-
@add_module_test(torch.float32, torch.device("cuda"), [(1, 2, 3, 3)])
71-
def test_reflection_pad_2d_simple():
72-
return nn.ReflectionPad2d((1, 0, 1, 0))
7355
except:
7456
pass

0 commit comments

Comments (0)