@@ -106,22 +106,31 @@ converter and use it in our application:
106
106
import torch
107
107
import trtorch
108
108
109
+ # After "python3 setup.py install", you should find this .so file under the generated "build" directory
109
110
torch.ops.load_library(' ./build/lib.linux-x86_64-3.6/elu_converter.cpython-36m-x86_64-linux-gnu.so' )
110
111
112
+
111
113
class Elu(torch.nn.Module):
    """Minimal module wrapping ``torch.nn.ELU``.

    Used to exercise the custom TRTorch ELU converter loaded above via
    ``torch.ops.load_library``.
    """

    def __init__(self):
        # Python 3 zero-argument super() — equivalent to super(Elu, self).
        super().__init__()
        self.elu = torch.nn.ELU()

    def forward(self, x):
        """Apply ELU element-wise to ``x`` and return the result."""
        return self.elu(x)
118
121
122
def MaxDiff(pytorch_out, trtorch_out):
    """Print and return the maximum element-wise absolute difference
    between the PyTorch and TRTorch outputs.

    Args:
        pytorch_out: tensor produced by the eager PyTorch model.
        trtorch_out: tensor produced by the TRTorch-compiled module.

    Returns:
        A zero-dim tensor holding ``max(|pytorch_out - trtorch_out|)``.
        (Returning the value is new and backward-compatible — the original
        only printed it; the visible caller ignores the result.)
    """
    diff = torch.sub(pytorch_out, trtorch_out)
    abs_diff = torch.abs(diff)
    max_diff = torch.max(abs_diff)
    # Typo fix: original message read "differnce".
    print("Maximum difference between TRTorch and PyTorch: \n", max_diff)
    return max_diff
119
130
def main ():
120
- data = torch.randn((1 , 1 , 2 , 2 )).to(" cuda" )
121
131
model = Elu().eval() # .cuda()
122
132
123
133
scripted_model = torch.jit.script(model)
124
- print (scripted_model.graph)
125
134
compile_settings = {
126
135
" input_shapes" : [{
127
136
" min" : [1024 , 1 , 32 , 32 ],
@@ -133,10 +142,14 @@ def main():
133
142
}
134
143
trt_ts_module = trtorch.compile(scripted_model, compile_settings)
135
144
input_data = torch.randn((1024 , 1 , 32 , 32 ))
136
- print (input_data[0 , :, :, 0 ])
137
145
input_data = input_data.half().to(" cuda" )
138
- result = trt_ts_module(input_data)
139
- print (result[0 , :, :, 0 ])
146
+ pytorch_out = model.forward(input_data)
147
+
148
+ trtorch_out = trt_ts_module(input_data)
149
+ print (' PyTorch output: \n ' , pytorch_out[0 , :, :, 0 ])
150
+ print (' TRTorch output: \n ' , trtorch_out[0 , :, :, 0 ])
151
+ MaxDiff(pytorch_out, trtorch_out)
152
+
140
153
141
154
# Script entry point: run the converter example only when executed directly.
if __name__ == "__main__":
    main()
0 commit comments