@@ -13,13 +13,13 @@ nvinfer1::ILayer* add_elementwise(ConversionCtx* ctx, nvinfer1::ElementWiseOpera
     auto other_dims = other->getDimensions();

     TRTORCH_CHECK(util::volume(self_dims) == util::volume(other_dims), "Found inputs to elementwise operation do not have the same number of elements:\n    Found: self " << self_dims << " other " << other_dims);
-
+
     nvinfer1::ILayer* ele;
     if (scalar != 1) {
         LOG_WARNING("Please verify scalar handling in add converter, channel axis set to 3 but scaling is uniform");

         auto shape = util::toVec(other_dims);
-
+
         if (shape.size() < 4) {
             auto new_shape = util::toDimsPad(shape, 4);
             LOG_DEBUG("Input shape is less than 4D got: " << util::toDims(shape) << ", inserting shuffle layers to reshape to 4D tensor shape: " << new_shape);
@@ -33,7 +33,7 @@ nvinfer1::ILayer* add_elementwise(ConversionCtx* ctx, nvinfer1::ElementWiseOpera
             self_shuffle->setName(std::string("[Reshape self to " + util::toStr(new_shape) + ']').c_str());
             self = self_shuffle->getOutput(0);
         }
-
+
         auto scale = Weights(ctx, scalar);
         auto scaled = ctx->net->addScaleNd(*other, nvinfer1::ScaleMode::kUNIFORM, {}, scale.data, {}, 0);
         auto scaled_other = scaled->getOutput(0);
@@ -45,48 +45,49 @@ nvinfer1::ILayer* add_elementwise(ConversionCtx* ctx, nvinfer1::ElementWiseOpera
         //     shuffle->setName(std::string("[Reshape other to " + util::toStr(util::toDims(shape)) + ']').c_str());
         //     scaled_other = shuffle->getOutput(0);
         // }
-
+
         ele = ctx->net->addElementWise(*self, *scaled_other, op);
     } else {
         ele = ctx->net->addElementWise(*self, *other, op);
     }
+
     return ele;
-
+
 }

 auto element_wise_registrations = RegisterNodeConversionPatterns()
     .pattern({
         "aten::add.Tensor(Tensor self, Tensor other, Scalar alpha=1) -> Tensor",
         [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-            // Should implement self + alpha * other
+            // Should implement self + alpha * other
             auto self = args[0].ITensor();
             auto other = args[1].ITensor();
             auto scalar = args[2].unwrapToScalar().to<float>();
             auto add = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kSUM, self, other, scalar);
+
+            TRTORCH_CHECK(add, "Unable to create add layer from node: " << *n);
+
             add->setName(util::node_info(n).c_str());
-            auto out_value = n->outputs()[0];
-            auto out_tensor = add->getOutput(0);
-            out_tensor->setName(out_value->debugName().c_str());
-            ctx->value_tensor_map[out_value] = out_tensor;
-            LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
-
+            auto out = associate_value_and_tensor(ctx, n->outputs()[0], add->getOutput(0));
+
+            LOG_DEBUG("Output tensor shape: " << out->getDimensions());
             return true;
         }
     }).pattern({
         "aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> (Tensor(a!))",
         [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-            // Should implement self + alpha * other
+            // Should implement self + alpha * other
             auto self = args[0].ITensor();
             auto other = args[1].ITensor();
             auto scalar = args[2].unwrapToScalar().to<float>();
             auto add = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kSUM, self, other, scalar);
+
+            TRTORCH_CHECK(add, "Unable to create add layer from node: " << *n);
+
             add->setName(util::node_info(n).c_str());
-            auto out_value = n->outputs()[0];
-            auto out_tensor = add->getOutput(0);
-            out_tensor->setName(out_value->debugName().c_str());
-            ctx->value_tensor_map[out_value] = out_tensor;
-            LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
-
+            auto out = associate_value_and_tensor(ctx, n->outputs()[0], add->getOutput(0));
+
+            LOG_DEBUG("Output tensor shape: " << out->getDimensions());
             return true;
         }
     }).pattern({
@@ -97,53 +98,53 @@ auto element_wise_registrations = RegisterNodeConversionPatterns()
             auto other = args[1].ITensor();
             auto scalar = args[2].unwrapToScalar().to<float>();
             auto sub = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kSUB, self, other, scalar);
+
+            TRTORCH_CHECK(sub, "Unable to create sub layer from node: " << *n);
+
             sub->setName(util::node_info(n).c_str());
-            auto out_value = n->outputs()[0];
-            auto out_tensor = sub->getOutput(0);
-            out_tensor->setName(out_value->debugName().c_str());
-            ctx->value_tensor_map[out_value] = out_tensor;
-            LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
-
+            auto out = associate_value_and_tensor(ctx, n->outputs()[0], sub->getOutput(0));
+
+            LOG_DEBUG("Output tensor shape: " << out->getDimensions());
             return true;
         }
     }).pattern({
-        "aten::div(Tensor self, Tensor other) -> Tensor",
-        [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-            // Should implement self / other
-            auto self = args[0].ITensor();
-            auto other = args[1].ITensor();
-            auto div = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kDIV, self, other);
-            div->setName(util::node_info(n).c_str());
-            auto out_value = n->outputs()[0];
-            auto out_tensor = div->getOutput(0);
-            out_tensor->setName(out_value->debugName().c_str());
-            ctx->value_tensor_map[out_value] = out_tensor;
-            LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
-
+        "aten::div(Tensor self, Tensor other) -> Tensor",
+        [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
+            // Should implement self / other
+            auto self = args[0].ITensor();
+            auto other = args[1].ITensor();
+            auto div = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kDIV, self, other);
+
+            TRTORCH_CHECK(div, "Unable to create div layer from node: " << *n);
+
+            div->setName(util::node_info(n).c_str());
+            auto out = associate_value_and_tensor(ctx, n->outputs()[0], div->getOutput(0));
+
+            LOG_DEBUG("Output tensor shape: " << out->getDimensions());
             return true;
         }
     }).pattern({
-        "aten::mul(Tensor self, Tensor other) -> Tensor",
-        [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-            // Should implement self * other
-            auto self = args[0].ITensor();
-            auto other = args[1].ITensor();
-            auto mul = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kPROD, self, other);
-            mul->setName(util::node_info(n).c_str());
-            auto out_value = n->outputs()[0];
-            auto out_tensor = mul->getOutput(0);
-            out_tensor->setName(out_value->debugName().c_str());
-            ctx->value_tensor_map[out_value] = out_tensor;
-            LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
-
+        "aten::mul(Tensor self, Tensor other) -> Tensor",
+        [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
+            // Should implement self * other
+            auto self = args[0].ITensor();
+            auto other = args[1].ITensor();
+            auto mul = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kPROD, self, other);
+
+            TRTORCH_CHECK(mul, "Unable to create mul layer from node: " << *n);
+
+            mul->setName(util::node_info(n).c_str());
+            auto out = associate_value_and_tensor(ctx, n->outputs()[0], mul->getOutput(0));
+
+            LOG_DEBUG("Output tensor shape: " << out->getDimensions());
             return true;
         }
     });
-
-
+
+
 } // namespace
 } // namespace impl
 } // namespace converters
 } // namespace conversion
 } // namespace core
-} // trtorch
+} // trtorch
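The core refactor in this commit replaces five lines of per-converter output bookkeeping with a single call to associate_value_and_tensor. The helper's definition is not part of this diff; a minimal sketch, assuming it simply folds in the boilerplate the commit deletes (naming the output tensor after the TorchScript value and recording the mapping in the conversion context), might look like:

// Sketch only: the real definition lives elsewhere in the tree. Inferred
// from the boilerplate this commit removes from each converter lambda.
nvinfer1::ITensor* associate_value_and_tensor(ConversionCtx* ctx,
                                              const torch::jit::Value* value,
                                              nvinfer1::ITensor* tensor) {
    // Name the TensorRT tensor after the JIT value for easier debugging
    tensor->setName(value->debugName().c_str());
    // Record the Value -> ITensor mapping so later converters can resolve inputs
    ctx->value_tensor_map[value] = tensor;
    // Return the tensor so call sites can keep chaining off it
    return tensor;
}

Returning the tensor is what lets each converter keep its LOG_DEBUG on the output shape as a one-liner.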
0 commit comments
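The TRTORCH_CHECK(layer, ...) guards added before each setName call also matter: add_elementwise can hand back a null layer pointer if TensorRT rejects the op, and previously the very next setName would have dereferenced it. The macro itself is defined in the project's utilities, not in this diff; as a rough, purely illustrative stand-in (CHECK_SKETCH is a hypothetical name, not the project's macro), the streaming-check pattern it relies on can be built like:

#include <sstream>
#include <stdexcept>

// Illustrative stand-in only; the real TRTORCH_CHECK throws the project's
// own error type and attaches file/line context.
#define CHECK_SKETCH(cond, msg)                       \
    do {                                              \
        if (!(cond)) {                                \
            std::ostringstream _ss;                   \
            _ss << msg;                               \
            throw std::runtime_error(_ss.str());      \
        }                                             \
    } while (0)

// Usage mirrors the calls added in this commit:
//   CHECK_SKETCH(add, "Unable to create add layer from node: " << *n);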