Commit 1d9200d

Merge branch 'main' into aten_size_fix
2 parents: 10aaaf4 + 3467511

161 files changed: +2,024 additions, −562 deletions


.circleci/config.yml

Lines changed: 2 additions & 2 deletions

@@ -263,7 +263,7 @@ commands:
     parameters:
       torch-build:
         type: string
-        default: "2.0.0.dev20230120+cu117"
+        default: "2.0.0.dev20230129+cu117"
       torch-build-index:
         type: string
         default: "https://download.pytorch.org/whl/nightly/cu117"
@@ -1026,7 +1026,7 @@ parameters:
   # Nightly platform config
   torch-build:
     type: string
-    default: "2.0.0.dev20230120+cu117"
+    default: "2.0.0.dev20230129+cu117"
   torch-build-index:
     type: string
     default: "https://download.pytorch.org/whl/nightly/cu117"

.github/code-owners.yml

Lines changed: 0 additions & 1 deletion

@@ -109,5 +109,4 @@
 "component: fx":
   - "frank-wei"
   - "yinghai"
-  - "842974287"
   - "wushirong"

core/conversion/converters/converter_util.cpp

Lines changed: 4 additions & 4 deletions

@@ -85,10 +85,10 @@ nvinfer1::ILayer* add_elementwise(
     const std::string& name) {
   if (self->getType() == nvinfer1::DataType::kFLOAT && other->getType() == nvinfer1::DataType::kINT32) {
     LOG_DEBUG("Type mismatch, casting other to " << self->getType());
-    other = castITensor(ctx, other, self->getType());
+    other = castITensor(ctx, other, self->getType(), name);
   } else if (self->getType() == nvinfer1::DataType::kINT32 && other->getType() == nvinfer1::DataType::kFLOAT) {
     LOG_DEBUG("Type mismatch, casting self to " << other->getType());
-    self = castITensor(ctx, self, other->getType());
+    self = castITensor(ctx, self, other->getType(), name);
   }
   // ensure self to have larger number of dimension
   bool swapSelfOther = false;
@@ -106,13 +106,13 @@ nvinfer1::ILayer* add_elementwise(
     LOG_DEBUG(
         "Element-wise op type promotion adding cast from " << self->getType() << " to " << promo_type << " for layer "
                                                            << name);
-    self = castITensor(ctx, self, promo_type);
+    self = castITensor(ctx, self, promo_type, name);
   }
   if (other->getType() != promo_type) {
     LOG_DEBUG(
         "Element-wise op type promotion adding cast from " << other->getType() << " to " << promo_type
                                                            << " for layer " << name);
-    other = castITensor(ctx, other, promo_type);
+    other = castITensor(ctx, other, promo_type, name);
   }
 }
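The change above threads the converter's name through to castITensor so each inserted cast layer gets a unique, caller-scoped layer name. For reference, a cast helper of this shape is typically built on TensorRT's identity layer; the following is a minimal sketch under that assumption (the actual castITensor in converter_util.cpp takes a ConversionCtx* and may format names differently):

#include <string>
#include "NvInfer.h"

// Minimal sketch of a castITensor-style helper (assumed implementation detail).
nvinfer1::ITensor* cast_itensor_sketch(
    nvinfer1::INetworkDefinition* net,
    nvinfer1::ITensor* tensor,
    nvinfer1::DataType dtype,
    const std::string& caller_name) {
  if (tensor->getType() == dtype) {
    return tensor; // already the requested type; no layer needed
  }
  // TensorRT expresses a cast as an identity layer with a forced output type.
  auto* id_layer = net->addIdentity(*tensor);
  id_layer->setOutputType(0, dtype);
  // Scoping the layer name to the caller is the point of the diff above:
  // two converters casting the same tensor no longer collide on layer names.
  std::string name = caller_name + " [cast to " + std::to_string(static_cast<int>(dtype)) + "]";
  id_layer->setName(name.c_str());
  return id_layer->getOutput(0);
}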

core/conversion/converters/impl/conv_deconv.cpp

Lines changed: 6 additions & 1 deletion

@@ -13,7 +13,12 @@ namespace {
 bool add_conv_deconv(ConversionCtx* ctx, const torch::jit::Node* n, args& args) {
   // Input to conv/deconv
   auto in = args[0].ITensor();
-
+  if (in->getType() == nvinfer1::DataType::kINT32) {
+    LOG_WARNING(
+        "Found type " << in->getType() << " in aten::convolution, casting to " << nvinfer1::DataType::kFLOAT
+                      << " for compatibility.");
+    in = castITensor(ctx, in, nvinfer1::DataType::kFLOAT);
+  }
   // Conv / deconv parameters
   auto stride = util::toDims(args[3].unwrapToIntList());
   auto padding = util::toDims(args[4].unwrapToIntList());
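TensorRT convolution layers require floating-point inputs, so the guard above casts any Int32 input before the layer is added. A hedged sketch of the same pattern factored into a reusable helper (the helper name is hypothetical; the committed code inlines the check):

// Hypothetical helper illustrating the guard pattern above; the commit inlines
// this logic directly in add_conv_deconv rather than factoring it out.
nvinfer1::ITensor* ensure_float_input(ConversionCtx* ctx, nvinfer1::ITensor* in, const char* op_name) {
  if (in->getType() == nvinfer1::DataType::kINT32) {
    LOG_WARNING(
        "Found type " << in->getType() << " in " << op_name << ", casting to "
                      << nvinfer1::DataType::kFLOAT << " for compatibility.");
    in = castITensor(ctx, in, nvinfer1::DataType::kFLOAT);
  }
  return in;
}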

core/conversion/converters/impl/select.cpp

Lines changed: 20 additions & 2 deletions

@@ -149,8 +149,26 @@ auto select_registrations TORCHTRT_UNUSED =
   // IShuffleLayer removes redundant dimensions
   auto shuffle_layer = ctx->net->addShuffle(*out);
   TORCHTRT_CHECK(shuffle_layer, "Unable to create shuffle layer from node: " << *n);
-  shuffle_layer->setReshapeDimensions(
-      util::squeezeDims(out->getDimensions(), dim, !ctx->input_is_dynamic));
+
+  auto num_zero_dimensions =
+      util::validateInputDimsForShuffle(out->getDimensions(), ctx->input_is_dynamic);
+  TORCHTRT_CHECK(
+      num_zero_dimensions >= 0,
+      "Detected multiple zero dimensions and dynamic shape in aten::select, "
+          << "which is not currently supported in TensorRT");
+
+  // If the input is not dynamic, and the tensor is empty (has some dimension 0)
+  // Then 0 is no longer a placeholder for inherited dimensions
+  if (!ctx->input_is_dynamic && (num_zero_dimensions > 0)) {
+    LOG_DEBUG("Setting zero as a true dimension (not placeholder) in aten::select");
+    shuffle_layer->setZeroIsPlaceholder(false);
+  }
+
+  shuffle_layer->setReshapeDimensions(util::squeezeDims(
+      out->getDimensions(),
+      dim,
+      ctx->input_is_dynamic,
+      ctx->input_is_dynamic && (num_zero_dimensions > 0)));
   shuffle_layer->setName(util::node_info(n).c_str());
   out = shuffle_layer->getOutput(0);
 }
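The subtlety here is TensorRT's reshape semantics: in setReshapeDimensions, a 0 normally means "inherit this dimension from the input", which is wrong for genuinely empty tensors. A short sketch of the two interpretations (assumes an existing IShuffleLayer* shuffle_layer):

// Default semantics: 0 is a placeholder that copies the corresponding input dim.
// For an input of shape [8, 4, 16], this reshape yields [8, 4, 16] again.
shuffle_layer->setReshapeDimensions(nvinfer1::Dims{3, {0, 0, 16}});

// For an empty tensor (e.g. shape [0, 4, 16]) the leading 0 must be kept as a
// real extent, so the placeholder interpretation is disabled first; this is
// exactly what the new aten::select path does when num_zero_dimensions > 0.
shuffle_layer->setZeroIsPlaceholder(false);
shuffle_layer->setReshapeDimensions(nvinfer1::Dims{2, {0, 16}}); // literal zero dim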

core/ir/Input.cpp

Lines changed: 17 additions & 2 deletions

@@ -69,11 +69,16 @@ bool valid_input_dtype(nvinfer1::DataType dtype) {
   }
 }

+bool valid_input_domain(std::vector<double> domain) {
+  return (domain.size() == 2) && (domain[0] < domain[1]);
+}
+
 Input::Input(
     std::vector<int64_t> shape,
     at::ScalarType dtype,
     nvinfer1::TensorFormat format,
-    bool dtype_is_user_defined) {
+    bool dtype_is_user_defined,
+    std::vector<double> tensor_domain) {
   if (shape.size() > 5) {
     LOG_WARNING("Verify that this dim size is accepted");
   }
@@ -93,6 +98,11 @@
       << "), Torch-TensorRT only supports contiguous format (NCHW) except with input type Float32 where channel last (NHWC) is also supported");
   this->format = format;
   this->dtype_is_user_defined = dtype_is_user_defined;
+
+  TORCHTRT_CHECK(
+      valid_input_domain(tensor_domain),
+      "Unsupported tensor domain: [" << tensor_domain[0] << ", " << tensor_domain[1] << ")");
+  this->tensor_domain = tensor_domain;
 }

 Input::Input(
@@ -101,7 +111,8 @@ Input::Input(
     std::vector<int64_t> max_shape,
     at::ScalarType dtype,
     nvinfer1::TensorFormat format,
-    bool dtype_is_user_defined) {
+    bool dtype_is_user_defined,
+    std::vector<double> tensor_domain) {
   if (min_shape.size() > 5 || opt_shape.size() > 5 || max_shape.size() > 5) {
     LOG_WARNING("Verify that this dim size is accepted");
   }
@@ -146,6 +157,10 @@ Input::Input(
       << "), Torch-TensorRT only supports contiguous format (NCHW) except with input type Float32 where channel last (NHWC) is also supported");
   this->format = format;
   this->dtype_is_user_defined = dtype_is_user_defined;
+  TORCHTRT_CHECK(
+      valid_input_domain(tensor_domain),
+      "Unsupported tensor domain: [" << tensor_domain[0] << ", " << tensor_domain[1] << ")");
+  this->tensor_domain = tensor_domain;
 }

 std::ostream& operator<<(std::ostream& os, const Input& input) {

core/ir/ir.h

Lines changed: 5 additions & 2 deletions

@@ -31,19 +31,22 @@ struct Input : torch::CustomClassHolder {
       std::vector<int64_t> shape,
       at::ScalarType dtype = at::kFloat,
       nvinfer1::TensorFormat format = nvinfer1::TensorFormat::kLINEAR,
-      bool dtype_is_user_defined = false);
+      bool dtype_is_user_defined = false,
+      std::vector<double> tensor_domain = std::vector<double>{0, 2});
   Input(
       std::vector<int64_t> min_shape,
       std::vector<int64_t> opt_shape,
       std::vector<int64_t> max_shape,
       at::ScalarType dtype = at::kFloat,
       nvinfer1::TensorFormat format = nvinfer1::TensorFormat::kLINEAR,
-      bool dtype_is_used_defined = false);
+      bool dtype_is_user_defined = false,
+      std::vector<double> tensor_domain = std::vector<double>{0, 2});

   friend std::ostream& operator<<(std::ostream& os, const Input& input);

   bool input_is_dynamic = false;
   bool dtype_is_user_defined = false;
+  std::vector<double> tensor_domain;
   nvinfer1::Dims input_shape;
   nvinfer1::Dims min;
   nvinfer1::Dims max;
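For context, tensor_domain records the range of values an input is expected to take, and the error message in Input.cpp shows the default {0, 2} reads as the half-open interval [0, 2). A hedged usage sketch (exact namespace qualification is an assumption):

// Hedged usage sketch; assumes the torch_tensorrt::core::ir namespace.
// A domain must be a two-element, strictly increasing pair [min, max).
auto img_input = torch_tensorrt::core::ir::Input(
    /*shape=*/{1, 3, 224, 224},
    /*dtype=*/at::kFloat,
    /*format=*/nvinfer1::TensorFormat::kLINEAR,
    /*dtype_is_user_defined=*/true,
    /*tensor_domain=*/std::vector<double>{-1.0, 1.0}); // e.g. normalized image data

// Domains like {1.0, 1.0} (empty interval) or {0.0} (wrong arity) fail
// valid_input_domain and trip the TORCHTRT_CHECK in the constructor.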

core/lowering/lowering.cpp

Lines changed: 1 addition & 0 deletions

@@ -144,6 +144,7 @@ void LowerGraph(std::shared_ptr<torch::jit::Graph>& g, std::vector<torch::jit::I
   passes::UnpackAndCastFull(g, lower_info.getGPUDeviceString());
   passes::ReplaceScalarImplicit(g);
   passes::RewriteInputsWithParams(g, params);
+  passes::ReplaceAtenPad(g);
   LOG_GRAPH(*g);
 }


core/lowering/passes/BUILD

Lines changed: 1 addition & 0 deletions

@@ -28,6 +28,7 @@ cc_library(
         "remove_dropout.cpp",
         "remove_nops.cpp",
         "remove_unnecessary_casts.cpp",
+        "replace_aten_pad.cpp",
         "rewrite_inputs_with_params.cpp",
         "silu_to_sigmoid_multiplication.cpp",
         "unpack_addmm.cpp",

core/lowering/passes/CMakeLists.txt

Lines changed: 1 addition & 0 deletions

@@ -15,6 +15,7 @@ target_sources(${lib_name}
     "${CMAKE_CURRENT_SOURCE_DIR}/remove_nops.cpp"
     "${CMAKE_CURRENT_SOURCE_DIR}/remove_set_attrs.cpp"
     "${CMAKE_CURRENT_SOURCE_DIR}/remove_unnecessary_casts.cpp"
+    "${CMAKE_CURRENT_SOURCE_DIR}/replace_aten_pad.cpp"
    "${CMAKE_CURRENT_SOURCE_DIR}/silu_to_sigmoid_multiplication.cpp"
    "${CMAKE_CURRENT_SOURCE_DIR}/unpack_addmm.cpp"
    "${CMAKE_CURRENT_SOURCE_DIR}/unpack_batch_norm.cpp"
