
Commit af12039

Revert "chore: revert back deprecated changes"
This reverts commit d736499.
1 parent: fdd7d42

9 files changed: 0 additions, 17 deletions

core/conversion/conversionctx/ConversionCtx.cpp

Lines changed: 0 additions & 2 deletions
@@ -20,7 +20,6 @@ std::ostream& operator<<(std::ostream& os, const BuilderSettings& s) {
     << "\n Debuggable Engine: " << s.debug \
     << "\n GPU ID: " << s.device.gpu_id \
     << "\n Allow GPU Fallback (if running on DLA): " << s.device.allow_gpu_fallback \
-    << "\n Min Timing Iterations: " << s.num_min_timing_iters \
     << "\n Avg Timing Iterations: " << s.num_avg_timing_iters \
     << "\n Max Workspace Size: " << s.workspace_size;

@@ -104,7 +103,6 @@ ConversionCtx::ConversionCtx(BuilderSettings build_settings)
     cfg->setFlag(nvinfer1::BuilderFlag::kGPU_FALLBACK);
   }

-  cfg->setMinTimingIterations(settings.num_min_timing_iters);
   cfg->setAvgTimingIterations(settings.num_avg_timing_iters);
   cfg->setMaxWorkspaceSize(settings.workspace_size);
   cfg->setDefaultDeviceType(settings.device.device_type);
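
For context, these builder settings have direct counterparts in TensorRT's Python API, which deprecated its minimum-timing setting as well. Below is a minimal sketch of what remains configured after this change; the logger level, workspace size, and device type values are illustrative assumptions, not values taken from this repository.

# Sketch of the TensorRT Python analog of the builder-config calls above;
# after this change only the averaging timing iterations are configured.
import tensorrt as trt

logger = trt.Logger(trt.Logger.WARNING)          # assumed log level
builder = trt.Builder(logger)
config = builder.create_builder_config()

config.set_flag(trt.BuilderFlag.GPU_FALLBACK)    # mirrors kGPU_FALLBACK above
config.avg_timing_iterations = 1                 # setAvgTimingIterations() equivalent
config.max_workspace_size = 1 << 30              # setMaxWorkspaceSize() equivalent; 1 GiB is illustrative
config.default_device_type = trt.DeviceType.GPU  # setDefaultDeviceType() equivalent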

cpp/bin/torchtrtc/main.cpp

Lines changed: 0 additions & 2 deletions
@@ -113,8 +113,6 @@ int main(int argc, char** argv) {
       "Whether to treat input file as a serialized TensorRT engine and embed it into a TorchScript module (device spec must be provided)",
       {"embed-engine"});

-  args::ValueFlag<uint64_t> num_min_timing_iters(
-      parser, "num_iters", "Number of minimization timing iterations used to select kernels", {"num-min-timing-iter"});
   args::ValueFlag<uint64_t> num_avg_timing_iters(
       parser, "num_iters", "Number of averaging timing iterations used to select kernels", {"num-avg-timing-iters"});
   args::ValueFlag<uint64_t> workspace_size(

cpp/src/compile_spec.cpp

Lines changed: 0 additions & 1 deletion
@@ -81,7 +81,6 @@ torchtrt::core::CompileSpec to_internal_compile_spec(CompileSpec external) {

   internal.convert_info.engine_settings.device.gpu_id = external.device.gpu_id;
   internal.convert_info.engine_settings.device.dla_core = external.device.dla_core;
-  internal.convert_info.engine_settings.num_min_timing_iters = external.num_min_timing_iters;
   internal.convert_info.engine_settings.num_avg_timing_iters = external.num_avg_timing_iters;
   internal.convert_info.engine_settings.workspace_size = external.workspace_size;

docsrc/tutorials/use_from_pytorch.rst

Lines changed: 0 additions & 1 deletion
@@ -45,7 +45,6 @@ at the documentation for the Torch-TensorRT ``TensorRTCompileSpec`` API.
                 "allow_gpu_fallback": True
             },
             "capability": torch_tensorrt.EngineCapability.default,
-            "num_min_timing_iters": 2,
             "num_avg_timing_iters": 1,
         })
     }
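
With the key removed, a TensorRTCompileSpec for the to_backend path is built from the remaining entries. The following is a minimal sketch; only the "capability", "num_avg_timing_iters", and "allow_gpu_fallback" entries come from this diff, while the input shape, precision, remaining device sub-keys, and the `scripted_model` handle are illustrative assumptions.

import torch
import torch_tensorrt

spec = {
    "forward": torch_tensorrt.ts.TensorRTCompileSpec({
        "inputs": [torch_tensorrt.Input([1, 3, 224, 224])],  # assumed shape
        "enabled_precisions": {torch.float},                  # assumed precision
        "device": {
            "device_type": torch_tensorrt.DeviceType.GPU,
            "gpu_id": 0,
            "dla_core": 0,
            "allow_gpu_fallback": True,
        },
        "capability": torch_tensorrt.EngineCapability.default,
        "num_avg_timing_iters": 1,
        # "num_min_timing_iters" is gone after this commit
    })
}

# `scripted_model` is assumed to be an existing torch.jit.ScriptModule.
trt_model = torch._C._jit_to_backend("tensorrt", scripted_model, spec)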

py/torch_tensorrt/csrc/tensorrt_classes.cpp

Lines changed: 0 additions & 3 deletions
@@ -221,8 +221,6 @@ core::CompileSpec CompileSpec::toInternalCompileSpec() {
   info.convert_info.engine_settings.truncate_long_and_double = truncate_long_and_double;

   info.convert_info.engine_settings.capability = toTRTEngineCapability(capability);
-  TORCHTRT_CHECK(num_min_timing_iters >= 0, "num_min_timing_iters must be 0 or greater");
-  info.convert_info.engine_settings.num_min_timing_iters = num_min_timing_iters;
   TORCHTRT_CHECK(num_avg_timing_iters >= 0, "num_avg_timing_iters must be 0 or greater");
   info.convert_info.engine_settings.num_avg_timing_iters = num_avg_timing_iters;
   TORCHTRT_CHECK(workspace_size >= 0, "workspace_size must be 0 or greater");

@@ -249,7 +247,6 @@ std::string CompileSpec::stringify() {
   ss << " \"Debug\": " << debug << std::endl;
   ss << " \"Device\": " << device.to_str() << std::endl;
   ss << " \"Engine Capability\": " << to_str(capability) << std::endl;
-  ss << " \"Num Min Timing Iters\": " << num_min_timing_iters << std::endl;
   ss << " \"Num Avg Timing Iters\": " << num_avg_timing_iters << std::endl;
   ss << " \"Workspace Size\": " << workspace_size << std::endl;
   ss << " \"Truncate long and double\": " << truncate_long_and_double << std::endl;

py/torch_tensorrt/csrc/tensorrt_classes.h

Lines changed: 0 additions & 2 deletions
@@ -147,7 +147,6 @@ struct CompileSpec : torch::CustomClassHolder {
   ADD_FIELD_GET_SET(refit, bool);
   ADD_FIELD_GET_SET(debug, bool);
   ADD_ENUM_GET_SET(capability, EngineCapability, static_cast<int64_t>(EngineCapability::kSAFE_DLA));
-  ADD_FIELD_GET_SET(num_min_timing_iters, int64_t);
   ADD_FIELD_GET_SET(num_avg_timing_iters, int64_t);
   ADD_FIELD_GET_SET(workspace_size, int64_t);
   ADD_FIELD_GET_SET(truncate_long_and_double, bool);

@@ -166,7 +165,6 @@
   Device device;
   TorchFallback torch_fallback;
   EngineCapability capability = EngineCapability::kDEFAULT;
-  int64_t num_min_timing_iters = 2;
   int64_t num_avg_timing_iters = 1;
   int64_t workspace_size = 0;
 };

py/torch_tensorrt/ts/_compile_spec.py

Lines changed: 0 additions & 4 deletions
@@ -203,10 +203,6 @@ def _parse_compile_spec(compile_spec: Dict[str, Any]) -> _ts_C.CompileSpec:
         assert isinstance(compile_spec["capability"], _enums.EngineCapability)
         info.capability = compile_spec["capability"]

-    if "num_min_timing_iters" in compile_spec:
-        assert type(compile_spec["num_min_timing_iters"]) is int
-        info.num_min_timing_iters = compile_spec["num_min_timing_iters"]
-
     if "num_avg_timing_iters" in compile_spec:
         assert type(compile_spec["num_avg_timing_iters"]) is int
         info.num_avg_timing_iters = compile_spec["num_avg_timing_iters"]

py/torch_tensorrt/ts/_compiler.py

Lines changed: 0 additions & 1 deletion
@@ -18,7 +18,6 @@ def compile(module: torch.jit.ScriptModule,
             refit=False,
             debug=False,
             capability=_enums.EngineCapability.default,
-            num_min_timing_iters=2,
             num_avg_timing_iters=1,
             workspace_size=0,
             calibrator=None,
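
With the parameter dropped from the compile signature, kernel timing is tuned through num_avg_timing_iters alone, and passing num_min_timing_iters would now be rejected as an unexpected keyword argument. A hedged usage sketch follows; the module handle, input shape, and values are assumptions.

import torch
import torch_tensorrt

# `scripted_model` is assumed to be an existing torch.jit.ScriptModule.
trt_module = torch_tensorrt.ts.compile(
    scripted_model,
    inputs=[torch_tensorrt.Input((1, 3, 224, 224))],  # assumed shape
    num_avg_timing_iters=1,        # still accepted
    workspace_size=1 << 30,        # illustrative, 1 GiB
    # num_min_timing_iters=2,      # removed by this commit
)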

tests/py/test_to_backend_api.py

Lines changed: 0 additions & 1 deletion
@@ -26,7 +26,6 @@ def setUp(self):
                 "allow_gpu_fallback": True
             },
             "capability": torchtrt.EngineCapability.default,
-            "num_min_timing_iters": 2,
             "num_avg_timing_iters": 1,
             "disable_tf32": False,
         })
