@@ -128,9 +128,13 @@ bool ApplyProfileShapesFromProviderOptions(std::vector<nvinfer1::IOptimizationPr
                                            std::unordered_map<std::string, std::vector<std::vector<int64_t>>>& profile_min_shapes,
                                            std::unordered_map<std::string, std::vector<std::vector<int64_t>>>& profile_max_shapes,
                                            std::unordered_map<std::string, std::vector<std::vector<int64_t>>>& profile_opt_shapes,
-                                           ShapeRangesMap& input_explicit_shape_ranges) {
+                                           ShapeRangesMap& input_explicit_shape_ranges,
+                                           const OrtLogger* logger) {
   if (trt_profiles.size() == 0) {
-    // LOGS_DEFAULT(WARNING) << "[TensorRT EP] Number of optimization profiles should be greater than 0, but it's 0.";
+    std::string message = "[TensorRT EP] Number of optimization profiles should be greater than 0, but it's 0.";
+    Ort::ThrowOnError(g_ort_api->Logger_LogMessage(logger,
+                                                   OrtLoggingLevel::ORT_LOGGING_LEVEL_WARNING,
+                                                   message.c_str(), ORT_FILE, __LINE__, __FUNCTION__));
     return false;
   }
 
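// Note (not part of this diff): the hunk above swaps a commented-out LOGS_DEFAULT macro for the
// public OrtApi logging surface, building the message string unconditionally before calling
// Logger_LogMessage. Below is a minimal sketch of how such a call could be guarded so the string
// concatenation is skipped when verbose output is filtered out; Logger_GetLoggingSeverityLevel
// belongs to the same OrtApi logger API, but treat this guard as an illustration, not something
// this change adds.
OrtLoggingLevel logger_level = OrtLoggingLevel::ORT_LOGGING_LEVEL_WARNING;
Ort::ThrowOnError(g_ort_api->Logger_GetLoggingSeverityLevel(logger, &logger_level));
if (logger_level <= OrtLoggingLevel::ORT_LOGGING_LEVEL_VERBOSE) {
  // Only pay for building the message when the logger would actually emit it.
  std::string message = "[TensorRT EP] Begin to apply profile shapes ...";
  Ort::ThrowOnError(g_ort_api->Logger_LogMessage(logger,
                                                 OrtLoggingLevel::ORT_LOGGING_LEVEL_VERBOSE,
                                                 message.c_str(), ORT_FILE, __LINE__, __FUNCTION__));
}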
@@ -144,8 +148,11 @@ bool ApplyProfileShapesFromProviderOptions(std::vector<nvinfer1::IOptimizationPr
     input_explicit_shape_ranges[input_name] = inner_map;
   }
 
-  // LOGS_DEFAULT(VERBOSE) << "[TensorRT EP] Begin to apply profile shapes ...";
-  // LOGS_DEFAULT(VERBOSE) << "[TensorRT EP] Input tensor name is '" << input_name << "', number of profiles found is " << trt_profiles.size();
+  std::string message = "[TensorRT EP] Begin to apply profile shapes ...\n" +
+                        std::string("[TensorRT EP] Input tensor name is '") + input_name + std::string("', number of profiles found is ") + std::to_string(trt_profiles.size());
+  Ort::ThrowOnError(g_ort_api->Logger_LogMessage(logger,
+                                                 OrtLoggingLevel::ORT_LOGGING_LEVEL_VERBOSE,
+                                                 message.c_str(), ORT_FILE, __LINE__, __FUNCTION__));
 
   for (size_t i = 0; i < trt_profiles.size(); i++) {
     nvinfer1::Dims dims = input->getDimensions();
@@ -158,7 +165,10 @@ bool ApplyProfileShapesFromProviderOptions(std::vector<nvinfer1::IOptimizationPr
       int shape_size = nb_dims == 0 ? 1 : static_cast<int>(profile_min_shapes[input_name][i].size());
       std::vector<int32_t> shapes_min(shape_size), shapes_opt(shape_size), shapes_max(shape_size);
 
-      // LOGS_DEFAULT(VERBOSE) << "[TensorRT EP] shape size of this shape tensor is " << shape_size;
+      std::string message = "[TensorRT EP] shape size of this shape tensor is " + std::to_string(shape_size);
+      Ort::ThrowOnError(g_ort_api->Logger_LogMessage(logger,
+                                                     OrtLoggingLevel::ORT_LOGGING_LEVEL_VERBOSE,
+                                                     message.c_str(), ORT_FILE, __LINE__, __FUNCTION__));
 
       for (int j = 0; j < shape_size; j++) {
         auto min_value = profile_min_shapes[input_name][i][j];
@@ -167,9 +177,12 @@ bool ApplyProfileShapesFromProviderOptions(std::vector<nvinfer1::IOptimizationPr
         shapes_min[j] = static_cast<int32_t>(min_value);
         shapes_max[j] = static_cast<int32_t>(max_value);
         shapes_opt[j] = static_cast<int32_t>(opt_value);
-        // LOGS_DEFAULT(VERBOSE) << "[TensorRT EP] shapes_min.d[" << j << "] is " << shapes_min[j];
-        // LOGS_DEFAULT(VERBOSE) << "[TensorRT EP] shapes_max.d[" << j << "] is " << shapes_max[j];
-        // LOGS_DEFAULT(VERBOSE) << "[TensorRT EP] shapes_opt.d[" << j << "] is " << shapes_opt[j];
+        std::string message = "[TensorRT EP] shapes_min.d[" + std::to_string(j) + std::string("] is ") + std::to_string(shapes_min[j]) + std::string("\n") +
+                              std::string("[TensorRT EP] shapes_max.d[") + std::to_string(j) + std::string("] is ") + std::to_string(shapes_max[j]) + std::string("\n") +
+                              std::string("[TensorRT EP] shapes_opt.d[") + std::to_string(j) + std::string("] is ") + std::to_string(shapes_opt[j]);
+        Ort::ThrowOnError(g_ort_api->Logger_LogMessage(logger,
+                                                       OrtLoggingLevel::ORT_LOGGING_LEVEL_VERBOSE,
+                                                       message.c_str(), ORT_FILE, __LINE__, __FUNCTION__));
 
         if (input_explicit_shape_ranges[input_name].find(j) == input_explicit_shape_ranges[input_name].end()) {
           std::vector<std::vector<int64_t>> profile_vector(trt_profiles.size());
@@ -191,7 +204,10 @@ bool ApplyProfileShapesFromProviderOptions(std::vector<nvinfer1::IOptimizationPr
       dims_max.nbDims = nb_dims;
       dims_opt.nbDims = nb_dims;
 
-      // LOGS_DEFAULT(VERBOSE) << "[TensorRT EP] number of dimension of this execution tensor is " << nb_dims;
+      std::string message = "[TensorRT EP] number of dimension of this execution tensor is " + std::to_string(nb_dims);
+      Ort::ThrowOnError(g_ort_api->Logger_LogMessage(logger,
+                                                     OrtLoggingLevel::ORT_LOGGING_LEVEL_VERBOSE,
+                                                     message.c_str(), ORT_FILE, __LINE__, __FUNCTION__));
 
       for (int j = 0; j < nb_dims; j++) {
         if (dims.d[j] == -1) {
@@ -201,9 +217,13 @@ bool ApplyProfileShapesFromProviderOptions(std::vector<nvinfer1::IOptimizationPr
           dims_min.d[j] = static_cast<int32_t>(min_value);
           dims_max.d[j] = static_cast<int32_t>(max_value);
           dims_opt.d[j] = static_cast<int32_t>(opt_value);
-          // LOGS_DEFAULT(VERBOSE) << "[TensorRT EP] dims_min.d[" << j << "] is " << dims_min.d[j];
-          // LOGS_DEFAULT(VERBOSE) << "[TensorRT EP] dims_max.d[" << j << "] is " << dims_max.d[j];
-          // LOGS_DEFAULT(VERBOSE) << "[TensorRT EP] dims_opt.d[" << j << "] is " << dims_opt.d[j];
+
+          std::string message = "[TensorRT EP] dims_min.d[" + std::to_string(j) + std::string("] is ") + std::to_string(dims_min.d[j]) + std::string("\n") +
+                                std::string("[TensorRT EP] dims_max.d[") + std::to_string(j) + std::string("] is ") + std::to_string(dims_max.d[j]) + std::string("\n") +
+                                std::string("[TensorRT EP] dims_opt.d[") + std::to_string(j) + std::string("] is ") + std::to_string(dims_opt.d[j]);
+          Ort::ThrowOnError(g_ort_api->Logger_LogMessage(logger,
+                                                         OrtLoggingLevel::ORT_LOGGING_LEVEL_VERBOSE,
+                                                         message.c_str(), ORT_FILE, __LINE__, __FUNCTION__));
 
           if (input_explicit_shape_ranges[input_name].find(j) == input_explicit_shape_ranges[input_name].end()) {
             std::vector<std::vector<int64_t>> profile_vector(trt_profiles.size());
@@ -1178,7 +1198,7 @@ OrtStatus* TensorrtExecutionProvider::CreateNodeComputeInfoFromGraph(OrtEp* this
       if (has_explicit_profile) {
         apply_explicit_profile =
             ApplyProfileShapesFromProviderOptions(trt_profiles, input, profile_min_shapes_, profile_max_shapes_,
-                                                  profile_opt_shapes_, input_explicit_shape_ranges);
+                                                  profile_opt_shapes_, input_explicit_shape_ranges, &ep->logger_);
       }
 
       // If no explicit optimization profile is being applied, TRT EP will later set min/max/opt shape values based on
@@ -1270,8 +1290,10 @@ OrtStatus* TensorrtExecutionProvider::CreateNodeComputeInfoFromGraph(OrtEp* this
 #pragma warning(pop)
 #endif
       int8_enable_ = false;
-      // LOGS_DEFAULT(WARNING)
-      //     << "[TensorRT EP] ORT_TENSORRT_INT8_ENABLE is set, but platform doesn't support fast native int8";
+      std::string message = "[TensorRT EP] ORT_TENSORRT_INT8_ENABLE is set, but platform doesn't support fast native int8";
+      Ort::ThrowOnError(ep->ort_api.Logger_LogMessage(&ep->logger_,
+                                                      OrtLoggingLevel::ORT_LOGGING_LEVEL_WARNING,
+                                                      message.c_str(), ORT_FILE, __LINE__, __FUNCTION__));
     }
   }
 
@@ -1356,9 +1378,12 @@ OrtStatus* TensorrtExecutionProvider::CreateNodeComputeInfoFromGraph(OrtEp* this
 #if NV_TENSORRT_MAJOR == 8 && NV_TENSORRT_MINOR == 5
   if (build_heuristics_enable_) {
     trt_config->setFlag(nvinfer1::BuilderFlag::kENABLE_TACTIC_HEURISTIC);
-    LOGS_DEFAULT(WARNING) << "[TensorRT EP] Builder heuristics are enabled."
-                          << " For TRT > 8.5, trt_build_heuristics_enable is deprecated, please set builder "
-                             "optimization level as 2 to enable builder heuristics.";
+    std::string message = "[TensorRT EP] Builder heuristics are enabled." +
+                          std::string(" For TRT > 8.5, trt_build_heuristics_enable is deprecated, please set builder ") +
+                          std::string("optimization level as 2 to enable builder heuristics.");
+    Ort::ThrowOnError(ep->ort_api.Logger_LogMessage(&ep->logger_,
+                                                    OrtLoggingLevel::ORT_LOGGING_LEVEL_WARNING,
+                                                    message.c_str(), ORT_FILE, __LINE__, __FUNCTION__));
   }
 #elif NV_TENSORRT_MAJOR == 8 && NV_TENSORRT_MINOR > 5 || NV_TENSORRT_MAJOR > 8
   // for TRT 8.6 onwards, heuristic-based tactic option is automatically enabled by setting builder optimization level 2
@@ -1399,10 +1424,16 @@ OrtStatus* TensorrtExecutionProvider::CreateNodeComputeInfoFromGraph(OrtEp* this
   }
 #else
   if (builder_optimization_level_ != 3) {
-    LOGS_DEFAULT(WARNING) << "[TensorRT EP] Builder optimization level can only be used on TRT 8.6 onwards!";
+    std::string message = "[TensorRT EP] Builder optimization level can only be used on TRT 8.6 onwards!";
+    Ort::ThrowOnError(ep->ort_api.Logger_LogMessage(&ep->logger_,
+                                                    OrtLoggingLevel::ORT_LOGGING_LEVEL_WARNING,
+                                                    message.c_str(), ORT_FILE, __LINE__, __FUNCTION__));
   }
   if (auxiliary_streams_ >= 0) {
-    LOGS_DEFAULT(WARNING) << "[TensorRT EP] Auxiliary streams can only be set on TRT 8.6 onwards!";
+    std::string message = "[TensorRT EP] Auxiliary streams can only be set on TRT 8.6 onwards!";
+    Ort::ThrowOnError(ep->ort_api.Logger_LogMessage(&ep->logger_,
+                                                    OrtLoggingLevel::ORT_LOGGING_LEVEL_WARNING,
+                                                    message.c_str(), ORT_FILE, __LINE__, __FUNCTION__));
   }
 #endif
 
@@ -1419,7 +1450,10 @@ OrtStatus* TensorrtExecutionProvider::CreateNodeComputeInfoFromGraph(OrtEp* this
                                                     OrtLoggingLevel::ORT_LOGGING_LEVEL_VERBOSE,
                                                     message.c_str(), ORT_FILE, __LINE__, __FUNCTION__));
 #else
-    LOGS_DEFAULT(WARNING) << "[TensorRT EP] weight-stripped engines can only be used on TRT 10.0 onwards!";
+    std::string message = "[TensorRT EP] weight-stripped engines can only be used on TRT 10.0 onwards!";
+    Ort::ThrowOnError(ep->ort_api.Logger_LogMessage(&ep->logger_,
+                                                    OrtLoggingLevel::ORT_LOGGING_LEVEL_WARNING,
+                                                    message.c_str(), ORT_FILE, __LINE__, __FUNCTION__));
 #endif
   }
 
@@ -1613,10 +1647,11 @@ OrtStatus* TensorrtExecutionProvider::CreateNodeComputeInfoFromGraph(OrtEp* this
     }
     if (detailed_build_log_) {
       auto engine_build_stop = std::chrono::steady_clock::now();
-      // LOGS_DEFAULT(INFO)
-      //     << "TensorRT engine build for " << trt_node_name_with_precision << " took: "
-      //     << std::chrono::duration_cast<std::chrono::milliseconds>(engine_build_stop - engine_build_start).count()
-      //     << "ms" << std::endl;
+      std::string message = "TensorRT engine build for " + trt_node_name_with_precision + std::string(" took: ") +
+                            std::to_string(std::chrono::duration_cast<std::chrono::milliseconds>(engine_build_stop - engine_build_start).count()) + std::string("ms");
+      Ort::ThrowOnError(ep->ort_api.Logger_LogMessage(&ep->logger_,
+                                                      OrtLoggingLevel::ORT_LOGGING_LEVEL_INFO,
+                                                      message.c_str(), ORT_FILE, __LINE__, __FUNCTION__));
     }
     if (engine_cache_enable_) {
       // Serialize engine profile if it has explicit profiles
@@ -1642,8 +1677,10 @@ OrtStatus* TensorrtExecutionProvider::CreateNodeComputeInfoFromGraph(OrtEp* this
                                                             OrtLoggingLevel::ORT_LOGGING_LEVEL_VERBOSE,
                                                             message.c_str(), ORT_FILE, __LINE__, __FUNCTION__));
           } else {
-            // LOGS_DEFAULT(WARNING)
-            //     << "[TensorRT EP] Engine cache encryption function is not found. No cache is written to disk";
+            std::string message = "[TensorRT EP] Engine cache encryption function is not found. No cache is written to disk";
+            Ort::ThrowOnError(ep->ort_api.Logger_LogMessage(&ep->logger_,
+                                                            OrtLoggingLevel::ORT_LOGGING_LEVEL_WARNING,
+                                                            message.c_str(), ORT_FILE, __LINE__, __FUNCTION__));
           }
         } else {
           std::ofstream file(engine_cache_path, std::ios::binary | std::ios::out);
@@ -3013,8 +3050,10 @@ OrtStatus* TRTEpNodeComputeInfo::ComputeImpl(OrtNodeComputeInfo* this_ptr, void*
                                                            OrtLoggingLevel::ORT_LOGGING_LEVEL_VERBOSE,
                                                            message.c_str(), ORT_FILE, __LINE__, __FUNCTION__));
           } else {
-            // LOGS_DEFAULT(WARNING)
-            //     << "[TensorRT EP] Engine cache encryption function is not found. No cache is written to disk";
+            std::string message = "[TensorRT EP] Engine cache encryption function is not found. No cache is written to disk";
+            Ort::ThrowOnError(ep.ort_api.Logger_LogMessage(&ep.logger_,
+                                                           OrtLoggingLevel::ORT_LOGGING_LEVEL_WARNING,
+                                                           message.c_str(), ORT_FILE, __LINE__, __FUNCTION__));
           }
         } else {
           std::ofstream file(engine_cache_path, std::ios::binary | std::ios::out);
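// Note (not part of this diff): every former LOGS_DEFAULT site above now repeats the same
// five-argument Logger_LogMessage call wrapped in Ort::ThrowOnError. A hypothetical follow-up
// could funnel that through one macro so ORT_FILE/__LINE__/__FUNCTION__ still expand at the
// call site; the name TRT_EP_LOG and the assumption that callers pass a std::string are
// illustrative only.
#define TRT_EP_LOG(ort_api, logger, level, msg)                                    \
  Ort::ThrowOnError((ort_api).Logger_LogMessage((logger), (level), (msg).c_str(),  \
                                                ORT_FILE, __LINE__, __FUNCTION__))

// Example mirroring one of the warning sites above:
//   std::string message = "[TensorRT EP] Auxiliary streams can only be set on TRT 8.6 onwards!";
//   TRT_EP_LOG(ep->ort_api, &ep->logger_, OrtLoggingLevel::ORT_LOGGING_LEVEL_WARNING, message);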