Commit e847175

[service] Add latency profiling support for single
- Let an ml service using the single conf set the latency profiling option. Example:

  ```json
  {
    "single": {
      ...
      "profile": "true"
    }
  }
  ```

- To keep this compatible with the pipeline way (`... latency=1 ...`), the key "latency" with the value "1" does the same thing.

Signed-off-by: Yongjoo Ahn <yongjoo1.ahn@samsung.com>
1 parent b80c7f5 commit e847175

File tree: 3 files changed (+28, -2 lines)

c/include/nnstreamer-tizen-internal.h

Lines changed: 2 additions & 1 deletion
```diff
@@ -51,7 +51,8 @@ typedef struct {
   int invoke_dynamic; /**< True for supporting invoke with flexible output. */
   int invoke_async; /**< The sub-plugin must support asynchronous output to use this option. If set to TRUE, the sub-plugin can generate multiple outputs asynchronously per single input. Otherwise, only synchronous single-output is expected and async callback is ignored. */
   ml_tensors_data_cb invoke_async_cb; /**< Callback function to be called when the sub-plugin generates an output asynchronously. This is only available when invoke_async is set to TRUE. */
-  void *invoke_async_pdata; /**< Private data to be passed to async callback. */
+  void *invoke_async_pdata; /**< Private data to be passed to async callback. */
+  int latency_mode; /**< 1 - log invoke latency, 0 (default) - do not log */
 } ml_single_preset;
 
 /**
```
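For context, a minimal sketch of how an application could request latency profiling through the option-based open path. `ml_single_open_with_option ()` and the `"profile"` key come from this commit and `nnstreamer-tizen-internal.h`; the header names and the `"models"` key are assumptions and may differ from the actual API surface.

```c
#include <glib.h>
#include <nnstreamer-single.h>          /* header name assumed */
#include <nnstreamer-tizen-internal.h>  /* declares ml_single_open_with_option () */

static int
open_single_with_profiling (ml_single_h * single, const char *model_path)
{
  ml_option_h option = NULL;
  int status;

  status = ml_option_create (&option);
  if (status != ML_ERROR_NONE)
    return status;

  /* "models" key name is an assumption; "profile" is the key handled in this commit. */
  ml_option_set (option, "models", g_strdup (model_path), g_free);
  ml_option_set (option, "profile", g_strdup ("true"), g_free);

  status = ml_single_open_with_option (single, option);
  ml_option_destroy (option);
  return status;
}
```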

c/src/ml-api-inference-single.c

Lines changed: 10 additions & 1 deletion
```diff
@@ -1162,7 +1162,7 @@ ml_single_open_custom (ml_single_h * single, ml_single_preset * info)
 
   g_object_set (filter_obj, "framework", fw_name, "accelerator", hw_name,
       "model", converted_models, "invoke-dynamic", single_h->invoke_dynamic,
-      "invoke-async", single_h->invoke_async, NULL);
+      "invoke-async", single_h->invoke_async, "latency", info->latency_mode, NULL);
   g_free (hw_name);
 
   if (info->custom_option) {
@@ -1318,6 +1318,15 @@ ml_single_open_with_option (ml_single_h * single, const ml_option_h option)
   if (ML_ERROR_NONE == ml_option_get (option, "async_data", &value)) {
     info.invoke_async_pdata = value;
   }
+  if (ML_ERROR_NONE == ml_option_get (option, "profile", &value)) {
+    if (g_ascii_strcasecmp ((gchar *) value, "true") == 0) {
+      info.latency_mode = 1;
+    } else if (g_ascii_strtoll ((gchar *) value, NULL, 10) > 0) {
+      info.latency_mode = 1;
+    } else {
+      info.latency_mode = 0;
+    }
+  }
 
   return ml_single_open_custom (single, &info);
 }
```
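As an illustration only (not part of the API), the value handling added above boils down to the following standalone check:

```c
#include <glib.h>

/* Mirrors the "profile" value handling in ml_single_open_with_option ():
 * "true" (case-insensitive) or any positive integer string enables latency
 * logging; everything else disables it. */
static int
profile_value_to_latency_mode (const gchar * value)
{
  if (g_ascii_strcasecmp (value, "true") == 0)
    return 1;
  if (g_ascii_strtoll (value, NULL, 10) > 0)
    return 1;
  return 0;
}

/* e.g. "true" -> 1, "TRUE" -> 1, "1" -> 1, "2" -> 1, "0" -> 0, "false" -> 0 */
```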

c/src/ml-api-service-extension.c

Lines changed: 16 additions & 0 deletions
```diff
@@ -343,6 +343,22 @@ _ml_extension_conf_parse_single (ml_service_s * mls, JsonObject * single)
         _ml_extension_destroy_tensors_info);
   }
 
+  /* parse latency profiling option - "profile": "true" or "1" */
+  if (json_object_has_member (single, "profile")) {
+    const gchar *profile = json_object_get_string_member (single, "profile");
+
+    if (STR_IS_VALID (profile))
+      ml_option_set (option, "profile", g_strdup (profile), g_free);
+  }
+
+  /* parse latency profiling option - "latency": "true" or "1" */
+  if (json_object_has_member (single, "latency")) {
+    const gchar *latency = json_object_get_string_member (single, "latency");
+
+    if (STR_IS_VALID (latency))
+      ml_option_set (option, "profile", g_strdup (latency), g_free);
+  }
+
   if (json_object_has_member (single, "custom")) {
     const gchar *custom = json_object_get_string_member (single, "custom");
```
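Putting it together, a sketch of a service configuration that enables profiling for a single handle; only the "single", "profile", and "latency" key handling is taken from this commit, and the "model" entry is illustrative:

```json
{
  "single": {
    "model": "/path/to/model.tflite",
    "profile": "true"
  }
}
```

Equivalently, `"latency": "1"` may be used, mirroring the pipeline description option (`... latency=1 ...`).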
