Skip to content

Commit 7330e84

Browse files
committed
[ml service] Add Flare as a new nnfw type
- Implemented support for the 'flare' nnfw type in the ML service API
- Included test cases to validate Flare functionality

Signed-off-by: hyunil park <[email protected]>
1 parent e4d25a8 commit 7330e84

File tree

5 files changed

+81
-16
lines changed

5 files changed

+81
-16
lines changed

c/include/nnstreamer-tizen-internal.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,8 @@
1919
extern "C" {
2020
#endif /* __cplusplus */
2121

22+
#define ML_NNFW_TYPE_FLARE 23 /**< FLARE framework. It will be moved to ml-api-common.h after the Tizen 10.0 M2 release. */
23+
2224
/**
2325
* @brief Callback for tensor data stream of machine-learning API.
2426
* @details Note that the buffer may be deallocated after the return and this is synchronously called. Thus, if you need the data afterwards, copy the data to another buffer and return fast. Do not spend too much time in the callback.

c/src/ml-api-inference-single.c

Lines changed: 22 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -114,6 +114,7 @@ static const char *ml_nnfw_subplugin_name[] = {
114114
[ML_NNFW_TYPE_QNN] = "qnn",
115115
[ML_NNFW_TYPE_LLAMACPP] = "llamacpp",
116116
[ML_NNFW_TYPE_TIZEN_HAL] = "tizen-hal",
117+
[ML_NNFW_TYPE_FLARE] = "flare",
117118
NULL
118119
};
119120

@@ -1058,6 +1059,12 @@ ml_single_open_custom (ml_single_h * single, ml_single_preset * info)
10581059
for (i = 0; i < num_models; i++)
10591060
g_strstrip (list_models[i]);
10601061

1062+
/**
1063+
* ML_NNFW_TYPE_FLARE is defined internally for now to avoid the ACR (API Change Request) process.
1064+
*/
1065+
if (info->fw_name && strcasecmp (info->fw_name, "flare") == 0) {
1066+
nnfw = ML_NNFW_TYPE_FLARE;
1067+
}
10611068
status = _ml_validate_model_file ((const char **) list_models, num_models,
10621069
&nnfw);
10631070
if (status != ML_ERROR_NONE) {
@@ -2073,7 +2080,21 @@ _ml_validate_model_file (const char *const *model,
20732080
file_ext[i] = g_ascii_strdown (pos, -1);
20742081
}
20752082

2076-
/** @todo Make sure num_models is correct for each nnfw type */
2083+
/**
2084+
* @todo Currently ML_NNFW_TYPE_FLARE is defined temporarily to avoid ACR.
2085+
* Move the ML_NNFW_TYPE_FLARE check into the switch statement below.
2086+
*/
2087+
if (*nnfw == ML_NNFW_TYPE_FLARE) {
2088+
if (!g_str_equal (file_ext[0], ".bin")) {
2089+
_ml_error_report
2090+
("Flare accepts .bin file only. Please support correct file extension. You have specified: \"%s\"",
2091+
file_ext[0]);
2092+
status = ML_ERROR_INVALID_PARAMETER;
2093+
}
2094+
goto done;
2095+
}
2096+
2097+
/** @todo Make sure num_models is correct for each nnfw type */
20772098
switch (*nnfw) {
20782099
case ML_NNFW_TYPE_NNFW:
20792100
case ML_NNFW_TYPE_TVM:

tests/capi/unittest_capi_service_extension.cc

Lines changed: 45 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99

1010
#include <gtest/gtest.h>
1111
#include <glib.h>
12-
12+
#include <iostream>
1313
#include <ml-api-service-private.h>
1414
#include <ml-api-service.h>
1515
#include "ml-api-service-extension.h"
@@ -394,9 +394,9 @@ _extension_test_imgclf (ml_service_h handle, gboolean is_pipeline)
394394
/**
395395
* @brief Macro to skip testcase if model file is not ready.
396396
*/
397-
#define skip_llamacpp_tc(tc_name) \
397+
#define skip_llm_tc(tc_name, model_name) \
398398
do { \
399-
g_autofree gchar *model_file = _get_model_path ("llama-2-7b-chat.Q2_K.gguf"); \
399+
g_autofree gchar *model_file = _get_model_path (model_name); \
400400
if (!g_file_test (model_file, G_FILE_TEST_EXISTS)) { \
401401
g_autofree gchar *msg = g_strdup_printf ( \
402402
"Skipping '%s' due to missing model file. " \
@@ -406,12 +406,12 @@ _extension_test_imgclf (ml_service_h handle, gboolean is_pipeline)
406406
} \
407407
} while (0)
408408

409+
409410
/**
410411
* @brief Callback function for scenario test.
411412
*/
412413
static void
413-
_extension_test_llamacpp_cb (
414-
ml_service_event_e event, ml_information_h event_data, void *user_data)
414+
_extension_test_llm_cb (ml_service_event_e event, ml_information_h event_data, void *user_data)
415415
{
416416
extension_test_data_s *tdata = (extension_test_data_s *) user_data;
417417
ml_tensors_data_h data = NULL;
@@ -429,7 +429,8 @@ _extension_test_llamacpp_cb (
429429
status = ml_tensors_data_get_tensor_data (data, 0U, &_raw, &_size);
430430
EXPECT_EQ (status, ML_ERROR_NONE);
431431

432-
g_print ("%s", (char *) _raw);
432+
std::cout.write (static_cast<const char *> (_raw), _size); /* write raw bytes so multi-byte UTF-8 (e.g. Korean) output is printed intact */
433+
std::cout.flush ();
433434

434435
if (tdata)
435436
tdata->received++;
@@ -443,23 +444,34 @@ _extension_test_llamacpp_cb (
443444
* @brief Internal function to run test with ml-service extension handle.
444445
*/
445446
static inline void
446-
_extension_test_llamacpp (const gchar *config, gboolean is_pipeline)
447+
_extension_test_llm (const gchar *config, gchar *input_file, guint sleep_us, gboolean is_pipeline)
447448
{
448449
extension_test_data_s *tdata;
449450
ml_service_h handle;
450451
ml_tensors_info_h info;
451452
ml_tensors_data_h input;
452453
int status;
454+
gsize len = 0;
455+
g_autofree gchar *contents = NULL;
453456

454-
const gchar input_text[] = "Hello my name is";
457+
if (input_file != NULL) {
458+
459+
g_autofree gchar *data_file = _get_data_path (input_file);
460+
ASSERT_TRUE (g_file_test (data_file, G_FILE_TEST_EXISTS));
461+
ASSERT_TRUE (g_file_get_contents (data_file, &contents, &len, NULL));
462+
} else {
463+
contents = g_strdup ("Hello my name is");
464+
len = strlen (contents);
465+
}
455466

456467
tdata = _create_test_data (is_pipeline);
457468
ASSERT_TRUE (tdata != NULL);
458469

459470
status = ml_service_new (config, &handle);
460471
ASSERT_EQ (status, ML_ERROR_NONE);
461472

462-
status = ml_service_set_event_cb (handle, _extension_test_llamacpp_cb, tdata);
473+
status = ml_service_set_event_cb (handle, _extension_test_llm_cb, tdata);
474+
463475
EXPECT_EQ (status, ML_ERROR_NONE);
464476

465477
/* Create and push input data. */
@@ -468,12 +480,12 @@ _extension_test_llamacpp (const gchar *config, gboolean is_pipeline)
468480

469481
ml_tensors_data_create (info, &input);
470482

471-
ml_tensors_data_set_tensor_data (input, 0U, input_text, strlen (input_text));
483+
ml_tensors_data_set_tensor_data (input, 0U, contents, len);
472484

473485
status = ml_service_request (handle, NULL, input);
474486
EXPECT_EQ (status, ML_ERROR_NONE);
475487

476-
g_usleep (5000000U);
488+
g_usleep (sleep_us);
477489
EXPECT_GT (tdata->received, 0);
478490

479491
/* Clear callback before releasing tdata. */
@@ -494,26 +506,44 @@ _extension_test_llamacpp (const gchar *config, gboolean is_pipeline)
494506
*/
495507
TEST (MLServiceExtension, scenarioConfigLlamacpp)
496508
{
497-
skip_llamacpp_tc ("scenarioConfigLlamacpp");
509+
skip_llm_tc ("scenarioConfigLlamacpp", "llama-2-7b-chat.Q2_K.gguf");
498510

499511
g_autofree gchar *config = get_config_path ("config_single_llamacpp.conf");
500512

501-
_extension_test_llamacpp (config, FALSE);
513+
_extension_test_llm (config, NULL, 5000000U, FALSE);
502514
}
503515

504516
/**
505517
* @brief Usage of ml-service extension API.
506518
*/
507519
TEST (MLServiceExtension, scenarioConfigLlamacppAsync)
508520
{
509-
skip_llamacpp_tc ("scenarioConfigLlamacppAsync");
521+
skip_llm_tc ("scenarioConfigLlamacppAsync", "llama-2-7b-chat.Q2_K.gguf");
510522

511523
g_autofree gchar *config = get_config_path ("config_single_llamacpp_async.conf");
512524

513-
_extension_test_llamacpp (config, FALSE);
525+
_extension_test_llm (config, NULL, 5000000U, FALSE);
526+
}
527+
528+
/**
529+
* @brief Usage of ml-service extension API.
530+
*
531+
* Note: For this test, copy the model file to the current directory.
532+
* Since Flare shares common helper functions with llamacpp, its test is temporarily placed under ENABLE_LLAMACPP.
533+
*/
534+
TEST (MLServiceExtension, scenarioConfigFlare)
535+
{
536+
g_autofree gchar *input_file = g_strdup ("flare_input.txt");
537+
538+
skip_llm_tc ("scenarioConfigFlare", "sflare_if_4bit_3b.bin");
539+
540+
g_autofree gchar *config = get_config_path ("config_single_flare.conf");
541+
542+
_extension_test_llm (config, input_file, 40000000U, FALSE);
514543
}
515544
#endif /* ENABLE_LLAMACPP */
516545

546+
517547
/**
518548
* @brief Usage of ml-service extension API.
519549
*/
Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
{
2+
"single" :
3+
{
4+
"framework" : "flare",
5+
"model" : ["../tests/test_models/models/sflare_if_4bit_3b.bin"],
6+
"adapter" : ["../tests/test_models/models/history_lora.bin"],
7+
"custom" : "tokenizer_path:../tests/test_models/data/tokenizer.json,backend:CPU,output_size:1024,model_type:3B,data_type:W4A32",
8+
"invoke_dynamic" : "true",
9+
"invoke_async" : "false"
10+
}
11+
}
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
<|begin_of_text|><|turn_start|>System\n<|turn_end|>\n<|turn_start|>User\nYou are a summarization expert.Please read the provided <Text> carefully and summarize it in 3 sentences in English. The summary should comprehensively cover the entire content of the original text and be written with the same meaning as the source material.<|begin_of_text|><|turn_start|>System<|turn_end|>국제 연구 컨소시엄이 차세대 재생 에너지 기술 개발에 박차를 가하고 있습니다. 스탠포드 대학을 주도로 MIT, KAIST 등 12개 기관이 참여한 이 프로젝트는 기존 실리콘 기반 태양광 패널의 한계를 극복하기 위해 페로브스카이트-실리콘 탠덤 구조를 적용했습니다. 실험 결과, 이 신소재는 31.2%의 광변환 효율을 달성하며 상용화 가능성을 입증했는데, 이는 기존 단일 접합 태양전지의 최대 효율(26%)을 크게 상회하는 수치입니다. 연구팀은 나노스케일 광포획 구조와 양자점 기술을 접목해 적층형 셀의 내구성을 향상시키는 데 성공했으며, 2025년 상용화를 목표로 대량 생산 공정 개발에 착수했습니다. 현재 캘리포니아의 모하비 사막과 독일 바이에른 지역에 설치된 시범 플랜트에서 실외 테스트가 진행 중이며, 초기 데이터는 일사량 변동 환경에서도 안정적인 성능을 보여주고 있습니다. 산업계 전문가들은 이 기술이 2030년까지 전 세계 태양광 시장의 35%를 점유할 것으로 예상하며, 화석 연료 의존도를 12% 감소시킬 수 있을 것으로 내다보고 있습니다. 특히 개발도상국을 위한 저비용 버전 개발도 병행 중인데, 필리핀과 케냐에서 2024년 말 시범 설치될 예정입니다. 한편 유럽 에너지 위원회는 이 기술이 RE100 목표 달성 시기를 5년 앞당길 수 있을 것으로 평가하며 추가 지원 방안을 검토 중입니다.\n<|turn_end|>\n<|turn_start|>Assistant\n"

0 commit comments

Comments
 (0)