Skip to content

Commit 65bad43

Browse files
committed
[ml service] Add Flare as a new nnfw type
- Implemented support for 'flare' nnfw type in ML service API. - Included test cases to validate flare functionality. Signed-off-by: hyunil park <hyunil46.park@samsung.com>
1 parent 9583757 commit 65bad43

File tree

5 files changed

+85
-14
lines changed

5 files changed

+85
-14
lines changed

c/include/nnstreamer-tizen-internal.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,8 @@
1919
extern "C" {
2020
#endif /* __cplusplus */
2121

22+
#define ML_NNFW_TYPE_FLARE 23 /**< FLARE framework */
23+
2224
/**
2325
* @brief Constructs the pipeline (GStreamer + NNStreamer).
2426
* @details This function is to construct the pipeline without checking the permission in platform internally. See ml_pipeline_construct() for the details.

c/src/ml-api-inference-single.c

Lines changed: 24 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -114,6 +114,7 @@ static const char *ml_nnfw_subplugin_name[] = {
114114
[ML_NNFW_TYPE_QNN] = "qnn",
115115
[ML_NNFW_TYPE_LLAMACPP] = "llamacpp",
116116
[ML_NNFW_TYPE_TIZEN_HAL] = "tizen-hal",
117+
[ML_NNFW_TYPE_FLARE] = "flare",
117118
NULL
118119
};
119120

@@ -783,7 +784,7 @@ ml_single_set_info_in_handle (ml_single_h single, gboolean is_input,
783784
ml_single_get_gst_info (single_h, is_input, &gst_info);
784785
if (single_h->format == _NNS_TENSOR_FORMAT_FLEXIBLE) {
785786
gst_info.format = single_h->format;
786-
gst_info.num_tensors = 1U; /* TODO: Consider multiple input tensors filter */
787+
gst_info.num_tensors = 1U; /* TODO: Consider multiple input tensors filter */
787788
}
788789
_ml_tensors_info_create_from_gst (&info, &gst_info);
789790

@@ -979,6 +980,12 @@ ml_single_open_custom (ml_single_h * single, ml_single_preset * info)
979980
for (i = 0; i < num_models; i++)
980981
g_strstrip (list_models[i]);
981982

983+
/**
984+
* Currently ML_NNFW_TYPE_FLARE is defined temporarily to avoid ACR.
985+
*/
986+
if (info->fw_name && strcasecmp (info->fw_name, "flare") == 0) {
987+
nnfw = ML_NNFW_TYPE_FLARE;
988+
}
982989
status = _ml_validate_model_file ((const char **) list_models, num_models,
983990
&nnfw);
984991
if (status != ML_ERROR_NONE) {
@@ -1097,7 +1104,7 @@ ml_single_open_custom (ml_single_h * single, ml_single_preset * info)
10971104
}
10981105

10991106
/* handle flexible single */
1100-
/**
1107+
/**
11011108
* Set invoke_dynamic as TRUE if the given nnfw do invoke_dynamic
11021109
*
11031110
* if (info->nnfw == ML_NNFW_TYPE_EXECUTORCH_LLAMA || info->nnfw == ML_NNFW_TYPE_LLAMACPP) {
@@ -1984,7 +1991,21 @@ _ml_validate_model_file (const char *const *model,
19841991
file_ext[i] = g_ascii_strdown (pos, -1);
19851992
}
19861993

1987-
/** @todo Make sure num_models is correct for each nnfw type */
1994+
/**
1995+
* @todo Currently ML_NNFW_TYPE_FLARE is defined temporarily to avoid ACR.
1996+
* Move checking ML_NNFW_TYPE_FLARE to below switch statement.
1997+
*/
1998+
if (*nnfw == ML_NNFW_TYPE_FLARE) {
1999+
if (!g_str_equal (file_ext[0], ".bin")) {
2000+
_ml_error_report
2001+
("Flare accepts .bin file only. Please support correct file extension. You have specified: \"%s\"",
2002+
file_ext[0]);
2003+
status = ML_ERROR_INVALID_PARAMETER;
2004+
}
2005+
goto done;
2006+
}
2007+
2008+
/** @todo Make sure num_models is correct for each nnfw type */
19882009
switch (*nnfw) {
19892010
case ML_NNFW_TYPE_NNFW:
19902011
case ML_NNFW_TYPE_TVM:

tests/capi/unittest_capi_service_extension.cc

Lines changed: 47 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99

1010
#include <gtest/gtest.h>
1111
#include <glib.h>
12-
12+
#include <iostream>
1313
#include <ml-api-service-private.h>
1414
#include <ml-api-service.h>
1515
#include "ml-api-service-extension.h"
@@ -394,8 +394,7 @@ _extension_test_imgclf (ml_service_h handle, gboolean is_pipeline)
394394
* @brief Callback function for scenario test.
395395
*/
396396
static void
397-
_extension_test_llamacpp_cb (
398-
ml_service_event_e event, ml_information_h event_data, void *user_data)
397+
_extension_test_llm_cb (ml_service_event_e event, ml_information_h event_data, void *user_data)
399398
{
400399
extension_test_data_s *tdata = (extension_test_data_s *) user_data;
401400
ml_tensors_data_h data = NULL;
@@ -413,7 +412,8 @@ _extension_test_llamacpp_cb (
413412
status = ml_tensors_data_get_tensor_data (data, 0U, &_raw, &_size);
414413
EXPECT_EQ (status, ML_ERROR_NONE);
415414

416-
g_print ("%s", (char *) _raw);
415+
std::cout.write (static_cast<const char *> (_raw), _size); /* korean output */
416+
std::cout.flush ();
417417

418418
if (tdata)
419419
tdata->received++;
@@ -427,19 +427,29 @@ _extension_test_llamacpp_cb (
427427
* @brief Internal function to run test with ml-service extension handle.
428428
*/
429429
static inline void
430-
_extension_test_llamacpp (ml_service_h handle, gboolean is_pipeline)
430+
_extension_test_llm (ml_service_h handle, gboolean is_pipeline, gchar *file_name, guint sleep_us)
431431
{
432432
extension_test_data_s *tdata;
433433
ml_tensors_info_h info;
434434
ml_tensors_data_h input;
435435
int status;
436+
gsize len = 0;
437+
g_autofree gchar *contents = NULL;
438+
439+
if (file_name != NULL) {
436440

437-
const gchar input_text[] = "Hello my name is";
441+
g_autofree gchar *data_file = _get_data_path (file_name);
442+
ASSERT_TRUE (g_file_test (data_file, G_FILE_TEST_EXISTS));
443+
ASSERT_TRUE (g_file_get_contents (data_file, &contents, &len, NULL));
444+
} else {
445+
contents = g_strdup ("Hello my name is");
446+
len = strlen (contents);
447+
}
438448

439449
tdata = _create_test_data (is_pipeline);
440450
ASSERT_TRUE (tdata != NULL);
441451

442-
status = ml_service_set_event_cb (handle, _extension_test_llamacpp_cb, tdata);
452+
status = ml_service_set_event_cb (handle, _extension_test_llm_cb, tdata);
443453
EXPECT_EQ (status, ML_ERROR_NONE);
444454

445455
/* Create and push input data. */
@@ -448,12 +458,12 @@ _extension_test_llamacpp (ml_service_h handle, gboolean is_pipeline)
448458

449459
ml_tensors_data_create (info, &input);
450460

451-
ml_tensors_data_set_tensor_data (input, 0U, input_text, strlen (input_text));
461+
ml_tensors_data_set_tensor_data (input, 0U, contents, len);
452462

453463
status = ml_service_request (handle, NULL, input);
454464
EXPECT_EQ (status, ML_ERROR_NONE);
455465

456-
g_usleep (5000000U);
466+
g_usleep (sleep_us);
457467
EXPECT_GT (tdata->received, 0);
458468

459469
/* Clear callback before releasing tdata. */
@@ -473,8 +483,8 @@ TEST (MLServiceExtension, scenarioConfigLlamacpp)
473483
{
474484
ml_service_h handle;
475485
int status;
476-
477486
g_autofree gchar *model_file = _get_model_path ("llama-2-7b-chat.Q2_K.gguf");
487+
478488
if (!g_file_test (model_file, G_FILE_TEST_EXISTS)) {
479489
g_critical ("Skipping scenarioConfigLlamacpp test due to missing model file. "
480490
"Please download model file from https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF");
@@ -486,7 +496,33 @@ TEST (MLServiceExtension, scenarioConfigLlamacpp)
486496
status = ml_service_new (config, &handle);
487497
ASSERT_EQ (status, ML_ERROR_NONE);
488498

489-
_extension_test_llamacpp (handle, FALSE);
499+
_extension_test_llm (handle, FALSE, NULL, 5000000U);
500+
501+
status = ml_service_destroy (handle);
502+
EXPECT_EQ (status, ML_ERROR_NONE);
503+
}
504+
505+
/**
506+
* @brief Usage of ml-service extension API.
507+
*/
508+
TEST (MLServiceExtension, scenarioConfigFlare)
509+
{
510+
ml_service_h handle;
511+
int status;
512+
g_autofree gchar *input_file = g_strdup ("flare_input.txt");
513+
g_autofree gchar *model_file = _get_model_path ("sflare_if_4bit_3b.bin");
514+
515+
if (!g_file_test (model_file, G_FILE_TEST_EXISTS)) {
516+
g_critical ("Skipping scenarioConfigFlare test due to missing model file.Please download model file");
517+
return;
518+
}
519+
520+
g_autofree gchar *config = get_config_path ("config_single_flare.conf");
521+
522+
status = ml_service_new (config, &handle);
523+
ASSERT_EQ (status, ML_ERROR_NONE);
524+
525+
_extension_test_llm (handle, FALSE, input_file, 40000000U);
490526

491527
status = ml_service_destroy (handle);
492528
EXPECT_EQ (status, ML_ERROR_NONE);
Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
{
2+
"single" :
3+
{
4+
"framework" : "flare",
5+
"model" : ["../tests/test_models/models/sflare_if_4bit_3b.bin"],
6+
"adapter" : ["../tests/test_models/models/history_lora.bin"],
7+
"custom" : "tokenizer_path:../tests/test_models/data/tokenizer.json,backend:CPU,output_size:1024,model_type:3B,data_type:W4A32",
8+
"invoke_dynamic" : "true",
9+
"invoke_async" : "false"
10+
}
11+
}
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
<|begin_of_text|><|turn_start|>System\n<|turn_end|>\n<|turn_start|>User\nYou are a summarization expert.Please read the provided <Text> carefully and summarize it in 3 sentences in English. The summary should comprehensively cover the entire content of the original text and be written with the same meaning as the source material.<|begin_of_text|><|turn_start|>System<|turn_end|>국제 연구 컨소시엄이 차세대 재생 에너지 기술 개발에 박차를 가하고 있습니다. 스탠포드 대학을 주도로 MIT, KAIST 등 12개 기관이 참여한 이 프로젝트는 기존 실리콘 기반 태양광 패널의 한계를 극복하기 위해 페로브스카이트-실리콘 탠덤 구조를 적용했습니다. 실험 결과, 이 신소재는 31.2%의 광변환 효율을 달성하며 상용화 가능성을 입증했는데, 이는 기존 단일 접합 태양전지의 최대 효율(26%)을 크게 상회하는 수치입니다. 연구팀은 나노스케일 광포획 구조와 양자점 기술을 접목해 적층형 셀의 내구성을 향상시키는 데 성공했으며, 2025년 상용화를 목표로 대량 생산 공정 개발에 착수했습니다. 현재 캘리포니아의 모하비 사막과 독일 바이에른 지역에 설치된 시범 플랜트에서 실외 테스트가 진행 중이며, 초기 데이터는 일사량 변동 환경에서도 안정적인 성능을 보여주고 있습니다. 산업계 전문가들은 이 기술이 2030년까지 전 세계 태양광 시장의 35%를 점유할 것으로 예상하며, 화석 연료 의존도를 12% 감소시킬 수 있을 것으로 내다보고 있습니다. 특히 개발도상국을 위한 저비용 버전 개발도 병행 중인데, 필리핀과 케냐에서 2024년 말 시범 설치될 예정입니다. 한편 유럽 에너지 위원회는 이 기술이 RE100 목표 달성 시기를 5년 앞당길 수 있을 것으로 평가하며 추가 지원 방안을 검토 중입니다.\n<|turn_end|>\n<|turn_start|>Assistant\n"

0 commit comments

Comments
 (0)