Commit e4d25a8
[Test] new test for async invoke

Add a new testcase for async invoke and revise the llamacpp test code.

Signed-off-by: Jaeyun Jung <jy1210.jung@samsung.com>

1 parent e5a43e0

2 files changed: +45 −15

tests/capi/unittest_capi_service_extension.cc (35 additions, 15 deletions)
@@ -391,6 +391,21 @@ _extension_test_imgclf (ml_service_h handle, gboolean is_pipeline)
 }
 
 #if defined(ENABLE_LLAMACPP)
+/**
+ * @brief Macro to skip testcase if model file is not ready.
+ */
+#define skip_llamacpp_tc(tc_name) \
+  do { \
+    g_autofree gchar *model_file = _get_model_path ("llama-2-7b-chat.Q2_K.gguf"); \
+    if (!g_file_test (model_file, G_FILE_TEST_EXISTS)) { \
+      g_autofree gchar *msg = g_strdup_printf ( \
+          "Skipping '%s' due to missing model file. " \
+          "Please download model file from https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF.", \
+          tc_name); \
+      GTEST_SKIP () << msg; \
+    } \
+  } while (0)
+
 /**
  * @brief Callback function for scenario test.
  */
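The macro replaces the earlier g_critical-and-return pattern (removed in the last hunk of this file), so a missing model now shows up as a skipped case in the GTest report instead of a silent pass. A minimal usage sketch, with a hypothetical test name:

/* Hypothetical test name, for illustration only. The do { ... } while (0)
 * wrapper lets the macro act as a single statement, and GTEST_SKIP ()
 * returns from the enclosing test body, so nothing after it runs. */
TEST (MLServiceExtension, anyLlamacppCase)
{
  skip_llamacpp_tc ("anyLlamacppCase");
  /* Reached only when llama-2-7b-chat.Q2_K.gguf is present. */
}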
@@ -428,9 +443,10 @@ _extension_test_llamacpp_cb (
  * @brief Internal function to run test with ml-service extension handle.
  */
 static inline void
-_extension_test_llamacpp (ml_service_h handle, gboolean is_pipeline)
+_extension_test_llamacpp (const gchar *config, gboolean is_pipeline)
 {
   extension_test_data_s *tdata;
+  ml_service_h handle;
   ml_tensors_info_h info;
   ml_tensors_data_h input;
   int status;
@@ -440,6 +456,9 @@ _extension_test_llamacpp (ml_service_h handle, gboolean is_pipeline)
   tdata = _create_test_data (is_pipeline);
   ASSERT_TRUE (tdata != NULL);
 
+  status = ml_service_new (config, &handle);
+  ASSERT_EQ (status, ML_ERROR_NONE);
+
   status = ml_service_set_event_cb (handle, _extension_test_llamacpp_cb, tdata);
   EXPECT_EQ (status, ML_ERROR_NONE);
 
@@ -461,6 +480,9 @@ _extension_test_llamacpp (ml_service_h handle, gboolean is_pipeline)
   status = ml_service_set_event_cb (handle, NULL, NULL);
   EXPECT_EQ (status, ML_ERROR_NONE);
 
+  status = ml_service_destroy (handle);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+
   ml_tensors_info_destroy (info);
   ml_tensors_data_destroy (input);
 
@@ -472,25 +494,23 @@ _extension_test_llamacpp (ml_service_h handle, gboolean is_pipeline)
  */
 TEST (MLServiceExtension, scenarioConfigLlamacpp)
 {
-  ml_service_h handle;
-  int status;
-
-  g_autofree gchar *model_file = _get_model_path ("llama-2-7b-chat.Q2_K.gguf");
-  if (!g_file_test (model_file, G_FILE_TEST_EXISTS)) {
-    g_critical ("Skipping scenarioConfigLlamacpp test due to missing model file. "
-        "Please download model file from https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF");
-    return;
-  }
+  skip_llamacpp_tc ("scenarioConfigLlamacpp");
 
   g_autofree gchar *config = get_config_path ("config_single_llamacpp.conf");
 
-  status = ml_service_new (config, &handle);
-  ASSERT_EQ (status, ML_ERROR_NONE);
+  _extension_test_llamacpp (config, FALSE);
+}
+
+/**
+ * @brief Usage of ml-service extension API.
+ */
+TEST (MLServiceExtension, scenarioConfigLlamacppAsync)
+{
+  skip_llamacpp_tc ("scenarioConfigLlamacppAsync");
 
-  _extension_test_llamacpp (handle, FALSE);
+  g_autofree gchar *config = get_config_path ("config_single_llamacpp_async.conf");
 
-  status = ml_service_destroy (handle);
-  EXPECT_EQ (status, ML_ERROR_NONE);
+  _extension_test_llamacpp (config, FALSE);
 }
 #endif /* ENABLE_LLAMACPP */
 
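Taken together, the hunks above move the service-handle lifecycle out of the individual TEST bodies and into the helper, which now takes a config path instead of a ready-made handle. A condensed sketch of the resulting flow, with the elided middle being the existing invoke/wait logic that this commit leaves unchanged:

static inline void
_extension_test_llamacpp (const gchar *config, gboolean is_pipeline)
{
  ml_service_h handle;
  int status;

  /* The helper now opens the service from the given config... */
  status = ml_service_new (config, &handle);
  ASSERT_EQ (status, ML_ERROR_NONE);

  /* ...registers _extension_test_llamacpp_cb, sends input, waits for output... */

  /* ...and closes the handle itself before cleanup. */
  status = ml_service_destroy (handle);
  EXPECT_EQ (status, ML_ERROR_NONE);
}

Each test case therefore reduces to resolving a config path and calling the helper, which is what makes adding the async variant below a three-line change.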

config_single_llamacpp_async.conf, the new config referenced by scenarioConfigLlamacppAsync (10 additions, 0 deletions)

@@ -0,0 +1,10 @@
+{
+  "single" :
+  {
+    "framework" : "llamacpp",
+    "model" : ["../tests/test_models/models/llama-2-7b-chat.Q2_K.gguf"],
+    "custom" : "num_predict:32",
+    "invoke_dynamic" : "true",
+    "invoke_async" : "true"
+  }
+}
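With "invoke_async" set to "true", generated output is expected to arrive through the callback registered with ml_service_set_event_cb () rather than as a single blocking invoke result. A hedged sketch of such a callback, following the general ml-service event pattern — the event constant and the "data" key below are taken from the public ml-service API, and the test's actual _extension_test_llamacpp_cb may do more than this:

static void
_event_cb (ml_service_event_e event, ml_information_h event_data, void *user_data)
{
  ml_tensors_data_h data = NULL;

  switch (event) {
    case ML_SERVICE_EVENT_NEW_DATA:
      /* Each asynchronous result arrives as a new-data event whose
       * ml_information_h carries the tensors-data handle under "data". */
      if (ml_information_get (event_data, "data", (void **) &data) == ML_ERROR_NONE) {
        /* ... validate the generated tokens in the tensor buffer ... */
      }
      break;
    default:
      break;
  }
}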
