@@ -391,6 +391,21 @@ _extension_test_imgclf (ml_service_h handle, gboolean is_pipeline)
 }
 
 #if defined(ENABLE_LLAMACPP)
+/**
+ * @brief Macro to skip the testcase if the model file is not ready.
+ */
+#define skip_llamacpp_tc(tc_name) \
+  do { \
+    g_autofree gchar *model_file = _get_model_path ("llama-2-7b-chat.Q2_K.gguf"); \
+    if (!g_file_test (model_file, G_FILE_TEST_EXISTS)) { \
+      g_autofree gchar *msg = g_strdup_printf ( \
+          "Skipping '%s' due to missing model file. " \
+          "Please download model file from https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF.", \
+          tc_name); \
+      GTEST_SKIP () << msg; \
+    } \
+  } while (0)
+
 /**
  * @brief Callback function for scenario test.
  */
@@ -428,9 +443,10 @@ _extension_test_llamacpp_cb (
  * @brief Internal function to run test with ml-service extension handle.
  */
 static inline void
-_extension_test_llamacpp (ml_service_h handle, gboolean is_pipeline)
+_extension_test_llamacpp (const gchar *config, gboolean is_pipeline)
 {
   extension_test_data_s *tdata;
+  ml_service_h handle;
   ml_tensors_info_h info;
   ml_tensors_data_h input;
   int status;
@@ -440,6 +456,9 @@ _extension_test_llamacpp (ml_service_h handle, gboolean is_pipeline)
   tdata = _create_test_data (is_pipeline);
   ASSERT_TRUE (tdata != NULL);
 
+  status = ml_service_new (config, &handle);
+  ASSERT_EQ (status, ML_ERROR_NONE);
+
   status = ml_service_set_event_cb (handle, _extension_test_llamacpp_cb, tdata);
   EXPECT_EQ (status, ML_ERROR_NONE);
 
@@ -461,6 +480,9 @@ _extension_test_llamacpp (ml_service_h handle, gboolean is_pipeline)
   status = ml_service_set_event_cb (handle, NULL, NULL);
   EXPECT_EQ (status, ML_ERROR_NONE);
 
+  status = ml_service_destroy (handle);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+
   ml_tensors_info_destroy (info);
   ml_tensors_data_destroy (input);
 
@@ -472,25 +494,23 @@ _extension_test_llamacpp (ml_service_h handle, gboolean is_pipeline)
  */
 TEST (MLServiceExtension, scenarioConfigLlamacpp)
 {
-  ml_service_h handle;
-  int status;
-
-  g_autofree gchar *model_file = _get_model_path ("llama-2-7b-chat.Q2_K.gguf");
-  if (!g_file_test (model_file, G_FILE_TEST_EXISTS)) {
-    g_critical ("Skipping scenarioConfigLlamacpp test due to missing model file. "
-        "Please download model file from https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF");
-    return;
-  }
+  skip_llamacpp_tc ("scenarioConfigLlamacpp");
 
   g_autofree gchar *config = get_config_path ("config_single_llamacpp.conf");
 
-  status = ml_service_new (config, &handle);
-  ASSERT_EQ (status, ML_ERROR_NONE);
+  _extension_test_llamacpp (config, FALSE);
+}
+
+/**
+ * @brief Usage of ml-service extension API.
+ */
+TEST (MLServiceExtension, scenarioConfigLlamacppAsync)
+{
+  skip_llamacpp_tc ("scenarioConfigLlamacppAsync");
 
-  _extension_test_llamacpp (handle, FALSE);
+  g_autofree gchar *config = get_config_path ("config_single_llamacpp_async.conf");
 
-  status = ml_service_destroy (handle);
-  EXPECT_EQ (status, ML_ERROR_NONE);
+  _extension_test_llamacpp (config, FALSE);
 }
 #endif /* ENABLE_LLAMACPP */
 
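With this change the helper takes a configuration path and owns the service handle, so each new llamacpp scenario reduces to the skip guard, a config lookup, and one helper call. A minimal sketch of a hypothetical follow-up testcase, assuming an illustrative config filename that is not part of this change:

/**
 * @brief Usage of ml-service extension API (hypothetical example).
 */
TEST (MLServiceExtension, scenarioConfigLlamacppExample)
{
  /* Skip when the gguf model file has not been downloaded. */
  skip_llamacpp_tc ("scenarioConfigLlamacppExample");

  /* Illustrative config name; any single-shot llamacpp config follows the same pattern. */
  g_autofree gchar *config = get_config_path ("config_single_llamacpp_example.conf");

  /* The helper creates the ml-service handle from the config, runs the scenario, and destroys the handle. */
  _extension_test_llamacpp (config, FALSE);
}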