@@ -3180,6 +3180,52 @@ TEST (nnstreamer_capi_singleshot, invoke_ncnn)
 }
 #endif /* ENABLE_NCNN */
 
+/**
+ * @brief Test to show executorch_llama filter usage (disabled: requires local model and tokenizer files)
+ */
+TEST (nnstreamer_capi_singleshot, DISABLED_executorch_llama)
+{
+  int status;
+  ml_single_h single;
+
+  /* executorch_llama takes the .pte model and the tokenizer as a comma-separated path pair */
+  status = ml_single_open (&single, "/path/to/pte,/path/to/tokenizer", NULL,
+      NULL, ML_NNFW_TYPE_EXECUTORCH_LLAMA, ML_NNFW_HW_ANY);
+  ASSERT_EQ (status, ML_ERROR_NONE);
+
+  /* prepare input data: the prompt is a 1-D uint8 tensor holding the C string */
+  std::string prompt ("Once upon a time");
+  ml_tensors_info_h in_info;
+  ml_tensors_data_h in_data;
+  ml_tensor_dimension dim = { (unsigned int) prompt.size () + 1, 0 };
+
+  ml_tensors_info_create (&in_info);
+  ml_tensors_info_set_count (in_info, 1);
+  ml_tensors_info_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_UINT8);
+  ml_tensors_info_set_tensor_dimension (in_info, 0, dim);
+
+  ml_tensors_data_create (in_info, &in_data);
+  ml_tensors_data_set_tensor_data (in_data, 0, prompt.c_str (), prompt.size () + 1);
+
+  /* invoke */
+  ml_tensors_data_h out_data;
+  status = ml_single_invoke (single, in_data, &out_data);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+
+  char *result;
+  size_t result_size;
+  status = ml_tensors_data_get_tensor_data (out_data, 0U, (void **) &result, &result_size);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+
+  g_info ("result: %s", result);
+  /* the generated text should begin with the given prompt */
+  EXPECT_EQ (0, strncmp (result, prompt.c_str (), prompt.size ()));
+
+  /* free data */
+  ml_tensors_data_destroy (out_data);
+  ml_tensors_data_destroy (in_data);
+  ml_tensors_info_destroy (in_info);
+  ml_single_close (single);
+}
+
 /**
  * @brief Test NNStreamer single shot (custom filter)
  * @detail Run pipeline with custom filter with allocate in invoke, handle multi tensors.
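
For readers who want to try the same flow outside the GTest harness, below is a minimal standalone sketch in C. It mirrors the API calls of the test above; the header names (nnstreamer.h, nnstreamer-single.h) and the model/tokenizer paths are assumptions, not taken from the patch.

/* Minimal standalone sketch (not part of the patch) of the same single-shot
 * flow. Header names and file paths are assumptions; adjust to your setup. */
#include <stdio.h>
#include <nnstreamer.h>        /* ML C API headers, assumed install location */
#include <nnstreamer-single.h>

int
main (void)
{
  ml_single_h single;
  ml_tensors_info_h in_info;
  ml_tensors_data_h in_data, out_data;
  const char prompt[] = "Once upon a time";
  ml_tensor_dimension dim = { sizeof (prompt), 0 }; /* bytes incl. trailing NUL */
  char *result;
  size_t result_size;
  int status;

  /* hypothetical paths: .pte model and tokenizer, comma-separated as in the test */
  status = ml_single_open (&single, "/path/to/model.pte,/path/to/tokenizer",
      NULL, NULL, ML_NNFW_TYPE_EXECUTORCH_LLAMA, ML_NNFW_HW_ANY);
  if (status != ML_ERROR_NONE)
    return 1;

  /* the prompt goes in as a 1-D uint8 tensor holding the C string */
  ml_tensors_info_create (&in_info);
  ml_tensors_info_set_count (in_info, 1);
  ml_tensors_info_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_UINT8);
  ml_tensors_info_set_tensor_dimension (in_info, 0, dim);
  ml_tensors_data_create (in_info, &in_data);
  ml_tensors_data_set_tensor_data (in_data, 0, prompt, sizeof (prompt));

  status = ml_single_invoke (single, in_data, &out_data);
  if (status == ML_ERROR_NONE) {
    ml_tensors_data_get_tensor_data (out_data, 0U, (void **) &result, &result_size);
    printf ("result: %s\n", result); /* generated text, starting with the prompt */
    ml_tensors_data_destroy (out_data);
  }

  ml_tensors_data_destroy (in_data);
  ml_tensors_info_destroy (in_info);
  ml_single_close (single);
  return (status == ML_ERROR_NONE) ? 0 : 1;
}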