@@ -62,6 +62,9 @@ void ggmlqnn_log_internal(ggml_log_level level, const char * file, const char *
 #if (defined __ANDROID__) || (defined ANDROID)
     // for Android application(standard APP or command line tool)
     __android_log_print(ANDROID_LOG_INFO, "ggml-qnn", "%s\n", s_ggmlqnn_log_internal_buf);
+    if (GGML_LOG_LEVEL_INFO == level) {
+        printf("%s\n", s_ggmlqnn_log_internal_buf);
+    }
 #else
     // for Snapdragon based WoA(Windows on ARM) device or Linux
     printf("%s\n", s_ggmlqnn_log_internal_buf);
@@ -1038,25 +1041,25 @@ void * ggmlqnn_type_trait(ggml_backend_qnn_context * ctx, ggml_tensor * op) {
     void * wdata = ctx->work_data.get();
     // convert src0 to float
     if (src0_type != GGML_TYPE_F32) {
-        const auto *type_traits = ggml_get_type_traits(src0_type);
-        ggml_to_float_t const to_float = type_traits->to_float;
+        const auto * type_traits = ggml_get_type_traits(src0_type);
+        ggml_to_float_t const to_float = type_traits->to_float;
 
         for (int64_t i03 = 0; i03 < ne03; i03++) {
             for (int64_t i02 = 0; i02 < ne02; i02++) {
-                const void *x = (char *) src0->data + i02 * nb02 + i03 * nb03;
-                float *const wplane = (float *) wdata + i02 * ne_plane + i03 * ne02 * ne_plane;
+                const void * x = (char *)src0->data + i02 * nb02 + i03 * nb03;
+                float * const wplane = (float *)wdata + i02 * ne_plane + i03 * ne02 * ne_plane;
 
                 const int min_cols_per_thread = 4096;
-                const int min_rows_per_thread = std::max((int) (min_cols_per_thread / ne00), 1);
+                const int min_rows_per_thread = std::max((int)(min_cols_per_thread / ne00), 1);
                 const int n_threads = std::max(
-                        std::min(ctx->n_threads, (int) (ne01 / min_rows_per_thread)), 1);
+                        std::min(ctx->n_threads, (int)(ne01 / min_rows_per_thread)), 1);
                 for (int i = 1; i < n_threads; i++) {
                     const int64_t start = i * ne01 / n_threads;
-                    const int64_t end = (i + 1) * ne01 / n_threads;
+                    const int64_t end = (i + 1) * ne01 / n_threads;
                     if (start < end) {
                         ctx->tasks.push_back(std::async(std::launch::async, [=]() {
                             for (int64_t i01 = start; i01 < end; i01++) {
-                                to_float((const char *) x + i01 * nb01, wplane + i01 * ne00, ne00);
+                                to_float((const char *)x + i01 * nb01, wplane + i01 * ne00, ne00);
                             }
                         }));
                     }
@@ -1980,7 +1983,7 @@ int qnn_instance::init_qnn_graph(const std::string & graph_name, QNNBackend devi
         return error;
     }
 
-    GGMLQNN_LOG_INFO("[%s]create graph %s succeed", ggml_backend_qnn_get_devname(device), graph_name.c_str());
+    GGMLQNN_LOG_DEBUG("[%s]create graph %s succeed", ggml_backend_qnn_get_devname(device), graph_name.c_str());
     _qnn_graph_handle = graph_handle;
     return QNN_SUCCESS;
 }