Skip to content

Commit c1d283e

Browse files
author
zhouwg
committed
ggml-qnn: self code-review
1 parent 623ecdb commit c1d283e

File tree

1 file changed

+12
-12
lines changed

1 file changed

+12
-12
lines changed

ggml/src/ggml-qnn/ggml-qnn.cpp

Lines changed: 12 additions & 12 deletions
Original file line number | Diff line number | Diff line change
@@ -2547,7 +2547,7 @@ int qnn_instance::unload_system() {
25472547
return result;
25482548
}
25492549

2550-
static void ggmlqnn_compute_logcallback(const char * fmt,
2550+
static void ggmlqnn_sdk_logcallback(const char * fmt,
25512551
QnnLog_Level_t level,
25522552
uint64_t timestamp,
25532553
va_list argp) {
@@ -2556,7 +2556,7 @@ static void ggmlqnn_compute_logcallback(const char * fmt,
25562556
return;
25572557

25582558
static std::mutex log_mutex;
2559-
static unsigned char s_ggmlqnn_compute_logbuf[GGML_QNN_LOGBUF_LEN];
2559+
static unsigned char s_ggmlqnn_sdk_logbuf[GGML_QNN_LOGBUF_LEN];
25602560

25612561
const char * log_level_desc = "";
25622562
switch (level) {
@@ -2583,9 +2583,9 @@ static void ggmlqnn_compute_logcallback(const char * fmt,
25832583
double ms = (double) timestamp / 1000000.0;
25842584
{
25852585
std::lock_guard<std::mutex> lock(log_mutex);
2586-
memset(s_ggmlqnn_compute_logbuf, 0, GGML_QNN_LOGBUF_LEN);
2587-
vsnprintf(reinterpret_cast<char *const>(s_ggmlqnn_compute_logbuf), GGML_QNN_LOGBUF_LEN, fmt, argp);
2588-
GGMLQNN_LOG_DEBUG("%8.1fms [%-7s] %s\n", ms, log_level_desc, s_ggmlqnn_compute_logbuf);
2586+
memset(s_ggmlqnn_sdk_logbuf, 0, GGML_QNN_LOGBUF_LEN);
2587+
vsnprintf(reinterpret_cast<char *const>(s_ggmlqnn_sdk_logbuf), GGML_QNN_LOGBUF_LEN, fmt, argp);
2588+
GGMLQNN_LOG_DEBUG("%8.1fms [%-7s] %s\n", ms, log_level_desc, s_ggmlqnn_sdk_logbuf);
25892589
}
25902590
}
25912591

@@ -2625,9 +2625,9 @@ int qnn_instance::qnn_init(const QnnSaver_Config_t ** saver_config) {
26252625

26262626
_qnn_interface.set_qnn_interface(_loaded_backend);
26272627
#if 1
2628-
_qnn_interface.qnn_log_create(ggmlqnn_compute_logcallback, _qnn_log_level, &_qnn_log_handle);
2628+
_qnn_interface.qnn_log_create(ggmlqnn_sdk_logcallback, _qnn_log_level, &_qnn_log_handle);
26292629
#else
2630-
_qnn_raw_interface.logCreate(ggmlqnn_compute_logcallback, _qnn_log_level, &_qnn_log_handle);
2630+
_qnn_raw_interface.logCreate(ggmlqnn_sdk_logcallback, _qnn_log_level, &_qnn_log_handle);
26312631
#endif
26322632
if (nullptr == _qnn_log_handle) {
26332633
GGMLQNN_LOG_WARN("why failed to initialize qnn log\n"); //NPU backend not work on Qualcomm SoC based low-end phone
@@ -3476,7 +3476,7 @@ static bool ggmlqnn_same_types(const ggml_backend_qnn_context * ctx, const ggml_
34763476
return true;
34773477
}
34783478

3479-
static bool ggmlqnn_compute_can_handle_op(const ggml_backend_qnn_context * ctx, const struct ggml_tensor * op_tensor) {
3479+
static bool ggmlqnn_can_handle_op(const ggml_backend_qnn_context * ctx, const struct ggml_tensor * op_tensor) {
34803480
if (op_tensor->op == GGML_OP_NONE) {
34813481
return true;
34823482
}
@@ -3567,7 +3567,7 @@ static bool ggmlqnn_compute_can_handle_op(const ggml_backend_qnn_context * ctx,
35673567
}
35683568
}
35693569

3570-
static bool ggmlqnn_compute_compute_forward(ggml_backend_t backend, struct ggml_tensor * dst) {
3570+
static bool ggmlqnn_compute_forward(ggml_backend_t backend, struct ggml_tensor * dst) {
35713571
ggmlqnn_op_func_t func = nullptr;
35723572
ggml_backend_qnn_context * ctx = (ggml_backend_qnn_context *)backend->context;
35733573

@@ -3913,7 +3913,7 @@ static enum ggml_status ggmlqnn_backend_graph_compute_general(ggml_backend_t bac
39133913
|| node->op == GGML_OP_PERMUTE || node->op == GGML_OP_NONE) {
39143914
continue;
39153915
}
3916-
bool ok = ggmlqnn_compute_compute_forward(backend, node);
3916+
bool ok = ggmlqnn_compute_forward(backend, node);
39173917
if (!ok) {
39183918
GGMLQNN_LOG_DEBUG("%s: error: op not supported %s (%s)\n", __func__, node->name, ggml_op_name(node->op));
39193919
}
@@ -4074,7 +4074,7 @@ static ggml_backend_buffer_t ggml_backend_qnn_device_buffer_from_host_ptr(ggml_b
40744074

40754075
static bool ggml_backend_qnn_device_supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) {
40764076
ggml_backend_qnn_context * ctx = (ggml_backend_qnn_context *) dev->context;
4077-
return (ggmlqnn_compute_can_handle_op(ctx,op));
4077+
return (ggmlqnn_can_handle_op(ctx,op));
40784078
}
40794079

40804080
static bool ggml_backend_qnn_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) {
@@ -5033,7 +5033,7 @@ static void ggmlqnn_compute_rope(ggml_backend_qnn_context * ctx, ggml_tensor * d
50335033
static enum ggml_status ggmlqnn_backend_graph_compute_special(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
50345034
enum ggml_status ggml_result = GGML_STATUS_SUCCESS;
50355035
Qnn_ErrorHandle_t qnn_error = QNN_SUCCESS;
5036-
qnn_perf op_perf = qnn_perf("ggml_backend_qnn_graph_compute_special");
5036+
qnn_perf op_perf = qnn_perf("ggmlqnn_backend_graph_compute_special");
50375037
qnn_instance * instance = nullptr;
50385038
Qnn_GraphHandle_t graph_handle = nullptr;
50395039
ggml_backend_qnn_context * ctx = (ggml_backend_qnn_context *) backend->context;

0 commit comments

Comments (0)