
Commit 22f578e

Merge branch 'layla-build' of https://github.com/l3utterfly/llama.cpp into layla-build
2 parents: 1c66c4d + 0f216d3

10 files changed: +4 -5814 lines

.gitignore

Lines changed: 2 additions & 0 deletions

@@ -161,3 +161,5 @@ prebuilts/QNN_SDK/v2.36.0.250627.zip
 prebuilts/Hexagon_SDK/minimal-hexagon-sdk-6.2.0.1.xz
 prebuilts/OpenCL_SDK/
 prebuilts/Vulkan_SDK/
+
+pkg-adb/

src/llama-context.cpp

Lines changed: 0 additions & 21 deletions

@@ -12,10 +12,6 @@
 #include <limits>
 #include <stdexcept>
 
-#ifdef GGML_USE_HEXAGON
-#include "ggml-hexagon.h"
-#endif
-
 //
 // llama_context
 //
@@ -142,15 +138,7 @@ llama_context::llama_context(
     if (!hparams.vocab_only) {
         // GPU backends
         for (auto * dev : model.devices) {
-#ifdef GGML_USE_HEXAGON
-            if (model.params.main_gpu == HEXAGON_BACKEND_GGML)
-                break;
-#endif
-#ifndef GGML_USE_HEXAGON
             ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr);
-#else
-            ggml_backend_t backend = ggml_backend_dev_init(dev,reinterpret_cast<const char *>(model.params.main_gpu));
-#endif
             if (backend == nullptr) {
                 throw std::runtime_error(format("failed to initialize %s backend", ggml_backend_dev_name(dev)));
             }
@@ -159,18 +147,9 @@ llama_context::llama_context(
 
         // add ACCEL backends (such as BLAS)
         for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
-#ifdef GGML_USE_HEXAGON
-            if (model.params.main_gpu == HEXAGON_BACKEND_GGML)
-                break;
-#endif
             ggml_backend_dev_t dev = ggml_backend_dev_get(i);
-
             if (ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_ACCEL) {
-#ifndef GGML_USE_HEXAGON
                 ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr);
-#else
-                ggml_backend_t backend = ggml_backend_dev_init(dev,reinterpret_cast<const char *>(model.params.main_gpu));
-#endif
                 if (backend == nullptr) {
                     throw std::runtime_error(format("failed to initialize %s backend", ggml_backend_dev_name(dev)));
                 }
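
For orientation, below is a minimal sketch of how the backend setup in llama_context's constructor reads after these Hexagon branches are removed. It is assembled from the context lines of the hunks above; the enclosing constructor and the backends.emplace_back(...) calls that store the result are assumptions added for illustration, not verbatim file contents.

    // Sketch of the reverted backend setup (context lines from the hunks above;
    // backends.emplace_back(...) is an assumption about where the backend is kept).
    if (!hparams.vocab_only) {
        // GPU backends: one backend instance per device assigned to the model
        for (auto * dev : model.devices) {
            ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr);
            if (backend == nullptr) {
                throw std::runtime_error(format("failed to initialize %s backend", ggml_backend_dev_name(dev)));
            }
            backends.emplace_back(backend);
        }

        // ACCEL backends (such as BLAS): scan every registered device
        for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
            ggml_backend_dev_t dev = ggml_backend_dev_get(i);
            if (ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_ACCEL) {
                ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr);
                if (backend == nullptr) {
                    throw std::runtime_error(format("failed to initialize %s backend", ggml_backend_dev_name(dev)));
                }
                backends.emplace_back(backend);
            }
        }
    }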

src/llama.cpp

Lines changed: 0 additions & 9 deletions

@@ -9,9 +9,6 @@
 
 #include "ggml.h"
 #include "ggml-backend.h"
-#ifdef GGML_USE_HEXAGON
-#include "ggml-hexagon.h"
-#endif
 
 #include <algorithm>
 #include <cstddef>
@@ -207,12 +204,6 @@ static struct llama_model * llama_model_load_from_file_impl(
                 break;
 
             case GGML_BACKEND_DEVICE_TYPE_GPU: {
-#if GGML_USE_HEXAGON
-                if (params.main_gpu == HEXAGON_BACKEND_GGML) {
-                    break;
-                }
-#endif
-
                 ggml_backend_reg_t reg = ggml_backend_dev_backend_reg(dev);
                 if (ggml_backend_reg_name(reg) == std::string("RPC")) {
                     rpc_servers.push_back(dev);
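
Similarly, a rough sketch of the device-selection loop in llama_model_load_from_file_impl after this removal: only the GPU case body and the RPC check come from the diff context above; the enclosing loop, the other case labels, and the non-RPC branch are assumptions for illustration.

    // Sketch of the reverted device selection (enclosing loop and the non-GPU
    // case labels are assumptions; the GPU case mirrors the context lines above).
    for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
        ggml_backend_dev_t dev = ggml_backend_dev_get(i);
        switch (ggml_backend_dev_type(dev)) {
            case GGML_BACKEND_DEVICE_TYPE_CPU:
            case GGML_BACKEND_DEVICE_TYPE_ACCEL:
                // CPU/ACCEL devices are handled separately (assumption)
                break;

            case GGML_BACKEND_DEVICE_TYPE_GPU: {
                ggml_backend_reg_t reg = ggml_backend_dev_backend_reg(dev);
                if (ggml_backend_reg_name(reg) == std::string("RPC")) {
                    rpc_servers.push_back(dev);
                } else {
                    model->devices.push_back(dev); // assumption: non-RPC GPUs become model devices
                }
                break;
            }
        }
    }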

tests/CMakeLists.txt

Lines changed: 0 additions & 4 deletions

@@ -203,10 +203,6 @@ endif()
 llama_build_and_test(test-gguf.cpp)
 llama_build_and_test(test-backend-ops.cpp)
 
-#dedicated for ggml-hexagon
-llama_build_and_test(ggmlhexagon-benchmark.cpp)
-llama_build_and_test(ggmlhexagon-testops.cpp)
-
 llama_build_and_test(test-model-load-cancel.cpp LABEL "model")
 llama_build_and_test(test-autorelease.cpp LABEL "model")
