
Commit db8d018

llama : adapt to backend changes
ggml-ci
1 parent df1e8db · commit db8d018

File tree: 3 files changed, +16 −45 lines

ggml/src/ggml-backend.cpp

Lines changed: 4 additions & 0 deletions

@@ -479,6 +479,10 @@ ggml_backend_buffer_type_t ggml_backend_dev_buffer_type(ggml_backend_dev_t device)
 }
 
 ggml_backend_buffer_type_t ggml_backend_dev_host_buffer_type(ggml_backend_dev_t device) {
+    if (device->iface.get_host_buffer_type == NULL) {
+        return NULL;
+    }
+
     return device->iface.get_host_buffer_type(device);
 }
 
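With this change get_host_buffer_type becomes an optional device callback: a device that does not provide it (such as the Metal device below) makes ggml_backend_dev_host_buffer_type() return NULL instead of calling through a null pointer. A minimal caller-side sketch, not part of this commit, of how code using this API can fall back to the regular device buffer type; the helper name pick_buffer_type and the already-obtained dev handle are assumptions for illustration:

    #include "ggml-backend.h"

    // Hypothetical helper (not from this commit): prefer the host (pinned) buffer
    // type when the device exposes one, otherwise fall back to its default type.
    static ggml_backend_buffer_type_t pick_buffer_type(ggml_backend_dev_t dev) {
        ggml_backend_buffer_type_t host_buft = ggml_backend_dev_host_buffer_type(dev);
        if (host_buft != NULL) {
            return host_buft;                     // host/pinned buffers available
        }
        return ggml_backend_dev_buffer_type(dev); // generic device buffer type
    }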

ggml/src/ggml-metal.m

Lines changed: 8 additions & 1 deletion

@@ -3628,6 +3628,13 @@ static bool ggml_backend_metal_device_supports_buft(ggml_backend_dev_t dev, ggml
     UNUSED(dev);
 }
 
+static bool ggml_backend_metal_device_offload_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) {
+    return false;
+
+    GGML_UNUSED(dev);
+    GGML_UNUSED(op);
+}
+
 static struct ggml_backend_device_i ggml_backend_metal_device_i = {
     /* .get_name = */ ggml_backend_metal_device_get_name,
     /* .get_description = */ ggml_backend_metal_device_get_description,
@@ -3640,7 +3647,7 @@ static bool ggml_backend_metal_device_supports_buft(ggml_backend_dev_t dev, ggml
     /* .buffer_from_host_ptr = */ ggml_backend_metal_device_buffer_from_ptr,
     /* .supports_op = */ ggml_backend_metal_device_supports_op,
     /* .supports_buft = */ ggml_backend_metal_device_supports_buft,
-    /* .offload_op = */ NULL,
+    /* .offload_op = */ ggml_backend_metal_device_offload_op,
     /* .event_new = */ NULL,
     /* .event_free = */ NULL,
     /* .event_synchronize = */ NULL,

src/llama.cpp

Lines changed: 4 additions & 44 deletions

@@ -26,10 +26,6 @@
 # include "ggml-blas.h"
 #endif
 
-#ifdef GGML_USE_METAL
-# include "ggml-metal.h"
-#endif
-
 // TODO: replace with ggml API call
 #define QK_K 256
 
@@ -3292,9 +3288,6 @@ struct llama_context {
     std::unordered_map<struct llama_lora_adapter *, float> lora_adapters;
 
     std::vector<ggml_backend_t> backends;
-#ifdef GGML_USE_METAL
-    ggml_backend_t backend_metal = nullptr;
-#endif
 #ifdef GGML_USE_BLAS
     ggml_backend_t backend_blas = nullptr;
 #endif
@@ -3420,9 +3413,7 @@ static int llama_get_device_count(const llama_model & model) {
     count += (int) model.rpc_servers.size();
 #endif
 
-#if defined(GGML_USE_METAL)
-    count += 1;
-#elif defined(GGML_USE_SYCL)
+#if defined(GGML_USE_SYCL)
     count += ggml_backend_sycl_get_device_count();
 #elif defined(GGML_USE_VULKAN)
     count += ggml_backend_vk_get_device_count();
@@ -3488,9 +3479,7 @@ static ggml_backend_buffer_type_t llama_default_buffer_type_offload(const llama_
     }
     device -= (int)model.devices.size();
 
-#if defined(GGML_USE_METAL)
-    buft = ggml_backend_metal_buffer_type();
-#elif defined(GGML_USE_VULKAN)
+#if defined(GGML_USE_VULKAN)
     buft = ggml_backend_vk_buffer_type(device);
 #elif defined(GGML_USE_SYCL)
     buft = ggml_backend_sycl_buffer_type(device);
@@ -8937,25 +8926,6 @@ static bool llm_load_tensors(
                 bufs.emplace(idx, buf);
             }
         }
-#ifdef GGML_USE_METAL
-        else if (ml.use_mmap && use_mmap_buffer && buft == ggml_backend_metal_buffer_type()) {
-            for (uint32_t idx = 0; idx < ml.files.size(); idx++) {
-                const size_t max_size = ggml_get_max_tensor_size(ctx);
-                void * addr = nullptr;
-                size_t first, last;
-                ml.get_mapping_range(&first, &last, &addr, idx, ctx);
-                if (first >= last) {
-                    continue;
-                }
-                ggml_backend_buffer_t buf = ggml_backend_metal_buffer_from_ptr((char *) addr + first, last - first, max_size);
-                if (buf == nullptr) {
-                    throw std::runtime_error("unable to allocate backend metal buffer");
-                }
-                model.bufs.push_back(buf);
-                bufs.emplace(idx, buf);
-            }
-        }
-#endif
         else {
             ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
             if (buf == nullptr) {
@@ -19041,7 +19011,7 @@ bool llama_supports_mlock(void) {
 }
 
 bool llama_supports_gpu_offload(void) {
-#if defined(GGML_USE_METAL) || defined(GGML_USE_VULKAN) || \
+#if defined(GGML_USE_VULKAN) || \
     defined(GGML_USE_SYCL) || defined(GGML_USE_KOMPUTE) || defined(GGML_USE_RPC)
     // Defined when llama.cpp is compiled with support for offloading model layers to GPU.
     return true;
@@ -19344,17 +19314,7 @@ struct llama_context * llama_new_context_with_model(
     }
 #endif
 
-#if defined(GGML_USE_METAL)
-    if (model->n_gpu_layers > 0) {
-        ctx->backend_metal = ggml_backend_metal_init();
-        if (ctx->backend_metal == nullptr) {
-            LLAMA_LOG_ERROR("%s: failed to initialize Metal backend\n", __func__);
-            llama_free(ctx);
-            return nullptr;
-        }
-        ctx->backends.push_back(ctx->backend_metal);
-    }
-#elif defined(GGML_USE_VULKAN)
+#if defined(GGML_USE_VULKAN)
     if (model->split_mode == LLAMA_SPLIT_MODE_ROW) {
         LLAMA_LOG_ERROR("%s: Row split not supported. Failed to initialize Vulkan backend\n", __func__);
         llama_free(ctx);
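All of the removed llama.cpp code is Metal-specific plumbing: the ggml-metal.h include, the dedicated backend_metal member, the hard-coded device count, the Metal buffer-type and mmap-buffer branches, and the explicit ggml_backend_metal_init() call in llama_new_context_with_model. After this commit the Metal backend is reached through the common ggml backend device interface rather than compile-time #ifdef branches. A rough sketch of that style of initialization, assuming the registry helpers ggml_backend_dev_count / ggml_backend_dev_get / ggml_backend_dev_init from the backend changes this commit adapts to (the exact code in llama.cpp may differ, and the helper name below is invented for illustration):

    #include <vector>
    #include "ggml-backend.h"

    // Sketch only: enumerate registered devices and initialize a backend for each,
    // instead of special-casing Metal behind GGML_USE_METAL.
    static std::vector<ggml_backend_t> init_all_backends() {
        std::vector<ggml_backend_t> backends;
        for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
            ggml_backend_dev_t dev = ggml_backend_dev_get(i);
            ggml_backend_t backend = ggml_backend_dev_init(dev, /*params =*/ nullptr);
            if (backend == nullptr) {
                continue; // a real caller would log the failure and likely abort context creation
            }
            backends.push_back(backend);
        }
        return backends;
    }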
