```diff
 extern "C" {
 #endif
 
+//
+// device
+//
+
 typedef struct ggml_backend_metal_device * ggml_backend_metal_device_t;
 
 struct ggml_backend_metal_device_props {
```
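For orientation, this is roughly how a caller would use the device half of this API. A minimal sketch only: the header name is assumed, and since the tail of the `ggml_backend_metal_device_get_memory` signature is cut off in the hunk header below, the free/total out-parameters are an assumption based on the usual ggml backend convention.

```c
#include <stdio.h>

#include "ggml-metal-device.h" // assumed name of the header being diffed

// query the Metal device for its memory budget and properties
static void print_device_info(ggml_backend_metal_device_t dev) {
    size_t free_mem  = 0;
    size_t total_mem = 0;

    // assumed parameter list - the full signature is truncated in the hunk header
    ggml_backend_metal_device_get_memory(dev, &free_mem, &total_mem);

    printf("memory: %zu free / %zu total bytes\n", free_mem, total_mem);

    // the fields of the props struct are declared earlier in the header
    struct ggml_backend_metal_device_props props = ggml_backend_metal_device_get_props(dev);
    (void) props;
}
```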
```diff
@@ -39,6 +43,36 @@ void ggml_backend_metal_device_get_memory(ggml_backend_metal_device_t ctx, size_
 
 struct ggml_backend_metal_device_props ggml_backend_metal_device_get_props(ggml_backend_metal_device_t ctx);
 
+//
+// device buffers
+//
+
+typedef struct ggml_backend_metal_buffer * ggml_backend_metal_buffer_t;
+
+ggml_backend_metal_buffer_t ggml_backend_metal_buffer_init(ggml_backend_metal_device_t device, size_t size, bool shared);
+ggml_backend_metal_buffer_t ggml_backend_metal_buffer_map (ggml_backend_metal_device_t device, void * ptr, size_t size, size_t max_tensor_size);
+
+void   ggml_backend_metal_buffer_free     (ggml_backend_metal_buffer_t buffer);
+void * ggml_backend_metal_buffer_get_base (ggml_backend_metal_buffer_t buffer);
+bool   ggml_backend_metal_buffer_is_shared(ggml_backend_metal_buffer_t buffer);
+
+void ggml_backend_metal_buffer_memset_tensor(ggml_backend_metal_buffer_t buffer, struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size);
+void ggml_backend_metal_buffer_set_tensor   (ggml_backend_metal_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+void ggml_backend_metal_buffer_get_tensor   (ggml_backend_metal_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
+void ggml_backend_metal_buffer_clear        (ggml_backend_metal_buffer_t buffer, uint8_t value);
+
+
+struct ggml_backend_metal_buffer_id {
+    void * metal; // id<MTLBuffer>
+    size_t offs;  // offset within the Metal buffer
+};
+
+// finds the Metal buffer that contains the tensor data on the GPU device
+// the assumption is that there is a 1-to-1 mapping between the host and device memory buffers,
+// so we can find the Metal buffer based on the host memory pointer
+//
+struct ggml_backend_metal_buffer_id ggml_backend_metal_buffer_get_id(ggml_backend_metal_buffer_t buffer, const struct ggml_tensor * t);
+
 #ifdef __cplusplus
 }
 #endif
```
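To tie the buffer half together, a sketch of the intended flow: allocate (or map) a buffer, upload tensor data, then resolve the `(metal, offs)` pair for command encoding. Illustrative only — the header name is assumed, and the sketch presumes the tensor has already been placed so that `t->data` points into the buffer, which in practice is the allocator's job.

```c
#include <stdio.h>

#include "ggml.h"
#include "ggml-metal-device.h" // assumed header name

// assumes t->data already points into memory backed by buf
static void upload_and_resolve(ggml_backend_metal_device_t dev, struct ggml_tensor * t, const void * src) {
    // fresh device memory, visible to both host and GPU
    ggml_backend_metal_buffer_t buf = ggml_backend_metal_buffer_init(dev, 16u*1024u*1024u, /*shared =*/ true);

    ggml_backend_metal_buffer_clear(buf, 0); // zero-initialize the whole buffer

    // copy host data into the tensor (offset is relative to the tensor data)
    ggml_backend_metal_buffer_set_tensor(buf, t, src, 0, ggml_nbytes(t));

    // per the comment in the header: the host pointer t->data alone is enough
    // to recover the backing Metal buffer and the tensor's offset within it
    struct ggml_backend_metal_buffer_id bid = ggml_backend_metal_buffer_get_id(buf, t);
    if (bid.metal == NULL) {
        fprintf(stderr, "tensor does not reside in this buffer\n");
    } else {
        printf("tensor found at offset %zu\n", bid.offs);
    }

    ggml_backend_metal_buffer_free(buf);
}
```

The `buffer_id` lookup is what keeps `struct ggml_tensor` free of Metal types: a tensor carries only a host pointer, and the 1-to-1 host/device mapping recovers the `id<MTLBuffer>` plus offset at encode time.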