
Commit efdd713

more build fixes
1 parent ab26fb9 commit efdd713

File tree

17 files changed (+253, -239 lines)


ggml/include/ggml-amx.h

Lines changed: 5 additions & 5 deletions
@@ -9,16 +9,16 @@ extern "C" {
 #endif
 
 // buffer_type API
-GGML_API ggml_backend_buffer_type_t ggml_backend_amx_buffer_type(void);
+GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_amx_buffer_type(void);
 
-GGML_API bool ggml_backend_is_amx(ggml_backend_t backend);
+GGML_BACKEND_API bool ggml_backend_is_amx(ggml_backend_t backend);
 
 // backend API
-GGML_API ggml_backend_t ggml_backend_amx_init(void);
+GGML_BACKEND_API ggml_backend_t ggml_backend_amx_init(void);
 
-GGML_API void ggml_backend_amx_set_n_threads(ggml_backend_t backend_amx, int n_threads);
+GGML_BACKEND_API void ggml_backend_amx_set_n_threads(ggml_backend_t backend_amx, int n_threads);
 
-GGML_API ggml_backend_reg_t ggml_backend_amx_reg(void);
+GGML_BACKEND_API ggml_backend_reg_t ggml_backend_amx_reg(void);
 
 #ifdef __cplusplus
 }

ggml/include/ggml-backend.h

Lines changed: 14 additions & 0 deletions
@@ -3,6 +3,20 @@
 #include "ggml.h"
 #include "ggml-alloc.h"
 
+#ifdef GGML_BACKEND_SHARED
+#    if defined(_WIN32) && !defined(__MINGW32__)
+#        ifdef GGML_BACKEND_BUILD
+#            define GGML_BACKEND_API __declspec(dllexport) extern
+#        else
+#            define GGML_BACKEND_API __declspec(dllimport) extern
+#        endif
+#    else
+#        define GGML_BACKEND_API __attribute__ ((visibility ("default"))) extern
+#    endif
+#else
+#    define GGML_BACKEND_API extern
+#endif
+
 #ifdef __cplusplus
 extern "C" {
 #endif
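
Note: the block above is the core of this change: GGML_BACKEND_API replaces GGML_API in the per-backend headers so backend symbols can be exported from, or imported into, a separately built backend library. A minimal sketch of the intended pattern, assuming a backend source file compiled with GGML_BACKEND_SHARED and GGML_BACKEND_BUILD defined (the actual build flags are set in the CMake changes, which are not shown in this excerpt); the function below is hypothetical and only illustrates how the macro is applied:

// sketch only: ggml_backend_example_version is a made-up symbol, not part of ggml
#include "ggml-backend.h"

// with GGML_BACKEND_SHARED + GGML_BACKEND_BUILD this expands to
// __declspec(dllexport) extern on MSVC and to
// __attribute__((visibility("default"))) extern on gcc/clang;
// without GGML_BACKEND_SHARED it is plain extern
GGML_BACKEND_API int ggml_backend_example_version(void);

GGML_BACKEND_API int ggml_backend_example_version(void) {
    return 1; // illustrative value only
}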

ggml/include/ggml-blas.h

Lines changed: 4 additions & 4 deletions
@@ -9,15 +9,15 @@ extern "C" {
 #endif
 
 // backend API
-GGML_API ggml_backend_t ggml_backend_blas_init(void);
+GGML_BACKEND_API ggml_backend_t ggml_backend_blas_init(void);
 
-GGML_API bool ggml_backend_is_blas(ggml_backend_t backend);
+GGML_BACKEND_API bool ggml_backend_is_blas(ggml_backend_t backend);
 
 // number of threads used for conversion to float
 // for openblas and blis, this will also set the number of threads used for blas operations
-GGML_API void ggml_backend_blas_set_n_threads(ggml_backend_t backend_blas, int n_threads);
+GGML_BACKEND_API void ggml_backend_blas_set_n_threads(ggml_backend_t backend_blas, int n_threads);
 
-GGML_API ggml_backend_reg_t ggml_backend_blas_reg(void);
+GGML_BACKEND_API ggml_backend_reg_t ggml_backend_blas_reg(void);
 
 
 #ifdef __cplusplus

ggml/include/ggml-cann.h

Lines changed: 8 additions & 8 deletions
@@ -34,7 +34,7 @@ extern "C" {
  */
 #define GGML_CANN_MAX_DEVICES 16
 
-GGML_API ggml_backend_reg_t ggml_backend_cann_reg(void);
+GGML_BACKEND_API ggml_backend_reg_t ggml_backend_cann_reg(void);
 
 /**
  * @brief Initializes the CANN backend for a specified device.
@@ -46,7 +46,7 @@ GGML_API ggml_backend_reg_t ggml_backend_cann_reg(void);
  * @param device The index of the device to initialize.
  * @return A pointer to the initialized backend instance, or nullptr on failure.
  */
-GGML_API ggml_backend_t ggml_backend_cann_init(int32_t device);
+GGML_BACKEND_API ggml_backend_t ggml_backend_cann_init(int32_t device);
 
 /**
  * @brief Checks if a given backend is a CANN backend.
@@ -57,7 +57,7 @@ GGML_API ggml_backend_t ggml_backend_cann_init(int32_t device);
  * @param backend The backend instance to check.
  * @return True if the backend is a CANN backend, false otherwise.
  */
-GGML_API bool ggml_backend_is_cann(ggml_backend_t backend);
+GGML_BACKEND_API bool ggml_backend_is_cann(ggml_backend_t backend);
 
 /**
  * @brief Retrieves the CANN buffer type for a specified device.
@@ -69,7 +69,7 @@ GGML_API bool ggml_backend_is_cann(ggml_backend_t backend);
  * @return A pointer to the buffer type interface for the specified device, or
  *         nullptr if the device index is out of range.
  */
-GGML_API ggml_backend_buffer_type_t
+GGML_BACKEND_API ggml_backend_buffer_type_t
 ggml_backend_cann_buffer_type(int32_t device);
 
 /**
@@ -80,14 +80,14 @@ ggml_backend_cann_buffer_type(int32_t device);
  *
  * @return The number of CANN devices available.
  */
-GGML_API int32_t ggml_backend_cann_get_device_count(void);
+GGML_BACKEND_API int32_t ggml_backend_cann_get_device_count(void);
 
 /**
  * @brief pinned host buffer for use with the CPU backend for faster copies between CPU and NPU.
  *
 * @return A pointer to the host buffer type interface.
  */
-GGML_API ggml_backend_buffer_type_t ggml_backend_cann_host_buffer_type(void);
+GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_cann_host_buffer_type(void);
 
 /**
  * @brief Retrieves the description of a specific CANN device.
@@ -99,7 +99,7 @@ GGML_API ggml_backend_buffer_type_t ggml_backend_cann_host_buffer_type(void);
  * @param description Pointer to a buffer where the description will be written.
  * @param description_size Size of the description buffer.
  */
-GGML_API void ggml_backend_cann_get_device_description(
+GGML_BACKEND_API void ggml_backend_cann_get_device_description(
     int32_t device, char* description, size_t description_size);
 
 /**
@@ -114,7 +114,7 @@ GGML_API void ggml_backend_cann_get_device_description(
  * @param total Pointer to a variable where the total memory size will be
  *              stored.
  */
-GGML_API void ggml_backend_cann_get_device_memory(int32_t device,
+GGML_BACKEND_API void ggml_backend_cann_get_device_memory(int32_t device,
                                                   size_t* free,
                                                   size_t* total);
 

ggml/include/ggml-cpu.h

Lines changed: 56 additions & 56 deletions
@@ -54,77 +54,77 @@ extern "C" {
     GGML_NUMA_STRATEGY_COUNT
 };
 
-GGML_API void ggml_numa_init(enum ggml_numa_strategy numa); // call once for better performance on NUMA systems
-GGML_API bool ggml_is_numa(void); // true if init detected that system has >1 NUMA node
+GGML_BACKEND_API void ggml_numa_init(enum ggml_numa_strategy numa); // call once for better performance on NUMA systems
+GGML_BACKEND_API bool ggml_is_numa(void); // true if init detected that system has >1 NUMA node
 
-GGML_API struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value);
-GGML_API struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value);
+GGML_BACKEND_API struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value);
+GGML_BACKEND_API struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value);
 
-GGML_API struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value);
-GGML_API struct ggml_tensor * ggml_set_f32 (struct ggml_tensor * tensor, float value);
+GGML_BACKEND_API struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value);
+GGML_BACKEND_API struct ggml_tensor * ggml_set_f32 (struct ggml_tensor * tensor, float value);
 
-GGML_API int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i);
-GGML_API void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value);
+GGML_BACKEND_API int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i);
+GGML_BACKEND_API void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value);
 
-GGML_API int32_t ggml_get_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3);
-GGML_API void ggml_set_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value);
+GGML_BACKEND_API int32_t ggml_get_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3);
+GGML_BACKEND_API void ggml_set_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value);
 
-GGML_API float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i);
-GGML_API void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value);
+GGML_BACKEND_API float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i);
+GGML_BACKEND_API void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value);
 
-GGML_API float ggml_get_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3);
-GGML_API void ggml_set_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value);
+GGML_BACKEND_API float ggml_get_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3);
+GGML_BACKEND_API void ggml_set_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value);
 
-GGML_API struct ggml_threadpool_params ggml_threadpool_params_default(int n_threads);
-GGML_API void ggml_threadpool_params_init (struct ggml_threadpool_params * p, int n_threads);
-GGML_API bool ggml_threadpool_params_match (const struct ggml_threadpool_params * p0, const struct ggml_threadpool_params * p1);
-GGML_API struct ggml_threadpool * ggml_threadpool_new (struct ggml_threadpool_params * params);
-GGML_API void ggml_threadpool_free (struct ggml_threadpool * threadpool);
-GGML_API int ggml_threadpool_get_n_threads(struct ggml_threadpool * threadpool);
-GGML_API void ggml_threadpool_pause (struct ggml_threadpool * threadpool);
-GGML_API void ggml_threadpool_resume (struct ggml_threadpool * threadpool);
+GGML_BACKEND_API struct ggml_threadpool_params ggml_threadpool_params_default(int n_threads);
+GGML_BACKEND_API void ggml_threadpool_params_init (struct ggml_threadpool_params * p, int n_threads);
+GGML_BACKEND_API bool ggml_threadpool_params_match (const struct ggml_threadpool_params * p0, const struct ggml_threadpool_params * p1);
+GGML_BACKEND_API struct ggml_threadpool * ggml_threadpool_new (struct ggml_threadpool_params * params);
+GGML_BACKEND_API void ggml_threadpool_free (struct ggml_threadpool * threadpool);
+GGML_BACKEND_API int ggml_threadpool_get_n_threads(struct ggml_threadpool * threadpool);
+GGML_BACKEND_API void ggml_threadpool_pause (struct ggml_threadpool * threadpool);
+GGML_BACKEND_API void ggml_threadpool_resume (struct ggml_threadpool * threadpool);
 
 // ggml_graph_plan() has to be called before ggml_graph_compute()
 // when plan.work_size > 0, caller must allocate memory for plan.work_data
-GGML_API struct ggml_cplan ggml_graph_plan(
+GGML_BACKEND_API struct ggml_cplan ggml_graph_plan(
            const struct ggml_cgraph * cgraph,
                  int n_threads, /* = GGML_DEFAULT_N_THREADS */
            struct ggml_threadpool * threadpool /* = NULL */ );
-GGML_API enum ggml_status ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan);
+GGML_BACKEND_API enum ggml_status ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan);
 
 // same as ggml_graph_compute() but the work data is allocated as a part of the context
 // note: the drawback of this API is that you must have ensured that the context has enough memory for the work data
-GGML_API enum ggml_status ggml_graph_compute_with_ctx(struct ggml_context * ctx, struct ggml_cgraph * cgraph, int n_threads);
+GGML_BACKEND_API enum ggml_status ggml_graph_compute_with_ctx(struct ggml_context * ctx, struct ggml_cgraph * cgraph, int n_threads);
 
 //
 // system info
 //
 
 // x86
-GGML_API int ggml_cpu_has_sse3 (void);
-GGML_API int ggml_cpu_has_ssse3 (void);
-GGML_API int ggml_cpu_has_avx (void);
-GGML_API int ggml_cpu_has_avx2 (void);
-GGML_API int ggml_cpu_has_f16c (void);
-GGML_API int ggml_cpu_has_fma (void);
-GGML_API int ggml_cpu_has_avx_vnni (void);
-GGML_API int ggml_cpu_has_avx512 (void);
-GGML_API int ggml_cpu_has_avx512_vbmi(void);
-GGML_API int ggml_cpu_has_avx512_vnni(void);
-GGML_API int ggml_cpu_has_avx512_bf16(void);
-GGML_API int ggml_cpu_has_amx_int8 (void);
+GGML_BACKEND_API int ggml_cpu_has_sse3 (void);
+GGML_BACKEND_API int ggml_cpu_has_ssse3 (void);
+GGML_BACKEND_API int ggml_cpu_has_avx (void);
+GGML_BACKEND_API int ggml_cpu_has_avx2 (void);
+GGML_BACKEND_API int ggml_cpu_has_f16c (void);
+GGML_BACKEND_API int ggml_cpu_has_fma (void);
+GGML_BACKEND_API int ggml_cpu_has_avx_vnni (void);
+GGML_BACKEND_API int ggml_cpu_has_avx512 (void);
+GGML_BACKEND_API int ggml_cpu_has_avx512_vbmi(void);
+GGML_BACKEND_API int ggml_cpu_has_avx512_vnni(void);
+GGML_BACKEND_API int ggml_cpu_has_avx512_bf16(void);
+GGML_BACKEND_API int ggml_cpu_has_amx_int8 (void);
 // ARM
-GGML_API int ggml_cpu_has_neon (void);
-GGML_API int ggml_cpu_has_arm_fma (void);
-GGML_API int ggml_cpu_has_fp16_va (void);
-GGML_API int ggml_cpu_has_matmul_int8(void);
-GGML_API int ggml_cpu_has_sve (void);
-GGML_API int ggml_cpu_get_sve_cnt (void); // sve vector length in bytes
+GGML_BACKEND_API int ggml_cpu_has_neon (void);
+GGML_BACKEND_API int ggml_cpu_has_arm_fma (void);
+GGML_BACKEND_API int ggml_cpu_has_fp16_va (void);
+GGML_BACKEND_API int ggml_cpu_has_matmul_int8(void);
+GGML_BACKEND_API int ggml_cpu_has_sve (void);
+GGML_BACKEND_API int ggml_cpu_get_sve_cnt (void); // sve vector length in bytes
 // other
-GGML_API int ggml_cpu_has_riscv_v (void);
-GGML_API int ggml_cpu_has_vsx (void);
-GGML_API int ggml_cpu_has_wasm_simd (void);
-GGML_API int ggml_cpu_has_llamafile (void);
+GGML_BACKEND_API int ggml_cpu_has_riscv_v (void);
+GGML_BACKEND_API int ggml_cpu_has_vsx (void);
+GGML_BACKEND_API int ggml_cpu_has_wasm_simd (void);
+GGML_BACKEND_API int ggml_cpu_has_llamafile (void);
 
 // Internal types and functions exposed for tests and benchmarks
 
@@ -148,25 +148,25 @@ extern "C" {
     ggml_gemm_t gemm;
 };
 
-GGML_API const struct ggml_type_traits_cpu * ggml_get_type_traits_cpu(enum ggml_type type);
+GGML_BACKEND_API const struct ggml_type_traits_cpu * ggml_get_type_traits_cpu(enum ggml_type type);
 
-GGML_API void ggml_cpu_init(void);
+GGML_BACKEND_API void ggml_cpu_init(void);
 
 //
 // CPU backend
 //
 
-GGML_API ggml_backend_t ggml_backend_cpu_init(void);
+GGML_BACKEND_API ggml_backend_t ggml_backend_cpu_init(void);
 
-GGML_API bool ggml_backend_is_cpu (ggml_backend_t backend);
-GGML_API void ggml_backend_cpu_set_n_threads (ggml_backend_t backend_cpu, int n_threads);
-GGML_API void ggml_backend_cpu_set_threadpool (ggml_backend_t backend_cpu, ggml_threadpool_t threadpool);
-GGML_API void ggml_backend_cpu_set_abort_callback(ggml_backend_t backend_cpu, ggml_abort_callback abort_callback, void * abort_callback_data);
+GGML_BACKEND_API bool ggml_backend_is_cpu (ggml_backend_t backend);
+GGML_BACKEND_API void ggml_backend_cpu_set_n_threads (ggml_backend_t backend_cpu, int n_threads);
+GGML_BACKEND_API void ggml_backend_cpu_set_threadpool (ggml_backend_t backend_cpu, ggml_threadpool_t threadpool);
+GGML_BACKEND_API void ggml_backend_cpu_set_abort_callback(ggml_backend_t backend_cpu, ggml_abort_callback abort_callback, void * abort_callback_data);
 
-GGML_API ggml_backend_reg_t ggml_backend_cpu_reg(void);
+GGML_BACKEND_API ggml_backend_reg_t ggml_backend_cpu_reg(void);
 
 #ifdef GGML_USE_CPU_HBM
-GGML_API ggml_backend_buffer_type_t ggml_backend_cpu_hbm_buffer_type(void);
+GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_cpu_hbm_buffer_type(void);
 #endif
 
 #ifdef __cplusplus
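
On the consuming side, the CPU backend declarations above are now pulled in through GGML_BACKEND_API as well (dllimport on MSVC shared builds, plain extern otherwise). A minimal usage sketch, assuming the program links against the CPU backend and using only functions that appear in this diff plus ggml_backend_free from the core backend header:

#include "ggml-backend.h"
#include "ggml-cpu.h"

int main(void) {
    ggml_backend_t backend = ggml_backend_cpu_init(); // exported via GGML_BACKEND_API
    if (backend == NULL) {
        return 1;
    }
    ggml_backend_cpu_set_n_threads(backend, 4);       // also GGML_BACKEND_API
    ggml_backend_free(backend);                       // core ggml-backend API
    return 0;
}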

ggml/include/ggml-cuda.h

Lines changed: 11 additions & 11 deletions
@@ -20,27 +20,27 @@ extern "C" {
 #define GGML_CUDA_MAX_DEVICES 16
 
 // backend API
-GGML_API ggml_backend_t ggml_backend_cuda_init(int device);
+GGML_BACKEND_API ggml_backend_t ggml_backend_cuda_init(int device);
 
-GGML_API bool ggml_backend_is_cuda(ggml_backend_t backend);
+GGML_BACKEND_API bool ggml_backend_is_cuda(ggml_backend_t backend);
 
 // device buffer
-GGML_API ggml_backend_buffer_type_t ggml_backend_cuda_buffer_type(int device);
+GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_cuda_buffer_type(int device);
 
 // split tensor buffer that splits matrices by rows across multiple devices
-GGML_API ggml_backend_buffer_type_t ggml_backend_cuda_split_buffer_type(int main_device, const float * tensor_split);
+GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_cuda_split_buffer_type(int main_device, const float * tensor_split);
 
 // pinned host buffer for use with the CPU backend for faster copies between CPU and GPU
-GGML_API ggml_backend_buffer_type_t ggml_backend_cuda_host_buffer_type(void);
+GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_cuda_host_buffer_type(void);
 
-GGML_API int ggml_backend_cuda_get_device_count(void);
-GGML_API void ggml_backend_cuda_get_device_description(int device, char * description, size_t description_size);
-GGML_API void ggml_backend_cuda_get_device_memory(int device, size_t * free, size_t * total);
+GGML_BACKEND_API int ggml_backend_cuda_get_device_count(void);
+GGML_BACKEND_API void ggml_backend_cuda_get_device_description(int device, char * description, size_t description_size);
+GGML_BACKEND_API void ggml_backend_cuda_get_device_memory(int device, size_t * free, size_t * total);
 
-GGML_API bool ggml_backend_cuda_register_host_buffer(void * buffer, size_t size);
-GGML_API void ggml_backend_cuda_unregister_host_buffer(void * buffer);
+GGML_BACKEND_API bool ggml_backend_cuda_register_host_buffer(void * buffer, size_t size);
+GGML_BACKEND_API void ggml_backend_cuda_unregister_host_buffer(void * buffer);
 
-GGML_API ggml_backend_reg_t ggml_backend_cuda_reg(void);
+GGML_BACKEND_API ggml_backend_reg_t ggml_backend_cuda_reg(void);
 
 #ifdef __cplusplus
 }

ggml/include/ggml-kompute.h

Lines changed: 4 additions & 4 deletions
@@ -37,13 +37,13 @@ struct ggml_vk_device ggml_vk_current_device(void);
 // forward declaration
 typedef struct ggml_backend * ggml_backend_t;
 
-GGML_API ggml_backend_t ggml_backend_kompute_init(int device);
+GGML_BACKEND_API ggml_backend_t ggml_backend_kompute_init(int device);
 
-GGML_API bool ggml_backend_is_kompute(ggml_backend_t backend);
+GGML_BACKEND_API bool ggml_backend_is_kompute(ggml_backend_t backend);
 
-GGML_API ggml_backend_buffer_type_t ggml_backend_kompute_buffer_type(int device);
+GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_kompute_buffer_type(int device);
 
-GGML_API ggml_backend_reg_t ggml_backend_kompute_reg(void);
+GGML_BACKEND_API ggml_backend_reg_t ggml_backend_kompute_reg(void);
 
 #ifdef __cplusplus
 }
