Merged · 58 commits
e407bcc
rpc: fix known RCE in rpc-server (ggml/1103)
retr0reg Feb 6, 2025
982535f
metal : use residency set for other platforms (llama/11648)
jhen0409 Feb 4, 2025
8440a75
HIP: force max threads per block to be 1024 (llama/11621)
fxzjshm Feb 4, 2025
53ad347
CUDA: non-contiguous (RMS) norm support (llama/11659)
JohannesGaessler Feb 4, 2025
88ce132
CUDA: support for mat. mul. with ne03 != ne13 (llama/11656)
JohannesGaessler Feb 5, 2025
26ab6ec
metal : adjust support conditions for norm operators (llama/11671)
ggerganov Feb 5, 2025
c2b8bf0
metal : avoid breaking build when metal API predates TARGET_OS_VISION…
charles-dyfis-net Feb 6, 2025
34a9e8a
vulkan: use smaller combined allocations to avoid fragmentation (llam…
jeffbolznv Feb 6, 2025
96bd41f
vulkan: initial support for IQ4_XS quantization (llama/11501)
remyoudompheng Feb 6, 2025
6724a2a
vulkan: optimize coopmat2 iq2/iq3 callbacks (llama/11521)
jeffbolznv Feb 6, 2025
f62a152
ggml : fix LoongArch compile error with 128-bit SIMD (llama/11701)
junchao-loongson Feb 6, 2025
1f1ddf8
SYCL: Adjust support condition for norm operators (llama/11674)
qnixsynapse Feb 6, 2025
e4c89e5
ggml : optimize and build warning fix for LoongArch (llama/11709)
MQ-mengqing Feb 7, 2025
52d3ac8
SYCL: remove XMX info from print devices (llama/11712)
qnixsynapse Feb 7, 2025
2ea46bc
vulkan: print shared memory size (llama/11719)
jeffbolznv Feb 7, 2025
03aba11
CUDA: fix min. version for movmatrix (llama/11751)
JohannesGaessler Feb 8, 2025
a616ea5
ggml: Fix data race in ggml threadpool (llama/11736)
kkontny Feb 8, 2025
d196c35
vulkan: account for lookup tables when checking shared memory size (l…
jeffbolznv Feb 9, 2025
7da8fa6
vulkan: add environment variable GGML_VK_PREFER_HOST_MEMORY to avoid …
wbruna Feb 10, 2025
94eaf97
vulkan: Make Vulkan optional at runtime (ggml/11493). (llama/11494)
daym Feb 10, 2025
06db520
fix: typos in documentation files (llama/11791)
maximevtush Feb 10, 2025
8903621
CUDA: use arch list for compatibility check (llama/11775)
JohannesGaessler Feb 10, 2025
7ebc835
Fix #11802: Compile bug - RegQueryValueExA changed to RegQueryValueEx…
sheldonrobinson Feb 11, 2025
523f00b
CUDA: fix CUDART_VERSION checks (llama/11821)
JohannesGaessler Feb 12, 2025
2686930
ggml-cpu: Fix duplicate MATMUL_INT8 (llama/11817)
ownia Feb 12, 2025
04063cb
ggml : fix multi-threaded clamp_f32 (llama/11824)
Burton2000 Feb 12, 2025
e288e30
cleanup: fix compile warnings associated with gnu_printf (llama/11811)
bandoti Feb 12, 2025
5590b2b
HIP: Switch to std::vector in rocblas version check (llama/11820)
IMbackK Feb 12, 2025
5b62183
HIP: Remove GCN from list of devices that avoid MMQ (llama/11831)
IMbackK Feb 12, 2025
4151a81
ggml : x2 speed for WASM by optimizing SIMD (llama/11453)
ngxson Feb 12, 2025
322965d
ggml-cpu : add chunking support to mul_mat_id (llama/11666)
slaren Feb 13, 2025
07bbd8e
musa: bump MUSA SDK version to rc3.1.1 (llama/11822)
yeahdongcn Feb 13, 2025
f7244e0
llamafile: use member variable instead of constant for iq4nlt (llama/…
jmorganca Feb 13, 2025
0fc51b7
vulkan: linux builds + small subgroup size fixes (llama/11767)
netrunnereve Feb 14, 2025
74a285a
ggml: optimize some vec dot functions for LoongArch ASX (llama/11842)
MQ-mengqing Feb 14, 2025
14b9afe
cuda : add ampere to the list of default architectures (llama/11870)
slaren Feb 14, 2025
3151f2c
opencl: Fix rope and softmax (llama/11833)
lhez Feb 14, 2025
bcc9bef
vulkan: initial support for IQ1_S and IQ1_M quantizations (llama/11528)
remyoudompheng Feb 15, 2025
c65a301
repo : update links to new url (llama/11886)
ggerganov Feb 15, 2025
c9dfdfa
metal : optimize dequant q6_K kernel (llama/11892)
akretz Feb 15, 2025
3dba9f7
metal : fix the crash caused by the lack of residency set support on …
halechan Feb 16, 2025
1a97554
vulkan: support multi/vision rope, and noncontiguous rope (llama/11902)
jeffbolznv Feb 16, 2025
1c5790a
vulkan: implement several ops relevant for ggml_opt (llama/11769)
remyoudompheng Feb 17, 2025
06fed44
CUDA: use async data loading for FlashAttention (llama/11894)
JohannesGaessler Feb 17, 2025
38542f3
ggml: aarch64: implement SVE kernels for q3_K_q8_K vector dot (llama/…
Vithulep Feb 20, 2025
bb7026d
ggml-cpu: Add CPU backend support for KleidiAI library (llama/11390)
chaxu01 Feb 20, 2025
1d80839
MUSA: support ARM64 and enable dp4a .etc (llama/11843)
BodhiHu Feb 21, 2025
3852692
CUDA: correct the lowest Maxwell supported by CUDA 12 (llama/11984)
PureJourney Feb 21, 2025
4fdcb18
cuda: Add Q5_1, Q5_0, Q4_1 and Q4_0 to F32 conversion support. (llama…
gcp Feb 22, 2025
e4f3c48
CUDA: optimize FA for GQA + large batches (llama/12014)
JohannesGaessler Feb 22, 2025
51bb2f9
CUDA: app option to compile without FlashAttention (llama/12025)
JohannesGaessler Feb 22, 2025
c28f401
ggml-cpu: Support s390x SIMD Instruction Set (llama/12019)
taronaeo Feb 22, 2025
56d1ec4
SYCL: Fix GGML_SYCL_DEBUG macro (llama/11995)
qnixsynapse Feb 24, 2025
8ba9b11
Optimize mul_mat for Q4_0 on Intel GPU (llama/12035)
NeoZhangJianyu Feb 24, 2025
fa016e3
opencl: fix for small models (llama/11950)
lhez Feb 24, 2025
351801a
metal : copy kernels for quant to F32/F16 conversions (llama/12017)
gcp Feb 25, 2025
eaa7e83
Support pure float16 add/sub/mul/div operations in the CUDA (and CPU)…
cmdr2 Feb 25, 2025
ee6ab0b
sync : ggml
ggerganov Feb 26, 2025
3 changes: 3 additions & 0 deletions ggml/CMakeLists.txt
@@ -102,6 +102,7 @@ endif()

option(GGML_CPU_HBM "ggml: use memkind for CPU HBM" OFF)
option(GGML_CPU_AARCH64 "ggml: use runtime weight conversion of Q4_0 to Q4_X_X" ON)
+option(GGML_CPU_KLEIDIAI "ggml: use KleidiAI optimized kernels if applicable" OFF)
option(GGML_AVX "ggml: enable AVX" ${INS_ENB})
option(GGML_AVX_VNNI "ggml: enable AVX-VNNI" OFF)
option(GGML_AVX2 "ggml: enable AVX2" ${INS_ENB})
@@ -121,6 +122,7 @@ endif()
option(GGML_LASX "ggml: enable lasx" ON)
option(GGML_LSX "ggml: enable lsx" ON)
option(GGML_RVV "ggml: enable rvv" ON)
+option(GGML_VXE "ggml: enable vxe" ON)

option(GGML_CPU_ALL_VARIANTS "ggml: build all variants of the CPU backend (requires GGML_BACKEND_DL)" OFF)
set(GGML_CPU_ARM_ARCH "" CACHE STRING "ggml: CPU architecture for ARM")
@@ -150,6 +152,7 @@ set (GGML_CUDA_PEER_MAX_BATCH_SIZE "128" CACHE STRING
"ggml: max. batch size for using peer access")
option(GGML_CUDA_NO_PEER_COPY "ggml: do not use peer to peer copies" OFF)
option(GGML_CUDA_NO_VMM "ggml: do not try to use CUDA VMM" OFF)
+option(GGML_CUDA_FA "ggml: compile ggml FlashAttention CUDA kernels" ON)
option(GGML_CUDA_FA_ALL_QUANTS "ggml: compile all quants for FlashAttention" OFF)
option(GGML_CUDA_GRAPHS "ggml: use CUDA graphs (llama.cpp only)" ${GGML_CUDA_GRAPHS_DEFAULT})

4 changes: 3 additions & 1 deletion ggml/include/ggml-cpu.h
@@ -8,7 +8,7 @@ extern "C" {
#endif

// the compute plan that needs to be prepared for ggml_graph_compute()
-// since https://github.com/ggerganov/ggml/issues/287
+// since https://github.com/ggml-org/ggml/issues/287
struct ggml_cplan {
size_t work_size; // size of work buffer, calculated by `ggml_graph_plan()`
uint8_t * work_data; // work buffer, to be allocated by caller before calling to `ggml_graph_compute()`
@@ -95,9 +95,11 @@ extern "C" {
GGML_BACKEND_API int ggml_cpu_has_matmul_int8(void);
GGML_BACKEND_API int ggml_cpu_has_sve (void);
GGML_BACKEND_API int ggml_cpu_get_sve_cnt (void); // sve vector length in bytes
+GGML_BACKEND_API int ggml_cpu_has_sme (void);
// other
GGML_BACKEND_API int ggml_cpu_has_riscv_v (void);
GGML_BACKEND_API int ggml_cpu_has_vsx (void);
+GGML_BACKEND_API int ggml_cpu_has_vxe (void);
GGML_BACKEND_API int ggml_cpu_has_wasm_simd (void);
GGML_BACKEND_API int ggml_cpu_has_llamafile (void);

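The two additions above (ggml_cpu_has_sme, ggml_cpu_has_vxe) extend the existing runtime feature-query API, matching the SME detection and the s390x VXE backend introduced in this sync. A minimal sketch of how a caller might probe them (assuming the usual ggml include and link setup; the program itself is illustrative, not part of this PR):

#include <stdio.h>
#include "ggml-cpu.h"

int main(void) {
    // print a few of the runtime-detected CPU features
    printf("SVE: %d (vector length: %d bytes)\n", ggml_cpu_has_sve(), ggml_cpu_get_sve_cnt());
    printf("SME: %d\n", ggml_cpu_has_sme()); // new in this sync
    printf("VXE: %d\n", ggml_cpu_has_vxe()); // new in this sync
    return 0;
}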
2 changes: 1 addition & 1 deletion ggml/include/ggml-metal.h
@@ -45,7 +45,7 @@ GGML_BACKEND_API bool ggml_backend_is_metal(ggml_backend_t backend);

GGML_DEPRECATED(
GGML_BACKEND_API ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data, size_t size, size_t max_size),
"obsoleted by the new device interface - https://github.com/ggerganov/llama.cpp/pull/9713");
"obsoleted by the new device interface - https://github.com/ggml-org/llama.cpp/pull/9713");

GGML_BACKEND_API void ggml_backend_metal_set_abort_callback(ggml_backend_t backend, ggml_abort_callback abort_callback, void * user_data);

2 changes: 0 additions & 2 deletions ggml/include/ggml-vulkan.h
@@ -10,8 +10,6 @@ extern "C" {
#define GGML_VK_NAME "Vulkan"
#define GGML_VK_MAX_DEVICES 16

-GGML_BACKEND_API void ggml_vk_instance_init(void);
-
// backend API
GGML_BACKEND_API ggml_backend_t ggml_backend_vk_init(size_t dev_num);

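With ggml_vk_instance_init() gone from the public header, external code is left with the backend API shown above. A minimal sketch of initializing and releasing the Vulkan backend (device index 0 and the error handling are assumptions for illustration; ggml_backend_free comes from ggml-backend.h):

#include <stdio.h>
#include "ggml-backend.h"
#include "ggml-vulkan.h"

int main(void) {
    ggml_backend_t backend = ggml_backend_vk_init(0); // first Vulkan device
    if (backend == NULL) {
        fprintf(stderr, "Vulkan backend unavailable\n");
        return 1;
    }
    // ... build and compute ggml graphs here ...
    ggml_backend_free(backend);
    return 0;
}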
2 changes: 1 addition & 1 deletion ggml/include/ggml.h
@@ -198,7 +198,7 @@

#ifndef __GNUC__
# define GGML_ATTRIBUTE_FORMAT(...)
-#elif defined(__MINGW32__)
+#elif defined(__MINGW32__) && !defined(__clang__)
# define GGML_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
#else
# define GGML_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
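The extra !defined(__clang__) check keeps the gnu_printf archetype for MinGW GCC only; clang builds now fall through to the plain printf attribute, avoiding the gnu_printf-related warnings addressed in the cleanup commit above. For illustration, a hypothetical printf-style declaration (not part of ggml) that the macro would annotate:

#include "ggml.h"

// Hypothetical logging declaration, for illustration only:
// argument 2 is the format string, variadic arguments start at 3.
GGML_ATTRIBUTE_FORMAT(2, 3)
void my_log(int level, const char * fmt, ...);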
14 changes: 1 addition & 13 deletions ggml/src/ggml-alloc.c
@@ -989,19 +989,7 @@ ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_conte
this_size = GGML_PAD(ggml_backend_buft_get_alloc_size(buft, t), alignment);
}

-if (this_size > max_size) {
-GGML_LOG_ERROR("%s: tensor %s is too large to fit in a %s buffer (tensor size: %zu, max buffer size: %zu)\n",
-__func__, t->name,
-ggml_backend_buft_name(buft),
-this_size, max_size);
-for (size_t i = 0; i < n_buffers; i++) {
-ggml_backend_buffer_free(buffers[i]);
-}
-free(buffers);
-return NULL;
-}
-
-if ((cur_buf_size + this_size) > max_size) {
+if (cur_buf_size > 0 && (cur_buf_size + this_size) > max_size) {
// allocate tensors in the current buffer
if (!alloc_tensor_range(ctx, first, t, buft, cur_buf_size, &buffers, &n_buffers)) {
return NULL;
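Net effect of this hunk: a tensor larger than the buffer type's maximum size no longer aborts allocation up front. The overflow check now only closes the current range when it already holds at least one tensor, so an oversized tensor ends up in a buffer of its own and the backend decides at allocation time whether the request can be satisfied. A runnable toy sketch of that splitting rule (the sizes, limit, and flush_range helper are made up for illustration):

#include <stddef.h>
#include <stdio.h>

#define MAX_SIZE 256 // stand-in for the buffer type's max size

// stand-in for alloc_tensor_range(): allocates one buffer for tensors [first, last)
static void flush_range(size_t first, size_t last, size_t size) {
    printf("buffer for tensors [%zu, %zu), total size %zu\n", first, last, size);
}

int main(void) {
    size_t sizes[] = { 100, 100, 300, 50 }; // 300 exceeds MAX_SIZE but is still attempted
    size_t n = sizeof(sizes) / sizeof(sizes[0]);
    size_t first = 0, cur_buf_size = 0;
    for (size_t i = 0; i < n; i++) {
        // same shape as the new condition: only flush a non-empty range
        if (cur_buf_size > 0 && cur_buf_size + sizes[i] > MAX_SIZE) {
            flush_range(first, i, cur_buf_size);
            first = i;
            cur_buf_size = 0;
        }
        cur_buf_size += sizes[i];
    }
    if (cur_buf_size > 0) {
        flush_range(first, n, cur_buf_size); // final range
    }
    return 0;
}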
2 changes: 0 additions & 2 deletions ggml/src/ggml-common.h
@@ -473,7 +473,6 @@ GGML_TABLE_BEGIN(uint8_t, ksigns_iq2xs, 128)
240, 113, 114, 243, 116, 245, 246, 119, 120, 249, 250, 123, 252, 125, 126, 255,
GGML_TABLE_END()

-//#if __CUDA_ARCH__ >= GGML_CUDA_CC_DP4A // lowest compute capability for integer intrinsics
GGML_TABLE_BEGIN(uint64_t, ksigns64, 128)
0x0000000000000000, 0xff000000000000ff, 0xff0000000000ff00, 0x000000000000ffff,
0xff00000000ff0000, 0x0000000000ff00ff, 0x0000000000ffff00, 0xff00000000ffffff,
@@ -508,7 +507,6 @@ GGML_TABLE_BEGIN(uint64_t, ksigns64, 128)
0x00ffffffff000000, 0xffffffffff0000ff, 0xffffffffff00ff00, 0x00ffffffff00ffff,
0xffffffffffff0000, 0x00ffffffffff00ff, 0x00ffffffffffff00, 0xffffffffffffffff,
GGML_TABLE_END()
-//#endif


GGML_TABLE_BEGIN(uint64_t, iq2xxs_grid, 256)
123 changes: 117 additions & 6 deletions ggml/src/ggml-cpu/CMakeLists.txt
@@ -111,21 +111,23 @@ function(ggml_add_cpu_backend_variant_impl tag_name)
function(check_arm_feature tag code)
set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS})
set(CMAKE_REQUIRED_FLAGS "${ARM_MCPU_FLAG}+${tag}")
-check_cxx_source_runs(
-"${code}"
-GGML_MACHINE_SUPPORTS_${tag}
-)
+check_cxx_source_runs("${code}" GGML_MACHINE_SUPPORTS_${tag})
if (GGML_MACHINE_SUPPORTS_${tag})
set(ARM_MCPU_FLAG_FIX "${ARM_MCPU_FLAG_FIX}+${tag}" PARENT_SCOPE)
else()
-set(ARM_MCPU_FLAG_FIX "${ARM_MCPU_FLAG_FIX}+no${tag}" PARENT_SCOPE)
+set(CMAKE_REQUIRED_FLAGS "${ARM_MCPU_FLAG}+no${tag}")
+check_cxx_source_compiles("int main() { return 0; }" GGML_MACHINE_SUPPORTS_no${tag})
+if (GGML_MACHINE_SUPPORTS_no${tag})
+set(ARM_MCPU_FLAG_FIX "${ARM_MCPU_FLAG_FIX}+no${tag}" PARENT_SCOPE)
+endif()
endif()
set(CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS_SAVE})
endfunction()

check_arm_feature(dotprod "#include <arm_neon.h>\nint main() { int8x16_t _a, _b; volatile int32x4_t _s = vdotq_s32(_s, _a, _b); return 0; }")
check_arm_feature(i8mm "#include <arm_neon.h>\nint main() { int8x16_t _a, _b; volatile int32x4_t _s = vmmlaq_s32(_s, _a, _b); return 0; }")
check_arm_feature(sve "#include <arm_sve.h>\nint main() { svfloat32_t _a, _b; volatile svfloat32_t _c = svadd_f32_z(svptrue_b8(), _a, _b); return 0; }")
+check_arm_feature(sme "#include <arm_sme.h>\n__arm_locally_streaming int main() { __asm__ volatile(\"smstart; smstop;\"); return 0; }")

list(APPEND ARCH_FLAGS "${ARM_MCPU_FLAG}${ARM_MCPU_FLAG_FIX}")
else()
@@ -150,7 +152,7 @@ function(ggml_add_cpu_backend_variant_impl tag_name)
if (ARM_FEATURE_RESULT)
message(WARNING "Failed to get ARM features")
else()
-foreach(feature DOTPROD SVE MATMUL_INT8 FMA FP16_VECTOR_ARITHMETIC)
+foreach(feature DOTPROD SVE MATMUL_INT8 FMA FP16_VECTOR_ARITHMETIC SME)
string(FIND "${ARM_FEATURE}" "__ARM_FEATURE_${feature} 1" feature_pos)
if (NOT ${feature_pos} EQUAL -1)
message(STATUS "ARM feature ${feature} enabled")
@@ -304,6 +306,27 @@ function(ggml_add_cpu_backend_variant_impl tag_name)
if (GGML_RVV)
list(APPEND ARCH_FLAGS -march=rv64gcv -mabi=lp64d)
endif()
+elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "s390x")
+message(STATUS "s390x detected")
+file(READ "/proc/cpuinfo" CPUINFO_CONTENTS)
+string(REGEX REPLACE "machine[ \t\r\n]*=[ \t\r\n]*([0-9]+)" "\\1" S390X_M ${CPUINFO_CONTENTS})
+
+# TODO: Separation to determine activation of VX/VXE/VXE2
+if (${S390X_M} MATCHES "8561|8562")
+message(STATUS "z15 target")
+list(APPEND ARCH_FLAGS -march=z15 -mtune=z15)
+elseif (${S390X_M} MATCHES "3931")
+message(STATUS "z16 target")
+list(APPEND ARCH_FLAGS -march=z16 -mtune=z16)
+else()
+message(STATUS "Unknown target")
+message(WARNING "Unknown target. If you are compiling for z14 and earlier, you might have to add -DGGML_VXE=OFF.")
+list(APPEND ARCH_FLAGS -march=native -mtune=native)
+endif()
+
+if (GGML_VXE)
+list(APPEND ARCH_FLAGS -mvx -mzvector)
+endif()
else()
message(STATUS "Unknown architecture")
endif()
@@ -312,6 +335,94 @@ function(ggml_add_cpu_backend_variant_impl tag_name)
target_compile_definitions(${GGML_CPU_NAME} PRIVATE GGML_USE_CPU_AARCH64)
endif()

+if (GGML_CPU_KLEIDIAI)
+message(STATUS "Using KleidiAI optimized kernels if applicable")
+
+# Disable the KleidiAI tests
+set(KLEIDIAI_BUILD_TESTS OFF)
+
+# Fetch KleidiAI sources:
+include(FetchContent)
+set(KLEIDIAI_COMMIT_TAG "v1.3.0")
+set(KLEIDIAI_DOWNLOAD_URL "https://github.com/ARM-software/kleidiai/archive/refs/tags/${KLEIDIAI_COMMIT_TAG}.tar.gz")
+set(KLEIDIAI_ARCHIVE_MD5 "060bd2dc64642b091f461cc8dd7426d9")
+
+if (POLICY CMP0135)
+cmake_policy(SET CMP0135 NEW)
+endif()
+
+FetchContent_Declare(KleidiAI_Download
+URL ${KLEIDIAI_DOWNLOAD_URL}
+DOWNLOAD_EXTRACT_TIMESTAMP NEW
+URL_HASH MD5=${KLEIDIAI_ARCHIVE_MD5})
+
+FetchContent_MakeAvailable(KleidiAI_Download)
+FetchContent_GetProperties(KleidiAI_Download
+SOURCE_DIR KLEIDIAI_SRC
+POPULATED KLEIDIAI_POPULATED)
+
+if (NOT KLEIDIAI_POPULATED)
+message(FATAL_ERROR "KleidiAI source downloaded failed.")
+endif()
+
+add_compile_definitions(GGML_USE_CPU_KLEIDIAI)
+
+# Remove kleidiai target after fetching it
+if (TARGET kleidiai)
+set_target_properties(kleidiai PROPERTIES EXCLUDE_FROM_ALL TRUE)
+endif()
+
+list(APPEND GGML_CPU_SOURCES
+ggml-cpu/kleidiai/kleidiai.cpp
+ggml-cpu/kleidiai/kernels.cpp
+ggml-cpu/kleidiai/kleidiai.h
+ggml-cpu/kleidiai/kernels.h
+)
+
+# KleidiAI
+include_directories(
+${KLEIDIAI_SRC}/
+${KLEIDIAI_SRC}/kai/
+${KLEIDIAI_SRC}/kai/ukernels/
+${KLEIDIAI_SRC}/kai/ukernels/matmul/
+${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/
+${KLEIDIAI_SRC}/kai/ukernels/matmul/pack/)
+
+set(ARCH_FLAGS_TEMP "${ARCH_FLAGS}")
+if (NOT ARCH_FLAGS_TEMP)
+string(REGEX MATCH "-march=[^ ]+" ARCH_FLAGS_TEMP "${CMAKE_C_FLAGS}")
+endif()
+string(FIND "${ARCH_FLAGS_TEMP}" "+dotprod" DOTPROD_ENABLED)
+string(FIND "${ARCH_FLAGS_TEMP}" "+i8mm" I8MM_ENABLED)
+string(FIND "${ARCH_FLAGS_TEMP}" "+sme" SME_ENABLED)
+
+set(PRIVATE_ARCH_FLAGS ${ARCH_FLAGS})
+
+list(APPEND GGML_KLEIDIAI_SOURCES ${KLEIDIAI_SRC}/kai/ukernels/matmul/pack/kai_lhs_quant_pack_qsi8d32p_f32.c)
+list(APPEND GGML_KLEIDIAI_SOURCES ${KLEIDIAI_SRC}/kai/ukernels/matmul/pack/kai_rhs_pack_nxk_qsi4c32ps1s0scalef16_qsu4c32s16s0_neon.c)
+list(APPEND GGML_KLEIDIAI_SOURCES ${KLEIDIAI_SRC}/kai/ukernels/matmul/pack/kai_lhs_quant_pack_qsi8d32p_f32_neon.c)
+list(APPEND GGML_KLEIDIAI_SOURCES ${KLEIDIAI_SRC}/kai/ukernels/matmul/pack/kai_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0.c)
+
+if (NOT DOTPROD_ENABLED MATCHES -1)
+list(APPEND GGML_KLEIDIAI_SOURCES ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/kai_matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod.c)
+list(APPEND GGML_KLEIDIAI_SOURCES ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/kai_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4x4_1x4_neon_dotprod.c)
+list(APPEND GGML_KLEIDIAI_SOURCES ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/kai_matmul_clamp_f32_qsi8d32p4x4_qsi4c32p4x4_16x4_neon_dotprod.c)
+endif()
+
+if (NOT I8MM_ENABLED MATCHES -1)
+list(APPEND GGML_KLEIDIAI_SOURCES ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/kai_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_16x4_neon_i8mm.c)
+endif()
+
+if (NOT SME_ENABLED MATCHES -1)
+list(APPEND GGML_KLEIDIAI_SOURCES ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/kai_matmul_clamp_f32_qsi8d32p1vlx4_qsi4c32p4vlx4_1vlx4vl_sme2_mopa.c)
+list(APPEND GGML_KLEIDIAI_SOURCES ${KLEIDIAI_SRC}/kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/kai_matmul_clamp_f32_qsi8d32p1x4_qsi4c32p4vlx4_1x4vl_sme2_sdot.c)
+set(PRIVATE_ARCH_FLAGS "${PRIVATE_ARCH_FLAGS}+sve+sve2")
+endif()
+
+set_source_files_properties(${GGML_KLEIDIAI_SOURCES} PROPERTIES COMPILE_OPTIONS "${PRIVATE_ARCH_FLAGS}")
+list(APPEND GGML_CPU_SOURCES ${GGML_KLEIDIAI_SOURCES})
+endif()
+
message(STATUS "Adding CPU backend variant ${GGML_CPU_NAME}: ${ARCH_FLAGS} ${ARCH_DEFINITIONS}")
target_sources(${GGML_CPU_NAME} PRIVATE ${GGML_CPU_SOURCES})
target_compile_options(${GGML_CPU_NAME} PRIVATE ${ARCH_FLAGS})