@@ -5,43 +5,49 @@ project(llama_cpp)
# User-facing build switches (both default ON for a full wheel build).
option(LLAMA_BUILD "Build llama.cpp shared library and install alongside python package" ON)
option(MTMD_BUILD "Build mtmd shared library and install alongside python package" ON)
# Install a target's runtime artifacts into both Python package lib
# directories so they ship inside the wheel.
#
# Arguments:
#   target - name of a CMake target; the call is a no-op if the target does
#            not exist (not every ggml backend is built in every configuration).
function(llama_cpp_python_install_target target)
    if(NOT TARGET ${target})
        return()
    endif()

    # Both destinations receive identical contents: the source-tree copy is
    # used for editable installs, the SKBUILD platlib copy for built wheels.
    # Quoted because either path may contain spaces.
    set(install_dirs
        "${CMAKE_CURRENT_SOURCE_DIR}/llama_cpp/lib"
        "${SKBUILD_PLATLIB_DIR}/llama_cpp/lib"
    )

    foreach(dir IN LISTS install_dirs)
        install(
            TARGETS ${target}
            LIBRARY DESTINATION "${dir}"
            RUNTIME DESTINATION "${dir}"
            ARCHIVE DESTINATION "${dir}"
            FRAMEWORK DESTINATION "${dir}"
            RESOURCE DESTINATION "${dir}"
        )

        # Copy dependent runtime DLLs next to each binary on Windows
        # (workaround for https://github.com/abetlen/llama-cpp-python/issues/563).
        # NOTE(review): $<TARGET_RUNTIME_DLLS:...> requires CMake >= 3.21 —
        # confirm the project's cmake_minimum_required covers this.
        if(WIN32)
            install(
                FILES $<TARGET_RUNTIME_DLLS:${target}>
                DESTINATION "${dir}"
                OPTIONAL # some targets have no runtime DLLs
            )
        endif()
    endforeach()

    # Point the RPATH at the library's own directory so the installed
    # libraries can locate their sibling shared objects.
    if(UNIX)
        if(APPLE)
            set(rpath_origin "@loader_path")
        else()
            set(rpath_origin "$ORIGIN")
        endif()
        set_target_properties(${target} PROPERTIES
            INSTALL_RPATH "${rpath_origin}"
            BUILD_WITH_INSTALL_RPATH TRUE
        )
    endif()
endfunction()
4753
@@ -72,19 +78,12 @@ if (LLAMA_BUILD)
7278
7379 # Architecture detection and settings for Apple platforms
7480 if (APPLE )
75- # Get the target architecture
76- execute_process (
77- COMMAND uname -m
78- OUTPUT_VARIABLE HOST_ARCH
79- OUTPUT_STRIP_TRAILING_WHITESPACE
80- )
81-
8281 # If CMAKE_OSX_ARCHITECTURES is not set, use the host architecture
8382 if (NOT CMAKE_OSX_ARCHITECTURES)
84- set (CMAKE_OSX_ARCHITECTURES ${HOST_ARCH } CACHE STRING "Build architecture for macOS" FORCE)
83+ set (CMAKE_OSX_ARCHITECTURES ${CMAKE_HOST_SYSTEM_PROCESSOR } CACHE STRING "Build architecture for macOS" FORCE)
8584 endif ()
8685
87- message (STATUS "Host architecture: ${HOST_ARCH } " )
86+ message (STATUS "Host architecture: ${CMAKE_HOST_SYSTEM_PROCESSOR } " )
8887 message (STATUS "Target architecture: ${CMAKE_OSX_ARCHITECTURES} " )
8988
9089 # Configure based on target architecture
@@ -109,45 +108,31 @@ if (LLAMA_BUILD)
109108 endif ()
110109 endif ()
111110
112- llama_cpp_python_install_target(llama)
113- llama_cpp_python_install_target(ggml)
114-
115- llama_cpp_python_install_target(ggml-base)
116-
117- llama_cpp_python_install_target(ggml-blas)
118- llama_cpp_python_install_target(ggml-cann)
119- llama_cpp_python_install_target(ggml-cpu)
120- llama_cpp_python_install_target(ggml-cuda)
121- llama_cpp_python_install_target(ggml-hexagon)
122- llama_cpp_python_install_target(ggml-hip)
123- llama_cpp_python_install_target(ggml-metal)
124- llama_cpp_python_install_target(ggml-musa)
125- llama_cpp_python_install_target(ggml-opencl)
126- llama_cpp_python_install_target(ggml-rpc)
127- llama_cpp_python_install_target(ggml-sycl)
128- llama_cpp_python_install_target(ggml-vulkan)
129- llama_cpp_python_install_target(ggml-webgpu)
130- llama_cpp_python_install_target(ggml-zdnn)
131-
132- # Workaround for Windows + CUDA https://github.com/abetlen/llama-cpp-python/issues/563
133- if (WIN32 )
134- install (
135- FILES $<TARGET_RUNTIME_DLLS:llama>
136- DESTINATION ${CMAKE_CURRENT_SOURCE_DIR} /llama_cpp/lib
137- )
138- install (
139- FILES $<TARGET_RUNTIME_DLLS:llama>
140- DESTINATION ${SKBUILD_PLATLIB_DIR} /llama_cpp/lib
141- )
142- install (
143- FILES $<TARGET_RUNTIME_DLLS:ggml>
144- DESTINATION ${CMAKE_CURRENT_SOURCE_DIR} /llama_cpp/lib
145- )
146- install (
147- FILES $<TARGET_RUNTIME_DLLS:ggml>
148- DESTINATION ${SKBUILD_PLATLIB_DIR} /llama_cpp/lib
149- )
150- endif ()
111+ # Define list of GGML targets to install
112+ set (GGML_TARGETS
113+ llama
114+ ggml
115+ ggml-base
116+ ggml-blas
117+ ggml-cann
118+ ggml-cpu
119+ ggml-cuda
120+ ggml-hexagon
121+ ggml-hip
122+ ggml-metal
123+ ggml-musa
124+ ggml-opencl
125+ ggml-rpc
126+ ggml-sycl
127+ ggml-vulkan
128+ ggml-webgpu
129+ ggml-zdnn
130+ )
131+
132+ # Loop through targets to avoid repetitive function calls
133+ foreach (TARGET_NAME ${GGML_TARGETS} )
134+ llama_cpp_python_install_target(${TARGET_NAME} )
135+ endforeach ()
151136
152137 if (MTMD_BUILD)
153138 if (NOT DEFINED LLAMA_BUILD_NUMBER)
@@ -172,10 +157,5 @@ if (LLAMA_BUILD)
172157 endif ()
173158
174159 llama_cpp_python_install_target(mtmd)
175-
176- if (WIN32 )
177- install (FILES $<TARGET_RUNTIME_DLLS:mtmd> DESTINATION ${CMAKE_CURRENT_SOURCE_DIR} /llama_cpp/lib)
178- install (FILES $<TARGET_RUNTIME_DLLS:mtmd> DESTINATION ${SKBUILD_PLATLIB_DIR} /llama_cpp/lib)
179- endif ()
180160 endif ()
181161endif ()