@@ -19,10 +19,13 @@ Release: 1%{?dist}
 Summary: CPU Inference of LLaMA model in pure C/C++ (no CUDA/OpenCL)
 License: MIT
 Source0: https://github.com/ggerganov/llama.cpp/archive/refs/heads/master.tar.gz
-BuildRequires: coreutils make gcc-c++ git libstdc++-devel
+BuildRequires: coreutils cmake make gcc-c++ git libstdc++-devel
 Requires: libstdc++
 URL: https://github.com/ggerganov/llama.cpp

+# CMake rpaths kill the build.
+%global __brp_check_rpaths %{nil}
+
 %define debug_package %{nil}
 %define source_date_epoch_from_changelog 0

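
The __brp_check_rpaths override above disables RPM's rpath check, which otherwise fails the build because CMake embeds build-tree rpaths into the binaries. To see the offending entries on a built binary (a sketch; the build/bin path follows from the cmake layout used in the next hunk):

    readelf -d build/bin/llama-server | grep -E 'RPATH|RUNPATH'

An alternative would be to keep CMake from emitting rpaths at all, e.g. configuring with -DCMAKE_SKIP_RPATH=ON, but that is not the route this change takes.
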
@@ -34,13 +37,14 @@ Models are not included in this package and must be downloaded separately.
 %setup -n llama.cpp-master

 %build
-make -j
+cmake -B build -DBUILD_SHARED_LIBS=0
+cmake --build build --config Release -j$(nproc)

 %install
 mkdir -p %{buildroot}%{_bindir}/
-cp -p llama-cli %{buildroot}%{_bindir}/llama-cli
+cd build/bin
+ls
 cp -p llama-server %{buildroot}%{_bindir}/llama-server
-cp -p llama-simple %{buildroot}%{_bindir}/llama-simple

 mkdir -p %{buildroot}/usr/lib/systemd/system
 %{__cat} << EOF > %{buildroot}/usr/lib/systemd/system/llama.service
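
To sanity-check the new %build steps outside of rpmbuild (a sketch, run from a fresh llama.cpp checkout; BUILD_SHARED_LIBS=0 links the llama libraries statically, so the packaged llama-server carries no private shared objects):

    cmake -B build -DBUILD_SHARED_LIBS=0
    cmake --build build --config Release -j$(nproc)
    ./build/bin/llama-server --help
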
@@ -69,9 +73,7 @@ rm -rf %{buildroot}
 rm -rf %{_builddir}/*

 %files
-%{_bindir}/llama-cli
 %{_bindir}/llama-server
-%{_bindir}/llama-simple
 /usr/lib/systemd/system/llama.service
 %config /etc/sysconfig/llama

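
To confirm the trimmed %files list matches the payload, build the package and list its contents (a sketch; the spec filename and rpmbuild output directory are assumptions about the local setup):

    rpmbuild -bb llama.cpp.spec
    rpm -qlp ~/rpmbuild/RPMS/$(uname -m)/llama.cpp-*.rpm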