 in
 effectiveStdenv.mkDerivation (finalAttrs: {
   pname = "llama-cpp";
-  version = "3091";
+  version = "3260";

   src = fetchFromGitHub {
     owner = "ggerganov";
     repo = "llama.cpp";
     rev = "refs/tags/b${finalAttrs.version}";
-    hash = "sha256-ppujag6Nrk/M9QMQ4mYe2iADsfKzmfKtOP8Ib7GZBmk=";
+    hash = "sha256-0KVwSzxfGinpv5KkDCgF2J+1ijDv87PlDrC+ldscP6s=";
     leaveDotGit = true;
     postFetch = ''
       git -C "$out" rev-parse --short HEAD > $out/COMMIT
@@ -86,12 +86,12 @@ effectiveStdenv.mkDerivation (finalAttrs: {
   };

   postPatch = ''
-    substituteInPlace ./ggml-metal.m \
+    substituteInPlace ./ggml/src/ggml-metal.m \
       --replace-fail '[bundle pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/bin/ggml-metal.metal\";"

-    substituteInPlace ./scripts/build-info.cmake \
-      --replace-fail 'set(BUILD_NUMBER 0)' 'set(BUILD_NUMBER ${finalAttrs.version})' \
-      --replace-fail 'set(BUILD_COMMIT "unknown")' "set(BUILD_COMMIT \"$(cat COMMIT)\")"
+    substituteInPlace ./scripts/build-info.sh \
+      --replace-fail 'build_number="0"' 'build_number="${finalAttrs.version}"' \
+      --replace-fail 'build_commit="unknown"' "build_commit=\"$(cat COMMIT)\""
   '';

   nativeBuildInputs = [ cmake ninja pkg-config git ]
@@ -109,17 +109,16 @@ effectiveStdenv.mkDerivation (finalAttrs: {

   cmakeFlags = [
     # -march=native is non-deterministic; override with platform-specific flags if needed
-    (cmakeBool "LLAMA_NATIVE" false)
-    (cmakeBool "BUILD_SHARED_SERVER" true)
+    (cmakeBool "GGML_NATIVE" false)
+    (cmakeBool "LLAMA_BUILD_SERVER" true)
     (cmakeBool "BUILD_SHARED_LIBS" true)
-    (cmakeBool "BUILD_SHARED_LIBS" true)
-    (cmakeBool "LLAMA_BLAS" blasSupport)
-    (cmakeBool "LLAMA_CLBLAST" openclSupport)
-    (cmakeBool "LLAMA_CUDA" cudaSupport)
-    (cmakeBool "LLAMA_HIPBLAS" rocmSupport)
-    (cmakeBool "LLAMA_METAL" metalSupport)
-    (cmakeBool "LLAMA_RPC" rpcSupport)
-    (cmakeBool "LLAMA_VULKAN" vulkanSupport)
+    (cmakeBool "GGML_BLAS" blasSupport)
+    (cmakeBool "GGML_CLBLAST" openclSupport)
+    (cmakeBool "GGML_CUDA" cudaSupport)
+    (cmakeBool "GGML_HIPBLAS" rocmSupport)
+    (cmakeBool "GGML_METAL" metalSupport)
+    (cmakeBool "GGML_RPC" rpcSupport)
+    (cmakeBool "GGML_VULKAN" vulkanSupport)
   ]
   ++ optionals cudaSupport [
     (cmakeFeature "CMAKE_CUDA_ARCHITECTURES" cudaPackages.flags.cmakeCudaArchitecturesString)
@@ -138,7 +137,6 @@ effectiveStdenv.mkDerivation (finalAttrs: {
     (cmakeFeature "CMAKE_C_FLAGS" "-D__ARM_FEATURE_DOTPROD=1")
     (cmakeBool "LLAMA_METAL_EMBED_LIBRARY" true)
   ] ++ optionals rpcSupport [
-    "-DLLAMA_RPC=ON"
     # This is done so we can move rpc-server out of bin because llama.cpp doesn't
     # install rpc-server in their install target.
     "-DCMAKE_SKIP_BUILD_RPATH=ON"
@@ -147,10 +145,11 @@ effectiveStdenv.mkDerivation (finalAttrs: {
   # upstream plans on adding targets at the cmakelevel, remove those
   # additional steps after that
   postInstall = ''
-    mv $out/bin/main $out/bin/llama
-    mv $out/bin/server $out/bin/llama-server
+    # Match previous binary name for this package
+    ln -sf $out/bin/llama-cli $out/bin/llama
+
     mkdir -p $out/include
-    cp $src/llama.h $out/include/
+    cp $src/include/llama.h $out/include/
   '' + optionalString rpcSupport "cp bin/rpc-server $out/bin/llama-rpc-server";

   passthru.updateScript = nix-update-script {
0 commit comments