Skip to content

Commit 8daed6e

Browse files
Merge pull request #98 from menloresearch/update-dev-from-master-2025-05-23-00-08
Sync master with upstream release b5460
2 parents edc2f9b + 3079e9a commit 8daed6e

29 files changed: +95,500 additions, −276 deletions

.editorconfig

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -48,3 +48,7 @@ end_of_line = unset
4848
charset = unset
4949
trim_trailing_whitespace = unset
5050
insert_final_newline = unset
51+
52+
[tools/mtmd/miniaudio.h]
53+
trim_trailing_whitespace = unset
54+
insert_final_newline = unset

.github/workflows/release.yml

Lines changed: 13 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -448,6 +448,7 @@ jobs:
448448
WINDOWS_BASEKIT_URL: https://registrationcenter-download.intel.com/akdlm/IRC_NAS/7cd9bba0-7aab-4e30-b3ae-2221006a4a05/intel-oneapi-base-toolkit-2025.1.1.34_offline.exe
449449
WINDOWS_DPCPP_MKL: intel.oneapi.win.cpp-dpcpp-common:intel.oneapi.win.mkl.devel:intel.oneapi.win.dnnl:intel.oneapi.win.tbb.devel
450450
ONEAPI_ROOT: "C:/Program Files (x86)/Intel/oneAPI"
451+
451452
steps:
452453
- name: Clone
453454
id: checkout
@@ -513,7 +514,9 @@ jobs:
513514

514515
strategy:
515516
matrix:
516-
gpu_target: [gfx1100, gfx1101, gfx1030]
517+
include:
518+
- name: "radeon"
519+
gpu_targets: "gfx1100;gfx1101;gfx1102;gfx1030;gfx1031;gfx1032"
517520

518521
steps:
519522
- name: Clone
@@ -528,7 +531,7 @@ jobs:
528531
- name: ccache
529532
uses: hendrikmuhs/[email protected]
530533
with:
531-
key: windows-latest-cmake-hip-${{ matrix.gpu_target }}-x64
534+
key: windows-latest-cmake-hip-${{ matrix.name }}-x64
532535
evict-old-files: 1d
533536

534537
- name: Install
@@ -554,9 +557,12 @@ jobs:
554557
cmake -G "Unix Makefiles" -B build -S . `
555558
-DCMAKE_C_COMPILER="${env:HIP_PATH}\bin\clang.exe" `
556559
-DCMAKE_CXX_COMPILER="${env:HIP_PATH}\bin\clang++.exe" `
557-
-DCMAKE_CXX_FLAGS="-I$($PWD.Path.Replace('\', '/'))/rocwmma/library/include/" `
560+
-DCMAKE_CXX_FLAGS="-I$($PWD.Path.Replace('\', '/'))/rocwmma/library/include/ -Wno-ignored-attributes -Wno-nested-anon-types" `
558561
-DCMAKE_BUILD_TYPE=Release `
559-
-DAMDGPU_TARGETS=${{ matrix.gpu_target }} `
562+
-DGGML_BACKEND_DL=ON `
563+
-DGGML_NATIVE=OFF `
564+
-DGGML_CPU=OFF `
565+
-DAMDGPU_TARGETS="${{ matrix.gpu_targets }}" `
560566
-DGGML_HIP_ROCWMMA_FATTN=ON `
561567
-DGGML_HIP=ON `
562568
-DLLAMA_CURL=OFF
@@ -569,13 +575,13 @@ jobs:
569575
- name: Pack artifacts
570576
id: pack_artifacts
571577
run: |
572-
7z a llama-bin-win-hip-${{ matrix.gpu_target }}-x64.zip .\build\bin\*
578+
7z a llama-bin-win-hip-${{ matrix.name }}-x64.zip .\build\bin\*
573579
574580
- name: Upload artifacts
575581
uses: actions/upload-artifact@v4
576582
with:
577-
path: llama-bin-win-hip-${{ matrix.gpu_target }}-x64.zip
578-
name: llama-bin-win-hip-${{ matrix.gpu_target }}-x64.zip
583+
path: llama-bin-win-hip-${{ matrix.name }}-x64.zip
584+
name: llama-bin-win-hip-${{ matrix.name }}-x64.zip
579585

580586
ios-xcode-build:
581587
runs-on: macos-latest

README.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -580,3 +580,4 @@ $ echo "source ~/.llama-completion.bash" >> ~/.bashrc
580580
- [minja](https://github.com/google/minja) - Minimal Jinja parser in C++, used by various tools/examples - MIT License
581581
- [linenoise.cpp](./tools/run/linenoise.cpp/linenoise.cpp) - C++ library that provides readline-like line editing capabilities, used by `llama-run` - BSD 2-Clause License
582582
- [curl](https://curl.se/) - Client-side URL transfer library, used by various tools/examples - [CURL License](https://curl.se/docs/copyright.html)
583+
- [miniaudio.h](https://github.com/mackron/miniaudio) - Single-header audio format decoder, used by multimodal subsystem - Public domain

common/arg.cpp

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@
3939
using json = nlohmann::ordered_json;
4040

4141
std::initializer_list<enum llama_example> mmproj_examples = {
42-
LLAMA_EXAMPLE_LLAVA,
42+
LLAMA_EXAMPLE_MTMD,
4343
LLAMA_EXAMPLE_SERVER,
4444
};
4545

@@ -2233,12 +2233,12 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
22332233
}
22342234
).set_examples(mmproj_examples).set_env("LLAMA_ARG_NO_MMPROJ_OFFLOAD"));
22352235
add_opt(common_arg(
2236-
{"--image"}, "FILE",
2237-
"path to an image file. use with multimodal models. Specify multiple times for batching",
2236+
{"--image", "--audio"}, "FILE",
2237+
"path to an image or audio file. use with multimodal models, can be repeated if you have multiple files\n",
22382238
[](common_params & params, const std::string & value) {
22392239
params.image.emplace_back(value);
22402240
}
2241-
).set_examples({LLAMA_EXAMPLE_LLAVA}));
2241+
).set_examples({LLAMA_EXAMPLE_MTMD}));
22422242
if (llama_supports_rpc()) {
22432243
add_opt(common_arg(
22442244
{"--rpc"}, "SERVERS",
@@ -2868,7 +2868,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
28682868
[](common_params & params, const std::string & value) {
28692869
params.chat_template = value;
28702870
}
2871-
).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_LLAVA}).set_env("LLAMA_ARG_CHAT_TEMPLATE"));
2871+
).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_MTMD}).set_env("LLAMA_ARG_CHAT_TEMPLATE"));
28722872
add_opt(common_arg(
28732873
{"--chat-template-file"}, "JINJA_TEMPLATE_FILE",
28742874
string_format(

common/common.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -76,7 +76,7 @@ enum llama_example {
7676
LLAMA_EXAMPLE_SERVER,
7777
LLAMA_EXAMPLE_CVECTOR_GENERATOR,
7878
LLAMA_EXAMPLE_EXPORT_LORA,
79-
LLAMA_EXAMPLE_LLAVA,
79+
LLAMA_EXAMPLE_MTMD,
8080
LLAMA_EXAMPLE_LOOKUP,
8181
LLAMA_EXAMPLE_PARALLEL,
8282
LLAMA_EXAMPLE_TTS,

0 commit comments

Comments (0)