Commit 55cc9ac

Merge branch 'upstream' into concedo_experimental
# Conflicts:
#	.github/workflows/release.yml
#	README.md
#	ggml/src/ggml-cann/aclnn_ops.cpp
#	ggml/src/ggml-cann/ggml-cann.cpp
#	tools/mtmd/CMakeLists.txt
#	tools/mtmd/clip.cpp
#	tools/mtmd/clip.h
2 parents ec04115 + b775345 commit 55cc9ac

35 files changed: 95820 additions & 465 deletions

.editorconfig

Lines changed: 4 additions & 0 deletions
@@ -48,3 +48,7 @@ end_of_line = unset
 charset = unset
 trim_trailing_whitespace = unset
 insert_final_newline = unset
+
+[tools/mtmd/miniaudio.h]
+trim_trailing_whitespace = unset
+insert_final_newline = unset

.github/workflows/winget.yml

Lines changed: 42 additions & 0 deletions
@@ -0,0 +1,42 @@
+name: Update Winget Package
+
+on:
+  workflow_dispatch: # allows manual triggering
+  schedule:
+    - cron: '28 5 * * *' # Update every day at 5:28 UTC
+
+jobs:
+  update:
+    name: Update Winget Package
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Install cargo binstall
+        uses: cargo-bins/cargo-binstall@268643a6b5ea099f5718ee5cd3ff7dc89a5eb49b
+
+      - name: Install komac
+        run: |
+          cargo binstall komac@2.11.2 -y
+
+      - name: Find latest release
+        id: find_latest_release
+        uses: actions/github-script@v6
+        with:
+          script: |
+            const { data: releases } = await github.rest.repos.listReleases({
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+            });
+            console.log("Latest release:", releases[0].tag_name);
+            return releases[0].tag_name;
+
+      - name: Update manifest
+        env:
+          VERSION: ${{ steps.find_latest_release.outputs.result }}
+        run: |
+          echo "Updating manifest..."
+          komac update --version ${{ env.VERSION }} \
+            --urls "https://github.com/ggml-org/llama.cpp/releases/download/${{ env.VERSION }}/llama-${{ env.VERSION }}-bin-win-vulkan-x64.zip" \
+            --token ${{ secrets.WINGET_GITHUB_TOKEN }} \
+            --submit \
+            ggml.llamacpp

common/arg.cpp

Lines changed: 5 additions & 5 deletions
@@ -40,7 +40,7 @@
 using json = nlohmann::ordered_json;
 
 std::initializer_list<enum llama_example> mmproj_examples = {
-    LLAMA_EXAMPLE_LLAVA,
+    LLAMA_EXAMPLE_MTMD,
     LLAMA_EXAMPLE_SERVER,
 };
 
@@ -2234,12 +2234,12 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         }
     ).set_examples(mmproj_examples).set_env("LLAMA_ARG_NO_MMPROJ_OFFLOAD"));
     add_opt(common_arg(
-        {"--image"}, "FILE",
-        "path to an image file. use with multimodal models. Specify multiple times for batching",
+        {"--image", "--audio"}, "FILE",
+        "path to an image or audio file. use with multimodal models, can be repeated if you have multiple files\n",
         [](common_params & params, const std::string & value) {
             params.image.emplace_back(value);
         }
-    ).set_examples({LLAMA_EXAMPLE_LLAVA}));
+    ).set_examples({LLAMA_EXAMPLE_MTMD}));
     if (llama_supports_rpc()) {
         add_opt(common_arg(
             {"--rpc"}, "SERVERS",
@@ -2869,7 +2869,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         [](common_params & params, const std::string & value) {
             params.chat_template = value;
         }
-    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_LLAVA}).set_env("LLAMA_ARG_CHAT_TEMPLATE"));
+    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_MTMD}).set_env("LLAMA_ARG_CHAT_TEMPLATE"));
     add_opt(common_arg(
         {"--chat-template-file"}, "JINJA_TEMPLATE_FILE",
         string_format(
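
The changed --image / --audio option above keeps appending every value to the single params.image vector, so passing the flag several times queues several media files. A minimal standalone sketch of that accumulate-on-repeat pattern (hypothetical params_sketch struct and a hand-rolled argv loop, not llama.cpp's actual common_arg machinery):

#include <iostream>
#include <string>
#include <vector>

// Hypothetical stand-in for common_params: repeated --image/--audio values
// all land in the same vector, mirroring params.image in the diff above.
struct params_sketch {
    std::vector<std::string> image; // image and audio paths share this list
};

int main(int argc, char ** argv) {
    params_sketch params;
    for (int i = 1; i + 1 < argc; ++i) {
        const std::string flag = argv[i];
        if (flag == "--image" || flag == "--audio") {
            params.image.emplace_back(argv[++i]); // repeatable: each occurrence adds one file
        }
    }
    for (const auto & path : params.image) {
        std::cout << "media file: " << path << "\n";
    }
    return 0;
}

Invoked as, say, ./sketch --image a.png --audio b.wav --image c.jpg, it prints all three paths, which is the behaviour the new help text ("can be repeated if you have multiple files") describes.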

common/common.h

Lines changed: 1 addition & 1 deletion
@@ -72,7 +72,7 @@ enum llama_example {
     LLAMA_EXAMPLE_SERVER,
     LLAMA_EXAMPLE_CVECTOR_GENERATOR,
     LLAMA_EXAMPLE_EXPORT_LORA,
-    LLAMA_EXAMPLE_LLAVA,
+    LLAMA_EXAMPLE_MTMD,
     LLAMA_EXAMPLE_LOOKUP,
     LLAMA_EXAMPLE_PARALLEL,
     LLAMA_EXAMPLE_TTS,
