
Commit c946364

Merge commit c946364: 2 parents, 923e9a8 + 8d59d91

104 files changed, +3695 -2051 lines


.github/ISSUE_TEMPLATE/010-bug-compilation.yml
Lines changed: 11 additions & 1 deletion

```diff
@@ -65,12 +65,22 @@ body:
         If possible, please do a git bisect and identify the exact commit that introduced the bug.
     validations:
       required: false
+  - type: textarea
+    id: command
+    attributes:
+      label: Compile command
+      description: >
+        Please provide the exact command you used to compile llama.cpp. For example: `cmake -B ...`.
+        This will be automatically formatted into code, so no need for backticks.
+      render: shell
+    validations:
+      required: true
   - type: textarea
     id: logs
     attributes:
       label: Relevant log output
       description: >
-        Please copy and paste any relevant log output, including the command that you entered and any generated text.
+        Please copy and paste any relevant log output, including any generated text.
         This will be automatically formatted into code, so no need for backticks.
       render: shell
     validations:
```

.github/ISSUE_TEMPLATE/019-bug-misc.yml
Lines changed: 11 additions & 1 deletion

```diff
@@ -52,6 +52,16 @@ body:
         - Other (Please specify in the next section)
     validations:
       required: false
+  - type: textarea
+    id: command
+    attributes:
+      label: Command line
+      description: >
+        Please provide the exact commands you entered, if applicable. For example: `llama-server -m ... -c ...`, `llama-cli -m ...`, etc.
+        This will be automatically formatted into code, so no need for backticks.
+      render: shell
+    validations:
+      required: false
   - type: textarea
     id: info
     attributes:
@@ -74,7 +84,7 @@ body:
     attributes:
       label: Relevant log output
       description: >
-        If applicable, please copy and paste any relevant log output, including the command that you entered and any generated text.
+        If applicable, please copy and paste any relevant log output, including any generated text.
         This will be automatically formatted into code, so no need for backticks.
       render: shell
     validations:
```

.github/workflows/build.yml
Lines changed: 2 additions & 2 deletions

```diff
@@ -665,7 +665,7 @@ jobs:
           - build: 'llvm-arm64'
             defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON'
           - build: 'msvc-arm64'
-            defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-msvc.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=O'
+            defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-msvc.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON'
           - build: 'llvm-arm64-opencl-adreno'
             defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/opencl-arm64-release" -DGGML_OPENCL=ON -DGGML_OPENCL_USE_ADRENO_KERNELS=ON'
@@ -1237,7 +1237,7 @@ jobs:

       - name: Create release
         id: create_release
-        uses: anzz1/action-create-release@v1
+        uses: ggml-org/action-create-release@v1
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         with:
```

.github/workflows/docker.yml
Lines changed: 1 addition & 2 deletions

```diff
@@ -97,10 +97,9 @@ jobs:
       GITHUB_BRANCH_NAME: ${{ github.head_ref || github.ref_name }}
       GITHUB_REPOSITORY_OWNER: '${{ github.repository_owner }}'

-      # https://github.com/jlumbroso/free-disk-space/tree/54081f138730dfa15788a46383842cd2f914a1be#example
       - name: Free Disk Space (Ubuntu)
         if: ${{ matrix.config.free_disk_space == true }}
-        uses: jlumbroso/free-disk-space@main
+        uses: ggml-org/free-disk-space@v1.3.1
         with:
           # this might remove tools that are actually needed,
           # if set to "true" but frees about 6 GB
```

.github/workflows/editorconfig.yml
Lines changed: 3 additions & 1 deletion

```diff
@@ -23,5 +23,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
-      - uses: editorconfig-checker/action-editorconfig-checker@main
+      - uses: editorconfig-checker/action-editorconfig-checker@v2
+        with:
+          version: v3.0.3
       - run: editorconfig-checker
```

CODEOWNERS
Lines changed: 7 additions & 1 deletion

```diff
@@ -1,5 +1,11 @@
 # collaborators can optionally add themselves here to indicate their availability for reviewing related PRs

 /ci/ @ggerganov
-/.devops/ @ngxson
+/.devops/*.Dockerfile @ngxson
 /examples/server/ @ngxson
+/ggml/src/ggml-cuda/fattn* @JohannesGaessler
+/ggml/src/ggml-cuda/mmq.* @JohannesGaessler
+/ggml/src/ggml-cuda/mmv.* @JohannesGaessler
+/ggml/src/ggml-cuda/mmvq.* @JohannesGaessler
+/ggml/src/ggml-opt.cpp @JohannesGaessler
+/ggml/src/gguf.cpp @JohannesGaessler
```

common/arg.cpp
Lines changed: 13 additions & 4 deletions

```diff
@@ -22,6 +22,11 @@ common_arg & common_arg::set_examples(std::initializer_list<enum llama_example>
     return *this;
 }

+common_arg & common_arg::set_excludes(std::initializer_list<enum llama_example> excludes) {
+    this->excludes = std::move(excludes);
+    return *this;
+}
+
 common_arg & common_arg::set_env(const char * env) {
     help = help + "\n(env: " + env + ")";
     this->env = env;
@@ -37,6 +42,10 @@ bool common_arg::in_example(enum llama_example ex) {
     return examples.find(ex) != examples.end();
 }

+bool common_arg::is_exclude(enum llama_example ex) {
+    return excludes.find(ex) != excludes.end();
+}
+
 bool common_arg::get_value_from_env(std::string & output) {
     if (env == nullptr) return false;
     char * value = std::getenv(env);
@@ -420,7 +429,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
      * - if both {LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_*,} are set, we will prioritize the LLAMA_EXAMPLE_* matching current example
      */
     auto add_opt = [&](common_arg arg) {
-        if (arg.in_example(ex) || arg.in_example(LLAMA_EXAMPLE_COMMON)) {
+        if ((arg.in_example(ex) || arg.in_example(LLAMA_EXAMPLE_COMMON)) && !arg.is_exclude(ex)) {
             ctx_arg.options.push_back(std::move(arg));
         }
     };
@@ -649,7 +658,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         [](common_params & params, const std::string & value) {
             params.prompt = value;
         }
-    ));
+    ).set_excludes({LLAMA_EXAMPLE_SERVER}));
     add_opt(common_arg(
         {"--no-perf"},
         string_format("disable internal libllama performance timings (default: %s)", params.no_perf ? "true" : "false"),
@@ -673,7 +682,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
                 params.prompt.pop_back();
             }
         }
-    ));
+    ).set_excludes({LLAMA_EXAMPLE_SERVER}));
     add_opt(common_arg(
         {"--in-file"}, "FNAME",
         "an input file (repeat to specify multiple files)",
@@ -700,7 +709,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.prompt = ss.str();
             fprintf(stderr, "Read %zu bytes from binary file %s\n", params.prompt.size(), value.c_str());
         }
-    ));
+    ).set_excludes({LLAMA_EXAMPLE_SERVER}));
     add_opt(common_arg(
         {"-e", "--escape"},
         string_format("process escapes sequences (\\n, \\r, \\t, \\', \\\", \\\\) (default: %s)", params.escape ? "true" : "false"),
```

common/arg.h
Lines changed: 3 additions & 0 deletions

```diff
@@ -12,6 +12,7 @@

 struct common_arg {
     std::set<enum llama_example> examples = {LLAMA_EXAMPLE_COMMON};
+    std::set<enum llama_example> excludes = {};
     std::vector<const char *> args;
     const char * value_hint = nullptr; // help text or example for arg value
     const char * value_hint_2 = nullptr; // for second arg value
@@ -53,9 +54,11 @@ struct common_arg {
     ) : args(args), value_hint(value_hint), value_hint_2(value_hint_2), help(help), handler_str_str(handler) {}

     common_arg & set_examples(std::initializer_list<enum llama_example> examples);
+    common_arg & set_excludes(std::initializer_list<enum llama_example> excludes);
     common_arg & set_env(const char * env);
     common_arg & set_sparam();
     bool in_example(enum llama_example ex);
+    bool is_exclude(enum llama_example ex);
     bool get_value_from_env(std::string & output);
     bool has_value_from_env();
     std::string to_string();
```
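Taken together, the arg.cpp and arg.h hunks give `common_arg` an exclusion set that mirrors the existing `examples` set: a common option is offered to every tool unless the current example appears in its `excludes` (here, `-p/--prompt` and friends are hidden from llama-server). Below is a minimal, self-contained sketch of that filter logic; `arg_stub`, `accepted`, and the trimmed enum are illustrative stand-ins, not code from the commit.

```cpp
#include <cassert>
#include <set>

// illustrative subset of llama_example
enum llama_example { LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER };

// stand-in for the relevant parts of common_arg
struct arg_stub {
    std::set<llama_example> examples = {LLAMA_EXAMPLE_COMMON};
    std::set<llama_example> excludes = {};

    bool in_example(llama_example ex) const { return examples.count(ex) > 0; }
    bool is_exclude(llama_example ex) const { return excludes.count(ex) > 0; }
};

// mirrors the updated add_opt condition in common/arg.cpp
static bool accepted(const arg_stub & a, llama_example ex) {
    return (a.in_example(ex) || a.in_example(LLAMA_EXAMPLE_COMMON)) && !a.is_exclude(ex);
}

int main() {
    arg_stub prompt_arg;                          // like -p/--prompt after this commit
    prompt_arg.excludes = {LLAMA_EXAMPLE_SERVER}; // i.e. set_excludes({LLAMA_EXAMPLE_SERVER})

    assert(accepted(prompt_arg, LLAMA_EXAMPLE_MAIN));    // still offered by llama-cli
    assert(!accepted(prompt_arg, LLAMA_EXAMPLE_SERVER)); // hidden from llama-server
    return 0;
}
```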

common/common.cpp
Lines changed: 11 additions & 8 deletions

```diff
@@ -2,6 +2,9 @@
 #define _SILENCE_CXX17_CODECVT_HEADER_DEPRECATION_WARNING
 #endif

+#include "ggml.h"
+#include "gguf.h"
+
 #include "common.h"
 #include "log.h"
 // Change JSON_ASSERT from assert() to GGML_ASSERT:
@@ -846,7 +849,7 @@ struct common_init_result common_init_from_params(common_params & params) {
     } else if (!params.model_url.empty()) {
         model = common_load_model_from_url(params.model_url, params.model, params.hf_token, mparams);
     } else {
-        model = llama_load_model_from_file(params.model.c_str(), mparams);
+        model = llama_model_load_from_file(params.model.c_str(), mparams);
     }

     if (model == NULL) {
@@ -873,7 +876,7 @@ struct common_init_result common_init_from_params(common_params & params) {
     }

     if (!ok) {
-        llama_free_model(model);
+        llama_model_free(model);

         return iparams;
     }
@@ -884,7 +887,7 @@ struct common_init_result common_init_from_params(common_params & params) {
     llama_context * lctx = llama_new_context_with_model(model, cparams);
     if (lctx == NULL) {
         LOG_ERR("%s: failed to create context with model '%s'\n", __func__, params.model.c_str());
-        llama_free_model(model);
+        llama_model_free(model);
         return iparams;
     }

@@ -900,7 +903,7 @@ struct common_init_result common_init_from_params(common_params & params) {
         const auto cvec = common_control_vector_load(params.control_vectors);
         if (cvec.n_embd == -1) {
             llama_free(lctx);
-            llama_free_model(model);
+            llama_model_free(model);

             return iparams;
         }
@@ -913,7 +916,7 @@ struct common_init_result common_init_from_params(common_params & params) {
             params.control_vector_layer_end);
         if (err) {
             llama_free(lctx);
-            llama_free_model(model);
+            llama_model_free(model);

             return iparams;
         }
@@ -926,7 +929,7 @@ struct common_init_result common_init_from_params(common_params & params) {
         if (lora == nullptr) {
             LOG_ERR("%s: failed to apply lora adapter '%s'\n", __func__, la.path.c_str());
             llama_free(lctx);
-            llama_free_model(model);
+            llama_model_free(model);
             return iparams;
         }

@@ -982,7 +985,7 @@ struct common_init_result common_init_from_params(common_params & params) {
         if (llama_model_has_encoder(model)) {
             llama_encode(lctx, llama_batch_get_one(tmp.data(), tmp.size()));
             llama_token decoder_start_token_id = llama_model_decoder_start_token(model);
-            if (decoder_start_token_id == -1) {
+            if (decoder_start_token_id == LLAMA_TOKEN_NULL) {
                 decoder_start_token_id = bos;
             }
             tmp.clear();
@@ -1411,7 +1414,7 @@ struct llama_model * common_load_model_from_url(
         }
     }

-    return llama_load_model_from_file(local_path.c_str(), params);
+    return llama_model_load_from_file(local_path.c_str(), params);
 }

 struct llama_model * common_load_model_from_hf(
```
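Most of this file is a mechanical rename: `llama_load_model_from_file` becomes `llama_model_load_from_file` and `llama_free_model` becomes `llama_model_free`. The sketch below shows the load/teardown pattern under the new names; it assumes `llama.h` declares the functions exactly as the diff uses them, and `smoke_test` is a hypothetical helper, far simpler than the real `common_init_from_params`.

```cpp
#include "llama.h"

// load a model, create a context, then tear both down
static bool smoke_test(const char * model_path) {
    llama_model_params mparams = llama_model_default_params();

    // new name in this commit (previously: llama_load_model_from_file)
    llama_model * model = llama_model_load_from_file(model_path, mparams);
    if (model == NULL) {
        return false;
    }

    llama_context_params cparams = llama_context_default_params();
    llama_context * lctx = llama_new_context_with_model(model, cparams);
    if (lctx == NULL) {
        // new name in this commit (previously: llama_free_model)
        llama_model_free(model);
        return false;
    }

    llama_free(lctx);
    llama_model_free(model);
    return true;
}
```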

common/ngram-cache.cpp
Lines changed: 12 additions & 12 deletions

```diff
@@ -65,13 +65,13 @@ constexpr int draft_min_percent_strict[LLAMA_NGRAM_MAX] = {75, 66, 66, 66};
 static llama_token try_draft(common_ngram_cache & nc_static, const common_ngram ngram_static) {
     common_ngram_cache::iterator part_static_it = nc_static.find(ngram_static);
     if (part_static_it == nc_static.end()) {
-        return -1;
+        return LLAMA_TOKEN_NULL;
     }
     const common_ngram_cache_part part_static = part_static_it->second;

     int max_count_static = 0;
     int sum_count_static = 0;
-    llama_token max_token = -1;
+    llama_token max_token = LLAMA_TOKEN_NULL;

     for (std::pair<llama_token, int> token_count_static : part_static) {
         const llama_token token = token_count_static.first;
@@ -85,10 +85,10 @@ static llama_token try_draft(common_ngram_cache & nc_static, const common_ngram
     }

     if (sum_count_static < draft_min_sample_size_lax[LLAMA_NGRAM_STATIC-1]) {
-        return -1;
+        return LLAMA_TOKEN_NULL;
     }
     if (100*max_count_static < draft_min_percent_lax[LLAMA_NGRAM_STATIC-1]*sum_count_static) {
-        return -1;
+        return LLAMA_TOKEN_NULL;
     }
     return max_token;
 }
@@ -98,9 +98,9 @@ static llama_token try_draft(
     common_ngram_cache & nc_primary, const std::vector<common_ngram> & ngrams_primary, common_ngram_cache_part & part_static,
     const int * min_sample_size, const int * min_percent) {

-    llama_token drafted_token = -1;
+    llama_token drafted_token = LLAMA_TOKEN_NULL;

-    for (int i = ngrams_primary.size()-1; i >= 0 && drafted_token == -1; --i) {
+    for (int i = ngrams_primary.size()-1; i >= 0 && drafted_token == LLAMA_TOKEN_NULL; --i) {
         const common_ngram ngram_primary = ngrams_primary[i];

         common_ngram_cache::iterator part_primary_it = nc_primary.find(ngram_primary);
@@ -112,7 +112,7 @@ static llama_token try_draft(
         int max_count_primary = 0;
         int max_count_static = 0;
         int sum_count_primary = 0;
-        llama_token max_token = -1;
+        llama_token max_token = LLAMA_TOKEN_NULL;

         for (std::pair<llama_token, int> token_count_primary : part_primary) {
             const llama_token token = token_count_primary.first;
@@ -154,7 +154,7 @@ void common_ngram_cache_draft(
     }

     while ((int) draft.size()-1 < n_draft) {
-        llama_token drafted_token = -1;
+        llama_token drafted_token = LLAMA_TOKEN_NULL;

         const int ngram_start_static = inp_size-LLAMA_NGRAM_STATIC + draft.size()-1;
         common_ngram ngram_static;
@@ -177,17 +177,17 @@ void common_ngram_cache_draft(
             }
             ngrams_cd.push_back(ngram_cd);
         }
-        if (drafted_token == -1) {
+        if (drafted_token == LLAMA_TOKEN_NULL) {
             drafted_token = try_draft(nc_context, ngrams_cd, part_static, draft_min_sample_size_lax, draft_min_percent_lax);
         }
-        if (drafted_token == -1) {
+        if (drafted_token == LLAMA_TOKEN_NULL) {
             drafted_token = try_draft(nc_dynamic, ngrams_cd, part_static, draft_min_sample_size_strict, draft_min_percent_strict);
         }
-        if (drafted_token == -1) {
+        if (drafted_token == LLAMA_TOKEN_NULL) {
             drafted_token = try_draft(nc_static, ngram_static);
         }

-        if (drafted_token == -1) {
+        if (drafted_token == LLAMA_TOKEN_NULL) {
             break;
         }

```
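The ngram-cache change is likewise mechanical: every bare `-1` token sentinel becomes the named constant `LLAMA_TOKEN_NULL`. A compact illustration of the pattern follows; `pick_draft` is a hypothetical stand-in for `try_draft`, and `LLAMA_TOKEN_NULL` is defined locally so the snippet compiles on its own (in the repository it comes from `llama.h`, where it also equals -1).

```cpp
#include <cstdio>
#include <map>

typedef int llama_token;    // llama.h uses an int32_t typedef
#define LLAMA_TOKEN_NULL -1 // local definition; normally from llama.h

// Shaped like try_draft: return the most frequent candidate token, or the
// named sentinel (instead of a bare -1) when no candidate clears the bar.
static llama_token pick_draft(const std::map<llama_token, int> & counts, int min_count) {
    llama_token best = LLAMA_TOKEN_NULL;
    int best_count  = 0;
    for (const auto & kv : counts) {
        if (kv.second >= min_count && kv.second > best_count) {
            best       = kv.first;
            best_count = kv.second;
        }
    }
    return best;
}

int main() {
    const std::map<llama_token, int> counts = {{42, 3}, {7, 1}};
    const llama_token t = pick_draft(counts, 2);
    if (t == LLAMA_TOKEN_NULL) {
        std::printf("no draft token\n");     // callers test the sentinel by name
    } else {
        std::printf("draft token: %d\n", t); // prints: draft token: 42
    }
    return 0;
}
```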
