Commit 2a87e1c

Merge pull request #234 from menloresearch/update-dev-from-master-2025-09-02-00-34
Sync master with upstream release b6351
2 parents 7a7b96a + 5d804a4 · commit 2a87e1c

29 files changed: +1427 additions, -643 deletions

README.md

Lines changed: 1 addition & 0 deletions
```diff
@@ -137,6 +137,7 @@ Instructions for adding support for new models: [HOWTO-add-model.md](docs/develo
 - [X] [Trillion-7B-preview](https://huggingface.co/trillionlabs/Trillion-7B-preview)
 - [x] [Ling models](https://huggingface.co/collections/inclusionAI/ling-67c51c85b34a7ea0aba94c32)
 - [x] [LFM2 models](https://huggingface.co/collections/LiquidAI/lfm2-686d721927015b2ad73eaa38)
+- [x] [Hunyuan models](https://huggingface.co/collections/tencent/hunyuan-dense-model-6890632cda26b19119c9c5e7)
 
 #### Multimodal
 
```

common/sampling.cpp

Lines changed: 23 additions & 2 deletions
```diff
@@ -426,8 +426,29 @@ uint32_t common_sampler_get_seed(const struct common_sampler * gsmpl) {
 
 // helpers
 
-llama_token_data_array * common_sampler_get_candidates(struct common_sampler * gsmpl) {
-    return &gsmpl->cur_p;
+llama_token_data_array * common_sampler_get_candidates(struct common_sampler * gsmpl, bool do_sort) {
+    auto * res = &gsmpl->cur_p;
+
+    if (do_sort && !res->sorted) {
+        // remember the selected token before sorting
+        const llama_token id = res->data[res->selected].id;
+
+        std::sort(res->data, res->data + res->size, [](const llama_token_data & a, const llama_token_data & b) {
+            return a.p > b.p;
+        });
+
+        // restore the selected token after sorting
+        for (size_t i = 0; i < res->size; ++i) {
+            if (res->data[i].id == id) {
+                res->selected = i;
+                break;
+            }
+        }
+
+        res->sorted = true;
+    }
+
+    return res;
 }
 
 llama_token common_sampler_last(const struct common_sampler * gsmpl) {
```
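
The new sorting path is small enough to exercise in isolation. Below is a minimal, self-contained sketch of the same sort-and-restore pattern; the `Candidate` struct and the sample data are illustrative stand-ins, not the actual `llama_token_data` layout:

```cpp
#include <algorithm>
#include <cstdio>
#include <vector>

// illustrative stand-in for llama_token_data (token id + probability)
struct Candidate {
    int   id;
    float p;
};

int main() {
    std::vector<Candidate> cur = { {7, 0.1f}, {3, 0.6f}, {9, 0.3f} };
    size_t selected = 2; // token 9 was sampled before sorting

    // remember the selected token by id, not by index
    const int sel_id = cur[selected].id;

    std::sort(cur.begin(), cur.end(),
              [](const Candidate & a, const Candidate & b) { return a.p > b.p; });

    // re-locate the selected token after the permutation
    for (size_t i = 0; i < cur.size(); ++i) {
        if (cur[i].id == sel_id) {
            selected = i;
            break;
        }
    }

    // prints: selected token 9 is now at index 1
    std::printf("selected token %d is now at index %zu\n", cur[selected].id, selected);
}
```

Tracking the selection by token id rather than by index is what keeps `selected` valid across the sort, since `std::sort` gives no guarantee about where the previously selected entry ends up.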

common/sampling.h

Lines changed: 3 additions & 1 deletion
```diff
@@ -86,7 +86,9 @@ uint32_t common_sampler_get_seed(const struct common_sampler * gsmpl);
 // helpers
 
 // access the internal list of current candidate tokens
-llama_token_data_array * common_sampler_get_candidates(struct common_sampler * gsmpl);
+// if do_sort == true, the candidates are guaranteed to be sorted afterwards (in descending order of probability)
+// the .sorted flag of the result indicates whether the returned candidates are sorted
+llama_token_data_array * common_sampler_get_candidates(struct common_sampler * gsmpl, bool do_sort);
 
 // get the last accepted token
 llama_token common_sampler_last(const struct common_sampler * gsmpl);
```
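
A caller-side sketch of this contract (hypothetical fragment, assuming an initialized `struct common_sampler * smpl` on which `common_sampler_sample` has already run):

```cpp
// cheap access: no sorting requested, order is whatever the sampler chain left
llama_token_data_array * cur_p = common_sampler_get_candidates(smpl, /*do_sort =*/ false);

if (!cur_p->sorted) {
    // callers that need ranked candidates can re-request with do_sort == true;
    // the sort runs at most once, since the .sorted flag is set afterwards
    cur_p = common_sampler_get_candidates(smpl, /*do_sort =*/ true);
}

// cur_p->sorted now holds, so index 0 is the most probable token
const llama_token top_id = cur_p->data[0].id;
```

Passing `false` keeps the previous zero-cost behavior; passing `true` buys the ordering guarantee only when it is actually needed.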

common/speculative.cpp

Lines changed: 1 addition & 1 deletion
```diff
@@ -317,7 +317,7 @@ llama_tokens common_speculative_gen_draft(
 
     common_sampler_sample(smpl, ctx_dft, 0, true);
 
-    const auto * cur_p = common_sampler_get_candidates(smpl);
+    const auto * cur_p = common_sampler_get_candidates(smpl, true);
 
     for (int k = 0; k < std::min(3, (int) cur_p->size); ++k) {
         LOG_DBG(" - draft candidate %3d, pos %3d: %6d (%8.3f) '%s'\n",
```
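
The top-3 debug loop above is only meaningful on a ranked list, so this call site now requests `do_sort == true` explicitly instead of relying on whatever order the sampler chain happened to leave behind.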

convert_hf_to_gguf.py

Lines changed: 0 additions & 4 deletions
```diff
@@ -302,10 +302,6 @@ def prepare_tensors(self):
                 # data = data_torch.squeeze().numpy()
                 data = data_torch.numpy()
 
-                # if data ends up empty, it means data_torch was a scalar tensor -> restore
-                if len(data.shape) == 0:
-                    data = data_torch.numpy()
-
                 n_dims = len(data.shape)
                 data_qtype: gguf.GGMLQuantizationType | bool = self.tensor_force_quant(name, new_name, bid, n_dims)
 
```
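
The deleted branch had become a no-op: with the `squeeze()` call gone (left commented out above), `data = data_torch.numpy()` already preserves 0-dimensional scalar tensors, so the "restore" merely re-computed the same value.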

examples/speculative/speculative.cpp

Lines changed: 2 additions & 2 deletions
```diff
@@ -244,7 +244,7 @@ int main(int argc, char ** argv) {
             // stochastic verification
             common_sampler_sample(smpl, ctx_tgt, drafts[s_keep].i_batch_tgt[i_dft], true);
 
-            auto & dist_tgt = *common_sampler_get_candidates(smpl);
+            auto & dist_tgt = *common_sampler_get_candidates(smpl, true);
 
             float p_tgt = 0.0f;
             float p_dft = 0.0f;
@@ -493,7 +493,7 @@ int main(int argc, char ** argv) {
 
             common_sampler_sample(drafts[s].smpl, ctx_dft, drafts[s].i_batch_dft, true);
 
-            const auto * cur_p = common_sampler_get_candidates(drafts[s].smpl);
+            const auto * cur_p = common_sampler_get_candidates(drafts[s].smpl, true);
 
             for (int k = 0; k < std::min(n_seq_dft + 3, (int) cur_p->size); ++k) {
                 LOG_DBG(" - draft candidate %3d for seq %3d, pos %3d: %6d (%8.3f) '%s'\n",
```

ggml/src/ggml-backend.cpp

Lines changed: 1 addition & 1 deletion
```diff
@@ -651,7 +651,7 @@ static bool ggml_is_view_op(enum ggml_op op) {
 #endif
 
 #ifndef GGML_SCHED_MAX_SPLIT_INPUTS
-#define GGML_SCHED_MAX_SPLIT_INPUTS GGML_MAX_SRC
+#define GGML_SCHED_MAX_SPLIT_INPUTS 30
 #endif
 
 #ifndef GGML_SCHED_MAX_COPIES
```
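
For context, `GGML_SCHED_MAX_SPLIT_INPUTS` has to be a compile-time constant because it statically sizes per-split arrays in the scheduler. A hypothetical sketch of that pattern (not the actual `ggml-backend.cpp` structs):

```cpp
#ifndef GGML_SCHED_MAX_SPLIT_INPUTS
#define GGML_SCHED_MAX_SPLIT_INPUTS 30
#endif

struct ggml_tensor; // forward declaration; the real type is defined in ggml.h

// hypothetical sketch of a scheduler split record
struct sched_split_sketch {
    // inputs that must be copied to the split's backend before it runs;
    // the macro sizes this array statically, so raising the bound grows
    // the struct instead of forcing a runtime allocation
    struct ggml_tensor * inputs[GGML_SCHED_MAX_SPLIT_INPUTS];
    int n_inputs;
};
```

Using an explicit constant also decouples the scheduler's split-input limit from `GGML_MAX_SRC`, the per-operation source limit, which the two settings previously only coincidentally shared.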
