
Commit 876aa7c

Merge pull request #186 from menloresearch/update-dev-from-master-2025-08-01-07-40
Sync master with upstream release b6056
2 parents 5f0df8c + baad948 commit 876aa7c


44 files changed: +5559, -748 lines

.devops/cann.Dockerfile

Lines changed: 130 additions & 0 deletions
@@ -0,0 +1,130 @@
+# ==============================================================================
+# ARGUMENTS
+# ==============================================================================
+
+# Define the CANN base image for easier version updates later
+ARG CANN_BASE_IMAGE=quay.io/ascend/cann:8.1.rc1-910b-openeuler22.03-py3.10
+
+# ==============================================================================
+# BUILD STAGE
+# Compile all binary files and libraries
+# ==============================================================================
+FROM ${CANN_BASE_IMAGE} AS build
+
+# Define the Ascend chip model for compilation. Default is Ascend910B3
+ARG ASCEND_SOC_TYPE=Ascend910B3
+
+# -- Install build dependencies --
+RUN yum install -y gcc g++ cmake make git libcurl-devel python3 python3-pip && \
+    yum clean all && \
+    rm -rf /var/cache/yum
+
+# -- Set the working directory --
+WORKDIR /app
+
+# -- Copy project files --
+COPY . .
+
+# -- Set CANN environment variables (required for compilation) --
+# Using ENV instead of `source` allows environment variables to persist across the entire image layer
+ENV ASCEND_TOOLKIT_HOME=/usr/local/Ascend/ascend-toolkit/latest
+ENV LD_LIBRARY_PATH=${ASCEND_TOOLKIT_HOME}/lib64:${LD_LIBRARY_PATH}
+ENV PATH=${ASCEND_TOOLKIT_HOME}/bin:${PATH}
+ENV ASCEND_OPP_PATH=${ASCEND_TOOLKIT_HOME}/opp
+ENV LD_LIBRARY_PATH=${ASCEND_TOOLKIT_HOME}/runtime/lib64/stub:$LD_LIBRARY_PATH
+# ... You can add other environment variables from the original file as needed ...
+# For brevity, only core variables are listed here. You can paste the original ENV list here.
+
+# -- Build llama.cpp --
+# Use the passed ASCEND_SOC_TYPE argument and add general build options
+RUN source /usr/local/Ascend/ascend-toolkit/set_env.sh --force \
+    && \
+    cmake -B build \
+        -DGGML_CANN=ON \
+        -DCMAKE_BUILD_TYPE=Release \
+        -DSOC_TYPE=${ASCEND_SOC_TYPE} \
+        . && \
+    cmake --build build --config Release -j$(nproc)
+
+# -- Organize build artifacts for copying in later stages --
+# Create a lib directory to store all .so files
+RUN mkdir -p /app/lib && \
+    find build -name "*.so" -exec cp {} /app/lib \;
+
+# Create a full directory to store all executables and Python scripts
+RUN mkdir -p /app/full && \
+    cp build/bin/* /app/full/ && \
+    cp *.py /app/full/ && \
+    cp -r gguf-py /app/full/ && \
+    cp -r requirements /app/full/ && \
+    cp requirements.txt /app/full/
+# If you have a tools.sh script, make sure it is copied here
+# cp .devops/tools.sh /app/full/tools.sh
+
+# ==============================================================================
+# BASE STAGE
+# Create a minimal base image with CANN runtime and common libraries
+# ==============================================================================
+FROM ${CANN_BASE_IMAGE} AS base
+
+# -- Install runtime dependencies --
+RUN yum install -y libgomp curl && \
+    yum clean all && \
+    rm -rf /var/cache/yum
+
+# -- Set CANN environment variables (required for runtime) --
+ENV ASCEND_TOOLKIT_HOME=/usr/local/Ascend/ascend-toolkit/latest
+ENV LD_LIBRARY_PATH=/app:${ASCEND_TOOLKIT_HOME}/lib64:${LD_LIBRARY_PATH}
+ENV PATH=${ASCEND_TOOLKIT_HOME}/bin:${PATH}
+ENV ASCEND_OPP_PATH=${ASCEND_TOOLKIT_HOME}/opp
+# ... You can add other environment variables from the original file as needed ...
+
+WORKDIR /app
+
+# Copy compiled .so files from the build stage
+COPY --from=build /app/lib/ /app
+
+# ==============================================================================
+# FINAL STAGES (TARGETS)
+# ==============================================================================
+
+### Target: full
+# Complete image with all tools, Python bindings, and dependencies
+# ==============================================================================
+FROM base AS full
+
+COPY --from=build /app/full /app
+
+# Install Python dependencies
+RUN yum install -y git python3 python3-pip && \
+    pip3 install --no-cache-dir --upgrade pip setuptools wheel && \
+    pip3 install --no-cache-dir -r requirements.txt && \
+    yum clean all && \
+    rm -rf /var/cache/yum
+
+# You need to provide a tools.sh script as the entrypoint
+ENTRYPOINT ["/app/tools.sh"]
+# If there is no tools.sh, you can set the default to start the server
+# ENTRYPOINT ["/app/llama-server"]
+
+### Target: light
+# Lightweight image containing only llama-cli
+# ==============================================================================
+FROM base AS light
+
+COPY --from=build /app/full/llama-cli /app
+
+ENTRYPOINT [ "/app/llama-cli" ]
+
+### Target: server
+# Dedicated server image containing only llama-server
+# ==============================================================================
+FROM base AS server
+
+ENV LLAMA_ARG_HOST=0.0.0.0
+
+COPY --from=build /app/full/llama-server /app
+
+HEALTHCHECK --interval=5m CMD [ "curl", "-f", "http://localhost:8080/health" ]
+
+ENTRYPOINT [ "/app/llama-server" ]
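
The three final targets (full, light, server) mirror the existing .devops Dockerfiles. As a rough sketch of how the server target might be built and run (the image tag, model path, and device flags below are illustrative; the exact Ascend device passthrough varies by host setup and is not configured by this Dockerfile):

docker build -f .devops/cann.Dockerfile --target server \
    --build-arg ASCEND_SOC_TYPE=Ascend910B3 -t llama-cpp-cann:server .

# Ascend containers typically need the NPU devices and host driver mounted in;
# the paths below are common defaults, not something this commit guarantees.
docker run -p 8080:8080 -v /path/to/models:/models \
    --device /dev/davinci0 --device /dev/davinci_manager \
    -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
    llama-cpp-cann:server -m /models/model.gguf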

common/arg.cpp

Lines changed: 51 additions & 7 deletions
@@ -977,6 +977,10 @@ static bool common_params_parse_ex(int argc, char ** argv, common_params_context
         for (auto & seq_breaker : params.sampling.dry_sequence_breakers) {
             string_process_escapes(seq_breaker);
         }
+        for (auto & pair : params.speculative.replacements) {
+            string_process_escapes(pair.first);
+            string_process_escapes(pair.second);
+        }
     }

     if (!params.kv_overrides.empty()) {
@@ -2091,6 +2095,13 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.no_kv_offload = true;
         }
     ).set_env("LLAMA_ARG_NO_KV_OFFLOAD"));
+    add_opt(common_arg(
+        {"-nr", "--no-repack"},
+        "disable weight repacking",
+        [](common_params & params) {
+            params.no_extra_bufts = true;
+        }
+    ).set_env("LLAMA_ARG_NO_REPACK"));
     add_opt(common_arg(
         {"-ctk", "--cache-type-k"}, "TYPE",
         string_format(
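
The new flag feeds no_extra_bufts, which common/common.cpp below inverts into mparams.use_extra_bufts; per the comment in common/common.h, extra buffer types are what weight repacking uses, so -nr/--no-repack switches that path off. A minimal invocation sketch (model path illustrative):

# Disable weight repacking; LLAMA_ARG_NO_REPACK=1 should be the env-var
# equivalent given the set_env() call above.
llama-cli -m /models/model.gguf --no-repack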
@@ -2369,6 +2380,15 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             }
         }
     ));
+    add_opt(common_arg(
+        {"--cpu-moe"},
+        "use CPU for Mixture of Experts (MoE) weights",
+        [](common_params & params) {
+            params.tensor_buft_overrides.push_back({"\\.ffn_up_exps\\.weight$", ggml_backend_cpu_buffer_type()});
+            params.tensor_buft_overrides.push_back({"\\.ffn_down_exps\\.weight$", ggml_backend_cpu_buffer_type()});
+            params.tensor_buft_overrides.push_back({"\\.ffn_gate_exps\\.weight$", ggml_backend_cpu_buffer_type()});
+        }
+    ).set_env("LLAMA_ARG_CPU_MOE"));
     add_opt(common_arg(
         {"-ngl", "--gpu-layers", "--n-gpu-layers"}, "N",
         "number of layers to store in VRAM",
@@ -3249,6 +3269,13 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.speculative.model.path = value;
         }
     ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_MODEL_DRAFT"));
+    add_opt(common_arg(
+        {"--spec-replace"}, "TARGET", "DRAFT",
+        "translate the string in TARGET into DRAFT if the draft model and main model are not compatible",
+        [](common_params & params, const std::string & tgt, const std::string & dft) {
+            params.speculative.replacements.push_back({ tgt, dft });
+        }
+    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}));
     add_opt(common_arg(
         {"-ctkd", "--cache-type-k-draft"}, "TYPE",
         string_format(
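
Each replacement pair rewrites occurrences of TARGET into DRAFT in the text handed to the draft model, which helps when the two models use different special tokens or templates (the escape processing added in the first hunk above applies to both strings). A hedged sketch (model names and strings are illustrative only):

# Speculative decoding where the draft model's markers differ from the target's:
llama-server -m /models/target.gguf -md /models/draft.gguf \
    --spec-replace "<|im_start|>" "<|startoftext|>"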
@@ -3438,34 +3465,51 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         }
     ).set_examples({LLAMA_EXAMPLE_SERVER}));

-    // diffusion parameters
     add_opt(common_arg(
         { "--diffusion-steps" }, "N",
         string_format("number of diffusion steps (default: %d)", params.diffusion.steps),
         [](common_params & params, int value) { params.diffusion.steps = value; }
     ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
+    add_opt(common_arg(
+        { "--diffusion-visual" },
+        string_format("enable visual diffusion mode (show progressive generation) (default: %s)",
+                      params.diffusion.visual_mode ? "true" : "false"),
+        [](common_params & params) { params.diffusion.visual_mode = true; }
+    ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
+
     add_opt(common_arg(
         { "--diffusion-eps" }, "F",
         string_format("epsilon for timesteps (default: %.6f)", (double) params.diffusion.eps),
         [](common_params & params, const std::string & value) { params.diffusion.eps = std::stof(value); }
     ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
     add_opt(common_arg(
         { "--diffusion-algorithm" }, "N",
-        string_format("diffusion algorithm: 0=ORIGIN, 1=MASKGIT_PLUS, 2=TOPK_MARGIN, 3=ENTROPY (default: %d)",
+        string_format("diffusion algorithm: 0=ORIGIN, 1=ENTROPY_BASED, 2=MARGIN_BASED, 3=RANDOM, 4=LOW_CONFIDENCE (default: %d)",
                       params.diffusion.algorithm),
         [](common_params & params, int value) { params.diffusion.algorithm = value; }
     ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
     add_opt(common_arg(
         { "--diffusion-alg-temp" }, "F",
-        string_format("algorithm temperature (default: %.3f)", (double) params.diffusion.alg_temp),
+        string_format("dream algorithm temperature (default: %.3f)", (double) params.diffusion.alg_temp),
         [](common_params & params, const std::string & value) { params.diffusion.alg_temp = std::stof(value); }
     ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
+
     add_opt(common_arg(
-        { "--diffusion-visual" },
-        string_format("enable visual diffusion mode (show progressive generation) (default: %s)",
-                      params.diffusion.visual_mode ? "true" : "false"),
-        [](common_params & params) { params.diffusion.visual_mode = true; }
+        { "--diffusion-block-length" }, "N",
+        string_format("llada block length for generation (default: %d)", params.diffusion.block_length),
+        [](common_params & params, int value) { params.diffusion.block_length = value; }
+    ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
+    add_opt(common_arg(
+        { "--diffusion-cfg-scale" }, "F",
+        string_format("llada classifier-free guidance scale (default: %.3f)", (double) params.diffusion.cfg_scale),
+        [](common_params & params, const std::string & value) { params.diffusion.cfg_scale = std::stof(value); }
     ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
+    add_opt(common_arg(
+        { "--diffusion-add-gumbel-noise" }, "F",
+        string_format("add gumbel noise to the logits if temp > 0.0 (default: %s)", params.diffusion.add_gumbel_noise ? "true" : "false"),
+        [](common_params & params, const std::string & value) { params.diffusion.add_gumbel_noise = std::stof(value); }
+    ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
+

     return ctx_arg;
 }
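
Taken together, the diffusion options now cover both Dream-style and LLaDA-style models: alg-temp is labeled "dream" in the help text, while block-length and cfg-scale are labeled "llada". A hedged example run (binary name assumed from the LLAMA_EXAMPLE_DIFFUSION example target; model path and values illustrative):

# LLaDA-style generation: low-confidence unmasking in blocks, with CFG enabled:
llama-diffusion-cli -m /models/llada-8b.gguf -p "Write a haiku" \
    --diffusion-steps 128 --diffusion-algorithm 4 \
    --diffusion-block-length 32 --diffusion-cfg-scale 1.5 --diffusion-visual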

common/common.cpp

Lines changed: 1 addition & 0 deletions
@@ -1122,6 +1122,7 @@ struct llama_model_params common_model_params_to_llama(common_params & params) {
     mparams.use_mmap        = params.use_mmap;
     mparams.use_mlock       = params.use_mlock;
     mparams.check_tensors   = params.check_tensors;
+    mparams.use_extra_bufts = !params.no_extra_bufts;

     if (params.kv_overrides.empty()) {
         mparams.kv_overrides = NULL;

common/common.h

Lines changed: 13 additions & 5 deletions
@@ -201,6 +201,7 @@ struct common_params_speculative {
     int32_t n_gpu_layers = -1;   // number of layers to store in VRAM for the draft model (-1 - use default)
     float   p_split      = 0.1f; // speculative decoding split probability
     float   p_min        = 0.75f; // minimum speculative decoding probability (greedy)
+    std::vector<std::pair<std::string, std::string>> replacements; // main to speculative model replacements

     ggml_type cache_type_k = GGML_TYPE_F16; // KV cache data type for the K
     ggml_type cache_type_v = GGML_TYPE_F16; // KV cache data type for the V
@@ -220,11 +221,17 @@ struct common_params_vocoder {
 };

 struct common_params_diffusion {
-    int32_t steps       = 64;    // number of diffusion steps
-    float   eps         = 1e-3f; // epsilon for timesteps
-    int32_t algorithm   = 0;     // diffusion algorithm (0=ORIGIN, 1=MASKGIT_PLUS, 2=TOPK_MARGIN, 3=ENTROPY)
-    float   alg_temp    = 0.0f;  // algorithm temperature
-    bool    visual_mode = false; // show progressive diffusion on screen
+    int32_t steps       = 128;
+    bool    visual_mode = false;
+
+    float   eps          = 0; // epsilon for timesteps
+    int32_t block_length = 0; // block length for generation
+
+    int32_t algorithm = 4;    // default algorithm: low-confidence
+    float   alg_temp  = 0.0f; // algorithm temperature
+
+    float   cfg_scale        = 0;     // classifier-free guidance scale
+    bool    add_gumbel_noise = false; // add gumbel noise to the logits if temp > 0.0
 };

 enum common_reasoning_format {
@@ -352,6 +359,7 @@ struct common_params {
     bool warmup        = true;  // warmup run
     bool check_tensors = false; // validate tensor data
     bool no_op_offload = false; // globally disable offload host tensor operations to device
+    bool no_extra_bufts = false; // disable extra buffer types (used for weight repacking)

     bool single_turn = false; // single turn chat conversation
