
Commit be35439

Merge remote-tracking branch 'upstream/master'
2 parents b877e07 + 8f8f227


49 files changed: +1061 -360 lines

.clang-format

Lines changed: 7 additions & 0 deletions
@@ -22,6 +22,13 @@ AllowShortIfStatementsOnASingleLine: Never
 AllowShortLambdasOnASingleLine: Inline
 AllowShortLoopsOnASingleLine: false
 AlwaysBreakBeforeMultilineStrings: true
+# Treat CUDA keywords/attributes as "attribute macros" and avoid breaking lines inside them
+AttributeMacros:
+  - __host__
+  - __device__
+  - __global__
+  - __forceinline__
+  - __launch_bounds__
 BinPackArguments: true
 BinPackParameters: false # OnePerLine
 BitFieldColonSpacing: Both
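
For context, a minimal CUDA-flavoured sketch of the declarations this setting targets (the function names are hypothetical, not from this commit): with __host__, __device__, __global__, __forceinline__ and __launch_bounds__ registered as attribute macros, clang-format keeps the qualifier sequence attached to the declaration instead of breaking the line inside it.

// Hypothetical CUDA declarations for illustration only; compiles with nvcc/hipcc.
__host__ __device__ __forceinline__ float clamp01(float x) {
    return x < 0.0f ? 0.0f : (x > 1.0f ? 1.0f : x);
}

// __launch_bounds__ is likewise treated as an attribute and kept with the kernel declaration.
__global__ void __launch_bounds__(256) scale_clamp(float * dst, const float * src, float scale, int n) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        dst[i] = clamp01(src[i] * scale);
    }
}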

.devops/rocm.Dockerfile

Lines changed: 12 additions & 7 deletions
@@ -17,14 +17,11 @@ FROM ${BASE_ROCM_DEV_CONTAINER} AS build
 # gfx906 is deprecated
 #check https://rocm.docs.amd.com/projects/install-on-linux/en/docs-6.4.1/reference/system-requirements.html
 
-ARG ROCM_DOCKER_ARCH='gfx803,gfx900,gfx906,gfx908,gfx90a,gfx942,gfx1010,gfx1030,gfx1032,gfx1100,gfx1101,gfx1102,gfx1200,gfx1201'
-#ARG ROCM_DOCKER_ARCH=gfx1100
+ARG ROCM_DOCKER_ARCH='gfx803;gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1010;gfx1030;gfx1032;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201;gfx1151'
+#ARG ROCM_DOCKER_ARCH='gfx1151'
 
-# Set ROCm architectured
+# Set ROCm architectures
 ENV AMDGPU_TARGETS=${ROCM_DOCKER_ARCH}
-# Enable ROCm
-# ENV CC=/opt/rocm/llvm/bin/clang
-# ENV CXX=/opt/rocm/llvm/bin/clang++
 
 RUN apt-get update \
     && apt-get install -y \
@@ -39,8 +36,16 @@ WORKDIR /app
 
 COPY . .
 
+RUN git clone https://github.com/rocm/rocwmma --branch develop --depth 1
+
 RUN HIPCXX="$(hipconfig -l)/clang" HIP_PATH="$(hipconfig -R)" \
-    cmake -S . -B build -DGGML_HIP=ON -DAMDGPU_TARGETS=$ROCM_DOCKER_ARCH -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON -DCMAKE_BUILD_TYPE=Release -DLLAMA_BUILD_TESTS=OFF \
+    cmake -S . -B build \
+        -DGGML_HIP=ON \
+        -DGGML_HIP_ROCWMMA_FATTN=ON \
+        -DCMAKE_HIP_FLAGS="-I$(pwd)/rocwmma/library/include/" \
+        -DAMDGPU_TARGETS="$ROCM_DOCKER_ARCH" \
+        -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON \
+        -DCMAKE_BUILD_TYPE=Release -DLLAMA_BUILD_TESTS=OFF \
    && cmake --build build --config Release -j$(nproc)
 
 RUN mkdir -p /app/lib \

.github/workflows/build.yml

Lines changed: 19 additions & 8 deletions
@@ -56,7 +56,7 @@ env:
 
 jobs:
   macOS-latest-cmake-arm64:
-    runs-on: macos-14
+    runs-on: macos-latest
 
     steps:
       - name: Clone
@@ -138,7 +138,7 @@ jobs:
           ctest -L main --verbose --timeout 900
 
   macOS-latest-cmake-arm64-webgpu:
-    runs-on: macos-14
+    runs-on: macos-latest
 
     steps:
       - name: Clone
@@ -711,6 +711,7 @@ jobs:
 
   macOS-latest-swift:
     runs-on: macos-latest
+    needs: ios-xcode-build
 
     strategy:
      matrix:
@@ -727,6 +728,12 @@
          key: macOS-latest-swift
          evict-old-files: 1d
 
+      - name: Download xcframework artifact
+        uses: actions/download-artifact@v4
+        with:
+          name: llama-xcframework
+          path: build-apple/llama.xcframework/
+
       - name: Dependencies
        id: depends
        continue-on-error: true
@@ -748,11 +755,6 @@
            -DCMAKE_OSX_ARCHITECTURES="arm64;x86_64"
          cmake --build build --config Release -j $(sysctl -n hw.logicalcpu)
 
-      - name: xcodebuild for swift package
-        id: xcodebuild
-        run: |
-          ./build-xcframework.sh
-
   windows-msys2:
     runs-on: windows-2025
 
@@ -1170,8 +1172,17 @@ jobs:
        run: |
          ./build-xcframework.sh
 
+      - name: Upload xcframework artifact
+        uses: actions/upload-artifact@v4
+        with:
+          name: llama-xcframework
+          path: build-apple/llama.xcframework/
+          retention-days: 1
+
       - name: Build Xcode project
-        run: xcodebuild -project examples/llama.swiftui/llama.swiftui.xcodeproj -scheme llama.swiftui -sdk iphoneos CODE_SIGNING_REQUIRED=NO CODE_SIGN_IDENTITY= -destination 'generic/platform=iOS' FRAMEWORK_FOLDER_PATH=./build-ios build
+        run: |
+          xcodebuild -downloadPlatform iOS
+          xcodebuild -project examples/llama.swiftui/llama.swiftui.xcodeproj -scheme llama.swiftui -sdk iphoneos CODE_SIGNING_REQUIRED=NO CODE_SIGN_IDENTITY= -destination 'generic/platform=iOS' FRAMEWORK_FOLDER_PATH=./build-ios build
 
   android-build:
     runs-on: ubuntu-latest

.github/workflows/release.yml

Lines changed: 2 additions & 4 deletions
@@ -530,15 +530,13 @@ jobs:
     runs-on: windows-2022
 
     env:
-      # The ROCm version must correspond to the version used in the HIP SDK.
-      ROCM_VERSION: "6.4.2"
       HIPSDK_INSTALLER_VERSION: "25.Q3"
 
     strategy:
       matrix:
        include:
          - name: "radeon"
-           gpu_targets: "gfx1200;gfx1201;gfx1100;gfx1101;gfx1102;gfx1030;gfx1031;gfx1032"
+           gpu_targets: "gfx1151;gfx1200;gfx1201;gfx1100;gfx1101;gfx1102;gfx1030;gfx1031;gfx1032"
 
     steps:
       - name: Clone
@@ -548,7 +546,7 @@ jobs:
       - name: Clone rocWMMA repository
        id: clone_rocwmma
        run: |
-          git clone https://github.com/rocm/rocwmma --branch rocm-${{ env.ROCM_VERSION }} --depth 1
+          git clone https://github.com/rocm/rocwmma --branch develop --depth 1
 
       - name: Cache ROCm Installation
        id: cache-rocm

CMakeLists.txt

Lines changed: 7 additions & 0 deletions
@@ -58,6 +58,12 @@ if (MSVC)
     add_compile_options("$<$<COMPILE_LANGUAGE:CXX>:/bigobj>")
 endif()
 
+if (CMAKE_SYSTEM_NAME STREQUAL "iOS")
+    set(LLAMA_TOOLS_INSTALL_DEFAULT OFF)
+else()
+    set(LLAMA_TOOLS_INSTALL_DEFAULT ${LLAMA_STANDALONE})
+endif()
+
 #
 # option list
 #
@@ -82,6 +88,7 @@ option(LLAMA_BUILD_TESTS "llama: build tests" ${LLAMA_STANDALONE})
 option(LLAMA_BUILD_TOOLS "llama: build tools" ${LLAMA_STANDALONE})
 option(LLAMA_BUILD_EXAMPLES "llama: build examples" ${LLAMA_STANDALONE})
 option(LLAMA_BUILD_SERVER "llama: build server example" ${LLAMA_STANDALONE})
+option(LLAMA_TOOLS_INSTALL "llama: install tools" ${LLAMA_TOOLS_INSTALL_DEFAULT})
 
 # 3rd party libs
 option(LLAMA_CURL "llama: use libcurl to download model from an URL" ON)

common/arg.cpp

Lines changed: 5 additions & 5 deletions
@@ -1704,7 +1704,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
        [](common_params & params, const std::string & value) {
            params.system_prompt = value;
        }
-    ).set_examples({LLAMA_EXAMPLE_MAIN}));
+    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_DIFFUSION}));
    add_opt(common_arg(
        {"--no-perf"},
        string_format("disable internal libllama performance timings (default: %s)", params.no_perf ? "true" : "false"),
@@ -2548,7 +2548,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
        {"--cpu-moe", "-cmoe"},
        "keep all Mixture of Experts (MoE) weights in the CPU",
        [](common_params & params) {
-            params.tensor_buft_overrides.push_back({"\\.ffn_(up|down|gate)_exps", ggml_backend_cpu_buffer_type()});
+            params.tensor_buft_overrides.push_back(llm_ffn_exps_cpu_override());
        }
    ).set_env("LLAMA_ARG_CPU_MOE"));
    add_opt(common_arg(
@@ -2561,7 +2561,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
            for (int i = 0; i < value; ++i) {
                // keep strings alive and avoid leaking memory by storing them in a static vector
                static std::list<std::string> buft_overrides;
-                buft_overrides.push_back(string_format("blk\\.%d\\.ffn_(up|down|gate)_exps", i));
+                buft_overrides.push_back(llm_ffn_exps_block_regex(i));
                params.tensor_buft_overrides.push_back({buft_overrides.back().c_str(), ggml_backend_cpu_buffer_type()});
            }
        }
@@ -2570,7 +2570,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
        {"--cpu-moe-draft", "-cmoed"},
        "keep all Mixture of Experts (MoE) weights in the CPU for the draft model",
        [](common_params & params) {
-            params.speculative.tensor_buft_overrides.push_back({"\\.ffn_(up|down|gate)_exps", ggml_backend_cpu_buffer_type()});
+            params.speculative.tensor_buft_overrides.push_back(llm_ffn_exps_cpu_override());
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CPU_MOE_DRAFT"));
    add_opt(common_arg(
@@ -2582,7 +2582,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
            }
            for (int i = 0; i < value; ++i) {
                static std::list<std::string> buft_overrides_draft;
-                buft_overrides_draft.push_back(string_format("blk\\.%d\\.ffn_(up|down|gate)_exps", i));
+                buft_overrides_draft.push_back(llm_ffn_exps_block_regex(i));
                params.speculative.tensor_buft_overrides.push_back({buft_overrides_draft.back().c_str(), ggml_backend_cpu_buffer_type()});
            }
        }

common/common.h

Lines changed: 14 additions & 0 deletions
@@ -734,6 +734,20 @@ const char * const LLM_KV_SPLIT_TENSORS_COUNT = "split.tensors.count";
 
 }
 
+//
+// MoE utils
+//
+
+const char * const LLM_FFN_EXPS_REGEX = "\\.ffn_(up|down|gate)_exps";
+
+static std::string llm_ffn_exps_block_regex(int idx) {
+    return string_format("blk\\.%d%s", idx, LLM_FFN_EXPS_REGEX);
+}
+
+static llama_model_tensor_buft_override llm_ffn_exps_cpu_override() {
+    return { LLM_FFN_EXPS_REGEX, ggml_backend_cpu_buffer_type() };
+}
+
 //
 // training utils
 //
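
An illustrative usage sketch of the two new helpers (assumptions: common.h is included and the GGML CPU backend is linked; this mirrors how the --cpu-moe and --n-cpu-moe flags in common/arg.cpp use them, but the function below is hypothetical and not part of this commit):

// Hypothetical helper: keep expert FFN tensors on the CPU buffer type.
#include <list>
#include <string>

static void keep_experts_on_cpu(common_params & params, int n_blocks) {
    // equivalent of --cpu-moe: override every expert FFN tensor
    params.tensor_buft_overrides.push_back(llm_ffn_exps_cpu_override());

    // equivalent of --n-cpu-moe N: override only the first n_blocks blocks;
    // llm_ffn_exps_block_regex(3) yields the pattern blk\.3\.ffn_(up|down|gate)_exps
    static std::list<std::string> patterns; // keep the regex strings alive
    for (int i = 0; i < n_blocks; ++i) {
        patterns.push_back(llm_ffn_exps_block_regex(i));
        params.tensor_buft_overrides.push_back({patterns.back().c_str(), ggml_backend_cpu_buffer_type()});
    }
}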

common/json-schema-to-grammar.cpp

Lines changed: 7 additions & 6 deletions
@@ -257,12 +257,13 @@ std::unordered_map<std::string, BuiltinRule> STRING_FORMAT_RULES = {
 };
 
 static bool is_reserved_name(const std::string & name) {
-    static std::unordered_set<std::string> RESERVED_NAMES;
-    if (RESERVED_NAMES.empty()) {
-        RESERVED_NAMES.insert("root");
-        for (const auto &p : PRIMITIVE_RULES) RESERVED_NAMES.insert(p.first);
-        for (const auto &p : STRING_FORMAT_RULES) RESERVED_NAMES.insert(p.first);
-    }
+    static const std::unordered_set<std::string> RESERVED_NAMES = [] {
+        std::unordered_set<std::string> s;
+        s.insert("root");
+        for (const auto & p : PRIMITIVE_RULES) s.insert(p.first);
+        for (const auto & p : STRING_FORMAT_RULES) s.insert(p.first);
+        return s;
+    }();
    return RESERVED_NAMES.find(name) != RESERVED_NAMES.end();
 }
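
The same initialization idiom in isolation, as a self-contained sketch with hypothetical names (not from this file): a function-local static const populated once by an immediately-invoked lambda. C++11 magic statics make the one-time initialization thread-safe, unlike the original populate-on-first-call pattern, and the resulting set stays const afterwards.

// Standalone illustration of the lambda-initialized static idiom.
#include <string>
#include <unordered_set>

static bool is_keyword(const std::string & name) {
    static const std::unordered_set<std::string> KEYWORDS = [] {
        std::unordered_set<std::string> s;
        for (const char * k : { "if", "else", "for", "while", "return" }) {
            s.insert(k);
        }
        return s;
    }();
    return KEYWORDS.count(name) > 0;
}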

convert_hf_to_gguf.py

Lines changed: 106 additions & 1 deletion
@@ -888,6 +888,9 @@ def get_vocab_base_pre(self, tokenizer) -> str:
         if chkhsh == "a1e163ecab2e718a4c829d1148b6e86824ec36163bb71941c3dca9cd5ac25756":
             # ref: https://huggingface.co/JetBrains/Mellum-4b-base
             res = "mellum"
+        if chkhsh == "9b1be57e70d20d9501b2b3186e792d81181ae36ada3903c26f9fea418cf87206":
+            # ref: https://huggingface.co/inclusionAI/LLaDA-MoE-7B-A1B-Base
+            res = "llada-moe"
 
         if res is None:
             logger.warning("\n")
@@ -2390,7 +2393,10 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter
         return []  # skip other tensors
 
 
-@ModelBase.register("Llama4ForConditionalGeneration")
+@ModelBase.register(
+    "Llama4ForConditionalGeneration",
+    "Llama4ForCausalLM",
+)
 class Llama4Model(LlamaModel):
     model_arch = gguf.MODEL_ARCH.LLAMA4
     undo_permute = False
@@ -2408,6 +2414,10 @@ def set_gguf_parameters(self):
         super().set_gguf_parameters()
         self.gguf_writer.add_interleave_moe_layer_step(self.hparams["interleave_moe_layer_step"])
         self.gguf_writer.add_expert_feed_forward_length(self.hparams["intermediate_size_moe"])
+        if "layer_types" in self.hparams:
+            if all(lt == "full_attention" for lt in self.hparams["layer_types"]):
+                # all layers are full attention (for MobileLLM), disable swa
+                self.gguf_writer.add_sliding_window(0)
 
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None):
         if name.startswith("language_model."):
@@ -6006,9 +6016,34 @@ class SeedOssModel(TextModel):
 
 
 @ModelBase.register("Olmo2ForCausalLM")
+@ModelBase.register("Olmo3ForCausalLM")
 class Olmo2Model(TextModel):
     model_arch = gguf.MODEL_ARCH.OLMO2
 
+    def set_gguf_parameters(self):
+        super().set_gguf_parameters()
+
+        rope_scaling = self.hparams.get("rope_scaling") or {}
+        if rope_scaling.get("rope_type", rope_scaling.get("type")) == "yarn" and "factor" in rope_scaling:
+            self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
+            self.gguf_writer.add_rope_scaling_factor(rope_scaling["factor"])
+            self.gguf_writer.add_rope_scaling_attn_factors(rope_scaling["attention_factor"])
+            self.gguf_writer.add_rope_scaling_orig_ctx_len(rope_scaling["original_max_position_embeddings"])
+
+        if "sliding_window" in self.hparams:
+            self.gguf_writer.add_sliding_window(self.hparams["sliding_window"])
+
+        sliding_window_pattern = []
+        if "layer_types" in self.hparams:
+            sliding_window_pattern = [t == "sliding_attention" for t in self.hparams["layer_types"]]
+        else:
+            # Olmo2 does not use sliding window attention.
+            # Olmo3 defaults to using sliding window for all layers except every 4th.
+            for i in range(self.hparams["num_hidden_layers"]):
+                sliding_window_pattern.append((i + 1) % 4 != 0)
+
+        self.gguf_writer.add_sliding_window_pattern(sliding_window_pattern)
+
 
 @ModelBase.register("OlmoeForCausalLM")
 class OlmoeModel(TextModel):
@@ -8239,6 +8274,76 @@ def prepare_tensors(self):
             raise ValueError(f"Unprocessed experts: {experts}")
 
 
+@ModelBase.register("LLaDAMoEModel", "LLaDAMoEModelLM")
+class LLaDAMoEModel(TextModel):
+    model_arch = gguf.MODEL_ARCH.LLADA_MOE
+
+    def set_gguf_parameters(self):
+        super().set_gguf_parameters()
+        if (n_experts := self.hparams.get("num_experts")) is not None:
+            self.gguf_writer.add_expert_count(n_experts)
+
+        if (expert_intermediate_size := self.hparams.get("expert_intermediate_size")) is not None:
+            self.gguf_writer.add_expert_feed_forward_length(expert_intermediate_size)
+
+        # number of experts used per token (top-k)
+        if (n_experts_used := self.hparams.get("num_experts_per_tok")) is not None:
+            self.gguf_writer.add_expert_used_count(n_experts_used)
+
+        self.gguf_writer.add_mask_token_id(156895)
+        self.gguf_writer.add_causal_attention(False)
+        self.gguf_writer.add_diffusion_shift_logits(False)
+
+    _experts: list[dict[str, Tensor]] | None = None
+
+    # Copied from: Qwen2MoeModel
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        # process the experts separately
+        if name.find("experts") != -1:
+            n_experts = self.hparams["num_experts"]
+            assert bid is not None
+
+            if self._experts is None:
+                self._experts = [{} for _ in range(self.block_count)]
+
+            self._experts[bid][name] = data_torch
+
+            if len(self._experts[bid]) >= n_experts * 3:
+                tensors: list[tuple[str, Tensor]] = []
+
+                # merge the experts into a single 3d tensor
+                for w_name in ["down_proj", "gate_proj", "up_proj"]:
+                    datas: list[Tensor] = []
+
+                    for xid in range(n_experts):
+                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
+                        datas.append(self._experts[bid][ename])
+                        del self._experts[bid][ename]
+
+                    data_torch = torch.stack(datas, dim=0)
+
+                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
+
+                    new_name = self.map_tensor_name(merged_name)
+
+                    tensors.append((new_name, data_torch))
+                return tensors
+            else:
+                return []
+
+        return [(self.map_tensor_name(name), data_torch)]
+
+    # Copied from: Qwen2MoeModel
+    def prepare_tensors(self):
+        super().prepare_tensors()
+
+        if self._experts is not None:
+            # flatten `list[dict[str, Tensor]]` into `list[str]`
+            experts = [k for d in self._experts for k in d.keys()]
+            if len(experts) > 0:
+                raise ValueError(f"Unprocessed experts: {experts}")
+
+
 @ModelBase.register("HunYuanDenseV1ForCausalLM")
 class HunYuanModel(TextModel):
     model_arch = gguf.MODEL_ARCH.HUNYUAN_DENSE
