Skip to content

Commit 1a5e2f0

Browse files
committed
bugfix: resolve compilation issues when building without USE_NPU_TORCH.
1 parent 5911088 commit 1a5e2f0

File tree

3 files changed

+6
-3
lines changed

3 files changed

+6
-3
lines changed

CMakeLists.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -298,7 +298,7 @@ else()
298298
endif()
299299

300300
if(USE_NPU)
301-
add_definitions(-DUSE_NPU_TORCH)
301+
# add_definitions(-DUSE_NPU_TORCH)
302302
add_definitions(-DUSE_NPU)
303303
add_definitions(-DBUILD_LIBTORCH)
304304
add_definitions(-DTORCH_SETCUSTOMHANDLER=ON)

xllm/models/llm/qwen3.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,7 @@ class QWen3ModelImpl : public LlmModelImplBase<QWen3DecoderLayer> {
4545
xllm::layer::RmsNorm(
4646
model_args.hidden_size(), model_args.rms_norm_eps(), options));
4747
#else
48-
norm_ = register_module("norm", layer::RmsNorm(context));
48+
norm_ = register_module("norm", layer::NpuRmsNorm(context));
4949
#endif
5050
for (auto i = 0; i < FLAGS_micro_batch_num; i++) {
5151
#if defined(USE_NPU_TORCH)

xllm/models/llm/qwen3_moe.h

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -274,10 +274,13 @@ class Qwen3MoeModelImpl : public torch::nn::Module {
274274
torch::Dtype dtype_;
275275
layer::WordEmbedding embed_tokens_{nullptr};
276276
layer::AttentionMask attn_mask_;
277-
layer::RmsNorm norm_{nullptr};
277+
278278
#if defined(USE_NPU)
279279
torch::Tensor cos_sin_;
280280
layer::PosEmbedding atb_pos_emb_{nullptr};
281+
layer::NpuRmsNorm norm_{nullptr};
282+
#else
283+
layer::RmsNorm norm_{nullptr};
281284
#endif
282285
};
283286
TORCH_MODULE(Qwen3MoeModel);

0 commit comments

Comments
 (0)