8 changes: 5 additions & 3 deletions src/megatron/bridge/models/qwen/qwen_provider.py
@@ -19,7 +19,9 @@

 import torch
 import torch.nn.functional as F
-from megatron.core.models.gpt.gpt_layer_specs import get_gpt_decoder_block_spec
+from megatron.core.models.gpt.experimental_attention_variant_module_specs import (
+    get_transformer_block_with_experimental_attention_variant_spec,
+)
 from megatron.core.transformer.spec_utils import ModuleSpec

 from megatron.bridge.models.gpt_provider import GPTModelProvider
@@ -433,8 +435,8 @@ class Qwen3MoEModelProvider235B_A22B(Qwen3MoEModelProvider):
 class Qwen3NextModelProvider(Qwen3MoEModelProvider):
     """Base provider for Qwen 3 Next Models."""

-    transformer_layer_spec: ModuleSpec | Callable[["GPTModelProvider"], ModuleSpec] = partial(
-        get_gpt_decoder_block_spec, use_transformer_engine=HAVE_TE
+    transformer_layer_spec: ModuleSpec | Callable[["GPTModelProvider"], ModuleSpec] = (
+        get_transformer_block_with_experimental_attention_variant_spec
     )

     layernorm_zero_centered_gamma: bool = True  # Zero-centered RMSNorm
11 changes: 10 additions & 1 deletion src/megatron/bridge/training/mlm_compat/model.py
@@ -21,6 +21,9 @@
 from megatron.core.enums import ModelType
 from megatron.core.fp8_utils import correct_amax_history_if_needed
 from megatron.core.models.gpt import GPTModel
+from megatron.core.models.gpt.experimental_attention_variant_module_specs import (
+    get_transformer_block_with_experimental_attention_variant_spec,
+)
 from megatron.core.models.gpt.gpt_layer_specs import (
     get_gpt_decoder_block_spec,
     get_gpt_layer_local_spec,
@@ -90,7 +93,13 @@ def _gpt_provider(
     if config is None:
         config = _transformer_config_from_args(args)

-    if args.num_experts:
+    if args.experimental_attention_variant is not None:
Contributor

This should be safely accessed with getattr.

Suggested change
-    if args.experimental_attention_variant is not None:
+    if getattr(args, "experimental_attention_variant", None) is not None:
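
A minimal sketch of why the getattr form degrades more gracefully (the old_args namespace below is hypothetical, standing in for an args object built by an older argument parser that predates experimental_attention_variant):

    from argparse import Namespace

    # Hypothetical args from an older parser that never defined the flag.
    old_args = Namespace(num_experts=8)

    # Direct attribute access raises on such a namespace:
    #   old_args.experimental_attention_variant  ->  AttributeError

    # The suggested getattr form falls through to the MoE branch instead:
    if getattr(old_args, "experimental_attention_variant", None) is not None:
        print("experimental attention branch")
    elif old_args.num_experts:
        print("MoE branch")  # taken for old_args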

+        transformer_layer_spec = (
+            get_transformer_block_with_experimental_attention_variant_spec(
+                config=config, vp_stage=vp_stage
+            )
+        )
+    elif args.num_experts:
         # Define the decoder block spec
         transformer_layer_spec = get_gpt_decoder_block_spec(
             config,