4 changes: 4 additions & 0 deletions paddleformers/transformers/__init__.py
@@ -122,6 +122,10 @@
"get_triangle_upper_mask",
"DeepseekV2LinearScalingRotaryEmbedding",
],
"deepseek_v2.modeling_fast": [
"DeepseekV2ModelFast",
"DeepseekV2PretrainedModelFast",
],
"deepseek_v2.modeling_auto": [
"DeepseekV2LMHeadAuto",
"DeepseekV2ForCausalLMAuto",
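For context, a mapping like the one extended above is typically consumed by a lazy-module loader, so `deepseek_v2.modeling_fast` is only imported the first time one of its symbols is accessed. A minimal sketch of that pattern, assuming the usual PEP 562 module-level `__getattr__` approach (the `_import_structure` and `_symbol_to_module` names here are illustrative, not necessarily what paddleformers uses):

```python
import importlib

# Illustrative excerpt of a lazy export table like the one extended above.
_import_structure = {
    "deepseek_v2.modeling_fast": [
        "DeepseekV2ModelFast",
        "DeepseekV2PretrainedModelFast",
    ],
}

# Reverse map: exported symbol -> submodule that defines it.
_symbol_to_module = {
    name: module for module, names in _import_structure.items() for name in names
}


def __getattr__(name):
    # Import the defining submodule only when the symbol is first requested (PEP 562).
    if name in _symbol_to_module:
        module = importlib.import_module(f".{_symbol_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```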
6 changes: 6 additions & 0 deletions paddleformers/transformers/deepseek_v2/__init__.py
@@ -56,6 +56,12 @@
"yarn_find_correction_range",
"get_triangle_upper_mask",
"DeepseekV2LinearScalingRotaryEmbedding",
"set_global_step",
"get_global_step",
],
"modeling_fast": [
"DeepseekV2ModelFast",
"DeepseekV2PretrainedModelFast",
],
"modeling_auto": [
"DeepseekV2LMHeadAuto",
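Assuming both export tables are wired into the packages' public namespaces in the usual way, the new fast-model classes and global-step helpers would be importable roughly as follows (illustrative usage, not taken from the PR):

```python
# Exposed via paddleformers/transformers/__init__.py
from paddleformers.transformers import DeepseekV2ModelFast

# Exposed via paddleformers/transformers/deepseek_v2/__init__.py
from paddleformers.transformers.deepseek_v2 import (
    DeepseekV2PretrainedModelFast,
    get_global_step,
    set_global_step,
)
```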
26 changes: 25 additions & 1 deletion paddleformers/transformers/deepseek_v2/configuration.py
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
""" DeepSeekV2 model configuration"""
-from ..configuration_utils import PretrainedConfig
+from paddleformers.transformers.configuration_utils import PretrainedConfig

__all__ = [
    "DeepseekV2Config",
@@ -179,6 +179,18 @@ def __init__(
        attention_dropout=0.0,
        speculate_model_type=False,
        using_flex_token=False,
        use_dualpipev=False,
        send_mtp_embed=True,
        using_post_norm_recompute=False,
        recompute_fwd_gate_up=0,
        is_split_group_gemm=False,
        fakse_gate_restrict_balance=False,
        adaptive_remained_O1_recompute_ratio=0,
        offline_quant_expert_weight=True,
        clear_origin_weight_when_offline_quant=True,
        mlp_bwd_subbatch_rows=0,
        mlp_fwd_subbatch_rows=0,
        output_subbatch_rows=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
@@ -227,6 +239,18 @@
        self.speculate_model_type = speculate_model_type
        self.use_fp8 = False
        self.using_flex_token = using_flex_token
        self.use_dualpipev = use_dualpipev
        self.send_mtp_embed = send_mtp_embed
        self.using_post_norm_recompute = using_post_norm_recompute
        self.recompute_fwd_gate_up = recompute_fwd_gate_up
        self.is_split_group_gemm = is_split_group_gemm
        self.fakse_gate_restrict_balance = fakse_gate_restrict_balance
        self.adaptive_remained_O1_recompute_ratio = adaptive_remained_O1_recompute_ratio
        self.offline_quant_expert_weight = offline_quant_expert_weight
        self.clear_origin_weight_when_offline_quant = clear_origin_weight_when_offline_quant
        self.mlp_bwd_subbatch_rows = mlp_bwd_subbatch_rows
        self.mlp_fwd_subbatch_rows = mlp_fwd_subbatch_rows
        self.output_subbatch_rows = output_subbatch_rows

        super().__init__(
            pad_token_id=pad_token_id,
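For reference, a hedged sketch of constructing the extended configuration with a few of the new options set; the parameter names and defaults come from the signature above, while the specific values chosen here are purely illustrative:

```python
from paddleformers.transformers.deepseek_v2.configuration import DeepseekV2Config

# Illustrative only: turn on the dual-pipeline and post-norm recompute paths
# and set non-zero sub-batch row limits; every other field keeps its default.
config = DeepseekV2Config(
    use_dualpipev=True,
    using_post_norm_recompute=True,
    recompute_fwd_gate_up=1,
    mlp_fwd_subbatch_rows=4096,
    mlp_bwd_subbatch_rows=4096,
    output_subbatch_rows=1024,
)

# The new options are stored verbatim on the config object.
assert config.use_dualpipev is True
assert config.output_subbatch_rows == 1024
```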