
Commit c623901

[XPU] add xpu support for llama sft (#9152)
Co-authored-by: tizhou86 <[email protected]>
1 parent c4d3a2f commit c623901

File tree

1 file changed: +10 -0 lines changed


llm/run_finetune.py

Lines changed: 10 additions & 0 deletions
@@ -64,6 +64,7 @@
     init_chat_template,
 )
 from paddlenlp.utils.log import logger
+from paddlenlp.utils.tools import get_env_device
 
 # Fine-tune Environment Variables to support sharding stage1 overlap optimization.
 os.environ["USE_CASUAL_MASK"] = "False"
@@ -105,6 +106,15 @@ def main():
             "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
         )
 
+    if get_env_device() == "xpu" and training_args.gradient_accumulation_steps > 1:
+        try:
+            from paddle_xpu.layers.nn.linear import LinearConfig  # noqa: F401
+            LinearConfig.enable_accumulate_steps_opt()
+            LinearConfig.set_accumulate_steps(training_args.gradient_accumulation_steps)
+        except ImportError:
+            # It's OK to run without the accumulate_steps optimization.
+            pass
+
     # Load model
     if training_args.fp16_opt_level == "O2":
         if training_args.fp16:
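
For context, the added block follows a common optional-dependency pattern: the XPU-specific accumulate-steps optimization is switched on only when the paddle_xpu package can be imported, and training silently falls back to the default path otherwise. Below is a minimal sketch of the same logic factored into a standalone helper; the helper name maybe_enable_xpu_accumulate_opt and the usage comment are assumptions for illustration, while get_env_device and the LinearConfig calls are taken directly from the patch.

from paddlenlp.utils.tools import get_env_device


def maybe_enable_xpu_accumulate_opt(gradient_accumulation_steps: int) -> bool:
    """Hypothetical helper mirroring the guard added by this commit.

    Returns True if the paddle_xpu accumulate-steps optimization was enabled.
    """
    if get_env_device() != "xpu" or gradient_accumulation_steps <= 1:
        return False
    try:
        from paddle_xpu.layers.nn.linear import LinearConfig
    except ImportError:
        # paddle_xpu is optional; run without the optimization.
        return False
    LinearConfig.enable_accumulate_steps_opt()
    LinearConfig.set_accumulate_steps(gradient_accumulation_steps)
    return True


# Example usage inside a fine-tuning script (training_args as parsed in run_finetune.py):
# maybe_enable_xpu_accumulate_opt(training_args.gradient_accumulation_steps)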
