2 parents 4e6e7e4 + b75a79e · commit a8e9f4e
src/llama_recipes/finetuning.py
@@ -9,6 +9,7 @@
 import random
 import torch
 import torch.optim as optim
+import numpy as np
 from peft import get_peft_model, PeftModel
 from torch.distributed.fsdp import (
     FullyShardedDataParallel as FSDP,
@@ -82,6 +83,7 @@ def main(**kwargs):
     torch.xpu.manual_seed(train_config.seed)
     torch.manual_seed(train_config.seed)
     random.seed(train_config.seed)
+    np.random.seed(train_config.seed)
 
     if train_config.enable_fsdp:
         setup()
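
The change is small but closes a real reproducibility gap: random.seed and torch.manual_seed do not touch NumPy's global generator, so any shuffling or augmentation that goes through np.random would still differ between runs. Below is a minimal, self-contained sketch of the resulting seeding logic; the helper name set_seed and the hasattr guard around the XPU call are illustrative additions, not part of the commit, which calls torch.xpu.manual_seed directly.

import random

import numpy as np
import torch


def set_seed(seed: int) -> None:
    """Seed every RNG source the training run may touch (illustrative helper)."""
    random.seed(seed)        # Python stdlib RNG
    np.random.seed(seed)     # NumPy global RNG, the generator this commit adds
    torch.manual_seed(seed)  # PyTorch CPU (and CUDA) generators
    # torch.xpu is only present on builds with Intel XPU support,
    # so guard the call instead of invoking it unconditionally.
    if hasattr(torch, "xpu") and torch.xpu.is_available():
        torch.xpu.manual_seed(seed)


set_seed(42)  # example usage with an arbitrary seed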