apps/grpo/qwen3_1_7b.yaml (3 additions & 3 deletions)
@@ -3,7 +3,7 @@

# Global configuration
group_size: 8
-batch_size: 16
+local_batch_size: 16 # per-device batch size
max_req_tokens: 512
max_res_tokens: 512
model: "Qwen/Qwen3-1.7B"
@@ -56,7 +56,7 @@ trainer:
lr_scheduler:
warmup_steps: 1
training:
-local_batch_size: ${batch_size}
+local_batch_size: ${local_batch_size}
seq_len: 2048
max_norm: 1.0
steps: 1000000
@@ -85,7 +85,7 @@ trainer:

# Replay buffer configuration
replay_buffer:
-batch_size: ${batch_size}
+batch_size: ${local_batch_size}
max_policy_age: ${off_by_n}
dp_size: ${trainer.parallelism.data_parallel_shard_degree} # Must equal trainer DP degree

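Note: the ${local_batch_size} references above are variable interpolations resolved by the config loader, so renaming the global key from batch_size to local_batch_size requires updating every reference that points at it. A minimal sketch of how the resolved values come out, assuming an OmegaConf-style loader (the loader actually used by this repo may differ):

# Sketch only: assumes OmegaConf-style ${...} interpolation; the repo's
# config loader may behave differently.
from omegaconf import OmegaConf

cfg = OmegaConf.create({
    "local_batch_size": 16,  # per-device batch size (the renamed global key)
    "trainer": {"training": {"local_batch_size": "${local_batch_size}"}},
    "replay_buffer": {"batch_size": "${local_batch_size}"},
})

OmegaConf.resolve(cfg)  # replace ${...} references with concrete values
assert cfg.trainer.training.local_batch_size == 16
assert cfg.replay_buffer.batch_size == 16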
apps/grpo/qwen3_32b.yaml (3 additions & 3 deletions)
@@ -4,7 +4,7 @@

# Global configuration
group_size: 2
-batch_size: 8
+local_batch_size: 8 # per-device batch size
max_req_tokens: 512
max_res_tokens: 512
model: "Qwen/Qwen3-32B"
@@ -59,7 +59,7 @@ trainer:
lr_scheduler:
warmup_steps: 1
training:
-local_batch_size: ${batch_size}
+local_batch_size: ${local_batch_size}
seq_len: 2048
max_norm: 1.0
steps: 1000000
@@ -87,7 +87,7 @@ trainer:

# Replay buffer configuration
replay_buffer:
-batch_size: ${batch_size}
+batch_size: ${local_batch_size}
max_policy_age: ${off_by_n}
# dp_size: ${trainer.parallelism.data_parallel_shard_degree} # Must equal trainer DP degree
dp_size: 8
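The comments above tie replay_buffer.dp_size to the trainer's data-parallel degree, hard-coded to 8 for the 32B config. Under the usual data-parallel convention (an assumption about how the global batch is formed, not something stated in this diff), the effective batch per optimizer step is the per-device batch times the DP degree:

# Hypothetical helper illustrating the common data-parallel convention
# (global batch = per-device batch * DP degree); the repo may compute this differently.
def global_batch_size(local_batch_size: int, dp_size: int) -> int:
    return local_batch_size * dp_size

# qwen3_32b.yaml: local_batch_size = 8, dp_size = 8
assert global_batch_size(8, 8) == 64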
apps/grpo/qwen3_8b.yaml (3 additions & 3 deletions)
@@ -3,7 +3,7 @@

# Global configuration
group_size: 8
-batch_size: 16
+local_batch_size: 16 # per-device batch size
max_req_tokens: 512
max_res_tokens: 512
model: "Qwen/Qwen3-8B"
@@ -55,7 +55,7 @@ trainer:
lr_scheduler:
warmup_steps: 1
training:
-local_batch_size: ${batch_size}
+local_batch_size: ${local_batch_size}
seq_len: 2048
max_norm: 1.0
steps: 1000000
@@ -84,7 +84,7 @@ trainer:

# Replay buffer configuration
replay_buffer:
-batch_size: ${batch_size}
+batch_size: ${local_batch_size}
max_policy_age: ${off_by_n}
# This should match the dp_size of TorchTitan
# Here it's set explicitly to 2, because we've set