
Commit f14fafd

fix types
1 parent f7db153

File tree

1 file changed (+4 −4)

trinity/common/verl_config.py

Lines changed: 4 additions & 4 deletions
@@ -68,7 +68,7 @@ class Checkpoint:
 class Actor:
     strategy: str = "fsdp"
     ppo_mini_batch_size: int = 256
-    ppo_micro_batch_size: Any = None
+    ppo_micro_batch_size: Optional[int] = None
     ppo_micro_batch_size_per_gpu: int = 1
     use_dynamic_bsz: bool = False
     ppo_max_token_len_per_gpu: int = (
@@ -95,7 +95,7 @@ class Actor:
 @dataclass
 class Ref:
     fsdp_config: FSDPConfig = field(default_factory=FSDPConfig)
-    log_prob_micro_batch_size: Any = None
+    log_prob_micro_batch_size: Optional[int] = None
     log_prob_micro_batch_size_per_gpu: int = 1
     log_prob_use_dynamic_bsz: bool = False
     log_prob_max_token_len_per_gpu: int = 0
@@ -121,7 +121,7 @@ class Rollout:
     max_num_batched_tokens: int = 8192
     max_model_len: Optional[int] = None
     max_num_seqs: int = 1024
-    log_prob_micro_batch_size: Any = None
+    log_prob_micro_batch_size: Optional[int] = None
     log_prob_micro_batch_size_per_gpu: int = 1
     log_prob_use_dynamic_bsz: bool = False
     log_prob_max_token_len_per_gpu: int = 0
@@ -158,7 +158,7 @@ class Critic:
     optim: Optim = field(default_factory=Optim)
     model: CriticModel = field(default_factory=CriticModel)
     ppo_mini_batch_size: int = 0
-    ppo_micro_batch_size: Any = None
+    ppo_micro_batch_size: Optional[int] = None
     ppo_micro_batch_size_per_gpu: int = 1
     forward_micro_batch_size: Optional[int] = None
     forward_micro_batch_size_per_gpu: Optional[int] = None
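All four hunks make the same change: micro-batch-size fields drop the untyped Any annotation in favor of Optional[int], matching the max_model_len: Optional[int] = None pattern already present in Rollout. None still serves as the "not set" sentinel, but a static checker can now reject non-integer values, which Any silently allowed. A minimal standalone sketch of that behavior (only the Actor fields shown in the diff are reproduced; the real file defines many more):

    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class Actor:
        # None means "unset"; any concrete value must be an int.
        ppo_micro_batch_size: Optional[int] = None
        ppo_micro_batch_size_per_gpu: int = 1

    ok = Actor(ppo_micro_batch_size=256)     # passes mypy
    bad = Actor(ppo_micro_batch_size="256")  # runs (dataclasses do not enforce
                                             # types at runtime), but mypy flags
                                             # it; with Any it would pass silently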
