# maniskill_ppo_openvlaoft.yaml
# Hydra configuration for PPO training of an OpenVLA-OFT policy on
# ManiSkill embodied tasks (172 lines, ~3.6 KB in the upstream repo).
# NOTE(review): the original capture included GitHub page chrome and a
# line-number gutter above this header; it was non-content scrape residue
# that made the file unparseable as YAML and has been replaced by this comment.
# Hydra composition list: pulls the train/eval environment configs in from
# the extra search path below and routes hydra job logging to stdout.
defaults:
  - env/train: PutOnPlateInScene25Main
  - env/eval: maniskill_ood_template
  - override hydra/job_logging: stdout
hydra:
  run:
    # Run in the current working directory instead of a timestamped hydra dir.
    dir: .
  # Suppress the .hydra/ config-snapshot subdirectory.
  output_subdir: null
  searchpath:
    # NOTE(review): requires the EMBODIED_PATH environment variable to be set
    # before launch — confirm it is exported by the launch script.
    - file://${oc.env:EMBODIED_PATH}/config/
# Single-node cluster layout. component_placement maps each worker group to
# device index ranges; actor spans 0-7 while env (0-3) and rollout (4-7)
# split the same devices — presumably GPU indices shared in colocate mode
# (see rollout.mode below); verify against the RLinf placement docs.
cluster:
  num_nodes: 1
  component_placement:
    actor: 0-7
    env: 0-3
    rollout: 4-7
# Top-level run control: task selection, experiment logging, and the
# training-loop schedule (epochs, checkpointing, sequence limits).
runner:
  task_type: embodied
  logger:
    log_path: "../results"
    project_name: rlinf
    experiment_name: "test_openvla"
    logger_backends: ["tensorboard"] # wandb, swanlab
  max_epochs: 1000
  # presumably -1 disables the step cap so max_epochs bounds the run — confirm
  max_steps: -1
  # Lowercase booleans throughout (yamllint `truthy`); parses identically to
  # the previous capitalized form.
  only_eval: false
  # presumably -1 disables periodic validation — confirm against the runner
  val_check_interval: -1
  save_interval: 40
  seq_length: 4096
  max_prompt_length: 30
# PPO algorithm settings for the embodied task: environment-reset behavior,
# advantage estimation (GAE), loss/clip hyperparameters, and rollout sampling.
# Booleans normalized to canonical lowercase true/false (yamllint `truthy`);
# the parsed values are unchanged.
algorithm:
  auto_reset: true
  ignore_terminations: false
  use_fixed_reset_state_ids: false
  require_values: true
  shuffle_samples: true
  normalize_advantages: true
  kl_penalty: kl # how to estimate kl divergence: kl or kl_penalty
  group_size: 1
  n_chunk_steps: 20
  n_eval_chunk_steps: 10
  num_group_envs: 128
  rollout_epoch: 1
  # Granularity selectors — values here are consumed by the trainer; see
  # adv/loss types below for the embodied-specific estimators.
  reward_type: step_level
  logprob_type: token_level
  entropy_type: token_level
  adv_type: embodied_gae
  loss_type: embodied_ppo
  loss_agg_func: "token-mean"
  kl_beta: 0.0       # 0.0 disables the KL penalty term
  entropy_bonus: 0   # 0 disables the entropy bonus
  # Asymmetric PPO clipping: wider on the high side than the low side.
  clip_ratio_high: 0.28
  clip_ratio_low: 0.2
  clip_ratio_c: 3.0
  value_clip: 0.2
  huber_delta: 10.0
  gamma: 0.99        # discount factor
  gae_lambda: 0.95   # GAE lambda
  # params for rollout
  sampling_params:
    use_greedy: false
    temperature_train: 1.0
    temperature_eval: 0.6
    top_k: 0
    top_p: 1.0
    repetition_penalty: 1.0
  # length argument for autoregressive sampling
  # max length means max amount of tokens to generate
  length_params:
    max_new_token: null
    max_length: 1024
    min_length: 1
# Environment worker group: publishes observations onto the shared channel
# consumed by the rollout workers (rollout.channel.name interpolates this).
env:
  group_name: "EnvGroup"
  channel:
    name: "env_buffer_list"
    queue_name: "obs_buffer"
    queue_size: 0
  # Lowercase boolean (yamllint `truthy`); same parsed value as before.
  enable_offload: false
# Rollout worker group: generates actions via a HuggingFace-backed model,
# colocated on the same devices as the env workers (see cluster placement).
rollout:
  group_name: "RolloutGroup"
  channel:
    # Shares the env group's channel; only the queue name differs.
    name: ${env.channel.name}
    queue_name: "action_buffer"
    queue_size: 0
  mode: "colocate"
  backend: "huggingface"
  # Placeholder — point at the actual SFT checkpoint before launching.
  model_dir: "/path/to/model/Openvla-oft-SFT-libero10-trajall/"
  # Lowercase boolean (yamllint `truthy`); same parsed value as before.
  enable_offload: true
  pipeline_stage_num: 1
# Actor (trainer) group: FSDP fine-tuning of the OpenVLA-OFT policy with
# LoRA. Booleans normalized to canonical lowercase true/false (yamllint
# `truthy`); parsed values are unchanged.
actor:
  group_name: "ActorGroup"
  channel:
    name: ${env.channel.name}
    queue_name: "replay_buffer"
    queue_size: 0
  training_backend: "fsdp"
  # Placeholders — set both to the actual SFT checkpoint / output location.
  checkpoint_load_path: "/path/to/model/Openvla-oft-SFT-libero10-trajall/"
  checkpoint_save_path: "../results"
  micro_batch_size: 40
  global_batch_size: 640
  seed: 1234
  enable_offload: true
  model:
    model_name: "openvla_oft"
    # NOTE(review): inline comment inherited from elsewhere said
    # "'action' or 'token'", but this interpolates algorithm.reward_type,
    # which is "step_level" above — confirm the consumer accepts that value.
    value_type: ${algorithm.reward_type}
    action_dim: 7
    num_action_chunks: 8
    use_proprio: false
    # NOTE(review): unnorm_key/policy_setup target widowx_bridge while the
    # checkpoint paths reference a libero10 SFT model — verify this pairing.
    unnorm_key: bridge_orig
    center_crop: true
    precision: "bf16"
    add_bias_linear: false
    add_qkv_bias: true
    vocab_size: 32000
    hidden_size: 4096
    policy_setup: "widowx_bridge"
    vh_mode: "a0"
    image_size: [224, 224]
    is_lora: true
    lora_rank: 32
    # Placeholder — point at the trained LoRA adapter directory.
    lora_path: /path/to/model/oft-sft/lora_004000
    ckpt_path: null
    num_images_in_input: 1
    attn_implementation: "flash_attention_2"
    low_cpu_mem_usage: true
    trust_remote_code: true
    gradient_checkpointing: false
  optim:
    lr: 1.0e-4
    value_lr: 3.0e-3
    adam_beta1: 0.9
    adam_beta2: 0.999
    adam_eps: 1.0e-05
    clip_grad: 10.0
  tokenizer:
    tokenizer_type: "HuggingFaceTokenizer"
    # Placeholder — same checkpoint directory as checkpoint_load_path.
    tokenizer_model: "/path/to/model/Openvla-oft-SFT-libero10-trajall/"
    use_fast: false
    trust_remote_code: true
    padding_side: "right"
  fsdp:
    forward_prefetch: false
    limit_all_gathers: false
    backward_prefetch: false
    use_orig_params: false
# No learned reward or critic model is used; values come from the actor's
# value head (algorithm.require_values above). Booleans normalized to
# canonical lowercase (yamllint `truthy`); parsed values are unchanged.
reward:
  use_reward_model: false
critic:
  use_critic_model: false