|
| 1 | +import gym |
| 2 | +from ditk import logging |
| 3 | +from ding.data.model_loader import FileModelLoader |
| 4 | +from ding.data.storage_loader import FileStorageLoader |
| 5 | +from ding.model import DQN |
| 6 | +from ding.policy import DQNPolicy |
| 7 | +from ding.envs import DingEnvWrapper, SubprocessEnvManagerV2 |
| 8 | +from ding.data import DequeBuffer |
| 9 | +from ding.config import compile_config |
| 10 | +from ding.framework import task, ding_init |
| 11 | +from ding.framework.context import OnlineRLContext |
| 12 | +from ding.framework.middleware import OffPolicyLearner, StepCollector, interaction_evaluator, data_pusher, \ |
| 13 | + eps_greedy_handler, CkptSaver, ContextExchanger, ModelExchanger, online_logger, termination_checker, \ |
| 14 | + nstep_reward_enhancer |
| 15 | +from ding.utils import set_pkg_seed |
| 16 | +from dizoo.box2d.lunarlander.config.lunarlander_dqn_config import main_config, create_config |
| 17 | + |
| 18 | + |
def main():
    """Entry point for the LunarLander DQN pipeline.

    Builds collector/evaluator environments, a DQN model/policy and a replay
    buffer, then wires the DI-engine middleware pipeline. Supports both
    single-process execution and a multi-worker run where each worker takes
    on a role (learner / evaluator / collector) based on its node id.
    """
    logging.getLogger().setLevel(logging.INFO)
    # Only node 0 persists the compiled config to disk to avoid duplicate writes.
    cfg = compile_config(main_config, create_cfg=create_config, auto=True, save_cfg=task.router.node_id == 0)
    ding_init(cfg)

    def _env_factory():
        # Fresh LunarLander instance wrapped for DI-engine's env interface.
        return DingEnvWrapper(gym.make("LunarLander-v2"))

    with task.start(async_mode=False, ctx=OnlineRLContext()):
        collector_env = SubprocessEnvManagerV2(
            env_fn=[_env_factory for _ in range(cfg.env.collector_env_num)],
            cfg=cfg.env.manager,
        )
        evaluator_env = SubprocessEnvManagerV2(
            env_fn=[_env_factory for _ in range(cfg.env.evaluator_env_num)],
            cfg=cfg.env.manager,
        )

        set_pkg_seed(cfg.seed, use_cuda=cfg.policy.cuda)

        model = DQN(**cfg.policy.model)
        buffer_ = DequeBuffer(size=cfg.policy.other.replay_buffer.replay_buffer_size)
        policy = DQNPolicy(cfg.policy, model=model)

        # Multi-process case: assign a role per worker and keep workers in sync.
        if task.router.is_active:
            # node 0 -> learner, node 1 -> evaluator, everything else -> collector.
            role = {0: task.role.LEARNER, 1: task.role.EVALUATOR}.get(task.router.node_id, task.role.COLLECTOR)
            task.add_role(role)

            # Exchange context and model parameters between workers each iteration.
            task.use(ContextExchanger(skip_n_iter=1))
            task.use(ModelExchanger(model))

        # Single-process pipeline; middleware order defines the train loop.
        task.use(interaction_evaluator(cfg, policy.eval_mode, evaluator_env))
        task.use(eps_greedy_handler(cfg))
        task.use(StepCollector(cfg, policy.collect_mode, collector_env))
        task.use(nstep_reward_enhancer(cfg))
        task.use(data_pusher(cfg, buffer_))
        task.use(OffPolicyLearner(cfg, policy.learn_mode, buffer_))
        task.use(online_logger(train_show_freq=50))
        task.use(CkptSaver(cfg, policy, train_freq=1000))
        task.use(termination_checker(max_env_step=int(3e6)))
        task.run()
| 65 | + |
| 66 | + |
# Run the training pipeline only when executed as a script.
if __name__ == "__main__":
    main()