diff --git a/.gitignore b/.gitignore
index e3e32e85c7..56ce708c5f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -46,5 +46,11 @@
 src
 .cache
 *.lprof
 *.prof
+*.zip
 MUJOCO_LOG.TXT
+
+dummy.py
+rsa2c/
+exptd3/
+train.py
diff --git a/example.py b/example.py
new file mode 100644
index 0000000000..dd37a2748c
--- /dev/null
+++ b/example.py
@@ -0,0 +1,11 @@
+import gymnasium as gym
+
+from stable_baselines3 import RSPPO
+from stable_baselines3.common.utils import set_random_seed
+
+set_random_seed(42)
+
+env = gym.make('CartPole-v1')
+model = RSPPO('MlpPolicy', env, verbose=1)
+
+# `learn` expects an integer number of timesteps
+model.learn(total_timesteps=1_000_000)
diff --git a/stable_baselines3/__init__.py b/stable_baselines3/__init__.py
index 0775a8ec5d..8b46cf0e1b 100644
--- a/stable_baselines3/__init__.py
+++ b/stable_baselines3/__init__.py
@@ -8,6 +8,9 @@
 from stable_baselines3.ppo import PPO
 from stable_baselines3.sac import SAC
 from stable_baselines3.td3 import TD3
+from stable_baselines3.rsppo import RSPPO
+# from stable_baselines3.rsa2c import RSA2C
+# from stable_baselines3.exptd3 import EXPTD3
 
 # Read version from file
 version_file = os.path.join(os.path.dirname(__file__), "version.txt")
@@ -29,6 +32,8 @@ def HER(*args, **kwargs):
     "PPO",
     "SAC",
     "TD3",
+    "RSPPO",
+    # "RSA2C",
     "HerReplayBuffer",
     "get_system_info",
 ]
diff --git a/stable_baselines3/common/buffers.py b/stable_baselines3/common/buffers.py
index 004adae90e..8826b5d05d 100644
--- a/stable_baselines3/common/buffers.py
+++ b/stable_baselines3/common/buffers.py
@@ -521,6 +521,48 @@ def _get_samples(
         return RolloutBufferSamples(*tuple(map(self.to_torch, data)))
 
 
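+# ExpRolloutBuffer below replaces the usual TD(lambda) targets with an
+# exponential (risk-sensitive) recursion. A sketch of the update it
+# implements, assuming positive value predictions V, risk parameter beta,
+# and ignoring termination masking:
+#
+#   G_T = V_T
+#   G_t = exp( beta * r_t
+#              + gamma * (1 - lambda) * log V_{t+1}
+#              + gamma * lambda * log G_{t+1} )
+#
+# i.e. the usual TD(lambda) interpolation between the one-step target and the
+# bootstrapped tail, carried out in log-space so the targets stay in the
+# exponential-utility domain. The 1e-15 floor in the code guards the log
+# against non-positive value predictions.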
+class ExpRolloutBuffer(RolloutBuffer):
+    """
+    Rollout buffer that replaces the TD(lambda) targets of ``RolloutBuffer``
+    with exponential (risk-sensitive) returns.
+
+    :param beta: Risk parameter of the exponential utility. Note that with the
+        default ``beta = 0`` the reward term drops out of the recursion, so
+        callers should pass a non-zero value (RSPPO defaults to 0.001).
+    """
+
+    def __init__(self, buffer_size, observation_space, action_space, device="auto", gae_lambda=0.95, gamma=0.99, n_envs=1, beta=0.0):
+        super().__init__(buffer_size, observation_space, action_space, device, gae_lambda, gamma, n_envs)
+        self.beta = beta
+
+    def compute_returns_and_advantage(self, last_values, dones):
+        # Convert to numpy and append the bootstrap values and dones
+        last_values = last_values.clone().cpu().numpy().flatten()  # type: ignore[assignment]
+        values = np.concatenate((self.values, last_values.reshape(1, -1)))
+        dones = np.concatenate((self.episode_starts, dones.reshape(1, -1)))
+        next_non_terminal = (1.0 - dones.astype(np.float32))[1:]
+
+        # Backward recursion in log-space (see the sketch above): `interm` holds
+        # the per-step part of the exponent, the loop adds the bootstrapped tail
+        returns = [values[-1]]  # bootstrap from the value of the final next state
+        interm = self.beta * self.rewards + self.gamma * (1 - self.gae_lambda) * next_non_terminal * np.log(1e-15 + np.maximum(0, values[1:]))
+        for step in reversed(range(self.buffer_size)):
+            returns.append(np.exp(interm[step] + self.gamma * self.gae_lambda * next_non_terminal[step] * np.log(1e-15 + np.maximum(0, returns[-1]))))
+        self.returns = np.stack(list(reversed(returns))[:-1], 0)
+        self.advantages = self.returns - self.values
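+
+# A worked special case as a sanity check (not executed): with gamma = 1,
+# gae_lambda = 1 and no terminations, the recursion collapses to
+# G_t = exp(beta * r_t) * G_{t+1}, i.e. G_t = V_T * exp(beta * sum_{k>=t} r_k),
+# the Monte-Carlo exponential-utility return bootstrapped from V_T.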
+
+
 class DictReplayBuffer(ReplayBuffer):
     """
     Dict Replay buffer used in off-policy algorithms like SAC/TD3.
diff --git a/stable_baselines3/ppo/ppo.py b/stable_baselines3/ppo/ppo.py
index 7ed1b4bbca..3f13e927b7 100644
--- a/stable_baselines3/ppo/ppo.py
+++ b/stable_baselines3/ppo/ppo.py
@@ -217,7 +217,7 @@ def train(self) -> None:
                 # Normalization does not make sense if mini batchsize == 1, see GH issue #325
                 if self.normalize_advantage and len(advantages) > 1:
                     advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
-
+
                 # ratio between old and new policy, should be one at the first iteration
                 ratio = th.exp(log_prob - rollout_data.old_log_prob)
diff --git a/stable_baselines3/rsppo/__init__.py b/stable_baselines3/rsppo/__init__.py
new file mode 100644
index 0000000000..d5c5935f69
--- /dev/null
+++ b/stable_baselines3/rsppo/__init__.py
@@ -0,0 +1,4 @@
+from stable_baselines3.rsppo.policies import CnnPolicy, MlpPolicy, MultiInputPolicy
+from stable_baselines3.rsppo.rsppo import RSPPO
+
+__all__ = ["RSPPO", "CnnPolicy", "MlpPolicy", "MultiInputPolicy"]
diff --git a/stable_baselines3/rsppo/policies.py b/stable_baselines3/rsppo/policies.py
new file mode 100644
index 0000000000..fb7afaef13
--- /dev/null
+++ b/stable_baselines3/rsppo/policies.py
@@ -0,0 +1,7 @@
+# This file is here just to define MlpPolicy/CnnPolicy
+# that work for RSPPO
+from stable_baselines3.common.policies import ActorCriticCnnPolicy, ActorCriticPolicy, MultiInputActorCriticPolicy
+
+MlpPolicy = ActorCriticPolicy
+CnnPolicy = ActorCriticCnnPolicy
+MultiInputPolicy = MultiInputActorCriticPolicy
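+
+# Note: RSPPO reuses the standard actor-critic policies unchanged; the
+# risk-sensitive part of the algorithm lives in ExpRolloutBuffer and in the
+# advantage sign handling in RSPPO.train, not in the network architecture.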
diff --git a/stable_baselines3/rsppo/rsppo.py b/stable_baselines3/rsppo/rsppo.py
new file mode 100644
index 0000000000..8ba0350318
--- /dev/null
+++ b/stable_baselines3/rsppo/rsppo.py
@@ -0,0 +1,307 @@
+import warnings
+from typing import Any, ClassVar, Optional, TypeVar, Union
+
+import numpy as np
+import torch as th
+from gymnasium import spaces
+from torch.nn import functional as F
+
+from stable_baselines3.common.buffers import ExpRolloutBuffer, RolloutBuffer
+from stable_baselines3.common.on_policy_algorithm import OnPolicyAlgorithm
+from stable_baselines3.common.policies import ActorCriticCnnPolicy, ActorCriticPolicy, BasePolicy, MultiInputActorCriticPolicy
+from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
+from stable_baselines3.common.utils import explained_variance, get_schedule_fn
+
+SelfRSPPO = TypeVar("SelfRSPPO", bound="RSPPO")
+
+
+class RSPPO(OnPolicyAlgorithm):
+    """
+    Risk-Sensitive Proximal Policy Optimization (RSPPO): PPO (clip version)
+    trained on the exponential (risk-sensitive) returns computed by
+    ``ExpRolloutBuffer``, with the entropy bonus disabled.
+
+    PPO paper: https://arxiv.org/abs/1707.06347
+    Code: This implementation borrows code from OpenAI Spinning Up (https://github.com/openai/spinningup/),
+    https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail and
+    Stable Baselines (PPO2 from https://github.com/hill-a/stable-baselines)
+
+    Introduction to PPO: https://spinningup.openai.com/en/latest/algorithms/ppo.html
+
+    :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
+    :param env: The environment to learn from (if registered in Gym, can be str)
+    :param learning_rate: The learning rate, it can be a function
+        of the current progress remaining (from 1 to 0)
+    :param n_steps: The number of steps to run for each environment per update
+        (i.e. rollout buffer size is n_steps * n_envs where n_envs is number of environment copies running in parallel)
+        NOTE: n_steps * n_envs must be greater than 1 (because of the advantage normalization)
+        See https://github.com/pytorch/pytorch/issues/29372
+    :param batch_size: Minibatch size
+    :param n_epochs: Number of epochs when optimizing the surrogate loss
+    :param gamma: Discount factor
+    :param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator
+    :param clip_range: Clipping parameter, it can be a function of the current progress
+        remaining (from 1 to 0).
+    :param clip_range_vf: Clipping parameter for the value function,
+        it can be a function of the current progress remaining (from 1 to 0).
+        This is a parameter specific to the OpenAI implementation. If None is passed (default),
+        no clipping will be done on the value function.
+        IMPORTANT: this clipping depends on the reward scaling.
+    :param normalize_advantage: Whether or not to normalize the advantage
+    :param vf_coef: Value function coefficient for the loss calculation
+    :param max_grad_norm: The maximum value for the gradient clipping
+    :param use_sde: Whether to use generalized State Dependent Exploration (gSDE)
+        instead of action noise exploration (default: False)
+    :param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE
+        Default: -1 (only sample at the beginning of the rollout)
+    :param rollout_buffer_class: Rollout buffer class to use, ``ExpRolloutBuffer`` by default.
+    :param rollout_buffer_kwargs: Keyword arguments to pass to the rollout buffer on creation,
+        ``{"beta": 0.001}`` by default (the risk parameter of ``ExpRolloutBuffer``)
+    :param target_kl: Limit the KL divergence between updates,
+        because the clipping is not enough to prevent large update
+        see issue #213 (cf https://github.com/hill-a/stable-baselines/issues/213)
+        By default, there is no limit on the kl div.
+    :param stats_window_size: Window size for the rollout logging, specifying the number of episodes to average
+        the reported success rate, mean episode length, and mean reward over
+    :param tensorboard_log: the log location for tensorboard (if None, no logging)
+    :param policy_kwargs: additional arguments to be passed to the policy on creation. See :ref:`ppo_policies`
+    :param verbose: Verbosity level: 0 for no output, 1 for info messages (such as device or wrappers used), 2 for
+        debug messages
+    :param seed: Seed for the pseudo random generators
+    :param device: Device (cpu, cuda, ...) on which the code should be run.
+        Setting it to auto, the code will be run on the GPU if possible.
+    :param _init_setup_model: Whether or not to build the network at the creation of the instance
+    """
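+
+    # Usage sketch (hypothetical values): the risk parameter is forwarded to
+    # ExpRolloutBuffer through ``rollout_buffer_kwargs``, e.g.
+    #
+    #     model = RSPPO("MlpPolicy", env, rollout_buffer_kwargs={"beta": -0.01})
+    #
+    # The sign of beta selects the risk attitude (positive: risk-seeking,
+    # negative: risk-averse) and is also used in ``train`` to orient the
+    # advantages.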
+    policy_aliases: ClassVar[dict[str, type[BasePolicy]]] = {
+        "MlpPolicy": ActorCriticPolicy,
+        "CnnPolicy": ActorCriticCnnPolicy,
+        "MultiInputPolicy": MultiInputActorCriticPolicy,
+    }
+
+    def __init__(
+        self,
+        policy: Union[str, type[ActorCriticPolicy]],
+        env: Union[GymEnv, str],
+        learning_rate: Union[float, Schedule] = 3e-4,
+        n_steps: int = 2048,
+        batch_size: int = 64,
+        n_epochs: int = 10,
+        gamma: float = 0.99,
+        gae_lambda: float = 0.95,
+        clip_range: Union[float, Schedule] = 0.2,
+        clip_range_vf: Union[None, float, Schedule] = None,
+        normalize_advantage: bool = True,
+        vf_coef: float = 0.5,
+        max_grad_norm: float = 0.5,
+        use_sde: bool = False,
+        sde_sample_freq: int = -1,
+        rollout_buffer_class: Optional[type[RolloutBuffer]] = ExpRolloutBuffer,
+        rollout_buffer_kwargs: Optional[dict[str, Any]] = None,
+        target_kl: Optional[float] = None,
+        stats_window_size: int = 100,
+        tensorboard_log: Optional[str] = None,
+        policy_kwargs: Optional[dict[str, Any]] = None,
+        verbose: int = 0,
+        seed: Optional[int] = None,
+        device: Union[th.device, str] = "auto",
+        _init_setup_model: bool = True,
+    ):
+        # Avoid a shared mutable default argument for the buffer kwargs
+        if rollout_buffer_kwargs is None:
+            rollout_buffer_kwargs = {"beta": 0.001}
+
+        super().__init__(
+            policy,
+            env,
+            learning_rate=learning_rate,
+            n_steps=n_steps,
+            gamma=gamma,
+            gae_lambda=gae_lambda,
+            vf_coef=vf_coef,
+            max_grad_norm=max_grad_norm,
+            use_sde=use_sde,
+            sde_sample_freq=sde_sample_freq,
+            rollout_buffer_class=rollout_buffer_class,
+            rollout_buffer_kwargs=rollout_buffer_kwargs,
+            stats_window_size=stats_window_size,
+            tensorboard_log=tensorboard_log,
+            policy_kwargs=policy_kwargs,
+            verbose=verbose,
+            device=device,
+            seed=seed,
+            ent_coef=0,  # the entropy bonus is disabled for RSPPO
+            _init_setup_model=False,
+            supported_action_spaces=(
+                spaces.Box,
+                spaces.Discrete,
+                spaces.MultiDiscrete,
+                spaces.MultiBinary,
+            ),
+        )
+
+        # Sanity check, otherwise it will lead to noisy gradient and NaN
+        # because of the advantage normalization
+        if normalize_advantage:
+            assert (
+                batch_size > 1
+            ), "`batch_size` must be greater than 1. See https://github.com/DLR-RM/stable-baselines3/issues/440"
+
+        if self.env is not None:
+            # Check that `n_steps * n_envs > 1` to avoid NaN
+            # when doing advantage normalization
+            buffer_size = self.env.num_envs * self.n_steps
+            assert buffer_size > 1 or (
+                not normalize_advantage
+            ), f"`n_steps * n_envs` must be greater than 1. Currently n_steps={self.n_steps} and n_envs={self.env.num_envs}"
+            # Check that the rollout buffer size is a multiple of the mini-batch size
+            untruncated_batches = buffer_size // batch_size
+            if buffer_size % batch_size > 0:
+                warnings.warn(
+                    f"You have specified a mini-batch size of {batch_size},"
+                    f" but because the `RolloutBuffer` is of size `n_steps * n_envs = {buffer_size}`,"
+                    f" after every {untruncated_batches} untruncated mini-batches,"
+                    f" there will be a truncated mini-batch of size {buffer_size % batch_size}\n"
+                    f"We recommend using a `batch_size` that is a factor of `n_steps * n_envs`.\n"
+                    f"Info: (n_steps={self.n_steps} and n_envs={self.env.num_envs})"
+                )
+        self.batch_size = batch_size
+        self.n_epochs = n_epochs
+        self.clip_range = clip_range
+        self.clip_range_vf = clip_range_vf
+        self.normalize_advantage = normalize_advantage
+        self.target_kl = target_kl
+
+        if _init_setup_model:
+            self._setup_model()
+
+    def _setup_model(self) -> None:
+        super()._setup_model()
+
+        # Initialize schedules for policy/value clipping
+        self.clip_range = get_schedule_fn(self.clip_range)
+        if self.clip_range_vf is not None:
+            if isinstance(self.clip_range_vf, (float, int)):
+                assert self.clip_range_vf > 0, "`clip_range_vf` must be positive, pass `None` to deactivate vf clipping"
+
+            self.clip_range_vf = get_schedule_fn(self.clip_range_vf)
+
+    def train(self) -> None:
+        """
+        Update policy using the currently gathered rollout buffer.
+        """
+        # Switch to train mode (this affects batch norm / dropout)
+        self.policy.set_training_mode(True)
+        # Update optimizer learning rate
+        self._update_learning_rate(self.policy.optimizer)
+        # Compute current clip range
+        clip_range = self.clip_range(self._current_progress_remaining)  # type: ignore[operator]
+        # Optional: clip range for the value function
+        if self.clip_range_vf is not None:
+            clip_range_vf = self.clip_range_vf(self._current_progress_remaining)  # type: ignore[operator]
+
+        pg_losses, value_losses = [], []
+        clip_fractions = []
+
+        continue_training = True
+        # train for n_epochs epochs
+        for epoch in range(self.n_epochs):
+            approx_kl_divs = []
+            # Do a complete pass on the rollout buffer
+            for rollout_data in self.rollout_buffer.get(self.batch_size):
+                actions = rollout_data.actions
+                if isinstance(self.action_space, spaces.Discrete):
+                    # Convert discrete action from float to long
+                    actions = rollout_data.actions.long().flatten()
+
+                values, log_prob, _ = self.policy.evaluate_actions(rollout_data.observations, actions)
+                values = values.flatten()
+                # Normalize advantage
+                advantages = rollout_data.advantages
+                # Normalization does not make sense if mini batchsize == 1, see GH issue #325
+                if self.normalize_advantage and len(advantages) > 1:
+                    advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
+                # The exponential return is decreasing in the reward when beta < 0, so
+                # flip the advantage sign to keep the objective "maximize reward"
+                # (fall back to the risk-neutral sign if no beta was passed)
+                advantages = float(np.sign(self.rollout_buffer_kwargs.get("beta", 1.0))) * advantages
+
+                # ratio between old and new policy, should be one at the first iteration
+                ratio = th.exp(log_prob - rollout_data.old_log_prob)
+
+                # clipped surrogate loss
+                policy_loss_1 = advantages * ratio
+                policy_loss_2 = advantages * th.clamp(ratio, 1 - clip_range, 1 + clip_range)
+                policy_loss = -th.min(policy_loss_1, policy_loss_2).mean()
+
+                # Logging
+                pg_losses.append(policy_loss.item())
+                clip_fraction = th.mean((th.abs(ratio - 1) > clip_range).float()).item()
+                clip_fractions.append(clip_fraction)
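+
+                # With ExpRolloutBuffer, `rollout_data.returns` holds the
+                # exponential-utility targets, so the value head is regressed
+                # towards an estimate of E[exp(beta * G)] rather than E[G]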
+                if self.clip_range_vf is None:
+                    # No clipping
+                    values_pred = values
+                else:
+                    # Clip the difference between old and new value
+                    # NOTE: this depends on the reward scaling
+                    values_pred = rollout_data.old_values + th.clamp(
+                        values - rollout_data.old_values, -clip_range_vf, clip_range_vf
+                    )
+                # Value loss using the TD(gae_lambda) target
+                value_loss = F.mse_loss(rollout_data.returns, values_pred)
+                value_losses.append(value_loss.item())
+
+                loss = policy_loss + self.vf_coef * value_loss
+
+                # Calculate approximate form of reverse KL Divergence for early stopping
+                # see issue #417: https://github.com/DLR-RM/stable-baselines3/issues/417
+                # and discussion in PR #419: https://github.com/DLR-RM/stable-baselines3/pull/419
+                # and Schulman blog: http://joschu.net/blog/kl-approx.html
+                with th.no_grad():
+                    log_ratio = log_prob - rollout_data.old_log_prob
+                    approx_kl_div = th.mean((th.exp(log_ratio) - 1) - log_ratio).cpu().numpy()
+                    approx_kl_divs.append(approx_kl_div)
+
+                if self.target_kl is not None and approx_kl_div > 1.5 * self.target_kl:
+                    continue_training = False
+                    if self.verbose >= 1:
+                        print(f"Early stopping at step {epoch} due to reaching max kl: {approx_kl_div:.2f}")
+                    break
+
+                # Optimization step
+                self.policy.optimizer.zero_grad()
+                loss.backward()
+                # Clip grad norm
+                th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
+                self.policy.optimizer.step()
+
+            self._n_updates += 1
+            if not continue_training:
+                break
+
+        explained_var = explained_variance(self.rollout_buffer.values.flatten(), self.rollout_buffer.returns.flatten())
+
+        # Logs
+        self.logger.record("train/policy_gradient_loss", np.mean(pg_losses))
+        self.logger.record("train/value_loss", np.mean(value_losses))
+        self.logger.record("train/approx_kl", np.mean(approx_kl_divs))
+        self.logger.record("train/clip_fraction", np.mean(clip_fractions))
+        self.logger.record("train/loss", loss.item())
+        self.logger.record("train/explained_variance", explained_var)
+        if hasattr(self.policy, "log_std"):
+            self.logger.record("train/std", th.exp(self.policy.log_std).mean().item())
+
+        self.logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
+        self.logger.record("train/clip_range", clip_range)
+        if self.clip_range_vf is not None:
+            self.logger.record("train/clip_range_vf", clip_range_vf)
+
+    def learn(
+        self: SelfRSPPO,
+        total_timesteps: int,
+        callback: MaybeCallback = None,
+        log_interval: int = 1,
+        tb_log_name: str = "RSPPO",
+        reset_num_timesteps: bool = True,
+        progress_bar: bool = False,
+    ) -> SelfRSPPO:
+        return super().learn(
+            total_timesteps=total_timesteps,
+            callback=callback,
+            log_interval=log_interval,
+            tb_log_name=tb_log_name,
+            reset_num_timesteps=reset_num_timesteps,
+            progress_bar=progress_bar,
+        )