Source code for d3rlpy.algos.sac

from typing import Any, Dict, Optional, Sequence

from ..argument_utility import (
    ActionScalerArg,
    EncoderArg,
    QFuncArg,
    RewardScalerArg,
    ScalerArg,
    UseGPUArg,
    check_encoder,
    check_q_func,
    check_use_gpu,
)
from ..constants import IMPL_NOT_INITIALIZED_ERROR, ActionSpace
from ..dataset import TransitionMiniBatch
from ..gpu import Device
from ..models.encoders import EncoderFactory
from ..models.optimizers import AdamFactory, OptimizerFactory
from ..models.q_functions import QFunctionFactory
from .base import AlgoBase
from .torch.sac_impl import DiscreteSACImpl, SACImpl


class SAC(AlgoBase):
    r"""Soft Actor-Critic algorithm.

    SAC is a DDPG-based maximum entropy RL algorithm, which produces
    state-of-the-art performance in online RL settings. SAC leverages the
    twin Q functions proposed in TD3. Additionally, the `delayed policy
    update` from TD3 is also implemented, although it is not used in the
    original paper.

    .. math::

        L(\theta_i) = \mathbb{E}_{s_t,\, a_t,\, r_{t+1},\, s_{t+1} \sim D,\,
                                   a_{t+1} \sim \pi_\phi(\cdot|s_{t+1})}
            \Big[\big(y - Q_{\theta_i}(s_t, a_t)\big)^2\Big]

    .. math::

        y = r_{t+1} + \gamma \Big(\min_j Q_{\theta_j}(s_{t+1}, a_{t+1})
            - \alpha \log \big(\pi_\phi(a_{t+1}|s_{t+1})\big)\Big)

    .. math::

        J(\phi) = \mathbb{E}_{s_t \sim D,\, a_t \sim \pi_\phi(\cdot|s_t)}
            \Big[\alpha \log (\pi_\phi (a_t|s_t))
                - \min_i Q_{\theta_i}\big(s_t, \pi_\phi(a_t|s_t)\big)\Big]

    The temperature parameter :math:`\alpha` is also automatically adjustable.

    .. math::

        J(\alpha) = \mathbb{E}_{s_t \sim D,\, a_t \sim \pi_\phi(\cdot|s_t)}
            \bigg[-\alpha \Big(\log \big(\pi_\phi(a_t|s_t)\big) + H\Big)\bigg]

    where :math:`H` is a target entropy, which is defined as :math:`\dim a`.

    References:
        * `Haarnoja et al., Soft Actor-Critic: Off-Policy Maximum Entropy Deep
          Reinforcement Learning with a Stochastic Actor.
          <https://arxiv.org/abs/1801.01290>`_
        * `Haarnoja et al., Soft Actor-Critic Algorithms and Applications.
          <https://arxiv.org/abs/1812.05905>`_

    Args:
        actor_learning_rate (float): learning rate for policy function.
        critic_learning_rate (float): learning rate for Q functions.
        temp_learning_rate (float): learning rate for temperature parameter.
        actor_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
            optimizer factory for the actor.
        critic_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
            optimizer factory for the critic.
        temp_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
            optimizer factory for the temperature.
        actor_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
            encoder factory for the actor.
        critic_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
            encoder factory for the critic.
        q_func_factory (d3rlpy.models.q_functions.QFunctionFactory or str):
            Q function factory.
        batch_size (int): mini-batch size.
        n_frames (int): the number of frames to stack for image observation.
        n_steps (int): N-step TD calculation.
        gamma (float): discount factor.
        tau (float): target network synchronization coefficient.
        n_critics (int): the number of Q functions for ensemble.
        target_reduction_type (str): ensemble reduction method at target value
            estimation. The available options are
            ``['min', 'max', 'mean', 'mix', 'none']``.
        initial_temperature (float): initial temperature value.
        use_gpu (bool, int or d3rlpy.gpu.Device): flag to use GPU, device ID
            or device.
        scaler (d3rlpy.preprocessing.Scaler or str): preprocessor.
            The available options are ``['pixel', 'min_max', 'standard']``.
        action_scaler (d3rlpy.preprocessing.ActionScaler or str):
            action preprocessor. The available options are ``['min_max']``.
        reward_scaler (d3rlpy.preprocessing.RewardScaler or str):
            reward preprocessor. The available options are
            ``['clip', 'min_max', 'standard']``.
        impl (d3rlpy.algos.torch.sac_impl.SACImpl): algorithm implementation.

    """

    _actor_learning_rate: float
    _critic_learning_rate: float
    _temp_learning_rate: float
    _actor_optim_factory: OptimizerFactory
    _critic_optim_factory: OptimizerFactory
    _temp_optim_factory: OptimizerFactory
    _actor_encoder_factory: EncoderFactory
    _critic_encoder_factory: EncoderFactory
    _q_func_factory: QFunctionFactory
    _tau: float
    _n_critics: int
    _target_reduction_type: str
    _initial_temperature: float
    _use_gpu: Optional[Device]
    _impl: Optional[SACImpl]

    def __init__(
        self,
        *,
        actor_learning_rate: float = 3e-4,
        critic_learning_rate: float = 3e-4,
        temp_learning_rate: float = 3e-4,
        actor_optim_factory: OptimizerFactory = AdamFactory(),
        critic_optim_factory: OptimizerFactory = AdamFactory(),
        temp_optim_factory: OptimizerFactory = AdamFactory(),
        actor_encoder_factory: EncoderArg = "default",
        critic_encoder_factory: EncoderArg = "default",
        q_func_factory: QFuncArg = "mean",
        batch_size: int = 100,
        n_frames: int = 1,
        n_steps: int = 1,
        gamma: float = 0.99,
        tau: float = 0.005,
        n_critics: int = 2,
        target_reduction_type: str = "min",
        initial_temperature: float = 1.0,
        use_gpu: UseGPUArg = False,
        scaler: ScalerArg = None,
        action_scaler: ActionScalerArg = None,
        reward_scaler: RewardScalerArg = None,
        impl: Optional[SACImpl] = None,
        **kwargs: Any
    ):
        super().__init__(
            batch_size=batch_size,
            n_frames=n_frames,
            n_steps=n_steps,
            gamma=gamma,
            scaler=scaler,
            action_scaler=action_scaler,
            reward_scaler=reward_scaler,
            kwargs=kwargs,
        )
        self._actor_learning_rate = actor_learning_rate
        self._critic_learning_rate = critic_learning_rate
        self._temp_learning_rate = temp_learning_rate
        self._actor_optim_factory = actor_optim_factory
        self._critic_optim_factory = critic_optim_factory
        self._temp_optim_factory = temp_optim_factory
        self._actor_encoder_factory = check_encoder(actor_encoder_factory)
        self._critic_encoder_factory = check_encoder(critic_encoder_factory)
        self._q_func_factory = check_q_func(q_func_factory)
        self._tau = tau
        self._n_critics = n_critics
        self._target_reduction_type = target_reduction_type
        self._initial_temperature = initial_temperature
        self._use_gpu = check_use_gpu(use_gpu)
        self._impl = impl

    def _create_impl(
        self, observation_shape: Sequence[int], action_size: int
    ) -> None:
        self._impl = SACImpl(
            observation_shape=observation_shape,
            action_size=action_size,
            actor_learning_rate=self._actor_learning_rate,
            critic_learning_rate=self._critic_learning_rate,
            temp_learning_rate=self._temp_learning_rate,
            actor_optim_factory=self._actor_optim_factory,
            critic_optim_factory=self._critic_optim_factory,
            temp_optim_factory=self._temp_optim_factory,
            actor_encoder_factory=self._actor_encoder_factory,
            critic_encoder_factory=self._critic_encoder_factory,
            q_func_factory=self._q_func_factory,
            gamma=self._gamma,
            tau=self._tau,
            n_critics=self._n_critics,
            target_reduction_type=self._target_reduction_type,
            initial_temperature=self._initial_temperature,
            use_gpu=self._use_gpu,
            scaler=self._scaler,
            action_scaler=self._action_scaler,
            reward_scaler=self._reward_scaler,
        )
        self._impl.build()

    def _update(self, batch: TransitionMiniBatch) -> Dict[str, float]:
        assert self._impl is not None, IMPL_NOT_INITIALIZED_ERROR

        metrics = {}

        # Lagrangian parameter update for the SAC temperature
        if self._temp_learning_rate > 0:
            temp_loss, temp = self._impl.update_temp(batch)
            metrics.update({"temp_loss": temp_loss, "temp": temp})

        critic_loss = self._impl.update_critic(batch)
        metrics.update({"critic_loss": critic_loss})

        actor_loss = self._impl.update_actor(batch)
        metrics.update({"actor_loss": actor_loss})

        self._impl.update_critic_target()
        self._impl.update_actor_target()

        return metrics

    def get_action_type(self) -> ActionSpace:
        return ActionSpace.CONTINUOUS
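
# Illustrative usage sketch (not part of the library source): it shows how
# the SAC class above is typically trained offline, assuming the AlgoBase
# ``fit``/``predict`` interface and the ``d3rlpy.datasets.get_pendulum``
# helper; the dataset choice, hyperparameters, and epoch count below are
# placeholders rather than recommended settings.
def _example_train_sac() -> None:
    from d3rlpy.datasets import get_pendulum

    # small continuous-control dataset bundled with d3rlpy
    dataset, _env = get_pendulum()

    sac = SAC(
        actor_learning_rate=3e-4,
        critic_learning_rate=3e-4,
        temp_learning_rate=3e-4,
        batch_size=256,
        use_gpu=False,
    )

    # offline training on the logged transitions
    sac.fit(dataset.episodes, n_epochs=1)

    # deterministic action for the first observation in the dataset
    _action = sac.predict(dataset.observations[:1])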
class DiscreteSAC(AlgoBase):
    r"""Soft Actor-Critic algorithm for discrete action-space.

    This discrete version of SAC is built on the continuous version of SAC
    with additional modifications.

    The target state-value is calculated as the expectation over all
    action-values.

    .. math::

        V(s_t) = \pi_\phi (s_t)^T [Q_\theta(s_t) - \alpha \log (\pi_\phi (s_t))]

    Similarly, the objective function for the temperature parameter is as
    follows.

    .. math::

        J(\alpha) = \pi_\phi (s_t)^T [-\alpha (\log(\pi_\phi (s_t)) + H)]

    Finally, the objective function for the policy function is as follows.

    .. math::

        J(\phi) = \mathbb{E}_{s_t \sim D}
            [\pi_\phi(s_t)^T [\alpha \log(\pi_\phi(s_t)) - Q_\theta(s_t)]]

    References:
        * `Christodoulou, Soft Actor-Critic for Discrete Action Settings.
          <https://arxiv.org/abs/1910.07207>`_

    Args:
        actor_learning_rate (float): learning rate for policy function.
        critic_learning_rate (float): learning rate for Q functions.
        temp_learning_rate (float): learning rate for temperature parameter.
        actor_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
            optimizer factory for the actor.
        critic_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
            optimizer factory for the critic.
        temp_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
            optimizer factory for the temperature.
        actor_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
            encoder factory for the actor.
        critic_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
            encoder factory for the critic.
        q_func_factory (d3rlpy.models.q_functions.QFunctionFactory or str):
            Q function factory.
        batch_size (int): mini-batch size.
        n_frames (int): the number of frames to stack for image observation.
        n_steps (int): N-step TD calculation.
        gamma (float): discount factor.
        n_critics (int): the number of Q functions for ensemble.
        initial_temperature (float): initial temperature value.
        target_update_interval (int): interval to synchronize the target
            network.
        use_gpu (bool, int or d3rlpy.gpu.Device): flag to use GPU, device ID
            or device.
        scaler (d3rlpy.preprocessing.Scaler or str): preprocessor.
            The available options are ``['pixel', 'min_max', 'standard']``.
        reward_scaler (d3rlpy.preprocessing.RewardScaler or str):
            reward preprocessor. The available options are
            ``['clip', 'min_max', 'standard']``.
        impl (d3rlpy.algos.torch.sac_impl.DiscreteSACImpl):
            algorithm implementation.

    """

    _actor_learning_rate: float
    _critic_learning_rate: float
    _temp_learning_rate: float
    _actor_optim_factory: OptimizerFactory
    _critic_optim_factory: OptimizerFactory
    _temp_optim_factory: OptimizerFactory
    _actor_encoder_factory: EncoderFactory
    _critic_encoder_factory: EncoderFactory
    _q_func_factory: QFunctionFactory
    _n_critics: int
    _initial_temperature: float
    _target_update_interval: int
    _use_gpu: Optional[Device]
    _impl: Optional[DiscreteSACImpl]

    def __init__(
        self,
        *,
        actor_learning_rate: float = 3e-4,
        critic_learning_rate: float = 3e-4,
        temp_learning_rate: float = 3e-4,
        actor_optim_factory: OptimizerFactory = AdamFactory(eps=1e-4),
        critic_optim_factory: OptimizerFactory = AdamFactory(eps=1e-4),
        temp_optim_factory: OptimizerFactory = AdamFactory(eps=1e-4),
        actor_encoder_factory: EncoderArg = "default",
        critic_encoder_factory: EncoderArg = "default",
        q_func_factory: QFuncArg = "mean",
        batch_size: int = 64,
        n_frames: int = 1,
        n_steps: int = 1,
        gamma: float = 0.99,
        n_critics: int = 2,
        initial_temperature: float = 1.0,
        target_update_interval: int = 8000,
        use_gpu: UseGPUArg = False,
        scaler: ScalerArg = None,
        reward_scaler: RewardScalerArg = None,
        impl: Optional[DiscreteSACImpl] = None,
        **kwargs: Any
    ):
        super().__init__(
            batch_size=batch_size,
            n_frames=n_frames,
            n_steps=n_steps,
            gamma=gamma,
            scaler=scaler,
            action_scaler=None,
            reward_scaler=reward_scaler,
            kwargs=kwargs,
        )
        self._actor_learning_rate = actor_learning_rate
        self._critic_learning_rate = critic_learning_rate
        self._temp_learning_rate = temp_learning_rate
        self._actor_optim_factory = actor_optim_factory
        self._critic_optim_factory = critic_optim_factory
        self._temp_optim_factory = temp_optim_factory
        self._actor_encoder_factory = check_encoder(actor_encoder_factory)
        self._critic_encoder_factory = check_encoder(critic_encoder_factory)
        self._q_func_factory = check_q_func(q_func_factory)
        self._n_critics = n_critics
        self._initial_temperature = initial_temperature
        self._target_update_interval = target_update_interval
        self._use_gpu = check_use_gpu(use_gpu)
        self._impl = impl

    def _create_impl(
        self, observation_shape: Sequence[int], action_size: int
    ) -> None:
        self._impl = DiscreteSACImpl(
            observation_shape=observation_shape,
            action_size=action_size,
            actor_learning_rate=self._actor_learning_rate,
            critic_learning_rate=self._critic_learning_rate,
            temp_learning_rate=self._temp_learning_rate,
            actor_optim_factory=self._actor_optim_factory,
            critic_optim_factory=self._critic_optim_factory,
            temp_optim_factory=self._temp_optim_factory,
            actor_encoder_factory=self._actor_encoder_factory,
            critic_encoder_factory=self._critic_encoder_factory,
            q_func_factory=self._q_func_factory,
            gamma=self._gamma,
            n_critics=self._n_critics,
            initial_temperature=self._initial_temperature,
            use_gpu=self._use_gpu,
            scaler=self._scaler,
            reward_scaler=self._reward_scaler,
        )
        self._impl.build()

    def _update(self, batch: TransitionMiniBatch) -> Dict[str, float]:
        assert self._impl is not None, IMPL_NOT_INITIALIZED_ERROR

        metrics = {}

        # Lagrangian parameter update for the SAC temperature
        if self._temp_learning_rate > 0:
            temp_loss, temp = self._impl.update_temp(batch)
            metrics.update({"temp_loss": temp_loss, "temp": temp})

        critic_loss = self._impl.update_critic(batch)
        metrics.update({"critic_loss": critic_loss})

        actor_loss = self._impl.update_actor(batch)
        metrics.update({"actor_loss": actor_loss})

        if self._grad_step % self._target_update_interval == 0:
            self._impl.update_target()

        return metrics

    def get_action_type(self) -> ActionSpace:
        return ActionSpace.DISCRETE
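
# Illustrative sketch (not part of the library source) of the discrete soft
# state-value used in the docstring above,
# V(s) = pi(s)^T [Q(s) - alpha * log pi(s)], written with plain torch tensors.
# The batch size, action count, and temperature below are arbitrary
# placeholders, not values used by DiscreteSACImpl.
def _example_discrete_soft_value() -> None:
    import torch
    import torch.nn.functional as F

    batch_size, action_size, alpha = 4, 6, 0.5

    logits = torch.randn(batch_size, action_size)  # policy logits pi_phi(s)
    q_values = torch.randn(batch_size, action_size)  # Q_theta(s, a) per action

    log_probs = F.log_softmax(logits, dim=1)
    probs = log_probs.exp()

    # expectation over all actions instead of sampling a_{t+1}
    soft_value = (probs * (q_values - alpha * log_probs)).sum(dim=1)
    assert soft_value.shape == (batch_size,)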