from typing import Any, Dict, Optional, Sequence
from ..argument_utility import (
ActionScalerArg,
EncoderArg,
QFuncArg,
RewardScalerArg,
ScalerArg,
UseGPUArg,
check_encoder,
check_q_func,
check_use_gpu,
)
from ..constants import IMPL_NOT_INITIALIZED_ERROR, ActionSpace
from ..dataset import TransitionMiniBatch
from ..gpu import Device
from ..models.encoders import EncoderFactory
from ..models.optimizers import AdamFactory, OptimizerFactory
from ..models.q_functions import QFunctionFactory
from .base import AlgoBase
from .dqn import DoubleDQN
from .torch.cql_impl import CQLImpl, DiscreteCQLImpl
class CQL(AlgoBase):
r"""Conservative Q-Learning algorithm.
    CQL is a SAC-based, data-driven deep reinforcement learning algorithm that
    achieves state-of-the-art performance on offline RL problems.

    CQL mitigates overestimation error by minimizing action-values under the
    current policy while maximizing values under the data distribution, which
    avoids underestimating in-distribution actions.
.. math::
L(\theta_i) = \alpha\, \mathbb{E}_{s_t \sim D}
\left[\log{\sum_a \exp{Q_{\theta_i}(s_t, a)}}
             - \mathbb{E}_{a \sim D} \big[Q_{\theta_i}(s_t, a)\big] - \tau\right]
+ L_\mathrm{SAC}(\theta_i)
    where :math:`\alpha` is adjusted automatically via Lagrangian dual gradient
    descent and :math:`\tau` is a threshold value.
    If the action-value difference is smaller than :math:`\tau`, :math:`\alpha`
    becomes smaller.
    Otherwise, :math:`\alpha` becomes larger to penalize action-values more
    aggressively.
In continuous control, :math:`\log{\sum_a \exp{Q(s, a)}}` is computed as
follows.
.. math::
\log{\sum_a \exp{Q(s, a)}} \approx \log{\left(
\frac{1}{2N} \sum_{a_i \sim \text{Unif}(a)}^N
\left[\frac{\exp{Q(s, a_i)}}{\text{Unif}(a)}\right]
+ \frac{1}{2N} \sum_{a_i \sim \pi_\phi(a|s)}^N
\left[\frac{\exp{Q(s, a_i)}}{\pi_\phi(a_i|s)}\right]\right)}
where :math:`N` is the number of sampled actions.
    The rest of the optimization is exactly the same as :class:`d3rlpy.algos.SAC`.
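
    The sketch below illustrates this importance-sampled estimate with PyTorch.
    It is not the actual :class:`d3rlpy.algos.torch.cql_impl.CQLImpl` code, and
    the ``q_func``/``policy`` interfaces are assumptions made for illustration:

    .. code-block:: python

        import torch

        def approximate_logsumexp(q_func, policy, observation, n_samples, action_size):
            # observation is assumed to have shape (1, observation_dim)
            obs = observation.expand(n_samples, -1)

            # N actions from a uniform proposal over [-1, 1]^action_size
            random_actions = torch.rand(n_samples, action_size) * 2.0 - 1.0
            log_unif = -action_size * torch.log(torch.tensor(2.0))

            # N actions and their log-densities from the current policy
            policy_actions, log_pi = policy(obs)

            # importance weights exp(Q)/density, kept in log-space for stability
            random_terms = q_func(obs, random_actions).squeeze() - log_unif
            policy_terms = q_func(obs, policy_actions).squeeze() - log_pi.squeeze()

            # log((1 / 2N) * sum of all 2N importance-weighted terms)
            terms = torch.cat([random_terms, policy_terms])
            return torch.logsumexp(terms, dim=0) - torch.log(torch.tensor(2.0 * n_samples))
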
References:
* `Kumar et al., Conservative Q-Learning for Offline Reinforcement
Learning. <https://arxiv.org/abs/2006.04779>`_
Args:
actor_learning_rate (float): learning rate for policy function.
critic_learning_rate (float): learning rate for Q functions.
temp_learning_rate (float):
learning rate for temperature parameter of SAC.
alpha_learning_rate (float): learning rate for :math:`\alpha`.
actor_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the actor.
critic_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the critic.
temp_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for the temperature.
alpha_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory for :math:`\alpha`.
actor_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the actor.
critic_encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory for the critic.
q_func_factory (d3rlpy.models.q_functions.QFunctionFactory or str):
Q function factory.
batch_size (int): mini-batch size.
n_frames (int): the number of frames to stack for image observation.
n_steps (int): N-step TD calculation.
gamma (float): discount factor.
        tau (float): target network synchronization coefficient.
n_critics (int): the number of Q functions for ensemble.
target_reduction_type (str): ensemble reduction method at target value
estimation. The available options are
``['min', 'max', 'mean', 'mix', 'none']``.
initial_temperature (float): initial temperature value.
initial_alpha (float): initial :math:`\alpha` value.
alpha_threshold (float): threshold value described as :math:`\tau`.
conservative_weight (float): constant weight to scale conservative loss.
n_action_samples (int): the number of sampled actions to compute
:math:`\log{\sum_a \exp{Q(s, a)}}`.
soft_q_backup (bool): flag to use SAC-style backup.
use_gpu (bool, int or d3rlpy.gpu.Device):
flag to use GPU, device ID or device.
scaler (d3rlpy.preprocessing.Scaler or str): preprocessor.
            The available options are ``['pixel', 'min_max', 'standard']``.
action_scaler (d3rlpy.preprocessing.ActionScaler or str):
action preprocessor. The available options are ``['min_max']``.
reward_scaler (d3rlpy.preprocessing.RewardScaler or str):
reward preprocessor. The available options are
``['clip', 'min_max', 'standard']``.
impl (d3rlpy.algos.torch.cql_impl.CQLImpl): algorithm implementation.
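
    A minimal usage sketch, assuming the dataset helpers and ``fit`` signature
    of the d3rlpy 1.x series:

    .. code-block:: python

        import d3rlpy

        # offline pendulum dataset bundled with d3rlpy
        dataset, env = d3rlpy.datasets.get_pendulum()

        cql = d3rlpy.algos.CQL(use_gpu=False)
        cql.fit(dataset, n_epochs=1)

        # greedy actions for the first few observations in the dataset
        actions = cql.predict(dataset.observations[:10])
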
"""
_actor_learning_rate: float
_critic_learning_rate: float
_temp_learning_rate: float
_alpha_learning_rate: float
_actor_optim_factory: OptimizerFactory
_critic_optim_factory: OptimizerFactory
_temp_optim_factory: OptimizerFactory
_alpha_optim_factory: OptimizerFactory
_actor_encoder_factory: EncoderFactory
_critic_encoder_factory: EncoderFactory
_q_func_factory: QFunctionFactory
_tau: float
_n_critics: int
_target_reduction_type: str
_initial_temperature: float
_initial_alpha: float
_alpha_threshold: float
_conservative_weight: float
_n_action_samples: int
_soft_q_backup: bool
_use_gpu: Optional[Device]
_impl: Optional[CQLImpl]
def __init__(
self,
*,
actor_learning_rate: float = 1e-4,
critic_learning_rate: float = 3e-4,
temp_learning_rate: float = 1e-4,
alpha_learning_rate: float = 1e-4,
actor_optim_factory: OptimizerFactory = AdamFactory(),
critic_optim_factory: OptimizerFactory = AdamFactory(),
temp_optim_factory: OptimizerFactory = AdamFactory(),
alpha_optim_factory: OptimizerFactory = AdamFactory(),
actor_encoder_factory: EncoderArg = "default",
critic_encoder_factory: EncoderArg = "default",
q_func_factory: QFuncArg = "mean",
batch_size: int = 256,
n_frames: int = 1,
n_steps: int = 1,
gamma: float = 0.99,
tau: float = 0.005,
n_critics: int = 2,
target_reduction_type: str = "min",
initial_temperature: float = 1.0,
initial_alpha: float = 1.0,
alpha_threshold: float = 10.0,
conservative_weight: float = 5.0,
n_action_samples: int = 10,
soft_q_backup: bool = False,
use_gpu: UseGPUArg = False,
scaler: ScalerArg = None,
action_scaler: ActionScalerArg = None,
reward_scaler: RewardScalerArg = None,
impl: Optional[CQLImpl] = None,
**kwargs: Any,
):
super().__init__(
batch_size=batch_size,
n_frames=n_frames,
n_steps=n_steps,
gamma=gamma,
scaler=scaler,
action_scaler=action_scaler,
reward_scaler=reward_scaler,
kwargs=kwargs,
)
self._actor_learning_rate = actor_learning_rate
self._critic_learning_rate = critic_learning_rate
self._temp_learning_rate = temp_learning_rate
self._alpha_learning_rate = alpha_learning_rate
self._actor_optim_factory = actor_optim_factory
self._critic_optim_factory = critic_optim_factory
self._temp_optim_factory = temp_optim_factory
self._alpha_optim_factory = alpha_optim_factory
self._actor_encoder_factory = check_encoder(actor_encoder_factory)
self._critic_encoder_factory = check_encoder(critic_encoder_factory)
self._q_func_factory = check_q_func(q_func_factory)
self._tau = tau
self._n_critics = n_critics
self._target_reduction_type = target_reduction_type
self._initial_temperature = initial_temperature
self._initial_alpha = initial_alpha
self._alpha_threshold = alpha_threshold
self._conservative_weight = conservative_weight
self._n_action_samples = n_action_samples
self._soft_q_backup = soft_q_backup
self._use_gpu = check_use_gpu(use_gpu)
self._impl = impl
def _create_impl(
self, observation_shape: Sequence[int], action_size: int
) -> None:
self._impl = CQLImpl(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=self._actor_learning_rate,
critic_learning_rate=self._critic_learning_rate,
temp_learning_rate=self._temp_learning_rate,
alpha_learning_rate=self._alpha_learning_rate,
actor_optim_factory=self._actor_optim_factory,
critic_optim_factory=self._critic_optim_factory,
temp_optim_factory=self._temp_optim_factory,
alpha_optim_factory=self._alpha_optim_factory,
actor_encoder_factory=self._actor_encoder_factory,
critic_encoder_factory=self._critic_encoder_factory,
q_func_factory=self._q_func_factory,
gamma=self._gamma,
tau=self._tau,
n_critics=self._n_critics,
target_reduction_type=self._target_reduction_type,
initial_temperature=self._initial_temperature,
initial_alpha=self._initial_alpha,
alpha_threshold=self._alpha_threshold,
conservative_weight=self._conservative_weight,
n_action_samples=self._n_action_samples,
soft_q_backup=self._soft_q_backup,
use_gpu=self._use_gpu,
scaler=self._scaler,
action_scaler=self._action_scaler,
reward_scaler=self._reward_scaler,
)
self._impl.build()
def _update(self, batch: TransitionMiniBatch) -> Dict[str, float]:
assert self._impl is not None, IMPL_NOT_INITIALIZED_ERROR
metrics = {}
# lagrangian parameter update for SAC temperature
if self._temp_learning_rate > 0:
temp_loss, temp = self._impl.update_temp(batch)
metrics.update({"temp_loss": temp_loss, "temp": temp})
# lagrangian parameter update for conservative loss weight
if self._alpha_learning_rate > 0:
alpha_loss, alpha = self._impl.update_alpha(batch)
metrics.update({"alpha_loss": alpha_loss, "alpha": alpha})
critic_loss = self._impl.update_critic(batch)
metrics.update({"critic_loss": critic_loss})
actor_loss = self._impl.update_actor(batch)
metrics.update({"actor_loss": actor_loss})
self._impl.update_critic_target()
self._impl.update_actor_target()
return metrics
    def get_action_type(self) -> ActionSpace:
return ActionSpace.CONTINUOUS
class DiscreteCQL(DoubleDQN):
r"""Discrete version of Conservative Q-Learning algorithm.
    The discrete version of CQL is a DoubleDQN-based, data-driven deep
    reinforcement learning algorithm (the original paper uses DQN), which
    achieves state-of-the-art performance on offline RL problems.

    CQL mitigates overestimation error by minimizing action-values under the
    current policy while maximizing values under the data distribution, which
    avoids underestimating in-distribution actions.
.. math::
L(\theta) = \alpha \mathbb{E}_{s_t \sim D}
[\log{\sum_a \exp{Q_{\theta}(s_t, a)}}
             - \mathbb{E}_{a \sim D} [Q_{\theta}(s_t, a)]]
+ L_{DoubleDQN}(\theta)
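
    Because the Q-function outputs one value per action in the discrete case,
    the conservative term can be computed exactly. A minimal sketch of this
    regularizer (illustrative only; not the actual
    :class:`d3rlpy.algos.torch.cql_impl.DiscreteCQLImpl` code):

    .. code-block:: python

        import torch

        def conservative_loss(q_values, actions, alpha=1.0):
            # q_values: (batch_size, n_actions) tensor of Q(s_t, a) for all actions
            # actions: (batch_size,) tensor of dataset actions a_t
            logsumexp = torch.logsumexp(q_values, dim=1)
            data_values = q_values.gather(1, actions.long().view(-1, 1)).squeeze(1)
            return alpha * (logsumexp - data_values).mean()

    This regularizer is added on top of the DoubleDQN TD loss, matching the
    equation above.
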
References:
* `Kumar et al., Conservative Q-Learning for Offline Reinforcement
Learning. <https://arxiv.org/abs/2006.04779>`_
Args:
learning_rate (float): learning rate.
optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
optimizer factory.
encoder_factory (d3rlpy.models.encoders.EncoderFactory or str):
encoder factory.
q_func_factory (d3rlpy.models.q_functions.QFunctionFactory or str):
Q function factory.
batch_size (int): mini-batch size.
n_frames (int): the number of frames to stack for image observation.
n_steps (int): N-step TD calculation.
gamma (float): discount factor.
n_critics (int): the number of Q functions for ensemble.
target_reduction_type (str): ensemble reduction method at target value
estimation. The available options are
``['min', 'max', 'mean', 'mix', 'none']``.
target_update_interval (int): interval to synchronize the target
network.
        alpha (float): the :math:`\alpha` value in the loss above.
use_gpu (bool, int or d3rlpy.gpu.Device):
flag to use GPU, device ID or device.
scaler (d3rlpy.preprocessing.Scaler or str): preprocessor.
            The available options are ``['pixel', 'min_max', 'standard']``.
reward_scaler (d3rlpy.preprocessing.RewardScaler or str):
reward preprocessor. The available options are
``['clip', 'min_max', 'standard']``.
impl (d3rlpy.algos.torch.cql_impl.DiscreteCQLImpl):
algorithm implementation.
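
    Usage mirrors the continuous version; a minimal sketch assuming the same
    d3rlpy 1.x API:

    .. code-block:: python

        import d3rlpy

        # discrete-action cartpole dataset bundled with d3rlpy
        dataset, env = d3rlpy.datasets.get_cartpole()

        cql = d3rlpy.algos.DiscreteCQL(use_gpu=False)
        cql.fit(dataset, n_epochs=1)
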
"""
_alpha: float
_impl: Optional[DiscreteCQLImpl]
def __init__(
self,
*,
learning_rate: float = 6.25e-5,
optim_factory: OptimizerFactory = AdamFactory(),
encoder_factory: EncoderArg = "default",
q_func_factory: QFuncArg = "mean",
batch_size: int = 32,
n_frames: int = 1,
n_steps: int = 1,
gamma: float = 0.99,
n_critics: int = 1,
target_reduction_type: str = "min",
target_update_interval: int = 8000,
alpha: float = 1.0,
use_gpu: UseGPUArg = False,
scaler: ScalerArg = None,
reward_scaler: RewardScalerArg = None,
impl: Optional[DiscreteCQLImpl] = None,
**kwargs: Any,
):
super().__init__(
learning_rate=learning_rate,
optim_factory=optim_factory,
encoder_factory=encoder_factory,
q_func_factory=q_func_factory,
batch_size=batch_size,
n_frames=n_frames,
n_steps=n_steps,
gamma=gamma,
n_critics=n_critics,
target_reduction_type=target_reduction_type,
target_update_interval=target_update_interval,
use_gpu=use_gpu,
scaler=scaler,
reward_scaler=reward_scaler,
impl=impl,
**kwargs,
)
self._alpha = alpha
def _create_impl(
self, observation_shape: Sequence[int], action_size: int
) -> None:
self._impl = DiscreteCQLImpl(
observation_shape=observation_shape,
action_size=action_size,
learning_rate=self._learning_rate,
optim_factory=self._optim_factory,
encoder_factory=self._encoder_factory,
q_func_factory=self._q_func_factory,
gamma=self._gamma,
n_critics=self._n_critics,
target_reduction_type=self._target_reduction_type,
alpha=self._alpha,
use_gpu=self._use_gpu,
scaler=self._scaler,
reward_scaler=self._reward_scaler,
)
self._impl.build()