import dataclasses
from ...base import DeviceArg, LearnableConfig, register_learnable
from ...constants import ActionSpace
from ...models.builders import (
create_continuous_q_function,
create_deterministic_policy,
)
from ...models.encoders import EncoderFactory, make_encoder_field
from ...models.optimizers import OptimizerFactory, make_optimizer_field
from ...models.q_functions import QFunctionFactory, make_q_func_field
from ...types import Shape
from .base import QLearningAlgoBase
from .torch.ddpg_impl import DDPGModules
from .torch.td3_impl import TD3Impl
__all__ = ["TD3Config", "TD3"]


@dataclasses.dataclass()
class TD3Config(LearnableConfig):
r"""Config of Twin Delayed Deep Deterministic Policy Gradients algorithm.
TD3 is an improved DDPG-based algorithm.
Major differences from DDPG are as follows.
* TD3 has twin Q functions to reduce overestimation bias at TD learning.
The number of Q functions can be designated by `n_critics`.
* TD3 adds noise to target value estimation to avoid overfitting with the
deterministic policy.
* TD3 updates the policy function after several Q function updates in order
to reduce variance of action-value estimation. The interval of the policy
function update can be designated by `update_actor_interval`.
.. math::
L(\theta_i) = \mathbb{E}_{s_t, a_t, r_{t+1}, s_{t+1} \sim D} [(r_{t+1}
+ \gamma \min_j Q_{\theta_j'}(s_{t+1}, \pi_{\phi'}(s_{t+1}) +
\epsilon) - Q_{\theta_i}(s_t, a_t))^2]
.. math::
J(\phi) = \mathbb{E}_{s_t \sim D}
[\min_i Q_{\theta_i}(s_t, \pi_\phi(s_t))]
where :math:`\epsilon \sim clip (N(0, \sigma), -c, c)`
References:
* `Fujimoto et al., Addressing Function Approximation Error in
Actor-Critic Methods. <https://arxiv.org/abs/1802.09477>`_
Args:
observation_scaler (d3rlpy.preprocessing.ObservationScaler):
Observation preprocessor.
action_scaler (d3rlpy.preprocessing.ActionScaler): Action preprocessor.
reward_scaler (d3rlpy.preprocessing.RewardScaler): Reward preprocessor.
actor_learning_rate (float): Learning rate for a policy function.
critic_learning_rate (float): Learning rate for Q functions.
actor_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
Optimizer factory for the actor.
critic_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
Optimizer factory for the critic.
actor_encoder_factory (d3rlpy.models.encoders.EncoderFactory):
Encoder factory for the actor.
critic_encoder_factory (d3rlpy.models.encoders.EncoderFactory):
Encoder factory for the critic.
q_func_factory (d3rlpy.models.q_functions.QFunctionFactory):
Q function factory.
batch_size (int): Mini-batch size.
gamma (float): Discount factor.
        tau (float): Target network synchronization coefficient.
n_critics (int): Number of Q functions for ensemble.
target_smoothing_sigma (float): Standard deviation for target noise.
target_smoothing_clip (float): Clipping range for target noise.
        update_actor_interval (int): Interval between policy function updates,
            described as the `delayed policy update` in the paper.
"""
actor_learning_rate: float = 3e-4
critic_learning_rate: float = 3e-4
actor_optim_factory: OptimizerFactory = make_optimizer_field()
critic_optim_factory: OptimizerFactory = make_optimizer_field()
actor_encoder_factory: EncoderFactory = make_encoder_field()
critic_encoder_factory: EncoderFactory = make_encoder_field()
q_func_factory: QFunctionFactory = make_q_func_field()
batch_size: int = 256
gamma: float = 0.99
tau: float = 0.005
n_critics: int = 2
target_smoothing_sigma: float = 0.2
target_smoothing_clip: float = 0.5
update_actor_interval: int = 2

    def create(self, device: DeviceArg = False) -> "TD3":
return TD3(self, device)
@staticmethod
def get_type() -> str:
return "td3"


class TD3(QLearningAlgoBase[TD3Impl, TD3Config]):
def inner_create_impl(
self, observation_shape: Shape, action_size: int
) -> None:
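        # Online deterministic policy (actor) used for action selection.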
policy = create_deterministic_policy(
observation_shape,
action_size,
self._config.actor_encoder_factory,
device=self._device,
)
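        # Separate target policy used when computing smoothed target values.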
targ_policy = create_deterministic_policy(
observation_shape,
action_size,
self._config.actor_encoder_factory,
device=self._device,
)
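        # Online Q-function ensemble (n_critics heads) and its forwarder.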
q_funcs, q_func_forwarder = create_continuous_q_function(
observation_shape,
action_size,
self._config.critic_encoder_factory,
self._config.q_func_factory,
n_ensembles=self._config.n_critics,
device=self._device,
)
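        # Target Q-function ensemble, soft-updated toward the online one via tau.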
targ_q_funcs, targ_q_func_forwarder = create_continuous_q_function(
observation_shape,
action_size,
self._config.critic_encoder_factory,
self._config.q_func_factory,
n_ensembles=self._config.n_critics,
device=self._device,
)
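        # Optimizers built by the configured factories for actor and critic parameters.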
actor_optim = self._config.actor_optim_factory.create(
policy.named_modules(), lr=self._config.actor_learning_rate
)
critic_optim = self._config.critic_optim_factory.create(
q_funcs.named_modules(), lr=self._config.critic_learning_rate
)
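        # Bundle networks and optimizers into the DDPG module container reused by TD3.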
modules = DDPGModules(
policy=policy,
targ_policy=targ_policy,
q_funcs=q_funcs,
targ_q_funcs=targ_q_funcs,
actor_optim=actor_optim,
critic_optim=critic_optim,
)
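        # TD3-specific implementation: target policy smoothing and delayed actor updates on top of DDPG.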
self._impl = TD3Impl(
observation_shape=observation_shape,
action_size=action_size,
modules=modules,
q_func_forwarder=q_func_forwarder,
targ_q_func_forwarder=targ_q_func_forwarder,
gamma=self._config.gamma,
tau=self._config.tau,
target_smoothing_sigma=self._config.target_smoothing_sigma,
target_smoothing_clip=self._config.target_smoothing_clip,
update_actor_interval=self._config.update_actor_interval,
device=self._device,
)

    def get_action_type(self) -> ActionSpace:
return ActionSpace.CONTINUOUS


register_learnable(TD3Config)
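

# A minimal usage sketch, assuming d3rlpy's public API; the dataset helper and
# step counts below are illustrative placeholders:
#
#     import d3rlpy
#
#     dataset, env = d3rlpy.datasets.get_pendulum()
#     td3 = TD3Config(
#         batch_size=256,
#         target_smoothing_sigma=0.2,
#         update_actor_interval=2,
#     ).create(device=False)  # device=False selects CPU
#     td3.fit(dataset, n_steps=10000, n_steps_per_epoch=1000)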