import dataclasses
from typing import Dict
from ...base import DeviceArg, LearnableConfig, register_learnable
from ...constants import IMPL_NOT_INITIALIZED_ERROR, ActionSpace
from ...dataset import Shape
from ...models.builders import (
create_continuous_q_function,
create_non_squashed_normal_policy,
)
from ...models.encoders import EncoderFactory, make_encoder_field
from ...models.optimizers import OptimizerFactory, make_optimizer_field
from ...models.q_functions import QFunctionFactory, make_q_func_field
from ...torch_utility import TorchMiniBatch
from .base import QLearningAlgoBase
from .torch.awac_impl import AWACImpl

__all__ = ["AWACConfig", "AWAC"]


@dataclasses.dataclass()
class AWACConfig(LearnableConfig):
r"""Config of Advantage Weighted Actor-Critic algorithm.
AWAC is a TD3-based actor-critic algorithm that enables efficient
fine-tuning where the policy is trained with offline datasets and is
deployed to online training.
The policy is trained as a supervised regression.
    .. math::

        J(\phi) = \mathbb{E}_{s_t, a_t \sim D}
            [\log \pi_\phi(a_t|s_t)
                \exp(\frac{1}{\lambda} A^\pi (s_t, a_t))]

    where :math:`A^\pi (s_t, a_t) = Q_\theta(s_t, a_t) -
    Q_\theta(s_t, a'_t)` and :math:`a'_t \sim \pi_\phi(\cdot|s_t)`.
    The key difference from AWR is that AWAC uses a Q-function trained via
    TD learning for better sample-efficiency.

References:
* `Nair et al., Accelerating Online Reinforcement Learning with Offline
Datasets. <https://arxiv.org/abs/2006.09359>`_

    Args:
observation_scaler (d3rlpy.preprocessing.ObservationScaler):
Observation preprocessor.
action_scaler (d3rlpy.preprocessing.ActionScaler): Action preprocessor.
reward_scaler (d3rlpy.preprocessing.RewardScaler): Reward preprocessor.
actor_learning_rate (float): Learning rate for policy function.
critic_learning_rate (float): Learning rate for Q functions.
actor_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
Optimizer factory for the actor.
critic_optim_factory (d3rlpy.models.optimizers.OptimizerFactory):
Optimizer factory for the critic.
actor_encoder_factory (d3rlpy.models.encoders.EncoderFactory):
Encoder factory for the actor.
critic_encoder_factory (d3rlpy.models.encoders.EncoderFactory):
Encoder factory for the critic.
q_func_factory (d3rlpy.models.q_functions.QFunctionFactory):
Q function factory.
batch_size (int): Mini-batch size.
gamma (float): Discount factor.
        tau (float): Target network synchronization coefficient.
lam (float): :math:`\lambda` for weight calculation.
n_action_samples (int): Number of sampled actions to calculate
:math:`A^\pi(s_t, a_t)`.
n_critics (int): Number of Q functions for ensemble.
update_actor_interval (int): Interval to update policy function.
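
    Examples:
        A minimal usage sketch. ``dataset`` stands in for a dataset or
        replay buffer you have already loaded, and the exact ``fit()``
        arguments may differ across d3rlpy versions::

            from d3rlpy.algos import AWACConfig

            awac = AWACConfig(batch_size=256, lam=1.0).create(device=False)
            awac.fit(dataset, n_steps=100000)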
"""
actor_learning_rate: float = 3e-4
critic_learning_rate: float = 3e-4
actor_optim_factory: OptimizerFactory = make_optimizer_field()
critic_optim_factory: OptimizerFactory = make_optimizer_field()
actor_encoder_factory: EncoderFactory = make_encoder_field()
critic_encoder_factory: EncoderFactory = make_encoder_field()
q_func_factory: QFunctionFactory = make_q_func_field()
batch_size: int = 1024
gamma: float = 0.99
tau: float = 0.005
lam: float = 1.0
n_action_samples: int = 1
n_critics: int = 2
update_actor_interval: int = 1

    def create(self, device: DeviceArg = False) -> "AWAC":
return AWAC(self, device)

    @staticmethod
def get_type() -> str:
return "awac"


class AWAC(QLearningAlgoBase[AWACImpl, AWACConfig]):
def inner_create_impl(
self, observation_shape: Shape, action_size: int
) -> None:
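        # AWAC's actor is a non-squashed Gaussian policy whose log-std is a
        # learnable, state-independent parameter clipped to [-6.0, 0.0].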
policy = create_non_squashed_normal_policy(
observation_shape,
action_size,
self._config.actor_encoder_factory,
min_logstd=-6.0,
max_logstd=0.0,
use_std_parameter=True,
device=self._device,
)
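        # ensemble of `n_critics` Q-functions trained with TD learning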
q_func = create_continuous_q_function(
observation_shape,
action_size,
self._config.critic_encoder_factory,
self._config.q_func_factory,
n_ensembles=self._config.n_critics,
device=self._device,
)
actor_optim = self._config.actor_optim_factory.create(
policy.parameters(), lr=self._config.actor_learning_rate
)
critic_optim = self._config.critic_optim_factory.create(
q_func.parameters(), lr=self._config.critic_learning_rate
)
self._impl = AWACImpl(
observation_shape=observation_shape,
action_size=action_size,
q_func=q_func,
policy=policy,
actor_optim=actor_optim,
critic_optim=critic_optim,
gamma=self._config.gamma,
tau=self._config.tau,
lam=self._config.lam,
n_action_samples=self._config.n_action_samples,
device=self._device,
)

    def inner_update(self, batch: TorchMiniBatch) -> Dict[str, float]:
assert self._impl is not None, IMPL_NOT_INITIALIZED_ERROR
metrics = {}
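        # the critic is updated at every gradient step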
critic_loss = self._impl.update_critic(batch)
metrics.update({"critic_loss": critic_loss})
# delayed policy update
if self._grad_step % self._config.update_actor_interval == 0:
actor_loss = self._impl.update_actor(batch)
metrics.update({"actor_loss": actor_loss})
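            # target networks are synchronized only on actor-update steps,
            # using the soft-update coefficient `tau`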
self._impl.update_critic_target()
self._impl.update_actor_target()
return metrics

    def get_action_type(self) -> ActionSpace:
return ActionSpace.CONTINUOUS


register_learnable(AWACConfig)