TD3 baseline #499

Open · wants to merge 2 commits into main
14 changes: 14 additions & 0 deletions brax/training/agents/td3/__init__.py
@@ -0,0 +1,14 @@
# Copyright 2024 The Brax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

71 changes: 71 additions & 0 deletions brax/training/agents/td3/losses.py
@@ -0,0 +1,71 @@
"""Twin Delayed Deep Deterministic Policy Gradient (TD3) losses.

See: https://arxiv.org/pdf/1802.09477.pdf
"""
from typing import Any

import jax
import jax.numpy as jnp
from brax.training import types
from brax.training.types import Params
from brax.training.types import PRNGKey
from brax.training.agents.td3 import networks as td3_networks

Transition = types.Transition


def make_losses(
    td3_network: td3_networks.TD3Networks,
    reward_scaling: float,
    discounting: float,
    smoothing: float,
    noise_clip: float,
    max_action: float = 1.0,
    bc: bool = False,
    alpha: float = 2.5):
  """Creates the TD3 critic and actor losses."""
  policy_network = td3_network.policy_network
  q_network = td3_network.q_network

  def critic_loss(
      q_params: Params,
      target_q_params: Params,
      target_policy_params: Params,
      normalizer_params: Any,
      transitions: Transition,
      key: PRNGKey) -> jnp.ndarray:
    """Calculates the TD3 critic loss."""
    current_q1_q2 = q_network.apply(normalizer_params, q_params,
                                    transitions.observation,
                                    transitions.action)

    # Target policy smoothing: perturb the target action with clipped noise
    # before evaluating the target critics.
    next_actions = policy_network.apply(normalizer_params,
                                        target_policy_params,
                                        transitions.next_observation)
    smoothing_noise = (jax.random.normal(key, next_actions.shape) *
                       smoothing).clip(-noise_clip, noise_clip)
    next_actions = (next_actions + smoothing_noise).clip(-max_action,
                                                         max_action)

    # Clipped double Q-learning: bootstrap from the minimum of the two target
    # critics to curb overestimation.
    next_q1_q2 = q_network.apply(normalizer_params, target_q_params,
                                 transitions.next_observation, next_actions)
    target_q = jnp.min(next_q1_q2, axis=-1)
    target_q = jax.lax.stop_gradient(
        transitions.reward * reward_scaling +
        transitions.discount * discounting * target_q)

    q_error = current_q1_q2 - jnp.expand_dims(target_q, -1)
    q_loss = 0.5 * jnp.mean(jnp.square(q_error))
    return q_loss

  def actor_loss(
      policy_params: Params,
      q_params: Params,
      normalizer_params: Any,
      transitions: Transition) -> jnp.ndarray:
    """Calculates the TD3 actor loss."""
    new_actions = policy_network.apply(normalizer_params, policy_params,
                                       transitions.observation)
    q_new_actions = q_network.apply(normalizer_params, q_params,
                                    transitions.observation, new_actions)
    # Only the first critic is used for the policy update.
    q_new_actions, _ = jnp.split(q_new_actions, 2, axis=-1)
    # TD3+BC: with bc=True the Q term is rescaled by alpha / mean(|Q|) and a
    # behavioral-cloning MSE term is added; with bc=False, lmbda is 1 and the
    # BC term vanishes.
    lmbda = jax.lax.stop_gradient(
        bc * alpha / jnp.mean(jnp.abs(q_new_actions)) + (1 - bc))
    q_mean = jnp.mean(q_new_actions)
    return -lmbda * q_mean + bc * mean_squared_error(new_actions,
                                                     transitions.action)

  def mean_squared_error(predictions: jnp.ndarray,
                         targets: jnp.ndarray) -> jnp.ndarray:
    return jnp.mean(jnp.square(predictions - targets))

  return critic_loss, actor_loss
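
For reviewers, a minimal sketch of how the returned loss functions could be wired into optax update steps; this is illustrative only and not part of the diff. The placeholder sizes, hyperparameter values, and the critic_step helper are assumptions.

import jax
import optax
from brax.training.agents.td3 import losses as td3_losses
from brax.training.agents.td3 import networks as td3_networks

# Illustrative construction; observation/action sizes are placeholders.
td3_network = td3_networks.make_td3_networks(observation_size=8, action_size=2)
critic_loss, actor_loss = td3_losses.make_losses(
    td3_network, reward_scaling=1.0, discounting=0.99,
    smoothing=0.2, noise_clip=0.5)

critic_optimizer = optax.adam(3e-4)
q_params = td3_network.q_network.init(jax.random.PRNGKey(0))
q_opt_state = critic_optimizer.init(q_params)

def critic_step(q_params, q_opt_state, target_q_params, target_policy_params,
                normalizer_params, transitions, key):
  # Differentiate the critic loss w.r.t. the online Q-parameters only.
  loss, grads = jax.value_and_grad(critic_loss)(
      q_params, target_q_params, target_policy_params, normalizer_params,
      transitions, key)
  updates, q_opt_state = critic_optimizer.update(grads, q_opt_state)
  return optax.apply_updates(q_params, updates), q_opt_state, loss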
88 changes: 88 additions & 0 deletions brax/training/agents/td3/networks.py
@@ -0,0 +1,88 @@
"""TD3 networks."""

from typing import Sequence, Tuple

import jax.numpy as jnp
from brax.training import networks
from brax.training import types
from brax.training.networks import ActivationFn, FeedForwardNetwork, Initializer, MLP
from brax.training.types import PRNGKey
from flax import linen, struct
import jax


@struct.dataclass
class TD3Networks:
  policy_network: networks.FeedForwardNetwork
  q_network: networks.FeedForwardNetwork

def make_inference_fn(td3_networks: TD3Networks):
  """Creates params and inference function for the TD3 agent."""

  def make_policy(params: types.PolicyParams, exploration_noise: float,
                  noise_clip: float) -> types.Policy:

    def policy(observations: types.Observation,
               key_noise: PRNGKey) -> Tuple[types.Action, types.Extra]:
      actions = td3_networks.policy_network.apply(*params, observations)
      # Gaussian exploration noise, clipped before being added to the
      # deterministic action.
      noise = (jax.random.normal(key_noise, actions.shape) *
               exploration_noise).clip(-noise_clip, noise_clip)
      return actions + noise, {}

    return policy

  return make_policy


def make_policy_network(
    param_size: int,
    obs_size: int,
    preprocess_observations_fn: types.PreprocessObservationFn = types
    .identity_observation_preprocessor,
    hidden_layer_sizes: Sequence[int] = (256, 256),
    activation: ActivationFn = linen.relu,
    kernel_init: Initializer = jax.nn.initializers.lecun_uniform(),
    layer_norm: bool = False) -> FeedForwardNetwork:
  """Creates a deterministic policy network with tanh-squashed outputs."""
  policy_module = MLP(
      layer_sizes=list(hidden_layer_sizes) + [param_size],
      activation=activation,
      kernel_init=kernel_init,
      layer_norm=layer_norm)

  def apply(processor_params, policy_params, obs):
    obs = preprocess_observations_fn(obs, processor_params)
    raw_actions = policy_module.apply(policy_params, obs)
    # Squash the raw outputs into the [-1, 1] action range.
    return linen.tanh(raw_actions)

  dummy_obs = jnp.zeros((1, obs_size))
  return FeedForwardNetwork(
      init=lambda key: policy_module.init(key, dummy_obs), apply=apply)


def make_td3_networks(
    observation_size: int,
    action_size: int,
    preprocess_observations_fn: types.PreprocessObservationFn = types
    .identity_observation_preprocessor,
    hidden_layer_sizes: Sequence[int] = (256, 256),
    activation: networks.ActivationFn = linen.relu) -> TD3Networks:
  """Makes TD3 policy and twin-critic networks."""
  policy_network = make_policy_network(
      action_size,
      observation_size,
      preprocess_observations_fn=preprocess_observations_fn,
      hidden_layer_sizes=hidden_layer_sizes,
      activation=activation)

  # make_q_network builds the twin critics; their values are stacked on the
  # last axis, which the losses reduce with jnp.min.
  q_network = networks.make_q_network(
      observation_size,
      action_size,
      preprocess_observations_fn=preprocess_observations_fn,
      hidden_layer_sizes=hidden_layer_sizes,
      activation=activation)

  return TD3Networks(
      policy_network=policy_network,
      q_network=q_network)
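
A quick usage sketch for the inference path; the sizes, noise settings, and passing None for the (unused) normalizer params with the default identity preprocessor are assumptions for illustration, not part of the diff.

import jax
import jax.numpy as jnp

obs_size, action_size = 8, 2  # placeholder sizes
td3_network = make_td3_networks(obs_size, action_size)
make_policy = make_inference_fn(td3_network)

key_init, key_act = jax.random.split(jax.random.PRNGKey(0))
policy_params = td3_network.policy_network.init(key_init)
# With the default identity preprocessor the normalizer params are unused.
policy = make_policy((None, policy_params), exploration_noise=0.1,
                     noise_clip=0.5)
actions, _ = policy(jnp.zeros((1, obs_size)), key_act)  # shape (1, action_size)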


