ASAC.py
from typing import Optional
import numpy as np
import torch
from BaseAgent import BaseAgent, get_new_params
from network_monitor import NetworkMonitorCallback, create_monitor_for_agent
from utils import polyak
from Architectures import DummyActor, make_gaussian_actor, make_mlp, make_sac_critic_mlp
# TODO: There is a big question about how to correctly specify different architectures. How do we allow for shared backbones?
class ASAC(BaseAgent):
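    """Soft Actor-Critic with reward centering (average-reward style updates).

    Rewards are centered by a learned reward-rate estimate ``rho`` and optionally
    penalized on resets, and the critic is regressed onto the resulting
    differential soft Bellman target. ``architecture`` must be a list of two
    constructors, [Actor, Critic].
    """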
def __init__(self,
*args,
alpha: float = 1.0,
reward_rate_learning_rate: float = 1e-3,
reset_penalty_learning_rate: float = 1e-3,
use_target_network: bool = False,
                 target_update_interval: Optional[int] = None,
polyak_tau: float = 1.0,
actor_learning_rate: Optional[float] = None,
                 architecture_kwargs: list = [],  # one kwargs dict per entry in `architecture`, i.e. [actor_kwargs, critic_kwargs]
**kwargs,
):
super().__init__(*args, **kwargs)
self.kwargs = get_new_params(self, locals())
self.algo_name = 'ASAC'
self.alpha = alpha
self.reward_rate_learning_rate = reward_rate_learning_rate
self.reset_penalty_learning_rate = reset_penalty_learning_rate
self.use_target_network = use_target_network
self.target_update_interval = target_update_interval
self.polyak_tau = polyak_tau
self.actor_learning_rate = actor_learning_rate if actor_learning_rate is not None else self.learning_rate
if isinstance(self.architecture, list):
print("A list of architectures is provided. Unpacking as [Actor, Critic].")
self.actor, self.critic = [arch(**kwargs) for arch, kwargs in zip(self.architecture, architecture_kwargs)]
else:
raise NotImplementedError("SAC requires a list of architectures [Actor, Critic].")
# Ensure the actor network forward method has a deterministic kwarg:
# assert isinstance(self.actor, DummyActor), "Actor must inherit from DummyActor class"
# assert 'deterministic' in self.actor.forward.__kwargs__, "Actor forward pass must have deterministic kwarg"
self.nA = self.env.action_space.shape[0]
# TODO: Later use state action spaces to check the architecture
# Ensure algo/env are present in logged hparams for dashboard display
self.kwargs["algo_name"] = self.algo_name
self.kwargs["env_str"] = self.env_str
self.log_hparams(self.kwargs)
if self.use_target_network:
# Use a target critic
# TODO: Toggle use of target actor
self.target_critic = self.architecture[1](**architecture_kwargs[1]) # [Actor, Critic]
self.target_critic.load_state_dict(self.critic.state_dict())
# self.target_actor.load_state_dict(self.actor.state_dict())
if target_update_interval is None:
print("WARNING: Target network update interval not specified. Using default interval of 1 step.")
self.target_update_interval = 1
# Alias the "target" with online net if target is not used:
else:
self.target_critic = self.critic
# Raise a warning if update interval is specified:
if target_update_interval is not None:
print("WARNING: Target network update interval specified but target network is not used.")
self.rho = torch.tensor(0.0, requires_grad=True)
self.reset_penalty = torch.tensor(0.0, requires_grad=True)
self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=self.learning_rate)
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.actor_learning_rate)
self.rho_optimizer = torch.optim.Adam([self.rho], lr=self.reward_rate_learning_rate)
self.reset_penalty_optimizer = torch.optim.Adam([self.reset_penalty], lr=self.reset_penalty_learning_rate)
def _on_step(self) -> None:
super()._on_step()
# Periodically update the target network:
if self.use_target_network and self.learn_env_steps % self.target_update_interval == 0:
# Use Polyak averaging as specified:
polyak(self.target_critic, self.critic, self.polyak_tau)
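            # Assumption: utils.polyak blends parameters as
            #   target <- tau * target + (1 - tau) * online
            # (check utils.polyak for the exact convention of tau).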
# TODO: implement multiple nets inside the critic which is min'd over. This can be an architecture "MinQNet".
    def exploration_policy(self, state: np.ndarray) -> np.ndarray:
# Return a stochastic sample from the actor network:
with torch.no_grad():
action, log_prob = self.actor(state, deterministic=False)
return action.cpu().numpy()
    def evaluation_policy(self, state: np.ndarray) -> np.ndarray:
with torch.no_grad():
# Get the greedy action from the actor network:
action, log_prob = self.actor(state, deterministic=True)
return action.cpu().numpy()
def calculate_critic_loss(self, batch):
states, actions, rewards, next_states, dones = batch
# Reward centering:
centered_rewards = rewards - self.rho
# Reset penalty adjustment:
centered_rewards = centered_rewards - self.reset_penalty * dones
dones = dones.float()
curr_q = self.critic(states, actions)
with torch.no_grad():
            # Soft Bellman backup for the expected Q value:
            # In SAC the soft value is V(s') = E_{a'~pi}[Q(s', a') - alpha * log pi(a'|s')]
            # (the logsumexp/integral form is intractable), so it is estimated
            # with a single action sampled from the current policy.
next_actions, next_log_prob = self.actor(next_states, deterministic=False)
next_q = self.target_critic(next_states, next_actions)
# add soft policy contrib:
next_v = next_q - self.alpha * next_log_prob
expected_curr_q = centered_rewards + next_v * (1 - dones)
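            # Putting it together, the regression target is the differential soft Bellman backup:
            #   y = (r - rho - reset_penalty * done) + (1 - done) * (Q_target(s', a') - alpha * log pi(a'|s')),
            # with a' sampled from the current policy.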
# Calculate the q ("critic") loss:
critic_loss = 0.5*torch.nn.functional.mse_loss(curr_q, expected_curr_q)
self.log_history("train/online_q_mean", curr_q.mean().item(), self.learn_env_steps)
self.log_history("train/critic_loss", critic_loss.item(), self.learn_env_steps)
        # Reward-rate target: batch mean of reward plus the entropy bonus (-alpha * log pi):
reward_rate_target = torch.mean(rewards - self.alpha * next_log_prob)
reward_rate_loss = 0.5*(self.rho - reward_rate_target).pow(2)
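        # rho is pulled toward the batch-mean entropy-augmented reward, i.e. a running
        # estimate of the average reward rate under the current (soft) policy.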
return critic_loss, reward_rate_loss
def calculate_actor_loss(self, batch):
states, _, _, _, _ = batch
actions, log_prob = self.actor(states, deterministic=False)
q_values = self.critic(states, actions)
# Actor loss is based on minimizing KL for pi \propto exp(Q(s,a)/alpha)
actor_loss = (self.alpha * log_prob - q_values).mean()
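        # Gradients reach the actor through the sampled actions, assuming the actor
        # uses the reparameterization trick (e.g. rsample) in make_gaussian_actor.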
self.log_history("train/actor_loss", actor_loss.item(), self.learn_env_steps)
return actor_loss
def gradient_step(self, grad_step):
batch = self.buffer.sample(self.batch_size)
critic_loss, reward_rate_loss = self.calculate_critic_loss(batch)
# Update reward rate rho
self.rho_optimizer.zero_grad()
        reward_rate_loss.backward(retain_graph=True)  # retain_graph shouldn't be needed if we detach the entropy term
self.rho_optimizer.step()
# Update critic network
self.critic_optimizer.zero_grad()
critic_loss.backward()
torch.nn.utils.clip_grad_norm_(self.critic.parameters(), self.max_grad_norm)
self.critic_optimizer.step()
actor_loss = self.calculate_actor_loss(batch)
# Update actor network, using the new critic
self.actor_optimizer.zero_grad()
actor_loss.backward()
# torch.nn.utils.clip_grad_norm_(self.actor.parameters(), self.max_grad_norm)
self.actor_optimizer.step()
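        # TODO: reset_penalty_optimizer is created in __init__ but never stepped here,
        # so reset_penalty currently stays at its initial value of 0.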
if __name__ == "__main__":
from Logger import WandBLogger, TensorboardLogger
logger = TensorboardLogger('logs/baseline')
import gymnasium as gym
monitor, networks = create_monitor_for_agent(
named_networks=["critic", "actor"],
log_frequency=100, # Log every 100 gradient steps
        track_eigenvalues=True,  # Expensive; enable only when needed
)
callback = NetworkMonitorCallback(monitor, networks)
env = "Pendulum-v1"
agent = ASAC(env,
architecture=[make_gaussian_actor, make_sac_critic_mlp],
                 architecture_kwargs=[{'obs_dim': obs_dim,  # actor takes a state and outputs a mean action
                                       'action_dim': act_dim,
                                       'hidden_dims': [256, 256]},
                                      {'obs_dim': obs_dim,  # critic takes a state-action pair
                                       'action_dim': act_dim,
                                       'hidden_dims': [256, 256]},
],
loggers=(logger,),
learning_rate=0.003,
alpha=0.2,
train_interval=1,
gradient_steps=1,
batch_size=256,
use_target_network=True,
target_update_interval=1,
polyak_tau=0.995,
learning_starts=500,
log_interval=500,
record_eval_video=True,
eval_video_every=5,
network_monitor=callback, # <-- Add monitoring
)
agent.learn(total_timesteps=160_000)