# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""SAC Example.
This is a simple self-contained example of a SAC training script.
It supports state environments like MuJoCo.
The helper functions are coded in the utils.py associated with this script.
"""
from __future__ import annotations
import warnings
import hydra
import numpy as np
import torch
import torch.cuda
import tqdm
from tensordict import TensorDict
from tensordict.nn import CudaGraphModule
from torchrl._utils import compile_with_warmup, timeit
from torchrl.envs.utils import ExplorationType, set_exploration_type
from torchrl.objectives import group_optimizers
from torchrl.record.loggers import generate_exp_name, get_logger
from utils import (
dump_video,
log_metrics,
make_collector,
make_environment,
make_loss_module,
make_replay_buffer,
make_sac_agent,
make_sac_optimizer,
)
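# Allow TF32 (reduced-precision) float32 matmuls on supported GPUs for faster training.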
torch.set_float32_matmul_precision("high")
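# Hydra resolves the configuration from config.yaml next to this script and passes it in as `cfg`.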
@hydra.main(version_base="1.1", config_path="", config_name="config")
def main(cfg: "DictConfig"): # noqa: F821
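    # Resolve the training device: use the configured one, else default to the first GPU if available.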
device = cfg.network.device
if device in ("", None):
if torch.cuda.is_available():
device = torch.device("cuda:0")
else:
device = torch.device("cpu")
device = torch.device(device)
# Create logger
exp_name = generate_exp_name("SAC", cfg.logger.exp_name)
logger = None
if cfg.logger.backend:
logger = get_logger(
logger_type=cfg.logger.backend,
logger_name="sac_logging",
experiment_name=exp_name,
wandb_kwargs={
"mode": cfg.logger.mode,
"config": dict(cfg),
"project": cfg.logger.project_name,
"group": cfg.logger.group_name,
},
)
torch.manual_seed(cfg.env.seed)
np.random.seed(cfg.env.seed)
# Create environments
train_env, eval_env = make_environment(cfg, logger=logger)
# Create agent
model, exploration_policy = make_sac_agent(cfg, train_env, eval_env, device)
# Create SAC loss
loss_module, target_net_updater = make_loss_module(cfg, model)
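    # Choose a torch.compile mode when none is configured: "default" when CUDA graphs are
    # requested (CudaGraphModule takes care of graph capture itself), otherwise "reduce-overhead".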
compile_mode = None
if cfg.compile.compile:
compile_mode = cfg.compile.compile_mode
if compile_mode in ("", None):
if cfg.compile.cudagraphs:
compile_mode = "default"
else:
compile_mode = "reduce-overhead"
# Create off-policy collector
collector = make_collector(
cfg, train_env, exploration_policy, compile_mode=compile_mode
)
# Create replay buffer
replay_buffer = make_replay_buffer(
batch_size=cfg.optim.batch_size,
prb=cfg.replay_buffer.prb,
buffer_size=cfg.replay_buffer.size,
scratch_dir=cfg.replay_buffer.scratch_dir,
device=device,
)
# Create optimizers
(
optimizer_actor,
optimizer_critic,
optimizer_alpha,
) = make_sac_optimizer(cfg, loss_module)
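    # Group the actor, critic and alpha optimizers so a single step()/zero_grad() call
    # in update() drives all three parameter groups.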
optimizer = group_optimizers(optimizer_actor, optimizer_critic, optimizer_alpha)
del optimizer_actor, optimizer_critic, optimizer_alpha
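    # One SAC gradient step: backpropagate the summed actor, Q-value and temperature (alpha)
    # losses, step the grouped optimizer, then refresh the target Q-network parameters
    # (a Polyak/soft update in the standard SAC setup).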
def update(sampled_tensordict):
# Compute loss
loss_td = loss_module(sampled_tensordict)
actor_loss = loss_td["loss_actor"]
q_loss = loss_td["loss_qvalue"]
alpha_loss = loss_td["loss_alpha"]
(actor_loss + q_loss + alpha_loss).sum().backward()
optimizer.step()
optimizer.zero_grad(set_to_none=True)
# Update qnet_target params
target_net_updater.step()
return loss_td.detach()
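    # Optionally wrap the update step: compile_with_warmup compiles after a short eager
    # warmup, and CudaGraphModule additionally captures the step in a CUDA graph.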
if cfg.compile.compile:
update = compile_with_warmup(update, mode=compile_mode, warmup=1)
if cfg.compile.cudagraphs:
warnings.warn(
"CudaGraphModule is experimental and may lead to silently wrong results. Use with caution.",
category=UserWarning,
)
update = CudaGraphModule(update, in_keys=[], out_keys=[], warmup=5)
# Main loop
collected_frames = 0
pbar = tqdm.tqdm(total=cfg.collector.total_frames)
init_random_frames = cfg.collector.init_random_frames
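    # Gradient updates per collected batch: frames_per_batch scaled by the update-to-data (UTD) ratio.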
num_updates = int(cfg.collector.frames_per_batch * cfg.optim.utd_ratio)
prb = cfg.replay_buffer.prb
eval_iter = cfg.logger.eval_iter
frames_per_batch = cfg.collector.frames_per_batch
eval_rollout_steps = cfg.env.max_episode_steps
collector_iter = iter(collector)
total_iter = len(collector)
for i in range(total_iter):
timeit.printevery(num_prints=1000, total_count=total_iter, erase=True)
with timeit("collect"):
tensordict = next(collector_iter)
# Update weights of the inference policy
collector.update_policy_weights_()
current_frames = tensordict.numel()
pbar.update(current_frames)
with timeit("rb - extend"):
# Add to replay buffer
tensordict = tensordict.reshape(-1)
replay_buffer.extend(tensordict)
collected_frames += current_frames
# Optimization steps
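        # Updates only start once `init_random_frames` frames have been collected
        # (the collector's initial random-exploration phase).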
with timeit("train"):
if collected_frames >= init_random_frames:
losses = TensorDict(batch_size=[num_updates])
for i in range(num_updates):
with timeit("rb - sample"):
# Sample from replay buffer
sampled_tensordict = replay_buffer.sample()
with timeit("update"):
torch.compiler.cudagraph_mark_step_begin()
loss_td = update(sampled_tensordict).clone()
losses[i] = loss_td.select(
"loss_actor", "loss_qvalue", "loss_alpha"
)
# Update priority
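                    # With prioritized replay, write the refreshed priorities (typically
                    # derived from the TD error computed by the loss) back into the buffer.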
if prb:
replay_buffer.update_priority(sampled_tensordict)
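        # Episode boundaries: prefer terminal ("done") flags; if none fired in this batch,
        # fall back to time-limit truncations.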
episode_end = (
tensordict["next", "done"]
if tensordict["next", "done"].any()
else tensordict["next", "truncated"]
)
episode_rewards = tensordict["next", "episode_reward"][episode_end]
# Logging
metrics_to_log = {}
if len(episode_rewards) > 0:
episode_length = tensordict["next", "step_count"][episode_end]
metrics_to_log["train/reward"] = episode_rewards
metrics_to_log["train/episode_length"] = episode_length.sum() / len(
episode_length
)
if collected_frames >= init_random_frames:
losses = losses.mean()
metrics_to_log["train/q_loss"] = losses.get("loss_qvalue")
metrics_to_log["train/actor_loss"] = losses.get("loss_actor")
metrics_to_log["train/alpha_loss"] = losses.get("loss_alpha")
metrics_to_log["train/alpha"] = loss_td["alpha"]
metrics_to_log["train/entropy"] = loss_td["entropy"]
# Evaluation
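        # Evaluate roughly every `eval_iter` frames; the modulo test accounts for frames
        # arriving in chunks of `frames_per_batch`.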
if abs(collected_frames % eval_iter) < frames_per_batch:
with set_exploration_type(
ExplorationType.DETERMINISTIC
), torch.no_grad(), timeit("eval"):
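            # Roll out the actor with deterministic action selection and gradients disabled.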
eval_rollout = eval_env.rollout(
eval_rollout_steps,
model[0],
auto_cast_to_device=True,
break_when_any_done=True,
)
eval_env.apply(dump_video)
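            # Per-rollout return: sum rewards over the time dimension, then average over
            # the batch of evaluation environments.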
eval_reward = eval_rollout["next", "reward"].sum(-2).mean().item()
metrics_to_log["eval/reward"] = eval_reward
if logger is not None:
metrics_to_log.update(timeit.todict(prefix="time"))
metrics_to_log["time/speed"] = pbar.format_dict["rate"]
log_metrics(logger, metrics_to_log, collected_frames)
collector.shutdown()
if not eval_env.is_closed:
eval_env.close()
if not train_env.is_closed:
train_env.close()
if __name__ == "__main__":
main()