"""
This file implements various return methods such as
- (n-step) Monte Carlo return
- Generalized Advantage Estimation (GAE)
High-Dimensional Continuous Control Using Generalized Advantage Estimation, Schulman et al. 2016
https://arxiv.org/abs/1506.02438
"""
import torch
def returns():
raise NotImplementedError


def A(Rt, rewards, masks, values, args):
    """Monte Carlo advantage: normalized discounted returns minus the value baseline."""
    values = torch.stack(values)
    returns = []
    # Accumulate discounted returns backwards: R_t = r_t + gamma * R_{t+1},
    # with the mask zeroing the bootstrap across episode boundaries.
    for step in reversed(range(len(rewards))):
        Rt = rewards[step] + args.gamma * Rt * masks[step]
        returns.insert(0, Rt)
    # Still normalize the returns
    returns = torch.stack(returns).detach()
    returns = (returns - returns.mean()) / (returns.std() + 1e-10)
    return returns - values


def Q(next_value, rewards, masks, values, args):
    """One-step return estimate: Q(s_t, a_t) = r_t + gamma * V(s_{t+1})."""
    values = values + [next_value]
    returns = []
    for step in reversed(range(len(rewards))):
        Qsa = rewards[step] + args.gamma * values[step + 1] * masks[step]
        returns.insert(0, Qsa)
    # Still normalize the returns
    returns = torch.stack(returns).detach()
    returns = (returns - returns.mean()) / (returns.std() + 1e-10)
    return returns


def GAE(next_value, rewards, masks, values, args):
    """
    Calculate the Generalized Advantage Estimation as proposed in Schulman et al. 2016.

    :param next_value: value estimate at timestep t+1 (used to bootstrap Qsa)
    :param rewards: rewards for each timestep
    :param masks: masks that zero bootstrapping across episode boundaries (for env multiprocessing)
    :param values: state-value estimates for each timestep
    :param args: argument namespace; must provide args.gamma and args.gae_lambda
    :return: the normalized GAE advantage estimate at each timestep
    """
    values = values + [next_value]
    gae = 0
    returns = []
    for step in reversed(range(len(rewards))):
        # TD error: delta_t = r_t + gamma * V(s_{t+1}) - V(s_t)
        Qsa = rewards[step] + args.gamma * values[step + 1] * masks[step]
        Vs = values[step]
        delta = Qsa - Vs
        # GAE recurrence: A_t = delta_t + gamma * lambda * A_{t+1}
        gae = delta + args.gamma * args.gae_lambda * masks[step] * gae
        returns.insert(0, gae)
    # Still normalize the returns
    returns = torch.stack(returns).detach()
    returns = (returns - returns.mean()) / (returns.std() + 1e-10)
    return returns
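

# Minimal usage sketch (not part of the original module): feeds dummy
# single-environment rollout data through A, Q, and GAE. The `args` Namespace
# with `gamma`/`gae_lambda` fields is an assumed stand-in for the project's
# real argparse configuration.
if __name__ == "__main__":
    from argparse import Namespace

    args = Namespace(gamma=0.99, gae_lambda=0.95)  # assumed hyperparameters
    T = 5  # rollout length
    rewards = [torch.tensor([1.0]) for _ in range(T)]
    # A mask of 0.0 marks an episode boundary (no bootstrapping past it).
    masks = [torch.tensor([1.0])] * (T - 1) + [torch.tensor([0.0])]
    values = [torch.tensor([0.5]) for _ in range(T)]
    next_value = torch.tensor([0.5])

    print(A(next_value, rewards, masks, values, args))    # Monte Carlo advantages
    print(Q(next_value, rewards, masks, values, args))    # one-step return estimates
    print(GAE(next_value, rewards, masks, values, args))  # GAE advantages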