"""
The script to run PPO on classic control environments (with discrete action space).
"""
import argparse

from RLAlgos.PPO import PPO
from Networks.CombinedActorCriticNetworks import PPOClassicControlAgent
from utils.env_makers import sync_vector_classic_envs_maker


def parse_args():
    parser = argparse.ArgumentParser(description="Run PPO on classic control environments.")

    # experiment setup
    parser.add_argument("--exp-name", type=str, default="ppo")
    parser.add_argument("--env-id", type=str, default="CartPole-v1")
    parser.add_argument("--num-envs", type=int, default=4)
    parser.add_argument("--seed", type=int, default=1)
    parser.add_argument("--cuda", type=int, default=0)

    # discounting and generalized advantage estimation (GAE)
    parser.add_argument("--gamma", type=float, default=0.99)
    parser.add_argument("--gae-lambda", type=float, default=0.95)

    # rollout collection and optimization schedule
    parser.add_argument("--rollout-length", type=int, default=128)
    parser.add_argument("--num-mini-batches", type=int, default=4)
    parser.add_argument("--update-epochs", type=int, default=4)
    parser.add_argument("--lr", type=float, default=2.5e-4)
    parser.add_argument("--eps", type=float, default=1e-5)

    # argparse's type=bool is a footgun: bool("False") is True because any
    # non-empty string is truthy, so parse boolean flags from their string value
    def str_to_bool(value):
        return str(value).lower() in ("true", "1", "yes")

    parser.add_argument("--anneal-lr", type=str_to_bool, default=True)
    parser.add_argument("--norm-adv", type=str_to_bool, default=True)
    parser.add_argument("--clip-value-loss", type=str_to_bool, default=True)

    # PPO clipping and loss coefficients
    parser.add_argument("--clip-coef", type=float, default=0.2)
    parser.add_argument("--entropy-coef", type=float, default=0.01)
    parser.add_argument("--value-coef", type=float, default=0.5)
    parser.add_argument("--max-grad-norm", type=float, default=0.5)
    parser.add_argument("--target-kl", type=float, default=None)

    # logging, checkpointing, and total training budget
    parser.add_argument("--write-frequency", type=int, default=100)
    parser.add_argument("--save-folder", type=str, default="./ppo/")
    parser.add_argument("--total-timesteps", type=int, default=500000)

    args = parser.parse_args()
    return args


def run():
    args = parse_args()
    # NOTE: this env maker needs the additional argument gamma
    envs = sync_vector_classic_envs_maker(env_id=args.env_id, num_envs=args.num_envs, seed=args.seed,
                                          gamma=args.gamma)
    agent = PPO(envs=envs, agent_class=PPOClassicControlAgent, exp_name=args.exp_name, seed=args.seed, cuda=args.cuda,
                gamma=args.gamma, gae_lambda=args.gae_lambda, rollout_length=args.rollout_length, lr=args.lr,
                eps=args.eps, anneal_lr=args.anneal_lr, num_mini_batches=args.num_mini_batches,
                update_epochs=args.update_epochs, norm_adv=args.norm_adv, clip_value_loss=args.clip_value_loss,
                clip_coef=args.clip_coef, entropy_coef=args.entropy_coef, value_coef=args.value_coef,
                max_grad_norm=args.max_grad_norm, target_kl=args.target_kl,
                rpo_alpha=None,  # None runs plain PPO rather than the RPO variant
                write_frequency=args.write_frequency, save_folder=args.save_folder)
    agent.learn(total_timesteps=args.total_timesteps)
    # save the final model weights
    agent.save(indicator="final")


if __name__ == "__main__":
    run()