test_ddpg_naive.py
import gymnasium as gym

from rlearn_dev.methods.ddpg.naive.agent import DDPGAgent


def make_env(env_id, seed, idx, capture_video, run_name):
    def thunk():
        # Record video only from the first environment in the vector.
        if capture_video and idx == 0:
            env = gym.make(env_id, render_mode="rgb_array")
            env = gym.wrappers.RecordVideo(
                env,
                f"videos/{run_name}",
                name_prefix=f"ddpg_{env_id}",
                episode_trigger=lambda episode_idx: episode_idx % 100 == 0,
                video_length=0,  # 0 records the entire episode
            )
        else:
            env = gym.make(env_id)
        # Track episode returns/lengths and seed the action space for reproducibility.
        env = gym.wrappers.RecordEpisodeStatistics(env)
        env.action_space.seed(seed)
        return env
    return thunk


def test_ddpg_naive():
    capture_video = True
    # capture_video = False
    run_name = 'test'
    num_envs = 5
    env_id = 'Hopper-v4'
    env = gym.vector.SyncVectorEnv(
        [make_env(env_id, 36, i, capture_video, run_name) for i in range(num_envs)]
    )
    g_seed = 36
    config = {
        'gamma': 0.99,
        'batch_size': 64,
        'tau': 0.005,
        'actor_lr': 0.0003,
        'critic_lr': 0.0003,
        'buffer_size': 100_000,
        'exploration_noise': 0.1,
    }
    agent = DDPGAgent(env, config=config, seed=g_seed)
    learn_config = {
        'max_episodes': 30_000 // 256,
        'max_episode_steps': 256,
        'max_total_steps': 1_000_000,
        'verbose_freq': 1,
    }
    agent.learn(**learn_config)
    env.close()


if __name__ == '__main__':
    test_ddpg_naive()