-
Notifications
You must be signed in to change notification settings - Fork 0
/
train.py
283 lines (255 loc) · 9.6 KB
/
train.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
from imitation.algorithms import preference_comparisons
from test_net.non_recurrent_net import CustomBasicRewardNet
from imitation.util.networks import RunningNorm
from imitation.util.util import make_vec_env
from imitation.policies.base import NormalizeFeaturesExtractor
from imitation.rewards.reward_nets import NormalizedRewardNet
from common.reward_nets.recurrent_reward_nets import RecurrentNormalizedRewardNet
from custom_preference import CustomPreferenceComparisons, CustomPreferenceModel, CustomRandomFragmenter, CustomSyntheticGatherer
from stable_baselines3 import PPO
import numpy as np
import gymnasium as gym
import recurrent_preference
from test_net import recurrent_net
import custom_envs
import torch as th
import torch.nn as nn
import seals , os
from tensorboardX import SummaryWriter
from gymnasium.wrappers.record_video import RecordVideo
import random
random.seed(0)
np.random.seed(0)
th.manual_seed(0)
if th.cuda.is_available():
th.cuda.manual_seed_all(0)
th.cuda.manual_seed(0)
"""
This is for variable horizon environment(episode)
"""
VARIABLE_HORIZON= False
LOG_DIR = '/home/cai/Desktop/GRU_reward/results'
N_ENVS = 8
N_STEPS = int(2048 / N_ENVS)
BATCH_SIZE = 64
TEST_EPI = 10
RENDER = True
RECODE_TEST_EP = True
RENDER_MODE = 'rgb_array'
CLIP_SIZE = 50
MAX_EPI_STEP = 500
ENV_NAME = 'FHContinuous_MountainCarEnv-v0'
HARDCORE = False # it only works BipedalWalker family
INTERACTION_NUM = 60
TOTAL_TIME_STEP = 5_000
TOTAL_COMPARISION = 600
QUEUE_SIZE = 100
OVERSAMPLEING_FACTOR = 5
"""
ENV_NAME:
BipedalWalker family:
FHBipedalWalker-v1: absorb observation and reward
FHBipedalWalker-v2: only has a truncated (i.e. hasn't a termination) and hasn't absorb series
BipedalWalker-v3: original
VHBipedalWalker-v1: add step penalty
CartPole
seals/CartPole-v0: reference https://github.com/HumanCompatibleAI/seals/blob/master/src/seals/classic_control.py
Pendulum
Pendulum-v1: reference https://imitation.readthedocs.io/en/latest/tutorials/5_train_preference_comparisons.html
Continuous_MountainCarEnv
FHContinuous_MountainCarEnv-v0: reference https://github.com/HumanCompatibleAI/seals/blob/master/src/seals/classic_control.py
MountainCarContinuous-v0: original
"""
if RECODE_TEST_EP and RENDER_MODE != 'rgb_array':
raise ValueError('If you want to record test_epi, then must set render mode to "rgb_array".')
rng = np.random.default_rng(0)
policy_kwargs = dict(
net_arch = dict(pi = [32,32], vf = [32,32]),
activation_fn = nn.ReLU,
features_extractor_class=NormalizeFeaturesExtractor,
features_extractor_kwargs=dict(normalize_class=RunningNorm),
)
venv = make_vec_env(ENV_NAME, rng=rng,
n_envs= N_ENVS,
max_episode_steps = MAX_EPI_STEP,
env_make_kwargs = {'hardcore':HARDCORE} if 'BipedalWalker' in ENV_NAME else None
)
reward_net = recurrent_net.CustomRewardNet(
venv.observation_space, venv.action_space, normalize_input_layer=RunningNorm
)
reward_net = RecurrentNormalizedRewardNet(reward_net, normalize_output_layer= RunningNorm)
fragmenter = recurrent_preference.RecurrentRandomFragmenter(
warning_threshold=0,
rng=rng,
allow_variable_horizon = VARIABLE_HORIZON
)
gatherer = recurrent_preference.RecurrentSyntheticGatherer(rng=rng, allow_variable_horizon = VARIABLE_HORIZON)
preference_model = recurrent_preference.RecurrentPreferenceModel(reward_net, allow_variable_horizon = VARIABLE_HORIZON)
reward_trainer = recurrent_preference.RecurrentBasicRewardTrainer(
preference_model=preference_model,
loss=recurrent_preference.RecurrentCrossEntropyRewardLoss(),
epochs=3,
rng=rng,
)
agent = PPO(
policy="MlpPolicy",
env=venv,
seed=0,
policy_kwargs = policy_kwargs,
learning_rate= 0.0003,
use_sde = False,
n_steps=N_STEPS,
batch_size=BATCH_SIZE,
ent_coef=0.01,
clip_range=0.1,
gae_lambda=0.95,
gamma=0.97,
n_epochs=10,
)
trajectory_generator = recurrent_preference.RecurrentAgentTrainer(
algorithm=agent,
reward_fn=reward_net,
venv=venv,
rng=rng,
)
writer_1 = SummaryWriter(os.path.join(LOG_DIR,ENV_NAME,'GRU'))
pref_comparisons = recurrent_preference.RecurrentPreferenceComparisons(
trajectory_generator,
reward_net,
num_iterations=INTERACTION_NUM,
fragmenter=fragmenter,
preference_gatherer=gatherer,
reward_trainer=reward_trainer,
fragment_length=CLIP_SIZE,
transition_oversampling=OVERSAMPLEING_FACTOR,
initial_comparison_frac=0.1,
comparison_queue_size= QUEUE_SIZE,
allow_variable_horizon=VARIABLE_HORIZON,
initial_epoch_multiplier=4,
query_schedule="hyperbolic",
tensorboard= writer_1
)
pref_comparisons.train(
total_timesteps=TOTAL_TIME_STEP,
total_comparisons=TOTAL_COMPARISION,
)
del pref_comparisons, trajectory_generator, rng, reward_trainer , preference_model, gatherer, fragmenter, venv, reward_net
rng = np.random.default_rng(0)
writer_2 = SummaryWriter(os.path.join(LOG_DIR,ENV_NAME,'Non_GRU'))
venv = make_vec_env(ENV_NAME, rng=rng,
n_envs= N_ENVS,
max_episode_steps = MAX_EPI_STEP,
env_make_kwargs = {'hardcore':HARDCORE} if 'BipedalWalker' in ENV_NAME else None
)
reward_net = CustomBasicRewardNet(
venv.observation_space, venv.action_space, normalize_input_layer=RunningNorm
)
reward_net = NormalizedRewardNet(reward_net, normalize_output_layer= RunningNorm)
fragmenter = CustomRandomFragmenter(
warning_threshold=0,
rng=rng,
allow_variable_horizon = VARIABLE_HORIZON
)
gatherer = CustomSyntheticGatherer(rng=rng, allow_variable_horizon = VARIABLE_HORIZON)
preference_model = CustomPreferenceModel(reward_net, allow_variable_horizon = VARIABLE_HORIZON)
reward_trainer = preference_comparisons.BasicRewardTrainer(
preference_model=preference_model,
loss=preference_comparisons.CrossEntropyRewardLoss(),
epochs=3,
rng=rng,
)
agent2 = PPO(
policy="MlpPolicy",
env=venv,
seed=0,
policy_kwargs = policy_kwargs,
learning_rate= 0.0003,
use_sde = False,
n_steps=N_STEPS,
batch_size=BATCH_SIZE,
ent_coef=0.01,
clip_range=0.1,
gae_lambda=0.95,
gamma=0.97,
n_epochs=10,
)
trajectory_generator = preference_comparisons.AgentTrainer(
algorithm=agent2,
reward_fn=reward_net,
venv=venv,
exploration_frac=0.00,
rng=rng,
)
pref_comparisons = CustomPreferenceComparisons(
trajectory_generator,
reward_net,
num_iterations=INTERACTION_NUM, # Set to 60 for better performance
fragmenter=fragmenter,
preference_gatherer=gatherer,
reward_trainer=reward_trainer,
fragment_length=CLIP_SIZE,
transition_oversampling=OVERSAMPLEING_FACTOR,
initial_comparison_frac=0.1,
allow_variable_horizon=VARIABLE_HORIZON,
comparison_queue_size= QUEUE_SIZE,
initial_epoch_multiplier=4,
query_schedule="hyperbolic",
tensorboard = writer_2
)
pref_comparisons.train(
total_timesteps=TOTAL_TIME_STEP,
total_comparisons=TOTAL_COMPARISION,
)
del pref_comparisons, trajectory_generator, rng, reward_trainer , preference_model, gatherer, fragmenter, venv, reward_net
import gymnasium as gym
agents_rewards = []
agents_epi_lenghts = []
for index ,(model, writer) in enumerate(zip([agent, agent2],[writer_1, writer_2])):
reward_list = []
epi_lenght_list = []
if 'BipedalWalker' in ENV_NAME:
env = gym.make('BipedalWalker-v3',
max_episode_steps = MAX_EPI_STEP,
render_mode = RENDER_MODE,
hardcore = HARDCORE
)
else:
env = gym.make(ENV_NAME,
max_episode_steps = MAX_EPI_STEP,
render_mode = RENDER_MODE,
)
env = RecordVideo(env= env,
video_folder= os.path.join(LOG_DIR, ENV_NAME, f'{VARIABLE_HORIZON}_video'),
name_prefix= f'GRU_reward_{CLIP_SIZE}' if index == 0 else f"Non_GRU_reward_{CLIP_SIZE}"
) if RECODE_TEST_EP else env
total_reward = 0
step = 0
for epi in range(TEST_EPI):
is_done = False
observation, _ = env.reset()
reward_ = 0
epi_lenght = 0
while not is_done:
action, _= model.policy.predict(observation, deterministic = True)
observation, reward, terminated, truncated, _ = env.step(action)
epi_lenght += 1
reward_ += reward
if RENDER:
env.render()
if terminated or truncated:
is_done = True
writer.add_scalar('eval/cummulated_reward',total_reward, step)
step += 1
total_reward += reward
reward_list.append(reward_)
epi_lenght_list.append(epi_lenght)
del env
agents_rewards.append([np.mean(reward_list), np.var(reward_list), np.std(reward_list)])
agents_epi_lenghts.append([np.mean(epi_lenght_list), np.var(epi_lenght_list), np.std(epi_lenght_list)])
print(f'[ENV_NAME]: {ENV_NAME}, [CLIP_SIZE]: {CLIP_SIZE}, [TEST_EPI]: {TEST_EPI}')
print(f'[GRU]\n\
[reward mean]: {agents_rewards[0][0]}, [reward var]: {agents_rewards[0][1]}, [reward std]: {agents_rewards[0][2]}\n\
[episode mean]: {agents_epi_lenghts[0][0]}, [episode var]: {agents_epi_lenghts[0][1]}, [episode std]: {agents_epi_lenghts[0][2]}')
print(f'[No GRU]\n\
[reward mean]: {agents_rewards[1][0]}, [reward var]: {agents_rewards[1][1]}, [reward std]: {agents_rewards[1][2]}\n\
[episode mean]: {agents_epi_lenghts[1][0]}, [episode var]: {agents_epi_lenghts[1][1]}, [episode std]: {agents_epi_lenghts[1][2]}')