
Commit

Fix RLlib PPO example.
Gamenot committed Jan 4, 2024
1 parent 0d7c352 commit 94a0bcf
Showing 2 changed files with 3 additions and 4 deletions.
examples/e12_rllib/ppo_example.py (1 addition, 1 deletion)
@@ -129,7 +129,7 @@ def main(
         enable_tf1_exec_eagerly=True,
     )
     .training(
-        lr=[[0, 1e-3], [1e3, 5e-4], [1e5, 1e-4], [1e7, 5e-5], [1e8, 1e-5]],
+        lr_schedule=[[0, 1e-3], [1e3, 5e-4], [1e5, 1e-4], [1e7, 5e-5], [1e8, 1e-5]],
         train_batch_size=train_batch_size,
     )
     .multi_agent(
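Why the change: in the RLlib AlgorithmConfig API this example targets, the lr argument of .training() expects a single float; a piecewise schedule of [timestep, learning_rate] pairs is passed through lr_schedule instead, so handing the list to lr fails. A minimal sketch of the corrected call, assuming the old (pre-RLModule) RLlib API stack; the train_batch_size value here is a stand-in for the parameter the example threads through:

from ray.rllib.algorithms.ppo import PPOConfig

config = (
    PPOConfig()
    .training(
        # Piecewise learning-rate anneal: 1e-3 at timestep 0, down to 1e-5 by 1e8.
        lr_schedule=[[0, 1e-3], [1e3, 5e-4], [1e5, 1e-4], [1e7, 5e-5], [1e8, 1e-5]],
        train_batch_size=4000,  # stand-in value; the example passes its own parameter
    )
)
algo = config.build()  # builds PPO with the scheduled learning rate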
examples/e12_rllib/ppo_pbt_example.py (2 additions, 3 deletions)
@@ -225,7 +225,7 @@ def main(
         scheduler=pbt,
         max_concurrent_trials=4,
     )
-    trainable = "PG"
+    trainable = "PPO"
     if resume_training:
         tuner = tune.Tuner.restore(
             str(experiment_dir),
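Why the change: tune.Tuner.restore() reconstructs an experiment from disk and must be given the same trainable the run was created with; this script tunes PPO, so restoring it as "PG" pointed at the wrong algorithm. A hedged sketch of the restore pattern, assuming Ray 2.x Tune; the experiment path is illustrative, while the real script derives it from its arguments:

from pathlib import Path
from ray import tune

# Illustrative path; the example computes experiment_dir from its own arguments.
experiment_dir = Path("~/ray_results/ppo_pbt_example").expanduser()

trainable = "PPO"  # must match the algorithm the original experiment ran with
tuner = tune.Tuner.restore(str(experiment_dir), trainable=trainable)
results = tuner.fit()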
@@ -246,8 +246,7 @@ def main(
 
     # Get the best checkpoint corresponding to the best result.
     best_checkpoint = best_result.checkpoint
-
-    best_logdir = Path(best_result.log_dir)
+    best_logdir = Path(best_checkpoint.path)
     model_path = best_logdir
 
     copy_tree(str(model_path), save_model_path, overwrite=True)
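Why the change: best_result.checkpoint already identifies the best checkpoint, and its .path attribute is the directory holding the saved model, whereas the old code copied from log_dir, the trial's logging directory rather than the checkpoint itself. A minimal sketch of the corrected retrieval, assuming the Ray AIR Result API; the metric name is an assumption about what the example optimizes:

from pathlib import Path

# results is the ResultGrid returned by tuner.fit().
best_result = results.get_best_result(metric="episode_reward_mean", mode="max")

# Get the best checkpoint corresponding to the best result.
best_checkpoint = best_result.checkpoint
best_logdir = Path(best_checkpoint.path)  # checkpoint directory, not the trial log dir
model_path = best_logdir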
