Commit
Co-authored-by: Lucas Alegre <[email protected]>
Co-authored-by: Mark Towers <[email protected]>
Co-authored-by: Florian Felten <[email protected]>
1 parent 5099244 · commit 5b248ed · showing 15 changed files with 456 additions and 15 deletions.
@@ -0,0 +1,60 @@
import numpy as np
from gymnasium.envs.mujoco.ant_v5 import AntEnv
from gymnasium.spaces import Box
from gymnasium.utils import EzPickle


class MOAntEnv(AntEnv, EzPickle):
    """
    ## Description
    Multi-objective version of the AntEnv environment.

    See [Gymnasium's env](https://gymnasium.farama.org/environments/mujoco/ant/) for more information.

    The original Gymnasium's 'Ant-v5' is recovered by the following linear scalarization:

    env = mo_gym.make('mo-ant-v5', cost_objective=False)
    LinearReward(env, weight=np.array([1.0, 0.0]))

    ## Reward Space
    The reward is 2- or 3-dimensional:
    - 0: x-velocity
    - 1: y-velocity
    - 2: Control cost of the action
    If the cost_objective flag is set to False, the reward is 2-dimensional, and the cost is added to the other objectives.
    A healthy reward and a cost for contact forces are added to all objectives.

    A 2-objective version (without the cost objective as a separate objective) can be instantiated via:
    env = mo_gym.make('mo-ant-2obj-v5')

    ## Version History
    - v5: Now includes contact forces in the reward and observation.
      The 2-objective version now has id 'mo-ant-2obj-v5' instead of 'mo-ant-2d-v4'.
      See https://gymnasium.farama.org/environments/mujoco/ant/#version-history
    """

    def __init__(self, cost_objective=True, **kwargs):
        super().__init__(**kwargs)
        EzPickle.__init__(self, cost_objective, **kwargs)
        self._cost_objective = cost_objective
        self.reward_dim = 3 if cost_objective else 2
        self.reward_space = Box(low=-np.inf, high=np.inf, shape=(self.reward_dim,))

    def step(self, action):
        observation, reward, terminated, truncated, info = super().step(action)
        x_velocity = info["x_velocity"]
        y_velocity = info["y_velocity"]
        cost = info["reward_ctrl"]
        healthy_reward = info["reward_survive"]

        if self._cost_objective:
            cost /= self._ctrl_cost_weight  # Ignore the weight in the original AntEnv
            vec_reward = np.array([x_velocity, y_velocity, cost], dtype=np.float32)
        else:
            vec_reward = np.array([x_velocity, y_velocity], dtype=np.float32)
            vec_reward += cost

        vec_reward += healthy_reward
        vec_reward += info["reward_contact"]  # Do not treat contact forces as a separate objective

        return observation, vec_reward, terminated, truncated, info
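
For reference, a minimal usage sketch of the class above (not part of the diff): it assumes 'mo-ant-v5' is a registered id and that a LinearReward wrapper is importable from mo_gymnasium.wrappers, as the docstring suggests; the exact import path may vary across mo_gymnasium versions.

# Sketch, not part of this commit. Assumes 'mo-ant-v5' is registered and that
# LinearReward lives in mo_gymnasium.wrappers (adjust to your installed version).
import numpy as np
import mo_gymnasium as mo_gym
from mo_gymnasium.wrappers import LinearReward

env = mo_gym.make("mo-ant-v5", cost_objective=False)  # 2-dim vector reward
env = LinearReward(env, weight=np.array([1.0, 0.0]))  # recovers Ant-v5's scalar reward

obs, info = env.reset(seed=42)
obs, scalar_reward, terminated, truncated, info = env.step(env.action_space.sample())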
File renamed without changes.
@@ -0,0 +1,40 @@
import numpy as np
from gymnasium.envs.mujoco.half_cheetah_v5 import HalfCheetahEnv
from gymnasium.spaces import Box
from gymnasium.utils import EzPickle


class MOHalfCheetahEnv(HalfCheetahEnv, EzPickle):
    """
    ## Description
    Multi-objective version of the HalfCheetahEnv environment.

    See [Gymnasium's env](https://gymnasium.farama.org/environments/mujoco/half_cheetah/) for more information.

    The original Gymnasium's 'HalfCheetah-v5' is recovered by the following linear scalarization:

    env = mo_gym.make('mo-halfcheetah-v5')
    LinearReward(env, weight=np.array([1.0, 0.1]))

    ## Reward Space
    The reward is 2-dimensional:
    - 0: Reward for running forward
    - 1: Control cost of the action

    ## Version History
    - v5: The scale of the control cost changed from v4.
      See https://gymnasium.farama.org/environments/mujoco/half_cheetah/#version-history for other changes.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        EzPickle.__init__(self, **kwargs)
        self.reward_space = Box(low=-np.inf, high=np.inf, shape=(2,))
        self.reward_dim = 2

    def step(self, action):
        observation, reward, terminated, truncated, info = super().step(action)
        x_velocity = info["x_velocity"]
        neg_energy_cost = info["reward_ctrl"] / self._ctrl_cost_weight  # Revert the scale applied in the original environment
        vec_reward = np.array([x_velocity, neg_energy_cost], dtype=np.float32)
        return observation, vec_reward, terminated, truncated, info
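
A quick sanity-check sketch of the two-dimensional reward, under the same assumption that mo_gymnasium registers the 'mo-halfcheetah-v5' id:

# Sketch: inspect the vector reward returned by the env above.
import mo_gymnasium as mo_gym

env = mo_gym.make("mo-halfcheetah-v5")
obs, info = env.reset(seed=0)
obs, vec_reward, terminated, truncated, info = env.step(env.action_space.sample())
assert vec_reward.shape == (2,)  # [forward velocity, unscaled control cost]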
@@ -0,0 +1,55 @@
import numpy as np
from gymnasium.envs.mujoco.hopper_v5 import HopperEnv
from gymnasium.spaces import Box
from gymnasium.utils import EzPickle


class MOHopperEnv(HopperEnv, EzPickle):
    """
    ## Description
    Multi-objective version of the HopperEnv environment.

    See [Gymnasium's env](https://gymnasium.farama.org/environments/mujoco/hopper/) for more information.

    The original Gymnasium's 'Hopper-v5' is recovered by the following linear scalarization:

    env = mo_gym.make('mo-hopper-v5')
    LinearReward(env, weight=np.array([1.0, 0.0, 1e-3]))

    ## Reward Space
    The reward is 3-dimensional:
    - 0: Reward for going forward on the x-axis
    - 1: Reward for jumping high on the z-axis
    - 2: Control cost of the action
    If the cost_objective flag is set to False, the reward is 2-dimensional, and the cost is added to the other objectives.

    A 2-objective version (without the cost objective as a separate objective) can be instantiated via:
    env = mo_gym.make('mo-hopper-2obj-v5')

    ## Version History
    - v5: The 2-objective version now has id 'mo-hopper-2obj-v5' instead of 'mo-hopper-2d-v4'.
      See https://gymnasium.farama.org/environments/mujoco/hopper/#version-history
    """

    def __init__(self, cost_objective=True, **kwargs):
        super().__init__(**kwargs)
        EzPickle.__init__(self, cost_objective, **kwargs)
        self._cost_objective = cost_objective
        self.reward_dim = 3 if cost_objective else 2
        self.reward_space = Box(low=-np.inf, high=np.inf, shape=(self.reward_dim,))

    def step(self, action):
        observation, reward, terminated, truncated, info = super().step(action)
        x_velocity = info["x_velocity"]
        height = 10 * info["z_distance_from_origin"]
        neg_energy_cost = info["reward_ctrl"]
        if self._cost_objective:
            neg_energy_cost /= self._ctrl_cost_weight  # Revert the scale applied in the original environment
            vec_reward = np.array([x_velocity, height, neg_energy_cost], dtype=np.float32)
        else:
            vec_reward = np.array([x_velocity, height], dtype=np.float32)
            vec_reward += neg_energy_cost

        vec_reward += info["reward_survive"]

        return observation, vec_reward, terminated, truncated, info
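
A sketch contrasting the two hopper variants, assuming both ids named in the docstring ('mo-hopper-v5' and 'mo-hopper-2obj-v5') are registered:

# Sketch: the 3-objective and 2-objective hopper variants side by side.
import mo_gymnasium as mo_gym

env3 = mo_gym.make("mo-hopper-v5")       # reward: [x-velocity, height, control cost]
env2 = mo_gym.make("mo-hopper-2obj-v5")  # cost folded into the other two objectives
assert env3.unwrapped.reward_dim == 3
assert env2.unwrapped.reward_dim == 2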
File renamed without changes.
@@ -0,0 +1,44 @@
import numpy as np
from gymnasium.envs.mujoco.humanoid_v5 import HumanoidEnv
from gymnasium.spaces import Box
from gymnasium.utils import EzPickle


class MOHumanoidEnv(HumanoidEnv, EzPickle):
    """
    ## Description
    Multi-objective version of the HumanoidEnv environment.

    See [Gymnasium's env](https://gymnasium.farama.org/environments/mujoco/humanoid/) for more information.

    The original Gymnasium's 'Humanoid-v5' is recovered by the following linear scalarization:

    env = mo_gym.make('mo-humanoid-v5')
    LinearReward(env, weight=np.array([1.25, 0.1]))

    ## Reward Space
    The reward is 2-dimensional:
    - 0: Reward for running forward (x-velocity)
    - 1: Control cost of the action

    ## Version History
    - v5: Now includes contact forces; the scale of the control cost changed from v4.
      See https://gymnasium.farama.org/environments/mujoco/humanoid/#version-history
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        EzPickle.__init__(self, **kwargs)
        self.reward_space = Box(low=-np.inf, high=np.inf, shape=(2,))
        self.reward_dim = 2

    def step(self, action):
        observation, reward, terminated, truncated, info = super().step(action)
        velocity = info["x_velocity"]
        neg_energy_cost = info["reward_ctrl"] / self._ctrl_cost_weight  # Revert the scale applied in the original environment
        vec_reward = np.array([velocity, neg_energy_cost], dtype=np.float32)

        vec_reward += self.healthy_reward  # All objectives are penalized when the agent falls
        vec_reward += info["reward_contact"]  # Do not treat contact forces as a separate objective

        return observation, vec_reward, terminated, truncated, info
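
And a sketch of the scalarization from the docstring, again assuming the 'mo-humanoid-v5' id is registered and that LinearReward is importable from mo_gymnasium.wrappers (an assumption; the path may differ by version):

# Sketch: recover Humanoid-v5's scalar reward from the vector reward.
import numpy as np
import mo_gymnasium as mo_gym
from mo_gymnasium.wrappers import LinearReward  # assumed location

# Weights mirror Humanoid-v5's forward_reward_weight (1.25) and ctrl_cost_weight (0.1).
env = LinearReward(mo_gym.make("mo-humanoid-v5"), weight=np.array([1.25, 0.1]))
obs, info = env.reset(seed=1)
obs, scalar_reward, terminated, truncated, info = env.step(env.action_space.sample())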
File renamed without changes.