missing goal fixes
reginald-mclean committed Jun 25, 2024
1 parent fb7ee00 commit 0cc4754
Showing 14 changed files with 27 additions and 17 deletions.
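
The changes follow a single pattern: each environment's reset_model samples a fresh self._target_pos but either never pushes it into the MuJoCo "goal" site or does so through a _set_pos_site call that this commit replaces, so the rendered goal marker could be missing or stale. The fix writes the target directly into the named site via model.site("goal").pos. Below is a minimal, self-contained sketch of that pattern using the mujoco bindings' named access; the SketchEnv class, its XML, and the sampling range are illustrative stand-ins, not Metaworld code.

# Minimal sketch of the recurring fix; SketchEnv and its XML are hypothetical,
# not the actual Metaworld environment classes.
import mujoco
import numpy as np

_XML = """
<mujoco>
  <worldbody>
    <site name="goal" pos="0 0 0" size="0.02"/>
  </worldbody>
</mujoco>
"""


class SketchEnv:
    def __init__(self) -> None:
        self.model = mujoco.MjModel.from_xml_string(_XML)
        self.data = mujoco.MjData(self.model)
        self._target_pos = np.zeros(3)

    def reset_model(self) -> np.ndarray:
        # Sample a new goal, then mirror it into the named site so the
        # rendered marker matches the position the reward will use.
        self._target_pos = np.random.uniform(
            low=[-0.1, 0.6, 0.0], high=[0.1, 0.8, 0.2]
        )
        self.model.site("goal").pos = self._target_pos
        mujoco.mj_forward(self.model, self.data)  # propagate to data.site_xpos
        return self.data.site("goal").xpos.copy()


if __name__ == "__main__":
    env = SketchEnv()
    print(env.reset_model())  # world-frame goal position after the reset
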
@@ -123,7 +123,7 @@ def reset_model(self) -> npt.NDArray[np.float64]:
         self.model.body("coffee_machine").pos = pos_machine

         self._target_pos = pos_mug_goal
-        self.model.site("coffee_goal").pos = self._target_pos
+        self.model.site("mug_goal").pos = self._target_pos
         return self._get_obs()

     def compute_reward(

@@ -130,6 +130,8 @@ def reset_model(self) -> npt.NDArray[np.float64]:
         self.model.body("puck_goal").pos = self.obj_init_pos
         self._set_obj_xyz(np.array([-0.15, 0.0]))

+        self.model.site("goal").pos = self._target_pos
+
         return self._get_obs()

     def compute_reward(

@@ -108,6 +108,8 @@ def reset_model(self) -> npt.NDArray[np.float64]:
         self.data.body("puck_goal").xpos = self._target_pos
         self._set_obj_xyz(np.array([0, 0.15]))

+        self.model.site("goal").pos = self._target_pos
+
         return self._get_obs()

     def compute_reward(

@@ -108,6 +108,8 @@ def reset_model(self) -> npt.NDArray[np.float64]:
         self.data.body("puck_goal").xpos = self._target_pos
         self._set_obj_xyz(np.zeros(2))

+        self.model.site("goal").pos = self._target_pos
+
         return self._get_obs()

     def compute_reward(

metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_plate_slide_v2.py (2 changes: 2 additions & 0 deletions)
@@ -112,6 +112,8 @@ def reset_model(self) -> npt.NDArray[np.float64]:
         self.model.body("puck_goal").pos = self._target_pos
         self._set_obj_xyz(np.zeros(2))

+        self.model.site("goal").pos = self._target_pos
+
         return self._get_obs()

     def compute_reward(

metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_reach_v2.py (3 changes: 2 additions & 1 deletion)
@@ -125,7 +125,8 @@ def reset_model(self) -> npt.NDArray[np.float64]:
         self.obj_init_pos = goal_pos[:3]
         self._set_obj_xyz(self.obj_init_pos)

-        self._set_pos_site("goal", self._target_pos)
+        self.model.site("goal").pos = self._target_pos
+
         return self._get_obs()

     def compute_reward(

@@ -114,7 +114,7 @@ def reset_model(self) -> npt.NDArray[np.float64]:
         self.obj_init_pos = goal_pos[:3]

         self._set_obj_xyz(self.obj_init_pos)
-        self._set_pos_site("goal", self._target_pos)
+        self.model.site("goal").pos = self._target_pos
         return self._get_obs()

     def compute_reward(

metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_soccer_v2.py (4 changes: 3 additions & 1 deletion)
@@ -117,7 +117,9 @@ def reset_model(self) -> npt.NDArray[np.float64]:
         self.maxPushDist = np.linalg.norm(
             self.obj_init_pos[:2] - np.array(self._target_pos)[:2]
         )
-        self._set_pos_site("goal", self._target_pos)
+
+        self.model.site("goal").pos = self._target_pos
+
         return self._get_obs()

     def _gripper_caging_reward(

metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_stick_pull_v2.py (4 changes: 3 additions & 1 deletion)
@@ -157,7 +157,9 @@ def reset_model(self) -> npt.NDArray[np.float64]:
         self._set_stick_xyz(self.stick_init_pos)
         self._set_obj_xyz(self.obj_init_qpos)
         self.obj_init_pos = self.get_body_com("object").copy()
-        self._set_pos_site("goal", self._target_pos)
+
+        self.model.site("goal").pos = self._target_pos
+
         return self._get_obs()

     def _stick_is_inserted(

metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_stick_push_v2.py (4 changes: 3 additions & 1 deletion)
@@ -154,7 +154,9 @@ def reset_model(self) -> npt.NDArray[np.float64]:
         self._set_stick_xyz(self.stick_init_pos)
         self._set_obj_xyz(self.obj_init_qpos)
         self.obj_init_pos = self.get_body_com("object").copy()
-        self._set_pos_site("goal", self._target_pos)
+
+        self.model.site("goal").pos = self._target_pos
+
         return self._get_obs()

     def _gripper_caging_reward(

@@ -106,10 +106,7 @@ def reset_model(self) -> npt.NDArray[np.float64]:
         self.obj_init_pos = np.concatenate([goal_pos[:2], [self.obj_init_pos[-1]]])

         self._set_obj_xyz(self.obj_init_pos)
-        self.maxPushDist = np.linalg.norm(
-            self.obj_init_pos[:2] - np.array(self._target_pos)[:2]
-        )
-        self._set_pos_site("goal", self._target_pos)
+        self.model.site("goal").pos = self._target_pos
         return self._get_obs()

     def _gripper_caging_reward(

metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_sweep_v2.py (6 changes: 1 addition & 5 deletions)
@@ -101,11 +101,7 @@ def reset_model(self) -> npt.NDArray[np.float64]:
         self._target_pos[1] = obj_pos.copy()[1]

         self._set_obj_xyz(self.obj_init_pos)
-        self.maxPushDist = np.linalg.norm(
-            self.get_body_com("obj")[:-1] - self._target_pos[:-1]
-        )
-        self.target_reward = 1000 * self.maxPushDist + 1000 * 2
-        self._set_pos_site("goal", self._target_pos)
+        self.model.site("goal").pos = self._target_pos
         return self._get_obs()

     def _gripper_caging_reward(

@@ -117,7 +117,7 @@ def reset_model(self) -> npt.NDArray[np.float64]:
             [0.2, 0.0, 0.0]
         )
         self.data.joint("window_slide").qpos = 0.2
-        self._set_pos_site("goal", self._target_pos)
+        self.model.site("goal").pos = self._target_pos
         return self._get_obs()

     def _reset_hand(self, steps: int = 50) -> None:

metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_window_open_v2.py (4 changes: 3 additions & 1 deletion)
@@ -112,7 +112,9 @@ def reset_model(self) -> npt.NDArray[np.float64]:
         self.window_handle_pos_init = self._get_pos_objects()
         self.data.joint("window_slide").qpos = 0.0
         assert self._target_pos is not None
-        self._set_pos_site("goal", self._target_pos)
+
+        self.model.site("goal").pos = self._target_pos
+
         return self._get_obs()

     def compute_reward(
