-
Notifications
You must be signed in to change notification settings - Fork 10
/
DiamondEnv.py
124 lines (97 loc) · 3.72 KB
/
DiamondEnv.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
# -*- coding: utf-8 -*-
"""Specific environment for the Diamond Robot.
"""
__authors__ = "PSC"
__contact__ = "[email protected]"
__version__ = "1.0.0"
__copyright__ = "(c) 2021, Robocath, CNRS, Inria"
__date__ = "Dec 01 2021"
import os
from sofagym.AbstractEnv import AbstractEnv
from sofagym.rpc_server import start_scene
from sofagym.viewer import LegacyViewer
from sofagym.envs.Diamond.DiamondToolbox import startCmd
from gym import spaces
import numpy as np
class DiamondRobotEnv(AbstractEnv):
    """Sub-class of AbstractEnv, dedicated to the Diamond robot scene.

    See the class AbstractEnv for arguments and methods.
    """
    # Directory of this file; used to anchor the default results path.
    path = os.path.dirname(os.path.abspath(__file__))
    metadata = {'render.modes': ['human', 'rgb_array']}
    # Default configuration passed to the SOFA scene server.
    DEFAULT_CONFIG = {"scene": "Diamond",
                      "deterministic": True,
                      "source": [-288, -81, 147],
                      "target": [4, -6, 52],
                      "goalList": [[30.0, 0.0, 150.0], [-30.0, 0.0, 150.0], [0.0, 30.0, 150.0], [0.0, -30.0, 150.0]],
                      "scale_factor": 5,
                      "timer_limit": 50,
                      "timeout": 30,
                      "display_size": (1600, 800),
                      "render": 1,
                      "save_data": True,
                      # os.path.join is portable across platforms (was built with "+" concatenation).
                      "save_path": os.path.join(path, "Results", "Diamond"),
                      "planning": True,
                      "discrete": True,
                      "seed": 0,
                      "start_from_history": None,
                      "python_version": "python3.8",
                      "zFar": 5000,
                      "dt": 0.01
                      }

    def __init__(self, config=None):
        """Initialize the environment.

        Parameters:
        ----------
        config: dict or None, default = None
            Overrides merged into DEFAULT_CONFIG by AbstractEnv.
        """
        super().__init__(config)

        # 8 discrete actions (presumably +/- displacement on each of the
        # 4 actuation cables — TODO confirm against the scene toolbox).
        nb_actions = 8
        self.action_space = spaces.Discrete(nb_actions)
        # Stored as a string because it is forwarded to the RPC scene
        # launcher (start_scene) in that form.
        self.nb_actions = str(nb_actions)

        # Observation: 6 points in 3D, normalized to [-1, 1].
        dim_state = (6, 3)
        low_coordinates = np.full(dim_state, -1.0)
        high_coordinates = np.full(dim_state, 1.0)
        self.observation_space = spaces.Box(low_coordinates, high_coordinates,
                                            dtype='float32')

    def step(self, action):
        """Advance the simulation by one action.

        Forwards the action to the live viewer (if any) so the display
        stays in sync, then delegates to AbstractEnv.step.
        """
        if self.viewer:
            self.viewer.step(action)
        return super().step(action)

    def reset(self):
        """Reset simulation.

        Note:
        ----
        We launch a client to create the scene. The scene of the program is
        client_<scene>Env.py.

        Returns:
        -------
        numpy.ndarray
            The initial observation reported by the scene server.
        """
        super().reset()

        # Sample a fresh goal in a box around the workspace tip.
        # NOTE(review): uses the global np.random stream, so the "seed"
        # config entry does not make this draw reproducible — confirm
        # whether that is intended.
        self.goal = [-30 + 60 * np.random.random(),
                     -30 + 60 * np.random.random(),
                     125 + 20 * np.random.random()]
        self.config.update({'goalPos': self.goal})

        obs = start_scene(self.config, self.nb_actions)
        if self.viewer:
            self.viewer.reset()
        return np.array(obs['observation'])

    def render(self, mode='rgb_array'):
        """See the current state of the environment.

        Get the OpenGL Context to render an image (snapshot) of the simulation
        state.

        Parameters:
        ----------
        mode: string, default = 'rgb_array'
            Type of representation.

        Returns:
        -------
        None.
        """
        # Lazily create the viewer on first call.
        if not self.viewer:
            display_size = self.config["display_size"]  # Sim display
            self.viewer = LegacyViewer(self, display_size, startCmd=startCmd)

        # Use the viewer to display the environment.
        self.viewer.render()

    def get_available_actions(self):
        """Gives the actions available in the environment.

        Parameters:
        ----------
        None.

        Returns:
        -------
        list of the action available in the environment.
        """
        # nb_actions is stored as a string (see __init__), hence int().
        return list(range(int(self.nb_actions)))