make_env.py
"""
Code for creating a multiagent environment with one of the scenarios listed
in ./scenarios/.
Can be called by using, for example:
env = make_env('simple_speaker_listener')
After producing the env object, can be used similarly to an OpenAI gym
environment.
A policy using this environment must output actions in the form of a list
for all agents. Each element of the list should be a numpy array,
of size (env.world.dim_p + env.world.dim_c, 1). Physical actions precede
communication actions in this array. See environment.py for more details.
"""
def make_env(scenario_name, benchmark=False):
    '''
    Creates a MultiAgentEnv object as env. This can be used similarly to a
    Gym environment by calling env.reset() and env.step().
    Use env.render() to view the environment on the screen.
    Input:
        scenario_name : name of the scenario from ./scenarios/ to be loaded
                        (without the .py extension)
        benchmark     : whether you want to produce benchmarking data
                        (usually only done during evaluation)
    Some useful env properties (see environment.py):
        .observation_space : returns the observation space for each agent
        .action_space      : returns the action space for each agent
        .n                 : returns the number of agents
    '''
    from multiagent.environment import MultiAgentEnv
    import multiagent.scenarios as scenarios

    # load the scenario module and instantiate its Scenario class
    scenario = scenarios.load(scenario_name + ".py").Scenario()
    # create world
    world = scenario.make_world()
    # create multiagent environment; in benchmark mode, additionally pass the
    # scenario's benchmark_data callback so env.step() can report evaluation info
    if benchmark:
        env = MultiAgentEnv(world, scenario.reset_world, scenario.reward,
                            scenario.observation, scenario.benchmark_data)
    else:
        env = MultiAgentEnv(world, scenario.reset_world, scenario.reward,
                            scenario.observation)
    return env