diff --git a/.github/workflows/ci-base-tests-linux.yml b/.github/workflows/ci-base-tests-linux.yml index a869e64326..e974896d08 100644 --- a/.github/workflows/ci-base-tests-linux.yml +++ b/.github/workflows/ci-base-tests-linux.yml @@ -31,7 +31,8 @@ jobs: pip install --upgrade pip pip install wheel==0.38.4 pip install -e .[camera_obs,opendrive,test,test_notebook,torch,train,gif_recorder,gymnasium,argoverse,envision,sumo] - if echo ${{matrix.tests}} | grep -q -e "test_rllib_hiway_env.py" -e "test_examples.py"; then pip install -e .[rllib]; fi + if echo ${{matrix.tests}} | grep -q -e "test_rllib_hiway_env.py"; then pip install -e .[rllib]; fi + if echo ${{matrix.tests}} | grep -q -e "test_examples.py"; then pip install -e .[examples,rllib]; fi if echo ${{matrix.tests}} | grep -q -e "/smarts/ray"; then pip install -e .[ray]; fi - name: Build scenarios run: | @@ -71,23 +72,23 @@ jobs: strategy: matrix: tests: - - drive - - platoon + - e10_drive + - e11_platoon steps: - name: Checkout uses: actions/checkout@v2 - name: Install dependencies run: | - cd ${GITHUB_WORKSPACE}/examples/rl/${{matrix.tests}} + cd ${GITHUB_WORKSPACE}/examples/${{matrix.tests}} python3.8 -m venv ${{env.venv_dir}} . ${{env.venv_dir}}/bin/activate pip install --upgrade pip pip install wheel==0.38.4 - pip install -e ./../../../.[camera_obs,argoverse,sumo,test] + pip install -e ./../../.[camera_obs,argoverse,sumo,test] pip install -e ./inference/ - name: Run smoke tests run: | - cd ${GITHUB_WORKSPACE}/examples/rl/${{matrix.tests}} + cd ${GITHUB_WORKSPACE}/examples/${{matrix.tests}} . 
${{env.venv_dir}}/bin/activate PYTHONPATH=$PWD PYTHONHASHSEED=42 pytest -v \ --doctest-modules \ @@ -103,8 +104,8 @@ jobs: strategy: matrix: tests: - - drive - - platoon + - e10_drive + - e11_platoon steps: - name: Checkout uses: actions/checkout@v2 @@ -116,7 +117,7 @@ jobs: pip install --upgrade pip pip install wheel==0.38.4 pip install -e .[camera_obs,argoverse,test,ray,sumo] - scl zoo install examples/rl/${{matrix.tests}}/inference + scl zoo install examples/${{matrix.tests}}/inference - name: Run smoke tests run: | cd ${GITHUB_WORKSPACE} diff --git a/.github/workflows/ci-base-tests-mac.yml b/.github/workflows/ci-base-tests-mac.yml index 9f63721cb4..b311eb4a52 100644 --- a/.github/workflows/ci-base-tests-mac.yml +++ b/.github/workflows/ci-base-tests-mac.yml @@ -49,7 +49,8 @@ jobs: pip install wheel==0.38.4 pip install -r utils/setup/mac_requirements.txt pip install -e .[camera_obs,opendrive,rllib,test,test_notebook,torch,train,argoverse,envision,sumo] - if echo ${{matrix.tests}} | grep -q -e "/env" -e "/examples"; then pip install -e .[rllib]; fi + if echo ${{matrix.tests}} | grep -q -e "/env"; then pip install -e .[rllib]; fi + if echo ${{matrix.tests}} | grep -q -e "/examples"; then pip install -e .[examples,rllib]; fi if echo ${{matrix.tests}} | grep -q "/ray"; then pip install -e .[ray]; fi - name: Run smoke tests run: | diff --git a/.gitignore b/.gitignore index abc9cab2a9..190aeba651 100644 --- a/.gitignore +++ b/.gitignore @@ -73,7 +73,7 @@ target/ celerybeat-schedule # Environments -.venv +.venv* venv .bugtest @@ -150,4 +150,7 @@ collected_observations/ .pytype # Benchmark -**/diagnostic/reports/* \ No newline at end of file +**/diagnostic/reports/* + +# Experiments +outputs/ \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 71aaac0a45..74709ad478 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,12 +10,23 @@ Copy and pasting the git commit messages is __NOT__ enough. 
## [Unreleased] - XXXX-XX-XX ### Added +- SMARTS option `smarts[examples]` added for running SMARTS examples. ### Changed - The following dependencies have been loosened: `numpy`, `opencv`, `torch`. +- Clarified engine configuration location under `logging.info` instead of `print`. +- `ScenarioOrder` enumeration values are now lower-case (e.g. `scrambled` instead of `Scrambled`). +- `EnvReturnMode`, `ScenarioOrder`, and `SumoOptions` have been moved to `smarts.env.configs.hiway_env_configs`. +- `trimesh` version has been loosened to `trimesh>=3.9.29`. ### Deprecated ### Fixed - The `smarts` package now works with `python3.10` and `python3.11`. +- Fixed an issue where default `AgentInterface.events` shared a reference. +- Episode log now lists current value out of maximum rather than index. +- Episode log now correctly shows all agent scores. +- Added `scipy` back to dependencies to fix scenario building. +- Fixed `gymnasium` floating type cast warnings in action conversion. ### Removed +- Removed previously deprecated `SMARTS.timestep_sec` attribute. ### Security ## [1.3.0] - 2023-07-11 diff --git a/README.md b/README.md index de12e49dde..f56ba7af08 100644 --- a/README.md +++ b/README.md @@ -17,37 +17,38 @@ Check out the paper at [SMARTS: Scalable Multi-Agent Reinforcement Learning Trai :rotating_light: :bell: Read the docs :notebook_with_decorative_cover: at [smarts.readthedocs.io](https://smarts.readthedocs.io/) . :bell: :rotating_light: # Examples -### Egoless -Simulate a SMARTS environment without any ego agents, but with only background traffic. -1. [Egoless](examples/egoless.py) example. +### Primitive +1. [Egoless](examples/e1_egoless.py) example. + + Run a SMARTS simulation without any ego agents, but with only background traffic. +1. [Single-Agent](examples/e2_single_agent.py) example. + + Run a SMARTS simulation with a single ego agent. +1. [Multi-Agent](examples/e3_multi_agent.py) example. + + Run a SMARTS simulation with multiple ego agents. +1. 
[Environment Config](examples/e4_environment_config.py) example. + + Demonstrate the main observation/action configuration of the environment. +1. [Agent Zoo](examples/e5_agent_zoo.py) example. + + Demonstrate how the agent zoo works. +1. [Agent interface example](examples/e6_agent_interface.py) + + TODO demonstrate how the agent interface works. -### Control Theory -Several agent control policies and agent [action types](smarts/core/controllers/__init__.py) are demonstrated. +### Integration examples +A few more complex integrations are demonstrated. -1. Chase Via Points - + script: [control/chase_via_points.py](examples/control/chase_via_points.py) - + Multi agent - + ActionSpaceType: LaneWithContinuousSpeed -1. Trajectory Tracking - + script: [control/trajectory_tracking.py](examples/control/trajectory_tracking.py) - + ActionSpaceType: Trajectory -1. OpEn Adaptive Control - + script: [control/ego_open_agent.py](examples/control/ego_open_agent.py) - + ActionSpaceType: MPC -1. Laner - + script: [control/laner.py](examples/control/laner.py) - + Multi agent - + ActionSpaceType: Lane +1. Configurable example + + script: [examples/e7_experiment_base.py](examples/e7_experiment_base.py) + + Configurable agent number. + + Configurable agent type. + + Configurable environment. 1. Parallel environments - + script: [control/parallel_environment.py](examples/control/parallel_environment.py) + + script: [examples/e8_parallel_environment.py](examples/e8_parallel_environment.py) + Multiple SMARTS environments in parallel + ActionSpaceType: LaneWithContinuousSpeed -### RL Model -1. [Drive](examples/rl/drive). See [Driving SMARTS 2023.1 & 2023.2](https://smarts.readthedocs.io/en/latest/benchmarks/driving_smarts_2023_1.html) for more info. -1. [VehicleFollowing](examples/rl/platoon). See [Driving SMARTS 2023.3](https://smarts.readthedocs.io/en/latest/benchmarks/driving_smarts_2023_3.html) for more info. -1. [PG](examples/rl/rllib/pg_example.py). 
See [RLlib](https://smarts.readthedocs.io/en/latest/ecosystem/rllib.html) for more info. -1. [PG Population Based Training](examples/rl/rllib/pg_pbt_example.py). See [RLlib](https://smarts.readthedocs.io/en/latest/ecosystem/rllib.html) for more info. +### RL Examples +1. [Drive](examples/e10_drive). See [Driving SMARTS 2023.1 & 2023.2](https://smarts.readthedocs.io/en/latest/benchmarks/driving_smarts_2023_1.html) for more info. +1. [VehicleFollowing](examples/e11_platoon). See [Driving SMARTS 2023.3](https://smarts.readthedocs.io/en/latest/benchmarks/driving_smarts_2023_3.html) for more info. +1. [PG](examples/e12_rllib/pg_example.py). See [RLlib](https://smarts.readthedocs.io/en/latest/ecosystem/rllib.html) for more info. +1. [PG Population Based Training](examples/e12_rllib/pg_pbt_example.py). See [RLlib](https://smarts.readthedocs.io/en/latest/ecosystem/rllib.html) for more info. ### RL Environment 1. [ULTRA](https://github.com/smarts-project/smarts-project.rl/blob/master/ultra) provides a gym-based environment built upon SMARTS to tackle intersection navigation, specifically the unprotected left turn. diff --git a/docs/benchmarks/driving_smarts_2023_1.rst b/docs/benchmarks/driving_smarts_2023_1.rst index 5e598af5a0..780b60e2a2 100644 --- a/docs/benchmarks/driving_smarts_2023_1.rst +++ b/docs/benchmarks/driving_smarts_2023_1.rst @@ -178,7 +178,7 @@ the user. agent_params=agent_params, ) - register(locator="contrib-agent-v0", entry_point=entry_point) + register("contrib-agent-v0", entry_point=entry_point) + User may fill in the ``<...>`` spaces in the template. + User may specify the ego's interface by configuring any field of :class:`~smarts.core.agent_interface.AgentInterface`, except @@ -239,7 +239,7 @@ Example ------- An example training and inference code is provided for this benchmark. -See the :examples:`rl/drive` example. The example uses PPO algorithm from +See the :examples:`e10_drive` example. 
The example uses PPO algorithm from `Stable Baselines3 `_ reinforcement learning library. It uses :attr:`~smarts.core.controllers.action_space_type.ActionSpaceType.RelativeTargetPose` action space. Instructions for training and evaluating the example is as follows. @@ -251,12 +251,12 @@ Train .. code-block:: bash # In terminal-A - $ cd /SMARTS/examples/rl/drive + $ cd /SMARTS/examples/e10_drive $ python3.8 -m venv ./.venv $ source ./.venv/bin/activate $ pip install --upgrade pip $ pip install wheel==0.38.4 - $ pip install -e ./../../../.[camera_obs,argoverse,envision,sumo] + $ pip install -e ./../../.[camera_obs,argoverse,envision,sumo] $ pip install -e ./inference/ + Train locally without visualization @@ -271,7 +271,7 @@ Train .. code-block:: bash # In a different terminal-B - $ cd /SMARTS/examples/rl/drive + $ cd /SMARTS/examples/e10_drive $ source ./.venv/bin/activate $ scl envision start # Open http://localhost:8081/ @@ -281,7 +281,7 @@ Train # In terminal-A $ python3.8 train/run.py --head -+ Trained models are saved by default inside the ``/SMARTS/examples/rl/drive/train/logs/`` folder. ++ Trained models are saved by default inside the ``/SMARTS/examples/e10_drive/train/logs/`` folder. Docker ^^^^^^ @@ -290,14 +290,14 @@ Docker .. code-block:: bash $ cd /SMARTS - $ docker build --file=./examples/rl/drive/train/Dockerfile --network=host --tag=drive . + $ docker build --file=./examples/e10_drive/train/Dockerfile --network=host --tag=drive . $ docker run --rm -it --network=host --gpus=all drive - (container) $ cd /SMARTS/examples/rl/drive + (container) $ cd /SMARTS/examples/e10_drive (container) $ python3.8 train/run.py Evaluate ^^^^^^^^ -+ Choose a desired saved model from the previous training step, rename it as ``saved_model.zip``, and move it to ``/SMARTS/examples/rl/drive/inference/contrib_policy/saved_model.zip``. 
++ Choose a desired saved model from the previous training step, rename it as ``saved_model.zip``, and move it to ``/SMARTS/examples/e10_drive/inference/contrib_policy/saved_model.zip``. + Evaluate locally .. code-block:: bash @@ -308,11 +308,11 @@ Evaluate $ pip install --upgrade pip $ pip install wheel==0.38.4 $ pip install -e .[camera_obs,argoverse,envision,sumo] - $ scl zoo install examples/rl/drive/inference + $ scl zoo install examples/e10_drive/inference # For Driving SMARTS 2023.1 - $ scl benchmark run driving_smarts_2023_1 examples.rl.drive.inference:contrib-agent-v0 --auto-install + $ scl benchmark run driving_smarts_2023_1 examples.e10_drive.inference:contrib-agent-v0 --auto-install # For Driving SMARTS 2023.2 - $ scl benchmark run driving_smarts_2023_2 examples.rl.drive.inference:contrib-agent-v0 --auto-install + $ scl benchmark run driving_smarts_2023_2 examples.e10_drive.inference:contrib-agent-v0 --auto-install Zoo agents ---------- diff --git a/docs/benchmarks/driving_smarts_2023_3.rst b/docs/benchmarks/driving_smarts_2023_3.rst index 0a028b7e81..48716ca78b 100644 --- a/docs/benchmarks/driving_smarts_2023_3.rst +++ b/docs/benchmarks/driving_smarts_2023_3.rst @@ -156,7 +156,7 @@ the user. agent_params=agent_params, ) - register(locator="contrib-agent-v0", entry_point=entry_point) + register("contrib-agent-v0", entry_point=entry_point) + User may fill in the ``<...>`` spaces in the template. + User may specify the ego's interface by configuring any field of :class:`~smarts.core.agent_interface.AgentInterface`, except @@ -217,7 +217,7 @@ Example ------- An example training and inference code is provided for this benchmark. -See the :examples:`rl/platoon` example. The example uses PPO algorithm from +See the :examples:`e11_platoon` example. The example uses PPO algorithm from `Stable Baselines3 `_ reinforcement learning library. It uses :attr:`~smarts.core.controllers.action_space_type.ActionSpaceType.Continuous` action space. 
Instructions for training and evaluating the example is as follows. @@ -229,12 +229,12 @@ Train .. code-block:: bash # In terminal-A - $ cd /SMARTS/examples/rl/platoon + $ cd /SMARTS/examples/e11_platoon $ python3.8 -m venv ./.venv $ source ./.venv/bin/activate $ pip install --upgrade pip $ pip install wheel==0.38.4 - $ pip install -e ./../../../.[camera_obs,argoverse,envision,sumo] + $ pip install -e ./../../.[camera_obs,argoverse,envision,sumo] $ pip install -e ./inference/ + Train locally without visualization @@ -249,7 +249,7 @@ Train .. code-block:: bash # In a different terminal-B - $ cd /SMARTS/examples/rl/platoon + $ cd /SMARTS/examples/e11_platoon $ source ./.venv/bin/activate $ scl envision start # Open http://localhost:8081/ @@ -259,7 +259,7 @@ Train # In terminal-A $ python3.8 train/run.py --head -+ Trained models are saved by default inside the ``/SMARTS/examples/rl/platoon/train/logs/`` folder. ++ Trained models are saved by default inside the ``/SMARTS/examples/e11_platoon/train/logs/`` folder. Docker ^^^^^^ @@ -268,14 +268,14 @@ Docker .. code-block:: bash $ cd /SMARTS - $ docker build --file=./examples/rl/platoon/train/Dockerfile --network=host --tag=platoon . + $ docker build --file=./examples/e11_platoon/train/Dockerfile --network=host --tag=platoon . $ docker run --rm -it --network=host --gpus=all platoon - (container) $ cd /SMARTS/examples/rl/platoon + (container) $ cd /SMARTS/examples/e11_platoon (container) $ python3.8 train/run.py Evaluate ^^^^^^^^ -+ Choose a desired saved model from the previous training step, rename it as ``saved_model.zip``, and move it to ``/SMARTS/examples/rl/platoon/inference/contrib_policy/saved_model.zip``. ++ Choose a desired saved model from the previous training step, rename it as ``saved_model.zip``, and move it to ``/SMARTS/examples/e11_platoon/inference/contrib_policy/saved_model.zip``. + Evaluate locally .. 
code-block:: bash @@ -286,8 +286,8 @@ Evaluate $ pip install --upgrade pip $ pip install wheel==0.38.4 $ pip install -e .[camera_obs,argoverse,envision,sumo] - $ scl zoo install examples/rl/platoon/inference - $ scl benchmark run driving_smarts_2023_3 examples.rl.platoon.inference:contrib-agent-v0 --auto-install + $ scl zoo install examples/e11_platoon/inference + $ scl benchmark run driving_smarts_2023_3 examples.e11_platoon.inference:contrib-agent-v0 --auto-install Zoo agents ---------- diff --git a/docs/conf.py b/docs/conf.py index 186800f968..7882a21fb5 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -121,6 +121,7 @@ ("py:class", "ActType"), ("py:class", "ObsType"), ("py:class", "smarts.env.gymnasium.wrappers.metric.utils.T"), + ("py:class", "enum.Enum"), } nitpick_ignore_regex = { (r"py:.*", r"av2\..*"), diff --git a/docs/ecosystem/rllib.rst b/docs/ecosystem/rllib.rst index 0b09585d4a..778eb26142 100644 --- a/docs/ecosystem/rllib.rst +++ b/docs/ecosystem/rllib.rst @@ -10,9 +10,9 @@ deep learning frameworks. SMARTS contains two examples using `Policy Gradients (PG) `_. -1. ``rllib/pg_example.py`` +1. ``e12_rllib/pg_example.py`` This example shows the basics of using RLlib with SMARTS through :class:`~smarts.env.rllib_hiway_env.RLlibHiWayEnv`. -1. ``rllib/pg_pbt_example.py`` +1. ``e12_rllib/pg_pbt_example.py`` This example combines Policy Gradients with `Population Based Training (PBT) `_ scheduling. Recommended reads diff --git a/docs/quickstart.rst b/docs/quickstart.rst index 4724004f3e..b342a963d5 100644 --- a/docs/quickstart.rst +++ b/docs/quickstart.rst @@ -16,14 +16,14 @@ A typical workflow would look like this. Example ------- -In this quickstart guide, we will run the `Chase Via Points` example. Here, +In this quickstart guide, we will run the `multi-agent` example. Here, 1. a pre-designed scenario :scenarios:`scenarios/sumo/loop ` is used. -2. 
a simple agent with `interface` == :attr:`~smarts.core.agent_interface.AgentType.LanerWithSpeed` and `policy` == `Chase Via Points` is demonstrated. The agent chases via points or follows nearby waypoints if a via point is unavailable. +2. a simple agent with `interface` == :attr:`~smarts.core.agent_interface.AgentType.Laner` and `policy` == `Random Laner` is demonstrated. The agent randomly chooses a lane action at each step. -File: :examples:`examples/control/chase_via_points.py ` +File: :examples:`examples/e3_multi_agent.py ` -.. literalinclude:: ../examples/control/chase_via_points.py +.. literalinclude:: ../examples/e3_multi_agent.py :language: python Use the `scl` command to run SMARTS together with it's supporting processes. @@ -31,10 +31,8 @@ Use the `scl` command to run SMARTS together with it's supporting processes. .. code-block:: bash $ cd /SMARTS - # Build the scenario `scenarios/sumo/loop`. - $ scl scenario build scenarios/sumo/loop # Run SMARTS simulation with Envision display and `loop` scenario. - $ scl run --envision examples/control/chase_via_points.py scenarios/sumo/loop + $ scl run --envision examples/e3_multi_agent.py scenarios/sumo/loop Visit `http://localhost:8081/ `_ to view the experiment. diff --git a/docs/sim/agent.rst b/docs/sim/agent.rst index ecc5b86823..10b9b13818 100644 --- a/docs/sim/agent.rst +++ b/docs/sim/agent.rst @@ -59,7 +59,7 @@ Next, a minimal example of how to create and register an agent is illustrated. # Register the agent. 
register( - locator="follow-waypoints-v0", + "follow-waypoints-v0", entry_point=entry_point, ) diff --git a/envision/client.py b/envision/client.py index 0a250c7058..e3eb247fde 100644 --- a/envision/client.py +++ b/envision/client.py @@ -256,6 +256,7 @@ def on_error(ws, error): def on_open(ws): nonlocal connection_established connection_established = True + self._log.info(f"Envision connection established at `{ws.url}`.") while True: state = state_queue.get() @@ -285,7 +286,7 @@ def run_socket(endpoint, wait_between_retries): ws.run_forever() if not connection_established: - self._log.info(f"Attempt {tries} to connect to Envision.") + self._log.debug(f"Attempt {tries} to connect to Envision.") if not state_queue.empty(): break else: diff --git a/envision/tests/test_data_replay.py b/envision/tests/test_data_replay.py index 32231bf292..a98bbe5c2c 100644 --- a/envision/tests/test_data_replay.py +++ b/envision/tests/test_data_replay.py @@ -81,6 +81,7 @@ def __init__(self, endpoint, on_error, on_close, on_open): self._on_error = on_error self._on_close = on_close self._on_open = on_open + self.url = "localhost:0000" def send(self, data): sent.put(data) diff --git a/envision/web/src/client.js b/envision/web/src/client.js index a757bc453c..4084e13ee7 100644 --- a/envision/web/src/client.js +++ b/envision/web/src/client.js @@ -88,8 +88,8 @@ export default class Client { this._lastSeek = new Promise((resolve) => resolve( - this._sockets[simulationId].send(JSON.stringify({ seek: seconds })), - ), + this._sockets[simulationId].send(JSON.stringify({ seek: seconds })) + ) ) .then(wait(500)) .finally(() => { @@ -124,7 +124,7 @@ export default class Client { ? Infinity : value === "-Infinity" ? -Infinity - : value, + : value ); if ( stateQueue.length > 0 && @@ -150,7 +150,7 @@ export default class Client { // recent events in the simulation (when not in near-real-time // playing mode). 
stateQueue = stateQueue.filter( - (frame, ind) => ind % 2 == 0 && ind > 5, + (frame, ind) => ind % 2 == 0 && ind > 5 ); self._stateQueues[simulationId] = stateQueue; break; @@ -158,7 +158,7 @@ export default class Client { case frameBufferModes.PRIMACY_BIAS: { // newer frames have a higher probability of being evicted... let removeIndex = Math.floor( - stateQueue.length * Math.sqrt(Math.random()), + stateQueue.length * Math.sqrt(Math.random()) ); stateQueue.splice(removeIndex, 1); break; @@ -168,7 +168,7 @@ export default class Client { // randomly choose a frame to remove... // spread the degradation randomly throughout the history. let removeIndex = Math.floor( - stateQueue.length * Math.random(), + stateQueue.length * Math.random() ); stateQueue.splice(removeIndex, 1); } @@ -185,7 +185,7 @@ export default class Client { socket.onerror = (error) => { console.warn( `Socket encountered error=${error.message} ` + - `trying to connect to endpoint=${url}`, + `trying to connect to endpoint=${url}` ); reject(error); }; @@ -194,7 +194,7 @@ export default class Client { } catch (error) { if (remainingRetries === 0) throw error; console.info( - `Retrying connection, attempts remaining=${remainingRetries}`, + `Retrying connection, attempts remaining=${remainingRetries}` ); remainingRetries -= 1; @@ -202,7 +202,7 @@ export default class Client { return await this._obtainStream( simulationId, stateQueue, - remainingRetries, + remainingRetries ); } } @@ -239,7 +239,7 @@ export default class Client { this._sockets[simulationId] = await this._obtainStream( simulationId, this._stateQueues[simulationId], - this._maxRetries, + this._maxRetries ); } diff --git a/envision/web/src/components/app.js b/envision/web/src/components/app.js index 7804fb5d75..178ae3566a 100644 --- a/envision/web/src/components/app.js +++ b/envision/web/src/components/app.js @@ -100,7 +100,7 @@ function App({ client }) { { type: "canvas", mimeType: "video/webm;codecs=h264", - }, + } ); await 
recorderRef.current.startRecording(); } @@ -114,7 +114,7 @@ function App({ client }) { let outputBlob = await transcode(blob, onMessage); invokeSaveAsDialog( outputBlob, - `envision-${Math.round(Date.now() / 1000)}.mp4`, + `envision-${Math.round(Date.now() / 1000)}.mp4` ); } diff --git a/envision/web/src/components/bubbles.js b/envision/web/src/components/bubbles.js index bb19ac57d1..239a8265bc 100644 --- a/envision/web/src/components/bubbles.js +++ b/envision/web/src/components/bubbles.js @@ -52,7 +52,7 @@ export default function Bubbles({ scene, worldState }) { shape: points, depth: 5, }, - scene, + scene ); polygon.position.y = 4; let material = new StandardMaterial(`bubble-${idx}-material`, scene); diff --git a/envision/web/src/components/camera.js b/envision/web/src/components/camera.js index 5ae0cf37bb..92698b64d3 100644 --- a/envision/web/src/components/camera.js +++ b/envision/web/src/components/camera.js @@ -42,7 +42,7 @@ export default function Camera({ scene, roadNetworkBbox, egoView }) { let egoCamera = new UniversalCamera( "ego-camera", new Vector3(0, 5, -15), // Relative to camera root position - scene, + scene ); egoCamera.parent = egoCamRoot; egoCamRootRef.current = egoCamRoot; @@ -55,7 +55,7 @@ export default function Camera({ scene, roadNetworkBbox, egoView }) { 0, // beta 200, // radius new Vector3(0, 0, 0), // target - scene, + scene ); thirdPersonCamera.attachControl(canvas, true); thirdPersonCamera.panningSensibility = 50; @@ -79,7 +79,7 @@ export default function Camera({ scene, roadNetworkBbox, egoView }) { thirdPersonCamera.target.z = mapCenter[1]; thirdPersonCamera.radius = Math.max( Math.abs(roadNetworkBbox[0] - roadNetworkBbox[2]), - Math.abs(roadNetworkBbox[1] - roadNetworkBbox[3]), + Math.abs(roadNetworkBbox[1] - roadNetworkBbox[3]) ); }, [JSON.stringify(roadNetworkBbox)]); diff --git a/envision/web/src/components/control_panel.js b/envision/web/src/components/control_panel.js index 1f9387806e..b6f287f687 100644 --- 
a/envision/web/src/components/control_panel.js +++ b/envision/web/src/components/control_panel.js @@ -101,7 +101,7 @@ export default function ControlPanel({ showControls, toggleControlModes }) { if (checkedKeys.includes(info.node.key)) { // remove from list setCheckedKeys((prevKeys) => - prevKeys.filter((key) => key != info.node.key), + prevKeys.filter((key) => key != info.node.key) ); toggleControlModes({ [info.node.key]: false }); } else { diff --git a/envision/web/src/components/driven_paths.js b/envision/web/src/components/driven_paths.js index 911f9fed77..e74aff5c00 100644 --- a/envision/web/src/components/driven_paths.js +++ b/envision/web/src/components/driven_paths.js @@ -66,11 +66,11 @@ export default function DrivenPaths({ } else { let geomPos = new Vector2( drivenPath[i].position.x, - drivenPath[i].position.z, + drivenPath[i].position.z ); let newGeomPos = Vector2.Center( new Vector2(...worldState.traffic[vehicle_id].driven_path[0]), - new Vector2(...worldState.traffic[vehicle_id].driven_path[1]), + new Vector2(...worldState.traffic[vehicle_id].driven_path[1]) ); if (geomPos.equalsWithEpsilon(newGeomPos, 0.0001)) { newDrivenPathGeometries[vehicle_id] = []; @@ -88,11 +88,11 @@ export default function DrivenPaths({ if (egoDrivenPathModel.material == null) { egoDrivenPathModel.material = new StandardMaterial( "ego-driven-path-material", - scene, + scene ); egoDrivenPathModel.material.specularColor = new Color3(0, 0, 0); egoDrivenPathModel.material.diffuseColor = new Color4( - ...SceneColors.EgoDrivenPath, + ...SceneColors.EgoDrivenPath ); egoDrivenPathModel.material.alpha = SceneColors.EgoDrivenPath[3]; } @@ -100,7 +100,7 @@ export default function DrivenPaths({ if (socialDrivenPathModel.material == null) { socialDrivenPathModel.material = new StandardMaterial( "social-driven-path-material", - scene, + scene ); socialDrivenPathModel.material.specularColor = new Color3(0, 0, 0); let color = vehicleMeshColor(ActorTypes.SOCIAL_AGENT); @@ -111,7 +111,7 @@ 
export default function DrivenPaths({ // Add in new driven path segments let drivenPathOffsetY = 0.1; for (const [vehicle_id, trafficActor] of Object.entries( - worldState.traffic, + worldState.traffic )) { if (!(vehicle_id in newDrivenPathGeometries)) { newDrivenPathGeometries[vehicle_id] = []; @@ -125,23 +125,23 @@ export default function DrivenPaths({ let drivenPathSegment_ = null; if (trafficActor.actor_type == ActorTypes.SOCIAL_AGENT) { drivenPathSegment_ = socialDrivenPathModel.createInstance( - "social-driven-path-segment", + "social-driven-path-segment" ); } else { drivenPathSegment_ = egoDrivenPathModel.createInstance( - "ego-driven-path-segment", + "ego-driven-path-segment" ); } let p0 = new Vector3( trafficActor.driven_path[i][0], drivenPathOffsetY, - trafficActor.driven_path[i][1], + trafficActor.driven_path[i][1] ); let p1 = new Vector3( trafficActor.driven_path[i + 1][0], drivenPathOffsetY, - trafficActor.driven_path[i + 1][1], + trafficActor.driven_path[i + 1][1] ); drivenPathSegment_.position = Vector3.Center(p0, p1); @@ -154,7 +154,7 @@ export default function DrivenPaths({ drivenPathSegment_.rotation = Vector3.RotationFromAxis( axis1, axis2, - axis3, + axis3 ); newDrivenPathGeometries[vehicle_id].push(drivenPathSegment_); diff --git a/envision/web/src/components/mission_routes.js b/envision/web/src/components/mission_routes.js index d0ef2b4338..b2ca5672ae 100644 --- a/envision/web/src/components/mission_routes.js +++ b/envision/web/src/components/mission_routes.js @@ -64,12 +64,12 @@ export default function MissionRoutes({ scene, worldState }) { let polygon = MeshBuilder.CreatePolygon( `mission-route-shape-${vehicle_id}-${shape_id}`, { shape: points }, - scene, + scene ); polygon.position.y = 0.1; polygon.material = new StandardMaterial( `mission-route-shape-${vehicle_id}-${shape_id}-material`, - scene, + scene ); polygon.material.diffuseColor = new Color4(...SceneColors.MissionRoute); polygon.material.alpha = SceneColors.MissionRoute[3]; diff 
--git a/envision/web/src/components/simulation.js b/envision/web/src/components/simulation.js index e56f705152..e90a89becf 100644 --- a/envision/web/src/components/simulation.js +++ b/envision/web/src/components/simulation.js @@ -129,28 +129,28 @@ export default function Simulation({ let cylinder_ = MeshBuilder.CreateCylinder( "waypoint", { diameterTop: 0.5, diameterBottom: 0.5, height: 0.01 }, - scene_, + scene_ ); cylinder_.isVisible = false; setEgoWaypointModel(cylinder_.clone("ego-waypoint").makeGeometryUnique()); setSocialWaypointModel( - cylinder_.clone("social-waypoint").makeGeometryUnique(), + cylinder_.clone("social-waypoint").makeGeometryUnique() ); // Driven path cuboid let cuboid_ = MeshBuilder.CreateBox( "drivenPath", { height: 0.3, width: 1, depth: 0.01 }, - scene_, + scene_ ); cuboid_.isVisible = false; setEgoDrivenPathModel( - cuboid_.clone("ego-driven-path").makeGeometryUnique(), + cuboid_.clone("ego-driven-path").makeGeometryUnique() ); setSocialDrivenPathModel( - cuboid_.clone("social-driven-path").makeGeometryUnique(), + cuboid_.clone("social-driven-path").makeGeometryUnique() ); // Light @@ -226,7 +226,7 @@ export default function Simulation({ `load_gltf_extras_${worldState.scenario_id}`, function (loader) { return new LoadGLTFExtras(loader, worldState.scenario_id); - }, + } ); SceneLoader.ImportMesh("", mapRootUrl, mapFilename, scene, (meshes) => { @@ -250,7 +250,7 @@ export default function Simulation({ child.actionManager = new ActionManager(scene); child.actionManager.registerAction( new ExecuteCodeAction(ActionManager.OnPointerOverTrigger, function ( - evt, + evt ) { material.diffuseColor = roadColorSelected; setMapElementSelected(true); @@ -259,23 +259,23 @@ export default function Simulation({ lane_id: child.metadata.gltf.extras.lane_id, lane_index: child.metadata.gltf.extras.lane_index, }); - }), + }) ); child.actionManager.registerAction( new ExecuteCodeAction(ActionManager.OnPointerOutTrigger, function ( - evt, + evt ) { 
material.diffuseColor = roadColor; setMapElementSelected(false); setDebugInfo({}); - }), + }) ); } mapMeshesRef.current = meshes; GLTFLoader.UnregisterExtension( - `load_gltf_extras_${worldState.scenario_id}`, + `load_gltf_extras_${worldState.scenario_id}` ); }); }, [scene, worldState.scenario_id]); @@ -370,7 +370,7 @@ export default function Simulation({ attrName="Position" data_formattter={(position) => `x: ${parseFloat(position[0]).toFixed(2)} y: ${parseFloat( - position[1], + position[1] ).toFixed(2)}` } ego_agent_ids={worldState.ego_agent_ids} diff --git a/envision/web/src/components/traffic_dividers.js b/envision/web/src/components/traffic_dividers.js index 1d5ca503b5..564365c82a 100644 --- a/envision/web/src/components/traffic_dividers.js +++ b/envision/web/src/components/traffic_dividers.js @@ -50,7 +50,7 @@ export default function TrafficDividers({ let dashLine = MeshBuilder.CreateDashedLines( `lane-divider-${idx}`, { points: points, updatable: false, dashSize: 1, gapSize: 2 }, - scene, + scene ); dashLine.color = new Color4(...SceneColors.LaneDivider); return dashLine; @@ -77,7 +77,7 @@ export default function TrafficDividers({ let newEdgeDividers = MeshBuilder.CreateLineSystem( "edge-dividers", { lines: edgeDividerPoints, updatable: false }, - scene, + scene ); newEdgeDividers.color = new Color4(...SceneColors.EdgeDivider); diff --git a/envision/web/src/components/traffic_signals.js b/envision/web/src/components/traffic_signals.js index 2598fa50db..b7190bdd64 100644 --- a/envision/web/src/components/traffic_signals.js +++ b/envision/web/src/components/traffic_signals.js @@ -57,7 +57,7 @@ export default function TrafficSignals({ scene, worldState }) { let mesh = MeshBuilder.CreateDisc( `signal-${signalName}`, { radius: 0.8 }, - scene, + scene ); mesh.position = point; let axis = new Vector3(1, 0, 0); @@ -66,14 +66,14 @@ export default function TrafficSignals({ scene, worldState }) { let color = signalColorMap[state]; let material = new StandardMaterial( 
`signal-${signalName}-material`, - scene, + scene ); material.diffuseColor = new Color4(...color); material.specularColor = new Color3(0, 0, 0); mesh.material = material; mesh.isVisible = true; return mesh; - }, + } ); signalGeometryRef.current = newSignalGeometry; diff --git a/envision/web/src/components/vehicles.js b/envision/web/src/components/vehicles.js index 354d49cec3..7f7b7f3f94 100644 --- a/envision/web/src/components/vehicles.js +++ b/envision/web/src/components/vehicles.js @@ -84,7 +84,7 @@ export default function Vehicles({ // Load mesh asynchronously useEffect(() => { for (const [vehicleFilename, meshTemplate] of Object.entries( - vehicleMeshTemplates, + vehicleMeshTemplates )) { if (meshTemplate != null) { continue; @@ -107,7 +107,7 @@ export default function Vehicles({ let material = new StandardMaterial( `material-${vehicleFilename}`, - scene, + scene ); material.backFaceCulling = false; material.disableLighting = true; @@ -121,7 +121,7 @@ export default function Vehicles({ material.id = child.material.id; child.instancedBuffers.color = Color4.FromColor3( child.material.albedoColor, - 1, + 1 ); } child.material = material; @@ -151,7 +151,7 @@ export default function Vehicles({ newRoot.setBoundingInfo(new BoundingInfo(rootMeshMin, rootMeshMax)); vehicleMeshTemplates[vehicleFilename] = newRoot; - }, + } ); } // This useEffect is triggered when the vehicleMeshTemplate's keys() change @@ -214,7 +214,7 @@ export default function Vehicles({ if (state.interest && instancedSubMesh.material.id == "body") { instancedSubMesh.instancedBuffers.color = new Color4( ...SceneColors.Interest, - 1, + 1 ); } else if ( state.actor_type == ActorTypes.SOCIAL_VEHICLE || @@ -235,12 +235,12 @@ export default function Vehicles({ width: boxSize.x + 0.1, depth: boxSize.z + 0.1, }, - scene, + scene ); let boxMaterial = new StandardMaterial( `boundingbox-${meshId}-material`, - scene, + scene ); boxMaterial.diffuseColor = new Color4(...SceneColors.Selection); 
boxMaterial.specularColor = new Color3(0, 0, 0); @@ -251,21 +251,21 @@ export default function Vehicles({ box.actionManager = new ActionManager(scene); box.actionManager.registerAction( new ExecuteCodeAction(ActionManager.OnPointerOverTrigger, function ( - evt, + evt ) { boxMaterial.alpha = 0.5; setVehicleSelected(true); setDebugInfo(evt.meshUnderPointer.parent.metadata.debugInfo); - }), + }) ); box.actionManager.registerAction( new ExecuteCodeAction(ActionManager.OnPointerOutTrigger, function ( - evt, + evt ) { boxMaterial.alpha = 0.0; setVehicleSelected(false); setDebugInfo({}); - }), + }) ); rootMesh.addChild(box); diff --git a/envision/web/src/components/waypoints.js b/envision/web/src/components/waypoints.js index 8aa5888dd1..bd987c088a 100644 --- a/envision/web/src/components/waypoints.js +++ b/envision/web/src/components/waypoints.js @@ -51,11 +51,11 @@ export default function Waypoints({ if (egoWaypointModel.material == null) { egoWaypointModel.material = new StandardMaterial( "ego-waypoint-material", - scene, + scene ); egoWaypointModel.material.specularColor = new Color3(0, 0, 0); egoWaypointModel.material.diffuseColor = new Color4( - ...SceneColors.EgoWaypoint, + ...SceneColors.EgoWaypoint ); egoWaypointModel.material.alpha = SceneColors.EgoWaypoint[3]; } @@ -63,7 +63,7 @@ export default function Waypoints({ if (socialWaypointModel.material == null) { socialWaypointModel.material = new StandardMaterial( "social-waypoint-material", - scene, + scene ); socialWaypointModel.material.specularColor = new Color3(0, 0, 0); let color = vehicleMeshColor(ActorTypes.SOCIAL_AGENT); diff --git a/envision/web/src/helpers/state_unpacker.js b/envision/web/src/helpers/state_unpacker.js index a406c172f2..8241f83ca8 100644 --- a/envision/web/src/helpers/state_unpacker.js +++ b/envision/web/src/helpers/state_unpacker.js @@ -116,7 +116,7 @@ function unpack_waypoints(lanes) { speed_limit: wp[Waypoint.SPEED_LIMIT], lane_index: wp[Waypoint.LANE_INDEX], }; - }), + }) ); } @@ 
-143,13 +143,13 @@ function unpack_traffic(traffic) { driven_path: unpack_driven_path(t[Traffic.DRIVEN_PATH]), point_cloud: unpack_point_cloud(t[Traffic.POINT_CLOUD]), mission_route_geometry: unpack_route_geometry( - t[Traffic.MISSION_ROUTE_GEOMETRY], + t[Traffic.MISSION_ROUTE_GEOMETRY] ), actor_type: AGENT_TYPE_MAP[t[Traffic.ACTOR_TYPE]], vehicle_type: VEHICLE_TYPE_MAP[t[Traffic.VEHICLE_TYPE]], interest: t[Traffic.INTEREST], }, - })), + })) ); return mapped_traffic; } @@ -162,10 +162,10 @@ function unpack_signals(signals) { state: t[TrafficSignal.STATE], position: t.slice( TrafficSignal.POSITION_BEGIN, - TrafficSignal.POSITION_END, + TrafficSignal.POSITION_END ), }, - })), + })) ); return mapped_signals; } @@ -174,7 +174,7 @@ function get_attribute_map(unpacked_traffic, attr) { return Object.fromEntries( Object.entries(unpacked_traffic) .filter(([_, t]) => t.actor_type === AGENT_TYPE_MAP[2]) - .map(([n, t]) => [n, t[attr]]), + .map(([n, t]) => [n, t[attr]]) ); } @@ -182,7 +182,7 @@ export default function unpack_worldstate(formatted_state) { let unpacked_bubbles = unpack_bubbles(formatted_state[WorldState.BUBBLES]); let unpacked_traffic = unpack_traffic(formatted_state[WorldState.TRAFFIC]); let unpacked_signals = unpack_signals( - formatted_state[WorldState.TRAFFIC_SIGNALS], + formatted_state[WorldState.TRAFFIC_SIGNALS] ); const worldstate = { traffic: unpacked_traffic, diff --git a/envision/web/src/helpers/transcode.js b/envision/web/src/helpers/transcode.js index fb168db940..87b3f486a5 100644 --- a/envision/web/src/helpers/transcode.js +++ b/envision/web/src/helpers/transcode.js @@ -40,7 +40,7 @@ export default async function transcode(blob, onMessage = (message) => {}) { // is switch the container. 
"-c:v", "copy", - "output.mp4", + "output.mp4" ); onMessage("Transcoding complete"); diff --git a/envision/web/src/index.html b/envision/web/src/index.html index 7d7a382db5..2434dbaa56 100644 --- a/envision/web/src/index.html +++ b/envision/web/src/index.html @@ -1,4 +1,4 @@ - + diff --git a/envision/web/src/index.js b/envision/web/src/index.js index f032b10f39..2dc71be64c 100644 --- a/envision/web/src/index.js +++ b/envision/web/src/index.js @@ -45,5 +45,5 @@ ReactDOM.render( , - document.getElementById("root"), + document.getElementById("root") ); diff --git a/envision/web/src/render_helpers.js b/envision/web/src/render_helpers.js index a29adc182f..3d101a4165 100644 --- a/envision/web/src/render_helpers.js +++ b/envision/web/src/render_helpers.js @@ -81,7 +81,7 @@ export function buildLabel(name, text, scene) { name, { width: width, height: height }, scene, - false, + false ); plane.billboardMode = Mesh.BILLBOARDMODE_ALL; @@ -89,7 +89,7 @@ export function buildLabel(name, text, scene) { `${name}-texture`, { width: width * 100, height: height * 100 }, scene, - true, + true ); texture.hasAlpha = true; texture.drawText(text, null, null, "bold 100px arial", "white"); diff --git a/examples/configs/e7_experiment_base/agent_configs/chase_via_points-v0.yaml b/examples/configs/e7_experiment_base/agent_configs/chase_via_points-v0.yaml new file mode 100644 index 0000000000..cbedbe3da5 --- /dev/null +++ b/examples/configs/e7_experiment_base/agent_configs/chase_via_points-v0.yaml @@ -0,0 +1,2 @@ +locator: __main__:chase_via_points-v0 +max_episode_steps: 1000 \ No newline at end of file diff --git a/examples/configs/e7_experiment_base/agent_configs/open_agent-v0.yaml b/examples/configs/e7_experiment_base/agent_configs/open_agent-v0.yaml new file mode 100644 index 0000000000..5f59d959c4 --- /dev/null +++ b/examples/configs/e7_experiment_base/agent_configs/open_agent-v0.yaml @@ -0,0 +1,3 @@ +locator: zoo.policies:open_agent-v0 +debug: false +aggressiveness: 3 \ No newline at end 
of file diff --git a/examples/configs/e7_experiment_base/agent_configs/random_lane_control-v0.yaml b/examples/configs/e7_experiment_base/agent_configs/random_lane_control-v0.yaml new file mode 100644 index 0000000000..449453977d --- /dev/null +++ b/examples/configs/e7_experiment_base/agent_configs/random_lane_control-v0.yaml @@ -0,0 +1,2 @@ +locator: __main__:random_lane_control-v0 +max_episode_steps: 1000 \ No newline at end of file diff --git a/examples/configs/e7_experiment_base/agent_configs/standard_lane_follower-v0.yaml b/examples/configs/e7_experiment_base/agent_configs/standard_lane_follower-v0.yaml new file mode 100644 index 0000000000..08f0c08819 --- /dev/null +++ b/examples/configs/e7_experiment_base/agent_configs/standard_lane_follower-v0.yaml @@ -0,0 +1,2 @@ +locator: __main__:standard_lane_follower-v0 +max_episode_steps: 1000 \ No newline at end of file diff --git a/examples/configs/e7_experiment_base/agent_configs/trajectory_tracking-v0.yaml b/examples/configs/e7_experiment_base/agent_configs/trajectory_tracking-v0.yaml new file mode 100644 index 0000000000..96cae84c0d --- /dev/null +++ b/examples/configs/e7_experiment_base/agent_configs/trajectory_tracking-v0.yaml @@ -0,0 +1,2 @@ +locator: __main__:trajectory_tracking-v0 +max_episode_steps: 1000 \ No newline at end of file diff --git a/examples/configs/e7_experiment_base/env_config/hiway_env-v1.yaml b/examples/configs/e7_experiment_base/env_config/hiway_env-v1.yaml new file mode 100644 index 0000000000..be8f86ef60 --- /dev/null +++ b/examples/configs/e7_experiment_base/env_config/hiway_env-v1.yaml @@ -0,0 +1,3 @@ +# Just an alias for hiway_env-v1_default.yaml +defaults: + - hiway_env-v1_default # env_config/hiway_env-v1_default \ No newline at end of file diff --git a/examples/configs/e7_experiment_base/env_config/hiway_env-v1_boid.yaml b/examples/configs/e7_experiment_base/env_config/hiway_env-v1_boid.yaml new file mode 100644 index 0000000000..b3070beb8f --- /dev/null +++ 
b/examples/configs/e7_experiment_base/env_config/hiway_env-v1_boid.yaml @@ -0,0 +1,6 @@ +defaults: + - hiway_env-v1_default # env_config/hiway_env-v1_default + +observation_options: full +action_options: full +environment_return_mode: environment \ No newline at end of file diff --git a/examples/configs/e7_experiment_base/env_config/hiway_env-v1_default.yaml b/examples/configs/e7_experiment_base/env_config/hiway_env-v1_default.yaml new file mode 100644 index 0000000000..3d574dd9fc --- /dev/null +++ b/examples/configs/e7_experiment_base/env_config/hiway_env-v1_default.yaml @@ -0,0 +1,19 @@ +defaults: + - scenarios/figure_eight + +id: smarts.env:hiway-v1 +agent_interfaces: {} +sim_name: null +scenarios_order: scrambled +headless: true +visdom: false +fixed_timestep_sec: 0.1 +sumo_options: + num_external_clients: 0 + auto_start: true + headless: true + port: null +seed: 42 +observation_options: default +action_options: default +environment_return_mode: default \ No newline at end of file diff --git a/examples/configs/e7_experiment_base/env_config/hiway_env-v1_full.yaml b/examples/configs/e7_experiment_base/env_config/hiway_env-v1_full.yaml new file mode 100644 index 0000000000..802d07eefc --- /dev/null +++ b/examples/configs/e7_experiment_base/env_config/hiway_env-v1_full.yaml @@ -0,0 +1,6 @@ +defaults: + - hiway_env-v1_default # env_config/hiway_env-v1_default + +observation_options: full +action_options: full +environment_return_mode: default \ No newline at end of file diff --git a/examples/configs/e7_experiment_base/env_config/hiway_env-v1_multi-agent.yaml b/examples/configs/e7_experiment_base/env_config/hiway_env-v1_multi-agent.yaml new file mode 100644 index 0000000000..802d07eefc --- /dev/null +++ b/examples/configs/e7_experiment_base/env_config/hiway_env-v1_multi-agent.yaml @@ -0,0 +1,6 @@ +defaults: + - hiway_env-v1_default # env_config/hiway_env-v1_default + +observation_options: full +action_options: full +environment_return_mode: default \ No newline at 
end of file diff --git a/examples/configs/e7_experiment_base/env_config/hiway_env-v1_unformatted.yaml b/examples/configs/e7_experiment_base/env_config/hiway_env-v1_unformatted.yaml new file mode 100644 index 0000000000..dcdac84b4d --- /dev/null +++ b/examples/configs/e7_experiment_base/env_config/hiway_env-v1_unformatted.yaml @@ -0,0 +1,6 @@ +defaults: + - hiway_env-v1_default # env_config/hiway_env-v1_default + +observation_options: unformatted +action_options: unformatted +environment_return_mode: default \ No newline at end of file diff --git a/examples/configs/e7_experiment_base/env_config/scenarios/figure_eight.yaml b/examples/configs/e7_experiment_base/env_config/scenarios/figure_eight.yaml new file mode 100644 index 0000000000..c5af1aec72 --- /dev/null +++ b/examples/configs/e7_experiment_base/env_config/scenarios/figure_eight.yaml @@ -0,0 +1 @@ +- ./scenarios/sumo/figure_eight \ No newline at end of file diff --git a/examples/configs/e7_experiment_base/env_config/scenarios/intersections.yaml b/examples/configs/e7_experiment_base/env_config/scenarios/intersections.yaml new file mode 100644 index 0000000000..7cda5cc268 --- /dev/null +++ b/examples/configs/e7_experiment_base/env_config/scenarios/intersections.yaml @@ -0,0 +1,7 @@ +# - ./scenarios/sumo/intersections/1_to_1lane_left_turn_c_agents_1 +# - ./scenarios/sumo/intersections/1_to_2lane_left_turn_c_agents_1 +- ./scenarios/sumo/intersections/2lane +- ./scenarios/sumo/intersections/2lane_circle +- ./scenarios/sumo/intersections/4lane +- ./scenarios/sumo/intersections/4lane_t +- ./scenarios/sumo/intersections/6lane \ No newline at end of file diff --git a/examples/configs/e7_experiment_base/env_config/scenarios/loop.yaml b/examples/configs/e7_experiment_base/env_config/scenarios/loop.yaml new file mode 100644 index 0000000000..3abadcaffa --- /dev/null +++ b/examples/configs/e7_experiment_base/env_config/scenarios/loop.yaml @@ -0,0 +1 @@ +- ./scenarios/sumo/loop \ No newline at end of file diff --git 
a/examples/configs/e7_experiment_base/experiment/README.md b/examples/configs/e7_experiment_base/experiment/README.md new file mode 100644 index 0000000000..223640b22d --- /dev/null +++ b/examples/configs/e7_experiment_base/experiment/README.md @@ -0,0 +1,34 @@ +Please note https://hydra.cc/docs/patterns/configuring_experiments/ for how to generate experiment files. + +## 1 + +In the most simple application the file must start with a global declaration to be relative to the global package + +See [Configuration package overrides](https://hydra.cc/docs/advanced/overriding_packages/#defaults-list-package-keywords) +```yaml +# @package _global_ +``` + +## 2 +The defaults should select from configuration (or manually specify configuration) + +See [Configuration package overrides](https://hydra.cc/docs/advanced/overriding_packages/) +```yaml +# experiment/laner.yaml +defaults: + - /experiment_default # unnecessary but useful to visualize + - /agent_configs@agent_configs.agent_black: keep_lane_control-v0 # agent_configs/keep_lane_control-v0.yaml + - override /env_config: hiway_env-v1_unformatted + - _self_ # this is also unnecessary because it is implied +``` + +Note that because the configuration is in the global package an absolute path must be used must be absolute relative to the working directory that was configured from `@hydra.main(config_path=)`. 
+ +## 3 +Then this experiment can be called like: + +```bash +python examples/e7_experiment_base.py +experiment=trajectory_tracking +env_config/scenarios=intersections +``` + +See [CLI grammar](https://hydra.cc/docs/advanced/override_grammar/basic/) \ No newline at end of file diff --git a/examples/configs/e7_experiment_base/experiment/chase_via_points.yaml b/examples/configs/e7_experiment_base/experiment/chase_via_points.yaml new file mode 100644 index 0000000000..25dcb6babf --- /dev/null +++ b/examples/configs/e7_experiment_base/experiment/chase_via_points.yaml @@ -0,0 +1,4 @@ +# @package _global_ +defaults: + - /agent_configs@agent_configs.agent_smith: chase_via_points-v0 # agent_configs/chase_via_points-v0.yaml + - override /env_config: hiway_env-v1_unformatted \ No newline at end of file diff --git a/examples/configs/e7_experiment_base/experiment/egoless.yaml b/examples/configs/e7_experiment_base/experiment/egoless.yaml new file mode 100644 index 0000000000..0ddd11ba33 --- /dev/null +++ b/examples/configs/e7_experiment_base/experiment/egoless.yaml @@ -0,0 +1,6 @@ +# @package _global_ +defaults: + - /env_config/scenarios: intersections + +minimum_steps: 1000 +episodes: 10 \ No newline at end of file diff --git a/examples/configs/e7_experiment_base/experiment/mixed_agents.yaml b/examples/configs/e7_experiment_base/experiment/mixed_agents.yaml new file mode 100644 index 0000000000..fa66313e3d --- /dev/null +++ b/examples/configs/e7_experiment_base/experiment/mixed_agents.yaml @@ -0,0 +1,9 @@ +# @package _global_ +defaults: + - /agent_configs@agent_configs.agent_smith: chase_via_points-v0 # agent_configs/chase_via_points-v0.yaml + - /agent_configs@agent_configs.agent_black: random_lane_control-v0 # agent_configs/keep_lane_control-v0.yaml + - override /env_config: hiway_env-v1_unformatted + +env_config: + scenarios: + - ./scenarios/sumo/figure_eight \ No newline at end of file diff --git a/examples/configs/e7_experiment_base/experiment/open_agent.yaml 
b/examples/configs/e7_experiment_base/experiment/open_agent.yaml new file mode 100644 index 0000000000..e9a7739a71 --- /dev/null +++ b/examples/configs/e7_experiment_base/experiment/open_agent.yaml @@ -0,0 +1,9 @@ +# @package _global_ +defaults: + - /agent_configs@agent_configs.agent_007: open_agent-v0 # agent_configs/open_agent.yaml applied to agent_configs.agent_007 + - override /env_config: hiway_env-v1_unformatted + +env_config: + scenarios: + - ./scenarios/sumo/loop + - ./scenarios/sumo/figure_eight \ No newline at end of file diff --git a/examples/configs/e7_experiment_base/experiment/standard_lane_follower.yaml b/examples/configs/e7_experiment_base/experiment/standard_lane_follower.yaml new file mode 100644 index 0000000000..f69d3e1e51 --- /dev/null +++ b/examples/configs/e7_experiment_base/experiment/standard_lane_follower.yaml @@ -0,0 +1,9 @@ +# @package _global_ +defaults: + - /agent_configs@agent_configs.agent_j: standard_lane_follower-v0 + - /agent_configs@agent_configs.agent_k: standard_lane_follower-v0 + - override /env_config: hiway_env-v1 + +env_config: + scenarios: + - ./scenarios/sumo/merge/3lane_agents_2 \ No newline at end of file diff --git a/examples/configs/e7_experiment_base/experiment/trajectory_tracking.yaml b/examples/configs/e7_experiment_base/experiment/trajectory_tracking.yaml new file mode 100644 index 0000000000..c0aadf37a0 --- /dev/null +++ b/examples/configs/e7_experiment_base/experiment/trajectory_tracking.yaml @@ -0,0 +1,4 @@ +# @package _global_ +defaults: + - /agent_configs@agent_configs.tracker_agent: trajectory_tracking-v0 + - override /env_config: hiway_env-v1_unformatted \ No newline at end of file diff --git a/examples/configs/e7_experiment_base/experiment_default.yaml b/examples/configs/e7_experiment_base/experiment_default.yaml new file mode 100644 index 0000000000..221c3fb633 --- /dev/null +++ b/examples/configs/e7_experiment_base/experiment_default.yaml @@ -0,0 +1,10 @@ +defaults: + - base_experiment # see registration 
of this in examples/control.py + - env_config: hiway_env-v1 + # - agent_configs@agent_configs.agent_007: lane_control-v0 # agent_configs/lane_control-v0.yaml as agent_configs.agent_007 + # - agent_configs@agent_configs.agent_smith: chase_via_points-v0 # agent_configs/chase_via_points-v0.yaml as agent_configs.agent_smith + - _self_ + +episodes: 4 +show_config: true +minimum_steps: 1 \ No newline at end of file diff --git a/examples/control/chase_via_points.py b/examples/control/chase_via_points.py deleted file mode 100644 index fcb4b40a11..0000000000 --- a/examples/control/chase_via_points.py +++ /dev/null @@ -1,88 +0,0 @@ -import sys -from pathlib import Path - -import gymnasium as gym - -sys.path.insert(0, str(Path(__file__).parents[2].absolute())) -from examples.tools.argument_parser import default_argument_parser -from smarts.core.agent import Agent -from smarts.core.agent_interface import AgentInterface, AgentType -from smarts.core.observations import Observation -from smarts.core.utils.episodes import episodes -from smarts.env.utils.observation_conversion import ObservationOptions -from smarts.sstudio.scenario_construction import build_scenarios - -N_AGENTS = 3 -AGENT_IDS = ["Agent_%i" % i for i in range(N_AGENTS)] - - -class ChaseViaPointsAgent(Agent): - def act(self, obs: Observation): - if ( - len(obs.via_data.near_via_points) < 1 - or obs.ego_vehicle_state.road_id != obs.via_data.near_via_points[0].road_id - ): - return (obs.waypoint_paths[0][0].speed_limit, 0) - - nearest = obs.via_data.near_via_points[0] - if nearest.lane_index == obs.ego_vehicle_state.lane_index: - return (nearest.required_speed, 0) - - return ( - nearest.required_speed, - 1 if nearest.lane_index > obs.ego_vehicle_state.lane_index else -1, - ) - - -def main(scenarios, headless, num_episodes, max_episode_steps=None): - agent_interfaces = { - agent_id: AgentInterface.from_type( - AgentType.LanerWithSpeed, - max_episode_steps=max_episode_steps, - ) - for agent_id in AGENT_IDS - } - - env = 
gym.make( - "smarts.env:hiway-v1", - scenarios=scenarios, - agent_interfaces=agent_interfaces, - headless=headless, - observation_options=ObservationOptions.unformatted, - ) - - for episode in episodes(n=num_episodes): - agents = { - agent_id: ChaseViaPointsAgent() for agent_id in agent_interfaces.keys() - } - observations, _ = env.reset() - episode.record_scenario(env.scenario_log) - - terminateds = {"__all__": False} - while not terminateds["__all__"]: - actions = { - agent_id: agents[agent_id].act(agent_obs) - for agent_id, agent_obs in observations.items() - } - observations, rewards, terminateds, truncateds, infos = env.step(actions) - episode.record_step(observations, rewards, terminateds, truncateds, infos) - - env.close() - - -if __name__ == "__main__": - parser = default_argument_parser("chase-via-points") - args = parser.parse_args() - - if not args.scenarios: - args.scenarios = [ - str(Path(__file__).absolute().parents[2] / "scenarios" / "sumo" / "loop") - ] - - build_scenarios(scenarios=args.scenarios) - - main( - scenarios=args.scenarios, - headless=args.headless, - num_episodes=args.episodes, - ) diff --git a/examples/control/ego_open_agent.py b/examples/control/ego_open_agent.py deleted file mode 100644 index 4d612aa0e8..0000000000 --- a/examples/control/ego_open_agent.py +++ /dev/null @@ -1,76 +0,0 @@ -""" -This examples runs the OpEn Agent, a classical, MPC based agent using [OpEn](https://alphaville.github.io/optimization-engine/). -For further reading, see zoo policy [open-agent](../../smarts/zoo/policies/open-agent/). 
-""" - -import importlib -import sys -from pathlib import Path - -import gymnasium as gym - -from smarts.core.utils.episodes import episodes -from smarts.env.utils.observation_conversion import ObservationOptions -from smarts.sstudio.scenario_construction import build_scenarios - -sys.path.insert(0, str(Path(__file__).parents[2].absolute())) -from examples.tools.argument_parser import default_argument_parser - -try: - open_agent = importlib.import_module("open_agent") -except ModuleNotFoundError as e: - raise ModuleNotFoundError( - f"Ensure that the open-agent has been installed with `pip install open-agent" - ) - - -AGENT_ID = "Agent-007" - - -def main(scenarios, headless, num_episodes): - open_agent_spec = open_agent.entrypoint(debug=False, aggressiveness=3) - env = gym.make( - "smarts.env:hiway-v1", - scenarios=scenarios, - agent_interfaces={AGENT_ID: open_agent_spec.interface}, - headless=headless, - sumo_headless=True, - observation_options=ObservationOptions.unformatted, - ) - - for episode in episodes(n=num_episodes): - agent = open_agent_spec.build_agent() - - observations, _ = env.reset() - episode.record_scenario(env.scenario_log) - - terminateds = {"__all__": False} - while not terminateds["__all__"]: - agent_obs = observations[AGENT_ID] - agent_action = agent.act(agent_obs) - observations, rewards, terminateds, truncateds, infos = env.step( - {AGENT_ID: agent_action} - ) - episode.record_step(observations, rewards, terminateds, truncateds, infos) - - del agent - - env.close() - - -if __name__ == "__main__": - parser = default_argument_parser("OpEn-trajectory-optimizer") - args = parser.parse_args() - - if not args.scenarios: - args.scenarios = [ - str(Path(__file__).absolute().parents[2] / "scenarios" / "sumo" / "loop") - ] - - build_scenarios(scenarios=args.scenarios) - - main( - scenarios=args.scenarios, - headless=args.headless, - num_episodes=args.episodes, - ) diff --git a/examples/control/hiway_env_v1_lane_follower.py 
b/examples/control/hiway_env_v1_lane_follower.py deleted file mode 100644 index f1d6ed662d..0000000000 --- a/examples/control/hiway_env_v1_lane_follower.py +++ /dev/null @@ -1,61 +0,0 @@ -import sys -from pathlib import Path -from typing import Any, Dict, Union - -sys.path.insert(0, str(Path(__file__).parents[2].absolute())) -from examples.tools.argument_parser import default_argument_parser -from smarts.core.agent import Agent -from smarts.core.agent_interface import AgentInterface, AgentType -from smarts.core.utils.episodes import episodes -from smarts.env.gymnasium.hiway_env_v1 import HiWayEnvV1 -from smarts.sstudio.scenario_construction import build_scenarios - - -class LaneFollowerAgent(Agent): - def act(self, obs): - return (obs["waypoint_paths"]["speed_limit"][0][0], 0) - - -def main(scenarios, headless, num_episodes, max_episode_steps=None): - agent_interface = AgentInterface.from_type( - AgentType.LanerWithSpeed, max_episode_steps=max_episode_steps - ) - - env = HiWayEnvV1( - scenarios=scenarios, - agent_interfaces={"SingleAgent": agent_interface}, - headless=headless, - ) - - for episode in episodes(n=num_episodes): - agent = LaneFollowerAgent() - observation, info = env.reset() - episode.record_scenario(env.scenario_log) - - terminated = {"__all__": False} - while not terminated["__all__"]: - agent_action = agent.act(observation["SingleAgent"]) - observation, reward, terminated, truncated, info = env.step( - {"SingleAgent": agent_action} - ) - episode.record_step(observation, reward, terminated, truncated, info) - - env.close() - - -if __name__ == "__main__": - parser = default_argument_parser("single-agent-example") - args = parser.parse_args() - - if not args.scenarios: - args.scenarios = [ - str(Path(__file__).absolute().parents[2] / "scenarios" / "sumo" / "loop") - ] - - build_scenarios(scenarios=args.scenarios) - - main( - scenarios=args.scenarios, - headless=args.headless, - num_episodes=args.episodes, - ) diff --git a/examples/control/laner.py 
b/examples/control/laner.py deleted file mode 100644 index c1584dfbc2..0000000000 --- a/examples/control/laner.py +++ /dev/null @@ -1,89 +0,0 @@ -import random -import sys -from pathlib import Path - -import gymnasium as gym - -sys.path.insert(0, str(Path(__file__).parents[2].absolute())) -from examples.tools.argument_parser import default_argument_parser -from smarts.core.agent import Agent -from smarts.core.agent_interface import AgentInterface, AgentType -from smarts.core.utils.episodes import episodes -from smarts.env.utils.action_conversion import ActionOptions -from smarts.sstudio.scenario_construction import build_scenarios -from smarts.zoo.agent_spec import AgentSpec - -N_AGENTS = 4 -AGENT_IDS = ["Agent %i" % i for i in range(N_AGENTS)] - - -class KeepLaneAgent(Agent): - def act(self, obs): - val = ["keep_lane", "slow_down", "change_lane_left", "change_lane_right"] - return random.choice(val) - - -def main(scenarios, headless, num_episodes, max_episode_steps=None): - agent_specs = { - agent_id: AgentSpec( - interface=AgentInterface.from_type( - AgentType.Laner, max_episode_steps=max_episode_steps - ), - agent_builder=KeepLaneAgent, - ) - for agent_id in AGENT_IDS - } - - env = gym.make( - "smarts.env:hiway-v1", - scenarios=scenarios, - agent_interfaces={ - a_id: a_intrf.interface for a_id, a_intrf in agent_specs.items() - }, - headless=headless, - action_options=ActionOptions.unformatted, - ) - - for episode in episodes(n=num_episodes): - agents = { - agent_id: agent_spec.build_agent() - for agent_id, agent_spec in agent_specs.items() - } - observations, _ = env.reset() - episode.record_scenario(env.scenario_log) - - terminateds = {"__all__": False} - while not terminateds["__all__"]: - actions = { - agent_id: agents[agent_id].act(agent_obs) - for agent_id, agent_obs in observations.items() - } - observations, rewards, terminateds, truncateds, infos = env.step(actions) - episode.record_step(observations, rewards, terminateds, truncateds, infos) - - 
env.close() - - -if __name__ == "__main__": - parser = default_argument_parser("laner") - args = parser.parse_args() - - if not args.scenarios: - args.scenarios = [ - str(Path(__file__).absolute().parents[2] / "scenarios" / "sumo" / "loop"), - str( - Path(__file__).absolute().parents[2] - / "scenarios" - / "sumo" - / "figure_eight" - ), - ] - - build_scenarios(scenarios=args.scenarios) - - main( - scenarios=args.scenarios, - headless=args.headless, - num_episodes=args.episodes, - max_episode_steps=args.max_episode_steps, - ) diff --git a/examples/control/trajectory_tracking.py b/examples/control/trajectory_tracking.py deleted file mode 100644 index 1d3df6de6e..0000000000 --- a/examples/control/trajectory_tracking.py +++ /dev/null @@ -1,90 +0,0 @@ -import sys -from pathlib import Path - -import gymnasium as gym - -sys.path.insert(0, str(Path(__file__).parents[2].absolute())) -from examples.tools.argument_parser import default_argument_parser -from smarts.core.agent import Agent -from smarts.core.agent_interface import AgentInterface, AgentType -from smarts.core.utils.episodes import episodes -from smarts.env.utils.action_conversion import ActionOptions -from smarts.env.utils.observation_conversion import ObservationOptions -from smarts.sstudio.scenario_construction import build_scenarios - -AGENT_ID = "Agent-007" - - -class TrackingAgent(Agent): - def act(self, obs): - lane_index = 0 - num_trajectory_points = min([10, len(obs.waypoint_paths[lane_index])]) - # Desired speed is in m/s - desired_speed = 50 / 3.6 - trajectory = [ - [ - obs.waypoint_paths[lane_index][i].pos[0] - for i in range(num_trajectory_points) - ], - [ - obs.waypoint_paths[lane_index][i].pos[1] - for i in range(num_trajectory_points) - ], - [ - obs.waypoint_paths[lane_index][i].heading - for i in range(num_trajectory_points) - ], - [desired_speed for i in range(num_trajectory_points)], - ] - return trajectory - - -def main(scenarios, headless, num_episodes, max_episode_steps=None): - 
agent_interfaces = { - AGENT_ID: AgentInterface.from_type( - AgentType.Tracker, max_episode_steps=max_episode_steps - ), - } - - env = gym.make( - "smarts.env:hiway-v1", - scenarios=scenarios, - agent_interfaces=agent_interfaces, - headless=headless, - observation_options=ObservationOptions.unformatted, - action_options=ActionOptions.unformatted, - ) - - for episode in episodes(n=num_episodes): - agent = TrackingAgent() - observations, _ = env.reset() - episode.record_scenario(env.scenario_log) - - terminateds = {"__all__": False} - while not terminateds["__all__"]: - agent_obs = observations[AGENT_ID] - agent_action = agent.act(agent_obs) - observations, rewards, terminateds, truncateds, infos = env.step( - {AGENT_ID: agent_action} - ) - episode.record_step(observations, rewards, terminateds, truncateds, infos) - - env.close() - - -if __name__ == "__main__": - parser = default_argument_parser("trajectory-tracking") - args = parser.parse_args() - - if not args.scenarios: - args.scenarios = [ - str(Path(__file__).absolute().parents[2] / "scenarios" / "sumo" / "loop") - ] - - build_scenarios(scenarios=args.scenarios) - - main( - scenarios=args.scenarios, - headless=args.headless, - num_episodes=args.episodes, - ) diff --git a/examples/bubble_example.py b/examples/direct/bubble_example.py similarity index 100% rename from examples/bubble_example.py rename to examples/direct/bubble_example.py diff --git a/examples/dynamic_history_vehicles_replacement.py b/examples/direct/dynamic_history_vehicles_replacement.py similarity index 100% rename from examples/dynamic_history_vehicles_replacement.py rename to examples/direct/dynamic_history_vehicles_replacement.py diff --git a/examples/traffic_histories_vehicle_replacement.py b/examples/direct/traffic_histories_vehicle_replacement.py similarity index 100% rename from examples/traffic_histories_vehicle_replacement.py rename to examples/direct/traffic_histories_vehicle_replacement.py diff --git a/examples/rl/__init__.py 
b/examples/e10_drive/__init__.py similarity index 100% rename from examples/rl/__init__.py rename to examples/e10_drive/__init__.py diff --git a/examples/rl/drive/inference/MANIFEST.in b/examples/e10_drive/inference/MANIFEST.in similarity index 100% rename from examples/rl/drive/inference/MANIFEST.in rename to examples/e10_drive/inference/MANIFEST.in diff --git a/examples/rl/drive/inference/__init__.py b/examples/e10_drive/inference/__init__.py similarity index 100% rename from examples/rl/drive/inference/__init__.py rename to examples/e10_drive/inference/__init__.py diff --git a/examples/rl/drive/__init__.py b/examples/e10_drive/inference/contrib_policy/__init__.py similarity index 100% rename from examples/rl/drive/__init__.py rename to examples/e10_drive/inference/contrib_policy/__init__.py diff --git a/examples/rl/drive/inference/contrib_policy/filter_obs.py b/examples/e10_drive/inference/contrib_policy/filter_obs.py similarity index 100% rename from examples/rl/drive/inference/contrib_policy/filter_obs.py rename to examples/e10_drive/inference/contrib_policy/filter_obs.py diff --git a/examples/rl/drive/inference/contrib_policy/format_action.py b/examples/e10_drive/inference/contrib_policy/format_action.py similarity index 100% rename from examples/rl/drive/inference/contrib_policy/format_action.py rename to examples/e10_drive/inference/contrib_policy/format_action.py diff --git a/examples/rl/drive/inference/contrib_policy/frame_stack.py b/examples/e10_drive/inference/contrib_policy/frame_stack.py similarity index 100% rename from examples/rl/drive/inference/contrib_policy/frame_stack.py rename to examples/e10_drive/inference/contrib_policy/frame_stack.py diff --git a/examples/rl/drive/inference/contrib_policy/make_dict.py b/examples/e10_drive/inference/contrib_policy/make_dict.py similarity index 100% rename from examples/rl/drive/inference/contrib_policy/make_dict.py rename to examples/e10_drive/inference/contrib_policy/make_dict.py diff --git 
a/examples/rl/drive/inference/contrib_policy/network.py b/examples/e10_drive/inference/contrib_policy/network.py similarity index 100% rename from examples/rl/drive/inference/contrib_policy/network.py rename to examples/e10_drive/inference/contrib_policy/network.py diff --git a/examples/rl/drive/inference/contrib_policy/policy.py b/examples/e10_drive/inference/contrib_policy/policy.py similarity index 100% rename from examples/rl/drive/inference/contrib_policy/policy.py rename to examples/e10_drive/inference/contrib_policy/policy.py diff --git a/examples/rl/drive/inference/setup.cfg b/examples/e10_drive/inference/setup.cfg similarity index 100% rename from examples/rl/drive/inference/setup.cfg rename to examples/e10_drive/inference/setup.cfg diff --git a/examples/rl/drive/inference/setup.py b/examples/e10_drive/inference/setup.py similarity index 100% rename from examples/rl/drive/inference/setup.py rename to examples/e10_drive/inference/setup.py diff --git a/examples/rl/drive/train/Dockerfile b/examples/e10_drive/train/Dockerfile similarity index 89% rename from examples/rl/drive/train/Dockerfile rename to examples/e10_drive/train/Dockerfile index ad3f452b62..3e0a4cb3c3 100644 --- a/examples/rl/drive/train/Dockerfile +++ b/examples/e10_drive/train/Dockerfile @@ -27,13 +27,13 @@ ENV PATH="$VIRTUAL_ENV/bin:$PATH" RUN pip install --upgrade pip && pip install wheel==0.38.4 # Install requirements.txt . -COPY ./examples/rl/drive/train/requirements.txt /tmp/requirements.txt +COPY ./examples/e10_drive/train/requirements.txt /tmp/requirements.txt RUN pip install --no-cache-dir -r /tmp/requirements.txt # Copy source files and install. COPY . 
/SMARTS WORKDIR /SMARTS RUN pip install -e .[camera_obs,argoverse] -RUN pip install -e ./examples/rl/drive/inference +RUN pip install -e ./examples/e10_drive/inference SHELL ["/bin/bash", "-c", "-l"] \ No newline at end of file diff --git a/examples/rl/drive/inference/contrib_policy/__init__.py b/examples/e10_drive/train/__init__.py similarity index 100% rename from examples/rl/drive/inference/contrib_policy/__init__.py rename to examples/e10_drive/train/__init__.py diff --git a/examples/rl/drive/train/config.yaml b/examples/e10_drive/train/config.yaml similarity index 100% rename from examples/rl/drive/train/config.yaml rename to examples/e10_drive/train/config.yaml diff --git a/examples/rl/drive/train/env.py b/examples/e10_drive/train/env.py similarity index 100% rename from examples/rl/drive/train/env.py rename to examples/e10_drive/train/env.py diff --git a/examples/rl/drive/train/preprocess.py b/examples/e10_drive/train/preprocess.py similarity index 100% rename from examples/rl/drive/train/preprocess.py rename to examples/e10_drive/train/preprocess.py diff --git a/examples/rl/drive/train/requirements.txt b/examples/e10_drive/train/requirements.txt similarity index 100% rename from examples/rl/drive/train/requirements.txt rename to examples/e10_drive/train/requirements.txt diff --git a/examples/rl/drive/train/reward.py b/examples/e10_drive/train/reward.py similarity index 100% rename from examples/rl/drive/train/reward.py rename to examples/e10_drive/train/reward.py diff --git a/examples/rl/drive/train/run.py b/examples/e10_drive/train/run.py similarity index 99% rename from examples/rl/drive/train/run.py rename to examples/e10_drive/train/run.py index 8f52e949b7..d7a519072a 100644 --- a/examples/rl/drive/train/run.py +++ b/examples/e10_drive/train/run.py @@ -84,7 +84,7 @@ def main(args: argparse.Namespace): envs_train = {} envs_eval = {} for scenario in config.scenarios: - scenario_path = str(Path(__file__).resolve().parents[4] / scenario) + scenario_path = 
str(Path(__file__).resolve().parents[3] / scenario) envs_train[f"{scenario}"] = make_env( env_id=config.env_id, scenario=scenario_path, diff --git a/examples/rl/drive/train/utils.py b/examples/e10_drive/train/utils.py similarity index 100% rename from examples/rl/drive/train/utils.py rename to examples/e10_drive/train/utils.py diff --git a/examples/rl/drive/train/__init__.py b/examples/e11_platoon/__init__.py similarity index 100% rename from examples/rl/drive/train/__init__.py rename to examples/e11_platoon/__init__.py diff --git a/examples/rl/platoon/inference/MANIFEST.in b/examples/e11_platoon/inference/MANIFEST.in similarity index 100% rename from examples/rl/platoon/inference/MANIFEST.in rename to examples/e11_platoon/inference/MANIFEST.in diff --git a/examples/rl/platoon/inference/__init__.py b/examples/e11_platoon/inference/__init__.py similarity index 100% rename from examples/rl/platoon/inference/__init__.py rename to examples/e11_platoon/inference/__init__.py diff --git a/examples/rl/platoon/__init__.py b/examples/e11_platoon/inference/contrib_policy/__init__.py similarity index 100% rename from examples/rl/platoon/__init__.py rename to examples/e11_platoon/inference/contrib_policy/__init__.py diff --git a/examples/rl/platoon/inference/contrib_policy/filter_obs.py b/examples/e11_platoon/inference/contrib_policy/filter_obs.py similarity index 100% rename from examples/rl/platoon/inference/contrib_policy/filter_obs.py rename to examples/e11_platoon/inference/contrib_policy/filter_obs.py diff --git a/examples/rl/platoon/inference/contrib_policy/format_action.py b/examples/e11_platoon/inference/contrib_policy/format_action.py similarity index 100% rename from examples/rl/platoon/inference/contrib_policy/format_action.py rename to examples/e11_platoon/inference/contrib_policy/format_action.py diff --git a/examples/rl/platoon/inference/contrib_policy/frame_stack.py b/examples/e11_platoon/inference/contrib_policy/frame_stack.py similarity index 100% rename from 
examples/rl/platoon/inference/contrib_policy/frame_stack.py rename to examples/e11_platoon/inference/contrib_policy/frame_stack.py diff --git a/examples/rl/platoon/inference/contrib_policy/make_dict.py b/examples/e11_platoon/inference/contrib_policy/make_dict.py similarity index 100% rename from examples/rl/platoon/inference/contrib_policy/make_dict.py rename to examples/e11_platoon/inference/contrib_policy/make_dict.py diff --git a/examples/rl/platoon/inference/contrib_policy/network.py b/examples/e11_platoon/inference/contrib_policy/network.py similarity index 100% rename from examples/rl/platoon/inference/contrib_policy/network.py rename to examples/e11_platoon/inference/contrib_policy/network.py diff --git a/examples/rl/platoon/inference/contrib_policy/policy.py b/examples/e11_platoon/inference/contrib_policy/policy.py similarity index 100% rename from examples/rl/platoon/inference/contrib_policy/policy.py rename to examples/e11_platoon/inference/contrib_policy/policy.py diff --git a/examples/rl/platoon/inference/setup.cfg b/examples/e11_platoon/inference/setup.cfg similarity index 100% rename from examples/rl/platoon/inference/setup.cfg rename to examples/e11_platoon/inference/setup.cfg diff --git a/examples/rl/platoon/inference/setup.py b/examples/e11_platoon/inference/setup.py similarity index 100% rename from examples/rl/platoon/inference/setup.py rename to examples/e11_platoon/inference/setup.py diff --git a/examples/rl/platoon/train/Dockerfile b/examples/e11_platoon/train/Dockerfile similarity index 89% rename from examples/rl/platoon/train/Dockerfile rename to examples/e11_platoon/train/Dockerfile index 3e0edc93e2..a77ab1cd7d 100644 --- a/examples/rl/platoon/train/Dockerfile +++ b/examples/e11_platoon/train/Dockerfile @@ -27,13 +27,13 @@ ENV PATH="$VIRTUAL_ENV/bin:$PATH" RUN pip install --upgrade pip && pip install wheel==0.38.4 # Install requirements.txt . 
-COPY ./examples/rl/platoon/train/requirements.txt /tmp/requirements.txt +COPY ./examples/e11_platoon/train/requirements.txt /tmp/requirements.txt RUN pip install --no-cache-dir -r /tmp/requirements.txt # Copy source files and install. COPY . /SMARTS WORKDIR /SMARTS RUN pip install -e .[camera_obs,argoverse] -RUN pip install -e ./examples/rl/platoon/inference +RUN pip install -e ./examples/e11_platoon/inference SHELL ["/bin/bash", "-c", "-l"] \ No newline at end of file diff --git a/examples/rl/platoon/inference/contrib_policy/__init__.py b/examples/e11_platoon/train/__init__.py similarity index 100% rename from examples/rl/platoon/inference/contrib_policy/__init__.py rename to examples/e11_platoon/train/__init__.py diff --git a/examples/rl/platoon/train/config.yaml b/examples/e11_platoon/train/config.yaml similarity index 100% rename from examples/rl/platoon/train/config.yaml rename to examples/e11_platoon/train/config.yaml diff --git a/examples/rl/platoon/train/env.py b/examples/e11_platoon/train/env.py similarity index 100% rename from examples/rl/platoon/train/env.py rename to examples/e11_platoon/train/env.py diff --git a/examples/rl/platoon/train/preprocess.py b/examples/e11_platoon/train/preprocess.py similarity index 100% rename from examples/rl/platoon/train/preprocess.py rename to examples/e11_platoon/train/preprocess.py diff --git a/examples/rl/platoon/train/requirements.txt b/examples/e11_platoon/train/requirements.txt similarity index 100% rename from examples/rl/platoon/train/requirements.txt rename to examples/e11_platoon/train/requirements.txt diff --git a/examples/rl/platoon/train/reward.py b/examples/e11_platoon/train/reward.py similarity index 100% rename from examples/rl/platoon/train/reward.py rename to examples/e11_platoon/train/reward.py diff --git a/examples/rl/platoon/train/run.py b/examples/e11_platoon/train/run.py similarity index 99% rename from examples/rl/platoon/train/run.py rename to examples/e11_platoon/train/run.py index 
fe4ac7a473..b5a590fa6e 100644 --- a/examples/rl/platoon/train/run.py +++ b/examples/e11_platoon/train/run.py @@ -85,7 +85,7 @@ def main(args: argparse.Namespace): envs_train = {} envs_eval = {} for scenario in config.scenarios: - scenario_path = str(Path(__file__).resolve().parents[4] / scenario) + scenario_path = str(Path(__file__).resolve().parents[3] / scenario) envs_train[f"{scenario}"] = make_env( env_id=config.env_id, scenario=scenario_path, diff --git a/examples/rl/platoon/train/utils.py b/examples/e11_platoon/train/utils.py similarity index 100% rename from examples/rl/platoon/train/utils.py rename to examples/e11_platoon/train/utils.py diff --git a/examples/rl/platoon/train/__init__.py b/examples/e12_rllib/__init__.py similarity index 100% rename from examples/rl/platoon/train/__init__.py rename to examples/e12_rllib/__init__.py diff --git a/examples/rl/rllib/configs.py b/examples/e12_rllib/configs.py similarity index 100% rename from examples/rl/rllib/configs.py rename to examples/e12_rllib/configs.py diff --git a/examples/e12_rllib/model/README.md b/examples/e12_rllib/model/README.md new file mode 100644 index 0000000000..f7cfe89467 --- /dev/null +++ b/examples/e12_rllib/model/README.md @@ -0,0 +1,3 @@ +## Model Binaries + +The binaries located in this directory are the components of a trained rllib model. These are related to the `examples/e12_rllib/pg_pbt_example.py` example script. Results from `examples/e12_rllib/pg_pbt_example.py` are loaded and written to this directory. 
\ No newline at end of file diff --git a/examples/rl/rllib/pg_example.py b/examples/e12_rllib/pg_example.py similarity index 100% rename from examples/rl/rllib/pg_example.py rename to examples/e12_rllib/pg_example.py diff --git a/examples/rl/rllib/pg_pbt_example.py b/examples/e12_rllib/pg_pbt_example.py similarity index 100% rename from examples/rl/rllib/pg_pbt_example.py rename to examples/e12_rllib/pg_pbt_example.py diff --git a/examples/rl/rllib/rllib_agent.py b/examples/e12_rllib/rllib_agent.py similarity index 100% rename from examples/rl/rllib/rllib_agent.py rename to examples/e12_rllib/rllib_agent.py diff --git a/examples/e1_egoless.py b/examples/e1_egoless.py new file mode 100644 index 0000000000..9b734f2f26 --- /dev/null +++ b/examples/e1_egoless.py @@ -0,0 +1,45 @@ +"""This example is intended to show how a SMARTS environment would be set up without any agents.""" +from pathlib import Path + +import gymnasium as gym +from tools.argument_parser import empty_parser + +from smarts.core.utils.episodes import episodes +from smarts.sstudio.scenario_construction import build_scenarios + + +def main(*_, **kwargs): + max_episode_steps = None + num_episodes = 10 + scenarios_path = Path(__file__).absolute().parents[1] / "scenarios" / "sumo" + + # Scenarios have to be built for the scenario to be complete. 
This can be done + # in two ways: + # - From the cli like: `scl scenario build-all <scenario_path>` + # - From code using `build_scenarios(scenarios=[<scenario_path>, ...])` + scenarios = [str(scenarios_path / "figure_eight"), str(scenarios_path / "loop")] + build_scenarios(scenarios=scenarios) + + env = gym.make( + "smarts.env:hiway-v1", + scenarios=scenarios, + agent_interfaces={}, + headless=True, + ) + + for episode in episodes(n=num_episodes): + env.reset() + episode.record_scenario(env.unwrapped.scenario_log) + + for _ in range(max_episode_steps or 300): + env.step({}) + episode.record_step({}, {}, {}, {}, {}) + + env.close() + + +if __name__ == "__main__": + parser = empty_parser(Path(__file__).stem) + args = parser.parse_args() + + main() diff --git a/examples/e2_single_agent.py b/examples/e2_single_agent.py new file mode 100644 index 0000000000..07d34570c1 --- /dev/null +++ b/examples/e2_single_agent.py @@ -0,0 +1,73 @@ +"""This example shows how you might run a SMARTS environment for single-agent work. SMARTS is +natively multi-agent so a single-agent wrapper is used.""" +import argparse +import random +import sys +from pathlib import Path +from typing import Final + +import gymnasium as gym + +SMARTS_REPO_PATH = Path(__file__).parents[1].absolute() +sys.path.insert(0, str(SMARTS_REPO_PATH)) +from examples.tools.argument_parser import minimal_argument_parser +from smarts.core.agent import Agent +from smarts.core.agent_interface import AgentInterface, AgentType +from smarts.core.utils.episodes import episodes +from smarts.env.gymnasium.wrappers.single_agent import SingleAgent +from smarts.sstudio.scenario_construction import build_scenarios + +AGENT_ID: Final[str] = "Agent" + + +class KeepLaneAgent(Agent): + def act(self, obs, **kwargs): + return random.randint(0, 3) + + +def main(scenarios, headless, num_episodes, max_episode_steps=None): + # This interface must match the action returned by the agent + agent_interface = AgentInterface.from_type( + AgentType.Laner, 
max_episode_steps=max_episode_steps + ) + + env = gym.make( + "smarts.env:hiway-v1", + scenarios=scenarios, + agent_interfaces={AGENT_ID: agent_interface}, + headless=headless, + ) + env = SingleAgent(env) + + for episode in episodes(n=num_episodes): + agent = KeepLaneAgent() + observation, _ = env.reset() + episode.record_scenario(env.unwrapped.scenario_log) + + terminated = False + while not terminated: + action = agent.act(observation) + observation, reward, terminated, truncated, info = env.step(action) + episode.record_step(observation, reward, terminated, truncated, info) + + env.close() + + +if __name__ == "__main__": + parser = minimal_argument_parser(Path(__file__).stem) + args = parser.parse_args() + + if not args.scenarios: + args.scenarios = [ + str(SMARTS_REPO_PATH / "scenarios" / "sumo" / "loop"), + str(SMARTS_REPO_PATH / "scenarios" / "sumo" / "figure_eight"), + ] + + build_scenarios(scenarios=args.scenarios) + + main( + scenarios=args.scenarios, + headless=args.headless, + num_episodes=args.episodes, + max_episode_steps=args.max_episode_steps, + ) diff --git a/examples/e3_multi_agent.py b/examples/e3_multi_agent.py new file mode 100644 index 0000000000..d77a305af4 --- /dev/null +++ b/examples/e3_multi_agent.py @@ -0,0 +1,90 @@ +"""This is an example to show how SMARTS multi-agent works. This example uses the same kind of +agent multiple times. 
But different agents with different action and observation shapes can be mixed +in.""" +import random +import sys +from pathlib import Path +from typing import Final + +import gymnasium as gym + +SMARTS_REPO_PATH = Path(__file__).parents[1].absolute() +sys.path.insert(0, str(SMARTS_REPO_PATH)) +from examples.tools.argument_parser import minimal_argument_parser +from smarts.core.agent import Agent +from smarts.core.agent_interface import AgentInterface, AgentType +from smarts.core.utils.episodes import episodes +from smarts.sstudio.scenario_construction import build_scenarios + +N_AGENTS = 4 +AGENT_IDS: Final[list] = ["Agent %i" % i for i in range(N_AGENTS)] + + +class RandomLanerAgent(Agent): + def __init__(self, action_space) -> None: + self._action_space = action_space + + def act(self, obs, **kwargs): + return self._action_space.sample() + + +class KeepLaneAgent(Agent): + def __init__(self, action_space) -> None: + self._action_space = action_space + + def act(self, obs, **kwargs): + return self._action_space.sample() + + +def main(scenarios, headless, num_episodes, max_episode_steps=None): + # This interface must match the action returned by the agent + agent_interfaces = { + agent_id: AgentInterface.from_type( + AgentType.Laner, max_episode_steps=max_episode_steps + ) + for agent_id in AGENT_IDS + } + + env = gym.make( + "smarts.env:hiway-v1", + scenarios=scenarios, + agent_interfaces=agent_interfaces, + headless=headless, + ) + + for episode in episodes(n=num_episodes): + agents = { + agent_id: RandomLanerAgent(env.action_space[agent_id]) + for agent_id in agent_interfaces.keys() + } + observations, _ = env.reset() + episode.record_scenario(env.unwrapped.scenario_log) + + terminateds = {"__all__": False} + while not terminateds["__all__"]: + actions = { + agent_id: agent.act(observations) for agent_id, agent in agents.items() + } + observations, rewards, terminateds, truncateds, infos = env.step(actions) + episode.record_step(observations, rewards, 
terminateds, truncateds, infos) + + env.close() + + +if __name__ == "__main__": + parser = minimal_argument_parser(Path(__file__).stem) + args = parser.parse_args() + + if not args.scenarios: + args.scenarios = [ + str(SMARTS_REPO_PATH / "scenarios" / "sumo" / "loop"), + ] + + build_scenarios(scenarios=args.scenarios) + + main( + scenarios=args.scenarios, + headless=args.headless, + num_episodes=args.episodes, + max_episode_steps=args.max_episode_steps, + ) diff --git a/examples/e4_environment_config.py b/examples/e4_environment_config.py new file mode 100644 index 0000000000..f9d6476afb --- /dev/null +++ b/examples/e4_environment_config.py @@ -0,0 +1,95 @@ +"""This example shows the differences between each of the environments. + +For the unformatted observation please see https://smarts.readthedocs.io/en/latest/sim/obs_action_reward.html. +""" +import warnings +from pathlib import Path + +from tools.argument_parser import empty_parser + +from smarts.core.agent_interface import AgentInterface, AgentType +from smarts.core.utils.string import truncate +from smarts.env.configs.hiway_env_configs import EnvReturnMode +from smarts.env.gymnasium.hiway_env_v1 import HiWayEnvV1 +from smarts.env.utils.action_conversion import ActionOptions +from smarts.env.utils.observation_conversion import ObservationOptions +from smarts.sstudio.scenario_construction import build_scenarios + +warnings.filterwarnings("ignore", category=UserWarning) + +AGENT_ID = "agent" + + +def detail_environment(env: HiWayEnvV1, name: str): + obs, _ = env.reset() + + print(f"-------- Format '{name}' ---------") + print(f"Environment action space {env.action_space}") + print( + f"Environment observation space {truncate(str(env.observation_space), length=80)}", + ) + print(f"Environment observation type {type(obs)}") + print(f"Agent observation type {type(obs[AGENT_ID])}") + observations, rewards, terminations, truncations, infos = env.step( + { + AGENT_ID: None + if env.action_space is None + else 
env.action_space.sample()[AGENT_ID] + } + ) + print(f"Environment infos type {type(rewards)}") + print() + + +def main(*_, **kwargs): + defaults = dict( + agent_interfaces={AGENT_ID: AgentInterface.from_type(AgentType.Standard)}, + scenarios=[ + str( + Path(__file__).absolute().parents[1] + / "scenarios" + / "sumo" + / "figure_eight" + ) + ], + headless=True, + ) + + build_scenarios(defaults["scenarios"]) + + # AKA: `gym.make("smarts.env:hiway-v1")` + with HiWayEnvV1( + # observation_options=ObservationOptions.multi_agent, + # action_options=ActionOptions.multi_agent, + **defaults, + ) as env: + detail_environment(env, "multi_agent") + + with HiWayEnvV1( + observation_options=ObservationOptions.full, + action_options=ActionOptions.full, + **defaults, + ) as env: + detail_environment(env, "full") + + with HiWayEnvV1( + observation_options=ObservationOptions.unformatted, + action_options=ActionOptions.unformatted, + **defaults, + ) as env: + detail_environment(env, "unformatted") + + with HiWayEnvV1( + observation_options="unformatted", + action_options="unformatted", + environment_return_mode=EnvReturnMode.environment, + **defaults, + ) as env: + detail_environment(env, "env return") + + +if __name__ == "__main__": + parser = empty_parser("environment config") + args = parser.parse_args() + + main() diff --git a/examples/e5_agent_zoo.py b/examples/e5_agent_zoo.py new file mode 100644 index 0000000000..43259cfcd1 --- /dev/null +++ b/examples/e5_agent_zoo.py @@ -0,0 +1,76 @@ +"""This is an example to show how SMARTS multi-agent works. 
This example uses the same kind of +agent multiple times but different agents with different action and observation shapes can be mixed +in.""" +import random +import sys +from pathlib import Path + +# This may be necessary to get the repository root into path +SMARTS_REPO_PATH = Path(__file__).parents[1].absolute() +sys.path.insert(0, str(SMARTS_REPO_PATH)) + +from examples.tools.argument_parser import empty_parser +from smarts.core.agent import Agent +from smarts.core.agent_interface import AgentInterface, AgentType +from smarts.zoo import registry +from smarts.zoo.agent_spec import AgentSpec + + +class RandomLaneAgent(Agent): + def act(self, obs, **kwargs): + return random.randint(0, 3) + + +def rla_entrypoint(max_episode_steps=1000): + return AgentSpec( + interface=AgentInterface.from_type( + AgentType.Laner, max_episode_steps=max_episode_steps + ), + agent_builder=RandomLaneAgent, + ) + + +def main(*_, **kwargs): + name = "random_lane_control-v0" + print(f"=== Before registering `{name}` ===") + print(registry.agent_registry) + registry.register( + name, rla_entrypoint + ) # This registers "__main__:random_lane_control-v0" + print(f"=== After registering `{name}` ===") + print(registry.agent_registry) + + agent_spec = registry.make(locator=f"__main__:{name}") + agent_interface = agent_spec.interface + agent = agent_spec.build_agent() + # alternatively this will build the agent + agent, _ = registry.make_agent(locator=f"__main__:{name}") + # just "random_lane_control-v0" also works because the agent has already been registered in this file. 
+ agent, _ = registry.make_agent(locator=name) + + locator = "zoo.policies:chase-via-points-agent-v0" + # Here is an example of using the module component of the locator to dynamically load agents: + agent, _ = registry.make_agent(locator=locator) + print(f"=== After loading `{locator}` ===") + print(registry.agent_registry) + + ## This agent requires installation + # agent, _ = registry.make_agent( + # locator="zoo.policies:discrete-soft-actor-critic-agent-v0" + # ) + + locator = "non_existing.module:md-v44" + try: + agent, _ = registry.make_agent(locator="non_existing.module:md-v44") + except (ModuleNotFoundError, ImportError): + print( + f"Such as with '{locator}'. Module resolution can fail if the module cannot be found " + "from the PYTHONPATH environment variable apparent as `sys.path` in python." + ) + + +if __name__ == "__main__": + parser = empty_parser(Path(__file__).stem) + args = parser.parse_args() + + main() diff --git a/examples/e6_agent_action_space.py b/examples/e6_agent_action_space.py new file mode 100644 index 0000000000..6cbedc8296 --- /dev/null +++ b/examples/e6_agent_action_space.py @@ -0,0 +1,51 @@ +"""This is an example to show how agent interface action formatting configuration works.""" +from pathlib import Path + +from examples.tools.argument_parser import empty_parser +from smarts.core.agent_interface import AgentInterface, AgentType +from smarts.core.controllers import Controllers +from smarts.core.controllers.action_space_type import ActionSpaceType +from smarts.env.utils.action_conversion import ActionOptions, get_formatters + + +def display_spaces(): + tn, _, _ = f"{ActionOptions=}".partition("=") + action_formatters = get_formatters() + for name, action_type in ActionSpaceType.__members__.items(): + action_type: ActionSpaceType + unformatted_types, unformatted_schema_names = Controllers.get_action_shape( + action_type + ) + formatted = action_formatters[action_type] + print(f"======= {name} =======") + print(f"- For 
{ActionOptions.unformatted!r} -") + print(f"{unformatted_types = !r}") + print(f"{unformatted_schema_names = !r}") + print() + print( + f"- For {tn} {(ActionOptions.multi_agent, ActionOptions.full, ActionOptions.default)} -" + ) + print(f"Gym space = {formatted.space!r}") + print() + + +def display_agent_type_spaces(): + method_name, _, _ = f"{AgentInterface.from_type=}".partition("=") + print( + f"Note that `{method_name}` generates a pre-configured agent type with an existing action space." + ) + + for name, agent_type in AgentType.__members__.items(): + agent_type: AgentType + print(f"{name.ljust(30)}: {AgentInterface.from_type(agent_type).action}") + + +def main(*_, **kwargs): + display_spaces() + display_agent_type_spaces() + + +if __name__ == "__main__": + parser = empty_parser(Path(__file__).stem) + args = parser.parse_args() + main() diff --git a/examples/e7_experiment_base.py b/examples/e7_experiment_base.py new file mode 100644 index 0000000000..3688e304d0 --- /dev/null +++ b/examples/e7_experiment_base.py @@ -0,0 +1,156 @@ +"""This example allows you to play around with the features of the previous examples through configuration.""" +import sys +from dataclasses import asdict, dataclass, field +from pathlib import Path +from typing import Dict, Final, Type + +import gymnasium as gym + +try: + import hydra + from hydra.core.config_store import ConfigStore + from omegaconf import OmegaConf +except ImportError as exc: + raise ImportError("Please install smarts[examples] or -e .[examples].") + + +from smarts.core.utils.episodes import episodes +from smarts.env.configs.base_config import EnvironmentConfiguration +from smarts.env.configs.hiway_env_configs import HiWayEnvV1Configuration +from smarts.sstudio.scenario_construction import build_scenarios +from smarts.zoo import registry + +sys.path.insert(0, str(Path(__file__).parents[1].absolute())) +from zoo.policies.primitive_agents import ( + cvpa_entrypoint, + rla_entrypoint, + 
standard_lane_follower_entrypoint, + trajectory_tracking_entrypoint, +) + + +@dataclass +class EnvCfg(HiWayEnvV1Configuration, EnvironmentConfiguration): + @classmethod + def default(cls: Type["EnvCfg"]): + return cls( + id="smarts.env:hiway-v1", + scenarios=[ + str( + Path(__file__).absolute().parents[1] + / "scenarios" + / "sumo" + / "figure_eight" + ), + ], + agent_interfaces={}, + ) + + +@dataclass +class ExperimentCfg: + episodes: int + """This indicates how many times the environment will reset.""" + show_config: bool + """If true the yaml structure of the configuration for this run will be printed.""" + minimum_steps: int = 1 + """The minimum number of steps to run before reset. This can be used to run egoless.""" + agent_configs: Dict[str, Dict] = field(default_factory=lambda: {}) + """The configuration of the agents to include in this experiment.""" + env_config: EnvironmentConfiguration = field(default_factory=EnvCfg.default) + """The environment configuration for the environment used in this experiment. 
Typically 'smarts.env:hiway-v1'.""" + + +registry.register( + "random_lane_control-v0", rla_entrypoint +) # This registers "__main__:keep_lane_control-v0" +registry.register("chase_via_points-v0", entry_point=cvpa_entrypoint) +registry.register("trajectory_tracking-v0", entry_point=trajectory_tracking_entrypoint) +registry.register( + "standard_lane_follower-v0", entry_point=standard_lane_follower_entrypoint +) + + +CONFIG_LOCATION: Final[str] = str( + Path(__file__).parent.absolute() / "configs" / "e7_experiment_base" +) +cs = ConfigStore.instance() +cs.store(name="base_experiment", node=ExperimentCfg, group=None) + + +@hydra.main( + config_path=CONFIG_LOCATION, + config_name="experiment_default", + version_base=None, +) +def hydra_main(experiment_config: ExperimentCfg) -> None: + if experiment_config.show_config: + print() + print("# Current used configuration") + print("# ==========================\n") + print(OmegaConf.to_yaml(cfg=experiment_config)) + print("# ==========================") + main(experiment_config) + + +def main(experiment_config: ExperimentCfg, *_, **kwargs): + typed_experiment_config: ExperimentCfg = OmegaConf.to_object(cfg=experiment_config) + print(f"Loading configuration from `{CONFIG_LOCATION}`") + + assert not any( + "locator" not in v for v in typed_experiment_config.agent_configs.values() + ), "A declared agent is missing a locator." + + if hasattr(typed_experiment_config.env_config, "scenarios"): + build_scenarios( + scenarios=getattr(typed_experiment_config.env_config, "scenarios") + ) + + agent_specs = { + name: registry.make( + **cfg, + ) + for name, cfg in typed_experiment_config.agent_configs.items() + } + # This is the one point of pain that the agent interfaces are needed for the environment + # but the agent should be constructed by the `smarts.zoo` separately. 
+ env_params = asdict(typed_experiment_config.env_config) + if "agent_interfaces" in env_params: + assert ( + len(env_params["agent_interfaces"]) == 0 + ), "Agent interfaces in this case are attached to the agent configuration." + # I would consider allowing agent interface to also be just a dictionary. + env_params["agent_interfaces"] = { + a_id: a_intrf.interface for a_id, a_intrf in agent_specs.items() + } + + env = gym.make( + **env_params, + ) + + for episode in episodes(n=typed_experiment_config.episodes): + agents = { + agent_id: agent_spec.build_agent() + for agent_id, agent_spec in agent_specs.items() + } + observations, _ = env.reset() + episode.record_scenario(env.unwrapped.scenario_log) + + terminateds = {"__all__": False} + steps = 0 + while ( + not terminateds["__all__"] or steps < typed_experiment_config.minimum_steps + ): + steps += 1 + actions = { + agent_id: agents[agent_id].act(agent_obs) + for agent_id, agent_obs in observations.items() + } + observations, rewards, terminateds, truncateds, infos = env.step(actions) + episode.record_step(observations, rewards, terminateds, truncateds, infos) + + env.close() + + +if __name__ == "__main__": + hydra_main() diff --git a/examples/control/parallel_environment.py b/examples/e8_parallel_environment.py similarity index 96% rename from examples/control/parallel_environment.py rename to examples/e8_parallel_environment.py index 8b45fbccd7..d5faa07346 100644 --- a/examples/control/parallel_environment.py +++ b/examples/e8_parallel_environment.py @@ -1,5 +1,8 @@ +"""This example demonstrates using a SMARTS environment in parallel to generate batched environment returns.""" import gymnasium as gym +from smarts.env.utils.action_conversion import ActionOptions + gym.logger.set_level(40) import sys @@ -7,7 +10,7 @@ from pathlib import Path from typing import Dict, Sequence, Tuple -sys.path.insert(0, str(Path(__file__).parents[2].absolute())) +sys.path.insert(0, str(Path(__file__).parents[1].absolute())) from 
examples.tools.argument_parser import default_argument_parser from smarts.core.agent import Agent from smarts.core.agent_interface import AgentInterface @@ -61,6 +64,7 @@ def main( sim_name=sim_name, headless=headless, observation_options=ObservationOptions.unformatted, + action_options=ActionOptions.unformatted, seed=seed, ) # A list of env constructors of type `Callable[[int], gym.Env]` @@ -220,7 +224,7 @@ def parallel_env_sync( if not args.scenarios: args.scenarios = [ str( - Path(__file__).absolute().parents[2] + Path(__file__).absolute().parents[1] / "scenarios" / "sumo" / "figure_eight" diff --git a/examples/env/create_run_visualize.ipynb b/examples/e9_notebook.ipynb similarity index 60% rename from examples/env/create_run_visualize.ipynb rename to examples/e9_notebook.ipynb index 3e20cf74d3..cdbf481ce6 100644 --- a/examples/env/create_run_visualize.ipynb +++ b/examples/e9_notebook.ipynb @@ -1,5 +1,14 @@ { "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# SMARTS", + "This example demonstrates the use of SMARTS within a Jupyter notebook." + ] + }, { "attachments": {}, "cell_type": "markdown", @@ -7,7 +16,8 @@ "id": "wkR0YvENQni4" }, "source": [ - "**Setup dependencies**" + "## Setup dependencies\n", + "Install the base SMARTS dependencies needed for the example." ] }, { @@ -22,8 +32,18 @@ }, "outputs": [], "source": [ - "!git clone https://github.com/huawei-noah/SMARTS 2> /dev/null\n", - "!cd SMARTS && ls && git checkout tucker/fix_for_python_3_10 && pip install .[camera_obs,gymnasium]" + "# !git clone https://github.com/huawei-noah/SMARTS 2> /dev/null\n", + "# !cd SMARTS && ls && pip install .[camera_obs]\n", + "%pip install smarts[camera_obs]" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Insert Colab content directory \n", + "This is only necessary if running this notebook on Colab." 
] }, { @@ -46,6 +66,15 @@ "print(Path(os.path.abspath(\"\")) / \"SMARTS\")" ] }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Main\n", + "The core example." + ] + }, { "attachments": {}, "cell_type": "markdown", @@ -53,7 +82,7 @@ "id": "s7UtcphinvNv" }, "source": [ - "**Import Base Modules**" + "### Import Base Modules" ] }, { @@ -67,7 +96,47 @@ "import gymnasium as gym\n", "\n", "from smarts.zoo import registry\n", - "from smarts.env.gymnasium.wrappers.episode_logger import EpisodeLogger" + "from smarts.env.gymnasium.wrappers.episode_logger import EpisodeLogger\n", + "from smarts.core.utils.episodes import episode_range\n", + "from smarts.env.gymnasium.wrappers.single_agent import SingleAgent" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Set up the environment" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "env = gym.make(\"smarts.env:hiway-v1\")\n", + "env = SingleAgent(env)\n", + "env: gym.Env = EpisodeLogger(env)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Set up the agent" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import zoo.policies.keep_lane_agent\n", + "\n", + "agent, _ = registry.make_agent(\"zoo.policies:keep-lane-agent-v0\")" ] }, { @@ -77,7 +146,7 @@ "id": "LFoG7Z-FobPP" }, "source": [ - "**Run an episode**" + "### Run the experiment" ] }, { @@ -93,27 +162,28 @@ }, "outputs": [], "source": [ - "from smarts.core.utils.episodes import episode_range\n", - "from smarts.core.utils.import_utils import import_module_from_file\n", - "\n", - "import_module_from_file(\n", - " \"examples.env.figure_eight_env\", Path(os.path.abspath(\"\")) / \"figure_eight_env.py\"\n", - ")\n", - "\n", - "env = gym.make(\"figure_eight-v0\", disable_env_checker=True)\n", - "env: gym.Env = 
EpisodeLogger(env)\n", - "\n", - "import zoo.policies.keep_lane_agent\n", - "\n", - "agent = registry.make_agent(\"zoo.policies:keep-lane-agent-v0\")\n", - "\n", "for episode in episode_range(max_steps=450):\n", " observation = env.reset()\n", " reward, terminated, truncated, info = None, False, False, None\n", " while episode.continues(observation, reward, terminated, truncated, info):\n", " action = agent.act(observation)\n", - " observation, reward, terminated, info = env.step(action)\n", - "\n", + " observation, reward, terminated, info = env.step(action)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Close the environment" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "env.close()" ] } diff --git a/examples/egoless.py b/examples/egoless.py deleted file mode 100644 index f5e07c4415..0000000000 --- a/examples/egoless.py +++ /dev/null @@ -1,47 +0,0 @@ -from pathlib import Path - -import gymnasium as gym -from tools.argument_parser import default_argument_parser - -from smarts.core.utils.episodes import episodes -from smarts.sstudio.scenario_construction import build_scenarios - - -def main(scenarios, headless, num_episodes, max_episode_steps=None): - env = gym.make( - "smarts.env:hiway-v1", - scenarios=scenarios, - agent_interfaces={}, - headless=headless, - ) - - if max_episode_steps is None: - max_episode_steps = 300 - - for episode in episodes(n=num_episodes): - env.reset() - episode.record_scenario(env.scenario_log) - - for _ in range(max_episode_steps): - env.step({}) - episode.record_step({}, {}, {}, {}, {}) - - env.close() - - -if __name__ == "__main__": - parser = default_argument_parser("egoless") - args = parser.parse_args() - - if not args.scenarios: - args.scenarios = [ - str(Path(__file__).absolute().parents[2] / "scenarios" / "sumo" / "loop") - ] - - build_scenarios(scenarios=args.scenarios) - - main( - scenarios=args.scenarios, - 
headless=args.headless, - num_episodes=args.episodes, - ) diff --git a/examples/env/figure_eight_env.py b/examples/env/figure_eight_env.py deleted file mode 100644 index f1878c6b45..0000000000 --- a/examples/env/figure_eight_env.py +++ /dev/null @@ -1,43 +0,0 @@ -from pathlib import Path - -import gymnasium as gym - -from smarts.core.agent_interface import AgentInterface, AgentType -from smarts.env.gymnasium.wrappers.single_agent import SingleAgent -from smarts.env.utils.action_conversion import ActionOptions -from smarts.env.utils.observation_conversion import ObservationOptions - -agent_interface = AgentInterface.from_type( - AgentType.Laner, - max_episode_steps=150, - top_down_rgb=True, - occupancy_grid_map=True, - drivable_area_grid_map=True, -) - - -def entry_point(*args, **kwargs): - scenario = str( - (Path(__file__).parent / "../../scenarios/sumo/figure_eight").resolve() - ) - # Note: can build the scenario here - from smarts.sstudio.scenario_construction import build_scenario - - build_scenario(scenario=scenario, clean=True) - env = gym.make( - "smarts.env:hiway-v1", - agent_interfaces={"agent-007": agent_interface}, - scenarios=[scenario], - headless=True, - action_options=ActionOptions.unformatted, - observation_options=ObservationOptions.unformatted, - disable_env_checker=True, - ) - env.unwrapped.render_mode = "rgb_array" - env.metadata["render_modes"] = set(env.metadata.get("render_modes", ())) | { - "rgb_array" - } - return SingleAgent(env) - - -gym.register("figure_eight-v0", entry_point=entry_point) diff --git a/examples/rl/rllib/__init__.py b/examples/rl/rllib/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/examples/rl/rllib/model/README.md b/examples/rl/rllib/model/README.md deleted file mode 100644 index 9afe8800ce..0000000000 --- a/examples/rl/rllib/model/README.md +++ /dev/null @@ -1,3 +0,0 @@ -## Model Binaries - -The binaries located in this directory are the components of a trained rllib model. 
These are related to the `examples/rl/rllib/pg_pbt_example.py` example script. Results from `examples/rl/rllib/pg_pbt_example.py` are loaded and written to this directory. \ No newline at end of file diff --git a/examples/tests/test_examples.py b/examples/tests/test_examples.py index ec807db580..10c3e7aabf 100644 --- a/examples/tests/test_examples.py +++ b/examples/tests/test_examples.py @@ -1,8 +1,11 @@ import sys import tempfile +from importlib import import_module from pathlib import Path +from typing import Literal import pytest +from hydra import compose, initialize_config_dir from smarts.core.utils import import_utils @@ -16,31 +19,62 @@ @pytest.mark.parametrize( "example", - ["egoless", "chase_via_points", "trajectory_tracking", "laner", "hiway_v1"], + [ + "e1_egoless", + "e2_single_agent", + "e3_multi_agent", + "e4_environment_config", + "e5_agent_zoo", + "e6_agent_action_space", + "e7_experiment_base", + "e8_parallel_environment", + ], # TODO: "ego_open_agent" and "human_in_the_loop" are causing aborts, fix later ) def test_examples(example): - if example == "egoless": - from examples import egoless as current_example - elif example == "chase_via_points": - from examples.control import chase_via_points as current_example - elif example == "trajectory_tracking": - from examples.control import trajectory_tracking as current_example - elif example == "laner": - from examples.control import laner as current_example - elif example == "hiway_v1": - from examples.control import hiway_env_v1_lane_follower as current_example + current_example = import_module(example, "examples") main = current_example.main - main( - scenarios=["scenarios/sumo/loop"], - headless=True, - num_episodes=1, - max_episode_steps=100, - ) + + if example == "e7_experiment_base": + example_path = Path(current_example.__file__).parent + with initialize_config_dir( + version_base=None, + config_dir=str(example_path.absolute() / "configs" / example), + ): + cfg = 
compose(config_name="experiment_default") + main(cfg) + elif example == "e8_parallel_environment": + scenarios = [ + str( + Path(__file__).absolute().parents[2] + / "scenarios" + / "sumo" + / "figure_eight" + ) + ] + main( + scenarios=scenarios, + sim_name=f"test_{example}", + headless=True, + seed=42, + num_agents=2, + num_stack=2, + num_env=2, + auto_reset=True, + max_episode_steps=40, + num_episodes=2, + ) + else: + main( + scenarios=["scenarios/sumo/loop"], + headless=True, + num_episodes=1, + max_episode_steps=100, + ) def test_rllib_pg_example(): - from examples.rl.rllib import pg_example + from examples.e12_rllib import pg_example main = pg_example.main with tempfile.TemporaryDirectory() as result_dir: @@ -62,7 +96,7 @@ def test_rllib_pg_example(): def test_rllib_tune_pg_example(): - from examples.rl.rllib import pg_pbt_example + from examples.e12_rllib import pg_pbt_example main = pg_pbt_example.main with tempfile.TemporaryDirectory() as result_dir, tempfile.TemporaryDirectory() as model_dir: diff --git a/examples/tests/test_learning.py b/examples/tests/test_learning.py index c426cad850..7b6d25d0f7 100644 --- a/examples/tests/test_learning.py +++ b/examples/tests/test_learning.py @@ -21,7 +21,7 @@ def test_learning_regression_rllib(): - from examples.rl.rllib.rllib_agent import TrainingModel, rllib_agent + from examples.e12_rllib.rllib_agent import TrainingModel, rllib_agent ModelCatalog.register_custom_model(TrainingModel.NAME, TrainingModel) rllib_policies = { diff --git a/examples/tests/test_rl.py b/examples/tests/test_rl.py index b4cdded710..6908d2a921 100644 --- a/examples/tests/test_rl.py +++ b/examples/tests/test_rl.py @@ -24,10 +24,10 @@ def func(): return func -def test_platoon(): - """Tests RL training of `examples/rl/platoon` example.""" +def test_e11_platoon(): + """Tests RL training of `examples/e11_platoon` example.""" - from examples.rl.platoon.train.run import load_config, main + from examples.e11_platoon.train.run import load_config, main 
args = argparse.Namespace() args.mode = "train" @@ -36,15 +36,15 @@ def test_platoon(): args.head = False with mock.patch( - "examples.rl.platoon.train.run.load_config", + "examples.e11_platoon.train.run.load_config", _mock_load_config(load_config), ): main(args) -def test_drive(): - """Tests RL training of `examples/rl/drive` example.""" - from examples.rl.drive.train.run import load_config, main +def test_e10_drive(): + """Tests RL training of `examples/e10_drive` example.""" + from examples.e10_drive.train.run import load_config, main args = argparse.Namespace() args.mode = "train" @@ -53,7 +53,7 @@ def test_drive(): args.head = False with mock.patch( - "examples.rl.drive.train.run.load_config", + "examples.e10_drive.train.run.load_config", _mock_load_config(load_config), ): main(args) diff --git a/examples/tools/argument_parser.py b/examples/tools/argument_parser.py index ee469bf1c3..8e8abb0826 100644 --- a/examples/tools/argument_parser.py +++ b/examples/tools/argument_parser.py @@ -2,8 +2,24 @@ from typing import Optional -def default_argument_parser(program: Optional[str] = None): - """This factory method returns a vanilla `argparse.ArgumentParser` with the +def empty_parser(program: Optional[str] = None): + """This factory method returns an empty `argparse.ArgumentParser` with primitive + configuration. + + You can extend it with more `parser.add_argument(...)` calls or obtain the + arguments via `parser.parse_args()`. + """ + if not program: + from pathlib import Path + + program = Path(__file__).stem + + parser = argparse.ArgumentParser(program) + return parser + + +def minimal_argument_parser(program: Optional[str] = None): + """This factory method returns a minimal `argparse.ArgumentParser` with the minimum subset of arguments that should be supported. 
You can extend it with more `parser.add_argument(...)` calls or obtain the @@ -38,6 +54,17 @@ def default_argument_parser(program: Optional[str] = None): type=int, default=100, ) + return parser + + +def default_argument_parser(program: Optional[str] = None): + """This factory method returns a vanilla `argparse.ArgumentParser` with a + slightly broader subset of arguments that should be supported. + + You can extend it with more `parser.add_argument(...)` calls or obtain the + arguments via `parser.parse_args()`. + """ + parser = minimal_argument_parser(program=program) parser.add_argument("--seed", type=int, default=42) parser.add_argument( "--sim_name", diff --git a/examples/tools/primitive_agents.py b/examples/tools/primitive_agents.py new file mode 100644 index 0000000000..fe79a8caac --- /dev/null +++ b/examples/tools/primitive_agents.py @@ -0,0 +1,108 @@ +import importlib +import random +from typing import Optional + +from smarts.core.agent_interface import AgentInterface, AgentType +from smarts.core.observations import Observation +from smarts.zoo import Agent, AgentSpec + + +class KeepLaneAgent(Agent): + def act(self, obs): + val = ["keep_lane", "slow_down", "change_lane_left", "change_lane_right"] + return random.choice(val) + + +def kla_entrypoint(*, max_episode_steps: Optional[int]) -> AgentSpec: + return AgentSpec( + interface=AgentInterface.from_type( + AgentType.Laner, max_episode_steps=max_episode_steps + ), + agent_builder=KeepLaneAgent, + ) + + +def open_entrypoint(*, debug: bool = False, aggressiveness: int = 3) -> AgentSpec: + try: + open_agent = importlib.import_module("open_agent") + except ModuleNotFoundError as e: + raise ModuleNotFoundError( + f"Ensure that the open-agent has been installed with `pip install open-agent" + ) + return open_agent.entrypoint(debug=debug, aggressiveness=aggressiveness) + + +class ChaseViaPointsAgent(Agent): + def act(self, obs: Observation): + if ( + len(obs.via_data.near_via_points) < 1 + or 
obs.ego_vehicle_state.road_id != obs.via_data.near_via_points[0].road_id + ): + return (obs.waypoint_paths[0][0].speed_limit, 0) + + nearest = obs.via_data.near_via_points[0] + if nearest.lane_index == obs.ego_vehicle_state.lane_index: + return (nearest.required_speed, 0) + + return ( + nearest.required_speed, + 1 if nearest.lane_index > obs.ego_vehicle_state.lane_index else -1, + ) + + +def cvpa_entrypoint(*, max_episode_steps: Optional[int]): + return AgentSpec( + interface=AgentInterface.from_type( + AgentType.LanerWithSpeed, + max_episode_steps=max_episode_steps, + ), + agent_builder=ChaseViaPointsAgent, + agent_params=None, + ) + + +class TrackingAgent(Agent): + def act(self, obs): + lane_index = 0 + num_trajectory_points = min([10, len(obs.waypoint_paths[lane_index])]) + # Desired speed is in m/s + desired_speed = 50 / 3.6 + trajectory = [ + [ + obs.waypoint_paths[lane_index][i].pos[0] + for i in range(num_trajectory_points) + ], + [ + obs.waypoint_paths[lane_index][i].pos[1] + for i in range(num_trajectory_points) + ], + [ + obs.waypoint_paths[lane_index][i].heading + for i in range(num_trajectory_points) + ], + [desired_speed for i in range(num_trajectory_points)], + ] + return trajectory + + +def trajectory_tracking_entrypoint(*, max_episode_steps: Optional[int]): + return AgentSpec( + interface=AgentInterface.from_type( + AgentType.Tracker, max_episode_steps=max_episode_steps + ), + agent_builder=TrackingAgent, + ) + + +class StandardLaneFollowerAgent(Agent): + def act(self, obs): + return (obs["waypoint_paths"]["speed_limit"][0][0], 0) + + +def standard_lane_follower_entrypoint(*, max_episode_steps: Optional[int]): + return AgentSpec( + interface=AgentInterface.from_type( + AgentType.LanerWithSpeed, max_episode_steps=max_episode_steps + ), + agent_builder=StandardLaneFollowerAgent, + ) diff --git a/scenarios/INTERACTION/README.md b/scenarios/INTERACTION/README.md index daa358f67b..e063953cff 100644 --- a/scenarios/INTERACTION/README.md +++ 
b/scenarios/INTERACTION/README.md @@ -26,5 +26,5 @@ their level of exactness may not be enough for some model-training situations, so you may want or need to refine them with SUMO's [netedit tool](https://sumo.dlr.de/docs/Netedit/index.html). An example of how traffic history might be replayed in SMARTS can be found in the -[examples/traffic_histories_to_observations.py](../../examples/traffic_histories_to_observations.py) +[examples/smarts/traffic_histories_to_observations.py](../../examples/smarts/traffic_histories_to_observations.py) script. diff --git a/scenarios/NGSIM/README.md b/scenarios/NGSIM/README.md index 43c3bd3316..f72713a568 100644 --- a/scenarios/NGSIM/README.md +++ b/scenarios/NGSIM/README.md @@ -28,5 +28,5 @@ their level of exactness may not be enough for some model-training situations, so you may want or need to refine them with SUMO's [netedit tool](https://sumo.dlr.de/docs/Netedit/index.html). An example of how traffic history might be replayed in SMARTS can be found in the -[examples/traffic_histories_to_observations.py](../../examples/traffic_histories_to_observations.py) +[examples/smarts/traffic_histories_to_observations.py](../../examples/smarts/traffic_histories_to_observations.py) script. diff --git a/setup.cfg b/setup.cfg index 07bfffc065..3d6c01ec88 100644 --- a/setup.cfg +++ b/setup.cfg @@ -20,9 +20,6 @@ include_package_data = True zip_safe = True python_requires = >=3.8 install_requires = - # setuptools: - # tensorboard needs >=41 - # 50.0 is broken: https://github.com/pypa/setupatools/issues/2353 setuptools>=41.0.0,!=50.0 click>=7.1.2 # used in scl # numpy>=1.19.5 required for tf 2.4 @@ -30,7 +27,8 @@ install_requires = psutil>=5.4.8 shapely>=2.0.0 tableprint>=0.9.1 - trimesh==3.9.29 # for writing .glb files + trimesh>=3.9.29 # for writing .glb files + scipy # This is still needed for writing .glb files... 
yattag>=1.14.0 # for scenario studio PyYAML>=3.13 twisted>=21.7.0 # for scenario requirements.txt files @@ -77,6 +75,10 @@ envision = tornado>=5.1.1 websocket-client>=1.2.1 ijson>=3.1.4 +examples = + hydra-core + %(gymnasium)s + %(sumo)s extras = pynput>=1.7.4 # Used by HumanKeyboardAgent gif_recorder = moviepy == 1.0.3 @@ -126,6 +128,7 @@ all = %(diagnostic)s %(doc)s %(envision)s + %(examples)s %(extras)s %(gif_recorder)s %(gymnasium)s diff --git a/smarts/benchmark/entrypoints/benchmark_runner_v0.py b/smarts/benchmark/entrypoints/benchmark_runner_v0.py index 97eb43092f..beb1749f2e 100644 --- a/smarts/benchmark/entrypoints/benchmark_runner_v0.py +++ b/smarts/benchmark/entrypoints/benchmark_runner_v0.py @@ -59,7 +59,7 @@ def _eval_worker_local(name, env_config, episodes, agent_locator, error_tolerant ) env = Metrics(env, formula_path=env_config["metric_formula"]) agents = { - agent_id: agent_registry.make_agent(locator=agent_locator) + agent_id: agent_registry.make_agent(locator=agent_locator)[0] for agent_id in env.agent_ids } diff --git a/smarts/benchmark/tests/test_benchmark_runner.py b/smarts/benchmark/tests/test_benchmark_runner.py index e87c84a214..9dadb99a2c 100644 --- a/smarts/benchmark/tests/test_benchmark_runner.py +++ b/smarts/benchmark/tests/test_benchmark_runner.py @@ -55,11 +55,11 @@ def predict(*args, **kwargs): ], indirect=True, ) -def test_drive(get_benchmark_args): - """Tests Driving SMARTS 2023.1 and 2023.2 benchmarks using `examples/rl/drive` model.""" +def test_e10_drive(get_benchmark_args): + """Tests Driving SMARTS 2023.1 and 2023.2 benchmarks using `examples/e10_drive` model.""" from contrib_policy.policy import Policy - agent_locator = "examples.rl.drive.inference:contrib-agent-v0" + agent_locator = "examples.e10_drive.inference:contrib-agent-v0" action = 1 with mock.patch.object(Policy, "_get_model", _get_model(action)): benchmark(benchmark_args=get_benchmark_args, agent_locator=agent_locator) @@ -72,11 +72,11 @@ def 
test_drive(get_benchmark_args): ], indirect=True, ) -def test_platoon(get_benchmark_args): - """Tests Driving SMARTS 2023.3 benchmark using `examples/rl/platoon` model.""" +def test_e11_platoon(get_benchmark_args): + """Tests Driving SMARTS 2023.3 benchmark using `examples/e11_platoon` model.""" from contrib_policy.policy import Policy - agent_locator = "examples.rl.platoon.inference:contrib-agent-v0" + agent_locator = "examples.e11_platoon.inference:contrib-agent-v0" action = 2 with mock.patch.object(Policy, "_get_model", _get_model(action)): benchmark(benchmark_args=get_benchmark_args, agent_locator=agent_locator) diff --git a/smarts/core/agent_interface.py b/smarts/core/agent_interface.py index 50cd6f1a89..f0818c52b6 100644 --- a/smarts/core/agent_interface.py +++ b/smarts/core/agent_interface.py @@ -268,7 +268,9 @@ class AgentInterface: debug: bool = False """Enable debug information for the various sensors and action spaces.""" - event_configuration: EventConfiguration = EventConfiguration() + event_configuration: EventConfiguration = field( + default_factory=lambda: EventConfiguration() + ) """Configurable criteria of when to trigger events""" done_criteria: DoneCriteria = field(default_factory=lambda: DoneCriteria()) diff --git a/smarts/core/configuration.py b/smarts/core/configuration.py index a08fb15fc9..107ffe78ab 100644 --- a/smarts/core/configuration.py +++ b/smarts/core/configuration.py @@ -22,12 +22,15 @@ import ast import configparser import functools +import logging import os import pathlib from typing import Any, Callable, Optional, Union _UNSET = object() +logger = logging.getLogger(__name__) + def _convert_truthy(t: str) -> bool: """Convert value to a boolean. This should only allow ([Tt]rue)|([Ff]alse)|[\\d]. 
@@ -72,7 +75,7 @@ def __init__( raise FileNotFoundError(f"Configuration file not found at {config_file}") self._config.read(str(config_file.absolute())) - print(f"Using configuration from: {config_file.absolute()}") + logger.info(msg=f"Using engine configuration from: {config_file.absolute()}") @property def environment_prefix(self): diff --git a/smarts/core/controllers/__init__.py b/smarts/core/controllers/__init__.py index 0119988b66..7a82e443a7 100644 --- a/smarts/core/controllers/__init__.py +++ b/smarts/core/controllers/__init__.py @@ -17,8 +17,9 @@ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. -from enum import Enum +import enum from functools import partial +from typing import Dict, Literal, Sequence, Tuple, Union import numpy as np @@ -48,6 +49,15 @@ METER_PER_SECOND_TO_KM_PER_HR = 3.6 +class LaneAction(enum.Enum): + """The action for lane space actions.""" + + keep_lane: str = "keep_lane" + slow_down: str = "slow_down" + change_lane_left: str = "change_lane_left" + change_lane_right: str = "change_lane_right" + + class Controllers: """Handles vehicle controller selection.""" @@ -127,15 +137,16 @@ def perform_action( sensor_state=sensor_state, ) + action = LaneAction(value=action) # 12.5 m/s (45 km/h) is used as the nominal speed for lane change. # For keep_lane, the nominal speed is set to 15 m/s (54 km/h). 
- if action == "keep_lane": + if action == LaneAction.keep_lane: perform_lane_following(target_speed=15, lane_change=0) - elif action == "slow_down": + elif action == LaneAction.slow_down: perform_lane_following(target_speed=0, lane_change=0) - elif action == "change_lane_left": + elif action == LaneAction.change_lane_left: perform_lane_following(target_speed=12.5, lane_change=1) - elif action == "change_lane_right": + elif action == LaneAction.change_lane_right: perform_lane_following(target_speed=12.5, lane_change=-1) elif action_space == ActionSpaceType.Direct: DirectController.perform_action(sim.last_dt, vehicle, action) @@ -162,10 +173,79 @@ def perform_action( ) else: raise ValueError( - f"perform_action(action_space={action_space}, ...) has failed " "inside controller" + f"perform_action({action_space=}, ...) has failed " "inside controller" + ) + + @staticmethod + def get_action_shape(action_space: ActionSpaceType): + """Describes the shape of actions that are used for standard controllers. + + Args: + action_space (ActionSpaceType): The action space to describe. + + Raises: + NotImplementedError: The action space requested is not yet implemented. + + Returns: + Tuple[Any, str]: The action space and the descriptive attribute. + """ + # TODO MTA: test the action shapes against dummy agents.
+ if action_space == ActionSpaceType.Empty: + return Union[type(None), Literal[False], Tuple], "null" + + if action_space == ActionSpaceType.Lane: + return ( + Literal[ + LaneAction.keep_lane, + LaneAction.slow_down, + LaneAction.change_lane_left, + LaneAction.change_lane_right, + ], + "lane_action", ) + if action_space in ( + ActionSpaceType.ActuatorDynamic, + ActionSpaceType.Continuous, + ): + return Tuple[float, float, float], ("throttle", "break", "steering") + + if action_space == ActionSpaceType.LaneWithContinuousSpeed: + return Tuple[float, int], ("lane_speed", "lane_change_delta") + + if action_space in (ActionSpaceType.MPC, ActionSpaceType.Trajectory): + return Tuple[ + Sequence[float], Sequence[float], Sequence[float], Sequence[float] + ], ("x_coords", "y_coords", "headings", "speeds") + + if action_space == ActionSpaceType.Direct: + return Union[float, Tuple[float, float]], [ + "speed", + ("linear_acceleration", "angular_velocity"), + ] + + if action_space == ActionSpaceType.TrajectoryWithTime: + return Tuple[ + Sequence[float], + Sequence[float], + Sequence[float], + Sequence[float], + Sequence[float], + ], ("times", "x_coords", "y_coords", "headings", "speeds") + + TargetPoseSpace = Tuple[float, float, float, float] + TargetPoseAttributes = ("x_coord", "y_coord", "heading", "time_delta") + if action_space == ActionSpaceType.TargetPose: + return TargetPoseSpace, TargetPoseAttributes + + if action_space == ActionSpaceType.MultiTargetPose: + return Dict[str, TargetPoseSpace], {"agent_id": TargetPoseAttributes} + + if action_space == ActionSpaceType.RelativeTargetPose: + return Tuple[float, float, float], ("delta_x", "delta_y", "delta_heading") + + raise NotImplementedError(f"Type {action_space} is not implemented") + class ControllerOutOfLaneException(Exception): """Represents an error due to a vehicle straying too far from any available lane.""" diff --git a/smarts/core/coordinates.py b/smarts/core/coordinates.py index 8d904c147a..930ac9ad16 100644 --- 
a/smarts/core/coordinates.py +++ b/smarts/core/coordinates.py @@ -146,9 +146,12 @@ class RefLinePoint(NamedTuple): Also known as the Frenet coordinate system. """ - s: float # offset along lane from start of lane - t: Optional[float] = 0 # horizontal displacement from center of lane - h: Optional[float] = 0 # vertical displacement from surface of lane + s: float + """The offset along lane from start of lane.""" + t: Optional[float] = 0 + """The horizontal displacement from center of lane.""" + h: Optional[float] = 0 + """The vertical displacement from surface of lane.""" @dataclass(frozen=True) diff --git a/smarts/core/opendrive_road_network.py b/smarts/core/opendrive_road_network.py index 37bf78cdeb..58c6391d25 100644 --- a/smarts/core/opendrive_road_network.py +++ b/smarts/core/opendrive_road_network.py @@ -506,7 +506,7 @@ def _load(self): for incoming in outgoing.incoming_lanes if incoming != lane } - lane._foes = list(set(foes)) + lane._foes = list(foes) if lane.foes or len(lane.incoming_lanes) > 1: road._is_junction = True diff --git a/smarts/core/sensors/local_sensor_resolver.py b/smarts/core/sensors/local_sensor_resolver.py index 61b84968cd..6f3ce89633 100644 --- a/smarts/core/sensors/local_sensor_resolver.py +++ b/smarts/core/sensors/local_sensor_resolver.py @@ -42,7 +42,7 @@ def observe( renderer, bullet_client, ): - with timeit("serial run", logger.info): + with timeit("serial run", logger.debug): ( observations, dones, @@ -54,7 +54,7 @@ def observe( ) # While observation processes are operating do rendering - with timeit("rendering", logger.info): + with timeit("rendering", logger.debug): rendering = {} for agent_id in agent_ids: for vehicle_id in sim_frame.vehicles_for_agents[agent_id]: @@ -72,7 +72,7 @@ def observe( ) updated_sensors[vehicle_id].update(updated_unsafe_sensors) - with timeit(f"merging observations", logger.info): + with timeit(f"merging observations", logger.debug): # Merge sensor information for agent_id, r_obs in rendering.items(): 
observations[agent_id] = replace(observations[agent_id], **r_obs) diff --git a/smarts/core/sensors/parallel_sensor_resolver.py b/smarts/core/sensors/parallel_sensor_resolver.py index f6c614fc0a..3335ede279 100644 --- a/smarts/core/sensors/parallel_sensor_resolver.py +++ b/smarts/core/sensors/parallel_sensor_resolver.py @@ -88,7 +88,7 @@ def observe( used_workers: List[SensorsWorker] = [] with timeit( f"parallizable observations with {len(agent_ids)} and {len(workers)}", - logger.info, + logger.debug, ): agent_ids_for_grouping = list(agent_ids) agent_groups = [ @@ -98,7 +98,7 @@ def observe( for i, agent_group in enumerate(agent_groups): if not agent_group: break - with timeit(f"submitting {len(agent_group)} agents", logger.info): + with timeit(f"submitting {len(agent_group)} agents", logger.debug): workers[i].send( SensorsWorker.Request( SensorsWorkerRequestId.SIMULATION_FRAME, @@ -108,7 +108,7 @@ def observe( used_workers.append(workers[i]) # While observation processes are operating do rendering - with timeit("rendering", logger.info): + with timeit("rendering", logger.debug): rendering = {} for agent_id in agent_ids: for vehicle_id in sim_frame.vehicles_for_agents[agent_id]: @@ -127,7 +127,7 @@ def observe( updated_sensors[vehicle_id].update(updated_unsafe_sensors) # Collect futures - with timeit("waiting for observations", logger.info): + with timeit("waiting for observations", logger.debug): if used_workers: while agent_ids != set(observations): assert all( @@ -144,7 +144,7 @@ def observe( for v_id, values in u_sens.items(): updated_sensors[v_id].update(values) - with timeit(f"merging observations", logger.info): + with timeit(f"merging observations", logger.debug): # Merge sensor information for agent_id, r_obs in rendering.items(): observations[agent_id] = replace(observations[agent_id], **r_obs) @@ -292,14 +292,14 @@ def _run( break if isinstance(work, cls.Request): run_work = cls._on_request(state, request=work) - with timeit("do work", logger.info): + 
with timeit("do work", logger.debug): if not run_work: continue result = cls._do_work(state=state.copy()) - with timeit("reserialize", logger.info): + with timeit("reserialize", logger.debug): if serialize_results: result = serializer.dumps(result) - with timeit("put back to main thread", logger.info): + with timeit("put back to main thread", logger.debug): connection.send(result) def run(self): @@ -323,12 +323,12 @@ def send(self, request: Request): def result(self, timeout=None): """The most recent result from the worker.""" - with timeit("main thread blocked", logger.info): + with timeit("main thread blocked", logger.debug): conn = mp.connection.wait([self._parent_connection], timeout=timeout).pop() # pytype: disable=attribute-error result = conn.recv() # pytype: enable=attribute-error - with timeit("deserialize for main thread", logger.info): + with timeit("deserialize for main thread", logger.debug): if self._serialize_results: result = serializer.loads(result) return result diff --git a/smarts/core/smarts.py b/smarts/core/smarts.py index ed3fa7c500..bc23732f41 100644 --- a/smarts/core/smarts.py +++ b/smarts/core/smarts.py @@ -1007,7 +1007,7 @@ def renderer(self) -> RendererBase: return self._renderer @property - def renderer_ref(self) -> Optional[Any]: + def renderer_ref(self) -> Optional[RendererBase]: """Get the reference of the renderer. This can be `None`.""" return self._renderer @@ -1384,15 +1384,6 @@ def scenario(self) -> Scenario: """The current simulation scenario.""" return self._scenario - @property - def timestep_sec(self) -> float: - """Deprecated. Use `fixed_timestep_sec`.""" - warnings.warn( - "SMARTS timestep_sec property has been deprecated in favor of fixed_timestep_sec. 
Please update your code.", - category=DeprecationWarning, - ) - return self.fixed_timestep_sec - @property def fixed_timestep_sec(self) -> float: """The simulation fixed time-step.""" diff --git a/smarts/core/sumo_road_network.py b/smarts/core/sumo_road_network.py index 02175c0b76..308dc0b4ed 100644 --- a/smarts/core/sumo_road_network.py +++ b/smarts/core/sumo_road_network.py @@ -405,18 +405,22 @@ def incoming_lanes(self) -> List[RoadMap.Lane]: incoming_lanes = self._sumo_lane.getIncoming( onlyDirect=True ) or self._sumo_lane.getIncoming(onlyDirect=False) - return [ - self._map.lane_by_id(incoming.getID()) for incoming in incoming_lanes - ] + return sorted( + (self._map.lane_by_id(incoming.getID()) for incoming in incoming_lanes), + key=lambda l: l.lane_id, + ) @cached_property def outgoing_lanes(self) -> List[RoadMap.Lane]: - return [ - self._map.lane_by_id( - outgoing.getViaLaneID() or outgoing.getToLane().getID() - ) - for outgoing in self._sumo_lane.getOutgoing() - ] + return sorted( + ( + self._map.lane_by_id( + outgoing.getViaLaneID() or outgoing.getToLane().getID() + ) + for outgoing in self._sumo_lane.getOutgoing() + ), + key=lambda l: l.lane_id, + ) @cached_property def entry_surfaces(self) -> List[RoadMap.Surface]: diff --git a/smarts/core/trap_manager.py b/smarts/core/trap_manager.py index a473ec06d0..5d24b6c6a8 100644 --- a/smarts/core/trap_manager.py +++ b/smarts/core/trap_manager.py @@ -226,7 +226,7 @@ def step(self, sim): if ( trap.patience_expired(sim.elapsed_sim_time) - and sim.elapsed_sim_time > sim.timestep_sec + and sim.elapsed_sim_time > sim.fixed_timestep_sec ): capture_by_agent_id[agent_id] = _CaptureState( ConditionState.EXPIRED, trap, updated_mission=trap.mission diff --git a/smarts/core/utils/class_factory.py b/smarts/core/utils/class_factory.py index 47c73d3112..13bbab7a84 100644 --- a/smarts/core/utils/class_factory.py +++ b/smarts/core/utils/class_factory.py @@ -22,7 +22,8 @@ # See gym license in THIRD_PARTY_OPEN_SOURCE_SOFTWARE_NOTICE 
import importlib import re -from urllib.parse import urlparse +import warnings +from typing import Dict # Taken from OpenAI gym's name constraints NAME_CONSTRAINT_REGEX = re.compile(r"^(?:[\w:-]+\/)?([\w:.-]+)-(v(\d+)|latest)$") @@ -87,42 +88,49 @@ class ClassRegister: """A listing of key named class factories.""" def __init__(self): - self.index = {} + self.index: Dict[str, ClassFactory] = {} - def register(self, locator, entry_point=None, **kwargs): + def register(self, name, entry_point=None, **kwargs): """Registers a new factory with the given locator as the key. Args: locator: The key value of the factory. entry_point: The factory method. kwargs: Predefined arguments to the factory method. """ - # TODO: locator is being used for both module:name and just name. The former - # is the locator, and the latter is simply name. Update the signature of - # this method to be register(name, entrypoint, ...) - name = locator - if name not in self.index: - self.index[name] = ClassFactory(locator, entry_point, **kwargs) + + if name in self.index: + warnings.warn( + f"Resident named '{name}' was already registered. Overwriting existing registration." + ) + self.index[name] = ClassFactory(name, entry_point, **kwargs) def find_factory(self, locator): """Locates a factory given a locator.""" self._raise_on_invalid_locator(locator) - mod_name, name = locator.split(":", 1) - # `name` could be simple name string (e.g. or a URL - try: - # Import the module so that the agent may register it self in our self.index - module = importlib.import_module(mod_name) - except ImportError: - import sys - - raise ImportError( - f"Ensure that `{mod_name}` module can be found from your " - f"PYTHONPATH and name=`{locator}` exists (e.g. was registered " - "manually or downloaded.\n" - f"`PYTHONPATH`: `{sys.path}`" - ) + mod_name, _, name = locator.partition(":") + if name != "": + # There is a module component. 
+ try: + # Import the module so that the agent may register itself in the index + # it is assumed that a `register(name=..., entry_point=...)` exists in the target module. + module = importlib.import_module(mod_name) + except ImportError: + import sys + + raise ImportError( + f"Ensure that `{mod_name}` module can be found from your " + f"PYTHONPATH and name=`{locator}` exists (e.g. was registered " + "manually or downloaded.\n" + f"`PYTHONPATH`: `{sys.path}`" + ) + else: + # There is no module component. + name = mod_name try: + # See if `register()` has been called. + # return the builder if it exists. return self.index[name] except KeyError: raise NameError(f"Locator not registered in lookup: {locator}") @@ -139,6 +147,21 @@ def all(self): """Lists all available factory objects.""" return self.index.values() + def __repr__(self) -> str: + columns = 3 + max_justify = float("-inf") + for name in self.index.keys(): + max_justify = max(max_justify, len(name)) + + out = "" + for i, name in enumerate(self.index.keys()): + out = f"{out}{name.ljust(max_justify)} " + if i % columns == 0 and len(self.index) != i + 1: + out += "\n" + out += "\n" + + return out + def _raise_on_invalid_locator(self, locator: str): if not is_valid_locator(locator): # TODO: Give clearer instructions/examples of the locator syntax diff --git a/smarts/core/utils/dummy.py b/smarts/core/utils/dummy.py new file mode 100644 index 0000000000..8b16507d05 --- /dev/null +++ b/smarts/core/utils/dummy.py @@ -0,0 +1,321 @@ +# MIT License +# +# Copyright (C) 2023. Huawei Technologies Co., Ltd. All rights reserved. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. 
+ + +import math + +import numpy as np + +import smarts.sstudio.types as t +from smarts.core.coordinates import Dimensions, Heading, RefLinePoint +from smarts.core.events import Events +from smarts.core.observations import ( + DrivableAreaGridMap, + EgoVehicleObservation, + GridMapMetadata, + Observation, + OccupancyGridMap, + RoadWaypoints, + TopDownRGB, + VehicleObservation, + Vias, +) +from smarts.core.plan import EndlessGoal, Mission, Start +from smarts.core.road_map import Waypoint + + +def dummy_observation() -> Observation: + """A dummy observation for tests and conversion.""" + return Observation( + dt=0.1, + step_count=1, + elapsed_sim_time=0.2, + events=Events( + collisions=[], + off_road=False, + off_route=False, + on_shoulder=False, + wrong_way=False, + not_moving=False, + reached_goal=False, + reached_max_episode_steps=False, + agents_alive_done=False, + interest_done=False, + ), + ego_vehicle_state=EgoVehicleObservation( + id="AGENT-007-07a0ca6e", + position=np.array([161.23485529, 3.2, 0.0]), + bounding_box=Dimensions(length=3.68, width=1.47, height=1.0), + heading=Heading(-1.5707963267948966), + speed=5.0, + steering=-0.0, + yaw_rate=4.71238898038469, + road_id="east", + lane_id="east_2", + lane_index=2, + lane_position=RefLinePoint(161.23485529, 0.0, 0.0), + mission=Mission( + start=Start( + position=np.array([163.07485529, 3.2]), + heading=Heading(-1.5707963267948966), + from_front_bumper=True, + ), + goal=EndlessGoal(), + route_vias=(), + start_time=0.1, + entry_tactic=t.TrapEntryTactic( + start_time=0, + zone=None, + exclusion_prefixes=(), + default_entry_speed=None, + ), + via=(), + vehicle_spec=None, + ), + linear_velocity=np.array([5.000000e00, 3.061617e-16, 0.000000e00]), + angular_velocity=np.array([0.0, 0.0, 0.0]), + linear_acceleration=np.array([0.0, 0.0, 0.0]), + angular_acceleration=np.array([0.0, 0.0, 0.0]), + linear_jerk=np.array([0.0, 0.0, 0.0]), + angular_jerk=np.array([0.0, 0.0, 0.0]), + ), + under_this_agent_control=True, + 
neighborhood_vehicle_states=[ + VehicleObservation( + id="car-west_0_0-east_0_max-784511-726648-0-0.0", + position=(-1.33354215, -3.2, 0.0), + bounding_box=Dimensions(length=3.68, width=1.47, height=1.4), + heading=Heading(-1.5707963267948966), + speed=5.050372796758114, + road_id="west", + lane_id="west_0", + lane_index=0, + lane_position=RefLinePoint(-1.33354215, 0.0, 0.0), + ), + VehicleObservation( + id="car-west_1_0-east_1_max--85270-726648-1-0.0", + position=(-1.47159011, 0.0, 0.0), + bounding_box=Dimensions(length=3.68, width=1.47, height=1.4), + heading=Heading(-1.5707963267948966), + speed=3.6410559446059954, + road_id="west", + lane_id="west_1", + lane_index=1, + lane_position=RefLinePoint(-1.47159011, 0.0, 0.0), + ), + ], + waypoint_paths=[ + [ + Waypoint( + pos=np.array([192.00733923, -3.2]), + heading=Heading(-1.5707963267948966), + lane_id="east_0", + lane_width=3.2, + speed_limit=5.0, + lane_index=0, + lane_offset=192.00733923, + ), + Waypoint( + pos=np.array([193.0, -3.2]), + heading=Heading(-1.5707963267948966), + lane_id="east_0", + lane_width=3.2, + speed_limit=5.0, + lane_index=0, + lane_offset=193.0, + ), + ], + [ + Waypoint( + pos=np.array([192.00733923, 0.0]), + heading=Heading(-1.5707963267948966), + lane_id="east_1", + lane_width=3.2, + speed_limit=5.0, + lane_index=1, + lane_offset=192.00733923, + ), + Waypoint( + pos=np.array([193.0, 0.0]), + heading=Heading(-1.5707963267948966), + lane_id="east_1", + lane_width=3.2, + speed_limit=5.0, + lane_index=1, + lane_offset=193.0, + ), + ], + ], + distance_travelled=0.0, + lidar_point_cloud=( + [ + np.array([1.56077973e02, 5.56008599e00, -7.24975635e-14]), + np.array([math.inf, math.inf, math.inf]), + np.array([math.inf, math.inf, math.inf]), + np.array([1.66673185e02, 1.59127180e00, 9.07052211e-14]), + ], + [ + True, + False, + False, + True, + ], + [ + ( + np.array([161.23485529, 3.2, 1.0]), + np.array([143.32519217, 11.39649262, -2.47296355]), + ), + ( + np.array([161.23485529, 3.2, 1.0]), + 
np.array([158.45533372, 22.69904572, -2.47296355]), + ), + ( + np.array([161.23485529, 3.2, 1.0]), + np.array([176.14095458, 16.07426611, -2.47296355]), + ), + ( + np.array([161.23485529, 3.2, 1.0]), + np.array([180.12197649, -2.38705439, -2.47296355]), + ), + ], + ), + drivable_area_grid_map=DrivableAreaGridMap( + metadata=GridMapMetadata( + resolution=0.1953125, + width=256, + height=256, + camera_position=(161.235, 3.2, 73.6), + camera_heading=-math.pi / 2, + ), + data=np.array( + [ + [[0], [0], [0], [0], [0], [0]], + [[0], [0], [0], [0], [0], [0]], + [[0], [0], [0], [0], [0], [0]], + [[0], [0], [0], [0], [0], [0]], + [[0], [0], [0], [0], [0], [0]], + [[0], [0], [0], [0], [0], [0]], + ], + dtype=np.uint8, + ), + ), + occupancy_grid_map=OccupancyGridMap( + metadata=GridMapMetadata( + resolution=0.1953125, + width=256, + height=256, + camera_position=(161.235, 3.2, 73.6), + camera_heading=-math.pi / 2, + ), + data=np.array( + [ + [[0], [0], [0], [0], [0], [0]], + [[0], [0], [0], [0], [0], [0]], + [[0], [0], [0], [0], [0], [0]], + [[0], [0], [0], [0], [0], [0]], + [[0], [0], [0], [0], [0], [0]], + [[0], [0], [0], [0], [0], [0]], + ], + dtype=np.uint8, + ), + ), + top_down_rgb=TopDownRGB( + metadata=GridMapMetadata( + resolution=0.1953125, + width=256, + height=256, + camera_position=(161.235, 3.2, 73.6), + camera_heading=-math.pi / 2, + ), + data=np.array( + [ + [ + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + ], + [ + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + ], + ], + dtype=np.uint8, + ), + ), + road_waypoints=RoadWaypoints( + lanes={ + "east_0": [ + [ + Waypoint( + pos=np.array([180.00587138, -3.2]), + heading=Heading(-1.5707963267948966), + lane_id="east_0", + lane_width=3.2, + speed_limit=5.0, + lane_index=0, + lane_offset=180.00587138, + ), + Waypoint( + pos=np.array([181.0, -3.2]), + heading=Heading(-1.5707963267948966), + lane_id="east_0", + lane_width=3.2, + speed_limit=5.0, + 
lane_index=0, + lane_offset=181.0, + ), + ] + ], + "east_1": [ + [ + Waypoint( + pos=np.array([180.00587138, 0.0]), + heading=Heading(-1.5707963267948966), + lane_id="east_1", + lane_width=3.2, + speed_limit=5.0, + lane_index=1, + lane_offset=180.00587138, + ), + Waypoint( + pos=np.array([181.0, 0.0]), + heading=Heading(-1.5707963267948966), + lane_id="east_1", + lane_width=3.2, + speed_limit=5.0, + lane_index=1, + lane_offset=181.0, + ), + ] + ], + } + ), + via_data=Vias(near_via_points=[], hit_via_points=[]), + steps_completed=4, + ) diff --git a/smarts/core/utils/episodes.py b/smarts/core/utils/episodes.py index a76b14aac2..1248eab625 100644 --- a/smarts/core/utils/episodes.py +++ b/smarts/core/utils/episodes.py @@ -51,7 +51,7 @@ def _write_row(self): assert isinstance(self._current_episode, EpisodeLog) e = self._current_episode row = ( - f"{e.index}/{self._total_episodes}", + f"{e.index + 1}/{self._total_episodes}", f"{e.sim2wall_ratio:.2f}", e.steps, f"{e.steps_per_second:.2f}", @@ -159,6 +159,9 @@ def record_step(self, observations, rewards, terminateds, truncateds, infos): if terminateds.get("__all__", False) and infos is not None: for agent, score in infos.items(): self.scores[agent] = score["score"] + else: + for id in (_id for _id, t in terminateds.items() if t): + self.scores[id] = infos[id]["score"] def _convert_to_dict(self, observations, rewards, terminateds, truncateds, infos): observations, rewards, infos = [ diff --git a/smarts/core/utils/glb.py b/smarts/core/utils/glb.py index af9917cf99..47e01855a4 100644 --- a/smarts/core/utils/glb.py +++ b/smarts/core/utils/glb.py @@ -20,7 +20,7 @@ import math from pathlib import Path -from typing import Any, Dict, List, Tuple, Union +from typing import Any, Dict, Final, List, Tuple, Union import numpy as np import trimesh @@ -30,6 +30,12 @@ from smarts.core.coordinates import BoundingBox from smarts.core.utils.geometry import triangulate_polygon +OLD_TRIMESH: Final[bool] = tuple(int(d) for d in 
trimesh.__version__.split(".")) <= ( + 3, + 9, + 29, +) + def _convert_camera(camera): result = { @@ -114,7 +120,6 @@ def make_map_glb( edge_dividers, ) -> GLBData: """Create a GLB file from a list of road polygons.""" - scene = trimesh.Scene() # Attach additional information for rendering as metadata in the map glb metadata = { @@ -127,6 +132,7 @@ def make_map_glb( "lane_dividers": lane_dividers, "edge_dividers": edge_dividers, } + scene = trimesh.Scene(metadata=metadata) meshes = _generate_meshes_from_polygons(polygons) for mesh in meshes: @@ -139,8 +145,11 @@ def make_map_glb( name = str(road_id) if lane_id is not None: name += f"-{lane_id}" - scene.add_geometry(mesh, name, extras=mesh.metadata) - return GLBData(gltf.export_glb(scene, extras=metadata, include_normals=True)) + if OLD_TRIMESH: + scene.add_geometry(mesh, name, extras=mesh.metadata) + else: + scene.add_geometry(mesh, name, metadata=mesh.metadata) + return GLBData(gltf.export_glb(scene, include_normals=True)) def make_road_line_glb(lines: List[List[Tuple[float, float]]]) -> GLBData: diff --git a/smarts/core/utils/tests/fixtures.py b/smarts/core/utils/tests/fixtures.py index a99abeae02..668e3337bd 100644 --- a/smarts/core/utils/tests/fixtures.py +++ b/smarts/core/utils/tests/fixtures.py @@ -19,306 +19,15 @@ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
-import math - -import numpy as np import pytest -import smarts.sstudio.types as t from smarts.core.controllers import ActionSpaceType -from smarts.core.coordinates import Dimensions, Heading, RefLinePoint -from smarts.core.events import Events -from smarts.core.observations import ( - DrivableAreaGridMap, - EgoVehicleObservation, - GridMapMetadata, - Observation, - OccupancyGridMap, - RoadWaypoints, - TopDownRGB, - VehicleObservation, - Vias, -) -from smarts.core.plan import EndlessGoal, Mission, Start -from smarts.core.road_map import Waypoint +from smarts.core.utils.dummy import dummy_observation @pytest.fixture def large_observation(): - return Observation( - dt=0.1, - step_count=1, - elapsed_sim_time=0.2, - events=Events( - collisions=[], - off_road=False, - off_route=False, - on_shoulder=False, - wrong_way=False, - not_moving=False, - reached_goal=False, - reached_max_episode_steps=False, - agents_alive_done=False, - interest_done=False, - ), - ego_vehicle_state=EgoVehicleObservation( - id="AGENT-007-07a0ca6e", - position=np.array([161.23485529, 3.2, 0.0]), - bounding_box=Dimensions(length=3.68, width=1.47, height=1.0), - heading=Heading(-1.5707963267948966), - speed=5.0, - steering=-0.0, - yaw_rate=4.71238898038469, - road_id="east", - lane_id="east_2", - lane_index=2, - lane_position=RefLinePoint(161.23485529, 0.0, 0.0), - mission=Mission( - start=Start( - position=np.array([163.07485529, 3.2]), - heading=Heading(-1.5707963267948966), - from_front_bumper=True, - ), - goal=EndlessGoal(), - route_vias=(), - start_time=0.1, - entry_tactic=t.TrapEntryTactic( - start_time=0, - zone=None, - exclusion_prefixes=(), - default_entry_speed=None, - ), - via=(), - vehicle_spec=None, - ), - linear_velocity=np.array([5.000000e00, 3.061617e-16, 0.000000e00]), - angular_velocity=np.array([0.0, 0.0, 0.0]), - linear_acceleration=np.array([0.0, 0.0, 0.0]), - angular_acceleration=np.array([0.0, 0.0, 0.0]), - linear_jerk=np.array([0.0, 0.0, 0.0]), - angular_jerk=np.array([0.0, 
0.0, 0.0]), - ), - under_this_agent_control=True, - neighborhood_vehicle_states=[ - VehicleObservation( - id="car-west_0_0-east_0_max-784511-726648-0-0.0", - position=(-1.33354215, -3.2, 0.0), - bounding_box=Dimensions(length=3.68, width=1.47, height=1.4), - heading=Heading(-1.5707963267948966), - speed=5.050372796758114, - road_id="west", - lane_id="west_0", - lane_index=0, - lane_position=RefLinePoint(-1.33354215, 0.0, 0.0), - ), - VehicleObservation( - id="car-west_1_0-east_1_max--85270-726648-1-0.0", - position=(-1.47159011, 0.0, 0.0), - bounding_box=Dimensions(length=3.68, width=1.47, height=1.4), - heading=Heading(-1.5707963267948966), - speed=3.6410559446059954, - road_id="west", - lane_id="west_1", - lane_index=1, - lane_position=RefLinePoint(-1.47159011, 0.0, 0.0), - ), - ], - waypoint_paths=[ - [ - Waypoint( - pos=np.array([192.00733923, -3.2]), - heading=Heading(-1.5707963267948966), - lane_id="east_0", - lane_width=3.2, - speed_limit=5.0, - lane_index=0, - lane_offset=192.00733923, - ), - Waypoint( - pos=np.array([193.0, -3.2]), - heading=Heading(-1.5707963267948966), - lane_id="east_0", - lane_width=3.2, - speed_limit=5.0, - lane_index=0, - lane_offset=193.0, - ), - ], - [ - Waypoint( - pos=np.array([192.00733923, 0.0]), - heading=Heading(-1.5707963267948966), - lane_id="east_1", - lane_width=3.2, - speed_limit=5.0, - lane_index=1, - lane_offset=192.00733923, - ), - Waypoint( - pos=np.array([193.0, 0.0]), - heading=Heading(-1.5707963267948966), - lane_id="east_1", - lane_width=3.2, - speed_limit=5.0, - lane_index=1, - lane_offset=193.0, - ), - ], - ], - distance_travelled=0.0, - lidar_point_cloud=( - [ - np.array([1.56077973e02, 5.56008599e00, -7.24975635e-14]), - np.array([math.inf, math.inf, math.inf]), - np.array([math.inf, math.inf, math.inf]), - np.array([1.66673185e02, 1.59127180e00, 9.07052211e-14]), - ], - [ - True, - False, - False, - True, - ], - [ - ( - np.array([161.23485529, 3.2, 1.0]), - np.array([143.32519217, 11.39649262, -2.47296355]), 
- ), - ( - np.array([161.23485529, 3.2, 1.0]), - np.array([158.45533372, 22.69904572, -2.47296355]), - ), - ( - np.array([161.23485529, 3.2, 1.0]), - np.array([176.14095458, 16.07426611, -2.47296355]), - ), - ( - np.array([161.23485529, 3.2, 1.0]), - np.array([180.12197649, -2.38705439, -2.47296355]), - ), - ], - ), - drivable_area_grid_map=DrivableAreaGridMap( - metadata=GridMapMetadata( - resolution=0.1953125, - width=256, - height=256, - camera_position=(161.235, 3.2, 73.6), - camera_heading=-math.pi / 2, - ), - data=np.array( - [ - [[0], [0], [0], [0], [0], [0]], - [[0], [0], [0], [0], [0], [0]], - [[0], [0], [0], [0], [0], [0]], - [[0], [0], [0], [0], [0], [0]], - [[0], [0], [0], [0], [0], [0]], - [[0], [0], [0], [0], [0], [0]], - ], - dtype=np.uint8, - ), - ), - occupancy_grid_map=OccupancyGridMap( - metadata=GridMapMetadata( - resolution=0.1953125, - width=256, - height=256, - camera_position=(161.235, 3.2, 73.6), - camera_heading=-math.pi / 2, - ), - data=np.array( - [ - [[0], [0], [0], [0], [0], [0]], - [[0], [0], [0], [0], [0], [0]], - [[0], [0], [0], [0], [0], [0]], - [[0], [0], [0], [0], [0], [0]], - [[0], [0], [0], [0], [0], [0]], - [[0], [0], [0], [0], [0], [0]], - ], - dtype=np.uint8, - ), - ), - top_down_rgb=TopDownRGB( - metadata=GridMapMetadata( - resolution=0.1953125, - width=256, - height=256, - camera_position=(161.235, 3.2, 73.6), - camera_heading=-math.pi / 2, - ), - data=np.array( - [ - [ - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - ], - [ - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - ], - ], - dtype=np.uint8, - ), - ), - road_waypoints=RoadWaypoints( - lanes={ - "east_0": [ - [ - Waypoint( - pos=np.array([180.00587138, -3.2]), - heading=Heading(-1.5707963267948966), - lane_id="east_0", - lane_width=3.2, - speed_limit=5.0, - lane_index=0, - lane_offset=180.00587138, - ), - Waypoint( - pos=np.array([181.0, -3.2]), - heading=Heading(-1.5707963267948966), - lane_id="east_0", - 
lane_width=3.2, - speed_limit=5.0, - lane_index=0, - lane_offset=181.0, - ), - ] - ], - "east_1": [ - [ - Waypoint( - pos=np.array([180.00587138, 0.0]), - heading=Heading(-1.5707963267948966), - lane_id="east_1", - lane_width=3.2, - speed_limit=5.0, - lane_index=1, - lane_offset=180.00587138, - ), - Waypoint( - pos=np.array([181.0, 0.0]), - heading=Heading(-1.5707963267948966), - lane_id="east_1", - lane_width=3.2, - speed_limit=5.0, - lane_index=1, - lane_offset=181.0, - ), - ] - ], - } - ), - via_data=Vias(near_via_points=[], hit_via_points=[]), - steps_completed=4, - ) + return dummy_observation() @pytest.fixture diff --git a/smarts/core/vehicle.py b/smarts/core/vehicle.py index 449ad9cdee..c05ee94af6 100644 --- a/smarts/core/vehicle.py +++ b/smarts/core/vehicle.py @@ -569,7 +569,7 @@ def attach_sensor(self, sensor, sensor_name=sensor_name): detach = getattr(self, f"detach_{sensor_name}") if detach: detach(sensor_name) - self._log.info( + self._log.debug( f"replacing existing {sensor_name} on vehicle {self.id}" ) setattr(self, f"_{sensor_name}", sensor) diff --git a/smarts/diagnostic/run.py b/smarts/diagnostic/run.py index 99b1cca722..cfbe8ee203 100644 --- a/smarts/diagnostic/run.py +++ b/smarts/diagnostic/run.py @@ -52,7 +52,7 @@ def _compute(scenario_dir, ep_per_scenario=10, max_episode_steps=_MAX_EPISODE_ST env = gym.make( "smarts.env:hiway-v1", scenarios=scenario_dir, - scenarios_order=ScenarioOrder.Sequential, + scenarios_order=ScenarioOrder.sequential, sim_name="Diagnostic", agent_interfaces={}, headless=True, diff --git a/smarts/env/configs/__init__.py b/smarts/env/configs/__init__.py new file mode 100644 index 0000000000..2c65923417 --- /dev/null +++ b/smarts/env/configs/__init__.py @@ -0,0 +1,21 @@ +# MIT License +# +# Copyright (C) 2023. Huawei Technologies Co., Ltd. All rights reserved. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. diff --git a/smarts/env/configs/base_config.py b/smarts/env/configs/base_config.py new file mode 100644 index 0000000000..2600f2fe4a --- /dev/null +++ b/smarts/env/configs/base_config.py @@ -0,0 +1,37 @@ +# MIT License +# +# Copyright (C) 2023. Huawei Technologies Co., Ltd. All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. +from dataclasses import dataclass + + +@dataclass +class EnvironmentArguments: + """The base for arguments for an environment.""" + + pass + + +@dataclass(unsafe_hash=True) +class EnvironmentConfiguration: + """A base environment configuration.""" + + id: str + """The gymnasium registered id of the environment.""" diff --git a/smarts/env/configs/hiway_env_configs.py b/smarts/env/configs/hiway_env_configs.py new file mode 100644 index 0000000000..24015f68f0 --- /dev/null +++ b/smarts/env/configs/hiway_env_configs.py @@ -0,0 +1,101 @@ +# MIT License +# +# Copyright (C) 2023. Huawei Technologies Co., Ltd. All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. +from dataclasses import dataclass, field +from enum import IntEnum, auto +from pathlib import Path +from typing import Dict, List, Optional + +from smarts.core.agent_interface import AgentInterface +from smarts.env.configs.base_config import EnvironmentArguments +from smarts.env.utils.action_conversion import ActionOptions +from smarts.env.utils.observation_conversion import ObservationOptions + + +class ScenarioOrder(IntEnum): + """Determines the order in which scenarios are served over successive resets.""" + + sequential = 0 + """Scenarios are served in order initially provided.""" + scrambled = 1 + """Scenarios are served in random order.""" + default = scrambled + """The default behavior. Defaults to ``scrambled``.""" + + +class EnvReturnMode(IntEnum): + """Configuration to determine the interface type of the step function. + + This configures between the environment status return (i.e. reward means the environment reward) and the per-agent + status return (i.e. rewards means reward per agent). + """ + + per_agent = auto() + """Generate per-agent mode step returns in the form ``(rewards({id: float}), terminateds({id: bool}), truncateds ({id: bool}), info)``.""" + environment = auto() + """Generate environment mode step returns in the form ``(reward (float), terminated (bool), truncated (bool), info)``.""" + default = per_agent + """The default behavior. Defaults to ``per_agent``.""" + + +@dataclass +class SumoOptions: + """Contains options used to configure sumo.""" + + num_external_clients: int = 0 + """Number of SUMO clients beyond SMARTS. Defaults to 0.""" + auto_start: bool = True + """Automatic starting of SUMO. 
Defaults to True.""" + headless: bool = True + """If True, disables visualization in SUMO GUI. Defaults to True.""" + port: Optional[str] = None + """SUMO port. Defaults to ``None``.""" + + +@dataclass +class HiWayEnvV1Configuration(EnvironmentArguments): + """The base configurations that should be used for HiWayEnvV1.""" + + scenarios: List[str] + """A list of scenario directories that will be simulated.""" + agent_interfaces: Dict[str, AgentInterface] + """Specification of the agents needs that will be used to configure + the environment.""" + sim_name: Optional[str] = None + """The name of the simulation.""" + scenarios_order: ScenarioOrder = ScenarioOrder.default + """The order in which scenarios should be executed.""" + headless: bool = False + """If this environment should attempt to connect to envision.""" + visdom: bool = False + """Deprecated. Use SMARTS_VISDOM_ENABLED.""" + fixed_timestep_sec: float = 0.1 + """The time length of each step.""" + sumo_options: SumoOptions = field(default_factory=lambda: SumoOptions()) + """The configuration for the sumo instance.""" + seed: int = 42 + """The environment seed.""" + observation_options: ObservationOptions = ObservationOptions.default + """Defines the options for how the formatting matches the observation space.""" + action_options: ActionOptions = ActionOptions.default + """Defines the options for how the formatting matches the action space.""" + environment_return_mode: EnvReturnMode = EnvReturnMode.default + """This configures between the environment step return information""" diff --git a/smarts/env/gymnasium/hiway_env_v1.py b/smarts/env/gymnasium/hiway_env_v1.py index 497b73a246..8d557c9da6 100644 --- a/smarts/env/gymnasium/hiway_env_v1.py +++ b/smarts/env/gymnasium/hiway_env_v1.py @@ -21,21 +21,10 @@ # THE SOFTWARE. 
import logging import os -from enum import IntEnum, auto +from dataclasses import asdict, is_dataclass from functools import partial from pathlib import Path -from typing import ( - Any, - Dict, - List, - NamedTuple, - Optional, - Sequence, - Set, - SupportsFloat, - Tuple, - Union, -) +from typing import Any, Dict, List, Optional, Sequence, Set, SupportsFloat, Tuple, Union import gymnasium as gym import numpy as np @@ -51,35 +40,17 @@ from smarts.core.agent_interface import AgentInterface from smarts.core.local_traffic_provider import LocalTrafficProvider from smarts.core.scenario import Scenario +from smarts.env.configs.hiway_env_configs import ( + EnvReturnMode, + ScenarioOrder, + SumoOptions, +) from smarts.env.utils.action_conversion import ActionOptions, ActionSpacesFormatter from smarts.env.utils.observation_conversion import ( ObservationOptions, ObservationSpacesFormatter, ) - -class ScenarioOrder(IntEnum): - """Determines the order in which scenarios are served over successive resets.""" - - Sequential = 0 - """Scenarios are served in order initially provided.""" - Scrambled = 1 - """Scenarios are served in random order.""" - - -class SumoOptions(NamedTuple): - """Contains options used to configure sumo.""" - - num_external_clients: int = 0 - """Number of SUMO clients beyond SMARTS. Defaults to 0.""" - auto_start: bool = True - """Automatic starting of SUMO. Defaults to True.""" - headless: bool = True - """If True, disables visualization in SUMO GUI. Defaults to True.""" - port: Optional[str] = None - """SUMO port. Defaults to None.""" - - DEFAULT_VISUALIZATION_CLIENT_BUILDER = partial( Envision, endpoint=None, @@ -89,19 +60,6 @@ class SumoOptions(NamedTuple): ) -class EnvReturnMode(IntEnum): - """Configuration to determine the interface type of the step function. - - This configures between the environment status return (i.e. reward means the environment reward) and the per-agent - status return (i.e. rewards means reward per agent). 
- """ - - per_agent = auto() - """Generate per-agent mode step returns in the form ``(rewards({id: float}), terminateds({id: bool}), truncateds ({id: bool}), info)``.""" - environment = auto() - """Generate environment mode step returns in the form ``(reward (float), terminated (bool), truncated (bool), info)``.""" - - class HiWayEnvV1(gym.Env): """A generic environment for various driving tasks simulated by SMARTS. @@ -112,7 +70,7 @@ class HiWayEnvV1(gym.Env): needs that will be used to configure the environment. sim_name (str, optional): Simulation name. Defaults to None. scenarios_order (ScenarioOrder, optional): Configures the order of - scenarios provided over successive resets. + scenarios provided over successive resets. See :class:`~smarts.env.configs.hiway_env_configs.ScenarioOrder`. headless (bool, optional): If True, disables visualization in Envision. Defaults to False. visdom (bool): Deprecated. Use SMARTS_VISDOM_ENABLED. @@ -122,7 +80,7 @@ class HiWayEnvV1(gym.Env): seed (int, optional): Random number generator seed. Defaults to 42. sumo_options (SumoOptions, Dict[str, Any]): The configuration for the sumo instance. A dictionary with the fields can be used instead. - See :class:`SumoOptions`. + See :class:`~smarts.env.configs.hiway_env_configs.SumoOptions`. visualization_client_builder: A method that must must construct an object that follows the Envision interface. Allows tapping into a direct data stream from the simulation. @@ -137,10 +95,12 @@ class HiWayEnvV1(gym.Env): environment_return_mode (EnvReturnMode, str): This configures between the environment step return information (i.e. reward means the environment reward) and the per-agent step return information (i.e. reward means rewards as key-value per agent). Defaults to - :attr:`~smarts.env.gymnasium.hiway_env_v1.EnvReturnMode.per_agent`. + :attr:`~smarts.env.configs.hiway_env_configs.EnvReturnMode.per_agent`. 
""" - metadata = {"render_modes": ["human"]} + metadata = { + "render_modes": ["rgb_array"], + } """Metadata for gym's use.""" # define render_mode if your environment supports rendering @@ -158,9 +118,9 @@ class HiWayEnvV1(gym.Env): def __init__( self, scenarios: Sequence[str], - agent_interfaces: Dict[str, AgentInterface], + agent_interfaces: Dict[str, Union[Dict[str, Any], AgentInterface]], sim_name: Optional[str] = None, - scenarios_order: ScenarioOrder = ScenarioOrder.Scrambled, + scenarios_order: ScenarioOrder = ScenarioOrder.default, headless: bool = False, visdom: bool = False, fixed_timestep_sec: float = 0.1, @@ -171,18 +131,26 @@ def __init__( ObservationOptions, str ] = ObservationOptions.default, action_options: Union[ActionOptions, str] = ActionOptions.default, - environment_return_mode: Union[EnvReturnMode, str] = EnvReturnMode.per_agent, + environment_return_mode: Union[EnvReturnMode, str] = EnvReturnMode.default, + render_mode: Optional[str] = None, ): self._log = logging.getLogger(self.__class__.__name__) smarts_seed(seed) - self._agent_interfaces = agent_interfaces + self._agent_interfaces: Dict[str, AgentInterface] = { + a_id: ( + a_interface + if isinstance(a_interface, AgentInterface) + else AgentInterface(**a_interface) + ) + for a_id, a_interface in agent_interfaces.items() + } self._dones_registered = 0 scenarios = [str(Path(scenario).resolve()) for scenario in scenarios] self._scenarios_iterator = Scenario.scenario_variations( scenarios, list(agent_interfaces.keys()), - shuffle_scenarios=scenarios_order == ScenarioOrder.Scrambled, + shuffle_scenarios=scenarios_order == ScenarioOrder.scrambled, ) visualization_client = None @@ -195,13 +163,14 @@ def __init__( visualization_client.send(preamble) self._env_renderer = None + self.render_mode = render_mode traffic_sims = [] if Scenario.any_support_sumo_traffic(scenarios): from smarts.core.sumo_traffic_simulation import SumoTrafficSimulation - if isinstance(sumo_options, tuple): - sumo_options = 
sumo_options._asdict() + if is_dataclass(sumo_options): + sumo_options = asdict(sumo_options) sumo_traffic = SumoTrafficSimulation( headless=sumo_options["headless"], time_resolution=fixed_timestep_sec, @@ -428,10 +397,7 @@ def render( Note: Make sure that your class's :attr:`metadata` ``"render_modes"`` key includes the list of supported modes. """ - if ( - "rgb_array" in self.metadata["render_modes"] - or self.render_mode == "rgb_array" - ): + if self.render_mode == "rgb_array": if self._env_renderer is None: from smarts.env.utils.record import AgentCameraRGBRender @@ -474,14 +440,11 @@ def __str__(self): Returns: A string identifying the environment. """ - if self.spec is None: - return f"<{type(self).__name__} instance>" - else: - return f"<{type(self).__name__}<{self.spec.id}>>" + return super().__str__() def __enter__(self): """Support with-statement for the environment.""" - return self + return super().__enter__() def __exit__(self, *args: Any): """Support with-statement for the environment and closes the environment.""" diff --git a/smarts/env/gymnasium/wrappers/parallel_env.py b/smarts/env/gymnasium/wrappers/parallel_env.py index 8313ec80fb..e26a82f99d 100644 --- a/smarts/env/gymnasium/wrappers/parallel_env.py +++ b/smarts/env/gymnasium/wrappers/parallel_env.py @@ -205,7 +205,9 @@ def reset(self) -> Tuple[Sequence[Dict[str, Any]], Sequence[Dict[str, Any]]]: observations and infos from the vectorized environment. """ - observations, infos = self._call(_Message.RESET, [None] * self._num_envs) + # since the return is [(obs0, infos0), ...] they need to be zipped to form. 
+ # [(obs0, ...), (infos0, ...)] + observations, infos = zip(*self._call(_Message.RESET, [None] * self._num_envs)) return observations, infos def step( diff --git a/smarts/env/utils/action_conversion.py b/smarts/env/utils/action_conversion.py index 7e286f3a03..24b6fa2430 100644 --- a/smarts/env/utils/action_conversion.py +++ b/smarts/env/utils/action_conversion.py @@ -51,7 +51,9 @@ def _DEFAULT_PASSTHROUGH(action): _throttle_break_steering_space = gym.spaces.Box( - low=np.array([0.0, 0.0, -1.0]), high=np.array([1.0, 1.0, 1.0]), dtype=np.float32 + low=np.array([0.0, 0.0, -1.0], dtype=np.float32), + high=np.array([1.0, 1.0, 1.0], dtype=np.float32), + dtype=np.float32, ) _actuator_dynamic_space = _throttle_break_steering_space @@ -61,8 +63,12 @@ def _DEFAULT_PASSTHROUGH(action): _direct_space = gym.spaces.Box( - low=np.array([LINEAR_ACCELERATION_MINIMUM, ANGULAR_VELOCITY_MINIMUM]), - high=np.array([LINEAR_ACCELERATION_MAXIMUM, ANGULAR_VELOCITY_MAXIMUM]), + low=np.array( + [LINEAR_ACCELERATION_MINIMUM, ANGULAR_VELOCITY_MINIMUM], dtype=np.float32 + ), + high=np.array( + [LINEAR_ACCELERATION_MAXIMUM, ANGULAR_VELOCITY_MAXIMUM], dtype=np.float32 + ), dtype=np.float32, ) @@ -135,13 +141,15 @@ def _format_lane_space(action: int): @dataclass(frozen=True) -class _FormattingGroup: +class FormattingGroup: + """Describes the conversion necessary to generate the given space.""" + space: gym.Space formatting_func: Callable[[Any], Any] = field(default=_DEFAULT_PASSTHROUGH) @lru_cache(maxsize=1) -def _get_formats() -> Dict[ActionSpaceType, _FormattingGroup]: +def get_formatters() -> Dict[ActionSpaceType, FormattingGroup]: """Get the currently available formatting groups for converting actions from `gym` space standard to SMARTS accepted observations. @@ -149,42 +157,42 @@ def _get_formats() -> Dict[ActionSpaceType, _FormattingGroup]: Dict[ActionSpaceType, Any]: The currently available formatting groups. 
""" return { - ActionSpaceType.ActuatorDynamic: _FormattingGroup( + ActionSpaceType.ActuatorDynamic: FormattingGroup( space=_actuator_dynamic_space, ), - ActionSpaceType.Continuous: _FormattingGroup( + ActionSpaceType.Continuous: FormattingGroup( space=_continuous_space, ), - ActionSpaceType.Direct: _FormattingGroup( + ActionSpaceType.Direct: FormattingGroup( space=_direct_space, ), - ActionSpaceType.Empty: _FormattingGroup( + ActionSpaceType.Empty: FormattingGroup( space=gym.spaces.Tuple(spaces=()), formatting_func=lambda a: None, ), - ActionSpaceType.Lane: _FormattingGroup( + ActionSpaceType.Lane: FormattingGroup( space=_lane_space, formatting_func=_format_lane_space, ), - ActionSpaceType.LaneWithContinuousSpeed: _FormattingGroup( + ActionSpaceType.LaneWithContinuousSpeed: FormattingGroup( space=_lane_with_continuous_speed_space, ), - ActionSpaceType.MPC: _FormattingGroup( + ActionSpaceType.MPC: FormattingGroup( space=_mpc_space, ), - ActionSpaceType.MultiTargetPose: _FormattingGroup( + ActionSpaceType.MultiTargetPose: FormattingGroup( space=_multi_target_pose_space, ), - ActionSpaceType.RelativeTargetPose: _FormattingGroup( + ActionSpaceType.RelativeTargetPose: FormattingGroup( space=_relative_target_pose_space, ), - ActionSpaceType.TargetPose: _FormattingGroup( + ActionSpaceType.TargetPose: FormattingGroup( space=_target_pose_space, ), - ActionSpaceType.Trajectory: _FormattingGroup( + ActionSpaceType.Trajectory: FormattingGroup( space=_trajectory_space, ), - ActionSpaceType.TrajectoryWithTime: _FormattingGroup( + ActionSpaceType.TrajectoryWithTime: FormattingGroup( space=_trajectory_with_time_space, ), } @@ -200,7 +208,7 @@ class ActionOptions(IntEnum): unformatted = 2 """Actions are not reformatted or constrained to action space. 
Actions must directly map to underlying SMARTS actions.""" - default = 0 + default = multi_agent """Defaults to :attr:`multi_agent`.""" @@ -217,7 +225,7 @@ def __init__( self, agent_interfaces: Dict[str, AgentInterface], action_options: ActionOptions ) -> None: self._agent_interfaces = agent_interfaces - self._action_options = action_options + self.action_options = action_options for agent_id, agent_interface in agent_interfaces.items(): assert self.supported(agent_interface.action), ( @@ -236,11 +244,11 @@ def format(self, actions: Dict[str, Any]): (Observation, Dict[str, Any]): The formatted actions. """ - if self._action_options == ActionOptions.unformatted: + if self.action_options == ActionOptions.unformatted: return actions out_actions = {} - formatting_groups = _get_formats() + formatting_groups = get_formatters() for agent_id, action in actions.items(): agent_interface = self._agent_interfaces[agent_id] format_ = formatting_groups[agent_interface.action] @@ -253,7 +261,7 @@ def format(self, actions: Dict[str, Any]): formatted_action = format_.formatting_func(action) out_actions[agent_id] = formatted_action - if self._action_options == ActionOptions.full: + if self.action_options == ActionOptions.full: assert actions.keys() == self.space.spaces.keys() return out_actions @@ -268,7 +276,7 @@ def supported(action_type: ActionSpaceType): Returns: bool: If the action type is supported by the formatter. """ - return action_type in _get_formats() + return action_type in get_formatters() @cached_property def space(self) -> gym.spaces.Dict: @@ -277,11 +285,11 @@ def space(self) -> gym.spaces.Dict: Returns: gym.spaces.Dict: A description of the action space that this formatter requires. 
""" - if self._action_options is ActionOptions.unformatted: + if self.action_options is ActionOptions.unformatted: return None return gym.spaces.Dict( { - agent_id: _get_formats()[agent_interface.action].space + agent_id: get_formatters()[agent_interface.action].space for agent_id, agent_interface in self._agent_interfaces.items() } ) diff --git a/smarts/env/utils/observation_conversion.py b/smarts/env/utils/observation_conversion.py index 22301a1f4c..0e5723342f 100644 --- a/smarts/env/utils/observation_conversion.py +++ b/smarts/env/utils/observation_conversion.py @@ -905,7 +905,7 @@ class ObservationOptions(IntEnum): unformatted = 2 """Observation is the original unformatted observation. The observation will not match the observation space.""" - default = 0 + default = multi_agent """Defaults to :attr:`multi_agent`.""" diff --git a/smarts/p3d/renderer.py b/smarts/p3d/renderer.py index e4670ad36a..bc52a05e30 100644 --- a/smarts/p3d/renderer.py +++ b/smarts/p3d/renderer.py @@ -27,10 +27,11 @@ import math import os import re +import warnings from dataclasses import dataclass from pathlib import Path from threading import Lock -from typing import Optional, Tuple, Union +from typing import Literal, Optional, Tuple, Union import gltf import numpy as np @@ -68,12 +69,21 @@ # pytype: enable=import-error +BACKEND_LITERALS = Literal[ + "pandagl", + "pandadx9", + "pandagles", + "pandagles2", + "p3headlessgl", + "p3tinydisplay", +] + class _ShowBaseInstance(ShowBase): """Wraps a singleton instance of ShowBase from Panda3D.""" _debug_mode: DEBUG_MODE = DEBUG_MODE.WARNING - _rendering_backend: str = "p3headlessgl" + _rendering_backend: BACKEND_LITERALS = "pandagl" def __new__(cls): # Singleton pattern: ensure only 1 ShowBase instance @@ -84,11 +94,11 @@ def __new__(cls): "", f"load-display {cls._rendering_backend}", ) - loadPrcFileData("", "aux-display p3headlessgl") loadPrcFileData("", "aux-display pandagl") loadPrcFileData("", "aux-display pandadx9") loadPrcFileData("", 
"aux-display pandagles") loadPrcFileData("", "aux-display pandagles2") + loadPrcFileData("", "aux-display p3headlessgl") loadPrcFileData("", "aux-display p3tinydisplay") # disable vsync otherwise we are limited to refresh-rate of screen @@ -136,6 +146,17 @@ def set_rendering_verbosity(cls, debug_mode: DEBUG_MODE): cls._debug_mode = debug_mode loadPrcFileData("", f"notify-level {cls._debug_mode.name.lower()}") + @classmethod + def set_rendering_backend( + cls, + rendering_backend: BACKEND_LITERALS, + ): + """Sets the rendering backend.""" + if "__it__" not in cls.__dict__: + cls._rendering_backend = rendering_backend + else: + warnings.warn("Cannot apply rendering backend after setup.") + def destroy(self): """Destroy this renderer and clean up all remaining resources.""" super().destroy() @@ -249,7 +270,12 @@ def teardown(self): class Renderer(RendererBase): """The utility used to render simulation geometry.""" - def __init__(self, simid: str, debug_mode: DEBUG_MODE = DEBUG_MODE.ERROR): + def __init__( + self, + simid: str, + debug_mode: DEBUG_MODE = DEBUG_MODE.ERROR, + rendering_backend: BACKEND_LITERALS = "pandagl", + ): self._log: logging.Logger = logging.getLogger(self.__class__.__name__) self._is_setup = False self._simid = simid @@ -262,6 +288,7 @@ def __init__(self, simid: str, debug_mode: DEBUG_MODE = DEBUG_MODE.ERROR): self._signal_nodes = {} self._camera_nodes = {} _ShowBaseInstance.set_rendering_verbosity(debug_mode=debug_mode) + _ShowBaseInstance.set_rendering_backend(rendering_backend=rendering_backend) # Note: Each instance of the SMARTS simulation will have its own Renderer, # but all Renderer objects share the same ShowBaseInstance. 
self._showbase_instance: _ShowBaseInstance = _ShowBaseInstance() diff --git a/smarts/ray/sensors/ray_sensor_resolver.py b/smarts/ray/sensors/ray_sensor_resolver.py index b0796f8a00..cc672d4f98 100644 --- a/smarts/ray/sensors/ray_sensor_resolver.py +++ b/smarts/ray/sensors/ray_sensor_resolver.py @@ -101,7 +101,7 @@ def observe( tasks = [] with timeit( f"parallizable observations with {len(agent_ids)} and {len(ray_actors)}", - logger.info, + logger.debug, ): # Update remote state (if necessary) remote_sim_frame = ray.put(dumps(sim_frame)) @@ -122,7 +122,7 @@ def observe( for i, agent_group in enumerate(agent_groups): if not agent_group: break - with timeit(f"submitting {len(agent_group)} agents", logger.info): + with timeit(f"submitting {len(agent_group)} agents", logger.debug): tasks.append( ray_actors[i].do_work.remote( remote_sim_frame=remote_sim_frame, agent_ids=agent_group @@ -130,7 +130,7 @@ def observe( ) # While observation processes are operating do rendering - with timeit("rendering", logger.info): + with timeit("rendering", logger.debug): rendering = {} for agent_id in agent_ids: for vehicle_id in sim_frame.vehicles_for_agents[agent_id]: @@ -149,7 +149,7 @@ def observe( updated_sensors[vehicle_id].update(updated_unsafe_sensors) # Collect futures - with timeit("waiting for observations", logger.info): + with timeit("waiting for observations", logger.debug): for fut in concurrent.futures.as_completed( [task.future() for task in tasks] ): @@ -159,7 +159,7 @@ def observe( for v_id, values in u_sens.items(): updated_sensors[v_id].update(values) - with timeit("merging observations", logger.info): + with timeit("merging observations", logger.debug): # Merge sensor information for agent_id, r_obs in rendering.items(): observations[agent_id] = replace(observations[agent_id], **r_obs) diff --git a/smarts/zoo/registry.py b/smarts/zoo/registry.py index 2ba3c053bb..002410a15c 100644 --- a/smarts/zoo/registry.py +++ b/smarts/zoo/registry.py @@ -47,7 +47,7 @@ def 
register(locator: str, entry_point, **kwargs): ) """ - agent_registry.register(locator=locator, entry_point=entry_point, **kwargs) + agent_registry.register(name=locator, entry_point=entry_point, **kwargs) def make(locator: str, **kwargs): @@ -62,6 +62,8 @@ def make(locator: str, **kwargs): is in the form `{PYTHONPATH}[n]/path/to/file.py` kwargs: Additional arguments to be passed to the constructed class. + Returns: + AgentSpec: The agent specifications needed to instantiate and configure an agent. """ from smarts.zoo.agent_spec import AgentSpec @@ -86,8 +88,10 @@ def make_agent(locator: str, **kwargs): is in the form `{PYTHONPATH}[n]/path/to/file.py` kwargs: Additional arguments to be passed to the constructed class. + Returns: + Tuple[Agent, AgentInterface]: The agent and its interface. """ agent_spec = make(locator, **kwargs) - return agent_spec.build_agent() + return agent_spec.build_agent(), agent_spec.interface diff --git a/zoo/policies/__init__.py b/zoo/policies/__init__.py index a46097bf27..f7a3cc4f55 100644 --- a/zoo/policies/__init__.py +++ b/zoo/policies/__init__.py @@ -205,3 +205,16 @@ def entry_point_dsac(**kwargs): register(locator="discrete-soft-actor-critic-agent-v0", entry_point=entry_point_dsac) + + +def open_entrypoint(*, debug: bool = False, aggressiveness: int = 3) -> AgentSpec: + try: + open_agent = importlib.import_module("open_agent") + except ModuleNotFoundError as e: + raise ModuleNotFoundError( + f"Ensure that the open-agent has been installed with `pip install open-agent" + ) + return open_agent.entrypoint(debug=debug, aggressiveness=aggressiveness) + + +register(locator="open-agent-v0", entry_point=open_entrypoint) diff --git a/zoo/policies/primitive_agents.py b/zoo/policies/primitive_agents.py new file mode 100644 index 0000000000..fb8d5264e6 --- /dev/null +++ b/zoo/policies/primitive_agents.py @@ -0,0 +1,98 @@ +import importlib +import random +from typing import Optional + +from smarts.core.agent_interface import AgentInterface, 
AgentType +from smarts.core.observations import Observation +from smarts.zoo import Agent, AgentSpec + + +class RandomLanerAgent(Agent): + def act(self, obs): + val = ["keep_lane", "slow_down", "change_lane_left", "change_lane_right"] + return random.choice(val) + + +def rla_entrypoint(*, max_episode_steps: Optional[int]) -> AgentSpec: + return AgentSpec( + interface=AgentInterface.from_type( + AgentType.Laner, max_episode_steps=max_episode_steps + ), + agent_builder=RandomLanerAgent, + ) + + +class ChaseViaPointsAgent(Agent): + def act(self, obs: Observation): + if ( + len(obs.via_data.near_via_points) < 1 + or obs.ego_vehicle_state.road_id != obs.via_data.near_via_points[0].road_id + ): + return (obs.waypoint_paths[0][0].speed_limit, 0) + + nearest = obs.via_data.near_via_points[0] + if nearest.lane_index == obs.ego_vehicle_state.lane_index: + return (nearest.required_speed, 0) + + return ( + nearest.required_speed, + 1 if nearest.lane_index > obs.ego_vehicle_state.lane_index else -1, + ) + + +def cvpa_entrypoint(*, max_episode_steps: Optional[int]): + return AgentSpec( + interface=AgentInterface.from_type( + AgentType.LanerWithSpeed, + max_episode_steps=max_episode_steps, + ), + agent_builder=ChaseViaPointsAgent, + agent_params=None, + ) + + +class TrackingAgent(Agent): + def act(self, obs): + lane_index = 0 + num_trajectory_points = min([10, len(obs.waypoint_paths[lane_index])]) + # Desired speed is in m/s + desired_speed = 50 / 3.6 + trajectory = [ + [ + obs.waypoint_paths[lane_index][i].pos[0] + for i in range(num_trajectory_points) + ], + [ + obs.waypoint_paths[lane_index][i].pos[1] + for i in range(num_trajectory_points) + ], + [ + obs.waypoint_paths[lane_index][i].heading + for i in range(num_trajectory_points) + ], + [desired_speed for i in range(num_trajectory_points)], + ] + return trajectory + + +def trajectory_tracking_entrypoint(*, max_episode_steps: Optional[int]): + return AgentSpec( + interface=AgentInterface.from_type( + AgentType.Tracker, 
max_episode_steps=max_episode_steps + ), + agent_builder=TrackingAgent, + ) + + +class StandardLaneFollowerAgent(Agent): + def act(self, obs): + return (obs["waypoint_paths"]["speed_limit"][0][0], 0) + + +def standard_lane_follower_entrypoint(*, max_episode_steps: Optional[int]): + return AgentSpec( + interface=AgentInterface.from_type( + AgentType.LanerWithSpeed, max_episode_steps=max_episode_steps + ), + agent_builder=StandardLaneFollowerAgent, + )