diff --git a/.circleci/config.yml b/.circleci/config.yml index 6dca4b27b..7f8a504f7 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -28,7 +28,7 @@ executors: jobs: test: executor: grid2op-executor - resource_class: medium + resource_class: medium+ parallelism: 4 steps: - checkout @@ -47,13 +47,17 @@ jobs: export _GRID2OP_FORCE_TEST=1 cd grid2op/tests/ python3 helper_list_test.py | circleci tests split > /tmp/tests_run + - run: + command: | + source venv_test/bin/activate + pip freeze - run: cat /tmp/tests_run - run: command: | source venv_test/bin/activate cd grid2op/tests/ export _GRID2OP_FORCE_TEST=1 - python3 -m unittest $(cat /tmp/tests_run) + python3 -m unittest -v $(cat /tmp/tests_run) install36: executor: python36 @@ -136,39 +140,109 @@ jobs: - run: command: | source venv_test/bin/activate - python -m pip install -U "numpy>=1.20,<1.21" - python -m pip install -U .[test] + python -m pip install -U "numpy>=1.20,<1.21" "pandas<2.2" "scipy<1.12" numba "pillow<10.4.0" .[test] + pip freeze + - run: + command: | + source venv_test/bin/activate export _GRID2OP_FORCE_TEST=1 + cd /tmp grid2op.testinstall - # - run: - # command: | - # source venv_test/bin/activate - # python -m pip install -U "numpy>=1.21,<1.22" - # python -m pip install -U .[test] - # export _GRID2OP_FORCE_TEST=1 - # grid2op.testinstall - # - run: - # command: | - # source venv_test/bin/activate - # python -m pip install -U "numpy>=1.22,<1.23" - # python -m pip install -U .[test] - # export _GRID2OP_FORCE_TEST=1 - # grid2op.testinstall - # - run: - # command: | - # source venv_test/bin/activate - # python -m pip install -U "numpy>=1.23,<1.24" - # python -m pip install -U .[test] - # export _GRID2OP_FORCE_TEST=1 - # grid2op.testinstall - run: command: | source venv_test/bin/activate - python -m pip install -U "numpy>=1.24,<1.25" - python -m pip install -U .[test] + python -m pip install -U "numpy>=1.24,<1.25" "pandas<2.2" "scipy<1.12" numba "pillow<10.4.0" .[test] + - run: + command: | + source venv_test/bin/activate + pip freeze + - run: + command: | + source venv_test/bin/activate export _GRID2OP_FORCE_TEST=1 + cd /tmp grid2op.testinstall + legacy_lightsim_old_pp: + executor: python38 # needs to be 38: whl of lightsim were not released for 3.10 at the time + resource_class: small + steps: + - checkout + - run: + command: | + apt-get update + apt-get install -y coinor-cbc + - run: python -m pip install virtualenv + - run: python -m virtualenv venv_test + - run: + command: | + source venv_test/bin/activate + python -m pip install -U pip setuptools wheel + python -m pip install -U lightsim2grid==0.5.3 gymnasium "numpy<1.22" + - run: + command: | + source venv_test/bin/activate + python -m pip install -e . + pip freeze + - run: + command: | + source venv_test/bin/activate + export _GRID2OP_FORCE_TEST=1 + python -m unittest grid2op/tests/test_basic_env_ls.py + + legacy_lightsim: + executor: python38 # needs to be 38: whl of lightsim were not released for 3.10 at the time + resource_class: small + steps: + - checkout + - run: + command: | + apt-get update + apt-get install -y coinor-cbc + - run: python -m pip install virtualenv + - run: python -m virtualenv venv_test + - run: + command: | + source venv_test/bin/activate + python -m pip install -U pip setuptools wheel + python -m pip install -U lightsim2grid==0.6.0 gymnasium "numpy<1.22" + - run: + command: | + source venv_test/bin/activate + python -m pip install -e . 
+ pip freeze + - run: + command: | + source venv_test/bin/activate + export _GRID2OP_FORCE_TEST=1 + python -m unittest grid2op/tests/test_basic_env_ls.py + + test_chronix2grid: + executor: python310 # needs to be 38: whl of lightsim were not released for 3.10 at the time + resource_class: small + steps: + - checkout + - run: + command: | + apt-get update + apt-get install -y coinor-cbc + - run: python -m pip install virtualenv + - run: python -m virtualenv venv_test + - run: + command: | + source venv_test/bin/activate + python -m pip install -U pip setuptools wheel "numpy==1.26.4" + - run: + command: | + source venv_test/bin/activate + python -m pip install -e .[chronix2grid] "linopy==0.3.8" "scs==3.2.4.post1" "ecos==2.0.13" "pillow==10.3.0" "numpy==1.26.4" "xarray==2024.3.0" + pip freeze + - run: + command: | + source venv_test/bin/activate + export _GRID2OP_FORCE_TEST=1 + python -m unittest grid2op/tests/fromChronix2grid.py + install39: executor: python39 resource_class: small @@ -184,62 +258,29 @@ jobs: command: | export _GRID2OP_FORCE_TEST=1 source venv_test/bin/activate - python -m pip install -U pip setuptools wheel - python -m pip install chronix2grid>="1.1.0.post1" + python -m pip install -U pip setuptools wheel "numpy>=1.20,<1.21" "pandas<2.2" "scipy==1.10.1" "pillow<10.4.0" numba python -m pip uninstall -y grid2op - # - run: - # command: | - # source venv_test/bin/activate - # python -m pip install -U numba - # python -m pip install -U .[test] - # - run: - # command: | - # source venv_test/bin/activate - # python -m pip install -U "numpy>=1.20,<1.21" - # python -m pip install -U .[test] - # export _GRID2OP_FORCE_TEST=1 - # grid2op.testinstall - # - run: - # command: | - # source venv_test/bin/activate - # python -m pip install -U "numpy>=1.21,<1.22" - # python -m pip install -U .[test] - # export _GRID2OP_FORCE_TEST=1 - # grid2op.testinstall - # - run: - # command: | - # source venv_test/bin/activate - # python -m pip install -U "numpy>=1.22,<1.23" - # python -m pip install -U .[test] - # export _GRID2OP_FORCE_TEST=1 - # grid2op.testinstall - # - run: - # command: | - # source venv_test/bin/activate - # python -m pip install -U "numpy>=1.23,<1.24" - # python -m pip install -U .[test] - # export _GRID2OP_FORCE_TEST=1 - # grid2op.testinstall - # - run: - # command: | - # source venv_test/bin/activate - # python -m pip install -U "numpy>=1.24,<1.25" - # python -m pip install -U .[test] - # export _GRID2OP_FORCE_TEST=1 - # grid2op.testinstall - # - run: - # command: | - # source venv_test/bin/activate - # python -m pip install -U "numpy>=1.25,<1.26" - # python -m pip install -U .[test] - # export _GRID2OP_FORCE_TEST=1 - # grid2op.testinstall + - run: + command: | # issue with previous more simple install, so I fix some versions + source venv_test/bin/activate + python -m pip install "numpy>=1.20,<1.21" "pandas<2.2" "scipy==1.10.1" numba . 
+ pip freeze + - run: + command: | + source venv_test/bin/activate + export _GRID2OP_FORCE_TEST=1 + cd /tmp + grid2op.testinstall - run: command: | source venv_test/bin/activate - python -m pip install -U "numpy>=1.26,<1.27" - python -m pip install -U .[test] + python -m pip install "numpy>=1.26,<1.27" "pandas<2.2" "scipy<1.12" numba "pillow<10.4.0" + pip freeze + - run: + command: | + source venv_test/bin/activate export _GRID2OP_FORCE_TEST=1 + cd /tmp grid2op.testinstall install310: @@ -261,44 +302,24 @@ jobs: - run: command: | source venv_test/bin/activate - python -m pip install -U "numpy>=1.21,<1.22" - python -m pip install -U .[test] + python -m pip install -U "numpy>=1.21,<1.22" "pandas<2.2" "scipy<1.12" numba . + pip freeze + - run: + command: | + source venv_test/bin/activate export _GRID2OP_FORCE_TEST=1 + cd /tmp grid2op.testinstall - # - run: - # command: | - # source venv_test/bin/activate - # python -m pip install -U "numpy>=1.22,<1.23" - # python -m pip install -U .[test] - # export _GRID2OP_FORCE_TEST=1 - # grid2op.testinstall - # - run: - # command: | - # source venv_test/bin/activate - # python -m pip install -U "numpy>=1.23,<1.24" - # python -m pip install -U .[test] - # export _GRID2OP_FORCE_TEST=1 - # grid2op.testinstall - # - run: - # command: | - # source venv_test/bin/activate - # python -m pip install -U "numpy>=1.24,<1.25" - # python -m pip install -U .[test] - # export _GRID2OP_FORCE_TEST=1 - # grid2op.testinstall - # - run: - # command: | - # source venv_test/bin/activate - # python -m pip install -U "numpy>=1.25,<1.26" - # python -m pip install -U .[test] - # export _GRID2OP_FORCE_TEST=1 - # grid2op.testinstall - run: command: | source venv_test/bin/activate - python -m pip install -U "numpy>=1.26,<1.27" - python -m pip install -U .[test] + python -m pip install -U "numpy>=1.26,<1.27" "pandas<2.2" "scipy<1.12" numba + pip freeze + - run: + command: | + source venv_test/bin/activate export _GRID2OP_FORCE_TEST=1 + cd /tmp grid2op.testinstall install311: @@ -316,34 +337,27 @@ jobs: command: | source venv_test/bin/activate python -m pip install -U pip setuptools wheel - python -m pip install -U numba - run: command: | source venv_test/bin/activate - python -m pip install -U "numpy>=1.23,<1.24" - python -m pip install -U .[test] + python -m pip install -U "numpy>=1.23,<1.24" "pandas<2.2" "scipy<1.12" numba . + pip freeze + - run: + command: | + source venv_test/bin/activate export _GRID2OP_FORCE_TEST=1 + cd /tmp grid2op.testinstall - # - run: - # command: | - # source venv_test/bin/activate - # python -m pip install -U "numpy>=1.24,<1.25" - # python -m pip install -U .[test] - # export _GRID2OP_FORCE_TEST=1 - # grid2op.testinstall - # - run: - # command: | - # source venv_test/bin/activate - # python -m pip install -U "numpy>=1.25,<1.26" - # python -m pip install -U .[test] - # export _GRID2OP_FORCE_TEST=1 - # grid2op.testinstall - run: command: | source venv_test/bin/activate - python -m pip install -U "numpy>=1.26,<1.27" - python -m pip install -U .[test] + python -m pip install -U "numpy>=1.26,<1.27" "pandas<2.2" "scipy<1.12" numba + pip freeze + - run: + command: | + source venv_test/bin/activate export _GRID2OP_FORCE_TEST=1 + cd /tmp grid2op.testinstall install312: executor: python312 @@ -364,9 +378,13 @@ jobs: - run: command: | source venv_test/bin/activate - python -m pip install -U "numpy>=1.26,<1.27" - python -m pip install -U .[test] + python -m pip install -U "numpy>=1.26,<1.27" "pandas<2.2" "scipy<1.12" . 
+ pip freeze + - run: + command: | + source venv_test/bin/activate export _GRID2OP_FORCE_TEST=1 + cd /tmp grid2op.testinstall workflows: @@ -374,10 +392,14 @@ workflows: test: jobs: - test + - legacy_lightsim_old_pp + - legacy_lightsim + - test_chronix2grid + install: jobs: - install38 - install39 - install310 - install311 - - install312 # failing because of dependencies of numba, torch etc. Tired of it so ignoring it ! + - install312 diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 310f61316..185443976 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -12,6 +12,9 @@ jobs: name: Build linux ${{ matrix.python.name }} wheel runs-on: ubuntu-latest container: quay.io/pypa/manylinux2014_x86_64 + env: + ACTIONS_ALLOW_USE_UNSECURE_NODE_VERSION: true + GHA_USE_NODE_20: false strategy: matrix: python: @@ -44,7 +47,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@v1 + uses: actions/checkout@v3 with: submodules: true @@ -59,20 +62,26 @@ jobs: - name: Build wheel run: | - python3 setup.py bdist_wheel + python setup.py bdist_wheel # auditwheel repair dist/*.whl # only for compiled code ! - name: Install wheel - run: pip3 install dist/*.whl --user + run: | + pip3 install dist/*.whl + pip freeze - name: Check package can be imported run: | - python3 -c "import grid2op" - python3 -c "from grid2op import *" - python3 -c "from grid2op.Action._backendAction import _BackendAction" + python -c "import grid2op" + python -c "from grid2op import *" + python -c "from grid2op.Action._backendAction import _BackendAction" + + - name: List wheel + run: + ls ./dist/*.whl - name: Upload wheel - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: grid2op-wheel-${{ matrix.config.name }}-${{ matrix.python.name }} path: dist/*.whl @@ -136,44 +145,106 @@ jobs: - name: Install wheel shell: bash - run: python -m pip install dist/*.whl --user + run: | + python -m pip install dist/*.whl --user + pip freeze - name: Check package can be imported run: | - python3 -c "import grid2op" - python3 -c "from grid2op import *" - python3 -c "from grid2op.Action._backendAction import _BackendAction" + python -c "import grid2op" + python -c "from grid2op import *" + python -c "from grid2op.Action._backendAction import _BackendAction" - name: Build source archive if: matrix.config.name == 'darwin' && matrix.python.name == 'cp310' run: python setup.py sdist - name: Upload wheel - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v4 with: name: grid2op-wheel-${{ matrix.config.name }}-${{ matrix.python.name }} path: dist/*.whl - name: Upload source archive - uses: actions/upload-artifact@v2 - if: matrix.config.name == 'darwin' && matrix.python.name == 'cp39' + uses: actions/upload-artifact@v4 + if: matrix.config.name == 'darwin' && matrix.python.name == 'cp310' with: name: grid2op-sources path: dist/*.tar.gz + auto_class_in_file: + name: Test ${{ matrix.config.name }} OS can handle automatic class generation + runs-on: ${{ matrix.config.os }} + strategy: + matrix: + config: + - { + name: darwin, + os: macos-latest, + } + # - { + # name: windows, + # os: windows-2019, + # } + - { + name: ubuntu, + os: ubuntu-latest, + } + python: + - { + name: cp39, + version: '3.9', + } + - { + name: cp312, + version: '3.12', + } + + steps: + + - name: Checkout sources + uses: actions/checkout@v1 + with: + submodules: true + + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python.version }} + + - name: 
Install Python dependencies + run: | + python -m pip install --upgrade pip + python -m pip install --upgrade wheel + python -m pip install --upgrade setuptools + python -m pip install --upgrade gymnasium "numpy<2" + + - name: Build wheel + run: python setup.py bdist_wheel + + - name: Install wheel + shell: bash + run: | + python -m pip install dist/*.whl --user + pip freeze + + - name: Test the automatic generation of classes in the env folder + run: | + python -m unittest grid2op/tests/automatic_classes.py -f + package: name: Test install runs-on: ubuntu-latest - needs: [manylinux_build, macos_windows_build] + needs: [manylinux_build, macos_windows_build, auto_class_in_file] steps: - name: Download wheels - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v4 with: path: download - name: Upload wheels - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v4 with: name: grid2op-wheels path: | diff --git a/.gitignore b/.gitignore index 84e7e7bd5..6bd200b60 100644 --- a/.gitignore +++ b/.gitignore @@ -399,6 +399,23 @@ pp_bug_gen_alone.py test_dunder.py grid2op/tests/test_fail_ci.txt saved_multiepisode_agent_36bus_DN_4/ +grid2op/tests/requirements.txt +grid2op/tests/venv_test_311/ +issue_577/ +junk.py +grid2op/tests/20240429_failed_tests.txt +grid2op/tests/20240429_failed_tests_small.txt +grid2op/tests/20240429_teq_test.txt +grid2op/tests/req_38_np121 +test_make_2_envs.py +getting_started/env_py38_grid2op110_ray110.ipynb +getting_started/env_py38_grid2op110_ray210.ipynb +grid2op/tests/req_chronix2grid +grid2op/tests/venv_test_chronix2grid/ +getting_started/venv_310_ray/ +grid2op/tests/venv_test_autoclass/ +test_eduardo.py +grid2op/tests/failed_test* # profiling files **.prof diff --git a/.readthedocs.yml b/.readthedocs.yml index 6f2d283a9..8dbbe353f 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -1,7 +1,14 @@ -version: 2 +version: "2" + +build: + os: "ubuntu-22.04" + tools: + python: "3.10" + +sphinx: + configuration: docs/conf.py python: - version: 3.8 install: - method: pip path: . diff --git a/CHANGELOG.rst b/CHANGELOG.rst index f492ba4a1..d8cee880a 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -31,14 +31,259 @@ Change Log - [???] "asynch" multienv - [???] properly model interconnecting powerlines - -[1.9.8] - 20xx-yy-zz +Work kind of in progress +---------------------------------- +- TODO A number of max buses per sub +- TODO in the runner, save multiple times the same sceanrio +- TODO in the gym env, make the action_space and observation_space attribute + filled automatically (see ray integration, it's boring to have to copy paste...) + +Next release +--------------------------------- +- numpy 2 compat (need pandapower for that) +- automatic read from local dir also on windows ! +- TODO doc for the "new" feature of automatic "experimental_read_from_local_dir" +- TODO bug on maintenance starting at midnight (they are not correctly handled in the observation) + => cf script test_issue_616 +- TODO put the Grid2opEnvWrapper directly in grid2op as GymEnv +- TODO faster gym_compat (especially for DiscreteActSpace and BoxGymObsSpace) +- TODO Notebook for tf_agents +- TODO Notebook for acme +- TODO Notebook using "keras rl" (see https://keras.io/examples/rl/ppo_cartpole/) +- TODO example for MCTS https://github.com/bwfbowen/muax et https://github.com/google-deepmind/mctx +- TODO jax everything that can be: create a simple env based on jax for topology manipulation, without + redispatching or rules +- TODO backend in jax, maybe ? 
+- TODO done and truncated properly handled in gym_compat module (when game over
+  before the end it's probably truncated and not done)
+- TODO when reset, have an attribute "reset_infos" with some infos about the
+  way reset was called.
+- TODO ForecastEnv in MaskedEnv ! (and obs.simulate there too !)
+- TODO finish the test in automatic_classes
+- TODO in multi-mix increase the reset options with the mix the user wants
+- TODO L2RPN scores as reward (sum loads after the game over and have it in the final reward)
+- TODO on CI: test only gym, only gymnasium and keep current test for both gym and gymnasium
+- TODO work on the reward class (see https://github.com/rte-france/Grid2Op/issues/584)
+
+
+[1.10.4] - 2024-xx-yy
+-------------------------
+- [FIXED] an issue in the backend: if the backend failed to be copied or created,
+  the `_grid` attribute was set to `None` and not set back to
+  its original value in the copied backend.
+- [FIXED] the `self.skip_if_needed()` was missing for one of the test suites.
+- [FIXED] the correct `AmbiguousAction` is now raised when grid2op does not understand
+  what an action should be doing (an incorrect `IllegalAction` used to be sent)
+- [FIXED] a test in `test_ActionProperties` did not test the correct property
+
+[1.10.3] - 2024-07-12
+-------------------------
+- [BREAKING] `env.chronics_handler.set_max_iter(xxx)` is now a private function. Use
+  `env.set_max_iter(xxx)` or even better `env.reset(options={"max step": xxx})`.
+  Indeed, `env.chronics_handler.set_max_iter()` will likely have
+  no effect at all on your environment.
+- [BREAKING] for all the `Handler` (*eg* `CSVForecastHandler`) the method `set_max_iter` is
+  now private (for the same reason as the `env.chronics_handler`). We do not recommend using
+  it (will likely have no effect). Prefer using `env.set_max_iter` instead.
+- [BREAKING] now the `runner.run()` method only accepts keyword arguments
+  (because it should always have been like this)
+- [BREAKING] to improve pickle support and multi processing capabilities, the attributes
+  `gym_env.observation_space._init_env` and `gym_env.observation_space.initial_obs_space`
+  have been deleted (for the `Dict` space only, for the other spaces like the `Box` they
+  were not present in the first place)
+- [BREAKING] in the `GymEnv` class now by default the underlying grid2op environment has no
+  forecast anymore in an attempt to make this wrapper faster AND more easily pickle-able. You can
+  retrieve the old behaviour by passing `gym_env = GymEnv(grid2op_env, with_forecast=True)`
+- [FIXED] a bug in the `MultiFolder` and `MultifolderWithCache` leading to the wrong
+  computation of `max_iter` on some corner cases
+- [FIXED] the function `cleanup_action_space()` did not work correctly when the "chronics_handler"
+  was not initialized for some classes
+- [FIXED] the `_observationClass` attribute of the "observation env" (used for simulate and forecasted env)
+  is now an Observation and not an Action.
+- [FIXED] a bug when deep copying an "observation environment" (it changes its class)
+- [FIXED] issue on `seed` and `MultifolderWithCache` which caused
+  https://github.com/rte-france/Grid2Op/issues/616
+- [FIXED] another issue with the seeding of `MultifolderWithCache`: the seed was not used
+  correctly on the cache data when calling `chronics_handler.reset` multiple times without
+  any changes
+- [FIXED] `Backend` now properly raises EnvError (grid2op exception) instead of previously
+  `EnvironmentError` (python default exception)
+- [FIXED] a bug in `PandaPowerBackend` (missing attribute) causing directly
+  https://github.com/rte-france/Grid2Op/issues/617
+- [FIXED] a bug in `Environment`: the thermal limits were used when loading the environment
+  even before the "time series" are applied (and before the user-defined thermal limits were set)
+  which could lead to disconnected powerlines even before the initial step (t=0, when time
+  series are loaded)
+- [FIXED] an issue with the "max_iter" for the `FromNPY` time series generator
+- [FIXED] a bug in `MultiMixEnvironment`: a multi-mix could be created even if the underlying
+  powergrids (for each mix) were not the same.
+- [FIXED] a bug in `generate_classes` (experimental_read_from_local_dir) with alert data.
+- [FIXED] a bug in the `Runner` when using multi processing on macos and windows OS: some non-default
+  parameters were not propagated in the "child" process (bug in `runner._ger_params`)
+- [ADDED] possibility to skip some steps when calling `env.reset(..., options={"init ts": ...})`
+- [ADDED] possibility to limit the duration of an episode with `env.reset(..., options={"max step": ...})`
+- [ADDED] possibility to specify the "reset_options" used in `env.reset` when
+  using the runner with `runner.run(..., reset_options=xxx)`
+- [ADDED] the argument `mp_context` when building the runner to help pass a multiprocessing context in the
+  grid2op `Runner`
+- [ADDED] the time series are now able to regenerate their "random" part
+  even when "cached" thanks to the addition of the `regenerate_with_new_seed` method of the
+  `GridValue` class (in public API)
+- [ADDED] `MultifolderWithCache` now supports the `FromHandlers` time series generator
+- [IMPROVED] more consistency in the way the classes are initialized at the creation of an environment
+- [IMPROVED] more consistency when an environment is copied (some attributes of the copied env were
+  deep copied incorrectly)
+- [IMPROVED] Doc about the runner
+- [IMPROVED] the documentation on the `time series` folder.
+- [IMPROVED] now the "maintenance from json" (*eg* the `JSONMaintenanceHandler` or the + `GridStateFromFileWithForecastsWithMaintenance`) can be customized with the day + of the week where the maintenance happens (key `maintenance_day_of_week`) +- [IMPROVED] in case of "`MultiMixEnvironment`" there is now only class generated for + all the underlying mixes (instead of having one class per mixes) +- [IMPROVED] the `EpisodeData` have now explicitely a mode where they can be shared accross + processes (using `fork` at least), see `ep_data.make_serializable` +- [IMPROVED] chronix2grid tests are now done independantly on the CI + +[1.10.2] - 2024-05-27 +------------------------- +- [BREAKING] the `runner.run_one_episode` now returns an extra argument (first position): + `chron_id, chron_name, cum_reward, timestep, max_ts = runner.run_one_episode()` which + is consistant with `runner.run(...)` (previously it returned only + `chron_name, cum_reward, timestep, max_ts = runner.run_one_episode()`) +- [BREAKING] the runner now has no `chronics_handler` attribute (`runner.chronics_handler` + is not defined) +- [BREAKING] now grid2op forces everything to be connected at busbar 1 if + `param.IGNORE_INITIAL_STATE_TIME_SERIE == True` (**NOT** the default) and + no initial state is provided in `env.reset(..., options={"init state": ...})` +- [ADDED] it is now possible to call `change_reward` directly from + an observation (no need to do it from the Observation Space) +- [ADDED] method to change the reward from the observation (observation_space + is not needed anymore): you can use `obs.change_reward` +- [ADDED] a way to automatically set the `experimental_read_from_local_dir` flags + (with automatic class creation). For now it is disable by default, but you can + activate it transparently (see doc) +- [ADDED] possibility to set the grid to an initial state (using an action) when using the + "time series" classes. The supported classes are `GridStateFromFile` - and all its derivative, + `FromOneEpisodeData`, `FromMultiEpisodeData`, `FromNPY` and `FromHandlers`. The classes `ChangeNothing` + and `FromChronix2grid` are not supported at the moment. +- [ADDED] an "Handler" (`JSONInitStateHandler`) that can set the grid to an initial state (so as to make + compatible the `FromHandlers` time series class with this new feature) +- [ADDED] some more type hints in the `GridObject` class +- [ADDED] Possibility to deactive the support of shunts if subclassing `PandaPowerBackend` + (and add some basic tests) +- [ADDED] a parameters (`param.IGNORE_INITIAL_STATE_TIME_SERIE`) which defaults to + `False` that tells the environment whether it should ignore the + initial state of the grid provided in the time series. + By default it is NOT ignored, it is taken into account + (for the environment that supports this feature) +- [FIXED] a small issue that could lead to having + "redispatching_unit_commitment_availble" flag set even if the redispatching + data was not loaded correctly +- [FIXED] EducPandaPowerBackend now properly sends numpy array in the class attributes + (instead of pandas series) +- [FIXED] an issue when loading back data (with `EpisodeData`): when there were no storage units + on the grid it did not set properly the "storage relevant" class attributes +- [FIXED] a bug in the "gridobj.generate_classes()" function which crashes when no + grid layout was set +- [FIXED] notebook 5 on loading back data with `EpisodeData`. 
+- [FIXED] converter between backends (could not handle more than 2 busbars)
+- [FIXED] a bug in `BaseMultiProcessEnvironment`: set_filter had no impact
+- [FIXED] an issue in the `Runner` (`self.chronics_handler` was sometimes used, sometimes not
+  and most of the time incorrectly)
+- [FIXED] on `RemoteEnv` class (impacts all multi process environments): the kwargs used to build the backend
+  were not used, which could lead to "wrong" backends being used in the sub processes.
+- [FIXED] a bug when the name of the time series and the names of the elements in the backend were
+  different: it was not possible to set `names_chronics_to_grid` correctly when calling `env.make`
+- [IMPROVED] documentation about `obs.simulate` to make the
+  difference between env.step and obs.simulate clearer in some cases
+- [IMPROVED] type hints on some methods of `GridObjects`
+- [IMPROVED] replace `np.nonzero(arr)` calls with `arr.nonzero()` which could
+  save up a bit of computation time.
+- [IMPROVED] force class attributes to be numpy arrays of proper types when the
+  classes are initialized from the backend.
+- [IMPROVED] some (slight) speed improvements when comparing actions or deep copying objects
+- [IMPROVED] the way the "grid2op compat" mode is handled
+- [IMPROVED] the coverage of the tests in the "test_basic_env_ls.py" to test lightsim2grid more in depth
+  (creation of multiple environments, grid2op compatibility mode)
+- [IMPROVED] the function to test the backend interface in the case when shunts are not supported
+  (improved test `AAATestBackendAPI.test_01load_grid`)
+
+[1.10.1] - 2024-03-xx
+----------------------
+- [FIXED] issue https://github.com/rte-france/Grid2Op/issues/593
+- [FIXED] backward compatibility issues with "oldest" lightsim2grid versions
+  (now tested in basic settings)
+- [ADDED] a "compact" way to store the data in the Runner
+- [IMPROVED] the "`train_val_split`" functions, now more names (for the folders)
+  can be used
+
+[1.10.0] - 2024-03-06
+----------------------
+- [BREAKING] the order of the actions in `env.action_space.get_all_unitary_line_set` and
+  `env.action_space.get_all_unitary_topologies_set` might have changed (this is caused
+  by a rewriting of these functions in case there are not 2 busbars per substation)
+- [FIXED] github CI did not upload the source files
+- [FIXED] `l2rpn_utils` module did not store correctly the order
+  of actions and observations for wcci_2020
+- [FIXED] 2 bugs detected by static code analysis (thanks sonar cloud)
+- [FIXED] a bug in `act.get_gen_modif` (vector of wrong size was used, could lead
+  to some crashes if `n_gen >= n_load`)
+- [FIXED] a bug in `act.as_dict` when shunts were modified
+- [FIXED] a bug affecting shunts: sometimes it was not possible to modify their p / q
+  values for certain values of p or q (an AmbiguousAction exception was raised wrongly)
+- [FIXED] a bug in the `_BackendAction`: the "last known topology" was not properly computed
+  in some cases (especially at the time when a line was reconnected)
+- [FIXED] `MultiDiscreteActSpace` and `DiscreteActSpace` could be the same classes
+  in some cases (typo in the code).
+- [FIXED] a bug in `MultiDiscreteActSpace`: the "do nothing" action could not be done if `one_sub_set` (or `one_sub_change`)
+  was selected in `attr_to_keep`
+- [ADDED] a method `gridobj.topo_vect_element()` that does the opposite of `gridobj.xxx_pos_topo_vect`
+- [ADDED] a method `gridobj.get_powerline_id(sub_id)` that gives the
+  id of all powerlines connected to a given substation
+- [ADDED] a convenience function `obs.get_back_to_ref_state(...)`
+  for the observation and not only the action_space.
+- [IMPROVED] handling of "compatibility" grid2op version
+  (by calling the relevant things done in the base class
+  in `BaseAction` and `BaseObservation`) and by using the `from packaging import version`
+  to check version (instead of comparing strings)
+- [IMPROVED] the code of `check_kirchoff` slightly to make it clearer
+- [IMPROVED] typing and doc for some of the main classes of the `Action` module
+- [IMPROVED] typing and doc for some of the main classes of the `Observation` module
+- [IMPROVED] methods `gridobj.get_lines_id`, `gridobj.get_generators_id`, `gridobj.get_loads_id`
+  and `gridobj.get_storages_id` are now class methods and can be used with `type(env).get_lines_id(...)`
+  or `act.get_lines_id(...)` for example.
+- [IMPROVED] `obs.get_energy_graph()` by giving the "local_bus_id" and the "global_bus_id"
+  of the bus that represents each node of this graph.
+- [IMPROVED] `obs.get_elements_graph()` by giving access to the bus id (local, global and
+  id of the node) where each element is connected.
+- [IMPROVED] description of the different graphs of the grid in the documentation.
+- [IMPROVED] type hints for the `gym_compat` module (more work still required in this area)
+- [IMPROVED] the `MultiDiscreteActSpace` to have one "dimension" controlling all powerlines
+  (see "one_line_set" and "one_line_change")
+- [IMPROVED] doc at different places, including the addition of the MDP implemented by grid2op.
+
+[1.9.8] - 2024-01-26
 ----------------------
+- [FIXED] the `backend.check_kirchoff` function was not correct when some elements were disconnected
+  (the wrong columns of the p_bus and q_bus were set in case of disconnected elements)
+- [FIXED] `PandapowerBackend`, when no slack was present
+- [FIXED] the "BaseBackendTest" class did not correctly detect divergence in most cases (which led
+  to weird bugs in failing tests)
+- [FIXED] an issue with imageio having deprecated the `fps` kwarg (see https://github.com/rte-france/Grid2Op/issues/569)
+- [FIXED] adding the "`loads_charac.csv`" in the package data
+- [FIXED] a bug when using grid2op: no "utils.py" script could be used (see
+  https://github.com/rte-france/Grid2Op/issues/577). This was caused by the modification of
+  `sys.path` when importing the grid2op test suite.
+- [ADDED] A type of environment that does not perform the "emulation of the protections"
+  for some part of the grid (`MaskedEnvironment`) see https://github.com/rte-france/Grid2Op/issues/571
+- [ADDED] a "gym like" API for reset allowing to set the seed and the time serie id directly when calling
+  `env.reset(seed=.., options={"time serie id": ...})`
 - [IMPROVED] the CI speed: by not testing every possible numpy version but only most ancient and most recent
 - [IMPROVED] Runner now test grid2op version 1.9.6 and 1.9.7
 - [IMPROVED] refacto `gridobj_cls._clear_class_attribute` and `gridobj_cls._clear_grid_dependant_class_attributes`
 - [IMPROVED] the bahviour of the generic class `MakeBackend` used for the test suite.
 - [IMPROVED] re introducing python 12 testing
+- [IMPROVED] error messages in the automatic test suite (`AAATestBackendAPI`)
 
 [1.9.7] - 2023-12-01
 ----------------------
@@ -685,12 +930,12 @@ Change Log
 - [ADDED]: function to retrieve the maximum duration of the current episode.
 - [ADDED]: a new kind of opponent that is able to attack at "more random" times with "more random"
   duration. See the `GeometricOpponent`.
-- [IMPROVED]: on windows at least, grid2op does not work with gym < 0.17.2 Checks are performed in order to make sure
+- [IMPROVED]: on windows at least, grid2op does not work with `gym < 0.17.2` Checks are performed in order to make sure
   the installed open ai gym package meets this requirement (see issue
   `Issue#185 `_ )
 - [IMPROVED] the seed of openAI gym for composed action space (see issue
   `https://github.com/openai/gym/issues/2166`): in waiting for an official fix, grid2op will use the solution proposed there
-  https://github.com/openai/gym/issues/2166#issuecomment-803984619 )
+  https://github.com/openai/gym/issues/2166#issuecomment-803984619
 
 [1.5.1] - 2021-04-15
 -----------------------
diff --git a/Dockerfile b/Dockerfile
index 9d0271a0a..3d47b14d3 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -35,7 +35,7 @@ WORKDIR /Grid2Op
 RUN git pull
 RUN git remote update
 RUN git fetch --all --tags
-RUN git checkout "tags/v1.9.7" -b "v1.9.7-branch"
+RUN git checkout "tags/v1.10.3" -b "v1.10.3-branch"
 # Install Dependencies
 RUN pip3 install .[optional,challenge]
 WORKDIR /
diff --git a/MANIFEST.in b/MANIFEST.in
index 25337d7a1..3692f5526 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,4 @@
-recursive-include grid2op/data *.bz2 *.json *.zip prods_charac.csv *.py .multimix storage_units_charac.csv start_datetime.info time_interval.info
+recursive-include grid2op/data *.bz2 *.json *.zip loads_charac.csv prods_charac.csv *.py .multimix storage_units_charac.csv start_datetime.info time_interval.info
 global-exclude */__pycache__/*
 global-exclude *.pyc
 global-exclude grid2op/data_test/*
diff --git a/README.md b/README.md
index cddf1f5a9..b24db06d9 100644
--- a/README.md
+++ b/README.md
@@ -305,6 +305,8 @@ The complete test suit is run on linux with the latest numpy version on python 3
 
 ### Known issues
 
+
+#### Multi processing
 Due to the underlying behaviour of the "multiprocessing" package on windows based python versions,
 the "multiprocessing" of the grid2op "Runner" is not supported on windows. This might change in the future,
 but it is currently not on our priorities.
@@ -312,6 +314,15 @@ but it is currently not on our priorities.
 A quick fix that is known to work include to set the `experimental_read_from_local_dir`
 when creating the environment with `grid2op.make(..., experimental_read_from_local_dir=True)` (see doc for more information)
 
+Sometimes, on some configurations (python versions), we do not recommend using grid2op with pandas>=2.2.
+If you encounter any trouble, please downgrade to pandas<2.2. This behaviour occurred in our continuous
+integration environment for python >=3.9 but could not be reproduced locally.
+
+#### python 3.11
+Some versions of grid2op (*eg* 1.6.3) are not compatible with python 3.10 or 3.11.
+
+Either use python 3.8 or 3.9, or upgrade grid2op (1.6.5 works) if that is the case.
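For reference, a minimal sketch of the workarounds mentioned in this "Known issues" section (the environment name is purely illustrative, any grid2op environment works the same way):

```python
import grid2op

# quick fix mentioned above for the multiprocessing limitation: generate the
# classes locally when creating the environment and read them back from disk
env = grid2op.make("l2rpn_case14_sandbox",  # illustrative environment name
                   experimental_read_from_local_dir=True)

# for the pandas related issue, the downgrade is done outside python, e.g.:
#   pip install "pandas<2.2"
```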
+ ### Perform tests locally Provided that Grid2Op is installed *from source*: diff --git a/docs/_static/hacks.css b/docs/_static/hacks.css new file mode 100644 index 000000000..a0fa73de4 --- /dev/null +++ b/docs/_static/hacks.css @@ -0,0 +1,326 @@ +/* + * CSS hacks and small modification for my Sphinx website + * :copyright: Copyright 2013-2016 Lilian Besson + * :license: GPLv3, see LICENSE for details. + */ + + +/* Colors and text decoration. + For example, :black:`text in black` or :blink:`text blinking` in rST. */ + + .black { + color: black; +} + +.gray { + color: gray; +} + +.grey { + color: gray; +} + +.silver { + color: silver; +} + +.white { + color: white; +} + +.maroon { + color: maroon; +} + +.red { + color: red; +} + +.magenta { + color: magenta; +} + +.fuchsia { + color: fuchsia; +} + +.pink { + color: pink; +} + +.orange { + color: orange; +} + +.yellow { + color: yellow; +} + +.lime { + color: lime; +} + +.green { + color: green; +} + +.olive { + color: olive; +} + +.teal { + color: teal; +} + +.cyan { + color: cyan; +} + +.aqua { + color: aqua; +} + +.blue { + color: blue; +} + +.navy { + color: navy; +} + +.purple { + color: purple; +} + +.under { + text-decoration: underline; +} + +.over { + text-decoration: overline; +} + +.blink { + text-decoration: blink; +} + +.line { + text-decoration: line-through; +} + +.strike { + text-decoration: line-through; +} + +.it { + font-style: italic; +} + +.ob { + font-style: oblique; +} + +.small { + font-size: small; +} + +.large { + font-size: large; +} + +.smallpar { + font-size: small; +} + + +/* Style pour les badges en bas de la page. */ + +div.supportBadges { + margin: 1em; + text-align: right; +} + +div.supportBadges ul { + padding: 0; + display: inline; +} + +div.supportBadges li { + display: inline; +} + +div.supportBadges a { + margin-right: 1px; + opacity: 0.6; +} + +div.supportBadges a:hover { + opacity: 1; +} + + +/* Details elements in the sidebar */ + +a.reference { + border-bottom: none; + text-decoration: none; +} + +ul.details { + font-size: 80%; +} + +ul.details li p { + font-size: 85%; +} + +ul.externallinks { + font-size: 85%; +} + + +/* Pour le drapeau de langue */ + +img.languageswitch { + width: 50px; + height: 32px; + margin-left: 5px; + vertical-align: bottom; +} + +div.sphinxsidebar { + overflow: hidden !important; + font-size: 120%; + word-wrap: break-word; + width: 300px; + max-width: 300px; +} + +div.sphinxsidebar h3 { + font-size: 125%; +} + +div.sphinxsidebar h4 { + font-size: 110%; +} + +div.sphinxsidebar a { + font-size: 85%; +} + + +/* Image style for scrollUp jQuery plugin */ + +#scrollUpLeft { + bottom: 50px; + left: 260px; + height: 38px; + width: 38px; + background: url('//perso.crans.org/besson/_images/.top.svg'); + background: url('../_images/.top.svg'); +} + +@media screen and (max-width: 875px) { + #scrollUpLeft { + right: 50px; + left: auto; + } +} + + +/* responsive for font-size. 
*/ + +@media (max-width: 875px) { + body { + font-size: 105%; + /* Increase font size for responsive theme */ + } +} + +@media (max-width: 1480px) and (min-width: 876px) { + body { + font-size: 110%; + /* Increase font size for not-so-big screens */ + } +} + +@media (min-width: 1481px) { + body { + font-size: 115%; + /* Increase even more font size for big screens */ + } +} + + +/* Social Icons in the sidebar (available: twitter, facebook, linkedin, google+, bitbucket, github) */ + +.social-icons { + display: inline-block; + margin: 0; + text-align: center; +} + +.social-icons a { + background: none no-repeat scroll center top #444444; + border: 1px solid #F6F6F6; + border-radius: 50% 50% 50% 50%; + display: inline-block; + height: 35px; + width: 35px; + margin: 0; + text-indent: -9000px; + transition: all 0.2s ease 0s; + text-align: center; + border-bottom: none; +} + +.social-icons li { + display: inline-block; + list-style-type: none; + border-bottom: none; +} +.social-icons li a { + border-bottom: none; +} + +.social-icons a:hover { + background-color: #666666; + transition: all 0.2s ease 0s; + text-decoration: none; +} + +.social-icons a.facebook { + background-image: url('../_images/.facebook.png'); + background-image: url('//perso.crans.org/besson/_images/.facebook.png'); + display: block; + margin-left: auto; + margin-right: auto; + background-size: 35px 35px; +} + +.social-icons a.bitbucket { + background-image: url('../_images/.bitbucket.png'); + background-image: url('//perso.crans.org/besson/_images/.bitbucket.png'); + display: block; + margin-left: auto; + margin-right: auto; + background-size: 35px 35px; +} + +.social-icons li a.github { + background-image: url('../_images/.github.png'); + background-image: url('//perso.crans.org/besson/_images/.github.png'); + display: block; + margin-left: auto; + margin-right: auto; + background-size: 35px 35px; +} + +.social-icons li a.wikipedia { + background-image: url('../_images/.wikipedia.png'); + background-image: url('//perso.crans.org/besson/_images/.wikipedia.png'); + display: block; + margin-left: auto; + margin-right: auto; + background-size: 35px 35px; +} \ No newline at end of file diff --git a/docs/chronics.rst b/docs/chronics.rst deleted file mode 100644 index 428852556..000000000 --- a/docs/chronics.rst +++ /dev/null @@ -1,102 +0,0 @@ -.. currentmodule:: grid2op.Chronics - -Chronics -=================================== - -This page is organized as follow: - -.. contents:: Table of Contents - :depth: 3 - -Objectives ------------ -This module is present to handle everything related to input data that are not structural. - -In the Grid2Op vocabulary a "GridValue" or "Chronics" is something that provides data to change the input parameter -of a power flow between 1 time step and the other. - -It is a more generic terminology. Modification that can be performed by :class:`GridValue` object includes, but -are not limited to: - - - injections such as: - - - generators active production setpoint - - generators voltage setpoint - - loads active consumption - - loads reactive consumption - - - structural informations such as: - - - planned outage: powerline disconnection anticipated in advance - - hazards: powerline disconnection that cannot be anticipated, for example due to a windstorm. - -All powergrid modification that can be performed using an :class:`grid2op.Action.BaseAction` can be implemented as -form of a :class:`GridValue`. 
- -The same mechanism than for :class:`grid2op.Action.BaseAction` or :class:`grid2op.Observation.BaseObservation` -is pursued here. All state modifications made by the :class:`grid2op.Environment` must derived from -the :class:`GridValue`. It is not recommended to create them directly, but rather to use -the :class:`ChronicsHandler` for such a purpose. - -Note that the values returned by a :class:`GridValue` are **backend dependant**. A GridValue object should always -return the data in the order expected by the :class:`grid2op.Backend`, regardless of the order in which data are given -in the files or generated by the data generator process. - -This implies that changing the backend will change the output of :class:`GridValue`. More information about this -is given in the description of the :func:`GridValue.initialize` method. - -Finally, compared to other Reinforcement Learning problems, is the possibility to use "forecast". This optional feature -can be accessed via the :class:`grid2op.Observation.BaseObservation` and mainly the -:func:`grid2op.Observation.BaseObservation.simulate` method. The data that are used to generate this forecasts -come from the :class:`grid2op.GridValue` and are detailed in the -:func:`GridValue.forecasts` method. - - -More control on the chronics -------------------------------- -We explained, in the description of the :class:`grid2op.Environment` in sections -:ref:`environment-module-chronics-info` and following how to have more control on which chronics is used, -with steps are used within a chronics etc. We will not detailed here again, please refer to this page -for more information. - -However, know that you can have a very detailed control on which chronics are used: - -- use `env.set_id(THE_CHRONIC_ID)` (see :func:`grid2op.Environment.Environment.set_id`) to set the id of the - chronics you want to use -- use `env.chronics_handler.set_filter(a_function)` (see :func:`grid2op.Chronics.GridValue.set_filter`) - to only use certain chronics -- use `env.chronics_handler.sample_next_chronics(probas)` - (see :func:`grid2op.Chronics.GridValue.sample_next_chronics`) to draw at random some chronics -- use `env.fast_forward_chronics(nb_time_steps)` - (see :func:`grid2op.Environment.BaseEnv.fast_forward_chronics`) to skip initial number of steps - of a given chronics -- use `env.chronics_handler.set_max_iter(nb_max_iter)` - (see :func:`grid2op.Chronics.ChronicsHandler.set_max_iter`) to limit the number of steps within an episode - -Chosing the right chronics can also lead to some large advantage in terms of computation time. This is -particularly true if you want to benefit the most from HPC for example. More detailed is given in the -:ref:`environment-module-data-pipeline` section. In summary: - -- set the "chunk" size (amount of data read from the disk, instead of reading an entire scenarios, you read - from the hard drive only a certain amount of data at a time, see - :func:`grid2op.Chronics.ChronicsHandler.set_chunk_size`) you can use it with - `env.chronics_handler.set_chunk_size(100)` -- cache all the chronics and use them from memory (instead of reading them from the hard drive, see - :class:`grid2op.Chronics.MultifolderWithCache`) you can do this with - `env = grid2op.make(..., chronics_class=MultifolderWithCache)` - -Finally, if you need to study machine learning in a "regular" fashion, with a train / validation / set -you can use the `env.train_val_split` or `env.train_val_split_random` functions to do that. 
See -an example usage in the section :ref:`environment-module-train-val-test`. - - - - -Detailed Documentation by class --------------------------------- - -.. automodule:: grid2op.Chronics - :members: - :autosummary: - -.. include:: final.rst diff --git a/docs/conf.py b/docs/conf.py index b9cbbc67d..726281bbc 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -22,8 +22,8 @@ author = 'Benjamin Donnot' # The full version, including alpha/beta/rc tags -release = '1.9.8.dev0' -version = '1.9' +release = '1.10.4.dev0' +version = '1.10' # -- General configuration --------------------------------------------------- @@ -75,6 +75,7 @@ # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] +html_css_files = ['hacks.css'] # for pdf pdf_documents = [('index', u'rst2pdf', u'Grid2op documentation', u'B. DONNOT'),] diff --git a/docs/data_pipeline.rst b/docs/data_pipeline.rst index cb86a6723..0c316667e 100644 --- a/docs/data_pipeline.rst +++ b/docs/data_pipeline.rst @@ -3,6 +3,14 @@ Optimize the data pipeline ============================ +This page is organized as follow: + +.. contents:: Table of Contents + :depth: 3 + +Objectives +-------------------------- + Optimizing the data pipeline can be crucial if you want to learn fast, especially at the beginning of the training. There exists multiple way to perform this task. @@ -47,9 +55,9 @@ Results are reported in the table below: ============================== ================ =================== Method used memory footprint time to perform (s) ============================== ================ =================== -Nothing (see `Basic Usage`_) low 44.6 -set_chunk (see `Chunk size`_) ultra low 26.8 -`MultifolderWithCache`_ high 11.0 +Nothing (see Basic Usage ) low 44.6 +set_chunk (see `Chunk size`_ ) ultra low 26.8 +`MultifolderWithCache`_ high 11.0 ============================== ================ =================== As you can see, the default usage uses relatively little memory but takes a while to compute (almost 45s to perform diff --git a/docs/detailed_topology.rst b/docs/detailed_topology.rst new file mode 100644 index 000000000..c3dfff9c0 --- /dev/null +++ b/docs/detailed_topology.rst @@ -0,0 +1,50 @@ +.. _detailed-topology-modeling-module: + +Dive into the detailed topology "modeling" in grid2op +=================================================================== + +.. warning:: + Work in progress + +What is a "detailed" topology in grid2op +----------------------------------------- + +.. warning:: + Work in progress + + +- Concept of connectivity nodes +- Switches +- Processing of the switches to "original topoolgy" +- + +Impact in grid2op +------------------ + +.. warning:: + Work in progress + +- new action +- new observation + +.. danger:: + Be carefull with convertion fo / from switches !!! + +Why did we add it ? +-------------------- + +.. warning:: + Work in progress + +What features are actually implemented ? +----------------------------------------- + +.. warning:: + Work in progress + + +Pros and cons of using it ? +--------------------------------- +.. warning:: + Work in progress + diff --git a/docs/developer.rst b/docs/developer.rst new file mode 100644 index 000000000..f32eb13de --- /dev/null +++ b/docs/developer.rst @@ -0,0 +1,8 @@ +.. toctree:: + :maxdepth: 1 + + developer/env_content + developer/create_an_environment + developer/createbackend + +.. 
include:: final.rst diff --git a/docs/create_an_environment.rst b/docs/developer/create_an_environment.rst similarity index 96% rename from docs/create_an_environment.rst rename to docs/developer/create_an_environment.rst index f802ad9c7..2e7dcfb88 100644 --- a/docs/create_an_environment.rst +++ b/docs/developer/create_an_environment.rst @@ -1,13 +1,22 @@ -.. |l2rpn_case14_sandbox_layout| image:: ./img/l2rpn_case14_sandbox_layout.png -.. |R2_full_grid| image:: ./img/R2_full_grid.png -.. |l2rpn_neurips_2020_track1_layout| image:: ./img/l2rpn_neurips_2020_track1_layout.png -.. |l2rpn_neurips_2020_track2_layout| image:: ./img/l2rpn_neurips_2020_track2_layout.png -.. |l2rpn_wcci_2022_layout| image:: ./img/l2rpn_wcci_2022_layout.png +.. |l2rpn_case14_sandbox_layout| image:: ../img/l2rpn_case14_sandbox_layout.png +.. |R2_full_grid| image:: ../img/R2_full_grid.png +.. |l2rpn_neurips_2020_track1_layout| image:: ../img/l2rpn_neurips_2020_track1_layout.png +.. |l2rpn_neurips_2020_track2_layout| image:: ../img/l2rpn_neurips_2020_track2_layout.png +.. |l2rpn_wcci_2022_layout| image:: ../img/l2rpn_wcci_2022_layout.png Possible workflow to create an environment from existing time series ====================================================================== +This page is organized as follow: + +.. contents:: Table of Contents + :depth: 3 + + +Workflow in more details +------------------------- + In this subsection, we will give an example on how to set up an environment in grid2op if you already have some data that represents loads and productions at each steps. This paragraph aims at making more concrete the description of the environment shown previously. diff --git a/docs/createbackend.rst b/docs/developer/createbackend.rst similarity index 89% rename from docs/createbackend.rst rename to docs/developer/createbackend.rst index c4746f6d9..93d0fb9c5 100644 --- a/docs/createbackend.rst +++ b/docs/developer/createbackend.rst @@ -21,26 +21,26 @@ .. _line_or_pos_topo_vect: ./space.html#grid2op.Space.GridObjects.line_or_pos_topo_vect .. _line_ex_pos_topo_vect: ./space.html#grid2op.Space.GridObjects.line_ex_pos_topo_vect -.. |5subs_grid_layout| image:: ./img/5subs_grid_layout.jpg -.. |5subs_grid_1_sub| image:: ./img/5subs_grid_1_sub.jpg -.. |5subs_grid_2_loads| image:: ./img/5subs_grid_2_loads.jpg -.. |5subs_grid_3_gens| image:: ./img/5subs_grid_3_gens.jpg -.. |5subs_grid_4_lines| image:: ./img/5subs_grid_4_lines.jpg -.. |5subs_grid_5_obj_in_sub| image:: ./img/5subs_grid_5_obj_in_sub.jpg -.. |5subs_grid_layout_with_repr| image:: ./img/5subs_grid_layout_with_repr.jpg -.. |5subs_grid_n_el| image:: ./img/5subs_grid_n_el.jpg -.. |5subs_grid_5_sub_i| image:: ./img/5subs_grid_5_sub_i.jpg -.. |5subs_grid_load_to_subid| image:: ./img/5subs_grid_load_to_subid.jpg -.. |5subs_grid_el_to_subid| image:: ./img/5subs_grid_el_to_subid.jpg -.. |5subs_grid_sub0| image:: ./img/5subs_grid_sub0.jpg -.. |5subs_grid_sub0_final| image:: ./img/5subs_grid_sub0_final.jpg -.. |5subs_grid_sub1_final| image:: ./img/5subs_grid_sub1_final.jpg -.. |5subs_grid_loads_info| image:: ./img/5subs_grid_loads_info.jpg -.. |5subs_grid_sub1_topo| image:: ./img/5subs_grid_sub1_topo.jpg -.. |5subs_grid_sub1_2_topo| image:: ./img/5subs_grid_sub1_2_topo.jpg -.. |5subs_grid_suball_topo| image:: ./img/5subs_grid_suball_topo.jpg -.. |5subs_grid_ex_disco| image:: ./img/5subs_grid_ex_disco.jpg -.. |5subs_grid_ex_2buses| image:: ./img/5subs_grid_ex_2buses.jpg +.. |5subs_grid_layout| image:: ../img/5subs_grid_layout.jpg +.. 
|5subs_grid_1_sub| image:: ../img/5subs_grid_1_sub.jpg +.. |5subs_grid_2_loads| image:: ../img/5subs_grid_2_loads.jpg +.. |5subs_grid_3_gens| image:: ../img/5subs_grid_3_gens.jpg +.. |5subs_grid_4_lines| image:: ../img/5subs_grid_4_lines.jpg +.. |5subs_grid_5_obj_in_sub| image:: ../img/5subs_grid_5_obj_in_sub.jpg +.. |5subs_grid_layout_with_repr| image:: ../img/5subs_grid_layout_with_repr.jpg +.. |5subs_grid_n_el| image:: ../img/5subs_grid_n_el.jpg +.. |5subs_grid_5_sub_i| image:: ../img/5subs_grid_5_sub_i.jpg +.. |5subs_grid_load_to_subid| image:: ../img/5subs_grid_load_to_subid.jpg +.. |5subs_grid_el_to_subid| image:: ../img/5subs_grid_el_to_subid.jpg +.. |5subs_grid_sub0| image:: ../img/5subs_grid_sub0.jpg +.. |5subs_grid_sub0_final| image:: ../img/5subs_grid_sub0_final.jpg +.. |5subs_grid_sub1_final| image:: ../img/5subs_grid_sub1_final.jpg +.. |5subs_grid_loads_info| image:: ../img/5subs_grid_loads_info.jpg +.. |5subs_grid_sub1_topo| image:: ../img/5subs_grid_sub1_topo.jpg +.. |5subs_grid_sub1_2_topo| image:: ../img/5subs_grid_sub1_2_topo.jpg +.. |5subs_grid_suball_topo| image:: ../img/5subs_grid_suball_topo.jpg +.. |5subs_grid_ex_disco| image:: ../img/5subs_grid_ex_disco.jpg +.. |5subs_grid_ex_2buses| image:: ../img/5subs_grid_ex_2buses.jpg .. _create-backend-module: @@ -89,7 +89,9 @@ everywhere). This includes, but is not limited to: - etc. .. note:: Grid2Op do not care about the modeling of the grid (static / steady state or dyanmic / transient) and both - types of solver could be implemented as backend. At time of writing (december 2020), only steady state powerflow are + Any types of solver could be implemented as backend. + + At time of writing (december 2020), only steady state powerflow are available. .. note:: The previous note entails that grid2op is also independent on the format used to store a powergrid. @@ -131,7 +133,31 @@ everywhere). This includes, but is not limited to: Main methods to implement -------------------------- Typically, a backend has a internal "modeling" / "representation" of the powergrid -stored in the attribute `self._grid` that can be anything. An more detailed example, with some +stored in the attribute `self._grid` that can be anything. + +.. note:: + `self._grid` is a "private" attribute. Only people that knows what it does and how + it works should be able to use it. + + Grid2op being fully generic, you can assume that all the classes of grid2op will never + access `self._grid`. For example, when building the observation of the grid, + grid2op will only use the information given in the `*_infos()` methods + (*eg* :func:`grid2op.Backend.Backend.loads_info`) and never by directly accessing `self._grid` + + In other words, `self._grid` can be anything: a `PandaPower `_ `Network`, a + `GridCal `_ `MultiCircuit`, + a `lightsim2grid `_ `GridModel`, a + `pypowsybl `_ `Network` (or `SortedNetwork`), + a `powerfactory ` `Project` etc. + Grid2op will never attempt to access `self._grid` + + (Though, to be perfectly honest, some agents might rely on some type `_grid`, if that's the case, too + bad for these agents they will need to implement special methods to be compatible with your backend. + Hopefully this should be extremely rare... The whole idea of grid2op being to make the different + "entities" (agent, environment, data, backend) as independant as possible this "corner" cases should + be rare.) + +An more detailed example, with some "working minimal code" is given in the "example/backend_integration" of the grid2op repository. 
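To fix ideas before the detailed walk-through below, here is a minimal (and deliberately non functional) sketch of a custom backend; the method bodies are placeholders and the exact signatures, as well as the full list of abstract methods, should be checked against the `Backend` base class:

.. code-block:: python

    from grid2op.Backend import Backend

    class MyCustomBackend(Backend):
        def load_grid(self, path=None, filename=None):
            # read the grid file with your solver, store it in self._grid and
            # fill the grid description attributes (n_sub, n_load, load_to_subid, ...)
            ...

        def apply_action(self, backendAction):
            # update self._grid with the injections / topology carried by the
            # _BackendAction (see the dedicated section below)
            ...

        def runpf(self, is_dc=False):
            # ask your solver to compute a powerflow and report whether it converged
            ...

        def loads_info(self):
            # "getter": return the active power, reactive power and voltage magnitude
            # of each load as numpy arrays (similar getters exist for generators and
            # for both sides of the powerlines)
            ...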
There are 4 **__main__** types of method you need to implement if you want to use a custom powerflow @@ -172,8 +198,9 @@ There are 4 **__main__** types of method you need to implement if you want to us .. _grid-description: -Grid description ------------------- +load_grid: Grid description +---------------------------- + In this section we explicit what attributes need to be implemented to have a valid backend instance. We focus on the attribute of the `Backend` you have to set. But don't forget you also need to load a powergrid and store it in the `_grid` attribute. @@ -184,18 +211,16 @@ Basically the `load_grid` function would look something like: def load_grid(self, path=None, filename=None): # simply handles different way of inputing the data - if path is None and filename is None: - raise RuntimeError("You must provide at least one of path or file to load a powergrid.") - if path is None: - full_path = filename - elif filename is None: - full_path = path - else: - full_path = os.path.join(path, filename) - if not os.path.exists(full_path): - raise RuntimeError("There is no powergrid at \"{}\"".format(full_path)) - - # load the grid in your favorite format: + full_path = self.make_complete_path(path, filename) + + # from grid2op 1.10.0 you need to call one of + self.can_handle_more_than_2_busbar() # see doc for more information + OR + self.cannot_handle_more_than_2_busbar() # see doc for more information + # It is important you include it at the top of this method, otherwise you + # will not have access to self.n_busbar_per_sub + + # load the grid in your favorite format, located at `full_path`: self._grid = ... # the way you do that depends on the "solver" you use # and now initialize the attributes (see list bellow) @@ -233,7 +258,7 @@ Name See paragraph Type Size Description `line_ex_to_subid`_ :ref:`subid` vect, int `n_line`_ For each powerline, it gives the substation id to which its **extremity** end is connected `name_load`_ vect, str `n_load`_ (optional) name of each load on the grid [if not set, by default it will be "load_$LoadSubID_$LoadID" for example "load_1_10" if the load with id 10 is connected to substation with id 1] `name_gen`_ vect, str `n_gen`_ (optional) name of each generator on the grid [if not set, by default it will be "gen_$GenSubID_$GenID" for example "gen_2_42" if the generator with id 42 is connected to substation with id 2] -`name_line`_ vect, str `n_line`_ (optional) name of each powerline (and transformers !) on the grid [if not set, by default it will be "$SubOrID_SubExID_LineID" for example "1_4_57" if the powerline with id 57 has its origin end connected to substation with id 1 and its extremity end connected to substation with id 4] +`name_line`_ vect, str `n_line`_ (optional) name of each powerline (and transformers !) 
on the grid [if not set, by default it will be "$SubOrID_SubExID_LineID" for example "1_4_57" if the powerline with id 57 has its origin side connected to substation with id 1 and its extremity side connected to substation with id 4] `name_sub`_ vect, str `n_sub`_ (optional) name of each substation on the grid [if not set, by default it will be "sub_$SubID" for example "sub_41" for the substation with id 41] `sub_info`_ :ref:`sub-i` vect, int `n_sub`_ (can be automatically set if you don't initialize it) For each substation, it gives the number of elements connected to it ("elements" here denotes: powerline - and transformer- ends, load or generator) `dim_topo`_ :ref:`sub-i` int NA (can be automatically set if you don't initialize it) Total number of elements on the grid ("elements" here denotes: powerline - and transformer- ends, load or generator) @@ -298,7 +323,7 @@ extremely complex way to say you have to do this: Note the number for each element in the substation. In this example, for substaion with id 0 (bottom left) you decided -that the powerline with id 0 (connected at this substation at its origin end) will be the "first object of this +that the powerline with id 0 (connected at this substation at its origin side) will be the "first object of this substation". Then the "Load 0" is the second object [remember index a 0 based, so the second object has id 1], generator 0 is the third object of this substation (you can know it with the "3" near it) etc. @@ -422,12 +447,12 @@ First, have a look at substation 0: You know that, at this substation 0 there are `6` elements connected. In this example, these are: -- origin end of Line 0 +- origin side of Line 0 - Load 0 - gen 0 -- origin end of line 1 -- origin end of line 2 -- origin end of line 3 +- origin side of line 1 +- origin side of line 2 +- origin side of line 3 Given that, you can fill: @@ -452,12 +477,12 @@ You defined (in a purely arbitrary manner): So you get: -- first component of `line_or_to_sub_pos` is 0 [because "origin end of line 0" is "element 0" of this substation] +- first component of `line_or_to_sub_pos` is 0 [because "origin side of line 0" is "element 0" of this substation] - first component of `load_to_sub_pos` is 1 [because "load 0" is "element 1" of this substation] - first component of `gen_to_sub_pos` is 2 [because "gen 0" is "element 2" of this substation] -- fourth component of `line_or_to_sub_pos` is 3 [because "origin end of line 3" is "element 3" of this substation] -- third component of `line_or_to_sub_pos` is 4 [because "origin end of line 2" is "element 4" of this substation] -- second component of `line_or_to_sub_pos` is 5 [because "origin end of line 1" is "element 5" of this substation] +- fourth component of `line_or_to_sub_pos` is 3 [because "origin side of line 3" is "element 3" of this substation] +- third component of `line_or_to_sub_pos` is 4 [because "origin side of line 2" is "element 4" of this substation] +- second component of `line_or_to_sub_pos` is 5 [because "origin side of line 1" is "element 5" of this substation] This is showed in the figure below: @@ -490,12 +515,12 @@ of your implementation of `load_grid` function) .. _backend-action-create-backend: -BackendAction: modification +apply_action: underlying grid modification ---------------------------------------------- In this section we detail step by step how to understand the specific format used by grid2op to "inform" the backend on how to modify its internal state before computing a powerflow. 
-A `BackendAction` will tell the backend on what is modified among:
+A :class:`grid2op.Action._backendAction._BackendAction` will tell the backend what is modified among:

- the active value of each load (see paragraph :ref:`change-inj`)
- the reactive value of each load (see paragraph :ref:`change-inj`)

@@ -557,22 +582,22 @@ At the end, the `apply_action` function of the backend should look something lik
             ...  # the way you do that depends on the `internal representation of the grid`
         lines_or_bus = backendAction.get_lines_or_bus()
         for line_id, new_bus in lines_or_bus:
-            # modify the "busbar" of the origin end of powerline line_id
+            # modify the "busbar" of the origin side of powerline line_id
             if new_bus == -1:
-                # the origin end of powerline is disconnected in the action, disconnect it on your internal representation of the grid
+                # the origin side of powerline is disconnected in the action, disconnect it on your internal representation of the grid
                 ...  # the way you do that depends on the `internal representation of the grid`
             else:
-                # the origin end of powerline is moved to either busbar 1 (in this case `new_bus` will be `1`)
+                # the origin side of powerline is moved to either busbar 1 (in this case `new_bus` will be `1`)
                 # or to busbar 2 (in this case `new_bus` will be `2`)
                 ...  # the way you do that depends on the `internal representation of the grid`

         lines_ex_bus = backendAction.get_lines_ex_bus()
         for line_id, new_bus in lines_ex_bus:
-            # modify the "busbar" of the extremity end of powerline line_id
+            # modify the "busbar" of the extremity side of powerline line_id
             if new_bus == -1:
-                # the extremity end of powerline is disconnected in the action, disconnect it on your internal representation of the grid
+                # the extremity side of powerline is disconnected in the action, disconnect it on your internal representation of the grid
                 ...  # the way you do that depends on the `internal representation of the grid`
             else:
-                # the extremity end of powerline is moved to either busbar 1 (in this case `new_bus` will be `1`)
+                # the extremity side of powerline is moved to either busbar 1 (in this case `new_bus` will be `1`)
                 # or to busbar 2 (in this case `new_bus` will be `2`)
                 ...  # the way you do that depends on the `internal representation of the grid`

@@ -664,7 +689,8 @@ These functions can be used in the following manner:

 And of course you do the same for generators and both ends of each powerline.

-.. note:: About powerline, grid2op adopts the following convention: a powerline **cannot** be connected on one side
+.. note::
+    About powerlines, grid2op adopts the following convention: a powerline **cannot** be connected on one side
     and disconnected on the other.

     That being said, it's still possible to connect the extremity of a powerline "alone" on a busbar, which will have

@@ -672,8 +698,8 @@ And of course you do the same for generators and both ends of each powerline.

 .. _vector-orders-create-backend:

-Read back the results (flows, voltages etc.)
------------------------------------------------
+\*\*\*_infos() : Read back the results (flows, voltages etc.)
+--------------------------------------------------------------
 This last "technical" part concerns what can be referred to as "getters" from the backend. These functions allow
 one to read back the state of the grid and expose its results to grid2op in a standardized manner.
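+For example (a minimal sketch only, assuming your internal representation already exposes the relevant
+vectors; the exact signatures are given in the :class:`grid2op.Backend.Backend` documentation), a
+"getter" simply converts the solver results into numpy vectors with one entry per element:
+
+.. code-block:: python
+
+    import numpy as np
+
+    def loads_info(self):
+        # active power (MW), reactive power (MVAr) and voltage magnitude (kV) of each load,
+        # read from your internal representation of the grid (`...` are placeholders)
+        load_p = np.array(...)  # shape (n_load, )
+        load_q = np.array(...)  # shape (n_load, )
+        load_v = np.array(...)  # shape (n_load, )
+        return load_p, load_q, load_v
+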
@@ -774,7 +800,7 @@ And you do chat for all substations, giving: So in this simple example, the first element of the topology vector will represent the origin of powerline 0, the second element will represent the load 0, the 7th element (id 6, remember python index are 0 based) represent -first element of substation 1, so in this case extremity end of powerline 3, the 8th element the generator 1, etc. +first element of substation 1, so in this case extremity side of powerline 3, the 8th element the generator 1, etc. up to element with id 20 whith is the last element of the last substation, in this case extremity of powerline 7. Once you know the order, the encoding is pretty straightforward: @@ -957,10 +983,26 @@ TODO this will be explained "soon". Detailed Documentation by class ------------------------------- -.. autoclass:: grid2op.Backend.EducPandaPowerBackend.EducPandaPowerBackend +A first example of a working backend that can be easily understood (without nasty gory speed optimization) +based on pandapower is available at : + +.. autoclass:: grid2op.Backend.educPandaPowerBackend.EducPandaPowerBackend + :members: + :private-members: + :special-members: + :autosummary: + +And to understand better some key concepts, you can have a look at :class:`grid2op.Action._backendAction._BackendAction` +or the :class:`grid2op.Action._backendAction.ValueStore` class: + +.. autoclass:: grid2op.Action._backendAction._BackendAction :members: :private-members: :special-members: :autosummary: -.. include:: final.rst \ No newline at end of file +.. autoclass:: grid2op.Action._backendAction.ValueStore + :members: + :autosummary: + +.. include:: final.rst diff --git a/docs/env_content.rst b/docs/developer/env_content.rst similarity index 91% rename from docs/env_content.rst rename to docs/developer/env_content.rst index 96af7a4ef..c4351185a 100644 --- a/docs/env_content.rst +++ b/docs/developer/env_content.rst @@ -1,9 +1,9 @@ -.. |l2rpn_case14_sandbox_layout| image:: ./img/l2rpn_case14_sandbox_layout.png -.. |R2_full_grid| image:: ./img/R2_full_grid.png -.. |l2rpn_neurips_2020_track1_layout| image:: ./img/l2rpn_neurips_2020_track1_layout.png -.. |l2rpn_neurips_2020_track2_layout| image:: ./img/l2rpn_neurips_2020_track2_layout.png -.. |l2rpn_wcci_2022_layout| image:: ./img/l2rpn_wcci_2022_layout.png +.. |l2rpn_case14_sandbox_layout| image:: ../img/l2rpn_case14_sandbox_layout.png +.. |R2_full_grid| image:: ../img/R2_full_grid.png +.. |l2rpn_neurips_2020_track1_layout| image:: ../img/l2rpn_neurips_2020_track1_layout.png +.. |l2rpn_neurips_2020_track2_layout| image:: ../img/l2rpn_neurips_2020_track2_layout.png +.. |l2rpn_wcci_2022_layout| image:: ../img/l2rpn_wcci_2022_layout.png Content of an environment diff --git a/docs/developer/final.rst b/docs/developer/final.rst new file mode 100644 index 000000000..f095ba7ca --- /dev/null +++ b/docs/developer/final.rst @@ -0,0 +1,2 @@ + +.. include:: ../final.rst diff --git a/docs/dive_into_time_series.rst b/docs/dive_into_time_series.rst index acf95f813..5a5264996 100644 --- a/docs/dive_into_time_series.rst +++ b/docs/dive_into_time_series.rst @@ -5,6 +5,14 @@ Input data of an environment =================================== +This page is organized as follow: + +.. contents:: Table of Contents + :depth: 3 + +Objectives +---------------- + A grid2op "environment" is nothing more than a local folder on your computer. 
This folder consists of different things: diff --git a/docs/grid2op.rst b/docs/grid2op.rst index 1e115f329..f2e6b763b 100644 --- a/docs/grid2op.rst +++ b/docs/grid2op.rst @@ -26,7 +26,8 @@ competitions. This platform is still under development. If you notice a bug, let us know with a github issue at `Grid2Op `_ -.. note:: Grid2op do not model any object on the powergrid. It has no internal modeling of the equations of the +.. note:: + Grid2op do not model any object on the powergrid. It has no internal modeling of the equations of the grids, or what kind of solver you need to adopt. On the other hand, grid2op aims at representing the grid in a relatively "high level" point of view: it knows @@ -43,6 +44,7 @@ This platform is still under development. If you notice a bug, let us know with Objectives ----------- + The primary goal of grid2op is to model decision making process in power systems. Indeed, we believe that developing new flexibilities on the grid would make the "energy transition" an easier, less costly process. @@ -72,7 +74,8 @@ Though grid2op has been primarily developed for the L2RPN competitions series, i can also help developing and benchmarking new powerflow solvers for example. Controlling the grid --------------------- +--------------------- + Modeling all what happens in the powergrid would be an extremely difficult task. Grid2op focusing on controls that could be done today by a human (happening with **a frequency of approximately the minute**). It does not aim at simulation really high frequency control that are often automatic today. That being said, such controls @@ -107,8 +110,10 @@ Other "flexibilities" (ways to act on the grid) are coming soon (-: solver uses some physical laws to compute these "weights" from the amount of power produced / absorbed in different part of the grid where generators and loads are connected). + What is modeled in an grid2op environment ------------------------------------------ +------------------------------------------ + The simulator is able to emulate a power grid (of any size or characteristics) subject to a set of temporal injections (productions and consumptions) or maintenance / hazards for discretized time-steps (usually there is the equivalent of *5* minutes between two consective steps). @@ -216,7 +221,8 @@ Module Name Main usage ============================= ========================================================================================= Properties of this environments -------------------------------- +-------------------------------- + The grid2op environments have multiple shared properties: - highly constrained environments: these environments obey physical laws. You cannot directly choose how much @@ -398,7 +404,7 @@ If it fails between "now" and "12 steps from now" reward associated with alert w negative (this is the situation where the agent should have told the human operator "help me"). -Let's replay again (again ?) the same scenario again: same attack, same everything: +Let's replay again (again ?) the same scenario: same attack, same everything: .. code-block:: python @@ -447,6 +453,7 @@ alert (when the attack is happening) Disclaimer ----------- + Grid2op is a research testbed platform, it has not been tested in "production" context Going further @@ -456,5 +463,8 @@ that are available, without any installation thanks to `Binder `_ . Feel free to visit the "getting_started" page for more information and a detailed tour about the issue that grid2op tries to address. -.. 
note:: As of writing (december 2020) most of these notebooks focus on the "agent" part of grid2op. We would welcome +.. note:: + As of writing (december 2020) most of these notebooks focus on the "agent" part of grid2op. We would welcome any contribution to better explain the other aspect of this platform. + +.. include:: final.rst diff --git a/docs/grid2op_dev.rst b/docs/grid2op_dev.rst new file mode 100644 index 000000000..6beedb0dc --- /dev/null +++ b/docs/grid2op_dev.rst @@ -0,0 +1,7 @@ +.. toctree:: + :maxdepth: 1 + + grid2op_dev/action + grid2op_dev/observation + +.. include:: final.rst diff --git a/docs/grid2op_dev/action.rst b/docs/grid2op_dev/action.rst new file mode 100644 index 000000000..67cfd9193 --- /dev/null +++ b/docs/grid2op_dev/action.rst @@ -0,0 +1,7 @@ +How to add a new type of action +=================================== + +Work in progress ! + + +.. include:: final.rst \ No newline at end of file diff --git a/docs/grid2op_dev/final.rst b/docs/grid2op_dev/final.rst new file mode 100644 index 000000000..f095ba7ca --- /dev/null +++ b/docs/grid2op_dev/final.rst @@ -0,0 +1,2 @@ + +.. include:: ../final.rst diff --git a/docs/grid2op_dev/observation.rst b/docs/grid2op_dev/observation.rst new file mode 100644 index 000000000..989fce42b --- /dev/null +++ b/docs/grid2op_dev/observation.rst @@ -0,0 +1,7 @@ +How to add a new attribute to the observation +============================================== + +Work in progress ! + + +.. include:: final.rst \ No newline at end of file diff --git a/docs/grid_graph.rst b/docs/grid_graph.rst index 8d2834cfa..c9733b2cc 100644 --- a/docs/grid_graph.rst +++ b/docs/grid_graph.rst @@ -10,6 +10,15 @@ A grid, a graph: grid2op representation of the powergrid =================================================================== + +This page is organized as follow: + +.. contents:: Table of Contents + :depth: 3 + +Objectives +---------------- + In this section of the documentation, we will dive a deeper into the "modeling" on which grid2op is based and especially how the underlying graph of the powergrid is represented and how it can be easily retrieved. @@ -23,10 +32,6 @@ First, we detail some concepts from the power system community in section :ref:`graph-encoding-gridgraph`. Finally, we show some code examples on how to retrieve this graph in section :ref:`get-the-graph-gridgraph`. - -.. contents:: Table of Contents - :depth: 3 - .. _powersystem-desc-gridgraph: Description of a powergrid adopting the "energy graph" representation @@ -321,11 +326,13 @@ To know what element of the grid is the "42nd", you can: case the extremity side of powerline `line_id`. 2) look at the table :attr:`grid2op.Space.GridObjects.grid_objects_types` and especially the line 42 so `env.grid_objects_types[42,:]` which contains this information as well. Each column of this table encodes - for one type of element (first column is substation, second is load, then generator, then origin end of - powerline then extremity end of powerline and finally storage unit. Each will have "-1" if the element + for one type of element (first column is substation, second is load, then generator, then origin side of + powerline then extremity side of powerline and finally storage unit. Each will have "-1" if the element is not of that type, and otherwise and id > 0. Taking the same example as for the above bullet point! 
`env.grid_objects_types[42,:] = [sub_id, -1, -1, -1, line_id, -1]` meaning the "42nd" element of the grid - if the extremity end (because it's the 5th column) of id `line_id` (the other element being marked as "-1"). + if the extremity side (because it's the 5th column) of id `line_id` (the other element being marked as "-1"). +3) refer to the :func:`grid2op.Space.GridObject.topo_vect_element` for an "easier" way to retrieve information + about this element. .. note:: As of a few versions of grid2op, if you are interested at the busbar to which (say) load 5 is connected, then Instead @@ -363,15 +370,15 @@ Type of graph described in grid2op method And their respective properties: -======================== ================ ======================== ===================== -Type of graph always same size encode all observation has flow information -======================== ================ ======================== ===================== -"energy graph" no almost yes -"elements graph" yes for nodes yes yes -"connectivity graph" yes no no -"bus connectivity graph" no no no -"flow bus graph" no no yes -======================== ================ ======================== ===================== +======================== =================== ==================== ======================= ===================== +Type of graph same number of node same number of edges encode all observation has flow information +======================== =================== ==================== ======================= ===================== +"energy graph" no no almost yes +"elements graph" yes no yes yes +"connectivity graph" yes no no no +"bus connectivity graph" no no no no +"flow bus graph" no no no yes +======================== =================== ==================== ======================= ===================== .. _graph1-gg: @@ -505,7 +512,7 @@ the two red powerlines, another where there are the two green) .. note:: On this example, for this visualization, lots of elements of the grid are not displayed. This is the case - for the load, generator and storage units for example. + for the loads, generators and storage units for example. For an easier to read representation, feel free to consult the :ref:`grid2op-plot-module` @@ -516,10 +523,10 @@ Graph2: the "elements graph" As opposed to the previous graph, this one has a fixed number of **nodes**: each nodes will represent an "element" of the powergrid. In this graph, there is -`n_sub` nodes each representing a substation and `2 * n_sub` nodes, each +`n_sub` nodes each representing a substation and `env.n_busbar_per_sub * n_sub` nodes, each representing a "busbar" and `n_load` nodes each representing a load etc. In total, there is then: -`n_sub + 2*n_sub + n_load + n_gen + n_line + n_storage + n_shunt` nodes. +`n_sub + env.n_busbar_per_sub*n_sub + n_load + n_gen + n_line + n_storage + n_shunt` nodes. Depending on its type, a node can have different properties. @@ -619,15 +626,16 @@ There are no outgoing edges from substation. Bus properties +++++++++++++++++++++++ -The next `2 * n_sub` nodes of the "elements graph" represent the "buses" of the grid. They have the attributes: +The next `env.n_busbar_per_sub * n_sub` nodes of the "elements graph" represent the "buses" of the grid. 
They have the attributes: -- `id`: which bus does this node represent (global id: `0 <= id < 2*env.n_sub`) +- `id`: which bus does this node represent (global id: `0 <= id < env.n_busbar_per_sub*env.n_sub`) - `global_id`: same as "id" -- `local_id`: which bus (in the substation) does this busbar represents (local id: `1 <= local_id <= 2`) +- `local_id`: which bus (in the substation) does this busbar represents (local id: `1 <= local_id <= env.n_busbar_per_sub`) - `type`: always "bus" - `connected`: whether or not this bus is "connected" to the grid. - `v`: the voltage magnitude of this bus (in kV, optional only when the bus is connected) -- `theta`: the voltage angle of this bus (in deg, optional only when the bus is connected) +- `theta`: the voltage angle of this bus (in deg, optional only when the bus is connected and + if the backend supports it) The outgoing edges from the nodes representing buses tells at which substation this bus is connected. These edges are "fixed": if they are present (meaning the bus is connected) they always connect the bus to the same substation. They have only @@ -645,7 +653,14 @@ The next `n_load` nodes of the "elements graph" represent the "loads" of the gri - `id`: which load does this node represent (between 0 and `n_load - 1`) - `type`: always "loads" - `name`: the name of this load (equal to `obs.name_load[id]`) -- `connected`: whether or not this load is connected to the grid. +- `connected`: whether or not this load is connected to the grid +- `local_bus`: (from version 1.9.9) the id (local, so between `1, 2, ..., obs.n_busbar_per_sub`) + of the bus to which this load is connected +- `global_bus`: (from version 1.9.9) the id (global, so between `0, 1, ..., obs.n_busbar_per_sub * obs.n_sub`) + of the bus to which this load is connected +- `bus_node_id`: (from version 1.9.9) the id of the node of this graph representing the bus to which the + load is connected. This means that if the load is connected, then (node_load_id, bus_node_id) is the + outgoing edge in this graph. The outgoing edges from the nodes representing loads tell at which bus this load is connected (for each load, there is only one outgoing edge). They have attributes: @@ -676,6 +691,13 @@ The next `n_gen` nodes of the "elements graph" represent the "generators" of the - `curtailment_limit`: same as `obs.curtailment_limit[id]`, see :attr:`grid2op.Observation.BaseObservation.curtailment_limit` - `gen_margin_up`: same as `obs.gen_margin_up[id]`, see :attr:`grid2op.Observation.BaseObservation.gen_margin_up` - `gen_margin_down`: same as `obs.gen_margin_down[id]`, see :attr:`grid2op.Observation.BaseObservation.gen_margin_down` +- `local_bus`: (from version 1.9.9) the id (local, so between `1, 2, ..., obs.n_busbar_per_sub`) + of the bus to which this generator is connected +- `global_bus`: (from version 1.9.9) the id (global, so between `0, 1, ..., obs.n_busbar_per_sub * obs.n_sub`) + of the bus to which this generator is connected +- `bus_node_id`: (from version 1.9.9) the id of the node of this graph representing the bus to which the + generator is connected. This means that if the generator is connected, then (node_gen_id, bus_node_id) is the + outgoing edge in this graph. The outgoing edges from the nodes representing generators tell at which bus this generator is connected (for each generator, there is only one outgoing edge). They have attributes: @@ -740,6 +762,14 @@ The next `n_storage` nodes represent the storage units. 
They have attributes:

- `connected`: whether or not this storage unit is connected to the grid
- `storage_charge`: same as `obs.storage_charge[id]`, see :attr:`grid2op.Observation.BaseObservation.storage_charge`
- `storage_power_target`: same as `obs.storage_power_target[id]`, see :attr:`grid2op.Observation.BaseObservation.storage_power_target`
+- `local_bus`: (from version 1.9.9) the id (local, so between `1, 2, ..., obs.n_busbar_per_sub`)
+  of the bus to which this storage unit is connected
+- `global_bus`: (from version 1.9.9) the id (global, so between `0, 1, ..., obs.n_busbar_per_sub * obs.n_sub`)
+  of the bus to which this storage unit is connected
+- `bus_node_id`: (from version 1.9.9) the id of the node of this graph representing the bus to which the
+  storage unit is connected. This means that if the storage unit is connected,
+  then (node_storage_id, bus_node_id) is the
+  outgoing edge in this graph.

The outgoing edges from the nodes representing storage units tell at which bus this storage unit is connected
(for each storage unit, there is only one outgoing edge). They have attributes:

@@ -759,6 +789,14 @@ The next `n_shunt` nodes represent the shunts. They have attributes:

- `type`: always "shunt"
- `name`: the name of this shunt (equal to `obs.name_shunt[id]`)
- `connected`: whether or not this shunt is connected to the grid
+- `local_bus`: (from version 1.9.9) the id (local, so between `1, 2, ..., obs.n_busbar_per_sub`)
+  of the bus to which this shunt is connected
+- `global_bus`: (from version 1.9.9) the id (global, so between `0, 1, ..., obs.n_busbar_per_sub * obs.n_sub`)
+  of the bus to which this shunt is connected
+- `bus_node_id`: (from version 1.9.9) the id of the node of this graph representing the bus to which the
+  shunt is connected. This means that if the shunt is connected,
+  then (node_shunt_id, bus_node_id) is the
+  outgoing edge in this graph.

The outgoing edges from the nodes representing shunts tell at which bus this shunt is connected (for each shunt,
there is only one outgoing edge). They have attributes:

@@ -773,9 +811,25 @@ there is only one outgoing edge). They have attributes:

 Graph3: the "connectivity graph"
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-TODO: Work in progress, any help welcome
+This graph is represented by a matrix (numpy 2d array or scipy sparse matrix) of
+floating point numbers: `0.` means there is no connection between the two elements and `1.` means
+they are connected.
+
+Each row / column of the matrix represents an element modeled in the `topo_vect` vector. To know
+more about the element represented by a given row / column, you can have a look at the
+:func:`grid2op.Space.GridObjects.topo_vect_element` function.

-In the mean time, some documentation are available at :func:`grid2op.Observation.BaseObservation.connectivity_matrix`
+In short, this graph tells whether "this object" and "this other object" are connected
+together: either they are the two sides of the same powerline or they are connected to the same bus
+of the grid.
+
+In other words the `nodes` of this graph are the elements of the grid (sides of lines, loads, generators and
+storage units) and the `edges` of this non oriented (undirected / symmetrical), non weighted graph represent
+the connectivity of the grid.
+
+It has a fixed number of nodes (the number of elements is fixed) but the number of edges can vary.
+
+You can consult the documentation of :func:`grid2op.Observation.BaseObservation.connectivity_matrix`
+for complementary information and some examples on how to retrieve this graph.

.. note::

@@ -788,9 +842,24 @@ In the mean time, some documentation are available at :func:`grid2op.Observation

 Graph4: the "bus connectivity graph"
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-TODO: Work in progress, any help welcome

-In the mean time, some documentation are available at :func:`grid2op.Observation.BaseObservation.bus_connectivity_matrix`
+This graph is represented by a matrix (numpy 2d array or scipy sparse matrix) of
+floating point numbers: `0.` means there is no connection between the two buses and `1.` means there is one.
+
+As opposed to the previous "graph", the rows / columns of this matrix have as many elements as the number of
+independent buses on the grid. An entry is 0. if no powerline connects the two buses
+and 1. if at least one powerline connects these two buses.
+
+In other words the `nodes` of this graph are the buses of the grid
+and the `edges` of this non oriented (undirected / symmetrical), non weighted graph represent the presence
+of a powerline connecting two buses (basically whether there is a line with one of its sides connected to one
+of the buses and the other side connected to the other).
+
+It has a variable number of nodes and edges. In case of game over we chose to represent this graph as
+a graph with 1 node and 0 edges.
+
+You can consult the documentation of :func:`grid2op.Observation.BaseObservation.bus_connectivity_matrix`
+for complementary information and some examples on how to retrieve this graph.

.. note::

@@ -804,9 +873,25 @@ In the mean time, some documentation are available at :func:`grid2op.Observation

 Graph5: the "flow bus graph"
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-TODO: Work in progress, any help welcome
+This graph is also represented by a matrix (numpy 2d array or scipy sparse matrix) of floats. It is quite similar
+to the graph described in :ref:`graph4-gg`. The main difference is that instead of simply giving
+information about connectivity (0. or 1.) this one gives information about flows
+(either active flows or reactive flows).
+
+It is a directed graph (the matrix is not symmetric) and it has weights. The weight associated with each node
+(representing a bus) is the power (in MW for active or MVAr for reactive) injected at this bus
+(generator convention: if the power is positive, power is injected at this bus). The weight associated
+with each edge going from `i` to `j` is the sum of the active (or reactive) power of all
+the lines connecting bus `i` to bus `j`.
+
+It has a variable number of nodes and edges. In case of game over we chose to represent this graph as
+a graph with 1 node and 0 edges.
+
+You can consult the documentation of :func:`grid2op.Observation.BaseObservation.flow_bus_matrix`
+for complementary information and some examples on how to retrieve this graph.
+
+It is a simplified version of the :ref:`graph1-gg` described previously.

-In the mean time, some documentation are available at :func:`grid2op.Observation.BaseObservation.flow_bus_matrix`

.. note::

diff --git a/docs/gym.rst b/docs/gym.rst
index 06fe365f7..02e47d796 100644
--- a/docs/gym.rst
+++ b/docs/gym.rst
@@ -504,37 +504,7 @@ This is because grid2op will (to save computation time) generate some classes (t
 fly, once the environment is loaded.

 And unfortunately, pickle module is not always able to process these (meta) data.

-Try to first create (automatically!) the files containing the description of the classes
-used by your environment (for example):

-..
code-block:: python - - from grid2op import make - from grid2op.Reward import RedispReward - from lightsim2grid import LightSimBackend - - env_name = 'l2rpn_wcci_2022' - backend_class = LightSimBackend - env = make(env_name, reward_class=RedispReward, backend=backend_class()) - env.generate_classes() - -.. note:: - This piece of code is to do once (each time you change the backend or the env name) - -And then proceed as usual by loading the grid2op environment -with the key-word `experimental_read_from_local_dir` - -.. code-block:: python - - from grid2op import make - from grid2op.Reward import RedispReward - from lightsim2grid import LightSimBackend - - env_name = 'l2rpn_wcci_2022' - backend_class = LightSimBackend - env = make(env_name, reward_class=RedispReward, backend=backend_class(), - experimental_read_from_local_dir=True) - # do whatever +You can solve this issue by look at :ref:`troubleshoot_pickle` section of the documentation. Observation XXX outside given space YYY **************************************** @@ -560,4 +530,4 @@ Detailed Documentation by class :members: :autosummary: -.. include:: final.rst \ No newline at end of file +.. include:: final.rst diff --git a/docs/index.rst b/docs/index.rst index 751b37b11..46c6a4dd5 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -6,81 +6,82 @@ .. |episode_example| image:: ./img/grid2op_action.jpg =============================================== -Welcome to Grid2Op's technical documentation! +Welcome to Grid2Op's documentation =============================================== Grid2Op is a pythonic, easy to use framework, to be able to develop, train or evaluate performances of "agent" or -"controller" that acts on a powergrid in different ways. +"controller" that acts on a powergrid. -It is modular and can be use to train reinforcement learning agent or to assess the performance of optimal control -algorithm. +It is modular and can be use to train reinforcement learning agent or to assess the performance of any kind of +agent controlling powergrids (heuristic, machine learning, optimization, mix of everything etc.) -It is flexible and allows the power flow to be computed by the algorithm of your choice. It abstracts the modification -of a powergrid and use this abstraction to compute the **cascading failures** resulting from powerlines disconnection -for example. - -**Features** - - - abstract the computation of the "cascading failures" - - ability to have the same code running with multiple powerflows - - parallel execution of one agent / controller on multiple independent scenarios (multiprocessing) - - fully customisable: this software has been built to be fully customizable to serve different - purposes and not only reinforcement learning, or the L2RPN competition. +It is highly flexible and can be modified in different ways. Grid2Op philosophy -------------------- Grid2Op is a python module that aims to make easier the research on sequential decision making applied to power systems. -This package adopt the "reinforcement learning" point of view and is compatible with the openAI gym programming -interface (see section :ref:`openai-gym` for more information). +This package adopt the "sequential decision making" point of view, for example suited +for training and evaluation "reinforcement learning" agents. 
-Applied to power system, the "reinforcement learning" framework ask: +It is made of 4 main blocks: -- a "controller" (named Agent) to take an "action" on the powergrid (for example for L2RPN competitions in 2019 - and 2020 these actions consist in modifying the connectivity of the powergrid). -- the "environment" (*a.k.a* the "real world") applies that action on the powergrid, applies some other modifications - and return the next state. +- a module that will embed all the "external" / "exogenous" data, called the "time series" (formelly call "chronics"). + This module for example contain the variation of each and generators. +- a module that will compute the "powerflows", called "backend". It is important to note that grid2op itself + assumes nothing on the powergrid. And in theory you can use any solver that you want to compute the state of + the grid (static / steady state or dynamic / transient or DC modeling or AC modeling etc. with trafo being modled in `T` or in `Pi`) +- a module that "takes decision", called "action" on the grid based on the current grid state, called "observation" and + possible future forecasted grid states +- a module that wrap all the above together and implements a few other feature such as making sure provided actions are "legal" + (meet certain rules) or even emulating (if the module that compute the grid states does not do it) the behaviour of some + "protections". -The goal of grid2op is to model "sequential decision making" that could be made by human operators, for example -changing the configuration of some "substations" as demonstrate in the figure below: +The goal of grid2op is to model "sequential decision making" that could be made by +human operators, for example changing the configuration of some "substations" +as demonstrate in the figure below: |episode_example| -Any kind of "controller" can be implemented using this framework even though it has been inspired by the -"reinforcement learning" community. You can implement some heuristic "controllers" (some examples are available in the -:ref:`agent-module` module description), "controllers" that comes from the Optimization community -(for example "Optimal Power Flow") or -"Model Predictive Control". One of the goal of Grid2Op is to allow everyone to contribute to closing the gap -between all these research communities. +.. note:: + Any kind of "controller" can be implemented using this framework even though it has been inspired by the + "reinforcement learning" community. You can implement some heuristic "controllers" (some examples are available in the + :ref:`agent-module` module description), "controllers" that comes from the Optimization community + (for example "Optimal Power Flow") or + "Model Predictive Control". One of the goal of Grid2Op is to allow everyone to contribute to closing the gap + between all these research communities. + +.. note:: + Consecutive steps are "correlated" in the sense that the action taken + at time `t` is part of the process that defines the state observed at + step `t+1`. More information on this is given in the + :ref:`mdp-doc-module` for example. + Main module content --------------------- +This is where you can go if you want some quick introduction about grid2op +or overall view of what is happing when you "run" a scenario using in grid2op. + .. toctree:: - :maxdepth: 2 - :caption: Quickstart + :maxdepth: 1 + :caption: Overview quickstart grid2op -Environments ---------------- -.. 
toctree:: - :maxdepth: 2 - :caption: Focus on an "environment" - - available_envs - makeenv - env_content - create_an_environment - dive_into_time_series - data_pipeline - Usage examples --------------------- + +On this part of the documentation we focus on some usage of grid2op in different +context, for example using optimization or when "wrapping" grid2op into +a gymnsium compatible environment (with only subset of grid2op capabilities) +to ease training of reinforcement learning agents. + .. toctree:: - :maxdepth: 2 - :caption: Learn by Example + :maxdepth: 1 + :caption: Learn with examples optimization gym @@ -90,48 +91,144 @@ Usage examples Modeling ---------- +This part of the documentation focuses on the different +"model" in grid2op. You can find the formal definition +(or at least an attempt at such) for the "Markov Decision Process" +(a mathematical framework used to model sequential decisions making) and +the how the elements accessible in the observation or modifiable in +the action of the agent are represented. + +You can also find some discussion about the topology of the grid (one +of the focus of grid2op) and the representation of the grid as a +graph. + .. toctree:: - :maxdepth: 2 + :maxdepth: 1 :caption: Models + mdp modeled_elements grid_graph + topology + detailed_topology + +Environments +--------------- + +Here we try to explain rapidly how to load pre existing environment and how some +customization can make grid2op faster (depending on the context) + +.. toctree:: + :maxdepth: 1 + :caption: Focus on an "environment" + + available_envs + makeenv + dive_into_time_series + data_pipeline + troubleshoot Plotting capabilities ---------------------- +Some plotting capabilities of grid2op. + +.. warning:: + This has not been updated + for a long time and is maybe not up to date. + .. toctree:: - :maxdepth: 2 + :maxdepth: 1 :caption: Plot plot -Technical Documentation ----------------------------- +Technical documentation for grid2op users +------------------------------------------- + +This part of the documentation is dedicated to grid2op users. It +covers detailed description of all the modules, classes and their main method +that you, as a user, can use in grid2op. + +The documentation there is mainly descirptive. It explains what is done but +avoid (in general) getting in "too much gory details" on how these +things are done. + +As a starting point, we suggest you first look here before diving more +deeply into the other section of the documentation. .. toctree:: - :maxdepth: 2 - :caption: Technical Documentation - - action - agent - backend - chronics - converter - createbackend - environment - episode - exception - observation - opponent - parameters - reward - rules - runner - simulator - space - timeserie_handlers - utils - voltagecontroler + :maxdepth: 1 + :caption: Technical documentation for grid2op users + + user + + +Technical documentation for grid2op "external" contributions +---------------------------------------------------------------- + +This part of the documentation is focued on external contribution. +It is best suited if you want to use grid2op as a "core" and extend / modify +it with different elements. 
+ +For example, you might want to : + +- use a different solver to compute powerflows + (called :class:`grid2op.Backend.Backend` in grid2op) +- create a new environment +- load time series from a different format than the grid2op default csv +- have an opponent that act differently than the provided ones +- evaluate the performance of the agent differently (change the reward / score function) +- use a different way to control the voltages +- etc. + +The main focuse of these pages of the documentation is put on the +interface and still avoid getting into too much detail on how things +are done internally whenever possible. + +This is the type of documentation you should be looking at if the +current grid2op modelling statisfies you in its vast majority +but if you want to slightly modify one of its component. + +.. note:: + This type of contribution can be developed and hosted in a different + github repository than grid2op (*eg* lightsim2grid, another faster backend + is hosted on https://github.com/bdonnot/lightsim2grid.git) + + Feel free to contact us if you have done such an "external contribution" so + that we can at least reference it in the documentation. + +.. toctree:: + :maxdepth: 1 + :caption: Technical documentation for grid2op "external" contributions + + developer + + +Technical documentation for grid2op developers +------------------------------------------------- + +This part of the documentation also focuses on external contribution. It +focuses on the core of grid2op. If you want to : + +- change the grid2op internal representation +- add a functionality to grid2op (*eg* a new type of actions or a new attribute to the observation) +- change the representatino of this or this elements +- etc. + +We encourage you to get in touch with us for such development. + +.. note:: + Most of the work falling into this category should probably be + integrated into the main grid2op repository. + +.. warning:: + DOC IN PROGRESS... + +.. toctree:: + :maxdepth: 1 + :caption: Technical documentation for grid2op developers + + grid2op_dev .. include:: final.rst diff --git a/docs/makeenv.rst b/docs/makeenv.rst index 8fb895cb4..55184f7a7 100644 --- a/docs/makeenv.rst +++ b/docs/makeenv.rst @@ -80,11 +80,13 @@ It has the following behavior: it will be used (see section :ref:`usage`) 2) if you specify the name of an environment that you have already downloaded, it will use this environment (NB currently no checks are implemented if the environment has been updated remotely, which can happen if - we realize there were some issues with it.) + we realize there were some issues with it.). If you want to update the environments you downloaded + please use :func:`grid2op.update_env()` 3) you are expected to provide an environment name (if you don't know what this is just put `"l2rpn_case14_sandbox"`) 4) if the flag `test` is set to ``False`` (default behaviour) and none of the above conditions are met, the :func:`make` will download the data of this environment locally the first time it is called. If you don't want - to download anything then you can pass the flag ``test=True`` + to download anything then you can pass the flag ``test=True`` (in this case only a small sample of + time series will be available. We don't recommend to do that at all !) 5) if ``test=True`` (NON default behaviour) nothing will be loaded, and the :func:`make` will attempt to use a pre defined environment provided with the python package. 
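+As an illustration of the behaviours above (a short sketch; "l2rpn_case14_sandbox" is the small
+example environment already mentioned in this section):
+
+.. code-block:: python
+
+    import grid2op
+
+    # downloads the dataset the first time it is called, then reuses the local copy
+    env = grid2op.make("l2rpn_case14_sandbox")
+
+    # uses only the small sample of time series shipped with the package (not suited for training)
+    env_test = grid2op.make("l2rpn_case14_sandbox", test=True)
+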
We want to emphasize that because the environments provided with this package contains only little data, they are not suitable for leaning a consistent agent / controler. That @@ -134,11 +136,16 @@ context of the L2RPN competition, we don't recommend to modify them. - `dataset_path`: used to specify the name (or the path) of the environment you want to load - `backend`: a initialized backend that will carry out the computation related to power system [mainly use if you want - to change from PandapowerBackend (default) to a different one *eg* LightSim2Grid) -- `reward_class`: change the type of reward you want to use for your agent -- `other_reward`: tell "env.step" to return addition "rewards" + to change from PandapowerBackend (default) to a different one *eg* LightSim2Grid] +- `reward_class`: change the type of reward you want to use for your agent (see section + :ref:`reward-module` for more information). +- `other_reward`: tell "env.step" to return addition "rewards"(see section + :ref:`reward-module` for more information). - `difficulty`, `param`: control the difficulty level of the game (might not always be available) -- `chronics_class`, `data_feeding_kwargs`: further customization to how the data will be generated +- `chronics_class`, `data_feeding_kwargs`: further customization to how the data will be generated, + see section :ref:`environment-module-data-pipeline` for more information +- `n_busbar`: (``int``, default 2) [new in version 1.9.9] see section :ref:`substation-mod-el` + for more information - \* `chronics_path`, `data_feeding`, : to overload default path for the data (**not recommended**) - \* `action_class`: which action class your agent is allowed to use (**not recommended**). - \* `gamerules_class`: the rules that are checked to declare an action legal / illegal (**not recommended**) diff --git a/docs/mdp.rst b/docs/mdp.rst new file mode 100644 index 000000000..c889287ed --- /dev/null +++ b/docs/mdp.rst @@ -0,0 +1,865 @@ +.. for the color +.. include:: special.rst + +.. for the observation attributes +.. include:: user/special.rst + +.. _mdp-doc-module: + +Dive into grid2op sequential decision process +=============================================== + +This page is organized as follow: + +.. contents:: Table of Contents + :depth: 3 + +Objectives +----------- + +The goal of this page of the documentation is to provide you with a relatively extensive description of the +mathematical model behind grid2op. + +Grid2op is a software whose aim is to make experiments on powergrid, mainly sequential decision making, +as easy as possible. + +We chose to model this sequential decision making probleme as a +"*Markov Decision Process*" (MDP) and one some cases +"*Partially Observable Markov Decision Process*" (POMDP) or +"*Constrainted Markov Decision Process*" (CMDP) and (work in progress) even +"*Decentralized (Partially Observable) Markov Decision Process*" (Dec-(PO)MDP). + +General notations +~~~~~~~~~~~~~~~~~~~~ + +There are different ways to define an MDP. In this paragraph we introduce the notations that we will use. + +In an MDP an "agent" / "automaton" / "algorithm" / "policy" takes some action :math:`a_t \in \mathcal{A}`. This +action is processed by the environment and update its internal state from :math:`s_t \in \mathcal{S}` +to :math:`s_{t+1} \in \mathcal{S}` and +computes a so-called *reward* :math:`r_{t+1} \in [0, 1]`. + +.. 
note:: + By stating the dynamic of the environment this way, we ensure the "*Markovian*" property: the + state :math:`s_{t+1}` is determined by the knowledge of the previous state :math:`s_{t}` and the + action :math:`a_{t}` + +This tuple +:math:`(s_t, r_t)` is then given to the "agent" / "automaton" / "algorithm" which in turns produce the action :math:`a_{t+1}` + +.. note:: + More formally even, everything written can be stochastic: + + - :math:`a_t \sim \pi_{\theta}(s_t)` where :math:`\pi_{\theta}(\cdot)` is the "policy" parametrized by + some parameters :math:`\theta` that outputs here a probability distribution (depending on the + state of the environment :math:`s_t`) over all the actions `\mathcal{A}` + - :math:`s_{t+1} \sim \mathcal{L}_S(s_t, a_t)` where :math:`\mathcal{L}_S(s_t, a_t)` is a probability distribution + over :math:`\mathcal{S}` representing the likelyhood if the "next state" given the current state and the action + of the "policy" + - :math:`r_{t+1} \sim \mathcal{L}_R(s_t, s_{t+1}, a_t)` is the reward function indicating "how good" + was the transition from :math:`s_{t}` to :math:`s_{t+1}` by taking action :math:`a_t` + + +This alternation :math:`\dots \to a \to (s, r) \to a \to \dots` is done for a certain number of "steps" called :math:`T`. + +We will call the list :math:`s_{1} \to a_1 \to (s_2, r_2) \to \dots \to a_{T-1} \to (s_{T}, r_T)` +an "**episode**". + +Formally the knowledge of: + +- :math:`\mathcal{S}`, the "state space" +- :math:`\mathcal{A}`, the "action space" +- :math:`\mathcal{L}_s(s, a)`, sometimes called "transition kernel", is the probability + distribution (over :math:`\mathcal{S}`) that gives the next + state after taking action :math:`a` in state :math:`s` +- :math:`\mathcal{L}_r(s, s', a)`, sometimes called "reward kernel", + is the probability distribution (over :math:`[0, 1]`) that gives + the reward :math:`r` after taking action :math:`a` in state :math:`s` which lead to state :math:`s'` +- :math:`T \in \mathbb{N}^*` the maximum number of steps for an episode + +Defines a MDP. We will detail all of them in the section :ref:`mdp-def` bellow. + +In grid2op, there is a special case where a grid state cannot be computed (either due to some physical infeasibilities +or because the resulting state would be irrealistic). This can be modeled relatively easily in the MDP formulation +above if we add a "terminal state" :math:`s_{\emptyset}` in the state space :math:`\mathcal{S}_{new} := \mathcal{S} \cup \left\{ s_{\emptyset} \right\}`: and add the transitions: +:math:`\mathcal{L}_s(s_{\emptyset}, a) = \text{Dirac}(s_{\emptyset}) \forall a \in \mathcal{A}` +stating that once the agent lands in this "terminal state" then the game is over, it stays there until the +end of the scenario. + +We can also define the reward kernel in this state, for example with +:math:`\mathcal{L}_r(s_{\emptyset}, s', a) = \text{Dirac}(0) \forall s' \in \mathcal{S}, a \in \mathcal{A}` and +:math:`\mathcal{L}_r(s, s_{\emptyset}, a) = \text{Dirac}(0) \forall s \in \mathcal{S}, a \in \mathcal{A}` which +states that there is nothing to be gained in being in this terminal set. + +Unless specified otherwise, we will not enter these details in the following explanation and take it as +"pre requisite" as it can be defined in general. We will focus on the definition of :math:`\mathcal{S}`, +:math:`\mathcal{A}`, :math:`\mathcal{L}_s(s, a)` and :math:`\mathcal{L}_r(s, s', a)` by leaving out the +"terminal state". + +.. 
note:: + In grid2op implementation, this "terminal state" is not directly implemented. Instead, the first Observation leading + to this state is marked as "done" (flag `obs.done` is set to `True`). + + No other "observation" will be given by + grid2op after an observation with `obs.done` set to `True` and the environment needs to be "reset". + + This is consistent with the gymnasium implementation. + +The main goal of a finite horizon MDP is then to find a policy :math:`\pi \in \Pi` that given states :math:`s` and reward :math:`r` +output an action :math:`a` such that (*NB* here :math:`\Pi` denotes the set of all considered policies for this +MDP): + +.. math:: + :nowrap: + + \begin{align*} + \min_{\pi \in \Pi} ~& \sum_{t=1}^T \mathbb{E} \left( r_t \right) \\ + \text{s.t.} ~ \\ + & \forall t, a_t \sim \pi (s_{t}) & \text{policy produces the action} \\ + & \forall t, s_{t+1} \sim \mathcal{L}_S(s_t, a_t) & \text{environment produces next state} \\ + & \forall t, r_{t+1} \sim \mathcal{L}_r(s_t, a_t, s_{t+1}) & \text{environment produces next reward} \\ + \end{align*} + +Specific notations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To define "the" MDP modeled by grid2op, we also need to define some other concepts that will be used to define the +state space :math:`\mathcal{S}` or transition kernel :math:`\mathcal{L}_s(s, a)` for example. + +A Simulator +++++++++++++ + +We need a so called "simulator". + +Informatically, this is represented by the `Backend` inside the grid2op environment (more information +about the `Backend` is detailed in the :ref:`backend-module` section of the documentation). + +This simulator is able to compute some informations that are part of the state +space :math:`\mathcal{S}` (*eg* flows on powerlines, active production value of generators etc.) +and thus are used in the computation of the transition kernel. + +We can model this simulator with a function :math:`\text{Sim}` that takes as input some data from an +"input space" :math:`\mathcal{S}_{\text{im}}^{(\text{in})}` and result +in data in :math:`\mathcal{S}_{\text{im}}^{(\text{out})}`. + +.. note:: + In grid2op we don't force the "shape" of :math:`\mathcal{S}_{\text{im}}^{(\text{in})}`, including + the format used to read the grid file from the hard drive, the solved equations, the way + these equations are used. Everything here is "free" and grid2op only needs that the simulator + (wrapped in a `Backend`) understands the "format" sent by grid2op (through a + :class:`grid2op.Action._backendAction._BackendAction`) and is able to expose + to grid2op some of its internal variables (accessed with the `***_infos()` methods of the backend) + + +TODO do I emphasize that the simulator also contains the grid iteself ? + +To make a parallel with similar concepts "simulator", +represents the physics as in all `"mujoco" environments `_ +*eg* `Ant `_ or +`Inverted Pendulum `_ . This is the same concept +here excepts that it solves powerflows. + +Some Time Series ++++++++++++++++++ + +Another type of data that we need to define "the" grid2op MDP is the "time series", implemented in the `chronics` +grid2op module documented on the page +:ref:`time-series-module` with some complements given in the :ref:`doc_timeseries` page as well. + +These time series define what exactly would happen if the grid was a +"copper plate" without any constraints. 
Said differently it provides what would each consumer +consume and what would each producer produce if they could all be connected together with +infinite "bandwith", without any constraints on the powerline etc. + +In particular, grid2op supposes that these "time series" are balanced, in the sense that the producers +produce just the right amount (electrical power cannot really be stocked) for the consumer to consume +and that for each steps. It also supposes that all the "constraints" of the producers. + +These time series are typically generated outside of grid2op, for example using `chronix2grid `_ +python package (or anything else). + + +Formally, we will define these time series as input :math:`\mathcal{X}_t` all these time series at time :math:`t`. These +exogenous data consist of : + +- generator active production (in MW), for each generator +- load active power consumption (in MW), for each loads +- load reactive consumption (in MVAr), for each loads +- \* generator voltage setpoint / target (in kV) + +.. note:: + \* for this last part, this can be adapted "on demand" by the environment through the `voltage controler` module. + But for the sake of modeling, this can be modeled as being external / exogenous data. + +And, to make a parrallel with similar concept in other RL environment, these "time series" can represent the layout of the maze +in pacman, the positions of the platforms in "mario-like" 2d games, the different turns and the width of the route in a car game etc. +This is the "base" of the levels in most games. + +Finally, for most released environment, a lof of different :math:`\mathcal{X}` are available. By default, each time the +environment is "reset" (the user want to move to the next scenario), a new :math:`\mathcal{X}` is used (this behaviour +can be changed, more information on the section :ref:`environment-module-chronics-info` of the documentation). + +.. _mdp-def: + +Modeling sequential decisions +------------------------------- + +As we said in introduction of this page, we will model a given scenario in grid2op. We have at our disposal: + +- a simulator, which is represented as a function :math:`\text{Sim} : \mathcal{S}_{\text{im}}^{(\text{in})} \to \mathcal{S}_{\text{im}}^{(\text{out})}` +- some time series :math:`\mathcal{X} = \left\{ \mathcal{X}_t \right\}_{1 \leq t \leq T}` + +In order to define the MDP we need to define: + +- :math:`\mathcal{S}`, the "state space" +- :math:`\mathcal{A}`, the "action space" +- :math:`\mathcal{L}_s(s, a)`, sometimes called "transition kernel", is the probability + distribution (over :math:`\mathcal{S}`) that gives the next + state after taking action :math:`a` in state :math:`s` +- :math:`\mathcal{L}_r(s, s', a)`, sometimes called "reward kernel", + is the probability distribution (over :math:`[0, 1]`) that gives + the reward :math:`r` after taking action :math:`a` in state :math:`s` which lead to state :math:`s'` + +We will do that for a single episode (all episodes follow the same process) + +Precisions +~~~~~~~~~~~ + +To make the reading of this MDP easier, for this section of the documentation, +we adopted the following convention: + +- text in :green:`green` will refer to elements that are read directly from the grid + by the simulator :math:`\text{Sim}` at the creation of the environment. 
+- text in :orange:`orange` will refer to elements that are related to the time series :math:`\mathcal{X}`
+- text in :blue:`blue` will refer to elements that can be
+  informatically modified by the user at the creation of the environment.
+
+In the pure definition of the MDP all elements written in :green:`green`, :orange:`orange` or
+:blue:`blue` are exogenous and constant: once the episode starts they cannot be changed
+by anything (including the agent).
+
+We differentiate between these 3 types of "variables" only to clarify what can be modified
+by "who":
+
+- :green:`green` variables depend only on the controlled powergrid
+- :orange:`orange` variables depend only on the time series
+- :blue:`blue` variables depend only on the way the environment is loaded
+
+.. note::
+    Not all these variables are independent though. If there are for example 3 loads
+    on the grid, then you need to use time series that somehow can generate
+    3 values at each step for load active values and 3 values at each step for load
+    reactive values. So the dimension of the :orange:`orange` variables is somehow
+    related to the dimension of the :green:`green` variables: you cannot use the
+    time series you want on the grid you want.
+
+Structural information
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+To define mathematically the MDP we first need to define some notations about the grid manipulated in
+this episode.
+
+We suppose that the structure of the grid does not change during the episode, with:
+
+- :green:`n_line` being the number of "powerlines" (and transformers) which are elements that allow the
+  power flows to actually move from one place to another
+- :green:`n_gen` being the number of generators, which are elements that produce the power
+- :green:`n_load` being the number of consumers, which are elements that consume the power (typically a city or a
+  large manufacturing plant)
+- :green:`n_storage` being the number of storage units on the grid, which are elements that can
+  convert the power into a form of energy that can be stored (*eg* chemical)
+
+All these elements (side of powerlines, generators, loads and storage units)
+are connected together at so called "substations". The grid counts :green:`n_sub` such substations.
+We will call :green:`dim_topo := 2 \times n_line + n_gen + n_load + n_storage` the total number
+of elements in the grid.
+
+.. note::
+    This "substation" concept only means that if two elements do not belong to the same substation, they cannot
+    be directly connected at the same "node" of the graph.
+
+    They can be connected in the same "connected component" of the graph (meaning that there are edges that
+    can connect them) but they cannot be part of the same "node".
+
+Each substation can be divided into up to :blue:`n_busbar_per_sub` busbars (this was only `2` in grid2op <= 1.9.8 and can be
+any integer > 0 in grid2op version >= 1.9.9).
+
+This :blue:`n_busbar_per_sub` parameter tells the maximum number of independent nodes there can be in a given substation.
+So to count the total maximum number of nodes in the grid, you can do
+:math:`\text{n\_busbar\_per\_sub} \times \text{n\_sub}`.
+
+When the grid is loaded, the backend also informs the environment about the :green:`***_to_subid` vectors
+(*eg* :green:`gen_to_subid`)
+which give, for each element, the substation to which it is connected. This is how the "constraint" that
+two elements can only be directly connected together if they belong to the same substation is encoded.
+
+..
note:: + **Definition** + + With these notations, two elements are connected together if (and only if, that's a + definition after all): + + - they belong to the same substation + - they are connected to the same busbar + + In this case, we can also say that these two elements are connected to the same "bus". + + These "buses" are the "nodes" in "the" graph you thought about when looking at a powergrid. + +.. note:: + **Definition** ("disconnected bus"): A bus is said to be disconnected if there are no elements connected to it. + +.. note:: + **Definition** ("disconnected element"): An element (side of powerlines, generators, loads or storage units) + is said to be disconnected if it is not connected to anything. + +Extra references: ++++++++++++++++++ + +You can modify :blue:`n_busbar_per_sub` in the `grid2op.make` function. For example, +by default if you call `grid2op.make("l2rpn_case14_sandbox")` you will have :blue:`n_busbar_per_sub = 2` +but if you call `grid2op.make("l2rpn_case14_sandbox", n_busbar=3)` you will have +:blue:`n_busbar_per_sub = 3` see :ref:`substation-mod-el` for more information. + +:green:`n_line`, :green:`n_gen`, :green:`n_load`, :green:`n_storage` and :green:`n_sub` depends on the environment +you loaded when calling `grid2op.make`, for example calling `grid2op.make("l2rpn_case14_sandbox")` +will lead to environment +with :green:`n_line = 20`, :green:`n_gen = 6`, :green:`n_load = 11` and :green:`n_storage = 0`. + +Other informations +~~~~~~~~~~~~~~~~~~~~~~~~ + +When loading the environment, there are also some other static data that are loaded which includes: + +- :green:`min_storage_p` and :green:`max_storage_p`: the minimum power that can be injected by + each storage units (typically :green:`min_storage_p` :math:`< 0`). These are vectors + (of real numbers) of size :green:`n_storage` +- :green:`is_gen_renewable`: a vector of `True` / `False` indicating for each generator whether + it comes from new renewable (and intermittent) renewable energy sources (*eg* solar or wind) +- :green:`is_gen_controlable`: a vector of `True` / `False` indicating for each generator + whether it can be controlled by the agent to produce both more or less power + at any given step. This is usually the case for generator which uses + as primary energy coal, gaz, nuclear or water (hyrdo powerplant) +- :green:`min_ramp` and :green:`max_ramp`: are two vector giving the maximum amount + of power each generator can be adjusted to produce more / less. Typically, + :green:`min_ramp = max_ramp = 0` for non controlable generators. + +.. note:: + These elements are marked :green:`green` because they are loaded by the backend, but strictly speaking + they can be specified in other files than the one representing the powergrid. + +Action space +~~~~~~~~~~~~~ + +At time of writing, grid2op support different type of actions: + +- :blue:`change_line_status`: that will change the line status (if it is disconnected + this action will attempt to connect it). It leaves in :math:`\left\{0,1\right\}^{\text{n\_line}}` +- :blue:`set_line_status`: that will set the line status to a + particular state regardless of the previous state (+1 to attempt a force + reconnection on the powerline and -1 to attempt a force disconnection). 
+ There is also a special case where the agent do not want to modify a given line and + it can then output "0" + It leaves in :math:`\left\{-1, 0, 1\right\}^{\text{n\_line}}` +- \* :blue:`change_bus`: that will, for each element of the grid change the busbars + to which it is connected (*eg* if it was connected on busbar 1 it will attempt to connect it on + busbar 2). This leaves in :math:`\left\{0,1\right\}^{\text{dim\_topo}}` +- :blue:`set_bus`: that will, for each element control on which busbars you want to assign it + to (1, 2, ..., :blue:`n_busbar_per_sub`). To which has been added 2 special cases -1 means "disconnect" this element + and 0 means "I don't want to affect" this element. This part of the action space then leaves + in :math:`\left\{-1, 0, 1, 2, ..., \text{n\_busbar\_per\_sub} \right\}^{\text{dim\_topo}}` +- :blue:`storage_p`: for each storage, the agent can chose the setpoint / target power for + each storage units. It leaves in + :math:`[\text{min\_storage\_p}, \text{max\_storage\_p}] \subset \mathbb{R}^{\text{n\_storage}}` +- :blue:`curtail`: corresponds to the action where the agent ask a generator (using renewable energy sources) + to produce less than what would be possible given the current weather. This type of action can + only be performed on renewable generators. It leaves in :math:`[0, 1]^{\text{n\_gen}}` + (to avoid getting the notations even more complex, we won't define exactly the space of this + action. Indeed, writing :math:`[0, 1]^{\text{n\_gen}}` is not entirely true as a non renewable generator + will not be affected by this type of action) +- :blue:`redisp`: corresponds to the action where the agent is able to modify (to increase or decrease) + the generator output values (asking at the some producers to produce more and at some + to produce less). It leaves in :math:`[\text{min\_ramp}, \text{max\_ramp}] \subset \mathbb{R}^{\text{n\_gen}}` + (remember that for non controlable generators, by definition we suppose that :green:`min_ramp = max_ramp = 0`) + +.. note:: + The :blue:`change_bus` is only available in environment where :blue:`n_busbar_per_sub = 2` + otherwise this would not make sense. The action space does not include this + type of actions if :blue:`n_busbar_per_sub != 2` + +You might have noticed that every type of actions is written in :blue:`blue`. This is because +the action space can be defined at the creation of the environment, by specifying in +the call to `grid2op.make` the `action_class` to be used. + +Let's call :math:`1_{\text{change\_line\_status}}` either :math:`\left\{0,1\right\}^{\text{n\_line}}` +(corresponding to the definition of the :blue:`change_line_status` briefly described above) if the +:blue:`change_line_status` has been selected by the user (for the entire scenario) or the +:math:`\emptyset` otherwise (and we do similarly for all other type of actions of course: for example: +:math:`1_{redisp} \in \left\{[\text{min\_ramp}, \text{max\_ramp}], \emptyset\right\}`) + +Formally then, the action space can then be defined as: + +.. math:: + :nowrap: + + \begin{align*} + \mathcal{A}\text{space\_type} =&\left\{\text{change\_line\_status}, \text{set\_line\_status}, \right. \\ + &~\left.\text{change\_bus}, \text{set\_bus}, \right.\\ + &~\left.\text{storage\_p}, \text{curtail}, \text{redisp} \right\} \\ + \mathcal{A} =&\Pi_{\text{a\_type} \in \mathcal{A}\text{space\_type} } 1_{\text{a\_type}}\\ + \end{align*} + +.. note:: + In the grid2op documentation, the words "topological modification" are often used. 
+    When that is the case, unless told otherwise, it means
+    :blue:`set_bus` or :blue:`change_bus` types of actions.
+
+
+Extra references:
++++++++++++++++++
+
+Informatically, the :math:`1_{\text{change\_line\_status}}` can be defined at the
+call to `grid2op.make` when the environment is created (and cannot be changed afterwards).
+
+For example, if the user builds the environment like this:
+
+.. code-block:: python
+
+    import grid2op
+    from grid2op.Action import PlayableAction
+    env_name = ...  # whatever, eg "l2rpn_case14_sandbox"
+    env = grid2op.make(env_name, action_class=PlayableAction)
+
+Then all types of actions are selected and:
+
+.. math::
+    :nowrap:
+
+    \begin{align*}
+        \mathcal{A} =& \left\{0,1\right\}^{\text{n\_line}}~ \times & \text{change\_line\_status} \\
+        & \left\{-1, 0, 1\right\}^{\text{n\_line}}~ \times & \text{set\_line\_status} \\
+        & \left\{0,1\right\}^{\text{dim\_topo}}~ \times & \text{change\_bus} \\
+        & \left\{-1, 0, 1, 2, ..., \text{n\_busbar\_per\_sub} \right\}^{\text{dim\_topo}}~ \times & \text{set\_bus} \\
+        & ~[\text{min\_storage\_p}, \text{max\_storage\_p}]~ \times & \text{storage\_p} \\
+        & ~[0, 1]^{\text{n\_gen}} \times & \text{curtail} \\
+        & ~[\text{min\_ramp}, \text{max\_ramp}] & \text{redisp}
+    \end{align*}
+
+You can also build the same environment like this:
+
+.. code-block:: python
+
+    import grid2op
+    from grid2op.Action import TopologySetAction
+    same_env_name = ...  # whatever, eg "l2rpn_case14_sandbox"
+    env = grid2op.make(same_env_name, action_class=TopologySetAction)
+
+This will lead to the following action space, because the user asks to
+use only "topological actions" (including line status) with only the
+"set" way of modifying them:
+
+.. math::
+    :nowrap:
+
+    \begin{align*}
+        \mathcal{A} =& \left\{-1, 0, 1\right\}^{\text{n\_line}}~ \times & \text{set\_line\_status} \\
+        & \left\{-1, 0, 1, 2, ..., \text{n\_busbar\_per\_sub} \right\}^{\text{dim\_topo}}~ & \text{set\_bus} \\
+    \end{align*}
+
+The page :ref:`action-module` of the documentation provides you with all types of
+actions you can use in grid2op.
+
+.. note::
+    If you use the compatibility with the popular gymnasium (previously gym) framework,
+    you can also specify the action space with the "`attr_to_keep`"
+    key-word argument.
+
+.. _mdp-state-space-def:
+
+State space
+~~~~~~~~~~~~~
+
+By default in grid2op, only a part of the state space is shown to the agent (the so called
+"observation"). In this part of the documentation, we will describe something
+slightly different which is the "state space" of the MDP.
+
+The main difference is that this "state space" will include future data about the
+environment (*eg* the :math:`\mathcal{X}` matrix). You can refer to
+section :ref:`pomdp` or :ref:`non-pomdp` of this page of the documentation.
+
+.. note::
+    We found it easier to show the MDP without the introduction of the
+    "observation kernel", so keep in mind that this paragraph is not
+    representative of the observation in grid2op but is "purely
+    theoretical".
+
+The state space is defined by different types of attributes and we will not list
+them all here (you can find a detailed list of everything available to the
+agent in the :ref:`observation_module` page of the documentation.)
The +"state space" is then made of: + +- some part of the outcome of the solver: + :math:`S_{\text{grid}} \subset \mathcal{S}_{\text{im}}^{(\text{out})}`, this + includes but is not limited to the loads active values `load_p`_, + loads reactive values `load_q`_, voltage magnitude + at each loads `load_v`_, the same kind of attributes but for generators + `gen_p`_, `gen_q`_, `gen_v`_, `gen_theta`_ and also for powerlines + `p_or`_, `q_or`_, `v_or`_, `a_or`_, `theta_or`_, `p_ex`_, `q_ex`_, `v_ex`_, + `a_ex`_, `theta_ex`_, `rho`_ etc. +- some attributes related to "redispatching" (which is a type of actions) that is + computed by the environment (see :ref:`mdp-transition-kernel-def` for more information) + which includes `target_dispatch`_ and `actual_dispatch`_ or the curtailment + `gen_p_before_curtail`_, `curtailment_mw`_, `curtailment`_ or `curtailment_limit`_ +- some attributes related to "storage units", for example `storage_charge`_ , + `storage_power_target`_, `storage_power`_ or `storage_theta`_ +- some related to "date" and "time", `year`_, `month`_, `day`_, `hour_of_day`_, + `minute_of_hour`_, `day_of_week`_, `current_step`_, `max_step`_, `delta_time`_ +- finally some related to the :blue:`rules of the game` like + `timestep_overflow`_, `time_before_cooldown_line`_ or `time_before_cooldown_sub`_ + +And, to make it "Markovian" we also need to include : + +- the (constant) values of :math:`\mathcal{S}_{\text{im}}^{(\text{in})}` that + are not "part of" :math:`\mathcal{X}` (more information about that in + the paragraph ":ref:`mdp-call-simulator-step`" of this documentation). + This might include some physical + parameters of some elements of the grid (like transformers or powerlines) or + some other parameters of the solver controlling either the equations to be + solved or the solver to use etc. \* +- the complete matrix :math:`\mathcal{X}` which include the exact knowledge of + past, present **and future** loads and generation for the entire scenario (which + is not possible in practice). The matrix itself is constant. +- the index representing at which "step" of the matrix :math:`\mathcal{X}` the + current data are being used by the environment. + +.. note:: + \* grid2op is build to be "simulator agnostic" so all this part of the "state space" + is not easily accessible through the grid2op API. To access (or to modify) them + you need to be aware of the implementation of the :class:`grid2op.Backend.Backend` + you are using. + +.. note:: + In this modeling, by design, the agent sees everything that will happen in the + future, without uncertainties. To make a parrallel with a "maze" environment, + the agent would see the full maze and its position at each step. + + This is of course not fully representative of the daily powergrid operations, + where the operators cannot see exactly the future. To make this modeling + closer to the reality, you can refer to the paragphs :ref:`pomdp` and :ref:`non-pomdp` + below. + +.. _mdp-transition-kernel-def: + +Transition Kernel +~~~~~~~~~~~~~~~~~~~ + +In this subsection we will describe the so called transition kernel, this is the function that given a +state :math:`s` and an action :math:`a` gives a probability distribution over all possible next state +:math:`s' \in \mathcal{S}`. + +In this subsection, we chose to model this transition kernel as a deterministic +function (which is equivalent to saying that the probability distribution overs :math:`\mathcal{S}` is +a Dirac distribution). + +.. 
note::
+    The removal of the :math:`\mathcal{X}` matrix from the "observation space" (see section :ref:`pomdp`), the
+    rewriting of the MDP to stay in the "fully observable setting" (see section :ref:`non-pomdp`) or the
+    introduction of the "opponent" described in section :ref:`mdp-opponent` are all things that "make" this
+    "transition kernel" probabilistic. We chose simplicity by presenting it in a fully deterministic
+    fashion.
+
+So let's write what the next state is given the current state :math:`s \in \mathcal{S}` and the action of
+the agent :math:`a \in \mathcal{A}`. To do that we split the computation into different steps explained below.
+
+.. note::
+    To be exhaustive, if the current state is :math:`s = s_{\emptyset}` then :math:`s' = s_{\emptyset}` is
+    returned regardless of the action and the steps described below are skipped.
+
+If the end of the episode is reached then :math:`s' = s_{\emptyset}` is returned.
+
+Step 1: legal vs illegal
++++++++++++++++++++++++++
+
+The first step is to check if the action is :blue:`legal` or not. This depends on the :blue:`rules` (see the
+dedicated page :ref:`rule-module` of the documentation) and the :blue:`parameters` (more information at the page
+:ref:`parameters-module` of the documentation). There are basically two cases:
+
+#. the action :math:`a` is legal: then proceed to the next step
+#. the action :math:`a` is not legal: then replace the action by `do nothing`, an action that does not
+   affect anything, and proceed to the next step
+
+.. _mdp-read-x-values:
+
+Step 2: load next environment values
++++++++++++++++++++++++++++++++++++++
+
+This is also rather straightforward: the current index is updated (+1 is added) and this
+new index is used to find the "optimal" (from a market or a central authority perspective)
+values each producer produces to satisfy the demand of each consumer (in this case large cities or
+companies). This information is stored in the :math:`\mathcal{X}` matrix.
+
+.. _mdp-redispatching-step:
+
+Step 3: Compute the generator setpoints and handle storage units
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+The next step of the environment is to handle the "continuous" part of the action (*eg* "storage_p",
+"curtail" or "redisp") and to make sure a suitable setpoint can be reached for each generator (you
+can refer to the pages :ref:`storage-mod-el` and :ref:`generator-mod-el` of this documentation
+for more information).
+
+There are two alternatives:
+
+#. either the physical constraints cannot be met (there exists no feasible solution
+   for at least one generator), and in this case the next state is the
+   terminal state :math:`s_{\emptyset}` (ignore all the steps below)
+#. or they can be met. In this case the "target generator values" are computed as well
+   as the "target storage unit values"
+
+.. note::
+    There is a parameter called :blue:`LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION` that will
+    try to avoid, as much as possible, falling into an infeasible solution. It does so by limiting
+    the amount of power that is curtailed or injected in the grid from the storage units: it
+    modifies the action :math:`a`.
+
+.. _mdp-call-simulator-step:
+
+Step 4: Call the simulator
++++++++++++++++++++++++++++++++
+
+At this stage (assuming the physical constraints can be met), the setpoints for the following variables
+are known:
+
+- the status of the lines is deduced from the "change_line_status" and "set_line_status" parts of the action and their
+  status in :math:`s` (the current state).
If there are maintenance (or attacks, see section + :ref:`mdp-opponent`) they can also disconnect powerlines. +- the busbar to which each elements is connected is also decuced from the "change_bus" and + "set_bus" part of the action +- the consumption active and reactive values have been computed from the :math:`\mathcal{X}` + values at previous step +- the generator active values have just been computed after taking into account the redispatching, + curtailement and storage (at this step) +- the voltage setpoint for each generators is either read from :math:`\mathcal{X}` or + deduced from the above data by the "voltage controler" (more information on :ref:`voltage-controler-module`) + +All this should be part of the input solver data :math:`\mathcal{S}_{\text{im}}^{(\text{in})}`. If not, then the +solver cannot be used unfortunately... + +With that (and the other data used by the solver and included in the space, see paragraph +:ref:`mdp-state-space-def` of this documentation), the necessary data is shaped (by the Backend) into +a valid :math:`s_{\text{im}}^{(\text{in})} \in \mathcal{S}_{\text{im}}^{(\text{in})}`. + +The solver is then called and there are 2 alternatives (again): + +#. either the solver cannot find a feasible solution (it "diverges"), and in this case the next state is the + terminal state :math:`s_{\emptyset}` (ignore all the steps bellow) +#. or a physical solution is found and the process carries out in the next steps + +.. _mdp-protection-emulation-step: + +Step 5: Emulation of the "protections" +++++++++++++++++++++++++++++++++++++++++++ + +At this stage an object :math:`s_{\text{im}}^{(\text{out})} \in \mathcal{S}_{\text{im}}^{(\text{out})}` +has been computed by the solver. + +The first step performed by grid2op is to look at the flows (in Amps) on the powerlines (these data +are part of :math:`s_{\text{im}}^{(\text{out})}`) and to check whether they meet some constraints +defined in the :blue:`parameters` (mainly if for some powerline the flow is too high, or if it has been +too high for too long, see :blue:`HARD_OVERFLOW_THRESHOLD`, :blue:`NB_TIMESTEP_OVERFLOW_ALLOWED` and +:blue:`NO_OVERFLOW_DISCONNECTION`). If some powerlines are disconnected at this step, then the +"setpoint" send to the backend at the previous step is modified and it goes back +to :ref:`mdp-call-simulator-step`. + +.. note:: + The simulator can already handle a real simulation of these "protections". This "outer loop" + is because some simulators does not do it. + +.. note:: + For the purist, this "outer loop" necessarily terminates. It is trigger when at least one + powerline needs to be disconnected. And there are :green:`n_line` (finite) powerlines. + +Step 6: Reading back the "grid dependant" attributes +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +At this stage an object :math:`s_{\text{im}}^{(\text{out})} \in \mathcal{S}_{\text{im}}^{(\text{out})}` +has been computed by the solver and all the "rules" / "parameters" regarding powerlines +are met. + +As discussed in the section about "state space" (see :ref:`mdp-state-space-def` for more information), +the next state space :math:`s'` include some part of the outcome of the solver. 
These data +are then read from the :math:`s_{\text{im}}^{(\text{out})}`, which +includes but is not limited to the loads active values `load_p`_, +loads reactive values `load_q`_, voltage magnitude +at each loads `load_v`_, the same kind of attributes but for generators +`gen_p`_, `gen_q`_, `gen_v`_, `gen_theta`_ and also for powerlines +`p_or`_, `q_or`_, `v_or`_, `a_or`_, `theta_or`_, `p_ex`_, `q_ex`_, `v_ex`_, +`a_ex`_, `theta_ex`_, `rho`_ etc. + + +Step 7: update the other attributes of the state space ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +Finally, the environment takes care of updating all the other "part" +of the state space, which are: + +- attributes related to "redispatching" are updated at in paragraph :ref:`mdp-redispatching-step` +- and so are attributes related to storage units +- the information about the date and time are loaded from the :math:`\mathcal{X}` matrix. + +As for the attributes related to the rules of the game, they are updated in the following way: + +- `timestep_overflow`_ is set to 0 for all powerlines not in overflow and increased by 1 for all the other +- `time_before_cooldown_line`_ is reduced by 1 for all line that has not been impacted by the action :math:`a` + otherwise set to :blue:`param.NB_TIMESTEP_COOLDOWN_LINE` +- `time_before_cooldown_sub`_ is reduced by 1 for all substations that has not been impacted by the action :math:`a` + otherwise set to :blue:`param.NB_TIMESTEP_COOLDOWN_SUB` + +The new state :math:`s'` is then passed to the agent. + +.. note:: + We remind that this process might have terminated before reaching the last step described above, for example + at :ref:`mdp-redispatching-step` or at :ref:`mdp-call-simulator-step` or during the + emulation of the protections described at :ref:`mdp-protection-emulation-step` + +Reward Kernel +~~~~~~~~~~~~~~~~~~~ + +And to finish this (rather long) description of grid2op's MDP we need to mention the +"reward kernel". + +This "kernel" computes the reward associated to taking the action :math:`a` in step +:math:`s` that lead to step :math:`s'`. In most cases, the +reward in grid2op is a deterministic function and depends only on the grid state. + +In grid2op, every environment comes with a pre-defined :blue:`reward function` that +can be fully customized by the user when the environment is created or +even afterwards (but is still constant during an entire episode of course). + +For more information, you might want to have a look at the :ref:`reward-module` page +of this documentation. + +Extensions +----------- + +In this last section of this page of the documentation, we dive more onto some aspect of the grid2op MDP. + +.. note:: + TODO: This part of the section is still an ongoing work. + + Let us know if you want to contribute ! + + +.. _pomdp: + +Partial Observatibility +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This is the case in most grid2op environments: only some part of the environment +state at time `t` :math:`s_t` are +given to the agent in the observation at time `t` :math:`o_t`. + +Mathematically this can be modeled with the introduction of an "observation space" and an +"observation kernel". This kernel will only expose part of the "state space" to the agent and +(in grid2op) is a deterministic function that depends on the environment state :math:`s'`. + +More specifically, in most grid2op environment (by default at least), none of the +physical parameters of the solvers are provided. 
Also, to represent better +the daily operation in power systems, only the `t` th row of the matrix :math:`\mathcal{X}_t` +is given in the observation :math:`o_t`. The components :math:`\mathcal{X}_{t', i}` +(for :math:`\forall t' > t`) are not given. The observation kernel in grid2op will +mask out some part of the "environment state" to the agent. + +.. _non-pomdp: + +Or not partial observatibility ? +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If we consider that the agent is aware of the simulator used and all it's "constant" (see +paragraph :ref:`mdp-state-space-def`) part of :math:`\mathcal{S}_{\text{im}}^{(\text{in})}` +(which are part of the simulator that are not affected by the actions of +the agent nor by environment) then we can model the grid2op MDP without the need +to use an observation kernel: it can be a regular MDP. + +To "remove" the need of partial observatibility, without the need to suppose that the +agent sees all the future we can adapt slightly the modeling which allows us to +remove completely the :math:`\mathcal{X}` matrix : + +- the observation space / state space (which are equal in this setting) are the same as the + one used in :ref:`pomdp` +- the transition kernel is now stochastic. Indeed, the "next" value of the loads and generators + are, in this modeling not read from a :math:`\mathcal{X}` matrix but sampled from a given + distribution which replaces the step :ref:`mdp-read-x-values` of subsection + :ref:`mdp-transition-kernel-def`. And once the values of these variables are sampled, + the rest of the steps described there are unchanged. + +.. note:: + The above holds as long as there exist a way to sample new values for gen_p, load_p, gen_v and + load_q that is markovian. We suppose it exists here and will not write it down. + +.. note:: + Sampling from these distribution can be quite challenging and will not be covered here. + + One of the challenging part is that the sampled generations need to meet the demand (and + the losses) as well as all the constraints on the generators (p_min, p_max and ramps) + +.. _mdp-opponent: + +Adversarial attacks +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +TODO: explain the model of the environment + +Forecast and simulation on future states +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +TODO : explain the model the forecast and the fact that the "observation" also +includes a model of the world that can be different from the grid of the environment + +Simulator dynamics can be more complex +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +TODO, Backend does not need to "exactly map the simulator" there are +some examples below: + +Hide elements from the grid2op environment +++++++++++++++++++++++++++++++++++++++++++ + +TODO only a part of the grid would be "exposed" in the +grid2op environment. + + +Contain elements not modeled by grid2op +++++++++++++++++++++++++++++++++++++++++++ + +TODO: speak about HVDC or "pq" generators, or 3 winding transformers + +Contain embeded controls +++++++++++++++++++++++++++++++++++++++++++ + +TODO for example automatic setpoint for HVDC or limit on Q for generators + +Time domain simulation ++++++++++++++++++++++++ + +TODO: we can plug in simulator that solves more +accurate description of the grid and only "subsample" +(*eg* at a frequency of every 5 mins) provide grid2op +with some information. + +Handle the topology differently +++++++++++++++++++++++++++++++++++ + +Backend can operate switches, only requirement from grid2op is to map the topology +to switches. 
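+
+As a purely illustrative sketch (none of the names below exist in the grid2op API), this is the kind of
+mapping such a backend could implement internally, assuming a simple double busbar layout where each
+element owns one switch per busbar:
+
+.. code-block:: python
+
+    def buses_to_switches(local_bus):
+        """
+        Illustrative only: map the grid2op-style bus assignment of one substation
+        (one value in {-1, 1, 2} per element) to the open / closed state of the
+        switches of a double busbar layout (one switch per element and per busbar).
+        """
+        switches = []
+        for bus in local_bus:
+            if bus == -1:
+                switches.append((False, False))  # disconnected: both switches opened
+            elif bus == 1:
+                switches.append((True, False))   # connected to busbar 1
+            elif bus == 2:
+                switches.append((False, True))   # connected to busbar 2
+            else:
+                raise ValueError(f"unsupported bus value: {bus}")
+        return switches
+
+    # element 1 on busbar 1, elements 2 and 3 on busbar 2, element 4 disconnected
+    print(buses_to_switches([1, 2, 2, -1]))
+
+Real substation layouts are usually richer than this (busbar couplers, sectionalizers, etc.), which is
+precisely why grid2op leaves this mapping to the backend.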
+ +Some constraints +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +TODO + +Operator attention: alarm and alter +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +TODO + +.. include:: final.rst diff --git a/docs/model_based.rst b/docs/model_based.rst index 54f4c6f6e..5bd373985 100644 --- a/docs/model_based.rst +++ b/docs/model_based.rst @@ -3,6 +3,15 @@ Model Based / Planning methods ==================================== + +This page is organized as follow: + +.. contents:: Table of Contents + :depth: 3 + +Objectives +---------------- + .. warning:: This page is in progress. We welcome any contribution :-) @@ -369,3 +378,5 @@ And for the `ExampleAgent2`: res = strat[0] # action is the first one of the best strategy highest_score = ts_survived return res + +.. include:: final.rst diff --git a/docs/model_free.rst b/docs/model_free.rst index db326736f..94f8f7458 100644 --- a/docs/model_free.rst +++ b/docs/model_free.rst @@ -17,3 +17,5 @@ Some examples are given in "l2rpn-baselines": - `PPO with RLLIB `_ - `PPO with stable-baselines3 `_ + +.. include:: final.rst diff --git a/docs/modeled_elements.rst b/docs/modeled_elements.rst index 634548455..9dc4509d3 100644 --- a/docs/modeled_elements.rst +++ b/docs/modeled_elements.rst @@ -1034,7 +1034,8 @@ Substations Description ~~~~~~~~~~~~~~~~~~ -A "substation" is a place where "elements" (side of a powerline, a load, a generator or +A "substation" is a place (that exists, you can touch it) +where "elements" (side of a powerline, a load, a generator or a storage unit) belonging to the powergrid are connected all together. Substations are connected to other substation with powerlines (this is why powerline have two "sides": one for @@ -1042,11 +1043,39 @@ each substation they are connecting). In most powergrid around the world, substations are made of multiple "busbars". In grid2op we supposes that every "elements" connected to a substation can be connected to every busbars in the substation. This is mainly -done for simplicity, for real powergrid it might not be the case. We also, for simplicity, assume that -each substations counts exactly 2 distincts busbars. +done for simplicity, for real powergrid it might not be the case. -At the initial step, for all environment available at the time of writing (february 2021) every objects -were connected to the busbar 1 of their substation. This is not a requirement of grid2op, but it was the case +In earlier grid2op versions, we also assumed that, for simplicity, +each substations counts exactly 2 distincts busbars. Starting from grid2op 1.9.9, it is possible +when you create an environment, to specify how many busbars are available in each substation. You can +customize it with: + +.. code-block:: python + + import grid2op + env_name = "l2rpn_case14_sandbox" + + env_2_busbars = grid2op.make(env_name) # default + env_2_busbars_bis = grid2op.make(env_name, n_busbar=2) # same as above + + # one busbar + env_1_busbar = grid2op.make(env_name, n_busbar=1) + #NB: topological action on substation (set_bus, change_bus) are not possible in this case ! 
+ + # 3 busbars + env_3_busbars = grid2op.make(env_name, n_busbar=3) + #NB: "change_bus" type of actions are not possible (it would be ambiguous - non unique- + # on which busbar you want to change them) + + # 10 busbars + env_10_busbars = grid2op.make(env_name, n_busbar=10) + #NB: "change_bus" type of actions are not possible (it would be ambiguous - non unique- + # on which busbar you want to change them) + + +At the initial step (right after `env.reset()`), for all environment available +at the time of writing (february 2021) every objects were connected to the busbar 1 +of their substation. This is not a requirement of grid2op, but it was the case for every environments created. .. _topology-pb-explained: diff --git a/docs/optimization.rst b/docs/optimization.rst index 24a58e304..ba9407a8e 100644 --- a/docs/optimization.rst +++ b/docs/optimization.rst @@ -19,3 +19,5 @@ Basically an "optimizer" agent looks like (from a very high level): 3) update the "formulation" using the observation received 4) run a solver to solve the "problem" 5) convert back the "decisions" (output) of the solver into a "grid2op" action + +.. include:: final.rst diff --git a/docs/quickstart.rst b/docs/quickstart.rst index 3a641da06..3955b8182 100644 --- a/docs/quickstart.rst +++ b/docs/quickstart.rst @@ -121,3 +121,5 @@ The most basic code, for those familiar with openAI gym (a well-known framework To make the use of grid2op alongside grid2op environment easier, we developed a module described in :ref:`openai-gym`. + +.. include:: final.rst diff --git a/docs/reward.rst b/docs/reward.rst deleted file mode 100644 index 555988adf..000000000 --- a/docs/reward.rst +++ /dev/null @@ -1,62 +0,0 @@ -.. currentmodule:: grid2op.Reward - -Reward -=================================== - -This page is organized as follow: - -.. contents:: Table of Contents - :depth: 3 - -Objectives ------------ -This module implements some utilities to get rewards given an :class:`grid2op.Action` an :class:`grid2op.Environment` -and some associated context (like has there been an error etc.) - -It is possible to modify the reward to use to better suit a training scheme, or to better take into account -some phenomenon by simulating the effect of some :class:`grid2op.Action` using -:func:`grid2op.Observation.BaseObservation.simulate`. - -Doing so only requires to derive the :class:`BaseReward`, and most notably the three abstract methods -:func:`BaseReward.__init__`, :func:`BaseReward.initialize` and :func:`BaseReward.__call__` - -Training with multiple rewards -------------------------------- -In the standard reinforcement learning framework the reward is unique. In grid2op, we didn't want to modify that. - -However powergrid are complex environment with some specific and unsual dynamics. For these reasons it can be -difficult to compress all these signal into one single scalar. To speed up the learning process, to force the -Agent to adopt more resilient strategies etc. it can be usefull to look at different aspect, thus using different -reward. Grid2op allows to do so. At each time step (and also when using the `simulate` function) it is possible -to compute different rewards. This rewards must inherit and be provided at the initialization of the Environment. - -This can be done as followed: - -.. 
code-block:: python - - import grid2op - from grid2op.Reward import GameplayReward, L2RPNReward - env = grid2op.make("case14_realistic", reward_class=L2RPNReward, other_rewards={"gameplay": GameplayReward}) - obs = env.reset() - act = env.action_space() # the do nothing action - obs, reward, done, info = env.step(act) # immplement the do nothing action on the environment - -On this example, "reward" comes from the :class:`L2RPNReward` and the results of the "reward" computed with the -:class:`GameplayReward` is accessible with the info["rewards"]["gameplay"]. We choose for this example to name the other -rewards, "gameplay" which is related to the name of the reward "GampeplayReward" for convenience. The name -can be absolutely any string you want. - - -**NB** In the case of L2RPN competitions, the reward can be modified by the competitors, and so is the "other_reward" -key word arguments. The only restriction is that the key "__score" will be use by the organizers to compute the -score the agent. Any attempt to modify it will be erased by the score function used by the organizers without any -warning. - -Detailed Documentation by class --------------------------------- -.. automodule:: grid2op.Reward - :members: - :special-members: - :autosummary: - -.. include:: final.rst \ No newline at end of file diff --git a/docs/runner.rst b/docs/runner.rst deleted file mode 100644 index 266c26c2d..000000000 --- a/docs/runner.rst +++ /dev/null @@ -1,137 +0,0 @@ -.. _runner-module: - -Runner -=================================== - -This page is organized as follow: - -.. contents:: Table of Contents - :depth: 3 - -Objectives ------------ -The runner class aims at: - -i) facilitate the evaluation of the performance of :class:`grid2op.Agent` by performing automatically the - "open ai gym loop" (see below) -ii) define a format to store the results of the evaluation of such agent in a standardized manner -iii) this "agent logs" can then be re read by third party applications, such as - `grid2viz `_ or by internal class to ease the study of the behaviour of - such agent, for example with the classes :class:`grid2op.Episode.EpisodeData` or - :class:`grid2op.Episode.EpisodeReplay` -iv) allow easy use of parallelization of this assessment. - -Basically, the runner simplifies the assessment of the performance of some agent. This is the "usual" gym code to run -an agent: - -.. code-block:: python - - import grid2op - from grid2op.Agent import RandomAgent - env = grid2op.make("l2rpn_case14_sandbox") - agent = RandomAgent(env.action_space) - NB_EPISODE = 10 # assess the performance for 10 episodes, for example - for i in range(NB_EPISODE): - reward = env.reward_range[0] - done = False - obs = env.reset() - while not done: - act = agent.act(obs, reward, done) - obs, reward, done, info = env.step(act) - -The above code does not store anything, cannot be run easily in parallel and is already pretty verbose. -To have a shorter code, that saves most of -the data (and make it easier to integrate it with other applications) we can use the runner the following way: - -.. 
code-block:: python - - import grid2op - from grid2op.Runner import Runner - from grid2op.Agent import RandomAgent - env = grid2op.make("l2rpn_case14_sandbox") - NB_EPISODE = 10 # assess the performance for 10 episodes, for example - NB_CORE = 2 # do it on 2 cores, for example - PATH_SAVE = "agents_log" # and store the results in the "agents_log" folder - runner = Runner(**env.get_params_for_runner(), agentClass=RandomAgent) - runner.run(nb_episode=NB_EPISODE, nb_process=NB_CORE, path_save=PATH_SAVE) - -As we can see, with less lines of code, we could execute parallel assessment of our agent, on 10 episode -and save the results (observations, actions, rewards, etc.) into a dedicated folder. - -If your agent is inialiazed with a custom `__init__` method that takes more than the action space to be built, -you can also use the Runner pretty easily by passing it an instance of your agent, for example: - -.. code-block:: python - - import grid2op - from grid2op.Runner import Runner - env = grid2op.make("l2rpn_case14_sandbox") - NB_EPISODE = 10 # assess the performance for 10 episodes, for example - NB_CORE = 2 # do it on 2 cores, for example - PATH_SAVE = "agents_log" # and store the results in the "agents_log" folder - - # initilize your agent - my_agent = FancyAgentWithCustomInitialization(env.action_space, - env.observation_space, - "whatever else you want" - ) - - # and proceed as following for the runner - runner = Runner(**env.get_params_for_runner(), agentClass=None, agentInstance=my_agent) - runner.run(nb_episode=NB_EPISODE, nb_process=NB_CORE, path_save=PATH_SAVE) - -Other tools are available for this runner class, for example the easy integration of progress bars. See bellow for -more information. - -.. _runner-multi-proc-warning: - -Note on parallel processing ----------------------------- -The "Runner" class allows for parallel execution of the same agent on different scenarios. In this case, each -scenario will be run in independent process. - -Depending on the platform and python version, you might end up with some bugs and error like - -.. pull-quote:: - AttributeError: Can't get attribute 'ActionSpace_l2rpn_case14_sandbox' on Process SpawnPoolWorker-4: - -or like: - -.. pull-quote:: - File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/multiprocessing/pool.py", line 125, in worker - result = (True, func(\*args, \*\*kwds)) - - File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/multiprocessing/pool.py", line 51, in - starmapstar return list(itertools.starmap(args[0], args[1])) - -In this case this means grid2op has a hard time dealing with the multi processing part. In that case, it -is recommended to disable it completely, for example by using, before any call to "runner.run" the following code: - -.. code-block:: python - - import os - from grid2op.Runner import Runner - - os.environ[Runner.FORCE_SEQUENTIAL] = "1" - -This will force (starting grid2op >= 1.5) grid2op to use the sequential runner and not deal with the added -complexity of multi processing. - -This is especially handy for "windows" system in case of trouble. - -For information, as of writing (march 2021): - -- macOS with python <= 3.7 will behave like any python version on linux -- windows and macOS with python >=3.8 will behave differently than linux but similarly to one another - - -Detailed Documentation by class -------------------------------- -.. automodule:: grid2op.Runner - :members: - :private-members: - :special-members: - :autosummary: - -.. 
include:: final.rst \ No newline at end of file diff --git a/docs/special.rst b/docs/special.rst new file mode 100644 index 000000000..5b2102393 --- /dev/null +++ b/docs/special.rst @@ -0,0 +1,41 @@ +.. Color profiles for Sphinx. +.. Has to be used with hacks.css +.. (https://bitbucket.org/lbesson/web-sphinx/src/master/.static/hacks.css) +.. role:: black +.. role:: gray +.. role:: grey +.. role:: silver +.. role:: white +.. role:: maroon +.. role:: red +.. role:: magenta +.. role:: fuchsia +.. role:: pink +.. role:: orange +.. role:: yellow +.. role:: lime +.. role:: green +.. role:: olive +.. role:: teal +.. role:: cyan +.. role:: aqua +.. role:: blue +.. role:: navy +.. role:: purple + +.. role:: under +.. role:: over +.. role:: blink +.. role:: line +.. role:: strike + +.. role:: it +.. role:: ob + +.. role:: small +.. role:: large + +.. role:: center +.. role:: left +.. role:: right +.. (c) Lilian Besson, 2011-2016, https://bitbucket.org/lbesson/web-sphinx/ \ No newline at end of file diff --git a/docs/topology.rst b/docs/topology.rst new file mode 100644 index 000000000..74b13d9db --- /dev/null +++ b/docs/topology.rst @@ -0,0 +1,325 @@ + + +.. _topology-modeling-module: + +Dive into the topology "modeling" in grid2op +=================================================================== + +In this page of the documentation we dive into the description of the +"topology" of the grid in grid2op. + +.. warning:: + Work in progress + +.. note:: + You can also find another representation of the topology in grid2op + in the page :ref:`detailed-topology-modeling-module` + + +What do we call topology +--------------------------------- + +In the powersystem literature "topology" might refer to different things and +be encoded in different ways, for example there is the "nodal topology" which +is often use by the physical solvers (backends in case of grid2op), or there +is the "detailed topoology" which rather uses swithes, breakers etc. + +.. note:: + The "nodal topology" is a graph that meets the Kirchhoff Current Laws. + + The vertex of this graph are the "electrical node". These vertices contains, + from grid2op point of view, one or more "elements" of grid (side of powerline, + loads, generators, storage units etc.) that can be directly connected together. + + The edges of this graph are merging of 1 or more powerlines that connects two + vertices together. + +.. note:: + The "detailed topology" is more complicated. It also represents a graph but + at a more granular level. + + In real powergrid, elements of the grid are connected together with switches / + breakers / couplers etc. that can be either closed or opened. + + In real grid, the "topology" is controled with actions on these switches / + breakers / couplers etc. + +In the case of grid2op we adopt another representation for this "topology". +It is more detailed than containing purely the "nodal" information but +does not model the switches. + +.. note:: + TODO have some illustrative examples here of "nodal" and "detailed" + + For example inspired from https://www.powsybl.org/pages/documentation/developer/tutorials/topology.html + +.. note:: + This explanation is correct as of writing (September 2024) but there are + some efforts to use a more detailed representation of the topology in + the form of `switches` in a branch in grid2op. 
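+
+To give a first intuition of what the "nodal topology" looks like, here is a small sketch (purely
+illustrative: the dictionary below is not the grid2op API) that builds this graph with `networkx`
+for a toy grid made of two substations, one generator, one load and one powerline:
+
+.. code-block:: python
+
+    import networkx as nx
+
+    # each element is described by the substation it belongs to (fixed) and the
+    # busbar it is currently connected to (1, 2, ... or -1 if disconnected)
+    elements = {
+        "gen_0":     {"sub": 0, "bus": 1},
+        "load_0":    {"sub": 1, "bus": 1},
+        "line_0_or": {"sub": 0, "bus": 1},  # "origin" side of the powerline
+        "line_0_ex": {"sub": 1, "bus": 1},  # "extremity" side of the powerline
+    }
+
+    graph = nx.Graph()
+    # vertices of the "nodal topology": one per (substation, busbar) pair actually used
+    for el in elements.values():
+        if el["bus"] != -1:  # disconnected elements are simply omitted
+            graph.add_node((el["sub"], el["bus"]))
+
+    # edges: one per powerline, between the bus of its "or" side and the bus of its "ex" side
+    or_side, ex_side = elements["line_0_or"], elements["line_0_ex"]
+    if or_side["bus"] != -1 and ex_side["bus"] != -1:
+        graph.add_edge((or_side["sub"], or_side["bus"]),
+                       (ex_side["sub"], ex_side["bus"]))
+
+    print(graph.nodes, graph.edges)  # 2 electrical nodes linked by 1 edge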
+
+In plain English, the "topology" is a representation of the powergrid
+as a graph with the edges being the powerlines / transformers and the
+nodes being some "things" having attributes such as the power produced
+or consumed at these nodes.
+
+As often in computer science, there are different ways to informatically
+represent a graph.
+
+We chose to encode this "graph" in the form of a vector. This vector,
+often called the "topology vector" or "topo vect", has the following properties:
+
+- it has as many components as the number of elements (load, generator, side of powerline
+  or transformer, storage unit etc.) present in the grid. Each component of this vector
+  provides information about the state of a unique element of the grid.
+- it is a vector of integers (`>= -1`) with the following convention:
+
+  - if a given component is `-1` it means the relevant element is disconnected
+  - if a given component is `1` it means the element of the grid represented by this component is connected to "busbar 1"
+  - if a given component is `2` it means the element of the grid is connected to "busbar 2"
+  - etc. (for all `k >= 1`, if a given component is `k` then it means the relevant element of the grid is connected to busbar `k`)
+  - the component can never be `<= -2` nor `0`
+
+This "topology vector" can change depending on the state of the grid.
+
+Another "fixed" / "constant" / "immutable" piece of information is needed to retrieve the
+"topology" of the grid. It concerns the mapping between each element of
+the grid and the "substation" to which it is "connected".
+
+.. note::
+    The same word "connected" used here means two different things.
+
+    Being "connected to a substation" is independent of the status "connected / disconnected"
+    of an element.
+
+    Let's suppose the city of Nowhere is modeled by a load in the grid:
+
+    - "*Nowhere is connected to substation 5*" means that
+      the powergrid is made in such a way that the transformer
+      that powers the city of "Nowhere" is in a physical location that is called "substation 5".
+      It can never be "disconnected" from substation 5 (this would mean the city ceased
+      to exist) nor can it be "connected to substation 1 [or 2, or 3, or 4, etc.]"
+      (this would mean this city magically changed its geographical location and was
+      moved by a few hundred miles / km)
+    - "*Nowhere is disconnected*" means that the transformer
+      powering the city of Nowhere is switched off (blackout in this city)
+    - "*Nowhere is connected to busbar 1*" means that
+      within the "substation 5" there is an object called "busbar 1" and that
+      there is a "direct electrical path" (made of all closed switches) that
+      connects the transformer of the city of Nowhere to this "busbar 1"
+
+.. note::
+    The mapping between each object and the substation to which it is connected
+    does not change. This is why it is not stored in the topology vector.
+
+This mapping is loaded once and for all from the grid file by the "backend" at the
+creation of the environment.
+
+With both these pieces of information the "nodal topology" can be computed as follows:
+
+- if an object is disconnected (the associated component of the topology vector is `-1`)
+  it is not connected (no kidding...) and can be omitted when building the graph
+- if two objects `o_i` and `o_j` are not "connected to the same substation" they
+  are not connected to the same vertex of the graph.
+- if two objects `o_i` and `o_j` are "connected to the same substation" they are + part of the same "electrical node" (also called bus) if (and only if) + the associated component + of the "topoolgy vector" has the same integer. For example if the component + of the topology vector for `o_i` is 2 and the component for `o_j` is 1 + they are NOT connected together. But if its component is 3 for `o_i` + and 3 for `o_j` they are connected together. + +.. note:: + As of writing, if a load or a generator is disconnected, there is a "game over". + + +Why the "switches" are not modled by default +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +For a grid modeling with switches, you can consult the dedicated +page :ref:`detailed-topology-modeling-module` of the grid2op +package. + + +.. warning:: + Doc in progress... + + +Switches are not in most synthetic grids +++++++++++++++++++++++++++++++++++++++++ + +There are no switches in most IEEE test cases which serve as reference +for most of grid2op environment and are widely used in the literature. +Forcing switches in grid2op would mean inventing them on these grid, which is +not necessary. When creating an open source environment, it would be +mandatory to come up with a layout for each substation of the +fictive grid. And there are many different "substation layout" possible ( +see *eg* https://www.technomaxme.com/understanding-busbar-systems/ ) + + +Switches will not make the problem more realistic ++++++++++++++++++++++++++++++++++++++++++++++++++++ + +Switches information is too complicated to be manipulated correctly if we +consider time dependant states.Switches would also make the rules much more difficult to +implement. For example, in real time, some breakers can be opened / closed +while under charge but some other might not. This means an agent that would +operate the grid would have to anticipate to "pre configure" the switches +"before" real time if it wants to adopt this and that. We believe that this +is too complicated for an agent to do yet [TODO more info about that needed] + +Closer to human reasoning ++++++++++++++++++++++++++++ + +As for our experience, human operators do not think in terms of opening / closing +switches. The first target a given "topology": these two elements connected together, +these other three also, but not with the previous ones etc. And then they +use their expertise to find a combination of breakers which match what +they want to achieve. We believe that the added value of AI is greater in the +first step (find the good nodal topology) so we decided to entirely skip the second +one (which, we think, can be solved by optimization routines or heuristics) + +Smaller action space ++++++++++++++++++++++ + +The problem we expose in grid2op is far from being solved (to our knowledge). And +we believe that making multiple consecutive small steps into the right direction is better than +modeling every bit of complexity of the "real" problem and then find a solution +to this really hard problem. Removing switches is a way to reduce the action space. Indeed, +if you consider the "grid2op standard" : "*maximum 2 independant buses per substation*" and +a substation with 4 elements. 
You need: + +- an action space of **4 bits** with current grid2op modeling + (one bit per elements) +- whereas you would need to "build" the substation layout, for example: + you create two busbars (one for each independant buses), then + one switch connecting each of the 4 elements to both busbars plus possibly a + breaker between both busbars. Making **9 switches** / breakers in total. + +.. note:: + Both type of action spaces would represent the same reality. This means that + in the second case lots of "possible action" would be ambiguous or lead finally + to the "do nothing" action, which is not ideal. + +In this case, adding switches would more than double (in this case) the size of the action space +(4 btis without, 9 bits with them). + +Simpler action and observaton spaces ++++++++++++++++++++++++++++++++++++++ + +One of the main issue with "topology" is that the same topology can be encoded differently. + +With the proposed grid2op encoding this problem is not totally solved: the symmetry still exists. +However it is drastically reduced from the symmetry there would have when manipulating directly +the switches. + +Let's take again our example with a substation of 4 elements. For the "fully connected" topology, +the grid2op encoding can be either [1, 1, 1, 1] or [2, 2, 2, 2] which makes 2 solutions. + +With the substation layout detailed in the paragraph `Smaller action space`_ it can be encoding with: + +- [[1, 0], [1, 0], [1, 0], [1, 0], 0] : every element connected to busbar 1 and the busbar coupler between busbar 1 and 2 opened +- [[0, 1], [0, 1], [0, 1], [0, 1], 0] : every element connected to busbar 2 and the busbar coupler between busbar 1 and 2 opened +- [[1, 0], [1, 0], [1, 0], [1, 0], 1] : every element connected to busbar 1 and the busbar coupler between busbar 1 and 2 closed +- [[0, 1], [0, 1], [0, 1], [0, 1], 1] : every element connected to busbar 2 and the busbar coupler between busbar 1 and 2 closed +- [[1, 0], [0, 1], [0, 1], [0, 1], 1] : first element connected to busbar 1, all others to busbar 2 + and the busbar coupler between busbar 1 and 2 closed +- [[0, 0], [1, 1], [0, 1], [0, 1], 1] : second element connected to busbar 1, all others to busbar 2 + and the busbar coupler between busbar 1 and 2 closed +- ... + +Basically, as long at the busbar coupler between busbar 1 and busbar 2 is closed, you can connect every element to every +busbar and end-up with a valid encoding of the topology "fully connected". + +In this representation, you have 2 + 2**4 = 18 possible "valid" encoding of the same "fully connected" topology. + +.. note:: + We only count here "valid" topology, in the sense that an element is either connected to busbar 1 or busbar 2 + but not to both at the same time. But in fact it would be perfectly fine to connect and object to + both busbar as long as the busbar coupler is closed (for each element this lead to 3 possible combination) + + There would be not 2**4 but 4**3 = 128 encoding of this "fully connected" topology. + + In general it is considered a good practice to chose a reprensentation that is as explicit and "unique" + as possible. + +Switches make the solver slightly slower ++++++++++++++++++++++++++++++++++++++++++ + +The switches information is also a reprensentation of the topology that is not the one used by the solver. + +At some point, any solver will have to compute a (sparse) matrices and a (dense) vetor to represent +the physical laws. 
These are often computed by first reducing the "switches state" to the "nodal topology"
+and then converting this graph to the proper matrix and vector.
+
+By passing directly the "nodal topology" it is faster (for some solvers at least) as the initial pre-processing
+of the switch state into the "graph" does not need to be performed.
+
+.. note::
+    And this is why it is relatively hard for some "solvers" to be used as a backend.
+
+    Some solvers can only manipulate switches. In order to match the grid2op representation,
+    it is then required to cast the "nodal topology" of grid2op to a switch state
+    (which is for now HARD and slow), then pass these switches to the "solver".
+
+    Afterwards, the "solver" will then run its internal routine (often really fast)
+    to retrieve the "nodal topology"
+    of the grid (what the agent wanted to get) from the switch state.
+
+
+It is easy to compute the grid2op representation from the switches
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+This is done internally by all solvers (pandapower when using switches but also all solvers we
+know of) at the initial stage of running a powerflow and is relatively easy. Some graph
+algorithms, *eg* BFS (Breadth First Search), allow to quickly compute the "grid2op representation"
+from the state of the switches.
+
+This means that an agent can have full access to the switches, manipulate them and at the end
+inform grid2op about the "grid2op topology" without too much trouble.
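+
+As a rough, standalone illustration (this is **not** grid2op code: the function and all the names
+below are made up for this example), such a "switches to nodal topology" reduction boils down to
+computing the connected components of the graph formed by the closed switches, for example with a BFS:
+
+.. code-block:: python
+
+    from collections import defaultdict, deque
+
+    def nodal_topology(elements, busbars, closed_switches):
+        """Toy example: group the objects of one substation into "electrical nodes"
+        given the switches that are currently closed (open switches are simply not listed)."""
+        # undirected graph made by the closed switches
+        graph = defaultdict(set)
+        for node_a, node_b in closed_switches:
+            graph[node_a].add(node_b)
+            graph[node_b].add(node_a)
+
+        # BFS to label each connected component with an integer ("bus id")
+        bus_of = {}
+        bus_id = 0
+        for start in list(elements) + list(busbars):
+            if start in bus_of:
+                continue
+            bus_id += 1
+            bus_of[start] = bus_id
+            queue = deque([start])
+            while queue:
+                current = queue.popleft()
+                for neighbor in graph[current]:
+                    if neighbor not in bus_of:
+                        bus_of[neighbor] = bus_id
+                        queue.append(neighbor)
+
+        # only the elements matter for the grid2op encoding, not the busbars themselves
+        return {el: bus_of[el] for el in elements}
+
+    # toy substation: 4 elements, 2 busbars, coupler opened,
+    # elements 0 and 1 on busbar 1, elements 2 and 3 on busbar 2
+    elements = ["el_0", "el_1", "el_2", "el_3"]
+    busbars = ["bb_1", "bb_2"]
+    closed = [("el_0", "bb_1"), ("el_1", "bb_1"), ("el_2", "bb_2"), ("el_3", "bb_2")]
+    print(nodal_topology(elements, busbars, closed))
+    # {'el_0': 1, 'el_1': 1, 'el_2': 2, 'el_3': 2}
+
+Note that the bus labels produced this way are arbitrary integers: exactly like the grid2op
+encoding, where [1, 1, 2, 2] and [2, 2, 1, 1] describe the same physical topology.
+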
+If we had modeled the switches "by default" it would mean that an agent that wants to "do like the human"
+(*ie* target a nodal topology) would then need to find some "switch states" that match the
+representation it targets. So an agent would have to do two things, instead of just one.
+
+.. danger::
+    To be honest, it also means that the current grid2op representation is not entirely "consistent".
+
+    For some real grids, with some given substation layouts, an agent could target a topology that is
+    not feasible: there does not exist a switch state that can represent this topology.
+
+    This is currently a problem for real time grid operations. But we believe that a "routine"
+    (heuristic or optimization based) can be used to detect such cases.
+    This routine is yet to be implemented (it is not on our priority list). The first step
+    (in our opinion) is to make a "proof of concept" that something can work. So basically
+    showing that a "target nodal topology" can be found.
+
+    In a second stage, when things will be closer to a production context, we will think
+    about this second step.
+
+How it is accessible in grid2op
+---------------------------------
+
+.. warning::
+    Doc in progress
+
+The "topo_vect" vector
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. warning::
+    Doc in progress
+
+In the observation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. warning::
+    Doc in progress
+
+In the action
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. warning::
+    Doc in progress
+
+.. include:: final.rst
+
\ No newline at end of file
diff --git a/docs/troubleshoot.rst b/docs/troubleshoot.rst
new file mode 100644
index 000000000..d07539fda
--- /dev/null
+++ b/docs/troubleshoot.rst
@@ -0,0 +1,190 @@
+
+.. _troubleshoot_page:
+
+Known issues and workarounds
+===============================
+
+
+In this section we detail the most common questions we get regarding grid2op and how to
+best solve them (when we are aware of such a way...)
+
+.. _troubleshoot_pickle:
+
+Pickle issues
+--------------------------
+
+The most common (and oldest) issue regarding grid2op is its interaction with the `pickle` module
+in python.
+
+This module is used internally by the `multiprocessing` module and many others.
+
+By default (and "by design") grid2op will create the classes when an environment
+is loaded. You can notice it like this:
+
+.. code-block:: python
+
+    import grid2op
+
+    env_name = "l2rpn_case14_sandbox"
+    env = grid2op.make(env_name)
+
+    print(type(env))
+
+This will show something like `Environment_l2rpn_case14_sandbox`. This means that
+not only the object `env` is created when you call `grid2op.make` but also
+the class that `env` belongs to (in this case `Environment_l2rpn_case14_sandbox`).
+
+.. note::
+    We decided to adopt this design so that the powergrid representation in grid2op
+    is not copied and can be accessed pretty easily from pretty much every object.
+
+    For example you can call `env.n_gen`, `type(env).n_gen`, `env.backend.n_gen`,
+    `type(env.backend).n_gen`, `obs.n_gen`, `type(obs).n_gen`, `act.n_gen`,
+    `type(act).n_gen`, `env.observation_space.n_gen`, `type(env.observation_space).n_gen`
+    well... you get the idea
+
+    But allowing so makes it "hard" for python to understand how to transfer objects
+    from one "process" to another or to save / restore them (indeed, python does not
+    save the entire class definition, it only saves the class names).
+
+This type of issue takes the form of errors such as:
+
+- `XXX_env_name` (*eg* `CompleteObservation_l2rpn_wcci_2022`) is not serializable.
+- `_pickle.PicklingError`: Can't pickle : attribute lookup _ObsEnv_l2rpn_case14_sandbox on abc failed
+
+Automatic 'class_in_file'
++++++++++++++++++++++++++++
+
+To solve this issue, starting from grid2op 1.10 we are introducing some ways
+to get around this automatically. It will be integrated incrementally to make
+sure not to break any previous code.
+
+The main idea is that grid2op will define the classes as it used to (no change there)
+but instead of keeping them "in memory" it will write them on the hard drive (in
+a folder within the environment data) each time an environment is created.
+
+This way, when pickle or multiprocessing attempts to load the environment class,
+it will be able to because the files are stored on the hard drive.
+
+There are some drawbacks of course. The main one being that creating an environment
+can take a bit more time (especially if you have slow I/O). It will also use
+a bit of disk space (a few kB so nothing to worry about).
+
+For now we have tested it on multi processing and it gives promising results.
+
+**TL;DR**: Enable this feature by calling `grid2op.make(env_name, class_in_file=True)` and you're good to go.
+
+To enable this, you can:
+
+- define a default behaviour by editing the `~/.grid2opconfig.json` global configuration file
+- define the environment variable `grid2op_class_in_file` **BEFORE** importing grid2op
+- use the kwarg `class_in_file` when calling the `grid2op.make` function
+.. note::
+    In case of "conflicting" instructions grid2op will do the following:
+
+    - if `class_in_file` is provided in the call to `grid2op.make(...)` it will use this and ignore everything else
+    - (else) if the environment variable `grid2op_class_in_file` is defined, grid2op will use it
+    - (else) if the configuration file is present and the key `class_in_file` is there, grid2op will
+      use it
+    - (else) it will use its default behaviour, which (as of writing, grid2op 1.10.3) is to **DEACTIVATE**
+      this feature (in the near future the default will change and it will be activated by default)
+
+For example:
+
+The file `~/.grid2opconfig.json` can look like:
+
+.. code-block:: json
+
+    {
+        "class_in_file" : false
+    }
+
+or
+
+.. code-block:: json
+
+    {
+        "class_in_file" : true
+    }
+
+If you prefer to work with environment variables, we recommend you do something like:
+
+.. code-block:: python
+
+    import os
+
+    os.environ["grid2op_class_in_file"] = "true"  # or "false" if you want to disable it
+
+    import grid2op
+
+And if you prefer to use it directly in the `grid2op.make(...)` function, you can do it with:
+
+.. code-block:: python
+
+    import grid2op
+    env_name = "l2rpn_case14_sandbox"
+    env = grid2op.make(env_name, class_in_file=True)  # or `class_in_file=False`
+
+
+If you want to know if your environment has used this new feature, you can check with:
+
+.. code-block:: python
+
+    import grid2op
+    env = grid2op.make(...)
+    print(env.classes_are_in_files())
+
+.. danger::
+    If you use this, make sure (for now) that the original grid2op environment that you have created
+    is not deleted. If it is, then the folder containing the class definitions will be
+    removed and you might not be able to work with grid2op correctly.
+
+
+Experimental `read_from_local_dir`
++++++++++++++++++++++++++++++++++++
+
+Before grid2op 1.10.3 the only way to get around the pickle / multiprocessing issue was a "two stage" process:
+you first had to tell grid2op to generate the classes and then to tell it to use them in all future environments.
+
+This had the drawback that if you changed the backend classes, or the observation classes or the
+action classes, you needed to start the whole process again. And it was manual, so you might have ended up
+doing some unintended actions which could create some "silent bugs" (the worst kind, like for example
+not using the right class...)
+
+To do it you first needed to call, once (as long as you did not change backend class or observation or action etc.)
+in a **SEPARATE** python script:
+
+.. code-block:: python
+
+    import grid2op
+    env_name = "l2rpn_case14_sandbox"  # or any other name
+
+    env = grid2op.make(env_name, ...)  # again: redo this step each time you customize "..."
+    # for example if you change the `action_class` or the `backend` etc.
+
+    env.generate_classes()
+
+
+And then, in another script, the main one you want to use:
+
+.. code-block:: python
+
+    import grid2op
+    env_name = SAME NAME AS ABOVE
+    env = grid2op.make(env_name,
+                       experimental_read_from_local_dir=True,
+                       SAME ENV CUSTOMIZATION AS ABOVE)
+
+As of grid2op 1.10.3 this process can be done automatically (not without some drawbacks, see above).
It might +interact in a weird (and unpredictable) way with the `class_in_file` so we would recommend to use one **OR** +(exclusive OR, XOR for the mathematicians) the other but avoid mixing the two: + +- either use `grid2op.make(..., class_in_file=True)` +- or use `grid2op.make(..., experimental_read_from_local_dir=True)` + +Thus we **DO NOT** recommend to use something like +`grid2op.make(..., experimental_read_from_local_dir=True, class_in_file=True)` + + +.. include:: final.rst diff --git a/docs/user.rst b/docs/user.rst new file mode 100644 index 000000000..d1b715423 --- /dev/null +++ b/docs/user.rst @@ -0,0 +1,24 @@ +.. toctree:: + :maxdepth: 1 + + user/action + user/agent + user/backend + user/chronics + user/converter + user/environment + user/episode + user/exception + user/observation + user/opponent + user/parameters + user/reward + user/rules + user/runner + user/simulator + user/space + user/timeserie_handlers + user/utils + user/voltagecontroler + +.. include:: final.rst diff --git a/docs/action.rst b/docs/user/action.rst similarity index 91% rename from docs/action.rst rename to docs/user/action.rst index a370d4d8b..b2c842f50 100644 --- a/docs/action.rst +++ b/docs/user/action.rst @@ -29,6 +29,11 @@ Action =================================== +This page is organized as follow: + +.. contents:: Table of Contents + :depth: 3 + Objectives ---------- The "Action" module lets you define some actions on the underlying power _grid. @@ -85,7 +90,7 @@ you want to perform on the grid. For more information you can consult the help o To avoid extremely verbose things, as of grid2op 1.5.0, we introduced some convenience functions to allow easier action construction. You can now do `act.load_set_bus = ...` instead of the previously way -more verbose `act.update({"set_bus": {"loads_id": ...}}` +more verbose `act.update({"set_bus": {"loads_id": ...}})` .. _action-module-examples: @@ -411,7 +416,7 @@ As we explained in the previous paragraph, some action on one end of a powerline powerline or disconnect it. This means they modify the bus of **both** the extremity of the powerline. Here is a table summarizing how the buses are impacted. We denoted by "`PREVIOUS_OR`" the last bus at which -the origin end of the powerline was connected and "`PREVIOUS_EX`" the last bus at which the extremity end of the +the origin side of the powerline was connected and "`PREVIOUS_EX`" the last bus at which the extremity side of the powerline was connected. Note that for clarity when something is not modified by the action we decided to write on the table "not modified" (this entails that after this action, if the powerline is connected then "new origin bus" is "`PREVIOUS_OR`" and "new extremity bus" is "`PREVIOUS_EX`"). We remind the reader that "-1" encode for a @@ -448,8 +453,35 @@ Easier actions manipulation ---------------------------- The action class presented here can be quite complex to apprehend, especially for a machine learning algorithm. -It is possible to use the :class:`grid2op.Converter` class for such purpose. You can have a look at the dedicated -documentation. +Grid2op offers some more "convient" manipulation of the powergrid by transforming this rather "descriptive" +action formulation to "action_space" that are compatible with Farama Fundation Gymnasium package ( +package that was formerly "openAI gym"). 
+
+This includes:
+
+- :class:`grid2op.gym_compat.GymActionSpace` which "represents" actions as a gymnasium
+  `Dict `_
+- :class:`grid2op.gym_compat.BoxGymActSpace` which represents actions as a gymnasium
+  `Box `_
+  (actions are numpy arrays). This is especially suited for continuous attributes
+  such as redispatching, storage or curtailment.
+- :class:`grid2op.gym_compat.DiscreteActSpace` which represents actions as a gymnasium
+  `Discrete `_
+  (actions are integers). This is especially suited for discrete actions such as
+  setting line status or topologies at substations.
+- :class:`grid2op.gym_compat.MultiDiscreteActSpace` which represents actions as a gymnasium
+  `MultiDiscrete `_
+  (actions are integers). This is also especially suited for discrete actions such as
+  setting line status or topologies at substations.
+
+.. note::
+  The main difference between :class:`grid2op.gym_compat.DiscreteActSpace` and
+  :class:`grid2op.gym_compat.MultiDiscreteActSpace` is that Discrete actions will
+  allow the agent to perform only one type of action at each step (either it performs
+  redispatching on one generator OR on another generator OR it sets the status of a powerline
+  OR it sets the topology of one substation etc. but it cannot "perform redispatching on
+  2 or more generators" nor can it "perform redispatching on one generator AND disconnect a powerline")
+  which can be rather limited for some applications.
 
 
 Detailed Documentation by class
diff --git a/docs/agent.rst b/docs/user/agent.rst
similarity index 100%
rename from docs/agent.rst
rename to docs/user/agent.rst
diff --git a/docs/backend.rst b/docs/user/backend.rst
similarity index 55%
rename from docs/backend.rst
rename to docs/user/backend.rst
index d4e666861..1d52adccd 100644
--- a/docs/backend.rst
+++ b/docs/user/backend.rst
@@ -1,4 +1,5 @@
 .. currentmodule:: grid2op.Backend
+
 .. _backend-module:
 
 Backend
@@ -22,9 +23,39 @@ Objectives
 Both can serve as example if you want to code a new backend.
 This Module defines the template of a backend class.
-Backend instances are responsible to translate action (performed either by an BaseAgent or by the Environment) into
-comprehensive powergrid modifications.
-They are responsible to perform the powerflow (AC or DC) computation.
+
+Backend instances are responsible for translating actions into
+comprehensive powergrid modifications that can be processed by your "Simulator".
+The simulator is responsible for performing the powerflow (AC or DC or Time Domain / Dynamic / Transient simulation)
+and for "translating back" the results (of the simulation) to grid2op.
+
+More precisely, a backend should:
+
+#. inform grid2op of the grid: which objects exist, where they are connected etc.
+#. be able to process an object of type :class:`grid2op.Action._backendAction._BackendAction`
+   into some modification to your solver (*NB* these "BackendAction" are created by the :class:`grid2op.Environment.BaseEnv`
+   from the agent's actions, the time series modifications, the maintenances, the opponent, etc. The backend **is not**
+   responsible for their creation)
+#. be able to run a simulation (DC powerflow, AC powerflow or time domain / transient / dynamic)
+#. expose (through some functions like :func:`Backend.generators_info` or :func:`Backend.loads_info`)
+   the state of some of the elements in the grid.
+
+.. note::
+  A backend can model more elements than what can be controlled or modified in grid2op.
+  For example, at time of writing, grid2op does not allow the modification of HVDC powerlines.
+  But this does not mean that grid2op will not work if your grid contains such devices.
+  It just means that grid2op will not be responsible for modifying them.
+
+.. note::
+  A backend can expose only part of the grid to the environment / agent. For example, if you
+  give it as input a pan-european grid but only want to study the grid of the Netherlands or
+  France, your backend can "inform" grid2op (in the :func:`Backend.load_grid` function)
+  that "only the Dutch (or French) grid" exists and leave out all other information.
+
+  In this case grid2op will work perfectly, agents and environment will work as expected and be
+  able to control the Dutch (or French) part of the grid and your backend implementation
+  can control the rest (by directly updating the state of the solver).
 
 It is also through the backend that some quantities about the powergrid (such as the flows) can be
 inspected.
@@ -57,6 +88,9 @@ We developed a dedicated page for the development of new "Backend" compatible wi
 Detailed Documentation by class
 -------------------------------
+
+Then the `Backend` module:
+
 .. automodule:: grid2op.Backend
     :members:
     :private-members:
diff --git a/docs/user/chronics.rst b/docs/user/chronics.rst
new file mode 100644
index 000000000..1557ab07f
--- /dev/null
+++ b/docs/user/chronics.rst
@@ -0,0 +1,236 @@
+.. currentmodule:: grid2op.Chronics
+
+.. _time-series-module:
+
+Time series (formerly called "chronics")
+=========================================
+
+This page is organized as follow:
+
+.. contents:: Table of Contents
+    :depth: 3
+
+Objectives
+-----------
+This module is present to handle everything related to input data that are not structural.
+
+In the Grid2Op vocabulary a "GridValue" or "Chronics" is something that provides data to change the input parameters
+of a power flow from one time step to the next.
+
+It is a more generic terminology. Modifications that can be performed by a :class:`GridValue` object include, but
+are not limited to:
+
+  - injections such as:
+
+    - generators active production setpoint
+    - generators voltage setpoint
+    - loads active consumption
+    - loads reactive consumption
+
+  - structural information such as:
+
+    - planned outage: powerline disconnection anticipated in advance
+    - hazards: powerline disconnection that cannot be anticipated, for example due to a windstorm.
+
+All powergrid modifications that can be performed using a :class:`grid2op.Action.BaseAction` can be implemented in the
+form of a :class:`GridValue`.
+
+The same mechanism as for :class:`grid2op.Action.BaseAction` or :class:`grid2op.Observation.BaseObservation`
+is pursued here. All state modifications made by the :class:`grid2op.Environment` must derive from
+the :class:`GridValue`. It is not recommended to create them directly, but rather to use
+the :class:`ChronicsHandler` for such a purpose.
+
+Note that the values returned by a :class:`GridValue` are **backend dependent**. A GridValue object should always
+return the data in the order expected by the :class:`grid2op.Backend`, regardless of the order in which data are given
+in the files or generated by the data generation process.
+
+This implies that changing the backend will change the output of :class:`GridValue`. More information about this
+is given in the description of the :func:`GridValue.initialize` method.
+
+Finally, a feature of grid2op compared to other Reinforcement Learning problems is the possibility to use "forecasts".
+This optional feature can be accessed via the :class:`grid2op.Observation.BaseObservation` and mainly the
+:func:`grid2op.Observation.BaseObservation.simulate` method. The data that are used to generate these forecasts
+come from the :class:`grid2op.GridValue` and are detailed in the
+:func:`GridValue.forecasts` method.
+
+
+More control on the time series
+-------------------------------
+We explained, in the description of the :class:`grid2op.Environment` in sections
+:ref:`environment-module-chronics-info` and following, how to have more control on which chronics is used,
+which steps are used within a chronics etc. We will not detail it again here, please refer to this page
+for more information.
+
+However, know that you can have a very detailed control on which time series is used thanks to the `options`
+kwarg of a call to `env.reset()` (or the `reset_options` kwarg when calling
+`runner.run()`):
+
+
+Use a specific time serie for an episode
+*******************************************
+
+To use a specific time series for a given episode, you can use
+`env.reset(options={"time serie id": THE_ID_YOU_WANT})`.
+
+For example:
+
+.. code-block:: python
+
+    import grid2op
+    env_name = "l2rpn_case14_sandbox"
+    env = grid2op.make(env_name)
+
+    # you can use an int:
+    obs = env.reset(options={"time serie id": 0})
+
+    # or the name of the folder (for most grid2op environment)
+    obs = env.reset(options={"time serie id": "0000"})  # for l2rpn_case14_sandbox
+
+    # for say l2rpn_neurips_2020_track1
+    # obs = env.reset(options={"time serie id": "Scenario_august_008"})
+
+    # for say l2rpn_idf_2023
+    # obs = env.reset(options={"time serie id": "2035-04-23_7"})
+
+
+.. note::
+    For oldest grid2op versions (please upgrade if that's the case) you needed to use:
+    `env.set_id(THE_CHRONIC_ID)` (see :func:`grid2op.Environment.Environment.set_id`) to set the id of the
+    chronics you want to use.
+
+
+Skipping the initial few steps
+*******************************
+
+Often the time series provided for an environment always start at the same date and time,
+on the same hour of the day and day of the week. It might not be ideal to learn a controller
+with such data and it might "burn up" computation time during evaluation.
+
+To skip some initial steps, you can use the `"init ts"` reset option, for example with:
+
+.. code-block:: python
+
+    import grid2op
+    env_name = "l2rpn_case14_sandbox"
+    env = grid2op.make(env_name)
+
+    # you can use an int:
+    obs = env.reset(options={"init ts": 12})
+
+    # obs will skip the first hour of the time series
+    # 12 steps is equivalent to 1h (5 mins per step in general)
+
+
+.. note::
+
+    For oldest grid2op versions (please upgrade if that's the case) you needed to use:
+    `env.fast_forward_chronics(nb_time_steps)`
+    (see :func:`grid2op.Environment.BaseEnv.fast_forward_chronics`) to skip the initial
+    few steps of a given chronics.
+
+    Please be aware that this "legacy" behaviour has some issues and is "less clear"
+    than the "init ts" above and it can have some weird combinations with
+    `set_max_iter` for example.
+
+
+Limit the maximum length of the current episode
+*************************************************
+
+For most environments, the maximum duration of an episode is the equivalent of a week
+(~2020 steps) or a month (~8100 steps) which might be too long for some use cases.
+
+Anyway, if you want to reduce it, you can now do it with the `"max step"` reset
+option like this:
+
+.. 
code-block:: python + + import grid2op + env_name = "l2rpn_case14_sandbox" + env = grid2op.make(env_name) + + # you can use an int: + obs = env.reset(options={"max step": 2*288}) + + # the maximum duration of the episode is now 2*288 steps + # the equivalent of two days + +.. note:: + + For oldest grid2op versions (please upgrade if that's the case) you needed to use: + `env.chronics_handler.set_max_iter(nb_max_iter)` + (see :func:`grid2op.Chronics.ChronicsHandler.set_max_iter`) to limit the number + of steps within an episode. + + Please be aware that this "legacy" behaviour has some issues and is "less clear" + than the "init ts" above and it can have some weird combination with + `fast_forward_chronics` for example. + +Discard some time series from the existing folder +************************************************** + +The folder containing the time series for a given grid2op environment often contains +dozens (thousands sometimes) different time series. + +You might want to use only part of them at some point (whether it's some for training and some +for validation and test, or some for training an agent on a process and some to train the +same agent on another process etc.) + +Anyway, if you want to do this (on the majority of released environments) you can do it +thanks to the `env.chronics_handler.set_filter(a_function)`. + +For example: + +.. code-block:: python + + import re + import grid2op + env_name = "l2rpn_case14_sandbox" + env = grid2op.make(env_name) + + def keep_only_some_ep(chron_name): + return re.match(r".*00.*", chron_name) is not None + + env.chronics_handler.set_filter(keep_only_some_ep) + li_episode_kept = env.chronics_handler.reset() + + +.. note:: + For oldest grid2op versions (please upgrade if that's the case) you needed to use: + use `env.chronics_handler.set_filter(a_function)` (see :func:`grid2op.Chronics.GridValue.set_filter`) + to only use certain chronics + + +- use `env.chronics_handler.sample_next_chronics(probas)` + (see :func:`grid2op.Chronics.GridValue.sample_next_chronics`) to draw at random some chronics + +Performance gain (throughput) +******************************** + +Chosing the right chronics can also lead to some large advantage in terms of computation time. This is +particularly true if you want to benefit the most from HPC for example. More detailed is given in the +:ref:`environment-module-data-pipeline` section. In summary: + +- set the "chunk" size (amount of data read from the disk, instead of reading an entire scenarios, you read + from the hard drive only a certain amount of data at a time, see + :func:`grid2op.Chronics.ChronicsHandler.set_chunk_size`) you can use it with + `env.chronics_handler.set_chunk_size(100)` +- cache all the chronics and use them from memory (instead of reading them from the hard drive, see + :class:`grid2op.Chronics.MultifolderWithCache`) you can do this with + `env = grid2op.make(..., chronics_class=MultifolderWithCache)` + +Finally, if you need to study machine learning in a "regular" fashion, with a train / validation / set +you can use the `env.train_val_split` or `env.train_val_split_random` functions to do that. See +an example usage in the section :ref:`environment-module-train-val-test`. + + + + +Detailed Documentation by class +-------------------------------- + +.. automodule:: grid2op.Chronics + :members: + :autosummary: + +.. 
include:: final.rst diff --git a/docs/converter.rst b/docs/user/converter.rst similarity index 100% rename from docs/converter.rst rename to docs/user/converter.rst diff --git a/docs/environment.rst b/docs/user/environment.rst similarity index 92% rename from docs/environment.rst rename to docs/user/environment.rst index 11cac0a59..3b4af59cf 100644 --- a/docs/environment.rst +++ b/docs/user/environment.rst @@ -1,4 +1,5 @@ .. currentmodule:: grid2op.Environment + .. _environment-module: Environment @@ -83,7 +84,7 @@ What happens here is the following: You might want to customize this general behaviour in multiple way: - you might want to study only one chronics (equivalent to only one level of a video game) - see `Study always the same chronics`_ + see `Study always the same time serie`_ - you might want to loop through the chronics, but not always in the same order. If that is the case you might want to consult the section `Shuffle the chronics order`_ - you might also have spotted some chronics that have bad properties. In this case, you can @@ -101,14 +102,66 @@ be equivalent to starting into the "middle" of a video game. If that is the case Finally, you might have noticed that each call to "env.reset" might take a while. This can dramatically increase the training time, especially at the beginning. This is due to the fact that each time `env.reset` is called, the whole chronics is read from the hard drive. If you want to lower this -impact then you might consult the `Optimize the data pipeline`_ section. +impact then you might consult the :ref:`environment-module-data-pipeline` page of the doc. + +Go to the next scenario +++++++++++++++++++++++++ + +Starting grid2op 1.9.8 we attempt to make an easier user experience in the +selection of time series, seed, initial state of the grid, etc. + +All of the above can be done when calling `env.reset()` function. + +For customizing the seed, you can for example do: + +.. code-block:: python + + import grid2op + env_name = "l2rpn_case14_sandbox" + env = grid2op.make(env_name) + + obs = env.reset(seed=0) + +For customizing the time series id you want to use: + +.. code-block:: python + + import grid2op + env_name = "l2rpn_case14_sandbox" + env = grid2op.make(env_name) + + obs = env.reset(options={"time serie id": 1}) # time serie by id (sorted alphabetically) + # or + obs = env.reset(options={"time serie id": "0001"}) # time serie by name (folder name) + +For customizing the initial state of the grid, for example forcing the +powerline 0 to be disconnected in the initial observation: + +.. code-block:: python + + import grid2op + env_name = "l2rpn_case14_sandbox" + env = grid2op.make(env_name) + + init_state_dict = {"set_line_status": [(0, -1)]} + obs = env.reset(options={"init state": init_state_dict}) + + +Feel free to consult the documentation of the :func:`Environment.reset` function +for more information (this doc might be outdated, the one of the function should +be more up to date with the code). + +.. note:: + In the near future (next few releases) we will also attempt to make the + customization of the `parameters` or the `skip number of steps`, + `maximum duration of the scenarios` also available in `env.reset()` options. .. 
_environment-module-chronics-info: -Chronics Customization -+++++++++++++++++++++++ +Time series Customization +++++++++++++++++++++++++++ -Study always the same chronics +Study always the same time serie ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ If you spotted a particularly interesting chronics, or if you want, for some reason your agent to see only one chronics, you can do this rather easily with grid2op. @@ -140,10 +193,15 @@ the call to "env.reset". This gives the following code: # and now the loop starts for i in range(episode_count): ################################### - env.set_id(THE_CHRONIC_ID) + # with recent grid2op + obs = env.reset(options={"time serie id": THE_CHRONIC_ID}) ################################### - obs = env.reset() + ################################### + # 'old method (oldest grid2op version)' + # env.set_id(THE_CHRONIC_ID) + # obs = env.reset() + ################################### # now play the episode as usual while True: diff --git a/docs/episode.rst b/docs/user/episode.rst similarity index 99% rename from docs/episode.rst rename to docs/user/episode.rst index 34bc8453e..9d8be3d8f 100644 --- a/docs/episode.rst +++ b/docs/user/episode.rst @@ -1,5 +1,6 @@ Episode =================================== + This page is organized as follow: .. contents:: Table of Contents diff --git a/docs/exception.rst b/docs/user/exception.rst similarity index 100% rename from docs/exception.rst rename to docs/user/exception.rst diff --git a/docs/user/final.rst b/docs/user/final.rst new file mode 100644 index 000000000..79beb6191 --- /dev/null +++ b/docs/user/final.rst @@ -0,0 +1,2 @@ + +.. include:: ../final.rst \ No newline at end of file diff --git a/docs/observation.rst b/docs/user/observation.rst similarity index 60% rename from docs/observation.rst rename to docs/user/observation.rst index 86bc3baba..05bb35a75 100644 --- a/docs/observation.rst +++ b/docs/user/observation.rst @@ -1,70 +1,7 @@ .. currentmodule:: grid2op.Observation -.. _n_gen: ./space.html#grid2op.Space.GridObjects.n_gen -.. _n_load: ./space.html#grid2op.Space.GridObjects.n_load -.. _n_line: ./space.html#grid2op.Space.GridObjects.n_line -.. _n_sub: ./space.html#grid2op.Space.GridObjects.n_sub -.. _n_storage: ./space.html#grid2op.Space.GridObjects.n_storage -.. _dim_topo: ./space.html#grid2op.Space.GridObjects.dim_topo -.. _dim_alarms: ./space.html#grid2op.Space.GridObjects.dim_alarms -.. _dim_alerts: ./space.html#grid2op.Space.GridObjects.dim_alerts -.. _year: ./observation.html#grid2op.Observation.BaseObservation.year -.. _month: ./observation.html#grid2op.Observation.BaseObservation.month -.. _day: ./observation.html#grid2op.Observation.BaseObservation.day -.. _hour_of_day: ./observation.html#grid2op.Observation.BaseObservation.hour_of_day -.. _minute_of_hour: ./observation.html#grid2op.Observation.BaseObservation.minute_of_hour -.. _day_of_week: ./observation.html#grid2op.Observation.BaseObservation.day_of_week -.. _gen_p: ./observation.html#grid2op.Observation.BaseObservation.gen_p -.. _gen_q: ./observation.html#grid2op.Observation.BaseObservation.gen_q -.. _gen_v: ./observation.html#grid2op.Observation.BaseObservation.gen_v -.. _load_p: ./observation.html#grid2op.Observation.BaseObservation.load_p -.. _load_q: ./observation.html#grid2op.Observation.BaseObservation.load_q -.. _load_v: ./observation.html#grid2op.Observation.BaseObservation.load_v -.. _p_or: ./observation.html#grid2op.Observation.BaseObservation.p_or -.. _q_or: ./observation.html#grid2op.Observation.BaseObservation.q_or -.. 
_v_or: ./observation.html#grid2op.Observation.BaseObservation.v_or -.. _a_or: ./observation.html#grid2op.Observation.BaseObservation.a_or -.. _p_ex: ./observation.html#grid2op.Observation.BaseObservation.p_ex -.. _q_ex: ./observation.html#grid2op.Observation.BaseObservation.q_ex -.. _v_ex: ./observation.html#grid2op.Observation.BaseObservation.v_ex -.. _a_ex: ./observation.html#grid2op.Observation.BaseObservation.a_ex -.. _rho: ./observation.html#grid2op.Observation.BaseObservation.rho -.. _topo_vect: ./observation.html#grid2op.Observation.BaseObservation.topo_vect -.. _line_status: ./observation.html#grid2op.Observation.BaseObservation.line_status -.. _timestep_overflow: ./observation.html#grid2op.Observation.BaseObservation.timestep_overflow -.. _time_before_cooldown_line: ./observation.html#grid2op.Observation.BaseObservation.time_before_cooldown_line -.. _time_before_cooldown_sub: ./observation.html#grid2op.Observation.BaseObservation.time_before_cooldown_sub -.. _time_next_maintenance: ./observation.html#grid2op.Observation.BaseObservation.time_next_maintenance -.. _duration_next_maintenance: ./observation.html#grid2op.Observation.BaseObservation.duration_next_maintenance -.. _target_dispatch: ./observation.html#grid2op.Observation.BaseObservation.target_dispatch -.. _actual_dispatch: ./observation.html#grid2op.Observation.BaseObservation.actual_dispatch -.. _storage_charge: ./observation.html#grid2op.Observation.BaseObservation.storage_charge -.. _storage_power_target: ./observation.html#grid2op.Observation.BaseObservation.storage_power_target -.. _storage_power: ./observation.html#grid2op.Observation.BaseObservation.storage_power -.. _gen_p_before_curtail: ./observation.html#grid2op.Observation.BaseObservation.gen_p_before_curtail -.. _curtailment: ./observation.html#grid2op.Observation.BaseObservation.curtailment -.. _curtailment_limit: ./observation.html#grid2op.Observation.BaseObservation.curtailment_limit -.. _is_alarm_illegal: ./observation.html#grid2op.Observation.BaseObservation.is_alarm_illegal -.. _time_since_last_alarm: ./observation.html#grid2op.Observation.BaseObservation.time_since_last_alarm -.. _last_alarm: ./observation.html#grid2op.Observation.BaseObservation.last_alarm -.. _attention_budget: ./observation.html#grid2op.Observation.BaseObservation.attention_budget -.. _max_step: ./observation.html#grid2op.Observation.BaseObservation.max_step -.. _current_step: ./observation.html#grid2op.Observation.BaseObservation.current_step -.. _delta_time: ./observation.html#grid2op.Observation.BaseObservation.delta_time -.. _gen_margin_up: ./observation.html#grid2op.Observation.BaseObservation.gen_margin_up -.. _gen_margin_down: ./observation.html#grid2op.Observation.BaseObservation.gen_margin_down -.. _curtailment_mw: ./observation.html#grid2op.Observation.BaseObservation.curtailment_mw -.. _theta_or: ./observation.html#grid2op.Observation.BaseObservation.theta_or -.. _theta_ex: ./observation.html#grid2op.Observation.BaseObservation.theta_ex -.. _gen_theta: ./observation.html#grid2op.Observation.BaseObservation.gen_theta -.. _load_theta: ./observation.html#grid2op.Observation.BaseObservation.load_theta -.. _active_alert: ./observation.html#grid2op.Observation.BaseObservation.active_alert -.. _time_since_last_alert: ./observation.html#grid2op.Observation.BaseObservation.time_since_last_alert -.. _alert_duration: ./observation.html#grid2op.Observation.BaseObservation.alert_duration -.. 
_total_number_of_alert: ./observation.html#grid2op.Observation.BaseObservation.total_number_of_alert -.. _time_since_last_attack: ./observation.html#grid2op.Observation.BaseObservation.time_since_last_attack -.. _was_alert_used_after_attack: ./observation.html#grid2op.Observation.BaseObservation.was_alert_used_after_attack -.. _attack_under_alert: ./observation.html#grid2op.Observation.BaseObservation.attack_under_alert +.. include:: special.rst +.. include the observation attributes .. _observation_module: @@ -196,4 +133,4 @@ Detailed Documentation by class :special-members: :autosummary: -.. include:: final.rst \ No newline at end of file +.. include:: final.rst diff --git a/docs/opponent.rst b/docs/user/opponent.rst similarity index 100% rename from docs/opponent.rst rename to docs/user/opponent.rst diff --git a/docs/parameters.rst b/docs/user/parameters.rst similarity index 94% rename from docs/parameters.rst rename to docs/user/parameters.rst index f89ccc78e..727a422e5 100644 --- a/docs/parameters.rst +++ b/docs/user/parameters.rst @@ -1,6 +1,8 @@ +.. _parameters-module: + Parameters =================================== -The challenge "learning to run a power network" offers different _parameters to be customized, or to learn an +The challenge "learning to run a power network" offers different parameters to be customized, or to learn an :class:`grid2op.Agent` that will perform better for example. This class is an attempt to group them all inside one single structure. @@ -10,6 +12,7 @@ come soon. Example -------- + If you want to change the parameters it is better to do it at the creation of the environment. This can be done with: diff --git a/docs/user/reward.rst b/docs/user/reward.rst new file mode 100644 index 000000000..684eaccaf --- /dev/null +++ b/docs/user/reward.rst @@ -0,0 +1,290 @@ +.. currentmodule:: grid2op.Reward + +.. _reward-module: + +Reward +=================================== + +This page is organized as follow: + +.. contents:: Table of Contents + :depth: 3 + +Objectives +----------- +This module implements some utilities to get rewards given an :class:`grid2op.Action` an :class:`grid2op.Environment` +and some associated context (like has there been an error etc.) + +It is possible to modify the reward to use to better suit a training scheme, or to better take into account +some phenomenon by simulating the effect of some :class:`grid2op.Action` using +:func:`grid2op.Observation.BaseObservation.simulate`. + +Doing so only requires to derive the :class:`BaseReward`, and most notably the three abstract methods +:func:`BaseReward.__init__`, :func:`BaseReward.initialize` and :func:`BaseReward.__call__` + +Customization of the reward +----------------------------- + +In grid2op you can customize the reward function / reward kernel used by your agent. By default, when you create an +environment a reward has been specified for you by the creator of the environment and you have nothing to do: + +.. code-block:: python + + import grid2op + env_name = "l2rpn_case14_sandbox" + + env = grid2op.make(env_name) + + obs = env.reset() + an_action = env.action_space() + obs, reward_value, done, info = env.step(an_action) + +The value of the reward function above is computed by a default function that depends on +the environment you are using. For the example above, the "l2rpn_case14_sandbox" environment is +using the :class:`RedispReward`. 
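+
+As a small add-on to the example above (nothing specific to :class:`RedispReward` here, only generic
+grid2op attributes already used elsewhere in this documentation), the value returned by `env.step` is a
+plain float and the environment also advertises the bounds this reward can take through its `reward_range`
+attribute:
+
+.. code-block:: python
+
+    import grid2op
+    env_name = "l2rpn_case14_sandbox"
+
+    env = grid2op.make(env_name)
+    obs = env.reset()
+    an_action = env.action_space()  # the "do nothing" action
+    obs, reward_value, done, info = env.step(an_action)
+
+    # the reward is a simple float...
+    print(reward_value)
+    # ... whose theoretical bounds are exposed by the environment
+    print(env.reward_range)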
+
+Using a reward function available in grid2op
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If you want to customize your environment by adapting the reward and use a reward available in grid2op,
+it is rather simple: you need to specify it in the `make` command:
+
+
+.. code-block:: python
+
+    import grid2op
+    from grid2op.Reward import EpisodeDurationReward
+    env_name = "l2rpn_case14_sandbox"
+
+    env = grid2op.make(env_name, reward_class=EpisodeDurationReward)
+
+    obs = env.reset()
+    an_action = env.action_space()
+    obs, reward_value, done, info = env.step(an_action)
+
+In this example the `reward_value` is computed using the formula defined in the :class:`EpisodeDurationReward`.
+
+.. note::
+    There is no error in the syntax. You need to provide the class and not an object of the class
+    (see next paragraph for more information about that).
+
+At time of writing the available reward functions are:
+
+- :class:`AlarmReward`
+- :class:`AlertReward`
+- :class:`BridgeReward`
+- :class:`CloseToOverflowReward`
+- :class:`ConstantReward`
+- :class:`DistanceReward`
+- :class:`EconomicReward`
+- :class:`EpisodeDurationReward`
+- :class:`FlatReward`
+- :class:`GameplayReward`
+- :class:`IncreasingFlatReward`
+- :class:`L2RPNReward`
+- :class:`LinesCapacityReward`
+- :class:`LinesReconnectedReward`
+- :class:`N1Reward`
+- :class:`RedispReward`
+
+Among the provided rewards you also have some convenience classes to combine different rewards. These are:
+
+- :class:`CombinedReward`
+- :class:`CombinedScaledReward`
+
+Basically these two classes allow you to combine (sum) different rewards into a single one.
+
+Passing an instance instead of a class
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+On some occasions, it might be easier to work with instances of classes (objects)
+rather than to work with classes (especially if you want to customize the implementation used).
+You can do this without any issue:
+
+
+.. code-block:: python
+
+    import grid2op
+    from grid2op.Reward import N1Reward
+    env_name = "l2rpn_case14_sandbox"
+
+    n1_l1_reward = N1Reward(l_id=1)  # this is an object and not a class.
+    env = grid2op.make(env_name, reward_class=n1_l1_reward)
+
+    obs = env.reset()
+    an_action = env.action_space()
+    obs, reward_value, done, info = env.step(an_action)
+
+In this example `reward_value` is computed as being the maximum flow on all the powerlines after
+the disconnection of powerline `1` (because we specified `l_id=1` at creation). If you
+want to know the maximum flow after disconnection of powerline `5` you can call:
+
+.. code-block:: python
+
+    import grid2op
+    from grid2op.Reward import N1Reward
+    env_name = "l2rpn_case14_sandbox"
+
+    n1_l5_reward = N1Reward(l_id=5)  # this is an object and not a class.
+    env = grid2op.make(env_name, reward_class=n1_l5_reward)
+
+Customizing the reward for the "simulate"
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In grid2op, you have the possibility to `simulate` the impact of an action
+on some future steps with the use of `obs.simulate(...)` (see :func:`grid2op.Observation.BaseObservation.simulate`)
+or `obs.get_forecast_env()` (see :func:`grid2op.Observation.BaseObservation.get_forecast_env`).
+
+In these methods you have some computations of rewards. Grid2op allows you to customize how these rewards
+are computed. You can change it in multiple ways:
+
+.. code-block:: python
+
+    import grid2op
+    from grid2op.Reward import EpisodeDurationReward
+    env_name = "l2rpn_case14_sandbox"
+
+    env = grid2op.make(env_name, reward_class=EpisodeDurationReward)
+    obs = env.reset()
+
+    an_action = env.action_space()
+    sim_obs, sim_reward, sim_d, sim_i = obs.simulate(an_action)
+
+By default `sim_reward` is computed with the same function as the environment, in this
+example :class:`EpisodeDurationReward`.
+
+If for some reason you want to customize the formula used to compute `sim_reward` and cannot (or
+do not want to) modify the reward of the environment, you can:
+
+.. code-block:: python
+
+    import grid2op
+    from grid2op.Reward import EpisodeDurationReward
+    env_name = "l2rpn_case14_sandbox"
+
+    env = grid2op.make(env_name)
+    obs = env.reset()
+
+    env.observation_space.change_reward(EpisodeDurationReward)
+    an_action = env.action_space()
+
+    sim_obs, sim_reward, sim_d, sim_i = obs.simulate(an_action)
+    next_obs, reward_value, done, info = env.step(an_action)
+
+In this example, `sim_reward` is computed using the `EpisodeDurationReward` (on forecast data)
+and `reward_value` is computed using the default reward of "l2rpn_case14_sandbox" on the
+"real" time serie data.
+
+Creating a new reward
+~~~~~~~~~~~~~~~~~~~~~~
+
+If you don't find any suitable reward function in grid2op (or in another package) you might
+want to implement one yourself.
+
+To that end, you need to implement a class that derives from :class:`BaseReward`, like this:
+
+.. code-block:: python
+
+    import grid2op
+    from grid2op.Reward import BaseReward
+    from grid2op.Action import BaseAction
+    from grid2op.Environment import BaseEnv
+
+
+    class MyCustomReward(BaseReward):
+        def __init__(self, whatever, you, want, logger=None):
+            self.whatever = blablabla
+            # some code needed
+            ...
+            super().__init__(logger)
+
+        def __call__(self,
+                     action: BaseAction,
+                     env: BaseEnv,
+                     has_error: bool,
+                     is_done: bool,
+                     is_illegal: bool,
+                     is_ambiguous: bool) -> float:
+            # only method really required.
+            # called at each step to compute the reward.
+            # this is where you need to code the "formula" of your reward
+            ...
+
+        def initialize(self, env: BaseEnv):
+            # optional
+            # called once, the first time the reward is used
+            pass
+
+        def reset(self, env: BaseEnv):
+            # optional
+            # called by the environment each time it is "reset"
+            pass
+
+        def close(self):
+            # optional, called once when the environment is deleted
+            pass
+
+
+And then you can use your (custom) reward like any other:
+
+.. code-block:: python
+
+    import grid2op
+    from the_above_script import MyCustomReward
+    env_name = "l2rpn_case14_sandbox"
+
+    custom_reward = MyCustomReward(whatever=1, you=2, want=42)
+    env = grid2op.make(env_name, reward_class=custom_reward)
+    obs = env.reset()
+    an_action = env.action_space()
+    obs, reward_value, done, info = env.step(an_action)
+
+And now `reward_value` is computed using the formula you defined in `__call__`.
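+
+For instance, a minimal reward that only uses the arguments of `__call__` could look like the
+following sketch (`StayAliveReward` is just a name made up for this example; `reward_min` and
+`reward_max` are simply the attribute names conventionally used by the rewards shipped with grid2op):
+
+.. code-block:: python
+
+    from grid2op.Reward import BaseReward
+
+
+    class StayAliveReward(BaseReward):
+        """gives the minimum reward when something went wrong, the maximum otherwise"""
+        def __init__(self, logger=None):
+            super().__init__(logger)
+            self.reward_min = -1.
+            self.reward_max = 1.
+
+        def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous):
+            if has_error or is_illegal or is_ambiguous:
+                # the action could not be implemented properly or led to a game over
+                return self.reward_min
+            return self.reward_max
+
+It can then be passed to `grid2op.make(..., reward_class=StayAliveReward)` exactly like the
+built-in rewards above.
+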
+Training with multiple rewards
+-------------------------------
+In the standard reinforcement learning framework the reward is unique. In grid2op, we didn't want to modify that.
+
+However powergrids are complex environments with some specific and unusual dynamics. For these reasons it can be
+difficult to compress all these signals into one single scalar. To speed up the learning process, to force the
+Agent to adopt more resilient strategies etc. it can be useful to look at different aspects, thus using different
+rewards. Grid2op allows you to do so. At each time step (and also when using the `simulate` function) it is possible
+to compute different rewards. These rewards must inherit from :class:`BaseReward` and be provided at the initialization
+of the Environment.
+
+This can be done as follows:
+
+.. code-block:: python
+
+    import grid2op
+    from grid2op.Reward import GameplayReward, L2RPNReward
+    env = grid2op.make("case14_realistic", reward_class=L2RPNReward, other_rewards={"gameplay": GameplayReward})
+    obs = env.reset()
+    act = env.action_space()  # the do nothing action
+    obs, reward, done, info = env.step(act)  # implement the do nothing action on the environment
+
+In this example, "reward" comes from the :class:`L2RPNReward` and the result of the "reward" computed with the
+:class:`GameplayReward` is accessible with info["rewards"]["gameplay"]. We chose for this example to name the other
+reward "gameplay", which is related to the name of the reward "GameplayReward", for convenience. The name
+can be absolutely any string you want.
+
+
+**NB** In the case of L2RPN competitions, the reward can be modified by the competitors, and so is the "other_rewards"
+keyword argument. The only restriction is that the key "__score" will be used by the organizers to compute the
+score of the agent. Any attempt to modify it will be erased by the score function used by the organizers without any
+warning.
+
+.. _reward-module-reset-focus:
+
+What happens in the "reset"
+------------------------------
+
+TODO
+
+Detailed Documentation by class
+--------------------------------
+.. automodule:: grid2op.Reward
+    :members:
+    :special-members:
+    :autosummary:
+
+.. include:: final.rst
diff --git a/docs/rules.rst b/docs/user/rules.rst
similarity index 99%
rename from docs/rules.rst
rename to docs/user/rules.rst
index 24e7c087e..40ef5ac40 100644
--- a/docs/rules.rst
+++ b/docs/user/rules.rst
@@ -1,5 +1,7 @@
 .. currentmodule:: grid2op.Rules
+.. _rule-module:
+
 Rules of the Game
 ===================================
diff --git a/docs/user/runner.rst b/docs/user/runner.rst
new file mode 100644
index 000000000..2752971cc
--- /dev/null
+++ b/docs/user/runner.rst
@@ -0,0 +1,320 @@
+.. _runner-module:
+
+Runner
+===================================
+
+This page is organized as follow:
+
+.. contents:: Table of Contents
+    :depth: 3
+
+Objectives
+-----------
+The runner class aims at:
+
+i) facilitating the evaluation of the performance of a :class:`grid2op.Agent` by performing automatically the
+   "open ai gym loop" (see below)
+ii) defining a format to store the results of the evaluation of such an agent in a standardized manner
+iii) allowing these "agent logs" to be re-read by third party applications, such as
+     `grid2viz `_, or by internal classes to ease the study of the behaviour of
+     such agents, for example with the classes :class:`grid2op.Episode.EpisodeData` or
+     :class:`grid2op.Episode.EpisodeReplay`
+iv) allowing easy use of parallelization for this assessment.
+
+Basically, the runner simplifies the assessment of the performance of some agent. This is the "usual" gym code to run
+an agent:
+
+.. code-block:: python
+
+    import grid2op
+    from grid2op.Agent import RandomAgent
+    env = grid2op.make("l2rpn_case14_sandbox")
+    agent = RandomAgent(env.action_space)
+    NB_EPISODE = 10  # assess the performance for 10 episodes, for example
+    for i in range(NB_EPISODE):
+        reward = env.reward_range[0]
+        done = False
+        obs = env.reset()
+        while not done:
+            act = agent.act(obs, reward, done)
+            obs, reward, done, info = env.step(act)
+
+The above code does not store anything, cannot be run easily in parallel and is already pretty verbose.
+To have a shorter code, that saves most of
+the data (and makes it easier to integrate with other applications) we can use the runner the following way:
+
+.. code-block:: python
+
+    import grid2op
+    from grid2op.Runner import Runner
+    from grid2op.Agent import RandomAgent
+    env = grid2op.make("l2rpn_case14_sandbox")
+    NB_EPISODE = 10  # assess the performance for 10 episodes, for example
+    NB_CORE = 2  # do it on 2 cores, for example
+    PATH_SAVE = "agents_log"  # and store the results in the "agents_log" folder
+    runner = Runner(**env.get_params_for_runner(), agentClass=RandomAgent)
+    runner.run(nb_episode=NB_EPISODE, nb_process=NB_CORE, path_save=PATH_SAVE)
+
+As we can see, with fewer lines of code, we could execute a parallel assessment of our agent, on 10 episodes,
+and save the results (observations, actions, rewards, etc.) into a dedicated folder.
+
+If your agent is initialized with a custom `__init__` method that takes more than the action space to be built,
+you can also use the Runner pretty easily by passing it an instance of your agent, for example:
+
+.. code-block:: python
+
+    import grid2op
+    from grid2op.Runner import Runner
+    env = grid2op.make("l2rpn_case14_sandbox")
+    NB_EPISODE = 10  # assess the performance for 10 episodes, for example
+    NB_CORE = 2  # do it on 2 cores, for example
+    PATH_SAVE = "agents_log"  # and store the results in the "agents_log" folder
+
+    # initialize your agent
+    my_agent = FancyAgentWithCustomInitialization(env.action_space,
+                                                  env.observation_space,
+                                                  "whatever else you want"
+                                                  )
+
+    # and proceed as follows for the runner
+    runner = Runner(**env.get_params_for_runner(), agentClass=None, agentInstance=my_agent)
+    runner.run(nb_episode=NB_EPISODE, nb_process=NB_CORE, path_save=PATH_SAVE)
+
+Other tools are available for this runner class, for example the easy integration of progress bars. See below for
+more information.
+
+.. _runner-multi-proc-warning:
+
+Note on parallel processing
+----------------------------
+The "Runner" class allows for parallel execution of the same agent on different scenarios. In this case, each
+scenario will be run in an independent process.
+
+Depending on the platform and python version, you might end up with some bugs and errors like
+
+.. pull-quote::
+    AttributeError: Can't get attribute 'ActionSpace_l2rpn_case14_sandbox' on Process SpawnPoolWorker-4:
+
+or like:
+
+.. pull-quote::
+    File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/multiprocessing/pool.py", line 125, in worker
+    result = (True, func(\*args, \*\*kwds))
+
+    File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/multiprocessing/pool.py", line 51, in
+    starmapstar return list(itertools.starmap(args[0], args[1]))
+
+This means grid2op has a hard time dealing with the multi processing part. In that case, it
+is recommended to disable it completely, for example by using, before any call to "runner.run", the following code:
+
+.. 
code-block:: python + + import os + from grid2op.Runner import Runner + + os.environ[Runner.FORCE_SEQUENTIAL] = "1" + +This will force (starting grid2op >= 1.5) grid2op to use the sequential runner and not deal with the added +complexity of multi processing. + +This is especially handy for "windows" system in case of trouble. + +For information, as of writing (march 2021): + +- macOS with python <= 3.7 will behave like any python version on linux +- windows and macOS with python >=3.8 will behave differently than linux but similarly to one another + +Some common runner options: +------------------------------- + +Specify an agent instance and not a class +******************************************* + +By default, if you specify an agent class (*eg* `AgentCLS`), then the runner will initialize it with: + +.. code-block:: python + + agent = AgentCLS(env.action_space) + +But you might want to use agent initialized in a more complex way. To that end, you can customize the +agent instance you want to use (and not only its class) with the following code: + +.. code-block:: python + + import grid2op + from grid2op.Agent import RandomAgent # for example... + from grid2op.Runner import Runner + + env = grid2op.make("l2rpn_case14_sandbox") + + agent_instance = RandomAgent(env.action_space) + runner = Runner(**env.get_params_for_runner(), agentClass=None, agentInstance=agent_instance) + res = runner.run(nb_episode=nn_episode) + +Customize the scenarios +************************** + +You can customize the seeds, the scenarios ID you want, the number of initial steps to skip, the +maximum duration of an episode etc. For more information, please refer to the :func:`Runner.run` +for more information. But basically, you can do: + +.. code-block:: python + + import grid2op + from grid2op.Agent import RandomAgent # for example... + from grid2op.Runner import Runner + + env = grid2op.make("l2rpn_case14_sandbox") + + agent_instance = RandomAgent(env.action_space) + runner = Runner(**env.get_params_for_runner(), agentClass=None, agentInstance=agent_instance) + res = runner.run(nb_episode=nn_episode, + + # nb process to use + nb_process=1, + + # path where the outcome will be saved + path_save=None, + + # max number of steps in an environment + max_iter=None, + + # progress bar to use + pbar=False, + + # seeds to use for the environment + env_seeds=None, + + # seeds to use for the agent + agent_seeds=None, + + # id the time serie to use + episode_id=None, + + # whether to add the outcome (EpisodeData) as a result of this function + add_detailed_output=False, + + # whether to keep track of the number of call to "high resolution simulator" + # (eg obs.simulate or obs.get_forecasted_env) + add_nb_highres_sim=False, + + # which initial state you want the grid to be in + init_states=None, + + # options passed in `env.reset(..., options=XXX)` + reset_options=None, + ) + + +Retrieve what has happened +**************************** + +You can also easily retrieve the :class:`grid2op.Episode.EpisodeData` representing your runs with: + +.. code-block:: python + + import grid2op + from grid2op.Agent import RandomAgent # for example... + from grid2op.Runner import Runner + + env = grid2op.make("l2rpn_case14_sandbox") + + agent_instance = RandomAgent(env.action_space) + runner = Runner(**env.get_params_for_runner(), agentClass=None, agentInstance=agent_instance) + res = runner.run(nb_episode=2, + add_detailed_output=True) + for *_, ep_data in res: + # ep_data are the EpisodeData you can use to do whatever + ... 
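+
+As a small sketch of what can be done with these objects (the attribute names below are the ones
+documented for :class:`grid2op.Episode.EpisodeData`, double check them for your grid2op version),
+you can for example replay the actions the agent took during each episode:
+
+.. code-block:: python
+
+    for *_, ep_data in res:
+        # ep_data.actions stores the action played at each step of this episode
+        for t, act in enumerate(ep_data.actions):
+            # `act` is a regular grid2op action, it can be inspected as usual
+            print(f"step {t}: {act}")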
+ +Save the results +***************** + +You can save the results in a standardized format with: + +.. code-block:: python + + import grid2op + from grid2op.Agent import RandomAgent # for example... + from grid2op.Runner import Runner + + env = grid2op.make("l2rpn_case14_sandbox") + + agent_instance = RandomAgent(env.action_space) + runner = Runner(**env.get_params_for_runner(), + agentClass=None, + agentInstance=agent_instance) + res = runner.run(nb_episode=2, + path_save="A/PATH/SOMEWHERE") # eg "/home/user/you/grid2op_results/this_run" + +Multi processing +*********************** + +You can also easily (on some platforms) make the evaluation faster by using the "multiprocessing" python +package with: + +.. code-block:: python + + import grid2op + from grid2op.Agent import RandomAgent # for example... + from grid2op.Runner import Runner + + env = grid2op.make("l2rpn_case14_sandbox") + + agent_instance = RandomAgent(env.action_space) + runner = Runner(**env.get_params_for_runner(), + agentClass=None, + agentInstance=agent_instance) + res = runner.run(nb_episode=2, + nb_process=2) + +Customize the multi processing +******************************** + +And, as of grid2op 1.10.3, you can now customize the multiprocessing context you want +to use to evaluate your agent, like this: + +.. code-block:: python + + import multiprocessing as mp + import grid2op + from grid2op.Agent import RandomAgent # for example... + from grid2op.Runner import Runner + + env = grid2op.make("l2rpn_case14_sandbox") + + agent_instance = RandomAgent(env.action_space) + + ctx = mp.get_context('spawn') # or "fork" or "forkserver" + runner = Runner(**env.get_params_for_runner(), + agentClass=None, + agentInstance=agent_instance, + mp_context=ctx) + res = runner.run(nb_episode=2, + nb_process=2) + +If you set this, the multiprocessing `Pool` used to evaluate your agents will be made with: + +.. code-block:: python + + with mp_context.Pool(nb_process) as p: + .... + +Otherwise the default "Pool" is used: + +.. code-block:: python + + with Pool(nb_process) as p: + .... + + +Detailed Documentation by class +------------------------------- +.. automodule:: grid2op.Runner + :members: + :private-members: + :special-members: + :autosummary: + +.. include:: final.rst \ No newline at end of file diff --git a/docs/simulator.rst b/docs/user/simulator.rst similarity index 100% rename from docs/simulator.rst rename to docs/user/simulator.rst diff --git a/docs/space.rst b/docs/user/space.rst similarity index 100% rename from docs/space.rst rename to docs/user/space.rst diff --git a/docs/user/special.rst b/docs/user/special.rst new file mode 100644 index 000000000..a4b7a9d97 --- /dev/null +++ b/docs/user/special.rst @@ -0,0 +1,69 @@ +.. for the color +.. include:: ../special.rst + +.. _n_gen: ./space.html#grid2op.Space.GridObjects.n_gen +.. _n_load: ./space.html#grid2op.Space.GridObjects.n_load +.. _n_line: ./space.html#grid2op.Space.GridObjects.n_line +.. _n_sub: ./space.html#grid2op.Space.GridObjects.n_sub +.. _n_storage: ./space.html#grid2op.Space.GridObjects.n_storage +.. _dim_topo: ./space.html#grid2op.Space.GridObjects.dim_topo +.. _dim_alarms: ./space.html#grid2op.Space.GridObjects.dim_alarms +.. _dim_alerts: ./space.html#grid2op.Space.GridObjects.dim_alerts +.. _year: ./observation.html#grid2op.Observation.BaseObservation.year +.. _month: ./observation.html#grid2op.Observation.BaseObservation.month +.. _day: ./observation.html#grid2op.Observation.BaseObservation.day +.. 
_hour_of_day: ./observation.html#grid2op.Observation.BaseObservation.hour_of_day +.. _minute_of_hour: ./observation.html#grid2op.Observation.BaseObservation.minute_of_hour +.. _day_of_week: ./observation.html#grid2op.Observation.BaseObservation.day_of_week +.. _gen_p: ./observation.html#grid2op.Observation.BaseObservation.gen_p +.. _gen_q: ./observation.html#grid2op.Observation.BaseObservation.gen_q +.. _gen_v: ./observation.html#grid2op.Observation.BaseObservation.gen_v +.. _load_p: ./observation.html#grid2op.Observation.BaseObservation.load_p +.. _load_q: ./observation.html#grid2op.Observation.BaseObservation.load_q +.. _load_v: ./observation.html#grid2op.Observation.BaseObservation.load_v +.. _p_or: ./observation.html#grid2op.Observation.BaseObservation.p_or +.. _q_or: ./observation.html#grid2op.Observation.BaseObservation.q_or +.. _v_or: ./observation.html#grid2op.Observation.BaseObservation.v_or +.. _a_or: ./observation.html#grid2op.Observation.BaseObservation.a_or +.. _p_ex: ./observation.html#grid2op.Observation.BaseObservation.p_ex +.. _q_ex: ./observation.html#grid2op.Observation.BaseObservation.q_ex +.. _v_ex: ./observation.html#grid2op.Observation.BaseObservation.v_ex +.. _a_ex: ./observation.html#grid2op.Observation.BaseObservation.a_ex +.. _rho: ./observation.html#grid2op.Observation.BaseObservation.rho +.. _topo_vect: ./observation.html#grid2op.Observation.BaseObservation.topo_vect +.. _line_status: ./observation.html#grid2op.Observation.BaseObservation.line_status +.. _timestep_overflow: ./observation.html#grid2op.Observation.BaseObservation.timestep_overflow +.. _time_before_cooldown_line: ./observation.html#grid2op.Observation.BaseObservation.time_before_cooldown_line +.. _time_before_cooldown_sub: ./observation.html#grid2op.Observation.BaseObservation.time_before_cooldown_sub +.. _time_next_maintenance: ./observation.html#grid2op.Observation.BaseObservation.time_next_maintenance +.. _duration_next_maintenance: ./observation.html#grid2op.Observation.BaseObservation.duration_next_maintenance +.. _target_dispatch: ./observation.html#grid2op.Observation.BaseObservation.target_dispatch +.. _actual_dispatch: ./observation.html#grid2op.Observation.BaseObservation.actual_dispatch +.. _storage_charge: ./observation.html#grid2op.Observation.BaseObservation.storage_charge +.. _storage_power_target: ./observation.html#grid2op.Observation.BaseObservation.storage_power_target +.. _storage_power: ./observation.html#grid2op.Observation.BaseObservation.storage_power +.. _storage_theta: ./observation.html#grid2op.Observation.BaseObservation.storage_theta +.. _gen_p_before_curtail: ./observation.html#grid2op.Observation.BaseObservation.gen_p_before_curtail +.. _curtailment: ./observation.html#grid2op.Observation.BaseObservation.curtailment +.. _curtailment_limit: ./observation.html#grid2op.Observation.BaseObservation.curtailment_limit +.. _is_alarm_illegal: ./observation.html#grid2op.Observation.BaseObservation.is_alarm_illegal +.. _time_since_last_alarm: ./observation.html#grid2op.Observation.BaseObservation.time_since_last_alarm +.. _last_alarm: ./observation.html#grid2op.Observation.BaseObservation.last_alarm +.. _attention_budget: ./observation.html#grid2op.Observation.BaseObservation.attention_budget +.. _max_step: ./observation.html#grid2op.Observation.BaseObservation.max_step +.. _current_step: ./observation.html#grid2op.Observation.BaseObservation.current_step +.. _delta_time: ./observation.html#grid2op.Observation.BaseObservation.delta_time +.. 
_gen_margin_up: ./observation.html#grid2op.Observation.BaseObservation.gen_margin_up +.. _gen_margin_down: ./observation.html#grid2op.Observation.BaseObservation.gen_margin_down +.. _curtailment_mw: ./observation.html#grid2op.Observation.BaseObservation.curtailment_mw +.. _theta_or: ./observation.html#grid2op.Observation.BaseObservation.theta_or +.. _theta_ex: ./observation.html#grid2op.Observation.BaseObservation.theta_ex +.. _gen_theta: ./observation.html#grid2op.Observation.BaseObservation.gen_theta +.. _load_theta: ./observation.html#grid2op.Observation.BaseObservation.load_theta +.. _active_alert: ./observation.html#grid2op.Observation.BaseObservation.active_alert +.. _time_since_last_alert: ./observation.html#grid2op.Observation.BaseObservation.time_since_last_alert +.. _alert_duration: ./observation.html#grid2op.Observation.BaseObservation.alert_duration +.. _total_number_of_alert: ./observation.html#grid2op.Observation.BaseObservation.total_number_of_alert +.. _time_since_last_attack: ./observation.html#grid2op.Observation.BaseObservation.time_since_last_attack +.. _was_alert_used_after_attack: ./observation.html#grid2op.Observation.BaseObservation.was_alert_used_after_attack +.. _attack_under_alert: ./observation.html#grid2op.Observation.BaseObservation.attack_under_alert diff --git a/docs/timeserie_handlers.rst b/docs/user/timeserie_handlers.rst similarity index 99% rename from docs/timeserie_handlers.rst rename to docs/user/timeserie_handlers.rst index e7a9b1fb5..bd76abddf 100644 --- a/docs/timeserie_handlers.rst +++ b/docs/user/timeserie_handlers.rst @@ -344,3 +344,5 @@ Detailed Documentation by class .. automodule:: grid2op.Chronics.handlers :members: :autosummary: + +.. include:: final.rst diff --git a/docs/utils.rst b/docs/user/utils.rst similarity index 99% rename from docs/utils.rst rename to docs/user/utils.rst index fde3a084a..d30de21a1 100644 --- a/docs/utils.rst +++ b/docs/user/utils.rst @@ -22,4 +22,3 @@ Detailed Documentation by class :autosummary: .. include:: final.rst - diff --git a/docs/voltagecontroler.rst b/docs/user/voltagecontroler.rst similarity index 96% rename from docs/voltagecontroler.rst rename to docs/user/voltagecontroler.rst index eb7b902f3..1c85a3552 100644 --- a/docs/voltagecontroler.rst +++ b/docs/user/voltagecontroler.rst @@ -1,5 +1,8 @@ .. currentmodule:: grid2op.VoltageControler +.. _voltage-controler-module: + + Voltage Controler =================================== @@ -38,4 +41,4 @@ Detailed Documentation by class :members: :autosummary: -.. include:: final.rst \ No newline at end of file +.. include:: final.rst diff --git a/examples/backend_integration/Step0_make_env.py b/examples/backend_integration/Step0_make_env.py index cc0d45b60..5d91fbdeb 100644 --- a/examples/backend_integration/Step0_make_env.py +++ b/examples/backend_integration/Step0_make_env.py @@ -41,6 +41,28 @@ from grid2op.Opponent import BaseOpponent +class PandaPowerBackendNoShunt(PandaPowerBackend): + shunts_data_available = False + + +def create_action(env, backend, action): + """this is done internally by grid2op. 
+ + The idea is to generate a "backend action" (which again is provided by grid2op) + easily + """ + # bk_act = env._backend_action_class() + # bk_act += action # action for pandapower backend + # bk_act.reorder(env.backend._load_sr2tg, + # env.backend._gen_sr2tg, + # env.backend._topo_sr2tg, + # env.backend._storage_sr2tg, + # env.backend._shunt_sr2tg) + bk_act = type(backend).my_bk_act_class() + bk_act += action + return bk_act + + def make_env_for_backend(env_name, backend_class): # env_name: one of: # - rte_case5_example: the grid in the documentation (completely fake grid) @@ -65,8 +87,9 @@ def make_env_for_backend(env_name, backend_class): action_class=CompleteAction, # we tell grid2op we will manipulate all type of actions reward_class=ConstantReward, # we don't have yet redispatching data, that might be use by the reward opponent_class=BaseOpponent, # we deactivate the opponents + # backend=backend_class() backend=BackendConverter(source_backend_class=backend_class, - target_backend_class=PandaPowerBackend, + target_backend_class=PandaPowerBackendNoShunt, use_target_backend_name=True) ) obs = env.reset() diff --git a/examples/backend_integration/Step1_loading.py b/examples/backend_integration/Step1_loading.py index a456a2106..ac4612169 100644 --- a/examples/backend_integration/Step1_loading.py +++ b/examples/backend_integration/Step1_loading.py @@ -30,8 +30,11 @@ # to serve as an example import pandapower as pp +ERR_MSG_ELSEWHERE = "Will be detailed in another example script" + class CustomBackend_Step1(Backend): + shunts_data_available = False def load_grid(self, path : Union[os.PathLike, str], filename : Optional[Union[os.PathLike, str]]=None) -> None: @@ -97,25 +100,25 @@ def load_grid(self, self._compute_pos_big_topo() def apply_action(self, backendAction: Union["grid2op.Action._backendAction._BackendAction", None]) -> None: - raise NotImplementedError("Will be detailed in another example script") + raise NotImplementedError() def runpf(self, is_dc : bool=False) -> Tuple[bool, Union[Exception, None]]: - raise NotImplementedError("Will be detailed in another example script") + raise NotImplementedError(ERR_MSG_ELSEWHERE) def get_topo_vect(self) -> np.ndarray: - raise NotImplementedError("Will be detailed in another example script") + raise NotImplementedError(ERR_MSG_ELSEWHERE) def generators_info(self)-> Tuple[np.ndarray, np.ndarray, np.ndarray]: - raise NotImplementedError("Will be detailed in another example script") + raise NotImplementedError(ERR_MSG_ELSEWHERE) def loads_info(self)-> Tuple[np.ndarray, np.ndarray, np.ndarray]: - raise NotImplementedError("Will be detailed in another example script") + raise NotImplementedError(ERR_MSG_ELSEWHERE) def lines_or_info(self)-> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: - raise NotImplementedError("Will be detailed in another example script") + raise NotImplementedError(ERR_MSG_ELSEWHERE) def lines_ex_info(self)-> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: - raise NotImplementedError("Will be detailed in another example script") + raise NotImplementedError(ERR_MSG_ELSEWHERE) if __name__ == "__main__": @@ -168,8 +171,8 @@ def lines_ex_info(self)-> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: # storage_pos_topo_vect # for example - print(type(backend).name_load) - print(type(backend).load_to_subid) - print(type(backend).load_to_sub_pos) - print(type(backend).load_pos_topo_vect) + print(f"Name of the loads, seen in grid2op: {type(backend).name_load}") + print(f"Id of substation, for each load: 
{type(backend).load_to_subid}") + print(f"Position in the substation topology vector, for each load: {type(backend).load_to_sub_pos}") + print(f"Position in the global topology vector, for each load: {type(backend).load_pos_topo_vect}") \ No newline at end of file diff --git a/examples/backend_integration/Step2_modify_load.py b/examples/backend_integration/Step2_modify_load.py index c55049458..4947af3a6 100644 --- a/examples/backend_integration/Step2_modify_load.py +++ b/examples/backend_integration/Step2_modify_load.py @@ -69,7 +69,7 @@ def loads_info(self)-> Tuple[np.ndarray, np.ndarray, np.ndarray]: if __name__ == "__main__": import grid2op import os - from Step0_make_env import make_env_for_backend + from Step0_make_env import make_env_for_backend, create_action path_grid2op = grid2op.__file__ path_data_test = os.path.join(os.path.split(path_grid2op)[0], "data") @@ -105,8 +105,7 @@ def loads_info(self)-> Tuple[np.ndarray, np.ndarray, np.ndarray]: # have the proper size) # this is technical to grid2op (done internally) - bk_act = env._backend_action_class() - bk_act += action + bk_act = create_action(env, backend, action) ############# # this is what the backend receive: diff --git a/examples/backend_integration/Step3_modify_gen.py b/examples/backend_integration/Step3_modify_gen.py index 8ec174f34..b3d45eddc 100644 --- a/examples/backend_integration/Step3_modify_gen.py +++ b/examples/backend_integration/Step3_modify_gen.py @@ -67,7 +67,7 @@ def generators_info(self)-> Tuple[np.ndarray, np.ndarray, np.ndarray]: if __name__ == "__main__": import grid2op import os - from Step0_make_env import make_env_for_backend + from Step0_make_env import make_env_for_backend, create_action path_grid2op = grid2op.__file__ path_data_test = os.path.join(os.path.split(path_grid2op)[0], "data") @@ -103,8 +103,7 @@ def generators_info(self)-> Tuple[np.ndarray, np.ndarray, np.ndarray]: # have the proper size) # this is technical to grid2op (done internally) - bk_act = env._backend_action_class() - bk_act += action + bk_act = create_action(env, backend, action) ############# # this is what the backend receive: diff --git a/examples/backend_integration/Step4_modify_line_status.py b/examples/backend_integration/Step4_modify_line_status.py index 1f3cac741..3fabdb5c6 100644 --- a/examples/backend_integration/Step4_modify_line_status.py +++ b/examples/backend_integration/Step4_modify_line_status.py @@ -178,7 +178,7 @@ def lines_ex_info(self)-> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: if __name__ == "__main__": import grid2op import os - from Step0_make_env import make_env_for_backend + from Step0_make_env import make_env_for_backend, create_action path_grid2op = grid2op.__file__ path_data_test = os.path.join(os.path.split(path_grid2op)[0], "data") @@ -205,8 +205,7 @@ def lines_ex_info(self)-> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: action = env.action_space({"set_line_status": [(0, -1)]}) # this is technical to grid2op - bk_act = env._backend_action_class() - bk_act += action + bk_act = create_action(env, backend, action) ############# # this is what the backend receive: @@ -224,10 +223,10 @@ def lines_ex_info(self)-> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: print(f"{q_or = }") print(f"{v_or = }") print(f"{a_or = }") - assert p_or[0] == 0. - assert q_or[0] == 0. - assert v_or[0] == 0. - assert a_or[0] == 0. 
+ assert np.abs(p_or[0]) <= 1e-7 + assert np.abs(q_or[0]) <= 1e-7 + assert np.abs(v_or[0]) <= 1e-7 + assert np.abs(a_or[0]) <= 1e-7 # this is how "user" manipute the grid # in this I reconnect powerline 0 @@ -280,7 +279,7 @@ def lines_ex_info(self)-> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: print(f"{q_or = }") print(f"{v_or = }") print(f"{a_or = }") - assert p_or[line_id] == 0. - assert q_or[line_id] == 0. - assert v_or[line_id] == 0. - assert a_or[line_id] == 0. + assert np.abs(p_or[line_id]) <= 1e-7 + assert np.abs(q_or[line_id]) <= 1e-7 + assert np.abs(v_or[line_id]) <= 1e-7 + assert np.abs(a_or[line_id]) <= 1e-7 diff --git a/examples/backend_integration/Step5_modify_topology.py b/examples/backend_integration/Step5_modify_topology.py index c582aae9d..4e84a58e7 100644 --- a/examples/backend_integration/Step5_modify_topology.py +++ b/examples/backend_integration/Step5_modify_topology.py @@ -58,7 +58,7 @@ def _aux_change_bus_or_disconnect(self, new_bus, dt, key, el_id): # are either 1 or 2) def apply_action(self, backendAction: Union["grid2op.Action._backendAction._BackendAction", None]) -> None: # the following few lines are highly recommended - if action is None: + if backendAction is None: return # loads and generators are modified in the previous script @@ -173,12 +173,12 @@ def get_topo_vect(self) -> np.ndarray: if __name__ == "__main__": import grid2op import os - from Step0_make_env import make_env_for_backend + from Step0_make_env import make_env_for_backend, create_action path_grid2op = grid2op.__file__ path_data_test = os.path.join(os.path.split(path_grid2op)[0], "data") - env_name = "l2rpn_wcci_2022_dev" + env_name = "rte_case5_example" # one of: # - rte_case5_example: the grid in the documentation (completely fake grid) # - l2rpn_case14_sandbox: inspired from IEEE 14 @@ -206,16 +206,16 @@ def get_topo_vect(self) -> np.ndarray: sub_id = 1 local_topo = (1, 2, 1, 2, 1, 2) elif env_name == "l2rpn_wcci_2022_dev": - sub_id = 3 - local_topo = (1, 2, 1, 2, 1) + raise RuntimeError("Storage units are not handled by the example backend, and there are some on the grid.") + # sub_id = 3 + # local_topo = (1, 2, 1, 2, 1) else: raise RuntimeError(f"Unknown grid2op environment name {env_name}") action = env.action_space({"set_bus": {"substations_id": [(sub_id, local_topo)]}}) ############################# # this is technical to grid2op - bk_act = env._backend_action_class() - bk_act += action + bk_act = create_action(env, backend, action) #################################### # this is what the backend receive: diff --git a/examples/backend_integration/Step6_integration.py b/examples/backend_integration/Step6_integration.py index 7518504b3..f17ff0cbf 100644 --- a/examples/backend_integration/Step6_integration.py +++ b/examples/backend_integration/Step6_integration.py @@ -12,7 +12,7 @@ interacts with it. 
""" - +from tqdm import tqdm from Step5_modify_topology import CustomBackend_Minimal @@ -60,11 +60,13 @@ ########### First "test" perform nothing and see what it gives done = False nb_step = 0 - while True: - obs, reward, done, info = env.step(env.action_space()) - if done: - break - nb_step += 1 + with tqdm() as pbar: + while True: + obs, reward, done, info = env.step(env.action_space()) + if done: + break + nb_step += 1 + pbar.update() print(f"{nb_step} steps have been made with your backend with do nothing") ########## Second "test" perform random actions every now and then @@ -72,18 +74,20 @@ obs = env.reset() done = False nb_step = 0 - while True: - if nb_step % 10 == 9: - # do a randome action sometime - act = env.action_space.sample() - else: - # do nothing most of the time - act = env.action_space() - obs, reward, done, info = env.step(act) - if done: - break - nb_step += 1 - print(f"{nb_step} steps have been made with your backend with random actions") + with tqdm() as pbar: + while True: + if nb_step % 10 == 9: + # do a randome action sometime + act = env.action_space.sample() + else: + # do nothing most of the time + act = env.action_space() + obs, reward, done, info = env.step(act) + if done: + break + nb_step += 1 + pbar.update() + print(f"{nb_step} steps have been made with your backend with some random actions") ########### Third "test" using an "agent" that "does smart actions" (greedy agent) done = False @@ -91,11 +95,13 @@ obs = env.reset() reward = 0. agent = RecoPowerlineAgent(env.action_space) - while True: - act = agent.act(obs, reward) - obs, reward, done, info = env.step(act) - if done: - break - nb_step += 1 + with tqdm() as pbar: + while True: + act = agent.act(obs, reward) + obs, reward, done, info = env.step(act) + if done: + break + nb_step += 1 + pbar.update() print(f"{nb_step} steps have been made with the greedy agent") \ No newline at end of file diff --git a/examples/backend_integration/Step7_optional_make_test.py b/examples/backend_integration/Step7_optional_make_test.py new file mode 100644 index 000000000..579b5b2bf --- /dev/null +++ b/examples/backend_integration/Step7_optional_make_test.py @@ -0,0 +1,87 @@ +# Copyright (c) 2019-2020, RTE (https://www.rte-france.com) +# See AUTHORS.txt +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. + +""" +This script provides a way to run the tests performed by grid2Op for the backend. + +These tests are not 100% complete (some things might not be tested and are tested somewhere else) +but they cover a big part of what the backend is expected to do. + +YOU NEED TO INSTALL GRID2OP FROM THE GITHUB REPO FOR THIS TO WORK ! 
+To do that, simply: + +1) clone grid2op repo +2) cd there +3) run `pip install -e .` + +(do this in a venv preferably) +""" + +import unittest +import warnings + +# first the backend class (for the example here) +from Step5_modify_topology import CustomBackend_Minimal + +# then some required things +from grid2op.tests.helper_path_test import PATH_DATA_TEST_PP, PATH_DATA_TEST +from grid2op.tests.helper_path_test import HelperTests +PATH_DATA_TEST_INIT = PATH_DATA_TEST +PATH_DATA_TEST = PATH_DATA_TEST_PP + +# then all the tests that can be automatically performed +from grid2op.tests.BaseBackendTest import BaseTestNames, BaseTestLoadingCase, BaseTestLoadingBackendFunc +from grid2op.tests.BaseBackendTest import BaseTestTopoAction, BaseTestEnvPerformsCorrectCascadingFailures +from grid2op.tests.BaseBackendTest import BaseTestChangeBusAffectRightBus, BaseTestShuntAction +from grid2op.tests.BaseBackendTest import BaseTestResetEqualsLoadGrid, BaseTestVoltageOWhenDisco, BaseTestChangeBusSlack +from grid2op.tests.BaseBackendTest import BaseIssuesTest, BaseStatusActions +from grid2op.tests.test_Environment import (TestLoadingBackendPandaPower as BaseTestLoadingBackendPandaPower, + TestResetOk as BaseTestResetOk) +from grid2op.tests.test_Environment import (TestResetAfterCascadingFailure as TestResetAfterCascadingFailure, + TestCascadingFailure as BaseTestCascadingFailure) +from grid2op.tests.BaseRedispTest import BaseTestRedispatch, BaseTestRedispatchChangeNothingEnvironment +from grid2op.tests.BaseRedispTest import BaseTestRedispTooLowHigh, BaseTestDispatchRampingIllegalETC +from grid2op.tests.BaseRedispTest import BaseTestLoadingAcceptAlmostZeroSumRedisp + +# then still some glue code, mainly for the names of the time series +from grid2op.Converter import BackendConverter +from grid2op.Backend import PandaPowerBackend + +# our backend does not read the names from the grid, so this test is not relevant +# class TestNames(HelperTests, BaseTestNames): +# def make_backend(self, detailed_infos_for_cascading_failures=False): +# with warnings.catch_warnings(): +# warnings.filterwarnings("ignore") +# bk = BackendConverter(source_backend_class=CustomBackend_Minimal, +# target_backend_class=PandaPowerBackend, +# use_target_backend_name=True, +# detailed_infos_for_cascading_failures=detailed_infos_for_cascading_failures) +# return bk + +# def get_path(self): +# return PATH_DATA_TEST_INIT + +class TestLoadingCase(HelperTests, BaseTestLoadingCase): + def make_backend(self, detailed_infos_for_cascading_failures=False): + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + bk = BackendConverter(source_backend_class=CustomBackend_Minimal, + target_backend_class=PandaPowerBackend, + use_target_backend_name=True, + detailed_infos_for_cascading_failures=detailed_infos_for_cascading_failures) + return bk + + def get_path(self): + return PATH_DATA_TEST + + def get_casefile(self): + return "test_case14.json" + + +if __name__ == "__main__": + unittest.main() diff --git a/getting_started/00_Introduction.ipynb b/getting_started/00_Introduction.ipynb index bf16a50db..a58f23b7b 100644 --- a/getting_started/00_Introduction.ipynb +++ b/getting_started/00_Introduction.ipynb @@ -191,10 +191,25 @@ "\n", "- In reality there can also be \"switches\" that can connect the two busbars (reconfiguring the topology of the substation can be done with only one switch, but on the other hand, sometimes changing one switch will have no effect at all).\n", "\n", - "- You can also have more than 2 busbars in each substation 
(sometimes 5 or 6 for example). This makes the number of possible topologies even higher than what it is in grid2op.\n", + "- You can also have more than 2 busbars in each substation (sometimes 5 or 6 for example). This makes the number of possible topologies even higher than it currently is in grid2op (see below for some additional precisions).\n", "\n", "- Finally, most of the time a single busbar count a \"switch\" in its middle that allows to disconnect part of the element connected to it to another part. Basically this entails that some combinaison of elements are not possible to perform\n", "\n", + "*Additional precisions about the number of independant busbsars per susbtations*: Starting from grid2op 1.10.2 you can now have any number of busbars you want per susbtations. For example, you can create an environment with:\n", + "```python\n", + "env = grid2op.make(\"l2rpn_case14_sandbox\")\n", + "```\n", + "To have the default of 2 busbars per susbtations. But you can also do:\n", + "```python\n", + "env_3 = grid2op.make(\"l2rpn_case14_sandbox\", n_busbar=3)\n", + "```\n", + "Then you end-up with 3 busbars for all substations or you can even do:\n", + "```python\n", + "env_1 = grid2op.make(\"l2rpn_case14_sandbox\", n_busbar=1)\n", + "# or\n", + "env_10 = grid2op.make(\"l2rpn_case14_sandbox\", n_busbar=10)\n", + "```\n", + "\n", "And of course, we model explicitly in this framework (*eg* we allow the agents to act on) only some elements of a powergrid. In reality, much more heterogeneous objects exists with more complex properties. \n", "\n", "We decided to make all these assumptions because we thought it was the easiest setting that allow to perform some topological reconfiguration, beside connecting / disconnecting powerlines.\n", diff --git a/getting_started/05_StudyYourAgent.ipynb b/getting_started/05_StudyYourAgent.ipynb index 0e9d142a4..44868f421 100644 --- a/getting_started/05_StudyYourAgent.ipynb +++ b/getting_started/05_StudyYourAgent.ipynb @@ -94,6 +94,8 @@ "outputs": [], "source": [ "try:\n", + " # use a (way) faster backend to reduce computation time\n", + " # to use it, you need to install `pip install lightsim2grid`\n", " from lightsim2grid import LightSimBackend\n", " bk_cls = LightSimBackend\n", "except ImportError as exc:\n", @@ -252,13 +254,15 @@ "outputs": [], "source": [ "id_line_inspected = 13\n", - "actions_on_line_14 = 0\n", + "actions_on_line_13 = 0\n", "for act in this_episode.actions:\n", " dict_ = act.effect_on(line_id=id_line_inspected) # which effect has this action action on the substation with given id\n", " # other objects are: load_id, gen_id, line_id or substation_id\n", " if dict_['change_line_status'] or dict_[\"set_line_status\"] != 0:\n", - " actions_on_line_14 += 1\n", - "print(f'Total actions on powerline 14 : {actions_on_line_14}')" + " actions_on_line_13 += 1\n", + "print(f\"Total actions on powerline 13 (named \"\n", + " f\"{type(env).name_line[id_line_inspected]}): \"\n", + " f\"{actions_on_line_13}\")\n" ] }, { diff --git a/getting_started/11_IntegrationWithExistingRLFrameworks.ipynb b/getting_started/11_IntegrationWithExistingRLFrameworks.ipynb index a2c43f898..e4d2c6ecb 100644 --- a/getting_started/11_IntegrationWithExistingRLFrameworks.ipynb +++ b/getting_started/11_IntegrationWithExistingRLFrameworks.ipynb @@ -29,7 +29,6 @@ "\n", "Other RL frameworks are not cover here. If you already use them, let us know !\n", "- https://github.com/PaddlePaddle/PARL/blob/develop/README.md (used by the winner teams of Neurips competitions !) 
Work in progress.\n", - "- https://github.com/wau/keras-rl2\n", "- https://github.com/deepmind/acme\n", "\n", "Note also that there is still the possibility to use past codes in the l2rpn-baselines repository: https://github.com/rte-france/l2rpn-baselines . This repository contains code snippets that can be reuse to make really nice agents on the l2rpn competitions. You can try it out :-) \n", @@ -85,11 +84,13 @@ "- [Action space](#Action-space): basic usage of the action space, by removing redundant feature (`gym_env.observation_space.ignore_attr`) or transforming feature from a continuous space to a discrete space (`ContinuousToDiscreteConverter`)\n", "- [Observation space](#Observation-space): basic usage of the observation space, by removing redunddant features (`keep_only_attr`) or to scale the data on between a certain range (`ScalerAttrConverter`)\n", "- [Making the grid2op agent](#Making-the-grid2op-agent) explains how to make a grid2op agent once trained. Note that a more \"agent focused\" view is provided in the notebook [04_TrainingAnAgent](04_TrainingAnAgent.ipynb) !\n", - "- [1) RLLIB](#1\\)-RLLIB): more advance usage for customizing the observation space (`gym_env.observation_space.reencode_space` and `gym_env.observation_space.add_key`) or modifying the type of gym attribute (`MultiToTupleConverter`) as well as an example of how to use RLLIB framework\n", - "- [2)-Stable baselines](#2\\)-Stable-baselines): even more advanced usage for customizing the observation space by concatenating it to a single \"Box\" (instead of a dictionnary) thanks to `BoxGymObsSpace` and to use `BoxGymActSpace` if you are more focus on continuous actions and `MultiDiscreteActSpace` for discrete actions (**NB** in both case there will be loss of information as compared to regular grid2op actions! for example it will be harder to have a representation of the graph of the grid there)\n", - "- [3) Tf Agents](#3\\)-Tf-Agents) explains how to convert the action space into a \"Discrete\" gym space thanks to `DiscreteActSpace`\n", "\n", - "On each sections, we also explain concisely how to train the agent. Note that we did not spend any time on customizing the default agents and training scheme. It is then less than likely that these agents there" + "To dive deeper and with proper \"hands on\", you can refer to one of the following notebooks that uses real RL frameworks:\n", + "\n", + "1) RLLIB: see notebook [11_ray_integration](./11_ray_integration.ipynb) for more information about RLLIB\n", + "2) Stable baselines: see notebook [11_ray_integration](./11_stable_baselines3_integration.ipynb) for more information about stables-baselines3\n", + "3) tf agents: coming soon\n", + "4) acme: coming soon" ] }, { @@ -135,6 +136,37 @@ "\n", "More information are provided here: https://grid2op.readthedocs.io/en/latest/environment.html#splitting-into-raining-validation-test-scenarios\n", "\n", + "### Use the `experimental_read_from_local_dir` flag\n", + "\n", + "This flag allows python to better \"understands\" the classes in grid2op and avoid lots of issue with pickle / multi processing etc.\n", + "\n", + "The complete documentation is available here https://grid2op.readthedocs.io/en/latest/environment.html#grid2op.Environment.BaseEnv.generate_classes\n", + "\n", + "Basically, once, and only once, outside of this process, you can call:\n", + "\n", + "```python\n", + "import grid2op\n", + "env_name = \"l2rpn_case14_sandbox\" # or any other name\n", + "\n", + "env = grid2op.make(env_name, ...) 
# again: redo this step each time you customize \"...\"\n", + "# for example if you change the `action_class` or the `backend` etc.\n", + "\n", + "env.generate_classes()\n", + "```\n", + "\n", + "Then, each time you want to reload the same environment, you can do:\n", + "\n", + "```python\n", + "import grid2op\n", + "env_name = SAME NAME AS ABOVE\n", + "env = grid2op.make(env_name,\n", + " experimental_read_from_local_dir=True,\n", + " ..., # SAME ENV CUSTOMIZATION AS ABOVE\n", + " )\n", + "```\n", + "\n", + "This is known to solve bug related to multi processing, but also to reduce the amount of RAM taken (in some cases) as well as creation time (in some cases)\n", + "\n", "### Other steps\n", "\n", "The grid2op documentation is full of details to \"optimize\" the number of steps you can do per seconds. This number can rise from a few dozen per seconds to around a thousands per seconds with proper care.\n", @@ -142,7 +174,6 @@ "We strongly encouraged you to leverage all the possibilities which includes (but are not limited to):\n", "- using \"lightsim2grid\" as a backend for a 10-15x speed up in the \"env.step(...)\" function\n", "- using \"MultifolderWithCache\" or \"env.chronics_handler.set_chunk(...)\" for faster \"env.reset(...)\" see https://grid2op.readthedocs.io/en/latest/environment.html#optimize-the-data-pipeline\n", - "- using \"SingleEnvMultiProcess\" for parrallel computation\n", "\n", "\n", "### Create a grid2op environment\n", @@ -190,25 +221,13 @@ "metadata": {}, "outputs": [], "source": [ - "import gym\n", + "import gymnasium\n", "import numpy as np\n", "from grid2op.gym_compat import GymEnv\n", "env_gym_init = GymEnv(env_glop)\n", "env_gym = GymEnv(env_glop)\n", - "print(f\"The \\\"env_gym\\\" is a gym environment: {isinstance(env_gym, gym.Env)}\")\n", - "obs_gym = env_gym.reset()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - " In this notebook, we only present some basic (and really \"detailed\" use of the `GymEnv`). \n", - " \n", - " This is especially suited for advanced users wanting a deep control over everything happening. \n", - "\n", - " For a less advanced usage, feel free to consult the l2rpn baselines package, that embed some usefull environments, compatible with gym, that can embed some heuristics and other \"quality of life\" features. 
Feel free to use the l2rpn baselines package for more information.\n", - "" + "print(f\"The \\\"env_gym\\\" is a gym environment: {isinstance(env_gym, gymnasium.Env)}\")\n", + "obs_gym, info = env_gym.reset()" ] }, { @@ -388,7 +407,7 @@ "outputs": [], "source": [ "from grid2op.gym_compat import ScalerAttrConverter\n", - "from gym.spaces import Box\n", + "from gymnasium.spaces import Box\n", "ob_space = env_gym.observation_space\n", "ob_space = ob_space.reencode_space(\"actual_dispatch\",\n", " ScalerAttrConverter(substract=0.,\n", @@ -419,6 +438,133 @@ "env_gym.observation_space" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For the next notebooks, we use the following environment wrapper:\n", + "\n", + "```python\n", + "from gymnasium import Env\n", + "from gymnasium.spaces import Discrete, MultiDiscrete, Box\n", + "import json\n", + "\n", + "import ray\n", + "from ray.rllib.algorithms.ppo import PPOConfig\n", + "from ray.rllib.algorithms import ppo\n", + "\n", + "from typing import Dict, Literal, Any\n", + "import copy\n", + "\n", + "import grid2op\n", + "from grid2op.gym_compat import GymEnv, BoxGymObsSpace, DiscreteActSpace, BoxGymActSpace, MultiDiscreteActSpace\n", + "from lightsim2grid import LightSimBackend\n", + "\n", + "\n", + "class Grid2opEnvWrapper(Env):\n", + " def __init__(self,\n", + " env_config: Dict[Literal[\"backend_cls\",\n", + " \"backend_options\",\n", + " \"env_name\",\n", + " \"env_is_test\",\n", + " \"obs_attr_to_keep\",\n", + " \"act_type\",\n", + " \"act_attr_to_keep\"],\n", + " Any]= None):\n", + " super().__init__()\n", + " if env_config is None:\n", + " env_config = {}\n", + "\n", + " # handle the backend\n", + " backend_cls = LightSimBackend\n", + " if \"backend_cls\" in env_config:\n", + " backend_cls = env_config[\"backend_cls\"]\n", + " backend_options = {}\n", + " if \"backend_options\" in env_config:\n", + " backend_options = env_config[\"backend_options\"]\n", + " backend = backend_cls(**backend_options)\n", + "\n", + " # create the grid2op environment\n", + " env_name = \"l2rpn_case14_sandbox\"\n", + " if \"env_name\" in env_config:\n", + " env_name = env_config[\"env_name\"]\n", + " if \"env_is_test\" in env_config:\n", + " is_test = bool(env_config[\"env_is_test\"])\n", + " else:\n", + " is_test = False\n", + " self._g2op_env = grid2op.make(env_name, backend=backend, test=is_test)\n", + " # NB by default this might be really slow (when the environment is reset)\n", + " # see https://grid2op.readthedocs.io/en/latest/data_pipeline.html for maybe 10x speed ups !\n", + " # TODO customize reward or action_class for example !\n", + "\n", + " # create the gym env (from grid2op)\n", + " self._gym_env = GymEnv(self._g2op_env)\n", + "\n", + " # customize observation space\n", + " obs_attr_to_keep = [\"rho\", \"p_or\", \"gen_p\", \"load_p\"]\n", + " if \"obs_attr_to_keep\" in env_config:\n", + " obs_attr_to_keep = copy.deepcopy(env_config[\"obs_attr_to_keep\"])\n", + " self._gym_env.observation_space.close()\n", + " self._gym_env.observation_space = BoxGymObsSpace(self._g2op_env.observation_space,\n", + " attr_to_keep=obs_attr_to_keep\n", + " )\n", + " # export observation space for the Grid2opEnv\n", + " self.observation_space = Box(shape=self._gym_env.observation_space.shape,\n", + " low=self._gym_env.observation_space.low,\n", + " high=self._gym_env.observation_space.high)\n", + "\n", + " # customize the action space\n", + " act_type = \"discrete\"\n", + " if \"act_type\" in env_config:\n", + " act_type = env_config[\"act_type\"]\n", 
+ "\n", + " self._gym_env.action_space.close()\n", + " if act_type == \"discrete\":\n", + " # user wants a discrete action space\n", + " act_attr_to_keep = [\"set_line_status_simple\", \"set_bus\"]\n", + " if \"act_attr_to_keep\" in env_config:\n", + " act_attr_to_keep = copy.deepcopy(env_config[\"act_attr_to_keep\"])\n", + " self._gym_env.action_space = DiscreteActSpace(self._g2op_env.action_space,\n", + " attr_to_keep=act_attr_to_keep)\n", + " self.action_space = Discrete(self._gym_env.action_space.n)\n", + " elif act_type == \"box\":\n", + " # user wants continuous action space\n", + " act_attr_to_keep = [\"redispatch\", \"set_storage\", \"curtail\"]\n", + " if \"act_attr_to_keep\" in env_config:\n", + " act_attr_to_keep = copy.deepcopy(env_config[\"act_attr_to_keep\"])\n", + " self._gym_env.action_space = BoxGymActSpace(self._g2op_env.action_space,\n", + " attr_to_keep=act_attr_to_keep)\n", + " self.action_space = Box(shape=self._gym_env.action_space.shape,\n", + " low=self._gym_env.action_space.low,\n", + " high=self._gym_env.action_space.high)\n", + " elif act_type == \"multi_discrete\":\n", + " # user wants a multi-discrete action space\n", + " act_attr_to_keep = [\"one_line_set\", \"one_sub_set\"]\n", + " if \"act_attr_to_keep\" in env_config:\n", + " act_attr_to_keep = copy.deepcopy(env_config[\"act_attr_to_keep\"])\n", + " self._gym_env.action_space = MultiDiscreteActSpace(self._g2op_env.action_space,\n", + " attr_to_keep=act_attr_to_keep)\n", + " self.action_space = MultiDiscrete(self._gym_env.action_space.nvec)\n", + " else:\n", + " raise NotImplementedError(f\"action type '{act_type}' is not currently supported.\")\n", + " \n", + " def reset(self, seed=None, options=None):\n", + " # use default _gym_env (from grid2op.gym_compat module)\n", + " # NB: here you can also specify \"default options\" when you reset, for example:\n", + " # - limiting the duration of the episode \"max step\"\n", + " # - starting at different steps \"init ts\"\n", + " # - study difficult scenario \"time serie id\"\n", + " # - specify an initial state of your grid \"init state\"\n", + " return self._gym_env.reset(seed=seed, options=options)\n", + " \n", + " def step(self, action):\n", + " # use default _gym_env (from grid2op.gym_compat module)\n", + " return self._gym_env.step(action)\n", + " \n", + "```\n", + "\n" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -467,260 +613,9 @@ "source": [ "## 1) RLLIB\n", "\n", - "This part is not a tutorial on how to use rllib. Please refer to [their documentation](https://docs.ray.io/en/master/rllib.html) for more detailed information.\n", - "\n", - "As explained in the header of this notebook, we will follow the recommended usage:\n", - "1. Create a grid2op environment (see section [0) Recommended initial steps](#0\\)-Recommended-initial-steps))\n", - "2. Convert it to a gym environment (see section [0) Recommended initial steps](#0\\)-Recommended-initial-steps))\n", - "3. (optional) Customize the action space and observation space (see section [0) Recommended initial steps](#0\\)-Recommended-initial-steps))\n", - "4. Use the framework to train an agent **(only this part is framework specific)**\n", - "\n", - "\n", - "The issue with rllib is that it does not take into account MultiBinary nor MultiDiscrete action space (see \n", - "see https://github.com/ray-project/ray/issues/1519) so we need some way to encode these types of actions. 
This can be done automatically with the `MultiToTupleConverter` provided in grid2op (as always, more information [in the documentation](https://grid2op.readthedocs.io/en/latest/gym.html#grid2op.gym_compat.MultiToTupleConverter) ).\n", + "To make it easier to get started, we moved this into the notebook [11_ray_integration](./11_ray_integration.ipynb)\n", "\n", - "We will then use this to customize our environment previously defined:\n", - " " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import copy\n", - "env_rllib = copy.deepcopy(env_gym_init)\n", - "from grid2op.gym_compat import MultiToTupleConverter\n", - "env_rllib.action_space = env_rllib.action_space.reencode_space(\"change_bus\", MultiToTupleConverter())\n", - "env_rllib.action_space = env_rllib.action_space.reencode_space(\"change_line_status\", MultiToTupleConverter())\n", - "env_rllib.action_space = env_rllib.action_space.reencode_space(\"redispatch\",\n", - " ContinuousToDiscreteConverter(nb_bins=11)\n", - " )\n", - "env_rllib.action_space = env_rllib.action_space.reencode_space(\"redispatch\", MultiToTupleConverter())\n", - "env_rllib.action_space" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Another specificity of RLLIB is that it handles creation of environments \"on its own\". This implies that you need to create a custom class representing an environment, rather a python object.\n", - "\n", - "And finally, you ask it to use this class, and learn a specific agent. This is really well explained in their documentation: https://docs.ray.io/en/master/rllib-env.html#configuring-environments." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# gym specific, we simply do a copy paste of what we did in the previous cells, wrapping it in the\n", - "# MyEnv class, and train a Proximal Policy Optimisation based agent\n", - "import gym\n", - "import ray\n", - "import numpy as np\n", - " \n", - "class MyEnv(gym.Env):\n", - " def __init__(self, env_config):\n", - " import grid2op\n", - " from grid2op.gym_compat import GymEnv\n", - " from grid2op.gym_compat import ScalerAttrConverter, ContinuousToDiscreteConverter, MultiToTupleConverter\n", - "\n", - " # 1. create the grid2op environment\n", - " if not \"env_name\" in env_config:\n", - " raise RuntimeError(\"The configuration for RLLIB should provide the env name\")\n", - " nm_env = env_config[\"env_name\"]\n", - " del env_config[\"env_name\"]\n", - " self.env_glop = grid2op.make(nm_env, **env_config)\n", - "\n", - " # 2. create the gym environment\n", - " self.env_gym = GymEnv(self.env_glop)\n", - " obs_gym = self.env_gym.reset()\n", - "\n", - " # 3. 
(optional) customize it (see section above for more information)\n", - " ## customize action space\n", - " self.env_gym.action_space = self.env_gym.action_space.ignore_attr(\"set_bus\").ignore_attr(\"set_line_status\")\n", - " self.env_gym.action_space = self.env_gym.action_space.reencode_space(\"redispatch\",\n", - " ContinuousToDiscreteConverter(nb_bins=11)\n", - " )\n", - " self.env_gym.action_space = self.env_gym.action_space.reencode_space(\"change_bus\", MultiToTupleConverter())\n", - " self.env_gym.action_space = self.env_gym.action_space.reencode_space(\"change_line_status\",\n", - " MultiToTupleConverter())\n", - " self.env_gym.action_space = self.env_gym.action_space.reencode_space(\"redispatch\", MultiToTupleConverter())\n", - " ## customize observation space\n", - " ob_space = self.env_gym.observation_space\n", - " ob_space = ob_space.keep_only_attr([\"rho\", \"gen_p\", \"load_p\", \"topo_vect\", \"actual_dispatch\"])\n", - " ob_space = ob_space.reencode_space(\"actual_dispatch\",\n", - " ScalerAttrConverter(substract=0.,\n", - " divide=self.env_glop.gen_pmax\n", - " )\n", - " )\n", - " ob_space = ob_space.reencode_space(\"gen_p\",\n", - " ScalerAttrConverter(substract=0.,\n", - " divide=self.env_glop.gen_pmax\n", - " )\n", - " )\n", - " ob_space = ob_space.reencode_space(\"load_p\",\n", - " ScalerAttrConverter(substract=obs_gym[\"load_p\"],\n", - " divide=0.5 * obs_gym[\"load_p\"]\n", - " )\n", - " )\n", - " self.env_gym.observation_space = ob_space\n", - "\n", - " # 4. specific to rllib\n", - " self.action_space = self.env_gym.action_space\n", - " self.observation_space = self.env_gym.observation_space\n", - " \n", - " # 4. bis: to avoid other type of issues, we recommend to build the action space and observation\n", - " # space directly from the spaces class.\n", - " d = {k: v for k, v in self.env_gym.observation_space.spaces.items()}\n", - " self.observation_space = gym.spaces.Dict(d)\n", - " a = {k: v for k, v in self.env_gym.action_space.items()}\n", - " self.action_space = gym.spaces.Dict(a)\n", - "\n", - " def reset(self):\n", - " obs = self.env_gym.reset()\n", - " return obs\n", - "\n", - " def step(self, action):\n", - " obs, reward, done, info = self.env_gym.step(action)\n", - " return obs, reward, done, info" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "test = MyEnv({\"env_name\": \"l2rpn_case14_sandbox\"})" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And now you can train it :" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "if nb_step_train: # remember: don't forge to change this number to perform an actual training !\n", - " from ray.rllib.agents import ppo # import the type of agents\n", - " # nb_step_train = 100 # Do not forget to turn on the actual training !\n", - " # fist initialize ray\n", - " \n", - " try:\n", - " # then define a \"trainer\"\n", - " trainer = ppo.PPOTrainer(env=MyEnv, config={\n", - " \"env_config\": {\"env_name\":\"l2rpn_case14_sandbox\"}, # config to pass to env class\n", - " })\n", - " # and then train it for a given number of iteration\n", - " for step in range(nb_step_train):\n", - " trainer.train()\n", - " finally: \n", - " # shutdown ray\n", - " ray.shutdown()\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Because we are approximating a physical system with real equations, and limited computational power\n", - "regardless of the 
\"backend\" / \"powergrid simulator\" used internally by grid2op, it is sometimes possible\n", - "that an observation obs[\"gen_p\"] is not exactly in the range \n", - "env.observation_space[\"gen_p\"].low, env.observation_space[\"gen_p\"].high.\n", - "\n", - "In this \"pathological\" cases we recommend to manually change the low / high value of the `gen_p` part of the observation space, for example by adding, after the definition of self.observation_space something like:\n", - "\n", - "```python\n", - " # 4. specific to rllib\n", - " self.action_space = self.env_gym.action_space\n", - " self.observation_space = self.env_gym.observation_space\n", - " self.observation_space[\"gen_p\"].low[:] = -np.inf\n", - " self.observation_space[\"gen_p\"].high[:] = np.inf\n", - "```\n", - "\n", - "More information at https://github.com/rte-france/Grid2Op/issues/196\n", - "\n", - "**NB** these cases can be spotted with an error like:\n", - "\n", - "```\n", - "RayTaskError(ValueError): ray::RolloutWorker.par_iter_next() (pid=378, ip=172.28.0.2)\n", - " File \"python/ray/_raylet.pyx\", line 480, in ray._raylet.execute_task\n", - " File \"python/ray/_raylet.pyx\", line 432, in ray._raylet.execute_task.function_executor\n", - " File \"/usr/local/lib/python3.7/dist-packages/ray/util/iter.py\", line 1152, in par_iter_next\n", - " return next(self.local_it)\n", - " File \"/usr/local/lib/python3.7/dist-packages/ray/rllib/evaluation/rollout_worker.py\", line 327, in gen_rollouts\n", - " yield self.sample()\n", - " File \"/usr/local/lib/python3.7/dist-packages/ray/rllib/evaluation/rollout_worker.py\", line 662, in sample\n", - " batches = [self.input_reader.next()]\n", - " File \"/usr/local/lib/python3.7/dist-packages/ray/rllib/evaluation/sampler.py\", line 95, in next\n", - " batches = [self.get_data()]\n", - " File \"/usr/local/lib/python3.7/dist-packages/ray/rllib/evaluation/sampler.py\", line 224, in get_data\n", - " item = next(self.rollout_provider)\n", - " File \"/usr/local/lib/python3.7/dist-packages/ray/rllib/evaluation/sampler.py\", line 620, in _env_runner\n", - " sample_collector=sample_collector,\n", - " File \"/usr/local/lib/python3.7/dist-packages/ray/rllib/evaluation/sampler.py\", line 1056, in _process_observations_w_trajectory_view_api\n", - " policy_id).transform(raw_obs)\n", - " File \"/usr/local/lib/python3.7/dist-packages/ray/rllib/models/preprocessors.py\", line 257, in transform\n", - " self.check_shape(observation)\n", - " File \"/usr/local/lib/python3.7/dist-packages/ray/rllib/models/preprocessors.py\", line 68, in check_shape\n", - " observation, self._obs_space)\n", - "ValueError: ('Observation ({}) outside given space ({})!', OrderedDict([('actual_dispatch', array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n", - " 0., 0., 0., 0., 0.], dtype=float32)), ('gen_p', array([0. , 0.14583334, 0. , 0.5376 , 0. ,\n", - " 0.13690476, 0. , 0. , 0.13988096, 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0.10416667, 0. , 0.9975 ,\n", - " 0. 
, 0.0872582 ], dtype=float32)), ('load_p', array([-8.33333358e-02, 1.27543859e+01, -3.14843726e+00, -4.91228588e-02,\n", - " -7.84314200e-02, 2.70270016e-02, 4.51001197e-01, -7.63358772e-02,\n", - " -8.42104480e-02, -7.90961310e-02, -2.31212564e-02, -7.31706619e-02,\n", - " -5.47945984e-02, -5.57769537e-02, -4.65115122e-02, 0.00000000e+00,\n", - " -6.25000373e-02, -2.98508592e-02, 0.00000000e+00, 2.59741265e-02,\n", - " -5.12821227e-02, 2.12766770e-02, -4.38757129e-02, 1.45455096e-02,\n", - " -1.45278079e-02, -3.63636017e-02, 7.14286715e-02, 1.03358915e-02,\n", - " 8.95522386e-02, 4.81927246e-02, -1.76759213e-02, 1.11111533e-02,\n", - " 1.00000061e-01, -5.28445065e-01, 3.00833374e-01, 7.76839375e-01,\n", - " -7.07498193e-01], dtype=float32)), ('rho', array([0.49652272, 0.42036632, 0.12563582, 0.22375877, 0.54946697,\n", - " 0.08844228, 0.05907034, 0.10975129, 0.13002895, 0.14068729,\n", - " 0.17318982, 0.6956544 , 0.38796344, 0.67179894, 0.22992906,\n", - " 0.25189328, 0.15049867, 0.09095841, 0.35627988, 0.35627988,\n", - " 0.36776555, 0.27249542, 0.6269728 , 0.62393713, 0.3464659 ,\n", - " 0.35879263, 0.22755426, 0.35994047, 0.36117986, 0.12019955,\n", - " 0.03638522, 0.2805753 , 0.5809281 , 0.6191531 , 0.5243356 ,\n", - " 0.60382956, 0.35834518, 0.35867074, 0.3580954 , 0.6681824 ,\n", - " 0.3441911 , 0.6081861 , 0.34460714, 0.18246886, 0.10307808,\n", - " 0.46778303, 0.47179568, 0.45407027, 0.30089107, 0.30089107,\n", - " 0.34481782, 0.3182735 , 0.35940355, 0.21895139, 0.19766088,\n", - " 0.63653564, 0.46778303, 0.4566811 , 0.64398617], dtype=float32)), ('topo_vect', array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n", - " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n", - " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n", - " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n", - " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n", - " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n", - " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n", - " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n", - " 1], dtype=int32))]), Dict(actual_dispatch:Box(-1.0, 1.0, (22,), float32), gen_p:Box(0.0, 1.2000000476837158, (22,), float32), load_p:Box(-inf, inf, (37,), float32), rho:Box(0.0, inf, (59,), float32), topo_vect:Box(-1, 2, (177,), int32)))\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**NB** We want to emphasize here that:\n", - "- This encoding is far from being suitable here. It is shown as an example, mainly to demonstrate the use of some of the gym_compat module\n", - "- The actions in particular are not really suited here. Actions in grid2op are relatively complex and encoding them this way does not seem like a great idea. For example, with this encoding, the agent will have to learn that it cannot act on more than 2 lines or two substations at the same time...\n", - "- The \"PPO\" agent shown here, with some default parameters is unlikely to lead to a good agent. You might want to read litterature on past L2RPN agents or draw some inspiration from L2RPN baselines packages for more information.\n", - "\n", - " For a better \"usecase\" of the PPO agent using RLLIB we strongly encourage you to check out the \"PPO_RLLIB\" agent of l2rpn_baselines package. " + "Please have a look at this notebook for more information." 
] }, { @@ -729,273 +624,15 @@ "source": [ "## 2) Stable baselines\n", "\n", - "This part is not a tutorial on how to use stable baselines. Please refer to [their documentation](https://stable-baselines3.readthedocs.io/en/master/) for more detailed information.\n", - "\n", - "As explained in the header of this notebook, we will follow the recommended usage:\n", - "1. Create a grid2op environment (see section [0) Recommended initial steps](#0\\)-Recommended-initial-steps))\n", - "2. Convert it to a gym environment (see section [0) Recommended initial steps](#0\\)-Recommended-initial-steps))\n", - "3. (optional) Customize the action space and observation space (see section [0) Recommended initial steps](#0\\)-Recommended-initial-steps))\n", - "4. Use the framework to train an agent **(only this part is framework specific)**\n", - "\n", - "\n", - "The issue with stable beselines 3 is that it expects standard action / observation types as explained there:\n", - "https://stable-baselines3.readthedocs.io/en/master/guide/algos.html#rl-algorithms\n", - "\n", - "> Non-array spaces such as Dict or Tuple are not currently supported by any algorithm.\n", - "\n", - "Unfortunately, it's not possible to convert without any \"loss of information\" an action space of dictionnary type to a vector.\n", - "\n", - "It is possible to use the grid2op framework in such cases, and in this section, we will explain how.\n", - "\n", - "\n", - "First, as always, we convert the grid2op environment in a gym environment." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env_sb = GymEnv(env_glop) # sb for \"stable baselines\"\n", - "glop_obs = env_glop.reset()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Then, we need to convert everything into a \"Box\" as it is the only things that stable baselines seems to digest at time of writing (March 20201).\n", - "\n", - "### Observation Space\n", + "To make it easier to get started, we moved this into the notebook [11_stable_baselines3_integration](./11_stable_baselines3_integration.ipynb)\n", "\n", - "We explain here how we convert an observation as a single Box. This step is rather easy, you just need to specify which attributes of the observation you want to keep and if you want so scale them (with the keword `subtract` and `divide`)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from grid2op.gym_compat import BoxGymObsSpace\n", - "env_sb.observation_space = BoxGymObsSpace(env_sb.init_env.observation_space,\n", - " attr_to_keep=[\"gen_p\", \"load_p\", \"topo_vect\",\n", - " \"rho\", \"actual_dispatch\", \"connectivity_matrix\"],\n", - " divide={\"gen_p\": env_glop.gen_pmax,\n", - " \"load_p\": glop_obs.load_p,\n", - " \"actual_dispatch\": env_glop.gen_pmax},\n", - " functs={\"connectivity_matrix\": (\n", - " lambda grid2obs: grid2obs.connectivity_matrix().flatten(),\n", - " 0., 1., None, None,\n", - " )\n", - " }\n", - " )\n", - "obs_gym = env_sb.reset()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "obs_gym in env_sb.observation_space" + "Please have a look at this notebook for more information." 
] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "**NB**: the above code is equivalent to something like:\n", - "\n", - "```python\n", - "from gym.spaces import Box\n", - "class BoxGymObsSpaceExample(Box):\n", - " def __init__(self, observation_space)\n", - " shape = observation_space.n_gen + \\ # dimension of gen_p\n", - " observation_space.n_load + \\ # load_p\n", - " observation_space.dim_topo + \\ # topo_vect\n", - " observation_space.n_line + \\ # rho\n", - " observation_space.n_gen + \\ # actual_dispatch\n", - " observation_space.dim_topo ** 2 # connectivity_matrix\n", - " \n", - " ob_sp = observation_space\n", - " # lowest value the attribute can take (see doc for more information)\n", - " low = np.concatenate((np.full(shape=(ob_sp.n_gen,), fill_value=0., dtype=dt_float), # gen_p\n", - " np.full(shape=(ob_sp.n_load,), fill_value=-np.inf, dtype=dt_float), # load_p\n", - " np.full(shape=(ob_sp.dim_topo,), fill_value=-1., dtype=dt_float), # topo_vect\n", - " np.full(shape=(ob_sp.n_line,), fill_value=0., dtype=dt_float), # rho\n", - " np.full(shape=(ob_sp.n_line,), fill_value=-ob_sp.gen_pmax, dtype=dt_float), # actual_dispatch\n", - " np.full(shape=(ob_sp.dim_topo**2,), fill_value=0., dtype=dt_float), # connectivity_matrix\n", - " ))\n", - " \n", - " # highest value the attribute can take\n", - " high = np.concatenate((np.full(shape=(ob_sp.n_gen,), fill_value=np.inf, dtype=dt_float), # gen_p\n", - " np.full(shape=(ob_sp.n_load,), fill_value=np.inf, dtype=dt_float), # load_p\n", - " np.full(shape=(ob_sp.dim_topo,), fill_value=2., dtype=dt_float), # topo_vect\n", - " np.full(shape=(ob_sp.n_line,), fill_value=np.inf, dtype=dt_float), # rho\n", - " np.full(shape=(ob_sp.n_line,), fill_value=ob_sp.gen_pmax, dtype=dt_float), # actual_dispatch\n", - " np.full(shape=(ob_sp.dim_topo**2,), fill_value=1., dtype=dt_float), # connectivity_matrix\n", - " ))\n", - " Box.__init__(self, low=low, high=high, shape=shape)\n", - " \n", - " def to_gym(self, observation):\n", - " res = np.concatenate((obs.gen_p / obs.gen_pmax,\n", - " obs.prod_p / glop_obs.load_p,\n", - " obs.topo_vect.astype(float),\n", - " obs.rho,\n", - " obs.actual_dispatch / env_glop.gen_pmax,\n", - " obs.connectivity_matrix().flatten()\n", - " ))\n", - " return res\n", - "```\n", - "\n", - "So if you want more customization, but making less generic code (the `BoxGymObsSpace` works for all the attribute of the observation) you can customize it by adapting the snippet above or read the documentation here (TODO).\n", - "\n", - "Only the \"to_gym\" function, and this exact signature is important in this case. It should take an observation in a grid2op format and return this same observation compatible with the gym Box (so a numpy array with the right shape and in the right range)\n", - " \n", - "\n", - "### Action space\n", - "\n", - "Converting the grid2op actions in something that is not a Tuple, nor a Dict. The main restriction in these frameworks is that they do not allow for easy integration of environment where both discrete actions and continuous actions are possible.\n", - "\n", - "\n", - "#### Using a BoxGymActSpace\n", - "\n", - "We can use the same kind of method explained above with the use of the class `BoxGymActSpace`. 
In this case, you need to provide a way to convert a numpy array (an element of a gym Box) into a grid2op action.\n", - "\n", - "**NB** This method is particularly suited if you want to focus on CONTINUOUS part of the action space, for example redispatching, curtailment or action on storage unit.\n", - "\n", - "Though we made it possible to also use discrete action, we do not recommend to use it. Prefer using the `MultiDiscreteActSpace` for such purpose." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from grid2op.gym_compat import BoxGymActSpace\n", - "scale_gen = env_sb.init_env.gen_max_ramp_up + env_sb.init_env.gen_max_ramp_down\n", - "scale_gen[~env_sb.init_env.gen_redispatchable] = 1.0\n", - "env_sb.action_space = BoxGymActSpace(env_sb.init_env.action_space,\n", - " attr_to_keep=[\"redispatch\"],\n", - " multiply={\"redispatch\": scale_gen},\n", - " )\n", - "obs_gym = env_sb.reset()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**NB**: the above code is equivalent to something like:\n", - "\n", - "```python\n", - "from gym.spaces import Box\n", - "class BoxGymActSpace(Box):\n", - " def __init__(self, action_space)\n", - " shape = observation_space.n_gen # redispatch\n", - " \n", - " ob_sp = observation_space\n", - " # lowest value the attribute can take (see doc for more information)\n", - " low = np.full(shape=(ob_sp.n_gen,), fill_value=-1., dtype=dt_float)\n", - " \n", - " # highest value the attribute can take\n", - " high = np.full(shape=(ob_sp.n_gen,), fill_value=1., dtype=dt_float)\n", - " \n", - " Box.__init__(self, low=low, high=high, shape=shape)\n", - " \n", - " self.action_space = action_space\n", - " \n", - " def from_gym(self, gym_observation):\n", - " res = self.action_space()\n", - " res.redispatch = gym_observation * scale_gen\n", - " return res\n", - "```\n", - "\n", - "So if you want more customization, but making less generic code (the `BoxGymActSpace` works for all the attribute of the action) you can customize it by adapting the snippet above or read the documentation here (TODO). The only important method you need to code is the \"from_gym\" one that should take into account an action as sampled by the gym Box and return a grid2op action.\n", - "\n", - "\n", - "#### Using a MultiDiscreteActSpace\n", - "\n", - "We can use the same kind of method explained above with the use of the class `BoxGymActSpace`, but which is more suited to the discrete type of actions.\n", - "\n", - "In this case, you need to provide a way to convert a numpy array of integer (an element of a gym MultiDiscrete) into a grid2op action.\n", - "\n", - "**NB** This method is particularly suited if you want to focus on DISCRETE part of the action space, for example set_bus or change_line_status." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from grid2op.gym_compat import MultiDiscreteActSpace\n", - "reencoded_act_space = MultiDiscreteActSpace(env_sb.init_env.action_space,\n", - " attr_to_keep=[\"set_line_status\", \"set_bus\", \"redispatch\"])\n", - "env_sb.action_space = reencoded_act_space\n", - "obs_gym = env_sb.reset()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Wrapping all up and starting the training\n", - "\n", - "First, let's make sure our environment is compatible with stable baselines, thanks to their helper function.\n", - "\n", - "This means that " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from stable_baselines3.common.env_checker import check_env\n", - "check_env(env_sb)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "So as we see, the environment seems to be compatible with stable baselines. Now we can start the training." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from stable_baselines3 import PPO\n", - "model = PPO(\"MlpPolicy\", env_sb, verbose=1)\n", - "if nb_step_train:\n", - " model.learn(total_timesteps=nb_step_train)\n", - " # model.save(\"ppo_stable_baselines3\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Again, the goal of this section was not to demonstrate how to train a state of the art algorithm, but rather to demonstrate how to use grid2op with the stable baselines repository.\n", - "\n", - "Most importantly, the neural networks there are not customized for the environment, default parameters are used. This is unlikely to work at all !\n", - "\n", - "For more information and to use tips and tricks to get started with RL agents, the devs of \"stable baselines\" have done a really nice job. You can have some tips for training RL agents here\n", - "https://stable-baselines3.readthedocs.io/en/master/guide/rl_tips.html\n", - "and consult any of the resources listed there https://stable-baselines3.readthedocs.io/en/master/guide/rl.html\n", - "\n", - "\n", - " For a better \"usecase\" of the PPO agent using stable-baselines3 we strongly encourage you to check out the \"PPO_SB3\" agent of l2rpn_baselines package. \n", - "\n", "## 3) Tf Agents\n", "Lastly, the RL frameworks we will use is tf agents.\n", "\n", @@ -1041,7 +678,7 @@ " )\n", " }\n", " )\n", - "obs_gym = env_tfa.reset()" + "obs_gym, info = env_tfa.reset()" ] }, { diff --git a/getting_started/11_ray_integration.ipynb b/getting_started/11_ray_integration.ipynb new file mode 100644 index 000000000..7410bdace --- /dev/null +++ b/getting_started/11_ray_integration.ipynb @@ -0,0 +1,688 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Grid2Op integration with ray / rllib framework\n", + "\n", + "Try me out interactively with: [![Binder](./img/badge_logo.svg)](https://mybinder.org/v2/gh/rte-france/Grid2Op/master)\n", + "\n", + "\n", + "**objectives** This notebooks briefly explains how to use grid2op with ray (rllib) RL framework. Make sure to read the previous notebook 11_IntegrationWithExistingRLFrameworks.ipynb for a deeper dive into what happens. We only show the working solution here.\n", + "\n", + " This explains the ideas and shows a \"self contained\" somewhat minimal example of use of ray / rllib framework with grid2op. 
It is not meant to be fully generic; the code might need to be adjusted. \n", + "\n", + "This notebook is more an \"example of what works\" rather than a deep dive tutorial.\n", + "\n", + "See https://docs.ray.io/en/latest/rllib/rllib-env.html#configuring-environments for more detailed information.\n", + "\n", + "See also https://docs.ray.io/en/latest/rllib/package_ref/doc/ray.rllib.algorithms.algorithm_config.AlgorithmConfig.html for other details.\n", + "\n", + "This notebook is tested with grid2op 1.10.2 and ray 2.24.0 (python3.10) on an Ubuntu 20.04 machine.\n", + "\n", + " We found that ray is highly \"unstable\". The documentation is not really on par with their development rhythm. Basically, this notebook works given the exact python version and ray version. If you change either of them, you might need to modify the calls to ray. \n", + "\n", + "It is organized as follows:\n", + "\n", + "- [0 Some tips to get started](#0-some-tips-to-get-started) : is a reminder on what you can do to make things work. Indeed, this notebook explains \"how to use grid2op with ray / rllib\" but not \"how to create a working agent able to operate a real powergrid in real time with ray / rllib\". We wish we could explain the latter...\n", + "- [1 Create the \"Grid2opEnvWrapper\" class](#1-create-the-grid2openvwraper-class) : explains how to create the main grid2op env class that you can use as a \"gymnasium\" environment. \n", + "- [2 Create an environment, and train a first policy](#2-create-an-environment-and-train-a-first-policy): shows how to create an environment from the class above (it is pretty easy)\n", + "- [3 Evaluate the trained agent ](#3-evaluate-the-trained-agent): shows how to evaluate the trained \"agent\"\n", + "- [4 Some customizations](#4-some-customizations): explains how to perform some customizations of your agent / environment / policy\n", + "## 0 Some tips to get started\n", + "\n", + " It is unlikely that \"simply\" using an RL algorithm on a grid2op environment will lead to good results for the vast majority of environments.\n", + "\n", + "To make RL algorithms work with more or less success you might want to:\n", + "\n", + " 1) adjust the observation space: in particular selecting the right information for your agent. Too much information\n", + " and the size of the observation space will blow up and your agent will not learn anything. Not enough\n", + " information and your agent will not be able to capture anything.\n", + " \n", + " 2) customize the action space: dealing with both discrete and continuous values is often a challenge. So maybe you want to focus on only one type of action. And in all cases, try to still reduce the number of actions your\n", + " agent \n", + " can perform. Indeed, for \"larger\" grids (118 substations, as a reference the French grid counts more than 6.000\n", + " such substations...) and by limiting 2 busbars per substation (as a reference, for some substations, you have more\n", + " than 12 such \"busbars\") your agent will have the opportunity to choose between more than 60.000 different discrete\n", + " actions at each step. This is way too large for current RL algorithms as far as we know (and the proposed environments are\n", + " small in comparison to real ones)\n", + " \n", + " 3) customize the reward: the default reward might not work great for you. Ultimately, what TSO's or ISO's want is\n", + " to operate the grid safely, as long as possible with a cost as low as possible. It is of course really hard to\n", + " capture everything in one single reward signal. 
Customizing the reward is also really important because the \"do\n", + " nothing\" policy often leads to really good results (much better than random actions), which makes the exploration\n", + " of different actions less appealing. So you kind of want to incentivize your agent to perform some actions at some point.\n", + " \n", + " 4) use a fast simulator: even if you target an industrial application with industry grade simulators, we would still\n", + " advise you to use (at early stages of training at least) a fast simulator for the vast majority of the training\n", + " process and then maybe to fine tune on a better one.\n", + " \n", + " 5) combine RL with some heuristics: it's super easy to implement things like \"if there is no issue, then do\n", + " nothing\". This can be quite time consuming to learn though. Don't hesitate to check out the \"l2rpn-baselines\"\n", + " repository for already \"kind of working\" heuristics.\n", + " \n", + "And finally don't hesitate to check the solutions proposed by winners of past l2rpn competitions in l2rpn-baselines.\n", + "\n", + "You can also ask questions on our Discord or on our GitHub." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "## 1 Create the \"Grid2opEnvWrapper\" class\n", + "\n", + "In the next cell, we define a custom environment (that will internally use the `GymEnv` grid2op class). It is not strictly needed by grid2op itself, but it is required by ray / rllib.\n", + "\n", + "Indeed, in order to work with ray / rllib you need to define a custom wrapper on top of the GymEnv wrapper. You then have:\n", + "\n", + "- self._g2op_env which is the default grid2op environment, receiving grid2op Action and producing grid2op Observation.\n", + "- self._gym_env which is the grid2op defined `gymnasium Environment` that cannot be directly used with ray / rllib\n", + "- `Grid2opEnvWrapper` which is the wrapper on top of `self._gym_env` to make it usable with ray / rllib.\n", + "\n", + "Ray / rllib expects the gymnasium environment to inherit from `gymnasium.Env` and to be initialized with a given configuration. This is why you need to create the `Grid2opEnvWrapper` wrapper on top of `GymEnv`.\n", + "\n", + "In the initialization of `Grid2opEnvWrapper`, the `env_config` variable is a dictionary that can take the following keyword arguments:\n", + "\n", + "- `backend_cls` : what is the class of the backend. If not provided, it will use `LightSimBackend` from the `lightsim2grid` package\n", + "- `backend_options`: what options will be used to create the backend for your environment. Your backend will be created by calling\n", + " `backend_cls(**backend_options)`, for example if you want to build `LightSimBackend(detailed_info_for_cascading_failure=False)` you can pass `{\"backend_cls\": LightSimBackend, \"backend_options\": {\"detailed_info_for_cascading_failure\": False}}`\n", + "- `env_name` : name of the grid2op environment you want to use, by default it uses `\"l2rpn_case14_sandbox\"`\n", + "- `env_is_test` : whether to add `test=True` when creating the grid2op environment (if `env_is_test` is True it will add `test=True` when calling `grid2op.make(..., test=True)`) otherwise it uses `test=False`\n", + "- `obs_attr_to_keep` : in this wrapper we only allow your agent to see a Box as an observation. This parameter allows you to control which attributes of the grid2op observation will be present in the agent observation space. 
By default it's `[\"rho\", \"p_or\", \"gen_p\", \"load_p\"]` which is \"kind of random\" and is probably not suited for every agent.\n", + "- `act_type` : controls the type of actions your agent will be able to perform. Already coded in this notebook are:\n", + " - `\"discrete\"` to use a `Discrete` action space\n", + " - `\"box\"` to use a `Box` action space\n", + " - `\"multi_discrete\"` to use a `MultiDiscrete` action space\n", + "- `act_attr_to_keep` : that allows you to customize the action space. If not provided, it defaults to:\n", + " - `[\"set_line_status_simple\", \"set_bus\"]` if `act_type` is `\"discrete\"` \n", + " - `[\"redispatch\", \"set_storage\", \"curtail\"]` if `act_type` is `\"box\"` \n", + " - `[\"one_line_set\", \"one_sub_set\"]` if `act_type` is `\"multi_discrete\"`\n", + "\n", + "If you want to add more customization, for example the reward function, the parameters of the environment etc. etc. feel free to get inspired by this code and extend it. Any PR on this regard is more than welcome." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from gymnasium import Env\n", + "from gymnasium.spaces import Discrete, MultiDiscrete, Box\n", + "import json\n", + "\n", + "import ray\n", + "from ray.rllib.algorithms.ppo import PPOConfig\n", + "from ray.rllib.algorithms import ppo\n", + "\n", + "from typing import Dict, Literal, Any\n", + "import copy\n", + "\n", + "import grid2op\n", + "from grid2op.gym_compat import GymEnv, BoxGymObsSpace, DiscreteActSpace, BoxGymActSpace, MultiDiscreteActSpace\n", + "from lightsim2grid import LightSimBackend\n", + "\n", + "\n", + "class Grid2opEnvWrapper(Env):\n", + " def __init__(self,\n", + " env_config: Dict[Literal[\"backend_cls\",\n", + " \"backend_options\",\n", + " \"env_name\",\n", + " \"env_is_test\",\n", + " \"obs_attr_to_keep\",\n", + " \"act_type\",\n", + " \"act_attr_to_keep\"],\n", + " Any]= None):\n", + " super().__init__()\n", + " if env_config is None:\n", + " env_config = {}\n", + "\n", + " # handle the backend\n", + " backend_cls = LightSimBackend\n", + " if \"backend_cls\" in env_config:\n", + " backend_cls = env_config[\"backend_cls\"]\n", + " backend_options = {}\n", + " if \"backend_options\" in env_config:\n", + " backend_options = env_config[\"backend_options\"]\n", + " backend = backend_cls(**backend_options)\n", + "\n", + " # create the grid2op environment\n", + " env_name = \"l2rpn_case14_sandbox\"\n", + " if \"env_name\" in env_config:\n", + " env_name = env_config[\"env_name\"]\n", + " if \"env_is_test\" in env_config:\n", + " is_test = bool(env_config[\"env_is_test\"])\n", + " else:\n", + " is_test = False\n", + " self._g2op_env = grid2op.make(env_name, backend=backend, test=is_test)\n", + " # NB by default this might be really slow (when the environment is reset)\n", + " # see https://grid2op.readthedocs.io/en/latest/data_pipeline.html for maybe 10x speed ups !\n", + " # TODO customize reward or action_class for example !\n", + "\n", + " # create the gym env (from grid2op)\n", + " self._gym_env = GymEnv(self._g2op_env)\n", + "\n", + " # customize observation space\n", + " obs_attr_to_keep = [\"rho\", \"p_or\", \"gen_p\", \"load_p\"]\n", + " if \"obs_attr_to_keep\" in env_config:\n", + " obs_attr_to_keep = copy.deepcopy(env_config[\"obs_attr_to_keep\"])\n", + " self._gym_env.observation_space.close()\n", + " self._gym_env.observation_space = BoxGymObsSpace(self._g2op_env.observation_space,\n", + " attr_to_keep=obs_attr_to_keep\n", + " )\n", + " # 
export observation space for the Grid2opEnv\n", + " self.observation_space = Box(shape=self._gym_env.observation_space.shape,\n", + " low=self._gym_env.observation_space.low,\n", + " high=self._gym_env.observation_space.high)\n", + "\n", + " # customize the action space\n", + " act_type = \"discrete\"\n", + " if \"act_type\" in env_config:\n", + " act_type = env_config[\"act_type\"]\n", + "\n", + " self._gym_env.action_space.close()\n", + " if act_type == \"discrete\":\n", + " # user wants a discrete action space\n", + " act_attr_to_keep = [\"set_line_status_simple\", \"set_bus\"]\n", + " if \"act_attr_to_keep\" in env_config:\n", + " act_attr_to_keep = copy.deepcopy(env_config[\"act_attr_to_keep\"])\n", + " self._gym_env.action_space = DiscreteActSpace(self._g2op_env.action_space,\n", + " attr_to_keep=act_attr_to_keep)\n", + " self.action_space = Discrete(self._gym_env.action_space.n)\n", + " elif act_type == \"box\":\n", + " # user wants continuous action space\n", + " act_attr_to_keep = [\"redispatch\", \"set_storage\", \"curtail\"]\n", + " if \"act_attr_to_keep\" in env_config:\n", + " act_attr_to_keep = copy.deepcopy(env_config[\"act_attr_to_keep\"])\n", + " self._gym_env.action_space = BoxGymActSpace(self._g2op_env.action_space,\n", + " attr_to_keep=act_attr_to_keep)\n", + " self.action_space = Box(shape=self._gym_env.action_space.shape,\n", + " low=self._gym_env.action_space.low,\n", + " high=self._gym_env.action_space.high)\n", + " elif act_type == \"multi_discrete\":\n", + " # user wants a multi-discrete action space\n", + " act_attr_to_keep = [\"one_line_set\", \"one_sub_set\"]\n", + " if \"act_attr_to_keep\" in env_config:\n", + " act_attr_to_keep = copy.deepcopy(env_config[\"act_attr_to_keep\"])\n", + " self._gym_env.action_space = MultiDiscreteActSpace(self._g2op_env.action_space,\n", + " attr_to_keep=act_attr_to_keep)\n", + " self.action_space = MultiDiscrete(self._gym_env.action_space.nvec)\n", + " else:\n", + " raise NotImplementedError(f\"action type '{act_type}' is not currently supported.\")\n", + " \n", + " def reset(self, seed=None, options=None):\n", + " # use default _gym_env (from grid2op.gym_compat module)\n", + " # NB: here you can also specify \"default options\" when you reset, for example:\n", + " # - limiting the duration of the episode \"max step\"\n", + " # - starting at different steps \"init ts\"\n", + " # - study difficult scenario \"time serie id\"\n", + " # - specify an initial state of your grid \"init state\"\n", + " return self._gym_env.reset(seed=seed, options=options)\n", + " \n", + " def step(self, action):\n", + " # use default _gym_env (from grid2op.gym_compat module)\n", + " return self._gym_env.step(action)\n", + " " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2 Create an environment, and train a first policy" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we init ray, because we need to." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ray.init()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# example of the documentation, directly\n", + "# see https://docs.ray.io/en/latest/rllib/package_ref/doc/ray.rllib.algorithms.algorithm_config.AlgorithmConfig.html\n", + "\n", + "# Construct a generic config object, specifying values within different\n", + "# sub-categories, e.g. 
\"training\".\n", + "env_config = {}\n", + "config = (PPOConfig().training(gamma=0.9, lr=0.01)\n", + " .environment(env=Grid2opEnvWrapper, env_config=env_config)\n", + " .resources(num_gpus=0)\n", + " .env_runners(num_env_runners=0)\n", + " .framework(\"tf2\")\n", + " )\n", + "\n", + "# A config object can be used to construct the respective Algorithm.\n", + "rllib_algo = config.build()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we train it for one training iteration (might call `env.reset()` and `env.step()` multiple times, see ray's documentation for a better understanding of what happens here and don't hesitate to open an issue or a PR to explain it and we'll add it here, thanks)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "print(rllib_algo.train())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3 Evaluate the trained agent\n", + "\n", + "This notebook is a simple quick introduction for stable baselines only. So we don't really recall everything that has been said previously.\n", + "\n", + "Please consult the section `0) Recommended initial steps` of the notebook [11_IntegrationWithExistingRLFrameworks](./11_IntegrationWithExistingRLFrameworks.ipynb) for more information.\n", + "\n", + "**TLD;DR** grid2op offers the possibility to test your agent on scenarios / episodes different from the one it has been trained. We greatly encourage you to use this functionality.\n", + "\n", + "There are two main ways to evaluate your agent:\n", + "\n", + "- you stay in the \"gymnasium\" world (see [here](#31-staying-in-the-gymnasium-ecosystem) ) and you evaluate your policy directly just like you would any other gymnasium compatible environment. Simple, easy but without support for some grid2op features\n", + "- you \"get back\" to the \"grid2op\" world (detailed [here](#32-using-the-grid2op-ecosystem)) by \"converting\" your NN policy into something that is able to output grid2op like action. This introduces yet again a \"wrapper\" but you can benefit from all grid2op features, such as the `Runner` to save an inspect what your policy has done.\n", + "\n", + " We show here just a simple examples to \"get easily started\". For much better working agents, you can have a look at l2rpn-baselines code. There you have classes that maps the environment, the agents etc. to grid2op directly (you don't have to copy paste any wrapper). \n", + "\n", + "\n", + "\n", + "### 3.1 staying in the gymnasium ecosystem\n", + "\n", + "You can do pretty much what you want, but you have to do it yourself, or use any of the \"Wrappers\" available in gymnasium https://gymnasium.farama.org/main/api/wrappers/ (*eg* https://gymnasium.farama.org/main/api/wrappers/misc_wrappers/#gymnasium.wrappers.RecordEpisodeStatistics) or in your RL framework.\n", + "\n", + "For the sake of simplicity, we show how to do things \"manually\" even though we do not recommend to do it like that." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "nb_episode_test = 2\n", + "seeds_test_env = (0, 1) # same size as nb_episode_test\n", + "seeds_test_agent = (3, 4) # same size as nb_episode_test\n", + "ts_ep_test = (0, 1) # same size as nb_episode_test\n", + "gym_env = Grid2opEnvWrapper(env_config)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ep_infos = {} # information that will be saved\n", + "\n", + "\n", + "for ep_test_num in range(nb_episode_test):\n", + " init_obs, init_infos = gym_env.reset(seed=seeds_test_env[ep_test_num],\n", + " options={\"time serie id\": ts_ep_test[ep_test_num]})\n", + " # TODO seed the agent, I did not found in ray doc how to do it\n", + " done = False\n", + " cum_reward = 0\n", + " step_survived = 0\n", + " obs = init_obs\n", + " while not done:\n", + " act = rllib_algo.compute_single_action(obs, explore=False)\n", + " obs, reward, terminated, truncated, info = gym_env.step(act)\n", + " step_survived += 1\n", + " cum_reward += float(reward)\n", + " done = terminated or truncated\n", + " ep_infos[ep_test_num] = {\"time serie id\": ts_ep_test[ep_test_num],\n", + " \"time serie folder\": gym_env._gym_env.init_env.chronics_handler.get_id(),\n", + " \"env seed\": seeds_test_env[ep_test_num],\n", + " \"agent seed\": seeds_test_agent[ep_test_num],\n", + " \"steps survived\": step_survived,\n", + " \"total steps\": int(gym_env._gym_env.init_env.max_episode_duration()),\n", + " \"cum reward\": cum_reward}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# \"prettyprint\" the dictionnary above\n", + "\n", + "print(json.dumps(ep_infos, indent=4))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As you might have seen, it's not easy this way to retrieve some useful information about the grid2op environment if these informations are not passed to the policy.\n", + "\n", + "For example, we need to call `gym_env._gym_env.init_env` to access the underlying grid2op environment... You have to convert some things from int32 or float32 to float or int otherwise json complains, you have to control yourself the seeds to have reproducible results etc.\n", + "\n", + "It's a quick way to have something working but it might be perfected." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 3.2 using the grid2op ecosystem\n", + "\n", + "This second method brings it closer to grid2op ecosystem, you will be able to use it with the grid2op `Runner`, save the results and read it back with other tools such as grid2viz and do the evaluation in parrallel without too much trouble (and with high reproducibility).\n", + "\n", + "With this method, you build a grid2op agent and this agent can then be used like every other grid2op agent. For example you can compare it with heuristic agents, agent based on optimization etc.\n", + "\n", + "This way of doing things also allows you to customize when the neural network policy is used. For example, you might chose to use it only when the grid is \"unsafe\" (and if the grid is safe you use an \"expert\" rules).\n", + "\n", + "This is more flexible than the previous one." 
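As an illustration of the "use the policy only when the grid is unsafe" idea mentioned above, here is a hypothetical sketch of what the `act` method of such an agent could look like; the 0.95 threshold on `obs.rho` is an arbitrary assumption and is not part of the original notebook. The complete, unmodified wrapper used in this notebook is given in the next cell.

```python
# Hypothetical variant of the `act` method of the agent wrapper defined below:
# call the neural network only when a powerline is close to its thermal limit,
# otherwise return a "do nothing" action (often a surprisingly strong baseline).
def act(self, obs, reward, done):
    if obs.rho.max() < 0.95:  # arbitrary "safe grid" threshold
        return self.action_space({})  # do nothing
    gym_obs = self.gym_env._gym_env.observation_space.to_gym(obs)
    gym_act = self.trained_agent.compute_single_action(gym_obs, explore=False)
    return self.gym_env._gym_env.action_space.from_gym(gym_act)
```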
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from grid2op.Agent import BaseAgent\n", + "from grid2op.Runner import Runner\n", + "\n", + "class Grid2opAgentWrapper(BaseAgent):\n", + " def __init__(self,\n", + " gym_env: Grid2opEnvWrapper,\n", + " trained_agent):\n", + " self.gym_env = gym_env\n", + " BaseAgent.__init__(self, gym_env._gym_env.init_env.action_space)\n", + " self.trained_agent = trained_agent\n", + " \n", + " def act(self, obs, reward, done):\n", + " # you can customize it here to call the NN policy `trained_agent`\n", + " # only in some cases, depending on the observation for example\n", + " gym_obs = self.gym_env._gym_env.observation_space.to_gym(obs)\n", + " gym_act = self.trained_agent.compute_single_action(gym_obs, explore=False)\n", + " grid2op_act = self.gym_env._gym_env.action_space.from_gym(gym_act)\n", + " return grid2op_act\n", + " \n", + " def seed(self, seed):\n", + " # implement the seed function\n", + " # TODO\n", + " return" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "my_agent = Grid2opAgentWrapper(gym_env, rllib_algo)\n", + "runner = Runner(**gym_env._g2op_env.get_params_for_runner(),\n", + " agentClass=None,\n", + " agentInstance=my_agent)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "res = runner.run(nb_episode=nb_episode_test,\n", + " env_seeds=seeds_test_env,\n", + " agent_seeds=seeds_test_agent,\n", + " episode_id=ts_ep_test,\n", + " add_detailed_output=True\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "res" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 4 some customizations\n", + "\n", + "### 4.1 Train a PPO agent using 2 \"runners\" to make the rollouts\n", + "\n", + "In this second example, we explain briefly how to train the model using 2 \"processes\". This is, the agent will interact with 2 agents at the same time during the \"rollout\" phases.\n", + "\n", + "But everything related to the training of the agent is still done on the main process (and in this case not using a GPU but only a CPU)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# see https://docs.ray.io/en/latest/rllib/package_ref/doc/ray.rllib.algorithms.algorithm_config.AlgorithmConfig.html\n", + "\n", + "# use multiple runners\n", + "config2 = (PPOConfig().training(gamma=0.9, lr=0.01)\n", + " .environment(env=Grid2opEnvWrapper, env_config={})\n", + " .resources(num_gpus=0)\n", + " .env_runners(num_env_runners=2, num_envs_per_env_runner=1, num_cpus_per_env_runner=1)\n", + " .framework(\"tf2\")\n", + " )\n", + "\n", + "# A config object can be used to construct the respective Algorithm.\n", + "rllib_algo2 = config2.build()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we train it for one training iteration (might call `env.reset()` and `env.step()` multiple times)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(rllib_algo2.train())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 4.2 Use non default parameters to make the grid2op environment\n", + "\n", + "In this third example, we will train a policy using the \"box\" action space, and on another environment (`l2rpn_idf_2023` instead of `l2rpn_case14_sandbox`)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# see https://docs.ray.io/en/latest/rllib/package_ref/doc/ray.rllib.algorithms.algorithm_config.AlgorithmConfig.html\n", + "\n", + "# Use a \"Box\" action space (mainly to use redispatching, curtailment and storage units)\n", + "env_config3 = {\"env_name\": \"l2rpn_idf_2023\",\n", + " \"env_is_test\": True,\n", + " \"act_type\": \"box\",\n", + " }\n", + "config3 = (PPOConfig().training(gamma=0.9, lr=0.01)\n", + " .environment(env=Grid2opEnvWrapper, env_config=env_config3)\n", + " .resources(num_gpus=0)\n", + " .env_runners(num_env_runners=2, num_envs_per_env_runner=1, num_cpus_per_env_runner=1)\n", + " .framework(\"tf2\")\n", + " )\n", + "\n", + "# A config object can be used to construct the respective Algorithm.\n", + "rllib_algo3 = config3.build()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we train it for one training iteration (might call `env.reset()` and `env.step()` multiple times)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(rllib_algo3.train())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And now a policy using the \"multi discrete\" action space: " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# see https://docs.ray.io/en/latest/rllib/package_ref/doc/ray.rllib.algorithms.algorithm_config.AlgorithmConfig.html\n", + "\n", + "# Use a \"Box\" action space (mainly to use redispatching, curtailment and storage units)\n", + "env_config4 = {\"env_name\": \"l2rpn_idf_2023\",\n", + " \"env_is_test\": True,\n", + " \"act_type\": \"multi_discrete\",\n", + " }\n", + "config4 = (PPOConfig().training(gamma=0.9, lr=0.01)\n", + " .environment(env=Grid2opEnvWrapper, env_config=env_config4)\n", + " .resources(num_gpus=0)\n", + " .env_runners(num_env_runners=2, num_envs_per_env_runner=1, num_cpus_per_env_runner=1)\n", + " .framework(\"tf2\")\n", + " )\n", + "\n", + "# A config object can be used to construct the respective Algorithm.\n", + "rllib_algo4 = config4.build()" + ] + }, + { + 
"cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we train it for one training iteration (might call `env.reset()` and `env.step()` multiple times)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(rllib_algo4.train())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 4.3 Customize the policy (number of layers, size of layers etc.)\n", + "\n", + "This notebook does not aim at covering all possibilities offered by ray / rllib. For that you need to refer to the ray / rllib documentation.\n", + "\n", + "We will simply show how to change the size of the neural network used as a policy." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# see https://docs.ray.io/en/latest/rllib/package_ref/doc/ray.rllib.algorithms.algorithm_config.AlgorithmConfig.html\n", + "\n", + "# Use a \"Box\" action space (mainly to use redispatching, curtailment and storage units)\n", + "config5 = (PPOConfig().training(gamma=0.9, lr=0.01)\n", + " .environment(env=Grid2opEnvWrapper, env_config={})\n", + " .resources(num_gpus=0)\n", + " .env_runners(num_env_runners=2, num_envs_per_env_runner=1, num_cpus_per_env_runner=1)\n", + " .framework(\"tf2\")\n", + " .rl_module(\n", + " model_config_dict={\"fcnet_hiddens\": [32, 32, 32]}, # 3 layers (fully connected) of 32 units each\n", + " )\n", + " )\n", + "\n", + "# A config object can be used to construct the respective Algorithm.\n", + "rllib_algo5 = config5.build()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we train it for one training iteration (might call `env.reset()` and `env.step()` multiple times)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(rllib_algo5.train())" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.14" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/getting_started/11_stable_baselines3_integration.ipynb b/getting_started/11_stable_baselines3_integration.ipynb new file mode 100644 index 000000000..68576bc80 --- /dev/null +++ b/getting_started/11_stable_baselines3_integration.ipynb @@ -0,0 +1,638 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "90b9341f", + "metadata": {}, + "source": [ + "# Grid2Op integration with stable baselines3 framework\n", + "\n", + "Try me out interactively with: [![Binder](./img/badge_logo.svg)](https://mybinder.org/v2/gh/rte-france/Grid2Op/master)\n", + "\n", + "\n", + "**objectives** This notebooks briefly explains how to use grid2op with stable baselines 3 RL framework. Make sure to read the previous notebook [11_IntegrationWithExistingRLFrameworks](./11_IntegrationWithExistingRLFrameworks.ipynb) for a deeper dive into what happens. We only show the working solution here.\n", + "\n", + " This explains the ideas and shows a \"self contained\" somewhat minimal example of use of stable baselines 3 framework with grid2op. It is not meant to be fully generic, code might need to be adjusted. 
\n", + "\n", + "This notebook is more an \"example of what works\" rather than a deep dive tutorial.\n", + "\n", + "See stable-baselines3.readthedocs.io/ for a more detailed information.\n", + "\n", + "This notebook is tested with grid2op 1.10 and stable baselines 2.3.2 on an ubuntu 20.04 machine.\n", + "\n", + "It is organised as followed:\n", + "\n", + "- [0 Some tips to get started](#0-some-tips-to-get-started) : is a reminder on what you can do to make things work. Indeed, this notebook explains \"how to use grid2op with stable baselines\" but not \"how to create a working agent able to operate a real powergrid in real time with stable baselines\". We wish we could explain the later...\n", + "- [1 Create the \"Grid2opEnvWrapper\" class](#1-create-the-grid2openvwraper-class) : explain how to create the main grid2op env class that you can use a \"gymnasium\" environment. \n", + "- [2 Create an environment, and train a first policy](#2-create-an-environment-and-train-a-first-policy): show how to create an environment from the class above (is pretty easy)\n", + "- [3 Evaluate the trained agent ](#3-evaluate-the-trained-agent): show how to evaluate the trained \"agent\"\n", + "- [4 Some customizations](#4-some-customizations): explain how to perform some customization of your agent / environment / policy\n", + "\n", + "## 0 Some tips to get started\n", + "\n", + " It is unlikely that \"simply\" using a RL algorithm on a grid2op environment will lead to good results for the vast majority of environments.\n", + "\n", + "To make RL algorithms work with more or less sucess you might want to:\n", + "\n", + " 1) ajust the observation space: in particular selecting the right information for your agent. Too much information\n", + " and the size of the observation space will blow up and your agent will not learn anything. Not enough\n", + " information and your agent will not be able to capture anything.\n", + " \n", + " 2) customize the action space: dealing with both discrete and continuous values is often a challenge. So maybe you want to focus on only one type of action. And in all cases, try to still reduce the amount of actions your\n", + " agent \n", + " can perform. Indeed, for \"larger\" grids (118 substations, as a reference the french grid counts more than 6.000\n", + " such substations...) and by limiting 2 busbars per substation (as a reference, for some subsations, you have more\n", + " than 12 such \"busbars\") your agent will have the opportunity to choose between more than 60.000 different discrete\n", + " actions each steps. This is way too large for current RL algorithm as far as we know (and proposed environment are\n", + " small in comparison to real one)\n", + " \n", + " 3) customize the reward: the default reward might not work great for you. Ultimately, what TSO's or ISO's want is\n", + " to operate the grid safely, as long as possible with a cost as low as possible. This is of course really hard to\n", + " catch everything in one single reward signal. Customizing the reward is also really important because the \"do\n", + " nothing\" policy often leads to really good results (much better than random actions) which makes exploration \n", + " different actions...). 
So you kind of want to incentivize your agent to perform some actions at some point.\n", + " \n", + " 4) use fast simulator: even if you target an industrial application with industry grade simulators, we still would\n", + " advise you to use (at early stage of training at least) fast simulator for the vast majority of the training\n", + " process and then maybe to fine tune on better one.\n", + " \n", + " 5) combine RL with some heuristics: it's super easy to implement things like \"if there is no issue, then do\n", + " nothing\". This can be quite time consuming to learn though. Don't hesitate to check out the \"l2rpn-baselines\"\n", + " repository for already \"kind of working\" heuristics\n", + " \n", + "And finally don't hesitate to check solution proposed by winners of past l2rpn competitions in l2rpn-baselines.\n", + "\n", + "You can also ask question on our discord or on our github.\n", + "\n", + "\n", + "## 1 Create the \"Grid2opEnvWrapper\" class\n", + "\n", + "### 1.1 Easy but not easily customizable" + ] + }, + { + "cell_type": "markdown", + "id": "ae59e1f5", + "metadata": {}, + "source": [ + "### 1.2 Similar to ray / rllib with same type of configuration\n", + "\n", + "In the next cell, we define a custom environment (that will internally use the `GymEnv` grid2op class) that is needed for ray / rllib.\n", + "\n", + "Indeed, in order to work with ray / rllib you need to define a custom wrapper on top of the GymEnv wrapper. You then have:\n", + "\n", + "- self._g2op_env which is the default grid2op environment, receiving grid2op Action and producing grid2op Observation.\n", + "- self._gym_env which is a the grid2op defined `gymnasium Environment` that cannot be directly used with ray / rllib\n", + "- `Grid2opEnv` which is a the wrapper on top of `self._gym_env` to make it usable with ray / rllib.\n", + "\n", + "Ray / rllib expects the gymnasium environment to inherit from `gymnasium.Env` and to be initialized with a given configuration. This is why you need to create the `Grid2opEnv` wrapper on top of `GymEnv`.\n", + "\n", + "In the initialization of `Grid2opEnv`, the `env_config` variable is a dictionary that can take as key-word arguments:\n", + "\n", + "- `backend_cls` : what is the class of the backend. If not provided, it will use `LightSimBackend` from the `lightsim2grid` package\n", + "- `backend_options`: what options will be used to create the backend for your environment. Your backend will be created by calling\n", + " `backend_cls(**backend_options)`, for example if you want to build `LightSimBackend(detailed_info_for_cascading_failure=False)` you can pass `{\"backend_cls\": LightSimBackend, \"backend_options\": {\"detailed_info_for_cascading_failure\": False}}`\n", + "- `env_name` : name of the grid2op environment you want to use, by default it uses `\"l2rpn_case14_sandbox\"`\n", + "- `env_is_test` : whether to add `test=True` when creating the grid2op environment (if `env_is_test` is True it will add `test=True` when calling `grid2op.make(..., test=True)`) otherwise it uses `test=False`\n", + "- `obs_attr_to_keep` : in this wrapper we only allow your agent to see a Box as an observation. This parameter allows you to control which attributes of the grid2op observation will be present in the agent observation space. By default it's `[\"rho\", \"p_or\", \"gen_p\", \"load_p\"]` which is \"kind of random\" and is probably not suited for every agent.\n", + "- `act_type` : controls the type of actions your agent will be able to perform. 
Already coded in this notebook are:\n", + " - `\"discrete\"` to use a `Discrete` action space\n", + " - `\"box\"` to use a `Box` action space\n", + " - `\"multi_discrete\"` to use a `MultiDiscrete` action space\n", + "- `act_attr_to_keep` : that allows you to customize the action space. If not provided, it defaults to:\n", + " - `[\"set_line_status_simple\", \"set_bus\"]` if `act_type` is `\"discrete\"` \n", + " - `[\"redispatch\", \"set_storage\", \"curtail\"]` if `act_type` is `\"box\"` \n", + " - `[\"one_line_set\", \"one_sub_set\"]` if `act_type` is `\"multi_discrete\"`\n", + "\n", + "If you want to add more customization, for example the reward function, the parameters of the environment etc. etc. feel free to get inspired by this code and extend it. Any PR on this regard is more than welcome." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "55e043a9", + "metadata": {}, + "outputs": [], + "source": [ + "import copy\n", + "from typing import Dict, Literal, Any\n", + "import json\n", + "\n", + "from gymnasium import Env\n", + "from gymnasium.spaces import Discrete, MultiDiscrete, Box\n", + "\n", + "import grid2op\n", + "from grid2op.gym_compat import GymEnv, BoxGymObsSpace, DiscreteActSpace, BoxGymActSpace, MultiDiscreteActSpace\n", + "from lightsim2grid import LightSimBackend\n", + "\n", + "\n", + "class Grid2opEnvWrapper(Env):\n", + " def __init__(self,\n", + " env_config: Dict[Literal[\"backend_cls\",\n", + " \"backend_options\",\n", + " \"env_name\",\n", + " \"env_is_test\",\n", + " \"obs_attr_to_keep\",\n", + " \"act_type\",\n", + " \"act_attr_to_keep\"],\n", + " Any] = None):\n", + " super().__init__()\n", + " if env_config is None:\n", + " env_config = {}\n", + "\n", + " # handle the backend\n", + " backend_cls = LightSimBackend\n", + " if \"backend_cls\" in env_config:\n", + " backend_cls = env_config[\"backend_cls\"]\n", + " backend_options = {}\n", + " if \"backend_options\" in env_config:\n", + " backend_options = env_config[\"backend_options\"]\n", + " backend = backend_cls(**backend_options)\n", + "\n", + " # create the grid2op environment\n", + " env_name = \"l2rpn_case14_sandbox\"\n", + " if \"env_name\" in env_config:\n", + " env_name = env_config[\"env_name\"]\n", + " if \"env_is_test\" in env_config:\n", + " is_test = bool(env_config[\"env_is_test\"])\n", + " else:\n", + " is_test = False\n", + " self._g2op_env = grid2op.make(env_name, backend=backend, test=is_test)\n", + " # NB by default this might be really slow (when the environment is reset)\n", + " # see https://grid2op.readthedocs.io/en/latest/data_pipeline.html for maybe 10x speed ups !\n", + " # TODO customize reward or action_class for example !\n", + "\n", + " # create the gym env (from grid2op)\n", + " self._gym_env = GymEnv(self._g2op_env)\n", + "\n", + " # customize observation space\n", + " obs_attr_to_keep = [\"rho\", \"p_or\", \"gen_p\", \"load_p\"]\n", + " if \"obs_attr_to_keep\" in env_config:\n", + " obs_attr_to_keep = copy.deepcopy(env_config[\"obs_attr_to_keep\"])\n", + " self._gym_env.observation_space.close()\n", + " self._gym_env.observation_space = BoxGymObsSpace(self._g2op_env.observation_space,\n", + " attr_to_keep=obs_attr_to_keep\n", + " )\n", + " # export observation space for the Grid2opEnv\n", + " self.observation_space = Box(shape=self._gym_env.observation_space.shape,\n", + " low=self._gym_env.observation_space.low,\n", + " high=self._gym_env.observation_space.high)\n", + "\n", + " # customize the action space\n", + " act_type = \"discrete\"\n", + " if 
\"act_type\" in env_config:\n", + " act_type = env_config[\"act_type\"]\n", + "\n", + " self._gym_env.action_space.close()\n", + " if act_type == \"discrete\":\n", + " # user wants a discrete action space\n", + " act_attr_to_keep = [\"set_line_status_simple\", \"set_bus\"]\n", + " if \"act_attr_to_keep\" in env_config:\n", + " act_attr_to_keep = copy.deepcopy(env_config[\"act_attr_to_keep\"])\n", + " self._gym_env.action_space = DiscreteActSpace(self._g2op_env.action_space,\n", + " attr_to_keep=act_attr_to_keep)\n", + " self.action_space = Discrete(self._gym_env.action_space.n)\n", + " elif act_type == \"box\":\n", + " # user wants continuous action space\n", + " act_attr_to_keep = [\"redispatch\", \"set_storage\", \"curtail\"]\n", + " if \"act_attr_to_keep\" in env_config:\n", + " act_attr_to_keep = copy.deepcopy(env_config[\"act_attr_to_keep\"])\n", + " self._gym_env.action_space = BoxGymActSpace(self._g2op_env.action_space,\n", + " attr_to_keep=act_attr_to_keep)\n", + " self.action_space = Box(shape=self._gym_env.action_space.shape,\n", + " low=self._gym_env.action_space.low,\n", + " high=self._gym_env.action_space.high)\n", + " elif act_type == \"multi_discrete\":\n", + " # user wants a multi-discrete action space\n", + " act_attr_to_keep = [\"one_line_set\", \"one_sub_set\"]\n", + " if \"act_attr_to_keep\" in env_config:\n", + " act_attr_to_keep = copy.deepcopy(env_config[\"act_attr_to_keep\"])\n", + " self._gym_env.action_space = MultiDiscreteActSpace(self._g2op_env.action_space,\n", + " attr_to_keep=act_attr_to_keep)\n", + " self.action_space = MultiDiscrete(self._gym_env.action_space.nvec)\n", + " else:\n", + " raise NotImplementedError(f\"action type '{act_type}' is not currently supported.\")\n", + " \n", + " \n", + " def reset(self, seed=None, options=None):\n", + " # use default _gym_env (from grid2op.gym_compat module)\n", + " # NB: here you can also specify \"default options\" when you reset, for example:\n", + " # - limiting the duration of the episode \"max step\"\n", + " # - starting at different steps \"init ts\"\n", + " # - study difficult scenario \"time serie id\"\n", + " # - specify an initial state of your grid \"init state\"\n", + " return self._gym_env.reset(seed=seed, options=options)\n", + " \n", + " def step(self, action):\n", + " # use default _gym_env (from grid2op.gym_compat module)\n", + " return self._gym_env.step(action)\n", + " " + ] + }, + { + "cell_type": "markdown", + "id": "a93964d8", + "metadata": {}, + "source": [ + "## 2 Create an environment, and train a first policy\n", + "\n", + "In this section we quickly show :\n", + "\n", + "- how to create the gym environment, which is an instance from `Grid2opEnvWrapper` defined above\n", + "- how to train a PPO policy using stable baselines3\n", + "\n", + "This part, for stable baselines is really small." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "38629107", + "metadata": {}, + "outputs": [], + "source": [ + "from stable_baselines3 import PPO\n", + "\n", + "gym_env = Grid2opEnvWrapper()\n", + "sb3_algo1 = PPO(\"MlpPolicy\", gym_env, verbose=0)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "89be6372", + "metadata": {}, + "outputs": [], + "source": [ + "sb3_algo1.learn(total_timesteps=1024)" + ] + }, + { + "cell_type": "markdown", + "id": "3a8f9717", + "metadata": {}, + "source": [ + "## 3 Evaluate the trained agent\n", + "\n", + "This notebook is a simple quick introduction for stable baselines only. 
So we don't really recall everything that has been said previously.\n", + "\n", + "Please consult the section `0) Recommended initial steps` of the notebook [11_IntegrationWithExistingRLFrameworks](./11_IntegrationWithExistingRLFrameworks.ipynb) for more information.\n", + "\n", + "**TL;DR** grid2op offers the possibility to test your agent on scenarios / episodes different from the ones it has been trained on. We greatly encourage you to use this functionality.\n", + "\n", + "There are two main ways to evaluate your agent:\n", + "\n", + "- you stay in the \"gymnasium\" world (see [here](#31-staying-in-the-gymnasium-ecosystem) ) and you evaluate your policy directly just like you would any other gymnasium compatible environment. Simple, easy but without support for some grid2op features\n", + "- you \"get back\" to the \"grid2op\" world (detailed [here](#32-using-the-grid2op-ecosystem)) by \"converting\" your NN policy into something that is able to output grid2op-like actions. This introduces yet again a \"wrapper\" but you can benefit from all grid2op features, such as the `Runner` to save and inspect what your policy has done.\n", + "\n", + " We show here just a simple example to \"get started easily\". For much better working agents, you can have a look at l2rpn-baselines code. There you have classes that map the environment, the agents etc. to grid2op directly (you don't have to copy-paste any wrapper). \n", + "\n", + "\n", + "\n", + "### 3.1 staying in the gymnasium ecosystem\n", + "\n", + "You can do pretty much what you want, but you have to do it yourself, or use any of the \"Wrappers\" available in gymnasium https://gymnasium.farama.org/main/api/wrappers/ (*eg* https://gymnasium.farama.org/main/api/wrappers/misc_wrappers/#gymnasium.wrappers.RecordEpisodeStatistics) or in your RL framework.\n", + "\n", + "For the sake of simplicity, we show how to do things \"manually\" even though we do not recommend doing it like that."
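If you would rather let the framework do the looping, stable baselines 3 also ships an evaluation helper; a minimal sketch (not part of the original notebook, and without the per-episode seeding and "time serie id" control done manually in the next cells) could look like this, reusing the `gym_env` and `sb3_algo1` objects created in section 2.

```python
# Sketch: framework-provided evaluation, without manual control of seeds / time series.
from stable_baselines3.common.evaluation import evaluate_policy

mean_reward, std_reward = evaluate_policy(sb3_algo1, gym_env,
                                          n_eval_episodes=2,
                                          deterministic=True)
print(f"mean reward: {mean_reward:.2f} +/- {std_reward:.2f}")
```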
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "05f5e188", + "metadata": {}, + "outputs": [], + "source": [ + "nb_episode_test = 2\n", + "seeds_test_env = (0, 1) # same size as nb_episode_test\n", + "seeds_test_agent = (3, 4) # same size as nb_episode_test\n", + "ts_ep_test = (0, 1) # same size as nb_episode_test" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "da0e7990", + "metadata": {}, + "outputs": [], + "source": [ + "ep_infos = {} # information that will be saved\n", + "\n", + "\n", + "for ep_test_num in range(nb_episode_test):\n", + " init_obs, init_infos = gym_env.reset(seed=seeds_test_env[ep_test_num],\n", + " options={\"time serie id\": ts_ep_test[ep_test_num]})\n", + " sb3_algo1.set_random_seed(seeds_test_agent[ep_test_num])\n", + " done = False\n", + " cum_reward = 0\n", + " step_survived = 0\n", + " obs = init_obs\n", + " while not done:\n", + " act, _states = sb3_algo1.predict(obs, deterministic=True)\n", + " obs, reward, terminated, truncated, info = gym_env.step(act)\n", + " step_survived += 1\n", + " cum_reward += float(reward)\n", + " done = terminated or truncated\n", + " ep_infos[ep_test_num] = {\"time serie id\": ts_ep_test[ep_test_num],\n", + " \"time serie folder\": gym_env._gym_env.init_env.chronics_handler.get_id(),\n", + " \"env seed\": seeds_test_env[ep_test_num],\n", + " \"agent seed\": seeds_test_agent[ep_test_num],\n", + " \"steps survived\": step_survived,\n", + " \"total steps\": int(gym_env._gym_env.init_env.max_episode_duration()),\n", + " \"cum reward\": cum_reward}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3f248fdc", + "metadata": {}, + "outputs": [], + "source": [ + "# \"prettyprint\" the dictionnary above\n", + "\n", + "print(json.dumps(ep_infos, indent=4))" + ] + }, + { + "cell_type": "markdown", + "id": "8a32899b", + "metadata": {}, + "source": [ + "As you might have seen, it's not easy this way to retrieve some useful information about the grid2op environment if these informations are not passed to the policy.\n", + "\n", + "For example, we need to call `gym_env._gym_env.init_env` to access the underlying grid2op environment... You have to convert some things from int32 or float32 to float or int otherwise json complains, you have to control yourself the seeds to have reproducible results etc.\n", + "\n", + "It's a quick way to have something working but it might be perfected." + ] + }, + { + "cell_type": "markdown", + "id": "fde71911", + "metadata": {}, + "source": [ + "### 3.2 using the grid2op ecosystem\n", + "\n", + "This second method brings it closer to grid2op ecosystem, you will be able to use it with the grid2op `Runner`, save the results and read it back with other tools such as grid2viz and do the evaluation in parrallel without too much trouble (and with high reproducibility).\n", + "\n", + "With this method, you build a grid2op agent and this agent can then be used like every other grid2op agent. For example you can compare it with heuristic agents, agent based on optimization etc.\n", + "\n", + "This way of doing things also allows you to customize when the neural network policy is used. For example, you might chose to use it only when the grid is \"unsafe\" (and if the grid is safe you use an \"expert\" rules).\n", + "\n", + "This is more flexible than the previous one." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "50625005", + "metadata": {}, + "outputs": [], + "source": [ + "from grid2op.Agent import BaseAgent\n", + "from grid2op.Runner import Runner\n", + "\n", + "class Grid2opAgentWrapper(BaseAgent):\n", + "    def __init__(self,\n", + "                 gym_env: Grid2opEnvWrapper,\n", + "                 trained_agent: PPO):\n", + "        self.gym_env = gym_env\n", + "        BaseAgent.__init__(self, gym_env._gym_env.init_env.action_space)\n", + "        self.trained_agent = trained_agent\n", + "        \n", + "    def act(self, obs, reward, done):\n", + "        # you can customize it here to call the NN policy `trained_agent`\n", + "        # only in some cases, depending on the observation for example\n", + "        gym_obs = self.gym_env._gym_env.observation_space.to_gym(obs)\n", + "        gym_act, _states = self.trained_agent.predict(gym_obs, deterministic=True)\n", + "        grid2op_act = self.gym_env._gym_env.action_space.from_gym(gym_act)\n", + "        return grid2op_act\n", + "        \n", + "    def seed(self, seed):\n", + "        # implement the seed function\n", + "        if seed is None:\n", + "            return\n", + "        seed_int = int(seed)\n", + "        if seed_int != seed:\n", + "            raise RuntimeError(\"Seed must be convertible to an integer\")\n", + "        self.trained_agent.set_random_seed(seed_int)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "99e84f4a", + "metadata": {}, + "outputs": [], + "source": [ + "my_agent = Grid2opAgentWrapper(gym_env, sb3_algo1)\n", + "runner = Runner(**gym_env._g2op_env.get_params_for_runner(),\n", + "                agentClass=None,\n", + "                agentInstance=my_agent)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "18b461cb", + "metadata": {}, + "outputs": [], + "source": [ + "res = runner.run(nb_episode=nb_episode_test,\n", + "                 env_seeds=seeds_test_env,\n", + "                 agent_seeds=seeds_test_agent,\n", + "                 episode_id=ts_ep_test,\n", + "                 add_detailed_output=True\n", + "                 )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fe880aac", + "metadata": {}, + "outputs": [], + "source": [ + "res" + ] + }, + { + "cell_type": "markdown", + "id": "6fce9ed9", + "metadata": {}, + "source": [ + "See the documentation or the notebook [05 StudyYourAgent](./05_StudyYourAgent.ipynb) for how to use grid2op tools to study your agent, its decisions, etc." + ] + }, + { + "cell_type": "markdown", + "id": "49bf6095", + "metadata": {}, + "source": [ + "## 4 Some customizations\n", + "\n", + "### 4.1 Train a PPO agent using 4 \"runners\" to make the rollouts\n", + "\n", + "This, for now, only works on linux-based computers. Hopefully this will also work on windows and macos soon.\n", + "\n", + "This allows you to use some \"parallelism\" during the training: your agent will interact \"at the same time\" with 4 environments, allowing it to gather experience faster. But in this case, its training is always done in the \"main\" process." 
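The next cell builds the vectorized environment with `make_vec_env`, which by default keeps the 4 copies in the main process (a `DummyVecEnv`). As a hedged variant, assuming `Grid2opEnvWrapper` can be re-created inside sub-processes on your platform, you could ask stable_baselines3 to give each copy its own process:

.. code-block:: python

    from stable_baselines3.common.env_util import make_vec_env
    from stable_baselines3.common.vec_env import SubprocVecEnv

    # hedged variant: each of the 4 rollout environments lives in its own process
    vec_env_mp = make_vec_env(lambda: Grid2opEnvWrapper(),
                              n_envs=4,
                              vec_env_cls=SubprocVecEnv)
    sb3_algo2_mp = PPO("MlpPolicy", vec_env_mp, verbose=0)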
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f2036ac9", + "metadata": {}, + "outputs": [], + "source": [ + "from stable_baselines3.common.env_util import make_vec_env\n", + "vec_env = make_vec_env(lambda : Grid2opEnvWrapper(), n_envs=4)\n", + "sb3_algo2 = PPO(\"MlpPolicy\", vec_env, verbose=0)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1d8ac595", + "metadata": {}, + "outputs": [], + "source": [ + "sb3_algo2.learn(total_timesteps=1024)" + ] + }, + { + "cell_type": "markdown", + "id": "8fc163cd", + "metadata": {}, + "source": [ + "### 4.2 Use non default parameters to make the grid2op environment\n", + "\n", + "In this third example, we will train a policy using the \"box\" action space, and on another environment (`l2rpn_idf_2023` instead of `l2rpn_case14_sandbox`)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13740e53", + "metadata": {}, + "outputs": [], + "source": [ + "# see https://docs.ray.io/en/latest/rllib/package_ref/doc/ray.rllib.algorithms.algorithm_config.AlgorithmConfig.html\n", + "\n", + "# Use a \"Box\" action space (mainly to use redispatching, curtailment and storage units)\n", + "env_config3 = {\"env_name\": \"l2rpn_idf_2023\",\n", + "               \"env_is_test\": True,\n", + "               \"act_type\": \"box\",\n", + "               }\n", + "gym_env3 = Grid2opEnvWrapper(env_config3)\n", + "sb3_algo3 = PPO(\"MlpPolicy\", gym_env3, verbose=0)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "93ac61ff", + "metadata": {}, + "outputs": [], + "source": [ + "sb3_algo3.learn(total_timesteps=1024)" + ] + }, + { + "cell_type": "markdown", + "id": "00790379", + "metadata": {}, + "source": [ + "And now a policy using the \"multi discrete\" action space: " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6cd44edb", + "metadata": {}, + "outputs": [], + "source": [ + "# see https://docs.ray.io/en/latest/rllib/package_ref/doc/ray.rllib.algorithms.algorithm_config.AlgorithmConfig.html\n", + "\n", + "# Use a \"MultiDiscrete\" action space this time\n", + "env_config4 = {\"env_name\": \"l2rpn_idf_2023\",\n", + "               \"env_is_test\": True,\n", + "               \"act_type\": \"multi_discrete\",\n", + "               }\n", + "gym_env4 = Grid2opEnvWrapper(env_config4)\n", + "sb3_algo4 = PPO(\"MlpPolicy\", gym_env4, verbose=0)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d18be5ec", + "metadata": {}, + "outputs": [], + "source": [ + "sb3_algo4.learn(total_timesteps=1024)" + ] + }, + { + "cell_type": "markdown", + "id": "7cf2dd58", + "metadata": {}, + "source": [ + "### 4.3 Customize the policy (number of layers, size of layers etc.)\n", + "\n", + "This notebook does not aim at covering all the possibilities offered by stable_baselines3. For that you need to refer to the stable_baselines3 documentation.\n", + "\n", + "We will simply show how to change the size of the neural network used as a policy."
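The next cell changes the overall size of the policy network through `policy_kwargs`. As a hedged aside (the exact `net_arch` format depends on the stable_baselines3 version you have installed), you can also give different sizes to the actor and to the critic, for example:

.. code-block:: python

    # hedged sketch: separate network sizes for the actor ("pi") and the critic ("vf")
    sb3_algo_custom = PPO("MlpPolicy",
                          Grid2opEnvWrapper(),
                          verbose=0,
                          policy_kwargs={"net_arch": {"pi": [64, 64], "vf": [64, 64]}})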
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fa7cc345", + "metadata": {}, + "outputs": [], + "source": [ + "# see https://docs.ray.io/en/latest/rllib/package_ref/doc/ray.rllib.algorithms.algorithm_config.AlgorithmConfig.html\n", + "\n", + "gym_env5 = Grid2opEnvWrapper()\n", + "sb3_algo5 = PPO(\"MlpPolicy\",\n", + " gym_env5,\n", + " verbose=0,\n", + " policy_kwargs={\"net_arch\": [32, 32, 32]}\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "51d435e5", + "metadata": {}, + "outputs": [], + "source": [ + "sb3_algo5.learn(total_timesteps=1024)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/grid2op/Action/_backendAction.py b/grid2op/Action/_backendAction.py index 718d63a1a..5557e7b7f 100644 --- a/grid2op/Action/_backendAction.py +++ b/grid2op/Action/_backendAction.py @@ -8,27 +8,129 @@ import copy import numpy as np +from typing import Tuple, Union + +try: + from typing import Self +except ImportError: + from typing_extensions import Self + +from grid2op.Action.baseAction import BaseAction from grid2op.dtypes import dt_int, dt_bool, dt_float -from grid2op.Space import GridObjects, DetailedTopoDescription -from grid2op.Exceptions import Grid2OpException +from grid2op.Space import GridObjects +from grid2op.Exceptions import Grid2OpException, AmbiguousAction ERR_MSG_SWITCH = ("Cannot retrieve switches configuration if the grid does not have " "switches information. Have you set them when loading the grid ?") # TODO see if it can be done in c++ easily class ValueStore: """ - INTERNAL USE ONLY + USE ONLY IF YOU WANT TO CODE A NEW BACKEND - .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\ + .. warning:: /!\\\\ Internal, do not modify, alter, change, override the implementation unless you know what you are doing /!\\\\ + + If you override them you might even notice some extremely weird behaviour. It's not "on purpose", we are aware of + it but we won't change it (for now at least) + + .. warning:: + Objects from this class should never be created by anyone except by objects of the + :class:`grid2op.Action._backendAction._BackendAction` + when they are created or when instances of `_BackendAction` are process *eg* with :func:`_BackendAction.__call__` or + :func:`_BackendAction.get_loads_bus` etc. + + There are two correct uses for this class: + + #. by iterating manually with the `for xxx in value_stor_instance: ` + #. by checking which objects have been changed (with :attr:`ValueStore.changed` ) and then check the + new value of the elements **changed** with :attr:`ValueStore.values` [el_id] + + .. danger:: + + You should never trust the values in :attr:`ValueStore.values` [el_id] if :attr:`ValueStore.changed` [el_id] is `False`. + + Access data (values) only when the corresponding "mask" (:attr:`ValueStore.changed`) is `True`. + + This is, of course, ensured by default if you use the practical way of iterating through them with: + + .. 
code-block:: python + + load_p: ValueStore # a ValueStore object named "load_p" + + for load_id, new_p in load_p: + # do something + + In this case only "new_p" will be given if the corresponding `changed` mask is true. + Attributes + ---------- + + TODO + + Examples + --------- + + Say you have a "ValueStore" `val_sto` (in :class:`grid2op.Action._backendAction._BackendAction` you will end up manipulating + pretty much all the time `ValueStore` if you use it correctly, with :func:`_BackendAction.__call__` but also if you call + :func:`_BackendAction.get_loads_bus`, :func:`_BackendAction.get_loads_bus_global`, :func:`_BackendAction.get_gens_bus`, ...) + + Basically, the "variables" named `prod_p`, `prod_v`, `load_p`, `load_q`, `storage_p`, + `topo__`, `shunt_p`, `shunt_q`, `shunt_bus`, `backendAction.get_lines_or_bus()`, + `backendAction.get_lines_or_bus_global()`, etc. in the doc of :class:`grid2op.Action._backendAction._BackendAction` + are all :class:`ValueStore`. + + Recommended usage: + + .. code-block:: python + + val_sto: ValueStore # a ValueStore object named "val_sto" + + for el_id, new_val in val_sto: + # do something + + # less abstractly, say `load_p` is a ValueStore: + # for load_id, new_p in load_p: + # do the real changes of load active value in self._grid + # load_id => id of loads for which the active consumption changed + # new_p => new load active consumption for `load_id` + # self._grid.change_load_active_value(load_id, new_p) # fictive example of course... + + + More advanced / vectorized usage (only do that if you found out that your backend was + slow because of the iteration in python above; this is error-prone and in general + might not be worth it...): + + .. code-block:: python + + val_sto: ValueStore # a ValueStore object named "val_sto" + + # less abstractly, say `load_p` is a ValueStore: + # self._grid.change_all_loads_active_value(where_changed=load_p.changed, + new_vals=load_p.values[load_p.changed]) + # fictive example of course, I highly doubt the self._grid + # implements a method named exactly `change_all_loads_active_value` + + WARNING, DANGER AHEAD: + Never trust the data in load_p.values[~load_p.changed], they might even be uninitialized... + """ def __init__(self, size, dtype): ## TODO at the init it's mandatory to have everything at "1" here # if topo is not "fully connected" it will not work + + #: :class:`np.ndarray` + #: The new target values to be set in `backend._grid` in `apply_action` + #: never use the values if the corresponding mask is set to `False` + #: (it might be uninitialized). self.values = np.empty(size, dtype=dtype) + + #: :class:`np.ndarray` (bool) + #: Mask representing which values (stored in :attr:`ValueStore.values` ) are + #: meaningful. The other values (corresponding to `changed=False` ) are meaningless. self.changed = np.full(size, dtype=dt_bool, fill_value=False) + + #: used internally for iteration self.last_index = 0 self.__size = size @@ -55,7 +157,7 @@ def _change_val_int(self, newvals): self.values[changed_] = (1 - self.values[changed_]) + 2 def _change_val_float(self, newvals): - changed_ = newvals != 0.0 + changed_ = np.abs(newvals) >= 1e-7 self.changed[changed_] = True self.values[changed_] += newvals[changed_] @@ -64,6 +166,7 @@ def reset(self): self.last_index = 0 def change_status(self, switch, lineor_id, lineex_id, old_vect): + # CAREFUL: the argument `switch` here does not refer to a physical switch, it only means "change" !!! 
if not switch.any(): # nothing is modified so i stop here return @@ -201,6 +304,10 @@ def force_unchanged(self, mask, local_bus): to_unchanged = local_bus == -1 to_unchanged[~mask] = False self.changed[to_unchanged] = False + + def register_new_topo(self, current_topo: "ValueStore"): + mask_co = current_topo.values >= 1 + self.values[mask_co] = current_topo.values[mask_co] class _BackendAction(GridObjects): @@ -209,47 +316,212 @@ class _BackendAction(GridObjects): Internal class, use at your own risk. - This class "digest" the players / environment / opponent / voltage controlers "action", - and transform it to setpoint for the backend. + This class "digests" the players / environment / opponent / voltage controlers "actions", + and transforms them into one single "state" that can in turn be processed by the backend + in the function :func:`grid2op.Backend.Backend.apply_action`. + + .. note:: + In a :class:`_BackendAction` only the state of the elements that have been modified + by an "entity" (agent, environment, opponent, voltage controler etc.) is given. + + We expect the backend to "remember somehow" the state of all the rest. + + This is to save a lot of computation time for larger grids. + + .. note:: + You probably don't need to import the `_BackendAction` class (this is why + we "hide" it), + but the `backendAction` you will receive in `apply_action` is indeed + a :class:`_BackendAction`, hence this documentation. + + If you want to use grid2op to develop agents or new time series, + this class should behave transparently for you and you don't really + need to spend time reading its documentation. + + If you want to develop in grid2op and code a new backend, you might be interested in: + + - :func:`_BackendAction.__call__` + - :func:`_BackendAction.get_loads_bus` + - :func:`_BackendAction.get_loads_bus_global` + - :func:`_BackendAction.get_gens_bus` + - :func:`_BackendAction.get_gens_bus_global` + - :func:`_BackendAction.get_lines_or_bus` + - :func:`_BackendAction.get_lines_or_bus_global` + - :func:`_BackendAction.get_lines_ex_bus` + - :func:`_BackendAction.get_lines_ex_bus_global` + - :func:`_BackendAction.get_storages_bus` + - :func:`_BackendAction.get_storages_bus_global` + - :func:`_BackendAction.get_shunts_bus_global` + + And in this case, for usage examples, see the examples available in: + + - https://github.com/rte-france/Grid2Op/tree/master/examples/backend_integration: a step by step guide to + code a new backend + - :class:`grid2op.Backend.educPandaPowerBackend.EducPandaPowerBackend` and especially the + :func:`grid2op.Backend.educPandaPowerBackend.EducPandaPowerBackend.apply_action` + - :ref:`create-backend-module` page of the documentation, especially the + :ref:`backend-action-create-backend` section + + Otherwise, "TL;DR" (only relevant when you want to implement the :func:`grid2op.Backend.Backend.apply_action` + function, rest is not shown): + + .. 
code-block:: python + + def apply_action(self, backendAction: Union["grid2op.Action._backendAction._BackendAction", None]) -> None: + if backendAction is None: + return + + ( + active_bus, + (prod_p, prod_v, load_p, load_q, storage_p), + topo__, + shunts__, + ) = backendAction() + + # change the active values of the loads + for load_id, new_p in load_p: + # do the real changes in self._grid + + # change the reactive values of the loads + for load_id, new_q in load_q: + # do the real changes in self._grid + + # change the active value of generators + for gen_id, new_p in prod_p: + # do the real changes in self._grid + + # for the voltage magnitude, pandapower expects pu but grid2op provides kV, + # so we need a bit of change + for gen_id, new_v in prod_v: + # do the real changes in self._grid + + # process the topology : + + # option 1: you can directly set the element of the grid in the "topo_vect" + # order, for example you can modify in your solver the busbar to which + # element 17 of `topo_vect` is computed (this is necessarily a local view of + # the buses ) + for el_topo_vect_id, new_el_bus in topo__: + # connect this object to the `new_el_bus` (local) in self._grid + + # OR !!! (use either option 1 or option 2.a or option 2.b - exclusive OR) + + # option 2: use "per element type" view (this is usefull) + # if your solver has organized its data by "type" and you can + # easily access "all loads" and "all generators" etc. + + # option 2.a using "local view": + # new_bus is either -1, 1, 2, ..., backendAction.n_busbar_per_sub + lines_or_bus = backendAction.get_lines_or_bus() + for line_id, new_bus in lines_or_bus: + # connect "or" side of "line_id" to (local) bus `new_bus` in self._grid + + # OR !!! (use either option 1 or option 2.a or option 2.b - exclusive OR) + + # option 2.b using "global view": + # new_bus is either 0, 1, 2, ..., backendAction.n_busbar_per_sub * backendAction.n_sub + # (this suppose internally that your solver and grid2op have the same + # "ways" of labelling the buses...) + lines_or_bus = backendAction.get_lines_or_bus_global() + for line_id, new_bus in lines_or_bus: + # connect "or" side of "line_id" to (global) bus `new_bus` in self._grid + + # now repeat option a OR b calling the right methods + # for each element types (*eg* get_lines_ex_bus, get_loads_bus, get_gens_bus, + # get_storages_bus for "option a-like") + + ######## end processing of the topology ############### + + # now implement the shunts: + + if shunts__ is not None: + shunt_p, shunt_q, shunt_bus = shunts__ + + if (shunt_p.changed).any(): + # p has changed for at least a shunt + for shunt_id, new_shunt_p in shunt_p: + # do the real changes in self._grid + + if (shunt_q.changed).any(): + # q has changed for at least a shunt + for shunt_id, new_shunt_q in shunt_q: + # do the real changes in self._grid + + if (shunt_bus.changed).any(): + # at least one shunt has been disconnected + # or has changed the buses + + # do like for normal topology with: + # option a -like (using local bus): + for shunt_id, new_shunt_bus in shunt_bus: + ... + # OR + # option b -like (using global bus): + shunt_global_bus = backendAction.get_shunts_bus_global() + for shunt_id, new_shunt_bus in shunt_global_bus: + # connect shunt_id to (global) bus `new_shunt_bus` in self._grid + + .. warning:: + The steps shown here are generic and might not be optimised for your backend. 
This + is why you probably do not see any of them directly in :class:`grid2op.Backend.PandaPowerBackend` + (where everything is vectorized to make things fast **with pandapower**). + + It is probably a good idea to first get this first implementation up and running, passing + all the tests, and then to worry about optimization: + + The real problem is that programmers have spent far too much + time worrying about efficiency in the wrong places and at the wrong times; + premature optimization is the root of all evil (or at least most of it) + in programming. + + Donald Knuth, "*The Art of Computer Programming*" + """ def __init__(self): + """ + .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\ + + This is handled by the environment ! + + """ GridObjects.__init__(self) + cls = type(self) # last connected registered - self.last_topo_registered = ValueStore(self.dim_topo, dtype=dt_int) + self.last_topo_registered: ValueStore = ValueStore(cls.dim_topo, dtype=dt_int) # topo at time t - self.current_topo = ValueStore(self.dim_topo, dtype=dt_int) + self.current_topo: ValueStore = ValueStore(cls.dim_topo, dtype=dt_int) # by default everything is on busbar 1 self.last_topo_registered.values[:] = 1 self.current_topo.values[:] = 1 # injection at time t - self.prod_p = ValueStore(self.n_gen, dtype=dt_float) - self.prod_v = ValueStore(self.n_gen, dtype=dt_float) - self.load_p = ValueStore(self.n_load, dtype=dt_float) - self.load_q = ValueStore(self.n_load, dtype=dt_float) - self.storage_power = ValueStore(self.n_storage, dtype=dt_float) - - self.activated_bus = np.full((self.n_sub, 2), dtype=dt_bool, fill_value=False) - self.big_topo_to_subid = np.repeat( - list(range(self.n_sub)), repeats=self.sub_info + self.prod_p: ValueStore = ValueStore(cls.n_gen, dtype=dt_float) + self.prod_v: ValueStore = ValueStore(cls.n_gen, dtype=dt_float) + self.load_p: ValueStore = ValueStore(cls.n_load, dtype=dt_float) + self.load_q: ValueStore = ValueStore(cls.n_load, dtype=dt_float) + self.storage_power: ValueStore = ValueStore(cls.n_storage, dtype=dt_float) + + self.activated_bus = np.full((cls.n_sub, cls.n_busbar_per_sub), dtype=dt_bool, fill_value=False) + self.big_topo_to_subid: np.ndarray = np.repeat( + list(range(cls.n_sub)), repeats=cls.sub_info ) # shunts - cls = type(self) if cls.shunts_data_available: - self.shunt_p = ValueStore(self.n_shunt, dtype=dt_float) - self.shunt_q = ValueStore(self.n_shunt, dtype=dt_float) - self.shunt_bus = ValueStore(self.n_shunt, dtype=dt_int) - self.current_shunt_bus = ValueStore(self.n_shunt, dtype=dt_int) + self.shunt_p: ValueStore = ValueStore(cls.n_shunt, dtype=dt_float) + self.shunt_q: ValueStore = ValueStore(cls.n_shunt, dtype=dt_float) + self.shunt_bus: ValueStore = ValueStore(cls.n_shunt, dtype=dt_int) + self.shunt_bus.values[:] = 1 + self.current_shunt_bus: ValueStore = ValueStore(cls.n_shunt, dtype=dt_int) self.current_shunt_bus.values[:] = 1 - self._status_or_before = np.ones(self.n_line, dtype=dt_int) - self._status_ex_before = np.ones(self.n_line, dtype=dt_int) - self._status_or = np.ones(self.n_line, dtype=dt_int) - self._status_ex = np.ones(self.n_line, dtype=dt_int) + self._status_or_before: np.ndarray = np.ones(cls.n_line, dtype=dt_int) + self._status_ex_before: np.ndarray = np.ones(cls.n_line, dtype=dt_int) + self._status_or: np.ndarray = np.ones(cls.n_line, dtype=dt_int) + self._status_ex: np.ndarray = np.ones(cls.n_line, dtype=dt_int) self._loads_bus = None self._gens_bus = None @@ -258,8 +530,21 @@ def __init__(self): 
self._storage_bus = None self._shunt_bus = None self._detailed_topo = None # tuple: busbar_connector_state, switches_state + if cls.detailed_topo_desc is not None: + self.last_switch_registered = np.zeros(cls.detailed_topo_desc.switches.shape[0], dtype=dt_bool) + self.current_switch = np.zeros(cls.detailed_topo_desc.switches.shape[0], dtype=dt_bool) + self.current_switch[:] = cls.detailed_topo_desc.compute_switches_position( + self.current_topo.values, + self.current_shunt_bus.values + ) + # TODO detailed topo: shunt_bus and last_shunt_bus ! + + def __deepcopy__(self, memodict={}) -> Self: + + """ + .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\ - def __deepcopy__(self, memodict={}): + """ res = type(self)() # last connected registered res.last_topo_registered.copy(self.last_topo_registered) @@ -293,15 +578,22 @@ def __deepcopy__(self, memodict={}): return res - def __copy__(self): + def __copy__(self) -> Self: + + """ + .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\ + + """ res = self.__deepcopy__() # nothing less to do return res - def reorder(self, no_load, no_gen, no_topo, no_storage, no_shunt): + def reorder(self, no_load, no_gen, no_topo, no_storage, no_shunt) -> None: """ .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\ + + This is handled by BackendConverter, do not alter - reorder the element modified, this is use when converting backends only and should not be use + Reorder the element modified, this is use when converting backends only and should not be use outside of this usecase no_* stands for "new order" @@ -325,8 +617,14 @@ def reorder(self, no_load, no_gen, no_topo, no_storage, no_shunt): # force to reset the detailed topo self._detailed_topo = None - def reset(self): - # last topo + def reset(self) -> None: + """ + .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\ + + This is called by the environment, do not alter. + + """ + # last known topo self.last_topo_registered.reset() # topo at time t @@ -338,6 +636,7 @@ def reset(self): self.load_p.reset() self.load_q.reset() self.storage_power.reset() + # storage unit have their power reset to 0. each step self.storage_power.changed[:] = True self.storage_power.values[:] = 0.0 @@ -349,18 +648,16 @@ def reset(self): self.shunt_q.reset() self.shunt_bus.reset() self.current_shunt_bus.reset() - - self._loads_bus = None - self._gens_bus = None - self._lines_or_bus = None - self._lines_ex_bus = None - self._storage_bus = None - self._shunt_bus = None - # force to reset the detailed topo self._detailed_topo = None + self.last_topo_registered.register_new_topo(self.current_topo) - def all_changed(self): + def all_changed(self) -> None: + """ + .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\ + + This is called by the environment, do not alter. + """ # last topo self.last_topo_registered.all_changed() @@ -382,47 +679,132 @@ def all_changed(self): # self.shunt_bus.all_changed() def set_redispatch(self, new_redispatching): + """ + .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\ + + This is called by the environment, do not alter. + """ self.prod_p.change_val(new_redispatching) - def __iadd__(self, other): + def _aux_iadd_inj(self, dict_injection): """ .. 
warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\ + + Internal implementation of += + + """ + if "load_p" in dict_injection: + tmp = dict_injection["load_p"] + self.load_p.set_val(tmp) + if "load_q" in dict_injection: + tmp = dict_injection["load_q"] + self.load_q.set_val(tmp) + if "prod_p" in dict_injection: + tmp = dict_injection["prod_p"] + self.prod_p.set_val(tmp) + if "prod_v" in dict_injection: + tmp = dict_injection["prod_v"] + self.prod_v.set_val(tmp) + + def _aux_iadd_shunt(self, other, shunt_tp): + """ + .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\ + + Internal implementation of += + + """ + shunts = {} + if type(other).shunts_data_available: + shunts["shunt_p"] = other.shunt_p + shunts["shunt_q"] = other.shunt_q + shunts["shunt_bus"] = other.shunt_bus + + arr_ = shunts["shunt_p"] + self.shunt_p.set_val(arr_) + arr_ = shunts["shunt_q"] + self.shunt_q.set_val(arr_) + + arr_ = shunts["shunt_bus"] + if shunt_tp is not None: + # some shunts have been modified with switches + mask = shunt_tp != 0 + arr_[mask] = shunt_tp[mask] + self.shunt_bus.set_val(arr_) + self.current_shunt_bus.values[self.shunt_bus.changed] = self.shunt_bus.values[self.shunt_bus.changed] + + if self.shunt_bus.changed.any(): + self._detailed_topo = None - other: a grid2op action standard + def _aux_iadd_reconcile_disco_reco(self): + """ + .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\ + + Internal implementation of += + + """ + disco_or = (self._status_or_before == -1) | (self._status_or == -1) + disco_ex = (self._status_ex_before == -1) | (self._status_ex == -1) + disco_now = ( + disco_or | disco_ex + ) # a powerline is disconnected if at least one of its extremity is + # added + reco_or = (self._status_or_before == -1) & (self._status_or >= 1) + reco_ex = (self._status_or_before == -1) & (self._status_ex >= 1) + reco_now = reco_or | reco_ex + # Set nothing + set_now = np.zeros_like(self._status_or) + # Force some disconnections + set_now[disco_now] = -1 + set_now[reco_now] = 1 + + self.current_topo.set_status( + set_now, + self.line_or_pos_topo_vect, + self.line_ex_pos_topo_vect, + self.last_topo_registered, + ) + + def __iadd__(self, other : BaseAction) -> Self: + """ + .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\ + + This is called by the environment, do not alter. + + The goal of this function is to "fused" together all the different types + of modifications handled by: + + - the Agent + - the opponent + - the time series (part of the environment) + - the voltage controler + + It might be called multiple times per step. Parameters ---------- - other: :class:`grid2op.Action.BaseAction.BaseAction` + other: :class:`grid2op.Action.BaseAction` Returns ------- - + The updated state of `self` after the new action `other` has been added to it. 
+ """ - dict_injection = other._dict_inj set_status = other._set_line_status switch_status = other._switch_line_status set_topo_vect = other._set_topo_vect switcth_topo_vect = other._change_bus_vect redispatching = other._redispatch storage_power = other._storage_power + modif_switch = False + switch_topo_vect = None + shunt_tp = None # I deal with injections # Ia set the injection if other._modif_inj: - if "load_p" in dict_injection: - tmp = dict_injection["load_p"] - self.load_p.set_val(tmp) - if "load_q" in dict_injection: - tmp = dict_injection["load_q"] - self.load_q.set_val(tmp) - if "prod_p" in dict_injection: - tmp = dict_injection["prod_p"] - self.prod_p.set_val(tmp) - if "prod_v" in dict_injection: - tmp = dict_injection["prod_v"] - self.prod_v.set_val(tmp) - + self._aux_iadd_inj(other._dict_inj) + # Ib change the injection aka redispatching if other._modif_redispatch: self.prod_p.change_val(redispatching) @@ -430,26 +812,36 @@ def __iadd__(self, other): # Ic storage unit if other._modif_storage: self.storage_power.set_val(storage_power) - + + # III 0 before everything + # TODO detailed topo: optimize this for staying + # in the "switch" world + if other._modif_change_switch or other._modif_set_switch: + # agent modified the switches + if type(self).detailed_topo_desc is None: + raise AmbiguousAction("Something modified the switches while " + "no switch information is provided.") + new_switch = True & self.current_switch + subid_switch = other.get_sub_ids_switch() + if other._modif_change_switch: + # TODO detailed topo method of ValueStore ! + new_switch[other._change_switch_status] = ~new_switch[other._change_switch_status] + if other._modif_set_switch: + # TODO detailed topo method of ValueStore + mask_set = other._set_switch_status != 0 + new_switch[mask_set] = other._set_switch_status[mask_set] == 1 + switch_topo_vect, shunt_tp = self.detailed_topo_desc.from_switches_position(new_switch, subid_switch) + modif_switch = True + + # change the "target topology" for the elements + # connected to the impacted substations + mask_switch = switch_topo_vect != 0 + set_topo_vect[mask_switch] = switch_topo_vect[mask_switch] + # II shunts if type(self).shunts_data_available: - shunts = {} - if type(other).shunts_data_available: - shunts["shunt_p"] = other.shunt_p - shunts["shunt_q"] = other.shunt_q - shunts["shunt_bus"] = other.shunt_bus - - arr_ = shunts["shunt_p"] - self.shunt_p.set_val(arr_) - arr_ = shunts["shunt_q"] - self.shunt_q.set_val(arr_) - arr_ = shunts["shunt_bus"] - self.shunt_bus.set_val(arr_) - if (arr_ != 0).any(): - # trigger the recompute of _detailed_topo if needed - self._detailed_topo = None - self.current_shunt_bus.values[self.shunt_bus.changed] = self.shunt_bus.values[self.shunt_bus.changed] - + self._aux_iadd_shunt(other, shunt_tp) + # III line status # this need to be done BEFORE the topology, as a connected powerline will be connected to their old bus. # regardless if the status is changed in the action or not. 
@@ -484,7 +876,7 @@ def __iadd__(self, other): self.current_topo.change_val(switcth_topo_vect) self._detailed_topo = None - if other._modif_set_bus: + if other._modif_set_bus or modif_switch: self.current_topo.set_val(set_topo_vect) self._detailed_topo = None @@ -495,48 +887,79 @@ def __iadd__(self, other): ) # At least one disconnected extremity - if other._modif_change_bus or other._modif_set_bus: - disco_or = (self._status_or_before == -1) | (self._status_or == -1) - disco_ex = (self._status_ex_before == -1) | (self._status_ex == -1) - disco_now = ( - disco_or | disco_ex - ) # a powerline is disconnected if at least one of its extremity is - # added - reco_or = (self._status_or_before == -1) & (self._status_or >= 1) - reco_ex = (self._status_or_before == -1) & (self._status_ex >= 1) - reco_now = reco_or | reco_ex - # Set nothing - set_now = np.zeros_like(self._status_or) - # Force some disconnections - set_now[disco_now] = -1 - set_now[reco_now] = 1 - - self.current_topo.set_status( - set_now, - self.line_or_pos_topo_vect, - self.line_ex_pos_topo_vect, - self.last_topo_registered, - ) - + if other._modif_change_bus or other._modif_set_bus or modif_switch: + self._aux_iadd_reconcile_disco_reco() return self - def _assign_0_to_disco_el(self): - """do not consider disconnected elements are modified for there active / reactive / voltage values""" - gen_changed = self.current_topo.changed[type(self).gen_pos_topo_vect] - gen_bus = self.current_topo.values[type(self).gen_pos_topo_vect] + def _assign_0_to_disco_el(self) -> None: + """ + .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\ + + This is handled by the environment, do not alter. + + Do not consider disconnected elements are modified for there active / reactive / voltage values + """ + cls = type(self) + gen_changed = self.current_topo.changed[cls.gen_pos_topo_vect] + gen_bus = self.current_topo.values[cls.gen_pos_topo_vect] self.prod_p.force_unchanged(gen_changed, gen_bus) self.prod_v.force_unchanged(gen_changed, gen_bus) - load_changed = self.current_topo.changed[type(self).load_pos_topo_vect] - load_bus = self.current_topo.values[type(self).load_pos_topo_vect] + load_changed = self.current_topo.changed[cls.load_pos_topo_vect] + load_bus = self.current_topo.values[cls.load_pos_topo_vect] self.load_p.force_unchanged(load_changed, load_bus) self.load_q.force_unchanged(load_changed, load_bus) - sto_changed = self.current_topo.changed[type(self).storage_pos_topo_vect] - sto_bus = self.current_topo.values[type(self).storage_pos_topo_vect] + sto_changed = self.current_topo.changed[cls.storage_pos_topo_vect] + sto_bus = self.current_topo.values[cls.storage_pos_topo_vect] self.storage_power.force_unchanged(sto_changed, sto_bus) - def __call__(self): + def __call__(self) -> Tuple[np.ndarray, + Tuple[ValueStore, ValueStore, ValueStore, ValueStore, ValueStore], + ValueStore, + Union[Tuple[ValueStore, ValueStore, ValueStore], None]]: + """ + This function should be called at the top of the :func:`grid2op.Backend.Backend.apply_action` + implementation when you decide to code a new backend. + + It processes the state of the backend into a form "easy to use" in the `apply_action` method. + + .. danger:: + It is mandatory to call it, otherwise some features might not work. + + .. warning:: /!\\\\ Do not alter / modify / change / override this implementation /!\\\\ + + Examples + ----------- + + A typical implementation of `apply_action` will start with: + + .. 
code-block:: python + + def apply_action(self, backendAction: Union["grid2op.Action._backendAction._BackendAction", None]) -> None: + if backendAction is None: + return + + ( + active_bus, + (prod_p, prod_v, load_p, load_q, storage), + topo__, + shunts__, + ) = backendAction() + + # process the backend action by updating `self._grid` + + Returns + ------- + + - `active_bus`: matrix with `type(self).n_sub` rows and `type(self).n_busbar_per_sub` columns. Each element + represents a busbar of the grid. ``False`` indicates that nothing is connected to this busbar and ``True`` + means that at least one element is connected to this busbar + - (prod_p, prod_v, load_p, load_q, storage): 5-tuple of Iterable to set the new values of generators, loads and storage units. + - topo: iterable representing the target topology (in local bus, elements are ordered with their + position in the `topo_vect` vector) + + """ self._assign_0_to_disco_el() injections = ( self.prod_p, @@ -552,213 +975,528 @@ def __call__(self): self._get_active_bus() return self.activated_bus, injections, topo, shunts - def get_loads_bus(self): + def get_loads_bus(self) -> ValueStore: + """ + This function might be called in the implementation of :func:`grid2op.Backend.Backend.apply_action`. + + It is relevant when your solver expose API by "element types" for example + you get the possibility to set and access all loads at once, all generators at + once and your solver can easily move element from different busbar in a given + substation. + + This corresponds to option 2a described (shortly) in :class:`_BackendAction`. + + In this setting, this function will give you the "local bus" id for each loads that + have been changed by the agent / time series / voltage controlers / opponent / etc. + + .. warning:: /!\\\\ Do not alter / modify / change / override this implementation /!\\\\ + + .. seealso:: + The other related functions: + + - :func:`_BackendAction.get_loads_bus` + - :func:`_BackendAction.get_gens_bus` + - :func:`_BackendAction.get_lines_or_bus` + - :func:`_BackendAction.get_lines_ex_bus` + - :func:`_BackendAction.get_storages_bus` + + Examples + ----------- + + A typical use of `get_loads_bus` in `apply_action` is: + + .. code-block:: python + + def apply_action(self, backendAction: Union["grid2op.Action._backendAction._BackendAction", None]) -> None: + if backendAction is None: + return + + ( + active_bus, + (prod_p, prod_v, load_p, load_q, storage), + _, + shunts__, + ) = backendAction() + + # process the backend action by updating `self._grid` + ... + + # now process the topology (called option 2.a in the doc): + + lines_or_bus = backendAction.get_lines_or_bus() + for line_id, new_bus in lines_or_bus: + # connect "or" side of "line_id" to (local) bus `new_bus` in self._grid + self._grid.something(...) + # or + self._grid.something = ... + + lines_ex_bus = backendAction.get_lines_ex_bus() + for line_id, new_bus in lines_ex_bus: + # connect "ex" side of "line_id" to (local) bus `new_bus` in self._grid + self._grid.something(...) + # or + self._grid.something = ... + + storages_bus = backendAction.get_storages_bus() + for el_id, new_bus in storages_bus: + # connect storage id `el_id` to (local) bus `new_bus` in self._grid + self._grid.something(...) + # or + self._grid.something = ... + + gens_bus = backendAction.get_gens_bus() + for el_id, new_bus in gens_bus: + # connect generator id `el_id` to (local) bus `new_bus` in self._grid + self._grid.something(...) + # or + self._grid.something = ... 
+ + loads_bus = backendAction.get_loads_bus() + for el_id, new_bus in loads_bus: + # connect generator id `el_id` to (local) bus `new_bus` in self._grid + self._grid.something(...) + # or + self._grid.something = ... + + # continue implementation of `apply_action` + + """ if self._loads_bus is None: - self._loads_bus = ValueStore(self.n_load, dtype=dt_int) - self._loads_bus.copy_from_index(self.current_topo, self.load_pos_topo_vect) + self._loads_bus = ValueStore(type(self).n_load, dtype=dt_int) + self._loads_bus.copy_from_index(self.current_topo, type(self).load_pos_topo_vect) return self._loads_bus - def _aux_to_global(self, value_store, to_subid): + def _aux_to_global(self, value_store, to_subid) -> ValueStore: value_store = copy.deepcopy(value_store) value_store.values = type(self).local_bus_to_global(value_store.values, to_subid) return value_store def get_all_switches(self): # TODO detailed topo - if type(self).detailed_topo_desc is None: + cls = type(self) + if cls.detailed_topo_desc is None: raise Grid2OpException(ERR_MSG_SWITCH) - detailed_topo_desc = type(self).detailed_topo_desc - if type(self).shunts_data_available: - shunt_bus = self.shunt_bus.values + detailed_topo_desc = cls.detailed_topo_desc + if cls.shunts_data_available: + shunt_bus = self.current_shunt_bus.values else: shunt_bus = None if self._detailed_topo is None: + # TODO detailed topo : optimization here : pass the substations modified + # TODO detailed topo : pass the current switches position self._detailed_topo = detailed_topo_desc.compute_switches_position(self.current_topo.values, shunt_bus) return self._detailed_topo - - def get_loads_bus_global(self): + + def get_loads_bus_global(self) -> ValueStore: + """ + This function might be called in the implementation of :func:`grid2op.Backend.Backend.apply_action`. + + It is relevant when your solver expose API by "element types" for example + you get the possibility to set and access all loads at once, all generators at + once AND you can easily switch element from one "busbars" to another in + the whole grid handled by your solver. + + This corresponds to situation 2b described in :class:`_BackendAction`. + + In this setting, this function will give you the "local bus" id for each loads that + have been changed by the agent / time series / voltage controlers / opponent / etc. + + .. warning:: /!\\\\ Do not alter / modify / change / override this implementation /!\\\\ + + .. seealso:: + The other related functions: + + - :func:`_BackendAction.get_loads_bus_global` + - :func:`_BackendAction.get_gens_bus_global` + - :func:`_BackendAction.get_lines_or_bus_global` + - :func:`_BackendAction.get_lines_ex_bus_global` + - :func:`_BackendAction.get_storages_bus_global` + + Examples + ----------- + + A typical use of `get_loads_bus_global` in `apply_action` is: + + .. code-block:: python + + def apply_action(self, backendAction: Union["grid2op.Action._backendAction._BackendAction", None]) -> None: + if backendAction is None: + return + + ( + active_bus, + (prod_p, prod_v, load_p, load_q, storage), + _, + shunts__, + ) = backendAction() + + # process the backend action by updating `self._grid` + ... + + # now process the topology (called option 2.a in the doc): + + lines_or_bus = backendAction.get_lines_or_bus_global() + for line_id, new_bus in lines_or_bus: + # connect "or" side of "line_id" to (global) bus `new_bus` in self._grid + self._grid.something(...) + # or + self._grid.something = ... 
+ + lines_ex_bus = backendAction.get_lines_ex_bus_global() + for line_id, new_bus in lines_ex_bus: + # connect "ex" side of "line_id" to (global) bus `new_bus` in self._grid + self._grid.something(...) + # or + self._grid.something = ... + + storages_bus = backendAction.get_storages_bus_global() + for el_id, new_bus in storages_bus: + # connect storage id `el_id` to (global) bus `new_bus` in self._grid + self._grid.something(...) + # or + self._grid.something = ... + + gens_bus = backendAction.get_gens_bus_global() + for el_id, new_bus in gens_bus: + # connect generator id `el_id` to (global) bus `new_bus` in self._grid + self._grid.something(...) + # or + self._grid.something = ... + + loads_bus = backendAction.get_loads_bus_global() + for el_id, new_bus in loads_bus: + # connect generator id `el_id` to (global) bus `new_bus` in self._grid + self._grid.something(...) + # or + self._grid.something = ... + + # continue implementation of `apply_action` + + """ tmp_ = self.get_loads_bus() - return self._aux_to_global(tmp_, self.load_to_subid) - - def _aux_get_bus_detailed_topo(self, - switches_state : np.ndarray, - detailed_topo_desc : DetailedTopoDescription, - el_type_as_int, - el_id): - OBJ_TYPE_COL = type(detailed_topo_desc).OBJ_TYPE_COL - OBJ_ID_COL = type(detailed_topo_desc).OBJ_ID_COL - res = tuple(switches_state[(detailed_topo_desc.switches[:,OBJ_TYPE_COL] == el_type_as_int) & (detailed_topo_desc.switches[:,OBJ_ID_COL] == el_id)].tolist()) - return res + return self._aux_to_global(tmp_, type(self).load_to_subid) - def get_loads_bus_switches(self): - tmp_ = self.get_loads_bus() - # TODO detailed topo - # for now this is working because of the super simple representation of subtation - # but in reality i need to come up with a routine to find the topology (and raise the BackendError "impossible topology" - # if not possible) - if type(self).detailed_topo_desc is None: - raise Grid2OpException(ERR_MSG_SWITCH) - detailed_topo_desc = type(self).detailed_topo_desc - # returns an iterable: for each load you have: load_index, (pos_switch1, pos_switch_2, ..., pos_switchn) - # with (pos_switch1, pos_switch_2, ..., pos_switchn) the position of the - # n switch connecting the load to one busbar - # only one of pos_switch1, pos_switch_2, ..., pos_switchn is True ! - # res = [(l_id, self._aux_get_bus_detailed_topo(detailed_topo_desc.load_to_busbar_id, l_id, new_bus)) for l_id, new_bus in tmp_] + def get_gens_bus(self) -> ValueStore: + """ + This function might be called in the implementation of :func:`grid2op.Backend.Backend.apply_action`. - if self._detailed_topo is None: - self.get_all_switches() - busbar_connectors_state, switches_state = self._detailed_topo - LOAD_TYPE = type(detailed_topo_desc).LOAD_ID - res = [(el_id, self._aux_get_bus_detailed_topo(switches_state, detailed_topo_desc, LOAD_TYPE, el_id)) for el_id, new_bus in tmp_] - return res - - def get_gens_bus(self): + It is relevant when your solver expose API by "element types" for example + you get the possibility to set and access all loads at once, all generators at + once and your solver can easily move element from different busbar in a given + substation. + + This corresponds to option 2a described (shortly) in :class:`_BackendAction`. + + In this setting, this function will give you the "local bus" id for each generators that + have been changed by the agent / time series / voltage controlers / opponent / etc. + + .. warning:: /!\\\\ Do not alter / modify / change / override this implementation /!\\\\ + + .. 
seealso:: + The other related functions: + + - :func:`_BackendAction.get_loads_bus` + - :func:`_BackendAction.get_gens_bus` + - :func:`_BackendAction.get_lines_or_bus` + - :func:`_BackendAction.get_lines_ex_bus` + - :func:`_BackendAction.get_storages_bus` + + Examples + --------- + + Some examples are given in the documentation of :func:`_BackendAction.get_loads_bus` + + """ if self._gens_bus is None: - self._gens_bus = ValueStore(self.n_gen, dtype=dt_int) - self._gens_bus.copy_from_index(self.current_topo, self.gen_pos_topo_vect) + self._gens_bus = ValueStore(type(self).n_gen, dtype=dt_int) + self._gens_bus.copy_from_index(self.current_topo, type(self).gen_pos_topo_vect) return self._gens_bus - def get_gens_bus_global(self): + def get_gens_bus_global(self) -> ValueStore: + """ + This function might be called in the implementation of :func:`grid2op.Backend.Backend.apply_action`. + + It is relevant when your solver expose API by "element types" for example + you get the possibility to set and access all loads at once, all generators at + once AND you can easily switch element from one "busbars" to another in + the whole grid handled by your solver. + + This corresponds to situation 2b described in :class:`_BackendAction`. + + In this setting, this function will give you the "local bus" id for each loads that + have been changed by the agent / time series / voltage controlers / opponent / etc. + + .. warning:: /!\\\\ Do not alter / modify / change / override this implementation /!\\\\ + + .. seealso:: + The other related functions: + + - :func:`_BackendAction.get_loads_bus_global` + - :func:`_BackendAction.get_gens_bus_global` + - :func:`_BackendAction.get_lines_or_bus_global` + - :func:`_BackendAction.get_lines_ex_bus_global` + - :func:`_BackendAction.get_storages_bus_global` + + Examples + --------- + + Some examples are given in the documentation of :func:`_BackendAction.get_loads_bus_global` + """ + tmp_ = copy.deepcopy(self.get_gens_bus()) - return self._aux_to_global(tmp_, self.gen_to_subid) + return self._aux_to_global(tmp_, type(self).gen_to_subid) - def get_gens_bus_switches(self): - tmp_ = self.get_gens_bus() - # TODO detailed topo - # for now this is working because of the super simple representation of subtation - # but in reality i need to come up with a routine to find the topology (and raise the BackendError "impossible topology" - # if not possible) - if type(self).detailed_topo_desc is None: - raise Grid2OpException(ERR_MSG_SWITCH) - detailed_topo_desc = type(self).detailed_topo_desc - # returns an iterable: for each load you have: load_index, (pos_switch1, pos_switch_2, ..., pos_switchn) - # with (pos_switch1, pos_switch_2, ..., pos_switchn) the position of the - # n switch connecting the load to one busbar - # only one of pos_switch1, pos_switch_2, ..., pos_switchn is True ! - if self._detailed_topo is None: - self.get_all_switches() - busbar_connectors_state, switches_state = self._detailed_topo - GEN_TYPE = type(detailed_topo_desc).GEN_ID - res = [(el_id, self._aux_get_bus_detailed_topo(switches_state, detailed_topo_desc, GEN_TYPE, el_id)) for el_id, new_bus in tmp_] - return res - - def get_lines_or_bus(self): + def get_lines_or_bus(self) -> ValueStore: + """ + This function might be called in the implementation of :func:`grid2op.Backend.Backend.apply_action`. 
+ + It is relevant when your solver expose API by "element types" for example + you get the possibility to set and access all loads at once, all generators at + once and your solver can easily move element from different busbar in a given + substation. + + This corresponds to option 2a described (shortly) in :class:`_BackendAction`. + + In this setting, this function will give you the "local bus" id for each line (or side) that + have been changed by the agent / time series / voltage controlers / opponent / etc. + + .. warning:: /!\\\\ Do not alter / modify / change / override this implementation /!\\\\ + + .. seealso:: + The other related functions: + + - :func:`_BackendAction.get_loads_bus` + - :func:`_BackendAction.get_gens_bus` + - :func:`_BackendAction.get_lines_or_bus` + - :func:`_BackendAction.get_lines_ex_bus` + - :func:`_BackendAction.get_storages_bus` + + Examples + --------- + + Some examples are given in the documentation of :func:`_BackendAction.get_loads_bus` + + """ if self._lines_or_bus is None: - self._lines_or_bus = ValueStore(self.n_line, dtype=dt_int) + self._lines_or_bus = ValueStore(type(self).n_line, dtype=dt_int) self._lines_or_bus.copy_from_index( - self.current_topo, self.line_or_pos_topo_vect + self.current_topo, type(self).line_or_pos_topo_vect ) return self._lines_or_bus - def get_lines_or_bus_global(self): - tmp_ = self.get_lines_or_bus() - return self._aux_to_global(tmp_, self.line_or_to_subid) - - def get_lines_or_bus_switches(self): + def get_lines_or_bus_global(self) -> ValueStore: + """ + This function might be called in the implementation of :func:`grid2op.Backend.Backend.apply_action`. + + It is relevant when your solver expose API by "element types" for example + you get the possibility to set and access all loads at once, all generators at + once AND you can easily switch element from one "busbars" to another in + the whole grid handled by your solver. + + This corresponds to situation 2b described in :class:`_BackendAction`. + + In this setting, this function will give you the "local bus" id for each loads that + have been changed by the agent / time series / voltage controlers / opponent / etc. + + .. warning:: /!\\\\ Do not alter / modify / change / override this implementation /!\\\\ + + .. seealso:: + The other related functions: + + - :func:`_BackendAction.get_loads_bus_global` + - :func:`_BackendAction.get_gens_bus_global` + - :func:`_BackendAction.get_lines_or_bus_global` + - :func:`_BackendAction.get_lines_ex_bus_global` + - :func:`_BackendAction.get_storages_bus_global` + + Examples + --------- + + Some examples are given in the documentation of :func:`_BackendAction.get_loads_bus_global` + """ tmp_ = self.get_lines_or_bus() - # TODO detailed topo - # for now this is working because of the super simple representation of subtation - # but in reality i need to come up with a routine to find the topology (and raise the BackendError "impossible topology" - # if not possible) - if type(self).detailed_topo_desc is None: - raise Grid2OpException(ERR_MSG_SWITCH) - detailed_topo_desc = type(self).detailed_topo_desc - # returns an iterable: for each load you have: load_index, (pos_switch1, pos_switch_2, ..., pos_switchn) - # with (pos_switch1, pos_switch_2, ..., pos_switchn) the position of the - # n switch connecting the load to one busbar - # only one of pos_switch1, pos_switch_2, ..., pos_switchn is True ! 
- if self._detailed_topo is None: - self.get_all_switches() - busbar_connectors_state, switches_state = self._detailed_topo - LINE_OR_ID = type(detailed_topo_desc).LINE_OR_ID - res = [(el_id, self._aux_get_bus_detailed_topo(switches_state, detailed_topo_desc, LINE_OR_ID, el_id)) for el_id, new_bus in tmp_] - return res + return self._aux_to_global(tmp_, type(self).line_or_to_subid) - def get_lines_ex_bus(self): + def get_lines_ex_bus(self) -> ValueStore: + """ + This function might be called in the implementation of :func:`grid2op.Backend.Backend.apply_action`. + + It is relevant when your solver expose API by "element types" for example + you get the possibility to set and access all loads at once, all generators at + once and your solver can easily move element from different busbar in a given + substation. + + This corresponds to option 2a described (shortly) in :class:`_BackendAction`. + + In this setting, this function will give you the "local bus" id for each line (ex side) that + have been changed by the agent / time series / voltage controlers / opponent / etc. + + .. warning:: /!\\\\ Do not alter / modify / change / override this implementation /!\\\\ + + .. seealso:: + The other related functions: + + - :func:`_BackendAction.get_loads_bus` + - :func:`_BackendAction.get_gens_bus` + - :func:`_BackendAction.get_lines_or_bus` + - :func:`_BackendAction.get_lines_ex_bus` + - :func:`_BackendAction.get_storages_bus` + + Examples + --------- + + Some examples are given in the documentation of :func:`_BackendAction.get_loads_bus` + + """ if self._lines_ex_bus is None: - self._lines_ex_bus = ValueStore(self.n_line, dtype=dt_int) + self._lines_ex_bus = ValueStore(type(self).n_line, dtype=dt_int) self._lines_ex_bus.copy_from_index( - self.current_topo, self.line_ex_pos_topo_vect + self.current_topo, type(self).line_ex_pos_topo_vect ) return self._lines_ex_bus - def get_lines_ex_bus_global(self): + def get_lines_ex_bus_global(self) -> ValueStore: + """ + This function might be called in the implementation of :func:`grid2op.Backend.Backend.apply_action`. + + It is relevant when your solver expose API by "element types" for example + you get the possibility to set and access all loads at once, all generators at + once AND you can easily switch element from one "busbars" to another in + the whole grid handled by your solver. + + This corresponds to situation 2b described in :class:`_BackendAction`. + + In this setting, this function will give you the "local bus" id for each loads that + have been changed by the agent / time series / voltage controlers / opponent / etc. + + .. warning:: /!\\\\ Do not alter / modify / change / override this implementation /!\\\\ + + .. 
seealso:: + The other related functions: + + - :func:`_BackendAction.get_loads_bus_global` + - :func:`_BackendAction.get_gens_bus_global` + - :func:`_BackendAction.get_lines_or_bus_global` + - :func:`_BackendAction.get_lines_ex_bus_global` + - :func:`_BackendAction.get_storages_bus_global` + + Examples + --------- + + Some examples are given in the documentation of :func:`_BackendAction.get_loads_bus_global` + """ tmp_ = self.get_lines_ex_bus() - return self._aux_to_global(tmp_, self.line_ex_to_subid) + return self._aux_to_global(tmp_, type(self).line_ex_to_subid) - def get_lines_ex_bus_switches(self): - tmp_ = self.get_lines_ex_bus() - # TODO detailed topo - # for now this is working because of the super simple representation of subtation - # but in reality i need to come up with a routine to find the topology (and raise the BackendError "impossible topology" - # if not possible) - if type(self).detailed_topo_desc is None: - raise Grid2OpException(ERR_MSG_SWITCH) - detailed_topo_desc = type(self).detailed_topo_desc - # returns an iterable: for each load you have: load_index, (pos_switch1, pos_switch_2, ..., pos_switchn) - # with (pos_switch1, pos_switch_2, ..., pos_switchn) the position of the - # n switch connecting the load to one busbar - # only one of pos_switch1, pos_switch_2, ..., pos_switchn is True ! - if self._detailed_topo is None: - self.get_all_switches() - busbar_connectors_state, switches_state = self._detailed_topo - LINE_EX_ID = type(detailed_topo_desc).LINE_EX_ID - res = [(el_id, self._aux_get_bus_detailed_topo(switches_state, detailed_topo_desc, LINE_EX_ID, el_id)) for el_id, new_bus in tmp_] - return res - - def get_storages_bus(self): + def get_storages_bus(self) -> ValueStore: + """ + This function might be called in the implementation of :func:`grid2op.Backend.Backend.apply_action`. + + It is relevant when your solver expose API by "element types" for example + you get the possibility to set and access all loads at once, all generators at + once and your solver can easily move element from different busbar in a given + substation. + + This corresponds to option 2a described (shortly) in :class:`_BackendAction`. + + In this setting, this function will give you the "local bus" id for each storage that + have been changed by the agent / time series / voltage controlers / opponent / etc. + + .. warning:: /!\\\\ Do not alter / modify / change / override this implementation /!\\\\ + + .. seealso:: + The other related functions: + + - :func:`_BackendAction.get_loads_bus` + - :func:`_BackendAction.get_gens_bus` + - :func:`_BackendAction.get_lines_or_bus` + - :func:`_BackendAction.get_lines_ex_bus` + - :func:`_BackendAction.get_storages_bus` + + Examples + --------- + + Some examples are given in the documentation of :func:`_BackendAction.get_loads_bus` + + """ if self._storage_bus is None: - self._storage_bus = ValueStore(self.n_storage, dtype=dt_int) - self._storage_bus.copy_from_index(self.current_topo, self.storage_pos_topo_vect) + self._storage_bus = ValueStore(type(self).n_storage, dtype=dt_int) + self._storage_bus.copy_from_index(self.current_topo, type(self).storage_pos_topo_vect) return self._storage_bus - def get_storages_bus_global(self): + def get_storages_bus_global(self) -> ValueStore: + """ + This function might be called in the implementation of :func:`grid2op.Backend.Backend.apply_action`. 
+ + It is relevant when your solver expose API by "element types" for example + you get the possibility to set and access all loads at once, all generators at + once AND you can easily switch element from one "busbars" to another in + the whole grid handled by your solver. + + This corresponds to situation 2b described in :class:`_BackendAction`. + + In this setting, this function will give you the "local bus" id for each loads that + have been changed by the agent / time series / voltage controlers / opponent / etc. + + .. warning:: /!\\\\ Do not alter / modify / change / override this implementation /!\\\\ + + .. seealso:: + The other related functions: + + - :func:`_BackendAction.get_loads_bus_global` + - :func:`_BackendAction.get_gens_bus_global` + - :func:`_BackendAction.get_lines_or_bus_global` + - :func:`_BackendAction.get_lines_ex_bus_global` + - :func:`_BackendAction.get_storages_bus_global` + + Examples + --------- + + Some examples are given in the documentation of :func:`_BackendAction.get_loads_bus_global` + """ tmp_ = self.get_storages_bus() - return self._aux_to_global(tmp_, self.storage_to_subid) + return self._aux_to_global(tmp_, type(self).storage_to_subid) - def get_storages_bus_switches(self): - tmp_ = self.get_storages_bus() - # TODO detailed topo - # for now this is working because of the super simple representation of subtation - # but in reality i need to come up with a routine to find the topology (and raise the BackendError "impossible topology" - # if not possible) - if type(self).detailed_topo_desc is None: - raise Grid2OpException(ERR_MSG_SWITCH) - detailed_topo_desc = type(self).detailed_topo_desc - # returns an iterable: for each load you have: load_index, (pos_switch1, pos_switch_2, ..., pos_switchn) - # with (pos_switch1, pos_switch_2, ..., pos_switchn) the position of the - # n switch connecting the load to one busbar - # only one of pos_switch1, pos_switch_2, ..., pos_switchn is True ! - if self._detailed_topo is None: - self.get_all_switches() - busbar_connectors_state, switches_state = self._detailed_topo - STORAGE_ID = type(detailed_topo_desc).STORAGE_ID - res = [(el_id, self._aux_get_bus_detailed_topo(switches_state, detailed_topo_desc, STORAGE_ID, el_id)) for el_id, new_bus in tmp_] - return res - - def get_shunts_bus_switches(self): - if self._shunt_bus is None: - self._shunt_bus = ValueStore(self.n_shunt, dtype=dt_int) - self._shunt_bus.copy_from_index(self.shunt_bus, np.arange(self.n_shunt)) + def get_shunts_bus_global(self) -> ValueStore: + """ + This function might be called in the implementation of :func:`grid2op.Backend.Backend.apply_action`. - # TODO detailed topo - if type(self).detailed_topo_desc is None: - raise Grid2OpException(ERR_MSG_SWITCH) - detailed_topo_desc = type(self).detailed_topo_desc - # returns an iterable: for each load you have: load_index, (pos_switch1, pos_switch_2, ..., pos_switchn) - # with (pos_switch1, pos_switch_2, ..., pos_switchn) the position of the - # n switch connecting the load to one busbar - # only one of pos_switch1, pos_switch_2, ..., pos_switchn is True ! 
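The `*_global` variants above rely on the usual grid2op numbering where busbar k of substation s gets the global id s + (k - 1) * n_sub; a small self-contained sketch of that conversion (the function name is illustrative, not the one used by `_aux_to_global`):

    import numpy as np

    def local_to_global(local_bus: np.ndarray, to_subid: np.ndarray, n_sub: int) -> np.ndarray:
        """Sketch of the local -> global bus convention; -1 (disconnected) is preserved."""
        glob = to_subid + (local_bus - 1) * n_sub
        glob[local_bus == -1] = -1
        return glob

    # a storage unit at substation 3 sitting on local bus 2 of a 14-substation grid
    print(local_to_global(np.array([2]), np.array([3]), 14))   # -> [17]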
- if self._detailed_topo is None: - self.get_all_switches() - busbar_connectors_state, switches_state = self._detailed_topo - SHUNT_ID = type(detailed_topo_desc).SHUNT_ID - res = [(el_id, self._aux_get_bus_detailed_topo(switches_state, detailed_topo_desc, SHUNT_ID, el_id)) for el_id, new_bus in self._shunt_bus] - return res + It is relevant when your solver expose API by "element types" for example + you get the possibility to set and access all loads at once, all generators at + once AND you can easily switch element from one "busbars" to another in + the whole grid handled by your solver. + + This corresponds to situation 2b described in :class:`_BackendAction`. + + In this setting, this function will give you the "local bus" id for each loads that + have been changed by the agent / time series / voltage controlers / opponent / etc. + + .. warning:: /!\\\\ Do not alter / modify / change / override this implementation /!\\\\ + + .. seealso:: + The other related functions: + + - :func:`_BackendAction.get_loads_bus_global` + - :func:`_BackendAction.get_gens_bus_global` + - :func:`_BackendAction.get_lines_or_bus_global` + - :func:`_BackendAction.get_lines_ex_bus_global` + - :func:`_BackendAction.get_storages_bus_global` + + Examples + --------- + + Some examples are given in the documentation of :func:`_BackendAction.get_loads_bus_global` + """ + tmp_ = self.shunt_bus + return self._aux_to_global(tmp_, type(self).shunt_to_subid) - def _get_active_bus(self): + def _get_active_bus(self) -> None: + """ + .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\ + + """ self.activated_bus[:, :] = False - tmp = self.current_topo.values - 1 # TODO global to local ! + tmp = self.current_topo.values - 1 is_el_conn = tmp >= 0 self.activated_bus[self.big_topo_to_subid[is_el_conn], tmp[is_el_conn]] = True if type(self).shunts_data_available: @@ -766,11 +1504,13 @@ def _get_active_bus(self): tmp = self.current_shunt_bus.values - 1 self.activated_bus[type(self).shunt_to_subid[is_el_conn], tmp[is_el_conn]] = True - def update_state(self, powerline_disconnected): + def update_state(self, powerline_disconnected) -> None: """ .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\ + + This is handled by the environment ! - Update the internal state. Should be called after the cascading failures + Update the internal state. Should be called after the cascading failures. 
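The `activated_bus` bookkeeping done in `_get_active_bus` above can be pictured with a tiny standalone example (the names mirror the attributes used in the method, the numbers are made up):

    import numpy as np

    n_sub, n_busbar = 3, 2
    big_topo_to_subid = np.array([0, 0, 1, 1, 2, 2])   # substation of each position of the topo vector
    current_topo = np.array([1, 2, 1, -1, 2, 1])       # local bus of each element (-1 = disconnected)

    activated_bus = np.zeros((n_sub, n_busbar), dtype=bool)
    tmp = current_topo - 1
    is_el_conn = tmp >= 0
    activated_bus[big_topo_to_subid[is_el_conn], tmp[is_el_conn]] = True
    # activated_bus[s, b] is True when at least one element of substation s sits on busbar b + 1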
""" if (powerline_disconnected >= 0).any(): diff --git a/grid2op/Action/actionSpace.py b/grid2op/Action/actionSpace.py index 975b5e9d0..2b55406e0 100644 --- a/grid2op/Action/actionSpace.py +++ b/grid2op/Action/actionSpace.py @@ -8,7 +8,10 @@ import warnings import copy +from typing import Dict, List, Any, Literal, Optional +import grid2op +from grid2op.typing_variables import DICT_ACT_TYPING from grid2op.Action.baseAction import BaseAction from grid2op.Action.serializableActionSpace import SerializableActionSpace @@ -41,6 +44,7 @@ def __init__( gridobj, legal_action, actionClass=BaseAction, # need to be a base grid2op type (and not a type generated on the fly) + _local_dir_cls=None, ): """ INTERNAL USE ONLY @@ -68,11 +72,15 @@ def __init__( """ actionClass._add_shunt_data() actionClass._update_value_set() - SerializableActionSpace.__init__(self, gridobj, actionClass=actionClass) + SerializableActionSpace.__init__(self, gridobj, actionClass=actionClass, _local_dir_cls=_local_dir_cls) self.legal_action = legal_action def __call__( - self, dict_: dict = None, check_legal: bool = False, env: "BaseEnv" = None + self, + dict_: DICT_ACT_TYPING = None, + check_legal: bool = False, + env: "grid2op.Environment.BaseEnv" = None, + _names_chronics_to_backend: Optional[Dict[Literal["loads", "prods", "lines"], Dict[str, str]]]=None, ) -> BaseAction: """ This utility allows you to build a valid action, with the proper sizes if you provide it with a valid @@ -115,11 +123,19 @@ def __call__( An action that is valid and corresponds to what the agent want to do with the formalism defined in see :func:`Action.udpate`. + Notes + ----- + + This function is not in the "SerializableActionSpace" because the + "legal_action" is not serialized. TODO ? + """ - - res = self.actionClass() + # build the action + res : BaseAction = self.actionClass(_names_chronics_to_backend) + # update the action res.update(dict_) + if check_legal: is_legal, reason = self._is_legal(res, env) if not is_legal: diff --git a/grid2op/Action/baseAction.py b/grid2op/Action/baseAction.py index ac7321a55..62e400320 100644 --- a/grid2op/Action/baseAction.py +++ b/grid2op/Action/baseAction.py @@ -9,8 +9,20 @@ import copy import numpy as np import warnings -from typing import Tuple +from typing import Tuple, Dict, Literal, Any, List, Optional +import grid2op.Observation + + +try: + from typing import Self +except ImportError: + from typing_extensions import Self + +from packaging import version + +import grid2op +from grid2op.typing_variables import DICT_ACT_TYPING from grid2op.dtypes import dt_int, dt_bool, dt_float from grid2op.Exceptions import * from grid2op.Space import GridObjects @@ -73,9 +85,11 @@ class BaseAction(GridObjects): interpretation: - 0 -> don't change + - -1 -> disconnect the object. - 1 -> connect to bus 1 - 2 -> connect to bus 2 - - -1 -> disconnect the object. + - 3 -> connect to bus 3 (added in version 1.10.0) + - etc. (added in version 1.10.0) - the fifth element changes the buses to which the object is connected. It's a boolean vector interpreted as: @@ -203,6 +217,8 @@ class BaseAction(GridObjects): of MW that will be "curtailed" but will rather provide a limit on the number of MW a given generator can produce. 
+ TODO detailed topo + Examples -------- Here are example on how to use the action, for more information on what will be the effect of each, @@ -357,7 +373,7 @@ class BaseAction(GridObjects): "set_storage", "curtail", "raise_alarm", - "raise_alert", + "raise_alert" } attr_list_vect = [ @@ -375,7 +391,7 @@ class BaseAction(GridObjects): "_storage_power", "_curtail", "_raise_alarm", - "_raise_alert", + "_raise_alert" ] attr_nan_list_set = set() @@ -388,7 +404,11 @@ class BaseAction(GridObjects): ERR_ACTION_CUT = 'The action added to me will be cut, because i don\'t support modification of "{}"' ERR_NO_STOR_SET_BUS = 'Impossible to modify the storage bus (with "set") with this action type.' - def __init__(self): + #: If set to "always" or "once" will issue a warning in case the + #: agent tries to affect the topology with set_switch / change_switch + #: and set_bus / change_bus in the same action + ISSUE_WARNING_SWITCH_SET_CHANGE : Literal["always", "once", "never"] = "always" + def __init__(self, _names_chronics_to_backend: Optional[Dict[Literal["loads", "prods", "lines"], Dict[str, str]]]=None): """ INTERNAL USE ONLY @@ -404,50 +424,57 @@ def __init__(self): """ GridObjects.__init__(self) - + cls = type(self) + + if _names_chronics_to_backend is not None: + # should only be the case for the "init state" action + self._names_chronics_to_backend = _names_chronics_to_backend + else: + self._names_chronics_to_backend = None + # False(line is disconnected) / True(line is connected) - self._set_line_status = np.full(shape=self.n_line, fill_value=0, dtype=dt_int) + self._set_line_status = np.full(shape=cls.n_line, fill_value=0, dtype=dt_int) self._switch_line_status = np.full( - shape=self.n_line, fill_value=False, dtype=dt_bool + shape=cls.n_line, fill_value=False, dtype=dt_bool ) # injection change self._dict_inj = {} # topology changed - self._set_topo_vect = np.full(shape=self.dim_topo, fill_value=0, dtype=dt_int) + self._set_topo_vect = np.full(shape=cls.dim_topo, fill_value=0, dtype=dt_int) self._change_bus_vect = np.full( - shape=self.dim_topo, fill_value=False, dtype=dt_bool + shape=cls.dim_topo, fill_value=False, dtype=dt_bool ) # add the hazards and maintenance usefull for saving. 
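The new `ISSUE_WARNING_SWITCH_SET_CHANGE` class attribute introduced above accepts "always", "once" or "never"; users who deliberately mix the bus and switch APIs can tune it at the class level, for example (a usage sketch):

    from grid2op.Action import BaseAction

    # "always" (default): warn on every such action, "once": warn a single time, "never": stay silent
    BaseAction.ISSUE_WARNING_SWITCH_SET_CHANGE = "once"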
- self._hazards = np.full(shape=self.n_line, fill_value=False, dtype=dt_bool) - self._maintenance = np.full(shape=self.n_line, fill_value=False, dtype=dt_bool) + self._hazards = np.full(shape=cls.n_line, fill_value=False, dtype=dt_bool) + self._maintenance = np.full(shape=cls.n_line, fill_value=False, dtype=dt_bool) # redispatching vector - self._redispatch = np.full(shape=self.n_gen, fill_value=0.0, dtype=dt_float) + self._redispatch = np.full(shape=cls.n_gen, fill_value=0.0, dtype=dt_float) # storage unit vector self._storage_power = np.full( - shape=self.n_storage, fill_value=0.0, dtype=dt_float + shape=cls.n_storage, fill_value=0.0, dtype=dt_float ) # curtailment of renewable energy - self._curtail = np.full(shape=self.n_gen, fill_value=-1.0, dtype=dt_float) + self._curtail = np.full(shape=cls.n_gen, fill_value=-1.0, dtype=dt_float) self._vectorized = None self._lines_impacted = None self._subs_impacted = None # shunts - if type(self).shunts_data_available: + if cls.shunts_data_available: self.shunt_p = np.full( - shape=self.n_shunt, fill_value=np.NaN, dtype=dt_float + shape=cls.n_shunt, fill_value=np.NaN, dtype=dt_float ) self.shunt_q = np.full( - shape=self.n_shunt, fill_value=np.NaN, dtype=dt_float + shape=cls.n_shunt, fill_value=np.NaN, dtype=dt_float ) - self.shunt_bus = np.full(shape=self.n_shunt, fill_value=0, dtype=dt_int) + self.shunt_bus = np.full(shape=cls.n_shunt, fill_value=0, dtype=dt_int) else: self.shunt_p = None self.shunt_q = None @@ -456,13 +483,20 @@ def __init__(self): self._single_act = True self._raise_alarm = np.full( - shape=self.dim_alarms, dtype=dt_bool, fill_value=False + shape=cls.dim_alarms, dtype=dt_bool, fill_value=False ) # TODO self._raise_alert = np.full( - shape=self.dim_alerts, dtype=dt_bool, fill_value=False + shape=cls.dim_alerts, dtype=dt_bool, fill_value=False ) # TODO + self._set_switch_status = None + self._change_switch_status = None + if cls.detailed_topo_desc is not None: + n_switch = cls.detailed_topo_desc.switches.shape[0] + self._set_switch_status = np.full(shape=n_switch, fill_value=0, dtype=dt_int) + self._change_switch_status = np.full(shape=n_switch, fill_value=False, dtype=dt_bool) + # change the stuff self._modif_inj = False self._modif_set_bus = False @@ -474,10 +508,14 @@ def __init__(self): self._modif_curtailment = False self._modif_alarm = False self._modif_alert = False + + # TODO detailed topo + self._modif_set_switch = False + self._modif_change_switch = False @classmethod def process_shunt_satic_data(cls): - if not cls.shunts_data_available: + if cls.shunts_data_available: # this is really important, otherwise things from grid2op base types will be affected cls.attr_list_vect = copy.deepcopy(cls.attr_list_vect) cls.attr_list_set = copy.deepcopy(cls.attr_list_set) @@ -514,6 +552,8 @@ def _aux_copy(self, other): "_modif_alarm", "_modif_alert", "_single_act", + "_modif_set_switch", + "_modif_change_switch" ] attr_vect = [ @@ -532,7 +572,10 @@ def _aux_copy(self, other): if type(self).shunts_data_available: attr_vect += ["shunt_p", "shunt_q", "shunt_bus"] - + + if type(self).detailed_topo_desc is not None: + attr_vect += ["_set_switch_status", "_change_switch_status"] + for attr_nm in attr_simple: setattr(other, attr_nm, getattr(self, attr_nm)) @@ -555,9 +598,9 @@ def __copy__(self) -> "BaseAction": return res - @classmethod - def process_shunt_satic_data(cls): - return super().process_shunt_satic_data() + # @classmethod + # def process_shunt_satic_data(cls): + # return super().process_shunt_satic_data() def __deepcopy__(self, 
memodict={}) -> "BaseAction": res = type(self)() @@ -581,7 +624,7 @@ def _aux_serialize_add_key_change(self, attr_nm, dict_key, res): res[dict_key] = tmp_ def _aux_serialize_add_key_set(self, attr_nm, dict_key, res): - tmp_ = [(int(id_), int(val)) for id_, val in enumerate(getattr(self, attr_nm)) if val != 0.] + tmp_ = [(int(id_), int(val)) for id_, val in enumerate(getattr(self, attr_nm)) if np.abs(val) >= 1e-7] if tmp_: res[dict_key] = tmp_ @@ -616,6 +659,7 @@ def as_serializable_dict(self) -> dict: """ res = {} + cls = type(self) # bool elements if self._modif_alert: res["raise_alert"] = [ @@ -637,7 +681,7 @@ def as_serializable_dict(self) -> dict: self._aux_serialize_add_key_change("gen_change_bus", "generators_id", res["change_bus"]) self._aux_serialize_add_key_change("line_or_change_bus", "lines_or_id", res["change_bus"]) self._aux_serialize_add_key_change("line_ex_change_bus", "lines_ex_id", res["change_bus"]) - if hasattr(type(self), "n_storage") and type(self).n_storage: + if hasattr(cls, "n_storage") and cls.n_storage: self._aux_serialize_add_key_change("storage_change_bus", "storages_id", res["change_bus"]) if not res["change_bus"]: del res["change_bus"] @@ -656,7 +700,7 @@ def as_serializable_dict(self) -> dict: self._aux_serialize_add_key_set("gen_set_bus", "generators_id", res["set_bus"]) self._aux_serialize_add_key_set("line_or_set_bus", "lines_or_id", res["set_bus"]) self._aux_serialize_add_key_set("line_ex_set_bus", "lines_ex_id", res["set_bus"]) - if hasattr(type(self), "n_storage") and type(self).n_storage: + if hasattr(cls, "n_storage") and cls.n_storage: self._aux_serialize_add_key_set("storage_set_bus", "storages_id", res["set_bus"]) if not res["set_bus"]: del res["set_bus"] @@ -675,7 +719,7 @@ def as_serializable_dict(self) -> dict: res["redispatch"] = [ (int(id_), float(val)) for id_, val in enumerate(self._redispatch) - if val != 0.0 + if np.abs(val) >= 1e-7 ] if not res["redispatch"]: del res["redispatch"] @@ -684,7 +728,7 @@ def as_serializable_dict(self) -> dict: res["set_storage"] = [ (int(id_), float(val)) for id_, val in enumerate(self._storage_power) - if val != 0.0 + if np.abs(val) >= 1e-7 ] if not res["set_storage"]: del res["set_storage"] @@ -693,7 +737,7 @@ def as_serializable_dict(self) -> dict: res["curtail"] = [ (int(id_), float(val)) for id_, val in enumerate(self._curtail) - if val != -1 + if np.abs(val + 1.) >= 1e-7 ] if not res["curtail"]: del res["curtail"] @@ -707,7 +751,7 @@ def as_serializable_dict(self) -> dict: if not res["injection"]: del res["injection"] - if type(self).shunts_data_available: + if cls.shunts_data_available: res["shunt"] = {} if np.isfinite(self.shunt_p).any(): res["shunt"]["shunt_p"] = [ @@ -725,6 +769,26 @@ def as_serializable_dict(self) -> dict: ] if not res["shunt"]: del res["shunt"] + + if type(self).detailed_topo_desc is not None: + # TODO detailed topo + + if self._modif_set_switch: + res["set_switch_status"] = [ + (int(id_), int(val)) + for id_, val in enumerate(self._set_switch_status) + if val != 0 + ] + if not res["set_switch_status"]: + del res["set_switch_status"] + + if self._modif_change_switch: + res["change_switch_status"] = [ + int(id_) for id_, val in enumerate(self._change_switch_status) if val + ] + if not res["change_switch_status"]: + del res["change_switch_status"] + return res @classmethod @@ -756,7 +820,7 @@ def alarm_raised(self) -> np.ndarray: The indexes of the areas where the agent has raised an alarm. 
""" - return np.where(self._raise_alarm)[0] + return (self._raise_alarm).nonzero()[0] def alert_raised(self) -> np.ndarray: """ @@ -770,41 +834,106 @@ def alert_raised(self) -> np.ndarray: The indexes of the lines where the agent has raised an alert. """ - return np.where(self._raise_alert)[0] + return (self._raise_alert).nonzero[0] + + @classmethod + def _aux_process_old_compat(cls): + super()._aux_process_old_compat() + + # this is really important, otherwise things from grid2op base types will be affected + cls.authorized_keys = copy.deepcopy(cls.authorized_keys) + cls.attr_list_vect = copy.deepcopy(cls.attr_list_vect) + + # deactivate storage + if "set_storage" in cls.authorized_keys: + cls.authorized_keys.remove("set_storage") + if "_storage_power" in cls.attr_list_vect: + cls.attr_list_vect.remove("_storage_power") + cls.attr_list_set = set(cls.attr_list_vect) + + # remove the curtailment + if "curtail" in cls.authorized_keys: + cls.authorized_keys.remove("curtail") + if "_curtail" in cls.attr_list_vect: + cls.attr_list_vect.remove("_curtail") + + cls._aux_remove_switches() + + @classmethod + def _aux_remove_switches(cls): + # remove switches + if "set_switch" in cls.authorized_keys: + cls.authorized_keys.remove("set_switch") + if "_set_switch_status" in cls.attr_list_vect: + cls.attr_list_vect.remove("_set_switch_status") + if "change_switch" in cls.authorized_keys: + cls.authorized_keys.remove("change_switch") + if "_change_switch_status" in cls.attr_list_vect: + cls.attr_list_vect.remove("_change_switch_status") + + @classmethod + def process_grid2op_detailed_topo_vect(cls): + """Process the class to register new attribute for observation and action + if the detailed_topo_desc is not empty (*ie* if there switches on your grid) + + Only called if a detailed topology is registered + """ + cls.authorized_keys = copy.deepcopy(cls.authorized_keys) + cls.attr_list_vect = copy.deepcopy(cls.attr_list_vect) + cls.attr_list_set = copy.deepcopy(cls.attr_list_set) + + # for switches (element to busbar) + cls.authorized_keys.add("set_switch") + cls.authorized_keys.add("change_switch") + cls.attr_list_vect.append("_set_switch_status") + cls.attr_list_vect.append("_change_switch_status") + + cls.attr_list_set = set(cls.attr_list_vect) + + @classmethod + def _aux_process_n_busbar_per_sub(cls): + cls.authorized_keys = copy.deepcopy(cls.authorized_keys) + cls.attr_list_vect = copy.deepcopy(cls.attr_list_vect) + + if "change_bus" in cls.authorized_keys: + cls.authorized_keys.remove("change_bus") + if "_change_bus_vect" in cls.attr_list_vect: + cls.attr_list_vect.remove("_change_bus_vect") + @classmethod def process_grid2op_compat(cls): + super().process_grid2op_compat() + glop_ver = cls._get_grid2op_version_as_version_obj() + if cls.glop_version == cls.BEFORE_COMPAT_VERSION: # oldest version: no storage and no curtailment available - - # this is really important, otherwise things from grid2op base types will be affected - cls.authorized_keys = copy.deepcopy(cls.authorized_keys) - cls.attr_list_vect = copy.deepcopy(cls.attr_list_vect) - cls.attr_list_set = copy.deepcopy(cls.attr_list_set) - - # deactivate storage - cls.set_no_storage() - if "set_storage" in cls.authorized_keys: - cls.authorized_keys.remove("set_storage") - if "_storage_power" in cls.attr_list_vect: - cls.attr_list_vect.remove("_storage_power") - cls.attr_list_set = set(cls.attr_list_vect) - - # remove the curtailment - if "curtail" in cls.authorized_keys: - cls.authorized_keys.remove("curtail") - if "_curtail" in cls.attr_list_vect: 
- cls.attr_list_vect.remove("_curtail") - cls.attr_list_set = set(cls.attr_list_vect) - - if cls.glop_version < "1.6.0": + cls._aux_process_old_compat() + + if glop_ver < version.parse("1.6.0"): # this feature did not exist before. cls.dim_alarms = 0 - if cls.glop_version < "1.9.1": + if glop_ver < version.parse("1.9.1"): # this feature did not exist before. cls.dim_alerts = 0 - + + if glop_ver < version.parse("1.10.4.dev0"): + # this feature did not exist before + cls.detailed_topo_desc = None + cls._aux_remove_switches() + + if (cls.n_busbar_per_sub >= 3) or (cls.n_busbar_per_sub == 1): + # only relevant for grid2op >= 1.10.0 + # remove "change_bus" if it's there more than 3 buses (no sense: where to change it ???) + # or if there are only one busbar (cannot change anything) + # if there are only one busbar, the "set_bus" action can still be used + # to disconnect the element, this is why it's not removed + cls._aux_process_n_busbar_per_sub() + + cls.attr_list_set = copy.deepcopy(cls.attr_list_set) + cls.attr_list_set = set(cls.attr_list_vect) + def _reset_modified_flags(self): self._modif_inj = False self._modif_set_bus = False @@ -816,10 +945,13 @@ def _reset_modified_flags(self): self._modif_curtailment = False self._modif_alarm = False self._modif_alert = False + # detailed topology + self._modif_set_switch = False + self._modif_change_switch = False def can_affect_something(self) -> bool: """ - This functions returns True if the current action has any chance to change the grid. + This functions returns ``True`` if the current action has any chance to change the grid. Notes ----- @@ -836,6 +968,8 @@ def can_affect_something(self) -> bool: or self._modif_curtailment or self._modif_alarm or self._modif_alert + or self._modif_set_switch + or self._modif_change_switch ) def _get_array_from_attr_name(self, attr_name): @@ -845,14 +979,15 @@ def _get_array_from_attr_name(self, attr_name): if attr_name in self._dict_inj: res = self._dict_inj[attr_name] else: + cls = type(self) if attr_name == "prod_p" or attr_name == "prod_v": - res = np.full(self.n_gen, fill_value=0.0, dtype=dt_float) + res = np.full(cls.n_gen, fill_value=0.0, dtype=dt_float) elif attr_name == "load_p" or attr_name == "load_q": - res = np.full(self.n_load, fill_value=0.0, dtype=dt_float) + res = np.full(cls.n_load, fill_value=0.0, dtype=dt_float) else: raise Grid2OpException( 'Impossible to find the attribute "{}" ' - 'into the BaseAction of type "{}"'.format(attr_name, type(self)) + 'into the BaseAction of type "{}"'.format(attr_name, cls) ) return res @@ -863,12 +998,16 @@ def _post_process_from_vect(self): self._modif_set_status = (self._set_line_status != 0).any() self._modif_change_status = (self._switch_line_status).any() self._modif_redispatch = ( - np.isfinite(self._redispatch) & (self._redispatch != 0.0) + np.isfinite(self._redispatch) & (np.abs(self._redispatch) >= 1e-7) ).any() - self._modif_storage = (self._storage_power != 0.0).any() - self._modif_curtailment = (self._curtail != -1.0).any() + self._modif_storage = (np.abs(self._storage_power) >= 1e-7).any() + self._modif_curtailment = (np.abs(self._curtail + 1.0) >= 1e-7).any() self._modif_alarm = self._raise_alarm.any() self._modif_alert = self._raise_alert.any() + + if type(self).detailed_topo_desc is not None: + self._modif_set_switch = (self._set_switch_status != 0).any() + self._modif_change_switch = (self._change_switch_status).any() def _assign_attr_from_name(self, attr_nm, vect): if hasattr(self, attr_nm): @@ -879,7 +1018,7 @@ def 
_assign_attr_from_name(self, attr_nm, vect): super()._assign_attr_from_name(attr_nm, vect) self._post_process_from_vect() else: - if np.isfinite(vect).any() and (vect != 0.0).any(): + if np.isfinite(vect).any() and (np.abs(vect) >= 1e-7).any(): self._dict_inj[attr_nm] = vect def check_space_legit(self): @@ -925,7 +1064,7 @@ def get_set_line_status_vect(self) -> np.ndarray: "set_status" if building an :class:`BaseAction`. """ - return np.full(shape=self.n_line, fill_value=0, dtype=dt_int) + return np.full(shape=type(self).n_line, fill_value=0, dtype=dt_int) def get_change_line_status_vect(self) -> np.ndarray: """ @@ -941,7 +1080,7 @@ def get_change_line_status_vect(self) -> np.ndarray: "set_status" if building an :class:`BaseAction`. """ - return np.full(shape=self.n_line, fill_value=False, dtype=dt_bool) + return np.full(shape=type(self).n_line, fill_value=False, dtype=dt_bool) def __eq__(self, other) -> bool: """ @@ -1073,7 +1212,11 @@ def __eq__(self, other) -> bool: return False if not (self.shunt_bus == other.shunt_bus).all(): return False - + + # TODO detailed topology + if type(self).detailed_topo_desc is not None: + pass + return True def _dont_affect_topology(self) -> bool: @@ -1082,8 +1225,72 @@ def _dont_affect_topology(self) -> bool: and (not self._modif_change_bus) and (not self._modif_set_status) and (not self._modif_change_status) + and (not self._modif_set_switch) + and (not self._modif_change_switch) ) + def compute_switches_status(self): + """This function is used to "process" the action on switches and convert + it on action of type set_bus / change_bus + + It can raise some :class:`grid2op.Exceptions.AmbiugousAction` in different cases: + - trying to "set" and "change" the same switch + - trying to "set" and "change" the same busbar coupler + - trying to `set_bus` and modify (set or change) busbar coupler + - trying to `change_bus` and modify (set or change) busbar coupler + - trying to `set_bus` a given element and (in the same action) modify + (set or change) a switch that acts on this same element + - trying to `change_bus` a given element and (in the same action) modify + (set or change) a switch that acts on this same element + + It does not modify the action. + """ + + # TODO detailed topo : implement it ! + # set_line_status = 1 * self._set_line_status # needed ? + # switch_line_status = copy.deepcopy(self._switch_line_status) # needed ? 
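To make the ambiguity rules listed for `compute_switches_status` concrete, this is the kind of combination they are meant to reject. It is a sketch only: it assumes an environment whose grid defines a `detailed_topo_desc` (none of the stock environments do), a hypothetical switch id 12 wired to load 0, and it glosses over the fact that the method still ends in `NotImplementedError` in this patch:

    from grid2op.Exceptions import AmbiguousAction

    act = env.action_space()          # `env` assumed to expose switches (detailed_topo_desc is not None)
    act.load_set_bus = [(0, 2)]       # move load 0 with the "bus" API...
    act.set_switch = [(12, 1)]        # ...and also close a switch acting on that same load (hypothetical id)
    try:
        act.compute_switches_status()
    except AmbiguousAction as exc:
        print("rejected:", exc)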
+ # topology changed + set_topo_vect = 1 * self._set_topo_vect + change_bus_vect = copy.deepcopy(self._change_bus_vect) + shunt_bus = copy.deepcopy(self.shunt_bus) + dtd = type(self).detailed_topo_desc + + if dtd is None: + # nothing to do in this case + return set_topo_vect, change_bus_vect, shunt_bus + + # check ambiguous behaviour + ## switches + if ((self._set_switch_status != 0) & self._change_switch_status).any(): + raise AmbiguousAction("Trying to both set the status of some switches (with 'set_switch') " + "and change it (with 'change_switch') using the same action.") + + id_topo_vect_set = dtd.switches_to_topovect_id[(self._set_switch_status != 0)] + id_topo_vect_set = id_topo_vect_set[id_topo_vect_set != -1] + if (set_topo_vect[id_topo_vect_set] != 0).any(): + raise AmbiguousAction("Trying to modify the status of some switches (with 'set_switch') " + "and set the element to a given busbar (using `set_bus`)") + if change_bus_vect[id_topo_vect_set].any(): + raise AmbiguousAction("Trying to modify the status of some switches (with 'set_switch') " + "and change the busbar to which the same element is connected " + "(using `change_bus`)") + id_topo_vect_change = dtd.switches_to_topovect_id[self._change_switch_status] + id_topo_vect_change = id_topo_vect_change[id_topo_vect_change != -1] + if (set_topo_vect[id_topo_vect_change] != 0).any(): + raise AmbiguousAction("Trying to modify the status of some switches (with 'change_switch') " + "and set the element to a given busbar (using `set_bus`)") + if change_bus_vect[id_topo_vect_change].any(): + raise AmbiguousAction("Trying to modify the status of some switches (with 'change_switch') " + "and change the busbar to which the same element is connected " + "(using `change_bus`)") + # TODO detailed topo : make it ambiguous to modify a substation topology + # with set_bus / change_bus and with set_switch / change_switch at the same same time + + # TODO detailed topo put elsewhere maybe ? + raise NotImplementedError("Not implemented yet, maybe check detailed_topo_desc.from_switches_position") + + return set_topo_vect, change_bus_vect, shunt_bus + def get_topological_impact(self, powerline_status=None) -> Tuple[np.ndarray, np.ndarray]: """ Gives information about the element being impacted by this action. @@ -1207,6 +1414,12 @@ def get_topological_impact(self, powerline_status=None) -> Tuple[np.ndarray, np. effective_change[self.line_ex_pos_topo_vect[disco_set_ex]] = False self._subs_impacted[self._topo_vect_to_sub[effective_change]] = True + + dtd = type(self).detailed_topo_desc + if dtd is not None: + self._subs_impacted[dtd.switches[self._set_switch_status != 0, type(dtd).SUB_COL]] = True + self._subs_impacted[dtd.switches[self._change_switch_status, type(dtd).SUB_COL]] = True + # TODO detailed topo return self._lines_impacted, self._subs_impacted def remove_line_status_from_topo(self, @@ -1487,43 +1700,8 @@ def _assign_iadd_or_warn(self, attr_name, new_value): ) else: getattr(self, attr_name)[:] = new_value - - def __iadd__(self, other): - """ - Add an action to this one. - - Adding an action to myself is equivalent to perform myself, and then perform other (but at the - same step) - - Parameters - ---------- - other: :class:`BaseAction` - - Examples - -------- - - .. code-block:: python - - import grid2op - env_name = "l2rpn_case14_sandbox" # or any other name - env = grid2op.make(env_name) - - act1 = env.action_space() - act1.set_bus = ... 
# for example - print("before += :") - print(act1) - - act2 = env.action_space() - act2.redispatch = ... # for example - print(act2) - - act1 += act 2 - print("after += ") - print(act1) - - """ - - # deal with injections + + def _aux_iadd_inj(self, other): for el in self.attr_list_vect: if el in other._dict_inj: if el not in self._dict_inj: @@ -1538,9 +1716,10 @@ def __iadd__(self, other): warnings.warn( type(self).ERR_ACTION_CUT.format(el) ) - # redispatching + + def _aux_iadd_redisp(self, other): redispatching = other._redispatch - if (redispatching != 0.0).any(): + if (np.abs(redispatching) >= 1e-7).any(): if "_redispatch" not in self.attr_list_set: warnings.warn( type(self).ERR_ACTION_CUT.format("_redispatch") @@ -1548,21 +1727,10 @@ def __iadd__(self, other): else: ok_ind = np.isfinite(redispatching) self._redispatch[ok_ind] += redispatching[ok_ind] - - # storage - set_storage = other._storage_power - ok_ind = np.isfinite(set_storage) & (set_storage != 0.0).any() - if ok_ind.any(): - if "_storage_power" not in self.attr_list_set: - warnings.warn( - type(self).ERR_ACTION_CUT.format("_storage_power") - ) - else: - self._storage_power[ok_ind] += set_storage[ok_ind] - - # curtailment + + def _aux_iadd_curtail(self, other): curtailment = other._curtail - ok_ind = np.isfinite(curtailment) & (curtailment != -1.0) + ok_ind = np.isfinite(curtailment) & (np.abs(curtailment + 1.0) >= 1e-7) if ok_ind.any(): if "_curtail" not in self.attr_list_set: warnings.warn( @@ -1573,8 +1741,57 @@ def __iadd__(self, other): # the curtailment of rhs, only when rhs acts # on curtailment self._curtail[ok_ind] = curtailment[ok_ind] - - # set and change status + + def _aux_iadd_storage(self, other): + set_storage = other._storage_power + ok_ind = np.isfinite(set_storage) & (np.abs(set_storage) >= 1e-7).any() + if ok_ind.any(): + if "_storage_power" not in self.attr_list_set: + warnings.warn( + type(self).ERR_ACTION_CUT.format("_storage_power") + ) + else: + self._storage_power[ok_ind] += set_storage[ok_ind] + + def _aux_iadd_modif_flags(self, other): + self._modif_change_bus = self._modif_change_bus or other._modif_change_bus + self._modif_set_bus = self._modif_set_bus or other._modif_set_bus + self._modif_change_status = ( + self._modif_change_status or other._modif_change_status + ) + self._modif_set_status = self._modif_set_status or other._modif_set_status + self._modif_inj = self._modif_inj or other._modif_inj + self._modif_redispatch = self._modif_redispatch or other._modif_redispatch + self._modif_storage = self._modif_storage or other._modif_storage + self._modif_curtailment = self._modif_curtailment or other._modif_curtailment + self._modif_alarm = self._modif_alarm or other._modif_alarm + self._modif_alert = self._modif_alert or other._modif_alert + + def _aux_iadd_shunt(self, other): + if not type(other).shunts_data_available: + warnings.warn("Trying to add an action that does not support " + "shunt with an action that does.") + return + + val = other.shunt_p + ok_ind = np.isfinite(val) + shunt_p = 1.0 * self.shunt_p + shunt_p[ok_ind] = val[ok_ind] + self._assign_iadd_or_warn("shunt_p", shunt_p) + + val = other.shunt_q + ok_ind = np.isfinite(val) + shunt_q = 1.0 * self.shunt_q + shunt_q[ok_ind] = val[ok_ind] + self._assign_iadd_or_warn("shunt_q", shunt_q) + + val = other.shunt_bus + ok_ind = val != 0 + shunt_bus = 1 * self.shunt_bus + shunt_bus[ok_ind] = val[ok_ind] + self._assign_iadd_or_warn("shunt_bus", shunt_bus) + + def _aux_iadd_set_change_status(self, other): other_set = other._set_line_status 
other_change = other._switch_line_status me_set = 1 * self._set_line_status @@ -1603,8 +1820,8 @@ def __iadd__(self, other): self._assign_iadd_or_warn("_set_line_status", me_set) self._assign_iadd_or_warn("_switch_line_status", me_change) - - # set and change bus + + def _aux_iadd_set_change_bus(self, other): other_set = other._set_topo_vect other_change = other._change_bus_vect me_set = 1 * self._set_topo_vect @@ -1635,26 +1852,63 @@ def __iadd__(self, other): self._assign_iadd_or_warn("_set_topo_vect", me_set) self._assign_iadd_or_warn("_change_bus_vect", me_change) + + def __iadd__(self, other: Self): + """ + Add an action to this one. + + Adding an action to myself is equivalent to perform myself, and then perform other (but at the + same step) + + Parameters + ---------- + other: :class:`BaseAction` + + Examples + -------- + + .. code-block:: python + + import grid2op + env_name = "l2rpn_case14_sandbox" # or any other name + env = grid2op.make(env_name) + + act1 = env.action_space() + act1.set_bus = ... # for example + print("before += :") + print(act1) + + act2 = env.action_space() + act2.redispatch = ... # for example + print(act2) + + act1 += act 2 + print("after += ") + print(act1) + + """ + + # deal with injections + self._aux_iadd_inj(other) + + # redispatching + self._aux_iadd_redisp(other) + + # storage + self._aux_iadd_storage(other) + + # curtailment + self._aux_iadd_curtail(other) + + # set and change status + self._aux_iadd_set_change_status(other) + + # set and change bus + self._aux_iadd_set_change_bus(other) # shunts if type(self).shunts_data_available: - val = other.shunt_p - ok_ind = np.isfinite(val) - shunt_p = 1.0 * self.shunt_p - shunt_p[ok_ind] = val[ok_ind] - self._assign_iadd_or_warn("shunt_p", shunt_p) - - val = other.shunt_q - ok_ind = np.isfinite(val) - shunt_q = 1.0 * self.shunt_q - shunt_q[ok_ind] = val[ok_ind] - self._assign_iadd_or_warn("shunt_q", shunt_q) - - val = other.shunt_bus - ok_ind = val != 0 - shunt_bus = 1 * self.shunt_bus - shunt_bus[ok_ind] = val[ok_ind] - self._assign_iadd_or_warn("shunt_bus", shunt_bus) + self._aux_iadd_shunt(other) # alarm feature self._raise_alarm[other._raise_alarm] = True @@ -1664,19 +1918,7 @@ def __iadd__(self, other): # the modif flags - self._modif_change_bus = self._modif_change_bus or other._modif_change_bus - self._modif_set_bus = self._modif_set_bus or other._modif_set_bus - self._modif_change_status = ( - self._modif_change_status or other._modif_change_status - ) - self._modif_set_status = self._modif_set_status or other._modif_set_status - self._modif_inj = self._modif_inj or other._modif_inj - self._modif_redispatch = self._modif_redispatch or other._modif_redispatch - self._modif_storage = self._modif_storage or other._modif_storage - self._modif_curtailment = self._modif_curtailment or other._modif_curtailment - self._modif_alarm = self._modif_alarm or other._modif_alarm - self._modif_alert = self._modif_alert or other._modif_alert - + self._aux_iadd_modif_flags(other) return self def __add__(self, other) -> "BaseAction": @@ -1811,6 +2053,7 @@ def _digest_shunt(self, dict_): vect_self[:] = tmp elif isinstance(tmp, list): # expected a list: (id shunt, new bus) + cls = type(self) for (sh_id, new_bus) in tmp: if sh_id < 0: raise AmbiguousAction( @@ -1818,11 +2061,23 @@ def _digest_shunt(self, dict_): sh_id ) ) - if sh_id >= self.n_shunt: + if sh_id >= cls.n_shunt: raise AmbiguousAction( "Invalid shunt id {}. 
Shunt id should be less than the number " - "of shunt {}".format(sh_id, self.n_shunt) + "of shunt {}".format(sh_id, cls.n_shunt) ) + if key_n == "shunt_bus" or key_n == "set_bus": + if new_bus <= -2: + raise AmbiguousAction( + f"Cannot ask for a shunt bus <= -2, found {new_bus} for shunt id {sh_id}" + ) + elif new_bus > cls.n_busbar_per_sub: + raise AmbiguousAction( + f"Cannot ask for a shunt bus > {cls.n_busbar_per_sub} " + f"the maximum number of busbar per substations" + f", found {new_bus} for shunt id {sh_id}" + ) + vect_self[sh_id] = new_bus elif tmp is None: pass @@ -1939,6 +2194,10 @@ def _digest_set_status(self, dict_): if "set_line_status" in dict_: # this action can both disconnect or reconnect a powerlines self.line_set_status = dict_["set_line_status"] + + def _digest_set_switch(self, dict_): + if "set_switch" in dict_: + self.set_switch = dict_["set_switch"] def _digest_hazards(self, dict_): if "hazards" in dict_: @@ -1954,7 +2213,7 @@ def _digest_hazards(self, dict_): raise AmbiguousAction( f'You ask to perform hazard on powerlines, this can only be done if "hazards" can be casted ' f"into a numpy ndarray with error {exc_}" - ) + ) from exc_ if np.issubdtype(tmp.dtype, np.dtype(bool).type): if len(tmp) != self.n_line: raise InvalidNumberOfLines( @@ -1986,7 +2245,7 @@ def _digest_maintenance(self, dict_): raise AmbiguousAction( f'You ask to perform maintenance on powerlines, this can only be done if "maintenance" can ' f"be casted into a numpy ndarray with error {exc_}" - ) + ) from exc_ if np.issubdtype(tmp.dtype, np.dtype(bool).type): if len(tmp) != self.n_line: raise InvalidNumberOfLines( @@ -2011,6 +2270,16 @@ def _digest_change_status(self, dict_): # Lines with "0" in this vector are not impacted. if dict_["change_line_status"] is not None: self.line_change_status = dict_["change_line_status"] + + def _digest_change_switch(self, dict_): + if "change_switch" in dict_: + # the action will switch the status of the powerline + # for each element equal to 1 in this dict_["change_line_status"] + # if the status is "disconnected" it will be transformed into "connected" + # and if the status is "connected" it will be switched to "disconnected" + # Lines with "0" in this vector are not impacted. 
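With `_digest_set_switch` and `_digest_change_switch` above, the `update()` dictionary gains two keys. Their exact payload is not pinned down in this diff, so the sketch below simply assumes an encoding mirroring the other `set_*` / `change_*` keys and an action space taken from a grid that registers a `detailed_topo_desc`:

    def act_on_switches(action_space):
        # hypothetical: stock environments do not define switches at the time of this patch
        return action_space({
            "set_switch": [(3, 1), (4, -1)],   # assumed encoding: (switch_id, +1 to close / -1 to open)
            "change_switch": [7],              # assumed encoding: toggle switch 7
        })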
+ if dict_["change_switch"] is not None: + self.change_switch = dict_["change_switch"] def _digest_redispatching(self, dict_): if "redispatch" in dict_: @@ -2018,11 +2287,31 @@ def _digest_redispatching(self, dict_): def _digest_storage(self, dict_): if "set_storage" in dict_: - self.storage_p = dict_["set_storage"] - + try: + self.storage_p = dict_["set_storage"] + except IllegalAction as exc_: + cls = type(self) + # only raise the error if I am not in compat mode + if cls.glop_version == grid2op.__version__: + raise exc_ + else: + # TODO be more specific on the version + warnings.warn(f"Ignored error on storage units, because " + f"you are in a backward compatibility mode.") + def _digest_curtailment(self, dict_): if "curtail" in dict_: - self.curtail = dict_["curtail"] + try: + self.curtail = dict_["curtail"] + except IllegalAction as exc_: + cls = type(self) + # only raise the error if I am not in compat mode + if cls.glop_version == grid2op.__version__: + raise exc_ + else: + # TODO be more specific on the version + warnings.warn(f"Ignored error on curtailment, because " + f"you are in a backward compatibility mode.") def _digest_alarm(self, dict_): """ @@ -2049,7 +2338,9 @@ def _reset_vect(self): self._subs_impacted = None self._lines_impacted = None - def update(self, dict_): + def update(self, + dict_: DICT_ACT_TYPING + ): """ Update the action with a comprehensible format specified by a dictionary. @@ -2103,13 +2394,19 @@ def update(self, dict_): - 0 -> don't change anything - +1 -> set to bus 1, - - +2 -> set to bus 2, etc. + - +2 -> set to bus 2 + - +3 -> set to bus 3 (grid2op >= 1.10.0) + - etc. - -1: You can use this method to disconnect an object by setting the value to -1. - "change_bus": (numpy bool vector or dictionary) will change the bus to which the object is connected. - True will + ``True`` will change it (eg switch it from bus 1 to bus 2 or from bus 2 to bus 1). NB this is only active if the system has only 2 buses per substation. + + .. note:: + Change in version: 1.10.0 This feature is deactivated if `act.n_busbar_per_sub >= 3` + or `act.n_busbar_per_sub == 1` - "redispatch": the best use of this is to specify either the numpy array of the redispatch vector you want to apply (that should have the size of the number of generators on the grid) or to specify a list of @@ -2130,13 +2427,19 @@ def update(self, dict_): - If "change_bus" is True, then objects will be moved from one bus to another. If the object were on bus 1 then it will be moved on bus 2, and if it were on bus 2, it will be moved on bus 1. If the object is disconnected then the action is ambiguous, and calling it will throw an AmbiguousAction exception. + + - "curtail" : TODO + - "raise_alarm" : TODO + - "raise_alert": TODO + + - TODO detailed topo **NB**: CHANGES: you can reconnect a powerline without specifying on each bus you reconnect it at both its ends. In that case the last known bus id for each its end is used. **NB**: if for a given powerline, both switch_line_status and set_line_status is set, the action will not be usable. - This will lead to an :class:`grid2op.Exception.AmbiguousAction` exception. + This will lead to an :class:`grid2op.Exceptions.AmbiguousAction` exception. **NB**: The length of vectors provided here is NOT check in this function. This method can be "chained" and only on the final action, when used, eg. in the Backend, is checked. @@ -2152,12 +2455,15 @@ def update(self, dict_): be used to modify a :class:`grid2op.Backend.Backend`. 
In all the following examples, we suppose that a valid grid2op environment is created, for example with: + .. code-block:: python import grid2op + from grid2op.Action import BaseAction + env_name = "l2rpn_case14_sandbox" # create a simple environment # and make sure every type of action can be used. - env = grid2op.make(action_class=grid2op.Action.Action) + env = grid2op.make(env_name, action_class=BaseAction) *Example 1*: modify the load active values to set them all to 1. You can replace "load_p" by "load_q", "prod_p" or "prod_v" to change the load reactive value, the generator active setpoint or the generator @@ -2178,8 +2484,8 @@ def update(self, dict_): # there is a shortcut to do that: disconnect_powerline2 = env.disconnect_powerline(line_id=1) - *Example 3*: force the reconnection of the powerline of id 5 by connected it to bus 1 on its origin end and - bus 2 on its extremity end. + *Example 3*: force the reconnection of the powerline of id 5 by connected it to bus 1 on its origin side and + bus 2 on its extremity side. .. code-block:: python @@ -2261,7 +2567,10 @@ def update(self, dict_): self._digest_change_status(dict_) self._digest_alarm(dict_) self._digest_alert(dict_) - + + # todo detailed topo + self._digest_change_switch(dict_) + self._digest_set_switch(dict_) return self def is_ambiguous(self) -> Tuple[bool, AmbiguousAction]: @@ -2340,7 +2649,7 @@ def _check_for_correct_modif_flags(self): "You illegally act on the powerline status (using change)" ) - if (self._redispatch != 0.0).any(): + if (np.abs(self._redispatch) >= 1e-7).any(): if not self._modif_redispatch: raise AmbiguousAction( "A action of type redispatch is performed while the appropriate flag " @@ -2351,7 +2660,7 @@ def _check_for_correct_modif_flags(self): if "redispatch" not in self.authorized_keys: raise IllegalAction("You illegally act on the redispatching") - if (self._storage_power != 0.0).any(): + if (np.abs(self._storage_power) >= 1e-7).any(): if not self._modif_storage: raise AmbiguousAction( "A action on the storage unit is performed while the appropriate flag " @@ -2362,7 +2671,7 @@ def _check_for_correct_modif_flags(self): if "set_storage" not in self.authorized_keys: raise IllegalAction("You illegally act on the storage unit") - if (self._curtail != -1.0).any(): + if (np.abs(self._curtail + 1.0) >= 1e-7).any(): if not self._modif_curtailment: raise AmbiguousAction( "A curtailment is performed while the action is not supposed to have done so. 
" @@ -2389,6 +2698,40 @@ def _check_for_correct_modif_flags(self): if "raise_alert" not in self.authorized_keys: raise IllegalAction("You illegally send an alert.") + if type(self).detailed_topo_desc is None: + # no detailed topo information + if self._set_switch_status is not None: + raise AmbiguousAction("You tried to modified switches (`_set_switch_status`) " + "without providing detailed topology information.") + if self._change_switch_status is not None: + raise AmbiguousAction("You tried to modified switches (`_change_switch_status`) " + "without providing detailed topology information.") + if self._modif_set_switch: + raise AmbiguousAction("You tried to modified switches (`_modif_set_switch`) " + "without providing detailed topology information.") + if self._modif_change_switch: + raise AmbiguousAction("You tried to modified switches (`_modif_change_switch`) " + "without providing detailed topology information.") + else: + # some detailed information is present + if (self._change_switch_status).any(): + # user modified switches + if "change_switch" not in self.authorized_keys: + raise AmbiguousAction("You tried to modified switches (`_change_switch_status`) " + "but your action does not allow it.") + if not self._modif_change_switch: + raise AmbiguousAction("You tried to modified switches (_change_switch_status) " + "but the action has not registered it.") + + if (self._set_switch_status != 0).any(): + # user modified switches + if "set_switch" not in self.authorized_keys: + raise AmbiguousAction("You tried to modified switches (`_set_switch_status`) " + "but your action does not allow it.") + if not self._modif_set_switch: + raise AmbiguousAction("You tried to modified switches (_set_switch_status) " + "but the action has not registered it.") + def _check_for_ambiguity(self): """ This method checks if an action is ambiguous or not. If the instance is ambiguous, an @@ -2431,6 +2774,10 @@ def _check_for_ambiguity(self): - the redispatching and the production setpoint, if added, are above pmax for at least a generator - the redispatching and the production setpoint, if added, are below pmin for at least a generator + - For switches, ambiguous actions can come from: + + - TODO + In case of need to overload this method, it is advise to still call this one from the base :class:`BaseAction` with ":code:`super()._check_for_ambiguity()`" or ":code:`BaseAction._check_for_ambiguity(self)`". 
@@ -2443,7 +2790,8 @@ def _check_for_ambiguity(self): """ # check that the correct flags are properly computed self._check_for_correct_modif_flags() - + cls = type(self) + if ( self._modif_change_status and self._modif_set_status @@ -2458,58 +2806,58 @@ def _check_for_ambiguity(self): # check size if self._modif_inj: if "load_p" in self._dict_inj: - if len(self._dict_inj["load_p"]) != self.n_load: + if len(self._dict_inj["load_p"]) != cls.n_load: raise InvalidNumberOfLoads( "This action acts on {} loads while there are {} " "in the _grid".format( - len(self._dict_inj["load_p"]), self.n_load + len(self._dict_inj["load_p"]), cls.n_load ) ) if "load_q" in self._dict_inj: - if len(self._dict_inj["load_q"]) != self.n_load: + if len(self._dict_inj["load_q"]) != cls.n_load: raise InvalidNumberOfLoads( "This action acts on {} loads while there are {} in " - "the _grid".format(len(self._dict_inj["load_q"]), self.n_load) + "the _grid".format(len(self._dict_inj["load_q"]), cls.n_load) ) if "prod_p" in self._dict_inj: - if len(self._dict_inj["prod_p"]) != self.n_gen: + if len(self._dict_inj["prod_p"]) != cls.n_gen: raise InvalidNumberOfGenerators( "This action acts on {} generators while there are {} in " - "the _grid".format(len(self._dict_inj["prod_p"]), self.n_gen) + "the _grid".format(len(self._dict_inj["prod_p"]), cls.n_gen) ) if "prod_v" in self._dict_inj: - if len(self._dict_inj["prod_v"]) != self.n_gen: + if len(self._dict_inj["prod_v"]) != cls.n_gen: raise InvalidNumberOfGenerators( "This action acts on {} generators while there are {} in " - "the _grid".format(len(self._dict_inj["prod_v"]), self.n_gen) + "the _grid".format(len(self._dict_inj["prod_v"]), cls.n_gen) ) - if len(self._switch_line_status) != self.n_line: + if len(self._switch_line_status) != cls.n_line: raise InvalidNumberOfLines( "This action acts on {} lines while there are {} in " - "the _grid".format(len(self._switch_line_status), self.n_line) + "the _grid".format(len(self._switch_line_status), cls.n_line) ) - if len(self._set_topo_vect) != self.dim_topo: + if len(self._set_topo_vect) != cls.dim_topo: raise InvalidNumberOfObjectEnds( "This action acts on {} ends of object while there are {} " - "in the _grid".format(len(self._set_topo_vect), self.dim_topo) + "in the _grid".format(len(self._set_topo_vect), cls.dim_topo) ) - if len(self._change_bus_vect) != self.dim_topo: + if len(self._change_bus_vect) != cls.dim_topo: raise InvalidNumberOfObjectEnds( "This action acts on {} ends of object while there are {} " - "in the _grid".format(len(self._change_bus_vect), self.dim_topo) + "in the _grid".format(len(self._change_bus_vect), cls.dim_topo) ) - if len(self._redispatch) != self.n_gen: + if len(self._redispatch) != cls.n_gen: raise InvalidNumberOfGenerators( "This action acts on {} generators (redispatching= while " - "there are {} in the grid".format(len(self._redispatch), self.n_gen) + "there are {} in the grid".format(len(self._redispatch), cls.n_gen) ) # redispatching specific check if self._modif_redispatch: - if "redispatch" not in self.authorized_keys: + if "redispatch" not in cls.authorized_keys: raise AmbiguousAction( 'Action of type "redispatch" are not supported by this action type' ) @@ -2519,17 +2867,17 @@ def _check_for_ambiguity(self): "environment. 
Please set up the proper costs for generator" ) - if (self._redispatch[~self.gen_redispatchable] != 0.0).any(): + if (np.abs(self._redispatch[~cls.gen_redispatchable]) >= 1e-7).any(): raise InvalidRedispatching( "Trying to apply a redispatching action on a non redispatchable generator" ) if self._single_act: - if (self._redispatch > self.gen_max_ramp_up).any(): + if (self._redispatch > cls.gen_max_ramp_up).any(): raise InvalidRedispatching( "Some redispatching amount are above the maximum ramp up" ) - if (-self._redispatch > self.gen_max_ramp_down).any(): + if (-self._redispatch > cls.gen_max_ramp_down).any(): raise InvalidRedispatching( "Some redispatching amount are bellow the maximum ramp down" ) @@ -2538,12 +2886,12 @@ def _check_for_ambiguity(self): new_p = self._dict_inj["prod_p"] tmp_p = new_p + self._redispatch indx_ok = np.isfinite(new_p) - if (tmp_p[indx_ok] > self.gen_pmax[indx_ok]).any(): + if (tmp_p[indx_ok] > cls.gen_pmax[indx_ok]).any(): raise InvalidRedispatching( "Some redispatching amount, cumulated with the production setpoint, " "are above pmax for some generator." ) - if (tmp_p[indx_ok] < self.gen_pmin[indx_ok]).any(): + if (tmp_p[indx_ok] < cls.gen_pmin[indx_ok]).any(): raise InvalidRedispatching( "Some redispatching amount, cumulated with the production setpoint, " "are below pmin for some generator." @@ -2572,7 +2920,7 @@ def _check_for_ambiguity(self): "1 (assign this object to bus one) or 2 (assign this object to bus" "2). A negative number has been found." ) - if self._modif_set_bus and (self._set_topo_vect > 2).any(): + if self._modif_set_bus and (self._set_topo_vect > cls.n_busbar_per_sub).any(): raise InvalidBusStatus( "Invalid set_bus. Buses should be either -1 (disconnect), 0 (change nothing)," "1 (assign this object to bus one) or 2 (assign this object to bus" @@ -2598,62 +2946,62 @@ def _check_for_ambiguity(self): ) if self._modif_set_bus: - disco_or = self._set_topo_vect[self.line_or_pos_topo_vect] == -1 - if (self._set_topo_vect[self.line_ex_pos_topo_vect][disco_or] > 0).any(): + disco_or = self._set_topo_vect[cls.line_or_pos_topo_vect] == -1 + if (self._set_topo_vect[cls.line_ex_pos_topo_vect][disco_or] > 0).any(): raise InvalidLineStatus( - "A powerline is connected (set to a bus at extremity end) and " - "disconnected (set to bus -1 at origin end)" + "A powerline is connected (set to a bus at extremity side) and " + "disconnected (set to bus -1 at origin side)" ) - disco_ex = self._set_topo_vect[self.line_ex_pos_topo_vect] == -1 - if (self._set_topo_vect[self.line_or_pos_topo_vect][disco_ex] > 0).any(): + disco_ex = self._set_topo_vect[cls.line_ex_pos_topo_vect] == -1 + if (self._set_topo_vect[cls.line_or_pos_topo_vect][disco_ex] > 0).any(): raise InvalidLineStatus( - "A powerline is connected (set to a bus at origin end) and " - "disconnected (set to bus -1 at extremity end)" + "A powerline is connected (set to a bus at origin side) and " + "disconnected (set to bus -1 at extremity side)" ) # if i disconnected of a line, but i modify also the bus where it's connected if self._modif_set_bus or self._modif_change_bus: idx = self._set_line_status == -1 - id_disc = np.where(idx)[0] + id_disc = (idx).nonzero()[0] idx2 = self._set_line_status == 1 - id_reco = np.where(idx2)[0] + id_reco = (idx2).nonzero()[0] if self._modif_set_bus: - if "set_bus" not in self.authorized_keys: + if "set_bus" not in cls.authorized_keys: raise AmbiguousAction( 'Action of type "set_bus" are not supported by this action type' ) if ( - 
self._set_topo_vect[self.line_or_pos_topo_vect[id_disc]] > 0 - ).any() or (self._set_topo_vect[self.line_ex_pos_topo_vect[id_disc]] > 0).any(): + self._set_topo_vect[cls.line_or_pos_topo_vect[id_disc]] > 0 + ).any() or (self._set_topo_vect[cls.line_ex_pos_topo_vect[id_disc]] > 0).any(): raise InvalidLineStatus( "You ask to disconnect a powerline but also to connect it " "to a certain bus." ) if ( - self._set_topo_vect[self.line_or_pos_topo_vect[id_reco]] == -1 - ).any() or (self._set_topo_vect[self.line_ex_pos_topo_vect[id_reco]] == -1).any(): + self._set_topo_vect[cls.line_or_pos_topo_vect[id_reco]] == -1 + ).any() or (self._set_topo_vect[cls.line_ex_pos_topo_vect[id_reco]] == -1).any(): raise InvalidLineStatus( "You ask to reconnect a powerline but also to disconnect it " "from a certain bus." ) if self._modif_change_bus: - if "change_bus" not in self.authorized_keys: + if "change_bus" not in cls.authorized_keys: raise AmbiguousAction( 'Action of type "change_bus" are not supported by this action type' ) if ( - self._change_bus_vect[self.line_or_pos_topo_vect[id_disc]] > 0 - ).any() or (self._change_bus_vect[self.line_ex_pos_topo_vect[id_disc]] > 0).any(): + self._change_bus_vect[cls.line_or_pos_topo_vect[id_disc]] > 0 + ).any() or (self._change_bus_vect[cls.line_ex_pos_topo_vect[id_disc]] > 0).any(): raise InvalidLineStatus( "You ask to disconnect a powerline but also to change its bus." ) if ( self._change_bus_vect[ - self.line_or_pos_topo_vect[self._set_line_status == 1] + cls.line_or_pos_topo_vect[self._set_line_status == 1] ] ).any(): raise InvalidLineStatus( @@ -2662,7 +3010,7 @@ def _check_for_ambiguity(self): ) if ( self._change_bus_vect[ - self.line_ex_pos_topo_vect[self._set_line_status == 1] + cls.line_ex_pos_topo_vect[self._set_line_status == 1] ] ).any(): raise InvalidLineStatus( @@ -2670,21 +3018,21 @@ def _check_for_ambiguity(self): "which it is connected. This is ambiguous. You must *set* this bus instead." ) - if type(self).shunts_data_available: - if self.shunt_p.shape[0] != self.n_shunt: + if cls.shunts_data_available: + if self.shunt_p.shape[0] != cls.n_shunt: raise IncorrectNumberOfElements( "Incorrect number of shunt (for shunt_p) in your action." ) - if self.shunt_q.shape[0] != self.n_shunt: + if self.shunt_q.shape[0] != cls.n_shunt: raise IncorrectNumberOfElements( "Incorrect number of shunt (for shunt_q) in your action." ) - if self.shunt_bus.shape[0] != self.n_shunt: + if self.shunt_bus.shape[0] != cls.n_shunt: raise IncorrectNumberOfElements( "Incorrect number of shunt (for shunt_bus) in your action." 
) - if self.n_shunt > 0: - if np.max(self.shunt_bus) > 2: + if cls.n_shunt > 0: + if np.max(self.shunt_bus) > cls.n_busbar_per_sub: raise AmbiguousAction( "Some shunt is connected to a bus greater than 2" ) @@ -2709,10 +3057,10 @@ def _check_for_ambiguity(self): ) if self._modif_alarm: - if self._raise_alarm.shape[0] != self.dim_alarms: + if self._raise_alarm.shape[0] != cls.dim_alarms: raise AmbiguousAction( f"Wrong number of alarm raised: {self._raise_alarm.shape[0]} raised, expecting " - f"{self.dim_alarms}" + f"{cls.dim_alarms}" ) else: if self._raise_alarm.any(): @@ -2722,10 +3070,10 @@ def _check_for_ambiguity(self): ) if self._modif_alert: - if self._raise_alert.shape[0] != self.dim_alerts: + if self._raise_alert.shape[0] != cls.dim_alerts: raise AmbiguousActionRaiseAlert( f"Wrong number of alert raised: {self._raise_alert.shape[0]} raised, expecting " - f"{self.dim_alerts}" + f"{cls.dim_alerts}" ) else: if self._raise_alert.any(): @@ -2734,77 +3082,144 @@ def _check_for_ambiguity(self): "as doing so. Expect wrong behaviour." ) - def _is_storage_ambiguous(self): - """check if storage actions are ambiguous""" - if self._modif_storage: - if "set_storage" not in self.authorized_keys: + if cls.detailed_topo_desc is not None: + # there are some switches information + self._are_switches_ambiguous() + + def _are_switches_ambiguous(self): + cls = type(self) + dtd = cls.detailed_topo_desc + if self._set_switch_status.shape[0] != dtd.switches.shape[0]: + raise AmbiguousAction("Incorrect number of switches for set_switch in your action.") + if self._change_switch_status.shape[0] != dtd.switches.shape[0]: + raise AmbiguousAction("Incorrect number of switches for change_switch in your action.") + if ((self._modif_change_switch or self._modif_set_switch) and + self._modif_set_bus or self._modif_change_bus): + # trying to affect topology in two different ways... not a great ideas + if (cls.ISSUE_WARNING_SWITCH_SET_CHANGE == "always" or + cls.ISSUE_WARNING_SWITCH_SET_CHANGE == "once" ): + warnings.warn("Grid2op: you modified the topology with set_bus / change_bus " + "and set_switch / change_switch at the same time. Though it's not " + "necessarily ambiguous, we do not recommend to do it.") + if cls.ISSUE_WARNING_SWITCH_SET_CHANGE == "once": + # do not issue another warning like that + cls.ISSUE_WARNING_SWITCH_SET_CHANGE = "never" + + # TODO detailed topo : refacto that with the method get_sub_ids_switch + subs_aff_c_switch = np.unique(dtd.switches[self._change_switch_status, type(dtd).SUB_COL]) + subs_aff_s_switch = np.unique(dtd.switches[self._set_switch_status !=0, type(dtd).SUB_COL]) + subs_aff_c_bus = np.unique(cls.grid_objects_types[self._change_bus_vect,cls.SUB_COL]) + subs_aff_s_bus = np.unique(cls.grid_objects_types[self._set_topo_vect > 0,cls.SUB_COL]) + if np.isin(subs_aff_c_switch, subs_aff_c_bus).any(): + raise AmbiguousAction("You used change_switch and change_bus to modify the topology " + "of a given substation. You cannot affect the same substation " + "with switches or change_bus / set_bus") + if np.isin(subs_aff_c_switch, subs_aff_s_bus).any(): + raise AmbiguousAction("You used change_switch and set_bus to modify the topology " + "of a given substation. You cannot affect the same substation " + "with switches or change_bus / set_bus") + if np.isin(subs_aff_s_switch, subs_aff_c_bus).any(): + raise AmbiguousAction("You used set_switch and change_bus to modify the topology " + "of a given substation. 
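# --- Illustrative sketch, not part of the patch -----------------------------
# `ISSUE_WARNING_SWITCH_SET_CHANGE` above implements a "warn once per class"
# switch that can be "always", "once" or "never".  Stripped-down sketch of the
# mechanism (class and attribute names here are invented for the example):
import warnings

class MixedTopoAction:
    ISSUE_WARNING = "once"        # could also be "always" or "never"

    def check(self):
        cls = type(self)
        if cls.ISSUE_WARNING in ("always", "once"):
            warnings.warn("topology modified with both buses and switches")
            if cls.ISSUE_WARNING == "once":
                cls.ISSUE_WARNING = "never"   # silence every later call

a, b = MixedTopoAction(), MixedTopoAction()
a.check()   # emits the warning
b.check()   # silent: the flag was flipped on the class, not on the instance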
You cannot affect the same substation " + "with switches or change_bus / set_bus") + if np.isin(subs_aff_s_switch, subs_aff_s_bus).any(): + raise AmbiguousAction("You used set_switch and set_bus to modify the topology " + "of a given substation. You cannot affect the same substation " + "with switches or change_bus / set_bus") + + if ((self._set_switch_status != 0) & self._change_switch_status).any(): + raise AmbiguousAction("Trying to both set the status of some switches (with 'set_switch') " + "and change it (with 'change_switch') using the same action.") + + def get_sub_ids_switch(self) -> np.ndarray: + """Return the ids of the substations affected by + an action on switches (either with `set switch` or `change switch`) + + Returns + ------- + np.ndarray + _description_ + """ + cls = type(self) + dtd = cls.detailed_topo_desc + res = np.zeros(cls.n_sub, dtype=dt_bool) + res[dtd.switches[self._change_switch_status, type(dtd).SUB_COL]] = True + res[dtd.switches[self._set_switch_status !=0, type(dtd).SUB_COL]] = True + return res + + def _is_storage_ambiguous(self): + """check if storage actions are ambiguous""" + cls = type(self) + if self._modif_storage: + if "set_storage" not in cls.authorized_keys: raise AmbiguousAction( 'Action of type "set_storage" are not supported by this action type' ) - if self.n_storage == 0: + if cls.n_storage == 0: raise InvalidStorage( "Attempt to modify a storage unit while there is none on the grid" ) - if self._storage_power.shape[0] != self.n_storage: + if self._storage_power.shape[0] != cls.n_storage: raise InvalidStorage( "self._storage_power.shape[0] != self.n_storage: wrong number of storage " "units affected" ) - if (self._storage_power < -self.storage_max_p_prod).any(): - where_bug = np.where(self._storage_power < -self.storage_max_p_prod)[0] + if (self._storage_power < -cls.storage_max_p_prod).any(): + where_bug = (self._storage_power < -cls.storage_max_p_prod).nonzero()[0] raise InvalidStorage( f"you asked a storage unit to absorb more than what it can: " f"self._storage_power[{where_bug}] < -self.storage_max_p_prod[{where_bug}]." ) - if (self._storage_power > self.storage_max_p_absorb).any(): - where_bug = np.where(self._storage_power > self.storage_max_p_absorb)[0] + if (self._storage_power > cls.storage_max_p_absorb).any(): + where_bug = (self._storage_power > cls.storage_max_p_absorb).nonzero()[0] raise InvalidStorage( f"you asked a storage unit to produce more than what it can: " f"self._storage_power[{where_bug}] > self.storage_max_p_absorb[{where_bug}]." 
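# --- Illustrative sketch, not part of the patch -----------------------------
# The conflict checks above reduce to: collect the substations touched by the
# switch part of the action and by the bus part, and reject the action when
# the two sets intersect.  Self-contained sketch with made-up substation ids:
import numpy as np

subs_touched_by_switches = np.array([0, 3, 5])
subs_touched_by_set_bus = np.array([1, 3])

if np.isin(subs_touched_by_switches, subs_touched_by_set_bus).any():
    # the real code raises AmbiguousAction with an explanatory message
    print("conflict on substation(s):",
          np.intersect1d(subs_touched_by_switches, subs_touched_by_set_bus))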
) - if "_storage_power" not in self.attr_list_set: - if (self._set_topo_vect[self.storage_pos_topo_vect] > 0).any(): + if "_storage_power" not in cls.attr_list_set: + if (self._set_topo_vect[cls.storage_pos_topo_vect] > 0).any(): raise InvalidStorage("Attempt to modify bus (set) of a storage unit") - if (self._change_bus_vect[self.storage_pos_topo_vect]).any(): + if (self._change_bus_vect[cls.storage_pos_topo_vect]).any(): raise InvalidStorage("Attempt to modify bus (change) of a storage unit") def _is_curtailment_ambiguous(self): """check if curtailment action is ambiguous""" + cls = type(self) if self._modif_curtailment: - if "curtail" not in self.authorized_keys: + if "curtail" not in cls.authorized_keys: raise AmbiguousAction( 'Action of type "curtail" are not supported by this action type' ) - if not self.redispatching_unit_commitment_availble: + if not cls.redispatching_unit_commitment_availble: raise UnitCommitorRedispachingNotAvailable( "Impossible to use a redispatching action in this " "environment. Please set up the proper costs for generator. " "This also means curtailment feature is not available." ) - if self._curtail.shape[0] != self.n_gen: + if self._curtail.shape[0] != cls.n_gen: raise InvalidCurtailment( "self._curtail.shape[0] != self.n_gen: wrong number of generator " "units affected" ) - if ((self._curtail < 0.0) & (self._curtail != -1.0)).any(): - where_bug = np.where((self._curtail < 0.0) & (self._curtail != -1.0))[0] + if ((self._curtail < 0.0) & (np.abs(self._curtail + 1.0) >= 1e-7)).any(): + where_bug = ((self._curtail < 0.0) & (np.abs(self._curtail + 1.0) >= 1e-7)).nonzero()[0] raise InvalidCurtailment( f"you asked to perform a negative curtailment: " f"self._curtail[{where_bug}] < 0. " f"Curtailment should be a real number between 0.0 and 1.0" ) if (self._curtail > 1.0).any(): - where_bug = np.where(self._curtail > 1.0)[0] + where_bug = (self._curtail > 1.0).nonzero()[0] raise InvalidCurtailment( f"you asked a storage unit to produce more than what it can: " f"self._curtail[{where_bug}] > 1. 
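# --- Illustrative sketch, not part of the patch -----------------------------
# Curtailment convention used by the checks above: each entry is either the
# sentinel -1.0 ("do not curtail this generator") or a ratio of Pmax in
# [0, 1].  Sketch of the validation with a made-up vector and the same
# 1e-7 tolerance as the patch:
import numpy as np

curtail = np.array([-1.0, 0.5, -0.2, 1.3])
gen_renewable = np.array([False, True, True, True])

neg_but_not_sentinel = (curtail < 0.0) & (np.abs(curtail + 1.0) >= 1e-7)
above_one = curtail > 1.0
curtail_on_non_renewable = np.abs(curtail[~gen_renewable] + 1.0) >= 1e-7

print(neg_but_not_sentinel.nonzero()[0])   # [2] -> invalid negative ratio
print(above_one.nonzero()[0])              # [3] -> ratio above 1.0
print(curtail_on_non_renewable.any())      # False -> generator 0 stays at -1.0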
" f"Curtailment should be a real number between 0.0 and 1.0" ) - if (self._curtail[~self.gen_renewable] != -1.0).any(): + if (np.abs(self._curtail[~cls.gen_renewable] +1.0) >= 1e-7).any(): raise InvalidCurtailment( "Trying to apply a curtailment on a non renewable generator" ) @@ -2816,41 +3231,55 @@ def _ignore_topo_action_if_disconnection(self, sel_): self._set_topo_vect[np.array(self.line_ex_pos_topo_vect[sel_])] = 0 self._change_bus_vect[np.array(self.line_ex_pos_topo_vect[sel_])] = False - def _obj_caract_from_topo_id(self, id_): - obj_id = None - objt_type = None - array_subid = None - for l_id, id_in_topo in enumerate(self.load_pos_topo_vect): + def _aux_obj_caract(self, id_, with_name, xxx_pos_topo_vect, objt_type, xxx_subid, name_xxx): + for l_id, id_in_topo in enumerate(xxx_pos_topo_vect): if id_in_topo == id_: obj_id = l_id - objt_type = "load" - array_subid = self.load_to_subid - if obj_id is None: - for l_id, id_in_topo in enumerate(self.gen_pos_topo_vect): - if id_in_topo == id_: - obj_id = l_id - objt_type = "generator" - array_subid = self.gen_to_subid - if obj_id is None: - for l_id, id_in_topo in enumerate(self.line_or_pos_topo_vect): - if id_in_topo == id_: - obj_id = l_id - objt_type = self._line_or_str - array_subid = self.line_or_to_subid - if obj_id is None: - for l_id, id_in_topo in enumerate(self.line_ex_pos_topo_vect): - if id_in_topo == id_: - obj_id = l_id - objt_type = self._line_ex_str - array_subid = self.line_ex_to_subid - if obj_id is None: - for l_id, id_in_topo in enumerate(self.storage_pos_topo_vect): - if id_in_topo == id_: - obj_id = l_id - objt_type = "storage" - array_subid = self.storage_to_subid - substation_id = array_subid[obj_id] - return obj_id, objt_type, substation_id + obj_name = name_xxx[l_id] + substation_id = xxx_subid[obj_id] + if not with_name: + return obj_id, objt_type, substation_id + return obj_id, objt_type, substation_id, obj_name + return None + + def _aux_obj_caract_from_topo_id_load(self, cls, id_, with_name): + return self._aux_obj_caract(id_, with_name, cls.load_pos_topo_vect, "load", cls.load_to_subid, cls.name_load) + + def _aux_obj_caract_from_topo_id_gen(self, cls, id_, with_name): + return self._aux_obj_caract(id_, with_name, cls.gen_pos_topo_vect, + "generator", cls.gen_to_subid, cls.name_gen) + + def _aux_obj_caract_from_topo_id_lor(self, cls, id_, with_name): + return self._aux_obj_caract(id_, with_name, cls.line_or_pos_topo_vect, + self._line_or_str, cls.line_or_to_subid, cls.name_line) + + def _aux_obj_caract_from_topo_id_lex(self, cls, id_, with_name): + return self._aux_obj_caract(id_, with_name, cls.line_ex_pos_topo_vect, + self._line_ex_str, cls.line_ex_to_subid, cls.name_line) + + def _aux_obj_caract_from_topo_storage(self, cls, id_, with_name): + return self._aux_obj_caract(id_, with_name, cls.storage_pos_topo_vect, + "storage", cls.storage_to_subid, cls.name_storage) + + def _obj_caract_from_topo_id(self, id_, with_name=False): + # TODO refactor this with gridobj.topo_vect_element + cls = type(self) + tmp = self._aux_obj_caract_from_topo_id_load(cls, id_, with_name) + if tmp is not None: + return tmp + tmp = self._aux_obj_caract_from_topo_id_gen(cls, id_, with_name) + if tmp is not None: + return tmp + tmp = self._aux_obj_caract_from_topo_id_lor(cls, id_, with_name) + if tmp is not None: + return tmp + tmp = self._aux_obj_caract_from_topo_id_lex(cls, id_, with_name) + if tmp is not None: + return tmp + tmp = self._aux_obj_caract_from_topo_storage(cls, id_, with_name) + if tmp is not None: + return tmp + raise 
Grid2OpException(f"Unknown element in topovect with id {id_}") def __str__(self) -> str: """ @@ -2889,7 +3318,7 @@ def __str__(self) -> str: "\t - Modify the generators with redispatching in the following way:" ) for gen_idx in range(self.n_gen): - if self._redispatch[gen_idx] != 0.0: + if np.abs(self._redispatch[gen_idx]) >= 1e-7: gen_name = self.name_gen[gen_idx] r_amount = self._redispatch[gen_idx] res.append( @@ -2905,7 +3334,7 @@ def __str__(self) -> str: res.append("\t - Modify the storage units in the following way:") for stor_idx in range(self.n_storage): amount_ = self._storage_power[stor_idx] - if np.isfinite(amount_) and amount_ != 0.0: + if np.isfinite(amount_) and np.abs(amount_) >= 1e-7: name_ = self.name_storage[stor_idx] res.append( '\t \t - Ask unit "{}" to {} {:.2f} MW (setpoint: {:.2f} MW)' @@ -2924,7 +3353,7 @@ def __str__(self) -> str: res.append("\t - Perform the following curtailment:") for gen_idx in range(self.n_gen): amount_ = self._curtail[gen_idx] - if np.isfinite(amount_) and amount_ != -1.0: + if np.isfinite(amount_) and np.abs(amount_ + 1.0) >= 1e-7: name_ = self.name_gen[gen_idx] res.append( '\t \t - Limit unit "{}" to {:.1f}% of its Pmax (setpoint: {:.3f})' @@ -3009,7 +3438,7 @@ def __str__(self) -> str: if my_cls.dim_alarms > 0: if self._modif_alarm: li_area = np.array(my_cls.alarms_area_names)[ - np.where(self._raise_alarm)[0] + (self._raise_alarm).nonzero()[0] ] if len(li_area) == 1: area_str = ": " + li_area[0] @@ -3021,7 +3450,7 @@ def __str__(self) -> str: if my_cls.dim_alerts > 0: if self._modif_alert: - i_alert = np.where(self._raise_alert)[0] + i_alert = (self._raise_alert).nonzero()[0] li_line = np.array(my_cls.alertable_line_names)[i_alert] if len(li_line) == 1: line_str = f": {i_alert[0]} (on line {li_line[0]})" @@ -3067,9 +3496,8 @@ def impact_on_objects(self) -> dict: force_line_status["reconnections"]["count"] = ( self._set_line_status == 1 ).sum() - force_line_status["reconnections"]["powerlines"] = np.where( - self._set_line_status == 1 - )[0] + force_line_status["reconnections"]["powerlines"] = ( + (self._set_line_status == 1).nonzero()[0]) if (self._set_line_status == -1).any(): force_line_status["changed"] = True @@ -3077,9 +3505,9 @@ def impact_on_objects(self) -> dict: force_line_status["disconnections"]["count"] = ( self._set_line_status == -1 ).sum() - force_line_status["disconnections"]["powerlines"] = np.where( - self._set_line_status == -1 - )[0] + force_line_status["disconnections"]["powerlines"] = ( + (self._set_line_status == -1).nonzero()[0] + ) # handles action on swtich line status switch_line_status = {"changed": False, "count": 0, "powerlines": []} @@ -3087,7 +3515,7 @@ def impact_on_objects(self) -> dict: switch_line_status["changed"] = True has_impact = True switch_line_status["count"] = self._switch_line_status.sum() - switch_line_status["powerlines"] = np.where(self._switch_line_status)[0] + switch_line_status["powerlines"] = (self._switch_line_status).nonzero()[0] topology = { "changed": False, @@ -3145,9 +3573,9 @@ def impact_on_objects(self) -> dict: # handle redispatching redispatch = {"changed": False, "generators": []} - if (self._redispatch != 0.0).any(): + if (np.abs(self._redispatch) >= 1e-7).any(): for gen_idx in range(self.n_gen): - if self._redispatch[gen_idx] != 0.0: + if np.abs(self._redispatch[gen_idx]) >= 1e-7: gen_name = self.name_gen[gen_idx] r_amount = self._redispatch[gen_idx] redispatch["generators"].append( @@ -3177,7 +3605,7 @@ def impact_on_objects(self) -> dict: if self._modif_curtailment: for 
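# --- Illustrative sketch, not part of the patch -----------------------------
# `_obj_caract_from_topo_id` (refactored above into per-element helpers) maps
# a position of the flat topology vector back to (element id, type,
# substation, name).  Minimal standalone version of the same lookup on toy
# data; every array below is invented for the example:
import numpy as np

load_pos_topo_vect, gen_pos_topo_vect = np.array([0, 3]), np.array([1, 4])
load_to_subid, gen_to_subid = np.array([0, 1]), np.array([0, 1])
name_load, name_gen = np.array(["load_0", "load_1"]), np.array(["gen_0", "gen_1"])

def obj_caract_from_topo_id(id_):
    tables = [
        ("load", load_pos_topo_vect, load_to_subid, name_load),
        ("generator", gen_pos_topo_vect, gen_to_subid, name_gen),
    ]
    for objt_type, pos_vect, subid, names in tables:
        match = (pos_vect == id_).nonzero()[0]
        if match.size:                       # this element type owns the slot
            obj_id = int(match[0])
            return obj_id, objt_type, int(subid[obj_id]), str(names[obj_id])
    raise ValueError(f"Unknown element in topo vect with id {id_}")

print(obj_caract_from_topo_id(4))   # (1, 'generator', 1, 'gen_1')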
gen_idx in range(self.n_gen): tmp = self._curtail[gen_idx] - if np.isfinite(tmp) and tmp != -1: + if np.isfinite(tmp) and np.abs(tmp + 1.) >= 1e-7: name_ = self.name_gen[gen_idx] new_max = tmp curtailment["limit"].append( @@ -3190,6 +3618,7 @@ def impact_on_objects(self) -> dict: curtailment["changed"] = True has_impact = True + # TODO detailed topo return { "has_impact": has_impact, "injection": inject_detail, @@ -3201,7 +3630,85 @@ def impact_on_objects(self) -> dict: "curtailment": curtailment, } - def as_dict(self) -> dict: + def _aux_as_dict_set_line(self, res): + res["set_line_status"] = {} + res["set_line_status"]["nb_connected"] = (self._set_line_status == 1).sum() + res["set_line_status"]["nb_disconnected"] = ( + self._set_line_status == -1 + ).sum() + res["set_line_status"]["connected_id"] = ( + (self._set_line_status == 1).nonzero()[0] + ) + res["set_line_status"]["disconnected_id"] = ( + (self._set_line_status == -1).nonzero()[0] + ) + + def _aux_as_dict_change_line(self, res): + res["change_line_status"] = {} + res["change_line_status"]["nb_changed"] = self._switch_line_status.sum() + res["change_line_status"]["changed_id"] = ( + self._switch_line_status.nonzero()[0] + ) + + def _aux_as_dict_change_bus(self, res): + res["change_bus_vect"] = {} + res["change_bus_vect"]["nb_modif_objects"] = self._change_bus_vect.sum() + all_subs = set() + for id_, k in enumerate(self._change_bus_vect): + if k: + obj_id, objt_type, substation_id, nm_ = self._obj_caract_from_topo_id( + id_, with_name=True + ) + sub_id = "{}".format(substation_id) + if not sub_id in res["change_bus_vect"]: + res["change_bus_vect"][sub_id] = {} + res["change_bus_vect"][sub_id][nm_] = { + "type": objt_type, + "id": obj_id, + } + all_subs.add(sub_id) + + res["change_bus_vect"]["nb_modif_subs"] = len(all_subs) + res["change_bus_vect"]["modif_subs_id"] = sorted(all_subs) + + def _aux_as_dict_set_bus(self, res): + res["set_bus_vect"] = {} + res["set_bus_vect"]["nb_modif_objects"] = (self._set_topo_vect != 0).sum() + all_subs = set() + for id_, k in enumerate(self._set_topo_vect): + if k != 0: + obj_id, objt_type, substation_id, nm_ = self._obj_caract_from_topo_id( + id_, with_name=True + ) + sub_id = "{}".format(substation_id) + if not sub_id in res["set_bus_vect"]: + res["set_bus_vect"][sub_id] = {} + res["set_bus_vect"][sub_id][nm_] = { + "type": objt_type, + "id": obj_id, + "new_bus": k, + } + all_subs.add(sub_id) + + res["set_bus_vect"]["nb_modif_subs"] = len(all_subs) + res["set_bus_vect"]["modif_subs_id"] = sorted(all_subs) + + def _aux_as_dict_shunt(self, res): + tmp = {} + if np.any(np.isfinite(self.shunt_p)): + tmp["shunt_p"] = 1.0 * self.shunt_p + if np.any(np.isfinite(self.shunt_q)): + tmp["shunt_q"] = 1.0 * self.shunt_q + if np.any(self.shunt_bus != 0): + tmp["shunt_bus"] = 1.0 * self.shunt_bus + if tmp: + res["shunt"] = tmp + + def as_dict(self) -> Dict[Literal["load_p", "load_q", "prod_p", "prod_v", + "change_line_status", "set_line_status", + "change_bus_vect", "set_bus_vect", + "redispatch", "storage_power", "curtailment"], + Any]: """ Represent an action "as a" dictionary. This dictionary is useful to further inspect on which elements the actions had an impact. It is not recommended to use it as a way to serialize actions. The "do nothing" @@ -3256,7 +3763,10 @@ def as_dict(self) -> dict: dispatchable one) the amount of power redispatched in this action. 
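# --- Illustrative sketch, not part of the patch -----------------------------
# `_aux_as_dict_set_bus` above groups every modified element under its
# substation id, now keyed by the element *name* rather than "type_id".
# Sketch of that grouping on toy data (all names and arrays are made up):
import numpy as np

set_topo_vect = np.array([0, 2, 0, -1])            # requested bus per topo slot (0 = untouched)
substation_of_slot = np.array([0, 0, 1, 1])        # substation owning each slot
name_of_slot = np.array(["gen_0", "load_0", "gen_1", "load_1"])

res = {"nb_modif_objects": int((set_topo_vect != 0).sum())}
all_subs = set()
for id_, k in enumerate(set_topo_vect):
    if k != 0:
        sub_id = str(substation_of_slot[id_])
        res.setdefault(sub_id, {})[str(name_of_slot[id_])] = {"new_bus": int(k)}
        all_subs.add(sub_id)
res["nb_modif_subs"] = len(all_subs)
res["modif_subs_id"] = sorted(all_subs)
print(res)
# {'nb_modif_objects': 2, '0': {'load_0': {'new_bus': 2}},
#  '1': {'load_1': {'new_bus': -1}}, 'nb_modif_subs': 2, 'modif_subs_id': ['0', '1']}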
* `storage_power`: the setpoint for production / consumption for all storage units * `curtailment`: the curtailment performed on all generator - + * `shunt` : + + TODO detailed topo + Returns ------- res: ``dict`` @@ -3272,80 +3782,29 @@ def as_dict(self) -> dict: # handles actions on force line status if (self._set_line_status != 0).any(): - res["set_line_status"] = {} - res["set_line_status"]["nb_connected"] = (self._set_line_status == 1).sum() - res["set_line_status"]["nb_disconnected"] = ( - self._set_line_status == -1 - ).sum() - res["set_line_status"]["connected_id"] = np.where( - self._set_line_status == 1 - )[0] - res["set_line_status"]["disconnected_id"] = np.where( - self._set_line_status == -1 - )[0] + self._aux_as_dict_set_line(res) # handles action on swtich line status if self._switch_line_status.sum(): - res["change_line_status"] = {} - res["change_line_status"]["nb_changed"] = self._switch_line_status.sum() - res["change_line_status"]["changed_id"] = np.where( - self._switch_line_status - )[0] + self._aux_as_dict_change_line(res) # handles topology change if (self._change_bus_vect).any(): - res["change_bus_vect"] = {} - res["change_bus_vect"]["nb_modif_objects"] = self._change_bus_vect.sum() - all_subs = set() - for id_, k in enumerate(self._change_bus_vect): - if k: - obj_id, objt_type, substation_id = self._obj_caract_from_topo_id( - id_ - ) - sub_id = "{}".format(substation_id) - if not sub_id in res["change_bus_vect"]: - res["change_bus_vect"][sub_id] = {} - res["change_bus_vect"][sub_id]["{}_{}".format(objt_type, obj_id)] = { - "type": objt_type, - "id": obj_id, - } - all_subs.add(sub_id) - - res["change_bus_vect"]["nb_modif_subs"] = len(all_subs) - res["change_bus_vect"]["modif_subs_id"] = sorted(all_subs) + self._aux_as_dict_change_bus(res) # handles topology set if (self._set_topo_vect!= 0).any(): - res["set_bus_vect"] = {} - res["set_bus_vect"]["nb_modif_objects"] = (self._set_topo_vect != 0).sum() - all_subs = set() - for id_, k in enumerate(self._set_topo_vect): - if k != 0: - obj_id, objt_type, substation_id = self._obj_caract_from_topo_id( - id_ - ) - sub_id = "{}".format(substation_id) - if not sub_id in res["set_bus_vect"]: - res["set_bus_vect"][sub_id] = {} - res["set_bus_vect"][sub_id]["{}_{}".format(objt_type, obj_id)] = { - "type": objt_type, - "id": obj_id, - "new_bus": k, - } - all_subs.add(sub_id) - - res["set_bus_vect"]["nb_modif_subs"] = len(all_subs) - res["set_bus_vect"]["modif_subs_id"] = sorted(all_subs) + self._aux_as_dict_set_bus(res) if self._hazards.any(): - res["hazards"] = np.where(self._hazards)[0] + res["hazards"] = self._hazards.nonzero()[0] res["nb_hazards"] = self._hazards.sum() if self._maintenance.any(): - res["maintenance"] = np.where(self._maintenance)[0] + res["maintenance"] = self._maintenance.nonzero()[0] res["nb_maintenance"] = self._maintenance.sum() - if (self._redispatch != 0.0).any(): + if (np.abs(self._redispatch) >= 1e-7).any(): res["redispatch"] = 1.0 * self._redispatch if self._modif_storage: @@ -3353,7 +3812,12 @@ def as_dict(self) -> dict: if self._modif_curtailment: res["curtailment"] = 1.0 * self._curtail - + + if type(self).shunts_data_available: + self._aux_as_dict_shunt(res) + + # TODO detailed topo + return res def get_types(self) -> Tuple[bool, bool, bool, bool, bool, bool, bool]: @@ -3410,7 +3874,7 @@ def get_types(self) -> Tuple[bool, bool, bool, bool, bool, bool, bool]: lines_impacted, subs_impacted = self.get_topological_impact() topology = subs_impacted.any() line = lines_impacted.any() - redispatching = 
(self._redispatch != 0.0).any() + redispatching = (np.abs(self._redispatch) >= 1e-7).any() storage = self._modif_storage curtailment = self._modif_curtailment return injection, voltage, topology, line, redispatching, storage, curtailment @@ -3492,9 +3956,10 @@ def _aux_effect_on_storage(self, storage_id): return res def _aux_effect_on_substation(self, substation_id): - if substation_id >= self.n_sub: + cls = type(self) + if substation_id >= cls.n_sub: raise Grid2OpException( - f"There are only {self.n_sub} substations on the grid. " + f"There are only {cls.n_sub} substations on the grid. " f"Cannot check impact on " f"`substation_id={substation_id}`" ) @@ -3502,8 +3967,8 @@ def _aux_effect_on_substation(self, substation_id): raise Grid2OpException(f"`substation_id` should be positive.") res = {} - beg_ = int(self.sub_info[:substation_id].sum()) - end_ = int(beg_ + self.sub_info[substation_id]) + beg_ = int(cls.sub_info[:substation_id].sum()) + end_ = int(beg_ + cls.sub_info[substation_id]) res["change_bus"] = self._change_bus_vect[beg_:end_] res["set_bus"] = self._set_topo_vect[beg_:end_] return res @@ -3570,8 +4035,8 @@ def effect_on( - if a powerline is inspected then the keys are: - - "change_bus_or": whether or not the origin end will be moved from one bus to another - - "change_bus_ex": whether or not the extremity end will be moved from one bus to another + - "change_bus_or": whether or not the origin side will be moved from one bus to another + - "change_bus_ex": whether or not the extremity side will be moved from one bus to another - "set_bus_or": the new bus where the origin will be moved - "set_bus_ex": the new bus where the extremity will be moved - "set_line_status": the new status of the power line @@ -3674,10 +4139,11 @@ def get_storage_modif(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: New bus of the storage units, affected with "change_bus" command """ + cls = type(self) storage_power = 1.0 * self._storage_power - storage_set_bus = 1 * self._set_topo_vect[self.storage_pos_topo_vect] + storage_set_bus = 1 * self._set_topo_vect[cls.storage_pos_topo_vect] storage_change_bus = copy.deepcopy( - self._change_bus_vect[self.storage_pos_topo_vect] + self._change_bus_vect[cls.storage_pos_topo_vect] ) return storage_power, storage_set_bus, storage_change_bus @@ -3696,14 +4162,15 @@ def get_load_modif(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray load_change_bus: ``np.ndarray`` New bus of the loads, affected with "change_bus" command """ - load_p = np.full(self.n_load, fill_value=np.NaN, dtype=dt_float) + cls = type(self) + load_p = np.full(cls.n_load, fill_value=np.NaN, dtype=dt_float) if "load_p" in self._dict_inj: load_p[:] = self._dict_inj["load_p"] load_q = 1.0 * load_p if "load_q" in self._dict_inj: load_q[:] = self._dict_inj["load_q"] - load_set_bus = 1 * self._set_topo_vect[self.load_pos_topo_vect] - load_change_bus = copy.deepcopy(self._change_bus_vect[self.load_pos_topo_vect]) + load_set_bus = 1 * self._set_topo_vect[cls.load_pos_topo_vect] + load_change_bus = copy.deepcopy(self._change_bus_vect[cls.load_pos_topo_vect]) return load_p, load_q, load_set_bus, load_change_bus def get_gen_modif(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: @@ -3724,14 +4191,15 @@ def get_gen_modif(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray] New bus of the generators, affected with "change_bus" command """ - gen_p = np.full(self.n_load, fill_value=np.NaN, dtype=dt_float) + cls = type(self) + gen_p = np.full(cls.n_gen, fill_value=np.NaN, 
dtype=dt_float) if "prod_p" in self._dict_inj: gen_p[:] = self._dict_inj["prod_p"] gen_v = 1.0 * gen_p if "prod_v" in self._dict_inj: gen_v[:] = self._dict_inj["prod_v"] - gen_set_bus = 1 * self._set_topo_vect[self.gen_pos_topo_vect] - gen_change_bus = copy.deepcopy(self._change_bus_vect[self.gen_pos_topo_vect]) + gen_set_bus = 1 * self._set_topo_vect[cls.gen_pos_topo_vect] + gen_change_bus = copy.deepcopy(self._change_bus_vect[cls.gen_pos_topo_vect]) return gen_p, gen_v, gen_set_bus, gen_change_bus # TODO do the get_line_modif, get_line_or_modif and get_line_ex_modif @@ -3746,6 +4214,7 @@ def _aux_affect_object_int( outer_vect, min_val=-1, max_val=2, + _nm_ch_bk_key: Optional[Dict[Literal["loads", "prods", "lines"], Dict[str, str]]]=None, ): """ NB : this do not set the _modif_set_bus attribute. It is expected to be set in the property setter. @@ -3763,7 +4232,7 @@ def _aux_affect_object_int( if isinstance(values, tuple): # i provide a tuple: load_id, new_bus if len(values) != 2: - raise IllegalAction( + raise AmbiguousAction( f"when set with tuple, this tuple should have size 2 and be: {name_el}_id, new_bus " f"eg. (3, {max_val})" ) @@ -3771,29 +4240,29 @@ def _aux_affect_object_int( try: new_bus = int(new_bus) except Exception as exc_: - raise IllegalAction( + raise AmbiguousAction( f'new_bus should be convertible to integer. Error was : "{exc_}"' - ) + ) from exc_ if new_bus < min_val: - raise IllegalAction( + raise AmbiguousAction( f"new_bus should be between {min_val} and {max_val}" ) if new_bus > max_val: - raise IllegalAction( + raise AmbiguousAction( f"new_bus should be between {min_val} and {max_val}" ) if isinstance(el_id, (float, dt_float, np.float64)): - raise IllegalAction( + raise AmbiguousAction( f"{name_el}_id should be integers you provided float!" ) if isinstance(el_id, (bool, dt_bool)): - raise IllegalAction( + raise AmbiguousAction( f"{name_el}_id should be integers you provided bool!" ) if isinstance(el_id, str): - raise IllegalAction( + raise AmbiguousAction( f"{name_el}_id should be integers you provided string " f"(hint: you can use a dictionary to set the bus by name eg. " f"act.{name_el}_set_bus = {{act.name_{name_el}[0] : 1, act.name_{name_el}[1] : " @@ -3803,15 +4272,15 @@ def _aux_affect_object_int( try: el_id = int(el_id) except Exception as exc_: - raise IllegalAction( + raise AmbiguousAction( f'{name_el}_id should be convertible to integer. Error was : "{exc_}"' - ) + ) from exc_ if el_id < 0: - raise IllegalAction( + raise AmbiguousAction( f"Impossible to set the bus of a {name_el} with negative id" ) if el_id >= nb_els: - raise IllegalAction( + raise AmbiguousAction( f"Impossible to set a {name_el} id {el_id} because there are only " f"{nb_els} on the grid (and in python id starts at 0)" ) @@ -3823,26 +4292,26 @@ def _aux_affect_object_int( or values.dtype == dt_float or values.dtype == np.float64 ): - raise IllegalAction( + raise AmbiguousAction( f"{name_el}_id should be integers you provided float!" ) if isinstance(values.dtype, bool) or values.dtype == dt_bool: - raise IllegalAction( + raise AmbiguousAction( f"{name_el}_id should be integers you provided boolean!" ) try: values = values.astype(dt_int) except Exception as exc_: - raise IllegalAction( + raise AmbiguousAction( f'{name_el}_id should be convertible to integer. 
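# --- Illustrative sketch, not part of the patch -----------------------------
# `get_gen_modif` above now sizes its result with `cls.n_gen`; the replaced
# line used `self.n_load`, which is only correct when a grid happens to have
# as many loads as generators.  Sketch of the NaN-initialised pattern the
# getters rely on, with toy numbers:
import numpy as np

n_gen = 3
dict_inj = {"prod_p": np.array([10.0, 20.0, 30.0])}     # injections carried by the action, if any

gen_p = np.full(n_gen, fill_value=np.nan, dtype=float)  # NaN = "not touched by this action"
if "prod_p" in dict_inj:
    gen_p[:] = dict_inj["prod_p"]
print(gen_p)   # [10. 20. 30.]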
Error was : "{exc_}"' - ) + ) from exc_ if (values < min_val).any(): - raise IllegalAction( + raise AmbiguousAction( f"new_bus should be between {min_val} and {max_val}, found a value < {min_val}" ) if (values > max_val).any(): - raise IllegalAction( + raise AmbiguousAction( f"new_bus should be between {min_val} and {max_val}, found a value > {max_val}" ) outer_vect[inner_vect] = values @@ -3868,21 +4337,26 @@ def _aux_affect_object_int( outer_vect=outer_vect, min_val=min_val, max_val=max_val, + _nm_ch_bk_key=_nm_ch_bk_key ) return # expected list of tuple, each tuple is a pair with load_id, new_load_bus: example: [(0, 1), (2,2)] for el in values: if len(el) != 2: - raise IllegalAction( + raise AmbiguousAction( f"If input is a list, it should be a list of pair (el_id, new_bus) " f"eg. [(0, {max_val}), (2, {min_val})]" ) el_id, new_bus = el if isinstance(el_id, str) and name_els is not None: - tmp = np.where(name_els == el_id)[0] + if self._names_chronics_to_backend is not None and _nm_ch_bk_key in self._names_chronics_to_backend: + # initial action to set the state, might use the name in the time series... + nms_conv = self._names_chronics_to_backend[_nm_ch_bk_key] + el_id = nms_conv[el_id] + tmp = (name_els == el_id).nonzero()[0] if len(tmp) == 0: - raise IllegalAction(f"No known {name_el} with name {el_id}") + raise AmbiguousAction(f"No known {name_el} with name {el_id}") el_id = tmp[0] self._aux_affect_object_int( (el_id, new_bus), @@ -3893,14 +4367,19 @@ def _aux_affect_object_int( outer_vect=outer_vect, min_val=min_val, max_val=max_val, + _nm_ch_bk_key=_nm_ch_bk_key ) elif isinstance(values, dict): # 2 cases: either key = load_id and value = new_bus or key = load_name and value = new bus for key, new_bus in values.items(): if isinstance(key, str) and name_els is not None: - tmp = np.where(name_els == key)[0] + if self._names_chronics_to_backend is not None and _nm_ch_bk_key in self._names_chronics_to_backend: + # initial action to set the state, might use the name in the time series... + nms_conv = self._names_chronics_to_backend[_nm_ch_bk_key] + key = nms_conv[key] + tmp = (name_els == key).nonzero()[0] if len(tmp) == 0: - raise IllegalAction(f"No known {name_el} with name {key}") + raise AmbiguousAction(f"No known {name_el} with name {key}") key = tmp[0] self._aux_affect_object_int( (key, new_bus), @@ -3911,9 +4390,10 @@ def _aux_affect_object_int( outer_vect=outer_vect, min_val=min_val, max_val=max_val, + _nm_ch_bk_key=_nm_ch_bk_key, ) else: - raise IllegalAction( + raise AmbiguousAction( f"Impossible to modify the {name_el} bus with inputs {values}. " f"Please see the documentation." ) @@ -3921,9 +4401,35 @@ def _aux_affect_object_int( @property def load_set_bus(self) -> np.ndarray: """ - Allows to retrieve (and affect) the busbars at which each storage unit is **set**. + Allows to retrieve (and affect) the busbars at which the action **set** the loads. + + .. versionchanged:: 1.10.0 + From grid2op version 1.10.0 it is possible (under some cirumstances, depending on how + the environment is created) to set the busbar to a number >= 3, depending on the value + of `type(act).n_busbar_per_sub`. 
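# --- Illustrative sketch, not part of the patch -----------------------------
# With `max_val=cls.n_busbar_per_sub`, the "set bus" setters above accept any
# busbar up to the number configured for the environment instead of the
# hard-coded 2.  Sketch of the bound check on a made-up request:
import numpy as np

n_busbar_per_sub = 3                        # e.g. an environment created with 3 busbars per substation
requested_bus = np.array([1, 3, -1, 4])     # -1 = disconnect, 0 = leave as is

min_val, max_val = -1, n_busbar_per_sub
bad = (requested_bus < min_val) | (requested_bus > max_val)
if bad.any():
    # the real setters raise AmbiguousAction mentioning the allowed bounds
    print("rejected value(s):", requested_bus[bad])   # [4]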
+ + Returns + ------- + res: + A vector of integer, of size `act.n_gen` indicating what type of action is performed for + each load units with the convention : + + * 0 the action do not action on this load + * -1 the action disconnect the load + * 1 the action set the load to busbar 1 + * 2 the action set the load to busbar 2 + * 3 the action set the load to busbar 3 (grid2op >= 1.10.0) + * etc. (grid2op >= 1.10.0) + + Examples + -------- + + Please refer to the documentation of :attr:`BaseAction.gen_set_bus` for more information. + + .. note:: + Be careful not to mix "change" and "set". For "change" you only need to provide the ID of the elements + you want to change, for "set" you need to provide the ID **AND** where you want to set them. - It behaves similarly as :attr:`BaseAction.gen_set_bus`. See the help there for more information. """ res = self.set_bus[self.load_pos_topo_vect] res.flags.writeable = False @@ -3931,8 +4437,9 @@ def load_set_bus(self) -> np.ndarray: @load_set_bus.setter def load_set_bus(self, values): - if "set_bus" not in self.authorized_keys: - raise IllegalAction( + cls = type(self) + if "set_bus" not in cls.authorized_keys: + raise AmbiguousAction( 'Impossible to modify the load bus (with "set") with this action type.' ) orig_ = self.load_set_bus @@ -3940,46 +4447,57 @@ def load_set_bus(self, values): self._aux_affect_object_int( values, "load", - self.n_load, - self.name_load, - self.load_pos_topo_vect, + cls.n_load, + cls.name_load, + cls.load_pos_topo_vect, self._set_topo_vect, + max_val=cls.n_busbar_per_sub, + _nm_ch_bk_key="loads" ) self._modif_set_bus = True except Exception as exc_: self._aux_affect_object_int( orig_, "load", - self.n_load, - self.name_load, - self.load_pos_topo_vect, + cls.n_load, + cls.name_load, + cls.load_pos_topo_vect, self._set_topo_vect, + max_val=cls.n_busbar_per_sub, + _nm_ch_bk_key="loads" ) - raise IllegalAction( + raise AmbiguousAction( f"Impossible to modify the load bus with your input. Please consult the documentation. " f'The error was "{exc_}"' - ) + ) from exc_ @property def gen_set_bus(self) -> np.ndarray: """ Allows to retrieve (and affect) the busbars at which the action **set** the generator units. + .. versionchanged:: 1.10.0 + From grid2op version 1.10.0 it is possible (under some cirumstances, depending on how + the environment is created) to set the busbar to a number >= 3, depending on the value + of `type(act).n_busbar_per_sub`. + Returns ------- res: A vector of integer, of size `act.n_gen` indicating what type of action is performed for each generator units with the convention : - * 0 the action do not action on this storage unit - * -1 the action disconnect the storage unit - * 1 the action set the storage unit to busbar 1 - * 2 the action set the storage unit to busbar 2 + * 0 the action do not action on this generator + * -1 the action disconnect the generator + * 1 the action set the generator to busbar 1 + * 2 the action set the generator to busbar 2 + * 3 the action set the generator to busbar 3 (grid2op >= 1.10.0) + * etc. (grid2op >= 1.10.0) Examples -------- - To retrieve the impact of the action on the storage unit, you can do: + To retrieve the impact of the action on the generator, you can do: .. code-block:: python @@ -4050,7 +4568,8 @@ def gen_set_bus(self) -> np.ndarray: act.gen_set_bus[1] = 2 # end do not run - .. note:: Be careful not to mix "change" and "set". For "change" you only need to provide the ID of the elements + .. note:: + Be careful not to mix "change" and "set". 
For "change" you only need to provide the ID of the elements you want to change, for "set" you need to provide the ID **AND** where you want to set them. """ @@ -4060,8 +4579,9 @@ def gen_set_bus(self) -> np.ndarray: @gen_set_bus.setter def gen_set_bus(self, values): - if "set_bus" not in self.authorized_keys: - raise IllegalAction( + cls = type(self) + if "set_bus" not in cls.authorized_keys: + raise AmbiguousAction( 'Impossible to modify the gen bus (with "set") with this action type.' ) orig_ = self.gen_set_bus @@ -4069,32 +4589,62 @@ def gen_set_bus(self, values): self._aux_affect_object_int( values, "gen", - self.n_gen, - self.name_gen, - self.gen_pos_topo_vect, + cls.n_gen, + cls.name_gen, + cls.gen_pos_topo_vect, self._set_topo_vect, + max_val=cls.n_busbar_per_sub, + _nm_ch_bk_key="prods" ) self._modif_set_bus = True except Exception as exc_: self._aux_affect_object_int( orig_, "gen", - self.n_gen, - self.name_gen, - self.gen_pos_topo_vect, + cls.n_gen, + cls.name_gen, + cls.gen_pos_topo_vect, self._set_topo_vect, + max_val=cls.n_busbar_per_sub, + _nm_ch_bk_key="prods" ) - raise IllegalAction( + raise AmbiguousAction( f"Impossible to modify the gen bus with your input. Please consult the documentation. " f'The error was:\n"{exc_}"' - ) + ) from exc_ @property def storage_set_bus(self) -> np.ndarray: """ - Allows to retrieve (and affect) the busbars at which each storage unit is **set**. + Allows to retrieve (and affect) the busbars at which the action **set** the storage units. + + .. versionchanged:: 1.10.0 + From grid2op version 1.10.0 it is possible (under some cirumstances, depending on how + the environment is created) to set the busbar to a number >= 3, depending on the value + of `type(act).n_busbar_per_sub`. + + Returns + ------- + res: + A vector of integer, of size `act.n_gen` indicating what type of action is performed for + each storage unit with the convention : + + * 0 the action do not action on this storage unit + * -1 the action disconnect the storage unit + * 1 the action set the storage unit to busbar 1 + * 2 the action set the storage unit to busbar 2 + * 3 the action set the storage unit to busbar 3 (grid2op >= 1.10.0) + * etc. (grid2op >= 1.10.0) + + Examples + -------- + + Please refer to the documentation of :attr:`BaseAction.gen_set_bus` for more information. + + .. note:: + Be careful not to mix "change" and "set". For "change" you only need to provide the ID of the elements + you want to change, for "set" you need to provide the ID **AND** where you want to set them. - It behaves similarly as :attr:`BaseAction.gen_set_bus`. See the help there for more information. 
""" if "set_storage" not in self.authorized_keys: raise IllegalAction(type(self).ERR_NO_STOR_SET_BUS) @@ -4104,42 +4654,71 @@ def storage_set_bus(self) -> np.ndarray: @storage_set_bus.setter def storage_set_bus(self, values): - if "set_bus" not in self.authorized_keys: - raise IllegalAction(type(self).ERR_NO_STOR_SET_BUS) - if "set_storage" not in self.authorized_keys: - raise IllegalAction(type(self).ERR_NO_STOR_SET_BUS) + cls = type(self) + if "set_bus" not in cls.authorized_keys: + raise IllegalAction(cls.ERR_NO_STOR_SET_BUS) + if "set_storage" not in cls.authorized_keys: + raise IllegalAction(cls.ERR_NO_STOR_SET_BUS) orig_ = self.storage_set_bus try: self._aux_affect_object_int( values, "storage", - self.n_storage, - self.name_storage, - self.storage_pos_topo_vect, + cls.n_storage, + cls.name_storage, + cls.storage_pos_topo_vect, self._set_topo_vect, + max_val=cls.n_busbar_per_sub ) self._modif_set_bus = True except Exception as exc_: self._aux_affect_object_int( orig_, "storage", - self.n_storage, - self.name_storage, - self.storage_pos_topo_vect, + cls.n_storage, + cls.name_storage, + cls.storage_pos_topo_vect, self._set_topo_vect, + max_val=cls.n_busbar_per_sub ) - raise IllegalAction( + raise AmbiguousAction( f"Impossible to modify the storage bus with your input. " f"Please consult the documentation. " f'The error was:\n"{exc_}"' - ) + ) from exc_ @property def line_or_set_bus(self) -> np.ndarray: """ - Allows to retrieve (and affect) the busbars at which the origin side of each powerline is **set**. + Allows to retrieve (and affect) the busbars at which the action **set** the lines (origin side). + + .. versionchanged:: 1.10.0 + From grid2op version 1.10.0 it is possible (under some cirumstances, depending on how + the environment is created) to set the busbar to a number >= 3, depending on the value + of `type(act).n_busbar_per_sub`. + + Returns + ------- + res: + A vector of integer, of size `act.n_gen` indicating what type of action is performed for + each lines (origin side) with the convention : + + * 0 the action do not action on this line (origin side) + * -1 the action disconnect the line (origin side) + * 1 the action set the line (origin side) to busbar 1 + * 2 the action set the line (origin side) to busbar 2 + * 3 the action set the line (origin side) to busbar 3 (grid2op >= 1.10.0) + * etc. + + Examples + -------- + + Please refer to the documentation of :attr:`BaseAction.gen_set_bus` for more information. + + .. note:: + Be careful not to mix "change" and "set". For "change" you only need to provide the ID of the elements + you want to change, for "set" you need to provide the ID **AND** where you want to set them. - It behaves similarly as :attr:`BaseAction.gen_set_bus`. See the help there for more information. """ res = self.set_bus[self.line_or_pos_topo_vect] res.flags.writeable = False @@ -4147,7 +4726,8 @@ def line_or_set_bus(self) -> np.ndarray: @line_or_set_bus.setter def line_or_set_bus(self, values): - if "set_bus" not in self.authorized_keys: + cls = type(self) + if "set_bus" not in cls.authorized_keys: raise IllegalAction( 'Impossible to modify the line (origin) bus (with "set") with this action type.' 
) @@ -4160,22 +4740,26 @@ def line_or_set_bus(self, values): self.name_line, self.line_or_pos_topo_vect, self._set_topo_vect, + max_val=cls.n_busbar_per_sub, + _nm_ch_bk_key="lines" ) self._modif_set_bus = True except Exception as exc_: self._aux_affect_object_int( orig_, self._line_or_str, - self.n_line, - self.name_line, - self.line_or_pos_topo_vect, + cls.n_line, + cls.name_line, + cls.line_or_pos_topo_vect, self._set_topo_vect, + max_val=cls.n_busbar_per_sub, + _nm_ch_bk_key="lines" ) - raise IllegalAction( + raise AmbiguousAction( f"Impossible to modify the line origin bus with your input. " f"Please consult the documentation. " f'The error was:\n"{exc_}"' - ) + ) from exc_ @property def line_ex_set_bus(self) -> np.ndarray: @@ -4190,7 +4774,8 @@ def line_ex_set_bus(self) -> np.ndarray: @line_ex_set_bus.setter def line_ex_set_bus(self, values): - if "set_bus" not in self.authorized_keys: + cls = type(self) + if "set_bus" not in cls.authorized_keys: raise IllegalAction( 'Impossible to modify the line (ex) bus (with "set") with this action type.' ) @@ -4199,26 +4784,30 @@ def line_ex_set_bus(self, values): self._aux_affect_object_int( values, self._line_ex_str, - self.n_line, - self.name_line, - self.line_ex_pos_topo_vect, + cls.n_line, + cls.name_line, + cls.line_ex_pos_topo_vect, self._set_topo_vect, + max_val=cls.n_busbar_per_sub, + _nm_ch_bk_key="lines" ) self._modif_set_bus = True except Exception as exc_: self._aux_affect_object_int( orig_, self._line_ex_str, - self.n_line, - self.name_line, - self.line_ex_pos_topo_vect, + cls.n_line, + cls.name_line, + cls.line_ex_pos_topo_vect, self._set_topo_vect, + max_val=cls.n_busbar_per_sub, + _nm_ch_bk_key="lines" ) - raise IllegalAction( + raise AmbiguousAction( f"Impossible to modify the line extrmity bus with your input. " f"Please consult the documentation. " f'The error was:\n"{exc_}"' - ) + ) from exc_ @property def set_bus(self) -> np.ndarray: @@ -4267,7 +4856,8 @@ def set_bus(self) -> np.ndarray: @set_bus.setter def set_bus(self, values): - if "set_bus" not in self.authorized_keys: + cls = type(self) + if "set_bus" not in cls.authorized_keys: raise IllegalAction( 'Impossible to modify the bus (with "set") with this action type.' ) @@ -4276,26 +4866,93 @@ def set_bus(self, values): self._aux_affect_object_int( values, "", - self.dim_topo, + cls.dim_topo, None, - np.arange(self.dim_topo), + np.arange(cls.dim_topo), self._set_topo_vect, + max_val=cls.n_busbar_per_sub ) self._modif_set_bus = True except Exception as exc_: self._aux_affect_object_int( orig_, "", - self.dim_topo, + cls.dim_topo, None, - np.arange(self.dim_topo), + np.arange(cls.dim_topo), self._set_topo_vect, + max_val=cls.n_busbar_per_sub ) - raise IllegalAction( + raise AmbiguousAction( f"Impossible to modify the bus with your input. " f"Please consult the documentation. " f'The error was:\n"{exc_}"' + ) from exc_ + + @property + def set_switch(self) -> np.ndarray: + """ + Allows to retrieve (and affect) the switch state by using **set**. + + Notes + ----- + + For example: + + .. code-block:: python + + act.set_switch = [(0, 1), (1, -1), (3, 1)] + + Will: + + * set the switch 0 to the `1` state, which is "closed" (current can pass) + * set the switch 1 to the `-1` state, which is "opened" (current cannot pass) + * set the switch 3 to the `1` state, which is "closed" (current can pass) + + """ + if "set_switch" not in self.authorized_keys: + raise IllegalAction( + 'Impossible to modify the switch (with "set") with this action type.' 
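# --- Illustrative sketch, not part of the patch -----------------------------
# Convention of the new `set_switch` vector documented above: 0 leaves a
# switch untouched, 1 closes it (current can pass), -1 opens it.  Tiny
# standalone sketch of applying such a vector to a made-up switch state:
import numpy as np

switch_closed = np.array([True, False, True, False])   # current state of four switches
set_switch = np.array([0, 1, -1, 0])                    # the action: close #1, open #2

new_state = switch_closed.copy()
new_state[set_switch == 1] = True
new_state[set_switch == -1] = False
print(new_state)   # [ True  True False False]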
) + res = 1 * self._set_switch_status + res.flags.writeable = False + return res + + @set_switch.setter + def set_switch(self, values): + cls = type(self) + if "set_switch" not in cls.authorized_keys: + raise IllegalAction( + 'Impossible to modify the switch (with "set") with this action type.' + ) + nb_switch = type(self).detailed_topo_desc.switches.shape[0] + orig_ = self.set_switch + try: + self._aux_affect_object_int( + values, + "", + nb_switch, + None, + np.arange(nb_switch), + self._set_switch_status, + max_val=1 + ) + self._modif_set_switch = True + except Exception as exc_: + self._aux_affect_object_int( + orig_, + "", + nb_switch, + None, + np.arange(nb_switch), + self._set_switch_status, + max_val=1 + ) + raise AmbiguousAction( + f"Impossible to modify the switch with your input. " + f"Please consult the documentation. " + f'The error was:\n"{exc_}"' + ) from exc_ @property def line_set_status(self) -> np.ndarray: @@ -4349,6 +5006,7 @@ def line_set_status(self, values): np.arange(self.n_line), self._set_line_status, max_val=1, + _nm_ch_bk_key="lines" ) self._modif_set_status = True except Exception as exc_: @@ -4360,12 +5018,13 @@ def line_set_status(self, values): np.arange(self.n_line), self._set_line_status, max_val=1, + _nm_ch_bk_key="lines" ) - raise IllegalAction( + raise AmbiguousAction( f"Impossible to modify the line status with your input. " f"Please consult the documentation. " f'The error was:\n"{exc_}"' - ) + ) from exc_ @property def set_line_status(self) -> np.ndarray: @@ -4386,7 +5045,7 @@ def change_line_status(self, values): self.line_change_status = values def _aux_affect_object_bool( - self, values, name_el, nb_els, name_els, inner_vect, outer_vect + self, values, name_el, nb_els, name_els, inner_vect, outer_vect, _nm_ch_bk_key=None ): """ NB : this do not set the _modif_set_bus attribute. It is expected to be set in the property setter. @@ -4403,14 +5062,14 @@ def _aux_affect_object_bool( """ if isinstance(values, bool): # to make it explicit, tuple modifications are deactivated - raise IllegalAction( + raise AmbiguousAction( f"Impossible to change a {name_el} with a tuple input. Accepted inputs are:" f"int, list of int, list of string, array of int, array of bool, set of int," f"set of string" ) elif isinstance(values, float): # to make it explicit, tuple modifications are deactivated - raise IllegalAction( + raise AmbiguousAction( f"Impossible to change a {name_el} with a tuple input. Accepted inputs are:" f"int, list of int, list of string, array of int, array of bool, set of int," f"set of string" @@ -4420,15 +5079,15 @@ def _aux_affect_object_bool( try: el_id = int(values) except Exception as exc_: - raise IllegalAction( + raise AmbiguousAction( f'{name_el}_id should be convertible to integer. Error was : "{exc_}"' - ) + ) from exc_ if el_id < 0: - raise IllegalAction( + raise AmbiguousAction( f"Impossible to change a negative {name_el} with negative id" ) if el_id >= nb_els: - raise IllegalAction( + raise AmbiguousAction( f"Impossible to change a {name_el} id {el_id} because there are only " f"{nb_els} on the grid (and in python id starts at 0)" ) @@ -4436,7 +5095,7 @@ def _aux_affect_object_bool( return elif isinstance(values, tuple): # to make it explicit, tuple modifications are deactivated - raise IllegalAction( + raise AmbiguousAction( f"Impossible to change a {name_el} with a tuple input. 
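# --- Illustrative sketch, not part of the patch -----------------------------
# `_aux_affect_object_bool` above accepts several "change" input styles
# (a single id, a list of ids or names, a full boolean mask) and folds them
# into one boolean vector.  Standalone sketch of that normalisation on toy
# element names:
import numpy as np

name_els = np.array(["line_0", "line_1", "line_2", "line_3"])
change_vect = np.zeros(4, dtype=bool)

def apply_change(values):
    if isinstance(values, np.ndarray) and values.dtype == bool:
        change_vect[values] = True                    # full mask: flag the selected elements
        return
    for el in np.atleast_1d(values):
        if isinstance(el, str):
            el = (name_els == el).nonzero()[0][0]     # name -> id lookup
        change_vect[int(el)] = True

apply_change(1)                                        # a single id
apply_change(["line_3"])                               # by name
apply_change(np.array([True, False, False, False]))    # a boolean mask
print(change_vect)   # [ True  True False  True]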
Accepted inputs are:" f"int, list of int, list of string, array of int, array of bool, set of int," f"set of string" @@ -4450,7 +5109,7 @@ def _aux_affect_object_bool( ): # so i change by giving the full vector if values.shape[0] != nb_els: - raise IllegalAction( + raise AmbiguousAction( f"If provided with bool array, the number of components of the vector" f"should match the total number of {name_el}. You provided a vector " f"with size {values.shape[0]} and there are {nb_els} {name_el} " @@ -4463,15 +5122,15 @@ def _aux_affect_object_bool( try: values = values.astype(dt_int) except Exception as exc_: - raise IllegalAction( + raise AmbiguousAction( f'{name_el}_id should be convertible to integer. Error was : "{exc_}"' - ) + ) from exc_ if (values < 0).any(): - raise IllegalAction( + raise AmbiguousAction( f"Impossible to change a negative {name_el} with negative id" ) if (values > nb_els).any(): - raise IllegalAction( + raise AmbiguousAction( f"Impossible to change a {name_el} id because there are only " f"{nb_els} on the grid and you wanted to change an element with an " f"id > {nb_els} (in python id starts at 0)" @@ -4483,22 +5142,26 @@ def _aux_affect_object_bool( # (note: i cannot convert to numpy array other I could mix types...) for el_id_or_name in values: if isinstance(el_id_or_name, str): - tmp = np.where(name_els == el_id_or_name)[0] + if self._names_chronics_to_backend is not None and _nm_ch_bk_key in self._names_chronics_to_backend: + # initial action to set the state, might use the name in the time series... + nms_conv = self._names_chronics_to_backend[_nm_ch_bk_key] + el_id_or_name = nms_conv[el_id_or_name] + tmp = (name_els == el_id_or_name).nonzero()[0] if len(tmp) == 0: - raise IllegalAction( + raise AmbiguousAction( f'No known {name_el} with name "{el_id_or_name}"' ) el_id = tmp[0] elif isinstance(el_id_or_name, (bool, dt_bool)): # somehow python considers bool are int... - raise IllegalAction( + raise AmbiguousAction( f"If a list is provided, it is only valid with integer found " f"{type(el_id_or_name)}." ) elif isinstance(el_id_or_name, (int, dt_int, np.int64)): el_id = el_id_or_name else: - raise IllegalAction( + raise AmbiguousAction( f"If a list is provided, it is only valid with integer found " f"{type(el_id_or_name)}." ) @@ -4510,6 +5173,7 @@ def _aux_affect_object_bool( name_els, inner_vect=inner_vect, outer_vect=outer_vect, + _nm_ch_bk_key=_nm_ch_bk_key ) elif isinstance(values, set): # 2 cases: either set of load_id or set of load_name @@ -4521,9 +5185,10 @@ def _aux_affect_object_bool( name_els, inner_vect=inner_vect, outer_vect=outer_vect, + _nm_ch_bk_key=_nm_ch_bk_key ) else: - raise IllegalAction( + raise AmbiguousAction( f"Impossible to modify the {name_el} with inputs {values}. " f"Please see the documentation." ) @@ -4553,7 +5218,7 @@ def change_bus(self) -> np.ndarray: .. code-block:: python - act.set_bus [0, 1, 3] + act.change_bus [0, 1, 3] Will: @@ -4591,11 +5256,77 @@ def change_bus(self, values): np.arange(self.dim_topo), self._change_bus_vect, ) - raise IllegalAction( + raise AmbiguousAction( f"Impossible to modify the bus with your input. " f"Please consult the documentation. " f'The error was:\n"{exc_}"' + ) from exc_ + + @property + def change_switch(self) -> np.ndarray: + """ + Allows to retrieve (and affect) the switch using the **change** paradigm. + + Notes + ----- + + For example: + + .. 
code-block:: python + + act.change_switch [0, 1, 3] + + Will: + + * change the position switch 0 (if it was open it will close it and if it was closed it will open it) + * change the position switch 1 (if it was open it will close it and if it was closed it will open it) + * change the position switch 3 (if it was open it will close it and if it was closed it will open it) + + .. warning:: + Changing the switch might not have any impact or it might have a very impactfull one. + + """ + if type(self).detailed_topo_desc is None: + raise AmbiguousAction("You cannot 'change the switch' as no detailed " + "information (about switches) is provided in your grid.") + res = copy.deepcopy(self._change_switch_status) + res.flags.writeable = False + return res + + @change_switch.setter + def change_switch(self, values): + + if "change_switch" not in self.authorized_keys: + raise IllegalAction( + 'Impossible to modify the switches (with change) state with this action type.' + ) + + orig_ = self.change_switch + nb_switch = type(self).detailed_topo_desc.switches.shape[0] + try: + self._aux_affect_object_bool( + values, + "", + nb_switch, + None, + np.arange(nb_switch), + self._change_switch_status, + ) + self._modif_change_switch = True + except Exception as exc_: + self._aux_affect_object_bool( + orig_, + "", + nb_switch, + None, + np.arange(nb_switch), + self._change_switch_status, ) + raise AmbiguousAction( + f"Impossible to modify the switch with your input. " + f"Please consult the documentation. " + f'The error was:\n"{exc_}"' + ) from exc_ @property def load_change_bus(self) -> np.ndarray: @@ -4623,11 +5354,12 @@ def load_change_bus(self, values): self.name_load, self.load_pos_topo_vect, self._change_bus_vect, + _nm_ch_bk_key="loads", ) self._modif_change_bus = True except Exception as exc_: self._change_bus_vect[self.load_pos_topo_vect] = orig_ - raise IllegalAction( + raise AmbiguousAction( f"Impossible to modify the load bus with your input. Please consult the documentation. " f'The error was "{exc_}"' ) @@ -4745,11 +5477,12 @@ def gen_change_bus(self, values): self.name_gen, self.gen_pos_topo_vect, self._change_bus_vect, + _nm_ch_bk_key="prods", ) self._modif_change_bus = True except Exception as exc_: self._change_bus_vect[self.gen_pos_topo_vect] = orig_ - raise IllegalAction( + raise AmbiguousAction( f"Impossible to modify the gen bus with your input. Please consult the documentation. " f'The error was:\n"{exc_}"' ) @@ -4788,7 +5521,7 @@ def storage_change_bus(self, values): self._modif_change_bus = True except Exception as exc_: self._change_bus_vect[self.storage_pos_topo_vect] = orig_ - raise IllegalAction( + raise AmbiguousAction( f"Impossible to modify the storage bus with your input. " f"Please consult the documentation. " f'The error was:\n"{exc_}"' @@ -4820,11 +5553,12 @@ def line_or_change_bus(self, values): self.name_line, self.line_or_pos_topo_vect, self._change_bus_vect, + _nm_ch_bk_key="lines", ) self._modif_change_bus = True except Exception as exc_: self._change_bus_vect[self.line_or_pos_topo_vect] = orig_ - raise IllegalAction( + raise AmbiguousAction( f"Impossible to modify the line origin bus with your input. " f"Please consult the documentation. 
" f'The error was:\n"{exc_}"' @@ -4856,11 +5590,12 @@ def line_ex_change_bus(self, values): self.name_line, self.line_ex_pos_topo_vect, self._change_bus_vect, + _nm_ch_bk_key="lines", ) self._modif_change_bus = True except Exception as exc_: self._change_bus_vect[self.line_ex_pos_topo_vect] = orig_ - raise IllegalAction( + raise AmbiguousAction( f"Impossible to modify the line extrmity bus with your input. " f"Please consult the documentation. " f'The error was:\n"{exc_}"' @@ -4897,11 +5632,12 @@ def line_change_status(self, values): self.name_line, np.arange(self.n_line), self._switch_line_status, + _nm_ch_bk_key="lines", ) self._modif_change_status = True except Exception as exc_: self._switch_line_status[:] = orig_ - raise IllegalAction( + raise AmbiguousAction( f"Impossible to modify the line status with your input. " f"Please consult the documentation. " f'The error was:\n"{exc_}"' @@ -4963,7 +5699,7 @@ def raise_alarm(self, values): self._modif_alarm = True except Exception as exc_: self._raise_alarm[:] = orig_ - raise IllegalAction( + raise AmbiguousAction( f"Impossible to modify the alarm with your input. " f"Please consult the documentation. " f'The error was:\n"{exc_}"' @@ -5011,11 +5747,11 @@ def raise_alert(self, values): self._modif_alert = True except Exception as exc_: self._raise_alert[:] = orig_ - raise IllegalAction( + raise AmbiguousAction( f"Impossible to modify the alert with your input. " f"Please consult the documentation. " f'The error was:\n"{exc_}"' - ) + ) from exc_ def _aux_affect_object_float( self, @@ -5025,6 +5761,7 @@ def _aux_affect_object_float( name_els, inner_vect, outer_vect, + _nm_ch_bk_key=None, ): """ INTERNAL USE ONLY @@ -5044,47 +5781,47 @@ def _aux_affect_object_float( will modify outer_vect[inner_vect] """ if isinstance(values, (bool, dt_bool)): - raise IllegalAction( + raise AmbiguousAction( f"Impossible to set {name_el} values with a single boolean." ) elif isinstance(values, (int, dt_int, np.int64)): - raise IllegalAction( + raise AmbiguousAction( f"Impossible to set {name_el} values with a single integer." ) elif isinstance(values, (float, dt_float, np.float64)): - raise IllegalAction( + raise AmbiguousAction( f"Impossible to set {name_el} values with a single float." ) elif isinstance(values, tuple): # i provide a tuple: load_id, new_vals if len(values) != 2: - raise IllegalAction( + raise AmbiguousAction( f"when set with tuple, this tuple should have size 2 and be: {name_el}_id, new_bus " f"eg. (3, 0.0)" ) el_id, new_val = values if isinstance(new_val, (bool, dt_bool)): - raise IllegalAction( + raise AmbiguousAction( f"new_val should be a float. A boolean was provided" ) try: new_val = float(new_val) except Exception as exc_: - raise IllegalAction( + raise AmbiguousAction( f'new_val should be convertible to a float. Error was : "{exc_}"' ) if isinstance(el_id, (float, dt_float, np.float64)): - raise IllegalAction( + raise AmbiguousAction( f"{name_el}_id should be integers you provided float!" ) if isinstance(el_id, (bool, dt_bool)): - raise IllegalAction( + raise AmbiguousAction( f"{name_el}_id should be integers you provided bool!" ) if isinstance(el_id, str): - raise IllegalAction( + raise AmbiguousAction( f"{name_el}_id should be integers you provided string " f"(hint: you can use a dictionary to set the bus by name eg. 
" f"act.{name_el}_set_bus = {{act.name_{name_el}[0] : 1, act.name_{name_el}[1] : " @@ -5094,15 +5831,15 @@ def _aux_affect_object_float( try: el_id = int(el_id) except Exception as exc_: - raise IllegalAction( + raise AmbiguousAction( f'{name_el}_id should be convertible to integer. Error was : "{exc_}"' - ) + ) from exc_ if el_id < 0: - raise IllegalAction( + raise AmbiguousAction( f"Impossible to set the bus of a {name_el} with negative id" ) if el_id >= nb_els: - raise IllegalAction( + raise AmbiguousAction( f"Impossible to set a {name_el} id {el_id} because there are only " f"{nb_els} on the grid (and in python id starts at 0)" ) @@ -5116,18 +5853,18 @@ def _aux_affect_object_float( or values.dtype == np.int64 ): # for this the user explicitly casted it as integer, this won't work. - raise IllegalAction(f"{name_el}_id should be floats you provided int!") + raise AmbiguousAction(f"{name_el}_id should be floats you provided int!") if isinstance(values.dtype, bool) or values.dtype == dt_bool: - raise IllegalAction( + raise AmbiguousAction( f"{name_el}_id should be floats you provided boolean!" ) try: values = values.astype(dt_float) except Exception as exc_: - raise IllegalAction( + raise AmbiguousAction( f'{name_el}_id should be convertible to float. Error was : "{exc_}"' - ) + ) from exc_ indx_ok = np.isfinite(values) outer_vect[inner_vect[indx_ok]] = values[indx_ok] return @@ -5137,15 +5874,15 @@ def _aux_affect_object_float( # 2 cases: either i set all loads in the form [(0,..), (1,..), (2,...)] # or i should have converted the list to np array if isinstance(values, (bool, dt_bool)): - raise IllegalAction( + raise AmbiguousAction( f"Impossible to set {name_el} values with a single boolean." ) elif isinstance(values, (int, dt_int, np.int64)): - raise IllegalAction( + raise AmbiguousAction( f"Impossible to set {name_el} values with a single integer." ) elif isinstance(values, (float, dt_float, np.float64)): - raise IllegalAction( + raise AmbiguousAction( f"Impossible to set {name_el} values with a single float." ) elif isinstance(values[0], tuple): @@ -5162,21 +5899,26 @@ def _aux_affect_object_float( name_els, inner_vect=inner_vect, outer_vect=outer_vect, + _nm_ch_bk_key=_nm_ch_bk_key ) return # expected list of tuple, each tuple is a pair with load_id, new_vals: example: [(0, -1.0), (2,2.7)] for el in values: if len(el) != 2: - raise IllegalAction( + raise AmbiguousAction( f"If input is a list, it should be a list of pair (el_id, new_val) " f"eg. [(0, 1.0), (2, 2.7)]" ) el_id, new_val = el if isinstance(el_id, str): - tmp = np.where(name_els == el_id)[0] + if self._names_chronics_to_backend is not None and _nm_ch_bk_key in self._names_chronics_to_backend: + # initial action to set the state, might use the name in the time series... 
+ nms_conv = self._names_chronics_to_backend[_nm_ch_bk_key] + el_id = nms_conv[el_id] + tmp = (name_els == el_id).nonzero()[0] if len(tmp) == 0: - raise IllegalAction(f"No known {name_el} with name {el_id}") + raise AmbiguousAction(f"No known {name_el} with name {el_id}") el_id = tmp[0] self._aux_affect_object_float( (el_id, new_val), @@ -5185,14 +5927,19 @@ def _aux_affect_object_float( name_els, inner_vect=inner_vect, outer_vect=outer_vect, + _nm_ch_bk_key=_nm_ch_bk_key, ) elif isinstance(values, dict): # 2 cases: either key = load_id and value = new_bus or key = load_name and value = new bus for key, new_val in values.items(): if isinstance(key, str): - tmp = np.where(name_els == key)[0] + if self._names_chronics_to_backend is not None and _nm_ch_bk_key in self._names_chronics_to_backend: + # initial action to set the state, might use the name in the time series... + nms_conv = self._names_chronics_to_backend[_nm_ch_bk_key] + key = nms_conv[key] + tmp = (name_els == key).nonzero()[0] if len(tmp) == 0: - raise IllegalAction(f"No known {name_el} with name {key}") + raise AmbiguousAction(f"No known {name_el} with name {key}") key = tmp[0] self._aux_affect_object_float( (key, new_val), @@ -5201,9 +5948,10 @@ def _aux_affect_object_float( name_els, inner_vect=inner_vect, outer_vect=outer_vect, + _nm_ch_bk_key=_nm_ch_bk_key ) else: - raise IllegalAction( + raise AmbiguousAction( f"Impossible to modify the {name_el} with inputs {values}. " f"Please see the documentation." ) @@ -5321,11 +6069,12 @@ def redispatch(self, values): self.name_gen, np.arange(self.n_gen), self._redispatch, + _nm_ch_bk_key="prods", ) self._modif_redispatch = True except Exception as exc_: self._redispatch[:] = orig_ - raise IllegalAction( + raise AmbiguousAction( f"Impossible to modify the redispatching with your input. " f"Please consult the documentation. " f'The error was:\n"{exc_}"' @@ -5378,11 +6127,11 @@ def storage_p(self, values): self._modif_storage = True except Exception as exc_: self._storage_power[:] = orig_ - raise IllegalAction( + raise AmbiguousAction( f"Impossible to modify the storage active power with your input. " f"Please consult the documentation. " f'The error was:\n"{exc_}"' - ) + ) from exc_ @property def set_storage(self) -> np.ndarray: @@ -5428,31 +6177,32 @@ def curtail(self, values): self.name_gen, np.arange(self.n_gen), self._curtail, + _nm_ch_bk_key="prods", ) self._modif_curtailment = True except Exception as exc_: self._curtail[:] = orig_ - raise IllegalAction( + raise AmbiguousAction( f"Impossible to perform curtailment with your input. " f"Please consult the documentation. " f'The error was:\n"{exc_}"' - ) + ) from exc_ def _aux_aux_convert_and_check_np_array(self, array_): try: array_ = np.array(array_) except Exception as exc_: - raise IllegalAction( + raise AmbiguousAction( f"When setting the topology by substation and by giving a tuple, the " f"second element of the tuple should be convertible to a numpy " f'array of type int. Error was: "{exc_}"' - ) + ) from exc_ if ( isinstance(array_.dtype, (bool, dt_bool)) or array_.dtype == dt_bool or array_.dtype == bool ): - raise IllegalAction( + raise AmbiguousAction( "To set substation topology, you need a vector of integers, and not a vector " "of bool."
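For reference, a hedged sketch (not from the patch) of the input formats accepted by the float helpers above, shown through the public `redispatch` property; `curtail` and `storage_p` go through the same `_aux_affect_object_float` routine. Generator ids and amounts are illustrative, and the targeted generators must be dispatchable for the action to be valid at step time.

    import grid2op
    import numpy as np

    env = grid2op.make("l2rpn_case14_sandbox")
    act = env.action_space()

    act.redispatch = (0, 2.5)                # a single (gen_id, amount in MW) tuple
    act.redispatch = [(0, 2.5), (1, -1.0)]   # a list of (gen_id, amount) pairs
    act.redispatch = {env.name_gen[0]: 2.5}  # a dict keyed by generator name
    full = np.zeros(env.n_gen, dtype=float)  # or a full vector with one entry per generator
    full[0] = 2.5
    act.redispatch = full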
) @@ -5461,45 +6211,46 @@ def _aux_aux_convert_and_check_np_array(self, array_): or array_.dtype == dt_float or array_.dtype == float ): - raise IllegalAction( + raise AmbiguousAction( "To set substation topology, you need a vector of integers, and not a vector " "of float." ) array_ = array_.astype(dt_int) if (array_ < -1).any(): - raise IllegalAction( + raise AmbiguousAction( f"Impossible to set element to bus {np.min(array_)}. Buses must be " f"-1, 0, 1 or 2." ) - if (array_ > 2).any(): - raise IllegalAction( + if (array_ > type(self).n_busbar_per_sub).any(): + raise AmbiguousAction( f"Impossible to set element to bus {np.max(array_)}. Buses must be " f"-1, 0, 1 or 2." ) return array_ def _aux_set_bus_sub(self, values): + cls = type(self) if isinstance(values, (bool, dt_bool)): - raise IllegalAction( + raise AmbiguousAction( "Impossible to modify bus by substation with a single bool." ) - elif isinstance(values, (int, dt_int, np.int64)): - raise IllegalAction( + elif isinstance(values, (int, dt_int, np.int64, np.int32)): + raise AmbiguousAction( "Impossible to modify bus by substation with a single integer." ) - elif isinstance(values, (float, dt_float, np.float64)): - raise IllegalAction( + elif isinstance(values, (float, dt_float, np.float64, np.float32)): + raise AmbiguousAction( "Impossible to modify bus by substation with a single float." ) elif isinstance(values, np.ndarray): # full topo vect - if values.shape[0] != self.dim_topo: - raise IllegalAction( + if values.shape[0] != cls.dim_topo: + raise AmbiguousAction( "Impossible to modify bus when providing a full topology vector " "that has not the right " ) if values.dtype == dt_bool or values.dtype == bool: - raise IllegalAction( + raise AmbiguousAction( "When using a full vector for setting the topology, it should be " "of integer types" ) @@ -5509,11 +6260,11 @@ def _aux_set_bus_sub(self, values): # should be a tuple (sub_id, new_topo) sub_id, topo_repr, nb_el = self._check_for_right_vectors_sub(values) topo_repr = self._aux_aux_convert_and_check_np_array(topo_repr) - start_ = self.sub_info[:sub_id].sum() + start_ = cls.sub_info[:sub_id].sum() end_ = start_ + nb_el self._set_topo_vect[start_:end_] = topo_repr elif isinstance(values, list): - if len(values) == self.dim_topo: + if len(values) == cls.dim_topo: # if list is the size of the full topo vect, it's a list representing it values = self._aux_aux_convert_and_check_np_array(values) self._aux_set_bus_sub(values) @@ -5521,7 +6272,7 @@ def _aux_set_bus_sub(self, values): # otherwise it should be a list of tuples: [(sub_id, topo), (sub_id, topo)] for el in values: if not isinstance(el, tuple): - raise IllegalAction( + raise AmbiguousAction( "When provided a list, it should be a list of tuples: " "[(sub_id, topo), (sub_id, topo), ... ] " ) @@ -5531,7 +6282,7 @@ def _aux_set_bus_sub(self, values): sub_id = self._aux_sub_when_dict_get_id(sub_id) self._aux_set_bus_sub((sub_id, topo_repr)) else: - raise IllegalAction( + raise AmbiguousAction( "Impossible to set the topology by substation with your input." "Please consult the documentation." ) @@ -5555,7 +6306,7 @@ def sub_set_bus(self, values): self._modif_set_bus = True except Exception as exc_: self._set_topo_vect[:] = orig_ - raise IllegalAction( + raise AmbiguousAction( f"Impossible to modify the substation bus with your input. " f"Please consult the documentation. 
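A short sketch (not from the patch) of the `(sub_id, topology_vector)` form handled by `_aux_set_bus_sub` and the `sub_set_bus` setter above. The substation id and the chosen split are illustrative; whether the resulting topology is legal still depends on the grid and on `n_busbar_per_sub`.

    import grid2op
    import numpy as np

    env = grid2op.make("l2rpn_case14_sandbox")
    act = env.action_space()

    sub_id = 1
    n_el = env.sub_info[sub_id]         # number of elements connected to this substation
    topo = np.ones(n_el, dtype=int)     # everything on busbar 1 ...
    topo[-2:] = 2                       # ... except the last two elements, moved to busbar 2
    act.sub_set_bus = (sub_id, topo)    # same effect as {"set_bus": {"substations_id": [(sub_id, topo)]}}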
" f'The error was:\n"{exc_}"' @@ -5575,7 +6326,7 @@ def _aux_aux_convert_and_check_np_array_change(self, array_): or array_.dtype == dt_int or array_.dtype == int ): - raise IllegalAction( + raise AmbiguousAction( "To change substation topology, you need a vector of bools, and not a vector " "of int." ) @@ -5584,7 +6335,7 @@ def _aux_aux_convert_and_check_np_array_change(self, array_): or array_.dtype == dt_float or array_.dtype == float ): - raise IllegalAction( + raise AmbiguousAction( "To change substation topology, you need a vector of bools, and not a vector " "of float." ) @@ -5593,31 +6344,31 @@ def _aux_aux_convert_and_check_np_array_change(self, array_): def _check_for_right_vectors_sub(self, values): if len(values) != 2: - raise IllegalAction( + raise AmbiguousAction( "Impossible to set the topology of a substation with a tuple which " "has not a size of 2 (substation_id, topology_representation)" ) sub_id, topo_repr = values if isinstance(sub_id, (bool, dt_bool)): - raise IllegalAction("Substation id should be integer") + raise AmbiguousAction("Substation id should be integer") if isinstance(sub_id, (float, dt_float, np.float64)): - raise IllegalAction("Substation id should be integer") + raise AmbiguousAction("Substation id should be integer") try: el_id = int(sub_id) except Exception as exc_: - raise IllegalAction( + raise AmbiguousAction( f"Substation id should be convertible to integer. " f'Error was "{exc_}"' - ) + ) from exc_ try: size_ = len(topo_repr) except Exception as exc_: - raise IllegalAction( + raise AmbiguousAction( f"Topology cannot be set with your input." f'Error was "{exc_}"' - ) + ) from exc_ nb_el = self.sub_info[el_id] if size_ != nb_el: - raise IllegalAction( + raise AmbiguousAction( f"To set topology of a substation, you must provide the full list of the " f"elements you want to modify. You provided a vector with {size_} components " f"while there are {self.sub_info[el_id]} on the substation." @@ -5627,26 +6378,26 @@ def _check_for_right_vectors_sub(self, values): def _aux_change_bus_sub(self, values): if isinstance(values, (bool, dt_bool)): - raise IllegalAction( + raise AmbiguousAction( "Impossible to modify bus by substation with a single bool." ) elif isinstance(values, (int, dt_int, np.int64)): - raise IllegalAction( + raise AmbiguousAction( "Impossible to modify bus by substation with a single integer." ) elif isinstance(values, (float, dt_float, np.float64)): - raise IllegalAction( + raise AmbiguousAction( "Impossible to modify bus by substation with a single float." ) elif isinstance(values, np.ndarray): # full topo vect if values.shape[0] != self.dim_topo: - raise IllegalAction( + raise AmbiguousAction( "Impossible to modify bus when providing a full topology vector " "that has not the right size." ) if values.dtype == dt_int or values.dtype == int: - raise IllegalAction( + raise AmbiguousAction( "When using a full vector for setting the topology, it should be " "of bool types" ) @@ -5669,7 +6420,7 @@ def _aux_change_bus_sub(self, values): # otherwise it should be a list of tuples: [(sub_id, topo), (sub_id, topo)] for el in values: if not isinstance(el, tuple): - raise IllegalAction( + raise AmbiguousAction( "When provided a list, it should be a list of tuples: " "[(sub_id, topo), (sub_id, topo), ... 
] " ) @@ -5679,19 +6430,19 @@ def _aux_change_bus_sub(self, values): sub_id = self._aux_sub_when_dict_get_id(sub_id) self._aux_change_bus_sub((sub_id, topo_repr)) else: - raise IllegalAction( + raise AmbiguousAction( "Impossible to set the topology by substation with your input." "Please consult the documentation." ) def _aux_sub_when_dict_get_id(self, sub_id): if isinstance(sub_id, str): - tmp = np.where(self.name_sub == sub_id)[0] + tmp = (self.name_sub == sub_id).nonzero()[0] if len(tmp) == 0: - raise IllegalAction(f"No substation named {sub_id}") + raise AmbiguousAction(f"No substation named {sub_id}") sub_id = tmp[0] elif not isinstance(sub_id, int): - raise IllegalAction( + raise AmbiguousAction( f"When using a dictionary it should be either with key = name of the " f"substation or key = id of the substation. You provided neither string nor" f"int but {type(sub_id)}." @@ -5707,7 +6458,7 @@ def sub_change_bus(self) -> np.ndarray: @sub_change_bus.setter def sub_change_bus(self, values): if "change_bus" not in self.authorized_keys: - raise IllegalAction( + raise AmbiguousAction( 'Impossible to modify the substation bus (with "change") with this action type.' ) orig_ = self.sub_change_bus @@ -5716,7 +6467,7 @@ def sub_change_bus(self, values): self._modif_change_bus = True except Exception as exc_: self._change_bus_vect[:] = orig_ - raise IllegalAction( + raise AmbiguousAction( f"Impossible to modify the substation bus with your input. " f"Please consult the documentation. " f'The error was:\n"{exc_}"' @@ -5756,6 +6507,7 @@ def curtailment_mw_to_ratio(self, curtailment_mw) -> np.ndarray: self.name_gen, np.arange(self.n_gen), values, + _nm_ch_bk_key="prods", ) values /= self.gen_pmax values[values >= 1.0] = 1.0 @@ -5791,7 +6543,7 @@ def curtail_mw(self, values_mw): self.curtail = self.curtailment_mw_to_ratio(values_mw) def limit_curtail_storage(self, - obs: "BaseObservation", + obs: "grid2op.Observation.BaseObservation", margin: float=10., do_copy: bool=False, _tol_equal : float=0.01) -> Tuple["BaseAction", np.ndarray, np.ndarray]: @@ -5896,7 +6648,7 @@ def limit_curtail_storage(self, total_storage_consumed = res._storage_power.sum() # curtailment - gen_curtailed = (res._curtail != -1) & cls.gen_renewable + gen_curtailed = (np.abs(res._curtail + 1) >= 1e-7) & cls.gen_renewable gen_curtailed &= ( (obs.gen_p > res._curtail * cls.gen_pmax) | (obs.gen_p_before_curtail > obs.gen_p )) gen_p_after_max = (res._curtail * cls.gen_pmax)[gen_curtailed] @@ -5998,7 +6750,7 @@ def _aux_decompose_as_unary_actions_change_ls(self, cls, group_line_status, res) tmp._switch_line_status = copy.deepcopy(self._switch_line_status) res["change_line_status"] = [tmp] else: - lines_changed = np.where(self._switch_line_status)[0] + lines_changed = (self._switch_line_status).nonzero()[0] res["change_line_status"] = [] for l_id in lines_changed: tmp = cls() @@ -6030,7 +6782,7 @@ def _aux_decompose_as_unary_actions_set_ls(self, cls, group_line_status, res): tmp._set_line_status = 1 * self._set_line_status res["set_line_status"] = [tmp] else: - lines_changed = np.where(self._set_line_status != 0)[0] + lines_changed = (self._set_line_status != 0).nonzero()[0] res["set_line_status"] = [] for l_id in lines_changed: tmp = cls() @@ -6045,7 +6797,7 @@ def _aux_decompose_as_unary_actions_redisp(self, cls, group_redispatch, res): tmp._redispatch = 1. 
* self._redispatch res["redispatch"] = [tmp] else: - gen_changed = np.where(self._redispatch != 0.)[0] + gen_changed = (np.abs(self._redispatch) >= 1e-7).nonzero()[0] res["redispatch"] = [] for g_id in gen_changed: tmp = cls() @@ -6060,7 +6812,7 @@ def _aux_decompose_as_unary_actions_storage(self, cls, group_storage, res): tmp._storage_power = 1. * self._storage_power res["set_storage"] = [tmp] else: - sto_changed = np.where(self._storage_power != 0.)[0] + sto_changed = (np.abs(self._storage_power) >= 1e-7).nonzero()[0] res["set_storage"] = [] for s_id in sto_changed: tmp = cls() @@ -6075,7 +6827,7 @@ def _aux_decompose_as_unary_actions_curtail(self, cls, group_curtailment, res): tmp._curtail = 1. * self._curtail res["curtail"] = [tmp] else: - gen_changed = np.where(self._curtail != -1.)[0] + gen_changed = (np.abs(self._curtail + 1.) >= 1e-7).nonzero()[0] #self._curtail != -1 res["curtail"] = [] for g_id in gen_changed: tmp = cls() @@ -6088,7 +6840,14 @@ def decompose_as_unary_actions(self, group_line_status=False, group_redispatch=True, group_storage=True, - group_curtail=True) -> dict: + group_curtail=True) -> Dict[Literal["change_bus", + "set_bus", + "change_line_status", + "set_line_status", + "redispatch", + "set_storage", + "curtail"], + List["BaseAction"]]: """This function allows to split a possibly "complex" action into its "unary" counterpart. @@ -6161,7 +6920,6 @@ def decompose_as_unary_actions(self, tmp += a assert tmp == act - Parameters ---------- group_topo : bool, optional @@ -6229,4 +6987,57 @@ def decompose_as_unary_actions(self, def from_switches(cls, obs, switches_list): # TODO detailed topo pass - \ No newline at end of file + + + def _add_act_and_remove_line_status_only_set(self, other: "BaseAction") -> "BaseAction": + """INTERNAL + + This is used by the environment when combining action in the "set state" in env.reset. + + It supposes both self and other are only "set" actions + + .. versionadded:: 1.10.2 + + """ + self += other + cls = type(self) + # switch off in self the element disconnected in other + switched_off = other._set_line_status == -1 + switched_off |= other._set_topo_vect[cls.line_or_pos_topo_vect] == -1 + switched_off |= other._set_topo_vect[cls.line_ex_pos_topo_vect] == -1 + self._set_topo_vect[cls.line_or_pos_topo_vect[switched_off]] = -1 + self._set_topo_vect[cls.line_ex_pos_topo_vect[switched_off]] = -1 + self._set_line_status[switched_off] = -1 + + # switch on in self the element reconnected in other + switched_on = other._set_line_status == 1 + switched_on |= other._set_topo_vect[cls.line_or_pos_topo_vect] > 0 + switched_on |= other._set_topo_vect[cls.line_ex_pos_topo_vect] > 0 + self._set_line_status[switched_on] = 1 + # "reconnect" object through topo vect + topo_vect = other._set_topo_vect > 0 + self._set_topo_vect[topo_vect] = other._set_topo_vect[topo_vect] + + if (self._set_line_status != 0).any(): + self._modif_set_status = True + if (self._set_topo_vect != 0).any(): + self._modif_set_bus = True + return self + + def remove_change(self) -> "BaseAction": + """This function will modify 'self' and remove all "change" action type. + + It is mainly used in the environment, when removing the "change" type for setting the original + state of the grid. + + .. 
versionadded:: 1.10.2 + + """ + if self._change_bus_vect.any(): + warnings.warn("This action modified the buses with `change_bus` ") + self._change_bus_vect[:] = False + self._modif_change_bus = False + if self._switch_line_status.any(): + self._switch_line_status[:] = False + self._modif_change_status = False + return self diff --git a/grid2op/Action/completeAction.py b/grid2op/Action/completeAction.py index 548b59009..7b7fef238 100644 --- a/grid2op/Action/completeAction.py +++ b/grid2op/Action/completeAction.py @@ -5,6 +5,8 @@ # you can obtain one at http://mozilla.org/MPL/2.0/. # SPDX-License-Identifier: MPL-2.0 # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. + +from typing import Optional, Dict, Literal from grid2op.Action.baseAction import BaseAction @@ -16,5 +18,5 @@ class CompleteAction(BaseAction): class is used by the chronics, the environment the opponent or the voltage controler for example. """ - def __init__(self): - BaseAction.__init__(self) + def __init__(self, _names_chronics_to_backend: Optional[Dict[Literal["loads", "prods", "lines"], Dict[str, str]]]=None): + BaseAction.__init__(self, _names_chronics_to_backend) diff --git a/grid2op/Action/dispatchAction.py b/grid2op/Action/dispatchAction.py index b0ec07fc9..63e24f971 100644 --- a/grid2op/Action/dispatchAction.py +++ b/grid2op/Action/dispatchAction.py @@ -6,6 +6,7 @@ # SPDX-License-Identifier: MPL-2.0 # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. +from typing import Optional, Dict, Literal from grid2op.Action.playableAction import PlayableAction @@ -22,5 +23,5 @@ class DispatchAction(PlayableAction): attr_list_vect = ["_redispatch"] attr_list_set = set(attr_list_vect) - def __init__(self): - super().__init__() + def __init__(self, _names_chronics_to_backend: Optional[Dict[Literal["loads", "prods", "lines"], Dict[str, str]]]=None): + super().__init__(_names_chronics_to_backend) diff --git a/grid2op/Action/dontAct.py b/grid2op/Action/dontAct.py index 09fd0d1c1..b8ce0be41 100644 --- a/grid2op/Action/dontAct.py +++ b/grid2op/Action/dontAct.py @@ -6,6 +6,7 @@ # SPDX-License-Identifier: MPL-2.0 # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. +from typing import Optional, Dict, Literal from grid2op.Action.playableAction import PlayableAction @@ -28,7 +29,7 @@ class DontAct(PlayableAction): authorized_keys = set() attr_list_vect = [] - def __init__(self): + def __init__(self, _names_chronics_to_backend: Optional[Dict[Literal["loads", "prods", "lines"], Dict[str, str]]]=None): """ INTERNAL @@ -38,7 +39,7 @@ def __init__(self): more is done in this constructor. """ - PlayableAction.__init__(self) + PlayableAction.__init__(self, _names_chronics_to_backend) def update(self, dict_): """ diff --git a/grid2op/Action/playableAction.py b/grid2op/Action/playableAction.py index fd854863e..dc6a65be9 100644 --- a/grid2op/Action/playableAction.py +++ b/grid2op/Action/playableAction.py @@ -7,6 +7,7 @@ # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. 
import warnings +from typing import Optional, Dict, Literal from grid2op.Exceptions import AmbiguousAction from grid2op.Action.baseAction import BaseAction @@ -44,8 +45,8 @@ class PlayableAction(BaseAction): attr_list_set = set(attr_list_vect) shunt_added = True # no shunt here - def __init__(self): - super().__init__() + def __init__(self, _names_chronics_to_backend: Optional[Dict[Literal["loads", "prods", "lines"], Dict[str, str]]]=None): + super().__init__(_names_chronics_to_backend) self.authorized_keys_to_digest = { "set_line_status": self._digest_set_status, diff --git a/grid2op/Action/powerlineChangeAction.py b/grid2op/Action/powerlineChangeAction.py index e678d6a03..8dce42337 100644 --- a/grid2op/Action/powerlineChangeAction.py +++ b/grid2op/Action/powerlineChangeAction.py @@ -6,6 +6,8 @@ # SPDX-License-Identifier: MPL-2.0 # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. +from typing import Optional, Dict, Literal + from grid2op.Action.playableAction import PlayableAction @@ -23,5 +25,5 @@ class PowerlineChangeAction(PlayableAction): attr_list_vect = ["_switch_line_status"] attr_list_set = set(attr_list_vect) - def __init__(self): - super().__init__() + def __init__(self, _names_chronics_to_backend: Optional[Dict[Literal["loads", "prods", "lines"], Dict[str, str]]]=None): + super().__init__(_names_chronics_to_backend) diff --git a/grid2op/Action/powerlineChangeAndDispatchAction.py b/grid2op/Action/powerlineChangeAndDispatchAction.py index 759d241e0..58fd38462 100644 --- a/grid2op/Action/powerlineChangeAndDispatchAction.py +++ b/grid2op/Action/powerlineChangeAndDispatchAction.py @@ -6,6 +6,8 @@ # SPDX-License-Identifier: MPL-2.0 # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. +from typing import Optional, Dict, Literal + from grid2op.Action.playableAction import PlayableAction @@ -24,5 +26,5 @@ class PowerlineChangeAndDispatchAction(PlayableAction): attr_list_vect = ["_switch_line_status", "_redispatch"] attr_list_set = set(attr_list_vect) - def __init__(self): - super().__init__() + def __init__(self, _names_chronics_to_backend: Optional[Dict[Literal["loads", "prods", "lines"], Dict[str, str]]]=None): + super().__init__(_names_chronics_to_backend) diff --git a/grid2op/Action/powerlineChangeDispatchAndStorageAction.py b/grid2op/Action/powerlineChangeDispatchAndStorageAction.py index 7a0dfa0d0..5a5111a7f 100644 --- a/grid2op/Action/powerlineChangeDispatchAndStorageAction.py +++ b/grid2op/Action/powerlineChangeDispatchAndStorageAction.py @@ -6,6 +6,8 @@ # SPDX-License-Identifier: MPL-2.0 # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. 
+from typing import Optional, Dict, Literal + from grid2op.Action.playableAction import PlayableAction @@ -20,5 +22,5 @@ class PowerlineChangeDispatchAndStorageAction(PlayableAction): attr_list_set = set(attr_list_vect) - def __init__(self): - super().__init__() + def __init__(self, _names_chronics_to_backend: Optional[Dict[Literal["loads", "prods", "lines"], Dict[str, str]]]=None): + super().__init__(_names_chronics_to_backend) diff --git a/grid2op/Action/powerlineSetAction.py b/grid2op/Action/powerlineSetAction.py index 81c6b67b9..cd477f784 100644 --- a/grid2op/Action/powerlineSetAction.py +++ b/grid2op/Action/powerlineSetAction.py @@ -6,6 +6,8 @@ # SPDX-License-Identifier: MPL-2.0 # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. +from typing import Optional, Dict, Literal + from grid2op.Action.playableAction import PlayableAction @@ -23,5 +25,5 @@ class PowerlineSetAction(PlayableAction): attr_list_vect = ["_set_line_status"] attr_list_set = set(attr_list_vect) - def __init__(self): - super().__init__() + def __init__(self, _names_chronics_to_backend: Optional[Dict[Literal["loads", "prods", "lines"], Dict[str, str]]]=None): + super().__init__(_names_chronics_to_backend) diff --git a/grid2op/Action/powerlineSetAndDispatchAction.py b/grid2op/Action/powerlineSetAndDispatchAction.py index 97920d65a..bb6cefab3 100644 --- a/grid2op/Action/powerlineSetAndDispatchAction.py +++ b/grid2op/Action/powerlineSetAndDispatchAction.py @@ -6,6 +6,8 @@ # SPDX-License-Identifier: MPL-2.0 # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. +from typing import Optional, Dict, Literal + from grid2op.Action.playableAction import PlayableAction @@ -33,5 +35,5 @@ class PowerlineSetAndDispatchAction(PlayableAction): attr_list_set = set(attr_list_vect) - def __init__(self): - super().__init__() + def __init__(self, _names_chronics_to_backend: Optional[Dict[Literal["loads", "prods", "lines"], Dict[str, str]]]=None): + super().__init__(_names_chronics_to_backend) diff --git a/grid2op/Action/serializableActionSpace.py b/grid2op/Action/serializableActionSpace.py index d7cee94cf..79f409336 100644 --- a/grid2op/Action/serializableActionSpace.py +++ b/grid2op/Action/serializableActionSpace.py @@ -9,8 +9,13 @@ import warnings import numpy as np import itertools -from typing import Dict, List +from typing import Dict, List, Literal +try: + from typing import Self +except ImportError: + from typing_extensions import Self +import grid2op from grid2op.dtypes import dt_int, dt_float, dt_bool from grid2op.Exceptions import AmbiguousAction, Grid2OpException from grid2op.Space import SerializableSpace @@ -49,7 +54,7 @@ class SerializableActionSpace(SerializableSpace): '"which is not the type of action handled by this action space "' '("{}")') - def __init__(self, gridobj, actionClass=BaseAction, _init_grid=True): + def __init__(self, gridobj, actionClass=BaseAction, _init_grid=True, _local_dir_cls=None): """ INTERNAL USE ONLY @@ -69,7 +74,10 @@ def __init__(self, gridobj, actionClass=BaseAction, _init_grid=True): """ SerializableSpace.__init__( - self, gridobj=gridobj, subtype=actionClass, _init_grid=_init_grid + self, gridobj=gridobj, + subtype=actionClass, + _init_grid=_init_grid, + _local_dir_cls=_local_dir_cls ) self.actionClass = self.subtype self._template_act = self.actionClass() @@ -112,15 +120,27 @@ def _get_possible_action_types(self): rnd_types.append(cls.CHANGE_BUS_ID) if "redispatch" in 
self.actionClass.authorized_keys: rnd_types.append(cls.REDISPATCHING_ID) - if self.n_storage > 0 and "storage_power" in self.actionClass.authorized_keys: + if cls.n_storage > 0 and "storage_power" in self.actionClass.authorized_keys: rnd_types.append(cls.STORAGE_POWER_ID) - if self.dim_alarms > 0 and "raise_alarm" in self.actionClass.authorized_keys: + if cls.dim_alarms > 0 and "raise_alarm" in self.actionClass.authorized_keys: rnd_types.append(cls.RAISE_ALARM_ID) - if self.dim_alerts > 0 and "raise_alert" in self.actionClass.authorized_keys: + if cls.dim_alerts > 0 and "raise_alert" in self.actionClass.authorized_keys: rnd_types.append(cls.RAISE_ALERT_ID) return rnd_types - def supports_type(self, action_type): + def supports_type(self, + action_type: Literal["set_line_status", + "change_line_status", + "set_bus", + "change_bus", + "redispatch", + "storage_power", + "set_storage", + "curtail", + "curtail_mw", + "raise_alarm", + "raise_alert"] + ): """ Returns if the current action_space supports the current action type. @@ -128,7 +148,7 @@ def supports_type(self, action_type): ---------- action_type: ``str`` One of "set_line_status", "change_line_status", "set_bus", "change_bus", "redispatch", - "storage_power", "set_storage", "curtail" or "curtail_mw" + "storage_power", "set_storage", "curtail", "curtail_mw", "raise_alarm" or "raise_alert" A string representing the action types you want to inspect. Returns @@ -170,17 +190,13 @@ def supports_type(self, action_type): f"The action type provided should be in {name_action_types}. " f"You provided {action_type} which is not supported." ) - - if action_type == "storage_power": - return (self.n_storage > 0) and ( - "set_storage" in self.actionClass.authorized_keys - ) - elif action_type == "set_storage": - return (self.n_storage > 0) and ( + cls = type(self) + if action_type == "storage_power" or action_type == "set_storage": + return (cls.n_storage > 0) and ( "set_storage" in self.actionClass.authorized_keys ) elif action_type == "curtail_mw": - return "curtail" in self.actionClass.authorized_keys + return self.supports_type("curtail") else: return action_type in self.actionClass.authorized_keys @@ -246,8 +262,10 @@ def _sample_storage_power(self, rnd_update=None): return rnd_update def _sample_raise_alarm(self, rnd_update=None): - """.. warning:: + """ + .. warning:: /!\\\\ Only valid with "l2rpn_icaps_2021" environment /!\\\\ + """ if rnd_update is None: rnd_update = {} @@ -256,13 +274,18 @@ def _sample_raise_alarm(self, rnd_update=None): return rnd_update def _sample_raise_alert(self, rnd_update=None): + """ + .. warning:: + Not available in all environments. + + """ if rnd_update is None: rnd_update = {} rnd_alerted_lines = self.space_prng.choice([True, False], self.dim_alerts).astype(dt_bool) rnd_update["raise_alert"] = rnd_alerted_lines return rnd_update - def sample(self): + def sample(self) -> BaseAction: """ A utility used to sample a new random :class:`BaseAction`. 
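A hedged usage sketch for `supports_type` (not from the patch): a quick capability check before building actions of a given kind. The environment name is a placeholder.

    import grid2op

    env = grid2op.make("l2rpn_case14_sandbox")
    space = env.action_space

    if space.supports_type("redispatch"):
        act = space({"redispatch": [(0, 1.0)]})
    if space.supports_type("set_storage"):   # False on grids without storage units
        act = space({"set_storage": [(0, 1.0)]})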
@@ -303,7 +326,7 @@ def sample(self): env = grid2op.make("l2rpn_case14_sandbox") # and now you can sample from the action space - random_action = env.action_space() + random_action = env.action_space() # this action is not random at all, it starts by "do nothing" for i in range(5): # my resulting action will be a complex action # that will be the results of applying 5 random actions @@ -322,22 +345,22 @@ def sample(self): # this sampling rnd_type = self.space_prng.choice(rnd_types) - - if rnd_type == self.SET_STATUS_ID: + cls = type(self) + if rnd_type == cls.SET_STATUS_ID: rnd_update = self._sample_set_line_status() - elif rnd_type == self.CHANGE_STATUS_ID: + elif rnd_type == cls.CHANGE_STATUS_ID: rnd_update = self._sample_change_line_status() - elif rnd_type == self.SET_BUS_ID: + elif rnd_type == cls.SET_BUS_ID: rnd_update = self._sample_set_bus() - elif rnd_type == self.CHANGE_BUS_ID: + elif rnd_type == cls.CHANGE_BUS_ID: rnd_update = self._sample_change_bus() - elif rnd_type == self.REDISPATCHING_ID: + elif rnd_type == cls.REDISPATCHING_ID: rnd_update = self._sample_redispatch() - elif rnd_type == self.STORAGE_POWER_ID: + elif rnd_type == cls.STORAGE_POWER_ID: rnd_update = self._sample_storage_power() - elif rnd_type == self.RAISE_ALARM_ID: + elif rnd_type == cls.RAISE_ALARM_ID: rnd_update = self._sample_raise_alarm() - elif rnd_type == self.RAISE_ALERT_ID: + elif rnd_type == cls.RAISE_ALERT_ID: rnd_update = self._sample_raise_alert() else: raise Grid2OpException( @@ -347,7 +370,10 @@ def sample(self): rnd_act.update(rnd_update) return rnd_act - def disconnect_powerline(self, line_id=None, line_name=None, previous_action=None): + def disconnect_powerline(self, + line_id: int=None, + line_name: str=None, + previous_action: BaseAction=None) -> BaseAction: """ Utilities to disconnect a powerline more easily. @@ -396,6 +422,7 @@ def disconnect_powerline(self, line_id=None, line_name=None, previous_action=Non # after the last call! """ + cls = type(self) if line_id is None and line_name is None: raise AmbiguousAction( 'You need to provide either the "line_id" or the "line_name" of the powerline ' @@ -408,11 +435,11 @@ def disconnect_powerline(self, line_id=None, line_name=None, previous_action=Non ) if line_id is None: - line_id = np.where(self.name_line == line_name)[0] + line_id = (cls.name_line == line_name).nonzero()[0] if not len(line_id): raise AmbiguousAction( 'Line with name "{}" is not on the grid. The powerlines names are:\n{}' - "".format(line_name, self.name_line) + "".format(line_name, cls.name_line) ) if previous_action is None: res = self.actionClass() @@ -422,17 +449,22 @@ def disconnect_powerline(self, line_id=None, line_name=None, previous_action=Non type(self).ERR_MSG_WRONG_TYPE.format(type(previous_action), self.actionClass) ) res = previous_action - if line_id > self.n_line: + if line_id > cls.n_line: raise AmbiguousAction( "You asked to disconnect powerline of id {} but this id does not exist. The " - "grid counts only {} powerline".format(line_id, self.n_line) + "grid counts only {} powerline".format(line_id, cls.n_line) ) res.update({"set_line_status": [(line_id, -1)]}) return res def reconnect_powerline( - self, bus_or, bus_ex, line_id=None, line_name=None, previous_action=None - ): + self, + bus_or: int, + bus_ex: int, + line_id: int=None, + line_name: str=None, + previous_action: BaseAction=None + ) -> BaseAction: """ Utilities to reconnect a powerline more easily. @@ -457,10 +489,10 @@ def reconnect_powerline( The powerline to be disconnected. 
bus_or: ``int`` - On which bus to reconnect the powerline at its origin end + On which bus to reconnect the powerline at its origin side bus_ex: ``int`` - On which bus to reconnect the powerline at its extremity end + On which bus to reconnect the powerline at its extremity side previous_action Returns @@ -503,19 +535,19 @@ def reconnect_powerline( 'You need to provide only of the "line_id" or the "line_name" of the powerline ' "you want to reconnect" ) - + cls = type(self) if line_id is None: - line_id = np.where(self.name_line == line_name)[0] + line_id = (cls.name_line == line_name).nonzero()[0] if previous_action is None: res = self.actionClass() else: if not isinstance(previous_action, self.actionClass): raise AmbiguousAction( - type(self).ERR_MSG_WRONG_TYPE.format(type(previous_action), self.actionClass) + cls.ERR_MSG_WRONG_TYPE.format(type(previous_action), self.actionClass) ) res = previous_action - if line_id > self.n_line: + if line_id > cls.n_line: raise AmbiguousAction( "You asked to disconnect powerline of id {} but this id does not exist. The " "grid counts only {} powerline".format(line_id, self.n_line) @@ -533,12 +565,12 @@ def reconnect_powerline( def change_bus( self, - name_element, - extremity=None, - substation=None, - type_element=None, - previous_action=None, - ): + name_element : str, + extremity : Literal["or", "ex"] =None, + substation: int=None, + type_element :str=None, + previous_action: BaseAction=None, + ) -> BaseAction: """ Utilities to change the bus of a single element if you give its name. **NB** Changing a bus has the effect to assign the object to bus 1 if it was before that connected to bus 2, and to assign it to bus 2 if it was @@ -557,7 +589,7 @@ def change_bus( Its substation ID, if you know it will increase the performance. Otherwise, the method will search for it. type_element: ``str``, optional Type of the element to look for. It is here to speed up the computation. One of "line", "gen" or "load" - previous_action: :class:`Action`, optional + previous_action: :class:`BaseAction`, optional The (optional) action to update. 
It should be of the same type as :attr:`ActionSpace.actionClass` Notes @@ -622,15 +654,16 @@ def change_bus( res.update({"change_bus": {"substations_id": [(my_sub_id, arr_)]}}) return res - def _extract_database_powerline(self, extremity): + @classmethod + def _extract_database_powerline(cls, extremity: Literal["or", "ex"]): if extremity[:2] == "or": - to_subid = self.line_or_to_subid - to_sub_pos = self.line_or_to_sub_pos - to_name = self.name_line + to_subid = cls.line_or_to_subid + to_sub_pos = cls.line_or_to_sub_pos + to_name = cls.name_line elif extremity[:2] == "ex": - to_subid = self.line_ex_to_subid - to_sub_pos = self.line_ex_to_sub_pos - to_name = self.name_line + to_subid = cls.line_ex_to_subid + to_sub_pos = cls.line_ex_to_sub_pos + to_name = cls.name_line elif extremity is None: raise Grid2OpException( "It is mandatory to know on which ends you want to change the bus of the powerline" @@ -653,18 +686,18 @@ def _extract_dict_action( to_subid = None to_sub_pos = None to_name = None - + cls = type(self) if type_element is None: # i have to look through all the objects to find it - if name_element in self.name_load: - to_subid = self.load_to_subid - to_sub_pos = self.load_to_sub_pos - to_name = self.name_load - elif name_element in self.name_gen: - to_subid = self.gen_to_subid - to_sub_pos = self.gen_to_sub_pos - to_name = self.name_gen - elif name_element in self.name_line: + if name_element in cls.name_load: + to_subid = cls.load_to_subid + to_sub_pos = cls.load_to_sub_pos + to_name = cls.name_load + elif name_element in cls.name_gen: + to_subid = cls.gen_to_subid + to_sub_pos = cls.gen_to_sub_pos + to_name = cls.name_gen + elif name_element in cls.name_line: to_subid, to_sub_pos, to_name = self._extract_database_powerline( extremity ) @@ -675,13 +708,13 @@ def _extract_dict_action( elif type_element == "line": to_subid, to_sub_pos, to_name = self._extract_database_powerline(extremity) elif type_element[:3] == "gen" or type_element[:4] == "prod": - to_subid = self.gen_to_subid - to_sub_pos = self.gen_to_sub_pos - to_name = self.name_gen + to_subid = cls.gen_to_subid + to_sub_pos = cls.gen_to_sub_pos + to_name = cls.name_gen elif type_element == "load": - to_subid = self.load_to_subid - to_sub_pos = self.load_to_sub_pos - to_name = self.name_load + to_subid = cls.load_to_subid + to_sub_pos = cls.load_to_sub_pos + to_name = cls.name_load else: raise AmbiguousAction( 'unknown type_element specifier "{}". type_element should be "line" or "load" ' @@ -704,13 +737,13 @@ def _extract_dict_action( def set_bus( self, - name_element, - new_bus, - extremity=None, - substation=None, - type_element=None, - previous_action=None, - ): + name_element :str, + new_bus :int, + extremity: Literal["or", "ex"]=None, + substation: int=None, + type_element: int=None, + previous_action: BaseAction=None, + ) -> BaseAction: """ Utilities to set the bus of a single element if you give its name. **NB** Setting a bus has the effect to assign the object to this bus. If it was before that connected to bus 1, and you assign it to bus 1 (*new_bus* @@ -737,7 +770,7 @@ def set_bus( type_element: ``str``, optional Type of the element to look for. It is here to speed up the computation. One of "line", "gen" or "load" - previous_action: :class:`Action`, optional + previous_action: :class:`BaseAction`, optional The (optional) action to update. 
It should be of the same type as :attr:`ActionSpace.actionClass` Returns @@ -791,7 +824,7 @@ def set_bus( res.update({"set_bus": {"substations_id": [(my_sub_id, dict_["set_bus"])]}}) return res - def get_set_line_status_vect(self): + def get_set_line_status_vect(self) -> np.ndarray: """ Computes and returns a vector that can be used in the "set_status" keyword if building an :class:`BaseAction` @@ -803,7 +836,7 @@ def get_set_line_status_vect(self): """ return self._template_act.get_set_line_status_vect() - def get_change_line_status_vect(self): + def get_change_line_status_vect(self) -> np.ndarray: """ Computes and return a vector that can be used in the "change_line_status" keyword if building an :class:`BaseAction` @@ -816,11 +849,12 @@ def get_change_line_status_vect(self): return self._template_act.get_change_line_status_vect() @staticmethod - def get_all_unitary_line_set(action_space): + def get_all_unitary_line_set(action_space: Self) -> List[BaseAction]: """ Return all unitary actions that "set" powerline status. - For each powerline, there are 5 such actions: + For each powerline, if there are 2 busbars per substation, + there are 5 such actions: - disconnect it - connected it origin at bus 1 and extremity at bus 1 @@ -828,9 +862,18 @@ def get_all_unitary_line_set(action_space): - connected it origin at bus 2 and extremity at bus 1 - connected it origin at bus 2 and extremity at bus 2 + This number increases quite rapidly if there are more busbars + allowed per substation of course. For example if you allow + for 3 busbars per substations, it goes from (1 + 2*2) [=5] + to (1 + 3 * 3) [=10] and if you allow for 4 busbars per substations + you end up with (1 + 4 * 4) [=17] possible actions per powerline. + + .. seealso:: + :func:`SerializableActionSpace.get_all_unitary_line_set_simple` + Parameters ---------- - action_space: :class:`grid2op.BaseAction.ActionSpace` + action_space: :class:`ActionSpace` The action space used. Returns @@ -840,24 +883,23 @@ def get_all_unitary_line_set(action_space): """ res = [] - + cls = type(action_space) # powerline switch: disconnection - for i in range(action_space.n_line): - res.append(action_space.disconnect_powerline(line_id=i)) - - # powerline switch: reconnection - for bus_or in [1, 2]: - for bus_ex in [1, 2]: - for i in range(action_space.n_line): - act = action_space.reconnect_powerline( - line_id=i, bus_ex=bus_ex, bus_or=bus_or - ) - res.append(act) + for i in range(cls.n_line): + res.append(action_space.disconnect_powerline(line_id=i)) + + all_busbars = list(range(1, cls.n_busbar_per_sub + 1)) + for bus1, bus2 in itertools.product(all_busbars, all_busbars): + for i in range(cls.n_line): + act = action_space.reconnect_powerline( + line_id=i, bus_ex=bus1, bus_or=bus2 + ) + res.append(act) return res @staticmethod - def get_all_unitary_line_set_simple(action_space): + def get_all_unitary_line_set_simple(action_space: Self) -> List[BaseAction]: """ Return all unitary actions that "set" powerline status but in a more simple way than :func:`SerializableActionSpace.get_all_unitary_line_set` @@ -869,12 +911,19 @@ def get_all_unitary_line_set_simple(action_space): side used to be connected) It has the main advantages to "only" add 2 actions per powerline - instead of 5. + instead of 5 (if the number of busbars per substation is 2). + + Using this method, powerlines will always be reconnected to their + previous busbars (the last known one) and you will always get + exactly 2 actions per powerlines. + + .. 
seealso:: + :func:`SerializableActionSpace.get_all_unitary_line_set` Parameters ---------- - action_space: :class:`grid2op.BaseAction.ActionSpace` + action_space: :class:`ActionSpace` The action space used. Returns @@ -884,32 +933,33 @@ def get_all_unitary_line_set_simple(action_space): """ res = [] - + cls = type(action_space) # powerline set: disconnection - for i in range(action_space.n_line): + for i in range(cls.n_line): res.append(action_space({"set_line_status": [(i,-1)]})) # powerline set: reconnection - for i in range(action_space.n_line): + for i in range(cls.n_line): res.append(action_space({"set_line_status": [(i, +1)]})) return res @staticmethod - def get_all_unitary_alarm(action_space): + def get_all_unitary_alarm(action_space: Self) -> List[BaseAction]: """ .. warning:: /!\\\\ Only valid with "l2rpn_icaps_2021" environment /!\\\\ """ + cls = type(action_space) res = [] - for i in range(action_space.dim_alarms): - status = np.full(action_space.dim_alarms, fill_value=False, dtype=dt_bool) + for i in range(cls.dim_alarms): + status = np.full(cls.dim_alarms, fill_value=False, dtype=dt_bool) status[i] = True res.append(action_space({"raise_alarm": status})) return res @staticmethod - def get_all_unitary_alert(action_space): + def get_all_unitary_alert(action_space: Self) -> List[BaseAction]: """ Return all unitary actions that raise an alert on powerlines. @@ -918,15 +968,16 @@ def get_all_unitary_alert(action_space): If you got 22 attackable lines, then you got 2**22 actions... probably a TERRIBLE IDEA ! """ + cls = type(action_space) res = [] possible_values = [False, True] - if action_space.dim_alerts: - for status in itertools.product(possible_values, repeat=type(action_space).dim_alerts): + if cls.dim_alerts: + for status in itertools.product(possible_values, repeat=cls.dim_alerts): res.append(action_space({"raise_alert": np.array(status, dtype=dt_bool)})) return res @staticmethod - def get_all_unitary_line_change(action_space): + def get_all_unitary_line_change(action_space: Self) -> List[BaseAction]: """ Return all unitary actions that "change" powerline status. @@ -934,7 +985,7 @@ def get_all_unitary_line_change(action_space): Parameters ---------- - action_space: :class:`grid2op.BaseAction.ActionSpace` + action_space: :class:`ActionSpace` The action space used. Returns @@ -943,15 +994,16 @@ def get_all_unitary_line_change(action_space): The list of all "change" action acting on powerline status """ + cls = type(action_space) res = [] - for i in range(action_space.n_line): + for i in range(cls.n_line): status = action_space.get_change_line_status_vect() status[i] = True res.append(action_space({"change_line_status": status})) return res @staticmethod - def get_all_unitary_topologies_change(action_space, sub_id=None): + def get_all_unitary_topologies_change(action_space: Self, sub_id : int=None) -> List[BaseAction]: """ This methods allows to compute and return all the unitary topological changes that can be performed on a powergrid. @@ -960,7 +1012,7 @@ def get_all_unitary_topologies_change(action_space, sub_id=None): Parameters ---------- - action_space: :class:`grid2op.BaseAction.ActionSpace` + action_space: :class:`ActionSpace` The action space used. 
sub_id: ``int``, optional @@ -991,9 +1043,14 @@ def get_all_unitary_topologies_change(action_space, sub_id=None): all_change_actions_sub4 = env.action_space.get_all_unitary_topologies_change(env.action_space, sub_id=4) """ + cls = type(action_space) + if cls.n_busbar_per_sub == 1 or cls.n_busbar_per_sub >= 3: + raise Grid2OpException("Impossible to use `change_bus` action type " + "if your grid does not have exactly 2 busbars " + "per substation") res = [] S = [0, 1] - for sub_id_, num_el in enumerate(action_space.sub_info): + for sub_id_, num_el in enumerate(cls.sub_info): if sub_id is not None: if sub_id_ != sub_id: continue @@ -1020,8 +1077,114 @@ def get_all_unitary_topologies_change(action_space, sub_id=None): # a substation, changing A,B or changing C,D always has the same effect. return res + @classmethod + def _is_ok_symmetry(cls, n_busbar_per_sub: int, tup: np.ndarray, bus_start: int=2, id_start: int=1) -> bool: + # id_start: at which index to start in the `tup` vector + # bus_start: which maximum bus id should be present there + # tup: the topology vector + if id_start >= len(tup): + # i reached the end of the tuple + return True + if bus_start >= n_busbar_per_sub: + # all previous buses are filled + return True + + this_bus = tup[id_start] + if this_bus < bus_start: + # this bus id is already assigned + # go to next id, + return cls._is_ok_symmetry(n_busbar_per_sub, tup, bus_start, id_start + 1) + else: + if this_bus == bus_start: + # This is a new bus and it has the correct id + # so I go to next + return cls._is_ok_symmetry(n_busbar_per_sub, tup, bus_start + 1, id_start + 1) + else: + # by symmetry the "current" bus should be relabeled `bus_start` + # which is alreay added somewhere else. The current topologie + # is not valid. + return False + + @classmethod + def _is_ok_line(cls, n_busbar_per_sub: int, tup: np.ndarray, lines_id: np.ndarray) -> bool: + """check there are at least a line connected to each buses""" + # now, this is the "smart" thing: + # as the bus should be labelled "in order" (no way we can add + # bus 3 if bus 2 is not already set in `tup` because of the + # `_is_ok_symmetry` function), I know for a fact that there is + # `tup.max()` active buses in this topology. + # So to make sure that every buses has at least a line connected to it + # then I just check the number of unique buses (tup.max()) + # and compare it to the number of buses where there are + # at least a line len(buses_with_lines) + + # NB the alternative implementation is slower + # >>> buses_with_lines = np.unique(tup[lines_id]) + # >>> return buses_with_lines.size == tup.max() + nb = 0 + only_line = tup[lines_id] + for el in range(1, n_busbar_per_sub +1): + nb += (only_line == el).any() + return nb == tup.max() + + @classmethod + def _is_ok_2(cls, n_busbar_per_sub : int, tup) -> bool: + """check there are at least 2 elements per busbars""" + # now, this is the "smart" thing: + # as the bus should be labelled "in order" (no way we can add + # bus 3 if bus 2 is not already set in `tup` because of the + # `_is_ok_symmetry` function), I know for a fact that there is + # `tup.max()` active buses in this topology. 
+ # So to make sure that every buses has at least a line connected to it + # then I just check the number of unique buses (tup.max()) + # and compare it to the number of buses where there are + # at least a line len(buses_with_lines) + + + # NB the alternative implementation is slower + # >>> un_, count = np.unique(tup, return_counts=True) + # >>> return (count >= 2).all() + for el in range(1, tup.max() + 1): + if (tup == el).sum() < 2: + return False + return True + @staticmethod - def get_all_unitary_topologies_set(action_space, sub_id=None): + def _aux_get_all_unitary_topologies_set_comp_topo(busbar_set, num_el, action_space, + cls, powerlines_id, add_alone_line, + _count_only, sub_id_): + if not _count_only: + tmp = [] + else: + tmp = 0 + + for tup in itertools.product(busbar_set, repeat=num_el - 1): + tup = np.array((1, *tup)) # force first el on bus 1 to break symmetry + + if not action_space._is_ok_symmetry(cls.n_busbar_per_sub, tup): + # already added (by symmetry) + continue + if not action_space._is_ok_line(cls.n_busbar_per_sub, tup, powerlines_id): + # check there is at least one line per busbars + continue + if not add_alone_line and not action_space._is_ok_2(cls.n_busbar_per_sub, tup): + # check there are at least 2 elements per buses + continue + + if not _count_only: + action = action_space( + {"set_bus": {"substations_id": [(sub_id_, tup)]}} + ) + tmp.append(action) + else: + tmp += 1 + return tmp + + @staticmethod + def get_all_unitary_topologies_set(action_space: Self, + sub_id: int=None, + add_alone_line=True, + _count_only=False) -> List[BaseAction]: """ This methods allows to compute and return all the unitary topological changes that can be performed on a powergrid. @@ -1029,14 +1192,60 @@ def get_all_unitary_topologies_set(action_space, sub_id=None): The changes will be performed using the "set_bus" method. The "do nothing" action will be counted once per substation in the grid. + It returns all the "valid" topologies available at any substation (if `sub_id` is ``None`` -default) + or at the requested substation. + + To be valid a topology must satisfy: + + - there are at least one side of the powerline connected to each busbar (there cannot be a load alone + on a bus or a generator alone on a bus for example) + - if `add_alone_line=False` (not the default) then there must be at least two elements in a + substation + + .. note:: + We try to make the result of this function as small as possible. This means that if at any + substation the number of "valid" topology is only 1, it is ignored and will not be added + in the result. + + This imply that when `env.n_busbar_per_sub=1` then this function returns the empty list. + + .. note:: + If `add_alone_line` is True (again NOT the default) then if any substation counts less than + 3 elements or less then no action will be added for this substation. + + If there are 4 or 5 elements at a substation (and add_alone_line=False), then only topologies + using 2 busbar will be used. + + .. warning:: + This generates only topologies were all elements are connected. It does not generate + topologies with disconnected lines. + + .. warning:: + As far as we know, there are no bugs in this implementation. However we did not spend + lots of time finding a "closed form" formula to count exactly the number of possible topologies. + This means that we might have missed some topologies or counted the same "results" multiple + times if there has been an error in the symmetries. 
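To make the comments above concrete, here is a small standalone re-implementation (a sketch, not the library code) of the symmetry-breaking rule behind `_is_ok_symmetry`: the first element is pinned on busbar 1 and a new busbar id may only appear when it is exactly one above the highest id used so far. With 3 busbars and 3 elements it keeps 5 of the 9 raw assignments; the "one line per bus" and "two elements per bus" filters are intentionally left out of this sketch.

    import itertools
    import numpy as np

    n_busbar, num_el = 3, 3
    kept = []
    for tail in itertools.product(range(1, n_busbar + 1), repeat=num_el - 1):
        tup = np.array((1, *tail))       # first element forced on busbar 1 to break the symmetry
        seen_max = 1
        ok = True
        for b in tup[1:]:
            if b > seen_max + 1:         # a busbar id was skipped: rejected by symmetry
                ok = False
                break
            seen_max = max(seen_max, int(b))
        if ok:
            kept.append(tuple(int(x) for x in tup))
    print(kept)   # [(1, 1, 1), (1, 1, 2), (1, 2, 1), (1, 2, 2), (1, 2, 3)]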
+ + If you are interested in this topic, let us know with a discussion, for example here + https://github.com/rte-france/Grid2Op/discussions + Parameters ---------- - action_space: :class:`grid2op.BaseAction.ActionHelper` + action_space: :class:`ActionSpace` The action space used. sub_id: ``int``, optional The substation ID. If ``None`` it is done for all substations. + add_alone_line: ``bool``, optional + If ``True`` (default) then topologiees where 1 line side is "alone" on a bus + are valid and put in the output (more topologies are considered). If not + then only topologies with at least one line AND 2 elements per buses + are returned. + + _count_only: ``bool``, optional + Does not return the list but rather only the number of elements there would be + Notes ----- This might take a long time on large grid (possibly 10-15 mins for the IEEE 118 for example) @@ -1062,80 +1271,50 @@ def get_all_unitary_topologies_set(action_space, sub_id=None): all_change_actions_sub4 = env.action_space.get_all_unitary_topologies_set(env.action_space, sub_id=4) """ + cls = type(action_space) + if cls.n_busbar_per_sub == 1: + return [] + res = [] - S = [0, 1] - for sub_id_, num_el in enumerate(action_space.sub_info): - tmp = [] - if sub_id is not None: - if sub_id_ != sub_id: - continue - - new_topo = np.full(shape=num_el, fill_value=1, dtype=dt_int) - # perform the action "set everything on bus 1" - action = action_space( - {"set_bus": {"substations_id": [(sub_id_, new_topo)]}} - ) - tmp.append(action) - - powerlines_or_id = action_space.line_or_to_sub_pos[ - action_space.line_or_to_subid == sub_id_ - ] - powerlines_ex_id = action_space.line_ex_to_sub_pos[ - action_space.line_ex_to_subid == sub_id_ - ] - powerlines_id = np.concatenate((powerlines_or_id, powerlines_ex_id)) - + S = list(range(1, cls.n_busbar_per_sub + 1)) + if sub_id is not None: + num_el = cls.sub_info[sub_id] + powerlines_id = cls.get_powerline_id(sub_id) + # computes all the topologies at 2 buses for this substation - for tup in itertools.product(S, repeat=num_el - 1): - indx = np.full(shape=num_el, fill_value=False, dtype=dt_bool) - tup = np.array((0, *tup)).astype( - dt_bool - ) # add a zero to first element -> break symmetry - indx[tup] = True - if indx.sum() >= 2 and (~indx).sum() >= 2: - # i need 2 elements on each bus at least (almost all the times, except when a powerline - # is alone on its bus) - new_topo = np.full(shape=num_el, fill_value=1, dtype=dt_int) - new_topo[~indx] = 2 - - if ( - indx[powerlines_id].sum() == 0 - or (~indx[powerlines_id]).sum() == 0 - ): - # if there is a "node" without a powerline, the topology is not valid - continue - - action = action_space( - {"set_bus": {"substations_id": [(sub_id_, new_topo)]}} - ) - tmp.append(action) - else: - # i need to take into account the case where 1 powerline is alone on a bus too - if ( - (indx[powerlines_id]).sum() >= 1 - and (~indx[powerlines_id]).sum() >= 1 - ): - new_topo = np.full(shape=num_el, fill_value=1, dtype=dt_int) - new_topo[~indx] = 2 - action = action_space( - {"set_bus": {"substations_id": [(sub_id_, new_topo)]}} - ) - tmp.append(action) + tmp = action_space._aux_get_all_unitary_topologies_set_comp_topo(S, num_el, action_space, + cls, powerlines_id, add_alone_line, + _count_only, sub_id) - if len(tmp) >= 2: + if not _count_only and len(tmp) >= 2: # if i have only one single topology on this substation, it doesn't make any action - # i cannot change the topology is there is only one. + # i cannot change the topology if there is only one. 
res += tmp - + elif _count_only: + if tmp >= 2: + res = tmp + else: + # no real way to change if there is only one valid topology + res = 0 + if not _count_only: + return res + return [res] # need to be a list still + + for sub_id in range(cls.n_sub): + this = cls.get_all_unitary_topologies_set(action_space, + sub_id, + add_alone_line, + _count_only) + res += this return res @staticmethod def get_all_unitary_redispatch( action_space, num_down=5, num_up=5, max_ratio_value=1.0 - ): + ) -> List[BaseAction]: """ Redispatching action are continuous action. This method is an helper to convert the continuous - action into discrete action (by rounding). + action into "discrete actions" (by rounding). The number of actions is equal to num_down + num_up (by default 10) per dispatchable generator. @@ -1146,10 +1325,14 @@ def get_all_unitary_redispatch( a distinct action (then counting `num_down` different action, because 0.0 is removed) - it will do the same for [0, gen_maw_ramp_up] + .. note:: + With this "helper" only one generator is affected by one action. For example + there are no action acting on both generator 1 and generator 2 at the same + time. Parameters ---------- - action_space: :class:`grid2op.BaseAction.ActionHelper` + action_space: :class:`ActionSpace` The action space used. num_down: ``int`` @@ -1204,7 +1387,7 @@ def get_all_unitary_redispatch( return res @staticmethod - def get_all_unitary_curtail(action_space, num_bin=10, min_value=0.5): + def get_all_unitary_curtail(action_space : Self, num_bin: int=10, min_value: float=0.5) -> List[BaseAction]: """ Curtailment action are continuous action. This method is an helper to convert the continuous action into discrete action (by rounding). @@ -1218,17 +1401,21 @@ def get_all_unitary_curtail(action_space, num_bin=10, min_value=0.5): - it will divide the interval [0, 1] into `num_bin`, each will make a distinct action (then counting `num_bin` different action, because 0.0 is removed) + .. note:: + With this "helper" only one generator is affected by one action. For example + there are no action acting on both generator 1 and generator 2 at the same + time. Parameters ---------- - action_space: :class:`grid2op.BaseAction.ActionHelper` + action_space: :class:`ActionSpace` The action space used. num_bin: ``int`` Number of actions for each renewable generator min_value: ``float`` - Between 0. and 1.: minimum value allow for the curtailment. FOr example if you set this + Between 0. and 1.: minimum value allow for the curtailment. For example if you set this value to be 0.2 then no curtailment will be done to limit the generator below 20% of its maximum capacity Returns @@ -1255,7 +1442,7 @@ def get_all_unitary_curtail(action_space, num_bin=10, min_value=0.5): return res @staticmethod - def get_all_unitary_storage(action_space, num_down=5, num_up=5): + def get_all_unitary_storage(action_space: Self, num_down: int =5, num_up: int=5) -> List[BaseAction]: """ Storage action are continuous action. This method is an helper to convert the continuous action into discrete action (by rounding). @@ -1269,10 +1456,15 @@ def get_all_unitary_storage(action_space, num_down=5, num_up=5): a distinct action (then counting `num_down` different action, because 0.0 is removed) - it will do the same for [0, storage_max_p_absorb] + .. note:: + With this "helper" only one storage unit is affected by one action. For example + there are no action acting on both storage unit 1 and storage unit 2 at the same + time. 
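        Taken together, these three helpers are typically used to build one flat list of discrete
        actions. A usage sketch (not part of the patch; `educ_case14_storage` is only an example
        environment that contains storage units):

        .. code-block:: python

            import grid2op

            env = grid2op.make("educ_case14_storage", test=True)
            space = env.action_space

            # one flat list of unitary actions covering several "continuous" degrees of freedom
            all_acts = []
            all_acts += space.get_all_unitary_redispatch(space, num_down=5, num_up=5)
            all_acts += space.get_all_unitary_curtail(space, num_bin=10, min_value=0.5)
            all_acts += space.get_all_unitary_storage(space, num_down=5, num_up=5)
            print(f"{len(all_acts)} unitary actions generated")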
+ Parameters ---------- - action_space: :class:`grid2op.BaseAction.ActionHelper` + action_space: :class:`ActionSpace` The action space used. Returns @@ -1316,7 +1508,7 @@ def _custom_deepcopy_for_copy(self, new_obj): new_obj._template_act = self.actionClass() def _aux_get_back_to_ref_state_curtail(self, res, obs): - is_curtailed = obs.curtailment_limit != 1.0 + is_curtailed = np.abs(obs.curtailment_limit - 1.0) >= 1e-7 if is_curtailed.any(): res["curtailment"] = [] if not self.supports_type("curtail"): @@ -1334,7 +1526,7 @@ def _aux_get_back_to_ref_state_curtail(self, res, obs): def _aux_get_back_to_ref_state_line(self, res, obs): disc_lines = ~obs.line_status if disc_lines.any(): - li_disc = np.where(disc_lines)[0] + li_disc = (disc_lines).nonzero()[0] res["powerline"] = [] for el in li_disc: act = self.actionClass() @@ -1376,9 +1568,9 @@ def _aux_get_back_to_ref_state_sub(self, res, obs): def _aux_get_back_to_ref_state_redisp(self, res, obs, precision=1e-5): # TODO this is ugly, probably slow and could definitely be optimized - notredisp_setpoint = obs.target_dispatch != 0.0 + notredisp_setpoint = np.abs(obs.target_dispatch) >= 1e-7 if notredisp_setpoint.any(): - need_redisp = np.where(notredisp_setpoint)[0] + need_redisp = (notredisp_setpoint).nonzero()[0] res["redispatching"] = [] # combine generators and do not exceed ramps (up or down) rem = np.zeros(self.n_gen, dtype=dt_float) @@ -1417,14 +1609,14 @@ def _aux_get_back_to_ref_state_redisp(self, res, obs, precision=1e-5): continue if obs.target_dispatch[gen_id] > 0.0: if nb_act < nb_[gen_id] - 1 or ( - rem[gen_id] == 0.0 and nb_act == nb_[gen_id] - 1 + np.abs(rem[gen_id]) <= 1e-7 and nb_act == nb_[gen_id] - 1 ): reds[gen_id] = -obs.gen_max_ramp_down[gen_id] else: reds[gen_id] = -rem[gen_id] else: if nb_act < nb_[gen_id] - 1 or ( - rem[gen_id] == 0.0 and nb_act == nb_[gen_id] - 1 + np.abs(rem[gen_id]) <= 1e-7 and nb_act == nb_[gen_id] - 1 ): reds[gen_id] = obs.gen_max_ramp_up[gen_id] else: @@ -1443,7 +1635,7 @@ def _aux_get_back_to_ref_state_storage( notredisp_setpoint = obs.storage_charge / obs.storage_Emax != storage_setpoint delta_time_hour = dt_float(obs.delta_time / 60.0) if notredisp_setpoint.any(): - need_ajust = np.where(notredisp_setpoint)[0] + need_ajust = (notredisp_setpoint).nonzero()[0] res["storage"] = [] # combine storage units and do not exceed maximum power rem = np.zeros(self.n_storage, dtype=dt_float) @@ -1488,14 +1680,14 @@ def _aux_get_back_to_ref_state_storage( continue if current_state[stor_id] > 0.0: if nb_act < nb_[stor_id] - 1 or ( - rem[stor_id] == 0.0 and nb_act == nb_[stor_id] - 1 + np.abs(rem[stor_id]) <= 1e-7 and nb_act == nb_[stor_id] - 1 ): reds[stor_id] = -obs.storage_max_p_prod[stor_id] else: reds[stor_id] = -rem[stor_id] else: if nb_act < nb_[stor_id] - 1 or ( - rem[stor_id] == 0.0 and nb_act == nb_[stor_id] - 1 + np.abs(rem[stor_id]) <= 1e-7 and nb_act == nb_[stor_id] - 1 ): reds[stor_id] = obs.storage_max_p_absorb[stor_id] else: @@ -1509,9 +1701,14 @@ def _aux_get_back_to_ref_state_storage( def get_back_to_ref_state( self, obs: "grid2op.Observation.BaseObservation", - storage_setpoint=0.5, - precision=5, - ) -> Dict[str, List[BaseAction]]: + storage_setpoint: float=0.5, + precision: int=5, + ) -> Dict[Literal["powerline", + "substation", + "redispatching", + "storage", + "curtailment"], + List[BaseAction]]: """ This function returns the list of unary actions that you can perform in order to get back to the "fully meshed" / "initial" topology. 
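        The hunks above also replace exact floating-point comparisons (``!= 0.0``) with
        tolerance-based ones and ``np.where(mask)[0]`` with the equivalent, slightly cheaper
        ``mask.nonzero()[0]``. A small self-contained illustration of both idioms (not code from
        the patch):

        .. code-block:: python

            import numpy as np

            target_dispatch = np.array([0.0, 1e-9, 3.5, -2.0])

            # exact comparison flags the 1e-9 rounding residue as "redispatched"
            print((target_dispatch != 0.0).sum())     # 3

            # tolerance-based comparison ignores it
            notredisp = np.abs(target_dispatch) >= 1e-7
            print(notredisp.sum())                    # 2

            # indices of the True entries; equivalent to np.where(notredisp)[0]
            need_redisp = notredisp.nonzero()[0]
            print(need_redisp)                        # [2 3]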
@@ -1525,8 +1722,8 @@ def get_back_to_ref_state( - an action that acts on a single powerline - an action on a single substation - - a redispatching action - - a storage action + - a redispatching action (acting possibly on all generators) + - a storage action (acting possibly on all generators) The list might be relatively long, in the case where lots of actions are needed. Depending on the rules of the game (for example limiting the action on one single substation), in order to get back to this topology, multiple consecutive actions will need to be implemented. @@ -1536,7 +1733,7 @@ def get_back_to_ref_state( - "powerline" for the list of actions needed to set back the powerlines in a proper state (connected). They can be of type "change_line" or "set_line". - "substation" for the list of actions needed to set back each substation in its initial state (everything connected to bus 1). They can be implemented as "set_bus" or "change_bus" - - "redispatching": for the redispatching action (there can be multiple redispatching actions needed because of the ramps of the generator) + - "redispatching": for the redispatching actions (there can be multiple redispatching actions needed because of the ramps of the generator) - "storage": for action on storage units (you might need to perform multiple storage actions because of the maximum power these units can absorb / produce ) - "curtailment": for curtailment action (usually at most one such action is needed) @@ -1564,7 +1761,22 @@ def get_back_to_ref_state( Examples -------- - TODO + You can use it like this: + + .. code-block:: python + + import grid2op + + env_name = "l2rpn_case14_sandbox" + env = grid2op.make(env_name) + obs = env.reset(seed=1) + + # perform a random action + obs, reward, done, info = env.step(env.action_space.sample()) + assert not done # you might end up in a "done" state depending on the random action + + acts = obs.get_back_to_ref_state() + print(acts) """ from grid2op.Observation.baseObservation import BaseObservation @@ -1574,7 +1786,6 @@ def get_back_to_ref_state( "You need to provide a grid2op Observation for this function to work correctly." ) res = {} - # powerline actions self._aux_get_back_to_ref_state_line(res, obs) # substations diff --git a/grid2op/Action/topologyAction.py b/grid2op/Action/topologyAction.py index 4fadb649d..5f3c9617a 100644 --- a/grid2op/Action/topologyAction.py +++ b/grid2op/Action/topologyAction.py @@ -6,6 +6,8 @@ # SPDX-License-Identifier: MPL-2.0 # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. +from typing import Optional, Dict, Literal + from grid2op.Action.playableAction import PlayableAction @@ -36,5 +38,5 @@ class TopologyAction(PlayableAction): attr_list_set = set(attr_list_vect) - def __init__(self): - super().__init__() + def __init__(self, _names_chronics_to_backend: Optional[Dict[Literal["loads", "prods", "lines"], Dict[str, str]]]=None): + super().__init__(_names_chronics_to_backend) diff --git a/grid2op/Action/topologyAndDispatchAction.py b/grid2op/Action/topologyAndDispatchAction.py index b85443724..66cf93cea 100644 --- a/grid2op/Action/topologyAndDispatchAction.py +++ b/grid2op/Action/topologyAndDispatchAction.py @@ -6,6 +6,8 @@ # SPDX-License-Identifier: MPL-2.0 # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. 
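The same constructor change is repeated for every `PlayableAction` subclass touched by this patch.
The pattern, shown here on a hypothetical user-defined subclass (`MyCustomAction` is not part of
grid2op), is simply to accept and forward the new optional mapping:

.. code-block:: python

    from typing import Optional, Dict, Literal

    from grid2op.Action.playableAction import PlayableAction


    class MyCustomAction(PlayableAction):
        # class-level attributes (authorized_keys, attr_list_vect, ...) omitted for brevity

        def __init__(self,
                     _names_chronics_to_backend: Optional[Dict[Literal["loads", "prods", "lines"],
                                                                Dict[str, str]]] = None):
            # forward the (internal) chronics-name mapping to the base class
            super().__init__(_names_chronics_to_backend)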
+from typing import Optional, Dict, Literal + from grid2op.Action.playableAction import PlayableAction @@ -38,5 +40,5 @@ class TopologyAndDispatchAction(PlayableAction): attr_list_set = set(attr_list_vect) - def __init__(self): - super().__init__() + def __init__(self, _names_chronics_to_backend: Optional[Dict[Literal["loads", "prods", "lines"], Dict[str, str]]]=None): + super().__init__(_names_chronics_to_backend) diff --git a/grid2op/Action/topologyChangeAction.py b/grid2op/Action/topologyChangeAction.py index c8ede25a2..70423e23c 100644 --- a/grid2op/Action/topologyChangeAction.py +++ b/grid2op/Action/topologyChangeAction.py @@ -6,6 +6,8 @@ # SPDX-License-Identifier: MPL-2.0 # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. +from typing import Optional, Dict, Literal + from grid2op.Action.playableAction import PlayableAction @@ -24,5 +26,5 @@ class TopologyChangeAction(PlayableAction): attr_list_vect = ["_change_bus_vect", "_switch_line_status"] attr_list_set = set(attr_list_vect) - def __init__(self): - super().__init__() + def __init__(self, _names_chronics_to_backend: Optional[Dict[Literal["loads", "prods", "lines"], Dict[str, str]]]=None): + super().__init__(_names_chronics_to_backend) diff --git a/grid2op/Action/topologyChangeAndDispatchAction.py b/grid2op/Action/topologyChangeAndDispatchAction.py index 11947f262..4e719375f 100644 --- a/grid2op/Action/topologyChangeAndDispatchAction.py +++ b/grid2op/Action/topologyChangeAndDispatchAction.py @@ -6,6 +6,8 @@ # SPDX-License-Identifier: MPL-2.0 # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. +from typing import Optional, Dict, Literal + from grid2op.Action.playableAction import PlayableAction @@ -30,5 +32,5 @@ class TopologyChangeAndDispatchAction(PlayableAction): attr_list_set = set(attr_list_vect) - def __init__(self): - super().__init__() + def __init__(self, _names_chronics_to_backend: Optional[Dict[Literal["loads", "prods", "lines"], Dict[str, str]]]=None): + super().__init__(_names_chronics_to_backend) diff --git a/grid2op/Action/topologySetAction.py b/grid2op/Action/topologySetAction.py index 204109694..2e58867df 100644 --- a/grid2op/Action/topologySetAction.py +++ b/grid2op/Action/topologySetAction.py @@ -6,6 +6,8 @@ # SPDX-License-Identifier: MPL-2.0 # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. +from typing import Optional, Dict, Literal + from grid2op.Action.playableAction import PlayableAction @@ -25,5 +27,5 @@ class TopologySetAction(PlayableAction): attr_list_set = set(attr_list_vect) - def __init__(self): - super().__init__() + def __init__(self, _names_chronics_to_backend: Optional[Dict[Literal["loads", "prods", "lines"], Dict[str, str]]]=None): + super().__init__(_names_chronics_to_backend) diff --git a/grid2op/Action/topologySetAndDispatchAction.py b/grid2op/Action/topologySetAndDispatchAction.py index dee7d797a..85ba10beb 100644 --- a/grid2op/Action/topologySetAndDispatchAction.py +++ b/grid2op/Action/topologySetAndDispatchAction.py @@ -6,6 +6,8 @@ # SPDX-License-Identifier: MPL-2.0 # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. 
+from typing import Optional, Dict, Literal + from grid2op.Action.playableAction import PlayableAction @@ -24,5 +26,5 @@ class TopologySetAndDispatchAction(PlayableAction): attr_list_vect = ["_set_line_status", "_set_topo_vect", "_redispatch"] attr_list_set = set(attr_list_vect) - def __init__(self): - super().__init__() + def __init__(self, _names_chronics_to_backend: Optional[Dict[Literal["loads", "prods", "lines"], Dict[str, str]]]=None): + super().__init__(_names_chronics_to_backend) diff --git a/grid2op/Action/voltageOnlyAction.py b/grid2op/Action/voltageOnlyAction.py index 996be38e9..a90eed785 100644 --- a/grid2op/Action/voltageOnlyAction.py +++ b/grid2op/Action/voltageOnlyAction.py @@ -6,6 +6,7 @@ # SPDX-License-Identifier: MPL-2.0 # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. +from typing import Optional, Dict, Literal import warnings from grid2op.Exceptions import AmbiguousAction @@ -32,13 +33,13 @@ class VoltageOnlyAction(BaseAction): _shunt_added = False _first_init = True - def __init__(self): + def __init__(self, _names_chronics_to_backend: Optional[Dict[Literal["loads", "prods", "lines"], Dict[str, str]]]=None): """ See the definition of :func:`BaseAction.__init__` and of :class:`BaseAction` for more information. Nothing more is done in this constructor. """ - BaseAction.__init__(self) + BaseAction.__init__(self, _names_chronics_to_backend) if VoltageOnlyAction._shunt_added is False and type(self).shunts_data_available: VoltageOnlyAction.attr_list_vect += ["shunt_p", "shunt_q", "shunt_bus"] diff --git a/grid2op/Agent/oneChangeThenNothing.py b/grid2op/Agent/oneChangeThenNothing.py index 49f8e6df8..2eab3f9c9 100644 --- a/grid2op/Agent/oneChangeThenNothing.py +++ b/grid2op/Agent/oneChangeThenNothing.py @@ -6,11 +6,16 @@ # SPDX-License-Identifier: MPL-2.0 # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. +import warnings from grid2op.Agent.baseAgent import BaseAgent class OneChangeThenNothing(BaseAgent): """ + .. warning:: + As of grid2op 1.10.2, this class has been deprecated. Please use `env.reset(options={"init state": THE_INITIAl_CHANGE})` + instead. + This is a specific kind of BaseAgent. It does an BaseAction (possibly non empty) at the first time step and then does nothing. @@ -25,10 +30,17 @@ class OneChangeThenNothing(BaseAgent): Examples --------- - We advise to use this class as following + + This class is deprecated in favor of the "init state" reset options. Please avoid using it. + + But if you really want to use it... then you can do it with: .. code-block:: python + # This class has been deprecated, please use the env.reset() + # with proper options instead + + # DEPRECATED ! import grid2op from grid2op.Agent import OneChangeThenNothing acts_dict_ = [{}, {"set_line_status": [(0,-1)]}] # list of dictionaries. Each dictionary @@ -38,18 +50,42 @@ class OneChangeThenNothing(BaseAgent): for act_as_dict in zip(acts_dict_): # generate the proper class that will perform the first action (encoded by {}) in acts_dict_ agent_class = OneChangeThenNothing.gen_next(act_as_dict) - + # start a runner with this agent runner = Runner(**env.get_params_for_runner(), agentClass=agent_class) # run 2 episode with it res_2 = runner.run(nb_episode=2) + + Notes + ------ + + After grid2op 1.10.2, this class has been deprecated. A cleaner alternative + to use it is to set the initial state of grid when calling `env.reset` like this: + + .. 
code-block:: python + + import grid2op + + env = grid2op.make("l2rpn_case14_sandbox") # create an environment + dict_act_json = ... # dict representing an action + obs = env.reset(options={"init state": dict_act_json}) + + This way of doing offers: + + - more flexibility: rules are not checked + - more flexibility: any type of actions acting on anything can be performed + (even if the action would be illegal for the agent) + - less trouble: cooldown are not affected + """ my_dict = {} def __init__(self, action_space): BaseAgent.__init__(self, action_space) + cls = type(self) + warnings.warn(f"Deprecated class, please use `env.reset(options={{'init state': {self.action_space(cls.my_dict).to_json()}, 'method': 'ignore' }})` instead") self.has_changed = False self.do_nothing_action = self.action_space({}) diff --git a/grid2op/Agent/recoPowerLinePerArea.py b/grid2op/Agent/recoPowerLinePerArea.py index bc28584e1..322d04b0f 100644 --- a/grid2op/Agent/recoPowerLinePerArea.py +++ b/grid2op/Agent/recoPowerLinePerArea.py @@ -57,7 +57,7 @@ def act(self, observation: BaseObservation, reward: float, done : bool=False): return self.action_space() area_used = np.full(self.nb_area, fill_value=False, dtype=bool) reco_ids = [] - for l_id in np.where(can_be_reco)[0]: + for l_id in can_be_reco.nonzero()[0]: if not area_used[self.lines_to_area_id[l_id]]: reco_ids.append(l_id) area_used[self.lines_to_area_id[l_id]] = True diff --git a/grid2op/Agent/recoPowerlineAgent.py b/grid2op/Agent/recoPowerlineAgent.py index b4373f9bd..c7462877f 100644 --- a/grid2op/Agent/recoPowerlineAgent.py +++ b/grid2op/Agent/recoPowerlineAgent.py @@ -28,6 +28,6 @@ def _get_tested_action(self, observation): if can_be_reco.any(): res = [ self.action_space({"set_line_status": [(id_, +1)]}) - for id_ in np.where(can_be_reco)[0] + for id_ in (can_be_reco).nonzero()[0] ] return res diff --git a/grid2op/Backend/backend.py b/grid2op/Backend/backend.py index bf291aaf3..5efbb25ff 100644 --- a/grid2op/Backend/backend.py +++ b/grid2op/Backend/backend.py @@ -22,6 +22,7 @@ # python version is probably bellow 3.11 from typing_extensions import Self +import grid2op from grid2op.dtypes import dt_int, dt_float, dt_bool from grid2op.Exceptions import ( EnvError, @@ -33,7 +34,7 @@ DivergingPowerflow, Grid2OpException, ) -from grid2op.Space import GridObjects +from grid2op.Space import GridObjects, DEFAULT_N_BUSBAR_PER_SUB # TODO method to get V and theta at each bus, could be in the same shape as check_kirchoff @@ -66,19 +67,22 @@ class Backend(GridObjects, ABC): All the abstract methods (that need to be implemented for a backend to work properly) are (more information given in the :ref:`create-backend-module` page): - - :func:`Backend.load_grid` - - :func:`Backend.apply_action` - - :func:`Backend.runpf` - - :func:`Backend.get_topo_vect` - - :func:`Backend.generators_info` - - :func:`Backend.loads_info` - - :func:`Backend.lines_or_info` - - :func:`Backend.lines_ex_info` + - :func:`Backend.load_grid` (called once per episode, or if :func:`Backend.reset` is implemented, once for the entire + lifetime of the environment) + - :func:`Backend.apply_action` (called once per episode -initialization- and at least once per step) + - :func:`Backend.runpf` (called once per episode -initialization- and at least once per step) + - :func:`Backend.get_topo_vect` (called once per episode -initialization- and at least once per step) + - :func:`Backend.generators_info` (called once per episode -initialization- and at least once per step) + - :func:`Backend.loads_info` 
(called once per episode -initialization- and at least once per step) + - :func:`Backend.lines_or_info` (called once per episode -initialization- and at least once per step) + - :func:`Backend.lines_ex_info` (called once per episode -initialization- and at least once per step) And optionally: + - :func:`Backend.reset` will reload the powergrid from the hard drive by default. This is rather slow and we + recommend to overload it. - :func:`Backend.close` (this is mandatory if your backend implementation (`self._grid`) is relying on some - c / c++ code that do not free memory automatically. + c / c++ code that do not free memory automatically.) - :func:`Backend.copy` (not that this is mandatory if your backend implementation (in `self._grid`) cannot be deep copied using the python copy.deepcopy function) [as of grid2op >= 1.7.1 it is no more required. If not implemented, you won't be able to use some of grid2op feature however] @@ -88,8 +92,6 @@ class Backend(GridObjects, ABC): at the "origin" side and just return the "a_or" vector. You want to do something smarter here. - :func:`Backend._disconnect_line`: has a default slow implementation using "apply_action" that might can most likely be optimized in your backend. - - :func:`Backend.reset` will reload the powergrid from the hard drive by default. This is rather slow and we - recommend to overload it. And, if the flag :attr:Backend.shunts_data_available` is set to ``True`` the method :func:`Backend.shunt_info` should also be implemented. @@ -99,12 +101,6 @@ class Backend(GridObjects, ABC): `shunt_to_subid`, `name_shunt` and function `shunt_info` and handle the modification of shunts bus, active value and reactive value in the "apply_action" function). - - In order to be valid and carry out some computations, you should call :func:`Backend.load_grid` and later - :func:`grid2op.Spaces.GridObjects.assert_grid_correct`. It is also more than recommended to call - :func:`Backend.assert_grid_correct_after_powerflow` after the first powerflow. This is all carried ou in the - environment properly. - Attributes ---------- detailed_infos_for_cascading_failures: :class:`bool` @@ -119,9 +115,7 @@ class Backend(GridObjects, ABC): """ IS_BK_CONVERTER : bool = False - - env_name : str = "unknown" - + # action to set me my_bk_act_class : "Optional[grid2op.Action._backendAction._BackendAction]"= None _complete_action_class : "Optional[grid2op.Action.CompleteAction]"= None @@ -171,6 +165,84 @@ def __init__(self, for k, v in kwargs.items(): self._my_kwargs[k] = v + #: .. versionadded:: 1.10.0 + #: + #: A flag to indicate whether the :func:`Backend.cannot_handle_more_than_2_busbar` + #: or the :func:`Backend.cannot_handle_more_than_2_busbar` + #: has been called when :func:`Backend.load_grid` was called. + #: Starting from grid2op 1.10.0 calling either of the above method + #: is a requirement (to + #: ensure backward compatibility) + self._missing_two_busbars_support_info: bool = True + + #: .. versionadded:: 1.10.0 + #: + #: There is a difference between this and the class attribute. + #: You should not worry about the class attribute of the backend + #: in :func:`Backend.apply_action` + self.n_busbar_per_sub: int = DEFAULT_N_BUSBAR_PER_SUB + + def can_handle_more_than_2_busbar(self): + """ + .. versionadded:: 1.10.0 + + This function should be called once in :func:`Backend.load_grid` if your backend is able + to handle more than 2 busbars per substation. + + If not called, then the `environment` will not be able to use more than 2 busbars per substations. 
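        A usage sketch of the feature from the environment side, assuming the `n_busbar` keyword
        of `grid2op.make` that accompanies this API (`l2rpn_case14_sandbox` is only an example
        environment whose default backend declares support):

        .. code-block:: python

            import grid2op

            # request 3 busbars per substation; this only has an effect if the backend's
            # `load_grid` called `can_handle_more_than_2_busbar()`
            env = grid2op.make("l2rpn_case14_sandbox", n_busbar=3)
            print(type(env.action_space).n_busbar_per_sub)  # 3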
+ + .. seealso:: + :func:`Backend.cannot_handle_more_than_2_busbar` + + .. note:: + From grid2op 1.10.0 it is preferable that your backend calls one of + :func:`Backend.can_handle_more_than_2_busbar` or + :func:`Backend.cannot_handle_more_than_2_busbar`. + + If not, then the environments created with your backend will not be able to + "operate" grid with more than 2 busbars per substation. + + .. danger:: + We highly recommend you do not try to override this function. + + At least, at time of writing I can't find any good reason to do so. + """ + self._missing_two_busbars_support_info = False + self.n_busbar_per_sub = type(self).n_busbar_per_sub + + def cannot_handle_more_than_2_busbar(self): + """ + .. versionadded:: 1.10.0 + + This function should be called once in :func:`Backend.load_grid` if your backend is **NOT** able + to handle more than 2 busbars per substation. + + If not called, then the `environment` will not be able to use more than 2 busbars per substations. + + .. seealso:: + :func:`Backend.cannot_handle_more_than_2_busbar` + + .. note:: + From grid2op 1.10.0 it is preferable that your backend calls one of + :func:`Backend.can_handle_more_than_2_busbar` or + :func:`Backend.cannot_handle_more_than_2_busbar`. + + If not, then the environments created with your backend will not be able to + "operate" grid with more than 2 busbars per substation. + + .. danger:: + We highly recommend you do not try to override this function. + + Atleast, at time of writing I can't find any good reason to do so. + """ + self._missing_two_busbars_support_info = False + if type(self).n_busbar_per_sub != DEFAULT_N_BUSBAR_PER_SUB: + warnings.warn("You asked in `make` function to have more than 2 busbar per substation. It is " + f"not possible with a backend of type {type(self)}. To " + "'fix' this issue, you need to change the implementation of your backend or " + "upgrade it to a newer version.") + self.n_busbar_per_sub = DEFAULT_N_BUSBAR_PER_SUB + def make_complete_path(self, path : Union[os.PathLike, str], filename : Optional[Union[os.PathLike, str]]=None) -> str: @@ -301,11 +373,12 @@ def runpf(self, is_dc : bool=False) -> Tuple[bool, Union[Exception, None]]: """ pass - @abstractmethod - def get_topo_vect(self) -> np.ndarray: + def get_topo_vect(self) -> Optional[np.ndarray]: """ INTERNAL + TODO detailed topo: change of behaviour ! + .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\ Prefer using :attr:`grid2op.Observation.BaseObservation.topo_vect` @@ -339,8 +412,20 @@ def get_topo_vect(self) -> np.ndarray: An array saying to which bus the object is connected. """ - pass + return None + def get_switches_position(self) -> Optional[np.ndarray]: + """INTERNAL + + TODO detailed topo: change of behaviour ! + + Returns + ------- + np.ndarray + _description_ + """ + return None + @abstractmethod def generators_info(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ @@ -420,7 +505,7 @@ def lines_or_info(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray] .. note:: It is called after the solver has been ran, only in case of success (convergence). - It returns the information extracted from the _grid at the origin end of each powerline. + It returns the information extracted from the _grid at the origin side of each powerline. For assumption about the order of the powerline flows return in this vector, see the help of the :func:`Backend.get_line_status` method. 
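        As a usage sketch of these getters (normally only the environment calls them; poking
        `env.backend` directly is meant for tests and debugging):

        .. code-block:: python

            import grid2op

            env = grid2op.make("l2rpn_case14_sandbox")
            converged, exc = env.backend.runpf(is_dc=False)
            if converged:
                p_or, q_or, v_or, a_or = env.backend.lines_or_info()  # origin side
                p_ex, q_ex, v_ex, a_ex = env.backend.lines_ex_info()  # extremity side
                # active power entering a line at both sides sums to the losses on that line
                print((p_or + p_ex).sum())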
@@ -453,7 +538,7 @@ def lines_ex_info(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray] .. note:: It is called after the solver has been ran, only in case of success (convergence). - It returns the information extracted from the _grid at the extremity end of each powerline. + It returns the information extracted from the _grid at the extremity side of each powerline. For assumption about the order of the powerline flows return in this vector, see the help of the :func:`Backend.get_line_status` method. @@ -541,11 +626,13 @@ def copy(self): start_grid = self._grid self._grid = None - res = copy.deepcopy(self) - res.__class__ = type(self) # somehow deepcopy forget the init class... weird - res._grid = copy.deepcopy(start_grid) - self._grid = start_grid - res._is_loaded = False # i can reload a copy of an environment + try: + res = copy.deepcopy(self) + res.__class__ = type(self) # somehow deepcopy forget the init class... weird + res._grid = copy.deepcopy(start_grid) + finally: + self._grid = start_grid + res._is_loaded = False # i can reload a copy of an environment return res def save_file(self, full_path: Union[os.PathLike, str]) -> None: @@ -610,10 +697,10 @@ def get_line_flow(self) -> np.ndarray: It is called after the solver has been ran, only in case of success (convergence). If the AC mod is used, this shall return the current flow on the end of the powerline where there is a protection. - For example, if there is a protection on "origin end" of powerline "l2" then this method shall return the current - flow of at the "origin end" of powerline l2. + For example, if there is a protection on "origin side" of powerline "l2" then this method shall return the current + flow of at the "origin side" of powerline l2. - Note that in general, there is no loss of generality in supposing all protections are set on the "origin end" of + Note that in general, there is no loss of generality in supposing all protections are set on the "origin side" of the powerline. So this method will return all origin line flows. It is also possible, for a specific application, to return the maximum current flow between both ends of a power _grid for more complex scenario. @@ -673,11 +760,11 @@ def set_thermal_limit(self, limits : Union[np.ndarray, Dict["str", float]]) -> N if el in limits: try: tmp = dt_float(limits[el]) - except: + except Exception as exc_: raise BackendError( 'Impossible to convert data ({}) for powerline named "{}" into float ' "values".format(limits[el], el) - ) + ) from exc_ if tmp <= 0: raise BackendError( 'New thermal limit for powerlines "{}" is not positive ({})' @@ -949,12 +1036,7 @@ def _runpf_with_diverging_exception(self, is_dc : bool) -> Optional[Exception]: conv, exc_me = self.runpf(is_dc=is_dc) # run powerflow except Grid2OpException as exc_: exc_me = exc_ - except Exception as exc_: - exc_me = DivergingPowerflow( - f" An unexpected error occurred during the computation of the powerflow." - f"The error is: \n {exc_} \n. 
This is game over" - ) - + if not conv and exc_me is None: exc_me = DivergingPowerflow( "GAME OVER: Powerflow has diverged during computation " @@ -1023,10 +1105,12 @@ def next_grid_state(self, ] = True # disconnect the current power lines - if to_disc[lines_status].sum() == 0: - # no powerlines have been disconnected at this time step, i stop the computation there + if to_disc[lines_status].any() == 0: + # no powerlines have been disconnected at this time step, + # i stop the computation there break disconnected_during_cf[to_disc] = ts + # perform the disconnection action for i, el in enumerate(to_disc): if el: @@ -1124,18 +1208,19 @@ def check_kirchoff(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray p_ex, q_ex, v_ex, *_ = self.lines_ex_info() p_gen, q_gen, v_gen = self.generators_info() p_load, q_load, v_load = self.loads_info() - if self.n_storage > 0: + cls = type(self) + if cls.n_storage > 0: p_storage, q_storage, v_storage = self.storages_info() # fist check the "substation law" : nothing is created at any substation - p_subs = np.zeros(self.n_sub, dtype=dt_float) - q_subs = np.zeros(self.n_sub, dtype=dt_float) + p_subs = np.zeros(cls.n_sub, dtype=dt_float) + q_subs = np.zeros(cls.n_sub, dtype=dt_float) # check for each bus - p_bus = np.zeros((self.n_sub, 2), dtype=dt_float) - q_bus = np.zeros((self.n_sub, 2), dtype=dt_float) + p_bus = np.zeros((cls.n_sub, cls.n_busbar_per_sub), dtype=dt_float) + q_bus = np.zeros((cls.n_sub, cls.n_busbar_per_sub), dtype=dt_float) v_bus = ( - np.zeros((self.n_sub, 2, 2), dtype=dt_float) - 1.0 + np.zeros((cls.n_sub, cls.n_busbar_per_sub, 2), dtype=dt_float) - 1.0 ) # sub, busbar, [min,max] topo_vect = self.get_topo_vect() @@ -1143,11 +1228,15 @@ def check_kirchoff(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray # for example, if two powerlines are such that line_or_to_subid is equal (eg both connected to substation 0) # then numpy do not guarantee that `p_subs[self.line_or_to_subid] += p_or` will add the two "corresponding p_or" # TODO this can be vectorized with matrix product, see example in obs.flow_bus_matrix (BaseObervation.py) - for i in range(self.n_line): - sub_or_id = self.line_or_to_subid[i] - sub_ex_id = self.line_ex_to_subid[i] - loc_bus_or = topo_vect[self.line_or_pos_topo_vect[i]] - 1 - loc_bus_ex = topo_vect[self.line_ex_pos_topo_vect[i]] - 1 + for i in range(cls.n_line): + sub_or_id = cls.line_or_to_subid[i] + sub_ex_id = cls.line_ex_to_subid[i] + if (topo_vect[cls.line_or_pos_topo_vect[i]] == -1 or + topo_vect[cls.line_ex_pos_topo_vect[i]] == -1): + # line is disconnected + continue + loc_bus_or = topo_vect[cls.line_or_pos_topo_vect[i]] - 1 + loc_bus_ex = topo_vect[cls.line_ex_pos_topo_vect[i]] - 1 # for substations p_subs[sub_or_id] += p_or[i] @@ -1164,153 +1253,290 @@ def check_kirchoff(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray q_bus[sub_ex_id, loc_bus_ex] += q_ex[i] # fill the min / max voltage per bus (initialization) - if (v_bus[sub_or_id,loc_bus_or,][0] == -1): - v_bus[sub_or_id,loc_bus_or,][0] = v_or[i] - if (v_bus[sub_ex_id,loc_bus_ex,][0] == -1): - v_bus[sub_ex_id,loc_bus_ex,][0] = v_ex[i] + if (v_bus[sub_or_id, loc_bus_or,][0] == -1): + v_bus[sub_or_id, loc_bus_or,][0] = v_or[i] + if (v_bus[sub_ex_id, loc_bus_ex,][0] == -1): + v_bus[sub_ex_id, loc_bus_ex,][0] = v_ex[i] if (v_bus[sub_or_id, loc_bus_or,][1]== -1): - v_bus[sub_or_id,loc_bus_or,][1] = v_or[i] - if (v_bus[sub_ex_id,loc_bus_ex,][1]== -1): - v_bus[sub_ex_id,loc_bus_ex,][1] = v_ex[i] + v_bus[sub_or_id, loc_bus_or,][1] = 
v_or[i] + if (v_bus[sub_ex_id, loc_bus_ex,][1]== -1): + v_bus[sub_ex_id, loc_bus_ex,][1] = v_ex[i] # now compute the correct stuff if v_or[i] > 0.0: # line is connected - v_bus[sub_or_id,loc_bus_or,][0] = min(v_bus[sub_or_id,loc_bus_or,][0],v_or[i],) - v_bus[sub_or_id,loc_bus_or,][1] = max(v_bus[sub_or_id,loc_bus_or,][1],v_or[i],) + v_bus[sub_or_id, loc_bus_or,][0] = min(v_bus[sub_or_id, loc_bus_or,][0],v_or[i],) + v_bus[sub_or_id, loc_bus_or,][1] = max(v_bus[sub_or_id, loc_bus_or,][1],v_or[i],) if v_ex[i] > 0: # line is connected - v_bus[sub_ex_id,loc_bus_ex,][0] = min(v_bus[sub_ex_id,loc_bus_ex,][0],v_ex[i],) - v_bus[sub_ex_id,loc_bus_ex,][1] = max(v_bus[sub_ex_id,loc_bus_ex,][1],v_ex[i],) + v_bus[sub_ex_id, loc_bus_ex,][0] = min(v_bus[sub_ex_id, loc_bus_ex,][0],v_ex[i],) + v_bus[sub_ex_id, loc_bus_ex,][1] = max(v_bus[sub_ex_id, loc_bus_ex,][1],v_ex[i],) - for i in range(self.n_gen): + for i in range(cls.n_gen): + gptv = cls.gen_pos_topo_vect[i] + + if topo_vect[gptv] == -1: + # gen is disconnected + continue + # for substations - p_subs[self.gen_to_subid[i]] -= p_gen[i] - q_subs[self.gen_to_subid[i]] -= q_gen[i] + p_subs[cls.gen_to_subid[i]] -= p_gen[i] + q_subs[cls.gen_to_subid[i]] -= q_gen[i] + loc_bus = topo_vect[gptv] - 1 # for bus p_bus[ - self.gen_to_subid[i], topo_vect[self.gen_pos_topo_vect[i]] - 1 + cls.gen_to_subid[i], loc_bus ] -= p_gen[i] q_bus[ - self.gen_to_subid[i], topo_vect[self.gen_pos_topo_vect[i]] - 1 + cls.gen_to_subid[i], loc_bus ] -= q_gen[i] # compute max and min values if v_gen[i]: # but only if gen is connected - v_bus[self.gen_to_subid[i], topo_vect[self.gen_pos_topo_vect[i]] - 1][ + v_bus[cls.gen_to_subid[i], loc_bus][ 0 ] = min( v_bus[ - self.gen_to_subid[i], topo_vect[self.gen_pos_topo_vect[i]] - 1 + cls.gen_to_subid[i], loc_bus ][0], v_gen[i], ) - v_bus[self.gen_to_subid[i], topo_vect[self.gen_pos_topo_vect[i]] - 1][ + v_bus[cls.gen_to_subid[i], loc_bus][ 1 ] = max( v_bus[ - self.gen_to_subid[i], topo_vect[self.gen_pos_topo_vect[i]] - 1 + cls.gen_to_subid[i], loc_bus ][1], v_gen[i], ) - for i in range(self.n_load): + for i in range(cls.n_load): + gptv = cls.load_pos_topo_vect[i] + + if topo_vect[gptv] == -1: + # load is disconnected + continue + loc_bus = topo_vect[gptv] - 1 + # for substations - p_subs[self.load_to_subid[i]] += p_load[i] - q_subs[self.load_to_subid[i]] += q_load[i] + p_subs[cls.load_to_subid[i]] += p_load[i] + q_subs[cls.load_to_subid[i]] += q_load[i] # for buses p_bus[ - self.load_to_subid[i], topo_vect[self.load_pos_topo_vect[i]] - 1 + cls.load_to_subid[i], loc_bus ] += p_load[i] q_bus[ - self.load_to_subid[i], topo_vect[self.load_pos_topo_vect[i]] - 1 + cls.load_to_subid[i], loc_bus ] += q_load[i] # compute max and min values if v_load[i]: # but only if load is connected - v_bus[self.load_to_subid[i], topo_vect[self.load_pos_topo_vect[i]] - 1][ + v_bus[cls.load_to_subid[i], loc_bus][ 0 ] = min( v_bus[ - self.load_to_subid[i], topo_vect[self.load_pos_topo_vect[i]] - 1 + cls.load_to_subid[i], loc_bus ][0], v_load[i], ) - v_bus[self.load_to_subid[i], topo_vect[self.load_pos_topo_vect[i]] - 1][ + v_bus[cls.load_to_subid[i], loc_bus][ 1 ] = max( v_bus[ - self.load_to_subid[i], topo_vect[self.load_pos_topo_vect[i]] - 1 + cls.load_to_subid[i], loc_bus ][1], v_load[i], ) - for i in range(self.n_storage): - p_subs[self.storage_to_subid[i]] += p_storage[i] - q_subs[self.storage_to_subid[i]] += q_storage[i] + for i in range(cls.n_storage): + gptv = cls.storage_pos_topo_vect[i] + if topo_vect[gptv] == -1: + # storage is disconnected + continue + 
loc_bus = topo_vect[gptv] - 1 + + p_subs[cls.storage_to_subid[i]] += p_storage[i] + q_subs[cls.storage_to_subid[i]] += q_storage[i] p_bus[ - self.storage_to_subid[i], topo_vect[self.storage_pos_topo_vect[i]] - 1 + cls.storage_to_subid[i], loc_bus ] += p_storage[i] q_bus[ - self.storage_to_subid[i], topo_vect[self.storage_pos_topo_vect[i]] - 1 + cls.storage_to_subid[i], loc_bus ] += q_storage[i] # compute max and min values if v_storage[i] > 0: # the storage unit is connected v_bus[ - self.storage_to_subid[i], - topo_vect[self.storage_pos_topo_vect[i]] - 1, + cls.storage_to_subid[i], + loc_bus, ][0] = min( v_bus[ - self.storage_to_subid[i], - topo_vect[self.storage_pos_topo_vect[i]] - 1, + cls.storage_to_subid[i], + loc_bus, ][0], v_storage[i], ) v_bus[ self.storage_to_subid[i], - topo_vect[self.storage_pos_topo_vect[i]] - 1, + loc_bus, ][1] = max( v_bus[ - self.storage_to_subid[i], - topo_vect[self.storage_pos_topo_vect[i]] - 1, + cls.storage_to_subid[i], + loc_bus, ][1], v_storage[i], ) - if type(self).shunts_data_available: + if cls.shunts_data_available: p_s, q_s, v_s, bus_s = self.shunt_info() - for i in range(self.n_shunt): + for i in range(cls.n_shunt): + if bus_s[i] == -1: + # shunt is disconnected + continue + # for substations - p_subs[self.shunt_to_subid[i]] += p_s[i] - q_subs[self.shunt_to_subid[i]] += q_s[i] + p_subs[cls.shunt_to_subid[i]] += p_s[i] + q_subs[cls.shunt_to_subid[i]] += q_s[i] # for buses - p_bus[self.shunt_to_subid[i], bus_s[i] - 1] += p_s[i] - q_bus[self.shunt_to_subid[i], bus_s[i] - 1] += q_s[i] + p_bus[cls.shunt_to_subid[i], bus_s[i] - 1] += p_s[i] + q_bus[cls.shunt_to_subid[i], bus_s[i] - 1] += q_s[i] # compute max and min values - v_bus[self.shunt_to_subid[i], bus_s[i] - 1][0] = min( - v_bus[self.shunt_to_subid[i], bus_s[i] - 1][0], v_s[i] + v_bus[cls.shunt_to_subid[i], bus_s[i] - 1][0] = min( + v_bus[cls.shunt_to_subid[i], bus_s[i] - 1][0], v_s[i] ) - v_bus[self.shunt_to_subid[i], bus_s[i] - 1][1] = max( - v_bus[self.shunt_to_subid[i], bus_s[i] - 1][1], v_s[i] + v_bus[cls.shunt_to_subid[i], bus_s[i] - 1][1] = max( + v_bus[cls.shunt_to_subid[i], bus_s[i] - 1][1], v_s[i] ) else: warnings.warn( "Backend.check_kirchoff Impossible to get shunt information. Reactive information might be " "incorrect." ) - diff_v_bus = np.zeros((self.n_sub, 2), dtype=dt_float) + diff_v_bus = np.zeros((cls.n_sub, cls.n_busbar_per_sub), dtype=dt_float) diff_v_bus[:, :] = v_bus[:, :, 1] - v_bus[:, :, 0] return p_subs, q_subs, p_bus, q_bus, diff_v_bus + def _fill_names_obj(self): + """fill the name vectors (**eg** name_line) if not done already in the backend. + This function is used to fill the name of an object of a class. It will also check the existence + of these vectors in the class. + """ + cls = type(self) + if self.name_line is None: + if cls.name_line is None: + line_or_to_subid = cls.line_or_to_subid if cls.line_or_to_subid is not None else self.line_or_to_subid + line_ex_to_subid = cls.line_ex_to_subid if cls.line_ex_to_subid is not None else self.line_ex_to_subid + self.name_line = [ + "{}_{}_{}".format(or_id, ex_id, l_id) + for l_id, (or_id, ex_id) in enumerate( + zip(line_or_to_subid, line_ex_to_subid) + ) + ] + self.name_line = np.array(self.name_line) + warnings.warn( + "name_line is None so default line names have been assigned to your grid. " + "(FYI: Line names are used to make the correspondence between the chronics and the backend)" + "This might result in impossibility to load data." + '\n\tIf "env.make" properly worked, you can safely ignore this warning.' 
+ ) + else: + self.name_line = cls.name_line + + if self.name_load is None: + if cls.name_load is None: + load_to_subid = cls.load_to_subid if cls.load_to_subid is not None else self.load_to_subid + self.name_load = [ + "load_{}_{}".format(bus_id, load_id) + for load_id, bus_id in enumerate(load_to_subid) + ] + self.name_load = np.array(self.name_load) + warnings.warn( + "name_load is None so default load names have been assigned to your grid. " + "(FYI: load names are used to make the correspondence between the chronics and the backend)" + "This might result in impossibility to load data." + '\n\tIf "env.make" properly worked, you can safely ignore this warning.' + ) + else: + self.name_load = cls.name_load + + if self.name_gen is None: + if cls.name_gen is None: + gen_to_subid = cls.gen_to_subid if cls.gen_to_subid is not None else self.gen_to_subid + self.name_gen = [ + "gen_{}_{}".format(bus_id, gen_id) + for gen_id, bus_id in enumerate(gen_to_subid) + ] + self.name_gen = np.array(self.name_gen) + warnings.warn( + "name_gen is None so default generator names have been assigned to your grid. " + "(FYI: generator names are used to make the correspondence between the chronics and " + "the backend)" + "This might result in impossibility to load data." + '\n\tIf "env.make" properly worked, you can safely ignore this warning.' + ) + else: + self.name_gen = cls.name_gen + + if self.name_sub is None: + if cls.name_sub is None: + n_sub = cls.n_sub if cls.n_sub is not None and cls.n_sub > 0 else self.n_sub + self.name_sub = ["sub_{}".format(sub_id) for sub_id in range(n_sub)] + self.name_sub = np.array(self.name_sub) + warnings.warn( + "name_sub is None so default substation names have been assigned to your grid. " + "(FYI: substation names are used to make the correspondence between the chronics and " + "the backend)" + "This might result in impossibility to load data." + '\n\tIf "env.make" properly worked, you can safely ignore this warning.' + ) + else: + self.name_sub = cls.name_sub + + if self.name_storage is None: + if cls.name_storage is None: + storage_to_subid = cls.storage_to_subid if cls.storage_to_subid is not None else self.storage_to_subid + self.name_storage = [ + "storage_{}_{}".format(bus_id, sto_id) + for sto_id, bus_id in enumerate(storage_to_subid) + ] + self.name_storage = np.array(self.name_storage) + warnings.warn( + "name_storage is None so default storage unit names have been assigned to your grid. " + "(FYI: storage names are used to make the correspondence between the chronics and " + "the backend)" + "This might result in impossibility to load data." + '\n\tIf "env.make" properly worked, you can safely ignore this warning.' + ) + else: + self.name_storage = cls.name_storage + + if cls.shunts_data_available: + if self.name_shunt is None: + if cls.name_shunt is None: + shunt_to_subid = cls.shunt_to_subid if cls.shunt_to_subid is not None else self.shunt_to_subid + self.name_shunt = [ + "shunt_{}_{}".format(bus_id, sh_id) + for sh_id, bus_id in enumerate(shunt_to_subid) + ] + self.name_shunt = np.array(self.name_shunt) + warnings.warn( + "name_shunt is None so default storage unit names have been assigned to your grid. " + "(FYI: storage names are used to make the correspondence between the chronics and " + "the backend)" + "This might result in impossibility to load data." + '\n\tIf "env.make" properly worked, you can safely ignore this warning.' 
+ ) + else: + self.name_shunt = cls.name_shunt + def load_redispacthing_data(self, path : Union[os.PathLike, str], name : Optional[str]="prods_charac.csv") -> None: @@ -1323,6 +1549,13 @@ def load_redispacthing_data(self, We don't recommend at all to modify this function. + Notes + ----- + Before you use this function, make sure the names of the generators are properly set. + + For example you can either read them from the grid (setting self.name_gen) or call + self._fill_names_obj() beforehand (this later is done in the environment.) + Parameters ---------- path: ``str`` @@ -1351,7 +1584,6 @@ def load_redispacthing_data(self, to change it. """ - self._fill_names() self.redispatching_unit_commitment_availble = False # for redispatching @@ -1403,7 +1635,6 @@ def load_redispacthing_data(self, "min_up_time": row["min_up_time"], "min_down_time": row["min_down_time"], } - self.redispatching_unit_commitment_availble = True self.gen_type = np.full(self.n_gen, fill_value="aaaaaaaaaa") self.gen_pmin = np.full(self.n_gen, fill_value=1.0, dtype=dt_float) @@ -1468,6 +1699,13 @@ def load_storage_data(self, This method will load everything needed in presence of storage unit on the grid. We don't recommend at all to modify this function. + + Notes + ----- + Before you use this function, make sure the names of the generators are properly set. + + For example you can either read them from the grid (setting self.name_gen) or call + self._fill_names_obj() beforehand (this later is done in the environment.) Parameters ---------- @@ -1517,7 +1755,7 @@ def load_storage_data(self, fullpath = os.path.join(path, name) if not os.path.exists(fullpath): raise BackendError( - f"There are storage unit on the grid, yet we could not locate their description." + f"There are {self.n_storage} storage unit(s) on the grid, yet we could not locate their description." f'Please make sure to have a file "{name}" where the environment data are located.' f'For this environment the location is "{path}"' ) @@ -1737,8 +1975,9 @@ def get_action_to_set(self) -> "grid2op.Action.CompleteAction": p_s, q_s, sh_v, bus_s = self.shunt_info() dict_["shunt"] = {"shunt_bus": bus_s} if (bus_s >= 1).sum(): - p_s *= (self._sh_vnkv / sh_v) ** 2 - q_s *= (self._sh_vnkv / sh_v) ** 2 + sh_conn = bus_s > 0 + p_s[sh_conn] *= (self._sh_vnkv[sh_conn] / sh_v[sh_conn]) ** 2 + q_s[sh_conn] *= (self._sh_vnkv[sh_conn] / sh_v[sh_conn]) ** 2 p_s[bus_s == -1] = np.NaN q_s[bus_s == -1] = np.NaN dict_["shunt"]["shunt_p"] = p_s @@ -1786,8 +2025,9 @@ def update_from_obs(self, '"grid2op.Observation.CompleteObservation".' ) - backend_action = self.my_bk_act_class() - act = self._complete_action_class() + cls = type(self) + backend_action = cls.my_bk_act_class() + act = cls._complete_action_class() line_status = self._aux_get_line_status_to_set(obs.line_status) # skip the action part and update directly the backend action ! 
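        # (this dict is applied to a CompleteAction, folded into a _BackendAction and
        #  passed to `apply_action` without going through the usual agent-side checks)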
dict_ = { @@ -1801,7 +2041,7 @@ def update_from_obs(self, }, } - if type(self).shunts_data_available and type(obs).shunts_data_available: + if cls.shunts_data_available and type(obs).shunts_data_available: if "_shunt_bus" not in type(obs).attr_list_set: raise BackendError( "Impossible to set the backend to the state given by the observation: shunts data " @@ -1818,53 +2058,105 @@ def update_from_obs(self, sh_q[~shunt_co] = np.NaN dict_["shunt"]["shunt_p"] = sh_p dict_["shunt"]["shunt_q"] = sh_q - elif type(self).shunts_data_available and not type(obs).shunts_data_available: + elif cls.shunts_data_available and not type(obs).shunts_data_available: warnings.warn("Backend supports shunt but not the observation. This behaviour is non standard.") act.update(dict_) backend_action += act self.apply_action(backend_action) - def assert_grid_correct(self) -> None: + def assert_grid_correct(self, _local_dir_cls=None) -> None: """ INTERNAL .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\ This is done as it should be by the Environment + """ - # lazy loading - from grid2op.Action import CompleteAction - from grid2op.Action._backendAction import _BackendAction + if hasattr(self, "_missing_two_busbars_support_info"): + if self._missing_two_busbars_support_info: + warnings.warn("The backend implementation you are using is probably too old to take advantage of the " + "new feature added in grid2op 1.10.0: the possibility " + "to have more than 2 busbars per substations (or not). " + "To silence this warning, you can modify the `load_grid` implementation " + "of your backend and either call:\n" + "- self.can_handle_more_than_2_busbar if the current implementation " + " can handle more than 2 busbsars OR\n" + "- self.cannot_handle_more_than_2_busbar if not." + "\nAnd of course, ideally, if the current implementation " + "of your backend cannot " + "handle more than 2 busbars per substation, then change it :-)\n" + "Your backend will behave as if it did not support it.") + self._missing_two_busbars_support_info = False + self.n_busbar_per_sub = DEFAULT_N_BUSBAR_PER_SUB + else: + self._missing_two_busbars_support_info = False + self.n_busbar_per_sub = DEFAULT_N_BUSBAR_PER_SUB + warnings.warn("Your backend is missing the `_missing_two_busbars_support_info` " + "attribute. This is known issue in lightims2grid <= 0.7.5. Please " + "upgrade your backend. 
This will raise an error in the future.") + orig_type = type(self) - if orig_type.my_bk_act_class is None: + if orig_type.my_bk_act_class is None and orig_type._INIT_GRID_CLS is None: + # NB the second part of the "if": `orig_type._INIT_GRID_CLS is None` + # has been added in grid2Op 1.10.3 to handle multiprocessing correctly: + # classes passed in multi processing should not be initialized a second time + # class is already initialized # and set up the proper class and everything self._init_class_attr() - - # hack due to changing class of imported module in the module itself + future_cls = orig_type.init_grid( - type(self), force_module=type(self).__module__ + type(self), _local_dir_cls=_local_dir_cls ) self.__class__ = future_cls - setattr( - sys.modules[type(self).__module__], - self.__class__.__name__, - self.__class__, - ) + # reset the attribute of the grid2op.Backend.Backend class # that can be messed up with depending on the initialization of the backend Backend._clear_class_attribute() # reset totally the grid2op Backend type - # orig_type._clear_class_attribute() - orig_type._clear_grid_dependant_class_attributes() # only reset the attributes that could be modified by user - + + # only reset the attributes that could be modified by the environment while keeping the + # attribute that can be defined in the Backend implementation (eg support of shunt) + orig_type._clear_grid_dependant_class_attributes() + my_cls = type(self) - my_cls.my_bk_act_class = _BackendAction.init_grid(my_cls) - my_cls._complete_action_class = CompleteAction.init_grid(my_cls) - my_cls._complete_action_class._add_shunt_data() - my_cls._complete_action_class._update_value_set() - my_cls.assert_grid_correct_cls() + my_cls._add_internal_classes(_local_dir_cls) + self._remove_my_attr_cls() + @classmethod + def _add_internal_classes(cls, _local_dir_cls): + # lazy loading + from grid2op.Action import CompleteAction + from grid2op.Action._backendAction import _BackendAction + + cls.my_bk_act_class = _BackendAction.init_grid(cls, _local_dir_cls=_local_dir_cls) + cls._complete_action_class = CompleteAction.init_grid(cls, _local_dir_cls=_local_dir_cls) + cls._complete_action_class._add_shunt_data() + cls._complete_action_class._update_value_set() + cls.assert_grid_correct_cls() + + def _remove_my_attr_cls(self): + """ + INTERNAL + + .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\ + + This function is called at the end of :func:`Backend.assert_grid_correct` and it "cleans" the attribute of the + backend object that are stored in the class now, to avoid discrepency between what has been read from the + grid and what have been processed by grid2op (for example in "compatibility" mode, storage are deactivated, so + `self.n_storage` would be different that `type(self).n_storage`) + + For this to work, the grid must first be initialized correctly, with the proper type (name of the environment + in the class name !) 
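        A self-contained illustration of the attribute shadowing this cleanup targets (toy class,
        not grid2op code):

        .. code-block:: python

            class GridCls:
                n_storage = 0      # value processed by grid2op (e.g. storage deactivated in compatibility mode)

            obj = GridCls()
            obj.n_storage = 2      # stale value kept on the instance (e.g. read directly from the grid file)
            print(obj.n_storage)   # 2 : the instance attribute shadows the class attribute

            if id(getattr(obj, "n_storage")) != id(getattr(GridCls, "n_storage")):
                delattr(obj, "n_storage")

            print(obj.n_storage)   # 0 : lookups now fall back to the class attribute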
+ """ + cls = type(self) + if cls._CLS_DICT_EXTENDED is not None: + for attr_nm, val in cls._CLS_DICT_EXTENDED.items(): + if hasattr(self, attr_nm) and hasattr(cls, attr_nm): + if id(getattr(self, attr_nm)) != id(getattr(cls, attr_nm)): + delattr(self, attr_nm) + def assert_grid_correct_after_powerflow(self) -> None: """ INTERNAL @@ -1884,22 +2176,22 @@ def assert_grid_correct_after_powerflow(self) -> None: if tmp.shape[0] != self.n_line: raise IncorrectNumberOfLines('returned by "backend.get_line_status()"') if (~np.isfinite(tmp)).any(): - raise EnvironmentError(type(self).ERR_INIT_POWERFLOW) + raise EnvError(type(self).ERR_INIT_POWERFLOW) tmp = self.get_line_flow() if tmp.shape[0] != self.n_line: raise IncorrectNumberOfLines('returned by "backend.get_line_flow()"') if (~np.isfinite(tmp)).any(): - raise EnvironmentError(type(self).ERR_INIT_POWERFLOW) + raise EnvError(type(self).ERR_INIT_POWERFLOW) tmp = self.get_thermal_limit() if tmp.shape[0] != self.n_line: raise IncorrectNumberOfLines('returned by "backend.get_thermal_limit()"') if (~np.isfinite(tmp)).any(): - raise EnvironmentError(type(self).ERR_INIT_POWERFLOW) + raise EnvError(type(self).ERR_INIT_POWERFLOW) tmp = self.get_line_overflow() if tmp.shape[0] != self.n_line: raise IncorrectNumberOfLines('returned by "backend.get_line_overflow()"') if (~np.isfinite(tmp)).any(): - raise EnvironmentError(type(self).ERR_INIT_POWERFLOW) + raise EnvError(type(self).ERR_INIT_POWERFLOW) tmp = self.generators_info() if len(tmp) != 3: diff --git a/grid2op/Backend/educPandaPowerBackend.py b/grid2op/Backend/educPandaPowerBackend.py index 6caf2f039..a56d66000 100644 --- a/grid2op/Backend/educPandaPowerBackend.py +++ b/grid2op/Backend/educPandaPowerBackend.py @@ -62,7 +62,9 @@ class EducPandaPowerBackend(Backend): real :class:`grid2op.Backend.PandaPowerBackend` class. """ - + + shunts_data_available = False + def __init__(self, detailed_infos_for_cascading_failures : Optional[bool]=False, can_be_copied : Optional[bool]=True): @@ -94,6 +96,7 @@ def __init__(self, # NB: this instance of backend is here for academic purpose only. For clarity, it does not handle # neither shunt nor storage unit. + self.shunts_data_available = False ####### load the grid def load_grid(self, @@ -131,7 +134,8 @@ def load_grid(self, example. (But of course you can still use switches if you really want to) """ - + self.cannot_handle_more_than_2_busbar() + # first, handles different kind of path: full_path = self.make_complete_path(path, filename) @@ -177,21 +181,21 @@ def load_grid(self, # initialize the number of elements per substation # now export to grid2op the substation to which objects are connected - self.load_to_subid = copy.deepcopy(self._grid.load["bus"]) - self.gen_to_subid = copy.deepcopy(self._grid.gen["bus"]) + self.load_to_subid = copy.deepcopy(self._grid.load["bus"].values) + self.gen_to_subid = copy.deepcopy(self._grid.gen["bus"].values) # here we just decide (but that is a convention we could have done it differently) # that "origin side" (grid2op) corresponds to "from_bus" from pandapower line and "hv_bus" for # pandapower trafo. 
self.line_or_to_subid = np.concatenate( ( - copy.deepcopy(self._grid.line["from_bus"]), - copy.deepcopy(self._grid.trafo["hv_bus"]), + copy.deepcopy(self._grid.line["from_bus"].values), + copy.deepcopy(self._grid.trafo["hv_bus"].values), ) ) self.line_ex_to_subid = np.concatenate( ( - copy.deepcopy(self._grid.line["to_bus"]), - copy.deepcopy(self._grid.trafo["lv_bus"]), + copy.deepcopy(self._grid.line["to_bus"].values), + copy.deepcopy(self._grid.trafo["lv_bus"].values), ) ) @@ -210,8 +214,8 @@ def load_grid(self, # NB: this instance of backend is here for academic purpose only. For clarity, it does not handle # neither shunt nor storage unit. - type(self).shunts_data_available = False - type(self).set_no_storage() + # type(self).shunts_data_available = False + # type(self).set_no_storage() ###### modify the grid def apply_action(self, backendAction: Union["grid2op.Action._backendAction._BackendAction", None]) -> None: diff --git a/grid2op/Backend/pandaPowerBackend.py b/grid2op/Backend/pandaPowerBackend.py index 3dcef1d6c..299043b65 100644 --- a/grid2op/Backend/pandaPowerBackend.py +++ b/grid2op/Backend/pandaPowerBackend.py @@ -17,11 +17,16 @@ import pandapower as pp import scipy +# check that pandapower does not introduce some +from packaging import version +import grid2op from grid2op.dtypes import dt_int, dt_float, dt_bool -from grid2op.Backend.backend import Backend from grid2op.Action import BaseAction -from grid2op.Exceptions import * +from grid2op.Exceptions import BackendError +from grid2op.Backend.backend import Backend + +MIN_LS_VERSION_VM_PU = version.parse("0.6.0") try: import numba @@ -63,31 +68,31 @@ class PandaPowerBackend(Backend): The ratio that allow the conversion from pair-unit to kv for the loads lines_or_pu_to_kv: :class:`numpy.array`, dtype:float - The ratio that allow the conversion from pair-unit to kv for the origin end of the powerlines + The ratio that allow the conversion from pair-unit to kv for the origin side of the powerlines lines_ex_pu_to_kv: :class:`numpy.array`, dtype:float - The ratio that allow the conversion from pair-unit to kv for the extremity end of the powerlines + The ratio that allow the conversion from pair-unit to kv for the extremity side of the powerlines p_or: :class:`numpy.array`, dtype:float - The active power flowing at the origin end of each powerline + The active power flowing at the origin side of each powerline q_or: :class:`numpy.array`, dtype:float - The reactive power flowing at the origin end of each powerline + The reactive power flowing at the origin side of each powerline v_or: :class:`numpy.array`, dtype:float The voltage magnitude at the origin bus of the powerline a_or: :class:`numpy.array`, dtype:float - The current flowing at the origin end of each powerline + The current flowing at the origin side of each powerline p_ex: :class:`numpy.array`, dtype:float - The active power flowing at the extremity end of each powerline + The active power flowing at the extremity side of each powerline q_ex: :class:`numpy.array`, dtype:float - The reactive power flowing at the extremity end of each powerline + The reactive power flowing at the extremity side of each powerline a_ex: :class:`numpy.array`, dtype:float - The current flowing at the extremity end of each powerline + The current flowing at the extremity side of each powerline v_ex: :class:`numpy.array`, dtype:float The voltage magnitude at the extremity bus of the powerline @@ -118,6 +123,12 @@ def __init__( can_be_copied: bool=True, with_numba: bool=NUMBA_, ): + from 
grid2op.MakeEnv.Make import _force_test_dataset + if _force_test_dataset(): + if with_numba: + warnings.warn(f"Forcing `test=True` will disable numba for {type(self)}") + with_numba = False + Backend.__init__( self, detailed_infos_for_cascading_failures=detailed_infos_for_cascading_failures, @@ -169,7 +180,6 @@ def __init__( self._number_true_line = -1 self._corresp_name_fun = {} self._get_vector_inj = {} - self.dim_topo = -1 self._vars_action = BaseAction.attr_list_vect self._vars_action_set = BaseAction.attr_list_vect self.cst_1 = dt_float(1.0) @@ -214,6 +224,10 @@ def __init__( self._lightsim2grid : bool = lightsim2grid self._dist_slack : bool = dist_slack self._max_iter : bool = max_iter + self._in_service_line_col_id = None + self._in_service_trafo_col_id = None + self._in_service_storage_cold_id = None + self.div_exception = None def _check_for_non_modeled_elements(self): """This function check for elements in the pandapower grid that will have no impact on grid2op. @@ -329,6 +343,7 @@ def load_grid(self, are set as "out of service" unless a topological action acts on these specific substations. """ + self.can_handle_more_than_2_busbar() full_path = self.make_complete_path(path, filename) with warnings.catch_warnings(): @@ -343,30 +358,15 @@ def load_grid(self, i_ref = None self._iref_slack = None self._id_bus_added = None - with warnings.catch_warnings(): - warnings.filterwarnings("ignore") - try: - pp.runpp( - self._grid, - numba=self.with_numba, - lightsim2grid=self._lightsim2grid, - distributed_slack=self._dist_slack, - max_iteration=self._max_iter, - ) - except pp.powerflow.LoadflowNotConverged: - pp.rundcpp( - self._grid, - numba=self.with_numba, - lightsim2grid=self._lightsim2grid, - distributed_slack=self._dist_slack, - max_iteration=self._max_iter, - ) + + self._aux_run_pf_init() # run an intiail powerflow, just in case + new_pp_version = False if not "slack_weight" in self._grid.gen: self._grid.gen["slack_weight"] = 1.0 else: new_pp_version = True - + if np.all(~self._grid.gen["slack"]): # there are not defined slack bus on the data, i need to hack it up a little bit pd2ppc = self._grid._pd2ppc_lookups["bus"] # pd2ppc[pd_id] = ppc_id @@ -426,26 +426,9 @@ def load_grid(self, # TODO here i force the distributed slack bus too, by removing the other from the ext_grid... 
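The `self._aux_run_pf_init()` call above replaces an inline try/except block (a second identical copy is removed further down); the helper itself is defined later in this file. The pattern is simply "try an AC power flow, fall back to a DC one if it diverges". A stand-alone sketch of that pattern, assuming only that pandapower is installed (the case14 test grid is used purely as an example):

import warnings
import pandapower as pp
import pandapower.networks as pn

def run_pf_with_dc_fallback(net):
    """Attempt an AC power flow; fall back to a DC approximation if it does not converge."""
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            pp.runpp(net)
            if not net.converged:
                raise pp.powerflow.LoadflowNotConverged
        except pp.powerflow.LoadflowNotConverged:
            pp.rundcpp(net)

net = pn.case14()
run_pf_with_dc_fallback(net)
print(net.converged)   # True on this small, healthy test grid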
self._grid.ext_grid = self._grid.ext_grid.iloc[:1] else: - self.slack_id = np.where(self._grid.gen["slack"])[0] + self.slack_id = (self._grid.gen["slack"].values).nonzero()[0] - with warnings.catch_warnings(): - warnings.filterwarnings("ignore") - try: - pp.runpp( - self._grid, - numba=self.with_numba, - lightsim2grid=self._lightsim2grid, - distributed_slack=self._dist_slack, - max_iteration=self._max_iter, - ) - except pp.powerflow.LoadflowNotConverged: - pp.rundcpp( - self._grid, - numba=self.with_numba, - lightsim2grid=self._lightsim2grid, - distributed_slack=self._dist_slack, - max_iteration=self._max_iter, - ) + self._aux_run_pf_init() # run another powerflow with the added generator self.__nb_bus_before = self._grid.bus.shape[0] self.__nb_powerline = self._grid.line.shape[0] @@ -543,16 +526,56 @@ def load_grid(self, self.name_sub = ["sub_{}".format(i) for i, row in self._grid.bus.iterrows()] self.name_sub = np.array(self.name_sub) + if type(self).shunts_data_available: + self.n_shunt = self._grid.shunt.shape[0] + else: + self.n_shunt = None + # "hack" to handle topological changes, for now only 2 buses per substation add_topo = copy.deepcopy(self._grid.bus) - add_topo.index += add_topo.shape[0] - add_topo["in_service"] = False - # self._grid.bus = pd.concat((self._grid.bus, add_topo)) - for ind, el in add_topo.iterrows(): - pp.create_bus(self._grid, index=ind, **el) - + # TODO n_busbar: what if non contiguous indexing ??? + for _ in range(self.n_busbar_per_sub - 1): # self.n_busbar_per_sub and not type(self) here otherwise it erases can_handle_more_than_2_busbar / cannot_handle_more_than_2_busbar + add_topo.index += add_topo.shape[0] + add_topo["in_service"] = False + for ind, el in add_topo.iterrows(): + pp.create_bus(self._grid, index=ind, **el) self._init_private_attrs() - + self._aux_run_pf_init() # run yet another powerflow with the added buses + + # do this at the end + self._in_service_line_col_id = int((self._grid.line.columns == "in_service").nonzero()[0][0]) + self._in_service_trafo_col_id = int((self._grid.trafo.columns == "in_service").nonzero()[0][0]) + self._in_service_storage_cold_id = int((self._grid.storage.columns == "in_service").nonzero()[0][0]) + self.comp_time = 0. + + # hack for backward compat with oldest lightsim2grid version + try: + import lightsim2grid + if version.parse(lightsim2grid.__version__) < MIN_LS_VERSION_VM_PU: + warnings.warn("You are using a really old version of lightsim2grid. Consider upgrading.") + if "_options" in self._grid and "init_vm_pu" in self._grid["_options"]: + try: + float(self._grid["_options"]["init_vm_pu"]) + except ValueError as exc_: + # we delete it because lightsim2grid uses it + # to init its internal "GridModel" and did not check that + # this is a float until MIN_LS_VERSION_VM_PU + del self._grid["_options"]["init_vm_pu"] + except ImportError: + # lightsim2grid is not installed, so no risk to contaminate it + pass + + def _aux_run_pf_init(self): + """run a powerflow when the file is being loaded. 
This is called three times for each call to "load_grid" """ + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + try: + self._aux_runpf_pp(False) + if not self._grid.converged: + raise pp.powerflow.LoadflowNotConverged + except pp.powerflow.LoadflowNotConverged: + self._aux_runpf_pp(True) + def _init_private_attrs(self) -> None: # number of elements per substation self.sub_info = np.zeros(self.n_sub, dtype=dt_int) @@ -637,6 +660,21 @@ def _init_private_attrs(self) -> None: self._what_object_where[sub_id].append(("storage", "bus", i)) self.dim_topo = self.sub_info.sum() + + # shunts data + if type(self).shunts_data_available: + self.shunt_to_subid = np.zeros(self.n_shunt, dtype=dt_int) - 1 + name_shunt = [] + # TODO read name from the grid if provided + for i, (_, row) in enumerate(self._grid.shunt.iterrows()): + bus = int(row["bus"]) + name_shunt.append("shunt_{bus}_{index_shunt}".format(**row, index_shunt=i)) + self.shunt_to_subid[i] = bus + self.name_shunt = np.array(name_shunt).astype(str) + self._sh_vnkv = self._grid.bus["vn_kv"][self.shunt_to_subid].values.astype( + dt_float + ) + self._compute_pos_big_topo() # utilities for imeplementing apply_action @@ -656,23 +694,23 @@ def _init_private_attrs(self) -> None: "prod_v" ] = self._load_grid_gen_vm_pu # lambda grid: grid.gen["vm_pu"] - self.load_pu_to_kv = self._grid.bus["vn_kv"][self.load_to_subid].values.astype( + self.load_pu_to_kv = 1. * self._grid.bus["vn_kv"][self.load_to_subid].values.astype( dt_float ) - self.prod_pu_to_kv = self._grid.bus["vn_kv"][self.gen_to_subid].values.astype( + self.prod_pu_to_kv = 1. * self._grid.bus["vn_kv"][self.gen_to_subid].values.astype( dt_float ) - self.lines_or_pu_to_kv = self._grid.bus["vn_kv"][ + self.lines_or_pu_to_kv = 1. * self._grid.bus["vn_kv"][ self.line_or_to_subid ].values.astype(dt_float) - self.lines_ex_pu_to_kv = self._grid.bus["vn_kv"][ + self.lines_ex_pu_to_kv = 1. * self._grid.bus["vn_kv"][ self.line_ex_to_subid ].values.astype(dt_float) - self.storage_pu_to_kv = self._grid.bus["vn_kv"][ + self.storage_pu_to_kv = 1. * self._grid.bus["vn_kv"][ self.storage_to_subid ].values.astype(dt_float) - self.thermal_limit_a = 1000 * np.concatenate( + self.thermal_limit_a = 1000. 
* np.concatenate( ( self._grid.line["max_i_ka"].values, self._grid.trafo["sn_mva"].values @@ -701,21 +739,6 @@ def _init_private_attrs(self) -> None: self.storage_v = np.full(self.n_storage, dtype=dt_float, fill_value=np.NaN) self._nb_bus_before = None - # shunts data - self.n_shunt = self._grid.shunt.shape[0] - self.shunt_to_subid = np.zeros(self.n_shunt, dtype=dt_int) - 1 - name_shunt = [] - # TODO read name from the grid if provided - for i, (_, row) in enumerate(self._grid.shunt.iterrows()): - bus = int(row["bus"]) - name_shunt.append("shunt_{bus}_{index_shunt}".format(**row, index_shunt=i)) - self.shunt_to_subid[i] = bus - self.name_shunt = np.array(name_shunt) - self._sh_vnkv = self._grid.bus["vn_kv"][self.shunt_to_subid].values.astype( - dt_float - ) - # self.shunts_data_available = True # TODO shunts_data_available - # store the topoid -> objid self._big_topo_to_obj = [(None, None) for _ in range(self.dim_topo)] nm_ = "load" @@ -776,7 +799,12 @@ def _init_private_attrs(self) -> None: ) # will be initialized in the "assert_grid_correct" def storage_deact_for_backward_comaptibility(self) -> None: - self._init_private_attrs() + cls = type(self) + self.storage_theta = np.full(cls.n_storage, fill_value=np.NaN, dtype=dt_float) + self.storage_p = np.full(cls.n_storage, dtype=dt_float, fill_value=np.NaN) + self.storage_q = np.full(cls.n_storage, dtype=dt_float, fill_value=np.NaN) + self.storage_v = np.full(cls.n_storage, dtype=dt_float, fill_value=np.NaN) + self._topo_vect = self._get_topo_vect() def _convert_id_topo(self, id_big_topo): """ @@ -802,7 +830,7 @@ def apply_action(self, backendAction: Union["grid2op.Action._backendAction._Back """ if backendAction is None: return - + cls = type(self) ( @@ -813,11 +841,12 @@ def apply_action(self, backendAction: Union["grid2op.Action._backendAction._Back ) = backendAction() # handle bus status - bus_is = self._grid.bus["in_service"] - for i, (bus1_status, bus2_status) in enumerate(active_bus): - bus_is[i] = bus1_status # no iloc for bus, don't ask me why please :-/ - bus_is[i + self.__nb_bus_before] = bus2_status - + self._grid.bus["in_service"] = pd.Series(data=active_bus.T.reshape(-1), + index=np.arange(cls.n_sub * cls.n_busbar_per_sub), + dtype=bool) + # TODO n_busbar what if index is not continuous + + # handle generators tmp_prod_p = self._get_vector_inj["prod_p"](self._grid) if (prod_p.changed).any(): tmp_prod_p.iloc[prod_p.changed] = prod_p.values[prod_p.changed] @@ -840,7 +869,7 @@ def apply_action(self, backendAction: Union["grid2op.Action._backendAction._Back if (load_q.changed).any(): tmp_load_q.iloc[load_q.changed] = load_q.values[load_q.changed] - if self.n_storage > 0: + if cls.n_storage > 0: # active setpoint tmp_stor_p = self._grid.storage["p_mw"] if (storage.changed).any(): @@ -848,19 +877,18 @@ def apply_action(self, backendAction: Union["grid2op.Action._backendAction._Back # topology of the storage stor_bus = backendAction.get_storages_bus() - new_bus_id = stor_bus.values[stor_bus.changed] # id of the busbar 1 or 2 if - activated = new_bus_id > 0 # mask of storage that have been activated - new_bus_num = ( - self.storage_to_subid[stor_bus.changed] + (new_bus_id - 1) * self.n_sub - ) # bus number - new_bus_num[~activated] = self.storage_to_subid[stor_bus.changed][ - ~activated - ] - self._grid.storage["in_service"].values[stor_bus.changed] = activated - self._grid.storage["bus"].values[stor_bus.changed] = new_bus_num - self._topo_vect[self.storage_pos_topo_vect[stor_bus.changed]] = new_bus_num + new_bus_num = dt_int(1) * 
self._grid.storage["bus"].values + new_bus_id = stor_bus.values[stor_bus.changed] + new_bus_num[stor_bus.changed] = cls.local_bus_to_global(new_bus_id, cls.storage_to_subid[stor_bus.changed]) + deactivated = new_bus_num <= -1 + deact_and_changed = deactivated & stor_bus.changed + new_bus_num[deact_and_changed] = cls.storage_to_subid[deact_and_changed] + self._grid.storage.loc[stor_bus.changed & deactivated, "in_service"] = False + self._grid.storage.loc[stor_bus.changed & ~deactivated, "in_service"] = True + self._grid.storage["bus"] = new_bus_num + self._topo_vect[cls.storage_pos_topo_vect[stor_bus.changed]] = new_bus_id self._topo_vect[ - self.storage_pos_topo_vect[stor_bus.changed][~activated] + cls.storage_pos_topo_vect[deact_and_changed] ] = -1 if type(backendAction).shunts_data_available: @@ -879,7 +907,7 @@ def apply_action(self, backendAction: Union["grid2op.Action._backendAction._Back self._grid.shunt["in_service"].iloc[shunt_bus.changed] = sh_service chg_and_in_service = sh_service & shunt_bus.changed self._grid.shunt["bus"].loc[chg_and_in_service] = cls.local_bus_to_global(shunt_bus.values[chg_and_in_service], - cls.shunt_to_subid[chg_and_in_service]) + cls.shunt_to_subid[chg_and_in_service]) # i made at least a real change, so i implement it in the backend for id_el, new_bus in topo__: @@ -888,7 +916,7 @@ def apply_action(self, backendAction: Union["grid2op.Action._backendAction._Back if type_obj is not None: # storage unit are handled elsewhere self._type_to_bus_set[type_obj](new_bus, id_el_backend, id_topo) - + def _apply_load_bus(self, new_bus, id_el_backend, id_topo): new_bus_backend = type(self).local_bus_to_global_int( new_bus, self._init_bus_load[id_el_backend] @@ -979,6 +1007,71 @@ def _aux_get_line_info(self, colname1, colname2): ) return res + def _aux_runpf_pp(self, is_dc: bool): + with warnings.catch_warnings(): + # remove the warning if _grid non connex. And it that case load flow as not converged + warnings.filterwarnings( + "ignore", category=scipy.sparse.linalg.MatrixRankWarning + ) + warnings.filterwarnings("ignore", category=RuntimeWarning) + warnings.filterwarnings("ignore", category=DeprecationWarning) + self._pf_init = "dc" + # nb_bus = self.get_nb_active_bus() + # if self._nb_bus_before is None: + # self._pf_init = "dc" + # elif nb_bus == self._nb_bus_before: + # self._pf_init = "results" + # else: + # self._pf_init = "auto" + + if (~self._grid.load["in_service"]).any(): + # TODO see if there is a better way here -> do not handle this here, but rather in Backend._next_grid_state + raise pp.powerflow.LoadflowNotConverged("Disconnected load: for now grid2op cannot handle properly" + " disconnected load. If you want to disconnect one, say it" + " consumes 0. instead. Please check loads: " + f"{(~self._grid.load['in_service'].values).nonzero()[0]}" + ) + if (~self._grid.gen["in_service"]).any(): + # TODO see if there is a better way here -> do not handle this here, but rather in Backend._next_grid_state + raise pp.powerflow.LoadflowNotConverged("Disconnected gen: for now grid2op cannot handle properly" + " disconnected generators. If you want to disconnect one, say it" + " produces 0. instead. 
Please check generators: " + f"{(~self._grid.gen['in_service'].values).nonzero()[0]}" + ) + try: + if is_dc: + pp.rundcpp(self._grid, check_connectivity=True, init="flat") + # if I put check_connectivity=False then the test AAATestBackendAPI.test_22_islanded_grid_make_divergence + # does not pass + + # if dc i start normally next time i call an ac powerflow + self._nb_bus_before = None + else: + pp.runpp( + self._grid, + check_connectivity=False, + init=self._pf_init, + numba=self.with_numba, + lightsim2grid=self._lightsim2grid, + max_iteration=self._max_iter, + distributed_slack=self._dist_slack, + ) + except IndexError as exc_: + raise pp.powerflow.LoadflowNotConverged(f"Surprising behaviour of pandapower when a bus is not connected to " + f"anything but present on the bus (with check_connectivity=False). " + f"Error was {exc_}" + ) + + # stores the computation time + if "_ppc" in self._grid: + if "et" in self._grid["_ppc"]: + self.comp_time += self._grid["_ppc"]["et"] + if self._grid.res_gen.isnull().values.any(): + # TODO see if there is a better way here -> do not handle this here, but rather in Backend._next_grid_state + # sometimes pandapower does not detect divergence and put Nan. + raise pp.powerflow.LoadflowNotConverged("Divergence due to Nan values in res_gen table (most likely due to " + "a non connected grid).") + def runpf(self, is_dc : bool=False) -> Tuple[bool, Union[Exception, None]]: """ INTERNAL @@ -990,74 +1083,15 @@ def runpf(self, is_dc : bool=False) -> Tuple[bool, Union[Exception, None]]: buses has not changed between two calls, the previous results are re used. This speeds up the computation in case of "do nothing" action applied. """ - nb_bus = self.get_nb_active_bus() try: - with warnings.catch_warnings(): - # remove the warning if _grid non connex. And it that case load flow as not converged - warnings.filterwarnings( - "ignore", category=scipy.sparse.linalg.MatrixRankWarning - ) - warnings.filterwarnings("ignore", category=RuntimeWarning) - warnings.filterwarnings("ignore", category=DeprecationWarning) - if self._nb_bus_before is None: - self._pf_init = "dc" - elif nb_bus == self._nb_bus_before: - self._pf_init = "results" - else: - self._pf_init = "auto" - - if (~self._grid.load["in_service"]).any(): - # TODO see if there is a better way here -> do not handle this here, but rather in Backend._next_grid_state - raise pp.powerflow.LoadflowNotConverged("Disconnected load: for now grid2op cannot handle properly" - " disconnected load. If you want to disconnect one, say it" - " consumes 0. instead. Please check loads: " - f"{np.where(~self._grid.load['in_service'])[0]}" - ) - if (~self._grid.gen["in_service"]).any(): - # TODO see if there is a better way here -> do not handle this here, but rather in Backend._next_grid_state - raise pp.powerflow.LoadflowNotConverged("Disconnected gen: for now grid2op cannot handle properly" - " disconnected generators. If you want to disconnect one, say it" - " produces 0. instead. 
Please check generators: " - f"{np.where(~self._grid.gen['in_service'])[0]}" - ) - try: - if is_dc: - pp.rundcpp(self._grid, check_connectivity=True, init="flat") - # if I put check_connectivity=False then the test AAATestBackendAPI.test_22_islanded_grid_make_divergence - # does not pass - - # if dc i start normally next time i call an ac powerflow - self._nb_bus_before = None - else: - pp.runpp( - self._grid, - check_connectivity=False, - init=self._pf_init, - numba=self.with_numba, - lightsim2grid=self._lightsim2grid, - max_iteration=self._max_iter, - distributed_slack=self._dist_slack, - ) - except IndexError as exc_: - raise pp.powerflow.LoadflowNotConverged(f"Surprising behaviour of pandapower when a bus is not connected to " - f"anything but present on the bus (with check_connectivity=False). " - f"Error was {exc_}" - ) - - # stores the computation time - if "_ppc" in self._grid: - if "et" in self._grid["_ppc"]: - self.comp_time += self._grid["_ppc"]["et"] - if self._grid.res_gen.isnull().values.any(): - # TODO see if there is a better way here -> do not handle this here, but rather in Backend._next_grid_state - # sometimes pandapower does not detect divergence and put Nan. - raise pp.powerflow.LoadflowNotConverged("Divergence due to Nan values in res_gen table (most likely due to " - "a non connected grid).") - + self._aux_runpf_pp(is_dc) + cls = type(self) # if a connected bus has a no voltage, it's a divergence (grid was not connected) if self._grid.res_bus.loc[self._grid.bus["in_service"]]["va_degree"].isnull().any(): - raise pp.powerflow.LoadflowNotConverged("Isolated bus") - + buses_ko = self._grid.res_bus.loc[self._grid.bus["in_service"]]["va_degree"].isnull() + buses_ko = buses_ko.values.nonzero()[0] + raise pp.powerflow.LoadflowNotConverged(f"Isolated bus, check buses {buses_ko} with `env.backend._grid.res_bus.iloc[{buses_ko}, :]`") + ( self.prod_p[:], self.prod_q[:], @@ -1075,7 +1109,7 @@ def runpf(self, is_dc : bool=False) -> Tuple[bool, Union[Exception, None]]: if not np.isfinite(self.load_v).all(): # TODO see if there is a better way here # some loads are disconnected: it's a game over case! 
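The reworked "Isolated bus" error above now reports which buses are at fault. The underlying check is plain pandapower: after a power flow, an in-service bus whose voltage angle came back as NaN was not connected to the rest of the grid. A sketch of the same diagnostic on a toy network (again using the case14 example grid, not data from this patch):

import pandapower as pp
import pandapower.networks as pn

net = pn.case14()
pp.runpp(net)

# same test as in runpf(): in-service buses whose angle is NaN are isolated
in_service = net.bus["in_service"]
buses_ko = net.res_bus.loc[in_service]["va_degree"].isnull().values.nonzero()[0]
print("isolated buses:", buses_ko)   # empty on this healthy grid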
- raise pp.powerflow.LoadflowNotConverged("Isolated load") + raise pp.powerflow.LoadflowNotConverged(f"Isolated load: check loads {np.isfinite(self.load_v).nonzero()[0]}") else: # fix voltages magnitude that are always "nan" for dc case # self._grid.res_bus["vm_pu"] is always nan when computed in DC @@ -1083,15 +1117,15 @@ def runpf(self, is_dc : bool=False) -> Tuple[bool, Union[Exception, None]]: # need to assign the correct value when a generator is present at the same bus # TODO optimize this ugly loop # see https://github.com/e2nIEE/pandapower/issues/1996 for a fix - for l_id in range(self.n_load): - if self.load_to_subid[l_id] in self.gen_to_subid: - ind_gens = np.where( - self.gen_to_subid == self.load_to_subid[l_id] - )[0] + for l_id in range(cls.n_load): + if cls.load_to_subid[l_id] in cls.gen_to_subid: + ind_gens = ( + cls.gen_to_subid == cls.load_to_subid[l_id] + ).nonzero()[0] for g_id in ind_gens: if ( - self._topo_vect[self.load_pos_topo_vect[l_id]] - == self._topo_vect[self.gen_pos_topo_vect[g_id]] + self._topo_vect[cls.load_pos_topo_vect[l_id]] + == self._topo_vect[cls.gen_pos_topo_vect[g_id]] ): self.load_v[l_id] = self.prod_v[g_id] break @@ -1101,7 +1135,7 @@ def runpf(self, is_dc : bool=False) -> Tuple[bool, Union[Exception, None]]: self.p_or[:] = self._aux_get_line_info("p_from_mw", "p_hv_mw") self.q_or[:] = self._aux_get_line_info("q_from_mvar", "q_hv_mvar") self.v_or[:] = self._aux_get_line_info("vm_from_pu", "vm_hv_pu") - self.a_or[:] = self._aux_get_line_info("i_from_ka", "i_hv_ka") * 1000 + self.a_or[:] = self._aux_get_line_info("i_from_ka", "i_hv_ka") * 1000. self.theta_or[:] = self._aux_get_line_info( "va_from_degree", "va_hv_degree" ) @@ -1111,7 +1145,7 @@ def runpf(self, is_dc : bool=False) -> Tuple[bool, Union[Exception, None]]: self.p_ex[:] = self._aux_get_line_info("p_to_mw", "p_lv_mw") self.q_ex[:] = self._aux_get_line_info("q_to_mvar", "q_lv_mvar") self.v_ex[:] = self._aux_get_line_info("vm_to_pu", "vm_lv_pu") - self.a_ex[:] = self._aux_get_line_info("i_to_ka", "i_lv_ka") * 1000 + self.a_ex[:] = self._aux_get_line_info("i_to_ka", "i_lv_ka") * 1000. self.theta_ex[:] = self._aux_get_line_info( "va_to_degree", "va_lv_degree" ) @@ -1129,7 +1163,9 @@ def runpf(self, is_dc : bool=False) -> Tuple[bool, Union[Exception, None]]: self.theta_ex[~np.isfinite(self.theta_ex)] = 0.0 self._nb_bus_before = None - self._grid._ppc["gen"][self._iref_slack, 1] = 0.0 + if self._iref_slack is not None: + # a gen has been added to represent the slack, modeled as an "ext_grid" + self._grid._ppc["gen"][self._iref_slack, 1] = 0.0 # handle storage units # note that we have to look ourselves for disconnected storage @@ -1150,23 +1186,17 @@ def runpf(self, is_dc : bool=False) -> Tuple[bool, Union[Exception, None]]: self._grid.storage["in_service"].values[deact_storage] = False self._topo_vect[:] = self._get_topo_vect() - return self._grid.converged, None + if not self._grid.converged: + raise pp.powerflow.LoadflowNotConverged("Divergence without specific reason (self._grid.converged is False)") + self.div_exception = None + return True, None except pp.powerflow.LoadflowNotConverged as exc_: # of the powerflow has not converged, results are Nan + self.div_exception = exc_ self._reset_all_nan() msg = exc_.__str__() - return False, BackendError(f'powerflow diverged with error :"{msg}"') - - def assert_grid_correct(self) -> None: - """ - INTERNAL - - .. 
warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\ - - This is done as it should be by the Environment - """ - super().assert_grid_correct() + return False, BackendError(f'powerflow diverged with error :"{msg}", you can check `env.backend.div_exception` for more information') def _reset_all_nan(self) -> None: self.p_or[:] = np.NaN @@ -1202,7 +1232,6 @@ def copy(self) -> "PandaPowerBackend": This should return a deep copy of the Backend itself and not just the `self._grid` """ - # res = copy.deepcopy(self) # this was really slow... res = type(self)(**self._my_kwargs) # copy from base class (backend) @@ -1279,11 +1308,10 @@ def copy(self) -> "PandaPowerBackend": with warnings.catch_warnings(): warnings.simplefilter("ignore", FutureWarning) res.__pp_backend_initial_grid = copy.deepcopy(self.__pp_backend_initial_grid) - - res.tol = ( - self.tol - ) # this is NOT the pandapower tolerance !!!! this is used to check if a storage unit + + # this is NOT the pandapower tolerance !!!! this is used to check if a storage unit # produce / absorbs anything + res.tol = self.tol # TODO storage doc (in grid2op rst) of the backend res.can_output_theta = self.can_output_theta # I support the voltage angle @@ -1292,7 +1320,12 @@ def copy(self) -> "PandaPowerBackend": res.load_theta = copy.deepcopy(self.load_theta) res.gen_theta = copy.deepcopy(self.gen_theta) res.storage_theta = copy.deepcopy(self.storage_theta) - + + res._in_service_line_col_id = self._in_service_line_col_id + res._in_service_trafo_col_id = self._in_service_trafo_col_id + + res._missing_two_busbars_support_info = self._missing_two_busbars_support_info + res.div_exception = self.div_exception return res def close(self) -> None: @@ -1347,88 +1380,47 @@ def get_line_flow(self) -> np.ndarray: def _disconnect_line(self, id_): if id_ < self._number_true_line: - self._grid.line["in_service"].iloc[id_] = False + self._grid.line.iloc[id_, self._in_service_line_col_id] = False else: - self._grid.trafo["in_service"].iloc[id_ - self._number_true_line] = False + self._grid.trafo.iloc[id_ - self._number_true_line, self._in_service_trafo_col_id] = False self._topo_vect[self.line_or_pos_topo_vect[id_]] = -1 self._topo_vect[self.line_ex_pos_topo_vect[id_]] = -1 self.line_status[id_] = False def _reconnect_line(self, id_): if id_ < self._number_true_line: - self._grid.line["in_service"].iloc[id_] = True + self._grid.line.iloc[id_, self._in_service_line_col_id] = True else: - self._grid.trafo["in_service"].iloc[id_ - self._number_true_line] = True + self._grid.trafo.iloc[id_ - self._number_true_line, self._in_service_trafo_col_id] = True self.line_status[id_] = True def get_topo_vect(self) -> np.ndarray: return self._topo_vect def _get_topo_vect(self): - res = np.full(self.dim_topo, fill_value=np.iinfo(dt_int).max, dtype=dt_int) + cls = type(self) + res = np.full(cls.dim_topo, fill_value=np.iinfo(dt_int).max, dtype=dt_int) + # lines / trafo line_status = self.get_line_status() - - i = 0 - for row in self._grid.line[["from_bus", "to_bus"]].values: - bus_or_id = row[0] - bus_ex_id = row[1] - if line_status[i]: - res[self.line_or_pos_topo_vect[i]] = ( - 1 if bus_or_id == self.line_or_to_subid[i] else 2 - ) - res[self.line_ex_pos_topo_vect[i]] = ( - 1 if bus_ex_id == self.line_ex_to_subid[i] else 2 - ) - else: - res[self.line_or_pos_topo_vect[i]] = -1 - res[self.line_ex_pos_topo_vect[i]] = -1 - i += 1 - - nb = self._number_true_line - i = 0 - for row in self._grid.trafo[["hv_bus", "lv_bus"]].values: - bus_or_id = row[0] - bus_ex_id = 
row[1] - - j = i + nb - if line_status[j]: - res[self.line_or_pos_topo_vect[j]] = ( - 1 if bus_or_id == self.line_or_to_subid[j] else 2 - ) - res[self.line_ex_pos_topo_vect[j]] = ( - 1 if bus_ex_id == self.line_ex_to_subid[j] else 2 - ) - else: - res[self.line_or_pos_topo_vect[j]] = -1 - res[self.line_ex_pos_topo_vect[j]] = -1 - i += 1 - - i = 0 - for bus_id in self._grid.gen["bus"].values: - res[self.gen_pos_topo_vect[i]] = 1 if bus_id == self.gen_to_subid[i] else 2 - i += 1 - - i = 0 - for bus_id in self._grid.load["bus"].values: - res[self.load_pos_topo_vect[i]] = ( - 1 if bus_id == self.load_to_subid[i] else 2 - ) - i += 1 - - if self.n_storage: - # storage can be deactivated by the environment for backward compatibility - i = 0 - for bus_id in self._grid.storage["bus"].values: - status = self._grid.storage["in_service"].values[i] - if status: - res[self.storage_pos_topo_vect[i]] = ( - 1 if bus_id == self.storage_to_subid[i] else 2 - ) - else: - res[self.storage_pos_topo_vect[i]] = -1 - i += 1 - + glob_bus_or = np.concatenate((self._grid.line["from_bus"].values, self._grid.trafo["hv_bus"].values)) + res[cls.line_or_pos_topo_vect] = cls.global_bus_to_local(glob_bus_or, cls.line_or_to_subid) + res[cls.line_or_pos_topo_vect[~line_status]] = -1 + glob_bus_ex = np.concatenate((self._grid.line["to_bus"].values, self._grid.trafo["lv_bus"].values)) + res[cls.line_ex_pos_topo_vect] = cls.global_bus_to_local(glob_bus_ex, cls.line_ex_to_subid) + res[cls.line_ex_pos_topo_vect[~line_status]] = -1 + # load, gen + load_status = self._grid.load["in_service"].values + res[cls.load_pos_topo_vect] = cls.global_bus_to_local(self._grid.load["bus"].values, cls.load_to_subid) + res[cls.load_pos_topo_vect[~load_status]] = -1 + gen_status = self._grid.gen["in_service"].values + res[cls.gen_pos_topo_vect] = cls.global_bus_to_local(self._grid.gen["bus"].values, cls.gen_to_subid) + res[cls.gen_pos_topo_vect[~gen_status]] = -1 + # storage + if cls.n_storage: + storage_status = self._grid.storage["in_service"].values + res[cls.storage_pos_topo_vect] = cls.global_bus_to_local(self._grid.storage["bus"].values, cls.storage_to_subid) + res[cls.storage_pos_topo_vect[~storage_status]] = -1 return res def _gens_info(self): diff --git a/grid2op/Chronics/GSFFWFWM.py b/grid2op/Chronics/GSFFWFWM.py index fc09e16e3..8ab2c1f22 100644 --- a/grid2op/Chronics/GSFFWFWM.py +++ b/grid2op/Chronics/GSFFWFWM.py @@ -108,6 +108,14 @@ def initialize( self.max_daily_number_per_month_maintenance = dict_[ "max_daily_number_per_month_maintenance" ] + + if "maintenance_day_of_week" in dict_: + self.maintenance_day_of_week = [int(el) for el in dict_[ + "maintenance_day_of_week" + ]] + else: + self.maintenance_day_of_week = np.arange(5) + super().initialize( order_backend_loads, order_backend_prods, @@ -133,7 +141,6 @@ def _sample_maintenance(self): ######## # new method to introduce generated maintenance self.maintenance = self._generate_maintenance() # - ########## # same as before in GridStateFromFileWithForecasts GridStateFromFileWithForecastsWithMaintenance._fix_maintenance_format(self) @@ -157,7 +164,7 @@ def _fix_maintenance_format(obj_with_maintenance): ) # there are _maintenance and hazards only if the value in the file is not 0. 
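Right below, the strict `!= 0.0` test on the maintenance matrix is replaced by a tolerance: these flags travel through files as floats and can pick up rounding noise on the way. A tiny self-contained illustration with made-up values:

import numpy as np

raw = np.array([0.0, 1.0, 1e-12, 0.9999999])   # hypothetical values read back from a file
strict = raw != 0.0             # flags the 1e-12 noise as a maintenance
robust = np.abs(raw) >= 1e-7    # anything below 1e-7 is treated as "no maintenance"
print(strict.tolist())   # [False, True, True, True]
print(robust.tolist())   # [False, True, False, True]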
- obj_with_maintenance.maintenance = obj_with_maintenance.maintenance != 0.0 + obj_with_maintenance.maintenance = np.abs(obj_with_maintenance.maintenance) >= 1e-7 obj_with_maintenance.maintenance = obj_with_maintenance.maintenance.astype(dt_bool) @staticmethod @@ -171,7 +178,12 @@ def _generate_matenance_static(name_line, daily_proba_per_month_maintenance, max_daily_number_per_month_maintenance, space_prng, + maintenance_day_of_week=None ): + if maintenance_day_of_week is None: + # new in grid2op 1.10.3 + maintenance_day_of_week = np.arange(5) + # define maintenance dataframe with size (nbtimesteps,nlines) columnsNames = name_line nbTimesteps = n_ @@ -203,8 +215,6 @@ def _generate_matenance_static(name_line, datelist = datelist[:-1] n_lines_maintenance = len(line_to_maintenance) - - _24_h = timedelta(seconds=86400) nb_rows = int(86400 / time_interval.total_seconds()) selected_rows_beg = int( maintenance_starting_hour * 3600 / time_interval.total_seconds() @@ -220,7 +230,7 @@ def _generate_matenance_static(name_line, maxDailyMaintenance = -1 for nb_day_since_beg, this_day in enumerate(datelist): dayOfWeek = this_day.weekday() - if dayOfWeek < 5: # only maintenance starting on working days + if dayOfWeek in maintenance_day_of_week: month = this_day.month maintenance_me = np.zeros((nb_rows, nb_line_maint)) @@ -251,7 +261,7 @@ def _generate_matenance_static(name_line, size=n_Generated_Maintenance - maxDailyMaintenance, ) are_lines_in_maintenance[ - np.where(are_lines_in_maintenance)[0][not_chosen] + (are_lines_in_maintenance).nonzero()[0][not_chosen] ] = False maintenance_me[ selected_rows_beg:selected_rows_end, are_lines_in_maintenance @@ -279,5 +289,9 @@ def _generate_maintenance(self): self.maintenance_ending_hour, self.daily_proba_per_month_maintenance, self.max_daily_number_per_month_maintenance, - self.space_prng + self.space_prng, + self.maintenance_day_of_week ) + + def regenerate_with_new_seed(self): + self._sample_maintenance() diff --git a/grid2op/Chronics/chronicsHandler.py b/grid2op/Chronics/chronicsHandler.py index a1b66bce7..9f04c8f92 100644 --- a/grid2op/Chronics/chronicsHandler.py +++ b/grid2op/Chronics/chronicsHandler.py @@ -11,6 +11,7 @@ import numpy as np from datetime import timedelta +from grid2op.Exceptions.envExceptions import EnvError from grid2op.dtypes import dt_int from grid2op.Exceptions import Grid2OpException, ChronicsError from grid2op.Space import RandomObject @@ -88,6 +89,21 @@ def __init__( ) from exc_ @property + def action_space(self): + return self._real_data.action_space + + @action_space.setter + def action_space(self, values): + try: + self._real_data.action_space = values + except EnvError as exc_: + raise EnvError("Impossible to set the action_space for this 'chronics_handler'. " + f"It appears they have already been set previously. Do you try to use " + "The same 'chronics_handler' for two different environment ? " + "If so, you probably should not. \n" + "If you deep copied a 'chronics_handler', you can call `cpy.cleanup_action_space()` " + "on the copy to solve this issue.") from exc_ + @property def kwargs(self): res = copy.deepcopy(self._kwargs) if self._real_data is not None: @@ -144,13 +160,21 @@ def get_name(self): """ return str(os.path.split(self.get_id())[-1]) - def set_max_iter(self, max_iter: int): + def _set_max_iter(self, max_iter: int): """ This function is used to set the maximum number of iterations possible before the chronics ends. You can reset this by setting it to `-1`. + .. 
danger:: + As for grid2op 1.10.3, due to the fix of a bug when + max_iter and fast_forward were used at the same time + you should not use this function anymore. + + Please use `env.set_max_iter()` instead of + `env.chronics_hander.set_max_iter()` + Parameters ---------- max_iter: ``int`` @@ -159,9 +183,9 @@ def set_max_iter(self, max_iter: int): """ - if not isinstance(max_iter, int): + if not isinstance(max_iter, (int, dt_int, np.int64)): raise Grid2OpException( - "The maximum number of iterations possible for this chronics, before it ends." + "The maximum number of iterations possible for this time series, before it ends should be an int" ) if max_iter == 0: raise Grid2OpException( @@ -206,3 +230,12 @@ def __getattr__(self, name): # https://github.com/matplotlib/matplotlib/issues/7852/ return object.__getattr__(self, name) return getattr(self._real_data, name) + + def cleanup_action_space(self): + """INTERNAL, used to forget the "old" action_space when the + chronics_handler is copied for example. + """ + if self._real_data is None: + return + self._real_data.cleanup_action_space() + \ No newline at end of file diff --git a/grid2op/Chronics/fromChronix2grid.py b/grid2op/Chronics/fromChronix2grid.py index 2831f8d9d..9c6843404 100644 --- a/grid2op/Chronics/fromChronix2grid.py +++ b/grid2op/Chronics/fromChronix2grid.py @@ -309,4 +309,12 @@ def next_chronics(self): GridStateFromFileWithForecastsWithMaintenance._fix_maintenance_format(self) self.check_validity(backend=None) + + def regenerate_with_new_seed(self): + raise ChronicsError("You should not 'cache' the data coming from the " + "`FromChronix2grid`, which is probably why you ended " + "up calling this function. If you want to generate data " + "'on the fly' please do not use the `MultiFolder` or " + "`MultiFolderWithCache` `chronics_class` when making your " + "environment.") \ No newline at end of file diff --git a/grid2op/Chronics/fromMultiEpisodeData.py b/grid2op/Chronics/fromMultiEpisodeData.py index 309f990e6..d7f77d227 100644 --- a/grid2op/Chronics/fromMultiEpisodeData.py +++ b/grid2op/Chronics/fromMultiEpisodeData.py @@ -11,7 +11,7 @@ import numpy as np import copy import warnings -from typing import Optional, Union, List +from typing import Optional, Union, List, Dict, Literal from pathlib import Path from grid2op.Exceptions import ( @@ -30,7 +30,7 @@ class FromMultiEpisodeData(GridValue): It is an extension of the class :class:`FromOneEpisodeData` but with multiple episodes. .. seealso:: - :class:`grid2op.Chronics.FromOneEpisodeData`if you want to use only one episode + :class:`grid2op.Chronics.FromOneEpisodeData` if you want to use only one episode .. 
warning:: It has the same limitation as :class:`grid2op.Chronics.FromOneEpisodeData`, including: @@ -152,6 +152,9 @@ def initialize( names_chronics_to_backend=names_chronics_to_backend, ) self._episode_data = self.data._episode_data + if self.action_space is not None: + if self.data.action_space is None: + self.data.action_space = self.action_space def done(self): return self.data.done() @@ -186,3 +189,10 @@ def max_timestep(self): def fast_forward(self, nb_timestep): self.data.fast_forward(nb_timestep) + + def get_init_action(self, names_chronics_to_backend: Optional[Dict[Literal["loads", "prods", "lines"], Dict[str, str]]]=None) -> Union["grid2op.Action.playableAction.PlayableAction", None]: + return self.data.get_init_action(names_chronics_to_backend) + + def cleanup_action_space(self): + super().cleanup_action_space() + self.data.cleanup_action_space() diff --git a/grid2op/Chronics/fromNPY.py b/grid2op/Chronics/fromNPY.py index 983d10ebd..475f5aa7e 100644 --- a/grid2op/Chronics/fromNPY.py +++ b/grid2op/Chronics/fromNPY.py @@ -6,7 +6,7 @@ # SPDX-License-Identifier: MPL-2.0 # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. -from typing import Optional, Union +from typing import Optional, Union, Dict, Literal import numpy as np import hashlib from datetime import datetime, timedelta @@ -68,12 +68,13 @@ class FromNPY(GridValue): "load_q": load_q, "prod_p": prod_p, "prod_v": prod_v - # other parameters includes + ## other parameters includes # maintenance # load_p_forecast # load_q_forecast # prod_p_forecast # prod_v_forecast + # init_state # new in 1.10.2 }) # you can use env normally, including in runners @@ -129,6 +130,7 @@ def __init__( chunk_size: Optional[int] = None, i_start: Optional[int] = None, i_end: Optional[int] = None, # excluded, as always in python + init_state: Optional["grid2op.Action.BaseAction"] = None, **kwargs ): GridValue.__init__( @@ -193,17 +195,6 @@ def __init__( "This feature is not available at the moment. Fill a github issue at " "https://github.com/rte-france/Grid2Op/issues/new?assignees=&labels=enhancement&template=feature_request.md&title=" ) - # self.has_hazards = True - # if self.n_line is None: - # self.n_line = hazards.shape[1] - # else: - # assert self.n_line == hazards.shape[1] - # assert load_p.shape[0] == hazards.shape[0] - - # self.hazards = hazards # TODO copy ! 
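The `action_space` property and `cleanup_action_space()` added in the chronicsHandler.py hunk above enforce a single-owner rule: a time series keeps a reference to the action space of the environment that created it, and a deep copy must forget that reference before being attached elsewhere. A hedged sketch of the intended workflow (the environment name is only an example, and it is assumed that the environment wires the action space up at creation time, as the setter's error message suggests):

import copy
import grid2op

env = grid2op.make("l2rpn_case14_sandbox", test=True)
handler = env.chronics_handler

# a deep copy keeps the reference to the original action space;
# forget it before re-using the copy with another environment
handler_copy = copy.deepcopy(handler)
handler_copy.cleanup_action_space()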
- # self.hazard_duration = np.zeros(shape=(self.hazards.shape[0], self.n_line), dtype=dt_int) - # for line_id in range(self.n_line): - # self.hazard_duration[:, line_id] = self.get_hazard_duration_1d(self.hazards[:, line_id]) self._forecasts = None if load_p_forecast is not None: @@ -229,6 +220,9 @@ def __init__( raise ChronicsError( "if prod_p_forecast is not None, then load_p_forecast should not be None" ) + + self._init_state = init_state + self._max_iter = min(self._i_end - self._i_start, load_p.shape[0]) def initialize( self, @@ -259,6 +253,7 @@ def initialize( self.curr_iter = 0 self.current_index = self._i_start - 1 + self._max_iter = self._i_end - self._i_start def _get_long_hash(self, hash_: hashlib.blake2b = None): # get the "long hash" from blake2b @@ -427,6 +422,7 @@ def next_chronics(self): # update the forecast self._forecasts.next_chronics() self.check_validity(backend=None) + self._max_iter = self._i_end - self._i_start def done(self): """ @@ -655,6 +651,7 @@ def change_i_start(self, new_i_start: Union[int, None]): self.__new_istart = int(new_i_start) else: self.__new_istart = None + def change_i_end(self, new_i_end: Union[int, None]): """ @@ -700,3 +697,9 @@ def change_i_end(self, new_i_end: Union[int, None]): self.__new_iend = int(new_i_end) else: self.__new_iend = None + + def get_init_action(self, names_chronics_to_backend: Optional[Dict[Literal["loads", "prods", "lines"], Dict[str, str]]]=None) -> Union["grid2op.Action.playableAction.PlayableAction", None]: + # names_chronics_to_backend is ignored, names should be consistent between the environment + # and the initial state + + return self._init_state diff --git a/grid2op/Chronics/fromOneEpisodeData.py b/grid2op/Chronics/fromOneEpisodeData.py index 46e155a09..9dbe959ec 100644 --- a/grid2op/Chronics/fromOneEpisodeData.py +++ b/grid2op/Chronics/fromOneEpisodeData.py @@ -11,9 +11,10 @@ import numpy as np import copy import warnings -from typing import Union, Tuple +from typing import Union, Tuple, Optional, Dict, Literal from pathlib import Path +import grid2op from grid2op.Exceptions import ( ChronicsError, ChronicsNotFoundError ) @@ -25,6 +26,7 @@ TYPE_EP_DATA_INGESTED = Union[str, Path, EpisodeData, Tuple[str, str]] + class FromOneEpisodeData(GridValue): """This class allows to use the :class:`grid2op.Chronics.handlers.BaseHandler` to read back data stored in :class:`grid2op.Episode.EpisodeData` @@ -350,7 +352,6 @@ def get_id(self) -> str: else: # TODO EpisodeData.path !!! return "" - raise NotImplementedError() def shuffle(self, shuffler=None): # TODO @@ -423,3 +424,18 @@ def fast_forward(self, nb_timestep): self.load_next() # for this class I suppose the real data AND the forecast are read each step self.forecasts() + + def get_init_action(self, names_chronics_to_backend: Optional[Dict[Literal["loads", "prods", "lines"], Dict[str, str]]]=None) -> Union["grid2op.Action.playableAction.PlayableAction", None]: + # names_chronics_to_backend is ignored because it does not really make sense + # when read from the hard drive + obs = self._episode_data.observations[0] + dict_set = {"set_bus": obs.topo_vect} + if self.action_space.supports_type("redispatch"): + dict_set["redispatch"] = obs.target_dispatch + if self.action_space.supports_type("set_storage"): + dict_set["set_storage"] = obs.storage_power_target + if self.action_space.supports_type("curtail"): + dict_set["curtail"] = obs.curtailment_limit + dict_set["curtail"][~type(obs).gen_renewable] = -1 + # TODO shunts ! 
+ return self.action_space(dict_set, check_legal=False) diff --git a/grid2op/Chronics/gridStateFromFile.py b/grid2op/Chronics/gridStateFromFile.py index 1cc53a725..4874a51a4 100644 --- a/grid2op/Chronics/gridStateFromFile.py +++ b/grid2op/Chronics/gridStateFromFile.py @@ -6,13 +6,17 @@ # SPDX-License-Identifier: MPL-2.0 # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. +import json import os import copy +from typing import Union, Optional, Dict, Literal import numpy as np import pandas as pd import warnings from datetime import datetime, timedelta +import grid2op +from grid2op.Exceptions import Grid2OpException from grid2op.dtypes import dt_int, dt_float, dt_bool from grid2op.Exceptions import ( IncorrectNumberOfElements, @@ -736,7 +740,7 @@ def _init_attrs( self.hazards[:, line_id] ) - self.hazards = self.hazards != 0.0 + self.hazards = np.abs(self.hazards) >= 1e-7 if maintenance is not None: self.maintenance = copy.deepcopy( maintenance.values[:, self._order_maintenance] @@ -759,7 +763,7 @@ def _init_attrs( ] = self.get_maintenance_duration_1d(self.maintenance[:, line_id]) # there are _maintenance and hazards only if the value in the file is not 0. - self.maintenance = self.maintenance != 0.0 + self.maintenance = np.abs(self.maintenance) >= 1e-7 self.maintenance = self.maintenance.astype(dt_bool) def done(self): @@ -1026,14 +1030,14 @@ def _convert_datetime(self, datetime_beg): if not isinstance(datetime_beg, datetime): try: res = datetime.strptime(datetime_beg, "%Y-%m-%d %H:%M") - except: + except Exception as exc_: try: res = datetime.strptime(datetime_beg, "%Y-%m-%d") - except: + except Exception as exc_2: raise ChronicsError( 'Impossible to convert "{}" to a valid datetime. Accepted format is ' '"%Y-%m-%d %H:%M"'.format(datetime_beg) - ) + ) from exc_2 return res def _extract_array(self, nm): @@ -1225,3 +1229,35 @@ def split_and_save(self, datetime_beg, datetime_end, path_out): ) with open(os.path.join(path_out, "time_interval.info"), "w") as f: f.write("{:%H:%M}\n".format(tmp_for_time_delta)) + + def get_init_action(self, names_chronics_to_backend: Optional[Dict[Literal["loads", "prods", "lines"], Dict[str, str]]]=None) -> Union["grid2op.Action.playableAction.PlayableAction", None]: + from grid2op.Action import BaseAction + maybe_path = os.path.join(self.path, "init_state.json") + if not os.path.exists(maybe_path): + return None + if self.action_space is None: + raise Grid2OpException(f"We detected an action to set the intial state of the grid " + f"but we cannot build it because the 'action_space' of the time" + f"serie is not set.") + try: + with open(maybe_path, "r", encoding="utf-8") as f: + maybe_act_dict = json.load(f) + except Exception as exc_: + raise Grid2OpException(f"Invalid action provided to initialize the powergrid (not readable by json)." + f"Check file located at {maybe_path}") from exc_ + + try: + act : BaseAction = self.action_space(maybe_act_dict, + _names_chronics_to_backend=names_chronics_to_backend, + check_legal=False) + except Grid2OpException as exc_: + raise Grid2OpException(f"Impossible to build the action to set the grid. Please fix the " + f"file located at {maybe_path}.") from exc_ + + # TODO check change bus, redispatching, change status etc. + # TODO basically anything that would be suspicious here + error, reason = act.is_ambiguous() + if error: + raise Grid2OpException(f"The action to set the grid to its original configuration " + f"is ambiguous. 
Please check {maybe_path}") from reason + return act diff --git a/grid2op/Chronics/gridValue.py b/grid2op/Chronics/gridValue.py index 00bc8af50..44cc2cb5c 100644 --- a/grid2op/Chronics/gridValue.py +++ b/grid2op/Chronics/gridValue.py @@ -5,14 +5,17 @@ # you can obtain one at http://mozilla.org/MPL/2.0/. # SPDX-License-Identifier: MPL-2.0 # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. + import numpy as np import warnings from datetime import datetime, timedelta from abc import ABC, abstractmethod +from typing import Union, Dict, Literal +import grid2op from grid2op.dtypes import dt_int from grid2op.Space import RandomObject -from grid2op.Exceptions import EnvError +from grid2op.Exceptions import EnvError, Grid2OpException # TODO sous echantillonner ou sur echantilloner les scenario: need to modify everything that affect the number # TODO of time steps there, for example "Space.gen_min_time_on" or "params.NB_TIMESTEP_POWERFLOW_ALLOWED" for @@ -106,6 +109,23 @@ def __init__( self.maintenance_time = None self.maintenance_duration = None self.hazard_duration = None + + # complete action space set by the environment + self.__action_space : Union["grid2op.Action.SerializableActionSpace", None] = None + + @property + def action_space(self)-> Union["grid2op.Action.SerializableActionSpace", None]: + return self.__action_space + + @action_space.setter + def action_space(self, values): + from grid2op.Action import SerializableActionSpace + if not isinstance(values, SerializableActionSpace): + raise EnvError(f"Impossible to set the action space with a value of type {type(values)}") + if self.__action_space is not None: + raise EnvError(f"Impossible to change the action space once initialized.") + # TODO maybe raise a warning if the underlying action class is not CompleteAction + self.__action_space = values def get_kwargs(self, dict_): """ @@ -115,7 +135,7 @@ def get_kwargs(self, dict_): pass @property - def max_iter(self): + def max_iter(self) -> int: return self._max_iter @max_iter.setter @@ -130,7 +150,7 @@ def initialize( order_backend_lines, order_backend_subs, names_chronics_to_backend, - ): + ) -> None: """ This function is used to initialize the data generator. It can be use to load scenarios, or to initialize noise if scenarios are generated on the fly. 
It must also @@ -288,8 +308,8 @@ def get_maintenance_time_1d(maintenance): a = np.diff(maintenance) # +1 is because numpy does the diff `t+1` - `t` so to get index of the initial array # I need to "+1" - start = np.where(a == 1)[0] + 1 # start of maintenance - end = np.where(a == -1)[0] + 1 # end of maintenance + start = (a == 1).nonzero()[0] + 1 # start of maintenance + end = (a == -1).nonzero()[0] + 1 # end of maintenance prev_ = 0 # it's efficient here as i do a loop only on the number of time there is a maintenance # and maintenance are quite rare @@ -362,8 +382,8 @@ def get_maintenance_duration_1d(maintenance): a = np.diff(maintenance) # +1 is because numpy does the diff `t+1` - `t` so to get index of the initial array # I need to "+1" - start = np.where(a == 1)[0] + 1 # start of maintenance - end = np.where(a == -1)[0] + 1 # end of maintenance + start = (a == 1).nonzero()[0] + 1 # start of maintenance + end = (a == -1).nonzero()[0] + 1 # end of maintenance prev_ = 0 # it's efficient here as i do a loop only on the number of time there is a maintenance # and maintenance are quite rare @@ -440,8 +460,8 @@ def get_hazard_duration_1d(hazard): a = np.diff(hazard) # +1 is because numpy does the diff `t+1` - `t` so to get index of the initial array # I need to "+1" - start = np.where(a == 1)[0] + 1 # start of maintenance - end = np.where(a == -1)[0] + 1 # end of maintenance + start = (a == 1).nonzero()[0] + 1 # start of maintenance + end = (a == -1).nonzero()[0] + 1 # end of maintenance prev_ = 0 # it's efficient here as i do a loop only on the number of time there is a maintenance # and maintenance are quite rare @@ -800,3 +820,57 @@ def fast_forward(self, nb_timestep): """ for _ in range(nb_timestep): self.load_next() + + def get_init_action(self, names_chronics_to_backend: Dict[Literal["loads", "prods", "lines"], Dict[str, str]]) -> Union["grid2op.Action.playableAction.PlayableAction", None]: + """ + It is used when the environment is reset (*ie* when :func:`grid2op.Environment.Environment.reset` is called) + to set the grid in its "original" state. + + .. versionadded:: 1.10.2 + + Before grid2op 1.10.2 the original state is necessarily "everything connected together". + + For later versions, the "time series folder" (or the time series generator) can specify + a different initial condition for the grid. + + Notes + ----- + If the environment parameters :attr:`grid2op.Parameters.Parameters.IGNORE_INITIAL_STATE_TIME_SERIE` + is set to `True` (not its default value) then this is ignored. + + Returns + ------- + grid2op.Action.playableAction.PlayableAction + The desired initial configuration of the grid + """ + return None + + def cleanup_action_space(self): + """ + INTERNAL + + Used internally, do not override + + It is for example used when making a deepcopy of a `chronics_handler` to make sure + the new copy references the proper action space of the new environment. + """ + self.__action_space = None + # NB the action space is not closed as it is NOT owned by this class + + def regenerate_with_new_seed(self): + """ + INTERNAL this function is called by some classes (*eg* :class:`MultifolderWithCache`) + when a new seed has been set. + + For example, if you use some 'chronics' that generate part of their data randomly (*eg* + :class:`GridStateFromFileWithForecastsWithMaintenance`) they need to be aware of this + so that a reset actually updates the seeds. + + This is closely related to issue https://github.com/rte-france/Grid2Op/issues/616 + + ..
danger:: + This function should be called only once (not 0, not twice) after a "seed" function has been set. + Otherwise results might not be fully reproducible. + + """ + pass diff --git a/grid2op/Chronics/handlers/__init__.py b/grid2op/Chronics/handlers/__init__.py index f665ea896..bbd51d7f6 100644 --- a/grid2op/Chronics/handlers/__init__.py +++ b/grid2op/Chronics/handlers/__init__.py @@ -16,6 +16,7 @@ "PerfectForecastHandler", "NoisyForecastHandler", "LoadQFromPHandler", + "JSONInitStateHandler" ] from .baseHandler import BaseHandler @@ -28,3 +29,4 @@ from .perfectForecastHandler import PerfectForecastHandler from .noisyForecastHandler import NoisyForecastHandler from .load_q_from_p_handler import LoadQFromPHandler +from .jsonInitStateHandler import JSONInitStateHandler diff --git a/grid2op/Chronics/handlers/baseHandler.py b/grid2op/Chronics/handlers/baseHandler.py index 6de083e78..329e06f79 100644 --- a/grid2op/Chronics/handlers/baseHandler.py +++ b/grid2op/Chronics/handlers/baseHandler.py @@ -9,7 +9,8 @@ import copy import os import numpy as np -from typing import Optional, Tuple +from typing import Optional, Tuple, Union +import grid2op from grid2op.Space import RandomObject from datetime import timedelta, datetime @@ -72,7 +73,7 @@ def __init__(self, array_name, max_iter=-1, h_forecast=(5, )): self.path : Optional[os.PathLike] = None self.max_episode_duration : Optional[int] = None - def set_max_iter(self, max_iter: Optional[int]) -> None: + def _set_max_iter(self, max_iter: Optional[int]) -> None: """ INTERNAL @@ -341,7 +342,7 @@ def load_next(self, dict_: dict) -> Optional[np.ndarray]: """ raise NotImplementedError() - def check_validity(self, backend): + def check_validity(self, backend) -> None: """ INTERNAL @@ -479,3 +480,28 @@ def next_chronics(self) -> None: end of each episode when the next episode is loaded. """ return None + + def get_init_dict_action(self) -> Union[dict, None]: + """ + INTERNAL + + .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\ + + This function is called by the :class:`grid2op.Chronics.FromHandlers` only for the handlers responsible + for setting the "initial state" of the grid, for example :class:`JSONInitStateHandler`. + + If overridden, it is expected to return a dictionary which can be converted to an action with an + action space. + """ + raise NotImplementedError() + + def regenerate_with_new_seed(self): + """This function is called in case of data being "cached" (for example using the + :class:`grid2op.Chronics.MultifolderWithCache`) + + In this case, the data in the cache need to be updated if the seed has changed since + they were added to it. + + If your handler has some random part, we recommend you implement this function. + Otherwise feel free to ignore it""" + pass diff --git a/grid2op/Chronics/handlers/csvForecastHandler.py b/grid2op/Chronics/handlers/csvForecastHandler.py index a8c8285d7..cf08a0eaa 100644 --- a/grid2op/Chronics/handlers/csvForecastHandler.py +++ b/grid2op/Chronics/handlers/csvForecastHandler.py @@ -64,6 +64,8 @@ class CSVForecastHandler(CSVHandler): not for maintenance (in this case use :class:`CSVMaintenanceHandler`) nor for environment data (in this case use :class:`CSVHandler`) + nor for setting the initial state (in this case use + :class:`JSONInitStateHandler`) This is the default way to provide data to grid2op and its used for most l2rpn environments when forecasts are available.
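The `get_init_dict_action` hook documented above is implemented by `JSONInitStateHandler` (added further down), which reads an `init_state.json` file placed next to the time series data; `get_init_action` in the gridStateFromFile.py hunk above follows the same convention. A sketch of what such a file could contain; the action keys follow the usual grid2op action dictionary and the ids are made up for the example:

import json

init_state = {
    "set_line_status": [(3, -1)],          # start the episode with line 3 disconnected
    "set_bus": {"loads_id": [(0, 2)]},     # and load 0 on busbar 2 of its substation
}
with open("init_state.json", "w", encoding="utf-8") as f:
    json.dump(init_state, f, indent=2)
# at reset time the file is read back with json.load and handed to the action space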
@@ -91,8 +93,8 @@ def load_next(self, dict_): def set_chunk_size(self, chunk_size): super().set_chunk_size(self._nb_row_per_step * int(chunk_size)) - def set_max_iter(self, max_iter): - super().set_max_iter(self._nb_row_per_step * int(max_iter)) + def _set_max_iter(self, max_iter): + super()._set_max_iter(self._nb_row_per_step * int(max_iter)) def set_h_forecast(self, h_forecast): super().set_h_forecast(h_forecast) diff --git a/grid2op/Chronics/handlers/csvHandler.py b/grid2op/Chronics/handlers/csvHandler.py index b1ef18765..ae16f8e89 100644 --- a/grid2op/Chronics/handlers/csvHandler.py +++ b/grid2op/Chronics/handlers/csvHandler.py @@ -52,6 +52,8 @@ class CSVHandler(BaseHandler): "prod_p" or "prod_v") and not for maintenance (in this case use :class:`CSVMaintenanceHandler`) nor for forecast (in this case use :class:`CSVForecastHandler`) + nor for setting the initial state state (in this case use + :class:`JSONInitStateHandler`) This is the default way to provide data to grid2op and its used for most l2rpn environments. diff --git a/grid2op/Chronics/handlers/csvMaintenanceHandler.py b/grid2op/Chronics/handlers/csvMaintenanceHandler.py index 19d45727e..6efc3eea4 100644 --- a/grid2op/Chronics/handlers/csvMaintenanceHandler.py +++ b/grid2op/Chronics/handlers/csvMaintenanceHandler.py @@ -41,10 +41,11 @@ class CSVMaintenanceHandler(CSVHandler): no string etc. .. warning:: - Use this class only for the ENVIRONMENT data ("load_p", "load_q", - "prod_p" or "prod_v") and not for maintenance (in this case - use :class:`CSVMaintenanceHandler`) nor for + Use this class only for the MAINTENANCE and not for environment + data ("load_p", "load_q", "prod_p" or "prod_v") nor for forecast (in this case use :class:`CSVForecastHandler`) + nor for setting the initial state state (in this case use + :class:`JSONInitStateHandler`) This is the default way to provide data to grid2op and its used for most l2rpn environments. @@ -79,7 +80,7 @@ def _init_attrs(self, array): ] = GridValue.get_maintenance_duration_1d(self.array[:, line_id]) # there are _maintenance and hazards only if the value in the file is not 0. - self.array = self.array != 0.0 + self.array = np.abs(self.array) >= 1e-7 self.array = self.array.astype(dt_bool) def load_next_maintenance(self) -> Tuple[np.ndarray, np.ndarray]: diff --git a/grid2op/Chronics/handlers/do_nothing_handler.py b/grid2op/Chronics/handlers/do_nothing_handler.py index bcfa98bdc..bd21c8ef0 100644 --- a/grid2op/Chronics/handlers/do_nothing_handler.py +++ b/grid2op/Chronics/handlers/do_nothing_handler.py @@ -6,6 +6,7 @@ # SPDX-License-Identifier: MPL-2.0 # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. +from typing import Union from grid2op.Chronics.handlers.baseHandler import BaseHandler @@ -50,4 +51,6 @@ def load_next_maintenance(self): def load_next_hazard(self): return None - \ No newline at end of file + + def get_init_dict_action(self) -> Union[dict, None]: + return None \ No newline at end of file diff --git a/grid2op/Chronics/handlers/jsonInitStateHandler.py b/grid2op/Chronics/handlers/jsonInitStateHandler.py new file mode 100644 index 000000000..b7740746b --- /dev/null +++ b/grid2op/Chronics/handlers/jsonInitStateHandler.py @@ -0,0 +1,52 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# See AUTHORS.txt +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. 
+# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. + +from typing import Union +import json +import os + +import grid2op +from grid2op.Exceptions import Grid2OpException +from grid2op.Chronics.handlers.baseHandler import BaseHandler + + +class JSONInitStateHandler(BaseHandler): + """Base class to initialize the grid state using a method in the time series. + + .. versionadded:: 1.10.2 + + This class will look for a file named "init_state.json" (located at `self.path`) which should be a valid + json file (you can load it with the `json` module) representing a valid + action on the grid. + + This action should preferably be using only the `set` (*eg* `set_bus` and `set_status`) + keyword arguments and set only the topology of the grid (and not the injection or + the redispatching.) + + If no "init_state.json" file is found, then nothing is done. + + """ + + def check_validity(self, backend) -> None: + """This type of handler is always valid.""" + pass + + def done(self) -> bool: + return False + + def get_init_dict_action(self) -> Union[dict, None]: + maybe_path = os.path.join(self.path, "init_state.json") + if not os.path.exists(maybe_path): + return None + try: + with open(maybe_path, "r", encoding="utf-8") as f: + maybe_act_dict = json.load(f) + except Exception as exc_: + raise Grid2OpException(f"Invalid action provided to initialize the powergrid (not readable by json)." + f"Check file located at {maybe_path}") from exc_ + return maybe_act_dict diff --git a/grid2op/Chronics/handlers/jsonMaintenanceHandler.py b/grid2op/Chronics/handlers/jsonMaintenanceHandler.py index 779c877ba..3b891ab21 100644 --- a/grid2op/Chronics/handlers/jsonMaintenanceHandler.py +++ b/grid2op/Chronics/handlers/jsonMaintenanceHandler.py @@ -40,6 +40,13 @@ class JSONMaintenanceHandler(BaseHandler): - "max_daily_number_per_month_maintenance": maximum number of powerlines allowed in maintenance at the same time. + .. 
warning:: + Use this class only for the MAINTENANCE and not for environment + data ("load_p", "load_q", "prod_p" or "prod_v") nor for + forecast (in this case use :class:`CSVForecastHandler`) + nor for setting the initial state state (in this case use + :class:`JSONInitStateHandler`) + """ def __init__(self, array_name="maintenance", @@ -56,7 +63,8 @@ def __init__(self, self.n_line = None # used in one of the GridStateFromFileWithForecastsWithMaintenance functions self._duration_episode_default = _duration_episode_default self.current_index = 0 - + self._order_backend_arrays = None + def get_maintenance_time_1d(self, maintenance): return GridValue.get_maintenance_time_1d(maintenance) @@ -75,7 +83,8 @@ def _create_maintenance_arrays(self, current_datetime): self.dict_meta_data["maintenance_ending_hour"], self.dict_meta_data["daily_proba_per_month_maintenance"], self.dict_meta_data["max_daily_number_per_month_maintenance"], - self.space_prng + self.space_prng, + self.dict_meta_data["maintenance_day_of_week"] if "maintenance_day_of_week" in self.dict_meta_data else None ) GridStateFromFileWithForecastsWithMaintenance._fix_maintenance_format(self) @@ -121,4 +130,8 @@ def _clear(self): def done(self): # maintenance can be generated on the fly so they are never "done" - return False \ No newline at end of file + return False + + def regenerate_with_new_seed(self): + if self.dict_meta_data is not None: + self._create_maintenance_arrays(self.init_datetime) diff --git a/grid2op/Chronics/handlers/noisyForecastHandler.py b/grid2op/Chronics/handlers/noisyForecastHandler.py index e047c9271..8fb4cc763 100644 --- a/grid2op/Chronics/handlers/noisyForecastHandler.py +++ b/grid2op/Chronics/handlers/noisyForecastHandler.py @@ -212,3 +212,7 @@ def forecast(self, res *= self._env_loss_ratio(inj_dict_env) # TODO ramps, pmin, pmax ! 
return res.astype(dt_float) if res is not None else None + + def regenerate_with_new_seed(self): + # there is nothing to do for this handler as things are generated "on the fly" + pass \ No newline at end of file diff --git a/grid2op/Chronics/multiFolder.py b/grid2op/Chronics/multiFolder.py index f948f94ac..e8b8c9b48 100644 --- a/grid2op/Chronics/multiFolder.py +++ b/grid2op/Chronics/multiFolder.py @@ -8,10 +8,12 @@ import os import json +from typing import Union, Optional, Dict, Literal import warnings import numpy as np from datetime import timedelta, datetime +import grid2op from grid2op.dtypes import dt_int, dt_float from grid2op.Exceptions import ChronicsNotFoundError, ChronicsError from grid2op.Chronics.gridValue import GridValue @@ -352,7 +354,7 @@ def sample_next_chronics(self, probabilities=None): probabilities /= sum_prob # take one at "random" among these selected = self.space_prng.choice(self._order, p=probabilities) - id_sel = np.where(self._order == selected)[0] + id_sel = (self._order == selected).nonzero()[0] self._prev_cache_id = selected - 1 return id_sel @@ -392,6 +394,17 @@ def reset(self): self._order = np.array(self._order) return self.subpaths[self._order] + def _get_nex_data(self, this_path): + res = self.gridvalueClass( + time_interval=self.time_interval, + sep=self.sep, + path=this_path, + max_iter=self.max_iter, + chunk_size=self.chunk_size, + **self._kwargs + ) + return res + def initialize( self, order_backend_loads, @@ -417,14 +430,7 @@ def initialize( id_scenario = self._order[self._prev_cache_id] this_path = self.subpaths[id_scenario] - self.data = self.gridvalueClass( - time_interval=self.time_interval, - sep=self.sep, - path=this_path, - max_iter=self.max_iter, - chunk_size=self.chunk_size, - **self._kwargs - ) + self.data = self._get_nex_data(this_path) if self.seed is not None: max_int = np.iinfo(dt_int).max seed_chronics = self.space_prng.randint(max_int) @@ -437,6 +443,9 @@ def initialize( order_backend_subs, names_chronics_to_backend=names_chronics_to_backend, ) + if self.action_space is not None: + self.data.action_space = self.action_space + self._max_iter = self.data.max_iter def done(self): """ @@ -777,3 +786,12 @@ def split_and_save(self, datetime_beg, datetime_end, path_out): def fast_forward(self, nb_timestep): self.data.fast_forward(nb_timestep) + + def get_init_action(self, names_chronics_to_backend: Optional[Dict[Literal["loads", "prods", "lines"], Dict[str, str]]]=None) -> Union["grid2op.Action.playableAction.PlayableAction", None]: + return self.data.get_init_action(names_chronics_to_backend) + + def cleanup_action_space(self): + super().cleanup_action_space() + if self.data is None: + return + self.data.cleanup_action_space() diff --git a/grid2op/Chronics/multifolderWithCache.py b/grid2op/Chronics/multifolderWithCache.py index 02b0c5e46..436842841 100644 --- a/grid2op/Chronics/multifolderWithCache.py +++ b/grid2op/Chronics/multifolderWithCache.py @@ -7,10 +7,12 @@ # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. 
import numpy as np from datetime import timedelta, datetime +import warnings from grid2op.dtypes import dt_int from grid2op.Chronics.multiFolder import Multifolder from grid2op.Chronics.gridStateFromFile import GridStateFromFile +from grid2op.Chronics.time_series_from_handlers import FromHandlers from grid2op.Exceptions import ChronicsError @@ -70,7 +72,7 @@ class MultifolderWithCache(Multifolder): env = make(...,chronics_class=MultifolderWithCache) # set the chronics to limit to one week of data (lower memory footprint) - env.chronics_handler.set_max_iter(7*288) + env.set_max_iter(7*288) # assign a filter, use only chronics that have "december" in their name env.chronics_handler.real_data.set_filter(lambda x: re.match(".*december.*", x) is not None) # create the cache @@ -140,12 +142,18 @@ def __init__( ) self._cached_data = None self.cache_size = 0 - if not issubclass(self.gridvalueClass, GridStateFromFile): + if not (issubclass(self.gridvalueClass, GridStateFromFile) or + issubclass(self.gridvalueClass, FromHandlers)): raise RuntimeError( 'MultifolderWithCache does not work when "gridvalueClass" does not inherit from ' '"GridStateFromFile".' ) + if issubclass(self.gridvalueClass, FromHandlers): + warnings.warn("You use caching with handler data. This is possible but " + "might be a bit risky especially if your handlers are " + "heavily 'random' and you want fully reproducible results.") self.__i = 0 + self._cached_seeds = None def _default_filter(self, x): """ @@ -162,6 +170,11 @@ def reset(self): Rebuilt the cache as if it were built from scratch. This call might take a while to process. + This means that current data in cache will be discarded and that new data will + most likely be read from the hard drive. + + This might take a while. + .. danger:: You NEED to call this function (with `env.chronics_handler.reset()`) if you use the `MultiFolderWithCache` class in your experiments. 
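As a quick complement to the danger block above, the sketch below shows the ordering that the caching and seeding logic expects once this patch is applied: `set_filter` cancels any previously set seed, and (from grid2op 1.10.3) seeding also refreshes the data already in cache. It is not part of the patch; the environment name and the filter regex are placeholders.

.. code-block:: python

    import re
    import grid2op
    from grid2op.Chronics import MultifolderWithCache

    env = grid2op.make("l2rpn_case14_sandbox",
                       chronics_class=MultifolderWithCache)

    # 1. choose which chronics will be cached (the regex is only an example)
    env.chronics_handler.real_data.set_filter(
        lambda path: re.match(".*0$", path) is not None
    )
    # 2. build the cache (mandatory with MultifolderWithCache)
    env.chronics_handler.reset()
    # 3. seed AFTER set_filter: set_filter cancels a previously set seed, and
    #    seeding now also re-generates the cached data so results stay reproducible
    env.seed(0)
    obs = env.reset()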
@@ -180,16 +193,10 @@ def reset(self):
         for i in self._order:
             # everything in "_order" need to be put in cache
             path = self.subpaths[i]
-            data = self.gridvalueClass(
-                time_interval=self.time_interval,
-                sep=self.sep,
-                path=path,
-                max_iter=self.max_iter,
-                chunk_size=None,
-            )
-            if self.seed_used is not None:
-                seed_chronics = self.space_prng.randint(max_int)
-                data.seed(seed_chronics)
+            data = self._get_nex_data(path)
+
+            if self._cached_seeds is not None:
+                data.seed(self._cached_seeds[i])
 
             data.initialize(
                 self._order_backend_loads,
@@ -198,8 +205,14 @@ def reset(self):
                 self._order_backend_subs,
                 self._names_chronics_to_backend,
             )
+
+            if self._cached_seeds is not None:
+                data.regenerate_with_new_seed()
+
             self._cached_data[i] = data
             self.cache_size += 1
+            if self.action_space is not None:
+                data.action_space = self.action_space
 
         if self.cache_size == 0:
             raise RuntimeError("Impossible to initialize the new cache.")
@@ -231,12 +244,16 @@ def initialize(
         self.n_load = len(order_backend_loads)
         self.n_line = len(order_backend_lines)
         if self._cached_data is None:
-            # initialize the cache
+            # initialize the cache of this MultiFolder
            self.reset()
 
         id_scenario = self._order[self._prev_cache_id]
         self.data = self._cached_data[id_scenario]
         self.data.next_chronics()
+        if self.seed_used is not None and self.data.seed_used != self._cached_seeds[id_scenario]:
+            self.data.seed(self._cached_seeds[id_scenario])
+            self.data.regenerate_with_new_seed()
+
         self._max_iter = self.data.max_iter
 
     @property
     def max_iter(self):
@@ -258,6 +275,15 @@ def seed(self, seed : int):
         (which has an impact for example on :func:`MultiFolder.sample_next_chronics`)
         and each data present in the cache.
 
+        .. warning::
+            Before grid2op version 1.10.3 this function did not fully ensure
+            reproducible experiments (the cache was not updated with the new seed).
+
+            For grid2op 1.10.3 and after, this function might trigger some modification
+            in the cached data (calling :func:`GridValue.seed` and then
+            :func:`GridValue.regenerate_with_new_seed`). It might take a while if the cache
+            is large.
+
         Parameters
         ----------
         seed : int
@@ -265,12 +291,15 @@ def seed(self, seed : int):
         """
         res = super().seed(seed)
         max_int = np.iinfo(dt_int).max
+        self._cached_seeds = np.empty(shape=self._order.shape, dtype=dt_int)
         for i in self._order:
             data = self._cached_data[i]
+            seed_ts = self.space_prng.randint(max_int)
+            self._cached_seeds[i] = seed_ts
             if data is None:
                 continue
-            seed_ts = self.space_prng.randint(max_int)
             data.seed(seed_ts)
+            data.regenerate_with_new_seed()
         return res
 
     def load_next(self):
@@ -282,9 +311,66 @@ def load_next(self):
         return super().load_next()
 
     def set_filter(self, filter_fun):
+        """
+        Assign a filtering function that selects which chronics will be kept in cache the next time :func:`MultifolderWithCache.reset` is called.
+
+        **NB** filter_fun is applied to all elements of :attr:`Multifolder.subpaths`. If it returns ``True`` the chronics will
+        be put in cache, if ``False`` this data will NOT be put in the cache.
+
+        **NB** this has no effect until :func:`Multifolder.reset` is called.
+
+
+        .. danger::
+            Calling this function cancels the previous seed used. If you called `env.seed`
+            or `env.chronics_handler.seed` before, you need to
+            call it again afterwards, otherwise the seed has no effect.
+
+        Parameters
+        ----------
+        filter_fun : callable
+            A function taking the full path of a chronics folder and returning ``True`` if it should be kept in cache.
+
+        Examples
+        --------
+        Let's assume in your chronics, the folder names are "Scenario_august_dummy", and
+        "Scenario_february_dummy". For the sake of the example, we want the environment to loop
+        only through the month of february, because why not. Then we can do the following:
+
+        .. code-block:: python
+
+            import re
+            import grid2op
+            env = grid2op.make("l2rpn_neurips_2020_track1", test=True)  # don't add "test=True" if
+            # you don't want to perform a test.
+
+            # check to which month each observation belongs
+            for i in range(10):
+                obs = env.reset()
+                print(obs.month)
+                # it alternately prints "8" (if the chronics is from august) or
+                # "2" (if the chronics is from february)
+
+            # to see where the chronics are located
+            print(env.chronics_handler.subpaths)
+
+            # keep only the month of february
+            env.chronics_handler.set_filter(lambda path: re.match(".*february.*", path) is not None)
+            env.chronics_handler.reset()  # if you don't do that it will not have any effect
+
+            for i in range(10):
+                obs = env.reset()
+                print(obs.month)
+                # it always prints "2" (representing february)
+
+        Returns
+        -------
+        res:
+            The result of the call to :func:`Multifolder.set_filter`.
+        """
         self.__nb_reset_called = 0
         self.__nb_step_called = 0
         self.__nb_init_called = 0
+        self._cached_seeds = None
         return super().set_filter(filter_fun)
 
     def get_kwargs(self, dict_):
@@ -292,3 +378,10 @@ def get_kwargs(self, dict_):
         dict_["_DONTUSE_nb_step_called"] = self.__nb_step_called
         dict_["_DONTUSE_nb_init_called"] = self.__nb_init_called
         return super().get_kwargs(dict_)
+
+    def cleanup_action_space(self):
+        super().cleanup_action_space()
+        for el in self._cached_data:
+            if el is None:
+                continue
+            el.cleanup_action_space()
diff --git a/grid2op/Chronics/readPypowNetData.py b/grid2op/Chronics/readPypowNetData.py
index de5589f7a..68ca46db0 100644
--- a/grid2op/Chronics/readPypowNetData.py
+++ b/grid2op/Chronics/readPypowNetData.py
@@ -191,8 +191,8 @@ def initialize(
         self.start_datetime = datetime.strptime(datetimes_.iloc[0, 0], "%Y-%b-%d")
 
         # there are maintenance and hazards only if the value in the file is not 0.
- self.maintenance = self.maintenance != 0.0 - self.hazards = self.hazards != 0.0 + self.maintenance = np.abs(self.maintenance) >= 1e-7 + self.hazards = np.abs(self.hazards) >= 1e-7 self.curr_iter = 0 if self.max_iter == -1: @@ -294,9 +294,8 @@ def initialize( self.hazard_duration[:, line_id] = self.get_maintenance_duration_1d( self.hazards[:, line_id] ) - - self.maintenance_forecast = self.maintenance != 0.0 - + self.maintenance_forecast = np.abs(self.maintenance) >= 1e-7 + self.curr_iter = 0 if self.maintenance is not None: n_ = self.maintenance.shape[0] diff --git a/grid2op/Chronics/time_series_from_handlers.py b/grid2op/Chronics/time_series_from_handlers.py index 2d4510685..646cf3deb 100644 --- a/grid2op/Chronics/time_series_from_handlers.py +++ b/grid2op/Chronics/time_series_from_handlers.py @@ -10,8 +10,9 @@ import os import numpy as np import copy -from typing import Optional +from typing import Optional, Union, Dict, Literal +import grid2op from grid2op.Exceptions import ( ChronicsNotFoundError, HandlerError ) @@ -19,6 +20,7 @@ from grid2op.Chronics.gridValue import GridValue from grid2op.Chronics.handlers import BaseHandler +from grid2op.Exceptions.grid2OpException import Grid2OpException from grid2op.dtypes import dt_int, dt_float @@ -131,6 +133,7 @@ def __init__( load_q_for_handler=None, gen_p_for_handler=None, gen_v_for_handler=None, + init_state_handler=None, time_interval=timedelta(minutes=5), sep=";", # here for compatibility with grid2op, but not used max_iter=-1, @@ -161,6 +164,7 @@ def __init__( self.gen_v_for_handler : Optional[BaseHandler] = copy.deepcopy(gen_v_for_handler) self.load_p_for_handler : Optional[BaseHandler] = copy.deepcopy(load_p_for_handler) self.load_q_for_handler : Optional[BaseHandler] = copy.deepcopy(load_q_for_handler) + self.init_state_handler : Optional[BaseHandler] = copy.deepcopy(init_state_handler) # when there are no maintenance / hazards, build this only once self._no_mh_time = None @@ -185,6 +189,8 @@ def __init__( if self.load_q_for_handler is not None: self._active_handlers.append(self.load_q_for_handler) self._forcast_handlers.append(self.load_q_for_handler) + if self.init_state_handler is not None: + self._active_handlers.append(self.init_state_handler) self._check_types() # now synch all handlers @@ -198,7 +204,7 @@ def __init__( self.set_chunk_size(chunk_size) if max_iter != -1: - self.set_max_iter(max_iter) + self._set_max_iter(max_iter) self.init_datetime() self.current_inj = None @@ -383,10 +389,10 @@ def set_chunk_size(self, new_chunk_size): for el in self._active_handlers: el.set_chunk_size(new_chunk_size) - def set_max_iter(self, max_iter): + def _set_max_iter(self, max_iter): self.max_iter = int(max_iter) for el in self._active_handlers: - el.set_max_iter(max_iter) + el._set_max_iter(max_iter) def init_datetime(self): for handl in self._active_handlers: @@ -395,7 +401,7 @@ def init_datetime(self): def seed(self, seed): super().seed(seed) max_seed = np.iinfo(dt_int).max - seeds = self.space_prng.randint(max_seed, size=10) + seeds = self.space_prng.randint(max_seed, size=11) # this way of doing ensure the same seed given by the environment is # used even if some "handlers" are missing # (if env.seed(0) is called, then regardless of maintenance_handler or not, @@ -422,9 +428,12 @@ def seed(self, seed): gvf_seed = None if self.gen_v_for_handler is not None: gvf_seed = self.gen_v_for_handler.seed(seeds[9]) + init_state_seed = None + if self.init_state_handler is not None: + init_state_seed = self.init_state_handler.seed(seeds[10]) 
         return (seed, gp_seed, gv_seed, lp_seed, lq_seed, maint_seed, haz_seed, gpf_seed, gvf_seed,
-                lpf_seed, lqf_seed)
+                lpf_seed, lqf_seed, init_state_seed)
 
     def _set_path(self, path):
         """tell the handler where this chronics is located"""
@@ -521,3 +530,37 @@ def fast_forward(self, nb_timestep):
             self.load_next()
             # for this class I suppose the real data AND the forecast are read each step
             self.forecasts()
+
+    def get_init_action(self, names_chronics_to_backend: Optional[Dict[Literal["loads", "prods", "lines"], Dict[str, str]]]=None) -> Union["grid2op.Action.playableAction.PlayableAction", None]:
+        from grid2op.Action import BaseAction
+        if self.init_state_handler is None:
+            return None
+
+        act_as_dict = self.init_state_handler.get_init_dict_action()
+        if act_as_dict is None:
+            return None
+
+        if self.action_space is None:
+            raise Grid2OpException(f"We detected an action to set the initial state of the grid "
+                                   f"but we cannot build it because the 'action_space' of the time "
+                                   f"series is not set.")
+
+        try:
+            act : BaseAction = self.action_space(act_as_dict,
+                                                 check_legal=False,
+                                                 _names_chronics_to_backend=names_chronics_to_backend)
+        except Grid2OpException as exc_:
+            raise Grid2OpException(f"Impossible to build the action to set the grid. Please fix the "
+                                   f"file located at {self.init_state_handler.path}.") from exc_
+
+        # TODO check change bus, redispatching, change status etc.
+        # TODO basically anything that would be suspicious here
+        error, reason = act.is_ambiguous()
+        if error:
+            raise Grid2OpException(f"The action to set the grid to its original configuration "
+                                   f"is ambiguous. Please check {self.init_state_handler.path}") from reason
+        return act
+
+    def regenerate_with_new_seed(self):
+        for handl in self._active_handlers:
+            handl.regenerate_with_new_seed()
diff --git a/grid2op/Converter/BackendConverter.py b/grid2op/Converter/BackendConverter.py
index 44b381a23..4c023c85e 100644
--- a/grid2op/Converter/BackendConverter.py
+++ b/grid2op/Converter/BackendConverter.py
@@ -106,18 +106,24 @@ def __init__(
         difcf = detailed_infos_for_cascading_failures
         if kwargs_source_backend is None:
             kwargs_source_backend = {}
-        self.source_backend = source_backend_class(
+
+        #: represents the backend used for the order / name of the elements
+        #: the agent will not see any difference between the converter and this backend
+        self.source_backend : Backend = source_backend_class(
             detailed_infos_for_cascading_failures=difcf, **kwargs_source_backend
-        )  # the one for the order of the elements
+        )
 
         if kwargs_target_backend is None:
             kwargs_target_backend = {}
-        self.target_backend = target_backend_class(
+
+        #: represents the backend used to compute the powerflows
+        self.target_backend : Backend = target_backend_class(
             detailed_infos_for_cascading_failures=difcf, **kwargs_target_backend
         )  # the one to computes powerflow
-        # if the target backend (the one performing the powerflows) needs a different file
-        self.target_backend_grid_path = target_backend_grid_path
+
+        #: if the target backend (the one performing the powerflows) needs a different file
+        self.target_backend_grid_path : str = target_backend_grid_path
 
         # key: name in the source backend, value name in the target backend, for the substations
         self.sub_source_target = sub_source_target
@@ -156,6 +162,14 @@ def __init__(
         # TODO storage check all this class !
+ the doc of the backend def load_grid(self, path=None, filename=None): + # register the "n_busbar_per_sub" (set for the backend class) + # TODO in case source supports the "more than 2" feature but not target + # it's unclear how I can "reload" the grid... + from grid2op.Space import DEFAULT_N_BUSBAR_PER_SUB + type(self.source_backend).set_n_busbar_per_sub(DEFAULT_N_BUSBAR_PER_SUB) + type(self.target_backend).set_n_busbar_per_sub(DEFAULT_N_BUSBAR_PER_SUB) + self.cannot_handle_more_than_2_busbar() + self.source_backend.load_grid(path, filename) # and now i load the target backend if self.target_backend_grid_path is not None: @@ -163,7 +177,18 @@ def load_grid(self, path=None, filename=None): else: # both source and target backend understands the same format self.target_backend.load_grid(path, filename) - + + # TODO in case source supports the "more than 2" feature but not target + # it's unclear how I can "reload" the grid... + # if (not self.target_backend._missing_two_busbars_support_info and + # not self.source_backend._missing_two_busbars_support_info + # ): + # ??? + # else: + # # at least one backend cannot handle the number of busbars, so I deactivate it for all + # self.target_backend.cannot_handle_more_than_2_busbar() + # self.source_backend.cannot_handle_more_than_2_busbar() + def _assert_same_grid(self): """basic assertion that self and the target backend have the same grid but not necessarily the same object at the same place of course""" @@ -206,13 +231,13 @@ def _init_myself(self): == sorted(self.target_backend.name_sub) ): for id_source, nm_source in enumerate(self.source_backend.name_sub): - id_target = np.where(self.target_backend.name_sub == nm_source)[0] + id_target = (self.target_backend.name_sub == nm_source).nonzero()[0] self._sub_tg2sr[id_source] = id_target self._sub_sr2tg[id_target] = id_source else: for id_source, nm_source in enumerate(self.source_backend.name_sub): nm_target = self.sub_source_target[nm_source] - id_target = np.where(self.target_backend.name_sub == nm_target)[0] + id_target = (self.target_backend.name_sub == nm_target).nonzero()[0] self._sub_tg2sr[id_source] = id_target self._sub_sr2tg[id_target] = id_source @@ -300,7 +325,7 @@ def _init_myself(self): def _get_possible_target_ids(self, id_source, source_2_id_sub, target_2_id_sub, nm): id_sub_source = source_2_id_sub[id_source] id_sub_target = self._sub_tg2sr[id_sub_source] - ids_target = np.where(target_2_id_sub == id_sub_target)[0] + ids_target = (target_2_id_sub == id_sub_target).nonzero()[0] if ids_target.shape[0] == 0: raise RuntimeError( ERROR_ELEMENT_CONNECTED.format(nm, id_sub_target, id_sub_source) @@ -346,10 +371,10 @@ def _auto_fill_vect_powerline(self): idor_sub_target = self._sub_tg2sr[idor_sub_source] idex_sub_source = source_ex_2_id_sub[id_source] idex_sub_target = self._sub_tg2sr[idex_sub_source] - ids_target = np.where( + ids_target = ( (target_or_2_id_sub == idor_sub_target) & (target_ex_2_id_sub == idex_sub_target) - )[0] + ).nonzero()[0] if ids_target.shape[0] == 0: raise RuntimeError( ERROR_ELEMENT_CONNECTED.format( @@ -404,7 +429,7 @@ def _auto_fill_vect_topo_aux(self, n_elem, source_pos, target_pos, sr2tg): self._topo_tg2sr[source_pos[sr2tg]] = target_pos self._topo_sr2tg[target_pos] = source_pos[sr2tg] - def assert_grid_correct(self): + def assert_grid_correct(self, _local_dir_cls=None) -> None: # this is done before a call to this function, by the environment tg_cls = type(self.target_backend) sr_cls = type(self.source_backend) @@ -455,13 +480,13 @@ def 
assert_grid_correct(self): ) # init the target backend (the one that does the computation and that is initialized) - self.target_backend.assert_grid_correct() + self.target_backend.assert_grid_correct(_local_dir_cls=_local_dir_cls) # initialize the other one, because, well the grid should be seen from both backend self.source_backend._init_class_attr(obj=self) - self.source_backend.assert_grid_correct() + self.source_backend.assert_grid_correct(_local_dir_cls=_local_dir_cls) # and this should be called after all the rest - super().assert_grid_correct() + super().assert_grid_correct(_local_dir_cls=_local_dir_cls) # everything went well, so i can properly terminate my initialization self._init_myself() @@ -550,6 +575,12 @@ def assert_grid_correct_after_powerflow(self): super().assert_grid_correct_after_powerflow() self._sh_vnkv = self.target_backend._sh_vnkv + def _fill_names_obj(self): + self.target_backend._fill_names_obj() + self.source_backend._fill_names_obj() + for attr_nm in ["name_line", "name_gen", "name_load", "name_sub", "name_storage"]: + setattr(self, attr_nm, copy.deepcopy(getattr(self.source_backend, attr_nm))) + def reset(self, grid_path, grid_filename=None): """ Reload the power grid. diff --git a/grid2op/Converter/ConnectivityConverter.py b/grid2op/Converter/ConnectivityConverter.py index 5826c1bcc..5b971238b 100644 --- a/grid2op/Converter/ConnectivityConverter.py +++ b/grid2op/Converter/ConnectivityConverter.py @@ -188,11 +188,11 @@ def init_converter(self, all_actions=None, **kwargs): if nb_element < 4: continue - c_id = np.where(self.load_to_subid == sub_id)[0] - g_id = np.where(self.gen_to_subid == sub_id)[0] - lor_id = np.where(self.line_or_to_subid == sub_id)[0] - lex_id = np.where(self.line_ex_to_subid == sub_id)[0] - storage_id = np.where(self.storage_to_subid == sub_id)[0] + c_id = (self.load_to_subid == sub_id).nonzero()[0] + g_id = (self.gen_to_subid == sub_id).nonzero()[0] + lor_id = (self.line_or_to_subid == sub_id).nonzero()[0] + lex_id = (self.line_ex_to_subid == sub_id).nonzero()[0] + storage_id = (self.storage_to_subid == sub_id).nonzero()[0] c_pos = self.load_to_sub_pos[self.load_to_subid == sub_id] g_pos = self.gen_to_sub_pos[self.gen_to_subid == sub_id] @@ -380,20 +380,20 @@ def convert_act(self, encoded_act, explore=None): ) if ((encoded_act < -1.0) | (encoded_act > 1.0)).any(): errors = (encoded_act < -1.0) | (encoded_act > 1.0) - indexes = np.where(errors)[0] + indexes = (errors).nonzero()[0] raise RuntimeError( f'All elements of "encoded_act" must be in range [-1, 1]. Please check your ' f"encoded action at positions {indexes[:5]}... (only first 5 displayed)" ) - act_want_change = encoded_act != 0.0 + act_want_change = np.abs(encoded_act) >= 1e-7 encoded_act_filtered = encoded_act[act_want_change] if encoded_act_filtered.shape[0] == 0: # do nothing action in this case return super().__call__() argsort_changed = np.argsort(-np.abs(encoded_act_filtered)) - argsort = np.where(act_want_change)[0][argsort_changed] + argsort = (act_want_change).nonzero()[0][argsort_changed] act, disag = self._aux_act_from_order(argsort, encoded_act) self.indx_sel = 0 if explore is None: @@ -489,7 +489,7 @@ def _compute_disagreement(self, encoded_act, topo_vect): Lower disagreement is always better. 
""" - set_component = encoded_act != 0.0 + set_component = np.abs(encoded_act) >= 1e-7 bus_el1 = topo_vect[self.pos_topo[:, 0]] bus_el2 = topo_vect[self.pos_topo[:, 1]] # for the element that will connected diff --git a/grid2op/Converter/IdToAct.py b/grid2op/Converter/IdToAct.py index c1ffd241b..063b1f59d 100644 --- a/grid2op/Converter/IdToAct.py +++ b/grid2op/Converter/IdToAct.py @@ -26,7 +26,7 @@ class IdToAct(Converter): A "unary action" is an action that consists only in acting on one "concept" it includes: - disconnecting a single powerline - - reconnecting a single powerline and connect it to bus xxx on its origin end and yyy on its extremity end + - reconnecting a single powerline and connect it to bus xxx on its origin side and yyy on its extremity side - changing the topology of a single substation - performing redispatching on a single generator - performing curtailment on a single generator @@ -70,6 +70,7 @@ class IdToAct(Converter): def __init__(self, action_space): Converter.__init__(self, action_space) self.__class__ = IdToAct.init_grid(action_space) + self.init_action_space = action_space self.all_actions = [] # add the do nothing topology self.all_actions.append(super().__call__()) diff --git a/grid2op/Environment/__init__.py b/grid2op/Environment/__init__.py index 1375aad0a..a9a4197b3 100644 --- a/grid2op/Environment/__init__.py +++ b/grid2op/Environment/__init__.py @@ -5,7 +5,8 @@ "SingleEnvMultiProcess", "MultiEnvMultiProcess", "MultiMixEnvironment", - "TimedOutEnvironment" + "TimedOutEnvironment", + "MaskedEnvironment" ] from grid2op.Environment.baseEnv import BaseEnv @@ -15,3 +16,4 @@ from grid2op.Environment.multiEnvMultiProcess import MultiEnvMultiProcess from grid2op.Environment.multiMixEnv import MultiMixEnvironment from grid2op.Environment.timedOutEnv import TimedOutEnvironment +from grid2op.Environment.maskedEnvironment import MaskedEnvironment diff --git a/grid2op/Environment/_forecast_env.py b/grid2op/Environment/_forecast_env.py index ad08fc7df..7378df7c7 100644 --- a/grid2op/Environment/_forecast_env.py +++ b/grid2op/Environment/_forecast_env.py @@ -21,6 +21,7 @@ def __init__(self, *args, **kwargs): if "_update_obs_after_reward" not in kwargs: kwargs["_update_obs_after_reward"] = False super().__init__(*args, **kwargs) + self._do_not_erase_local_dir_cls = True def step(self, action: BaseAction) -> Tuple[BaseObservation, float, bool, dict]: self._highres_sim_counter += 1 diff --git a/grid2op/Environment/_obsEnv.py b/grid2op/Environment/_obsEnv.py index 347aaeaa9..172235eb7 100644 --- a/grid2op/Environment/_obsEnv.py +++ b/grid2op/Environment/_obsEnv.py @@ -9,8 +9,11 @@ import copy import numpy as np import warnings -from grid2op.Exceptions.envExceptions import EnvError +from typing import Dict, Union, Tuple, List, Optional, Any, Literal +import grid2op +from grid2op.Exceptions.envExceptions import EnvError +from grid2op.typing_variables import STEP_INFO_TYPING from grid2op.dtypes import dt_int, dt_float, dt_bool from grid2op.Environment.baseEnv import BaseEnv from grid2op.Chronics import ChangeNothing @@ -72,6 +75,8 @@ def __init__( highres_sim_counter=None, _complete_action_cls=None, _ptr_orig_obs_space=None, + _local_dir_cls=None, # only set at the first call to `make(...)` after should be false + _read_from_local_dir=None, ): BaseEnv.__init__( self, @@ -89,7 +94,10 @@ def __init__( logger=logger, highres_sim_counter=highres_sim_counter, update_obs_after_reward=False, + _local_dir_cls=_local_dir_cls, + _read_from_local_dir=_read_from_local_dir ) + 
self._do_not_erase_local_dir_cls = True self.__unusable = False # unsuable if backend cannot be copied self._reward_helper = reward_helper @@ -98,12 +106,13 @@ def __init__( # initialize the observation space self._obsClass = None - + + cls = type(self) # line status (inherited from BaseEnv) - self._line_status = np.full(self.n_line, dtype=dt_bool, fill_value=True) + self._line_status = np.full(cls.n_line, dtype=dt_bool, fill_value=True) # line status (for this usage) self._line_status_me = np.ones( - shape=self.n_line, dtype=dt_int + shape=cls.n_line, dtype=dt_int ) # this is "line status" but encode in +1 / -1 if self._thermal_limit_a is None: @@ -111,6 +120,8 @@ def __init__( else: self._thermal_limit_a[:] = thermal_limit_a + self.current_obs_init = None + self.current_obs = None self._init_backend( chronics_handler=_ObsCH(), backend=backend_instanciated, @@ -125,13 +136,13 @@ def __init__( #### # to be able to save and import (using env.generate_classes) correctly self._actionClass = action_helper.subtype - self._observationClass = _complete_action_cls # not used anyway self._complete_action_cls = _complete_action_cls self._action_space = ( action_helper # obs env and env share the same action space ) self._ptr_orig_obs_space = _ptr_orig_obs_space + #### self.no_overflow_disconnection = parameters.NO_OVERFLOW_DISCONNECTION @@ -175,6 +186,8 @@ def _init_backend( if backend is None: self.__unusable = True return + self._actionClass_orig = actionClass + self._observationClass_orig = observationClass self.__unusable = False self._env_dc = self.parameters.ENV_DC @@ -192,19 +205,22 @@ def _init_backend( from grid2op.Observation import ObservationSpace from grid2op.Reward import FlatReward - ob_sp_cls = ObservationSpace.init_grid(type(backend)) + ob_sp_cls = ObservationSpace.init_grid(type(backend), _local_dir_cls=self._local_dir_cls) self._observation_space = ob_sp_cls(type(backend), env=self, with_forecast=False, rewardClass=FlatReward, - _with_obs_env=False) + _with_obs_env=False, + _local_dir_cls=self._local_dir_cls + ) + self._observationClass = self._observation_space.subtype # not used anyway # create the opponent self._create_opponent() # create the attention budget self._create_attention_budget() - self._obsClass = observationClass.init_grid(type(self.backend)) + self._obsClass = observationClass.init_grid(type(self.backend), _local_dir_cls=self._local_dir_cls) self._obsClass._INIT_GRID_CLS = observationClass self.current_obs_init = self._obsClass(obs_env=None, action_helper=None) self.current_obs = self.current_obs_init @@ -213,7 +229,7 @@ def _init_backend( self._init_alert_data() # backend has loaded everything - self._hazard_duration = np.zeros(shape=self.n_line, dtype=dt_int) + self._hazard_duration = np.zeros(shape=type(self).n_line, dtype=dt_int) def _do_nothing(self, x): """ @@ -244,7 +260,7 @@ def _update_actions(self): # This "environment" doesn't modify anything return self._do_nothing_act, None - def copy(self): + def copy(self, env=None, new_obs_space=None): """ INTERNAL @@ -260,17 +276,44 @@ def copy(self): if self.__unusable: raise EnvError("Impossible to use a Observation backend with an " "environment that cannot be copied.") - backend = self.backend - self.backend = None - _highres_sim_counter = self._highres_sim_counter - self._highres_sim_counter = None - with warnings.catch_warnings(): - warnings.simplefilter("ignore", FutureWarning) - res = copy.deepcopy(self) - res.backend = backend.copy() - res._highres_sim_counter = _highres_sim_counter - self.backend = 
backend - self._highres_sim_counter = _highres_sim_counter + + my_cls = type(self) + res = my_cls.__new__(my_cls) + + # fill its attribute + res.__unusable = self.__unusable + res._obsClass = self._obsClass + res._line_status = copy.deepcopy(self._line_status) + res._line_status_me = copy.deepcopy(self._line_status_me) + if env is not None: + # res._ptr_orig_obs_space = env._observation_space # this is not created when this function is called + # so this is why i pass the `new_obs_space` as argument + res._ptr_orig_obs_space = new_obs_space + else: + res._ptr_orig_obs_space = self._ptr_orig_obs_space + res.no_overflow_disconnection = self.parameters.NO_OVERFLOW_DISCONNECTION + res._topo_vect = copy.deepcopy(self._topo_vect) + res.is_init = self.is_init + if env is not None: + res._helper_action_env = env._helper_action_env + else: + res._helper_action_env = self._helper_action_env + res._disc_lines = copy.deepcopy(self._disc_lines) + res._highres_sim_counter = self._highres_sim_counter + res._max_episode_duration = self._max_episode_duration + + res.current_obs_init = self._obsClass(obs_env=None, action_helper=None) + res.current_obs_init.reset() + res.current_obs = res.current_obs_init + + # copy attribute of "super" + super()._custom_deepcopy_for_copy(res) + + # finish to initialize res + res.env_modification = res._helper_action_env() + res._do_nothing_act = res._helper_action_env() + res._backend_action_set = res._backend_action_class() + res.current_obs = res.current_obs_init return res def _reset_to_orig_state(self, obs): @@ -362,7 +405,8 @@ def init( ) self._backend_action_set += new_state_action # for storage unit - self._backend_action_set.storage_power.values[:] = 0.0 + if time_step > 0: + self._backend_action_set.storage_power.values[:] = 0.0 self._backend_action_set.all_changed() self._backend_action = copy.deepcopy(self._backend_action_set) @@ -397,7 +441,7 @@ def reset(self): super().reset() self.current_obs = self.current_obs_init - def simulate(self, action): + def simulate(self, action : "grid2op.Action.BaseAction") -> Tuple["grid2op.Observation.BaseObservation", float, bool, STEP_INFO_TYPING]: """ INTERNAL @@ -416,12 +460,12 @@ def simulate(self, action): Parameters ---------- - action: :class:`grid2op.Action.Action` + action: :class:`grid2op.Action.BaseAction` The action to test Returns ------- - observation: :class:`grid2op.Observation.Observation` + observation: :class:`grid2op.Observation.BaseObservation` agent's observation of the current environment reward: ``float`` @@ -431,13 +475,9 @@ def simulate(self, action): whether the episode has ended, in which case further step() calls will return undefined results info: ``dict`` - contains auxiliary diagnostic information (helpful for debugging, and sometimes learning). It is a - dictionary with keys: - - - "disc_lines": a numpy array (or ``None``) saying, for each powerline if it has been disconnected - due to overflow - - "is_illegal" (``bool``) whether the action given as input was illegal - - "is_ambiguous" (``bool``) whether the action given as input was ambiguous. + contains auxiliary diagnostic information (helpful for debugging, and sometimes learning). See + description of :func:`grid2op.Environment.BaseEnv.step` for more information about the + keys of this dictionary. 
""" if self.__unusable: diff --git a/grid2op/Environment/baseEnv.py b/grid2op/Environment/baseEnv.py index 40aaf5252..f45a733fb 100644 --- a/grid2op/Environment/baseEnv.py +++ b/grid2op/Environment/baseEnv.py @@ -8,36 +8,43 @@ from datetime import datetime +import tempfile import logging import time import copy import os import json -from typing import Optional, Tuple +from typing import Optional, Tuple, Union, Dict, Any, Literal +import importlib +import sys + import warnings import numpy as np from scipy.optimize import (minimize, LinearConstraint) + from abc import ABC, abstractmethod -from grid2op.Action import ActionSpace from grid2op.Observation import (BaseObservation, ObservationSpace, HighResSimCounter) from grid2op.Backend import Backend from grid2op.dtypes import dt_int, dt_float, dt_bool from grid2op.Space import GridObjects, RandomObject -from grid2op.Exceptions import * +from grid2op.Exceptions import (Grid2OpException, + EnvError, + InvalidRedispatching, + GeneratorTurnedOffTooSoon, + GeneratorTurnedOnTooSoon, + AmbiguousActionRaiseAlert, + ImpossibleTopology) from grid2op.Parameters import Parameters -from grid2op.Reward import BaseReward -from grid2op.Reward import RewardHelper -from grid2op.Opponent import OpponentSpace, NeverAttackBudget -from grid2op.Action import DontAct, BaseAction -from grid2op.Rules import AlwaysLegal -from grid2op.Opponent import BaseOpponent +from grid2op.Reward import BaseReward, RewardHelper +from grid2op.Opponent import OpponentSpace, NeverAttackBudget, BaseOpponent +from grid2op.Action import DontAct, BaseAction, ActionSpace from grid2op.operator_attention import LinearAttentionBudget from grid2op.Action._backendAction import _BackendAction from grid2op.Chronics import ChronicsHandler -from grid2op.Rules import AlwaysLegal, BaseRules - +from grid2op.Rules import AlwaysLegal, BaseRules, AlwaysLegal +from grid2op.typing_variables import STEP_INFO_TYPING, RESET_OPTIONS_TYPING # TODO put in a separate class the redispatching function @@ -72,7 +79,6 @@ # WE DO NOT RECOMMEND TO ALTER IT IN ANY WAY """ - class BaseEnv(GridObjects, RandomObject, ABC): """ INTERNAL @@ -84,6 +90,65 @@ class BaseEnv(GridObjects, RandomObject, ABC): The documentation is showed here to document the common attributes of an "BaseEnvironment". + .. _danger-env-ownership: + + Notes + ------------------------ + + Note en environment data ownership + + .. danger:: + + + A non pythonic decision has been implemented in grid2op for various reasons: an environment + owns everything created from it. + + This means that if you (or the python interpreter) deletes the environment, you might not + use some data generate with this environment. + + More precisely, you cannot do something like: + + .. code-block:: python + + import grid2op + env = grid2op.make("l2rpn_case14_sandbox") + + saved_obs = [] + + obs = env.reset() + saved_obs.append(obs) + obs2, reward, done, info = env.step(env.action_space()) + saved_obs.append(obs2) + + saved_obs[0].simulate(env.action_space()) # works + del env + saved_obs[0].simulate(env.action_space()) # DOES NOT WORK + + It will raise an error like `Grid2OpException EnvError "This environment is closed. You cannot use it anymore."` + + This will also happen if you do things inside functions, for example like this: + + .. 
code-block:: python + + import grid2op + + def foo(manager): + env = grid2op.make("l2rpn_case14_sandbox") + obs = env.reset() + manager.append(obs) + obs2, reward, done, info = env.step(env.action_space()) + manager.append(obs2) + manager[0].simulate(env.action_space()) # works + return manager + + manager = [] + manager = foo(manager) + manager[0].simulate(env.action_space()) # DOES NOT WORK + + The same error is raised because the environment `env` is automatically deleted by python when the function `foo` ends + (well it might work on some cases, if the function is called before the variable `env` is actually deleted but you + should not rely on this behaviour.) + Attributes ---------- @@ -234,12 +299,17 @@ class BaseEnv(GridObjects, RandomObject, ABC): CAN_SKIP_TS = False # each step is exactly one time step + #: this are the keys of the dictionnary `options` + #: that can be used when calling `env.reset(..., options={})` + KEYS_RESET_OPTIONS = {"time serie id", "init state", "init ts", "max step"} + def __init__( self, init_env_path: os.PathLike, init_grid_path: os.PathLike, parameters: Parameters, voltagecontrolerClass: type, + name="unknown", thermal_limit_a: Optional[np.ndarray] = None, epsilon_poly: float = 1e-4, # precision of the redispatching algorithm tol_poly: float = 1e-2, # i need to compute a redispatching if the actual values are "more than tol_poly" the values they should be @@ -263,11 +333,33 @@ def __init__( observation_bk_kwargs=None, # type of backend for the observation space highres_sim_counter=None, update_obs_after_reward=False, + n_busbar=2, _is_test: bool = False, # TODO not implemented !! - _init_obs: Optional[BaseObservation] =None + _init_obs: Optional[BaseObservation] =None, + _local_dir_cls=None, + _read_from_local_dir=None, + _raw_backend_class=None, ): + #: flag to indicate not to erase the directory when the env has been used + self._do_not_erase_local_dir_cls = False GridObjects.__init__(self) RandomObject.__init__(self) + self.name = name + self._local_dir_cls = _local_dir_cls # suppose it's the second path to the environment, so the classes are already in the files + self._read_from_local_dir = _read_from_local_dir + if self._read_from_local_dir is not None: + if os.path.split(self._read_from_local_dir)[1] == "_grid2op_classes": + # legacy behaviour (using experimental_read_from_local_dir kwargs in env.make) + self._do_not_erase_local_dir_cls = True + else: + self._do_not_erase_local_dir_cls = True + + self._actionClass_orig = None + self._observationClass_orig = None + + self._raw_backend_class = _raw_backend_class + + self._n_busbar = n_busbar # env attribute not class attribute ! 
if other_rewards is None: other_rewards = {} if kwargs_attention_budget is None: @@ -319,10 +411,10 @@ def __init__( # class used for the action spaces self._helper_action_class: ActionSpace = None - self._helper_observation_class: ActionSpace = None + self._helper_observation_class: ObservationSpace = None # and calendar data - self.time_stamp: time.struct_time = None + self.time_stamp: time.struct_time = datetime(year=2019, month=1, day=1) self.nb_time_step: datetime.timedelta = dt_int(0) self.delta_time_seconds = None # number of seconds between two consecutive step @@ -342,7 +434,7 @@ def __init__( ) self._timestep_overflow: np.ndarray = None self._nb_timestep_overflow_allowed: np.ndarray = None - self._hard_overflow_threshold: float = self._parameters.HARD_OVERFLOW_THRESHOLD + self._hard_overflow_threshold: np.ndarray = None # store actions "cooldown" self._times_before_line_status_actionable: np.ndarray = None @@ -449,11 +541,11 @@ def __init__( self._voltage_controler = None # backend action - self._backend_action_class = None - self._backend_action = None + self._backend_action_class : type = None + self._backend_action : _BackendAction = None # specific to Basic Env, do not change - self.backend :Backend = None + self.backend : Backend = None self.__is_init = False self.debug_dispatch = False @@ -552,16 +644,26 @@ def _custom_deepcopy_for_copy(self, new_obj, dict_=None): if self.__closed: raise RuntimeError("Impossible to make a copy of a closed environment !") - if not self.backend._can_be_copied: - raise RuntimeError("Impossible to copy your environment: the backend " - "class you used cannot be copied.") + if hasattr(self.backend, "_can_be_copied"): + if not self.backend._can_be_copied: + # introduced later on, might not be copied perfectly for some older backends + raise RuntimeError("Impossible to copy your environment: the backend " + "class you used cannot be copied.") + # for earlier backend it is not possible to check this so I ignore it. + RandomObject._custom_deepcopy_for_copy(self, new_obj) + new_obj.name = self.name if dict_ is None: dict_ = {} - + new_obj._n_busbar = self._n_busbar + new_obj._init_grid_path = copy.deepcopy(self._init_grid_path) new_obj._init_env_path = copy.deepcopy(self._init_env_path) + new_obj._local_dir_cls = None # copy of a env is not the "main" env. TODO + new_obj._do_not_erase_local_dir_cls = self._do_not_erase_local_dir_cls + new_obj._read_from_local_dir = self._read_from_local_dir + new_obj._raw_backend_class = self._raw_backend_class new_obj._DEBUG = self._DEBUG new_obj._parameters = copy.deepcopy(self._parameters) new_obj.with_forecast = self.with_forecast @@ -581,27 +683,23 @@ def _custom_deepcopy_for_copy(self, new_obj, dict_=None): new_obj._tol_poly = self._tol_poly # - new_obj._complete_action_cls = copy.deepcopy(self._complete_action_cls) + new_obj._complete_action_cls = self._complete_action_cls # const # define logger new_obj.logger = copy.deepcopy(self.logger) # TODO does that make any sense ? 
# class used for the action spaces new_obj._helper_action_class = self._helper_action_class # const - new_obj._helper_observation_class = self._helper_observation_class + new_obj._helper_observation_class = self._helper_observation_class # const # and calendar data new_obj.time_stamp = self.time_stamp new_obj.nb_time_step = self.nb_time_step new_obj.delta_time_seconds = self.delta_time_seconds - # observation - if self.current_obs is not None: - new_obj.current_obs = self.current_obs.copy() - # backend # backend action - new_obj._backend_action_class = self._backend_action_class + new_obj._backend_action_class = self._backend_action_class # const new_obj._backend_action = copy.deepcopy(self._backend_action) # specific to Basic Env, do not change @@ -626,7 +724,7 @@ def _custom_deepcopy_for_copy(self, new_obj, dict_=None): new_obj._nb_timestep_overflow_allowed = copy.deepcopy( self._nb_timestep_overflow_allowed ) - new_obj._hard_overflow_threshold = self._hard_overflow_threshold + new_obj._hard_overflow_threshold = copy.deepcopy(self._hard_overflow_threshold) # store actions "cooldown" new_obj._times_before_line_status_actionable = copy.deepcopy( @@ -671,37 +769,48 @@ def _custom_deepcopy_for_copy(self, new_obj, dict_=None): new_obj._hazards = copy.deepcopy(self._hazards) new_obj._env_modification = copy.deepcopy(self._env_modification) + # action space used by the environment + new_obj._game_rules = copy.deepcopy(self._game_rules) + new_obj._helper_action_env = self._helper_action_env.copy() + new_obj._helper_action_env.legal_action = new_obj._game_rules.legal_action + # to use the data new_obj.done = self.done new_obj.current_reward = copy.deepcopy(self.current_reward) new_obj.chronics_handler = copy.deepcopy(self.chronics_handler) - new_obj._game_rules = copy.deepcopy(self._game_rules) - new_obj._helper_action_env = self._helper_action_env.copy() - new_obj._helper_action_env.legal_action = new_obj._game_rules.legal_action + # retrieve the "pointer" to the new_obj action space (for initializing the grid) + new_obj.chronics_handler.cleanup_action_space() + new_obj.chronics_handler.action_space = new_obj._helper_action_env + + # action space new_obj._action_space = self._action_space.copy() new_obj._action_space.legal_action = new_obj._game_rules.legal_action new_obj._rewardClass = self._rewardClass new_obj._actionClass = self._actionClass + new_obj._actionClass_orig = self._actionClass_orig new_obj._observationClass = self._observationClass + new_obj._observationClass_orig = self._observationClass_orig new_obj._legalActClass = self._legalActClass - new_obj._observation_space = self._observation_space.copy(copy_backend=True) - new_obj._observation_space._legal_action = ( - new_obj._game_rules.legal_action - ) # TODO this does not respect SOLID principles at all ! 
- new_obj._kwargs_observation = copy.deepcopy(self._kwargs_observation) - new_obj._observation_space._ptr_kwargs_observation = new_obj._kwargs_observation - new_obj._names_chronics_to_backend = self._names_chronics_to_backend - new_obj._reward_helper = copy.deepcopy(self._reward_helper) - - # gym compatibility - new_obj.reward_range = copy.deepcopy(self.reward_range) - new_obj._viewer = copy.deepcopy(self._viewer) - new_obj.viewer_fig = copy.deepcopy(self.viewer_fig) - + new_obj._names_chronics_to_backend = self._names_chronics_to_backend # cst + # other rewards - new_obj.other_rewards = copy.deepcopy(self.other_rewards) - + new_obj.other_rewards = {k: copy.deepcopy(v) for k, v in self.other_rewards.items()} + for extra_reward in new_obj.other_rewards.values(): + extra_reward.reset(new_obj) + + # voltage + new_obj._voltagecontrolerClass = self._voltagecontrolerClass + if self._voltage_controler is not None: + new_obj._voltage_controler = self._voltage_controler.copy() + else: + new_obj._voltage_controler = None + + # needed for the "Environment.get_kwargs(env, False, False)" (used in the observation_space) + new_obj._attention_budget_cls = self._attention_budget_cls # const + new_obj._kwargs_attention_budget = copy.deepcopy(self._kwargs_attention_budget) + new_obj._has_attention_budget = self._has_attention_budget + # opponent new_obj._opponent_space_type = self._opponent_space_type new_obj._opponent_action_class = self._opponent_action_class # const @@ -718,6 +827,27 @@ def _custom_deepcopy_for_copy(self, new_obj, dict_=None): new_obj._compute_opp_budget = self._opponent_budget_class( self._opponent_action_space ) + + new_obj._observation_bk_class = self._observation_bk_class + new_obj._observation_bk_kwargs = self._observation_bk_kwargs + + # do not copy it. + new_obj._highres_sim_counter = self._highres_sim_counter + + # observation space (might depends on the previous things) + # at this stage the function "Environment.get_kwargs(env, False, False)" should run + new_obj._kwargs_observation = copy.deepcopy(self._kwargs_observation) + new_obj._observation_space = self._observation_space.copy(copy_backend=True, env=new_obj) + new_obj._observation_space._legal_action = ( + new_obj._game_rules.legal_action + ) # TODO this does not respect SOLID principles at all ! 
+ new_obj._observation_space._ptr_kwargs_observation = new_obj._kwargs_observation + new_obj._reward_helper = copy.deepcopy(self._reward_helper) + + # gym compatibility + new_obj.reward_range = copy.deepcopy(self.reward_range) + new_obj._viewer = copy.deepcopy(self._viewer) + new_obj.viewer_fig = copy.deepcopy(self.viewer_fig) # init the opponent new_obj._opponent = new_obj._opponent_class.__new__(new_obj._opponent_class) @@ -731,15 +861,11 @@ def _custom_deepcopy_for_copy(self, new_obj, dict_=None): attack_duration=new_obj._opponent_attack_duration, attack_cooldown=new_obj._opponent_attack_cooldown, budget_per_timestep=new_obj._opponent_budget_per_ts, - opponent=new_obj._opponent, + opponent=new_obj._opponent ) state_me, state_opp = self._oppSpace._get_state() new_obj._oppSpace._set_state(state_me) - - # voltage - new_obj._voltagecontrolerClass = self._voltagecontrolerClass - new_obj._voltage_controler = self._voltage_controler.copy() - + # to change the parameters new_obj.__new_param = copy.deepcopy(self.__new_param) new_obj.__new_forecast_param = copy.deepcopy(self.__new_forecast_param) @@ -763,19 +889,13 @@ def _custom_deepcopy_for_copy(self, new_obj, dict_=None): new_obj._limited_before = copy.deepcopy(self._limited_before) # attention budget - new_obj._has_attention_budget = self._has_attention_budget new_obj._attention_budget = copy.deepcopy(self._attention_budget) - new_obj._attention_budget_cls = self._attention_budget_cls # const new_obj._is_alarm_illegal = copy.deepcopy(self._is_alarm_illegal) new_obj._is_alarm_used_in_reward = copy.deepcopy(self._is_alarm_used_in_reward) # alert new_obj._is_alert_illegal = copy.deepcopy(self._is_alert_illegal) new_obj._is_alert_used_in_reward = copy.deepcopy(self._is_alert_used_in_reward) - - new_obj._kwargs_attention_budget = copy.deepcopy(self._kwargs_attention_budget) - - new_obj._last_obs = self._last_obs.copy() new_obj._has_just_been_seeded = self._has_just_been_seeded @@ -788,14 +908,8 @@ def _custom_deepcopy_for_copy(self, new_obj, dict_=None): else: new_obj._init_obs = self._init_obs.copy() - new_obj._observation_bk_class = self._observation_bk_class - new_obj._observation_bk_kwargs = self._observation_bk_kwargs - # do not forget ! - new_obj._is_test = self._is_test - - # do not copy it. - new_obj._highres_sim_counter = self._highres_sim_counter + new_obj._is_test = self._is_test # alert new_obj._last_alert = copy.deepcopy(self._last_alert) @@ -809,6 +923,17 @@ def _custom_deepcopy_for_copy(self, new_obj, dict_=None): new_obj._update_obs_after_reward = copy.deepcopy(self._update_obs_after_reward) + if self._last_obs is not None: + new_obj._last_obs = self._last_obs.copy(env=new_obj) + else: + new_obj._last_obs = None + + # observation + from grid2op.Environment._obsEnv import _ObsEnv + if self.current_obs is not None and not isinstance(self, _ObsEnv): + # breaks for some version of lightsim2grid... (a powerflow need to be run to retrieve the observation) + new_obj.current_obs = new_obj.get_obs() + def get_path_env(self): """ Get the path that allows to create this environment. 
@@ -951,7 +1076,7 @@ def load_alert_data(self): alertable_line_names = copy.deepcopy(lines_attacked) alertable_line_ids = np.empty(len(alertable_line_names), dtype=dt_int) for i, el in enumerate(alertable_line_names): - indx = np.where(self.backend.name_line == el)[0] + indx = (self.backend.name_line == el).nonzero()[0] if not len(indx): raise Grid2OpException(f"Attacked line {el} is not found in the grid.") alertable_line_ids[i] = indx[0] @@ -1149,6 +1274,7 @@ def _create_opponent(self): gridobj=type(self.backend), legal_action=AlwaysLegal, actionClass=self._opponent_action_class, + _local_dir_cls=self._local_dir_cls ) self._compute_opp_budget = self._opponent_budget_class( @@ -1162,6 +1288,7 @@ def _create_opponent(self): attack_cooldown=self._opponent_attack_cooldown, budget_per_timestep=self._opponent_budget_per_ts, opponent=self._opponent, + _local_dir_cls=self._local_dir_cls, ) self._oppSpace.init_opponent(partial_env=self, **self._kwargs_opponent) self._oppSpace.reset() @@ -1171,13 +1298,19 @@ def _init_myclass(self): # the class has already been initialized return # remember the original grid2op class - type(self)._INIT_GRID_CLS = type(self) + orig_cls = type(self) - bk_type = type( - self.backend - ) # be careful here: you need to initialize from the class, and not from the object + # be careful here: you need to initialize from the class, and not from the object + bk_type = type(self.backend) # create the proper environment class for this specific environment - self.__class__ = type(self).init_grid(bk_type) + new_cls = type(self).init_grid(bk_type, _local_dir_cls=self._local_dir_cls) + # assign the right initial grid class + if orig_cls._INIT_GRID_CLS is None: + new_cls._INIT_GRID_CLS = orig_cls + else: + new_cls._INIT_GRID_CLS = orig_cls._INIT_GRID_CLS + + self.__class__ = new_cls def _has_been_initialized(self): # type of power flow to play @@ -1186,7 +1319,7 @@ def _has_been_initialized(self): bk_type = type(self.backend) if np.min([self.n_line, self.n_gen, self.n_load, self.n_sub]) <= 0: raise EnvironmentError("Environment has not been initialized properly") - self._backend_action_class = _BackendAction.init_grid(bk_type) + self._backend_action_class = _BackendAction.init_grid(bk_type, _local_dir_cls=self._local_dir_cls) self._backend_action = self._backend_action_class() # initialize maintenance / hazards @@ -1204,7 +1337,6 @@ def _has_been_initialized(self): self._gen_downtime = np.zeros(self.n_gen, dtype=dt_int) self._gen_activeprod_t = np.zeros(self.n_gen, dtype=dt_float) self._gen_activeprod_t_redisp = np.zeros(self.n_gen, dtype=dt_float) - self._nb_timestep_overflow_allowed = np.ones(shape=self.n_line, dtype=dt_int) self._max_timestep_line_status_deactivated = ( self._parameters.NB_TIMESTEP_COOLDOWN_LINE ) @@ -1220,6 +1352,11 @@ def _has_been_initialized(self): fill_value=self._parameters.NB_TIMESTEP_OVERFLOW_ALLOWED, dtype=dt_int, ) + self._hard_overflow_threshold = np.full( + shape=(self.n_line,), + fill_value=self._parameters.HARD_OVERFLOW_THRESHOLD, + dtype=dt_float, + ) self._timestep_overflow = np.zeros(shape=(self.n_line,), dtype=dt_int) # update the parameters @@ -1261,7 +1398,6 @@ def _update_parameters(self): # type of power flow to play # if True, then it will not disconnect lines above their thermal limits self._no_overflow_disconnection = self._parameters.NO_OVERFLOW_DISCONNECTION - self._hard_overflow_threshold = self._parameters.HARD_OVERFLOW_THRESHOLD # store actions "cooldown" self._max_timestep_line_status_deactivated = ( @@ -1275,20 +1411,34 @@ def 
_update_parameters(self): self._nb_timestep_overflow_allowed[ : ] = self._parameters.NB_TIMESTEP_OVERFLOW_ALLOWED - + self._hard_overflow_threshold[:] = self._parameters.HARD_OVERFLOW_THRESHOLD # hard overflow part self._env_dc = self._parameters.ENV_DC self.__new_param = None - def reset(self): + def set_id(self, id_: Union[int, str]) -> None: + # nothing to do in general, overloaded for real Environment + pass + + def reset(self, + *, + seed: Union[int, None] = None, + options: RESET_OPTIONS_TYPING = None): """ Reset the base environment (set the appropriate variables to correct initialization). It is (and must be) overloaded in other :class:`grid2op.Environment` """ if self.__closed: raise EnvError("This environment is closed. You cannot use it anymore.") - + if options is not None: + for el in options: + if el not in type(self).KEYS_RESET_OPTIONS: + raise EnvError(f"You tried to customize the `reset` call with some " + f"`options` using the key `{el}` which is invalid. " + f"Only keys in {sorted(list(type(self).KEYS_RESET_OPTIONS))} " + f"can be used.") + self.__is_init = True # current = None is an indicator that this is the first step of the environment # so don't change the setting of current_obs = None unless you are willing to change that @@ -1309,9 +1459,15 @@ def reset(self): self._last_obs = None - # seeds (so that next episode does not depend on what happened in previous episode) - if self.seed_used is not None and not self._has_just_been_seeded: + if options is not None and "time serie id" in options: + self.set_id(options["time serie id"]) + + if seed is not None: + self.seed(seed) + elif self.seed_used is not None and not self._has_just_been_seeded: + # seeds (so that next episode does not depend on what happened in previous episode) self.seed(None, _seed_me=False) + self._reset_storage() self._reset_curtailment() self._reset_alert() @@ -1356,6 +1512,18 @@ def seed(self, seed=None, _seed_me=True): """ Set the seed of this :class:`Environment` for a better control and to ease reproducible experiments. + .. seealso:: + function :func:`Environment.reset` for extra information + + .. versionchanged:: 1.9.8 + Starting from version 1.9.8 you can directly set the seed when calling + reset. + + .. warning:: + It is preferable to call this function `just before` a call to `env.reset()` otherwise + the seeding might not work properly (especially if some non standard "time serie generators" + *aka* chronics are used) + Parameters ---------- seed: ``int`` @@ -1645,7 +1813,7 @@ def set_thermal_limit(self, thermal_limit): f"names. We found: {key} which is not a line name. The names of the " f"powerlines are {self.name_line}" ) - ind_line = np.where(self.name_line == key)[0][0] + ind_line = (self.name_line == key).nonzero()[0][0] if np.isfinite(tmp[ind_line]): raise Grid2OpException( f"Humm, there is a really strange bug, some lines are set twice." 
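# Illustrative sketch, not part of the diff, based on the name-matching code just above:
# `set_thermal_limit` accepts a dict keyed by powerline names (an unknown or duplicated
# name raises Grid2OpException). Environment name and the 1000 A value are only examples.
import grid2op

env = grid2op.make("l2rpn_case14_sandbox", test=True)
new_limits = {line_nm: 1000.0 for line_nm in env.name_line}  # one value (in amps) per line
env.set_thermal_limit(new_limits)
obs = env.reset()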
@@ -1741,9 +1909,9 @@ def _prepare_redisp(self, action, new_p, already_modified_gen): redisp_act_orig = 1.0 * action._redispatch if ( - np.all(redisp_act_orig == 0.0) - and np.all(self._target_dispatch == 0.0) - and np.all(self._actual_dispatch == 0.0) + np.all(np.abs(redisp_act_orig) <= 1e-7) + and np.all(np.abs(self._target_dispatch) <= 1e-7) + and np.all(np.abs(self._actual_dispatch) <= 1e-7) ): return valid, except_, info_ # check that everything is consistent with pmin, pmax: @@ -1755,7 +1923,7 @@ def _prepare_redisp(self, action, new_p, already_modified_gen): "invalid because, even if the sepoint is pmin, this dispatch would set it " "to a number higher than pmax, which is impossible]. Invalid dispatch for " "generator(s): " - "{}".format(np.where(cond_invalid)[0]) + "{}".format((cond_invalid).nonzero()[0]) ) self._target_dispatch -= redisp_act_orig return valid, except_, info_ @@ -1767,13 +1935,13 @@ def _prepare_redisp(self, action, new_p, already_modified_gen): "invalid because, even if the sepoint is pmax, this dispatch would set it " "to a number bellow pmin, which is impossible]. Invalid dispatch for " "generator(s): " - "{}".format(np.where(cond_invalid)[0]) + "{}".format((cond_invalid).nonzero()[0]) ) self._target_dispatch -= redisp_act_orig return valid, except_, info_ # i can't redispatch turned off generators [turned off generators need to be turned on before redispatching] - if (redisp_act_orig[new_p == 0.0]).any() and self._forbid_dispatch_off: + if (redisp_act_orig[np.abs(new_p) <= 1e-7]).any() and self._forbid_dispatch_off: # action is invalid, a generator has been redispatched, but it's turned off except_ = InvalidRedispatching( "Impossible to dispatch a turned off generator" @@ -1783,15 +1951,13 @@ def _prepare_redisp(self, action, new_p, already_modified_gen): if self._forbid_dispatch_off is True: redisp_act_orig_cut = 1.0 * redisp_act_orig - redisp_act_orig_cut[new_p == 0.0] = 0.0 + redisp_act_orig_cut[np.abs(new_p) <= 1e-7] = 0.0 if (redisp_act_orig_cut != redisp_act_orig).any(): info_.append( { - "INFO: redispatching cut because generator will be turned_off": np.where( + "INFO: redispatching cut because generator will be turned_off": ( redisp_act_orig_cut != redisp_act_orig - )[ - 0 - ] + ).nonzero()[0] } ) return valid, except_, info_ @@ -1814,11 +1980,18 @@ def _make_redisp(self, already_modified_gen, new_p): def _compute_dispatch_vect(self, already_modified_gen, new_p): except_ = None + + # handle the case where there are storage or redispatching + # action or curtailment action on the "init state" + # of the grid + if self.nb_time_step == 0: + self._gen_activeprod_t_redisp[:] = new_p + # first i define the participating generators # these are the generators that will be adjusted for redispatching gen_participating = ( (new_p > 0.0) - | (self._actual_dispatch != 0.0) + | (np.abs(self._actual_dispatch) >= 1e-7) | (self._target_dispatch != self._actual_dispatch) ) gen_participating[~self.gen_redispatchable] = False @@ -1969,8 +2142,8 @@ def _compute_dispatch_vect(self, already_modified_gen, new_p): # the idea here is to chose a initial point that would be close to the # desired solution (split the (sum of the) dispatch to the available generators) x0 = np.zeros(gen_participating.sum()) - if (self._target_dispatch != 0.).any() or already_modified_gen.any(): - gen_for_x0 = self._target_dispatch[gen_participating] != 0. 
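# Illustrative note, not part of the diff: the hunks above (and several others in this
# file) replace `np.where(cond)[0]` with `cond.nonzero()[0]`. For a boolean mask the two
# are equivalent; `nonzero()` simply avoids the 3-argument form of `np.where`.
import numpy as np

cond = np.array([False, True, False, True])
print(np.where(cond)[0])   # [1 3]
print(cond.nonzero()[0])   # [1 3]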
+ if (np.abs(self._target_dispatch) >= 1e-7).any() or already_modified_gen.any(): + gen_for_x0 = np.abs(self._target_dispatch[gen_participating]) >= 1e-7 gen_for_x0 |= already_modified_gen[gen_participating] x0[gen_for_x0] = ( self._target_dispatch[gen_participating][gen_for_x0] @@ -1982,7 +2155,7 @@ def _compute_dispatch_vect(self, already_modified_gen, new_p): # in this "if" block I set the other component of x0 to # their "right" value - can_adjust = (x0 == 0.0) + can_adjust = (np.abs(x0) <= 1e-7) if can_adjust.any(): init_sum = x0.sum() denom_adjust = (1.0 / weights[can_adjust]).sum() @@ -2247,8 +2420,8 @@ def _handle_updown_times(self, gen_up_before, redisp_act): self._gen_downtime[gen_connected_this_timestep] < self.gen_min_downtime[gen_connected_this_timestep] ) - id_gen = np.where(id_gen)[0] - id_gen = np.where(gen_connected_this_timestep[id_gen])[0] + id_gen = (id_gen).nonzero()[0] + id_gen = (gen_connected_this_timestep[id_gen]).nonzero()[0] except_ = GeneratorTurnedOnTooSoon( "Some generator has been connected too early ({})".format(id_gen) ) @@ -2269,8 +2442,8 @@ def _handle_updown_times(self, gen_up_before, redisp_act): self._gen_uptime[gen_disconnected_this] < self.gen_min_uptime[gen_disconnected_this] ) - id_gen = np.where(id_gen)[0] - id_gen = np.where(gen_connected_this_timestep[id_gen])[0] + id_gen = (id_gen).nonzero()[0] + id_gen = (gen_connected_this_timestep[id_gen]).nonzero()[0] except_ = GeneratorTurnedOffTooSoon( "Some generator has been disconnected too early ({})".format(id_gen) ) @@ -2419,7 +2592,7 @@ def _aux_remove_power_too_low(self, delta_, indx_too_low): def _compute_storage(self, action_storage_power): self._storage_previous_charge[:] = self._storage_current_charge - storage_act = np.isfinite(action_storage_power) & (action_storage_power != 0.0) + storage_act = np.isfinite(action_storage_power) & (np.abs(action_storage_power) >= 1e-7) self._action_storage[:] = 0.0 self._storage_power[:] = 0.0 modif = False @@ -2540,7 +2713,7 @@ def _aux_update_curtailment_act(self, action): def _aux_compute_new_p_curtailment(self, new_p, curtailment_vect): """modifies the new_p argument !!!!""" gen_curtailed = ( - curtailment_vect != 1.0 + np.abs(curtailment_vect - 1.) >= 1e-7 ) # curtailed either right now, or in a previous action max_action = self.gen_pmax[gen_curtailed] * curtailment_vect[gen_curtailed] new_p[gen_curtailed] = np.minimum(max_action, new_p[gen_curtailed]) @@ -2549,7 +2722,7 @@ def _aux_compute_new_p_curtailment(self, new_p, curtailment_vect): def _aux_handle_curtailment_without_limit(self, action, new_p): """modifies the new_p argument !!!! (but not the action)""" if self.redispatching_unit_commitment_availble and ( - action._modif_curtailment or (self._limit_curtailment != 1.0).any() + action._modif_curtailment or (np.abs(self._limit_curtailment - 1.) >= 1e-7).any() ): self._aux_update_curtailment_act(action) @@ -2570,7 +2743,7 @@ def _aux_handle_curtailment_without_limit(self, action, new_p): else: self._sum_curtailment_mw = -self._sum_curtailment_mw_prev self._sum_curtailment_mw_prev = dt_float(0.0) - gen_curtailed = self._limit_curtailment != 1.0 + gen_curtailed = np.abs(self._limit_curtailment - 1.) >= 1e-7 return gen_curtailed @@ -2782,7 +2955,7 @@ def _aux_apply_redisp(self, action, new_p, new_p_th, gen_curtailed, except_): is_illegal_redisp = True except_.append(except_tmp) - if self.n_storage > 0: + if type(self).n_storage > 0: # TODO curtailment: cancel it here too ! 
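# Illustrative note, not part of the diff: the comparisons rewritten above trade exact
# floating-point equality (x == 0.0, x != 1.0) for a small tolerance, because accumulated
# redispatch / curtailment values are rarely exactly zero.
import numpy as np

actual_dispatch = np.array([0.0, 1e-9, 5.0])
print(actual_dispatch != 0.0)            # [False  True  True] -> 1e-9 counted as "dispatched"
print(np.abs(actual_dispatch) >= 1e-7)   # [False False  True] -> tolerant check used here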
self._storage_current_charge[:] = self._storage_previous_charge self._amount_storage -= self._amount_storage_prev @@ -2804,7 +2977,6 @@ def _aux_apply_redisp(self, action, new_p, new_p_th, gen_curtailed, except_): self._storage_power_prev[:] = self._storage_power # case where the action modifies load (TODO maybe make a different env for that...) self._aux_handle_act_inj(action) - valid_disp, except_tmp = self._make_redisp(already_modified_gen, new_p) if not valid_disp or except_tmp is not None: @@ -2839,10 +3011,14 @@ def _aux_apply_redisp(self, action, new_p, new_p_th, gen_curtailed, except_): res_action = action return res_action, is_illegal_redisp, is_illegal_reco, is_done - def _aux_update_backend_action(self, action, action_storage_power, init_disp): + def _aux_update_backend_action(self, + action: BaseAction, + action_storage_power: np.ndarray, + init_disp: np.ndarray): # make sure the dispatching action is not implemented "as is" by the backend. # the environment must make sure it's a zero-sum action. # same kind of limit for the storage + res_exc_ = None action._redispatch[:] = 0.0 action._storage_power[:] = self._storage_power self._backend_action += action @@ -2851,6 +3027,7 @@ def _aux_update_backend_action(self, action, action_storage_power, init_disp): # TODO storage: check the original action, even when replaced by do nothing is not modified self._backend_action += self._env_modification self._backend_action.set_redispatch(self._actual_dispatch) + return res_exc_ def _update_alert_properties(self, action, lines_attacked, subs_attacked): # update the environment with the alert information from the @@ -2957,16 +3134,19 @@ def _aux_register_env_converged(self, disc_lines, action, init_line_status, new_ # TODO is non zero and disconnected, this should be ok. self._time_extract_obs += time.perf_counter() - beg_res + def _backend_next_grid_state(self): + """overlaoded in MaskedEnv""" + return self.backend.next_grid_state(env=self, is_dc=self._env_dc) + def _aux_run_pf_after_state_properly_set( self, action, init_line_status, new_p, except_ ): has_error = True + detailed_info = None try: # compute the next _grid state beg_pf = time.perf_counter() - disc_lines, detailed_info, conv_ = self.backend.next_grid_state( - env=self, is_dc=self._env_dc - ) + disc_lines, detailed_info, conv_ = self._backend_next_grid_state() self._disc_lines[:] = disc_lines self._time_powerflow += time.perf_counter() - beg_pf if conv_ is None: @@ -2985,7 +3165,10 @@ def _aux_run_pf_after_state_properly_set( ) return detailed_info, has_error - def step(self, action: BaseAction) -> Tuple[BaseObservation, float, bool, dict]: + def step(self, action: BaseAction) -> Tuple[BaseObservation, + float, + bool, + STEP_INFO_TYPING]: """ Run one timestep of the environment's dynamics. When end of episode is reached, you are responsible for calling `reset()` @@ -3039,11 +3222,10 @@ def step(self, action: BaseAction) -> Tuple[BaseObservation, float, bool, dict]: the simulation of the "cascading failures". - "rewards": dictionary of all "other_rewards" provided when the env was built. - "time_series_id": id of the time series used (if any, similar to a call to `env.chronics_handler.get_id()`) - Examples --------- - As any openAI gym environment, this is used like: + This is used like: .. 
code-block:: python @@ -3087,6 +3269,7 @@ def step(self, action: BaseAction) -> Tuple[BaseObservation, float, bool, dict]: # somehow "env.step()" or "env.reset()" self._has_just_been_seeded = False + cls = type(self) has_error = True is_done = False is_illegal = False @@ -3103,7 +3286,7 @@ def step(self, action: BaseAction) -> Tuple[BaseObservation, float, bool, dict]: detailed_info = [] init_disp = 1.0 * action._redispatch # dispatching action init_alert = None - if type(self).dim_alerts > 0: + if cls.dim_alerts > 0: init_alert = copy.deepcopy(action._raise_alert) action_storage_power = 1.0 * action._storage_power # battery information @@ -3130,12 +3313,12 @@ def step(self, action: BaseAction) -> Tuple[BaseObservation, float, bool, dict]: ) # battery information is_ambiguous = True - if type(self).dim_alerts > 0: + if cls.dim_alerts > 0: # keep the alert even if the rest is ambiguous (if alert is non ambiguous) is_ambiguous_alert = isinstance(except_tmp, AmbiguousActionRaiseAlert) if is_ambiguous_alert: # reset the alert - init_alert = np.zeros(type(self).dim_alerts, dtype=dt_bool) + init_alert = np.zeros(cls.dim_alerts, dtype=dt_bool) else: action.raise_alert = init_alert except_.append(except_tmp) @@ -3149,13 +3332,13 @@ def step(self, action: BaseAction) -> Tuple[BaseObservation, float, bool, dict]: 1.0 * action._storage_power ) # battery information except_.append(reason) - if type(self).dim_alerts > 0: + if cls.dim_alerts > 0: # keep the alert even if the rest is illegal action.raise_alert = init_alert is_illegal = True if self._has_attention_budget: - if type(self).assistant_warning_type == "zonal": + if cls.assistant_warning_type == "zonal": # this feature is implemented, so i do it reason_alarm_illegal = self._attention_budget.register_action( self, action, is_illegal, is_ambiguous @@ -3171,7 +3354,7 @@ def step(self, action: BaseAction) -> Tuple[BaseObservation, float, bool, dict]: new_p_th = 1.0 * new_p # storage unit - if self.n_storage > 0: + if cls.n_storage > 0: # limiting the storage units is done in `_aux_apply_redisp` # this only ensure the Emin / Emax and all the actions self._compute_storage(action_storage_power) @@ -3182,7 +3365,7 @@ def step(self, action: BaseAction) -> Tuple[BaseObservation, float, bool, dict]: gen_curtailed = self._aux_handle_curtailment_without_limit(action, new_p) beg__redisp = time.perf_counter() - if self.redispatching_unit_commitment_availble or self.n_storage > 0.0: + if cls.redispatching_unit_commitment_availble or cls.n_storage > 0.0: # this computes the "optimal" redispatching # and it is also in this function that the limiting of the curtailment / storage actions # is perform to make the state "feasible" @@ -3208,19 +3391,25 @@ def step(self, action: BaseAction) -> Tuple[BaseObservation, float, bool, dict]: tock = time.perf_counter() self._time_opponent += tock - tick self._time_create_bk_act += tock - beg_ - - self.backend.apply_action(self._backend_action) + try: + self.backend.apply_action(self._backend_action) + except ImpossibleTopology as exc_: + has_error = True + except_.append(exc_) + is_done = True + # TODO in this case: cancel the topological action of the agent + # and continue instead of "game over" self._time_apply_act += time.perf_counter() - beg_ # now it's time to run the powerflow properly # and to update the time dependant properties - self._update_alert_properties(action, lines_attacked, subs_attacked) - detailed_info, has_error = self._aux_run_pf_after_state_properly_set( - action, init_line_status, new_p, except_ - ) 
+ if not is_done: + self._update_alert_properties(action, lines_attacked, subs_attacked) + detailed_info, has_error = self._aux_run_pf_after_state_properly_set( + action, init_line_status, new_p, except_ + ) else: has_error = True - except StopIteration: # episode is over is_done = True @@ -3327,7 +3516,7 @@ def _reset_vectors_and_timings(self): ] = self._parameters.NB_TIMESTEP_OVERFLOW_ALLOWED self.nb_time_step = 0 # to have the first step at 0 - self._hard_overflow_threshold = self._parameters.HARD_OVERFLOW_THRESHOLD + self._hard_overflow_threshold[:] = self._parameters.HARD_OVERFLOW_THRESHOLD self._env_dc = self._parameters.ENV_DC self._times_before_line_status_actionable[:] = 0 @@ -3560,7 +3749,24 @@ def close(self): if hasattr(self, attr_nm): delattr(self, attr_nm) setattr(self, attr_nm, None) - + + if self._do_not_erase_local_dir_cls: + # The resources are not held by this env, so + # I do not remove them + # (case for ObsEnv or ForecastedEnv) + return + self._aux_close_local_dir_cls() + + def _aux_close_local_dir_cls(self): + if self._local_dir_cls is not None: + # I am the "keeper" of the temporary directory + # deleting this env should also delete the temporary directory + if not (hasattr(self._local_dir_cls, "_RUNNER_DO_NOT_ERASE") and not self._local_dir_cls._RUNNER_DO_NOT_ERASE): + # BUT if a runner uses it, then I should not delete it ! + self._local_dir_cls.cleanup() + self._local_dir_cls = None + # In this case it's likely that the OS will clean it for grid2op with a warning... + def attach_layout(self, grid_layout): """ Compare to the method of the base class, this one performs a check. @@ -3642,6 +3848,17 @@ def fast_forward_chronics(self, nb_timestep): 00:00). This can lead to suboptimal exploration, as during this phase, only a few time steps are managed by the agent, so in general these few time steps will correspond to grid state around Jan 1st at 00:00. + .. seealso:: + From grid2op version 1.10.3, a similar objective can be + obtained directly by calling :func:`grid2op.Environment.Environment.reset` with `"init ts"` + as option, for example like `obs = env.reset(options={"init ts": 12})` + + + .. danger:: + The usage of both :func:`BaseEnv.fast_forward_chronics` and :func:`Environment.set_max_iter` + is not recommended at all and might not behave correctly. Please use `env.reset` with + `obs = env.reset(options={"max step": xxx, "init ts": yyy})` for a correct behaviour. + Parameters ---------- nb_timestep: ``int`` @@ -3649,7 +3866,20 @@ def fast_forward_chronics(self, nb_timestep): Examples --------- - This can be used like this: + + From grid2op version 1.10.3 we recommend not to use this function (which will be deprecated) + but to use the :func:`grid2op.Environment.Environment.reset` functon with the `"init ts"` + option. + + .. code-block:: python + + import grid2op + env_name = "l2rpn_case14_sandbox" + env = grid2op.make(env_name) + + obs = env.reset(options={"init ts": 123}) + + For the legacy usave, this can be used like this: .. 
code-block:: python @@ -3800,29 +4030,86 @@ def change_reward(self, new_reward_func): ) self.__new_reward_func = new_reward_func - def _aux_gen_classes(self, cls, sys_path): - if not isinstance(cls, type): - raise RuntimeError(f"cls should be a type and not an object !: {cls}") - if not issubclass(cls, GridObjects): - raise RuntimeError(f"cls should inherit from GridObjects: {cls}") + @staticmethod + def _aux_gen_classes(cls_other, sys_path, _add_class_output=False): + if not isinstance(cls_other, type): + raise RuntimeError(f"cls_other should be a type and not an object !: {cls_other}") + if not issubclass(cls_other, GridObjects): + raise RuntimeError(f"cls_other should inherit from GridObjects: {cls_other}") from pathlib import Path - path_env = cls._PATH_ENV - cls._PATH_ENV = str(Path(self.get_path_env()).as_posix()) + path_env = cls_other._PATH_GRID_CLASSES + # cls_other._PATH_GRID_CLASSES = str(Path(self.get_path_env()).as_posix()) + cls_other._PATH_GRID_CLASSES = str(Path(sys_path).as_posix()) - res = cls._get_full_cls_str() - cls._PATH_ENV = path_env - output_file = os.path.join(sys_path, f"{cls.__name__}_file.py") + res = cls_other._get_full_cls_str() + cls_other._PATH_GRID_CLASSES = path_env + output_file = os.path.join(sys_path, f"{cls_other.__name__}_file.py") if not os.path.exists(output_file): # if the file is not already saved, i save it and add it to the __init__ file with open(output_file, "w", encoding="utf-8") as f: f.write(res) - return f"\nfrom .{cls.__name__}_file import {cls.__name__}" + str_import = f"\nfrom .{cls_other.__name__}_file import {cls_other.__name__}" else: - # otherwise i do nothing - return "" - - def generate_classes(self, _guard=None, _is_base_env__=True, sys_path=None): + # if the file exists, I check it's the same + from grid2op.MakeEnv.UpdateEnv import _aux_hash_file, _aux_update_hash_text + hash_saved = _aux_hash_file(output_file) + my_hash = _aux_update_hash_text(res) + if hash_saved.hexdigest() != my_hash.hexdigest(): + raise EnvError(f"It appears some classes have been modified between what was saved on the hard drive " + f"and the current state of the grid. This should not have happened. " + f"Check class {cls_other.__name__}") + str_import = None + if not _add_class_output: + return str_import + + # NB: these imports needs to be consistent with what is done in + # griobj.init_grid(...) 
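# Illustrative sketch, not part of the diff: the block that follows imports the generated
# "<ClassName>_file.py" modules dynamically. Reduced to its core (with hypothetical folder
# and class names), the mechanism looks like this:
import importlib
import os
import sys
import tempfile

base = tempfile.mkdtemp()                       # stand-in for the environment folder
pkg = os.path.join(base, "_grid2op_classes")
os.mkdir(pkg)
with open(os.path.join(pkg, "__init__.py"), "w", encoding="utf-8") as f:
    f.write("")
with open(os.path.join(pkg, "MyClass_file.py"), "w", encoding="utf-8") as f:
    f.write("class MyClass:\n    pass\n")

if base not in sys.path:
    sys.path.append(base)                       # make the generated package importable
module = importlib.import_module("_grid2op_classes.MyClass_file")
MyClass = getattr(module, "MyClass")
print(MyClass)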
+ package_path, nm_ = os.path.split(output_file) + nm_, ext = os.path.splitext(nm_) + sub_repo, tmp_nm = os.path.split(package_path) + if sub_repo not in sys.path: + sys.path.append(sub_repo) + + sub_repo_mod = None + if tmp_nm == "_grid2op_classes": + # legacy "experimental_read_from_local_dir" + # issue was the module "_grid2op_classes" had the same name + # regardless of the environment, so grid2op was "confused" + path_init = os.path.join(sub_repo, "__init__.py") + if not os.path.exists(path_init): + try: + with open(path_init, "w", encoding='utf-8') as f: + f.write("# DO NOT REMOVE, automatically generated by grid2op") + except FileExistsError: + pass + env_path, env_nm = os.path.split(sub_repo) + if env_path not in sys.path: + sys.path.append(env_path) + if not package_path in sys.path: + sys.path.append(package_path) + super_supermodule = importlib.import_module(env_nm) + nm_ = f"{tmp_nm}.{nm_}" + tmp_nm = env_nm + super_module = importlib.import_module(tmp_nm, package=sub_repo_mod) + add_sys_path = os.path.dirname(super_module.__file__) + if not add_sys_path in sys.path: + sys.path.append(add_sys_path) + + if f"{tmp_nm}.{nm_}" in sys.modules: + cls_res = getattr(sys.modules[f"{tmp_nm}.{nm_}"], cls_other.__name__) + return str_import, cls_res + try: + module = importlib.import_module(f".{nm_}", package=tmp_nm) + except ModuleNotFoundError as exc_: + # invalidate the cache and reload the package in this case + importlib.invalidate_caches() + importlib.reload(super_module) + module = importlib.import_module(f".{nm_}", package=tmp_nm) + cls_res = getattr(module, cls_other.__name__) + return str_import, cls_res + + def generate_classes(self, *, local_dir_id=None, _guard=None, _is_base_env__=True, sys_path=None): """ Use with care, but can be incredibly useful ! 
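# Illustrative sketch, not part of the diff: typical workflow around `generate_classes()`
# (whose new signature appears just above), assuming the `experimental_read_from_local_dir`
# flag of `grid2op.make` mentioned earlier in this patch.
import grid2op

env_name = "l2rpn_case14_sandbox"
env = grid2op.make(env_name)
env.generate_classes()   # writes the grid-dependent classes under <env path>/_grid2op_classes

# later (possibly in another process), reuse the classes stored on disk
env2 = grid2op.make(env_name, experimental_read_from_local_dir=True)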
@@ -3890,15 +4177,14 @@ def generate_classes(self, _guard=None, _is_base_env__=True, sys_path=None): if self.__closed: return - # create the folder if _guard is not None: raise RuntimeError("use `env.generate_classes()` with no arguments !") - if type(self)._PATH_ENV is not None: + if type(self)._PATH_GRID_CLASSES is not None: raise RuntimeError( "This function should only be called ONCE without specifying that the classes " - "need to be read from disk (class attribute type(self)._PATH_ENV should be None)" + "need to be read from disk (class attribute type(self)._PATH_GRID_CLASSES should be None)" ) import shutil @@ -3907,54 +4193,96 @@ def generate_classes(self, _guard=None, _is_base_env__=True, sys_path=None): raise RuntimeError("Cannot generate file from a \"sub env\" " "(eg no the top level env) if I don't know the path of " "the top level environment.") - sys_path = os.path.join(self.get_path_env(), "_grid2op_classes") - + if local_dir_id is not None: + sys_path = os.path.join(self.get_path_env(), "_grid2op_classes", local_dir_id) + else: + sys_path = os.path.join(self.get_path_env(), "_grid2op_classes") + if _is_base_env__: if os.path.exists(sys_path): shutil.rmtree(sys_path) os.mkdir(sys_path) + + with open(os.path.join(sys_path, "__init__.py"), "w", encoding="utf-8") as f: + f.write(BASE_TXT_COPYRIGHT) # initialized the "__init__" file _init_txt = "" - mode = "w" - if not _is_base_env__: - _init_txt = BASE_TXT_COPYRIGHT + _init_txt - else: - # i am apppending to the __init__ file in case of obs_env - mode = "a" + mode = "a" # generate the classes - _init_txt += self._aux_gen_classes(type(self), sys_path) - _init_txt += self._aux_gen_classes(type(self.backend), sys_path) - _init_txt += self._aux_gen_classes( - self.backend._complete_action_class, sys_path + # for the environment + txt_ = self._aux_gen_classes(type(self), sys_path) + if txt_ is not None: + _init_txt += txt_ + + # for the forecast env (we do this even if it's not used) + from grid2op.Environment._forecast_env import _ForecastEnv + for_env_cls = _ForecastEnv.init_grid(type(self.backend), _local_dir_cls=self._local_dir_cls) + txt_ = self._aux_gen_classes(for_env_cls, sys_path, _add_class_output=False) + if txt_ is not None: + _init_txt += txt_ + + # for the backend + txt_, cls_res_bk = self._aux_gen_classes(type(self.backend), sys_path, _add_class_output=True) + if txt_ is not None: + _init_txt += txt_ + old_bk_cls = self.backend.__class__ + self.backend.__class__ = cls_res_bk + txt_, cls_res_complete_act = self._aux_gen_classes( + old_bk_cls._complete_action_class, sys_path, _add_class_output=True ) - _init_txt += self._aux_gen_classes(self._backend_action_class, sys_path) - _init_txt += self._aux_gen_classes(type(self.action_space), sys_path) - _init_txt += self._aux_gen_classes(self._actionClass, sys_path) - _init_txt += self._aux_gen_classes(self._complete_action_cls, sys_path) - _init_txt += self._aux_gen_classes(type(self.observation_space), sys_path) - _init_txt += self._aux_gen_classes(self._observationClass, sys_path) - _init_txt += self._aux_gen_classes( + if txt_ is not None: + _init_txt += txt_ + self.backend.__class__._complete_action_class = cls_res_complete_act + txt_, cls_res_bk_act = self._aux_gen_classes(self._backend_action_class, sys_path, _add_class_output=True) + if txt_ is not None: + _init_txt += txt_ + self._backend_action_class = cls_res_bk_act + self.backend.__class__.my_bk_act_class = cls_res_bk_act + + # for the other class + txt_ = self._aux_gen_classes(type(self.action_space), sys_path) + 
if txt_ is not None: + _init_txt += txt_ + txt_ = self._aux_gen_classes(self._actionClass, sys_path) + if txt_ is not None: + _init_txt += txt_ + txt_ = self._aux_gen_classes(self._complete_action_cls, sys_path) + if txt_ is not None: + _init_txt += txt_ + txt_ = self._aux_gen_classes(type(self.observation_space), sys_path) + if txt_ is not None: + _init_txt += txt_ + txt_ = self._aux_gen_classes(self._observationClass, sys_path) + if txt_ is not None: + _init_txt += txt_ + txt_ = self._aux_gen_classes( self._opponent_action_space.subtype, sys_path ) + if txt_ is not None: + _init_txt += txt_ # now do the same for the obs_env if _is_base_env__: - _init_txt += self._aux_gen_classes( + txt_ = self._aux_gen_classes( self._voltage_controler.action_space.subtype, sys_path ) + if txt_ is not None: + _init_txt += txt_ init_grid_tmp = self._observation_space.obs_env._init_grid_path self._observation_space.obs_env._init_grid_path = self._init_grid_path - self._observation_space.obs_env.generate_classes(_is_base_env__=False, sys_path=sys_path) + self._observation_space.obs_env.generate_classes(local_dir_id=local_dir_id, + _is_base_env__=False, + sys_path=sys_path) self._observation_space.obs_env._init_grid_path = init_grid_tmp # now write the __init__ file _init_txt += "\n" with open(os.path.join(sys_path, "__init__.py"), mode, encoding="utf-8") as f: f.write(_init_txt) - + def __del__(self): """when the environment is garbage collected, free all the memory, including cross reference to itself in the observation space.""" if hasattr(self, "_BaseEnv__closed") and not self.__closed: @@ -4112,4 +4440,17 @@ def _check_rules_correct(legalActClass): 'grid2op.BaseRules class, type provided is "{}"'.format( type(legalActClass) ) - ) \ No newline at end of file + ) + + def classes_are_in_files(self) -> bool: + """ + + Whether the classes created when this environment has been made are + store on the hard drive (will return `True`) or not. + + .. note:: + This will become the default behaviour in future grid2op versions. + + See :ref:`troubleshoot_pickle` for more information. 
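# Illustrative sketch, not part of the diff: expected usage of the new helper documented
# just above (environment name is only an example).
import grid2op

env = grid2op.make("l2rpn_case14_sandbox", test=True)
if env.classes_are_in_files():
    print("grid classes are stored on the hard drive (safer for pickling / multiprocessing)")
else:
    print("grid classes only live in this process")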
+ """ + return self._read_from_local_dir is not None diff --git a/grid2op/Environment/baseMultiProcessEnv.py b/grid2op/Environment/baseMultiProcessEnv.py index 37d571702..b2e7aecdc 100644 --- a/grid2op/Environment/baseMultiProcessEnv.py +++ b/grid2op/Environment/baseMultiProcessEnv.py @@ -87,7 +87,9 @@ def init_env(self): """ self.space_prng = np.random.RandomState() self.space_prng.seed(seed=self.seed_used) - self.backend = self.env_params["_raw_backend_class"]() + self.backend = self.env_params["_raw_backend_class"](**self.env_params["_backend_kwargs"]) + del self.env_params["_backend_kwargs"] + with warnings.catch_warnings(): # warnings have bee already sent in the main process, no need to resend them warnings.filterwarnings("ignore") @@ -206,6 +208,7 @@ def run(self): self.remote.send(self.env._time_step) elif cmd == "set_filter": self.env.chronics_handler.set_filter(data) + self.env.chronics_handler.reset() self.remote.send(None) elif cmd == "set_id": self.env.set_id(data) @@ -288,7 +291,7 @@ def __init__(self, envs, obs_as_class=True, return_info=True, logger=None): max_int = np.iinfo(dt_int).max _remotes, _work_remotes = zip(*[Pipe() for _ in range(self.nb_env)]) - env_params = [sub_env.get_kwargs(with_backend=False) for sub_env in envs] + env_params = [sub_env.get_kwargs(with_backend=False, with_backend_kwargs=True) for sub_env in envs] self._ps = [ RemoteEnv( env_params=env_, @@ -322,6 +325,7 @@ def __init__(self, envs, obs_as_class=True, return_info=True, logger=None): self.obs_as_class = obs_as_class # self.__return_info = return_info self._waiting = True + self._read_from_local_dir = env._read_from_local_dir def _send_act(self, actions): for remote, action in zip(self._remotes, actions): diff --git a/grid2op/Environment/environment.py b/grid2op/Environment/environment.py index f7047204a..c41871bf0 100644 --- a/grid2op/Environment/environment.py +++ b/grid2op/Environment/environment.py @@ -10,6 +10,7 @@ import warnings import numpy as np import re +from typing import Optional, Union, Literal import grid2op from grid2op.Opponent import OpponentSpace @@ -31,12 +32,23 @@ from grid2op.Environment.baseEnv import BaseEnv from grid2op.Opponent import BaseOpponent, NeverAttackBudget from grid2op.operator_attention import LinearAttentionBudget +from grid2op.Space import DEFAULT_N_BUSBAR_PER_SUB +from grid2op.typing_variables import RESET_OPTIONS_TYPING, N_BUSBAR_PER_SUB_TYPING +from grid2op.MakeEnv.PathUtils import USE_CLASS_IN_FILE class Environment(BaseEnv): """ This class is the grid2op implementation of the "Environment" entity in the RL framework. + .. danger:: + + Long story short, once a environment is deleted, you cannot use anything it "holds" including, + but not limited to the capacity to perform `obs.simulate(...)` even if the `obs` is still + referenced. + + See :ref:`danger-env-ownership` (first danger block). + Attributes ---------- @@ -63,7 +75,7 @@ class Environment(BaseEnv): """ - REGEX_SPLIT = r"^[a-zA-Z0-9]*$" + REGEX_SPLIT = r"^[a-zA-Z0-9_\\.]*$" def __init__( self, @@ -73,6 +85,7 @@ def __init__( backend, parameters, name="unknown", + n_busbar : N_BUSBAR_PER_SUB_TYPING=DEFAULT_N_BUSBAR_PER_SUB, names_chronics_to_backend=None, actionClass=TopologyAction, observationClass=CompleteObservation, @@ -105,8 +118,11 @@ def __init__( _init_obs=None, _raw_backend_class=None, _compat_glop_version=None, - _read_from_local_dir=True, # TODO runner and all here ! 
+ _read_from_local_dir=None, _is_test=False, + _allow_loaded_backend=False, + _local_dir_cls=None, # only set at the first call to `make(...)` after should be false + _overload_name_multimix=None, ): BaseEnv.__init__( self, @@ -139,32 +155,51 @@ def __init__( observation_bk_kwargs=observation_bk_kwargs, highres_sim_counter=highres_sim_counter, update_obs_after_reward=_update_obs_after_reward, + n_busbar=n_busbar, # TODO n_busbar_per_sub different num per substations: read from a config file maybe (if not provided by the user) + name=name, + _raw_backend_class=_raw_backend_class if _raw_backend_class is not None else type(backend), _init_obs=_init_obs, _is_test=_is_test, # is this created with "test=True" # TODO not implemented !! + _local_dir_cls=_local_dir_cls, + _read_from_local_dir=_read_from_local_dir, ) + if name == "unknown": warnings.warn( 'It is NOT recommended to create an environment without "make" and EVEN LESS ' "to use an environment without a name..." ) - self.name = name - self._read_from_local_dir = _read_from_local_dir + + if _overload_name_multimix is not None: + # this means that the "make" call is issued from the + # creation of a MultiMix. + # So I use the base name instead. + self.name = "".join(_overload_name_multimix[2:]) + self.multimix_mix_name = name + self._overload_name_multimix = _overload_name_multimix + else: + self.name = name + self._overload_name_multimix = None + self.multimix_mix_name = None + # to remember if the user specified a "max_iter" at some point + self._max_iter = chronics_handler.max_iter # for all episode, set in the chronics_handler or by a call to `env.set_max_iter` + self._max_step = None # for the current episode + + #: starting grid2Op 1.11 classes are stored on the disk when an environment is created + #: so the "environment" is created twice (one to generate the class and then correctly to load them) + self._allow_loaded_backend : bool = _allow_loaded_backend - # for gym compatibility (initialized below) - # self.action_space = None - # self.observation_space = None + # for gym compatibility (action_spacen and observation_space initialized below) self.reward_range = None self._viewer = None self.metadata = None self.spec = None - if _raw_backend_class is None: - self._raw_backend_class = type(backend) - else: - self._raw_backend_class = _raw_backend_class - self._compat_glop_version = _compat_glop_version + # needs to be done before "_init_backend" otherwise observationClass is not defined in the + # observation space (real_env_kwargs) + self._observationClass_orig = observationClass # for plotting self._init_backend( chronics_handler, @@ -175,8 +210,6 @@ def __init__( rewardClass, legalActClass, ) - self._actionClass_orig = actionClass - self._observationClass_orig = observationClass def _init_backend( self, @@ -219,26 +252,39 @@ def _init_backend( 'grid2op.Backend class, type provided is "{}"'.format(type(backend)) ) self.backend = backend - if self.backend.is_loaded and self._init_obs is None: + if self.backend.is_loaded and self._init_obs is None and not self._allow_loaded_backend: raise EnvError( "Impossible to use the same backend twice. Please create your environment with a " "new backend instance (new object)." ) - - need_process_backend = False + self._actionClass_orig = actionClass + + need_process_backend = False if not self.backend.is_loaded: + if hasattr(self.backend, "init_pp_backend") and self.backend.init_pp_backend is not None: + # hack for lightsim2grid ... 
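# Illustrative sketch, not part of the diff: the new `n_busbar` argument above is normally
# supplied through `grid2op.make` (assumed here); values other than the default 2 change
# how many busbars each substation exposes.
import grid2op

env = grid2op.make("l2rpn_case14_sandbox", test=True, n_busbar=3)
print(type(env).n_busbar_per_sub)   # expected: 3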
+ if type(self.backend.init_pp_backend)._INIT_GRID_CLS is not None: + type(self.backend.init_pp_backend)._INIT_GRID_CLS._clear_grid_dependant_class_attributes() + type(self.backend.init_pp_backend)._clear_grid_dependant_class_attributes() + # usual case: the backend is not loaded # NB it is loaded when the backend comes from an observation for # example - if self._read_from_local_dir: + if self._read_from_local_dir is not None: # test to support pickle conveniently - self.backend._PATH_ENV = self.get_path_env() + # type(self.backend)._PATH_GRID_CLASSES = self.get_path_env() + self.backend._PATH_GRID_CLASSES = self._read_from_local_dir # all the above should be done in this exact order, otherwise some weird behaviour might occur # this is due to the class attribute - self.backend.set_env_name(self.name) + type(self.backend).set_env_name(self.name) + type(self.backend).set_n_busbar_per_sub(self._n_busbar) + if self._compat_glop_version is not None: + type(self.backend).glop_version = self._compat_glop_version self.backend.load_grid( self._init_grid_path ) # the real powergrid of the environment + self.backend.load_storage_data(self.get_path_env()) + self.backend._fill_names_obj() try: self.backend.load_redispacthing_data(self.get_path_env()) except BackendError as exc_: @@ -246,21 +292,21 @@ def _init_backend( warnings.warn(f"Impossible to load redispatching data. This is not an error but you will not be able " f"to use all grid2op functionalities. " f"The error was: \"{exc_}\"") - self.backend.load_storage_data(self.get_path_env()) exc_ = self.backend.load_grid_layout(self.get_path_env()) if exc_ is not None: warnings.warn( f"No layout have been found for you grid (or the layout provided was corrupted). You will " f'not be able to use the renderer, plot the grid etc. 
The error was "{exc_}"' ) - self.backend.is_loaded = True # alarm set up self.load_alarm_data() self.load_alert_data() # to force the initialization of the backend to the proper type - self.backend.assert_grid_correct() + self.backend.assert_grid_correct( + _local_dir_cls=self._local_dir_cls) + self.backend.is_loaded = True need_process_backend = True self._handle_compat_glop_version(need_process_backend) @@ -312,28 +358,29 @@ def _init_backend( ) # action affecting the grid that will be made by the agent - bk_type = type( - self.backend - ) # be careful here: you need to initialize from the class, and not from the object + # be careful here: you need to initialize from the class, and not from the object + bk_type = type(self.backend) self._rewardClass = rewardClass - self._actionClass = actionClass.init_grid(gridobj=bk_type) + self._actionClass = actionClass.init_grid(gridobj=bk_type, _local_dir_cls=self._local_dir_cls) self._actionClass._add_shunt_data() self._actionClass._update_value_set() - self._observationClass = observationClass.init_grid(gridobj=bk_type) + self._observationClass = observationClass.init_grid(gridobj=bk_type, _local_dir_cls=self._local_dir_cls) - self._complete_action_cls = CompleteAction.init_grid(gridobj=bk_type) + self._complete_action_cls = CompleteAction.init_grid(gridobj=bk_type, _local_dir_cls=self._local_dir_cls) - self._helper_action_class = ActionSpace.init_grid(gridobj=bk_type) + self._helper_action_class = ActionSpace.init_grid(gridobj=bk_type, _local_dir_cls=self._local_dir_cls) self._action_space = self._helper_action_class( gridobj=bk_type, actionClass=actionClass, legal_action=self._game_rules.legal_action, + _local_dir_cls=self._local_dir_cls ) # action that affect the grid made by the environment. self._helper_action_env = self._helper_action_class( gridobj=bk_type, actionClass=CompleteAction, legal_action=self._game_rules.legal_action, + _local_dir_cls=self._local_dir_cls, ) # handles input data @@ -355,12 +402,14 @@ def _init_backend( self.name_sub, names_chronics_to_backend=names_chronics_to_backend, ) + # new in grdi2op 1.10.2: used + self.chronics_handler.action_space = self._helper_action_env self._names_chronics_to_backend = names_chronics_to_backend self.delta_time_seconds = dt_float(self.chronics_handler.time_interval.seconds) # this needs to be done after the chronics handler: rewards might need information # about the chronics to work properly. 
- self._helper_observation_class = ObservationSpace.init_grid(gridobj=bk_type) + self._helper_observation_class = ObservationSpace.init_grid(gridobj=bk_type, _local_dir_cls=self._local_dir_cls) # FYI: this try to copy the backend if it fails it will modify the backend # and the environment to force the deactivation of the # forecasts @@ -372,7 +421,8 @@ def _init_backend( env=self, kwargs_observation=self._kwargs_observation, observation_bk_class=self._observation_bk_class, - observation_bk_kwargs=self._observation_bk_kwargs + observation_bk_kwargs=self._observation_bk_kwargs, + _local_dir_cls=self._local_dir_cls ) # test to make sure the backend is consistent with the chronics generator @@ -395,6 +445,7 @@ def _init_backend( gridobj=bk_type, controler_backend=self.backend, actionSpace_cls=self._helper_action_class, + _local_dir_cls=self._local_dir_cls ) # create the opponent @@ -413,17 +464,44 @@ def _init_backend( self._reset_redispatching() self._reward_to_obs = {} do_nothing = self._helper_action_env({}) + + # needs to be done at the end, but before the first "step" is called + self._observation_space.set_real_env_kwargs(self) + + # see issue https://github.com/rte-france/Grid2Op/issues/617 + # thermal limits are set AFTER this initial step + _no_overflow_disconnection = self._no_overflow_disconnection + self._no_overflow_disconnection = True *_, fail_to_start, info = self.step(do_nothing) + self._no_overflow_disconnection = _no_overflow_disconnection + if fail_to_start: raise Grid2OpException( "Impossible to initialize the powergrid, the powerflow diverge at iteration 0. " "Available information are: {}".format(info) - ) + ) from info["exception"][0] # test the backend returns object of the proper size if need_process_backend: - self.backend.assert_grid_correct_after_powerflow() + + # hack to fix an issue with lightsim2grid... + # (base class is not reset correctly, will be fixed ASAP) + base_cls_ls = None + if hasattr(self.backend, "init_pp_backend") and self.backend.init_pp_backend is not None: + base_cls_ls = type(self.backend.init_pp_backend) + self.backend.assert_grid_correct_after_powerflow() + + # hack to fix an issue with lightsim2grid... + # (base class is not reset correctly, will be fixed ASAP) + if hasattr(self.backend, "init_pp_backend") and self.backend.init_pp_backend is not None: + if self.backend._INIT_GRID_CLS is not None: + # the init grid class has already been properly computed + self.backend._INIT_GRID_CLS._clear_grid_dependant_class_attributes() + elif base_cls_ls is not None: + # we need to clear the class of the original type as it has not been properly computed + base_cls_ls._clear_grid_dependant_class_attributes() + # for gym compatibility self.reward_range = self._reward_helper.range() self._viewer = None @@ -437,7 +515,7 @@ def _init_backend( # reset everything to be consistent self._reset_vectors_and_timings() - + def max_episode_duration(self): """ Return the maximum duration (in number of steps) of the current episode. 
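# Illustrative sketch, not part of the diff: with the `_max_step` short-circuit added just
# below, `max_episode_duration()` reflects a "max step" value given at reset time (that
# reset option is documented later in this patch).
import grid2op

env_name = "l2rpn_case14_sandbox"
env = grid2op.make(env_name)
obs = env.reset(options={"max step": 288})
print(env.max_episode_duration())   # expected: 288, for this episode only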
@@ -448,20 +526,97 @@ def max_episode_duration(self): to the maximum 32 bit integer (usually `2147483647`) """ + if self._max_step is not None: + return self._max_step tmp = dt_int(self.chronics_handler.max_episode_duration()) if tmp < 0: tmp = dt_int(np.iinfo(dt_int).max) return tmp + def _aux_check_max_iter(self, max_iter): + try: + max_iter_int = int(max_iter) + except ValueError as exc_: + raise EnvError("Impossible to set 'max_iter' by providing something that is not an integer.") from exc_ + if max_iter_int != max_iter: + raise EnvError("Impossible to set 'max_iter' by providing something that is not an integer.") + if max_iter_int < 1 and max_iter_int != -1: + raise EnvError("'max_iter' should be an int >= 1 or -1") + return max_iter_int + def set_max_iter(self, max_iter): """ - + Set the maximum duration of an episode for all the next episodes. + + .. seealso:: + The option `max step` when calling the :func:`Environment.reset` function + used like `obs = env.reset(options={"max step": 288})` (see examples of + `env.reset` for more information) + + .. note:: + The real maximum duration of a duration depends on this parameter but also on the + size of the time series used. For example, if you use an environment with + time series lasting 8064 steps and you call `env.set_max_iter(9000)` + the maximum number of iteration will still be 8064. + + .. warning:: + It only has an impact on future episode. Said differently it also has an impact AFTER + `env.reset` has been called. + + .. danger:: + The usage of both :func:`BaseEnv.fast_forward_chronics` and :func:`Environment.set_max_iter` + is not recommended at all and might not behave correctly. Please use `env.reset` with + `obs = env.reset(options={"max step": xxx, "init ts": yyy})` for a correct behaviour. + Parameters ---------- max_iter: ``int`` - The maximum number of iteration you can do before reaching the end of the episode. Set it to "-1" for + The maximum number of iterations you can do before reaching the end of the episode. Set it to "-1" for possibly infinite episode duration. + + Examples + -------- + It can be used like this: + + .. code-block:: python + + import grid2op + env_name = "l2rpn_case14_sandbox" + + env = grid2op.make(env_name) + + obs = env.reset() + obs.max_step == 8064 # default for this environment + + env.set_max_iter(288) + # no impact here + + obs = env.reset() + obs.max_step == 288 + + # the limitation still applies to the next episode + obs = env.reset() + obs.max_step == 288 + + If you want to "unset" your limitation, you can do: + + .. code-block:: python + + env.set_max_iter(-1) + obs = env.reset() + obs.max_step == 8064 + + Finally, you cannot limit it to something larger than the duration + of the time series of the environment: + + .. code-block:: python + + env.set_max_iter(9000) + obs = env.reset() + obs.max_step == 8064 + # the call to env.set_max_iter has no impact here + Notes ------- @@ -469,7 +624,9 @@ def set_max_iter(self, max_iter): more information """ - self.chronics_handler.set_max_iter(max_iter) + max_iter_int = self._aux_check_max_iter(max_iter) + self._max_iter = max_iter_int + self.chronics_handler._set_max_iter(max_iter_int) @property def _helper_observation(self): @@ -490,81 +647,10 @@ def _handle_compat_glop_version(self, need_process_backend): "read back data (for example with EpisodeData) that were stored with previous " "grid2op version." 
) - if need_process_backend: - self.backend.set_env_name(f"{self.name}_{self._compat_glop_version}") - cls_bk = type(self.backend) - cls_bk.glop_version = self._compat_glop_version - if cls_bk.glop_version == cls_bk.BEFORE_COMPAT_VERSION: - # oldest version: no storage and no curtailment available - # deactivate storage - # recompute the topology vector (more or less everything need to be adjusted... - stor_locs = [pos for pos in cls_bk.storage_pos_topo_vect] - for stor_loc in sorted(stor_locs, reverse=True): - for vect in [ - cls_bk.load_pos_topo_vect, - cls_bk.gen_pos_topo_vect, - cls_bk.line_or_pos_topo_vect, - cls_bk.line_ex_pos_topo_vect, - ]: - vect[vect >= stor_loc] -= 1 - - # deals with the "sub_pos" vector - for sub_id in range(cls_bk.n_sub): - if (cls_bk.storage_to_subid == sub_id).any(): - stor_ids = np.where(cls_bk.storage_to_subid == sub_id)[0] - stor_locs = cls_bk.storage_to_sub_pos[stor_ids] - for stor_loc in sorted(stor_locs, reverse=True): - for vect, sub_id_me in zip( - [ - cls_bk.load_to_sub_pos, - cls_bk.gen_to_sub_pos, - cls_bk.line_or_to_sub_pos, - cls_bk.line_ex_to_sub_pos, - ], - [ - cls_bk.load_to_subid, - cls_bk.gen_to_subid, - cls_bk.line_or_to_subid, - cls_bk.line_ex_to_subid, - ], - ): - vect[(vect >= stor_loc) & (sub_id_me == sub_id)] -= 1 - - # remove storage from the number of element in the substation - for sub_id in range(cls_bk.n_sub): - cls_bk.sub_info[sub_id] -= (cls_bk.storage_to_subid == sub_id).sum() - # remove storage from the total number of element - cls_bk.dim_topo -= cls_bk.n_storage - - # recompute this private member - cls_bk._topo_vect_to_sub = np.repeat( - np.arange(cls_bk.n_sub), repeats=cls_bk.sub_info - ) - self.backend._topo_vect_to_sub = np.repeat( - np.arange(cls_bk.n_sub), repeats=cls_bk.sub_info - ) - new_grid_objects_types = cls_bk.grid_objects_types - new_grid_objects_types = new_grid_objects_types[ - new_grid_objects_types[:, cls_bk.STORAGE_COL] == -1, : - ] - cls_bk.grid_objects_types = 1 * new_grid_objects_types - self.backend.grid_objects_types = 1 * new_grid_objects_types - - # erase all trace of storage units - cls_bk.set_no_storage() - Environment.deactivate_storage(self.backend) - - if need_process_backend: - # the following line must be called BEFORE "self.backend.assert_grid_correct()" ! - self.backend.storage_deact_for_backward_comaptibility() - - # and recomputes everything while making sure everything is consistent - self.backend.assert_grid_correct() - type(self.backend)._topo_vect_to_sub = np.repeat( - np.arange(cls_bk.n_sub), repeats=cls_bk.sub_info - ) - type(self.backend).grid_objects_types = new_grid_objects_types + if need_process_backend: + # the following line must be called BEFORE "self.backend.assert_grid_correct()" ! + self.backend.storage_deact_for_backward_comaptibility() def _voltage_control(self, agent_action, prod_v_chronics): """ @@ -672,7 +758,7 @@ def simulate(self, action): """ return self.get_obs().simulate(action) - def set_id(self, id_): + def set_id(self, id_: Union[int, str]) -> None: """ Set the id that will be used at the next call to :func:`Environment.reset`. @@ -680,6 +766,29 @@ def set_id(self, id_): **NB** The environment need to be **reset** for this to take effect. + .. versionchanged:: 1.6.4 + `id_` can now be a string instead of an integer. You can call something like + `env.set_id("0000")` or `env.set_id("Scenario_april_000")` + or `env.set_id("2050-01-03_0")` (depending on your environment) + to use the right time series. + + .. 
seealso:: + function :func:`Environment.reset` for extra information + + .. versionchanged:: 1.9.8 + Starting from version 1.9.8 you can directly set the time serie id when calling + reset. + + .. warning:: + If the "time serie generator" you use is on standard (*eg* it is random in some sense) + and if you want fully reproducible results, you should first call `env.set_id(...)` and + then call `env.seed(...)` (and of course `env.reset()`) + + Calling `env.seed(...)` and then `env.set_id(...)` might not behave the way you want. + + In this case, it is much better to use the function + `reset(seed=..., options={"time serie id": ...})` directly. + Parameters ---------- id_: ``int`` @@ -812,7 +921,9 @@ def __str__(self): return "<{} instance named {}>".format(type(self).__name__, self.name) # TODO be closer to original gym implementation - def reset_grid(self): + def reset_grid(self, + init_act_opt : Optional[BaseAction]=None, + method:Literal["combine", "ignore"]="combine"): """ INTERNAL @@ -827,23 +938,55 @@ def reset_grid(self): """ self.backend.reset( - self._init_grid_path + self._init_grid_path, ) # the real powergrid of the environment - self.backend.assert_grid_correct() + # self.backend.assert_grid_correct() if self._thermal_limit_a is not None: self.backend.set_thermal_limit(self._thermal_limit_a.astype(dt_float)) self._backend_action = self._backend_action_class() - self.nb_time_step = -1 # to have init obs at step 1 - do_nothing = self._helper_action_env({}) - *_, fail_to_start, info = self.step(do_nothing) + self.nb_time_step = -1 # to have init obs at step 1 (and to prevent 'setting to proper state' "action" to be illegal) + init_action = None + if not self._parameters.IGNORE_INITIAL_STATE_TIME_SERIE: + # load the initial state from the time series (default) + # TODO logger: log that + init_action : BaseAction = self.chronics_handler.get_init_action(self._names_chronics_to_backend) + else: + # do as if everything was connected to busbar 1 + # TODO logger: log that + init_action = self._helper_action_env({"set_bus": np.ones(type(self).dim_topo, dtype=dt_int)}) + if type(self).shunts_data_available: + init_action += self._helper_action_env({"shunt": {"set_bus": np.ones(type(self).n_shunt, dtype=dt_int)}}) + if init_action is None: + # default behaviour for grid2op < 1.10.2 + init_action = self._helper_action_env({}) + else: + # remove the "change part" of the action + init_action.remove_change() + + if init_act_opt is not None: + init_act_opt.remove_change() + if method == "combine": + init_action._add_act_and_remove_line_status_only_set(init_act_opt) + elif method == "ignore": + init_action = init_act_opt + else: + raise Grid2OpException(f"kwargs `method` used to set the initial state of the grid " + f"is not understood (use one of `combine` or `ignore` and " + f"not `{method}`)") + init_action._set_topo_vect.nonzero() + *_, fail_to_start, info = self.step(init_action) if fail_to_start: raise Grid2OpException( "Impossible to initialize the powergrid, the powerflow diverge at iteration 0. " "Available information are: {}".format(info) ) - + if info["exception"] and init_action.can_affect_something(): + raise Grid2OpException(f"There has been an error at the initialization, most likely due to a " + f"incorrect 'init state'. You need to change either the time series used (chronics, chronics_handler, " + f"gridvalue, etc.) or the 'init state' option provided in " + f"`env.reset(..., options={'init state': XXX, ...})`. 
Error was: {info['exception']}") # assign the right self._observation_space.set_real_env_kwargs(self) @@ -862,7 +1005,22 @@ def add_text_logger(self, logger=None): self.logger = logger return self - def reset(self) -> BaseObservation: + def _aux_get_skip_ts(self, options): + skip_ts = None + if options is not None and "init ts" in options: + try: + skip_ts = int(options["init ts"]) + except ValueError as exc_: + raise Grid2OpException("In `env.reset` the kwargs `init ts` should be convertible to an int") from exc_ + + if skip_ts != options["init ts"]: + raise Grid2OpException(f"In `env.reset` the kwargs `init ts` should be convertible to an int, found {options['init ts']}") + return skip_ts + + def reset(self, + *, + seed: Union[int, None] = None, + options: RESET_OPTIONS_TYPING = None) -> BaseObservation: """ Reset the environment to a clean state. It will reload the next chronics if any. And reset the grid to a clean state. @@ -871,7 +1029,22 @@ def reset(self) -> BaseObservation: to ensure the episode is fully over. This method should be called only at the end of an episode. - + + Parameters + ---------- + seed: int + The seed to used (new in version 1.9.8), see examples for more details. Ignored if not set (meaning no seeds will + be used, experiments might not be reproducible) + + options: dict + Some options to "customize" the reset call. For example specifying the "time serie id" (grid2op >= 1.9.8) to use + or the "initial state of the grid" (grid2op >= 1.10.2) or to + start the episode at some specific time in the time series (grid2op >= 1.10.3) with the + "init ts" key. + + See examples for more information about this. Ignored if + not set. + Examples -------- The standard "gym loop" can be done with the following code: @@ -881,17 +1054,255 @@ def reset(self) -> BaseObservation: import grid2op # create the environment - env = grid2op.make("l2rpn_case14_sandbox") + env_name = "l2rpn_case14_sandbox" + env = grid2op.make(env_name) - # and now you can "render" (plot) the state of the grid + # start a new episode obs = env.reset() done = False reward = env.reward_range[0] while not done: action = agent.act(obs, reward, done) obs, reward, done, info = env.step(action) + + .. versionadded:: 1.9.8 + It is now possible to set the seed and the time series you want to use at the new + episode by calling `env.reset(seed=..., options={"time serie id": ...})` + + Before version 1.9.8, if you wanted to use a fixed seed, you would need to (see + doc of :func:`grid2op.Environment.BaseEnv.seed` ): + + .. code-block:: python + + seed = ... + env.seed(seed) + obs = env.reset() + ... + + Starting from version 1.9.8 you can do this in one call: + + .. code-block:: python + + seed = ... + obs = env.reset(seed=seed) + + For the "time series id" it is the same concept. Before you would need to do (see + doc of :func:`Environment.set_id` for more information ): + + .. code-block:: python + + time_serie_id = ... + env.set_id(time_serie_id) + obs = env.reset() + ... + + And now (from version 1.9.8) you can more simply do: + + .. code-block:: python + + time_serie_id = ... + obs = env.reset(options={"time serie id": time_serie_id}) + ... + + .. versionadded:: 1.10.2 + + Another feature has been added in version 1.10.2, which is the possibility to set the + grid to a given "topological" state at the first observation (before this version, + you could only retrieve an observation with everything connected together). 
+ + In grid2op 1.10.2, you can do that by using the keys `"init state"` in the "options" kwargs of + the reset function. The value associated to this key should be dictionnary that can be + converted to a non ambiguous grid2op action using an "action space". + + .. note:: + The "action space" used here is not the action space of the agent. It's an "action + space" that uses a :func:`grid2op.Action.Action.BaseAction` class meaning you can do any + type of action, on shunts, on topology, on line status etc. even if the agent is not + allowed to. + + Likewise, nothing check if this action is legal or not. + + You can use it like this: + + .. code-block:: python + + # to start an episode with a line disconnected, you can do: + init_state_dict = {"set_line_status": [(0, -1)]} + obs = env.reset(options={"init state": init_state_dict}) + obs.line_status[0] is False + + # to start an episode with a different topolovy + init_state_dict = {"set_bus": {"lines_or_id": [(0, 2)], "lines_ex_id": [(3, 2)]}} + obs = env.reset(options={"init state": init_state_dict}) + + .. note:: + Since grid2op version 1.10.2, there is also the possibility to set the "initial state" + of the grid directly in the time series. The priority is always given to the + argument passed in the "options" value. + + Concretely if, in the "time series" (formelly called "chronics") provides an action would change + the topology of substation 1 and 2 (for example) and you provide an action that disable the + line 6, then the initial state will see substation 1 and 2 changed (as in the time series) + and line 6 disconnected. + + Another example in this case: if the action you provide would change topology of substation 2 and 4 + then the initial state (after `env.reset`) will give: + + - substation 1 as in the time serie + - substation 2 as in "options" + - substation 4 as in "options" + + .. note:: + Concerning the previously described behaviour, if you want to ignore the data in the + time series, you can add : `"method": "ignore"` in the dictionary describing the action. + In this case the action in the time series will be totally ignored and the initial + state will be fully set by the action passed in the "options" dict. + + An example is: + + .. code-block:: python + + init_state_dict = {"set_line_status": [(0, -1)], "method": "force"} + obs = env.reset(options={"init state": init_state_dict}) + obs.line_status[0] is False + + .. versionadded:: 1.10.3 + + Another feature has been added in version 1.10.3, the possibility to skip the + some steps of the time series and starts at some given steps. + + The time series often always start at a given day of the week (*eg* Monday) + and at a given time (*eg* midnight). But for some reason you notice that your + agent performs poorly on other day of the week or time of the day. This might be + because it has seen much more data from Monday at midnight that from any other + day and hour of the day. + + To alleviate this issue, you can now easily reset an episode and ask grid2op + to start this episode after xxx steps have "passed". + + Concretely, you can do it with: + + .. code-block:: python + + import grid2op + env_name = "l2rpn_case14_sandbox" + env = grid2op.make(env_name) + + obs = env.reset(options={"init ts": 1}) + + Doing that your agent will start its episode not at midnight (which + is the case for this environment), but at 00:05 + + If you do: + + .. 
code-block:: python + + obs = env.reset(options={"init ts": 12}) + + In this case, you start the episode at 01:00 and not at midnight (you + start at what would have been the 12th steps) + + If you want to start the "next day", you can do: + + .. code-block:: python + + obs = env.reset(options={"init ts": 288}) + + etc. + + .. note:: + On this feature, if a powerline is on soft overflow (meaning its flow is above + the limit but below the :attr:`grid2op.Parameters.Parameters.HARD_OVERFLOW_THRESHOLD` * `the limit`) + then it is still connected (of course) and the counter + :attr:`grid2op.Observation.BaseObservation.timestep_overflow` is at 0. + + If a powerline is on "hard overflow" (meaning its flow would be above + :attr:`grid2op.Parameters.Parameters.HARD_OVERFLOW_THRESHOLD` * `the limit`), then, as it is + the case for a "normal" (without options) reset, this line is disconnected, but can be reconnected + directly (:attr:`grid2op.Observation.BaseObservation.time_before_cooldown_line` == 0) + + .. seealso:: + The function :func:`Environment.fast_forward_chronics` for an alternative usage (that will be + deprecated at some point) + + Yet another feature has been added in grid2op version 1.10.3 in this `env.reset` function. It is + the capacity to limit the duration of an episode. + + .. code-block:: python + + import grid2op + env_name = "l2rpn_case14_sandbox" + env = grid2op.make(env_name) + + obs = env.reset(options={"max step": 288}) + + This will limit the duration to 288 steps (1 day), meaning your agent + will have successfully managed the entire episode if it manages to keep + the grid in a safe state for a whole day (depending on the environment you are + using the default duration is either one week - roughly 2016 steps or 4 weeks) + + .. note:: + This option only affect the current episode. It will have no impact on the + next episode (after reset) + + For example: + + .. code-block:: python + + obs = env.reset() + obs.max_step == 8064 # default for this environment + + obs = env.reset(options={"max step": 288}) + obs.max_step == 288 # specified by the option + + obs = env.reset() + obs.max_step == 8064 # retrieve the default behaviour + + .. seealso:: + The function :func:`Environment.set_max_iter` for an alternative usage with the different + that `set_max_iter` is permenanent: it impacts all the future episodes and not only + the next one. + """ - super().reset() + # process the "options" kwargs + # (if there is an init state then I need to process it to remove the + # some keys) + self._max_step = None + method = "combine" + init_state = None + skip_ts = self._aux_get_skip_ts(options) + max_iter_int = None + if options is not None and "init state" in options: + act_as_dict = options["init state"] + if isinstance(act_as_dict, dict): + if "method" in act_as_dict: + method = act_as_dict["method"] + del act_as_dict["method"] + init_state : BaseAction = self._helper_action_env(act_as_dict) + elif isinstance(act_as_dict, BaseAction): + init_state = act_as_dict + else: + raise Grid2OpException("`init state` kwargs in `env.reset(, options=XXX) should either be a " + "grid2op action (instance of grid2op.Action.BaseAction) or a dictionaray " + f"representing an action. 
You provided {act_as_dict} which is a {type(act_as_dict)}") + ambiguous, except_tmp = init_state.is_ambiguous() + if ambiguous: + raise Grid2OpException("You provided an invalid (ambiguous) action to set the 'init state'") from except_tmp + init_state.remove_change() + + super().reset(seed=seed, options=options) + + if options is not None and "max step" in options: + # use the "max iter" provided in the options + max_iter_int = self._aux_check_max_iter(options["max step"]) + if skip_ts is not None: + max_iter_chron = max_iter_int + skip_ts + else: + max_iter_chron = max_iter_int + self.chronics_handler._set_max_iter(max_iter_chron) + else: + # reset previous max iter to value set with `env.set_max_iter(...)` (or -1 by default) + self.chronics_handler._set_max_iter(self._max_iter) self.chronics_handler.next_chronics() self.chronics_handler.initialize( self.backend.name_load, @@ -900,14 +1311,33 @@ def reset(self) -> BaseObservation: self.backend.name_sub, names_chronics_to_backend=self._names_chronics_to_backend, ) + if max_iter_int is not None: + self._max_step = min(max_iter_int, self.chronics_handler.real_data.max_iter - (skip_ts if skip_ts is not None else 0)) + else: + self._max_step = None self._env_modification = None self._reset_maintenance() self._reset_redispatching() self._reset_vectors_and_timings() # it need to be done BEFORE to prevent cascading failure when there has been - self.reset_grid() + + self.reset_grid(init_state, method) if self.viewer_fig is not None: del self.viewer_fig self.viewer_fig = None + + if skip_ts is not None: + self._reset_vectors_and_timings() + + if skip_ts < 1: + raise Grid2OpException(f"In `env.reset` the kwargs `init ts` should be an int >= 1, found {options['init ts']}") + if skip_ts == 1: + self._init_obs = None + self.step(self.action_space()) + elif skip_ts == 2: + self.fast_forward_chronics(1) + else: + self.fast_forward_chronics(skip_ts) + # if True, then it will not disconnect lines above their thermal limits self._reset_vectors_and_timings() # and it needs to be done AFTER to have proper timings at tbe beginning # the attention budget is reset above @@ -986,17 +1416,15 @@ def render(self, mode="rgb_array"): return rgb_array def _custom_deepcopy_for_copy(self, new_obj): - super()._custom_deepcopy_for_copy(new_obj) - - new_obj.name = self.name - new_obj._read_from_local_dir = self._read_from_local_dir new_obj.metadata = copy.deepcopy(self.metadata) new_obj.spec = copy.deepcopy(self.spec) - new_obj._raw_backend_class = self._raw_backend_class new_obj._compat_glop_version = self._compat_glop_version - new_obj._actionClass_orig = self._actionClass_orig - new_obj._observationClass_orig = self._observationClass_orig + new_obj._max_iter = self._max_iter + new_obj._max_step = self._max_step + new_obj._overload_name_multimix = self._overload_name_multimix + new_obj.multimix_mix_name = self.multimix_mix_name + super()._custom_deepcopy_for_copy(new_obj) def copy(self) -> "Environment": """ @@ -1023,7 +1451,10 @@ def copy(self) -> "Environment": self._custom_deepcopy_for_copy(res) return res - def get_kwargs(self, with_backend=True, with_chronics_handler=True): + def get_kwargs(self, + with_backend=True, + with_chronics_handler=True, + with_backend_kwargs=False): """ This function allows to make another Environment with the same parameters as the one that have been used to make this one. 
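A minimal sketch of the typical usage (by default the returned kwargs include a copy of the backend and of the chronics handler):

.. code-block:: python

    import grid2op
    from grid2op.Environment import Environment

    env = grid2op.make("l2rpn_case14_sandbox")

    # build a second environment from the parameters of the first one
    env2 = Environment(**env.get_kwargs())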
@@ -1059,16 +1490,21 @@ def get_kwargs(self, with_backend=True, with_chronics_handler=True): """ res = {} + res["n_busbar"] = self._n_busbar res["init_env_path"] = self._init_env_path res["init_grid_path"] = self._init_grid_path if with_chronics_handler: res["chronics_handler"] = copy.deepcopy(self.chronics_handler) + res["chronics_handler"].cleanup_action_space() + + # deals with the backend if with_backend: if not self.backend._can_be_copied: raise RuntimeError("Impossible to get the kwargs for this " "environment, the backend cannot be copied.") res["backend"] = self.backend.copy() res["backend"]._is_loaded = False # i can reload a copy of an environment + res["parameters"] = copy.deepcopy(self._parameters) res["names_chronics_to_backend"] = copy.deepcopy( self._names_chronics_to_backend @@ -1083,7 +1519,14 @@ def get_kwargs(self, with_backend=True, with_chronics_handler=True): res["voltagecontrolerClass"] = self._voltagecontrolerClass res["other_rewards"] = {k: v.rewardClass for k, v in self.other_rewards.items()} res["name"] = self.name + res["_raw_backend_class"] = self._raw_backend_class + if with_backend_kwargs: + # used for multi processing, to pass exactly the + # right things when building the backends + # in each sub process + res["_backend_kwargs"] = self.backend._my_kwargs + res["with_forecast"] = self.with_forecast res["opponent_space_type"] = self._opponent_space_type @@ -1280,21 +1723,22 @@ def train_val_split( """ # define all the locations - if re.match(self.REGEX_SPLIT, add_for_train) is None: + cls = type(self) + if re.match(cls.REGEX_SPLIT, add_for_train) is None: raise EnvError( f"The suffixes you can use for training data (add_for_train) " - f'should match the regex "{self.REGEX_SPLIT}"' + f'should match the regex "{cls.REGEX_SPLIT}"' ) - if re.match(self.REGEX_SPLIT, add_for_val) is None: + if re.match(cls.REGEX_SPLIT, add_for_val) is None: raise EnvError( f"The suffixes you can use for validation data (add_for_val)" - f'should match the regex "{self.REGEX_SPLIT}"' + f'should match the regex "{cls.REGEX_SPLIT}"' ) if add_for_test is not None: - if re.match(self.REGEX_SPLIT, add_for_test) is None: + if re.match(cls.REGEX_SPLIT, add_for_test) is None: raise EnvError( f"The suffixes you can use for test data (add_for_test)" - f'should match the regex "{self.REGEX_SPLIT}"' + f'should match the regex "{cls.REGEX_SPLIT}"' ) if add_for_test is None and test_scen_id is not None: @@ -1674,6 +2118,7 @@ def get_params_for_runner(self): res["envClass"] = Environment # TODO ! 
res["gridStateclass"] = self.chronics_handler.chronicsClass res["backendClass"] = self._raw_backend_class + res["_overload_name_multimix"] = self._overload_name_multimix if hasattr(self.backend, "_my_kwargs"): res["backend_kwargs"] = self.backend._my_kwargs else: @@ -1697,6 +2142,7 @@ def get_params_for_runner(self): res["other_rewards"] = {k: v.rewardClass for k, v in self.other_rewards.items()} res["grid_layout"] = self.grid_layout res["name_env"] = self.name + res["n_busbar"] = self._n_busbar res["opponent_space_type"] = self._opponent_space_type res["opponent_action_class"] = self._opponent_action_class @@ -1712,6 +2158,7 @@ def get_params_for_runner(self): res["kwargs_attention_budget"] = copy.deepcopy(self._kwargs_attention_budget) res["has_attention_budget"] = self._has_attention_budget res["_read_from_local_dir"] = self._read_from_local_dir + res["_local_dir_cls"] = self._local_dir_cls # should be transfered to the runner so that folder is not deleted while runner exists res["logger"] = self.logger res["kwargs_observation"] = copy.deepcopy(self._kwargs_observation) res["observation_bk_class"] = self._observation_bk_class @@ -1721,6 +2168,7 @@ def get_params_for_runner(self): @classmethod def init_obj_from_kwargs(cls, + *, other_env_kwargs, init_env_path, init_grid_path, @@ -1753,39 +2201,46 @@ def init_obj_from_kwargs(cls, observation_bk_class, observation_bk_kwargs, _raw_backend_class, - _read_from_local_dir): - res = Environment(init_env_path=init_env_path, - init_grid_path=init_grid_path, - chronics_handler=chronics_handler, - backend=backend, - parameters=parameters, - name=name, - names_chronics_to_backend=names_chronics_to_backend, - actionClass=actionClass, - observationClass=observationClass, - rewardClass=rewardClass, - legalActClass=legalActClass, - voltagecontrolerClass=voltagecontrolerClass, - other_rewards=other_rewards, - opponent_space_type=opponent_space_type, - opponent_action_class=opponent_action_class, - opponent_class=opponent_class, - opponent_init_budget=opponent_init_budget, - opponent_budget_per_ts=opponent_budget_per_ts, - opponent_budget_class=opponent_budget_class, - opponent_attack_duration=opponent_attack_duration, - opponent_attack_cooldown=opponent_attack_cooldown, - kwargs_opponent=kwargs_opponent, - with_forecast=with_forecast, - attention_budget_cls=attention_budget_cls, - kwargs_attention_budget=kwargs_attention_budget, - has_attention_budget=has_attention_budget, - logger=logger, - kwargs_observation=kwargs_observation, - observation_bk_class=observation_bk_class, - observation_bk_kwargs=observation_bk_kwargs, - _raw_backend_class=_raw_backend_class, - _read_from_local_dir=_read_from_local_dir) + _read_from_local_dir, + _local_dir_cls, + _overload_name_multimix, + n_busbar=DEFAULT_N_BUSBAR_PER_SUB + ): + res = cls(init_env_path=init_env_path, + init_grid_path=init_grid_path, + chronics_handler=chronics_handler, + backend=backend, + parameters=parameters, + name=name, + names_chronics_to_backend=names_chronics_to_backend, + actionClass=actionClass, + observationClass=observationClass, + rewardClass=rewardClass, + legalActClass=legalActClass, + voltagecontrolerClass=voltagecontrolerClass, + other_rewards=other_rewards, + opponent_space_type=opponent_space_type, + opponent_action_class=opponent_action_class, + opponent_class=opponent_class, + opponent_init_budget=opponent_init_budget, + opponent_budget_per_ts=opponent_budget_per_ts, + opponent_budget_class=opponent_budget_class, + opponent_attack_duration=opponent_attack_duration, + 
opponent_attack_cooldown=opponent_attack_cooldown, + kwargs_opponent=kwargs_opponent, + with_forecast=with_forecast, + attention_budget_cls=attention_budget_cls, + kwargs_attention_budget=kwargs_attention_budget, + has_attention_budget=has_attention_budget, + logger=logger, + kwargs_observation=kwargs_observation, + observation_bk_class=observation_bk_class, + observation_bk_kwargs=observation_bk_kwargs, + n_busbar=int(n_busbar), + _raw_backend_class=_raw_backend_class, + _read_from_local_dir=_read_from_local_dir, + _local_dir_cls=_local_dir_cls, + _overload_name_multimix=_overload_name_multimix) return res def generate_data(self, nb_year=1, nb_core=1, seed=None, **kwargs): @@ -1795,8 +2250,7 @@ def generate_data(self, nb_year=1, nb_core=1, seed=None, **kwargs): I also requires the lightsim2grid simulator. - This is only available for some environment (only the environment used for wcci 2022 competition at - time of writing). + This is only available for some environment (only the environment after 2022). Generating data takes some time (around 1 - 2 minutes to generate a weekly scenario) and this why we recommend to do it "offline" and then use the generated data for training or evaluation. @@ -1862,3 +2316,20 @@ def generate_data(self, nb_year=1, nb_core=1, seed=None, **kwargs): env=self, seed=seed, nb_scenario=nb_year, nb_core=nb_core, **kwargs ) + + def _add_classes_in_files(self, sys_path, bk_type, are_classes_in_files): + if are_classes_in_files: + # then generate the proper classes + _PATH_GRID_CLASSES = bk_type._PATH_GRID_CLASSES + try: + bk_type._PATH_GRID_CLASSES = None + my_type_tmp = type(self).init_grid(gridobj=bk_type, _local_dir_cls=None) + txt_, cls_res_me = self._aux_gen_classes(my_type_tmp, + sys_path, + _add_class_output=True) + # then add the class to the init file + with open(os.path.join(sys_path, "__init__.py"), "a", encoding="utf-8") as f: + f.write(txt_) + finally: + # make sure to put back the correct _PATH_GRID_CLASSES + bk_type._PATH_GRID_CLASSES = _PATH_GRID_CLASSES diff --git a/grid2op/Environment/maskedEnvironment.py b/grid2op/Environment/maskedEnvironment.py new file mode 100644 index 000000000..e3c55a7d9 --- /dev/null +++ b/grid2op/Environment/maskedEnvironment.py @@ -0,0 +1,209 @@ +# Copyright (c) 2023, RTE (https://www.rte-france.com) +# See AUTHORS.txt +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. + +import copy +import numpy as np +import os +from typing import Tuple, Union, List + +from grid2op.Environment.environment import Environment +from grid2op.Exceptions import EnvError +from grid2op.dtypes import dt_bool, dt_float, dt_int +from grid2op.Space import DEFAULT_N_BUSBAR_PER_SUB +from grid2op.MakeEnv.PathUtils import USE_CLASS_IN_FILE + + +class MaskedEnvironment(Environment): + """This class is the grid2op implementation of a "maked" environment: lines not in the + `lines_of_interest` mask will NOT be deactivated by the environment is the flow is too high + (or moderately high for too long.) + + .. warning:: + This class might not behave normally if used with TimeOutEnvironment, MultiEnv, MultiMixEnv etc. + + .. 
warning:: + At time of writing, the behaviour of "obs.simulate" is not modified + + Examples + --------- + + We recommend you build such an environment with: + + .. code-block:: python + + import grid2op + from grid2op.Environment import MaskedEnvironment + + env_name = "l2rpn_case14_sandbox" + lines_of_interest = np.array([True, True, True, True, True, True, + False, False, False, False, False, False, + False, False, False, False, False, False, + False, False]) + env = MaskedEnvironment(grid2op.make(env_name), + lines_of_interest=lines_of_interest) + + + In particular, make sure to use `grid2op.make(...)` when creating the MaskedEnvironment + and not to use another environment. + + """ + # some kind of infinity value + # NB we multiply np.finfo(dt_float).max by a small number (1e-7) to avoid overflow + # indeed, _hard_overflow_threshold is multiply by the flow on the lines + INF_VAL_THM_LIM = 1e-7 * np.finfo(dt_float).max + + # some kind of infinity value + INF_VAL_TS_OVERFLOW_ALLOW = np.iinfo(dt_int).max - 1 + + def __init__(self, + grid2op_env: Union[Environment, dict], + lines_of_interest): + + self._lines_of_interest = self._make_lines_of_interest(lines_of_interest) + if isinstance(grid2op_env, Environment): + kwargs = grid2op_env.get_kwargs() + if grid2op_env.classes_are_in_files(): + # I need to build the classes + + # first take the "ownership" of the tmp directory + kwargs["_local_dir_cls"] = grid2op_env._local_dir_cls + grid2op_env._local_dir_cls = None + + # then generate the proper classes + sys_path = os.path.abspath(kwargs["_local_dir_cls"].name) + bk_type = type(grid2op_env.backend) + self._add_classes_in_files(sys_path, bk_type, grid2op_env.classes_are_in_files()) + super().__init__(**kwargs) + elif isinstance(grid2op_env, dict): + super().__init__(**grid2op_env) + else: + raise EnvError(f"For MaskedEnvironment you need to provide " + f"either an Environment or a dict " + f"for grid2op_env. You provided: {type(grid2op_env)}") + # if self._lines_of_interest.size() != type(self).n_line: + # raise EnvError("Impossible to init A masked environment when the number of lines " + # "of the mask do not match the number of lines on the grid.") + + def _make_lines_of_interest(self, lines_of_interest): + # NB is called BEFORE the env has been created... + if isinstance(lines_of_interest, np.ndarray): + res = lines_of_interest.astype(dt_bool) + if res.sum() == 0: + raise EnvError("You cannot use MaskedEnvironment and masking all " + "the grid. If you don't want to simulate powerline " + "disconnection when they are game over, please " + "set params.NO_OVERFLOW_DISCONNECT=True (see doc)") + else: + raise EnvError("Format of lines_of_interest is not understood. 
" + "Please provide a vector of the size of the " + "number of lines on the grid.") + return res + + def _reset_vectors_and_timings(self): + super()._reset_vectors_and_timings() + self._hard_overflow_threshold[~self._lines_of_interest] = type(self).INF_VAL_THM_LIM + self._nb_timestep_overflow_allowed[~self._lines_of_interest] = type(self).INF_VAL_TS_OVERFLOW_ALLOW + + def get_kwargs(self, with_backend=True, with_chronics_handler=True): + res = {} + res["lines_of_interest"] = copy.deepcopy(self._lines_of_interest) + res["grid2op_env"] = super().get_kwargs(with_backend, with_chronics_handler) + return res + + def get_params_for_runner(self): + res = super().get_params_for_runner() + res["envClass"] = MaskedEnvironment + res["other_env_kwargs"] = {"lines_of_interest": copy.deepcopy(self._lines_of_interest)} + return res + + def _custom_deepcopy_for_copy(self, new_obj): + super()._custom_deepcopy_for_copy(new_obj) + new_obj._lines_of_interest = copy.deepcopy(self._lines_of_interest) + + @classmethod + def init_obj_from_kwargs(cls, + *, + other_env_kwargs, + init_env_path, + init_grid_path, + chronics_handler, + backend, + parameters, + name, + names_chronics_to_backend, + actionClass, + observationClass, + rewardClass, + legalActClass, + voltagecontrolerClass, + other_rewards, + opponent_space_type, + opponent_action_class, + opponent_class, + opponent_init_budget, + opponent_budget_per_ts, + opponent_budget_class, + opponent_attack_duration, + opponent_attack_cooldown, + kwargs_opponent, + with_forecast, + attention_budget_cls, + kwargs_attention_budget, + has_attention_budget, + logger, + kwargs_observation, + observation_bk_class, + observation_bk_kwargs, + _raw_backend_class, + _read_from_local_dir, + _overload_name_multimix, + _local_dir_cls, + n_busbar=DEFAULT_N_BUSBAR_PER_SUB): + grid2op_env = {"init_env_path": init_env_path, + "init_grid_path": init_grid_path, + "chronics_handler": chronics_handler, + "backend": backend, + "parameters": parameters, + "name": name, + "names_chronics_to_backend": names_chronics_to_backend, + "actionClass": actionClass, + "observationClass": observationClass, + "rewardClass": rewardClass, + "legalActClass": legalActClass, + "voltagecontrolerClass": voltagecontrolerClass, + "other_rewards": other_rewards, + "opponent_space_type": opponent_space_type, + "opponent_action_class": opponent_action_class, + "opponent_class": opponent_class, + "opponent_init_budget": opponent_init_budget, + "opponent_budget_per_ts": opponent_budget_per_ts, + "opponent_budget_class": opponent_budget_class, + "opponent_attack_duration": opponent_attack_duration, + "opponent_attack_cooldown": opponent_attack_cooldown, + "kwargs_opponent": kwargs_opponent, + "with_forecast": with_forecast, + "attention_budget_cls": attention_budget_cls, + "kwargs_attention_budget": kwargs_attention_budget, + "has_attention_budget": has_attention_budget, + "logger": logger, + "kwargs_observation": kwargs_observation, + "observation_bk_class": observation_bk_class, + "observation_bk_kwargs": observation_bk_kwargs, + "n_busbar": int(n_busbar), + "_raw_backend_class": _raw_backend_class, + "_read_from_local_dir": _read_from_local_dir, + "_local_dir_cls": _local_dir_cls, + "_overload_name_multimix": _overload_name_multimix} + if not "lines_of_interest" in other_env_kwargs: + raise EnvError("You cannot make a MaskedEnvironment without providing the list of lines of interest") + for el in other_env_kwargs: + if el == "lines_of_interest": + continue + warnings.warn(f"kwargs {el} provided to make the 
environment will be ignored") + res = MaskedEnvironment(grid2op_env, lines_of_interest=other_env_kwargs["lines_of_interest"]) + return res diff --git a/grid2op/Environment/multiEnvMultiProcess.py b/grid2op/Environment/multiEnvMultiProcess.py index 53c2cec18..00a6fc803 100644 --- a/grid2op/Environment/multiEnvMultiProcess.py +++ b/grid2op/Environment/multiEnvMultiProcess.py @@ -5,14 +5,12 @@ # you can obtain one at http://mozilla.org/MPL/2.0/. # SPDX-License-Identifier: MPL-2.0 # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. -from multiprocessing import Process, Pipe + import numpy as np from grid2op.dtypes import dt_int -from grid2op.Exceptions import Grid2OpException, MultiEnvException -from grid2op.Space import GridObjects +from grid2op.Exceptions import MultiEnvException from grid2op.Environment.baseMultiProcessEnv import BaseMultiProcessEnvironment -from grid2op.Action import BaseAction class MultiEnvMultiProcess(BaseMultiProcessEnvironment): diff --git a/grid2op/Environment/multiMixEnv.py b/grid2op/Environment/multiMixEnv.py index 56f251665..be2508478 100644 --- a/grid2op/Environment/multiMixEnv.py +++ b/grid2op/Environment/multiMixEnv.py @@ -10,10 +10,15 @@ import warnings import numpy as np import copy +from typing import Any, Dict, Tuple, Union, List, Literal from grid2op.dtypes import dt_int, dt_float -from grid2op.Space import GridObjects, RandomObject +from grid2op.Space import GridObjects, RandomObject, DEFAULT_N_BUSBAR_PER_SUB from grid2op.Exceptions import EnvError, Grid2OpException +from grid2op.Observation import BaseObservation +from grid2op.MakeEnv.PathUtils import USE_CLASS_IN_FILE +from grid2op.Environment.baseEnv import BaseEnv +from grid2op.typing_variables import STEP_INFO_TYPING, RESET_OPTIONS_TYPING class MultiMixEnvironment(GridObjects, RandomObject): @@ -152,11 +157,14 @@ class MultiMixEnvironment(GridObjects, RandomObject): """ + KEYS_RESET_OPTIONS = BaseEnv.KEYS_RESET_OPTIONS + def __init__( self, envs_dir, logger=None, - experimental_read_from_local_dir=False, + experimental_read_from_local_dir=None, + n_busbar=DEFAULT_N_BUSBAR_PER_SUB, _add_to_name="", # internal, for test only, do not use ! _compat_glop_version=None, # internal, for test only, do not use ! _test=False, @@ -169,6 +177,10 @@ def __init__( self.mix_envs = [] self._env_dir = os.path.abspath(envs_dir) self.__closed = False + self._do_not_erase_local_dir_cls = False + self._local_dir_cls = None + if not os.path.exists(envs_dir): + raise EnvError(f"There is nothing at {envs_dir}") # Special case handling for backend # TODO: with backend.copy() instead ! 
backendClass = None @@ -179,74 +191,170 @@ def __init__( # was introduced in grid2op 1.7.1 backend_kwargs = kwargs["backend"]._my_kwargs del kwargs["backend"] - - # Inline import to prevent cyclical import - from grid2op.MakeEnv.Make import make - + + li_mix_nms = [mix_name for mix_name in sorted(os.listdir(envs_dir)) if os.path.isdir(os.path.join(envs_dir, mix_name))] + if not li_mix_nms: + raise EnvError("We did not find any mix in this multi-mix environment.") + + # Make sure GridObject class attributes are set from first env + # Should be fine since the grid is the same for all envs + multi_env_name = (None, envs_dir, os.path.basename(os.path.abspath(envs_dir)), _add_to_name) + env_for_init = self._aux_create_a_mix(envs_dir, + li_mix_nms[0], + logger, + backendClass, + backend_kwargs, + _add_to_name, + _compat_glop_version, + n_busbar, + _test, + experimental_read_from_local_dir, + multi_env_name, + kwargs) + + cls_res_me = self._aux_add_class_file(env_for_init) + if cls_res_me is not None: + self.__class__ = cls_res_me + else: + self.__class__ = type(self).init_grid(type(env_for_init.backend), _local_dir_cls=env_for_init._local_dir_cls) + self.mix_envs.append(env_for_init) + self._local_dir_cls = env_for_init._local_dir_cls + # TODO reuse same observation_space and action_space in all the envs maybe ? + multi_env_name = (type(env_for_init)._PATH_GRID_CLASSES, *multi_env_name[1:]) try: - for env_dir in sorted(os.listdir(envs_dir)): - env_path = os.path.join(envs_dir, env_dir) - if not os.path.isdir(env_path): + for mix_name in li_mix_nms[1:]: + mix_path = os.path.join(envs_dir, mix_name) + if not os.path.isdir(mix_path): continue - this_logger = ( - logger.getChild(f"MultiMixEnvironment_{env_dir}") - if logger is not None - else None - ) - # Special case for backend - if backendClass is not None: - try: - # should pass with grid2op >= 1.7.1 - bk = backendClass(**backend_kwargs) - except TypeError as exc_: - # with grid2Op version prior to 1.7.1 - # you might have trouble with - # "TypeError: __init__() got an unexpected keyword argument 'can_be_copied'" - msg_ = ("Impossible to create a backend for each mix using the " - "backend key-word arguments. Falling back to creating " - "with no argument at all (default behaviour with grid2op <= 1.7.0).") - warnings.warn(msg_) - bk = backendClass() - env = make( - env_path, - backend=bk, - _add_to_name=_add_to_name, - _compat_glop_version=_compat_glop_version, - test=_test, - logger=this_logger, - experimental_read_from_local_dir=experimental_read_from_local_dir, - **kwargs, - ) - else: - env = make( - env_path, - _add_to_name=_add_to_name, - _compat_glop_version=_compat_glop_version, - test=_test, - logger=this_logger, - experimental_read_from_local_dir=experimental_read_from_local_dir, - **kwargs, - ) - self.mix_envs.append(env) + mix = self._aux_create_a_mix(envs_dir, + mix_name, + logger, + backendClass, + backend_kwargs, + _add_to_name, + _compat_glop_version, + n_busbar, + _test, + experimental_read_from_local_dir, + multi_env_name, + kwargs) + self.mix_envs.append(mix) except Exception as exc_: - err_msg = "MultiMix environment creation failed: {}".format(exc_) - raise EnvError(err_msg) + err_msg = "MultiMix environment creation failed at the creation of the first mix. 
Error: {}".format(exc_) + raise EnvError(err_msg) from exc_ if len(self.mix_envs) == 0: err_msg = "MultiMix envs_dir did not contain any valid env" raise EnvError(err_msg) + # tell every mix the "MultiMix" is responsible for deleting the + # folder that stores the classes definition + for el in self.mix_envs: + el._do_not_erase_local_dir_cls = True self.env_index = 0 self.current_env = self.mix_envs[self.env_index] - # Make sure GridObject class attributes are set from first env - # Should be fine since the grid is the same for all envs - multi_env_name = os.path.basename(os.path.abspath(envs_dir)) + _add_to_name - save_env_name = self.current_env.env_name - self.current_env.env_name = multi_env_name - self.__class__ = self.init_grid(self.current_env) - self.current_env.env_name = save_env_name + # legacy behaviour (using experimental_read_from_local_dir kwargs in env.make) + if self._read_from_local_dir is not None: + if os.path.split(self._read_from_local_dir)[1] == "_grid2op_classes": + self._do_not_erase_local_dir_cls = True + else: + self._do_not_erase_local_dir_cls = True + + def _aux_aux_add_class_file(self, sys_path, env_for_init): + # used for the old behaviour (setting experimental_read_from_local_dir=True in make) + bk_type = type(env_for_init.backend) + _PATH_GRID_CLASSES = bk_type._PATH_GRID_CLASSES + cls_res_me = None + try: + bk_type._PATH_GRID_CLASSES = None + my_type_tmp = MultiMixEnvironment.init_grid(gridobj=bk_type, _local_dir_cls=None) + txt_, cls_res_me = BaseEnv._aux_gen_classes(my_type_tmp, + sys_path, + _add_class_output=True) + # then add the class to the init file + with open(os.path.join(sys_path, "__init__.py"), "a", encoding="utf-8") as f: + f.write(txt_) + finally: + # make sure to put back the correct _PATH_GRID_CLASSES + bk_type._PATH_GRID_CLASSES = _PATH_GRID_CLASSES + return cls_res_me + + def _aux_add_class_file(self, env_for_init): + # used for the "new" bahviour for grid2op make (automatic read from local dir) + if env_for_init.classes_are_in_files() and env_for_init._local_dir_cls is not None: + sys_path = os.path.abspath(env_for_init._local_dir_cls.name) + self._local_dir_cls = env_for_init._local_dir_cls + env_for_init._local_dir_cls = None + # then generate the proper classes + cls_res_me = self._aux_aux_add_class_file(sys_path, env_for_init) + return cls_res_me + return None + + def _aux_create_a_mix(self, + envs_dir, + mix_name, + logger, + backendClass, + backend_kwargs, + _add_to_name, + _compat_glop_version, + n_busbar, + _test, + experimental_read_from_local_dir, + multi_env_name, + kwargs + ): + # Inline import to prevent cyclical import + from grid2op.MakeEnv.Make import make + + this_logger = ( + logger.getChild(f"MultiMixEnvironment_{mix_name}") + if logger is not None + else None + ) + mix_path = os.path.join(envs_dir, mix_name) + # Special case for backend + if backendClass is not None: + try: + # should pass with grid2op >= 1.7.1 + bk = backendClass(**backend_kwargs) + except TypeError as exc_: + # with grid2Op version prior to 1.7.1 + # you might have trouble with + # "TypeError: __init__() got an unexpected keyword argument 'can_be_copied'" + msg_ = ("Impossible to create a backend for each mix using the " + "backend key-word arguments. 
Falling back to creating " + "with no argument at all (default behaviour with grid2op <= 1.7.0).") + warnings.warn(msg_) + bk = backendClass() + mix = make( + mix_path, + backend=bk, + _add_to_name=_add_to_name, + _compat_glop_version=_compat_glop_version, + n_busbar=n_busbar, + test=_test, + logger=this_logger, + experimental_read_from_local_dir=experimental_read_from_local_dir, + _overload_name_multimix=multi_env_name, + **kwargs, + ) + else: + mix = make( + mix_path, + n_busbar=n_busbar, + _add_to_name=_add_to_name, + _compat_glop_version=_compat_glop_version, + test=_test, + logger=this_logger, + experimental_read_from_local_dir=experimental_read_from_local_dir, + _overload_name_multimix=multi_env_name, + **kwargs, + ) + return mix + def get_path_env(self): """ Get the path that allows to create this environment. @@ -297,11 +405,13 @@ def __next__(self): def __getattr__(self, name): # TODO what if name is an integer ? make it possible to loop with integer here + if self.__closed: + raise EnvError("This environment is closed, you cannot use it.") return getattr(self.current_env, name) def keys(self): for mix in self.mix_envs: - yield mix.name + yield mix.multimix_mix_name def values(self): for mix in self.mix_envs: @@ -309,7 +419,7 @@ def values(self): def items(self): for mix in self.mix_envs: - yield mix.name, mix + yield mix.multimix_mix_name, mix def copy(self): if self.__closed: @@ -319,6 +429,11 @@ def copy(self): current_env = self.current_env self.current_env = None + # do not copy these attributes + _local_dir_cls = self._local_dir_cls + self._local_dir_cls = None + + # create the new object and copy the normal attribute cls = self.__class__ res = cls.__new__(cls) for k in self.__dict__: @@ -326,11 +441,17 @@ def copy(self): # this is handled elsewhere continue setattr(res, k, copy.deepcopy(getattr(self, k))) + # now deal with the mixes res.mix_envs = [mix.copy() for mix in mix_envs] res.current_env = res.mix_envs[res.env_index] - + # finally deal with the ownership of the class folder + res._local_dir_cls = _local_dir_cls + res._do_not_erase_local_dir_cls = True + + # put back attributes of `self` that have been put aside self.mix_envs = mix_envs self.current_env = current_env + self._local_dir_cls = _local_dir_cls return res def __getitem__(self, key): @@ -353,23 +474,36 @@ def __getitem__(self, key): raise EnvError("This environment is closed, you cannot use it.") # Search for key for mix in self.mix_envs: - if mix.name == key: + if mix.multimix_mix_name == key: return mix # Not found by name raise KeyError - def reset(self, random=False): + def reset(self, + *, + seed: Union[int, None] = None, + random=False, + options: RESET_OPTIONS_TYPING = None) -> BaseObservation: + if self.__closed: raise EnvError("This environment is closed, you cannot use it.") + + if options is not None: + for el in options: + if el not in type(self).KEYS_RESET_OPTIONS: + raise EnvError(f"You tried to customize the `reset` call with some " + f"`options` using the key `{el}` which is invalid. 
" + f"Only keys in {sorted(list(type(self).KEYS_RESET_OPTIONS))} " + f"can be used.") + if random: self.env_index = self.space_prng.randint(len(self.mix_envs)) else: self.env_index = (self.env_index + 1) % len(self.mix_envs) self.current_env = self.mix_envs[self.env_index] - self.current_env.reset() - return self.get_obs() + return self.current_env.reset(seed=seed, options=options) def seed(self, seed=None): """ @@ -464,7 +598,17 @@ def close(self): for mix in self.mix_envs: mix.close() + self.__closed = True + + # free the resources (temporary directory) + if self._do_not_erase_local_dir_cls: + # The resources are not held by this env, so + # I do not remove them + # (case for ObsEnv or ForecastedEnv) + return + BaseEnv._aux_close_local_dir_cls(self) + def attach_layout(self, grid_layout): if self.__closed: @@ -478,7 +622,12 @@ def __del__(self): self.close() def generate_classes(self): - # TODO this is not really a good idea, as the multi-mix itself is not read from the - # files ! - for mix in self.mix_envs: - mix.generate_classes() + mix_for_classes = self.mix_envs[0] + path_cls = os.path.join(mix_for_classes.get_path_env(), "_grid2op_classes") + if not os.path.exists(path_cls): + try: + os.mkdir(path_cls) + except FileExistsError: + pass + mix_for_classes.generate_classes() + self._aux_aux_add_class_file(path_cls, mix_for_classes) diff --git a/grid2op/Environment/timedOutEnv.py b/grid2op/Environment/timedOutEnv.py index fcccd7641..a1952f99a 100644 --- a/grid2op/Environment/timedOutEnv.py +++ b/grid2op/Environment/timedOutEnv.py @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2020, RTE (https://www.rte-france.com) +# Copyright (c) 2023, RTE (https://www.rte-france.com) # See AUTHORS.txt # This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. # If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, @@ -8,11 +8,15 @@ import time from math import floor -from typing import Tuple, Union, List +from typing import Any, Dict, Tuple, Union, List, Literal +import os + from grid2op.Environment.environment import Environment from grid2op.Action import BaseAction from grid2op.Observation import BaseObservation from grid2op.Exceptions import EnvError +from grid2op.Space import DEFAULT_N_BUSBAR_PER_SUB +from grid2op.MakeEnv.PathUtils import USE_CLASS_IN_FILE class TimedOutEnvironment(Environment): # TODO heritage ou alors on met un truc de base @@ -23,7 +27,10 @@ class TimedOutEnvironment(Environment): # TODO heritage ou alors on met un truc of the `step` function. For more information, see the documentation of - :func:`TimedOutEnvironment.step` for + :func:`TimedOutEnvironment.step` + + .. warning:: + This class might not behave normally if used with MaskedEnvironment, MultiEnv, MultiMixEnv etc. 
Attributes ---------- @@ -66,7 +73,19 @@ def __init__(self, self._nb_dn_last = 0 self._is_init_dn = False if isinstance(grid2op_env, Environment): - super().__init__(**grid2op_env.get_kwargs()) + kwargs = grid2op_env.get_kwargs() + if grid2op_env.classes_are_in_files(): + # I need to build the classes + + # first take the "ownership" of the tmp directory + kwargs["_local_dir_cls"] = grid2op_env._local_dir_cls + grid2op_env._local_dir_cls = None + + # then generate the proper classes + sys_path = os.path.abspath(kwargs["_local_dir_cls"].name) + bk_type = type(grid2op_env.backend) + self._add_classes_in_files(sys_path, bk_type, grid2op_env.classes_are_in_files()) + super().__init__(**kwargs) elif isinstance(grid2op_env, dict): super().__init__(**grid2op_env) else: @@ -177,6 +196,7 @@ def get_params_for_runner(self): @classmethod def init_obj_from_kwargs(cls, + *, other_env_kwargs, init_env_path, init_grid_path, @@ -209,45 +229,64 @@ def init_obj_from_kwargs(cls, observation_bk_class, observation_bk_kwargs, _raw_backend_class, - _read_from_local_dir): - res = TimedOutEnvironment(grid2op_env={"init_env_path": init_env_path, - "init_grid_path": init_grid_path, - "chronics_handler": chronics_handler, - "backend": backend, - "parameters": parameters, - "name": name, - "names_chronics_to_backend": names_chronics_to_backend, - "actionClass": actionClass, - "observationClass": observationClass, - "rewardClass": rewardClass, - "legalActClass": legalActClass, - "voltagecontrolerClass": voltagecontrolerClass, - "other_rewards": other_rewards, - "opponent_space_type": opponent_space_type, - "opponent_action_class": opponent_action_class, - "opponent_class": opponent_class, - "opponent_init_budget": opponent_init_budget, - "opponent_budget_per_ts": opponent_budget_per_ts, - "opponent_budget_class": opponent_budget_class, - "opponent_attack_duration": opponent_attack_duration, - "opponent_attack_cooldown": opponent_attack_cooldown, - "kwargs_opponent": kwargs_opponent, - "with_forecast": with_forecast, - "attention_budget_cls": attention_budget_cls, - "kwargs_attention_budget": kwargs_attention_budget, - "has_attention_budget": has_attention_budget, - "logger": logger, - "kwargs_observation": kwargs_observation, - "observation_bk_class": observation_bk_class, - "observation_bk_kwargs": observation_bk_kwargs, - "_raw_backend_class": _raw_backend_class, - "_read_from_local_dir": _read_from_local_dir}, - **other_env_kwargs) + _read_from_local_dir, + _local_dir_cls, + _overload_name_multimix, + n_busbar=DEFAULT_N_BUSBAR_PER_SUB): + grid2op_env={"init_env_path": init_env_path, + "init_grid_path": init_grid_path, + "chronics_handler": chronics_handler, + "backend": backend, + "parameters": parameters, + "name": name, + "names_chronics_to_backend": names_chronics_to_backend, + "actionClass": actionClass, + "observationClass": observationClass, + "rewardClass": rewardClass, + "legalActClass": legalActClass, + "voltagecontrolerClass": voltagecontrolerClass, + "other_rewards": other_rewards, + "opponent_space_type": opponent_space_type, + "opponent_action_class": opponent_action_class, + "opponent_class": opponent_class, + "opponent_init_budget": opponent_init_budget, + "opponent_budget_per_ts": opponent_budget_per_ts, + "opponent_budget_class": opponent_budget_class, + "opponent_attack_duration": opponent_attack_duration, + "opponent_attack_cooldown": opponent_attack_cooldown, + "kwargs_opponent": kwargs_opponent, + "with_forecast": with_forecast, + "attention_budget_cls": attention_budget_cls, + 
"kwargs_attention_budget": kwargs_attention_budget, + "has_attention_budget": has_attention_budget, + "logger": logger, + "kwargs_observation": kwargs_observation, + "observation_bk_class": observation_bk_class, + "observation_bk_kwargs": observation_bk_kwargs, + "_raw_backend_class": _raw_backend_class, + "_read_from_local_dir": _read_from_local_dir, + "n_busbar": int(n_busbar), + "_local_dir_cls": _local_dir_cls, + "_overload_name_multimix": _overload_name_multimix} + if not "time_out_ms" in other_env_kwargs: + raise EnvError("You cannot make a MaskedEnvironment without providing the list of lines of interest") + for el in other_env_kwargs: + if el == "time_out_ms": + continue + warnings.warn(f"kwargs {el} provided to make the environment will be ignored") + res = TimedOutEnvironment(grid2op_env, time_out_ms=other_env_kwargs["time_out_ms"]) return res - - def reset(self) -> BaseObservation: + + + def reset(self, + *, + seed: Union[int, None] = None, + options: Union[Dict[Union[str, Literal["time serie id"]], Union[int, str]], None] = None) -> BaseObservation: """Reset the environment. + .. seealso:: + The doc of :func:`Environment.reset` for more information + Returns ------- BaseObservation @@ -257,7 +296,7 @@ def reset(self) -> BaseObservation: self.__last_act_send = time.perf_counter() self.__last_act_received = self.__last_act_send self._is_init_dn = False - res = super().reset() + res = super().reset(seed=seed, options=options) self.__last_act_send = time.perf_counter() self._is_init_dn = True return res diff --git a/grid2op/Episode/CompactEpisodeData.py b/grid2op/Episode/CompactEpisodeData.py new file mode 100644 index 000000000..e5cdabf9d --- /dev/null +++ b/grid2op/Episode/CompactEpisodeData.py @@ -0,0 +1,316 @@ +# Copyright (c) 2019-2020, RTE (https://www.rte-france.com) +# Addition by Xavier Weiss (@DEUCE1957) +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. +import json +import numpy as np +from grid2op.Action import ActionSpace +from grid2op.Observation import ObservationSpace + +from pathlib import Path as p + + +class CompactEpisodeData(): + + """ + This module provides a compact way to serialize/deserialize one episode of a Reinforcement Learning (RL) run. + This enables episodes to be replayed, so we can understand the behaviour of the agent. + It is compatible with :class:`EpisodeData` through the "to_dict()" method. + + If enabled when using the :class:`Runner`, the :class:`CompactEpisodeData` + will save the information in a structured and compact way. + For each unique environment it will store a folder with: + - "dict_action_space.json" + - "dict_attack_space.json" + - "dict_env_modification.json" + - "dict_observation_space.json" + Then for each episode it stores a single compressed Numpy archive (.npz) file, identified by the chronics ID (e.g. "003"). + Inside this archive we find: + - "actions": actions taken by the :class:`grid2op.BaseAgent.BaseAgent`, each row of this numpy 2d-array is a vector representation of the action + taken by the agent at a particular timestep. 
+ - "env_actions": represents the modification of the powergrid by the environment, these modification usually concern hazards, maintenance, as well as modification of the generators production + setpoint or the loads consumption. + - "attacks": actions taken by any opponent present in the RL environment, stored similary to "actions". + - "observations": observations of the class :class:`grid2op.BaseObservation.BaseObservation made by the :class:`grid2op.Agent.BaseAgent` after taking an action, stored as a numpy 2d-array + where each row corresponds to a vector representation of the observation at that timestep. Note this includes the initial timestep, hence this array is 1 row longer than (e.g.) the actionss. + - "rewards": reward received by the :class:`grid2op.Agent.BaseAgent from the :class:`grid2op.Environment` at timestep 't', represented as 1d-array. + - "other_rewards": any other rewards logged by the :class:`grid2op.Environment` (but not necessarily passed to the agent), represented as a 2d-array. + - "disc_lines": gives which lines have been disconnected during the simulation at each time step. The same convention as for "rewards" has been adopted. This means that the powerlines are + disconnected when the :class:`grid2op.Agent.BaseAgent` takes the :class:`grid2op.BaseAction` at timestep 't`. + - "times": gives some information about the processor time spent (in seconds), mainly the time taken by + :class:`grid2op.Agent.BaseAgent` (and especially its method :func:`grid2op.BaseAgent.act`) and amount of time + spent in the :class:`grid2op.Environment.Environment` + All of the above can be read back from disk. + + Inputs + ---------- + environment: :class:`grid2op.Environment` + The environment we are running, contains most of the metadata required to store the episode. + obs: :class:`grid2op.Observation` + The initial observation of the environment in the current episode. Used to store the first observation. + + Examples + -------- + Here is an example on how to use the :class:`CompactEpisodeData` class outside of the :class:`grid2op.Runner.Runner`. + + .. code-block:: python + from pathlib import Path as p + from grid2op.Agent import DoNothingAgent + env = grid2op.make(""rte_case14_realistic") + obs = env.reset() + ep_id = env.chronics_handler.get_name() + data_dir = p.cwd() # CHANGE THIS TO DESIRED LOCATION ON DISK + agent = DoNothingAgent(env.action_space) + reward = 0.0 + episode_store = CompactEpisodeData(env, obs) + for t in range(env.max_episode_duration()): + start = time.perf_counter() + act = agent.act(obs, reward) + obs, reward, done, info = env.step(act) + duration = time.perf_counter() - start + episode_store.update(t, env, act, obs, reward, duration, info) + # Store Episode Data to file (compactly) + episode_store.to_disk() + # Load Episode Data from disk by referring to the specific episode ID + episode_store.from_disk(ep_id) + """ + + def __init__(self, env, obs, exp_dir, ep_id:str=None): + """ + Creates Dictionary of Numpy Arrays for storing the details of a Grid2Op Episode (actions, observations, etc.). + Pre-allocating the arrays like this is more efficient than appending to a mutable datatype (like a list). + For the initial timestep, an extra observation is stored (the initial state of the Environment). + + Args: + env (grid2op.Environment): Current Grid2Op Environment, used to grab static attributes. 
+ obs (grid2op.Observation): Initial Observation (before agent is active) + exp_dir (pathlib.Path): Where experiment data is stored + ep_id (str | None): If provided tries to load previously stored episode from disk. + + Returns: + dict: Contains all data to fully represent what happens in an episode + """ + if exp_dir is not None: + self.exp_dir = p(exp_dir) + else: + self.exp_dir = None + self.array_names = ("actions", "env_actions", "attacks", "observations", "rewards", "other_rewards", "disc_lines", "times") + self.space_names = ("observation_space", "action_space", "attack_space", "env_modification_space") + if ep_id is None: + self.ep_id = env.chronics_handler.get_name() + max_no_of_timesteps = int(env.max_episode_duration()) + + # Numpy Arrays + self.actions = np.full((max_no_of_timesteps, env.action_space.n), fill_value=np.NaN, dtype=np.float16) + self.env_actions = np.full((max_no_of_timesteps, env._helper_action_env.n), fill_value=np.NaN, dtype=np.float32) + self.attacks = np.full((max_no_of_timesteps, env._opponent_action_space.n), fill_value=0.0, dtype=np.float32) + self.observations = np.full((max_no_of_timesteps + 1, len(obs.to_vect())),fill_value=np.NaN,dtype=np.float32) + self.rewards = np.full(max_no_of_timesteps, fill_value=np.NaN, dtype=np.float32) + self.other_reward_names = list(sorted(env.other_rewards.keys())) + self.other_rewards = np.full((max_no_of_timesteps, len(self.other_reward_names)), fill_value=np.NaN, dtype=np.float32) + self.disc_lines = np.full((max_no_of_timesteps, env.backend.n_line), fill_value=np.NaN, dtype=np.bool_) + self.times = np.full(max_no_of_timesteps, fill_value=np.NaN, dtype=np.float32) + + self.disc_lines_templ = np.full((1, env.backend.n_line), fill_value=False, dtype=np.bool_) + # AttackTempl: Not used, kept for comptabiility with EpisodeData + self.attack_templ = np.full((1, env._oppSpace.action_space.size()), fill_value=0.0, dtype=np.float32) + + self.legal = np.full(max_no_of_timesteps, fill_value=True, dtype=np.bool_) + self.ambiguous = np.full(max_no_of_timesteps, fill_value=False, dtype=np.bool_) + self.n_cols = env.action_space.n + env._helper_action_env.n + len(obs.to_vect()) + env.backend.n_line + env._oppSpace.action_space.size() + 6 + + # Store First Observation + self.observations[0] = obs.to_vect() + self.game_over_timestep = max_no_of_timesteps + + # JSON-serializable Objects + self.observation_space=env.observation_space + self.action_space=env.action_space + self.attack_space=env._opponent_action_space + self.env_modification_space=env._helper_action_env + + # Special JSON-Serializable Object: Episode MetaData + self.meta = dict( + chronics_path = self.ep_id, + chronics_max_timestep = max_no_of_timesteps, + game_over_timestep = self.game_over_timestep, + other_reward_names = self.other_reward_names, + grid_path = env._init_grid_path, + backend_type = type(env.backend).__name__, + env_type = type(env).__name__, + env_seed = (env.seed_used.item() if env.seed_used.ndim == 0 else list(env.seed_used)) if isinstance(env.seed_used, np.ndarray) else env.seed_used, + agent_seed = self.action_space.seed_used, + nb_timestep_played = 0, + cumulative_reward = 0.0, + ) + elif exp_dir is not None: + self.load_metadata(ep_id) + self.load_spaces() + self.load_arrays(ep_id) + + def update(self, t:int, env, action, + obs, reward:float, done:bool, duration:float, info): + """ + Update the arrays in the Episode Store for each step of the environment. 
+ Args: + t (int): Current time step + env (grid2op.Environment): State of Environment + action (grid2op.Action): Action agent took on the Environment + obs (grid2op.Observation): Observed result of action on Environment + reward (float): Numeric reward returned by Environment for the given action + duration (float): Time in seconds needed to choose and execute the action + info (dict): Dictionary containing information on legality and ambiguity of action + """ + self.actions[t - 1] = action.to_vect() + self.env_actions[t - 1] = env._env_modification.to_vect() + self.observations[t] = obs.to_vect() + opp_attack = env._oppSpace.last_attack + if opp_attack is not None: + self.attacks[t - 1] = opp_attack.to_vect() + self.rewards[t - 1] = reward + if "disc_lines" in info: + arr = info["disc_lines"] + if arr is not None: + self.disc_lines[t - 1] = arr + else: + self.disc_lines[t - 1] = self.disc_lines_templ + if "rewards" in info: + for i, other_reward_name in enumerate(self.other_reward_names): + self.other_rewards[t-1, i] = info["rewards"][other_reward_name] + self.times[t - 1] = duration + self.legal[t - 1] = not info["is_illegal"] + self.ambiguous[t - 1] = info["is_ambiguous"] + if done: + self.game_over_timestep = t + # Update metadata + self.meta.update( + nb_timestep_played = t, + cumulative_reward = self.meta["cumulative_reward"] + float(reward), + ) + return self.meta["cumulative_reward"] + + def asdict(self): + """ + Return the Episode Store as a dictionary. + Compatible with Grid2Op's internal EpisodeData format as keyword arguments. + """ + # Other rewards in Grid2op's internal Episode Data is a list of dictionaries, so we convert to that format + other_rewards = [{other_reward_name:float(self.other_rewards[t, i]) for i, other_reward_name in enumerate(self.other_reward_names)} for t in range(len(self.times))] + return dict(actions=self.actions, env_actions=self.env_actions, + observations=self.observations, + rewards=self.rewards, + other_rewards=other_rewards, + disc_lines=self.disc_lines, times=self.times, + disc_lines_templ=self.disc_lines_templ, attack_templ=self.attack_templ, + attack=self.attacks, legal=self.legal, ambiguous=self.ambiguous, + observation_space=self.observation_space, action_space=self.action_space, + attack_space=self.attack_space, helper_action_env=self.env_modification_space) + + def store_metadata(self): + """ + Store this Episode's meta data to disk. + """ + with open(self.exp_dir / f"{self.ep_id}_metadata.json", "w", encoding="utf-8") as f: + json.dump(self.meta, f, indent=4, sort_keys=True) + + def load_metadata(self, ep_id:str): + """ + Load metadata from a specific Episode. + """ + with open(self.exp_dir / f"{ep_id}_metadata.json", "r", encoding="utf-8") as f: + self.meta = json.load(f) + self.other_reward_names = self.meta["other_reward_names"] + self.game_over_timestep = self.meta["game_over_timestep"] + + def store_spaces(self): + """ + Store the Observation, Action, Environment and Opponent spaces to disk. 
+ """ + for space_name in self.space_names: + with open(self.exp_dir / f"dict_{space_name}.json", "w", encoding="utf-8") as f: + json.dump(getattr(self, space_name).cls_to_dict(), f, indent=4, sort_keys=True) + + def load_spaces(self): + """ + Load the Observation, Action, Environment and Opponent spaces from disk + """ + for space_name in self.space_names: + with open(self.exp_dir / f"dict_{space_name}.json", "r", encoding="utf-8") as f: + if space_name == "observation_space": + setattr(self, space_name, ObservationSpace.from_dict(json.load(f))) + else: + setattr(self, space_name, ActionSpace.from_dict(json.load(f))) + + def store_arrays(self): + """ + Store compressed versions of the Actions, Observations, Rewards, Attacks and other metadata + to disk as a compressed numpy archive (single file per episode). + """ + np.savez_compressed(self.exp_dir / f"{self.ep_id}.npz", **{array_name: getattr(self, array_name) for array_name in self.array_names}) + + def load_arrays(self, ep_id:str): + """ + Load Actions, Observations, Rewards, Attacks and other metadata from disk + for a specific Episode ID (identified by Chronics name) + """ + arrays = np.load(self.exp_dir / f"{ep_id}.npz") + for array_name in self.array_names: + setattr(self, array_name, arrays[array_name]) + self.ep_id = ep_id + + def to_disk(self): + """ + Store this EpisodeStore object instance to disk (as .json and .npz files) + """ + if self.exp_dir is not None: + # Store Episode metadata + self.store_metadata() + # Store Spaces (values are static, so only save once per experiment) + if len([f for f in self.exp_dir.glob("*.json")]) != 4: + self.store_spaces() + # Store Arrays as Compressed Numpy archive + self.store_arrays() + + @classmethod + def from_disk(cls, path, ep_id:str): + """ + Load EpisodeStore data from disk for a specific episode. + """ + return cls(env=None, obs=None, exp_dir=p(path), ep_id=ep_id) + + @staticmethod + def list_episode(path): + """ + From a given path, extracts the episodes that can be loaded + + Parameters + ---------- + path: ``str`` + The path where to look for data coming from "episode" + + Returns + ------- + res: ``list`` + A list of possible episodes. Each element of this list is a tuple: (full_path, episode_name) + """ + return [(str(full_path), full_path.stem) for full_path in path.glob("*.npz")] + + def __len__(self): + return self.game_over_timestep + + def make_serializable(self): + """ + INTERNAL + + .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\ + Used by he runner to serialize properly an episode + + Called in the _aux_run_one_episode (one of the Runner auxilliary function) to make + sure the EpisodeData can be sent back to the main process withtout issue (otherwise + there is a complain about the _ObsEnv) + """ + from grid2op.Episode.EpisodeData import EpisodeData + EpisodeData._aux_make_obs_space_serializable(self) diff --git a/grid2op/Episode/EpisodeData.py b/grid2op/Episode/EpisodeData.py index e06ac7325..1925fd7ba 100644 --- a/grid2op/Episode/EpisodeData.py +++ b/grid2op/Episode/EpisodeData.py @@ -800,6 +800,48 @@ def to_disk(self): dict_ = {"version": f"{grid2op.__version__}"} json.dump(obj=dict_, fp=f, indent=4, sort_keys=True) + def _aux_make_obs_space_serializable(self): + """I put it here because it's also used by CompactEpisodeData. 
+ + The only requirement is that `self` has an attribute `observation_space` which is a + valid grid2op ObservationSpace""" + if self.observation_space is None: + return + from grid2op.Environment._obsEnv import _ObsEnv + # remove the observation_env of the observation_space + self.observation_space = self.observation_space.copy(copy_backend=True) + self.observation_space._backend_obs.close() + self.observation_space._backend_obs = None + self.observation_space.obs_env.close() + self.observation_space.obs_env = None + self.observation_space._ObsEnv_class = _ObsEnv + self.observation_space._real_env_kwargs = None + self.observation_space._template_obj._obs_env = None + self.observation_space._template_obj._ptr_kwargs_env = None + self.observation_space._empty_obs._obs_env = None + self.observation_space._empty_obs._ptr_kwargs_env = None + self.observation_space._deactivate_simulate(None) + + def make_serializable(self): + """ + INTERNAL + + .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\ + Used by the runner to properly serialize an episode + + Called in the _aux_run_one_episode (one of the Runner auxiliary functions) to make + sure the EpisodeData can be sent back to the main process without issue (otherwise + there is a complaint about the _ObsEnv) + """ + self._aux_make_obs_space_serializable() + # remove the observation_env of the observation + for el in self.observations.objects: + if el is not None: + el._obs_env = None + el._ptr_kwargs_env = None + + self.observations.helper = self.observation_space + @staticmethod def get_grid2op_version(path_episode): """ diff --git a/grid2op/Episode/EpisodeReplay.py b/grid2op/Episode/EpisodeReplay.py index 6213bf450..83aaafc25 100644 --- a/grid2op/Episode/EpisodeReplay.py +++ b/grid2op/Episode/EpisodeReplay.py @@ -15,7 +15,7 @@ from grid2op.Exceptions import Grid2OpException from grid2op.PlotGrid.PlotMatplot import PlotMatplot from grid2op.Episode.EpisodeData import EpisodeData - +from grid2op.Episode.CompactEpisodeData import CompactEpisodeData class EpisodeReplay(object): """ @@ -102,15 +102,15 @@ def replay_episode( load_info: ``str`` Defaults to "p". What kind of values to show on loads. - Can be oneof `["p", "v", None]` + Can be one of `["p", "v", None]` gen_info: ``str`` Defaults to "p". What kind of values to show on generators. - Can be oneof `["p", "v", None]` + Can be one of `["p", "v", None]` line_info: ``str`` Defaults to "rho". What kind of values to show on lines. - Can be oneof `["rho", "a", "p", "v", None]` + Can be one of `["rho", "a", "p", "v", None]` resolution: ``tuple`` Defaults to (1280, 720). The resolution to use for the gif.
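For reference, a minimal usage sketch combining the replay parameters documented above; the agent directory and episode id are placeholders (not taken from this patch) and the exact defaults should be checked against `EpisodeReplay.replay_episode` itself:

    from grid2op.Episode import EpisodeReplay

    # "saved_runs/my_agent" is a placeholder for a directory written by a Runner
    replay = EpisodeReplay(agent_path="saved_runs/my_agent")
    replay.replay_episode(
        "0000",                   # placeholder episode id (name of the chronics folder)
        fps=2.0,                  # frames per second, also used for the exported gif
        gif_name="episode_0000",  # triggers the imageio export handled in the hunk below
        load_info="p",            # active power shown on loads
        gen_info="p",             # active power shown on generators
        line_info="rho",          # line loading shown on powerlines
    )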
@@ -187,13 +187,18 @@ def replay_episode( # Export all frames as gif if enabled if gif_name is not None and len(frames) > 0: try: - imageio.mimwrite(gif_path, frames, fps=fps) + try: + # with imageio > 2.5 you need to compute the duration + imageio.mimwrite(gif_path, frames, duration=1000./fps) + except TypeError: + # imageio <= 2.5 can be given fps directly + imageio.mimwrite(gif_path, frames, fps=fps) # Try to compress try: from pygifsicle import optimize optimize(gif_path, options=["-w", "--no-conserve-memory"]) - except: + except Exception as exc_: warn_msg = ( "Failed to optimize .GIF size, but gif is still saved:\n" "Install dependencies to reduce size by ~3 folds\n" diff --git a/grid2op/Episode/__init__.py b/grid2op/Episode/__init__.py index 46040fba3..12abb7475 100644 --- a/grid2op/Episode/__init__.py +++ b/grid2op/Episode/__init__.py @@ -1,6 +1,7 @@ __all__ = ["EpisodeData"] from grid2op.Episode.EpisodeData import EpisodeData +from grid2op.Episode.CompactEpisodeData import CompactEpisodeData # Try to import optional module try: diff --git a/grid2op/Exceptions/__init__.py b/grid2op/Exceptions/__init__.py index f25ca1d26..f75a3bba6 100644 --- a/grid2op/Exceptions/__init__.py +++ b/grid2op/Exceptions/__init__.py @@ -52,6 +52,7 @@ "IsolatedElement", "DisconnectedLoad", "DisconnectedGenerator", + "ImpossibleTopology", "PlotError", "OpponentError", "UsedRunnerError", @@ -124,6 +125,8 @@ IsolatedElement, DisconnectedLoad, DisconnectedGenerator, + ImpossibleTopology, + ) DivergingPowerFlow = DivergingPowerflow # for compatibility with lightsim2grid diff --git a/grid2op/Exceptions/backendExceptions.py b/grid2op/Exceptions/backendExceptions.py index 297c63d69..e70cd645b 100644 --- a/grid2op/Exceptions/backendExceptions.py +++ b/grid2op/Exceptions/backendExceptions.py @@ -53,3 +53,10 @@ class DisconnectedLoad(BackendError): class DisconnectedGenerator(BackendError): """Specific error raised by the backend when a generator is disconnected""" pass + + +class ImpossibleTopology(BackendError): + """Specific error raised by the backend :func:`grid2op.Backend.Backend.apply_action` + when the player asked a topology (for example using `set_bus`) that + cannot be applied by the backend. 
+ """ \ No newline at end of file diff --git a/grid2op/MakeEnv/Make.py b/grid2op/MakeEnv/Make.py index 8dbb24104..15bd5b6c3 100644 --- a/grid2op/MakeEnv/Make.py +++ b/grid2op/MakeEnv/Make.py @@ -247,17 +247,21 @@ def _aux_make_multimix( dataset_path, test=False, experimental_read_from_local_dir=False, + n_busbar=2, _add_to_name="", _compat_glop_version=None, + _overload_name_multimix=None, logger=None, **kwargs ) -> Environment: # Local import to prevent imports loop from grid2op.Environment import MultiMixEnvironment - + if _overload_name_multimix is not None: + raise RuntimeError("You should not create a MultiMix with `_overload_name_multimix`.") return MultiMixEnvironment( dataset_path, experimental_read_from_local_dir=experimental_read_from_local_dir, + n_busbar=n_busbar, _test=test, _add_to_name=_add_to_name, _compat_glop_version=_compat_glop_version, @@ -266,14 +270,25 @@ def _aux_make_multimix( ) +def _get_path_multimix(_overload_name_multimix) -> str: + baseenv_path, multi_mix_name, add_to_name = _overload_name_multimix + if os.path.exists(baseenv_path): + return baseenv_path + if multi_mix_name in TEST_DEV_ENVS: + return TEST_DEV_ENVS[multi_mix_name] + raise Grid2OpException(f"Unknown multimix environment with name {multi_mix_name} that should be located at {baseenv_path}.") + + def make( dataset : Union[str, os.PathLike], *, test : bool=False, logger: Optional[logging.Logger]=None, experimental_read_from_local_dir : bool=False, + n_busbar=2, _add_to_name : str="", _compat_glop_version : Optional[str]=None, + _overload_name_multimix : Optional[str]=None, # do not use ! **kwargs ) -> Environment: """ @@ -286,6 +301,9 @@ def make( .. versionchanged:: 1.9.3 Remove the possibility to use this function with arguments (force kwargs) + + .. versionadded:: 1.10.0 + The `n_busbar` parameters Parameters ---------- @@ -308,6 +326,9 @@ def make( processing, you can set this flag to ``True``. See the doc of :func:`grid2op.Environment.BaseEnv.generate_classes` for more information. + n_busbar: ``int`` + Number of independant busbars allowed per substations. By default it's 2. + kwargs: Other keyword argument to give more control on the environment you are creating. See the Parameters information of the :func:`make_from_dataset_path`. @@ -318,6 +339,9 @@ def make( _compat_glop_version: Internal, do not use (and can only be used when setting "test=True") + + _overload_name_multimix: + Internal, do not use ! Returns ------- @@ -350,7 +374,15 @@ def make( raise Grid2OpException("Impossible to create an environment without its name. 
Please call something like: \n" "> env = grid2op.make('l2rpn_case14_sandbox') \nor\n" "> env = grid2op.make('rte_case14_realistic')") + try: + n_busbar_int = int(n_busbar) + except Exception as exc_: + raise Grid2OpException("n_busbar parameter should be convertible to an integer") from exc_ + if n_busbar != n_busbar_int: + raise Grid2OpException(f"n_busbar parameter should be convertible to an integer, but we have " + f"int(n_busbar) = {n_busbar_int} != {n_busbar}") + accepted_kwargs = ERR_MSG_KWARGS.keys() | {"dataset", "test"} for el in kwargs: if el not in accepted_kwargs: @@ -402,6 +434,8 @@ def make_from_path_fn_(*args, **kwargs): dataset_path=dataset, _add_to_name=_add_to_name_tmp, _compat_glop_version=_compat_glop_version_tmp, + _overload_name_multimix=_overload_name_multimix, + n_busbar=n_busbar, **kwargs ) @@ -412,7 +446,7 @@ def make_from_path_fn_(*args, **kwargs): ) # Unknown dev env - if test and dataset_name not in TEST_DEV_ENVS: + if _overload_name_multimix is None and test and dataset_name not in TEST_DEV_ENVS: raise Grid2OpException(_MAKE_UNKNOWN_ENV.format(dataset)) # Known test env and test flag enabled @@ -425,7 +459,13 @@ def make_from_path_fn_(*args, **kwargs): or dataset_name.startswith("educ") ): warnings.warn(_MAKE_DEV_ENV_DEPRECATED_WARN.format(dataset_name)) - ds_path = TEST_DEV_ENVS[dataset_name] + if _overload_name_multimix: + # make is invoked from a Multimix + path_multimix = _get_path_multimix(_overload_name_multimix) + ds_path = os.path.join(path_multimix, dataset_name) + else: + # normal behaviour + ds_path = TEST_DEV_ENVS[dataset_name] # Check if multimix from path if _aux_is_multimix(ds_path): @@ -441,9 +481,11 @@ def make_from_path_fn_(*args, **kwargs): return make_from_path_fn( dataset_path=ds_path, logger=logger, + n_busbar=n_busbar, _add_to_name=_add_to_name, _compat_glop_version=_compat_glop_version, experimental_read_from_local_dir=experimental_read_from_local_dir, + _overload_name_multimix=_overload_name_multimix, **kwargs ) @@ -454,7 +496,9 @@ def make_from_path_fn_(*args, **kwargs): return make_from_path_fn( real_ds_path, logger=logger, + n_busbar=n_busbar, experimental_read_from_local_dir=experimental_read_from_local_dir, + _overload_name_multimix=_overload_name_multimix, **kwargs ) @@ -472,6 +516,8 @@ def make_from_path_fn_(*args, **kwargs): return make_from_path_fn( dataset_path=real_ds_path, logger=logger, + n_busbar=n_busbar, experimental_read_from_local_dir=experimental_read_from_local_dir, + _overload_name_multimix=_overload_name_multimix, **kwargs ) diff --git a/grid2op/MakeEnv/MakeFromPath.py b/grid2op/MakeEnv/MakeFromPath.py index 98054513f..ff85d56f7 100644 --- a/grid2op/MakeEnv/MakeFromPath.py +++ b/grid2op/MakeEnv/MakeFromPath.py @@ -7,19 +7,26 @@ # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems.
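For context, the new `n_busbar` keyword validated above can be exercised as follows (a sketch; the environment name is the same example used in the error message above):

    import grid2op

    # ask for 3 independent busbars per substation instead of the default 2
    env = grid2op.make("l2rpn_case14_sandbox", n_busbar=3)

    # a value that does not round-trip through int(), e.g. n_busbar=2.5,
    # is rejected with the Grid2OpException raised above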
import os +import time +import copy import importlib.util import numpy as np import json import warnings +from grid2op.MakeEnv.PathUtils import USE_CLASS_IN_FILE from grid2op.Environment import Environment from grid2op.Backend import Backend, PandaPowerBackend from grid2op.Opponent.opponentSpace import OpponentSpace from grid2op.Parameters import Parameters -from grid2op.Chronics import ChronicsHandler, ChangeNothing, FromNPY, FromChronix2grid -from grid2op.Chronics import GridStateFromFile, GridValue +from grid2op.Chronics import (ChronicsHandler, + ChangeNothing, + FromNPY, + FromChronix2grid, + GridStateFromFile, + GridValue) from grid2op.Action import BaseAction, DontAct -from grid2op.Exceptions import * +from grid2op.Exceptions import EnvError from grid2op.Observation import CompleteObservation, BaseObservation from grid2op.Reward import BaseReward, L2RPNReward from grid2op.Rules import BaseRules, DefaultRules @@ -28,6 +35,8 @@ from grid2op.operator_attention import LinearAttentionBudget from grid2op.MakeEnv.get_default_aux import _get_default_aux +from grid2op.MakeEnv.PathUtils import _aux_fix_backend_internal_classes + DIFFICULTY_NAME = "difficulty" CHALLENGE_NAME = "competition" @@ -87,7 +96,9 @@ "obs.simulate and obs.get_forecasted_env). If provided, this should " "be a type / class and not an instance of this class. (by default it's None)"), "observation_backend_kwargs": ("key-word arguments to build the observation backend (used for Simulator, " - " obs.simulate and obs.get_forecasted_env). This should be a dictionnary. (by default it's None)") + " obs.simulate and obs.get_forecasted_env). This should be a dictionary. (by default it's None)"), + "class_in_file": ("experimental: tell grid2op to store the generated classes on the hard drive " + "which can solve lots of pickle / multi processing related issues"), } NAME_CHRONICS_FOLDER = "chronics" @@ -115,8 +126,10 @@ def make_from_dataset_path( dataset_path="/", logger=None, experimental_read_from_local_dir=False, + n_busbar=2, _add_to_name="", _compat_glop_version=None, + _overload_name_multimix=None, **kwargs, ) -> Environment: """ @@ -150,6 +163,9 @@ def make_from_dataset_path( backend: ``grid2op.Backend.Backend``, optional The backend to use for the computation. If provided, it must be an instance of :class:`grid2op.Backend.Backend`. + n_busbar: ``int`` + Number of independent busbars allowed per substation. By default it's 2. + action_class: ``type``, optional Type of BaseAction the BaseAgent will be able to perform.
If provided, it must be a subclass of :class:`grid2op.BaseAction.BaseAction` @@ -270,7 +286,7 @@ def make_from_dataset_path( # Compute env name from directory name name_env = os.path.split(dataset_path_abs)[1] - + # Compute and find chronics folder chronics_path = _get_default_aux( "chronics_path", @@ -350,7 +366,7 @@ def make_from_dataset_path( else: is_none = False names_chronics_to_backend = _get_default_aux( - "names_chronics_to_backend", + "names_chronics_to_grid", kwargs, defaultClassApp=dict, defaultinstance=name_converter, @@ -358,7 +374,7 @@ def make_from_dataset_path( ) if is_none and names_chronics_to_backend == {}: names_chronics_to_backend = None - + # Get default backend class backend_class_cfg = PandaPowerBackend if "backend_class" in config_data and config_data["backend_class"] is not None: @@ -462,7 +478,7 @@ def make_from_dataset_path( try: int_ = int(el) available_parameters_int[int_] = el - except: + except Exception as exc_: pass max_ = np.max(list(available_parameters_int.keys())) keys_ = available_parameters_int[max_] @@ -552,21 +568,22 @@ def make_from_dataset_path( default_chronics_kwargs = { "path": chronics_path_abs, "chronicsClass": chronics_class_cfg, - # "gridvalueClass": grid_value_class_cfg, } + dfkwargs_cfg = {} # in the config if "data_feeding_kwargs" in config_data and config_data["data_feeding_kwargs"] is not None: dfkwargs_cfg = config_data["data_feeding_kwargs"] for el in dfkwargs_cfg: default_chronics_kwargs[el] = dfkwargs_cfg[el] - data_feeding_kwargs = _get_default_aux( + data_feeding_kwargs_user_prov = _get_default_aux( "data_feeding_kwargs", kwargs, defaultClassApp=dict, defaultinstance=default_chronics_kwargs, msg_error=ERR_MSG_KWARGS["data_feeding_kwargs"], ) + data_feeding_kwargs = data_feeding_kwargs_user_prov.copy() for el in default_chronics_kwargs: if el not in data_feeding_kwargs: data_feeding_kwargs[el] = default_chronics_kwargs[el] @@ -582,7 +599,9 @@ def make_from_dataset_path( isclass=True, ) if ( - (chronics_class_used != ChangeNothing) and (chronics_class_used != FromNPY) and (chronics_class_used != FromChronix2grid) + ((chronics_class_used != ChangeNothing) and + (chronics_class_used != FromNPY) and + (chronics_class_used != FromChronix2grid)) ) and exc_chronics is not None: raise EnvError( f"Impossible to find the chronics for your environment. 
Please make sure to provide " @@ -595,7 +614,25 @@ def make_from_dataset_path( # parameters is not given in the "make" function but present in the config file if "gridvalueClass" not in data_feeding_kwargs: data_feeding_kwargs["gridvalueClass"] = grid_value_class_cfg - + + + # code bellow is added to fix + # https://github.com/rte-france/Grid2Op/issues/593 + import inspect + possible_params = inspect.signature(data_feeding_kwargs["gridvalueClass"].__init__).parameters + data_feeding_kwargs_res = data_feeding_kwargs.copy() + for el in data_feeding_kwargs: + if el == "gridvalueClass": + continue + if el == "chronicsClass": + continue + if el not in possible_params: + # if it's in the config but is not supported by the + # user, then we ignore it + # see https://github.com/rte-france/Grid2Op/issues/593 + if el in dfkwargs_cfg and not el in data_feeding_kwargs_user_prov: + del data_feeding_kwargs_res[el] + data_feeding_kwargs = data_feeding_kwargs_res # now build the chronics handler data_feeding = _get_default_aux( "data_feeding", @@ -787,24 +824,6 @@ def make_from_dataset_path( isclass=False, ) - if experimental_read_from_local_dir: - sys_path = os.path.join(os.path.split(grid_path_abs)[0], "_grid2op_classes") - if not os.path.exists(sys_path): - raise RuntimeError( - "Attempting to load the grid classes from the env path. Yet the directory " - "where they should be placed does not exists. Did you call `env.generate_classes()` " - "BEFORE creating an environment with `experimental_read_from_local_dir=True` ?" - ) - if not os.path.isdir(sys_path) or not os.path.exists( - os.path.join(sys_path, "__init__.py") - ): - raise RuntimeError( - f"Impossible to load the classes from the env path. There is something that is " - f"not a directory and that is called `_grid2op_classes`. " - f'Please remove "{sys_path}" and call `env.generate_classes()` where env is an ' - f"environment created with `experimental_read_from_local_dir=False` (default)" - ) - # observation key word arguments kwargs_observation = _get_default_aux( "kwargs_observation", @@ -856,8 +875,134 @@ def make_from_dataset_path( ) if observation_backend_kwargs is observation_backend_kwargs_cfg_: observation_backend_kwargs = None + + # new in 1.10.2 : + allow_loaded_backend = False + classes_path = None + init_env = None + this_local_dir = None + use_class_in_files = USE_CLASS_IN_FILE + if "class_in_file" in kwargs: + classes_in_file_kwargs = bool(kwargs["class_in_file"]) + use_class_in_files = classes_in_file_kwargs + if use_class_in_files: + # new behaviour + sys_path = os.path.join(os.path.split(grid_path_abs)[0], "_grid2op_classes") + if not os.path.exists(sys_path): + try: + os.mkdir(sys_path) + except FileExistsError: + # if another process created it, no problem + pass + init_nm = os.path.join(sys_path, "__init__.py") + if not os.path.exists(init_nm): + try: + with open(init_nm, "w", encoding="utf-8") as f: + f.write("This file has been created by grid2op in a `env.make(...)` call. Do not modify it or remove it") + except FileExistsError: + pass + + import tempfile + this_local_dir = tempfile.TemporaryDirectory(dir=sys_path) + + if experimental_read_from_local_dir: + warnings.warn("With the automatic class generation, we removed the possibility to " + "set `experimental_read_from_local_dir` to True.") + experimental_read_from_local_dir = False + # TODO: check the hash thingy is working in baseEnv._aux_gen_classes (currently a pdb) + + # TODO check that it works if the backend changes, if shunt / no_shunt if name of env changes etc. 
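# Illustration only, not part of the patch: the `class_in_file` behaviour introduced
# here can be opted into per call, through `~/.grid2opconfig.json` ({"class_in_file": true}),
# or through the `grid2op_class_in_file` environment variable handled in PathUtils.py
# further down. A minimal sketch, assuming USE_CLASS_IN_FILE keeps its default of False:
#
#     import grid2op
#     env = grid2op.make("l2rpn_case14_sandbox", class_in_file=True)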
+ + # TODO: what if it cannot write on disk => fallback to previous behaviour + data_feeding_fake = copy.deepcopy(data_feeding) + data_feeding_fake.cleanup_action_space() + + # Set graph layout if not None and not an empty dict + if graph_layout is not None and graph_layout: + type(backend).attach_layout(graph_layout) + + if not os.path.exists(this_local_dir.name): + raise EnvError(f"Path {this_local_dir.name} has not been created by the tempfile package") + init_env = Environment(init_env_path=os.path.abspath(dataset_path), + init_grid_path=grid_path_abs, + chronics_handler=data_feeding_fake, + backend=backend, + parameters=param, + name=name_env + _add_to_name, + names_chronics_to_backend=names_chronics_to_backend, + actionClass=action_class, + observationClass=observation_class, + rewardClass=reward_class, + legalActClass=gamerules_class, + voltagecontrolerClass=volagecontroler_class, + other_rewards=other_rewards, + opponent_space_type=opponent_space_type, + opponent_action_class=opponent_action_class, + opponent_class=opponent_class, + opponent_init_budget=opponent_init_budget, + opponent_attack_duration=opponent_attack_duration, + opponent_attack_cooldown=opponent_attack_cooldown, + opponent_budget_per_ts=opponent_budget_per_ts, + opponent_budget_class=opponent_budget_class, + kwargs_opponent=kwargs_opponent, + has_attention_budget=has_attention_budget, + attention_budget_cls=attention_budget_class, + kwargs_attention_budget=kwargs_attention_budget, + logger=logger, + n_busbar=n_busbar, # TODO n_busbar_per_sub different num per substations: read from a config file maybe (if not provided by the user) + _compat_glop_version=_compat_glop_version, + _read_from_local_dir=None, # first environment to generate the classes and save them + _local_dir_cls=None, + _overload_name_multimix=_overload_name_multimix, + kwargs_observation=kwargs_observation, + observation_bk_class=observation_backend_class, + observation_bk_kwargs=observation_backend_kwargs + ) + if not os.path.exists(this_local_dir.name): + raise EnvError(f"Path {this_local_dir.name} has not been created by the tempfile package") + init_env.generate_classes(local_dir_id=this_local_dir.name) + # fix `my_bk_act_class` and `_complete_action_class` + _aux_fix_backend_internal_classes(type(backend), this_local_dir) + init_env.backend = None # to avoid to close the backend when init_env is deleted + init_env._local_dir_cls = None + classes_path = this_local_dir.name + allow_loaded_backend = True + else: + # legacy behaviour (<= 1.10.1 behaviour) + classes_path = None if not experimental_read_from_local_dir else experimental_read_from_local_dir + if experimental_read_from_local_dir: + if _overload_name_multimix is not None: + # I am in a multimix + if _overload_name_multimix[0] is None: + # first mix: path is correct + sys_path = os.path.join(os.path.split(grid_path_abs)[0], "_grid2op_classes") + else: + # other mixes I need to retrieve the properties of the first mix + sys_path = _overload_name_multimix[0] + else: + # I am not in a multimix + sys_path = os.path.join(os.path.split(grid_path_abs)[0], "_grid2op_classes") + if not os.path.exists(sys_path): + raise RuntimeError( + "Attempting to load the grid classes from the env path. Yet the directory " + "where they should be placed does not exists. Did you call `env.generate_classes()` " + "BEFORE creating an environment with `experimental_read_from_local_dir=True` ?" 
+ ) + if not os.path.isdir(sys_path) or not os.path.exists( + os.path.join(sys_path, "__init__.py") + ): + raise RuntimeError( + f"Impossible to load the classes from the env path. There is something that is " + f"not a directory and that is called `_grid2op_classes`. " + f'Please remove "{sys_path}" and call `env.generate_classes()` where env is an ' + f"environment created with `experimental_read_from_local_dir=False` (default)" + ) + import sys + sys.path.append(os.path.split(os.path.abspath(sys_path))[0]) + classes_path = sys_path # Finally instantiate env from config & overrides + # including (if activated the new grid2op behaviour) env = Environment( init_env_path=os.path.abspath(dataset_path), init_grid_path=grid_path_abs, @@ -885,17 +1030,20 @@ def make_from_dataset_path( attention_budget_cls=attention_budget_class, kwargs_attention_budget=kwargs_attention_budget, logger=logger, + n_busbar=n_busbar, # TODO n_busbar_per_sub different num per substations: read from a config file maybe (if not provided by the user) _compat_glop_version=_compat_glop_version, - _read_from_local_dir=experimental_read_from_local_dir, + _read_from_local_dir=classes_path, + _allow_loaded_backend=allow_loaded_backend, + _local_dir_cls=this_local_dir, + _overload_name_multimix=_overload_name_multimix, kwargs_observation=kwargs_observation, observation_bk_class=observation_backend_class, - observation_bk_kwargs=observation_backend_kwargs, - ) - + observation_bk_kwargs=observation_backend_kwargs + ) # Update the thermal limit if any if thermal_limits is not None: env.set_thermal_limit(thermal_limits) - + # Set graph layout if not None and not an empty dict if graph_layout is not None and graph_layout: env.attach_layout(graph_layout) diff --git a/grid2op/MakeEnv/PathUtils.py b/grid2op/MakeEnv/PathUtils.py index 8551f39ce..ece6a551f 100644 --- a/grid2op/MakeEnv/PathUtils.py +++ b/grid2op/MakeEnv/PathUtils.py @@ -10,16 +10,47 @@ import os import json + DEFAULT_PATH_CONFIG = os.path.expanduser("~/.grid2opconfig.json") DEFAULT_PATH_DATA = os.path.expanduser("~/data_grid2op") +USE_CLASS_IN_FILE = False # set to True for new behaviour (will be set to True in grid2op 1.11) + + KEY_DATA_PATH = "data_path" +KEY_CLASS_IN_FILE = "class_in_file" +KEY_CLASS_IN_FILE_ENV_VAR = f"grid2op_{KEY_CLASS_IN_FILE}" +def str_to_bool(string: str) -> bool: + """convert a "string" to a boolean, with the convention: + + - "t", "y", "yes", "true", "True", "TRUE" etc. returns True + - "false", "False", "FALSE" etc. returns False + - "1" returns True + - "0" returns False + + """ + string_ = string.lower() + if string_ in ["t", "true", "y", "yes", "on", "1"]: + return True + if string_ in ["f", "false", "n", "no", "off", "0"]: + return False + raise ValueError(f"Uknown way to convert `{string}` to a boolean. 
Please either set it to \"1\" or \"0\"") + + if os.path.exists(DEFAULT_PATH_CONFIG): with open(DEFAULT_PATH_CONFIG, "r") as f: dict_ = json.load(f) if KEY_DATA_PATH in dict_: DEFAULT_PATH_DATA = os.path.abspath(dict_[KEY_DATA_PATH]) + + if KEY_CLASS_IN_FILE in dict_: + USE_CLASS_IN_FILE = bool(dict_[KEY_CLASS_IN_FILE]) + if KEY_CLASS_IN_FILE_ENV_VAR in os.environ: + try: + USE_CLASS_IN_FILE = str_to_bool(os.environ[KEY_CLASS_IN_FILE_ENV_VAR]) + except ValueError as exc: + raise RuntimeError(f"Impossible to read the behaviour from `{KEY_CLASS_IN_FILE_ENV_VAR}` environment variable") from exc def _create_path_folder(data_path): @@ -33,3 +64,10 @@ def _create_path_folder(data_path): 'and set the "data_path" to point to a path where you can store data.' "".format(data_path, DEFAULT_PATH_CONFIG) ) + + +def _aux_fix_backend_internal_classes(backend_cls, this_local_dir): + # fix `my_bk_act_class` and `_complete_action_class` + backend_cls._add_internal_classes(this_local_dir) + tmp = {} + backend_cls._make_cls_dict_extended(backend_cls, tmp, as_list=False) diff --git a/grid2op/MakeEnv/UpdateEnv.py b/grid2op/MakeEnv/UpdateEnv.py index 01413f94e..abb2c208a 100644 --- a/grid2op/MakeEnv/UpdateEnv.py +++ b/grid2op/MakeEnv/UpdateEnv.py @@ -5,9 +5,11 @@ # you can obtain one at http://mozilla.org/MPL/2.0/. # SPDX-License-Identifier: MPL-2.0 # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. -import time +import time import os +import re + import grid2op.MakeEnv.PathUtils from grid2op.Exceptions import UnknownEnv @@ -157,18 +159,45 @@ def _update_files(env_name=None, answer_json=None, env_hashes=None): ) +def _aux_get_hash_if_none(hash_=None): + """Auxiliary function used to avoid copy pasting the `hash_ = hashlib.blake2b()` part and that can + be further changed if another hash is better later. + + Do not modify unless you have a good reason to. + """ + if hash_ is None: + # we use this as it is supposedly faster than md5 + # we don't really care about the "secure" part of it (though it's a nice tool to have) + import hashlib # lazy import + hash_ = hashlib.blake2b() + return hash_ + + +def _aux_update_hash_text(text_, hash_=None): + hash_ = _aux_get_hash_if_none(hash_) + text_ = re.sub("\s", "", text_) + hash_.update(text_.encode("utf-8")) + return hash_ + + +def _aux_hash_file(full_path_file, hash_=None): + hash_ = _aux_get_hash_if_none(hash_) + with open(full_path_file, "r", encoding="utf-8") as f: + text_ = f.read() + # this is done to ensure compatibility between platforms + # sometimes git replaces the "\r\n" in windows with "\n" on linux / macos and it messes + # up the hash + _aux_update_hash_text(text_, hash_) + return hash_ + + # TODO make that a method of the environment maybe ? def _hash_env( path_local_env, hash_=None, blocksize=64, # TODO is this correct ?
): - import hashlib # lazy import - - if hash_ is None: - # we use this as it is supposedly faster than md5 - # we don't really care about the "secure" part of it (though it's a nice tool to have) - hash_ = hashlib.blake2b() + hash_ = _aux_get_hash_if_none(hash_) if os.path.exists(os.path.join(path_local_env, ".multimix")): # this is a multi mix, so i need to run through all sub env mixes = sorted(os.listdir(path_local_env)) @@ -197,17 +226,9 @@ def _hash_env( "scenario_params.json", ]: # list the file we want to hash (we don't hash everything full_path_file = os.path.join(path_local_env, fn_) - import re if os.path.exists(full_path_file): - with open(full_path_file, "r", encoding="utf-8") as f: - text_ = f.read() - text_ = re.sub( - "\s", "", text_ - ) # this is done to ensure a compatibility between platform - # sometime git replaces the "\r\n" in windows with "\n" on linux / macos and it messes - # up the hash - hash_.update(text_.encode("utf-8")) + _aux_hash_file(full_path_file, hash_) # now I hash the chronics # but as i don't want to read every chronics (for time purposes) i will only hash the names diff --git a/grid2op/MakeEnv/UserUtils.py b/grid2op/MakeEnv/UserUtils.py index e7b0e7de9..3400f95c3 100644 --- a/grid2op/MakeEnv/UserUtils.py +++ b/grid2op/MakeEnv/UserUtils.py @@ -163,12 +163,12 @@ def change_local_dir(new_path): try: new_path = str(new_path) - except: + except Exception as exc_: raise Grid2OpException( 'The new path should be convertible to str. It is currently "{}"'.format( new_path ) - ) + ) from exc_ root_dir = os.path.split(new_path)[0] if not os.path.exists(root_dir): @@ -190,21 +190,21 @@ def change_local_dir(new_path): try: with open(DEFAULT_PATH_CONFIG, "r", encoding="utf-8") as f: newconfig = json.load(f) - except: + except Exception as exc_: raise Grid2OpException( 'Impossible to read the grid2op configuration files "{}". Make sure it is a ' 'valid json encoded with "utf-8" encoding.'.format(DEFAULT_PATH_CONFIG) - ) + ) from exc_ newconfig[KEY_DATA_PATH] = new_path try: with open(DEFAULT_PATH_CONFIG, "w", encoding="utf-8") as f: json.dump(fp=f, obj=newconfig, sort_keys=True, indent=4) - except: + except Exception as exc_: raise Grid2OpException( 'Impossible to write the grid2op configuration files "{}". 
Make sure you have ' "writing access to it.".format(DEFAULT_PATH_CONFIG) - ) + ) from exc_ grid2op.MakeEnv.PathUtils.DEFAULT_PATH_DATA = new_path diff --git a/grid2op/MakeEnv/__init__.py b/grid2op/MakeEnv/__init__.py index adc186f8c..00eaef8c5 100644 --- a/grid2op/MakeEnv/__init__.py +++ b/grid2op/MakeEnv/__init__.py @@ -7,11 +7,15 @@ "change_local_dir", "list_available_test_env", "update_env", - # deprecated in v 0.8.0 - "make_old", ] -from grid2op.MakeEnv.MakeOld import make_old +# try: +# from grid2op.MakeEnv.MakeOld import make_old +# # deprecated in v 0.8.0 +# __all__.append("make_old") +# except ImportError: +# pass + from grid2op.MakeEnv.MakeFromPath import make_from_dataset_path from grid2op.MakeEnv.Make import make from grid2op.MakeEnv.UserUtils import ( diff --git a/grid2op/MakeEnv/get_default_aux.py b/grid2op/MakeEnv/get_default_aux.py index 364418177..423127d74 100644 --- a/grid2op/MakeEnv/get_default_aux.py +++ b/grid2op/MakeEnv/get_default_aux.py @@ -143,7 +143,7 @@ def _get_default_aux( res = defaultClassApp(res) except Exception as exc_: # if there is any error, i raise the error message - raise EnvError(msg_error) + raise EnvError(msg_error) from exc_ else: # if there is any error, i raise the error message raise EnvError(msg_error) diff --git a/grid2op/Observation/baseObservation.py b/grid2op/Observation/baseObservation.py index 1c0a259fa..e1c1016ca 100644 --- a/grid2op/Observation/baseObservation.py +++ b/grid2op/Observation/baseObservation.py @@ -14,7 +14,16 @@ import numpy as np from scipy.sparse import csr_matrix from typing import Optional +from packaging import version +from typing import Dict, Union, Tuple, List, Optional, Any, Literal +try: + from typing import Self +except ImportError: + from typing_extensions import Self + +import grid2op # for type hints +from grid2op.typing_variables import STEP_INFO_TYPING from grid2op.dtypes import dt_int, dt_float, dt_bool from grid2op.Exceptions import ( Grid2OpException, @@ -103,44 +112,44 @@ class BaseObservation(GridObjects): voltage angles (see :attr:`BaseObservation.support_theta`). p_or: :class:`numpy.ndarray`, dtype:float - The active power flow at the origin end of each powerline (expressed in MW). + The active power flow at the origin side of each powerline (expressed in MW). q_or: :class:`numpy.ndarray`, dtype:float - The reactive power flow at the origin end of each powerline (expressed in MVar). + The reactive power flow at the origin side of each powerline (expressed in MVar). v_or: :class:`numpy.ndarray`, dtype:float - The voltage magnitude at the bus to which the origin end of each powerline is connected (expressed in kV). + The voltage magnitude at the bus to which the origin side of each powerline is connected (expressed in kV). theta_or: :class:`numpy.ndarray`, dtype:float - The voltage angle at the bus to which the origin end of each powerline + The voltage angle at the bus to which the origin side of each powerline is connected (expressed in degree). Only availble if the backend supports the retrieval of voltage angles (see :attr:`BaseObservation.support_theta`). a_or: :class:`numpy.ndarray`, dtype:float - The current flow at the origin end of each powerline (expressed in A). + The current flow at the origin side of each powerline (expressed in A). p_ex: :class:`numpy.ndarray`, dtype:float - The active power flow at the extremity end of each powerline (expressed in MW). + The active power flow at the extremity side of each powerline (expressed in MW). 
q_ex: :class:`numpy.ndarray`, dtype:float - The reactive power flow at the extremity end of each powerline (expressed in MVar). + The reactive power flow at the extremity side of each powerline (expressed in MVar). v_ex: :class:`numpy.ndarray`, dtype:float - The voltage magnitude at the bus to which the extremity end of each powerline is connected (expressed in kV). + The voltage magnitude at the bus to which the extremity side of each powerline is connected (expressed in kV). theta_ex: :class:`numpy.ndarray`, dtype:float - The voltage angle at the bus to which the extremity end of each powerline + The voltage angle at the bus to which the extremity side of each powerline is connected (expressed in degree). Only availble if the backend supports the retrieval of voltage angles (see :attr:`BaseObservation.support_theta`). a_ex: :class:`numpy.ndarray`, dtype:float - The current flow at the extremity end of each powerline (expressed in A). + The current flow at the extremity side of each powerline (expressed in A). rho: :class:`numpy.ndarray`, dtype:float The capacity of each powerline. It is defined at the observed current flow divided by the thermal limit of each powerline (no unit) - topo_vect: :class:`numpy.ndarray`, dtype:int + topo_vect: :class:`numpy.ndarray`, dtype:int For each object (load, generator, ends of a powerline) it gives on which bus this object is connected in its substation. See :func:`grid2op.Backend.Backend.get_topo_vect` for more information. @@ -151,16 +160,16 @@ class BaseObservation(GridObjects): timestep_overflow: :class:`numpy.ndarray`, dtype:int Gives the number of time steps since a powerline is in overflow. - time_before_cooldown_line: :class:`numpy.ndarray`, dtype:int + time_before_cooldown_line: :class:`numpy.ndarray`, dtype:int For each powerline, it gives the number of time step the powerline is unavailable due to "cooldown" - (see :attr:`grid2op.Parameters.NB_TIMESTEP_COOLDOWN_LINE` for more information). 0 means the + (see :attr:`grid2op.Parameters.Parameters.NB_TIMESTEP_COOLDOWN_LINE` for more information). 0 means the an action will be able to act on this same powerline, a number > 0 (eg 1) means that an action at this time step cannot act on this powerline (in the example the agent have to wait 1 time step) time_before_cooldown_sub: :class:`numpy.ndarray`, dtype:int Same as :attr:`BaseObservation.time_before_cooldown_line` but for substations. For each substation, it gives the number of timesteps to wait before acting on this substation (see - see :attr:`grid2op.Parameters.NB_TIMESTEP_COOLDOWN_SUB` for more information). + see :attr:`grid2op.Parameters.Parameters.NB_TIMESTEP_COOLDOWN_SUB` for more information). time_next_maintenance: :class:`numpy.ndarray`, dtype:int For each powerline, it gives the time of the next planned maintenance. 
For example if there is: @@ -401,13 +410,13 @@ class BaseObservation(GridObjects): For each attackable line `i` it says: - obs.attack_under_alert[i] = 0 => attackable line i has not been attacked OR it - has been attacked before the relevant window (env.parameters.ALERT_TIME_WINDOW) + has been attacked before the relevant window (`env.parameters.ALERT_TIME_WINDOW`) - obs.attack_under_alert[i] = -1 => attackable line i has been attacked and (before the attack) no alert was sent (so your agent expects to survive at least - env.parameters.ALERT_TIME_WINDOW steps) + `env.parameters.ALERT_TIME_WINDOW` steps) - obs.attack_under_alert[i] = +1 => attackable line i has been attacked and (before the attack) an alert was sent (so your agent expects to "game over" within the next - env.parameters.ALERT_TIME_WINDOW steps) + `env.parameters.ALERT_TIME_WINDOW` steps) _shunt_p: :class:`numpy.ndarray`, dtype:float Shunt active value (only available if shunts are available) (in MW) @@ -499,8 +508,9 @@ def __init__(self, self._forecasted_inj = [] self._env_internal_params = {} - self._obs_env = obs_env - self._ptr_kwargs_env = kwargs_env + from grid2op.Environment._obsEnv import _ObsEnv + self._obs_env : _ObsEnv = obs_env + self._ptr_kwargs_env : Dict = kwargs_env # calendar data self.year = dt_int(1970) @@ -510,64 +520,65 @@ def __init__(self, self.minute_of_hour = dt_int(0) self.day_of_week = dt_int(0) - self.timestep_overflow = np.empty(shape=(self.n_line,), dtype=dt_int) + cls = type(self) + self.timestep_overflow = np.empty(shape=(cls.n_line,), dtype=dt_int) # 0. (line is disconnected) / 1. (line is connected) - self.line_status = np.empty(shape=self.n_line, dtype=dt_bool) + self.line_status = np.empty(shape=cls.n_line, dtype=dt_bool) # topological vector - self.topo_vect = np.empty(shape=self.dim_topo, dtype=dt_int) + self.topo_vect = np.empty(shape=cls.dim_topo, dtype=dt_int) # generators information - self.gen_p = np.empty(shape=self.n_gen, dtype=dt_float) - self.gen_q = np.empty(shape=self.n_gen, dtype=dt_float) - self.gen_v = np.empty(shape=self.n_gen, dtype=dt_float) - self.gen_margin_up = np.empty(shape=self.n_gen, dtype=dt_float) - self.gen_margin_down = np.empty(shape=self.n_gen, dtype=dt_float) + self.gen_p = np.empty(shape=cls.n_gen, dtype=dt_float) + self.gen_q = np.empty(shape=cls.n_gen, dtype=dt_float) + self.gen_v = np.empty(shape=cls.n_gen, dtype=dt_float) + self.gen_margin_up = np.empty(shape=cls.n_gen, dtype=dt_float) + self.gen_margin_down = np.empty(shape=cls.n_gen, dtype=dt_float) # loads information - self.load_p = np.empty(shape=self.n_load, dtype=dt_float) - self.load_q = np.empty(shape=self.n_load, dtype=dt_float) - self.load_v = np.empty(shape=self.n_load, dtype=dt_float) + self.load_p = np.empty(shape=cls.n_load, dtype=dt_float) + self.load_q = np.empty(shape=cls.n_load, dtype=dt_float) + self.load_v = np.empty(shape=cls.n_load, dtype=dt_float) # lines origin information - self.p_or = np.empty(shape=self.n_line, dtype=dt_float) - self.q_or = np.empty(shape=self.n_line, dtype=dt_float) - self.v_or = np.empty(shape=self.n_line, dtype=dt_float) - self.a_or = np.empty(shape=self.n_line, dtype=dt_float) + self.p_or = np.empty(shape=cls.n_line, dtype=dt_float) + self.q_or = np.empty(shape=cls.n_line, dtype=dt_float) + self.v_or = np.empty(shape=cls.n_line, dtype=dt_float) + self.a_or = np.empty(shape=cls.n_line, dtype=dt_float) # lines extremity information - self.p_ex = np.empty(shape=self.n_line, dtype=dt_float) - self.q_ex = np.empty(shape=self.n_line, dtype=dt_float) - self.v_ex 
= np.empty(shape=self.n_line, dtype=dt_float) - self.a_ex = np.empty(shape=self.n_line, dtype=dt_float) + self.p_ex = np.empty(shape=cls.n_line, dtype=dt_float) + self.q_ex = np.empty(shape=cls.n_line, dtype=dt_float) + self.v_ex = np.empty(shape=cls.n_line, dtype=dt_float) + self.a_ex = np.empty(shape=cls.n_line, dtype=dt_float) # lines relative flows - self.rho = np.empty(shape=self.n_line, dtype=dt_float) + self.rho = np.empty(shape=cls.n_line, dtype=dt_float) # cool down and reconnection time after hard overflow, soft overflow or cascading failure - self.time_before_cooldown_line = np.empty(shape=self.n_line, dtype=dt_int) - self.time_before_cooldown_sub = np.empty(shape=self.n_sub, dtype=dt_int) + self.time_before_cooldown_line = np.empty(shape=cls.n_line, dtype=dt_int) + self.time_before_cooldown_sub = np.empty(shape=cls.n_sub, dtype=dt_int) self.time_next_maintenance = 1 * self.time_before_cooldown_line self.duration_next_maintenance = 1 * self.time_before_cooldown_line # redispatching - self.target_dispatch = np.empty(shape=self.n_gen, dtype=dt_float) - self.actual_dispatch = np.empty(shape=self.n_gen, dtype=dt_float) + self.target_dispatch = np.empty(shape=cls.n_gen, dtype=dt_float) + self.actual_dispatch = np.empty(shape=cls.n_gen, dtype=dt_float) # storage unit - self.storage_charge = np.empty(shape=self.n_storage, dtype=dt_float) # in MWh + self.storage_charge = np.empty(shape=cls.n_storage, dtype=dt_float) # in MWh self.storage_power_target = np.empty( - shape=self.n_storage, dtype=dt_float + shape=cls.n_storage, dtype=dt_float ) # in MW - self.storage_power = np.empty(shape=self.n_storage, dtype=dt_float) # in MW + self.storage_power = np.empty(shape=cls.n_storage, dtype=dt_float) # in MW # attention budget self.is_alarm_illegal = np.ones(shape=1, dtype=dt_bool) self.time_since_last_alarm = np.empty(shape=1, dtype=dt_int) - self.last_alarm = np.empty(shape=self.dim_alarms, dtype=dt_int) + self.last_alarm = np.empty(shape=cls.dim_alarms, dtype=dt_int) self.attention_budget = np.empty(shape=1, dtype=dt_float) self.was_alarm_used_after_game_over = np.zeros(shape=1, dtype=dt_bool) # alert - dim_alert = type(self).dim_alerts + dim_alert = cls.dim_alerts self.active_alert = np.empty(shape=dim_alert, dtype=dt_bool) self.attack_under_alert = np.empty(shape=dim_alert, dtype=dt_int) self.time_since_last_alert = np.empty(shape=dim_alert, dtype=dt_int) @@ -583,33 +594,33 @@ def __init__(self, self._vectorized = None # for shunt (these are not stored!) 
- if type(self).shunts_data_available: - self._shunt_p = np.empty(shape=self.n_shunt, dtype=dt_float) - self._shunt_q = np.empty(shape=self.n_shunt, dtype=dt_float) - self._shunt_v = np.empty(shape=self.n_shunt, dtype=dt_float) - self._shunt_bus = np.empty(shape=self.n_shunt, dtype=dt_int) + if cls.shunts_data_available: + self._shunt_p = np.empty(shape=cls.n_shunt, dtype=dt_float) + self._shunt_q = np.empty(shape=cls.n_shunt, dtype=dt_float) + self._shunt_v = np.empty(shape=cls.n_shunt, dtype=dt_float) + self._shunt_bus = np.empty(shape=cls.n_shunt, dtype=dt_int) - self._thermal_limit = np.empty(shape=self.n_line, dtype=dt_float) + self._thermal_limit = np.empty(shape=cls.n_line, dtype=dt_float) - self.gen_p_before_curtail = np.empty(shape=self.n_gen, dtype=dt_float) - self.curtailment = np.empty(shape=self.n_gen, dtype=dt_float) - self.curtailment_limit = np.empty(shape=self.n_gen, dtype=dt_float) - self.curtailment_limit_effective = np.empty(shape=self.n_gen, dtype=dt_float) + self.gen_p_before_curtail = np.empty(shape=cls.n_gen, dtype=dt_float) + self.curtailment = np.empty(shape=cls.n_gen, dtype=dt_float) + self.curtailment_limit = np.empty(shape=cls.n_gen, dtype=dt_float) + self.curtailment_limit_effective = np.empty(shape=cls.n_gen, dtype=dt_float) # the "theta" (voltage angle, in degree) self.support_theta = False - self.theta_or = np.empty(shape=self.n_line, dtype=dt_float) - self.theta_ex = np.empty(shape=self.n_line, dtype=dt_float) - self.load_theta = np.empty(shape=self.n_load, dtype=dt_float) - self.gen_theta = np.empty(shape=self.n_gen, dtype=dt_float) - self.storage_theta = np.empty(shape=self.n_storage, dtype=dt_float) + self.theta_or = np.empty(shape=cls.n_line, dtype=dt_float) + self.theta_ex = np.empty(shape=cls.n_line, dtype=dt_float) + self.load_theta = np.empty(shape=cls.n_load, dtype=dt_float) + self.gen_theta = np.empty(shape=cls.n_gen, dtype=dt_float) + self.storage_theta = np.empty(shape=cls.n_storage, dtype=dt_float) # counter self.current_step = dt_int(0) self.max_step = dt_int(np.iinfo(dt_int).max) self.delta_time = dt_float(5.0) - def _aux_copy(self, other): + def _aux_copy(self, other : Self) -> None: attr_simple = [ "max_step", "current_step", @@ -684,12 +695,39 @@ def _aux_copy(self, other): attr_vect += ["_shunt_bus", "_shunt_v", "_shunt_q", "_shunt_p"] for attr_nm in attr_simple: - setattr(other, attr_nm, getattr(self, attr_nm)) + setattr(other, attr_nm, copy.deepcopy(getattr(self, attr_nm))) for attr_nm in attr_vect: getattr(other, attr_nm)[:] = getattr(self, attr_nm) - def __copy__(self): + def change_reward(self, reward_func: "grid2op.Reward.BaseReward"): + """Allow to change the reward used when calling :func:`BaseObservation.simulate` + without having to access the observation space. + + .. versionadded:: 1.10.2 + + .. 
seealso:: :func:`grid2op.ObservationSpace.change_reward` + It has the same effect as :func:`grid2op.ObservationSpace.change_reward` + + Parameters + ---------- + reward_func : grid2op.Reward.BaseReward + The new reward to use within :func:`BaseObservation.simulate` (see :class:`grid2op.Reward.BaseReward`). + + Raises + ------ + BaseObservationError + If the observation does not support simulation (because the backend of the underlying observation environment could not be copied). + """ + if self._obs_env is not None: + if self._obs_env.is_valid(): + self._obs_env._reward_helper.change_reward(reward_func) + else: + raise BaseObservationError("Impossible to change the reward of the simulate " + "function when you cannot simulate (because the " + "backend could not be copied)") + + def __copy__(self) -> Self: res = type(self)(obs_env=self._obs_env, action_helper=self.action_helper, kwargs_env=self._ptr_kwargs_env) @@ -710,7 +748,7 @@ def __copy__(self): return res - def __deepcopy__(self, memodict={}): + def __deepcopy__(self, memodict={}) -> Self: res = type(self)(obs_env=self._obs_env, action_helper=self.action_helper, kwargs_env=self._ptr_kwargs_env) @@ -741,7 +779,12 @@ def state_of( line_id=None, storage_id=None, substation_id=None, - ): + ) -> Dict[Literal["p", "q", "v", "theta", "bus", "sub_id", "actual_dispatch", "target_dispatch", + "maintenance", "cooldown_time", "storage_power", "storage_charge", + "storage_power_target", "storage_theta", + "topo_vect", "nb_bus", "origin", "extremity"], + Union[int, float, Dict[Literal["p", "q", "v", "a", "sub_id", "bus", "theta"], Union[int, float]]] + ]: """ Return the state of this action on a give unique load, generator unit, powerline of substation. Only one of load, gen, line or substation should be filled. @@ -848,7 +891,6 @@ def state_of( raise Grid2OpException( "action.effect_on should only be called with named argument." ) - if ( load_id is None and gen_id is None @@ -861,6 +903,7 @@ def state_of( 'Please provide "load_id", "gen_id", "line_id", "storage_id" or ' '"substation_id"' ) + cls = type(self) if load_id is not None: if ( @@ -882,7 +925,7 @@ def state_of( "q": self.load_q[load_id], "v": self.load_v[load_id], "bus": self.topo_vect[self.load_pos_topo_vect[load_id]], - "sub_id": self.load_to_subid[load_id], + "sub_id": cls.load_to_subid[load_id], } if self.support_theta: res["theta"] = self.load_theta[load_id] @@ -907,7 +950,7 @@ def state_of( "q": self.gen_q[gen_id], "v": self.gen_v[gen_id], "bus": self.topo_vect[self.gen_pos_topo_vect[gen_id]], - "sub_id": self.gen_to_subid[gen_id], + "sub_id": cls.gen_to_subid[gen_id], "target_dispatch": self.target_dispatch[gen_id], "actual_dispatch": self.target_dispatch[gen_id], "curtailment": self.curtailment[gen_id], @@ -938,8 +981,8 @@ def state_of( "q": self.q_or[line_id], "v": self.v_or[line_id], "a": self.a_or[line_id], - "bus": self.topo_vect[self.line_or_pos_topo_vect[line_id]], - "sub_id": self.line_or_to_subid[line_id], + "bus": self.topo_vect[cls.line_or_pos_topo_vect[line_id]], + "sub_id": cls.line_or_to_subid[line_id], } if self.support_theta: res["origin"]["theta"] = self.theta_or[line_id] @@ -949,8 +992,8 @@ def state_of( "q": self.q_ex[line_id], "v": self.v_ex[line_id], "a": self.a_ex[line_id], - "bus": self.topo_vect[self.line_ex_pos_topo_vect[line_id]], - "sub_id": self.line_ex_to_subid[line_id], + "bus": self.topo_vect[cls.line_ex_pos_topo_vect[line_id]], + "sub_id": cls.line_ex_to_subid[line_id], } if self.support_theta: res["origin"]["theta"] = self.theta_ex[line_id] @@ -967,7 +1010,7 @@ def state_of( elif storage_id is not None: if substation_id is not None: raise Grid2OpException(ERROR_ONLY_SINGLE_EL) - if storage_id >= self.n_storage: + if storage_id >= cls.n_storage: raise
Grid2OpException( 'There are no storage unit with id "storage_id={}" in this grid.'.format( storage_id @@ -977,23 +1020,24 @@ def state_of( raise Grid2OpException("`storage_id` should be a positive integer") res = {} + res["p"] = self.storage_power[storage_id] res["storage_power"] = self.storage_power[storage_id] res["storage_charge"] = self.storage_charge[storage_id] res["storage_power_target"] = self.storage_power_target[storage_id] - res["bus"] = self.topo_vect[self.storage_pos_topo_vect[storage_id]] - res["sub_id"] = self.storage_to_subid[storage_id] + res["bus"] = self.topo_vect[cls.storage_pos_topo_vect[storage_id]] + res["sub_id"] = cls.storage_to_subid[storage_id] if self.support_theta: res["theta"] = self.storage_theta[storage_id] else: - if substation_id >= len(self.sub_info): + if substation_id >= len(cls.sub_info): raise Grid2OpException( 'There are no substation of id "substation_id={}" in this grid.'.format( substation_id ) ) - beg_ = int(self.sub_info[:substation_id].sum()) - end_ = int(beg_ + self.sub_info[substation_id]) + beg_ = int(cls.sub_info[:substation_id].sum()) + end_ = int(beg_ + cls.sub_info[substation_id]) topo_sub = self.topo_vect[beg_:end_] if (topo_sub > 0).any(): nb_bus = ( @@ -1010,7 +1054,7 @@ def state_of( return res @classmethod - def process_shunt_satic_data(cls): + def process_shunt_satic_data(cls) -> None: if not cls.shunts_data_available: # this is really important, otherwise things from grid2op base types will be affected cls.attr_list_vect = copy.deepcopy(cls.attr_list_vect) @@ -1026,131 +1070,147 @@ def process_shunt_satic_data(cls): return super().process_shunt_satic_data() @classmethod - def process_grid2op_compat(cls): - if cls.glop_version == cls.BEFORE_COMPAT_VERSION: - # oldest version: no storage and no curtailment available - - # this is really important, otherwise things from grid2op base types will be affected - cls.attr_list_vect = copy.deepcopy(cls.attr_list_vect) - cls.attr_list_set = copy.deepcopy(cls.attr_list_set) - - # deactivate storage - cls.set_no_storage() - for el in ["storage_charge", "storage_power_target", "storage_power"]: - if el in cls.attr_list_vect: - try: - cls.attr_list_vect.remove(el) - except ValueError: - pass - - # remove the curtailment - for el in ["gen_p_before_curtail", "curtailment", "curtailment_limit"]: - if el in cls.attr_list_vect: - try: - cls.attr_list_vect.remove(el) - except ValueError: - pass - - cls.attr_list_set = set(cls.attr_list_vect) - - if cls.glop_version < "1.6.0" or cls.glop_version == cls.BEFORE_COMPAT_VERSION: - # this feature did not exist before and was introduced in grid2op 1.6.0 - cls.attr_list_vect = copy.deepcopy(cls.attr_list_vect) - cls.attr_list_set = copy.deepcopy(cls.attr_list_set) - cls.dim_alarms = 0 - for el in [ - "is_alarm_illegal", - "time_since_last_alarm", - "last_alarm", - "attention_budget", - "was_alarm_used_after_game_over", - ]: + def _aux_process_grid2op_compat_old(cls): + # this is really important, otherwise things from grid2op base types will be affected + cls.attr_list_vect = copy.deepcopy(cls.attr_list_vect) + cls.attr_list_set = copy.deepcopy(cls.attr_list_set) + + # deactivate storage + cls.set_no_storage() + for el in ["storage_charge", "storage_power_target", "storage_power"]: + if el in cls.attr_list_vect: try: cls.attr_list_vect.remove(el) - except ValueError as exc_: - # this attribute was not there in the first place + except ValueError: pass - for el in ["_shunt_p", "_shunt_q", "_shunt_v", "_shunt_bus"]: - # added in grid2op 1.6.0 mainly for the 
EpisodeReboot + # remove the curtailment + for el in ["gen_p_before_curtail", "curtailment", "curtailment_limit"]: + if el in cls.attr_list_vect: try: cls.attr_list_vect.remove(el) - except ValueError as exc_: - # this attribute was not there in the first place + except ValueError: pass - cls.attr_list_set = set(cls.attr_list_vect) + @classmethod + def _aux_process_grid2op_compat_160(cls): + cls.attr_list_vect = copy.deepcopy(cls.attr_list_vect) + cls.dim_alarms = 0 + for el in [ + "is_alarm_illegal", + "time_since_last_alarm", + "last_alarm", + "attention_budget", + "was_alarm_used_after_game_over", + ]: + try: + cls.attr_list_vect.remove(el) + except ValueError as exc_: + # this attribute was not there in the first place + pass + + for el in ["_shunt_p", "_shunt_q", "_shunt_v", "_shunt_bus"]: + # added in grid2op 1.6.0 mainly for the EpisodeReboot + try: + cls.attr_list_vect.remove(el) + except ValueError as exc_: + # this attribute was not there in the first place + pass + + @classmethod + def _aux_process_grid2op_compat_164(cls): + cls.attr_list_vect = copy.deepcopy(cls.attr_list_vect) + + for el in ["max_step", "current_step"]: + try: + cls.attr_list_vect.remove(el) + except ValueError as exc_: + # this attribute was not there in the first place + pass + + @classmethod + def _aux_process_grid2op_compat_165(cls): + cls.attr_list_vect = copy.deepcopy(cls.attr_list_vect) + + for el in ["delta_time"]: + try: + cls.attr_list_vect.remove(el) + except ValueError as exc_: + # this attribute was not there in the first place + pass + + @classmethod + def _aux_process_grid2op_compat_166(cls): + cls.attr_list_vect = copy.deepcopy(cls.attr_list_vect) - if cls.glop_version < "1.6.4" or cls.glop_version == cls.BEFORE_COMPAT_VERSION: - # "current_step", "max_step" were added in grid2Op 1.6.4 - cls.attr_list_vect = copy.deepcopy(cls.attr_list_vect) - cls.attr_list_set = copy.deepcopy(cls.attr_list_set) + for el in [ + "gen_margin_up", + "gen_margin_down", + "curtailment_limit_effective", + ]: + try: + cls.attr_list_vect.remove(el) + except ValueError as exc_: + # this attribute was not there in the first place + pass + + @classmethod + def _aux_process_grid2op_compat_191(cls): + cls.attr_list_vect = copy.deepcopy(cls.attr_list_vect) - for el in ["max_step", "current_step"]: - try: - cls.attr_list_vect.remove(el) - except ValueError as exc_: - # this attribute was not there in the first place - pass - cls.attr_list_set = set(cls.attr_list_vect) + for el in [ + "active_alert", + "attack_under_alert", + "time_since_last_alert", + "alert_duration", + "total_number_of_alert", + "time_since_last_attack", + "was_alert_used_after_attack" + ]: + try: + cls.attr_list_vect.remove(el) + except ValueError as exc_: + # this attribute was not there in the first place + pass + + @classmethod + def process_grid2op_compat(cls) -> None: + super().process_grid2op_compat() + glop_ver = cls._get_grid2op_version_as_version_obj() + + if cls.glop_version == cls.BEFORE_COMPAT_VERSION: + # oldest version: no storage and no curtailment available + cls._aux_process_grid2op_compat_old() + + if glop_ver < version.parse("1.6.0"): + # this feature did not exist before and was introduced in grid2op 1.6.0 + cls._aux_process_grid2op_compat_160() - if cls.glop_version < "1.6.5" or cls.glop_version == cls.BEFORE_COMPAT_VERSION: + if glop_ver < version.parse("1.6.4"): + # "current_step", "max_step" were added in grid2Op 1.6.4 + cls._aux_process_grid2op_compat_164() + + if glop_ver < version.parse("1.6.5"): # "current_step", "max_step" were 
added in grid2Op 1.6.5 - cls.attr_list_vect = copy.deepcopy(cls.attr_list_vect) - cls.attr_list_set = copy.deepcopy(cls.attr_list_set) - - for el in ["delta_time"]: - try: - cls.attr_list_vect.remove(el) - except ValueError as exc_: - # this attribute was not there in the first place - pass - cls.attr_list_set = set(cls.attr_list_vect) - - if cls.glop_version < "1.6.6" or cls.glop_version == cls.BEFORE_COMPAT_VERSION: + cls._aux_process_grid2op_compat_165() + + if glop_ver < version.parse("1.6.6"): # "gen_margin_up", "gen_margin_down" were added in grid2Op 1.6.6 - cls.attr_list_vect = copy.deepcopy(cls.attr_list_vect) - cls.attr_list_set = copy.deepcopy(cls.attr_list_set) - - for el in [ - "gen_margin_up", - "gen_margin_down", - "curtailment_limit_effective", - ]: - try: - cls.attr_list_vect.remove(el) - except ValueError as exc_: - # this attribute was not there in the first place - pass - cls.attr_list_set = set(cls.attr_list_vect) - - if cls.glop_version < "1.9.1" or cls.glop_version == cls.BEFORE_COMPAT_VERSION: + cls._aux_process_grid2op_compat_166() + + if glop_ver < version.parse("1.9.1"): # alert attributes have been added in 1.9.1 - cls.attr_list_vect = copy.deepcopy(cls.attr_list_vect) - cls.attr_list_set = copy.deepcopy(cls.attr_list_set) - - for el in [ - "active_alert", - "attack_under_alert", - "time_since_last_alert", - "alert_duration", - "total_number_of_alert", - "time_since_last_attack", - "was_alert_used_after_attack" - ]: - try: - cls.attr_list_vect.remove(el) - except ValueError as exc_: - # this attribute was not there in the first place - pass - cls.attr_list_set = set(cls.attr_list_vect) + cls._aux_process_grid2op_compat_191() + + cls.attr_list_set = copy.deepcopy(cls.attr_list_set) + cls.attr_list_set = set(cls.attr_list_vect) - def shape(self): + def shape(self) -> np.ndarray: return type(self).shapes() - def dtype(self): + def dtype(self) -> np.ndarray: return type(self).dtypes() - def reset(self): + def reset(self) -> None: """ INTERNAL @@ -1259,8 +1319,15 @@ def reset(self): self.max_step = dt_int(np.iinfo(dt_int).max) self.delta_time = dt_float(5.0) - def set_game_over(self, env=None): + def set_game_over(self, + env: Optional["grid2op.Environment.Environment"]=None) -> None: """ + INTERNAL + + .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\ + This is used internally to reset an observation in a fixed state, possibly after + a game over. + Set the observation to the "game over" state: - all powerlines are disconnected @@ -1389,7 +1456,7 @@ def set_game_over(self, env=None): # was_alert_used_after_attack not updated here in this case # attack_under_alert not updated here in this case - def __compare_stats(self, other, name): + def __compare_stats(self, other: Self, name: str) -> bool: attr_me = getattr(self, name) attr_other = getattr(other, name) if attr_me is None and attr_other is not None: @@ -1419,7 +1486,7 @@ def __compare_stats(self, other, name): return False return True - def __eq__(self, other): + def __eq__(self, other : Self) -> bool: """ INTERNAL @@ -1440,7 +1507,7 @@ def __eq__(self, other): declared as different. 
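The compatibility dispatch rewritten above now compares parsed version objects (via `cls._get_grid2op_version_as_version_obj()` and `version.parse`) instead of raw strings. A minimal sketch of why that matters, assuming the `packaging` package that `version.parse` implies (illustration only, not part of the diff):

.. code-block:: python

    from packaging import version

    # lexicographic string comparison mis-orders multi-digit components
    assert "1.10.0" < "1.6.4"

    # parsed versions order releases correctly, which is what the new
    # _aux_process_grid2op_compat_* dispatch relies on
    assert version.parse("1.10.0") > version.parse("1.6.4")
    assert version.parse("1.6.0") < version.parse("1.6.4") < version.parse("1.9.1")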
**Known issue** if two backend are different, but the description of the _grid are identical (ie all - n_gen, n_load, n_line, sub_info, dim_topo, all vectors \*_to_subid, and \*_pos_topo_vect are + n_gen, n_load, n_line, sub_info, dim_topo, all vectors \\*_to_subid, and \\*_pos_topo_vect are identical) then this method will not detect the backend are different, and the action could be declared as identical. For now, this is only a theoretical behaviour: if everything is the same, then probably, up to the naming convention, then the powergrid are identical too. @@ -1480,13 +1547,50 @@ def __eq__(self, other): return True - def __sub__(self, other): + def _aux_sub_get_attr_diff(self, me_, oth_): + diff_ = None + if me_ is None and oth_ is None: + diff_ = None + elif me_ is not None and oth_ is None: + diff_ = me_ + elif me_ is None and oth_ is not None: + if oth_.dtype == dt_bool: + diff_ = np.full(oth_.shape, fill_value=False, dtype=dt_bool) + else: + diff_ = -oth_ + else: + # both are not None + if oth_.dtype == dt_bool: + diff_ = ~np.logical_xor(me_, oth_) + else: + diff_ = me_ - oth_ + return diff_ + + def __sub__(self, other : Self) -> Self: """ - computes the difference between two observation, and return an observation corresponding to + Computes the difference between two observations, and return an observation corresponding to this difference. This can be used to easily plot the difference between two observations at different step for example. + + + Examples + ---------- + + .. code-block:: python + + import grid2op + env = grid2op.make("l2rpn_case14_sandbox") + + obs_0 = env.reset() + + action = env.action_space() + obs_1, reward, done, info = env.step(action) + + diff_obs = obs_1 - obs_0 + + diff_obs.gen_p # the variation in generator between these steps """ same_grid = type(self).same_grid_class(type(other)) if not same_grid: @@ -1503,25 +1607,11 @@ def __sub__(self, other): for stat_nm in self._attr_eq: me_ = getattr(self, stat_nm) oth_ = getattr(other, stat_nm) - if me_ is None and oth_ is None: - diff_ = None - elif me_ is not None and oth_ is None: - diff_ = me_ - elif me_ is None and oth_ is not None: - if oth_.dtype == dt_bool: - diff_ = np.full(oth_.shape, fill_value=False, dtype=dt_bool) - else: - diff_ = -oth_ - else: - # both are not None - if oth_.dtype == dt_bool: - diff_ = ~np.logical_xor(me_, oth_) - else: - diff_ = me_ - oth_ + diff_ = self._aux_sub_get_attr_diff(me_, oth_) res.__setattr__(stat_nm, diff_) return res - def where_different(self, other): + def where_different(self, other : Self) -> Tuple[Self, List]: """ Returns the difference between two observation. @@ -1532,7 +1622,7 @@ def where_different(self, other): Returns ------- - diff_: :class:`grid2op.Observation.BaseObservation` + diff_: :class:`BaseObservation` The observation showing the difference between `self` and `other` attr_nm: ``list`` List of string representing the names of the different attributes. 
It's [] if the two observations @@ -1552,7 +1642,7 @@ def where_different(self, other): return diff_, res @abstractmethod - def update(self, env, with_forecast=True): + def update(self, env: "grid2op.Environment.Environment", with_forecast: bool=True) -> None: """ INTERNAL @@ -1589,7 +1679,69 @@ def update(self, env, with_forecast=True): """ pass - def connectivity_matrix(self, as_csr_matrix=False): + def _aux_build_conn_mat(self, as_csr_matrix): + # self._connectivity_matrix_ = np.zeros(shape=(self.dim_topo, self.dim_topo), dtype=dt_float) + # fill it by block for the objects + beg_ = 0 + end_ = 0 + row_ind = [] + col_ind = [] + cls = type(self) + for sub_id, nb_obj in enumerate(cls.sub_info): + # it must be a vanilla python integer, otherwise it's not handled by some backend + # especially if written in c++ + nb_obj = int(nb_obj) + end_ += nb_obj + # tmp = np.zeros(shape=(nb_obj, nb_obj), dtype=dt_float) + for obj1 in range(nb_obj): + my_bus = self.topo_vect[beg_ + obj1] + if my_bus == -1: + # object is disconnected, nothing is done + continue + # connect an object to itself + row_ind.append(beg_ + obj1) + col_ind.append(beg_ + obj1) + + # connect the other objects to it + for obj2 in range(obj1 + 1, nb_obj): + my_bus2 = self.topo_vect[beg_ + obj2] + if my_bus2 == -1: + # object is disconnected, nothing is done + continue + if my_bus == my_bus2: + # objects are on the same bus + # tmp[obj1, obj2] = 1 + # tmp[obj2, obj1] = 1 + row_ind.append(beg_ + obj2) + col_ind.append(beg_ + obj1) + row_ind.append(beg_ + obj1) + col_ind.append(beg_ + obj2) + beg_ += nb_obj + + # both ends of a line are connected together (if line is connected) + for q_id in range(cls.n_line): + if self.line_status[q_id]: + # if powerline is connected connect both its side + row_ind.append(cls.line_or_pos_topo_vect[q_id]) + col_ind.append(cls.line_ex_pos_topo_vect[q_id]) + row_ind.append(cls.line_ex_pos_topo_vect[q_id]) + col_ind.append(cls.line_or_pos_topo_vect[q_id]) + row_ind = np.array(row_ind).astype(dt_int) + col_ind = np.array(col_ind).astype(dt_int) + if not as_csr_matrix: + self._connectivity_matrix_ = np.zeros( + shape=(cls.dim_topo, cls.dim_topo), dtype=dt_float + ) + self._connectivity_matrix_[row_ind.T, col_ind] = 1.0 + else: + data = np.ones(row_ind.shape[0], dtype=dt_float) + self._connectivity_matrix_ = csr_matrix( + (data, (row_ind, col_ind)), + shape=(cls.dim_topo, cls.dim_topo), + dtype=dt_float, + ) + + def connectivity_matrix(self, as_csr_matrix: bool=False) -> Union[np.ndarray, csr_matrix]: """ Computes and return the "connectivity matrix" `con_mat`. 
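Since `where_different` gains type hints above but still has no usage example, here is a short, illustrative sketch of how it complements the `obs_1 - obs_0` example (reusing the `l2rpn_case14_sandbox` environment already used in these docstrings):

.. code-block:: python

    import grid2op

    env = grid2op.make("l2rpn_case14_sandbox")
    obs_0 = env.reset()
    obs_1, reward, done, info = env.step(env.action_space())  # do-nothing action

    # diff_obs is itself an observation (same content as obs_1 - obs_0),
    # attr_names lists the attributes that actually changed
    diff_obs, attr_names = obs_1.where_different(obs_0)
    print(attr_names)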
Let "dim_topo := 2 * n_line + n_prod + n_conso + n_storage" (the total number of elements on the grid) @@ -1672,92 +1824,37 @@ def connectivity_matrix(self, as_csr_matrix=False): # - assign bus 2 to load 0 [on substation 1] # -> one of them is on bus 1 [line (extremity) 0] and the other on bus 2 [load 0] """ - if ( - self._connectivity_matrix_ is None - or ( - isinstance(self._connectivity_matrix_, csr_matrix) and not as_csr_matrix - ) - or ( - (not isinstance(self._connectivity_matrix_, csr_matrix)) - and as_csr_matrix - ) - ): - # self._connectivity_matrix_ = np.zeros(shape=(self.dim_topo, self.dim_topo), dtype=dt_float) - # fill it by block for the objects - beg_ = 0 - end_ = 0 - row_ind = [] - col_ind = [] - for sub_id, nb_obj in enumerate(self.sub_info): - # it must be a vanilla python integer, otherwise it's not handled by some backend - # especially if written in c++ - nb_obj = int(nb_obj) - end_ += nb_obj - # tmp = np.zeros(shape=(nb_obj, nb_obj), dtype=dt_float) - for obj1 in range(nb_obj): - my_bus = self.topo_vect[beg_ + obj1] - if my_bus == -1: - # object is disconnected, nothing is done - continue - # connect an object to itself - row_ind.append(beg_ + obj1) - col_ind.append(beg_ + obj1) - - # connect the other objects to it - for obj2 in range(obj1 + 1, nb_obj): - my_bus2 = self.topo_vect[beg_ + obj2] - if my_bus2 == -1: - # object is disconnected, nothing is done - continue - if my_bus == my_bus2: - # objects are on the same bus - # tmp[obj1, obj2] = 1 - # tmp[obj2, obj1] = 1 - row_ind.append(beg_ + obj2) - col_ind.append(beg_ + obj1) - row_ind.append(beg_ + obj1) - col_ind.append(beg_ + obj2) - beg_ += nb_obj - - # both ends of a line are connected together (if line is connected) - for q_id in range(self.n_line): - if self.line_status[q_id]: - # if powerline is connected connect both its side - row_ind.append(self.line_or_pos_topo_vect[q_id]) - col_ind.append(self.line_ex_pos_topo_vect[q_id]) - row_ind.append(self.line_ex_pos_topo_vect[q_id]) - col_ind.append(self.line_or_pos_topo_vect[q_id]) - row_ind = np.array(row_ind).astype(dt_int) - col_ind = np.array(col_ind).astype(dt_int) - if not as_csr_matrix: - self._connectivity_matrix_ = np.zeros( - shape=(self.dim_topo, self.dim_topo), dtype=dt_float - ) - self._connectivity_matrix_[row_ind.T, col_ind] = 1.0 - else: - data = np.ones(row_ind.shape[0], dtype=dt_float) - self._connectivity_matrix_ = csr_matrix( - (data, (row_ind, col_ind)), - shape=(self.dim_topo, self.dim_topo), - dtype=dt_float, - ) + need_build_mat = (self._connectivity_matrix_ is None or + isinstance(self._connectivity_matrix_, csr_matrix) and not as_csr_matrix or + ( + (not isinstance(self._connectivity_matrix_, csr_matrix)) + and as_csr_matrix + ) + ) + if need_build_mat : + self._aux_build_conn_mat(as_csr_matrix) return self._connectivity_matrix_ def _aux_fun_get_bus(self): """see in bus_connectivity matrix""" - bus_or = self.topo_vect[self.line_or_pos_topo_vect] - bus_ex = self.topo_vect[self.line_ex_pos_topo_vect] + cls = type(self) + bus_or = self.topo_vect[cls.line_or_pos_topo_vect] + bus_ex = self.topo_vect[cls.line_ex_pos_topo_vect] connected = (bus_or > 0) & (bus_ex > 0) bus_or = bus_or[connected] bus_ex = bus_ex[connected] - bus_or = self.line_or_to_subid[connected] + (bus_or - 1) * self.n_sub - bus_ex = self.line_ex_to_subid[connected] + (bus_ex - 1) * self.n_sub + # bus_or = self.line_or_to_subid[connected] + (bus_or - 1) * self.n_sub + # bus_ex = self.line_ex_to_subid[connected] + (bus_ex - 1) * self.n_sub + bus_or = 
cls.local_bus_to_global(bus_or, cls.line_or_to_subid[connected]) + bus_ex = cls.local_bus_to_global(bus_ex, cls.line_ex_to_subid[connected]) unique_bus = np.unique(np.concatenate((bus_or, bus_ex))) unique_bus = np.sort(unique_bus) nb_bus = unique_bus.shape[0] return nb_bus, unique_bus, bus_or, bus_ex - def bus_connectivity_matrix(self, as_csr_matrix=False, return_lines_index=False): + def bus_connectivity_matrix(self, + as_csr_matrix: bool=False, + return_lines_index: bool=False) -> Tuple[Union[np.ndarray, csr_matrix], Optional[Tuple[np.ndarray, np.ndarray]]]: """ If we denote by `nb_bus` the total number bus of the powergrid (you can think of a "bus" being a "node" if you represent a powergrid as a graph [mathematical object, not a plot] with the lines @@ -1784,11 +1881,19 @@ def bus_connectivity_matrix(self, as_csr_matrix=False, return_lines_index=False) return_lines_index: ``bool`` Whether to also return the bus index associated to both side of each powerline. + ``False`` by default, meaning the indexes are not returned. Returns ------- res: ``numpy.ndarray``, shape: (nb_bus, nb_bus) dtype:float The bus connectivity matrix defined above. + + optional: + + - `lor_bus` : for each powerline, it gives the id (row / column of the matrix) + of the bus of the matrix to which its origin side is connected + - `lex_bus` : for each powerline, it gives the id (row / column of the matrix) + of the bus of the matrix to which its extremity side is connected Notes ------ @@ -1890,10 +1995,13 @@ def _get_bus_id(self, id_topo_vect, sub_id): """ bus_id = 1 * self.topo_vect[id_topo_vect] connected = bus_id > 0 - bus_id[connected] = sub_id[connected] + (bus_id[connected] - 1) * self.n_sub + # bus_id[connected] = sub_id[connected] + (bus_id[connected] - 1) * self.n_sub + bus_id[connected] = type(self).local_bus_to_global(bus_id[connected], sub_id[connected]) return bus_id, connected - def flow_bus_matrix(self, active_flow=True, as_csr_matrix=False): + def flow_bus_matrix(self, + active_flow: bool=True, + as_csr_matrix: bool=False) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]: """ A matrix of size "nb bus" "nb bus". Each row and columns represent a "bus" of the grid ("bus" is a power system word that for computer scientist means "nodes" if the powergrid is represented as a graph). @@ -1957,7 +2065,7 @@ def flow_bus_matrix(self, active_flow=True, as_csr_matrix=False): flow on the origin (or extremity) side of the powerline connecting bus `i` to bus `j` You can also know how much power - (total generation + total storage discharging - total load - total storage charging - ) + (total generation + total storage discharging - total load - total storage charging) is injected at each bus `i` by looking at the `i` th diagonal coefficient. @@ -1966,11 +2074,11 @@ def flow_bus_matrix(self, active_flow=True, as_csr_matrix=False): matrix. 
`flow_mat.sum(axis=1)` """ + cls = type(self) if self._is_done: flow_mat = csr_matrix((1,1), dtype=dt_float) if not as_csr_matrix: flow_mat = flow_mat.toarray() - cls = type(self) load_bus = np.zeros(cls.n_load, dtype=dt_int) prod_bus = np.zeros(cls.n_gen, dtype=dt_int) stor_bus = np.zeros(cls.n_storage, dtype=dt_int) @@ -1980,26 +2088,26 @@ def flow_bus_matrix(self, active_flow=True, as_csr_matrix=False): nb_bus, unique_bus, bus_or, bus_ex = self._aux_fun_get_bus() prod_bus, prod_conn = self._get_bus_id( - self.gen_pos_topo_vect, self.gen_to_subid + cls.gen_pos_topo_vect, cls.gen_to_subid ) load_bus, load_conn = self._get_bus_id( - self.load_pos_topo_vect, self.load_to_subid + cls.load_pos_topo_vect, cls.load_to_subid ) stor_bus, stor_conn = self._get_bus_id( - self.storage_pos_topo_vect, self.storage_to_subid + cls.storage_pos_topo_vect, cls.storage_to_subid ) lor_bus, lor_conn = self._get_bus_id( - self.line_or_pos_topo_vect, self.line_or_to_subid + cls.line_or_pos_topo_vect, cls.line_or_to_subid ) lex_bus, lex_conn = self._get_bus_id( - self.line_ex_pos_topo_vect, self.line_ex_to_subid + cls.line_ex_pos_topo_vect, cls.line_ex_to_subid ) - if type(self).shunts_data_available: + if cls.shunts_data_available: sh_bus = 1 * self._shunt_bus sh_bus[sh_bus > 0] = ( - self.shunt_to_subid[sh_bus > 0] * (sh_bus[sh_bus > 0] - 1) - + self.shunt_to_subid[sh_bus > 0] + cls.shunt_to_subid[sh_bus > 0] * (sh_bus[sh_bus > 0] - 1) + + cls.shunt_to_subid[sh_bus > 0] ) sh_conn = self._shunt_bus != -1 @@ -2019,15 +2127,15 @@ def flow_bus_matrix(self, active_flow=True, as_csr_matrix=False): or_vect = self.p_or ex_vect = self.p_ex stor_vect = self.storage_power - if type(self).shunts_data_available: + if cls.shunts_data_available: sh_vect = self._shunt_p else: prod_vect = self.gen_q load_vect = self.load_q or_vect = self.q_or ex_vect = self.q_ex - stor_vect = np.zeros(self.n_storage, dtype=dt_float) - if type(self).shunts_data_available: + stor_vect = np.zeros(cls.n_storage, dtype=dt_float) + if cls.shunts_data_available: sh_vect = self._shunt_q nb_lor = lor_conn.sum() @@ -2068,7 +2176,7 @@ def flow_bus_matrix(self, active_flow=True, as_csr_matrix=False): ) data[bus_stor] -= map_mat.dot(stor_vect[stor_conn]) - if type(self).shunts_data_available: + if cls.shunts_data_available: # handle shunts nb_shunt = sh_conn.sum() if nb_shunt: @@ -2177,6 +2285,20 @@ def get_energy_graph(self) -> networkx.Graph: Convert this observation as a networkx graph. This graph is the graph "seen" by "the electron" / "the energy" of the power grid. + .. versionchanged:: 1.10.0 + Addition of the attribute `local_bus_id` and `global_bus_id` for the nodes of the returned graph. + + `local_bus_id` give the local bus id (from 1 to `obs.n_busbar_per_sub`) id of the + bus represented by this node. + + `global_bus_id` give the global bus id (from 0 to `obs.n_busbar_per_sub * obs.n_sub - 1`) id of the + bus represented by this node. + + Addition of the attribute `global_bus_or` and `global_bus_ex` for the edges of the returned graph. + + These provides the global id of the `origin` / `ext` side to which powerline(s) represented by + this edge is (are) connected. 
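To make the 1.10.0 additions above concrete, a small illustrative sketch (not part of the diff) that reads the new bus attributes back from the returned graph:

.. code-block:: python

    import grid2op

    env = grid2op.make("l2rpn_case14_sandbox")
    obs = env.reset()
    graph = obs.get_energy_graph()

    # nodes now carry the local and global id of the bus they represent
    a_node = next(iter(graph.nodes))
    print(graph.nodes[a_node]["local_bus_id"], graph.nodes[a_node]["global_bus_id"])

    # edges carry the global bus id of the origin / extremity side
    an_edge = next(iter(graph.edges))
    print(graph.edges[an_edge]["global_bus_or"], graph.edges[an_edge]["global_bus_ex"])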
+ Notes ------ The resulting graph is "frozen" this means that you cannot add / remove attribute on nodes or edges, nor add / @@ -2184,7 +2306,7 @@ def get_energy_graph(self) -> networkx.Graph: This graphs has the following properties: - - it counts as many nodes as the number of buses of the grid + - it counts as many nodes as the number of buses of the grid (so it has a dynamic size !) - it counts less edges than the number of lines of the grid (two lines connecting the same buses are "merged" into one single edge - this is the case for parallel line, that are hence "merged" into the same edge) - nodes (represents "buses" of the grid) have attributes: @@ -2195,9 +2317,14 @@ def get_energy_graph(self) -> networkx.Graph: - `v`: the voltage magnitude at this node - `cooldown`: how much longer you need to wait before being able to merge / split or change this node - 'sub_id': the id of the substation to which it is connected (typically between `0` and `obs.n_sub - 1`) - - (optional) `theta`: the voltage angle (in degree) at this nodes + - 'local_bus_id': the local bus id (from 1 to `obs.n_busbar_per_sub`) of the bus represented by this node + (new in version 1.10.0) + - 'global_bus_id': the global bus id (from 0 to `obs.n_busbar_per_sub * obs.n_sub - 1`) + of the bus represented by this node + (new in version 1.10.0) - `cooldown` : the time you need to wait (in number of steps) before being able to act on the substation to which this bus is connected. + - (optional) `theta`: the voltage angle (in degree) at this nodes - edges have attributes too (in this modeling an edge might represent more than one powerline, all parallel powerlines are represented by the same edge): @@ -2216,16 +2343,26 @@ def get_energy_graph(self) -> networkx.Graph: - `p`: active power injected at the "or" side (equal to p_or) (in MW) - `v_or`: voltage magnitude at the "or" bus (in kV) - `v_ex`: voltage magnitude at the "ex" bus (in kV) - - (optional) `theta_or`: voltage angle at the "or" bus (in deg) - - (optional) `theta_ex`: voltage angle at the "ex" bus (in deg) - `time_next_maintenance`: see :attr:`BaseObservation.time_next_maintenance` (min over all powerline) - `duration_next_maintenance` see :attr:`BaseObservation.duration_next_maintenance` (max over all powerlines) - `sub_id_or`: id of the substation of the "or" side of the powerlines - `sub_id_ex`: id of the substation of the "ex" side of the powerlines - `node_id_or`: id of the node (in this graph) of the "or" side of the powergraph - `node_id_ex`: id of the node (in this graph) of the "ex" side of the powergraph - - `bus_or`: on which bus [1 or 2] is this powerline connected to at its "or" substation - - `bus_ex`: on which bus [1 or 2] is this powerline connected to at its "ex" substation + - `bus_or`: on which bus [1 or 2 or 3, etc.] is this powerline connected to at its "or" substation + (this is the local id of the bus) + - `bus_ex`: on which bus [1 or 2 or 3, etc.] 
is this powerline connected to at its "ex" substation + (this is the local id of the bus) + - 'global_bus_or': the global bus id (from 0 to `obs.n_busbar_per_sub * obs.n_sub - 1`) + of the bus to which the origin side of the line(s) represented by this edge + is (are) connected + (new in version 1.10.0) + - 'global_bus_ex': the global bus id (from 0 to `obs.n_busbar_per_sub * obs.n_sub - 1`) + of the bus to which the ext side of the line(s) represented by this edge + is (are) connected + (new in version 1.10.0) + - (optional) `theta_or`: voltage angle at the "or" bus (in deg) + - (optional) `theta_ex`: voltage angle at the "ex" bus (in deg) .. danger:: **IMPORTANT NOTE** edges represents "fusion" of 1 or more powerlines. This graph is intended to be @@ -2316,6 +2453,10 @@ def get_energy_graph(self) -> networkx.Graph: bus_subid = np.zeros(mat_p.shape[0], dtype=dt_int) bus_subid[lor_bus[self.line_status]] = cls.line_or_to_subid[self.line_status] bus_subid[lex_bus[self.line_status]] = cls.line_ex_to_subid[self.line_status] + loc_bus_id = np.zeros(mat_p.shape[0], dtype=int) + loc_bus_id[lor_bus[self.line_status]] = self.topo_vect[cls.line_or_pos_topo_vect[self.line_status]] + loc_bus_id[lex_bus[self.line_status]] = self.topo_vect[cls.line_ex_pos_topo_vect[self.line_status]] + glob_bus_id = cls.local_bus_to_global(loc_bus_id, bus_subid) if self.support_theta: bus_theta[lor_bus[self.line_status]] = self.theta_or[self.line_status] bus_theta[lex_bus[self.line_status]] = self.theta_ex[self.line_status] @@ -2355,7 +2496,14 @@ def get_energy_graph(self) -> networkx.Graph: networkx.set_node_attributes(graph, {el: self.time_before_cooldown_sub[val] for el, val in enumerate(bus_subid)}, "cooldown") - + # add local_id and global_id as attribute to the node of this graph + networkx.set_node_attributes( + graph, {el: val for el, val in enumerate(loc_bus_id)}, "local_bus_id" + ) + networkx.set_node_attributes( + graph, {el: val for el, val in enumerate(glob_bus_id)}, "global_bus_id" + ) + # add the edges attributes self._add_edges_multi(self.p_or, self.p_ex, "p", lor_bus, lex_bus, graph) self._add_edges_multi(self.q_or, self.q_ex, "q", lor_bus, lex_bus, graph) @@ -2415,16 +2563,25 @@ def get_energy_graph(self) -> networkx.Graph: self.line_ex_bus, "bus_ex", lor_bus, lex_bus, graph ) + self._add_edges_simple( + glob_bus_id[lor_bus], + "global_bus_or", lor_bus, lex_bus, graph + ) + self._add_edges_simple( + glob_bus_id[lex_bus], + "global_bus_ex", lor_bus, lex_bus, graph + ) # extra layer of security: prevent accidental modification of this graph networkx.freeze(graph) return graph def _aux_get_connected_buses(self): - res = np.full(2 * self.n_sub, fill_value=False) - global_bus = type(self).local_bus_to_global(self.topo_vect, - self._topo_vect_to_sub) - res[np.unique(global_bus[global_bus != -1])] = True + cls = type(self) + res = np.full(cls.n_busbar_per_sub * cls.n_sub, fill_value=False) + global_bus = cls.local_bus_to_global(self.topo_vect, + cls._topo_vect_to_sub) + res[global_bus[global_bus != -1]] = True return res def _aux_add_edges(self, @@ -2454,6 +2611,7 @@ def _aux_add_edges(self, li_el_edges[ed_num][-1][prop_nm] = prop_vect[el_id] ed_num += 1 graph.add_edges_from(li_el_edges) + return li_el_edges def _aux_add_el_to_comp_graph(self, graph, @@ -2489,30 +2647,37 @@ def _aux_add_el_to_comp_graph(self, el_connected = np.array(el_global_bus) >= 0 for el_id in range(nb_el): li_el_node[el_id][-1]["connected"] = el_connected[el_id] + li_el_node[el_id][-1]["local_bus"] = el_bus[el_id] + 
li_el_node[el_id][-1]["global_bus"] = el_global_bus[el_id] if nodes_prop is not None: for el_id in range(nb_el): for prop_nm, prop_vect in nodes_prop: li_el_node[el_id][-1][prop_nm] = prop_vect[el_id] - graph.add_nodes_from(li_el_node) - graph.graph[f"{el_name}_nodes_id"] = el_ids if el_bus is None and el_to_sub_id is None: + graph.add_nodes_from(li_el_node) + graph.graph[f"{el_name}_nodes_id"] = el_ids return el_ids # add the edges - self._aux_add_edges(el_ids, - cls, - el_global_bus, - nb_el, - el_connected, - el_name, - edges_prop, - graph) + li_el_edges = self._aux_add_edges(el_ids, + cls, + el_global_bus, + nb_el, + el_connected, + el_name, + edges_prop, + graph) + for el_id, (el_node_id, edege_id, *_) in enumerate(li_el_edges): + li_el_node[el_id][-1]["bus_node_id"] = edege_id + + graph.add_nodes_from(li_el_node) + graph.graph[f"{el_name}_nodes_id"] = el_ids return el_ids def _aux_add_buses(self, graph, cls, first_id): - bus_ids = first_id + np.arange(2 * cls.n_sub) + bus_ids = first_id + np.arange(cls.n_busbar_per_sub * cls.n_sub) conn_bus = self._aux_get_connected_buses() bus_li = [ (bus_ids[bus_id], @@ -2522,7 +2687,7 @@ def _aux_add_buses(self, graph, cls, first_id): "type": "bus", "connected": conn_bus[bus_id]} ) - for bus_id in range(2 * cls.n_sub) + for bus_id in range(cls.n_busbar_per_sub * cls.n_sub) ] graph.add_nodes_from(bus_li) edge_bus_li = [(bus_id, @@ -2622,15 +2787,32 @@ def _aux_add_edge_line_side(self, ] if theta_vect is not None: edges_prop.append(("theta", theta_vect)) - self._aux_add_edges(line_node_ids, - cls, - global_bus, - cls.n_line, - conn_, - "line", - edges_prop, - graph) - + res = self._aux_add_edges(line_node_ids, + cls, + global_bus, + cls.n_line, + conn_, + "line", + edges_prop, + graph) + return res + + def _aux_add_local_global(self, cls, graph, lin_ids, el_loc_bus, xxx_subid, side): + el_global_bus = cls.local_bus_to_global(el_loc_bus, + xxx_subid) + dict_ = {} + for el_node_id, loc_bus in zip(lin_ids, el_loc_bus): + dict_[el_node_id] = loc_bus + networkx.set_node_attributes( + graph, dict_, f"local_bus_{side}" + ) + dict_ = {} + for el_node_id, glob_bus in zip(lin_ids, el_global_bus): + dict_[el_node_id] = glob_bus + networkx.set_node_attributes( + graph, dict_, f"global_bus_{side}" + ) + def _aux_add_lines(self, graph, cls, first_id): nodes_prop = [("rho", self.rho), ("connected", self.line_status), @@ -2639,6 +2821,7 @@ def _aux_add_lines(self, graph, cls, first_id): ("time_next_maintenance", self.time_next_maintenance), ("duration_next_maintenance", self.duration_next_maintenance), ] + # only add the nodes, not the edges right now lin_ids = self._aux_add_el_to_comp_graph(graph, first_id, @@ -2650,32 +2833,47 @@ def _aux_add_lines(self, graph, cls, first_id): nodes_prop=nodes_prop, edges_prop=None ) + self._aux_add_local_global(cls, graph, lin_ids, self.line_or_bus, cls.line_or_to_subid, "or") + self._aux_add_local_global(cls, graph, lin_ids, self.line_ex_bus, cls.line_ex_to_subid, "ex") # add "or" edges - self._aux_add_edge_line_side(cls, - graph, - self.line_or_bus, - cls.line_or_to_subid, - lin_ids, - "or", - self.p_or, - self.q_or, - self.v_or, - self.a_or, - self.theta_or if self.support_theta else None) + li_el_edges_or = self._aux_add_edge_line_side(cls, + graph, + self.line_or_bus, + cls.line_or_to_subid, + lin_ids, + "or", + self.p_or, + self.q_or, + self.v_or, + self.a_or, + self.theta_or if self.support_theta else None) + dict_or = {} + for el_id, (el_node_id, edege_id, *_) in enumerate(li_el_edges_or): + dict_or[el_node_id] = 
edege_id + networkx.set_node_attributes( + graph, dict_or, "bus_node_id_or" + ) # add "ex" edges - self._aux_add_edge_line_side(cls, - graph, - self.line_ex_bus, - cls.line_ex_to_subid, - lin_ids, - "ex", - self.p_ex, - self.q_ex, - self.v_ex, - self.a_ex, - self.theta_ex if self.support_theta else None) + li_el_edges_ex = self._aux_add_edge_line_side(cls, + graph, + self.line_ex_bus, + cls.line_ex_to_subid, + lin_ids, + "ex", + self.p_ex, + self.q_ex, + self.v_ex, + self.a_ex, + self.theta_ex if self.support_theta else None) + dict_ex = {} + for el_id, (el_node_id, edege_id, *_) in enumerate(li_el_edges_ex): + dict_ex[el_node_id] = edege_id + networkx.set_node_attributes( + graph, dict_ex, "bus_node_id_ex" + ) + return lin_ids def _aux_add_shunts(self, graph, cls, first_id): @@ -2702,7 +2900,8 @@ def get_elements_graph(self) -> networkx.DiGraph: """This function returns the "elements graph" as a networkx object. .. seealso:: - This object is extensively described in the documentation, see :ref:`elmnt-graph-gg` for more information. + This object is extensively described in the documentation, + see :ref:`elmnt-graph-gg` for more information. Basically, each "element" of the grid (element = a substation, a bus, a load, a generator, a powerline, a storate unit or a shunt) is represented by a node in this graph. @@ -2752,6 +2951,7 @@ def get_elements_graph(self) -> networkx.DiGraph: ------- networkx.DiGraph The "elements graph", see :ref:`elmnt-graph-gg` . + """ cls = type(self) @@ -2819,7 +3019,7 @@ def get_elements_graph(self) -> networkx.DiGraph: networkx.freeze(graph) return graph - def get_forecasted_inj(self, time_step=1): + def get_forecasted_inj(self, time_step:int =1) -> np.ndarray: """ This function allows you to retrieve directly the "forecast" injections for the step `time_step`. @@ -2848,11 +3048,12 @@ def get_forecasted_inj(self, time_step=1): time_step ) ) + cls = type(self) t, a = self._forecasted_inj[time_step] - prod_p_f = np.full(self.n_gen, fill_value=np.NaN, dtype=dt_float) - prod_v_f = np.full(self.n_gen, fill_value=np.NaN, dtype=dt_float) - load_p_f = np.full(self.n_load, fill_value=np.NaN, dtype=dt_float) - load_q_f = np.full(self.n_load, fill_value=np.NaN, dtype=dt_float) + prod_p_f = np.full(cls.n_gen, fill_value=np.NaN, dtype=dt_float) + prod_v_f = np.full(cls.n_gen, fill_value=np.NaN, dtype=dt_float) + load_p_f = np.full(cls.n_load, fill_value=np.NaN, dtype=dt_float) + load_q_f = np.full(cls.n_load, fill_value=np.NaN, dtype=dt_float) if "prod_p" in a["injection"]: prod_p_f = a["injection"]["prod_p"] @@ -2872,7 +3073,7 @@ def get_forecasted_inj(self, time_step=1): load_q_f[tmp_arg] = self.load_q[tmp_arg] return prod_p_f, prod_v_f, load_p_f, load_q_f - def get_time_stamp(self): + def get_time_stamp(self) -> datetime.datetime: """ Get the time stamp of the current observation as a `datetime.datetime` object """ @@ -2885,7 +3086,10 @@ def get_time_stamp(self): ) return res - def simulate(self, action, time_step=1): + def simulate(self, action : "grid2op.Action.BaseAction", time_step:int=1) -> Tuple["BaseObservation", + float, + bool, + STEP_INFO_TYPING]: """ This method is used to simulate the effect of an action on a forecast powergrid state. This forecast state is built upon the current observation. @@ -2911,7 +3115,34 @@ def simulate(self, action, time_step=1): If the data of the :class:`grid2op.Environment.Environment` you are using supports it (**ie** you can access multiple steps ahead forecasts), then you can now "chain" the simulate calls. - + + .. 
danger:: + A simulation can be different from reality, even in the case of perfect forecasts or if + you "simulate" an action on the current step (time_step=0). + + For example, the solver used can be different for "simulate" and for the environment + "step", or you can be in a setting with noisy actions. + + A more subtle difference comes from the "initialization" of the solver, which is different + in env.step and in obs.simulate, so the outcomes of the solver might be different + (this is especially relevant for larger grids). + + Even more subtle is the behaviour of the ramps for some generators. + + More concretely, + say you want to dispatch a generator (with a ramp of +5) upward by +5MW at a given + step. But at the same time this generator would see its production increased by +2MW + "naturally" in the time series. Then, grid2op would limit the total increase to +5MW (instead + of +7 = +5 + 2) by limiting the redispatching action to +3MW. + + If you simulate the same action on the resulting step, as there is no "previous step", + your action will not be limited and the full +5MW of redispatching will be applied. + + You have the same phenomenon for storage losses: they are applied even if you simulate + at the current step and, conversely, are not applied "multiple times" if you simulate + for a horizon longer than 1 (say time_step=2) or if you chain two or more + calls to "simulate". + Examples --------- @@ -3131,7 +3362,7 @@ def simulate(self, action, time_step=1): "Forecast for {} timestep(s) ahead is not possible with your chronics." "".format(time_step) ) - + if time_step not in self._forecasted_grid_act: timestamp, inj_forecasted = self._forecasted_inj[time_step] self._forecasted_grid_act[time_step] = { @@ -3158,7 +3389,7 @@ def simulate(self, action, time_step=1): sim_obs._update_internal_env_params(self._obs_env) return (sim_obs, *rest) # parentheses are needed for python 3.6 at least. - def copy(self): + def copy(self, env=None) -> Self: """ INTERNAL @@ -3187,20 +3418,25 @@ def copy(self): res = copy.deepcopy(self) self._obs_env = obs_env - res._obs_env = obs_env - self.action_helper = action_helper - res.action_helper = action_helper - self._ptr_kwargs_env = _ptr_kwargs_env - res._ptr_kwargs_env = _ptr_kwargs_env - + if env is None: + # this will make a copy but the observation will still + # be "bound" to the original env + res._obs_env = obs_env + res.action_helper = action_helper + res._ptr_kwargs_env = _ptr_kwargs_env + else: + # the action will be "bound" to the new environment + res._obs_env = env._observation_space.obs_env + res.action_helper = env._observation_space.action_helper_env + res._ptr_kwargs_env = env._observation_space._real_env_kwargs return res @property - def line_or_bus(self): + def line_or_bus(self) -> np.ndarray: """ - Retrieve the busbar at which each origin end of powerline is connected. + Retrieve the busbar at which each origin side of powerline is connected. The result follow grid2op convention: @@ -3220,9 +3456,9 @@ def line_or_bus(self): return res @property - def line_ex_bus(self): + def line_ex_bus(self) -> np.ndarray: """ - Retrieve the busbar at which each extremity end of powerline is connected. + Retrieve the busbar at which each extremity side of powerline is connected. The result follow grid2op convention: @@ -3242,7 +3478,7 @@ def line_ex_bus(self): return res @property - def gen_bus(self): + def gen_bus(self) -> np.ndarray: """ Retrieve the busbar at which each generator is connected. 
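To make the caveats listed in the danger block above concrete, here is a minimal, illustrative `simulate` call side by side with the real step (same environment name as in the other examples):

.. code-block:: python

    import grid2op

    env = grid2op.make("l2rpn_case14_sandbox")
    obs = env.reset()
    act = env.action_space()  # do-nothing action

    # forecasted outcome of the action, one step ahead
    sim_obs, sim_reward, sim_done, sim_info = obs.simulate(act, time_step=1)

    # the real outcome may still differ (solver initialization, ramps, losses...)
    real_obs, real_reward, real_done, real_info = env.step(act)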
@@ -3264,7 +3500,7 @@ def gen_bus(self): return res @property - def load_bus(self): + def load_bus(self) -> np.ndarray: """ Retrieve the busbar at which each load is connected. @@ -3286,7 +3522,7 @@ def load_bus(self): return res @property - def storage_bus(self): + def storage_bus(self) -> np.ndarray: """ Retrieve the busbar at which each storage unit is connected. @@ -3308,7 +3544,7 @@ def storage_bus(self): return res @property - def prod_p(self): + def prod_p(self) -> np.ndarray: """ As of grid2op version 1.5.0, for better consistency, the "prod_p" attribute has been renamed "gen_p", see the doc of :attr:`BaseObservation.gen_p` for more information. @@ -3323,7 +3559,7 @@ def prod_p(self): return self.gen_p @property - def prod_q(self): + def prod_q(self) -> np.ndarray: """ As of grid2op version 1.5.0, for better consistency, the "prod_q" attribute has been renamed "gen_q", see the doc of :attr:`BaseObservation.gen_q` for more information. @@ -3338,7 +3574,7 @@ def prod_q(self): return self.gen_q @property - def prod_v(self): + def prod_v(self) -> np.ndarray: """ As of grid2op version 1.5.0, for better consistency, the "prod_v" attribute has been renamed "gen_v", see the doc of :attr:`BaseObservation.gen_v` for more information. @@ -3352,7 +3588,7 @@ def prod_v(self): """ return self.gen_v - def sub_topology(self, sub_id): + def sub_topology(self, sub_id) -> np.ndarray: """ Returns the topology of the given substation. @@ -3526,7 +3762,111 @@ def to_dict(self): return self._dictionnarized - def add_act(self, act, issue_warn=True): + def _aux_add_act_set_line_status(self, cls, cls_act, act, res, issue_warn): + reco_powerline = act.line_set_status + if "set_bus" in cls_act.authorized_keys: + line_ex_set_bus = act.line_ex_set_bus + line_or_set_bus = act.line_or_set_bus + else: + line_ex_set_bus = np.zeros(cls.n_line, dtype=dt_int) + line_or_set_bus = np.zeros(cls.n_line, dtype=dt_int) + error_no_bus_set = ( + "You reconnected a powerline with your action but did not specify on which bus " + "to reconnect both its end. This behaviour, also perfectly fine for an environment " + "will not be accurate in the method obs + act. Consult the documentation for more " + "information. 
Problem arose for powerlines with id {}" + ) + + tmp = ( + (reco_powerline == 1) + & (line_ex_set_bus <= 0) + & (res.topo_vect[cls.line_ex_pos_topo_vect] == -1) + ) + if tmp.any(): + id_issue_ex = tmp.nonzero()[0] + if issue_warn: + warnings.warn(error_no_bus_set.format(id_issue_ex)) + if "set_bus" in cls_act.authorized_keys: + # assign 1 in the bus in this case + act.line_ex_set_bus = [(el, 1) for el in id_issue_ex] + tmp = ( + (reco_powerline == 1) + & (line_or_set_bus <= 0) + & (res.topo_vect[cls.line_or_pos_topo_vect] == -1) + ) + if tmp.any(): + id_issue_or = tmp.nonzero()[0] + if issue_warn: + warnings.warn(error_no_bus_set.format(id_issue_or)) + if "set_bus" in cls_act.authorized_keys: + # assign 1 in the bus in this case + act.line_or_set_bus = [(el, 1) for el in id_issue_or] + + def _aux_add_act_set_line_status2(self, cls, cls_act, act, res, issue_warn): + disco_line = (act.line_set_status == -1) & res.line_status + res.topo_vect[cls.line_or_pos_topo_vect[disco_line]] = -1 + res.topo_vect[cls.line_ex_pos_topo_vect[disco_line]] = -1 + res.line_status[disco_line] = False + + reco_line = (act.line_set_status >= 1) & (~res.line_status) + # i can do that because i already "fixed" the action to have it put 1 in case it + # bus were not provided + if "set_bus" in cls_act.authorized_keys: + # I assign previous bus (because it could have been modified) + res.topo_vect[ + cls.line_or_pos_topo_vect[reco_line] + ] = act.line_or_set_bus[reco_line] + res.topo_vect[ + cls.line_ex_pos_topo_vect[reco_line] + ] = act.line_ex_set_bus[reco_line] + else: + # I assign one (action do not allow me to modify the bus) + res.topo_vect[cls.line_or_pos_topo_vect[reco_line]] = 1 + res.topo_vect[cls.line_ex_pos_topo_vect[reco_line]] = 1 + + res.line_status[reco_line] = True + + def _aux_add_act_change_line_status2(self, cls, cls_act, act, res, issue_warn): + disco_line = act.line_change_status & res.line_status + reco_line = act.line_change_status & (~res.line_status) + + # handle disconnected powerlines + res.topo_vect[cls.line_or_pos_topo_vect[disco_line]] = -1 + res.topo_vect[cls.line_ex_pos_topo_vect[disco_line]] = -1 + res.line_status[disco_line] = False + + # handle reconnected powerlines + if reco_line.any(): + if "set_bus" in cls_act.authorized_keys: + line_ex_set_bus = 1 * act.line_ex_set_bus + line_or_set_bus = 1 * act.line_or_set_bus + else: + line_ex_set_bus = np.zeros(cls.n_line, dtype=dt_int) + line_or_set_bus = np.zeros(cls.n_line, dtype=dt_int) + + if issue_warn and ( + (line_or_set_bus[reco_line] == 0).any() + or (line_ex_set_bus[reco_line] == 0).any() + ): + warnings.warn( + 'A powerline has been reconnected with a "change_status" action without ' + "specifying on which bus it was supposed to be reconnected. This is " + "perfectly fine in regular grid2op environment, but this behaviour " + "cannot be properly implemented with the only information in the " + "observation. Please see the documentation for more information." + ) + line_or_set_bus[reco_line & (line_or_set_bus == 0)] = 1 + line_ex_set_bus[reco_line & (line_ex_set_bus == 0)] = 1 + + res.topo_vect[cls.line_or_pos_topo_vect[reco_line]] = line_or_set_bus[ + reco_line + ] + res.topo_vect[cls.line_ex_pos_topo_vect[reco_line]] = line_ex_set_bus[ + reco_line + ] + res.line_status[reco_line] = True + + def add_act(self, act : "grid2op.Action.BaseAction", issue_warn=True) -> Self: """ Easier access to the impact on the observation if an action were applied. 
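A short, illustrative sketch of the `obs + act` shortcut this method backs (only the topology-related attributes of the result are meaningful, as the warnings around it explain):

.. code-block:: python

    import grid2op

    env = grid2op.make("l2rpn_case14_sandbox")
    obs = env.reset()

    # purely topological action: disconnect powerline 0
    act = env.action_space({"set_line_status": [(0, -1)]})

    new_obs = obs + act  # goes through add_act under the hood
    assert not new_obs.line_status[0]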
@@ -3615,8 +3955,11 @@ def add_act(self, act, issue_warn=True): if not isinstance(act, BaseAction): raise RuntimeError("You can only add actions to observation at the moment") + cls = type(self) + cls_act = type(act) + act = copy.deepcopy(act) - res = type(self)() + res = cls() res.set_game_over(env=None) res.topo_vect[:] = self.topo_vect @@ -3630,138 +3973,52 @@ def add_act(self, act, issue_warn=True): ) # if a powerline has been reconnected without specific bus, i issue a warning - if "set_line_status" in act.authorized_keys: - reco_powerline = act.line_set_status - if "set_bus" in act.authorized_keys: - line_ex_set_bus = act.line_ex_set_bus - line_or_set_bus = act.line_or_set_bus - else: - line_ex_set_bus = np.zeros(res.n_line, dtype=dt_int) - line_or_set_bus = np.zeros(res.n_line, dtype=dt_int) - error_no_bus_set = ( - "You reconnected a powerline with your action but did not specify on which bus " - "to reconnect both its end. This behaviour, also perfectly fine for an environment " - "will not be accurate in the method obs + act. Consult the documentation for more " - "information. Problem arose for powerlines with id {}" - ) - - tmp = ( - (reco_powerline == 1) - & (line_ex_set_bus <= 0) - & (res.topo_vect[self.line_ex_pos_topo_vect] == -1) - ) - if tmp.any(): - id_issue_ex = np.where(tmp)[0] - if issue_warn: - warnings.warn(error_no_bus_set.format(id_issue_ex)) - if "set_bus" in act.authorized_keys: - # assign 1 in the bus in this case - act.line_ex_set_bus = [(el, 1) for el in id_issue_ex] - tmp = ( - (reco_powerline == 1) - & (line_or_set_bus <= 0) - & (res.topo_vect[self.line_or_pos_topo_vect] == -1) - ) - if tmp.any(): - id_issue_or = np.where(tmp)[0] - if issue_warn: - warnings.warn(error_no_bus_set.format(id_issue_or)) - if "set_bus" in act.authorized_keys: - # assign 1 in the bus in this case - act.line_or_set_bus = [(el, 1) for el in id_issue_or] - + if "set_line_status" in cls_act.authorized_keys: + self._aux_add_act_set_line_status(cls, cls_act, act, res, issue_warn) + # topo vect - if "set_bus" in act.authorized_keys: + if "set_bus" in cls_act.authorized_keys: res.topo_vect[act.set_bus != 0] = act.set_bus[act.set_bus != 0] - if "change_bus" in act.authorized_keys: + if "change_bus" in cls_act.authorized_keys: do_change_bus_on = act.change_bus & ( res.topo_vect > 0 ) # change bus of elements that were on res.topo_vect[do_change_bus_on] = 3 - res.topo_vect[do_change_bus_on] # topo vect: reco of powerline that should be - res.line_status = (res.topo_vect[self.line_or_pos_topo_vect] >= 1) & ( - res.topo_vect[self.line_ex_pos_topo_vect] >= 1 + res.line_status = (res.topo_vect[cls.line_or_pos_topo_vect] >= 1) & ( + res.topo_vect[cls.line_ex_pos_topo_vect] >= 1 ) # powerline status - if "set_line_status" in act.authorized_keys: - disco_line = (act.line_set_status == -1) & res.line_status - res.topo_vect[res.line_or_pos_topo_vect[disco_line]] = -1 - res.topo_vect[res.line_ex_pos_topo_vect[disco_line]] = -1 - res.line_status[disco_line] = False - - reco_line = (act.line_set_status >= 1) & (~res.line_status) - # i can do that because i already "fixed" the action to have it put 1 in case it - # bus were not provided - if "set_bus" in act.authorized_keys: - # I assign previous bus (because it could have been modified) - res.topo_vect[ - res.line_or_pos_topo_vect[reco_line] - ] = act.line_or_set_bus[reco_line] - res.topo_vect[ - res.line_ex_pos_topo_vect[reco_line] - ] = act.line_ex_set_bus[reco_line] - else: - # I assign one (action do not allow me to modify the bus) - 
res.topo_vect[res.line_or_pos_topo_vect[reco_line]] = 1 - res.topo_vect[res.line_ex_pos_topo_vect[reco_line]] = 1 - - res.line_status[reco_line] = True - - if "change_line_status" in act.authorized_keys: - disco_line = act.line_change_status & res.line_status - reco_line = act.line_change_status & (~res.line_status) - - # handle disconnected powerlines - res.topo_vect[res.line_or_pos_topo_vect[disco_line]] = -1 - res.topo_vect[res.line_ex_pos_topo_vect[disco_line]] = -1 - res.line_status[disco_line] = False - - # handle reconnected powerlines - if reco_line.any(): - if "set_bus" in act.authorized_keys: - line_ex_set_bus = 1 * act.line_ex_set_bus - line_or_set_bus = 1 * act.line_or_set_bus - else: - line_ex_set_bus = np.zeros(res.n_line, dtype=dt_int) - line_or_set_bus = np.zeros(res.n_line, dtype=dt_int) + if "set_line_status" in cls_act.authorized_keys: + self._aux_add_act_set_line_status2(cls, cls_act, act, res, issue_warn) + + if "change_line_status" in cls_act.authorized_keys: + self._aux_add_act_change_line_status2(cls, cls_act, act, res, issue_warn) - if issue_warn and ( - (line_or_set_bus[reco_line] == 0).any() - or (line_ex_set_bus[reco_line] == 0).any() - ): - warnings.warn( - 'A powerline has been reconnected with a "change_status" action without ' - "specifying on which bus it was supposed to be reconnected. This is " - "perfectly fine in regular grid2op environment, but this behaviour " - "cannot be properly implemented with the only information in the " - "observation. Please see the documentation for more information." - ) - line_or_set_bus[reco_line & (line_or_set_bus == 0)] = 1 - line_ex_set_bus[reco_line & (line_ex_set_bus == 0)] = 1 - - res.topo_vect[res.line_or_pos_topo_vect[reco_line]] = line_or_set_bus[ - reco_line - ] - res.topo_vect[res.line_ex_pos_topo_vect[reco_line]] = line_ex_set_bus[ - reco_line - ] - res.line_status[reco_line] = True - - if "redispatch" in act.authorized_keys: + if "redispatch" in cls_act.authorized_keys: redisp = act.redispatch - if (redisp != 0).any() and issue_warn: + if (np.abs(redisp) >= 1e-7).any() and issue_warn: warnings.warn( "You did redispatching on this action. Redispatching is heavily transformed " "by the environment (consult the documentation about the modeling of the " "generators for example) so we will not even try to mimic this here." ) - if "set_storage" in act.authorized_keys: + if "set_storage" in cls_act.authorized_keys: storage_p = act.storage_p - if (storage_p != 0).any() and issue_warn: + if (np.abs(storage_p) >= 1e-7).any() and issue_warn: + warnings.warn( + "You did action on storage units in this action. This implies performing some " + "redispatching which is heavily transformed " + "by the environment (consult the documentation about the modeling of the " + "generators for example) so we will not even try to mimic this here." + ) + if "curtail" in cls_act.authorized_keys: + curt = act.curtail + if (np.abs(curt + 1) >= 1e-7).any() and issue_warn: # curtail == -1. warnings.warn( "You did action on storage units in this action. 
This implies performing some " "redispatching which is heavily transformed " @@ -3770,7 +4027,7 @@ def add_act(self, act, issue_warn=True): ) return res - def __add__(self, act): + def __add__(self, act: "grid2op.Action.BaseAction") -> Self: from grid2op.Action import BaseAction if isinstance(act, BaseAction): @@ -3780,7 +4037,7 @@ def __add__(self, act): ) @property - def thermal_limit(self): + def thermal_limit(self) -> np.ndarray: """ Return the thermal limit of the powergrid, given in Amps (A) @@ -3801,7 +4058,7 @@ def thermal_limit(self): return res @property - def curtailment_mw(self): + def curtailment_mw(self) -> np.ndarray: """ return the curtailment, expressed in MW rather than in ratio of pmax. @@ -3820,7 +4077,7 @@ def curtailment_mw(self): return self.curtailment * self.gen_pmax @property - def curtailment_limit_mw(self): + def curtailment_limit_mw(self) -> np.ndarray: """ return the limit of production of a generator in MW rather in ratio @@ -3838,7 +4095,7 @@ def curtailment_limit_mw(self): """ return self.curtailment_limit * self.gen_pmax - def _update_attr_backend(self, backend): + def _update_attr_backend(self, backend: "grid2op.Backend.Backend") -> None: """This function updates the attribute of the observation that depends only on the backend. @@ -3846,8 +4103,10 @@ def _update_attr_backend(self, backend): ---------- backend : The backend from which to update the observation + """ - + cls = type(self) + self.line_status[:] = backend.get_line_status() self.topo_vect[:] = backend.get_topo_vect() @@ -3860,15 +4119,15 @@ def _update_attr_backend(self, backend): self.rho[:] = backend.get_relative_flow().astype(dt_float) # margin up and down - if type(self).redispatching_unit_commitment_availble: + if cls.redispatching_unit_commitment_availble: self.gen_margin_up[:] = np.minimum( - type(self).gen_pmax - self.gen_p, self.gen_max_ramp_up + cls.gen_pmax - self.gen_p, self.gen_max_ramp_up ) - self.gen_margin_up[type(self).gen_renewable] = 0.0 + self.gen_margin_up[cls.gen_renewable] = 0.0 self.gen_margin_down[:] = np.minimum( - self.gen_p - type(self).gen_pmin, self.gen_max_ramp_down + self.gen_p - cls.gen_pmin, self.gen_max_ramp_down ) - self.gen_margin_down[type(self).gen_renewable] = 0.0 + self.gen_margin_down[cls.gen_renewable] = 0.0 # because of the slack, sometimes it's negative... # see https://github.com/rte-france/Grid2Op/issues/313 @@ -3879,7 +4138,7 @@ def _update_attr_backend(self, backend): self.gen_margin_down[:] = 0.0 # handle shunts (if avaialble) - if type(self).shunts_data_available: + if cls.shunts_data_available: sh_p, sh_q, sh_v, sh_bus = backend.shunt_info() self._shunt_p[:] = sh_p self._shunt_q[:] = sh_q @@ -3903,7 +4162,7 @@ def _update_attr_backend(self, backend): self.gen_theta[:] = 0. self.storage_theta[:] = 0. 
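The `curtailment_mw` and `curtailment_limit_mw` properties touched above are plain unit conversions; an illustrative sanity check:

.. code-block:: python

    import numpy as np
    import grid2op

    env = grid2op.make("l2rpn_case14_sandbox")
    obs = env.reset()

    # both properties simply rescale the per-unit values by gen_pmax
    assert np.allclose(obs.curtailment_mw, obs.curtailment * obs.gen_pmax)
    assert np.allclose(obs.curtailment_limit_mw, obs.curtailment_limit * obs.gen_pmax)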
- def _update_internal_env_params(self, env): + def _update_internal_env_params(self, env: "grid2op.Environment.BaseEnv"): # this is only done if the env supports forecast # some parameters used for the "forecast env" # but not directly accessible in the observation @@ -3928,7 +4187,7 @@ def _update_internal_env_params(self, env): # (self._env_internal_params["opp_space_state"], # self._env_internal_params["opp_state"]) = env._oppSpace._get_state() - def _update_obs_complete(self, env, with_forecast=True): + def _update_obs_complete(self, env: "grid2op.Environment.BaseEnv", with_forecast:bool=True): """ update all the observation attributes as if it was a complete, fully observable and without noise observation @@ -3957,9 +4216,6 @@ def _update_obs_complete(self, env, with_forecast=True): self.storage_charge[:] = env._storage_current_charge self.storage_power_target[:] = env._action_storage self.storage_power[:] = env._storage_power - - # handles forecasts here - self._update_forecast(env, with_forecast) # cool down and reconnection time after hard overflow, soft overflow or cascading failure self.time_before_cooldown_line[:] = env._times_before_line_status_actionable @@ -3998,13 +4254,18 @@ def _update_obs_complete(self, env, with_forecast=True): self.curtailment_limit[:] = 1.0 self.curtailment_limit_effective[:] = 1.0 - self._update_alarm(env) - self.delta_time = dt_float(1.0 * env.delta_time_seconds / 60.0) + # handles forecasts here + self._update_forecast(env, with_forecast) + + # handle alarms + self._update_alarm(env) + + # handle alerts self._update_alert(env) - def _update_forecast(self, env, with_forecast): + def _update_forecast(self, env: "grid2op.Environment.BaseEnv", with_forecast: bool) -> None: if not with_forecast: return @@ -4023,7 +4284,7 @@ def _update_forecast(self, env, with_forecast): self._env_internal_params = {} self._update_internal_env_params(env) - def _update_alarm(self, env): + def _update_alarm(self, env: "grid2op.Environment.BaseEnv"): if not (self.dim_alarms and env._has_attention_budget): return @@ -4038,7 +4299,7 @@ def _update_alarm(self, env): self.last_alarm[:] = env._attention_budget.last_successful_alarm_raised self.attention_budget[:] = env._attention_budget.current_budget - def _update_alert(self, env): + def _update_alert(self, env: "grid2op.Environment.BaseEnv"): self.active_alert[:] = env._last_alert self.time_since_last_alert[:] = env._time_since_last_alert self.alert_duration[:] = env._alert_duration @@ -4105,7 +4366,7 @@ def get_simulator(self) -> "grid2op.simulator.Simulator": self._obs_env.highres_sim_counter._HighResSimCounter__nb_highres_called = nb_highres_called return res - def _get_array_from_forecast(self, name): + def _get_array_from_forecast(self, name: str) -> np.ndarray: if len(self._forecasted_inj) <= 1: # self._forecasted_inj already embed the current step raise NoForecastAvailable("It appears this environment does not support any forecast at all.") @@ -4123,7 +4384,7 @@ def _get_array_from_forecast(self, name): res[h,:] = this_row return res - def _generate_forecasted_maintenance_for_simenv(self, nb_h: int): + def _generate_forecasted_maintenance_for_simenv(self, nb_h: int) -> np.ndarray: n_line = type(self).n_line res = np.full((nb_h, n_line), fill_value=False, dtype=dt_bool) for l_id in range(n_line): @@ -4207,7 +4468,18 @@ def get_forecast_env(self) -> "grid2op.Environment.Environment": f_obs_3, *_ = forecast_env.step(act_3) sim_obs_3, *_ = sim_obs_2.simulate(act_3) # f_obs_3 should be sim_obs_3 - + + .. 
danger:: + + Long story short, once an environment (and a forecast_env is one) + is deleted, you cannot use anything it "holds", including, + but not limited to, the capacity to perform `obs.simulate(...)` even if the `obs` is still + referenced. + + See :ref:`danger-env-ownership` (first danger block). + + This caused, for example, issue https://github.com/rte-france/Grid2Op/issues/568. + Returns ------- grid2op.Environment.Environment @@ -4232,7 +4504,7 @@ def get_forecast_env(self) -> "grid2op.Environment.Environment": maintenance = self._generate_forecasted_maintenance_for_simenv(prod_v.shape[0]) return self._make_env_from_arays(load_p, load_q, prod_p, prod_v, maintenance) - def get_forecast_arrays(self): + def get_forecast_arrays(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: """ This function allows you to retrieve (as numpy arrays) the values for all the loads / generators / maintenance for the foreseeable future (they are the forecasts available in :func:`BaseObservation.simulate` and @@ -4339,8 +4611,26 @@ def get_env_from_external_forecasts(self, you have 100 rows then you have 100 steps. .. warning:: - We remind that, if you provide some forecasts, it is expected that + We remind that, if you provide some forecasts, it is expected that they allow some powerflow to converge. + The balance between total generation on one side and total demand and losses on the other should also + be "as close as possible" to limit modeling artifacts (from the backend; grid2op does not check + anything here). + + Finally, make sure that your input data meet the constraints on the generators (pmin, pmax and ramps), + otherwise you might end up with incorrect behaviour. Grid2op assumes that the data fed to it + is consistent with its model. If not, it's "undefined behaviour". + + .. danger:: + + Long story short, once an environment (and a forecast_env is one) + is deleted, you cannot use anything it "holds", including, + but not limited to, the capacity to perform `obs.simulate(...)` even if the `obs` is still + referenced. + + See :ref:`danger-env-ownership` (first danger block). + This caused, for example, issue https://github.com/rte-france/Grid2Op/issues/568. + Examples -------- A typical use might look like @@ -4419,6 +4709,7 @@ def _make_env_from_arays(self, prod_p=prod_p, prod_v=prod_v, maintenance=maintenance) + ch.max_iter = ch.real_data.max_iter backend = self._obs_env.backend.copy() backend._is_loaded = True @@ -4434,7 +4725,7 @@ def _make_env_from_arays(self, res.highres_sim_counter._HighResSimCounter__nb_highres_called = nb_highres_called return res - def change_forecast_parameters(self, params): + def change_forecast_parameters(self, params: "grid2op.Parameters.Parameters") -> None: """This function allows to change the parameters (see :class:`grid2op.Parameters.Parameters` for more information) that are used for the `obs.simulate()` and `obs.get_forecast_env()` method. @@ -4474,7 +4765,7 @@ def change_forecast_parameters(self, params): self._obs_env.change_parameters(params) self._obs_env._parameters = params - def update_after_reward(self, env): + def update_after_reward(self, env: "grid2op.Environment.BaseEnv") -> None: """Only called for the regular environment (so not available for :func:`BaseObservation.get_forecast_env` or :func:`BaseObservation.simulate`) @@ -4502,4 +4793,55 @@ def update_after_reward(self, env): return # update the was_alert_used_after_attack ! 
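A compact, illustrative sketch of the forecast-environment workflow these methods document (keeping the observation, and hence the environment it is bound to, alive for the whole block, per the ownership warning above):

.. code-block:: python

    import grid2op

    env = grid2op.make("l2rpn_case14_sandbox")
    obs = env.reset()

    # build a small environment from the forecasts embedded in the observation
    forecast_env = obs.get_forecast_env()
    f_obs = forecast_env.reset()

    do_nothing = forecast_env.action_space()
    f_obs, f_reward, f_done, f_info = forecast_env.step(do_nothing)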
- self.was_alert_used_after_attack[:] = env._was_alert_used_after_attack \ No newline at end of file + self.was_alert_used_after_attack[:] = env._was_alert_used_after_attack + + def get_back_to_ref_state( + self, + storage_setpoint: float=0.5, + precision: int=5, + ) -> Dict[Literal["powerline", + "substation", + "redispatching", + "storage", + "curtailment"], + List["grid2op.Action.BaseAction"]]: + """ + Allows one to retrieve the list of actions that need to be performed + to get the grid back to the "reference" state (all elements connected + to busbar 1, no redispatching, no curtailment) + + + .. versionadded:: 1.10.0 + + This function uses the method of the underlying action_space used + for the forecasts. + + See :func:`grid2op.Action.SerializableActionSpace.get_back_to_ref_state` + for more information. + + Examples + -------- + + You can use it like this: + + .. code-block:: python + + import grid2op + + env_name = "l2rpn_case14_sandbox" + env = grid2op.make(env_name) + obs = env.reset(seed=1) + + # perform a random action + obs, reward, done, info = env.step(env.action_space.sample()) + assert not done # you might end up in a "done" state depending on the random action + + acts = obs.get_back_to_ref_state() + print(acts) + """ + if self.action_helper is None: + raise Grid2OpException("Trying to use this function when no action space is " "available.") + if self._is_done: + raise Grid2OpException("Cannot use this function in a 'done' state.") + return self.action_helper.get_back_to_ref_state(self, storage_setpoint, precision) diff --git a/grid2op/Observation/observationSpace.py b/grid2op/Observation/observationSpace.py index add75c631..5b4a00d95 100644 --- a/grid2op/Observation/observationSpace.py +++ b/grid2op/Observation/observationSpace.py @@ -72,6 +72,7 @@ def __init__( observation_bk_kwargs=None, logger=None, _with_obs_env=True, # pass + _local_dir_cls=None, ): """ INTERNAL @@ -80,22 +81,14 @@ def __init__( Env: requires :attr:`grid2op.Environment.BaseEnv.parameters` and :attr:`grid2op.Environment.BaseEnv.backend` to be valid """ - - # lazy import to prevent circular references (Env -> Observation -> Obs Space -> _ObsEnv -> Env) - from grid2op.Environment._obsEnv import _ObsEnv - if actionClass is None: from grid2op.Action import CompleteAction actionClass = CompleteAction - if logger is None: - self.logger = logging.getLogger(__name__) - self.logger.disabled = True - else: - self.logger: logging.Logger = logger.getChild("grid2op_ObsSpace") - + self._init_observationClass = observationClass SerializableObservationSpace.__init__( - self, gridobj, observationClass=observationClass + self, gridobj, observationClass=observationClass, _local_dir_cls=_local_dir_cls, + logger=logger, ) self.with_forecast = with_forecast self._simulate_parameters = copy.deepcopy(env.parameters) @@ -112,14 +105,9 @@ def __init__( self.reward_helper = RewardHelper(reward_func=self._reward_func, logger=self.logger) self.__can_never_use_simulate = False - # TODO here: have another backend class maybe - _with_obs_env = _with_obs_env and self._create_backend_obs(env, observation_bk_class, observation_bk_kwargs) - - self._ObsEnv_class = _ObsEnv.init_grid( - type(env.backend), force_module=_ObsEnv.__module__ - ) - self._ObsEnv_class._INIT_GRID_CLS = _ObsEnv # otherwise it's lost - setattr(sys.modules[_ObsEnv.__module__], self._ObsEnv_class.__name__, self._ObsEnv_class) + _with_obs_env = _with_obs_env and self._create_backend_obs(env, observation_bk_class, observation_bk_kwargs, _local_dir_cls) + + 
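A minimal sketch of how the dictionary returned by ``obs.get_back_to_ref_state()`` above might be consumed; it only assumes the keys listed in the type hint ("powerline", "substation", "redispatching", "storage", "curtailment") and the environment name already used in the docstring example.

.. code-block:: python

    import grid2op

    env = grid2op.make("l2rpn_case14_sandbox")
    obs = env.reset(seed=1)

    # perform a random action (the call below requires a non-"done" state)
    obs, reward, done, info = env.step(env.action_space.sample())

    # each key maps to the list of actions needed to bring that part
    # of the grid back to its reference state
    acts = obs.get_back_to_ref_state()
    for category, actions in acts.items():
        print(f"{category}: {len(actions)} action(s) needed")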
self._ObsEnv_class = None if _with_obs_env: self._create_obs_env(env, observationClass) self.reward_helper.initialize(self.obs_env) @@ -175,6 +163,18 @@ def set_real_env_kwargs(self, env): del self._real_env_kwargs["observation_bk_kwargs"] def _create_obs_env(self, env, observationClass): + if self._ObsEnv_class is None: + # lazy import to prevent circular references (Env -> Observation -> Obs Space -> _ObsEnv -> Env) + from grid2op.Environment._obsEnv import _ObsEnv + + # self._ObsEnv_class = _ObsEnv.init_grid( + # type(env.backend), force_module=_ObsEnv.__module__, force=_local_dir_cls is not None + # ) + # self._ObsEnv_class._INIT_GRID_CLS = _ObsEnv # otherwise it's lost + self._ObsEnv_class = _ObsEnv.init_grid( + type(env.backend), _local_dir_cls=env._local_dir_cls + ) + self._ObsEnv_class._INIT_GRID_CLS = _ObsEnv # otherwise it's lost other_rewards = {k: v.rewardClass for k, v in env.other_rewards.items()} self.obs_env = self._ObsEnv_class( init_env_path=None, # don't leak the path of the real grid to the observation space @@ -200,14 +200,16 @@ def _create_obs_env(self, env, observationClass): highres_sim_counter=env.highres_sim_counter, _complete_action_cls=env._complete_action_cls, _ptr_orig_obs_space=self, + _local_dir_cls=env._local_dir_cls, + _read_from_local_dir=env._read_from_local_dir, ) for k, v in self.obs_env.other_rewards.items(): v.initialize(self.obs_env) - def _aux_create_backend(self, env, observation_bk_class, observation_bk_kwargs, path_grid_for): + def _aux_create_backend(self, env, observation_bk_class, observation_bk_kwargs, path_grid_for, _local_dir_cls): if observation_bk_kwargs is None: observation_bk_kwargs = env.backend._my_kwargs - observation_bk_class_used = observation_bk_class.init_grid(type(env.backend)) + observation_bk_class_used = observation_bk_class.init_grid(type(env.backend), _local_dir_cls=_local_dir_cls) self._backend_obs = observation_bk_class_used(**observation_bk_kwargs) self._backend_obs.set_env_name(env.name) self._backend_obs.load_grid(path_grid_for) @@ -216,7 +218,7 @@ def _aux_create_backend(self, env, observation_bk_class, observation_bk_kwargs, self._backend_obs.assert_grid_correct_after_powerflow() self._backend_obs.set_thermal_limit(env.get_thermal_limit()) - def _create_backend_obs(self, env, observation_bk_class, observation_bk_kwargs): + def _create_backend_obs(self, env, observation_bk_class, observation_bk_kwargs, _local_dir_cls): _with_obs_env = True path_sim_bk = os.path.join(env.get_path_env(), "grid_forecast.json") if observation_bk_class is not None or observation_bk_kwargs is not None: @@ -232,12 +234,12 @@ def _create_backend_obs(self, env, observation_bk_class, observation_bk_kwargs): path_grid_for = path_sim_bk else: path_grid_for = os.path.join(env.get_path_env(), "grid.json") - self._aux_create_backend(env, observation_bk_class, observation_bk_kwargs, path_grid_for) + self._aux_create_backend(env, observation_bk_class, observation_bk_kwargs, path_grid_for, _local_dir_cls) elif os.path.exists(path_sim_bk) and os.path.isfile(path_sim_bk): # backend used for simulate will use the same class with same args as the env # backend, but with a different grid observation_bk_class = env._raw_backend_class - self._aux_create_backend(env, observation_bk_class, observation_bk_kwargs, path_sim_bk) + self._aux_create_backend(env, observation_bk_class, observation_bk_kwargs, path_sim_bk, _local_dir_cls) elif env.backend._can_be_copied: # case where I can copy the backend for the 'simulate' and I don't need to build # it (uses same 
class and same grid) @@ -263,10 +265,11 @@ def _deactivate_simulate(self, env): self._backend_obs.close() self._backend_obs = None self.with_forecast = False - env.deactivate_forecast() - env.backend._can_be_copied = False - self.logger.warn("Forecasts have been deactivated because " - "the backend cannot be copied.") + if env is not None: + env.deactivate_forecast() + env.backend._can_be_copied = False + self.logger.warning("Forecasts have been deactivated because " + "the backend cannot be copied.") def reactivate_forecast(self, env): if self.__can_never_use_simulate: @@ -279,11 +282,11 @@ def reactivate_forecast(self, env): if self._backend_obs is not None: self._backend_obs.close() self._backend_obs = None - self._create_backend_obs(env, self._observation_bk_class, self._observation_bk_kwargs) - if self.obs_env is not None : + self._create_backend_obs(env, self._observation_bk_class, self._observation_bk_kwargs, env._local_dir_cls) + if self.obs_env is not None: self.obs_env.close() self.obs_env = None - self._create_obs_env(env) + self._create_obs_env(env, self._init_observationClass) self.set_real_env_kwargs(env) self.with_forecast = True @@ -329,7 +332,8 @@ def _change_parameters(self, new_param): change the parameter of the "simulate" environment """ - self.obs_env.change_parameters(new_param) + if self.obs_env is not None: + self.obs_env.change_parameters(new_param) self._simulate_parameters = new_param def change_other_rewards(self, dict_reward): @@ -386,8 +390,8 @@ def change_reward(self, reward_func): self.obs_env._reward_helper.change_reward(reward_func) else: raise EnvError("Impossible to change the reward of the simulate " - "function when you cannot simulate (because the " - "backend could not be copied)") + "function when you cannot simulate (because the " + "backend could not be copied)") def set_thermal_limit(self, thermal_limit_a): if self.obs_env is not None: @@ -453,7 +457,7 @@ def reset(self, real_env): self.obs_env.reset() self._env_param = copy.deepcopy(real_env.parameters) - def _custom_deepcopy_for_copy(self, new_obj): + def _custom_deepcopy_for_copy(self, new_obj, env=None): """implements a faster "res = copy.deepcopy(self)" to use in "self.copy" Do not use it anywhere else... 
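As a side note on the ``copy()`` / ``_custom_deepcopy_for_copy()`` methods touched in the surrounding hunks, here is a self-contained sketch of the underlying idiom (illustrative class and attribute names, not grid2op's API): bypass ``__init__`` with ``cls.__new__(cls)``, then fill the fields by hand, sharing heavy resources by reference and deep-copying only the cheap state.

.. code-block:: python

    import copy

    class FastCopyExample:
        """Illustrative only: mimics the copy() + _custom_deepcopy_for_copy() idiom."""

        def __init__(self, heavy_resource, small_state):
            self.heavy_resource = heavy_resource  # e.g. a backend: shared by reference
            self.small_state = small_state        # cheap data: deep-copied

        def _custom_deepcopy_for_copy(self, new_obj):
            # fill the fields of the blank instance by hand
            new_obj.heavy_resource = self.heavy_resource
            new_obj.small_state = copy.deepcopy(self.small_state)

        def copy(self):
            # skip __init__ entirely, then delegate the field-by-field copy
            res = type(self).__new__(type(self))
            self._custom_deepcopy_for_copy(res)
            return res

    original = FastCopyExample(heavy_resource=object(), small_state={"a": 1})
    clone = original.copy()
    assert clone.heavy_resource is original.heavy_resource  # shared
    assert clone.small_state == original.small_state        # equal content
    assert clone.small_state is not original.small_state    # but an independent copy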
@@ -463,6 +467,7 @@ def _custom_deepcopy_for_copy(self, new_obj): super()._custom_deepcopy_for_copy(new_obj) # now fill my class + new_obj._init_observationClass = self._init_observationClass new_obj.with_forecast = self.with_forecast new_obj._simulate_parameters = copy.deepcopy(self._simulate_parameters) new_obj._reward_func = copy.deepcopy(self._reward_func) @@ -488,13 +493,17 @@ def _custom_deepcopy_for_copy(self, new_obj): new_obj._ptr_kwargs_observation = self._ptr_kwargs_observation # real env kwargs, these is a "pointer" anyway - new_obj._real_env_kwargs = self._real_env_kwargs + if env is not None: + from grid2op.Environment import Environment + new_obj._real_env_kwargs = Environment.get_kwargs(env, False, False) + else: + new_obj._real_env_kwargs = self._real_env_kwargs new_obj._observation_bk_class = self._observation_bk_class new_obj._observation_bk_kwargs = self._observation_bk_kwargs new_obj._ObsEnv_class = self._ObsEnv_class - def copy(self, copy_backend=False): + def copy(self, copy_backend=False, env=None): """ INTERNAL @@ -515,18 +524,23 @@ def copy(self, copy_backend=False): # create an empty "me" my_cls = type(self) res = my_cls.__new__(my_cls) - self._custom_deepcopy_for_copy(res) + self._custom_deepcopy_for_copy(res, env) if not copy_backend: res._backend_obs = backend res._empty_obs = obs_.copy() res.obs_env = obs_env else: - res.obs_env = obs_env.copy() - res.obs_env._ptr_orig_obs_space = res - res._backend_obs = res.obs_env.backend - res._empty_obs = obs_.copy() - res._empty_obs._obs_env = res.obs_env + # backend needs to be copied + if obs_env is not None: + # I also need to copy the obs env + res.obs_env = obs_env.copy(env=env, new_obs_space=res) + res._backend_obs = res.obs_env.backend + res._empty_obs = obs_.copy() + res._empty_obs._obs_env = res.obs_env + else: + # no obs env: I do nothing + res.obs_env = None # assign back the results self._backend_obs = backend diff --git a/grid2op/Observation/serializableObservationSpace.py b/grid2op/Observation/serializableObservationSpace.py index 1471a51ef..7796eb74c 100644 --- a/grid2op/Observation/serializableObservationSpace.py +++ b/grid2op/Observation/serializableObservationSpace.py @@ -6,6 +6,9 @@ # SPDX-License-Identifier: MPL-2.0 # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. 
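For reference, the ``np.where(cond)`` to ``cond.nonzero()`` substitutions made throughout the following hunks (opponents, rewards, rules) are behaviour-preserving; a quick self-contained check, with made-up array values:

.. code-block:: python

    import numpy as np

    name_line = np.array(["line_0", "line_1", "line_2"])

    # the two forms used interchangeably in the hunks below
    l_id_where = np.where(name_line == "line_1")      # tuple of index arrays
    l_id_nonzero = (name_line == "line_1").nonzero()  # same tuple, no np.where call

    assert np.array_equal(l_id_where[0], l_id_nonzero[0])
    print(l_id_nonzero[0][0])  # -> 1, the index of "line_1"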
+import logging +import copy + from grid2op.Space import SerializableSpace from grid2op.Observation.completeObservation import CompleteObservation @@ -27,7 +30,7 @@ class SerializableObservationSpace(SerializableSpace): """ - def __init__(self, gridobj, observationClass=CompleteObservation, _init_grid=True): + def __init__(self, gridobj, observationClass=CompleteObservation, logger=None, _init_grid=True, _local_dir_cls=None): """ Parameters @@ -40,16 +43,26 @@ def __init__(self, gridobj, observationClass=CompleteObservation, _init_grid=Tru """ SerializableSpace.__init__( - self, gridobj=gridobj, subtype=observationClass, _init_grid=_init_grid + self, gridobj=gridobj, + subtype=observationClass, + _init_grid=_init_grid, + _local_dir_cls=_local_dir_cls ) self.observationClass = self.subtype self._empty_obs = self._template_obj + + if logger is None: + self.logger = logging.getLogger(__name__) + self.logger.disabled = True + else: + self.logger: logging.Logger = logger.getChild("grid2op_ObsSpace") def _custom_deepcopy_for_copy(self, new_obj): super()._custom_deepcopy_for_copy(new_obj) # SerializableObservationSpace new_obj.observationClass = self.observationClass # const new_obj._empty_obs = self._template_obj # const + new_obj.logger = copy.deepcopy(self.logger) @staticmethod def from_dict(dict_): diff --git a/grid2op/Opponent/geometricOpponent.py b/grid2op/Opponent/geometricOpponent.py index 71253d4a7..1c811aa54 100644 --- a/grid2op/Opponent/geometricOpponent.py +++ b/grid2op/Opponent/geometricOpponent.py @@ -109,7 +109,7 @@ def init( # Store attackable lines IDs self._lines_ids = [] for l_name in lines_attacked: - l_id = np.where(self.action_space.name_line == l_name) + l_id = (self.action_space.name_line == l_name).nonzero() if len(l_id) and len(l_id[0]): self._lines_ids.append(l_id[0][0]) else: diff --git a/grid2op/Opponent/opponentSpace.py b/grid2op/Opponent/opponentSpace.py index 60d3a9927..bca588d46 100644 --- a/grid2op/Opponent/opponentSpace.py +++ b/grid2op/Opponent/opponentSpace.py @@ -49,6 +49,7 @@ def __init__( attack_cooldown, # minimum duration between two consecutive attack budget_per_timestep=0.0, action_space=None, + _local_dir_cls=None ): if action_space is not None: diff --git a/grid2op/Opponent/randomLineOpponent.py b/grid2op/Opponent/randomLineOpponent.py index f1c5ed256..c59cdc4f2 100644 --- a/grid2op/Opponent/randomLineOpponent.py +++ b/grid2op/Opponent/randomLineOpponent.py @@ -57,7 +57,7 @@ def init(self, partial_env, lines_attacked=[], **kwargs): # Store attackable lines IDs self._lines_ids = [] for l_name in lines_attacked: - l_id = np.where(self.action_space.name_line == l_name) + l_id = (self.action_space.name_line == l_name).nonzero() if len(l_id) and len(l_id[0]): self._lines_ids.append(l_id[0][0]) else: diff --git a/grid2op/Opponent/weightedRandomOpponent.py b/grid2op/Opponent/weightedRandomOpponent.py index 35ad5f2be..4771a57c1 100644 --- a/grid2op/Opponent/weightedRandomOpponent.py +++ b/grid2op/Opponent/weightedRandomOpponent.py @@ -73,7 +73,7 @@ def init( # Store attackable lines IDs self._lines_ids = [] for l_name in lines_attacked: - l_id = np.where(self.action_space.name_line == l_name) + l_id = (self.action_space.name_line == l_name).nonzero() if len(l_id) and len(l_id[0]): self._lines_ids.append(l_id[0][0]) else: diff --git a/grid2op/Parameters.py b/grid2op/Parameters.py index 56e523b10..c5ec67b23 100644 --- a/grid2op/Parameters.py +++ b/grid2op/Parameters.py @@ -22,9 +22,9 @@ class Parameters: Attributes ---------- NO_OVERFLOW_DISCONNECTION: ``bool`` 
- If set to ``True`` then the :class:`grid2op.Environment.Environment` will not disconnect powerline above their - thermal - limit. Default is ``False`` + If set to ``True`` then the :class:`grid2op.Environment.Environment` will **NOT** disconnect powerline above their + thermal limit. Default is ``False``, meaning that grid2op will disconnect powerlines above their limits + for too long or for "too much". NB_TIMESTEP_OVERFLOW_ALLOWED: ``int`` Number of timesteps for which a soft overflow is allowed, default 2. This means that a powerline will be @@ -148,6 +148,19 @@ class Parameters: MAX_SIMULATE_PER_EPISODE: ``int`` Maximum number of calls to `obs.simuate(...)` allowed per episode (reset each "env.simulate(...)"). Defaults to -1 meaning "as much as you want". + IGNORE_INITIAL_STATE_TIME_SERIE: ``bool`` + If set to True (which is NOT the default), then the initial state of the grid + will always be "everything connected" and "everything connected to busbar 1" + regardless of the information present in the time series (see + :func:`grid2op.Chronics.GridValue.get_init_action`) + + .. versionadded:: 1.10.2 + + .. note:: + This flag has no impact if an initial state is set through a call to + `env.reset(options={"init state": ...})` (see doc of :func:`grid2op.Environment.Environment.reset` + for more information) + """ def __init__(self, parameters_path=None): @@ -227,6 +240,8 @@ def __init__(self, parameters_path=None): else: warn_msg = "Parameters: the file {} is not found. Continuing with default parameters." warnings.warn(warn_msg.format(parameters_path)) + + self.IGNORE_INITIAL_STATE_TIME_SERIE = False @staticmethod def _isok_txt(arg): @@ -368,6 +383,11 @@ def init_from_dict(self, dict_): if "MAX_SIMULATE_PER_EPISODE" in dict_: self.MAX_SIMULATE_PER_EPISODE = dt_int(dict_["MAX_SIMULATE_PER_EPISODE"]) + if "IGNORE_INITIAL_STATE_TIME_SERIE" in dict_: + self.IGNORE_INITIAL_STATE_TIME_SERIE = Parameters._isok_txt( + dict_["IGNORE_INITIAL_STATE_TIME_SERIE"] + ) + authorized_keys = set(self.__dict__.keys()) authorized_keys = authorized_keys | { "NB_TIMESTEP_POWERFLOW_ALLOWED", @@ -416,6 +436,7 @@ def to_dict(self): res["ALERT_TIME_WINDOW"] = int(self.ALERT_TIME_WINDOW) res["MAX_SIMULATE_PER_STEP"] = int(self.MAX_SIMULATE_PER_STEP) res["MAX_SIMULATE_PER_EPISODE"] = int(self.MAX_SIMULATE_PER_EPISODE) + res["IGNORE_INITIAL_STATE_TIME_SERIE"] = int(self.IGNORE_INITIAL_STATE_TIME_SERIE) return res def init_from_json(self, json_path): @@ -470,8 +491,10 @@ def check_valid(self): Raises ------- - An exception if the parameter is not valid + An exception (`RuntimeError`) if the parameter is not valid + """ + try: if not isinstance(self.NO_OVERFLOW_DISCONNECTION, (bool, dt_bool)): raise RuntimeError("NO_OVERFLOW_DISCONNECTION should be a boolean") @@ -479,7 +502,7 @@ def check_valid(self): except Exception as exc_: raise RuntimeError( f'Impossible to convert NO_OVERFLOW_DISCONNECTION to bool with error \n:"{exc_}"' - ) + ) from exc_ try: self.NB_TIMESTEP_OVERFLOW_ALLOWED = int( @@ -491,7 +514,7 @@ def check_valid(self): except Exception as exc_: raise RuntimeError( f'Impossible to convert NB_TIMESTEP_OVERFLOW_ALLOWED to int with error \n:"{exc_}"' - ) + ) from exc_ if self.NB_TIMESTEP_OVERFLOW_ALLOWED < 0: raise RuntimeError( @@ -505,7 +528,7 @@ def check_valid(self): except Exception as exc_: raise RuntimeError( f'Impossible to convert NB_TIMESTEP_RECONNECTION to int with error \n:"{exc_}"' - ) + ) from exc_ if self.NB_TIMESTEP_RECONNECTION < 0: raise RuntimeError("NB_TIMESTEP_RECONNECTION < 0., this 
should be >= 0.") try: @@ -514,7 +537,7 @@ def check_valid(self): except Exception as exc_: raise RuntimeError( f'Impossible to convert NB_TIMESTEP_COOLDOWN_LINE to int with error \n:"{exc_}"' - ) + ) from exc_ if self.NB_TIMESTEP_COOLDOWN_LINE < 0: raise RuntimeError("NB_TIMESTEP_COOLDOWN_LINE < 0., this should be >= 0.") try: @@ -525,7 +548,7 @@ def check_valid(self): except Exception as exc_: raise RuntimeError( f'Impossible to convert NB_TIMESTEP_COOLDOWN_SUB to int with error \n:"{exc_}"' - ) + ) from exc_ if self.NB_TIMESTEP_COOLDOWN_SUB < 0: raise RuntimeError("NB_TIMESTEP_COOLDOWN_SUB < 0., this should be >= 0.") try: @@ -536,7 +559,7 @@ def check_valid(self): except Exception as exc_: raise RuntimeError( f'Impossible to convert HARD_OVERFLOW_THRESHOLD to float with error \n:"{exc_}"' - ) + ) from exc_ if self.HARD_OVERFLOW_THRESHOLD < 1.0: raise RuntimeError( "HARD_OVERFLOW_THRESHOLD < 1., this should be >= 1. (use env.set_thermal_limit " @@ -551,7 +574,7 @@ def check_valid(self): except Exception as exc_: raise RuntimeError( f'Impossible to convert SOFT_OVERFLOW_THRESHOLD to float with error \n:"{exc_}"' - ) + ) from exc_ if self.SOFT_OVERFLOW_THRESHOLD < 1.0: raise RuntimeError( "SOFT_OVERFLOW_THRESHOLD < 1., this should be >= 1. (use env.set_thermal_limit " @@ -570,14 +593,14 @@ def check_valid(self): except Exception as exc_: raise RuntimeError( f'Impossible to convert ENV_DC to bool with error \n:"{exc_}"' - ) + ) from exc_ try: self.MAX_SUB_CHANGED = int(self.MAX_SUB_CHANGED) # to raise if numpy array self.MAX_SUB_CHANGED = dt_int(self.MAX_SUB_CHANGED) except Exception as exc_: raise RuntimeError( f'Impossible to convert MAX_SUB_CHANGED to int with error \n:"{exc_}"' - ) + ) from exc_ if self.MAX_SUB_CHANGED < 0: raise RuntimeError( "MAX_SUB_CHANGED should be >=0 (or -1 if you want to be able to change every " @@ -591,7 +614,7 @@ def check_valid(self): except Exception as exc_: raise RuntimeError( f'Impossible to convert MAX_LINE_STATUS_CHANGED to int with error \n:"{exc_}"' - ) + ) from exc_ if self.MAX_LINE_STATUS_CHANGED < 0: raise RuntimeError( "MAX_LINE_STATUS_CHANGED should be >=0 " @@ -604,7 +627,7 @@ def check_valid(self): except Exception as exc_: raise RuntimeError( f'Impossible to convert IGNORE_MIN_UP_DOWN_TIME to bool with error \n:"{exc_}"' - ) + ) from exc_ try: if not isinstance(self.ALLOW_DISPATCH_GEN_SWITCH_OFF, (bool, dt_bool)): raise RuntimeError("ALLOW_DISPATCH_GEN_SWITCH_OFF should be a boolean") @@ -614,7 +637,7 @@ def check_valid(self): except Exception as exc_: raise RuntimeError( f'Impossible to convert ALLOW_DISPATCH_GEN_SWITCH_OFF to bool with error \n:"{exc_}"' - ) + ) from exc_ try: if not isinstance( self.LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION, (bool, dt_bool) @@ -628,7 +651,7 @@ def check_valid(self): except Exception as exc_: raise RuntimeError( f'Impossible to convert LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION to bool with error \n:"{exc_}"' - ) + ) from exc_ try: self.INIT_STORAGE_CAPACITY = float( @@ -638,16 +661,16 @@ def check_valid(self): except Exception as exc_: raise RuntimeError( f'Impossible to convert INIT_STORAGE_CAPACITY to float with error \n:"{exc_}"' - ) + ) from exc_ if self.INIT_STORAGE_CAPACITY < 0.0: raise RuntimeError( "INIT_STORAGE_CAPACITY < 0., this should be within range [0., 1.]" - ) + ) from exc_ if self.INIT_STORAGE_CAPACITY > 1.0: raise RuntimeError( "INIT_STORAGE_CAPACITY > 1., this should be within range [0., 1.]" - ) + ) from exc_ try: if not isinstance(self.ACTIVATE_STORAGE_LOSS, (bool, dt_bool)): @@ 
-656,26 +679,26 @@ def check_valid(self): except Exception as exc_: raise RuntimeError( f'Impossible to convert ACTIVATE_STORAGE_LOSS to bool with error \n:"{exc_}"' - ) + ) from exc_ try: self.ALARM_WINDOW_SIZE = dt_int(self.ALARM_WINDOW_SIZE) except Exception as exc_: raise RuntimeError( f'Impossible to convert ALARM_WINDOW_SIZE to int with error \n:"{exc_}"' - ) + ) from exc_ try: self.ALARM_BEST_TIME = dt_int(self.ALARM_BEST_TIME) except Exception as exc_: raise RuntimeError( f'Impossible to convert ALARM_BEST_TIME to int with error \n:"{exc_}"' - ) + ) from exc_ try: self.ALERT_TIME_WINDOW = dt_int(self.ALERT_TIME_WINDOW) except Exception as exc_: raise RuntimeError( f'Impossible to convert ALERT_TIME_WINDOW to int with error \n:"{exc_}"' - ) + ) from exc_ if self.ALARM_WINDOW_SIZE <= 0: raise RuntimeError("self.ALARM_WINDOW_SIZE should be a positive integer !") @@ -692,7 +715,7 @@ def check_valid(self): except Exception as exc_: raise RuntimeError( f'Impossible to convert MAX_SIMULATE_PER_STEP to int with error \n:"{exc_}"' - ) + ) from exc_ if self.MAX_SIMULATE_PER_STEP <= -2: raise RuntimeError( f"self.MAX_SIMULATE_PER_STEP should be a positive integer or -1, we found {self.MAX_SIMULATE_PER_STEP}" @@ -706,8 +729,15 @@ def check_valid(self): except Exception as exc_: raise RuntimeError( f'Impossible to convert MAX_SIMULATE_PER_EPISODE to int with error \n:"{exc_}"' - ) + ) from exc_ if self.MAX_SIMULATE_PER_EPISODE <= -2: raise RuntimeError( f"self.MAX_SIMULATE_PER_EPISODE should be a positive integer or -1, we found {self.MAX_SIMULATE_PER_EPISODE}" ) + + try: + self.IGNORE_INITIAL_STATE_TIME_SERIE = dt_bool(self.IGNORE_INITIAL_STATE_TIME_SERIE) + except Exception as exc_: + raise RuntimeError( + f'Impossible to convert IGNORE_INITIAL_STATE_TIME_SERIE to bool with error \n:"{exc_}"' + ) from exc_ diff --git a/grid2op/Plot/EpisodeReplay.py b/grid2op/Plot/EpisodeReplay.py index 77d20d1bd..d2e8ae87a 100644 --- a/grid2op/Plot/EpisodeReplay.py +++ b/grid2op/Plot/EpisodeReplay.py @@ -31,7 +31,8 @@ import imageio_ffmpeg can_save_gif = True -except: +except ImportError as exc_: + warnings.warn(f"Error while importing imageio and imageio_ffmpeg: \n{exc_}") can_save_gif = False diff --git a/grid2op/Plot/PlotPlotly.py b/grid2op/Plot/PlotPlotly.py index 14d5419d0..aae742f32 100644 --- a/grid2op/Plot/PlotPlotly.py +++ b/grid2op/Plot/PlotPlotly.py @@ -143,10 +143,10 @@ def draw_line(pos_sub_or, pos_sub_ex, rho, color_palette, status, line_color="gr Parameters ---------- pos_sub_or: ``tuple`` - Position (x,y) of the origin end of the powerline + Position (x,y) of the origin side of the powerline pos_sub_ex: ``tuple`` - Position (x,y) of the extremity end of the powerline + Position (x,y) of the extremity side of the powerline rho: ``float`` Line capacity usage diff --git a/grid2op/PlotGrid/BasePlot.py b/grid2op/PlotGrid/BasePlot.py index 041cd6d45..707c8d349 100644 --- a/grid2op/PlotGrid/BasePlot.py +++ b/grid2op/PlotGrid/BasePlot.py @@ -1011,10 +1011,10 @@ def plot_info( observation.rho = copy.deepcopy(line_values) try: observation.rho = np.array(observation.rho).astype(dt_float) - except: + except Exception as exc_: raise PlotError( "Impossible to convert the input values (line_values) to floating point" - ) + ) from exc_ # rescaling to have range 0 - 1.0 tmp = observation.rho[np.isfinite(observation.rho)] @@ -1038,10 +1038,10 @@ def plot_info( observation.prod_p = np.array(observation.prod_p).astype( dt_float ) - except: + except Exception as exc_: raise PlotError( "Impossible to convert the 
input values (gen_values) to floating point" - ) + ) from exc_ # rescaling to have range 0 - 1.0 tmp = observation.prod_p[np.isfinite(observation.prod_p)] diff --git a/grid2op/PlotGrid/PlotMatplot.py b/grid2op/PlotGrid/PlotMatplot.py index 9befd1cc4..ca584dd94 100644 --- a/grid2op/PlotGrid/PlotMatplot.py +++ b/grid2op/PlotGrid/PlotMatplot.py @@ -879,7 +879,7 @@ def draw_powerline( ) self._draw_powerline_bus(pos_ex_x, pos_ex_y, ex_dir_x, ex_dir_y, ex_bus) watt_value = observation.p_or[line_id] - if rho > 0.0 and watt_value != 0.0: + if rho > 0.0 and np.abs(watt_value) >= 1e-7: self._draw_powerline_arrow( pos_or_x, pos_or_y, pos_ex_x, pos_ex_y, color, watt_value ) diff --git a/grid2op/PlotGrid/PlotPlotly.py b/grid2op/PlotGrid/PlotPlotly.py index 52653b0b9..126e40ce9 100644 --- a/grid2op/PlotGrid/PlotPlotly.py +++ b/grid2op/PlotGrid/PlotPlotly.py @@ -144,8 +144,10 @@ def convert_figure_to_numpy_HWC(self, figure): format="png", width=self.width, height=self.height, scale=1 ) return imageio.imread(img_bytes, format="png") - except: - warnings.warn("Plotly need additional dependencies for offline rendering") + except Exception as exc_: + warnings.warn(f"Plotly need additional dependencies for " + f"offline rendering. Error was: " + f"\n{exc_}") return np.full((self.height, self.width, 3), 255, dtype=np.unit8) def _draw_substation_txt(self, name, pos_x, pos_y, text): @@ -564,7 +566,7 @@ def draw_powerline( capacity = observation.rho[line_id] capacity = np.clip(capacity, 0.0, 1.0) color = color_scheme[int(capacity * float(len(color_scheme) - 1))] - if capacity == 0.0: + if np.abs(capacity) <= 1e-7: color = "black" line_style = dict(dash=None if connected else "dash", color=color) line_text = "" @@ -613,7 +615,7 @@ def update_powerline( capacity = min(observation.rho[line_id], 1.0) color_idx = int(capacity * (len(color_scheme) - 1)) color = color_scheme[color_idx] - if capacity == 0.0: + if np.abs(capacity) <= 1e-7: color = "black" if line_value is not None: line_text = pltu.format_value_unit(line_value, line_unit) diff --git a/grid2op/Reward/alarmReward.py b/grid2op/Reward/alarmReward.py index e114a7920..884f78338 100644 --- a/grid2op/Reward/alarmReward.py +++ b/grid2op/Reward/alarmReward.py @@ -107,7 +107,7 @@ def _mult_for_zone(self, alarm, disc_lines, env): """compute the multiplicative factor that increases the score if the right zone is predicted""" res = 1.0 # extract the lines that have been disconnected due to cascading failures - lines_disconnected_first = np.where(disc_lines == 0)[0] + lines_disconnected_first = (disc_lines == 0).nonzero()[0] if ( alarm.sum() > 1 @@ -124,7 +124,7 @@ def _mult_for_zone(self, alarm, disc_lines, env): # now retrieve the id of the zones in which a powerline has been disconnected list_zone_names = list(zones_these_lines) - list_zone_ids = np.where(np.isin(env.alarms_area_names, list_zone_names))[0] + list_zone_ids = (np.isin(env.alarms_area_names, list_zone_names)).nonzero()[0] # and finally, award some extra points if one of the zone, containing one of the powerline disconnected # by protection is in the alarm if alarm[list_zone_ids].any(): diff --git a/grid2op/Reward/alertReward.py b/grid2op/Reward/alertReward.py index 1ab8d4d7c..c0c3ae03a 100644 --- a/grid2op/Reward/alertReward.py +++ b/grid2op/Reward/alertReward.py @@ -157,7 +157,7 @@ def _update_state(self, env, action): def _compute_score_attack_blackout(self, env, ts_attack_in_order, indexes_to_look): # retrieve the lines that have been attacked in the time window - ts_ind, line_ind = 
np.where(ts_attack_in_order) + ts_ind, line_ind = (ts_attack_in_order).nonzero() line_first_attack, first_ind_line_attacked = np.unique(line_ind, return_index=True) ts_first_line_attacked = ts_ind[first_ind_line_attacked] # now retrieve the array starting at the correct place diff --git a/grid2op/Reward/baseReward.py b/grid2op/Reward/baseReward.py index ab54b56a6..51eb5d783 100644 --- a/grid2op/Reward/baseReward.py +++ b/grid2op/Reward/baseReward.py @@ -8,7 +8,10 @@ import logging from abc import ABC, abstractmethod + +import grid2op from grid2op.dtypes import dt_float +from grid2op.Action import BaseAction class BaseReward(ABC): @@ -124,7 +127,7 @@ def is_simulated_env(self, env): from grid2op.Environment._forecast_env import _ForecastEnv return isinstance(env, (_ObsEnv, _ForecastEnv)) - def initialize(self, env): + def initialize(self, env: "grid2op.Environment.BaseEnv") -> None: """ If :attr:`BaseReward.reward_min`, :attr:`BaseReward.reward_max` or other custom attributes require to have a valid :class:`grid2op.Environment.Environment` to be initialized, this should be done in this method. @@ -141,7 +144,7 @@ def initialize(self, env): """ pass - def reset(self, env): + def reset(self, env: "grid2op.Environment.BaseEnv") -> None: """ This method is called each time `env` is reset. @@ -163,7 +166,13 @@ def reset(self, env): pass @abstractmethod - def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous): + def __call__(self, + action: BaseAction, + env: "grid2op.Environment.BaseEnv", + has_error: bool, + is_done: bool, + is_illegal: bool, + is_ambiguous: bool) -> float: """ Method called to compute the reward. @@ -228,7 +237,7 @@ def get_range(self): """ return self.reward_min, self.reward_max - def set_range(self, reward_min, reward_max): + def set_range(self, reward_min: float, reward_max: float): """ Setter function for the :attr:`BaseReward.reward_min` and :attr:`BaseReward.reward_max`. @@ -254,9 +263,9 @@ def __iter__(self): yield ("reward_min", float(self.reward_min)) yield ("reward_max", float(self.reward_max)) - def close(self): + def close(self) -> None: """overide this for certain reward that might need specific behaviour""" pass - def is_in_blackout(self, has_error, is_done): + def is_in_blackout(self, has_error, is_done) -> bool: return is_done and has_error diff --git a/grid2op/Reward/n1Reward.py b/grid2op/Reward/n1Reward.py index 9d11561ef..adc1ca43a 100644 --- a/grid2op/Reward/n1Reward.py +++ b/grid2op/Reward/n1Reward.py @@ -13,7 +13,11 @@ class N1Reward(BaseReward): """ - This class implements the "n-1" reward, which returns the maximum flows after a powerline + This class implements a reward that is inspired + by the "n-1" criterion widely used in power systems. + + More specifically, it returns the maximum of the flows (over all the powerlines) after a given powerline + (specified as input) has been disconnected. Examples -------- @@ -26,8 +30,8 @@ class N1Reward(BaseReward): from grid2op.Reward import N1Reward L_ID = 0 env = grid2op.make("l2rpn_case14_sandbox", - reward_class=N1Reward(l_id=L_ID) - ) + reward_class=N1Reward(l_id=L_ID) + ) obs = env.reset() obs, reward, *_ = env.step(env.action_space()) print(f"reward: {reward:.3f}") diff --git a/grid2op/Rules/BaseRules.py b/grid2op/Rules/BaseRules.py index f6d6b1a44..b822f0f3d 100644 --- a/grid2op/Rules/BaseRules.py +++ b/grid2op/Rules/BaseRules.py @@ -38,7 +38,7 @@ def __call__(self, action, env): As opposed to "ambiguous action", "illegal action" are not illegal per se. They are legal or not on a certain environment. 
For example, disconnecting a powerline that has been cut off for maintenance is illegal. Saying to action to both disconnect a - powerline and assign it to bus 2 on it's origin end is ambiguous, and not tolerated in Grid2Op. + powerline and assign it to bus 2 on it's origin side is ambiguous, and not tolerated in Grid2Op. Parameters ---------- diff --git a/grid2op/Rules/DefaultRules.py b/grid2op/Rules/DefaultRules.py index 4685c38a1..9e4832a6e 100644 --- a/grid2op/Rules/DefaultRules.py +++ b/grid2op/Rules/DefaultRules.py @@ -27,6 +27,11 @@ class DefaultRules(LookParam, PreventDiscoStorageModif, PreventReconnection): def __call__(self, action, env): """ See :func:`BaseRules.__call__` for a definition of the _parameters of this function. + + ..versionchanged:: 1.10.2 + In grid2op 1.10.2 this function is not called when the environment is reset: + The "action" made by the environment to set the environment in the desired state is always legal + """ is_legal, reason = LookParam.__call__(self, action, env) if not is_legal: diff --git a/grid2op/Rules/LookParam.py b/grid2op/Rules/LookParam.py index 13445e612..e2e463fef 100644 --- a/grid2op/Rules/LookParam.py +++ b/grid2op/Rules/LookParam.py @@ -29,19 +29,24 @@ class LookParam(BaseRules): def __call__(self, action, env): """ See :func:`BaseRules.__call__` for a definition of the parameters of this function. + + ..versionchanged:: 1.10.2 + In grid2op 1.10.2 this function is not called when the environment is reset: + The "action" made by the environment to set the environment in the desired state is always legal + """ # at first iteration, env.current_obs is None... powerline_status = env.get_current_line_status() aff_lines, aff_subs = action.get_topological_impact(powerline_status) if aff_lines.sum() > env._parameters.MAX_LINE_STATUS_CHANGED: - ids = np.where(aff_lines)[0] + ids = (aff_lines).nonzero()[0] return False, IllegalAction( "More than {} line status affected by the action: {}" "".format(env.parameters.MAX_LINE_STATUS_CHANGED, ids) ) if aff_subs.sum() > env._parameters.MAX_SUB_CHANGED: - ids = np.where(aff_subs)[0] + ids = (aff_subs).nonzero()[0] return False, IllegalAction( "More than {} substation affected by the action: {}" "".format(env.parameters.MAX_SUB_CHANGED, ids) diff --git a/grid2op/Rules/PreventDiscoStorageModif.py b/grid2op/Rules/PreventDiscoStorageModif.py index ba52472f1..fb20ae344 100644 --- a/grid2op/Rules/PreventDiscoStorageModif.py +++ b/grid2op/Rules/PreventDiscoStorageModif.py @@ -23,24 +23,29 @@ class PreventDiscoStorageModif(BaseRules): def __call__(self, action, env): """ See :func:`BaseRules.__call__` for a definition of the parameters of this function. + + ..versionchanged:: 1.10.2 + In grid2op 1.10.2 this function is not called when the environment is reset: + The "action" made by the environment to set the environment in the desired state is always legal + """ - if env.n_storage == 0: + env_cls = type(env) + if env_cls.n_storage == 0: # nothing to do if no storage return True, None # at first iteration, env.current_obs is None... 
- storage_disco = env.backend.get_topo_vect()[env.storage_pos_topo_vect] < 0 + storage_disco = env.backend.get_topo_vect()[env_cls.storage_pos_topo_vect] < 0 storage_power, storage_set_bus, storage_change_bus = action.get_storage_modif() - power_modif_disco = (np.isfinite(storage_power[storage_disco])) & ( - storage_power[storage_disco] != 0.0 - ) + power_modif_disco = (np.isfinite(storage_power[storage_disco]) & + (np.abs(storage_power[storage_disco]) >= 1e-7)) not_set_status = storage_set_bus[storage_disco] <= 0 not_change_status = ~storage_change_bus[storage_disco] if (power_modif_disco & not_set_status & not_change_status).any(): tmp_ = power_modif_disco & not_set_status & not_change_status return False, IllegalAction( f"Attempt to modify the power produced / absorbed by a storage unit " - f"without reconnecting it (check storage with id {np.where(tmp_)[0]}." + f"without reconnecting it (check storage with id {(tmp_).nonzero()[0]}." ) return True, None diff --git a/grid2op/Rules/PreventReconnection.py b/grid2op/Rules/PreventReconnection.py index 464c3653e..d1356ddd7 100644 --- a/grid2op/Rules/PreventReconnection.py +++ b/grid2op/Rules/PreventReconnection.py @@ -27,6 +27,10 @@ def __call__(self, action, env): due to an overflow. See :func:`BaseRules.__call__` for a definition of the parameters of this function. + + ..versionchanged:: 1.10.2 + In grid2op 1.10.2 this function is not called when the environment is reset: + The "action" made by the environment to set the environment in the desired state is always legal """ # at first iteration, env.current_obs is None... @@ -38,7 +42,7 @@ def __call__(self, action, env): if (env._times_before_line_status_actionable[aff_lines] > 0).any(): # i tried to act on a powerline too shortly after a previous action # or shut down due to an overflow or opponent or hazards or maintenance - ids = np.where((env._times_before_line_status_actionable > 0) & aff_lines)[ + ids = ((env._times_before_line_status_actionable > 0) & aff_lines).nonzero()[ 0 ] return False, IllegalAction( @@ -49,7 +53,7 @@ def __call__(self, action, env): if (env._times_before_topology_actionable[aff_subs] > 0).any(): # I tried to act on a topology too shortly after a previous action - ids = np.where((env._times_before_topology_actionable > 0) & aff_subs)[0] + ids = ((env._times_before_topology_actionable > 0) & aff_subs).nonzero()[0] return False, IllegalAction( "Substation with ids {} have been modified illegally (cooldown of {})".format( ids, env._times_before_topology_actionable[ids] diff --git a/grid2op/Rules/RulesChecker.py b/grid2op/Rules/RulesChecker.py index a362344a3..6f857c453 100644 --- a/grid2op/Rules/RulesChecker.py +++ b/grid2op/Rules/RulesChecker.py @@ -81,4 +81,8 @@ def __call__(self, action, env): reason: A grid2op IllegalException given the reason for which the action is illegal """ + if env.nb_time_step <= 0: + # only access when env is reset + return True, None + return self.legal_action(action, env) diff --git a/grid2op/Rules/rulesByArea.py b/grid2op/Rules/rulesByArea.py index 66efe22b2..4c01dccee 100644 --- a/grid2op/Rules/rulesByArea.py +++ b/grid2op/Rules/rulesByArea.py @@ -87,13 +87,18 @@ def initialize(self, env): raise Grid2OpException("The number of listed ids of substations in rule initialization does not match the number of " "substations of the chosen environement. 
Look for missing ids or doublon") else: - self.lines_id_by_area = {key : sorted(list(chain(*[[item for item in np.where(env.line_or_to_subid == subid)[0] + self.lines_id_by_area = {key : sorted(list(chain(*[[item for item in (env.line_or_to_subid == subid).nonzero()[0] ] for subid in subid_list]))) for key,subid_list in self.substations_id_by_area.items()} def __call__(self, action, env): """ See :func:`BaseRules.__call__` for a definition of the _parameters of this function. + + ..versionchanged:: 1.10.2 + In grid2op 1.10.2 this function is not called when the environment is reset: + The "action" made by the environment to set the environment in the desired state is always legal + """ is_legal, reason = PreventDiscoStorageModif.__call__(self, action, env) if not is_legal: @@ -120,13 +125,13 @@ def _lookparam_byarea(self, action, env): aff_lines, aff_subs = action.get_topological_impact(powerline_status) if any([(aff_lines[line_ids]).sum() > env._parameters.MAX_LINE_STATUS_CHANGED for line_ids in self.lines_id_by_area.values()]): - ids = [[k for k in np.where(aff_lines)[0] if k in line_ids] for line_ids in self.lines_id_by_area.values()] + ids = [[k for k in (aff_lines).nonzero()[0] if k in line_ids] for line_ids in self.lines_id_by_area.values()] return False, IllegalAction( "More than {} line status affected by the action in one area: {}" "".format(env.parameters.MAX_LINE_STATUS_CHANGED, ids) ) if any([(aff_subs[sub_ids]).sum() > env._parameters.MAX_SUB_CHANGED for sub_ids in self.substations_id_by_area.values()]): - ids = [[k for k in np.where(aff_subs)[0] if k in sub_ids] for sub_ids in self.substations_id_by_area.values()] + ids = [[k for k in (aff_subs).nonzero()[0] if k in sub_ids] for sub_ids in self.substations_id_by_area.values()] return False, IllegalAction( "More than {} substation affected by the action in one area: {}" "".format(env.parameters.MAX_SUB_CHANGED, ids) diff --git a/grid2op/Runner/aux_fun.py b/grid2op/Runner/aux_fun.py index 2f69d520f..83ae34cd6 100644 --- a/grid2op/Runner/aux_fun.py +++ b/grid2op/Runner/aux_fun.py @@ -8,13 +8,13 @@ import copy import time - +import warnings import numpy as np from grid2op.Environment import Environment from grid2op.Agent import BaseAgent -from grid2op.Episode import EpisodeData +from grid2op.Episode import EpisodeData, CompactEpisodeData from grid2op.Runner.FakePBar import _FakePbar from grid2op.dtypes import dt_int, dt_float, dt_bool from grid2op.Chronics import ChronicsHandler @@ -36,6 +36,7 @@ def _aux_add_data(reward, env, episode, ) return reward + def _aux_one_process_parrallel( runner, episode_this_process, @@ -46,29 +47,35 @@ def _aux_one_process_parrallel( max_iter=None, add_detailed_output=False, add_nb_highres_sim=False, + init_states=None, + reset_options=None, ): """this is out of the runner, otherwise it does not work on windows / macos""" - chronics_handler = ChronicsHandler( - chronicsClass=runner.gridStateclass, - path=runner.path_chron, - **runner.gridStateclass_kwargs - ) parameters = copy.deepcopy(runner.parameters) nb_episode_this_process = len(episode_this_process) res = [(None, None, None) for _ in range(nb_episode_this_process)] for i, ep_id in enumerate(episode_this_process): # `ep_id`: grid2op id of the episode i want to play # `i`: my id of the episode played (0, 1, ... 
episode_this_process) - env, agent = runner._new_env( - chronics_handler=chronics_handler, parameters=parameters - ) + env, agent = runner._new_env(parameters=parameters) try: env_seed = None if env_seeds is not None: env_seed = env_seeds[i] + agt_seed = None if agent_seeds is not None: agt_seed = agent_seeds[i] + + if init_states is not None: + init_state = init_states[i] + else: + init_state = None + + if reset_options is not None: + reset_option = reset_options[i] + else: + reset_option = None tmp_ = _aux_run_one_episode( env, agent, @@ -79,9 +86,12 @@ def _aux_one_process_parrallel( max_iter=max_iter, agent_seed=agt_seed, detailed_output=add_detailed_output, + use_compact_episode_data=runner.use_compact_episode_data, + init_state=init_state, + reset_option=reset_option ) (name_chron, cum_reward, nb_time_step, max_ts, episode_data, nb_highres_sim) = tmp_ - id_chron = chronics_handler.get_id() + id_chron = env.chronics_handler.get_id() res[i] = (id_chron, name_chron, float(cum_reward), nb_time_step, max_ts) if add_detailed_output: @@ -104,6 +114,9 @@ def _aux_run_one_episode( agent_seed=None, max_iter=None, detailed_output=False, + use_compact_episode_data=False, + init_state=None, + reset_option=None, ): done = False time_step = int(0) @@ -111,17 +124,35 @@ def _aux_run_one_episode( cum_reward = dt_float(0.0) # set the environment to use the proper chronic - env.set_id(indx) - # set the seed - if env_seed is not None: - env.seed(env_seed) - + # env.set_id(indx) + if reset_option is None: + reset_option = {} + + if "time serie id" in reset_option: + warnings.warn("You provided both `episode_id` and the key `'time serie id'` is present " + "in the provided `reset_options`. In this case, grid2op will ignore the " + "`time serie id` of the `reset_options` and keep the value in `episode_id`.") + reset_option["time serie id"] = indx + # handle max_iter if max_iter is not None: - env.chronics_handler.set_max_iter(max_iter) + if "max step" in reset_option: + warnings.warn("You provided both `max_iter` and the key `'max step'` is present " + "in the provided `reset_options`. In this case, grid2op will ignore the " + "`max step` of the `reset_options` and keep the value in `max_iter`.") + reset_option["max step"] = max_iter + + # handle init state + if init_state is not None: + if "init state" in reset_option: + warnings.warn("You provided both `init_state` and the key `'init state'` is present " + "in the provided `reset_options`. 
In this case, grid2op will ignore the " + "`init state` of the `reset_options` and keep the value in `init_state`.") + reset_option["init state"] = init_state # reset it - obs = env.reset() + obs = env.reset(seed=env_seed, options=reset_option) + # reset the number of calls to high resolution simulator env._highres_sim_counter._HighResSimCounter__nb_highres_called = 0 @@ -135,96 +166,99 @@ def _aux_run_one_episode( efficient_storing = nb_timestep_max > 0 nb_timestep_max = max(nb_timestep_max, 0) max_ts = nb_timestep_max - if path_save is None and not detailed_output: - # i don't store anything on drive, so i don't need to store anything on memory - nb_timestep_max = 0 + if use_compact_episode_data: + episode = CompactEpisodeData(env, obs, exp_dir=path_save) + else: + if path_save is None and not detailed_output: + # i don't store anything on drive, so i don't need to store anything on memory + nb_timestep_max = 0 - disc_lines_templ = np.full((1, env.backend.n_line), fill_value=False, dtype=dt_bool) + disc_lines_templ = np.full((1, env.backend.n_line), fill_value=False, dtype=dt_bool) - attack_templ = np.full( - (1, env._oppSpace.action_space.size()), fill_value=0.0, dtype=dt_float - ) - - if efficient_storing: - times = np.full(nb_timestep_max, fill_value=np.NaN, dtype=dt_float) - rewards = np.full(nb_timestep_max, fill_value=np.NaN, dtype=dt_float) - actions = np.full( - (nb_timestep_max, env.action_space.n), fill_value=np.NaN, dtype=dt_float - ) - env_actions = np.full( - (nb_timestep_max, env._helper_action_env.n), - fill_value=np.NaN, - dtype=dt_float, - ) - observations = np.full( - (nb_timestep_max + 1, env.observation_space.n), - fill_value=np.NaN, - dtype=dt_float, - ) - disc_lines = np.full( - (nb_timestep_max, env.backend.n_line), fill_value=np.NaN, dtype=dt_bool + attack_templ = np.full( + (1, env._oppSpace.action_space.size()), fill_value=0.0, dtype=dt_float ) - attack = np.full( - (nb_timestep_max, env._opponent_action_space.n), - fill_value=0.0, - dtype=dt_float, - ) - legal = np.full(nb_timestep_max, fill_value=True, dtype=dt_bool) - ambiguous = np.full(nb_timestep_max, fill_value=False, dtype=dt_bool) - else: - times = np.full(0, fill_value=np.NaN, dtype=dt_float) - rewards = np.full(0, fill_value=np.NaN, dtype=dt_float) - actions = np.full((0, env.action_space.n), fill_value=np.NaN, dtype=dt_float) - env_actions = np.full( - (0, env._helper_action_env.n), fill_value=np.NaN, dtype=dt_float - ) - observations = np.full( - (0, env.observation_space.n), fill_value=np.NaN, dtype=dt_float - ) - disc_lines = np.full((0, env.backend.n_line), fill_value=np.NaN, dtype=dt_bool) - attack = np.full( - (0, env._opponent_action_space.n), fill_value=0.0, dtype=dt_float - ) - legal = np.full(0, fill_value=True, dtype=dt_bool) - ambiguous = np.full(0, fill_value=False, dtype=dt_bool) - - need_store_first_act = path_save is not None or detailed_output - if need_store_first_act: - # store observation at timestep 0 + if efficient_storing: - observations[time_step, :] = obs.to_vect() + times = np.full(nb_timestep_max, fill_value=np.NaN, dtype=dt_float) + rewards = np.full(nb_timestep_max, fill_value=np.NaN, dtype=dt_float) + actions = np.full( + (nb_timestep_max, env.action_space.n), fill_value=np.NaN, dtype=dt_float + ) + env_actions = np.full( + (nb_timestep_max, env._helper_action_env.n), + fill_value=np.NaN, + dtype=dt_float, + ) + observations = np.full( + (nb_timestep_max + 1, env.observation_space.n), + fill_value=np.NaN, + dtype=dt_float, + ) + disc_lines = np.full( + (nb_timestep_max, 
env.backend.n_line), fill_value=np.NaN, dtype=dt_bool + ) + attack = np.full( + (nb_timestep_max, env._opponent_action_space.n), + fill_value=0.0, + dtype=dt_float, + ) + legal = np.full(nb_timestep_max, fill_value=True, dtype=dt_bool) + ambiguous = np.full(nb_timestep_max, fill_value=False, dtype=dt_bool) else: - observations = np.concatenate((observations, obs.to_vect().reshape(1, -1))) - - episode = EpisodeData( - actions=actions, - env_actions=env_actions, - observations=observations, - rewards=rewards, - disc_lines=disc_lines, - times=times, - observation_space=env.observation_space, - action_space=env.action_space, - helper_action_env=env._helper_action_env, - path_save=path_save, - disc_lines_templ=disc_lines_templ, - attack_templ=attack_templ, - attack=attack, - attack_space=env._opponent_action_space, - logger=logger, - name=env.chronics_handler.get_name(), - force_detail=detailed_output, - other_rewards=[], - legal=legal, - ambiguous=ambiguous, - has_legal_ambiguous=True, - ) - if need_store_first_act: - # I need to manually force in the first observation (otherwise it's not computed) - episode.observations.objects[0] = episode.observations.helper.from_vect( - observations[time_step, :] + times = np.full(0, fill_value=np.NaN, dtype=dt_float) + rewards = np.full(0, fill_value=np.NaN, dtype=dt_float) + actions = np.full((0, env.action_space.n), fill_value=np.NaN, dtype=dt_float) + env_actions = np.full( + (0, env._helper_action_env.n), fill_value=np.NaN, dtype=dt_float + ) + observations = np.full( + (0, env.observation_space.n), fill_value=np.NaN, dtype=dt_float + ) + disc_lines = np.full((0, env.backend.n_line), fill_value=np.NaN, dtype=dt_bool) + attack = np.full( + (0, env._opponent_action_space.n), fill_value=0.0, dtype=dt_float + ) + legal = np.full(0, fill_value=True, dtype=dt_bool) + ambiguous = np.full(0, fill_value=False, dtype=dt_bool) + + need_store_first_act = path_save is not None or detailed_output + if need_store_first_act: + # store observation at timestep 0 + if efficient_storing: + observations[time_step, :] = obs.to_vect() + else: + observations = np.concatenate((observations, obs.to_vect().reshape(1, -1))) + + episode = EpisodeData( + actions=actions, + env_actions=env_actions, + observations=observations, + rewards=rewards, + disc_lines=disc_lines, + times=times, + observation_space=env.observation_space, + action_space=env.action_space, + helper_action_env=env._helper_action_env, + path_save=path_save, + disc_lines_templ=disc_lines_templ, + attack_templ=attack_templ, + attack=attack, + attack_space=env._opponent_action_space, + logger=logger, + name=env.chronics_handler.get_name(), + force_detail=detailed_output, + other_rewards=[], + legal=legal, + ambiguous=ambiguous, + has_legal_ambiguous=True, ) - episode.set_parameters(env) + if need_store_first_act: + # I need to manually force in the first observation (otherwise it's not computed) + episode.observations.objects[0] = episode.observations.helper.from_vect( + observations[time_step, :] + ) + episode.set_parameters(env) beg_ = time.perf_counter() @@ -246,26 +280,38 @@ def _aux_run_one_episode( res_env_tmp = env.steps(act) for (obs, reward, done, info), opp_attack in zip(*res_env_tmp): time_step += 1 - cum_reward += _aux_add_data(reward, env, episode, - efficient_storing, - end__, beg__, act, - obs, info, time_step, - opp_attack) + if use_compact_episode_data: + duration = end__ - beg__ + cum_reward = episode.update(time_step, env, act, + obs, reward, done, duration, info) + else: + cum_reward += 
_aux_add_data(reward, env, episode, + efficient_storing, + end__, beg__, act, + obs, info, time_step, + opp_attack) pbar_.update(1) else: # regular environment obs, reward, done, info = env.step(act) time_step += 1 opp_attack = env._oppSpace.last_attack - cum_reward += _aux_add_data(reward, env, episode, - efficient_storing, - end__, beg__, act, - obs, info, time_step, - opp_attack) + if use_compact_episode_data: + duration = end__ - beg__ + cum_reward = episode.update(time_step, env, act, + obs, reward, done, duration, info) + else: + cum_reward += _aux_add_data(reward, env, episode, + efficient_storing, + end__, beg__, act, + obs, info, time_step, + opp_attack) pbar_.update(1) - episode.set_game_over(time_step) + if not use_compact_episode_data: + episode.set_game_over(time_step) end_ = time.perf_counter() - episode.set_meta(env, time_step, float(cum_reward), env_seed, agent_seed) + if not use_compact_episode_data: + episode.set_meta(env, time_step, float(cum_reward), env_seed, agent_seed) li_text = [ "Env: {:.2f}s", "\t - apply act {:.2f}s", @@ -287,10 +333,11 @@ def _aux_run_one_episode( cum_reward, ) ) - - episode.set_episode_times(env, time_act, beg_, end_) + if not use_compact_episode_data: + episode.set_episode_times(env, time_act, beg_, end_) episode.to_disk() + episode.make_serializable() name_chron = env.chronics_handler.get_name() return (name_chron, cum_reward, int(time_step), diff --git a/grid2op/Runner/runner.py b/grid2op/Runner/runner.py index c790b0883..189dbefa6 100644 --- a/grid2op/Runner/runner.py +++ b/grid2op/Runner/runner.py @@ -9,8 +9,9 @@ import os import warnings import copy -from multiprocessing import Pool -from typing import Tuple, Optional, List, Union +import numpy as np +from multiprocessing import get_start_method, get_context, Pool +from typing import Tuple, List, Union from grid2op.Environment import BaseEnv from grid2op.Action import BaseAction, TopologyAction, DontAct @@ -18,9 +19,9 @@ from grid2op.Observation import CompleteObservation, BaseObservation from grid2op.Opponent.opponentSpace import OpponentSpace from grid2op.Reward import FlatReward, BaseReward -from grid2op.Rules import AlwaysLegal, BaseRules +from grid2op.Rules import AlwaysLegal from grid2op.Environment import Environment -from grid2op.Chronics import ChronicsHandler, GridStateFromFile, GridValue +from grid2op.Chronics import ChronicsHandler, GridStateFromFile, GridValue, MultifolderWithCache from grid2op.Backend import Backend, PandaPowerBackend from grid2op.Parameters import Parameters from grid2op.Agent import DoNothingAgent, BaseAgent @@ -28,18 +29,20 @@ from grid2op.dtypes import dt_float from grid2op.Opponent import BaseOpponent, NeverAttackBudget from grid2op.operator_attention import LinearAttentionBudget +from grid2op.Space import DEFAULT_N_BUSBAR_PER_SUB +from grid2op.Episode import EpisodeData +# on windows if i start using sequential, i need to continue using sequential +# if i start using parallel i need to continue using parallel +# so i force the usage of the "starmap" stuff even if there is one process on windows +from grid2op._glop_platform_info import _IS_WINDOWS, _IS_LINUX, _IS_MACOS + from grid2op.Runner.aux_fun import ( _aux_run_one_episode, _aux_make_progress_bar, _aux_one_process_parrallel, ) from grid2op.Runner.basic_logger import DoNothingLog, ConsoleLog -from grid2op.Episode import EpisodeData -# on windows if i start using sequential, i need to continue using sequential -# if i start using parallel i need to continue using parallel -# so i force the usage of 
the "starmap" stuff even if there is one process on windows -from grid2op._glop_platform_info import _IS_WINDOWS, _IS_LINUX, _IS_MACOS runner_returned_type = Union[Tuple[str, str, float, int, int], Tuple[str, str, float, int, int, EpisodeData], @@ -54,6 +57,7 @@ # TODO use gym logger if specified by the user. # TODO: if chronics are "loop through" multiple times, only last results are saved. :-/ +KEY_TIME_SERIE_ID = "time serie id" class Runner(object): """ @@ -70,8 +74,12 @@ class Runner(object): env = grid2op.make("l2rpn_case14_sandbox") + # use of a Runner + runner = Runner(**env.get_params_for_runner(), agentClass=RandomAgent) + res = runner.run(nb_episode=nn_episode) + ############### - # the gym loops + # the "equivalent" gym loops nb_episode = 5 for i in range(nb_episode): obs = env.reset() @@ -80,11 +88,10 @@ class Runner(object): while not done: act = agent.act(obs, reward, done) obs, reward, done, info = env.step(act) - + # but this loop does not handle the seeding, does not save the results + # does not store anything related to the run you made etc. + # the Runner can do that with simple calls (see bellow) ############### - # equivalent with use of a Runner - runner = Runner(**env.get_params_for_runner(), agentClass=RandomAgent) - res = runner.run(nb_episode=nn_episode) This specific class as for main purpose to evaluate the performance of a trained @@ -97,6 +104,109 @@ class Runner(object): encourage you to use the :func:`grid2op.Environment.Environment.get_params_for_runner` for creating a runner. + You can customize the agent instance you want with the following code: + + .. code-block:: python + + import grid2op + from grid2op.Agent import RandomAgent # for example... + from grid2op.Runner import Runner + + env = grid2op.make("l2rpn_case14_sandbox") + + agent_instance = RandomAgent(env.action_space) + runner = Runner(**env.get_params_for_runner(), agentClass=None, agentInstance=agent_instance) + res = runner.run(nb_episode=nn_episode) + + You can customize the seeds, the scenarios ID you want, the number of initial steps to skip, the + maximum duration of an episode etc. For more information, please refer to the :func:`Runner.run` + + You can also easily retrieve the :class:`grid2op.Episode.EpisodeData` representing your runs with: + + .. code-block:: python + + import grid2op + from grid2op.Agent import RandomAgent # for example... + from grid2op.Runner import Runner + + env = grid2op.make("l2rpn_case14_sandbox") + + agent_instance = RandomAgent(env.action_space) + runner = Runner(**env.get_params_for_runner(), agentClass=None, agentInstance=agent_instance) + res = runner.run(nb_episode=2, + add_detailed_output=True) + for *_, ep_data in res: + # ep_data are the EpisodeData you can use to do whatever + ... + + You can save the results in a standardized format with: + + .. code-block:: python + + import grid2op + from grid2op.Agent import RandomAgent # for example... + from grid2op.Runner import Runner + + env = grid2op.make("l2rpn_case14_sandbox") + + agent_instance = RandomAgent(env.action_space) + runner = Runner(**env.get_params_for_runner(), agentClass=None, agentInstance=agent_instance) + res = runner.run(nb_episode=2, + save_path="A/PATH/SOMEWHERE") # eg "/home/user/you/grid2op_results/this_run" + + You can also easily (on some platform) easily make the evaluation faster by using the "multi processing" python + package with: + + .. code-block:: python + + import grid2op + from grid2op.Agent import RandomAgent # for example... 
+ from grid2op.Runner import Runner + + env = grid2op.make("l2rpn_case14_sandbox") + + agent_instance = RandomAgent(env.action_space) + runner = Runner(**env.get_params_for_runner(), agentClass=None, agentInstance=agent_instance) + res = runner.run(nb_episode=2, + nb_process=2) + + And, as of grid2op 1.10.3 you can know customize the multi processing context you want + to use to evaluate your agent, like this: + + .. code-block:: python + + import multiprocessing as mp + import grid2op + from grid2op.Agent import RandomAgent # for example... + from grid2op.Runner import Runner + + env = grid2op.make("l2rpn_case14_sandbox") + + agent_instance = RandomAgent(env.action_space) + + ctx = mp.get_context('spawn') # or "fork" or "forkserver" + runner = Runner(**env.get_params_for_runner(), + agentClass=None, + agentInstance=agent_instance, + mp_context=ctx) + res = runner.run(nb_episode=2, + nb_process=2) + + If you set this, the multiprocessing `Pool` used to evaluate your agents will be made with: + + .. code-block:: python + + with mp_context.Pool(nb_process) as p: + .... + + Otherwise the default "Pool" is used: + + .. code-block:: python + + with Pool(nb_process) as p: + .... + + Attributes ---------- envClass: ``type`` @@ -243,6 +353,7 @@ def __init__( init_env_path: str, init_grid_path: str, path_chron, # path where chronics of injections are stored + n_busbar=DEFAULT_N_BUSBAR_PER_SUB, name_env="unknown", parameters_path=None, names_chronics_to_backend=None, @@ -280,12 +391,16 @@ def __init__( kwargs_attention_budget=None, has_attention_budget=False, logger=None, + use_compact_episode_data=False, kwargs_observation=None, observation_bk_class=None, observation_bk_kwargs=None, + mp_context=None, # experimental: whether to read from local dir or generate the classes on the fly: - _read_from_local_dir=False, + _read_from_local_dir=None, _is_test=False, # TODO not implemented !! + _local_dir_cls=None, + _overload_name_multimix=None ): """ Initialize the Runner. @@ -343,11 +458,17 @@ def __init__( voltagecontrolerClass: :class:`grid2op.VoltageControler.ControlVoltageFromFile`, optional The controler that will change the voltage setpoints of the generators. + use_compact_episode_data: ``bool``, optional + Whether to use :class:`grid2op.Episode.CompactEpisodeData` instead of :class:`grid2op.Episode.EpisodeData` to store + Episode to disk (allows it to be replayed later). Defaults to False. + # TODO documentation on the opponent # TOOD doc for the attention budget """ + self._n_busbar = n_busbar self.with_forecast = with_forecast self.name_env = name_env + self._overload_name_multimix = _overload_name_multimix if not isinstance(envClass, type): raise Grid2OpException( 'Parameter "envClass" used to build the Runner should be a type (a class) and not an object ' @@ -363,7 +484,6 @@ def __init__( self.other_env_kwargs = other_env_kwargs else: self.other_env_kwargs = {} - if not isinstance(actionClass, type): raise Grid2OpException( 'Parameter "actionClass" used to build the Runner should be a type (a class) and not an object ' @@ -419,7 +539,11 @@ def __init__( 'grid2op.GridValue. Please modify "gridStateclass" parameter.' ) self.gridStateclass = gridStateclass - + if issubclass(gridStateclass, MultifolderWithCache): + warnings.warn("We do not recommend to use the `MultifolderWithCache` during the " + "evaluation of your agents. It is possible but you might end up with " + "side effects (see issue 616 for example). 
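# --- hedged sketch (editorial illustration, not part of the diff above) ---
# The `use_compact_episode_data` kwarg documented above can be passed when building the
# Runner; episodes written to disk then use the CompactEpisodeData layout instead of
# EpisodeData. Assumes a grid2op version that includes this kwarg; the save path is a
# placeholder and `path_save` matches the `run()` signature shown later in this diff.
import grid2op
from grid2op.Agent import DoNothingAgent
from grid2op.Runner import Runner

env = grid2op.make("l2rpn_case14_sandbox")
runner = Runner(**env.get_params_for_runner(),
                agentClass=DoNothingAgent,
                use_compact_episode_data=True)
res = runner.run(nb_episode=1, path_save="/tmp/compact_run")  # placeholder path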
It is safer to use the " + "`Multifolder` class as a drop-in replacement.") self.envClass._check_rules_correct(legalActClass) self.legalActClass = legalActClass @@ -441,6 +565,14 @@ def __init__( else: self._backend_kwargs = {} + # we keep a reference to the local directory (tmpfile) where + # the classes definition are stored while the runner lives + self._local_dir_cls = _local_dir_cls + + # multi processing context that controls the way the computations are + # distributed when using multiple processes + self._mp_context = mp_context + self.__can_copy_agent = True if agentClass is not None: if agentInstance is not None: @@ -477,7 +609,7 @@ def __init__( # Test if we can copy the agent for parallel runs try: copy.copy(self.agent) - except: + except Exception as exc_: self.__can_copy_agent = False else: raise RuntimeError( @@ -502,6 +634,8 @@ def __init__( else: self.logger = logger.getChild("grid2op_Runner") + self.use_compact_episode_data = use_compact_episode_data + # store _parameters self.init_env_path = init_env_path self.init_grid_path = init_grid_path @@ -528,11 +662,6 @@ def __init__( self.max_iter = max_iter if max_iter > 0: self.gridStateclass_kwargs["max_iter"] = max_iter - self.chronics_handler = ChronicsHandler( - chronicsClass=self.gridStateclass, - path=self.path_chron, - **self.gridStateclass_kwargs - ) self.verbose = verbose self.thermal_limit_a = thermal_limit_a @@ -605,20 +734,41 @@ def __init__( self.__used = False - def _new_env(self, chronics_handler, parameters) -> Tuple[BaseEnv, BaseAgent]: - # the same chronics_handler is used for all the environments. - # make sure to "reset" it properly - # (this is handled elsewhere in case of "multi chronics") - if not self.chronics_handler.chronicsClass.MULTI_CHRONICS: - self.chronics_handler.next_chronics() + def _make_new_backend(self): + try: + res = self.backendClass(**self._backend_kwargs) + except TypeError: + # for backward compatibility, some backend might not + # handle full kwargs (that might be added later) + import inspect + possible_params = inspect.signature(self.backendClass.__init__).parameters + this_kwargs = {} + for el in self._backend_kwargs: + if el in possible_params: + this_kwargs[el] = self._backend_kwargs[el] + else: + warnings.warn("Runner: your backend does not support the kwargs " + f"`{el}={self._backend_kwargs[el]}`. This usually " + "means it is outdated. Please upgrade it.") + res = self.backendClass(**this_kwargs) + return res + + def _new_env(self, parameters) -> Tuple[BaseEnv, BaseAgent]: + chronics_handler = ChronicsHandler( + chronicsClass=self.gridStateclass, + path=self.path_chron, + **self.gridStateclass_kwargs + ) + backend = self._make_new_backend() with warnings.catch_warnings(): warnings.filterwarnings("ignore") res = self.envClass.init_obj_from_kwargs( other_env_kwargs=self.other_env_kwargs, + n_busbar=self._n_busbar, init_env_path=self.init_env_path, init_grid_path=self.init_grid_path, chronics_handler=chronics_handler, - backend=self.backendClass(**self._backend_kwargs), + backend=backend, parameters=parameters, name=self.name_env, names_chronics_to_backend=self.names_chronics_to_backend, @@ -647,6 +797,9 @@ def _new_env(self, chronics_handler, parameters) -> Tuple[BaseEnv, BaseAgent]: observation_bk_kwargs=self._observation_bk_kwargs, _raw_backend_class=self.backendClass, _read_from_local_dir=self._read_from_local_dir, + # _local_dir_cls: we don't set it, in parrallel mode it makes no sense ! 
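# --- hedged sketch (editorial illustration, not part of the diff above) ---
# Generic version of the backward-compatibility pattern used by `_make_new_backend`:
# keep only the keyword arguments that the target callable actually declares, and warn
# about the rest. `LegacyBackend` and its kwargs are made-up placeholders.
import inspect
import warnings

def filter_kwargs_for(callable_, **kwargs):
    """Return only the kwargs accepted by `callable_`, warning about the others."""
    accepted = inspect.signature(callable_).parameters
    kept = {}
    for name, value in kwargs.items():
        if name in accepted:
            kept[name] = value
        else:
            warnings.warn(f"`{name}={value}` is not supported by {callable_} and is ignored.")
    return kept

class LegacyBackend:  # placeholder for an outdated backend class
    def __init__(self, detailed_infos_for_cascading_failures=False):
        self.detailed = detailed_infos_for_cascading_failures

kwargs = {"detailed_infos_for_cascading_failures": True, "some_new_kwarg": 42}
backend = LegacyBackend(**filter_kwargs_for(LegacyBackend.__init__, **kwargs))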
+ _local_dir_cls=None, + _overload_name_multimix=self._overload_name_multimix ) if self.thermal_limit_a is not None: @@ -673,7 +826,7 @@ def init_env(self) -> BaseEnv: Function used to initialized the environment and the agent. It is called by :func:`Runner.reset`. """ - env, self.agent = self._new_env(self.chronics_handler, self.parameters) + env, self.agent = self._new_env(self.parameters) return env def reset(self): @@ -683,7 +836,7 @@ def reset(self): .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\ Used to reset an environment. This method is called at the beginning of each new episode. - If the environment is not initialized, then it initializes it with :func:`Runner.make_env`. + If the environment is not initialized, then it initializes it with :func:`Runner.init_env`. """ pass @@ -698,6 +851,8 @@ def run_one_episode( episode_id=None, detailed_output=False, add_nb_highres_sim=False, + init_state=None, + reset_options=None, ) -> runner_returned_type: """ INTERNAL @@ -709,7 +864,7 @@ def run_one_episode( Parameters ---------- indx: ``int`` - The number of episode previously run + The index of the episode to run (ignored if `episode_id` is not None) path_save: ``str``, optional Path where to save the data. See the description of :mod:`grid2op.Runner` for the structure of the saved @@ -734,7 +889,18 @@ def run_one_episode( """ self.reset() - with self.init_env() as env: + with self.init_env() as env: + # small piece of code to detect the + # episode id + if episode_id is None: + # user did not provide any episode id, I check in the reset_options + if reset_options is not None: + if KEY_TIME_SERIE_ID in reset_options: + indx = int(reset_options[KEY_TIME_SERIE_ID]) + del reset_options[KEY_TIME_SERIE_ID] + else: + # user specified an episode id, I use it. + indx = episode_id res = _aux_run_one_episode( env, self.agent, @@ -746,15 +912,22 @@ def run_one_episode( max_iter=max_iter, agent_seed=agent_seed, detailed_output=detailed_output, + use_compact_episode_data = self.use_compact_episode_data, + init_state=init_state, + reset_option=reset_options, ) if max_iter is not None: - env.chronics_handler.set_max_iter(-1) - + env.chronics_handler._set_max_iter(-1) + + id_chron = env.chronics_handler.get_id() # `res` here necessarily contains detailed_output and nb_highres_call if not add_nb_highres_sim: res = res[:-1] if not detailed_output: res = res[:-1] + + # new in 1.10.2: id_chron is computed from here + res = (id_chron, *res) return res def _run_sequential( @@ -768,6 +941,8 @@ def _run_sequential( episode_id=None, add_detailed_output=False, add_nb_highres_sim=False, + init_states=None, + reset_options=None, ) -> List[runner_returned_type]: """ INTERNAL @@ -806,7 +981,11 @@ def _run_sequential( By default ``None``, no seeds are set. If provided, its size should match ``nb_episode``. 
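# --- hedged sketch (editorial illustration, not part of the diff above) ---
# Usage of the "time serie id" reset option handled by the code above: when no
# `episode_id` is given, the runner is expected to read the episode to play from the
# `reset_options` dictionaries. Assumes the usual sandbox environment; the scenario ids
# depend on what is installed locally.
import grid2op
from grid2op.Agent import DoNothingAgent
from grid2op.Runner import Runner

env = grid2op.make("l2rpn_case14_sandbox")
runner = Runner(**env.get_params_for_runner(), agentClass=DoNothingAgent)
# run two episodes on explicitly chosen time series, via reset_options only
res = runner.run(nb_episode=2,
                 reset_options=[{"time serie id": 0},
                                {"time serie id": 1}])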
- add_detailed_output: see Runner.run method + add_detailed_output: + see :func:`Runner.run` method + + init_states: + see :func:`Runner.run` method Returns ------- @@ -833,10 +1012,27 @@ def _run_sequential( agt_seed = None if agent_seeds is not None: agt_seed = agent_seeds[i] - ep_id = i # if no "episode_id" is provided i used the i th one + init_state = None + if init_states is not None: + init_state = init_states[i] + reset_opt = None + if reset_options is not None: + # we copy it because we might remove the "time serie id" + # from it + reset_opt = reset_options[i].copy() + # if no "episode_id" is provided i used the i th one + ep_id = i if episode_id is not None: + # if episode_id is provided, I use this one ep_id = episode_id[i] # otherwise i use the provided one + else: + # if it's not provided, I check if one is used in the `reset_options` + if reset_opt is not None: + if KEY_TIME_SERIE_ID in reset_opt: + ep_id = int(reset_opt[KEY_TIME_SERIE_ID]) + del reset_opt[KEY_TIME_SERIE_ID] ( + id_chron, name_chron, cum_reward, nb_time_step, @@ -846,14 +1042,16 @@ def _run_sequential( ) = self.run_one_episode( path_save=path_save, indx=ep_id, + episode_id=ep_id, pbar=next_pbar[0], env_seed=env_seed, agent_seed=agt_seed, max_iter=max_iter, detailed_output=True, - add_nb_highres_sim=True + add_nb_highres_sim=True, + init_state=init_state, + reset_options=reset_opt ) - id_chron = self.chronics_handler.get_id() res[i] = (id_chron, name_chron, float(cum_reward), @@ -878,6 +1076,8 @@ def _run_parrallel( episode_id=None, add_detailed_output=False, add_nb_highres_sim=False, + init_states=None, + reset_options=None, ) -> List[runner_returned_type]: """ INTERNAL @@ -918,8 +1118,12 @@ def _run_parrallel( If provided, its size should match the ``nb_episode``. The agent will be seeded at the beginning of each scenario BEFORE calling `agent.reset()`. - add_detailed_output: see Runner.run method - + add_detailed_output: + See :func:`Runner.run` method + + init_states: + See :func:`Runner.run` method + Returns ------- res: ``list`` @@ -944,7 +1148,7 @@ def _run_parrallel( # if i start using parallel i need to continue using parallel # so i force the usage of the sequential mode self.logger.warn( - "Runner.run_parrallel: number of process set to 1. Failing back into sequential mod." + "Runner.run_parrallel: number of process set to 1. Failing back into sequential mode." 
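# --- hedged sketch (editorial illustration, not part of the diff above) ---
# Toy version of the round-robin dispatch used by `_run_parrallel` (shown just below):
# episode indices are distributed over the worker processes in turn, and the matching
# seeds are split the same way so process i always gets consistent (id, seed) pairs.
nb_episode = 5
nb_process = 2
env_seeds = [10, 11, 12, 13, 14]

process_ids = [[] for _ in range(nb_process)]
seeds_env_res = [[] for _ in range(nb_process)]
for i in range(nb_episode):
    process_ids[i % nb_process].append(i)
    seeds_env_res[i % nb_process].append(env_seeds[i])

print(process_ids)     # [[0, 2, 4], [1, 3]]
print(seeds_env_res)   # [[10, 12, 14], [11, 13]]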
) return self._run_sequential( nb_episode, @@ -955,23 +1159,41 @@ def _run_parrallel( episode_id=episode_id, add_detailed_output=add_detailed_output, add_nb_highres_sim=add_nb_highres_sim, + init_states=init_states, + reset_options=reset_options ) else: + if self._local_dir_cls is not None: + self._local_dir_cls._RUNNER_DO_NOT_ERASE = True self._clean_up() nb_process = int(nb_process) process_ids = [[] for i in range(nb_process)] for i in range(nb_episode): if episode_id is None: - process_ids[i % nb_process].append(i) + # user does not provide episode_id + if reset_options is not None: + # we copy them, because we might delete some things from them + reset_options = [el.copy() for el in reset_options] + + # we check if the reset_options contains the "time serie id" + if KEY_TIME_SERIE_ID in reset_options[i]: + this_ep_id = int(reset_options[i][KEY_TIME_SERIE_ID]) + del reset_options[i][KEY_TIME_SERIE_ID] + else: + this_ep_id = i + else: + this_ep_id = i + process_ids[i % nb_process].append(this_ep_id) else: + # user provided episode_id, we use this one process_ids[i % nb_process].append(episode_id[i]) if env_seeds is None: seeds_env_res = [None for _ in range(nb_process)] else: # split the seeds according to the process - seeds_env_res = [[] for i in range(nb_process)] + seeds_env_res = [[] for _ in range(nb_process)] for i in range(nb_episode): seeds_env_res[i % nb_process].append(env_seeds[i]) @@ -979,16 +1201,31 @@ def _run_parrallel( seeds_agt_res = [None for _ in range(nb_process)] else: # split the seeds according to the process - seeds_agt_res = [[] for i in range(nb_process)] + seeds_agt_res = [[] for _ in range(nb_process)] for i in range(nb_episode): seeds_agt_res[i % nb_process].append(agent_seeds[i]) + + if init_states is None: + init_states_res = [None for _ in range(nb_process)] + else: + # split the init states according to the process + init_states_res = [[] for _ in range(nb_process)] + for i in range(nb_episode): + init_states_res[i % nb_process].append(init_states[i]) + if reset_options is None: + reset_options_res = [None for _ in range(nb_process)] + else: + # split the reset options according to the process + reset_options_res = [[] for _ in range(nb_process)] + for i in range(nb_episode): + reset_options_res[i % nb_process].append(reset_options[i]) + res = [] if _IS_LINUX: lists = [(self,) for _ in enumerate(process_ids)] else: lists = [(Runner(**self._get_params()),) for _ in enumerate(process_ids)] - for i, pn in enumerate(process_ids): lists[i] = (*lists[i], pn, @@ -998,10 +1235,20 @@ def _run_parrallel( seeds_agt_res[i], max_iter, add_detailed_output, - add_nb_highres_sim) - - with Pool(nb_process) as p: - tmp = p.starmap(_aux_one_process_parrallel, lists) + add_nb_highres_sim, + init_states_res[i], + reset_options_res[i]) + if self._mp_context is not None: + with self._mp_context.Pool(nb_process) as p: + tmp = p.starmap(_aux_one_process_parrallel, lists) + else: + if get_start_method() == 'spawn': + # https://github.com/rte-france/Grid2Op/issues/600 + with get_context("spawn").Pool(nb_process) as p: + tmp = p.starmap(_aux_one_process_parrallel, lists) + else: + with Pool(nb_process) as p: + tmp = p.starmap(_aux_one_process_parrallel, lists) for el in tmp: res += el return res @@ -1045,9 +1292,17 @@ def _get_params(self): "kwargs_attention_budget": self._kwargs_attention_budget, "has_attention_budget": self._has_attention_budget, "logger": self.logger, + "use_compact_episode_data": self.use_compact_episode_data, "kwargs_observation": self._kwargs_observation, + 
"observation_bk_class": self._observation_bk_class, + "observation_bk_kwargs": self._observation_bk_kwargs, "_read_from_local_dir": self._read_from_local_dir, "_is_test": self._is_test, + "_overload_name_multimix": self._overload_name_multimix, + "other_env_kwargs": self.other_env_kwargs, + "n_busbar": self._n_busbar, + "mp_context": None, # this is used in multi processing context, avoid to multi process a multi process stuff + "_local_dir_cls": self._local_dir_cls, } return res @@ -1065,6 +1320,7 @@ def _clean_up(self): def run( self, nb_episode, + *, # force kwargs nb_process=1, path_save=None, max_iter=None, @@ -1074,6 +1330,8 @@ def run( episode_id=None, add_detailed_output=False, add_nb_highres_sim=False, + init_states=None, + reset_options=None, ) -> List[runner_returned_type]: """ Main method of the :class:`Runner` class. It will either call :func:`Runner._run_sequential` if "nb_process" is @@ -1094,7 +1352,11 @@ def run( max_iter: ``int`` Maximum number of iteration you want the runner to perform. - + + .. warning:: + (only for grid2op >= 1.10.3) If set in this parameters, it will + erase all values that may be present in the `reset_options` kwargs (key `"max step"`) + pbar: ``bool`` or ``type`` or ``object`` How to display the progress bar, understood as follow: @@ -1120,6 +1382,15 @@ def run( For each of the nb_episdeo you want to compute, it specifies the id of the chronix that will be used. By default ``None``, no seeds are set. If provided, its size should match ``nb_episode``. + + .. warning:: + (only for grid2op >= 1.10.3) If set in this parameters, it will + erase all values that may be present in the `reset_options` kwargs (key `"time serie id"`). + + .. danger:: + As of now, it's not properly handled to compute twice the same `episode_id` more than once using the runner + (more specifically, the computation will happen but file might not be saved correctly on the + hard drive: attempt to save all the results in the same location. We do not advise to do it) add_detailed_output: ``bool`` A flag to add an :class:`EpisodeData` object to the results, containing a lot of information about the run @@ -1127,27 +1398,76 @@ def run( add_nb_highres_sim: ``bool`` Whether to add an estimated number of "high resolution simulator" called performed by the agent (either by obs.simulate, or by obs.get_forecast_env or by obs.get_simulator) + + init_states: + (added in grid2op 1.10.2) Possibility to set the initial state of the powergrid (when calling `env.reset`). + It should either be: + + - a dictionary representing an action (see doc of :func:`grid2op.Environment.Environment.reset`) + - a grid2op action (see doc of :func:`grid2op.Environment.Environment.reset`) + - a list / tuple of one of the above with the same size as the number of episode you want. + + If you provide a dictionary or a grid2op action, then this element will be used for all scenarios you + want to run. + + .. warning:: + (only for grid2op >= 1.10.3) If set in this parameters, it will + erase all values that may be present in the `reset_options` kwargs (key `"init state"`). + + reset_options: + (added in grid2op 1.10.3) Possibility to customize the call to `env.reset` made internally by + the Runner. More specifically, it will pass a custom `options` when the runner calls + `env.reset(..., options=XXX)`. + It should either be: + + - a dictionary that can be used directly by :func:`grid2op.Environment.Environment.reset`. + In this case the same dictionary will be used for all the episodes computed by the runner. 
+ - a list / tuple of one of the above with the same size as the number of episode you want to + compute which allow a full customization for each episode. + + .. warning:: + If the kwargs `max_iter` is present when calling `runner.run` function, then the key `max step` + will be ignored in all the `reset_options` dictionary. + + .. warning:: + If the kwargs `episode_id` is present when calling `runner.run` function, then the key `time serie id` + will be ignored in all the `reset_options` dictionary. + + .. warning:: + If the kwargs `init_states` is present when calling `runner.run` function, then the key `init state` + will be ignored in all the `reset_options` dictionary. + + .. danger:: + If you provide the key "time serie id" in one of the `reset_options` dictionary, we recommend + you do it for all `reset options` otherwise you might not end up computing the correct episodes. + + .. danger:: + As of now, it's not properly handled to compute twice the same `time serie` more than once using the runner + (more specifically, the computation will happen but file might not be saved correctly on the + hard drive: attempt to save all the results in the same location. We do not advise to do it) + Returns ------- res: ``list`` List of tuple. Each tuple having 3[4] elements: - - "i" unique identifier of the episode (compared to :func:`Runner.run_sequential`, the elements of the - returned list are not necessarily sorted by this value) + - "id_chron" unique identifier of the episode + - "name_chron" name of the time series (usually it is the path where it is stored) - "cum_reward" the cumulative reward obtained by the :attr:`Runner.Agent` on this episode i - "nb_time_step": the number of time steps played in this episode. + - "total_step": the total number of time steps possible in this episode. - "episode_data" : [Optional] The :class:`EpisodeData` corresponding to this episode run only if `add_detailed_output=True` - "add_nb_highres_sim": [Optional] The estimated number of calls to high resolution simulator made - by the agent + by the agent. Only preset if `add_nb_highres_sim=True` in the kwargs Examples -------- You can use the runner this way: - .. code-block: python + .. code-block:: python import grid2op from gri2op.Runner import Runner @@ -1159,7 +1479,7 @@ def run( If you would rather to provide an agent instance (and not a class) you can do it this way: - .. code-block: python + .. code-block:: python import grid2op from gri2op.Runner import Runner @@ -1175,7 +1495,7 @@ def run( by passing `env_seeds` and `agent_seeds` parameters (on the example bellow, the agent will be seeded with 42 and the environment with 0. - .. code-block: python + .. code-block:: python import grid2op from gri2op.Runner import Runner @@ -1186,6 +1506,118 @@ def run( runner = Runner(**env.get_params_for_runner(), agentClass=None, agentInstance=my_agent) res = runner.run(nb_episode=1, agent_seeds=[42], env_seeds=[0]) + Since grid2op 1.10.2 you can also set the initial state of the grid when + calling the runner. You can do that with the kwargs `init_states`, for example like this: + + .. code-block:: python + + import grid2op + from gri2op.Runner import Runner + from grid2op.Agent import RandomAgent + + env = grid2op.make("l2rpn_case14_sandbox") + my_agent = RandomAgent(env.action_space) + runner = Runner(**env.get_params_for_runner(), agentClass=None, agentInstance=my_agent) + res = runner.run(nb_episode=1, + agent_seeds=[42], + env_seeds=[0], + init_states=[{"set_line_status": [(0, -1)]}] + ) + + .. 
note:: + We recommend that you provide `init_states` as a list having a length of + `nb_episode`. Each episode will be initialized with the provided + element of the list. However, if you provide only one element, then + all episodes you want to compute will be initialized with this same + action. + + .. note:: + At the beginning of each episode, if an `init_state` is set, + the environment is reset with a call like: `env.reset(options={"init state": init_state})` + + This is why we recommend you to use a dictionary to set the initial state, so + that you can control exactly what is done (e.g. set the `"method"`). More + information about this is available in the documentation of the :func:`grid2op.Environment.Environment.reset` + function. + + Since grid2op 1.10.3 you can also customize the way the runner will "reset" the + environment with the kwargs `reset_options`. + + Concretely, if you specify `runner.run(..., reset_options=XXX)` then the environment + will be reset with a call to `env.reset(options=reset_options)`. + + As for the `init_states` kwargs, `reset_options` can be either a dictionary (in this + case the same dict will be used for running all the episodes) or a list / tuple + of dictionaries with the same size as the `nb_episode` kwargs. + + .. code-block:: python + + import grid2op + from grid2op.Runner import Runner + from grid2op.Agent import RandomAgent + + env = grid2op.make("l2rpn_case14_sandbox") + my_agent = RandomAgent(env.action_space) + runner = Runner(**env.get_params_for_runner(), agentClass=None, agentInstance=my_agent) + res = runner.run(nb_episode=2, + agent_seeds=[42, 43], + env_seeds=[0, 1], + reset_options={"init state": {"set_line_status": [(0, -1)]}} + ) + # the same initial state will be used for the two episodes + + res2 = runner.run(nb_episode=2, + agent_seeds=[42, 43], + env_seeds=[0, 1], + reset_options=[{"init state": {"set_line_status": [(0, -1)]}}, + {"init state": {"set_line_status": [(1, -1)]}}] + ) + # two different initial states will be used: the first one for the + # first episode and the second one for the second + + .. note:: + In case of conflicting inputs, for example when you specify: + + .. code-block:: python + + runner.run(..., + init_states=XXX, + reset_options={"init state": YYY} + ) + + or + + .. code-block:: python + + runner.run(..., + max_iter=XXX, + reset_options={"max step": YYY} + ) + + or + + .. code-block:: python + + runner.run(..., + episode_id=XXX, + reset_options={"time serie id": YYY} + ) + + Then: 1) a warning is issued to inform you that you might have + done something wrong and 2) the value in `XXX` above (*i.e.* the + value provided in the `runner.run` kwargs) is always used + instead of the value `YYY` (*i.e.* the value present in the + reset_options). + + In other words, the arguments of `runner.run` have + priority over the arguments passed to the `reset_options`. + + .. danger:: + If you provide the key "time serie id" in one of the `reset_options` + dictionaries, we recommend + you do it for all `reset_options`, otherwise you might not end up + computing the correct episodes. 
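# --- hedged sketch (editorial illustration, not part of the diff above) ---
# The precedence rule described in the note above: when both the `runner.run` kwarg and
# the corresponding `reset_options` key are given, the kwarg is expected to win and a
# warning to be issued, as documented. Placeholder agent and environment.
import grid2op
from grid2op.Agent import DoNothingAgent
from grid2op.Runner import Runner

env = grid2op.make("l2rpn_case14_sandbox")
runner = Runner(**env.get_params_for_runner(), agentClass=DoNothingAgent)
res = runner.run(nb_episode=1,
                 episode_id=[0],                        # takes priority...
                 reset_options={"time serie id": 1})    # ...over this key (warning expected)
# the episode actually computed should be the one with id 0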
+ """ if nb_episode < 0: raise RuntimeError("Impossible to run a negative number of scenarios.") @@ -1212,6 +1644,63 @@ def run( "".format(nb_episode, len(episode_id)) ) + if init_states is not None: + if isinstance(init_states, (dict, BaseAction)): + # user provided one initial state, I copy it to all + # evaluation + init_states = [init_states.copy() for _ in range(nb_episode)] + elif isinstance(init_states, (list, tuple, np.ndarray)): + # user provided a list of initial states, it should match the + # number of scenarios + if len(init_states) != nb_episode: + raise RuntimeError( + 'You want to compute "{}" run(s) but provide only "{}" different initial state.' + "".format(nb_episode, len(init_states)) + ) + for i, el in enumerate(init_states): + if not isinstance(el, (dict, BaseAction)): + raise RuntimeError("When specifying `init_states` kwargs with a list (or a tuple) " + "it should be a list (or a tuple) of dictionary or BaseAction. " + f"You provided {type(el)} at position {i}.") + else: + raise RuntimeError("When using `init_state` in the runner, you should make sure to use " + "either use dictionnary, grid2op actions or list / tuple of actions.") + + if reset_options is not None: + if isinstance(reset_options, dict): + for k in reset_options: + if not k in self.envClass.KEYS_RESET_OPTIONS: + raise RuntimeError("Wehn specifying `reset options` all keys of the dictionary should " + "be compatible with the available reset options of your environment " + f"class. You provided the key \"{k}\" for the provided dictionary but" + f"possible keys are limited to {self.envClass.KEYS_RESET_OPTIONS}.") + # user provided one initial state, I copy it to all + # evaluation + reset_options = [reset_options.copy() for _ in range(nb_episode)] + elif isinstance(reset_options, (list, tuple, np.ndarray)): + # user provided a list ofreset_options, it should match the + # number of scenarios + if len(reset_options) != nb_episode: + raise RuntimeError( + 'You want to compute "{}" run(s) but provide only "{}" different reset options.' + "".format(nb_episode, len(reset_options)) + ) + for i, el in enumerate(reset_options): + if not isinstance(el, dict): + raise RuntimeError("When specifying `reset_options` kwargs with a list (or a tuple) " + "it should be a list (or a tuple) of dictionary or BaseAction. " + f"You provided {type(el)} at position {i}.") + for i, el in enumerate(reset_options): + for k in el: + if not k in self.envClass.KEYS_RESET_OPTIONS: + raise RuntimeError("Wehn specifying `reset options` all keys of the dictionary should " + "be compatible with the available reset options of your environment " + f"class. 
You provided the key \"{k}\" for the {i}th dictionary but" + f"possible keys are limited to {self.envClass.KEYS_RESET_OPTIONS}.") + else: + raise RuntimeError("When using `reset_options` in the runner, you should make sure to use " + "either use dictionnary, grid2op actions or list / tuple of actions.") + if max_iter is not None: max_iter = int(max_iter) @@ -1234,6 +1723,8 @@ def run( episode_id=episode_id, add_detailed_output=add_detailed_output, add_nb_highres_sim=add_nb_highres_sim, + init_states=init_states, + reset_options=reset_options ) else: if add_detailed_output and (_IS_WINDOWS or _IS_MACOS): @@ -1252,6 +1743,8 @@ def run( episode_id=episode_id, add_detailed_output=add_detailed_output, add_nb_highres_sim=add_nb_highres_sim, + init_states=init_states, + reset_options=reset_options ) else: self.logger.info("Parallel runner used.") @@ -1265,6 +1758,8 @@ def run( episode_id=episode_id, add_detailed_output=add_detailed_output, add_nb_highres_sim=add_nb_highres_sim, + init_states=init_states, + reset_options=reset_options ) finally: self._clean_up() diff --git a/grid2op/Space/GridObjects.py b/grid2op/Space/GridObjects.py index 16edd30bb..6878c8b16 100644 --- a/grid2op/Space/GridObjects.py +++ b/grid2op/Space/GridObjects.py @@ -19,16 +19,21 @@ """ import warnings import copy +import os import numpy as np -from typing import Optional - +import sys +from packaging import version +from typing import Dict, Union, Literal, Any, List, Optional, ClassVar, Tuple + import grid2op from grid2op.dtypes import dt_int, dt_float, dt_bool +from grid2op.typing_variables import CLS_AS_DICT_TYPING, N_BUSBAR_PER_SUB_TYPING from grid2op.Exceptions import * from grid2op.Space.space_utils import extract_from_dict, save_to_dict from grid2op.Space.detailed_topo_description import DetailedTopoDescription # TODO tests of these methods and this class in general +DEFAULT_N_BUSBAR_PER_SUB = 2 class GridObjects: @@ -112,18 +117,19 @@ class GridObjects: "local topology" of the substation 4 by looking at :attr:`grid2op.Observation.BaseObservation.topo_vect` [42:47]. iii) retrieve which component of this vector of dimension 5 (remember we assumed substation 4 had 5 elements) - encodes information about the origin end of the line with id `l_id`. This information is given in + encodes information about the origin side of the line with id `l_id`. This information is given in :attr:`GridObjects.line_or_to_sub_pos` [l_id]. This is a number between 0 and 4, say it's 3. 3 being the index of the object in the substation) - method 2 (not recommended): all of the above is stored (for the same powerline) in the :attr:`GridObjects.line_or_pos_topo_vect` [l_id]. In the example above, we will have: - :attr:`GridObjects.line_or_pos_topo_vect` [l_id] = 45 (=42+3: + :attr:`GridObjects.line_or_pos_topo_vect` [l_id] = 45 (=42+3): 42 being the index on which the substation started and 3 being the index of the object in the substation) - method 3 (recommended): use any of the function that computes it for you: :func:`grid2op.Observation.BaseObservation.state_of` is such an interesting method. The two previous methods "method 1" and "method 2" were presented as a way to give detailed and "concrete" example on how the modeling of the powergrid work. + - method 4 (recommended): use the :func:`GridObjects.topo_vect_element` For a given powergrid, this object should be initialized once in the :class:`grid2op.Backend.Backend` when the first call to :func:`grid2op.Backend.Backend.load_grid` is performed. 
In particular the following attributes @@ -155,7 +161,7 @@ class GridObjects: - :attr:`GridObjects.line_ex_to_sub_pos` - :attr:`GridObjects.storage_to_sub_pos` - A call to the function :func:`GridObjects._compute_pos_big_topo_cls` allow to compute the \*_pos_topo_vect attributes + A call to the function :func:`GridObjects._compute_pos_big_topo_cls` allow to compute the \\*_pos_topo_vect attributes (for example :attr:`GridObjects.line_ex_pos_topo_vect`) can be computed from the above data: - :attr:`GridObjects.load_pos_topo_vect` @@ -189,6 +195,12 @@ class GridObjects: Attributes ---------- + n_busbar_per_sub: :class:`int` + number of independant busbars for all substations [*class attribute*]. It's 2 by default + or if the implementation of the backend does not support this feature. + + .. versionadded:: 1.10.0 + n_line: :class:`int` number of powerlines in the powergrid [*class attribute*] @@ -255,7 +267,7 @@ class GridObjects: :attr:`GridObjects.load_to_sub_pos` [l] is the index of the load *l* in the vector :attr:`grid2op.BaseObservation.BaseObservation.topo_vect` . This means that, if - "`topo_vect` [ :attr:`GridObjects.load_pos_topo_vect` \[l\] ]=2" + "`topo_vect` [ :attr:`GridObjects.load_pos_topo_vect` \\[l\\] ]=2" then load of id *l* is connected to the second bus of the substation. [*class attribute*] gen_pos_topo_vect: :class:`numpy.ndarray`, dtype:int @@ -465,70 +477,75 @@ class GridObjects: .. versionadded:: 1.9.1 """ - BEFORE_COMPAT_VERSION : str= "neurips_2020_compat" - glop_version : str= grid2op.__version__ - _PATH_ENV : Optional[str] = None # especially do not modify that - - SUB_COL = 0 - LOA_COL = 1 - GEN_COL = 2 - LOR_COL = 3 - LEX_COL = 4 - STORAGE_COL = 5 - - attr_list_vect = None + BEFORE_COMPAT_VERSION : ClassVar[str] = "neurips_2020_compat" + glop_version : ClassVar[str] = grid2op.__version__ + + _INIT_GRID_CLS = None # do not modify that, this is handled by grid2op automatically + _PATH_GRID_CLASSES : ClassVar[Optional[str]] = None # especially do not modify that + _CLS_DICT : ClassVar[Optional[CLS_AS_DICT_TYPING]] = None # init once to avoid yet another serialization of the class as dict (in make_cls_dict) + _CLS_DICT_EXTENDED : ClassVar[Optional[CLS_AS_DICT_TYPING]] = None # init once to avoid yet another serialization of the class as dict (in make_cls_dict) + + SUB_COL : ClassVar[int] = 0 + LOA_COL : ClassVar[int] = 1 + GEN_COL : ClassVar[int] = 2 + LOR_COL : ClassVar[int] = 3 + LEX_COL : ClassVar[int] = 4 + STORAGE_COL : ClassVar[int] = 5 + + attr_list_vect : ClassVar[Optional[List[str]]] = None attr_list_set = {} - attr_list_json = [] + attr_list_json : ClassVar[Optional[List[str]]] = [] attr_nan_list_set = set() # name of the objects - env_name : str = "unknown" - name_load = None - name_gen = None - name_line = None - name_sub = None - name_storage = None - - n_gen = -1 - n_load = -1 - n_line = -1 - n_sub = -1 - n_storage = -1 - - sub_info = None - dim_topo = -1 + env_name : ClassVar[str] = "unknown" + name_load : ClassVar[np.ndarray] = None + name_gen : ClassVar[np.ndarray] = None + name_line : ClassVar[np.ndarray] = None + name_sub : ClassVar[np.ndarray] = None + name_storage : ClassVar[np.ndarray] = None + + n_busbar_per_sub : ClassVar[int] = DEFAULT_N_BUSBAR_PER_SUB + n_gen : ClassVar[int] = -1 + n_load : ClassVar[int] = -1 + n_line : ClassVar[int] = -1 + n_sub : ClassVar[int] = -1 + n_storage : ClassVar[int] = -1 + + sub_info : ClassVar[np.ndarray] = None + dim_topo : ClassVar[np.ndarray] = -1 # to which substation is connected each element - 
load_to_subid = None - gen_to_subid = None - line_or_to_subid = None - line_ex_to_subid = None - storage_to_subid = None + load_to_subid : ClassVar[np.ndarray] = None + gen_to_subid : ClassVar[np.ndarray] = None + line_or_to_subid : ClassVar[np.ndarray] = None + line_ex_to_subid : ClassVar[np.ndarray] = None + storage_to_subid : ClassVar[np.ndarray] = None # which index has this element in the substation vector - load_to_sub_pos = None - gen_to_sub_pos = None - line_or_to_sub_pos = None - line_ex_to_sub_pos = None - storage_to_sub_pos = None + load_to_sub_pos : ClassVar[np.ndarray] = None + gen_to_sub_pos : ClassVar[np.ndarray] = None + line_or_to_sub_pos : ClassVar[np.ndarray] = None + line_ex_to_sub_pos : ClassVar[np.ndarray] = None + storage_to_sub_pos : ClassVar[np.ndarray] = None # which index has this element in the topology vector - load_pos_topo_vect = None - gen_pos_topo_vect = None - line_or_pos_topo_vect = None - line_ex_pos_topo_vect = None - storage_pos_topo_vect = None + load_pos_topo_vect : ClassVar[np.ndarray] = None + gen_pos_topo_vect : ClassVar[np.ndarray] = None + line_or_pos_topo_vect : ClassVar[np.ndarray] = None + line_ex_pos_topo_vect : ClassVar[np.ndarray] = None + storage_pos_topo_vect : ClassVar[np.ndarray] = None # "convenient" way to retrieve information of the grid - grid_objects_types = None + grid_objects_types : ClassVar[np.ndarray] = None # to which substation each element of the topovect is connected - _topo_vect_to_sub = None + _topo_vect_to_sub : ClassVar[np.ndarray] = None # list of attribute to convert it from/to a vector _vectorized = None # for redispatching / unit commitment - _li_attr_disp = [ + _li_attr_disp : ClassVar[List[str]] = [ "gen_type", "gen_pmin", "gen_pmax", @@ -543,7 +560,7 @@ class GridObjects: "gen_renewable", ] - _type_attr_disp = [ + _type_attr_disp : ClassVar[List] = [ str, float, float, @@ -559,39 +576,39 @@ class GridObjects: ] # redispatch data, not available in all environment - redispatching_unit_commitment_availble = False - gen_type = None - gen_pmin = None - gen_pmax = None - gen_redispatchable = None - gen_max_ramp_up = None - gen_max_ramp_down = None - gen_min_uptime = None - gen_min_downtime = None - gen_cost_per_MW = None # marginal cost (in currency / (power.step) and not in $/(MW.h) it would be $ / (MW.5mins) ) - gen_startup_cost = None # start cost (in currency) - gen_shutdown_cost = None # shutdown cost (in currency) - gen_renewable = None + redispatching_unit_commitment_availble : ClassVar[bool] = False + gen_type : ClassVar[Optional[np.ndarray]] = None + gen_pmin : ClassVar[Optional[np.ndarray]] = None + gen_pmax : ClassVar[Optional[np.ndarray]] = None + gen_redispatchable : ClassVar[Optional[np.ndarray]] = None + gen_max_ramp_up : ClassVar[Optional[np.ndarray]] = None + gen_max_ramp_down : ClassVar[Optional[np.ndarray]] = None + gen_min_uptime : ClassVar[Optional[np.ndarray]] = None + gen_min_downtime : ClassVar[Optional[np.ndarray]] = None + gen_cost_per_MW : ClassVar[Optional[np.ndarray]] = None # marginal cost (in currency / (power.step) and not in $/(MW.h) it would be $ / (MW.5mins) ) + gen_startup_cost : ClassVar[Optional[np.ndarray]] = None # start cost (in currency) + gen_shutdown_cost : ClassVar[Optional[np.ndarray]] = None # shutdown cost (in currency) + gen_renewable : ClassVar[Optional[np.ndarray]] = None # storage unit static data - storage_type = None - storage_Emax = None - storage_Emin = None - storage_max_p_prod = None - storage_max_p_absorb = None - storage_marginal_cost = None - storage_loss = 
None - storage_charging_efficiency = None - storage_discharging_efficiency = None + storage_type : ClassVar[Optional[np.ndarray]] = None + storage_Emax : ClassVar[Optional[np.ndarray]] = None + storage_Emin : ClassVar[Optional[np.ndarray]] = None + storage_max_p_prod : ClassVar[Optional[np.ndarray]] = None + storage_max_p_absorb : ClassVar[Optional[np.ndarray]] = None + storage_marginal_cost : ClassVar[Optional[np.ndarray]] = None + storage_loss : ClassVar[Optional[np.ndarray]] = None + storage_charging_efficiency : ClassVar[Optional[np.ndarray]] = None + storage_discharging_efficiency : ClassVar[Optional[np.ndarray]] = None # grid layout - grid_layout = None + grid_layout : ClassVar[Optional[Dict[str, Tuple[float, float]]]] = None # shunt data, not available in every backend - shunts_data_available = False - n_shunt = None - name_shunt = None - shunt_to_subid = None + shunts_data_available : ClassVar[bool] = False + n_shunt : ClassVar[Optional[int]] = None + name_shunt : ClassVar[Optional[np.ndarray]] = None + shunt_to_subid : ClassVar[Optional[np.ndarray]] = None # alarm / alert assistant_warning_type = None @@ -613,17 +630,22 @@ class GridObjects: alertable_line_names = [] # name of each line to produce an alert on # TODO alertable_line_ids = [] - detailed_topo_desc : Optional[DetailedTopoDescription] = None + detailed_topo_desc : ClassVar[Optional[DetailedTopoDescription]] = None # test - _IS_INIT = False + _IS_INIT : ClassVar[Optional[bool]] = False def __init__(self): """nothing to do when an object of this class is created, the information is held by the class attributes""" pass @classmethod - def tell_dim_alarm(cls, dim_alarms): + def set_n_busbar_per_sub(cls, n_busbar_per_sub: N_BUSBAR_PER_SUB_TYPING) -> None: + # TODO n_busbar_per_sub different num per substations + cls.n_busbar_per_sub = n_busbar_per_sub + + @classmethod + def tell_dim_alarm(cls, dim_alarms: int) -> None: if cls.dim_alarms != 0: # number of alarms has already been set, i issue a warning warnings.warn( @@ -638,7 +660,7 @@ def tell_dim_alarm(cls, dim_alarms): cls.assistant_warning_type = "zonal" @classmethod - def tell_dim_alert(cls, dim_alerts): + def tell_dim_alert(cls, dim_alerts: int) -> None: if cls.dim_alerts != 0: # number of alerts has already been set, i issue a warning warnings.warn( @@ -653,8 +675,19 @@ def tell_dim_alert(cls, dim_alerts): cls.assistant_warning_type = "by_line" @classmethod - def _clear_class_attribute(cls): + def _reset_cls_dict(cls): + cls._CLS_DICT = None + cls._CLS_DICT_EXTENDED = None + + @classmethod + def _clear_class_attribute(cls) -> None: + """Also calls :func:`GridObjects._clear_grid_dependant_class_attributes` : this clear the attribute that + may be backend dependant too (eg shunts_data) + + This clear the class as if it was defined in grid2op directly. 
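# --- hedged sketch (editorial illustration, not part of the diff above) ---
# `n_busbar_per_sub` is a class attribute like the other GridObjects data above, so it
# is read from the *type* of an observation or environment. Requesting a non-default
# number of busbars via `grid2op.make(..., n_busbar=...)` is an assumption based on the
# grid2op 1.10.0 feature this attribute documents; adjust to the API actually available.
import grid2op

env = grid2op.make("l2rpn_case14_sandbox", n_busbar=3)  # assumption: kwarg supported
obs = env.reset()
print(type(obs).n_busbar_per_sub)   # 3
print(type(env).n_busbar_per_sub)   # same class-level value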
+ """ cls.shunts_data_available = False + cls.n_busbar_per_sub = DEFAULT_N_BUSBAR_PER_SUB # for redispatching / unit commitment cls._li_attr_disp = [ @@ -690,9 +723,13 @@ def _clear_class_attribute(cls): cls._clear_grid_dependant_class_attributes() @classmethod - def _clear_grid_dependant_class_attributes(cls): + def _clear_grid_dependant_class_attributes(cls) -> None: + """reset to an original state all the class attributes that depends on an environment""" + cls._reset_cls_dict() + cls._INIT_GRID_CLS = None # do not modify that, this is handled by grid2op automatically + cls._PATH_GRID_CLASSES = None # especially do not modify that + cls.glop_version = grid2op.__version__ - cls._PATH_ENV = None cls.SUB_COL = 0 cls.LOA_COL = 1 @@ -807,7 +844,7 @@ def _clear_grid_dependant_class_attributes(cls): cls.detailed_topo_desc = None @classmethod - def _update_value_set(cls): + def _update_value_set(cls) -> None: """ INTERNAL @@ -817,7 +854,7 @@ def _update_value_set(cls): """ cls.attr_list_set = set(cls.attr_list_vect) - def _raise_error_attr_list_none(self): + def _raise_error_attr_list_none(self) -> None: """ INTERNAL @@ -837,7 +874,7 @@ def _raise_error_attr_list_none(self): "nor to know its size, shape or dtype.".format(type(self)) ) - def _get_array_from_attr_name(self, attr_name): + def _get_array_from_attr_name(self, attr_name: str) -> Union[np.ndarray, int, str]: """ INTERNAL @@ -862,7 +899,7 @@ def _get_array_from_attr_name(self, attr_name): """ return np.array(getattr(self, attr_name)).flatten() - def to_vect(self): + def to_vect(self) -> np.ndarray: """ Convert this instance of GridObjects to a numpy ndarray. The size of the array is always the same and is determined by the :func:`GridObject.size` method. @@ -907,7 +944,7 @@ def to_vect(self): self._vectorized = np.array([], dtype=dt_float) return self._vectorized - def to_json(self, convert=True): + def to_json(self, convert : bool=True) -> Dict[str, Any]: """ Convert this instance of GridObjects to a dictionary that can be json serialized. @@ -942,7 +979,7 @@ def to_json(self, convert=True): cls._convert_to_json(res) return res - def from_json(self, dict_): + def from_json(self, dict_: Dict[str, Any]) -> None: """ This transform an gridobject (typically an action or an observation) serialized in json format to the corresponding grid2op action / observation (subclass of grid2op.Action.BaseAction @@ -972,7 +1009,7 @@ def from_json(self, dict_): setattr(self, key, type_(array_[0])) @classmethod - def _convert_to_json(cls, dict_): + def _convert_to_json(cls, dict_: Dict[str, Any]) -> None: for attr_nm in cls.attr_list_vect + cls.attr_list_json: tmp = dict_[attr_nm] dtype = tmp.dtype @@ -989,7 +1026,7 @@ def _convert_to_json(cls, dict_): elif dtype == bool: dict_[attr_nm] = [bool(el) for el in tmp] - def shapes(self): + def shapes(self) -> np.ndarray: """ The shapes of all the components of the action, mainly used for gym compatibility is the shape of all part of the action. @@ -1035,7 +1072,7 @@ def shapes(self): ).astype(dt_int) return res - def dtypes(self): + def dtypes(self) -> np.ndarray: """ The types of the components of the GridObjects, mainly used for gym compatibility is the shape of all part of the action. 
@@ -1155,6 +1192,7 @@ def from_vect(self, vect, check_legit=True): act_cpy = env.action_space.from_vect(act_as_vect) """ + cls = type(self) if vect.shape[0] != self.size(): raise IncorrectNumberOfElements( "Incorrect number of elements found while load a GridObjects " @@ -1173,7 +1211,7 @@ def from_vect(self, vect, check_legit=True): self._raise_error_attr_list_none() prev_ = 0 - for attr_nm, sh, dt in zip(type(self).attr_list_vect, self.shapes(), self.dtypes()): + for attr_nm, sh, dt in zip(cls.attr_list_vect, self.shapes(), self.dtypes()): tmp = vect[prev_ : (prev_ + sh)] # TODO a flag that says "default Nan" for example for when attributes are initialized with @@ -1181,10 +1219,20 @@ def from_vect(self, vect, check_legit=True): # if np.any(~np.isfinite(tmp)) and default_nan: # raise NonFiniteElement("None finite number in from_vect detected") - if attr_nm not in type(self).attr_nan_list_set and ( + if attr_nm not in cls.attr_nan_list_set and ( (~np.isfinite(tmp)).any() ): - raise NonFiniteElement("None finite number in from_vect detected") + attrs_debug = [] + prev_debug = 0 + for attr_nm_debug, sh_debug, dt_debug in zip(cls.attr_list_vect, self.shapes(), self.dtypes()): + tmp = vect[prev_debug : (prev_debug + sh_debug)] + if attr_nm not in cls.attr_nan_list_set and ( + (~np.isfinite(tmp)).any()): + attrs_debug.append(attr_nm_debug) + prev_debug += sh_debug + raise NonFiniteElement(f"None finite number in from_vect " + f"detected for corresponding to attributes " + f"{attrs_debug}") try: tmp = tmp.astype(dt) @@ -1275,31 +1323,59 @@ def _aux_pos_big_topo(cls, vect_to_subid, vect_to_sub_pos): res[i] = obj_before + my_pos return res - def _init_class_attr(self, obj=None): + def _init_class_attr(self, obj=None, _topo_vect_only=False): """Init the class attribute from an instance of the class THIS IS NOT A CLASS ATTR obj should be an object and NOT a class ! + + Notes + ------- + _topo_vect_only: this function is called once when the backend is initialized in `backend.load_grid` + (in `backend._compute_pos_big_topo`) and then once when everything is set up + (after redispatching and storage data are loaded). 
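# --- hedged sketch (editorial illustration, not part of the diff above) ---
# Standalone helper mirroring the debug loop added to `from_vect` above: walk
# `attr_list_vect` together with `shapes()` to find which attribute owns a given
# position of the flattened vector. Names follow the class attributes shown in this diff.
def attr_of_index(grid_obj, idx):
    """Return (attribute name, offset) owning position `idx` of `grid_obj.to_vect()`."""
    prev = 0
    for attr_nm, sh in zip(type(grid_obj).attr_list_vect, grid_obj.shapes()):
        if prev <= idx < prev + sh:
            return attr_nm, idx - prev
        prev += sh
    raise IndexError(f"{idx} is out of bounds for an object of size {prev}")

# usage (assuming `obs` is a grid2op observation):
# attr_nm, pos = attr_of_index(obs, 42)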
+ + This is why I need the `_topo_vect_only` flag that tells this function when it's called only for + `topo_vect` related attributed """ + if obj is None: obj = self - cls = type(self) + cls = type(self) cls_as_dict = {} - GridObjects._make_cls_dict_extended(obj, cls_as_dict, as_list=False) + GridObjects._make_cls_dict_extended(obj, cls_as_dict, as_list=False, _topo_vect_only=_topo_vect_only) for attr_nm, attr in cls_as_dict.items(): - setattr(cls, attr_nm, attr) - - if cls.detailed_topo_desc is not None: - cls.detailed_topo_desc = DetailedTopoDescription.from_dict(cls.detailed_topo_desc) + if _topo_vect_only: + # safety guard: only set the attribute needed for the computation of the topo_vect vector + # this should be the only attribute in cls_as_dict but let's be sure + if (attr_nm.endswith("to_subid") or + attr_nm.endswith("to_sub_pos") or + attr_nm.startswith("n_") or + attr_nm.startswith("dim_topo") or + attr_nm.startswith("name_") or + attr_nm.startswith("shunts_data_available") + ): + setattr(cls, attr_nm, attr) + else: + # set all the attributes + setattr(cls, attr_nm, attr) + if cls.detailed_topo_desc is not None and isinstance(cls.detailed_topo_desc, dict): + cls.detailed_topo_desc = DetailedTopoDescription.from_dict(cls.detailed_topo_desc) + + # make sure to catch data intiialized even outside of this function + if not _topo_vect_only: + cls._reset_cls_dict() + tmp = {} + GridObjects._make_cls_dict_extended(obj, tmp, as_list=False, copy_=True, _topo_vect_only=False) def _compute_pos_big_topo(self): # move the object attribute as class attribute ! if not type(self)._IS_INIT: - self._init_class_attr() + self._init_class_attr(_topo_vect_only=True) cls = type(self) cls._compute_pos_big_topo_cls() - + @classmethod def _compute_pos_big_topo_cls(cls): """ @@ -1330,8 +1406,9 @@ def _compute_pos_big_topo_cls(cls): ): # no storage on the grid, so i deactivate them cls.set_no_storage() - cls._compute_sub_elements() - cls._compute_sub_pos() + cls._compute_sub_elements() # fill the dim_topo and sub_info attributes + cls._compute_sub_pos() # fill the _to_sub_pos attributes + cls._fill_names() # fill the name_xxx attributes cls.load_pos_topo_vect = cls._aux_pos_big_topo( cls.load_to_subid, cls.load_to_sub_pos @@ -1350,6 +1427,7 @@ def _compute_pos_big_topo_cls(cls): ).astype(dt_int) cls._topo_vect_to_sub = np.repeat(np.arange(cls.n_sub), repeats=cls.sub_info) + cls._check_convert_to_np_array(raise_if_none=False) # there can still be "None" attribute at this stage cls.grid_objects_types = np.full( shape=(cls.dim_topo, 6), fill_value=-1, dtype=dt_int ) @@ -1464,9 +1542,12 @@ def _check_sub_id(cls): "is greater than the number of substations of the grid, which is {}." "".format(np.max(cls.line_or_to_subid), cls.n_sub) ) - + @classmethod def _fill_names(cls): + """fill the name vectors (**eg** name_line) if not done already in the backend. + This function is used to fill the name of the class. + """ if cls.name_line is None: cls.name_line = [ "{}_{}_{}".format(or_id, ex_id, l_id) @@ -1481,6 +1562,8 @@ def _fill_names(cls): "This might result in impossibility to load data." '\n\tIf "env.make" properly worked, you can safely ignore this warning.' ) + cls._reset_cls_dict() + if cls.name_load is None: cls.name_load = [ "load_{}_{}".format(bus_id, load_id) @@ -1493,6 +1576,8 @@ def _fill_names(cls): "This might result in impossibility to load data." '\n\tIf "env.make" properly worked, you can safely ignore this warning.' 
) + cls._reset_cls_dict() + if cls.name_gen is None: cls.name_gen = [ "gen_{}_{}".format(bus_id, gen_id) @@ -1506,6 +1591,8 @@ def _fill_names(cls): "This might result in impossibility to load data." '\n\tIf "env.make" properly worked, you can safely ignore this warning.' ) + cls._reset_cls_dict() + if cls.name_sub is None: cls.name_sub = ["sub_{}".format(sub_id) for sub_id in range(cls.n_sub)] cls.name_sub = np.array(cls.name_sub) @@ -1516,6 +1603,8 @@ def _fill_names(cls): "This might result in impossibility to load data." '\n\tIf "env.make" properly worked, you can safely ignore this warning.' ) + cls._reset_cls_dict() + if cls.name_storage is None: cls.name_storage = [ "storage_{}_{}".format(bus_id, sto_id) @@ -1529,6 +1618,25 @@ def _fill_names(cls): "This might result in impossibility to load data." '\n\tIf "env.make" properly worked, you can safely ignore this warning.' ) + cls._reset_cls_dict() + + if cls.shunts_data_available and cls.name_shunt is None: + if cls.shunt_to_subid is not None: + # used for legacy lightsim2grid + # shunt names were defined after... + cls.name_shunt = [ + "shunt_{}_{}".format(bus_id, sh_id) + for sh_id, bus_id in enumerate(cls.shunt_to_subid) + ] + cls.name_shunt = np.array(cls.name_shunt) + warnings.warn( + "name_shunt is None so default shunt names have been assigned to your grid. " + "(FYI: shunt names are used to make the correspondence between the chronics and " + "the backend)" + "This might result in impossibility to load data." + '\n\tIf "env.make" properly worked, you can safely ignore this warning.' + ) + cls._reset_cls_dict() @classmethod def _check_names(cls): @@ -1540,45 +1648,40 @@ def _check_names(cls): cls.name_line = cls.name_line.astype(str) except Exception as exc_: raise EnvError( - f"self.name_line should be convertible to a numpy array of type str. Error was " - f"{exc_}" - ) + f"self.name_line should be convertible to a numpy array of type str" + ) from exc_ if not isinstance(cls.name_load, np.ndarray): try: cls.name_load = np.array(cls.name_load) cls.name_load = cls.name_load.astype(str) except Exception as exc_: raise EnvError( - "self.name_load should be convertible to a numpy array of type str. Error was " - f"{exc_}" - ) + "self.name_load should be convertible to a numpy array of type str." + ) from exc_ if not isinstance(cls.name_gen, np.ndarray): try: cls.name_gen = np.array(cls.name_gen) cls.name_gen = cls.name_gen.astype(str) except Exception as exc_: raise EnvError( - "self.name_gen should be convertible to a numpy array of type str. Error was " - f"{exc_}" - ) + "self.name_gen should be convertible to a numpy array of type str." + ) from exc_ if not isinstance(cls.name_sub, np.ndarray): try: cls.name_sub = np.array(cls.name_sub) cls.name_sub = cls.name_sub.astype(str) except Exception as exc_: raise EnvError( - "self.name_sub should be convertible to a numpy array of type str. Error was " - f"{exc_}" - ) + "self.name_sub should be convertible to a numpy array of type str." + ) from exc_ if not isinstance(cls.name_storage, np.ndarray): try: cls.name_storage = np.array(cls.name_storage) cls.name_storage = cls.name_storage.astype(str) except Exception as exc_: raise EnvError( - "self.name_storage should be convertible to a numpy array of type str. Error was " - f"{exc_}" - ) + "self.name_storage should be convertible to a numpy array of type str." 
+ ) from exc_ attrs_nms = [ cls.name_gen, @@ -1594,7 +1697,13 @@ def _check_names(cls): nms.append("shunts") for arr_, nm in zip(attrs_nms, nms): - tmp = np.unique(arr_) + try: + tmp = np.unique(arr_) + tmp.shape[0] + arr_.shape[0] + except AttributeError as exc_: + raise Grid2OpException(f"Error for {nm}: name is most likely None") from exc_ + if tmp.shape[0] != arr_.shape[0]: nms = "\n\t - ".join(sorted(arr_)) raise EnvError( @@ -1823,6 +1932,81 @@ def _compute_sub_elements(cls): for s_id in cls.storage_to_subid: cls.sub_info[s_id] += 1 + @classmethod + def _assign_attr(cls, attrs_list, tp, tp_nm, raise_if_none=False): + for el in attrs_list: + arr = getattr(cls, el) + if arr is None: + if raise_if_none: + raise Grid2OpException(f"class attribute {el} is None, but should not be.") + continue + try: + arr2 = np.array(arr).astype(tp) + except ValueError as exc_: + raise Grid2OpException(f"Impossible to convert attribute name {el} to {tp_nm} for attr {el}") from exc_ + if len(arr) != len(arr2): + raise Grid2OpException(f"During the conversion to {tp} for attr {el} an error occured (results have not the proper size {len(arr2)} vs {len(arr)})") + if (arr != arr2).any(): + mask = arr != arr2 + raise Grid2OpException(f"Impossible to safely convert attribute name {el} to {tp_nm} for attr {el}: {arr[mask]} vs {arr2[mask]}.") + setattr(cls, el, arr2) + + @classmethod + def _check_convert_to_np_array(cls, raise_if_none=True): + # convert int to array of ints + attrs_int = ["load_pos_topo_vect", + "load_to_subid", + "load_to_sub_pos", + "gen_pos_topo_vect", + "gen_to_subid", + "gen_to_sub_pos", + "storage_pos_topo_vect", + "storage_to_subid", + "storage_to_sub_pos", + "line_or_pos_topo_vect", + "line_or_to_subid", + "line_or_to_sub_pos", + "line_ex_pos_topo_vect", + "line_ex_to_subid", + "line_ex_to_sub_pos", + ] + if cls.redispatching_unit_commitment_availble: + attrs_int.append("gen_min_uptime") + attrs_int.append("gen_min_downtime") + cls._assign_attr(attrs_int, dt_int, "int", raise_if_none) + + # convert str to array of str + attrs_str = ["name_load", + "name_gen", + "name_line", + "name_sub", + "name_storage", + "storage_type", + ] + if cls.redispatching_unit_commitment_availble: + attrs_str.append("gen_type") + cls._assign_attr(attrs_str, str, "str", raise_if_none) + + # convert float to array of float + attrs_float = ["storage_Emax", + "storage_Emin", + "storage_max_p_prod", + "storage_max_p_absorb", + "storage_marginal_cost", + "storage_loss", + "storage_charging_efficiency", + "storage_discharging_efficiency", + ] + if cls.redispatching_unit_commitment_availble: + attrs_float += ["gen_pmin", + "gen_pmax", + "gen_max_ramp_up", + "gen_max_ramp_down", + "gen_cost_per_MW", + "gen_startup_cost", + "gen_shutdown_cost"] + cls._assign_attr(attrs_float, dt_float, "float", raise_if_none) + @classmethod def assert_grid_correct_cls(cls): """ @@ -1852,6 +2036,23 @@ def assert_grid_correct_cls(cls): # TODO refactor this method with the `_check***` methods. # TODO refactor the `_check***` to use the same "base functions" that would be coded only once. 
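# --- hedged sketch (editorial illustration, not part of the diff above) ---
# Toy version of the safe-cast pattern implemented by `_assign_attr` above: convert,
# then check that the conversion changed neither the length nor any value before
# committing it. `dt_int` is grid2op's integer dtype, imported as in this diff.
import numpy as np
from grid2op.dtypes import dt_int

def safe_cast(arr, dtype):
    arr2 = np.array(arr).astype(dtype)
    if len(arr2) != len(arr):
        raise RuntimeError("conversion changed the size of the array")
    if (np.array(arr) != arr2).any():
        raise RuntimeError("conversion was lossy (values differ)")
    return arr2

print(safe_cast([1, 2, 3], dt_int))   # fine
# safe_cast([1.5, 2.0], dt_int)       # would raise: 1.5 cannot be kept as an int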
+ # TODO n_busbar_per_sub different num per substations + if isinstance(cls.n_busbar_per_sub, (int, dt_int, np.int32, np.int64)): + cls.n_busbar_per_sub = dt_int(cls.n_busbar_per_sub) + # np.full(cls.n_sub, + # fill_value=cls.n_busbar_per_sub, + # dtype=dt_int) + else: + # cls.n_busbar_per_sub = np.array(cls.n_busbar_per_sub) + # cls.n_busbar_per_sub = cls.n_busbar_per_sub.astype(dt_int) + raise EnvError("Grid2op cannot handle a different number of busbar per substations at the moment.") + + # if cls.n_busbar_per_sub != int(cls.n_busbar_per_sub): + # raise EnvError(f"`n_busbar_per_sub` should be convertible to an integer, found {cls.n_busbar_per_sub}") + # cls.n_busbar_per_sub = int(cls.n_busbar_per_sub) + if (cls.n_busbar_per_sub < 1).any(): + raise EnvError(f"`n_busbar_per_sub` should be >= 1 found {cls.n_busbar_per_sub}") + if cls.n_gen <= 0: raise EnvError( "n_gen is negative. Powergrid is invalid: there are no generator" @@ -1893,13 +2094,12 @@ def assert_grid_correct_cls(cls): f"self.sub_info should be convertible to a numpy array. " f'It fails with error "{exc_}"' ) - + # check everything can be converted to numpy array of right types + cls._check_convert_to_np_array() + # to which subtation they are connected cls._check_sub_id() - # for names - cls._check_names() - # compute the position in substation if not done already cls._compute_sub_pos() @@ -1966,6 +2166,10 @@ def assert_grid_correct_cls(cls): ) raise IncorrectNumberOfElements(err_msg) + + # for names + cls._check_names() + if len(cls.name_load) != cls.n_load: raise IncorrectNumberOfLoads("len(self.name_load) != self.n_load") if len(cls.name_gen) != cls.n_gen: @@ -2024,7 +2228,7 @@ def assert_grid_correct_cls(cls): if not np.all(obj_per_sub == cls.sub_info): raise IncorrectNumberOfElements( - f"for substation(s): {np.where(obj_per_sub != cls.sub_info)[0]}" + f"for substation(s): {(obj_per_sub != cls.sub_info).nonzero()[0]}" ) # test right number of element in substations @@ -2043,12 +2247,12 @@ def assert_grid_correct_cls(cls): zip(cls.line_or_to_subid, cls.line_or_to_sub_pos) ): if sub_pos >= cls.sub_info[sub_id]: - raise IncorrectPositionOfLines("for line {} at origin end".format(i)) + raise IncorrectPositionOfLines("for line {} at origin side".format(i)) for i, (sub_id, sub_pos) in enumerate( zip(cls.line_ex_to_subid, cls.line_ex_to_sub_pos) ): if sub_pos >= cls.sub_info[sub_id]: - raise IncorrectPositionOfLines("for line {} at extremity end".format(i)) + raise IncorrectPositionOfLines("for line {} at extremity side".format(i)) for i, (sub_id, sub_pos) in enumerate( zip(cls.storage_to_subid, cls.storage_to_sub_pos) ): @@ -2161,14 +2365,14 @@ def _check_validity_alarm_data(cls): # the "alarm" feature is supported assert isinstance( - cls.alarms_area_names, list - ), "cls.alarms_area_names should be a list" + cls.alarms_area_names, (list, tuple) + ), "cls.alarms_area_names should be a list or a tuple" assert isinstance( cls.alarms_lines_area, dict ), "cls.alarms_lines_area should be a dict" assert isinstance( - cls.alarms_area_lines, list - ), "cls.alarms_area_lines should be a dict" + cls.alarms_area_lines, (list, tuple) + ), "cls.alarms_area_lines should be a list or a tuple" assert ( len(cls.alarms_area_names) == cls.dim_alarms ), "len(cls.alarms_area_names) != cls.dim_alarms" @@ -2216,7 +2420,7 @@ def _check_validity_alarm_data(cls): @classmethod def _check_validity_detailed_topo(cls): if cls.detailed_topo_desc is not None: - cls.detailed_topo_desc.check_validity() + cls.detailed_topo_desc.check_validity(cls) 
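# Several hunks above and below swap `np.where(cond)[0]` for
# `(cond).nonzero()[0]`. For a boolean mask the two are strictly equivalent
# (single-argument `np.where` is documented as an alias for `nonzero`), so the
# change is cosmetic; a quick check with made-up storage data:
import numpy as np

storage_Emax = np.array([10.0, 5.0, 7.0])
storage_Emin = np.array([0.0, 6.0, 2.0])

mask = storage_Emax < storage_Emin
assert (np.where(mask)[0] == mask.nonzero()[0]).all()
print(mask.nonzero()[0])  # -> [1]: only storage unit 1 has Emax < Emin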
@classmethod def _check_validity_alert_data(cls): @@ -2333,57 +2537,57 @@ def _check_validity_storage_data(cls): ) if (cls.storage_Emax < cls.storage_Emin).any(): - tmp = np.where(cls.storage_Emax < cls.storage_Emin)[0] + tmp = (cls.storage_Emax < cls.storage_Emin).nonzero()[0] raise BackendError( f"storage_Emax < storage_Emin for storage units with ids: {tmp}" ) if (cls.storage_Emax < 0.0).any(): - tmp = np.where(cls.storage_Emax < 0.0)[0] + tmp = (cls.storage_Emax < 0.0).nonzero()[0] raise BackendError( f"self.storage_Emax < 0. for storage units with ids: {tmp}" ) if (cls.storage_Emin < 0.0).any(): - tmp = np.where(cls.storage_Emin < 0.0)[0] + tmp = (cls.storage_Emin < 0.0).nonzero()[0] raise BackendError( f"self.storage_Emin < 0. for storage units with ids: {tmp}" ) if (cls.storage_max_p_prod < 0.0).any(): - tmp = np.where(cls.storage_max_p_prod < 0.0)[0] + tmp = (cls.storage_max_p_prod < 0.0).nonzero()[0] raise BackendError( f"self.storage_max_p_prod < 0. for storage units with ids: {tmp}" ) if (cls.storage_max_p_absorb < 0.0).any(): - tmp = np.where(cls.storage_max_p_absorb < 0.0)[0] + tmp = (cls.storage_max_p_absorb < 0.0).nonzero()[0] raise BackendError( f"self.storage_max_p_absorb < 0. for storage units with ids: {tmp}" ) if (cls.storage_loss < 0.0).any(): - tmp = np.where(cls.storage_loss < 0.0)[0] + tmp = (cls.storage_loss < 0.0).nonzero()[0] raise BackendError( f"self.storage_loss < 0. for storage units with ids: {tmp}" ) if (cls.storage_discharging_efficiency <= 0.0).any(): - tmp = np.where(cls.storage_discharging_efficiency <= 0.0)[0] + tmp = (cls.storage_discharging_efficiency <= 0.0).nonzero()[0] raise BackendError( f"self.storage_discharging_efficiency <= 0. for storage units with ids: {tmp}" ) if (cls.storage_discharging_efficiency > 1.0).any(): - tmp = np.where(cls.storage_discharging_efficiency > 1.0)[0] + tmp = (cls.storage_discharging_efficiency > 1.0).nonzero()[0] raise BackendError( f"self.storage_discharging_efficiency > 1. for storage units with ids: {tmp}" ) if (cls.storage_charging_efficiency < 0.0).any(): - tmp = np.where(cls.storage_charging_efficiency < 0.0)[0] + tmp = (cls.storage_charging_efficiency < 0.0).nonzero()[0] raise BackendError( f"self.storage_charging_efficiency < 0. for storage units with ids: {tmp}" ) if (cls.storage_charging_efficiency > 1.0).any(): - tmp = np.where(cls.storage_charging_efficiency > 1.0)[0] + tmp = (cls.storage_charging_efficiency > 1.0).nonzero()[0] raise BackendError( f"self.storage_charging_efficiency > 1. for storage units with ids: {tmp}" ) if (cls.storage_loss > cls.storage_max_p_absorb).any(): - tmp = np.where(cls.storage_loss > cls.storage_max_p_absorb)[0] + tmp = (cls.storage_loss > cls.storage_max_p_absorb).nonzero()[0] raise BackendError( f"Some storage units are such that their loss (self.storage_loss) is higher " f"than the maximum power at which they can be charged (self.storage_max_p_absorb). 
" @@ -2692,7 +2896,48 @@ def set_env_name(cls, name): cls.env_name = name @classmethod - def init_grid(cls, gridobj, force=False, extra_name=None, force_module=None): + def _aux_init_grid_from_cls(cls, gridobj, name_res): + import importlib + # NB: these imports needs to be consistent with what is done in + # base_env.generate_classes() + super_module_nm, module_nm = os.path.split(gridobj._PATH_GRID_CLASSES) + if module_nm == "_grid2op_classes": + # legacy "experimental_read_from_local_dir" + # issue was the module "_grid2op_classes" had the same name + # regardless of the environment, so grid2op was "confused" + env_path, env_nm = os.path.split(super_module_nm) + if env_path not in sys.path: + sys.path.append(env_path) + super_supermodule = importlib.import_module(env_nm) + module_nm = f"{env_nm}.{module_nm}" + super_module_nm = super_supermodule + + if f"{module_nm}.{name_res}_file" in sys.modules: + cls_res = getattr(sys.modules[f"{module_nm}.{name_res}_file"], name_res) + # do not forget to create the cls_dict once and for all + if cls_res._CLS_DICT is None: + tmp = {} + cls_res._make_cls_dict_extended(cls_res, tmp, as_list=False) + return cls_res + + super_module = importlib.import_module(module_nm, super_module_nm) # env/path/_grid2op_classes/ + module_all_classes = importlib.import_module(f"{module_nm}") # module specific to the tmpdir created + try: + module = importlib.import_module(f".{name_res}_file", package=module_nm) # module containing the definition of the class + except ModuleNotFoundError: + # in case we need to build the cache again if the module is not found the first time + importlib.invalidate_caches() + importlib.reload(super_module) + module = importlib.import_module(f".{name_res}_file", package=module_nm) + cls_res = getattr(module, name_res) + # do not forget to create the cls_dict once and for all + if cls_res._CLS_DICT is None: + tmp = {} + cls_res._make_cls_dict_extended(cls_res, tmp, as_list=False) + return cls_res + + @classmethod + def init_grid(cls, gridobj, force=False, extra_name=None, force_module=None, _local_dir_cls=None): """ INTERNAL @@ -2719,11 +2964,11 @@ def init_grid(cls, gridobj, force=False, extra_name=None, force_module=None): name_res = "{}_{}".format(cls.__name__, gridobj.env_name) if gridobj.glop_version != grid2op.__version__: name_res += f"_{gridobj.glop_version}" - - if gridobj._PATH_ENV is not None: + + if gridobj._PATH_GRID_CLASSES is not None: # the configuration equires to initialize the classes from the local environment path # this might be usefull when using pickle module or multiprocessing on Windows for example - my_class = GridObjects._build_cls_from_import(name_res, gridobj._PATH_ENV) + my_class = GridObjects._build_cls_from_import(name_res, gridobj._PATH_GRID_CLASSES) if my_class is not None: return my_class @@ -2733,14 +2978,44 @@ def init_grid(cls, gridobj, force=False, extra_name=None, force_module=None): # there might be issues name_res += "_noshunt" + # TODO n_busbar_per_sub different num per substations: if it's a vector, use some kind of hash of it + # for the name of the class ! 
+ if gridobj.n_busbar_per_sub != DEFAULT_N_BUSBAR_PER_SUB: + # to be able to load same environment with + # different `n_busbar_per_sub` + name_res += f"_{gridobj.n_busbar_per_sub}" + + if _local_dir_cls is not None and gridobj._PATH_GRID_CLASSES is not None: + # new in grid2op 1.10.3: + # if I end up here it's because (done in base_env.generate_classes()): + # 1) the first initial env has already been created + # 2) I need to init the class from the files (and not from whetever else) + # So i do it. And if that is the case, the files are created on the hard drive + # AND the module is added to the path + + # check that it matches (security / consistency check) + if not os.path.samefile(_local_dir_cls.name , gridobj._PATH_GRID_CLASSES): + # in windows the string comparison fails because of things like "/", "\" or "\\" + # this is why we use "samefile" + raise EnvError(f"Unable to create the class: mismatch between " + f"_local_dir_cls ({_local_dir_cls.name}) and " + f" _PATH_GRID_CLASSES ({gridobj._PATH_GRID_CLASSES})") + return cls._aux_init_grid_from_cls(gridobj, name_res) + elif gridobj._PATH_GRID_CLASSES is not None: + # If I end up it's because the environment is created with already initialized + # classes. + return cls._aux_init_grid_from_cls(gridobj, name_res) + + # legacy behaviour: build the class "on the fly" + # of new (>= 1.10.3 for the intial creation of the environment) if name_res in globals(): - if not force: + if not force and _local_dir_cls is None: # no need to recreate the class, it already exists return globals()[name_res] else: # i recreate the variable del globals()[name_res] - + cls_attr_as_dict = {} GridObjects._make_cls_dict_extended(gridobj, cls_attr_as_dict, as_list=False) res_cls = type(name_res, (cls,), cls_attr_as_dict) @@ -2758,39 +3033,160 @@ def init_grid(cls, gridobj, force=False, extra_name=None, force_module=None): res_cls._compute_pos_big_topo_cls() res_cls.process_shunt_satic_data() - if res_cls.glop_version != grid2op.__version__: - res_cls.process_grid2op_compat() - + compat_mode = res_cls.process_grid2op_compat() + + # this needs to be done after process_grid2op_compat + # because process_grid2op_compat can remove the description of the topology + # which is not supported in earlier grid2op versions + if res_cls.detailed_topo_desc is not None: + res_cls.process_grid2op_detailed_topo_vect() + + res_cls._check_convert_to_np_array() # convert everything to numpy array if force_module is not None: res_cls.__module__ = force_module # hack because otherwise it says "abc" which is not the case # best would be to have a look at https://docs.python.org/3/library/types.html - + + if not compat_mode: + # I can reuse the "cls" dictionnary as they did not changed + if cls._CLS_DICT is not None: + res_cls._CLS_DICT = cls._CLS_DICT + if cls._CLS_DICT_EXTENDED is not None: + res_cls._CLS_DICT_EXTENDED = cls._CLS_DICT_EXTENDED + else: + # I need to rewrite the _CLS_DICT and _CLS_DICT_EXTENDED + # as the class has been modified with a "compatibility version" mode + tmp = {} + res_cls._make_cls_dict_extended(res_cls, tmp, as_list=False) + # store the type created here in the "globals" to prevent the initialization of the same class over and over globals()[name_res] = res_cls del res_cls return globals()[name_res] + @classmethod + def process_grid2op_detailed_topo_vect(cls): + """Process the class to register new attribute for observation and action + if the detailed_topo_desc is not empty (*ie* if there switches on your grid) + """ + pass + + @classmethod + def 
_get_grid2op_version_as_version_obj(cls): + if cls.glop_version == cls.BEFORE_COMPAT_VERSION: + glop_ver = version.parse("0.0.0") + else: + glop_ver = version.parse(cls.glop_version) + return glop_ver + @classmethod def process_grid2op_compat(cls): """ - This function can be overloaded. + INTERNAL + .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\ + This is done at the creation of the environment. Use of this class outside of this particular + use is really dangerous and will lead to undefined behaviours. **Do not use this function**. + This is called when the class is initialized, with `init_grid` to broadcast grid2op compatibility feature. + + This function can be overloaded, but in this case it's best to call this original method too. + """ - if cls.glop_version < "1.6.0": + res = False + glop_ver = cls._get_grid2op_version_as_version_obj() + + if cls.glop_version == cls.BEFORE_COMPAT_VERSION: + # oldest version: no storage and no curtailment available + cls._aux_process_old_compat() + res = True + + if glop_ver < version.parse("1.6.0"): # this feature did not exist before. cls.dim_alarms = 0 cls.assistant_warning_type = None + res = True - if cls.glop_version < "1.9.1": + if glop_ver < version.parse("1.9.1"): # this feature did not exists before cls.dim_alerts = 0 cls.alertable_line_names = [] cls.alertable_line_ids = [] - if cls.glop_version < "1.9.5": + if glop_ver < version.parse("1.10.0.dev0"): + # this feature did not exists before + # I need to set it to the default if set elsewhere + cls.n_busbar_per_sub = DEFAULT_N_BUSBAR_PER_SUB + res = True + + if glop_ver < version.parse("1.10.2.dev3"): cls.detailed_topo_desc = None + res = True + + if res: + cls._reset_cls_dict() # forget the previous class (stored as dict) + return res + @classmethod + def _aux_fix_topo_vect_removed_storage(cls): + if cls.n_storage == 0: + return + + stor_locs = [pos for pos in cls.storage_pos_topo_vect] + for stor_loc in sorted(stor_locs, reverse=True): + for vect in [ + cls.load_pos_topo_vect, + cls.gen_pos_topo_vect, + cls.line_or_pos_topo_vect, + cls.line_ex_pos_topo_vect, + ]: + vect[vect >= stor_loc] -= 1 + + # deals with the "sub_pos" vector + for sub_id in range(cls.n_sub): + if (cls.storage_to_subid == sub_id).any(): + stor_ids = (cls.storage_to_subid == sub_id).nonzero()[0] + stor_locs = cls.storage_to_sub_pos[stor_ids] + for stor_loc in sorted(stor_locs, reverse=True): + for vect, sub_id_me in zip( + [ + cls.load_to_sub_pos, + cls.gen_to_sub_pos, + cls.line_or_to_sub_pos, + cls.line_ex_to_sub_pos, + ], + [ + cls.load_to_subid, + cls.gen_to_subid, + cls.line_or_to_subid, + cls.line_ex_to_subid, + ], + ): + vect[(vect >= stor_loc) & (sub_id_me == sub_id)] -= 1 + + # remove storage from the number of element in the substation + for sub_id in range(cls.n_sub): + cls.sub_info[sub_id] -= (cls.storage_to_subid == sub_id).sum() + # remove storage from the total number of element + cls.dim_topo -= cls.n_storage + + # recompute this private member + cls._topo_vect_to_sub = np.repeat( + np.arange(cls.n_sub), repeats=cls.sub_info + ) + + new_grid_objects_types = cls.grid_objects_types + new_grid_objects_types = new_grid_objects_types[ + new_grid_objects_types[:, cls.STORAGE_COL] == -1, : + ] + cls.grid_objects_types = 1 * new_grid_objects_types + + @classmethod + def _aux_process_old_compat(cls): + # remove "storage dependant attributes (topo_vect etc.) that are modified !" 
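# `process_grid2op_compat` above now parses versions (presumably with
# `packaging.version`) instead of comparing raw strings such as
# `cls.glop_version < "1.6.0"`. String comparison is lexicographic and breaks
# as soon as a version component reaches two digits, which is exactly what the
# new `_get_grid2op_version_as_version_obj` helper avoids:
from packaging import version

print("1.10.0" < "1.9.1")                                # True  (lexicographic, wrong)
print(version.parse("1.10.0") < version.parse("1.9.1"))  # False (correct ordering)

# pre-releases also sort before the corresponding final release
assert version.parse("1.10.2.dev3") < version.parse("1.10.2")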
+ cls._aux_fix_topo_vect_removed_storage() + # deactivate storage + cls.set_no_storage() + @classmethod def get_obj_connect_to(cls, _sentinel=None, substation_id=None): """ @@ -2836,9 +3232,9 @@ def get_obj_connect_to(cls, _sentinel=None, substation_id=None): sub_id, env.name_load[dict_["loads_id"]])) print("The names of the generators connected to substation {} are: {}".format( sub_id, env.name_gen[dict_["generators_id"]])) - print("The powerline whose origin end is connected to substation {} are: {}".format( + print("The powerline whose origin side is connected to substation {} are: {}".format( sub_id, env.name_line[dict_["lines_or_id"]])) - print("The powerline whose extremity end is connected to substation {} are: {}".format( + print("The powerline whose extremity side is connected to substation {} are: {}".format( sub_id, env.name_line[dict_["lines_ex_id"]])) print("The storage units connected to substation {} are: {}".format( sub_id, env.name_line[dict_["storages_id"]])) @@ -2860,15 +3256,55 @@ def get_obj_connect_to(cls, _sentinel=None, substation_id=None): "".format(substation_id) ) res = { - "loads_id": np.where(cls.load_to_subid == substation_id)[0], - "generators_id": np.where(cls.gen_to_subid == substation_id)[0], - "lines_or_id": np.where(cls.line_or_to_subid == substation_id)[0], - "lines_ex_id": np.where(cls.line_ex_to_subid == substation_id)[0], - "storages_id": np.where(cls.storage_to_subid == substation_id)[0], + "loads_id": (cls.load_to_subid == substation_id).nonzero()[0], + "generators_id": (cls.gen_to_subid == substation_id).nonzero()[0], + "lines_or_id": (cls.line_or_to_subid == substation_id).nonzero()[0], + "lines_ex_id": (cls.line_ex_to_subid == substation_id).nonzero()[0], + "storages_id": (cls.storage_to_subid == substation_id).nonzero()[0], "nb_elements": cls.sub_info[substation_id], } return res + @classmethod + def get_powerline_id(cls, sub_id: int) -> np.ndarray: + """ + Return the id of all powerlines connected to the substation `sub_id` + either "or" side or "ex" side + + Parameters + ----------- + sub_id: `int` + The id of the substation concerned + + Returns + ------- + res: np.ndarray, int + The id of all powerlines connected to this substation (either or side or ex side) + + Examples + -------- + + To get the id of all powerlines connected to substation with id 1, + you can do: + + .. code-block:: python + + import numpy as np + import grid2op + env = grid2op.make("l2rpn_case14_sandbox") + + all_lines_conn_to_sub_id_1 = type(env).get_powerline_id(1) + + """ + powerlines_or_id = cls.line_or_to_sub_pos[ + cls.line_or_to_subid == sub_id + ] + powerlines_ex_id = cls.line_ex_to_sub_pos[ + cls.line_ex_to_subid == sub_id + ] + powerlines_id = np.concatenate((powerlines_or_id, powerlines_ex_id)) + return powerlines_id + @classmethod def get_obj_substations(cls, _sentinel=None, substation_id=None): """ @@ -2894,10 +3330,10 @@ def get_obj_substations(cls, _sentinel=None, substation_id=None): 1. column 0: the id of the substation 2. column 1: -1 if this object is not a load, or `LOAD_ID` if this object is a load (see example) 3. column 2: -1 if this object is not a generator, or `GEN_ID` if this object is a generator (see example) - 4. column 3: -1 if this object is not the origin end of a line, or `LOR_ID` if this object is the - origin end of a powerline(see example) - 5. column 4: -1 if this object is not a extremity end, or `LEX_ID` if this object is the extremity - end of a powerline + 4. 
column 3: -1 if this object is not the origin side of a line, or `LOR_ID` if this object is the + origin side of a powerline(see example) + 5. column 4: -1 if this object is not a extremity side, or `LEX_ID` if this object is the extremity + side of a powerline 6. column 5: -1 if this object is not a storage unit, or `STO_ID` if this object is one Examples @@ -2920,14 +3356,14 @@ def get_obj_substations(cls, _sentinel=None, substation_id=None): # we can also get that: # 1. this is not a load (-1 at position 1 - so 2nd component) # 2. this is not a generator (-1 at position 2 - so 3rd component) - # 3. this is not the origin end of a powerline (-1 at position 3) - # 4. this is the extremity end of powerline 0 (there is a 0 at position 4) + # 3. this is not the origin side of a powerline (-1 at position 3) + # 4. this is the extremity side of powerline 0 (there is a 0 at position 4) # 5. this is not a storage unit (-1 at position 5 - so last component) # likewise, the second element connected at this substation is: mat[1,:] # array([ 1, -1, -1, 2, -1, -1], dtype=int32) - # it represents the origin end of powerline 2 + # it represents the origin side of powerline 2 # the 5th element connected at this substation is: mat[4,:] @@ -2980,7 +3416,8 @@ def get_obj_substations(cls, _sentinel=None, substation_id=None): ] return res - def get_lines_id(self, _sentinel=None, from_=None, to_=None): + @classmethod + def get_lines_id(cls, _sentinel=None, from_=None, to_=None): """ Returns the list of all the powerlines id in the backend going from `from_` to `to_` @@ -2990,10 +3427,10 @@ def get_lines_id(self, _sentinel=None, from_=None, to_=None): Internal, do not use from_: ``int`` - Id the substation to which the origin end of the powerline to look for should be connected to + Id the substation to which the origin side of the powerline to look for should be connected to to_: ``int`` - Id the substation to which the extremity end of the powerline to look for should be connected to + Id the substation to which the extremity side of the powerline to look for should be connected to Returns ------- @@ -3032,7 +3469,7 @@ def get_lines_id(self, _sentinel=None, from_=None, to_=None): ) for i, (ori, ext) in enumerate( - zip(self.line_or_to_subid, self.line_ex_to_subid) + zip(cls.line_or_to_subid, cls.line_ex_to_subid) ): if ori == from_ and ext == to_: res.append(i) @@ -3045,7 +3482,8 @@ def get_lines_id(self, _sentinel=None, from_=None, to_=None): return res - def get_generators_id(self, sub_id): + @classmethod + def get_generators_id(cls, sub_id): """ Returns the list of all generators id in the backend connected to the substation sub_id @@ -3085,7 +3523,7 @@ def get_generators_id(self, sub_id): 'Please modify "sub_id" parameter' ) - for i, s_id_gen in enumerate(self.gen_to_subid): + for i, s_id_gen in enumerate(cls.gen_to_subid): if s_id_gen == sub_id: res.append(i) @@ -3097,7 +3535,8 @@ def get_generators_id(self, sub_id): return res - def get_loads_id(self, sub_id): + @classmethod + def get_loads_id(cls, sub_id): """ Returns the list of all loads id in the backend connected to the substation sub_id @@ -3136,7 +3575,7 @@ def get_loads_id(self, sub_id): 'Please modify "sub_id" parameter' ) - for i, s_id_gen in enumerate(self.load_to_subid): + for i, s_id_gen in enumerate(cls.load_to_subid): if s_id_gen == sub_id: res.append(i) @@ -3149,7 +3588,8 @@ def get_loads_id(self, sub_id): return res - def get_storages_id(self, sub_id): + @classmethod + def get_storages_id(cls, sub_id): """ Returns the list of all 
storages element (battery or damp) id in the grid connected to the substation sub_id @@ -3188,24 +3628,160 @@ def get_storages_id(self, sub_id): 'Please modify "sub_id" parameter' ) - for i, s_id_gen in enumerate(self.storage_to_subid): + for i, s_id_gen in enumerate(cls.storage_to_subid): if s_id_gen == sub_id: res.append(i) if not res: # res is empty here raise BackendError( - "GridObjects.bd: impossible to find a storage unit connected at substation {}".format( + "GridObjects.get_storages_id: impossible to find a storage unit connected at substation {}".format( sub_id ) ) return res + @classmethod + def topo_vect_element(cls, topo_vect_id: int) -> Dict[Literal["load_id", "gen_id", "line_id", "storage_id", "line_or_id", "line_ex_id", "sub_id"], + Union[int, Dict[Literal["or", "ex"], int]]]: + """ + This function aims to be the "opposite" of the + `cls.xxx_pos_topo_vect` (**eg** `cls.load_pos_topo_vect`) + + You give it an id in the topo_vect (*eg* 10) and it gives you + information about which element it is. More precisely, if + `type(env).topo_vect[topo_vect_id]` is: + + - a **load** then it will return `{'load_id': load_id}`, with `load_id` + being such that `type(env).load_pos_topo_vect[load_id] == topo_vect_id` + - a **generator** then it will return `{'gen_id': gen_id}`, with `gen_id` + being such that `type(env).gen_pos_topo_vect[gen_id] == topo_vect_id` + - a **storage** then it will return `{'storage_id': storage_id}`, with `storage_id` + being such that `type(env).storage_pos_topo_vect[storage_id] == topo_vect_id` + - a **line** (origin side) then it will return `{'line_id': {'or': line_id}, 'line_or_id': line_id}`, + with `line_id` + being such that `type(env).line_or_pos_topo_vect[line_id] == topo_vect_id` + - a **line** (ext side) then it will return `{'line_id': {'ex': line_id}, 'line_ex_id': line_id}`, + with `line_id` + being such that `type(env).line_or_pos_topo_vect[line_id] == topo_vect_id` + + .. seealso:: + The attributes :attr:`GridObjects.load_pos_topo_vect`, :attr:`GridObjects.gen_pos_topo_vect`, + :attr:`GridObjects.storage_pos_topo_vect`, :attr:`GridObjects.line_or_pos_topo_vect` and + :attr:`GridObjects.line_ex_pos_topo_vect` to do the opposite. + + And you can also have a look at :attr:`GridObjects.grid_objects_types` + + Parameters + ---------- + topo_vect_id: ``int`` + The element of the topo vect to which you want more information. + + Returns + ------- + res: ``dict`` + See details in the description + + Examples + -------- + It can be used like: + + .. code-block:: python + + import numpy as np + import grid2op + env = grid2op.make("l2rpn_case14_sandbox") + + env_cls = type(env) # or `type(act)` or` type(obs)` etc. 
or even `env.topo_vect_element(...)` or `obs.topo_vect_element(...)` + for load_id, pos_topo_vect in enumerate(env_cls.load_pos_topo_vect): + res = env_cls.topo_vect_element(pos_topo_vect) + assert "load_id" in res + assert res["load_id"] == load_id + + for gen_id, pos_topo_vect in enumerate(env_cls.gen_pos_topo_vect): + res = env_cls.topo_vect_element(pos_topo_vect) + assert "gen_id" in res + assert res["gen_id"] == gen_id + + for sto_id, pos_topo_vect in enumerate(env_cls.storage_pos_topo_vect): + res = env_cls.topo_vect_element(pos_topo_vect) + assert "storage_id" in res + assert res["storage_id"] == sto_id + + for line_id, pos_topo_vect in enumerate(env_cls.line_or_pos_topo_vect): + res = env_cls.topo_vect_element(pos_topo_vect) + assert "line_id" in res + assert res["line_id"] == {"or": line_id} + assert "line_or_id" in res + assert res["line_or_id"] == line_id + + for line_id, pos_topo_vect in enumerate(env_cls.line_ex_pos_topo_vect): + res = env_cls.topo_vect_element(pos_topo_vect) + assert "line_id" in res + assert res["line_id"] == {"ex": line_id} + assert "line_ex_id" in res + assert res["line_ex_id"] == line_id + + """ + elt = cls.grid_objects_types[topo_vect_id] + res = {"sub_id": int(elt[cls.SUB_COL])} + if elt[cls.LOA_COL] != -1: + res["load_id"] = int(elt[cls.LOA_COL]) + return res + if elt[cls.GEN_COL] != -1: + res["gen_id"] = int(elt[cls.GEN_COL]) + return res + if elt[cls.STORAGE_COL] != -1: + res["storage_id"] = int(elt[cls.STORAGE_COL]) + return res + if elt[cls.LOR_COL] != -1: + res["line_or_id"] = int(elt[cls.LOR_COL]) + res["line_id"] = {"or": int(elt[cls.LOR_COL])} + return res + if elt[cls.LEX_COL] != -1: + res["line_ex_id"] = int(elt[cls.LEX_COL]) + res["line_id"] = {"ex": int(elt[cls.LEX_COL])} + return res + raise Grid2OpException(f"Unknown element at position {topo_vect_id}") + @staticmethod - def _make_cls_dict(cls, res, as_list=True, copy_=True): - """NB: `cls` can be here a class or an object of a class...""" - save_to_dict(res, cls, "glop_version", str, copy_) - res["_PATH_ENV"] = cls._PATH_ENV # i do that manually for more control + def _make_cls_dict(cls, res, as_list=True, copy_=True, _topo_vect_only=False): + """ + INTERNAL + + .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\ + + NB: `cls` can be here a class or an object of a class... + + Notes + ------- + _topo_vect_only: this function is called once when the backend is initialized in `backend.load_grid` + (in `backend._compute_pos_big_topo`) and then once when everything is set up + (after redispatching and storage data are loaded). + + This is why I need the `_topo_vect_only` flag that tells this function when it's called only for + `topo_vect` related attributed + + """ + if cls._CLS_DICT is not None and not as_list and not _topo_vect_only: + # speed optimization: it has already been computed, so + # I reuse it (class attr are const) + for k, v in cls._CLS_DICT.items(): + if copy_: + res[k] = copy.deepcopy(v) + else: + res[k] = v + return + + if not _topo_vect_only: + # all the attributes bellow are not needed for the "first call" + # to this function when the elements are put together in the topo_vect. 
+ # Indeed, at this stage (first call in the backend.load_grid) these + # attributes are not (necessary) loaded yet + save_to_dict(res, cls, "glop_version", str, copy_) + res["_PATH_GRID_CLASSES"] = cls._PATH_GRID_CLASSES # i do that manually for more control + save_to_dict(res, cls, "n_busbar_per_sub", str, copy_) + save_to_dict( res, cls, @@ -3359,185 +3935,229 @@ def _make_cls_dict(cls, res, as_list=True, copy_=True): copy_, ) - # redispatching - if cls.redispatching_unit_commitment_availble: - for nm_attr, type_attr in zip(cls._li_attr_disp, cls._type_attr_disp): + # shunts (not in topo vect but still usefull) + if cls.shunts_data_available: + save_to_dict( + res, + cls, + "name_shunt", + (lambda li: [str(el) for el in li]) if as_list else None, + copy_, + ) + save_to_dict( + res, + cls, + "shunt_to_subid", + (lambda li: [int(el) for el in li]) if as_list else None, + copy_, + ) + else: + res["name_shunt"] = None + res["shunt_to_subid"] = None + + if not _topo_vect_only: + # all the attributes bellow are not needed for the "first call" + # to this function when the elements are put together in the topo_vect. + # Indeed, at this stage (first call in the backend.load_grid) these + # attributes are not loaded yet + + # redispatching + if cls.redispatching_unit_commitment_availble: + for nm_attr, type_attr in zip(cls._li_attr_disp, cls._type_attr_disp): + save_to_dict( + res, + cls, + nm_attr, + (lambda li: [type_attr(el) for el in li]) if as_list else None, + copy_, + ) + else: + for nm_attr in cls._li_attr_disp: + res[nm_attr] = None + + # layout (position of substation on a map of the grid) + if cls.grid_layout is not None: save_to_dict( res, cls, - nm_attr, - (lambda li: [type_attr(el) for el in li]) if as_list else None, + "grid_layout", + (lambda gl: {str(k): [float(x), float(y)] for k, (x, y) in gl.items()}) + if as_list + else None, copy_, ) - else: - for nm_attr in cls._li_attr_disp: - res[nm_attr] = None + else: + res["grid_layout"] = None - # shunts - if cls.grid_layout is not None: + # storage data save_to_dict( res, cls, - "grid_layout", - (lambda gl: {str(k): [float(x), float(y)] for k, (x, y) in gl.items()}) - if as_list - else None, + "storage_type", + (lambda li: [str(el) for el in li]) if as_list else None, copy_, ) - else: - res["grid_layout"] = None - - # shunts - if cls.shunts_data_available: save_to_dict( res, cls, - "name_shunt", - (lambda li: [str(el) for el in li]) if as_list else None, + "storage_Emax", + (lambda li: [float(el) for el in li]) if as_list else None, copy_, ) save_to_dict( res, cls, - "shunt_to_subid", - (lambda li: [int(el) for el in li]) if as_list else None, + "storage_Emin", + (lambda li: [float(el) for el in li]) if as_list else None, + copy_, + ) + save_to_dict( + res, + cls, + "storage_max_p_prod", + (lambda li: [float(el) for el in li]) if as_list else None, + copy_, + ) + save_to_dict( + res, + cls, + "storage_max_p_absorb", + (lambda li: [float(el) for el in li]) if as_list else None, + copy_, + ) + save_to_dict( + res, + cls, + "storage_marginal_cost", + (lambda li: [float(el) for el in li]) if as_list else None, + copy_, + ) + save_to_dict( + res, + cls, + "storage_loss", + (lambda li: [float(el) for el in li]) if as_list else None, + copy_, + ) + save_to_dict( + res, + cls, + "storage_charging_efficiency", + (lambda li: [float(el) for el in li]) if as_list else None, + copy_, + ) + save_to_dict( + res, + cls, + "storage_discharging_efficiency", + (lambda li: [float(el) for el in li]) if as_list else None, copy_, ) - else: - 
res["name_shunt"] = None - res["shunt_to_subid"] = None - - # storage data - save_to_dict( - res, - cls, - "storage_type", - (lambda li: [str(el) for el in li]) if as_list else None, - copy_, - ) - save_to_dict( - res, - cls, - "storage_Emax", - (lambda li: [float(el) for el in li]) if as_list else None, - copy_, - ) - save_to_dict( - res, - cls, - "storage_Emin", - (lambda li: [float(el) for el in li]) if as_list else None, - copy_, - ) - save_to_dict( - res, - cls, - "storage_max_p_prod", - (lambda li: [float(el) for el in li]) if as_list else None, - copy_, - ) - save_to_dict( - res, - cls, - "storage_max_p_absorb", - (lambda li: [float(el) for el in li]) if as_list else None, - copy_, - ) - save_to_dict( - res, - cls, - "storage_marginal_cost", - (lambda li: [float(el) for el in li]) if as_list else None, - copy_, - ) - save_to_dict( - res, - cls, - "storage_loss", - (lambda li: [float(el) for el in li]) if as_list else None, - copy_, - ) - save_to_dict( - res, - cls, - "storage_charging_efficiency", - (lambda li: [float(el) for el in li]) if as_list else None, - copy_, - ) - save_to_dict( - res, - cls, - "storage_discharging_efficiency", - (lambda li: [float(el) for el in li]) if as_list else None, - copy_, - ) - # alert or alarm - if cls.assistant_warning_type is not None: - res["assistant_warning_type"] = str(cls.assistant_warning_type) - else: - res["assistant_warning_type"] = None + # alert or alarm + if cls.assistant_warning_type is not None: + res["assistant_warning_type"] = str(cls.assistant_warning_type) + else: + res["assistant_warning_type"] = None + + # area for the alarm feature + res["dim_alarms"] = cls.dim_alarms + save_to_dict( + res, cls, "alarms_area_names", (lambda li: [str(el) for el in li]), copy_ + ) + save_to_dict( + res, + cls, + "alarms_lines_area", + ( + lambda dict_: { + str(l_nm): [str(ar_nm) for ar_nm in areas] + for l_nm, areas in dict_.items() + } + ), + copy_, + ) + save_to_dict( + res, + cls, + "alarms_area_lines", + (lambda lili: [[str(l_nm) for l_nm in lines] for lines in lili]), + copy_, + ) + + # number of line alert for the alert feature + res['dim_alerts'] = cls.dim_alerts + # save alert line names to dict + save_to_dict( + res, cls, "alertable_line_names", (lambda li: [str(el) for el in li]) if as_list else None, copy_ + ) + save_to_dict( + res, cls, "alertable_line_ids", (lambda li: [int(el) for el in li]) if as_list else None, copy_ + ) + - # area for the alarm feature - res["dim_alarms"] = cls.dim_alarms - + if cls.detailed_topo_desc is not None: + res["detailed_topo_desc"] = {} + cls.detailed_topo_desc.save_to_dict(res["detailed_topo_desc"], as_list=as_list, copy_=copy_) - save_to_dict( - res, cls, "alarms_area_names", (lambda li: [str(el) for el in li]), copy_ - ) - save_to_dict( - res, - cls, - "alarms_lines_area", - ( - lambda dict_: { - str(l_nm): [str(ar_nm) for ar_nm in areas] - for l_nm, areas in dict_.items() - } - ), - copy_, - ) - save_to_dict( - res, - cls, - "alarms_area_lines", - (lambda lili: [[str(l_nm) for l_nm in lines] for lines in lili]), - copy_, - ) - - # number of line alert for the alert feature - res['dim_alerts'] = cls.dim_alerts - # save alert line names to dict - save_to_dict( - res, cls, "alertable_line_names", (lambda li: [str(el) for el in li]) if as_list else None, copy_ - ) - save_to_dict( - res, cls, "alertable_line_ids", (lambda li: [int(el) for el in li]) if as_list else None, copy_ - ) - - if cls.detailed_topo_desc is not None: - res["detailed_topo_desc"] = {} - 
cls.detailed_topo_desc.save_to_dict(res["detailed_topo_desc"], as_list=as_list, copy_=copy_) + # avoid further computation and save it + if not as_list: + cls._CLS_DICT = res.copy() return res @staticmethod - def _make_cls_dict_extended(cls, res, as_list=True, copy_=True): - """add the n_gen and all in the class created""" - GridObjects._make_cls_dict(cls, res, as_list=as_list, copy_=copy_) + def _make_cls_dict_extended(cls, res: CLS_AS_DICT_TYPING, as_list=True, copy_=True, _topo_vect_only=False): + """add the n_gen and all in the class created + + Notes + ------- + _topo_vect_only: this function is called once when the backend is initialized in `backend.load_grid` + (in `backend._compute_pos_big_topo`) and then once when everything is set up + (after redispatching and storage data are loaded). + + This is why I need the `_topo_vect_only` flag that tells this function when it's called only for + `topo_vect` related attributed + + """ + if cls._CLS_DICT_EXTENDED is not None and not as_list and not _topo_vect_only: + # speed optimization: it has already been computed, so + # I reuse it (class attr are const) + for k, v in cls._CLS_DICT_EXTENDED.items(): + if copy_: + res[k] = copy.deepcopy(v) + else: + res[k] = v + return + + GridObjects._make_cls_dict(cls, res, as_list=as_list, copy_=copy_, _topo_vect_only=_topo_vect_only) res["n_gen"] = cls.n_gen res["n_load"] = cls.n_load res["n_line"] = cls.n_line res["n_sub"] = cls.n_sub res["dim_topo"] = 1 * cls.dim_topo - # shunt - res["n_shunt"] = cls.n_shunt - res["shunts_data_available"] = cls.shunts_data_available # storage res["n_storage"] = cls.n_storage - # redispatching / curtailment - res[ - "redispatching_unit_commitment_availble" - ] = cls.redispatching_unit_commitment_availble + # shunt (not in topo vect but might be usefull) + res["shunts_data_available"] = cls.shunts_data_available + res["n_shunt"] = cls.n_shunt + + if not _topo_vect_only: + # all the attributes bellow are not needed for the "first call" + # to this function when the elements are put together in the topo_vect. + # Indeed, at this stage (first call in the backend.load_grid) these + # attributes are not loaded yet + + # redispatching / curtailment + res[ + "redispatching_unit_commitment_availble" + ] = cls.redispatching_unit_commitment_availble + + # n_busbar_per_sub + res["n_busbar_per_sub"] = cls.n_busbar_per_sub + + # avoid further computation and save it + if not as_list and not _topo_vect_only: + cls._CLS_DICT_EXTENDED = res.copy() @classmethod def cls_to_dict(cls): @@ -3557,7 +4177,7 @@ def cls_to_dict(cls): The representation of the object as a dictionary that can be json serializable. 
""" res = {} - GridObjects._make_cls_dict(cls, res) + cls._make_cls_dict(cls, res) return res @staticmethod @@ -3590,14 +4210,29 @@ class res(GridObjects): cls = res if "glop_version" in dict_: - cls.glop_version = dict_["glop_version"] + cls.glop_version = str(dict_["glop_version"]) else: cls.glop_version = cls.BEFORE_COMPAT_VERSION - if "_PATH_ENV" in dict_: - cls._PATH_ENV = str(dict_["_PATH_ENV"]) + if "_PATH_GRID_CLASSES" in dict_: + if dict_["_PATH_GRID_CLASSES"] is not None: + cls._PATH_GRID_CLASSES = str(dict_["_PATH_GRID_CLASSES"]) + else: + cls._PATH_GRID_CLASSES = None + elif "_PATH_ENV" in dict_: + # legacy mode in grid2op <= 1.10.1 this was saved in "PATH_ENV" + if dict_["_PATH_ENV"] is not None: + cls._PATH_GRID_CLASSES = str(dict_["_PATH_ENV"]) + else: + cls._PATH_GRID_CLASSES = None + else: + cls._PATH_GRID_CLASSES = None + + if 'n_busbar_per_sub' in dict_: + cls.n_busbar_per_sub = int(dict_["n_busbar_per_sub"]) else: - cls._PATH_ENV = None + # compat version: was not set + cls.n_busbar_per_sub = DEFAULT_N_BUSBAR_PER_SUB cls.name_gen = extract_from_dict( dict_, "name_gen", lambda x: np.array(x).astype(str) @@ -3730,32 +4365,33 @@ class res(GridObjects): dict_, "storage_pos_topo_vect", lambda x: np.array(x).astype(dt_int) ) cls.n_storage = len(cls.name_storage) + # storage static data - extract_from_dict(dict_, "storage_type", lambda x: np.array(x).astype(str)) - extract_from_dict( + cls.storage_type = extract_from_dict(dict_, "storage_type", lambda x: np.array(x).astype(str)) + cls.storage_Emax = extract_from_dict( dict_, "storage_Emax", lambda x: np.array(x).astype(dt_float) ) - extract_from_dict( + cls.storage_Emin = extract_from_dict( dict_, "storage_Emin", lambda x: np.array(x).astype(dt_float) ) - extract_from_dict( + cls.storage_max_p_prod = extract_from_dict( dict_, "storage_max_p_prod", lambda x: np.array(x).astype(dt_float) ) - extract_from_dict( + cls.storage_max_p_absorb = extract_from_dict( dict_, "storage_max_p_absorb", lambda x: np.array(x).astype(dt_float) ) - extract_from_dict( + cls.storage_marginal_cost = extract_from_dict( dict_, "storage_marginal_cost", lambda x: np.array(x).astype(dt_float) ) - extract_from_dict( + cls.storage_loss = extract_from_dict( dict_, "storage_loss", lambda x: np.array(x).astype(dt_float) ) - extract_from_dict( + cls.storage_charging_efficiency = extract_from_dict( dict_, "storage_charging_efficiency", lambda x: np.array(x).astype(dt_float), ) - extract_from_dict( + cls.storage_discharging_efficiency = extract_from_dict( dict_, "storage_discharging_efficiency", lambda x: np.array(x).astype(dt_float), @@ -3804,10 +4440,14 @@ class res(GridObjects): if "detailed_topo_desc" in dict_: cls.detailed_topo_desc = DetailedTopoDescription.from_dict(dict_["detailed_topo_desc"]) + # save the representation of this class as dict + tmp = {} + cls._make_cls_dict_extended(cls, tmp, as_list=False, copy_=True) + # retrieve the redundant information that are not stored (for efficiency) obj_ = cls() obj_._compute_pos_big_topo_cls() - cls = cls.init_grid(obj_, force=True) + cls = cls.init_grid(obj_) # , force=True return cls() @classmethod @@ -3867,9 +4507,7 @@ def same_grid_class(cls, other_cls) -> bool: me_dict = {} GridObjects._make_cls_dict_extended(cls, me_dict, as_list=False, copy_=False) other_cls_dict = {} - GridObjects._make_cls_dict_extended( - other_cls, other_cls_dict, as_list=False, copy_=False - ) + GridObjects._make_cls_dict_extended(other_cls, other_cls_dict, as_list=False, copy_=False) if me_dict.keys() - other_cls_dict.keys(): # 
one key is in me but not in other @@ -3924,9 +4562,9 @@ def init_grid_from_dict_for_pickle(name_res, orig_cls, cls_attr): object in the __reduce__ method. """ res_cls = None - if "_PATH_ENV" in cls_attr and cls_attr["_PATH_ENV"] is not None: + if "_PATH_GRID_CLASSES" in cls_attr and cls_attr["_PATH_GRID_CLASSES"] is not None: res_cls = GridObjects._build_cls_from_import( - name_res, cls_attr["_PATH_ENV"] + name_res, cls_attr["_PATH_GRID_CLASSES"] ) # check if the class already exists, if so returns it @@ -3958,11 +4596,13 @@ def __reduce__(self): """ It here to avoid issue with pickle. But the problem is that it's also used by deepcopy... So its implementation is used a lot + + see https://docs.python.org/3/library/pickle.html#object.__reduce__ """ # TODO this is not really a convenient use of that i'm sure ! # Try to see if it can be better cls_attr_as_dict = {} - GridObjects._make_cls_dict_extended(type(self), cls_attr_as_dict, as_list=False) + GridObjects._make_cls_dict_extended(type(self), cls_attr_as_dict, as_list=False) # TODO save that in the class definition if hasattr(self, "__getstate__"): my_state = self.__getstate__() else: @@ -3984,7 +4624,7 @@ def __reduce__(self): ) @classmethod - def local_bus_to_global(cls, local_bus, to_sub_id): + def local_bus_to_global(cls, local_bus: np.ndarray, to_sub_id: np.ndarray) -> np.ndarray: """This function translate "local bus" whose id are in a substation, to "global bus id" whose id are consistent for the whole grid. @@ -3993,24 +4633,30 @@ def local_bus_to_global(cls, local_bus, to_sub_id): global id 41 or 40 or 39 or etc. .. note:: - Typically, "local bus" are numbered 1 or 2. They represent the id of the busbar to which the element + Typically, "local bus" are numbered 1, 2, ... cls.n_busbar_per_sub. They represent the id of the busbar to which the element is connected IN its substation. On the other hand, the "global bus" are numberd, 0, 1, 2, 3, ..., 2 * self.n_sub. They represent some kind of "universal" labelling of the busbars of all the grid. For example, substation 0 might have busbar `0` and `self.n_sub`, substation 1 have busbar `1` and `self.n_sub + 1` etc. - [on_bus_1] + Local and global bus id represents the same thing. The difference comes down to convention. + + ..warning:: + In order to be as fast as possible, these functions do not check for "out of bound" or + "impossible" configuration. + + They assume that the input data are consistent with the grid. """ global_bus = (1 * local_bus).astype(dt_int) # make a copy - on_bus_1 = global_bus == 1 - on_bus_2 = global_bus == 2 - global_bus[on_bus_1] = to_sub_id[on_bus_1] - global_bus[on_bus_2] = to_sub_id[on_bus_2] + cls.n_sub + global_bus[local_bus < 0] = -1 + for i in range(cls.n_busbar_per_sub): + on_bus_i = local_bus == i + 1 + global_bus[on_bus_i] = to_sub_id[on_bus_i] + i * cls.n_sub return global_bus @classmethod - def local_bus_to_global_int(cls, local_bus, to_sub_id): + def local_bus_to_global_int(cls, local_bus : int, to_sub_id : int) -> int: """This function translate "local bus" whose id are in a substation, to "global bus id" whose id are consistent for the whole grid. @@ -4019,26 +4665,30 @@ def local_bus_to_global_int(cls, local_bus, to_sub_id): global id 41 or 40 or 39 or etc. .. note:: - Typically, "local bus" are numbered 1 or 2. They represent the id of the busbar to which the element + Typically, "local bus" are numbered 1, 2, ... cls.n_busbar_per_sub. They represent the id of the busbar to which the element is connected IN its substation. 
- On the other hand, the "global bus" are numberd, 0, 1, 2, 3, ..., 2 * self.n_sub. They represent some kind of + On the other hand, the "global bus" are numberd, 0, 1, 2, 3, ..., cls.n_busbar_per_sub * self.n_sub. They represent some kind of "universal" labelling of the busbars of all the grid. For example, substation 0 might have busbar `0` and `self.n_sub`, substation 1 have busbar `1` and `self.n_sub + 1` etc. Local and global bus id represents the same thing. The difference comes down to convention. .. note:: - This is the "non vectorized" version that applies only on integers. + This is the "non vectorized" version that applies only on integers. + + ..warning:: + In order to be as fast as possible, these functions do not check for "out of bound" or + "impossible" configuration. + + They assume that the input data are consistent with the grid. """ - if local_bus == 1: - return to_sub_id - elif local_bus == 2: - return to_sub_id + cls.n_sub - return -1 + if local_bus == -1: + return -1 + return to_sub_id + (int(local_bus) - 1) * cls.n_sub @classmethod - def global_bus_to_local(cls, global_bus, to_sub_id): + def global_bus_to_local(cls, global_bus: np.ndarray, to_sub_id: np.ndarray) -> np.ndarray: """This function translate "local bus" whose id are in a substation, to "global bus id" whose id are consistent for the whole grid. @@ -4047,23 +4697,29 @@ def global_bus_to_local(cls, global_bus, to_sub_id): global id 41 or 40 or 39 or etc. .. note:: - Typically, "local bus" are numbered 1 or 2. They represent the id of the busbar to which the element + Typically, "local bus" are numbered 1, 2, ... cls.n_busbar_per_sub. They represent the id of the busbar to which the element is connected IN its substation. - On the other hand, the "global bus" are numberd, 0, 1, 2, 3, ..., 2 * self.n_sub. They represent some kind of + On the other hand, the "global bus" are numberd, 0, 1, 2, 3, ..., cls.n_busbar_per_sub * self.n_sub. They represent some kind of "universal" labelling of the busbars of all the grid. For example, substation 0 might have busbar `0` and `self.n_sub`, substation 1 have busbar `1` and `self.n_sub + 1` etc. Local and global bus id represents the same thing. The difference comes down to convention. + + ..warning:: + In order to be as fast as possible, these functions do not check for "out of bound" or + "impossible" configuration. + + They assume that the input data are consistent with the grid. """ res = (1 * global_bus).astype(dt_int) # make a copy - res[global_bus < cls.n_sub] = 1 - res[global_bus >= cls.n_sub] = 2 + for i in range(cls.n_busbar_per_sub): + res[(i * cls.n_sub <= global_bus) & (global_bus < (i+1) * cls.n_sub)] = i + 1 res[global_bus == -1] = -1 return res @classmethod - def global_bus_to_local_int(cls, global_bus, to_sub_id): + def global_bus_to_local_int(cls, global_bus: int, to_sub_id: int) -> int: """This function translate "local bus" whose id are in a substation, to "global bus id" whose id are consistent for the whole grid. @@ -4072,22 +4728,27 @@ def global_bus_to_local_int(cls, global_bus, to_sub_id): global id 41 or 40 or 39 or etc. .. note:: - Typically, "local bus" are numbered 1 or 2. They represent the id of the busbar to which the element + Typically, "local bus" are numbered 1, 2, ... cls.n_busbar_per_sub. They represent the id of the busbar to which the element is connected IN its substation. - On the other hand, the "global bus" are numberd, 0, 1, 2, 3, ..., 2 * self.n_sub. 
They represent some kind of + On the other hand, the "global bus" are numberd, 0, 1, 2, 3, ..., cls.n_busbar_per_sub * self.n_sub. They represent some kind of "universal" labelling of the busbars of all the grid. For example, substation 0 might have busbar `0` and `self.n_sub`, substation 1 have busbar `1` and `self.n_sub + 1` etc. - Local and global bus id represents the same thing. The difference comes down to convention. + Local and global bus id represents the same thing. The difference comes down to convention. + + ..warning:: + In order to be as fast as possible, these functions do not check for "out of bound" or + "impossible" configuration. + + They assume that the input data are consistent with the grid. """ if global_bus == -1: return -1 - if global_bus < cls.n_sub: - return 1 - if global_bus >= cls.n_sub: - return 2 - return -1 + for i in range(cls.n_busbar_per_sub): + if global_bus < (i+1) * cls.n_sub: + return i+1 + raise EnvError(f"This environment can have only {cls.n_busbar_per_sub} independant busbars per substation.") @staticmethod def _format_int_vect_to_cls_str(int_vect): @@ -4115,7 +4776,7 @@ def _format_bool_vect_to_cls_str(bool_vect): @classmethod def _get_full_cls_str(cls): - _PATH_ENV_str = "None" if cls._PATH_ENV is None else f'"{cls._PATH_ENV}"' + _PATH_ENV_str = "None" if cls._PATH_GRID_CLASSES is None else f'"{cls._PATH_GRID_CLASSES}"' attr_list_vect_str = None attr_list_set_str = "{}" if cls.attr_list_vect is not None: @@ -4249,9 +4910,12 @@ def format_el_int(values): def format_el(values): return ",".join([f'"{el}"' for el in values]) - tmp_tmp_ = [f'"{k}": [{format_el(v)}]' for k, v in cls.grid_layout.items()] - tmp_ = ",".join(tmp_tmp_) - grid_layout_str = f"{{{tmp_}}}" + if cls.grid_layout is not None: + tmp_tmp_ = [f'"{k}": [{format_el(v)}]' for k, v in cls.grid_layout.items()] + tmp_ = ",".join(tmp_tmp_) + grid_layout_str = f"{{{tmp_}}}" + else: + grid_layout_str = "None" name_shunt_str = ",".join([f'"{el}"' for el in cls.name_shunt]) shunt_to_subid_str = GridObjects._format_int_vect_to_cls_str(cls.shunt_to_subid) @@ -4311,8 +4975,10 @@ def format_el(values): class {cls.__name__}({cls._INIT_GRID_CLS.__name__}): BEFORE_COMPAT_VERSION = \"{cls.BEFORE_COMPAT_VERSION}\" glop_version = grid2op.__version__ # tells it's the installed grid2op version - _PATH_ENV = {_PATH_ENV_str} - _INIT_GRID_CLS = {cls._INIT_GRID_CLS.__name__} + _PATH_GRID_CLASSES = {_PATH_ENV_str} # especially do not modify that + _INIT_GRID_CLS = {cls._INIT_GRID_CLS.__name__} + _CLS_DICT = None # init once to avoid yet another serialization of the class as dict (in make_cls_dict) + _CLS_DICT_EXTENDED = None # init once to avoid yet another serialization of the class as dict (in make_cls_dict) SUB_COL = 0 LOA_COL = 1 @@ -4328,12 +4994,13 @@ class {cls.__name__}({cls._INIT_GRID_CLS.__name__}): # name of the objects env_name = "{cls.env_name}" - name_load = np.array([{name_load_str}]) - name_gen = np.array([{name_gen_str}]) - name_line = np.array([{name_line_str}]) - name_sub = np.array([{name_sub_str}]) - name_storage = np.array([{name_storage_str}]) + name_load = np.array([{name_load_str}], dtype=str) + name_gen = np.array([{name_gen_str}], dtype=str) + name_line = np.array([{name_line_str}], dtype=str) + name_sub = np.array([{name_sub_str}], dtype=str) + name_storage = np.array([{name_storage_str}], dtype=str) + n_busbar_per_sub = {cls.n_busbar_per_sub} n_gen = {cls.n_gen} n_load = {cls.n_load} n_line = {cls.n_line} @@ -4395,7 +5062,7 @@ class {cls.__name__}({cls._INIT_GRID_CLS.__name__}): 
gen_renewable = {gen_renewable_str} # storage unit static data - storage_type = np.array([{storage_type_str}]) + storage_type = np.array([{storage_type_str}], dtype=str) storage_Emax = {storage_Emax_str} storage_Emin = {storage_Emin_str} storage_max_p_prod = {storage_max_p_prod_str} @@ -4425,7 +5092,7 @@ class {cls.__name__}({cls._INIT_GRID_CLS.__name__}): alarms_area_lines = {alarms_area_lines_str} # alert feature - dim_alert = {cls.dim_alerts} + dim_alerts = {cls.dim_alerts} alertable_line_names = {alertable_line_names_str} alertable_line_ids = {alertable_line_ids_str} diff --git a/grid2op/Space/SerializableSpace.py b/grid2op/Space/SerializableSpace.py index 7aa514a69..379743169 100644 --- a/grid2op/Space/SerializableSpace.py +++ b/grid2op/Space/SerializableSpace.py @@ -61,7 +61,7 @@ class SerializableSpace(GridObjects, RandomObject): """ - def __init__(self, gridobj, subtype=object, _init_grid=True): + def __init__(self, gridobj, subtype=object, _init_grid=True, _local_dir_cls=None): """ subtype: ``type`` @@ -83,7 +83,7 @@ def __init__(self, gridobj, subtype=object, _init_grid=True): RandomObject.__init__(self) self._init_subtype = subtype # do not use, use to save restore only !!! if _init_grid: - self.subtype = subtype.init_grid(gridobj) + self.subtype = subtype.init_grid(gridobj, _local_dir_cls=_local_dir_cls) from grid2op.Action import ( BaseAction, ) # lazy loading to prevent circular reference @@ -175,7 +175,7 @@ def from_dict(dict_): path = dict_ if not os.path.exists(path): raise Grid2OpException( - 'Unable to find the file "{}" to load the ObservationSpace'.format( + 'Unable to find the file "{}" to load the grid2op classes'.format( path ) ) @@ -185,7 +185,8 @@ def from_dict(dict_): gridobj = GridObjects.from_dict(dict_) actionClass_str = extract_from_dict(dict_, "_init_subtype", str) actionClass_li = actionClass_str.split(".") - + _local_dir_cls = None # TODO when reading back the data + if actionClass_li[-1] in globals(): subtype = globals()[actionClass_li[-1]] else: @@ -265,8 +266,8 @@ def from_dict(dict_): msg_err_ = msg_err_.format(actionClass_str) raise Grid2OpException(msg_err_) # create the proper SerializableSpace class for this environment - CLS = SerializableSpace.init_grid(gridobj) - res = CLS(gridobj=gridobj, subtype=subtype, _init_grid=True) + CLS = SerializableSpace.init_grid(gridobj, _local_dir_cls=_local_dir_cls) + res = CLS(gridobj=gridobj, subtype=subtype, _init_grid=True, _local_dir_cls=_local_dir_cls) return res def cls_to_dict(self): diff --git a/grid2op/Space/__init__.py b/grid2op/Space/__init__.py index b18057aeb..5736eb2a7 100644 --- a/grid2op/Space/__init__.py +++ b/grid2op/Space/__init__.py @@ -1,6 +1,12 @@ -__all__ = ["RandomObject", "SerializableSpace", "GridObjects", "DetailedTopoDescription"] +__all__ = ["RandomObject", + "SerializableSpace", + "GridObjects", + "DEFAULT_N_BUSBAR_PER_SUB", + "DetailedTopoDescription", + "AddDetailedTopoIEEE"] from grid2op.Space.RandomObject import RandomObject from grid2op.Space.SerializableSpace import SerializableSpace -from grid2op.Space.GridObjects import GridObjects +from grid2op.Space.GridObjects import GridObjects, DEFAULT_N_BUSBAR_PER_SUB from grid2op.Space.detailed_topo_description import DetailedTopoDescription +from grid2op.Space.addDetailedTopoIEEE import AddDetailedTopoIEEE diff --git a/grid2op/Space/addDetailedTopoIEEE.py b/grid2op/Space/addDetailedTopoIEEE.py new file mode 100644 index 000000000..bbbf474e5 --- /dev/null +++ b/grid2op/Space/addDetailedTopoIEEE.py @@ -0,0 +1,93 @@ +# Copyright (c) 
2024, RTE (https://www.rte-france.com) +# See AUTHORS.txt +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. + +from typing import Optional +import numpy as np + + +from grid2op.Space.detailed_topo_description import DetailedTopoDescription + + +class AddDetailedTopoIEEE: + """This class allows to add some detailed topology for the ieee networks, because + most of the time this information is not present in the released grid (only + buses information is present in the description of the IEEE grid used for grid2op + environment as of writing). + + If you want to use it, you can by doing the following (or something similar) + + .. code-block:: python + + import grid2op + from grid2op.Space import AddDetailedTopoIEEE + from grid2op.Backend import PandaPowerBackend # or any other backend (*eg* lightsim2grid) + + class PandaPowerBackendWithDetailedTopo(AddDetailedTopoIEEE, PandaPowerBackend): + pass + + env_name = "l2rpn_case14_sandbox" + env = grid2op.make(env_name, backend=PandaPowerBackendWithDetailedTopo()) + # do wathever you want, with the possibility to operate switches. + + More specifically, this class will build each substation in the following way, + with each substation : + + - counting as many busbars as there are of `n_busbar_per_substation` on the grid + (2 by default, but can be changed with `env = grid2op.make(..., n_busbar=XXX)` + - having the possibility to connect each pairs of busbar together with an + appropriate switch (so you will have, **per substation** exactly + `n_busbar * (n_busbar - 1) // 2` switches allowing to connect them) + - having the possibility to disconnect each element of the grid independantly of + anything else. This means there is `n_load + n_gen + n_storage + 2 * n_line + n_shunt` + such switch like this in total + - having the possibility to connect each element to each busbar. This means + there is `n_busbar * (n_load + n_gen + n_storage + 2 * n_line + n_shunt)` such + switches on the grid. + + Here is the number of switches for some released grid2op environment (with 2 busbars - the default- per substation ): + + - `l2rpn_case14_sandbox`: 188 + - `l2rpn_neurips_2020_track1`: 585 + - `l2rpn_neurips_2020_track2`: 1759 + - `l2rpn_wcci_2022`: 1756 + - `l2rpn_idf_2023`: 1780 + + .. warning:: + As you can see, by using directly the switches to control the grid, the action space blows up. In this case you can + achieve exactly the same as the "classic" grid2op representation, but instead of having + an action space with a size of `n_load + n_gen + n_storage + 2 * n_line + n_shunt` (for chosing on which busbar you + want to connect the element) and again `n_load + n_gen + n_storage + 2 * n_line + n_shunt` (for chosing if you + want to connect / disconnect each element) you end up with an action space of + `(n_busbar + 1) * (n_load + n_gen + n_storage + 2 * n_line + n_shunt) + n_sub * (n_busbar * (n_busbar - 1) // 2)` + + This is of course to represent **exactly** the same actions: there are no more (and no less) action you can + do with the switches that you cannot do in the "original" grid2op representation. 
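# A quick arithmetic check of the figures quoted in this docstring, using the
# usual element counts of l2rpn_case14_sandbox (14 substations, 11 loads,
# 6 generators, 20 lines, 1 shunt, no storage). These counts are assumed here
# purely to illustrate the formulas; they are not read from the environment:
n_sub, n_busbar = 14, 2
n_load, n_gen, n_storage, n_line, n_shunt = 11, 6, 0, 20, 1

n_elem = n_load + n_gen + n_storage + 2 * n_line + n_shunt   # 58 connectable elements
couplers = n_sub * (n_busbar * (n_busbar - 1) // 2)          # 14 busbar couplers
n_switches = couplers + (n_busbar + 1) * n_elem              # 14 + 3 * 58 = 188

original_repr = 2 * n_elem                                   # 116 in the table below
print(n_switches, original_repr)                             # -> 188 116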
+ + This gives, for some grid2op environments: + + ========================== ======================= ============= + env name original representation with switches + ========================== ======================= ============= + l2rpn_case14_sandbox 116 188 + l2rpn_neurips_2020_track1 366 585 + l2rpn_neurips_2020_track2 1094 1759 + l2rpn_wcci_2022 1092 1756 + l2rpn_idf_2023 1108 1780 + ========================== ======================= ============= + + + """ + def load_grid(self, path=None, filename=None): + super().load_grid(path, filename) + self.detailed_topo_desc = DetailedTopoDescription.from_ieee_grid(self) + + def get_switches_position(self) -> Optional[np.ndarray]: + topo_vect = self.get_topo_vect() + *_, shunt_bus = self.shunt_info() + res = self.detailed_topo_desc.compute_switches_position(topo_vect, shunt_bus) + return res diff --git a/grid2op/Space/detailed_topo_description.py b/grid2op/Space/detailed_topo_description.py index 482a5ccbe..91a55ceb5 100644 --- a/grid2op/Space/detailed_topo_description.py +++ b/grid2op/Space/detailed_topo_description.py @@ -6,217 +6,1139 @@ # SPDX-License-Identifier: MPL-2.0 # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. - +from typing import Optional import numpy as np - +import networkx as nx +import copy + +import grid2op from grid2op.dtypes import dt_int, dt_bool - +from grid2op.Exceptions import Grid2OpException, ImpossibleTopology from grid2op.Space.space_utils import extract_from_dict, save_to_dict class DetailedTopoDescription(object): """This class represent the detail description of the - switches in the grid. + switches in the grid. It allows to use new types of actions (`act.set_switches = ..` # TODO detailed topo) + and to get some extra information in the observation (`obs.switches_state` # TODO detailed topo). + + This class only stores the existence of switches. It just informs + the user that "just that it exists a switch between this and this". It does + not say whether switches / breakers / etc. are opened or closed (for that you need to have + a look at the observation) and it does not allow to modify the switches state (for that you + need to use the action). + + If set, it is "const" / "read only" / immutable. + It should be initialized by the backend and never modified afterwards. + + It is a const member of the main grid2op classes (not the object, the class !), just like the `n_sub` or + `lines_or_pos_topo_vect` property for example. + + # TODO detailed topo: remove the switches to topo_vect id and make a "conn node id to topo_vect id" + # TODO detailed topo: remove in the doc the requirement of conn_node_1 and conn_node_2 + # TODO detailed topo: remove the element types from the switch matrix + + In order to fill a :class:`DetailedTopoDescription` you need to fill the + following attributes : + + - :attr:`DetailedTopoDescription.conn_node_name` : for each connectivity node, you provide a name. For now we + recommend using it (at least for debug purpose) but later this vector might contain None for internal connectivity + node. + - :attr:`DetailedTopoDescription.conn_node_to_subid` : for each connectiviy node, you provide the substation to + which it is connected. The substation should exist in the grid. All substation should have a least one connectivity + node at the moment. + - :attr:`DetailedTopoDescription.switches` : this is the "main" information about detailed topology. It provide the + information about each switches on your grid. 
It is a matrix with 4 columns: + + - the first is the substation id to which this switches belong. As of now you have to fill it manually + and this information should match the one given by the connectivity node this switch + represent. TODO detailed topo: have a routine to add it automatically afterwards + - the second one is an information about the element - *eg* load or generator or side of powerline- it concerns (if any) + - the third one is the ID of one of the connectivity node this switch is attached to + - the fourth one is the ID of the other connectivity node this switch is attached to + + - :attr:`DetailedTopoDescription.conn_node_to_topovect_id` : for each connectivity node, it gives the index in the + topo_vect vector to which this connectivity node is connected. Put -1 for conn node not represented in + the "topo_vect" vector + otherwise the id of the topo_vect converned by this switch (should be -1 for everything except for + switch whose conn_node_id_1 represents element modeled in the topo_vect eg load, generator or side of powerline) + (TODO detailed topo: something again that for now you should manually process but that will + be automatically processed by grid2op in the near future). + - :attr:`DetailedTopoDescription.conn_node_to_shunt_id` : for each connectivity node, it gives the index of the shunt it + concerns (should be -1 except for connectivity node that concerns shunts) + (TODO detailed topo: something again that for now you should manually process but that will + be automatically processed by grid2op in the near future). + - :attr:`DetailedTopoDescription.load_to_conn_node_id` : for each load, it gives by which connectivity + node it is represented. It should match the info in the colum 2 (third column) of the switches matrix. + (TODO detailed topo: something again that for now you should manually process but that will + be automatically processed by grid2op in the near future). + - :attr:`DetailedTopoDescription.gen_to_conn_node_id` : for each generator, it gives by which connectivity + node it is represented. It should match the info in the colum 2 (third column) of the switches matrix. + (TODO detailed topo: something again that for now you should manually process but that will + be automatically processed by grid2op in the near future). + - :attr:`DetailedTopoDescription.line_or_to_conn_node_id` : for each "origin" side of powerline, + it gives by which connectivity + node it is represented. It should match the info in the colum 2 (third column) of the switches matrix. + (TODO detailed topo: something again that for now you should manually process but that will + be automatically processed by grid2op in the near future). + - :attr:`DetailedTopoDescription.line_ex_to_conn_node_id` : for each "extremity" side of powerline, + it gives by which connectivity + node it is represented. It should match the info in the colum 2 (third column) of the switches matrix. + (TODO detailed topo: something again that for now you should manually process but that will + be automatically processed by grid2op in the near future). + - :attr:`DetailedTopoDescription.storage_to_conn_node_id` : for each storage unit, + it gives by which connectivity + node it is represented. It should match the info in the colum 2 (third column) of the switches matrix. + (TODO detailed topo: something again that for now you should manually process but that will + be automatically processed by grid2op in the near future). 
+    - :attr:`DetailedTopoDescription.shunt_to_conn_node_id` : for each shunt,
+      it gives the connectivity node by which it is represented. It should match the info in
+      column 2 (third column) of the switches matrix.
+      (TODO detailed topo: something again that for now you should manually process but that will
+      be automatically processed by grid2op in the near future).
+    - :attr:`DetailedTopoDescription.busbar_section_to_conn_node_id` : this vector has the size of the number
+      of "busbar sections" in the grid. For each busbar section, it gives the
+      connectivity node by which it is represented.
+    - :attr:`DetailedTopoDescription.busbar_section_to_subid` : this vector has the same size as
+      :attr:`DetailedTopoDescription.busbar_section_to_conn_node_id` and gives
+      the substation id each busbar section is part of. It should match the
+      information in `self.switches` too
+      (TODO detailed topo: something again that for now you should manually process but that will
+      be automatically processed by grid2op in the near future).
+
+    .. warning::
+        If a switch connects an element - *eg* a load, a generator or a side of a powerline - on one of its sides, the
+        connectivity node of this element should be in the 3rd column (index 2 in python) of the switches
+        matrix and not in the 4th column (index 3 in python).
+
+    .. danger::
+        As opposed to some other elements of grid2op, by default, connectivity nodes should be labeled
+        in a "global" way. This means that there is exactly one connectivity node labeled `1`
+        for the whole grid (as opposed to 1 per substation !).
+
+        They are labelled the same way as *eg* `load` (there is a unique `load 1`) and not like `busbar in the
+        substation` where there are "as many busbar 1 as there are substations".
+
+        TODO detailed topo: this is `True` for now but there would be nothing (except some added tests
+        and maybe a bit of code) to allow a "substation local" labelling.
+
+    To create a "detailed description of the switches", somewhere in the implementation of your
+    backend you have a piece of code looking like:
-    It does not say whether switches / breakers / etc. are opened or closed
-    just that it exists a switch between this and this
+    .. code-block:: python
-    It is const, should be initialized by the backend and never modified afterwards.
+        import os
+        from grid2op.Backend import Backend
+        from typing import Optional, Union, Tuple
+
+        class MyBackend(Backend):
+            # some implementation of other methods...
+
+            def load_grid(self,
+                          path : Union[os.PathLike, str],
+                          filename : Optional[Union[os.PathLike, str]]=None) -> None:
+                # do the regular implementation of the load_grid function
+                ...
+                ...
+
+                # once done, then you can create a detailed topology
+                dtd = DetailedTopoDescription()
+
+                # you fill it with the data in the grid you read
+                # (at this stage you tell grid2op what the grid is made of)
+                dtd.conn_node_name = ...
+                dtd.conn_node_to_subid = ...
+                dtd.switches = ...
+                dtd.conn_node_to_topovect_id = ...
+                dtd.conn_node_to_shunt_id = ...
+                dtd.load_to_conn_node_id = ...
+                dtd.gen_to_conn_node_id = ...
+                dtd.line_or_to_conn_node_id = ...
+                dtd.line_ex_to_conn_node_id = ...
+                dtd.storage_to_conn_node_id = ...
+                dtd.shunt_to_conn_node_id = ...
+                dtd.busbar_section_to_conn_node_id = ...
+                dtd.busbar_section_to_subid = ...
+ + # and then you assign it as a member of this class + self.detailed_topo_desc = dtd + + # some other implementation of other methods + + Examples + -------- - It is a const member of the class (not the object, the class !) + Unfortunately, most of the ieee grid (used in released grid2op environments) does not + come with a detailed description of the topology. They only describe the "nodal" topology (buses) + and not how things are wired together with switches. + + If you want to use this feature with released grid2op environment, + you can create a new backend class, and use it to create a new environment like this: + + .. code-block:: python + + import grid2op + from grid2op.Space import AddDetailedTopoIEEE + from grid2op.Backend import PandaPowerBackend # or any other backend (*eg* lightsim2grid) + + class PandaPowerBackendWithDetailedTopo(AddDetailedTopoIEEE, PandaPowerBackend): + pass + + env_name = "l2rpn_case14_sandbox" + env = grid2op.make(env_name, backend=PandaPowerBackendWithDetailedTopo()) + # do wathever you want + + """ - SUB_COL = 0 - OBJ_TYPE_COL = 1 - OBJ_ID_COL = 2 - BUSBAR_ID_COL = 3 + #: In the :attr:`DetailedTopoDescription.switches` table, tells that column 0 + #: concerns the substation + SUB_COL = 0 + + #: In the :attr:`DetailedTopoDescription.switches` table, tells that column 2 + #: concerns the id of object that this switches connects / disconnects + CONN_NODE_1_ID_COL = 1 + #: In the :attr:`DetailedTopoDescription.switches` table, tells that column 2 + #: concerns the id of the connection node that this switches connects / disconnects + CONN_NODE_2_ID_COL = 2 + + #: TODO detailed topo doc LOAD_ID = 0 + + #: TODO detailed topo doc GEN_ID = 1 + + #: TODO detailed topo doc STORAGE_ID = 2 + + #: TODO detailed topo doc LINE_OR_ID = 3 + + #: TODO detailed topo doc LINE_EX_ID = 4 + + #: TODO detailed topo doc SHUNT_ID = 5 + #: TODO detailed topo doc + OTHER = 6 + def __init__(self): - self.busbar_name = None # id / name / whatever for each busbars - self.busbar_to_subid = None # which busbar belongs to which substation + #: vector of string that has the size of the number of connection nodes on your grid + #: and for each connection node it gives... its name + self.conn_node_name = None - self.busbar_connectors = None # for each element that connects busbars, tells which busbars its connect (by id) + #: vector of int that has the size of the number of connection nodes on + #: your grid and for each connection node it gives the substation id [0...n_sub] to which + #: the connection node belongs to. + self.conn_node_to_subid = None - self.switches = None # a matrix of 'n_switches' rows and 4 columns - # col 0 gives the substation id - # col 1 gives the object type it connects (0 = LOAD, etc.) - # col 2 gives the ID of the object it connects (number between 0 and n_load-1 if previous column is 0 for example) - # col 3 gives the busbar id that this switch connects its element to + #: It is a matrix describing each switches. This matrix has 'n_switches' rows and 4 columns. + #: Each column provides an information about the switch: + #: + #: - col 0 gives the substation id + #: - col 1 TODO detailed topo doc + #: - col 2 TODO detailed topo doc + self.switches = None + + #: TODO + self.conn_node_to_topovect_id = None + + #: TODO + self.conn_node_to_shunt_id = None + + #: A list of tuple that has the same size as the number of loads on the grid. + #: For each loads, it gives the connection node ids to which (thanks to a switch) a load can be + #: connected. 
For example if `type(env)..detailed_topo_desc.load_to_conn_node_id[0]` is the tuple `(1, 15)` this means that load + #: id 0 can be connected to either connection node id 1 or connection node id 15. + #: This information is redundant with the one provided in :attr:`DetailedTopoDescription.switches` + self.load_to_conn_node_id = None + + #: Same as :attr:`DetailedTopoDescription.load_to_conn_node_id` but for generators + self.gen_to_conn_node_id = None - # for each switches says which element in the "topo_vect" it concerns [-1 for shunt] - self.switches_to_topovect_id = None - self.switches_to_shunt_id = None + #: Same as :attr:`DetailedTopoDescription.load_to_conn_node_id` but for lines (or side) + self.line_or_to_conn_node_id = None - # whether the switches connects an element represented in the topo_vect vector (unused atm) - self.in_topo_vect = None + #: Same as :attr:`DetailedTopoDescription.load_to_conn_node_id` but for lines (ex side) + self.line_ex_to_conn_node_id = None - self.load_to_busbar_id = None # for each loads, you have a tuple of busbars to which it can be connected - self.gen_to_busbar_id = None - self.line_or_to_busbar_id = None - self.line_ex_to_busbar_id = None - self.storage_to_busbar_id = None - self.shunt_to_busbar_id = None + #: Same as :attr:`DetailedTopoDescription.load_to_conn_node_id` but for storage unit + self.storage_to_conn_node_id = None + + #: Same as :attr:`DetailedTopoDescription.load_to_conn_node_id` but for shunt + self.shunt_to_conn_node_id = None + + #: For each busbar section, it gives the connection node id + #: that represent this busbar section + self.busbar_section_to_conn_node_id = None + + #: For each busbar section, it gives the substation id to which it + #: is connected + self.busbar_section_to_subid = None + + #: flag to detect that the detailed topo have been built with the + #: :func:`.DetailedTopoDescriptionfrom_ieee_grid` + #: which enables some feature that will be more generic in the future. + self._from_ieee_grid = False + + #: number of substation on the grid + #: this is automatically set when the detailed topo description + #: is processed + self._n_sub : int = -1 + + #: dimension of the "topology vector" (in grid2op) + #: this is automatically set when the detailed topo description + #: is processed + self._dim_topo : int = -1 + + #: number of shunt in the grid + #: this is automatically set when the detailed topo description + #: is processed + self._n_shunt : int = -1 + + #: INTERNAL + self._conn_node_to_bbs_conn_node_id = None + + #: INTERNAL + self._connectivity_graph = None + # TODO detailed topo: list per substation ! @classmethod - def from_init_grid(cls, init_grid): - """For now, suppose that the grid comes from ieee""" - n_sub = init_grid.n_sub + def from_ieee_grid(cls, init_grid : "grid2op.Space.GridObjects.GridObjects"): + """For now, suppose that the grid comes from ieee grids. + + See doc of :class:`AddDetailedTopoIEEE` for more information. 
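+
+        A minimal usage sketch (an illustration only, assuming `backend` is a backend instance
+        whose grid has already been loaded, mirroring what :class:`AddDetailedTopoIEEE` does in
+        its `load_grid`):
+
+        .. code-block:: python
+
+            # typically called inside (or right after) the backend `load_grid` implementation
+            dtd = DetailedTopoDescription.from_ieee_grid(backend)
+            backend.detailed_topo_desc = dtd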
+ """ + init_grid_cls = type(init_grid) + + n_sub = init_grid_cls.n_sub + n_bb_per_sub = init_grid_cls.n_busbar_per_sub + if n_bb_per_sub < 2: + raise NotImplementedError("This function has not been implemented for less " + "than 2 busbars per subs at the moment.") res = cls() - res.busbar_name = np.array([f"busbar_{i}" for i in range(2 * init_grid.n_sub)]) - res.busbar_to_subid = np.arange(n_sub) % init_grid.n_sub - - # in current environment, there are 2 busbars per substations, - # and 1 connector allows to connect both of them - nb_connector = n_sub - res.busbar_connectors = np.zeros((nb_connector, 2), dtype=dt_int) - res.busbar_connectors[:,0] = np.arange(n_sub) - res.busbar_connectors[:,1] = np.arange(n_sub) + n_sub - - # for each element (load, gen, etc.) - # gives the id of the busbar to which this element can be connected thanks to a - # switches - # in current grid2op environment, there are 2 switches for each element - # one that connects it to busbar 1 - # another one that connects it to busbar 2 - n_shunt = init_grid.n_shunt if init_grid.shunts_data_available else 0 - res.switches = np.zeros((2*(init_grid.dim_topo + n_shunt), 4), dtype=dt_int) - # add the shunts (considered as element here !) - sub_info = 1 * init_grid.sub_info - if init_grid.shunts_data_available: - for sub_id in init_grid.shunt_to_subid: + res._from_ieee_grid = True + res._n_sub = n_sub + res._dim_topo = init_grid_cls.dim_topo + res._n_shunt = init_grid_cls.n_shunt + + # define the "connection nodes" + # for ieee grid we model: + # one connection node per busbar (per sub) + # for each element (side of powerline, load, generator, storage, shunt etc.) 2 connection nodes + # (status of the element) + # conn node for each busbar + bb_conn_node = sum([[f"conn_node_sub_{subid}_busbar_{bb_i}" for bb_i in range(n_bb_per_sub)] for subid in range(n_sub)], + start=[]) + res.busbar_section_to_subid = np.repeat(np.arange(n_sub),n_bb_per_sub) + res.busbar_section_to_conn_node_id = np.arange(len(bb_conn_node)) + + el_conn_node = ([f"conn_node_load_{i}" for i in range(init_grid_cls.n_load)] + + [f"conn_node_gen_{i}" for i in range(init_grid_cls.n_gen)] + + [f"conn_node_line_or_{i}" for i in range(init_grid_cls.n_line)] + + [f"conn_node_line_ex_{i}" for i in range(init_grid_cls.n_line)] + + [f"conn_node_storage_{i}" for i in range(init_grid_cls.n_storage)] + + [f"conn_node_shunt_{i}" for i in range(init_grid_cls.n_shunt)] if init_grid_cls.shunts_data_available else [] + ) + el_breaker_conn_node = ([f"conn_node_breaker_load_{i}" for i in range(init_grid_cls.n_load)] + + [f"conn_node_breaker_gen_{i}" for i in range(init_grid_cls.n_gen)] + + [f"conn_node_breaker_line_or_{i}" for i in range(init_grid_cls.n_line)] + + [f"conn_node_breaker_line_ex_{i}" for i in range(init_grid_cls.n_line)] + + [f"conn_node_breaker_storage_{i}" for i in range(init_grid_cls.n_storage)] + + [f"conn_node_breaker_shunt_{i}" for i in range(init_grid_cls.n_shunt)] if init_grid_cls.shunts_data_available else [] + ) + res.conn_node_name = np.array(bb_conn_node + + el_conn_node + + el_breaker_conn_node) + res.conn_node_to_subid = np.array(sum([[subid for bb_i in range(n_bb_per_sub)] for subid in range(n_sub)], start=[]) + + 2* (init_grid_cls.load_to_subid.tolist() + + init_grid_cls.gen_to_subid.tolist() + + init_grid_cls.line_or_to_subid.tolist() + + init_grid_cls.line_ex_to_subid.tolist() + + init_grid_cls.storage_to_subid.tolist() + + init_grid_cls.shunt_to_subid.tolist() if init_grid_cls.shunts_data_available else [] + ) + ) + n_conn_nodes = 
res.conn_node_name.shape[0] + + # add the switches : there are 1 switches that connects all pairs + # of busbars in the substation, plus for each element: + # - 1 switch for the status of the element ("conn_node_breaker_xxx_i") + # - 1 breaker connecting the element to each busbar + n_shunt = init_grid_cls.n_shunt if init_grid_cls.shunts_data_available else 0 + nb_switch_bb_per_sub = (n_bb_per_sub * (n_bb_per_sub - 1)) // 2 # switches between busbars + nb_switch_busbars = n_sub * nb_switch_bb_per_sub # switches between busbars at each substation + nb_switch_total = nb_switch_busbars + (init_grid_cls.dim_topo + n_shunt) * (1 + n_bb_per_sub) + res.switches = np.zeros((nb_switch_total, 3), dtype=dt_int) + + # add the shunts in the "sub_info" (considered as element here !) + sub_info = 1 * init_grid_cls.sub_info + if init_grid_cls.shunts_data_available: + for sub_id in init_grid_cls.shunt_to_subid: sub_info[sub_id] += 1 - # now fill the switches: 2 switches per element, everything stored in the res.switches matrix - res.switches[:, cls.SUB_COL] = np.repeat(np.arange(n_sub), 2 * sub_info) - res.switches_to_topovect_id = np.zeros(np.sum(sub_info) * 2, dtype=dt_int) - 1 - if init_grid.shunts_data_available: - res.switches_to_shunt_id = np.zeros(np.sum(sub_info) * 2, dtype=dt_int) - 1 - # res.in_topo_vect = np.zeros(np.sum(sub_info), dtype=dt_int) - - arrs_subid = [init_grid.load_to_subid, - init_grid.gen_to_subid, - init_grid.line_or_to_subid, - init_grid.line_ex_to_subid, - init_grid.storage_to_subid, + # now fill the switches matrix + # fill with the switches between busbars + res.switches[:nb_switch_busbars, cls.SUB_COL] = np.repeat(np.arange(n_sub), nb_switch_bb_per_sub) + + li_or_bb_switch = sum([[j for i in range(j+1, n_bb_per_sub)] for j in range(n_bb_per_sub - 1)], start=[]) # order relative to the substation + li_ex_bb_switch = sum([[i for i in range(j+1, n_bb_per_sub)] for j in range(n_bb_per_sub - 1)], start=[]) # order relative to the substation + add_sub_id_unique_id = np.repeat(np.arange(n_sub), nb_switch_bb_per_sub) * n_bb_per_sub # make it a unique substation labelling + res.switches[:nb_switch_busbars, cls.CONN_NODE_1_ID_COL] = np.array(n_sub * li_or_bb_switch) + add_sub_id_unique_id + res.switches[:nb_switch_busbars, cls.CONN_NODE_2_ID_COL] = np.array(n_sub * li_ex_bb_switch) + add_sub_id_unique_id + + # and now fill the switches for all elements + res.conn_node_to_topovect_id = np.zeros(n_conn_nodes, dtype=dt_int) - 1 + if init_grid_cls.shunts_data_available: + res.conn_node_to_shunt_id = np.zeros(n_conn_nodes, dtype=dt_int) - 1 + + arrs_subid = [init_grid_cls.load_to_subid, + init_grid_cls.gen_to_subid, + init_grid_cls.line_or_to_subid, + init_grid_cls.line_ex_to_subid, + init_grid_cls.storage_to_subid, ] - ars2 = [init_grid.load_pos_topo_vect, - init_grid.gen_pos_topo_vect, - init_grid.line_or_pos_topo_vect, - init_grid.line_ex_pos_topo_vect, - init_grid.storage_pos_topo_vect, + ars2 = [init_grid_cls.load_pos_topo_vect, + init_grid_cls.gen_pos_topo_vect, + init_grid_cls.line_or_pos_topo_vect, + init_grid_cls.line_ex_pos_topo_vect, + init_grid_cls.storage_pos_topo_vect, ] ids = [cls.LOAD_ID, cls.GEN_ID, cls.LINE_OR_ID, cls.LINE_EX_ID, cls.STORAGE_ID] - if init_grid.shunts_data_available: - arrs_subid.append(init_grid.shunt_to_subid) - ars2.append(np.array([-1] * init_grid.n_shunt)) + if init_grid_cls.shunts_data_available: + arrs_subid.append(init_grid_cls.shunt_to_subid) + ars2.append(np.array([-1] * init_grid_cls.n_shunt)) ids.append(cls.SHUNT_ID) - prev_el = 0 - # prev_el1 
= 0 - for sub_id in range(n_sub): - for arr_subid, pos_topo_vect, obj_col in zip(arrs_subid, ars2, ids): - nb_el = (arr_subid == sub_id).sum() - where_el = np.where(arr_subid == sub_id)[0] - res.switches[prev_el : (prev_el + 2 * nb_el), cls.OBJ_TYPE_COL] = obj_col - res.switches[prev_el : (prev_el + 2 * nb_el), cls.OBJ_ID_COL] = np.repeat(where_el, 2) - res.switches[prev_el : (prev_el + 2 * nb_el), cls.BUSBAR_ID_COL] = np.tile(np.array([1, 2]), nb_el) - res.switches_to_topovect_id[prev_el : (prev_el + 2 * nb_el)] = np.repeat(pos_topo_vect[arr_subid == sub_id], 2) - if init_grid.shunts_data_available and obj_col == cls.SHUNT_ID: - res.switches_to_shunt_id[prev_el : (prev_el + 2 * nb_el)] = np.repeat(where_el, 2) - - # if obj_col != cls.SHUNT_ID: - # # object is modeled in topo_vect - # res.in_topo_vect[prev_el1 : (prev_el1 + nb_el)] = 1 - prev_el += 2 * nb_el - # prev_el1 += nb_el - - # and also fill some extra information - res.load_to_busbar_id = [(load_sub, load_sub + n_sub) for load_id, load_sub in enumerate(init_grid.load_to_subid)] - res.gen_to_busbar_id = [(gen_sub, gen_sub + n_sub) for gen_id, gen_sub in enumerate(init_grid.gen_to_subid)] - res.line_or_to_busbar_id = [(line_or_sub, line_or_sub + n_sub) for line_or_id, line_or_sub in enumerate(init_grid.line_or_to_subid)] - res.line_ex_to_busbar_id = [(line_ex_sub, line_ex_sub + n_sub) for line_ex_id, line_ex_sub in enumerate(init_grid.line_ex_to_subid)] - res.storage_to_busbar_id = [(storage_sub, storage_sub + n_sub) for storage_id, storage_sub in enumerate(init_grid.storage_to_subid)] - if init_grid.shunts_data_available: - res.shunt_to_busbar_id = [(shunt_sub, shunt_sub + n_sub) for shunt_id, shunt_sub in enumerate(init_grid.shunt_to_subid)] + + prev_el = nb_switch_busbars + handled = 0 + for arr_subid, pos_topo_vect, obj_col in zip(arrs_subid, ars2, ids): + nb_el = arr_subid.shape[0] + next_el = prev_el + (1 + n_bb_per_sub) * nb_el + + # fill the object type + this_conn_nodes = np.arange(len(bb_conn_node) + handled, + len(bb_conn_node) + handled + nb_el) + if obj_col == cls.LOAD_ID: + res.load_to_conn_node_id = this_conn_nodes + elif obj_col == cls.GEN_ID: + res.gen_to_conn_node_id = this_conn_nodes + elif obj_col == cls.LINE_OR_ID: + res.line_or_to_conn_node_id = this_conn_nodes + elif obj_col == cls.LINE_EX_ID: + res.line_ex_to_conn_node_id = this_conn_nodes + elif obj_col == cls.STORAGE_ID: + res.storage_to_conn_node_id = this_conn_nodes + elif obj_col == cls.SHUNT_ID and init_grid_cls.shunts_data_available: + res.shunt_to_conn_node_id = this_conn_nodes + + # fill the substation id + res.switches[prev_el : next_el, cls.SUB_COL] = np.repeat(arr_subid, (1 + n_bb_per_sub)) + + conn_node_breaker_ids = (len(bb_conn_node) + len(el_conn_node) + handled + np.arange(nb_el)) + # fill the switches that connect the element to each busbars (eg) + # `conn_node_breaker_load_{i}` to `conn_node_sub_{subid}_busbar_{bb_i}` + # nb some values here are erased by the following statement (but I did not want to make a for loop in python) + res.switches[prev_el : next_el, cls.CONN_NODE_1_ID_COL] = np.repeat(conn_node_breaker_ids, 1 + n_bb_per_sub) + res.switches[prev_el : next_el, cls.CONN_NODE_2_ID_COL] = (np.tile(np.arange(-1, n_bb_per_sub), nb_el) + + np.repeat(arr_subid * n_bb_per_sub, n_bb_per_sub+1)) + + # fill the breaker that connect (eg): + # `conn_node_load_{i}` to `conn_node_breaker_load_{i}` + res.switches[prev_el : next_el : (1 + n_bb_per_sub), cls.CONN_NODE_1_ID_COL] = len(bb_conn_node) + handled + np.arange(nb_el) + res.switches[prev_el 
: next_el : (1 + n_bb_per_sub), cls.CONN_NODE_2_ID_COL] = conn_node_breaker_ids + + if obj_col != cls.SHUNT_ID: + res.conn_node_to_topovect_id[this_conn_nodes] = pos_topo_vect + if init_grid_cls.shunts_data_available and obj_col == cls.SHUNT_ID: + res.conn_node_to_shunt_id[this_conn_nodes] = np.arange(nb_el) + prev_el = next_el + handled += nb_el + + # TODO detailed topo: have a function to compute the switches `sub_id` columns from the `conn_node_to_subid` + # TODO detailed topo: have a function for the "conn_node_to_topovect_id" and "switches_to_shunt_id" return res - def compute_switches_position(self, topo_vect, shunt_bus): - # TODO detailed topo - # TODO in reality, for more complex environment, this requires a routine to compute it - # but for now in grid2op as only ficitive grid are modeled then - # this is not a problem - switches_state = np.zeros(self.switches.shape[0], dtype=dt_bool) - busbar_connectors_state = np.zeros(self.busbar_connectors.shape[0], dtype=dt_bool) # we can always say they are opened + def _aux_compute_busbars_sections(self): + # TODO detailed topo: speed optimization: install graph-tool (but not available with pip...) + + # import time + # beg_ = time.perf_counter() + self._connectivity_graph = nx.Graph() + self._connectivity_graph.add_edges_from([(el[1], el[2], {"id": switch_id}) for switch_id, el in enumerate(self.switches)]) + # je veux isoler les elements qui, si on enleve les busbar, peuvent atteindre les autres busbars + self._conn_node_to_bbs_conn_node_id = [set() for _ in range(self.conn_node_name.shape[0])] + for busbar_id in self.busbar_section_to_conn_node_id: + tmp_g = copy.deepcopy(self._connectivity_graph) + tmp_g.remove_nodes_from([el for el in self.busbar_section_to_conn_node_id if el != busbar_id]) + conn_nodes = nx.node_connected_component(tmp_g, busbar_id) + for el in conn_nodes: + self._conn_node_to_bbs_conn_node_id[el].add(busbar_id) + # print(time.perf_counter() - beg_) # 2ms for 1 sub + + def get_switch_id_ieee(self, conn_node_id: int): + """TODO detailed topo + + Parameters + ---------- + conn_node_id : int + _description_ + """ + switch_id = (self.switches[:, type(self).CONN_NODE_1_ID_COL] == conn_node_id).nonzero()[0] + if switch_id.shape[0] == 0: + raise Grid2OpException(f"Cannot find a switch for connectivity node {conn_node_id}") + if switch_id.shape[0] > 1: + raise Grid2OpException(f"Found multiple switch for connectivity node {conn_node_id}") + switch_id = switch_id[0] + return switch_id + + def _aux_compute_switches_pos_ieee(self, + bus_vect, # topo_vect + el_to_conn_node_id, # load_to_conn_node_id + conn_node_to_bus_id, # conn_node_to_topo_vect_id + switches_state, # result + ): + if not self._from_ieee_grid: + raise NotImplementedError("This function is only implemented for detailed topology " + "generated from ieee grids. You can use `compute_switches_position` " + "for a more generic function") + # compute the position for the switches of the "topo_vect" elements # only work for current grid2op modelling ! # TODO detailed topo vectorize this ! (or cython maybe ?) 
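+        # Reminder of the switch layout produced by `from_ieee_grid` above (a descriptive note, not new logic):
+        # for every element, its switches are stored contiguously as
+        #   [switch_id]     : "status" switch, element conn. node <-> breaker conn. node
+        #   [switch_id + b] : breaker conn. node <-> busbar b of the substation (b in 1..n_busbar_per_sub)
+        # so closing `switches_state[switch_id]` and `switches_state[switch_id + my_bus]`
+        # connects the element to busbar `my_bus` (buses are labelled starting from 1 in grid2op).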
- for el_id, bus_id in enumerate(topo_vect): - mask_el = self.switches_to_topovect_id == el_id - if mask_el.any(): - # it's a regular element - if bus_id == 1: - mask_el[np.where(mask_el)[0][1]] = False # I open the switch to busbar 2 in this case - switches_state[mask_el] = True - elif bus_id == 2: - mask_el[np.where(mask_el)[0][0]] = False # I open the switch to busbar 1 in this case - switches_state[mask_el] = True + for conn_node in el_to_conn_node_id: + switch_id = self.get_switch_id_ieee(conn_node) + my_bus = bus_vect[conn_node_to_bus_id[conn_node]] + if my_bus == -1: + # I init the swith at False, so nothing to do in this case + continue + switches_state[switch_id] = True # connector is connected + switches_state[switch_id + my_bus] = True # connector to busbar is connected + + def compute_switches_position_ieee(self, topo_vect, shunt_bus): + if not self._from_ieee_grid: + raise NotImplementedError("This function is only implemented for detailed topology " + "generated from ieee grids. You can use `compute_switches_position` " + "for a more generic function.") + switches_state = np.zeros(self.switches.shape[0], dtype=dt_bool) + + # compute the position for the switches of the "topo_vect" elements + self._aux_compute_switches_pos_ieee(topo_vect, self.load_to_conn_node_id, self.conn_node_to_topovect_id, switches_state) + self._aux_compute_switches_pos_ieee(topo_vect, self.gen_to_conn_node_id, self.conn_node_to_topovect_id, switches_state) + self._aux_compute_switches_pos_ieee(topo_vect, self.line_or_to_conn_node_id, self.conn_node_to_topovect_id, switches_state) + self._aux_compute_switches_pos_ieee(topo_vect, self.line_ex_to_conn_node_id, self.conn_node_to_topovect_id, switches_state) + self._aux_compute_switches_pos_ieee(topo_vect, self.storage_to_conn_node_id, self.conn_node_to_topovect_id, switches_state) + + if self.conn_node_to_shunt_id is None or shunt_bus is None or self._n_shunt == 0: + # no need to process the shunts in these cases + return switches_state + + # compute the position for the switches of the "shunts" elements + self._aux_compute_switches_pos_ieee(shunt_bus, self.shunt_to_conn_node_id, self.conn_node_to_shunt_id, switches_state) + return switches_state + + def compute_switches_position(self, + topo_vect: np.ndarray, + shunt_bus: Optional[np.ndarray]=None, + subs_changed : Optional[np.ndarray]=None): + """This function compute a plausible switches configuration + from a given `topo_vect` representation. + + TODO detailed topo: documentation + + Parameters + ---------- + topo_vect : `np.ndarray` + The `topo_vect` detailing on which bus each element of the grid is connected + shunt_bus : `np.ndarray` + The busbar on which each shunt is connected. + + Returns + ------- + `switches_state` state (connected = ``True`` or disconnected = ``False``) of each switches as + a numpy boolean array. 
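+
+        Examples
+        --------
+        A minimal sketch (illustration only), assuming `dtd` is the :class:`DetailedTopoDescription`
+        of an environment `env` built with a backend using :class:`AddDetailedTopoIEEE`
+        (see the class documentation above):
+
+        .. code-block:: python
+
+            obs = env.reset()
+            # `shunt_bus` is optional; pass it as well if your grid has shunts
+            switches_state = dtd.compute_switches_position(obs.topo_vect)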
+ + """ + # TODO detailed topo: input the previous switch state + + if topo_vect.shape[0] != self._dim_topo: + raise Grid2OpException("Incorrect input size for the topology vector.") + if shunt_bus is not None and shunt_bus.shape[0] != self._n_shunt: + raise Grid2OpException("Incorrect size for the shunt bus vector.") + if topo_vect[topo_vect != -1].min() < 1: + raise Grid2OpException("In grid2op buses are labelled starting from 1 and not 0 " + "(check your `topo_vect` input)") + if self._n_shunt > 0 and shunt_bus is not None: + conn_shunt = shunt_bus[shunt_bus != -1] + if conn_shunt.shape[0]: + if conn_shunt.min() < 1: + raise Grid2OpException("In grid2op buses are labelled starting from 1 and not 0 " + "(check your `shunt_bus` input)") + if np.unique(topo_vect).shape[0] > self.busbar_section_to_subid.shape[0]: + raise ImpossibleTopology("You ask for more independant buses than there are " + "busbar section on this substation") + if self._from_ieee_grid: + # specific case for IEEE grid, consistent with the AddDetailedTopoIEEE + # class + return self.compute_switches_position_ieee(topo_vect, shunt_bus) + + if subs_changed is None: + subs_changed = np.ones(self._n_sub, dtype=dt_bool) + + if subs_changed.shape[0] != self._n_sub: + raise Grid2OpException("Incorrect size for the substation mask") + + if self._conn_node_to_bbs_conn_node_id is None: + self._aux_compute_busbars_sections() + + full_res = np.zeros(self.switches.shape[0], dtype=dt_bool) + for sub_id, is_sub_modif in enumerate(subs_changed): + if not is_sub_modif: + continue + mask_this_sub = self.switches[:, type(self).SUB_COL] == sub_id + res_this_sub = self._aux_compute_switches_position_one_sub(sub_id, topo_vect, shunt_bus) + full_res[mask_this_sub] = res_this_sub + return full_res + + def _aux_compute_switches_position_one_sub(self, + sub_id, + topo_vect: np.ndarray, + shunt_bus: Optional[np.ndarray]=None): + + # by default they are False + nb_switch = self.switches[self.switches[:, type(self).SUB_COL] == sub_id].shape[0] + nb_conn_node = self.conn_node_name[self.conn_node_to_subid == sub_id].shape[0] + switches_state = np.zeros(nb_switch, dtype=dt_bool) # results + + # whether the switch is already assigned to a bus + switch_visited = np.zeros(nb_switch, dtype=dt_bool) + # whether the connectivity node is assigned to a bus + conn_node_visited = np.zeros(nb_conn_node, dtype=dt_bool) + conn_node_to_bus_id = np.zeros(nb_conn_node, dtype=dt_int) + all_pos = ((self.conn_node_to_topovect_id != -1) & (self.conn_node_to_subid == sub_id)).nonzero()[0] + + if self._n_shunt > 0 and shunt_bus is not None: + # add the shunts + all_pos = np.concatenate((all_pos, + ((self.conn_node_to_shunt_id != -1) & (self.conn_node_to_subid == sub_id)).nonzero()[0])) + topo_vect = np.concatenate((topo_vect, shunt_bus)) + + # traverse all objects + main_obj_id = 0 + try: + res = self._dfs_compute_switches_position(topo_vect, + main_obj_id, + all_pos, + switch_visited, + switches_state, + conn_node_visited, + conn_node_to_bus_id) + except RecursionError as exc_: + raise ImpossibleTopology(f"For substation {sub_id}: " + "No topology found, maybe the substation is " + "too large or there is a bug in the implementation. 
" + "It is most likely due to the fact that does not exist " + "a valid switch state for the input topology, but we " + "exclude a bug or a substation too large.") from exc_ + if res is None: + raise ImpossibleTopology(f"For substation {sub_id}") + return res + + def _dfs_compute_switches_position(self, + topo_vect, + main_obj_id, + all_pos, + switch_visited, + switches_state, + conn_node_visited, + conn_node_to_bus_id): + """should be use for one substation only, otherwise it will not work !""" + if main_obj_id >= len(all_pos): + return switch_visited + + if switch_visited.all(): + # TODO detailed topo do I have to check if result topo is correct + return None + + el_cn_id = all_pos[main_obj_id] + my_bus = topo_vect[self.conn_node_to_topovect_id[el_cn_id]] + cn_bbs_possible = self._conn_node_to_bbs_conn_node_id[el_cn_id] + if my_bus == -1: + # the object is disconnected, I suppose here that there exist + # a switch that directly control this element. + # With this hyp. this switch will never be changed + # so there is nothing to do. + conn_node_visited[el_cn_id] = True + main_obj_id = self._aux_find_next_el_id(main_obj_id, all_pos, conn_node_visited) + if main_obj_id is not None: + # I still need to visit some other elements + this_res = self._dfs_compute_switches_position(topo_vect, + main_obj_id, + all_pos, + switch_visited, + switches_state, + conn_node_visited, + conn_node_to_bus_id) + return this_res + # all elements have been visited + return switches_state + + for cn_bbs in cn_bbs_possible: # chose a busbar section + n_switch_visited = copy.deepcopy(switch_visited) + n_switches_state = copy.deepcopy(switches_state) + n_conn_node_to_bus_id = copy.deepcopy(conn_node_to_bus_id) + n_conn_node_visited = copy.deepcopy(conn_node_visited) + + if conn_node_visited[cn_bbs]: + if my_bus != conn_node_to_bus_id[cn_bbs]: + # cannot assign on the same busbar section two objects not on the same bus + # so I need to "backtrack" + continue + + elif (conn_node_to_bus_id == my_bus).any(): + # there is already an element connected to "my" bus, so I need to connect both busbars + which_other_bbs = (conn_node_to_bus_id[self.busbar_section_to_conn_node_id] == my_bus).nonzero()[0] + other_bbs_cn_ids = self.busbar_section_to_conn_node_id[which_other_bbs] + for other_bbs_cn in other_bbs_cn_ids: + this_tmp_g = copy.deepcopy(self._connectivity_graph) + this_tmp_g.remove_nodes_from([el for el in self.busbar_section_to_conn_node_id if el != cn_bbs and el != other_bbs_cn]) + bbs_switch, bbs_cn = self._aux_connect_el_to_switch(other_bbs_cn, cn_bbs, n_switch_visited, n_switches_state, this_tmp_g) + for bbs_sw, bbs_cn_ in zip(bbs_switch, bbs_cn): + # there is a way to connect both busbar sections + # we see if it works out until the end + n_switch_visited[bbs_sw] = True + n_switches_state[bbs_sw] = True + n_conn_node_visited[bbs_cn_] = True + n_conn_node_to_bus_id[bbs_cn_] = my_bus + this_res = self._dfs_compute_switches_position(topo_vect, + main_obj_id, + all_pos, + n_switch_visited, + n_switches_state, + n_conn_node_visited, + n_conn_node_to_bus_id) + if this_res is not None: + return this_res + # I cannot connect two busbars in this case + continue + # graph with all busbars remove except the "correct" one + tmp_g = copy.deepcopy(self._connectivity_graph) + tmp_g.remove_nodes_from([el for el in self.busbar_section_to_conn_node_id if el != cn_bbs]) + + # check if "main" element can be connected to this busbar + possible_switches_tmp, cn_visited_tmp = self._aux_connect_el_to_switch(el_cn_id, cn_bbs, 
switch_visited, switches_state, tmp_g) + + if len(possible_switches_tmp) == 0: + # this is not possible, I should move to other choice + continue + + something_works = False + this_res = None + n_conn_node_visited[el_cn_id] = True + n_conn_node_to_bus_id[el_cn_id] = my_bus + n_conn_node_visited[cn_visited_tmp] = True + n_conn_node_to_bus_id[cn_visited_tmp] = my_bus + for path in possible_switches_tmp: + n_switch_visited[path] = True + n_switches_state[path] = True + is_working = True + for other_cn_id in all_pos: + # find if all other elements can be assigned to this path (just an assessment for now) + if topo_vect[self.conn_node_to_topovect_id[other_cn_id]] != my_bus: + # nothing to do if the object is not on the same bus + continue + if n_conn_node_visited[other_cn_id]: + # node already visited + continue + + ps_tmp, cns_tmp = self._aux_connect_el_to_switch(other_cn_id, + cn_bbs, + n_switch_visited, + n_switches_state, + self._connectivity_graph) + if len(ps_tmp) == 0: + is_working = False + break - if self.switches_to_shunt_id is not None: - # read the switches associated with the shunts - for el_id, bus_id in enumerate(shunt_bus): - # it's an element not in the topo_vect (for now only switches) - mask_el = self.switches_to_shunt_id == el_id - if mask_el.any(): - # it's a shunt - if bus_id == 1: - mask_el[np.where(mask_el)[0][1]] = False # I open the switch to busbar 2 in this case - switches_state[mask_el] = True - elif bus_id == 2: - mask_el[np.where(mask_el)[0][0]] = False # I open the switch to busbar 1 in this case - switches_state[mask_el] = True - return busbar_connectors_state, switches_state - - def from_switches_position(self): + if len(ps_tmp) == 1: + # both objects are on the same bus and there is only one path + # to connect this object to the main object, so I necessarily + # toggle all switches on this path and continue + tmp_path = ps_tmp[0] + n_switch_visited[tmp_path] = True + n_switches_state[tmp_path] = True + n_conn_node_visited[cns_tmp] = True + n_conn_node_to_bus_id[cns_tmp] = my_bus + + if not is_working: + # this path is not working, I don't use it + continue + else: + # this seems to work, I try to see if I can + # handle all the remaining elements + main_obj_id = self._aux_find_next_el_id(main_obj_id, all_pos, n_conn_node_visited) + if main_obj_id is not None: + # I still need to visit some other elements + this_res = self._dfs_compute_switches_position(topo_vect, + main_obj_id, + all_pos, + n_switch_visited, + n_switches_state, + n_conn_node_visited, + n_conn_node_to_bus_id) + else: + # I found a correct path + return n_switches_state + if this_res is not None: + something_works = True + break # I found a solution + else: + # I need to back track + something_works = False + if something_works: + # I found a solution valid for everything + return this_res + else: + # no solution found, this bus is not possible + continue + # If I end up here it's because + # none of the `cn_bbs in cn_bbs_possible` are working + # so there is not solution + return None + + def _aux_find_next_el_id(self, main_obj_id, all_pos, n_conn_node_visited): + still_more_els = True + while n_conn_node_visited[all_pos[main_obj_id]]: + main_obj_id += 1 + if main_obj_id >= len(all_pos): + still_more_els = False + break + if still_more_els: + return main_obj_id + return None + + def _aux_connect_el_to_switch(self, el_cn_id, cn_bbs, switch_visited, switches_state, tmp_g): + """connect the connectivity node `el_cn_id` (representing an element) to + the connectivity node representing a busbar 
`cn_bbs` and should return all possible ways + to connect it without having to traverse another busbar + """ + paths = [el for el in nx.all_simple_paths(tmp_g, el_cn_id, cn_bbs)] + tmp = [np.array([self._connectivity_graph[pp[i]][pp[i+1]]["id"] for i in range(len(pp)-1)]) for pp in paths] # retrieve the switch id + res_switch = [] + res_cn = [] + for el, cn_path in zip(tmp, paths): + if not (switches_state[el] | ~switch_visited[el]).all(): + continue + res_switch.append(el) + res_cn.append(np.array(cn_path)) + return res_switch, res_cn + + def from_switches_position(self, + switches_state : np.ndarray, + subs_changed : Optional[np.ndarray]=None): + if switches_state.shape[0] != self.switches.shape[0]: + raise Grid2OpException("Impossible to compute the nodal topology from " + "the switches as you did not provide the state " + "of the correct number of switches: " + f"expected {self.switches.shape[0]} " + f"found {switches_state.shape[0]}") + if subs_changed is None: + subs_changed = np.ones(self._n_sub, dtype=dt_bool) + + if subs_changed.shape[0] != self._n_sub: + raise Grid2OpException("Incorrect number of substation provided in the " + "subs_changed argument (it should be a mask " + "indicating for each one whether this substation " + "has been modified or not)") + # TODO detailed topo # opposite of `compute_switches_position` - topo_vect = None - shunt_bus = None + topo_vect = np.zeros(self._dim_topo, dtype=dt_int) + if self.conn_node_to_shunt_id is not None: + shunt_bus = np.zeros(self._n_shunt, dtype=dt_int) + else: + shunt_bus = None + + # TODO detailed topo: find a way to accelarate it + for sub_id in range(self._n_sub): + if not subs_changed[sub_id]: + continue + + bbs_this_sub = self.busbar_section_to_subid == sub_id # bbs = busbar section + bbs_id = bbs_this_sub.nonzero()[0] + bbs_id_inv = np.zeros(bbs_id.max() + 1, dtype=dt_int) - 1 + bbs_id_inv[bbs_id] = np.arange(bbs_id.shape[0]) + bbs_handled = np.zeros(bbs_id.shape[0], dtype=dt_bool) + mask_s_this_sub = self.switches[:, type(self).SUB_COL] == sub_id + switches_this_sub = self.switches[mask_s_this_sub,:] + switches_state_this_sub = switches_state[mask_s_this_sub] + mask_cn_this_sub = self.conn_node_to_subid == sub_id + cn_to_tv_id = self.conn_node_to_topovect_id[mask_cn_this_sub] + # by default elements of this subs are disconnected + topo_vect[cn_to_tv_id[cn_to_tv_id != -1]] = -1 + + if self.conn_node_to_shunt_id is not None: + cn_to_sh_id = self.conn_node_to_shunt_id[mask_cn_this_sub] + # by default all shunts are connected + shunt_bus[cn_to_sh_id[cn_to_sh_id != -1]] = -1 + bbs_id_this_sub = 0 + bbs_node_id = 1 + while True: + if bbs_handled[bbs_id_this_sub]: + # this busbar section has already been process + bbs_id_this_sub += 1 + continue + + connected_conn_node = np.array([bbs_id[bbs_id_this_sub]]) + # now find all "connection node" connected to this busbar section + while True: + add_conn_2 = np.isin(switches_this_sub[:, type(self).CONN_NODE_1_ID_COL], connected_conn_node) & switches_state_this_sub + add_conn_1 = np.isin(switches_this_sub[:, type(self).CONN_NODE_2_ID_COL], connected_conn_node) & switches_state_this_sub + if add_conn_1.any() or add_conn_2.any(): + size_bef = connected_conn_node.shape[0] + connected_conn_node = np.concatenate((connected_conn_node, + switches_this_sub[add_conn_2, type(self).CONN_NODE_2_ID_COL])) + connected_conn_node = np.concatenate((connected_conn_node, + switches_this_sub[add_conn_1, type(self).CONN_NODE_1_ID_COL])) + connected_conn_node = np.unique(connected_conn_node) + if 
connected_conn_node.shape[0] == size_bef: + # nothing added + break + else: + break + + # now connect all real element link to the connection node to the right bus id + topo_vect_id = self.conn_node_to_topovect_id[connected_conn_node] # keep only connected "connection node" that are connected to an element + topo_vect_id = topo_vect_id[topo_vect_id != -1] + topo_vect_id = topo_vect_id[topo_vect[topo_vect_id] == -1] # remove element already assigned on a bus + topo_vect[topo_vect_id] = bbs_node_id # assign the current bus bar section id + # now handle the shunts + if self.conn_node_to_shunt_id is not None: + shunt_id = self.conn_node_to_shunt_id[connected_conn_node] # keep only connected "connection node" that are connected to an element + shunt_id = shunt_id[shunt_id != -1] + shunt_id = shunt_id[shunt_bus[shunt_id] == -1] # remove element already assigned on a bus + shunt_bus[shunt_id] = bbs_node_id # assign the current bus bar section id + + # say we go to the next bus id + bbs_node_id += 1 + + # now find the next busbar section at this substation not handled + bbs_conn_this = connected_conn_node[np.isin(connected_conn_node, bbs_id)] + bbs_handled[bbs_id_inv[bbs_conn_this]] = True + stop = False + while True: + bbs_id_this_sub += 1 + if bbs_id_this_sub >= bbs_handled.shape[0]: + stop = True + break + if not bbs_handled[bbs_id_this_sub]: + stop = False + break + if stop: + # go to next substation as all the busbar sections to + # this substation have been processed + break return topo_vect, shunt_bus + + def _aux_check_pos_topo_vect(self, + el_ids, # eg load_to_conn_node_id + vect_pos_tv, # eg gridobj_cls.load_pos_topo_vect + el_nm, # eg "load" + ): + el_tv_id = self.conn_node_to_topovect_id[el_ids] + if (vect_pos_tv != el_tv_id).any(): + raise Grid2OpException(f"Inconsistency in `conn_node_to_topovect_id` and `switch` for {el_nm}: " + f"Some switch representing {el_nm} do not have the same " + f"`conn_node_to_topovect_id` and `gridobj_cls.{el_nm}_pos_topo_vect`") - def check_validity(self): - # TODO detailed topo - pass + def check_validity(self, gridobj_cls: "grid2op.Space.GridObjects.GridObjects"): + cls = type(self) + if self._n_sub is None or self._n_sub == -1: + self._n_sub = gridobj_cls.n_sub + if self._n_sub != gridobj_cls.n_sub: + raise Grid2OpException("Incorrect number of susbtation registered " + "in the detailed topology description") + if self._dim_topo is None or self._dim_topo == -1: + self._dim_topo = gridobj_cls.dim_topo + if self._dim_topo != gridobj_cls.dim_topo: + raise Grid2OpException("Incorrect size for the topology vector registered " + "in the detailed topology description") + if self._n_shunt is None or self._n_shunt == -1: + self._n_shunt = gridobj_cls.n_shunt + if self._n_shunt != gridobj_cls.n_shunt: + raise Grid2OpException("Incorrect number of shunts registered " + "in the detailed topology description") + + if self.conn_node_to_subid.max() != gridobj_cls.n_sub - 1: + raise Grid2OpException("There are some 'connectivity node' connected to unknown substation, check conn_node_to_subid") + if self.conn_node_name.shape[0] != self.conn_node_to_subid.shape[0]: + raise Grid2OpException(f"There are {self.conn_node_name.shape[0]} according to `conn_node_name` " + f"but {self.conn_node_to_subid.shape[0]} according to `conn_node_to_subid`.") + arr = self.conn_node_to_subid + arr = arr[arr != -1] + arr.sort() + if (np.unique(arr) != np.arange(gridobj_cls.n_sub)).any(): + raise Grid2OpException("There are no 'connectivity node' on some substation, check 
conn_node_to_subid") + + if self.conn_node_to_subid.shape != self.conn_node_name.shape: + raise Grid2OpException(f"Inconsistencies found on the connectivity nodes: " + f"you declared {len(self.conn_node_to_subid)} connectivity nodes " + f"in `self.conn_node_to_subid` but " + f"{len( self.conn_node_name)} connectivity nodes in " + "`self.conn_node_name`") + + nb_conn_node = self.conn_node_name.shape[0] + all_conn_nodes = np.arange(nb_conn_node) + if not (np.isin(self.busbar_section_to_conn_node_id, all_conn_nodes)).all(): + raise Grid2OpException("Some busbar are connected to unknown connectivity nodes. Check `busbar_section_to_conn_node_id`") + if not (np.isin(self.switches[:,cls.CONN_NODE_1_ID_COL], all_conn_nodes)).all(): + raise Grid2OpException(f"Some busbar are connected to unknown connectivity nodes. Check `switches` " + f"(column {cls.CONN_NODE_1_ID_COL})") + if not (np.isin(self.switches[:,cls.CONN_NODE_2_ID_COL], all_conn_nodes)).all(): + raise Grid2OpException(f"Some busbar are connected to unknown connectivity nodes. Check `switches` " + f"(column {cls.CONN_NODE_2_ID_COL})") + + if self.switches[:,cls.CONN_NODE_1_ID_COL].max() >= len(self.conn_node_to_subid): + raise Grid2OpException("Inconsistencies found in the switches: some switches are " + "mapping unknown connectivity nodes for 'CONN_NODE_1_ID_COL' (too high)") + if self.switches[:,cls.CONN_NODE_2_ID_COL].max() >= len(self.conn_node_to_subid): + raise Grid2OpException("Inconsistencies found in the switches: some switches are " + "mapping unknown connectivity nodes for 'CONN_NODE_2_ID_COL' (too high)") + if self.switches[:,cls.CONN_NODE_1_ID_COL].min() < 0: + raise Grid2OpException("Inconsistencies found in the switches: some switches are " + "mapping unknown connectivity nodes for 'CONN_NODE_1_ID_COL' (too low)") + if self.switches[:,cls.CONN_NODE_2_ID_COL].max() >= len(self.conn_node_to_subid): + raise Grid2OpException("Inconsistencies found in the switches: some switches are " + "mapping unknown connectivity nodes for 'CONN_NODE_2_ID_COL' (too low)") + + # check connectivity node info is consistent + if (self.conn_node_to_subid[self.switches[:,cls.CONN_NODE_1_ID_COL]] != + self.conn_node_to_subid[self.switches[:,cls.CONN_NODE_2_ID_COL]]).any(): + raise Grid2OpException("Inconsistencies found in the switches mapping. Some switches are " + "mapping connectivity nodes that belong to different substation.") + if (self.conn_node_to_subid[self.switches[:,cls.CONN_NODE_1_ID_COL]] != + self.switches[:,cls.SUB_COL] + ).any(): + raise Grid2OpException(f"Inconsistencies detected between `conn_node_to_subid` and `switches`. " + f"There are some switches declared to belong to some substation (col {cls.SUB_COL}) " + f"or `switches` that connects connectivity node not belonging to this substation " + f"`conn_node_to_subid[switches[:,{cls.CONN_NODE_1_ID_COL}]]`") + if (self.conn_node_to_subid[self.switches[:,cls.CONN_NODE_2_ID_COL]] != + self.switches[:,cls.SUB_COL] + ).any(): + raise Grid2OpException(f"Inconsistencies detected between `conn_node_to_subid` and `switches`. 
" + f"There are some switches declared to belong to some substation (col {cls.SUB_COL}) " + f"or `switches` that connects connectivity node not belonging to this substation " + f"`conn_node_to_subid[switches[:,{cls.CONN_NODE_2_ID_COL}]]`") + + # check topo vect is consistent + arr = self.conn_node_to_topovect_id[self.conn_node_to_topovect_id != -1] + dim_topo = gridobj_cls.dim_topo + if arr.max() != dim_topo - 1: + raise Grid2OpException("Inconsistency in `self.conn_node_to_topovect_id`: some objects in the " + "topo_vect are not connected to any switch") + if arr.shape[0] != dim_topo: + raise Grid2OpException("Inconsistencies in `self.conn_node_to_topovect_id`: some elements of " + "topo vect are not controlled by any switches.") + arr.sort() + if (arr != np.arange(dim_topo)).any(): + raise Grid2OpException("Inconsistencies in `self.conn_node_to_topovect_id`: two or more swtiches " + "are pointing to the same element") + self._aux_check_pos_topo_vect(self.load_to_conn_node_id, gridobj_cls.load_pos_topo_vect, "load") + self._aux_check_pos_topo_vect(self.gen_to_conn_node_id, gridobj_cls.gen_pos_topo_vect, "gen") + self._aux_check_pos_topo_vect(self.line_or_to_conn_node_id, gridobj_cls.line_or_pos_topo_vect, "line_or") + self._aux_check_pos_topo_vect(self.line_ex_to_conn_node_id, gridobj_cls.line_ex_pos_topo_vect, "line_ex") + self._aux_check_pos_topo_vect(self.storage_to_conn_node_id, gridobj_cls.storage_pos_topo_vect, "storage") + + # check "el to connectivity nodes" are consistent + if self.load_to_conn_node_id.shape[0] != gridobj_cls.n_load: + raise Grid2OpException("load_to_conn_node_id is not with a size of n_load") + if self.gen_to_conn_node_id.shape[0] != gridobj_cls.n_gen: + raise Grid2OpException("gen_to_conn_node_id is not with a size of n_gen") + if self.line_or_to_conn_node_id.shape[0] != gridobj_cls.n_line: + raise Grid2OpException("line_or_to_conn_node_id is not with a size of n_line") + if self.line_ex_to_conn_node_id.shape[0] != gridobj_cls.n_line: + raise Grid2OpException("line_ex_to_conn_node_id is not with a size of n_line") + if self.storage_to_conn_node_id.shape[0] != gridobj_cls.n_storage: + raise Grid2OpException("storage_to_conn_node_id is not with a size of n_storage") + if self.shunt_to_conn_node_id is not None: + if self.shunt_to_conn_node_id.shape[0] != gridobj_cls.n_shunt: + raise Grid2OpException("storage_to_conn_node_id is not with a size of n_shunt") + + # if (self.load_to_conn_node_id != self.switches[self.switches[:,cls.OBJ_TYPE_COL] == cls.LOAD_ID, cls.CONN_NODE_1_ID_COL]).any(): + # raise Grid2OpException("load_to_conn_node_id does not match info on the switches") + # if (self.gen_to_conn_node_id != self.switches[self.switches[:,cls.OBJ_TYPE_COL] == cls.GEN_ID, cls.CONN_NODE_1_ID_COL]).any(): + # raise Grid2OpException("gen_to_conn_node_id does not match info on the switches") + # if (self.line_or_to_conn_node_id != self.switches[self.switches[:,cls.OBJ_TYPE_COL] == cls.LINE_OR_ID, cls.CONN_NODE_1_ID_COL]).any(): + # raise Grid2OpException("line_or_to_conn_node_id does not match info on the switches") + # if (self.line_ex_to_conn_node_id != self.switches[self.switches[:,cls.OBJ_TYPE_COL] == cls.LINE_EX_ID, cls.CONN_NODE_1_ID_COL]).any(): + # raise Grid2OpException("line_ex_to_conn_node_id does not match info on the switches") + # if (self.storage_to_conn_node_id != self.switches[self.switches[:,cls.OBJ_TYPE_COL] == cls.STORAGE_ID, cls.CONN_NODE_1_ID_COL]).any(): + # raise Grid2OpException("storage_to_conn_node_id does not match info on the switches") + # 
+ + # check some info about the busbars + if self.busbar_section_to_subid.max() != gridobj_cls.n_sub - 1: + raise Grid2OpException("There are some 'busbar section' connected to an unknown substation, check busbar_section_to_subid") + arr = self.busbar_section_to_subid + arr = arr[arr != -1] + arr.sort() + if (np.unique(arr) != np.arange(gridobj_cls.n_sub)).any(): + raise Grid2OpException("There are some substations without any 'busbar section', check busbar_section_to_subid") + if self.busbar_section_to_subid.shape[0] != self.busbar_section_to_conn_node_id.shape[0]: + raise Grid2OpException("Wrong size detected for busbar_section_to_subid or busbar_section_to_conn_node_id") + + # test "unicity" of connectivity node + # e.g. one connectivity node cannot represent 2 different objects + tup = (self.load_to_conn_node_id, + self.gen_to_conn_node_id, + self.line_or_to_conn_node_id, + self.line_ex_to_conn_node_id, + self.storage_to_conn_node_id, + self.busbar_section_to_conn_node_id) + shape_th = (gridobj_cls.n_load + + gridobj_cls.n_gen + + 2 * gridobj_cls.n_line + + gridobj_cls.n_storage + + self.busbar_section_to_conn_node_id.shape[0]) + if self.shunt_to_conn_node_id is not None: + tup = tup + (self.shunt_to_conn_node_id,) + shape_th += self._n_shunt + conn_nodes = np.concatenate(tup) + if np.unique(conn_nodes).shape[0] != shape_th: + raise Grid2OpException("It appears the same connectivity node represents " + "different elements (for example it could represent " + "at the same time a load and a busbar section or " + "a generator and the origin side of a powerline)") + + # TODO detailed topo proper exception class and not Grid2OpException def save_to_dict(self, res, as_list=True, copy_=True): # TODO detailed topo save_to_dict( res, self, - "busbar_name", + "conn_node_name", (lambda arr: [str(el) for el in arr]) if as_list else None, copy_, ) save_to_dict( res, self, - "busbar_to_subid", + "conn_node_to_subid", (lambda arr: [int(el) for el in arr]) if as_list else None, copy_, ) - save_to_dict( - res, - self, - "busbar_connectors", - (lambda arr: [int(el) for el in arr]) if as_list else lambda arr: arr.flatten(), - copy_, - ) + res["_from_ieee_grid"] = self._from_ieee_grid + res["_n_sub"] = int(self._n_sub) + res["_dim_topo"] = int(self._dim_topo) + res["_n_shunt"] = int(self._n_shunt) + save_to_dict( res, self, @@ -227,15 +1149,15 @@ def save_to_dict(self, res, as_list=True, copy_=True): save_to_dict( res, self, - "switches_to_topovect_id", + "conn_node_to_topovect_id", (lambda arr: [int(el) for el in arr]) if as_list else lambda arr: arr.flatten(), copy_, ) - if self.switches_to_topovect_id is not None: + if self.conn_node_to_shunt_id is not None: save_to_dict( res, self, - "switches_to_shunt_id", + "conn_node_to_shunt_id", (lambda arr: [int(el) for el in arr]) if as_list else lambda arr: arr.flatten(), copy_, ) @@ -244,44 +1166,58 @@ def save_to_dict(self, res, as_list=True, copy_=True): save_to_dict( res, self, - "load_to_busbar_id", - lambda arr: [(int(el1), int(el2)) for el1, el2 in arr], + "load_to_conn_node_id", + (lambda arr: [int(el) for el in arr]) if as_list else None, + copy_, + ) + save_to_dict( + res, + self, + "gen_to_conn_node_id", + (lambda arr: [int(el) for el in arr]) if as_list else None, copy_, ) save_to_dict( res, self, -
"gen_to_busbar_id", - lambda arr: [(int(el1), int(el2)) for el1, el2 in arr], + "line_or_to_conn_node_id", + (lambda arr: [int(el) for el in arr]) if as_list else None, copy_, ) save_to_dict( res, self, - "line_or_to_busbar_id", - lambda arr: [(int(el1), int(el2)) for el1, el2 in arr], + "line_ex_to_conn_node_id", + (lambda arr: [int(el) for el in arr]) if as_list else None, copy_, ) save_to_dict( res, self, - "line_ex_to_busbar_id", - lambda arr: [(int(el1), int(el2)) for el1, el2 in arr], + "storage_to_conn_node_id", + (lambda arr: [int(el) for el in arr]) if as_list else None, copy_, ) save_to_dict( res, self, - "storage_to_busbar_id", - lambda arr: [(int(el1), int(el2)) for el1, el2 in arr], + "busbar_section_to_conn_node_id", + (lambda arr: [int(el) for el in arr]) if as_list else None, copy_, ) - if self.shunt_to_busbar_id is not None: + save_to_dict( + res, + self, + "busbar_section_to_subid", + (lambda arr: [int(el) for el in arr]) if as_list else None, + copy_, + ) + if self.shunt_to_conn_node_id is not None: save_to_dict( res, self, - "shunt_to_busbar_id", - lambda arr: [(int(el1), int(el2)) for el1, el2 in arr], + "shunt_to_conn_node_id", + (lambda arr: [int(el) for el in arr]) if as_list else None, copy_, ) # TODO detailed topo @@ -297,53 +1233,58 @@ def from_dict(cls, dict_): """ res = cls() - - res.busbar_name = extract_from_dict( - dict_, "busbar_name", lambda x: np.array(x).astype(str) - ) - res.busbar_to_subid = extract_from_dict( - dict_, "busbar_to_subid", lambda x: np.array(x).astype(dt_int) + res.conn_node_name = extract_from_dict( + dict_, "conn_node_name", lambda x: np.array(x).astype(str) ) - res.busbar_connectors = extract_from_dict( - dict_, "busbar_connectors", lambda x: np.array(x).astype(dt_int) + res.conn_node_to_subid = extract_from_dict( + dict_, "conn_node_to_subid", lambda x: np.array(x).astype(dt_int) ) - res.busbar_connectors = res.busbar_connectors.reshape((-1, 2)) res.switches = extract_from_dict( dict_, "switches", lambda x: np.array(x).astype(dt_int) ) - res.switches = res.switches.reshape((-1, 4)) + res.switches = res.switches.reshape((-1, 3)) - res.switches_to_topovect_id = extract_from_dict( - dict_, "switches_to_topovect_id", lambda x: np.array(x).astype(dt_int) + res.conn_node_to_topovect_id = extract_from_dict( + dict_, "conn_node_to_topovect_id", lambda x: np.array(x).astype(dt_int) ) + res._from_ieee_grid = bool(dict_["_from_ieee_grid"]) + res._n_sub = int(dict_["_n_sub"]) + res._dim_topo = int(dict_["_dim_topo"]) + res._n_shunt = int(dict_["_n_shunt"]) - if "switches_to_shunt_id" in dict_: - res.switches_to_shunt_id = extract_from_dict( - dict_, "switches_to_shunt_id", lambda x: np.array(x).astype(dt_int) + if "conn_node_to_shunt_id" in dict_: + res.conn_node_to_shunt_id = extract_from_dict( + dict_, "conn_node_to_shunt_id", lambda x: np.array(x).astype(dt_int) ) else: # shunts are not supported - res.switches_to_shunt_id = None + res.conn_node_to_shunt_id = None - res.load_to_busbar_id = extract_from_dict( - dict_, "load_to_busbar_id", lambda x: x + res.load_to_conn_node_id = extract_from_dict( + dict_, "load_to_conn_node_id", lambda x: x + ) + res.gen_to_conn_node_id = extract_from_dict( + dict_, "gen_to_conn_node_id", lambda x: x + ) + res.line_or_to_conn_node_id = extract_from_dict( + dict_, "line_or_to_conn_node_id", lambda x: x ) - res.gen_to_busbar_id = extract_from_dict( - dict_, "gen_to_busbar_id", lambda x: x + res.line_ex_to_conn_node_id = extract_from_dict( + dict_, "line_ex_to_conn_node_id", lambda x: x ) - 
res.line_or_to_busbar_id = extract_from_dict( - dict_, "line_or_to_busbar_id", lambda x: x + res.storage_to_conn_node_id = extract_from_dict( + dict_, "storage_to_conn_node_id", lambda x: x ) - res.line_ex_to_busbar_id = extract_from_dict( - dict_, "line_ex_to_busbar_id", lambda x: x + res.busbar_section_to_conn_node_id = extract_from_dict( + dict_, "busbar_section_to_conn_node_id", lambda x: x ) - res.storage_to_busbar_id = extract_from_dict( - dict_, "storage_to_busbar_id", lambda x: x + res.busbar_section_to_subid = extract_from_dict( + dict_, "busbar_section_to_subid", lambda x: x ) - if "shunt_to_busbar_id" in dict_: - res.shunt_to_busbar_id = extract_from_dict( - dict_, "shunt_to_busbar_id", lambda x: x + if "shunt_to_conn_node_id" in dict_: + res.shunt_to_conn_node_id = extract_from_dict( + dict_, "shunt_to_conn_node_id", lambda x: x ) # TODO detailed topo diff --git a/grid2op/VoltageControler/BaseVoltageController.py b/grid2op/VoltageControler/BaseVoltageController.py index 02eb6c978..e29fc883f 100644 --- a/grid2op/VoltageControler/BaseVoltageController.py +++ b/grid2op/VoltageControler/BaseVoltageController.py @@ -23,7 +23,7 @@ class BaseVoltageController(RandomObject, ABC): If the voltages are not on the chronics (missing files), it will not change the voltage setpoints at all. """ - def __init__(self, gridobj, controler_backend, actionSpace_cls): + def __init__(self, gridobj, controler_backend, actionSpace_cls, _local_dir_cls=None): """ Parameters @@ -39,7 +39,10 @@ def __init__(self, gridobj, controler_backend, actionSpace_cls): legal_act = AlwaysLegal() self._actionSpace_cls = actionSpace_cls self.action_space = actionSpace_cls( - gridobj=gridobj, actionClass=VoltageOnlyAction, legal_action=legal_act + gridobj=gridobj, + actionClass=VoltageOnlyAction, + legal_action=legal_act, + _local_dir_cls=_local_dir_cls ) def _custom_deepcopy_for_copy(self, new_obj): diff --git a/grid2op/VoltageControler/ControlVoltageFromFile.py b/grid2op/VoltageControler/ControlVoltageFromFile.py index ed6004842..3322eafe0 100644 --- a/grid2op/VoltageControler/ControlVoltageFromFile.py +++ b/grid2op/VoltageControler/ControlVoltageFromFile.py @@ -19,7 +19,11 @@ class ControlVoltageFromFile(BaseVoltageController): If the voltages are not on the chronics (missing files), it will not change the voltage setpoint at all. 
""" - def __init__(self, gridobj, controler_backend, actionSpace_cls): + def __init__(self, + gridobj, + controler_backend, + actionSpace_cls, + _local_dir_cls=None): """ Parameters @@ -36,6 +40,7 @@ def __init__(self, gridobj, controler_backend, actionSpace_cls): gridobj=gridobj, controler_backend=controler_backend, actionSpace_cls=actionSpace_cls, + _local_dir_cls=_local_dir_cls ) def fix_voltage(self, observation, agent_action, env_action, prod_v_chronics): diff --git a/grid2op/__init__.py b/grid2op/__init__.py index bd891c039..3bb1d7bcc 100644 --- a/grid2op/__init__.py +++ b/grid2op/__init__.py @@ -11,7 +11,7 @@ Grid2Op """ -__version__ = '1.9.8.dev0' +__version__ = '1.10.4.dev0' __all__ = [ "Action", @@ -45,18 +45,18 @@ ] -from grid2op.MakeEnv import (make, - update_env, - list_available_remote_env, - list_available_local_env, - get_current_local_dir, - change_local_dir, - list_available_test_env - ) +from grid2op.MakeEnv import (make, + update_env, + list_available_remote_env, + list_available_local_env, + get_current_local_dir, + change_local_dir, + list_available_test_env + ) try: from grid2op._create_test_suite import create_test_suite __all__.append("create_test_suite") except ImportError as exc_: # grid2op is most likely not installed in editable mode from source - pass \ No newline at end of file + pass diff --git a/grid2op/command_line.py b/grid2op/command_line.py index 1579a9cfb..970be2e73 100644 --- a/grid2op/command_line.py +++ b/grid2op/command_line.py @@ -61,7 +61,7 @@ def replay(): def testinstall(): """ - Performs aperforms basic tests to make sure grid2op is properly installed and working. + Performs basic tests to make sure grid2op is properly installed and working. It's not because these tests pass that grid2op will be fully functional however. 
""" @@ -76,15 +76,25 @@ def testinstall(): os.path.join(this_directory, "tests"), pattern=file_name ) ) - results = unittest.TextTestResult(stream=sys.stderr, descriptions=True, verbosity=1) + + def fun(first=None, *args, **kwargs): + if first is not None: + sys.stderr.write(first, *args, **kwargs) + sys.stderr.write("\n") + sys.stderr.writeln = fun + results = unittest.TextTestResult(stream=sys.stderr, + descriptions=True, + verbosity=2) test_suite.run(results) if results.wasSuccessful(): - sys.exit(0) + return 0 else: - for _, str_ in results.errors: - print(str_) - print("-------------------------\n") - for _, str_ in results.failures: - print(str_) - print("-------------------------\n") + print("\n") + results.printErrors() + # for _, str_ in results.errors: + # print(str_) + # print("-------------------------\n") + # for _, str_ in results.failures: + # print(str_) + # print("-------------------------\n") raise RuntimeError("Test not successful !") diff --git a/grid2op/data/educ_case14_redisp/__init__.py b/grid2op/data/educ_case14_redisp/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data/educ_case14_redisp/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data/educ_case14_storage/__init__.py b/grid2op/data/educ_case14_storage/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data/educ_case14_storage/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data/l2rpn_case14_sandbox/__init__.py b/grid2op/data/l2rpn_case14_sandbox/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data/l2rpn_case14_sandbox/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data/l2rpn_case14_sandbox_diff_grid/__init__.py b/grid2op/data/l2rpn_case14_sandbox_diff_grid/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data/l2rpn_case14_sandbox_diff_grid/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data/l2rpn_icaps_2021/__init__.py b/grid2op/data/l2rpn_icaps_2021/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data/l2rpn_icaps_2021/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data/l2rpn_idf_2023/__init__.py b/grid2op/data/l2rpn_idf_2023/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data/l2rpn_idf_2023/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data/l2rpn_neurips_2020_track1/__init__.py b/grid2op/data/l2rpn_neurips_2020_track1/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data/l2rpn_neurips_2020_track1/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data/l2rpn_neurips_2020_track2/x1/__init__.py b/grid2op/data/l2rpn_neurips_2020_track2/x1/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data/l2rpn_neurips_2020_track2/x1/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git 
a/grid2op/data/l2rpn_neurips_2020_track2/x2.5/__init__.py b/grid2op/data/l2rpn_neurips_2020_track2/x2.5/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data/l2rpn_neurips_2020_track2/x2.5/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data/l2rpn_wcci_2020/__init__.py b/grid2op/data/l2rpn_wcci_2020/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data/l2rpn_wcci_2020/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data/l2rpn_wcci_2022_dev/__init__.py b/grid2op/data/l2rpn_wcci_2022_dev/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data/l2rpn_wcci_2022_dev/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data/rte_case118_example/__init__.py b/grid2op/data/rte_case118_example/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data/rte_case118_example/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data/rte_case14_opponent/__init__.py b/grid2op/data/rte_case14_opponent/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data/rte_case14_opponent/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data/rte_case14_realistic/__init__.py b/grid2op/data/rte_case14_realistic/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data/rte_case14_realistic/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data/rte_case14_redisp/__init__.py b/grid2op/data/rte_case14_redisp/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data/rte_case14_redisp/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data/rte_case14_test/__init__.py b/grid2op/data/rte_case14_test/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data/rte_case14_test/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data/rte_case5_example/__init__.py b/grid2op/data/rte_case5_example/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data/rte_case5_example/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/5bus_example.json b/grid2op/data_test/5bus_example_act_topo_set_init/5bus_example.json new file mode 100644 index 000000000..2e9dd0c79 --- /dev/null +++ b/grid2op/data_test/5bus_example_act_topo_set_init/5bus_example.json @@ -0,0 +1,1339 @@ +{ + "_module": "pandapower.auxiliary", + "_class": "pandapowerNet", + "_object": { + "bus": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": 
"{\"columns\":[\"name\",\"vn_kv\",\"type\",\"zone\",\"in_service\"],\"index\":[0,1,2,3,4],\"data\":[[\"substation_1\",100.0,\"b\",null,true],[\"substation_2\",100.0,\"b\",null,true],[\"substation_3\",100.0,\"b\",null,true],[\"substation_4\",100.0,\"b\",null,true],[\"substation_5\",100.0,\"b\",null,true]]}", + "dtype": { + "name": "object", + "vn_kv": "float64", + "type": "object", + "zone": "object", + "in_service": "bool" + }, + "orient": "split" + }, + "load": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"bus\",\"p_mw\",\"q_mvar\",\"const_z_percent\",\"const_i_percent\",\"sn_mva\",\"scaling\",\"in_service\",\"type\"],\"index\":[0,1,2],\"data\":[[\"load_0_0\",0,10.0,7.0,0.0,0.0,null,1.0,true,null],[\"load_3_1\",3,10.0,7.0,0.0,0.0,null,1.0,true,null],[\"load_4_2\",4,10.0,7.0,0.0,0.0,null,1.0,true,null]]}", + "dtype": { + "name": "object", + "bus": "uint32", + "p_mw": "float64", + "q_mvar": "float64", + "const_z_percent": "float64", + "const_i_percent": "float64", + "sn_mva": "float64", + "scaling": "float64", + "in_service": "bool", + "type": "object" + }, + "orient": "split" + }, + "sgen": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"bus\",\"p_mw\",\"q_mvar\",\"sn_mva\",\"scaling\",\"in_service\",\"type\",\"current_source\"],\"index\":[],\"data\":[]}", + "dtype": { + "name": "object", + "bus": "int64", + "p_mw": "float64", + "q_mvar": "float64", + "sn_mva": "float64", + "scaling": "float64", + "in_service": "bool", + "type": "object", + "current_source": "bool" + }, + "orient": "split" + }, + "storage": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"bus\",\"p_mw\",\"q_mvar\",\"sn_mva\",\"soc_percent\",\"min_e_mwh\",\"max_e_mwh\",\"scaling\",\"in_service\",\"type\"],\"index\":[],\"data\":[]}", + "dtype": { + "name": "object", + "bus": "int64", + "p_mw": "float64", + "q_mvar": "float64", + "sn_mva": "float64", + "soc_percent": "float64", + "min_e_mwh": "float64", + "max_e_mwh": "float64", + "scaling": "float64", + "in_service": "bool", + "type": "object" + }, + "orient": "split" + }, + "gen": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"bus\",\"p_mw\",\"vm_pu\",\"sn_mva\",\"min_q_mvar\",\"max_q_mvar\",\"scaling\",\"slack\",\"in_service\",\"type\"],\"index\":[0,1],\"data\":[[\"gen_0_0\",0,10.0,1.02,null,null,null,1.0,false,true,null],[\"gen_1_1\",1,20.0,1.02,null,null,null,1.0,true,true,null]]}", + "dtype": { + "name": "object", + "bus": "uint32", + "p_mw": "float64", + "vm_pu": "float64", + "sn_mva": "float64", + "min_q_mvar": "float64", + "max_q_mvar": "float64", + "scaling": "float64", + "slack": "bool", + "in_service": "bool", + "type": "object" + }, + "orient": "split" + }, + "switch": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"bus\",\"element\",\"et\",\"type\",\"closed\",\"name\",\"z_ohm\"],\"index\":[],\"data\":[]}", + "dtype": { + "bus": "int64", + "element": "int64", + "et": "object", + "type": "object", + "closed": "bool", + "name": "object", + "z_ohm": "float64" + }, + "orient": "split" + }, + "shunt": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"bus\",\"name\",\"q_mvar\",\"p_mw\",\"vn_kv\",\"step\",\"max_step\",\"in_service\"],\"index\":[],\"data\":[]}", + "dtype": { + "bus": "uint32", + "name": "object", + "q_mvar": "float64", + "p_mw": "float64", + "vn_kv": "float64", + 
"step": "uint32", + "max_step": "uint32", + "in_service": "bool" + }, + "orient": "split" + }, + "ext_grid": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"bus\",\"vm_pu\",\"va_degree\",\"in_service\"],\"index\":[],\"data\":[]}", + "dtype": { + "name": "object", + "bus": "uint32", + "vm_pu": "float64", + "va_degree": "float64", + "in_service": "bool" + }, + "orient": "split" + }, + "line": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"std_type\",\"from_bus\",\"to_bus\",\"length_km\",\"r_ohm_per_km\",\"x_ohm_per_km\",\"c_nf_per_km\",\"g_us_per_km\",\"max_i_ka\",\"df\",\"parallel\",\"type\",\"in_service\"],\"index\":[0,1,2,3,4,5,6,7],\"data\":[[null,\"NAYY 4x50 SE\",0,1,4.0,0.642,0.083,210.0,0.0,0.6,1.0,1,\"cs\",true],[\"0_2_2\",\"NAYY 4x50 SE\",0,2,4.47,0.642,0.083,210.0,0.0,0.22,1.0,1,\"cs\",true],[\"0_3_3\",\"NAYY 4x50 SE\",0,3,5.65,0.642,0.083,210.0,0.0,0.16,1.0,1,\"cs\",true],[\"0_4_4\",\"NAYY 4x50 SE\",0,4,4.0,0.642,0.083,210.0,0.0,0.16,1.0,1,\"cs\",true],[\"1_2_5\",\"NAYY 4x50 SE\",1,2,2.0,0.642,0.083,210.0,0.0,0.6,1.0,1,\"cs\",true],[\"2_3_6\",\"NAYY 4x50 SE\",2,3,2.0,0.642,0.083,210.0,0.0,0.3,1.0,1,\"cs\",true],[\"2_3_7\",\"NAYY 4x50 SE\",2,3,2.0,0.642,0.083,210.0,0.0,0.3,1.0,1,\"cs\",true],[\"3_4_8\",\"NAYY 4x50 SE\",3,4,4.0,0.642,0.083,210.0,0.0,0.16,1.0,1,\"cs\",true]]}", + "dtype": { + "name": "object", + "std_type": "object", + "from_bus": "uint32", + "to_bus": "uint32", + "length_km": "float64", + "r_ohm_per_km": "float64", + "x_ohm_per_km": "float64", + "c_nf_per_km": "float64", + "g_us_per_km": "float64", + "max_i_ka": "float64", + "df": "float64", + "parallel": "uint32", + "type": "object", + "in_service": "bool" + }, + "orient": "split" + }, + "trafo": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"std_type\",\"hv_bus\",\"lv_bus\",\"sn_mva\",\"vn_hv_kv\",\"vn_lv_kv\",\"vk_percent\",\"vkr_percent\",\"pfe_kw\",\"i0_percent\",\"shift_degree\",\"tap_side\",\"tap_neutral\",\"tap_min\",\"tap_max\",\"tap_step_percent\",\"tap_step_degree\",\"tap_pos\",\"tap_phase_shifter\",\"parallel\",\"df\",\"in_service\"],\"index\":[],\"data\":[]}", + "dtype": { + "name": "object", + "std_type": "object", + "hv_bus": "uint32", + "lv_bus": "uint32", + "sn_mva": "float64", + "vn_hv_kv": "float64", + "vn_lv_kv": "float64", + "vk_percent": "float64", + "vkr_percent": "float64", + "pfe_kw": "float64", + "i0_percent": "float64", + "shift_degree": "float64", + "tap_side": "object", + "tap_neutral": "int32", + "tap_min": "int32", + "tap_max": "int32", + "tap_step_percent": "float64", + "tap_step_degree": "float64", + "tap_pos": "int32", + "tap_phase_shifter": "bool", + "parallel": "uint32", + "df": "float64", + "in_service": "bool" + }, + "orient": "split" + }, + "trafo3w": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"std_type\",\"hv_bus\",\"mv_bus\",\"lv_bus\",\"sn_hv_mva\",\"sn_mv_mva\",\"sn_lv_mva\",\"vn_hv_kv\",\"vn_mv_kv\",\"vn_lv_kv\",\"vk_hv_percent\",\"vk_mv_percent\",\"vk_lv_percent\",\"vkr_hv_percent\",\"vkr_mv_percent\",\"vkr_lv_percent\",\"pfe_kw\",\"i0_percent\",\"shift_mv_degree\",\"shift_lv_degree\",\"tap_side\",\"tap_neutral\",\"tap_min\",\"tap_max\",\"tap_step_percent\",\"tap_step_degree\",\"tap_pos\",\"tap_at_star_point\",\"in_service\"],\"index\":[],\"data\":[]}", + "dtype": { + "name": "object", + "std_type": "object", + "hv_bus": "uint32", + "mv_bus": "uint32", + 
"lv_bus": "uint32", + "sn_hv_mva": "float64", + "sn_mv_mva": "float64", + "sn_lv_mva": "float64", + "vn_hv_kv": "float64", + "vn_mv_kv": "float64", + "vn_lv_kv": "float64", + "vk_hv_percent": "float64", + "vk_mv_percent": "float64", + "vk_lv_percent": "float64", + "vkr_hv_percent": "float64", + "vkr_mv_percent": "float64", + "vkr_lv_percent": "float64", + "pfe_kw": "float64", + "i0_percent": "float64", + "shift_mv_degree": "float64", + "shift_lv_degree": "float64", + "tap_side": "object", + "tap_neutral": "int32", + "tap_min": "int32", + "tap_max": "int32", + "tap_step_percent": "float64", + "tap_step_degree": "float64", + "tap_pos": "int32", + "tap_at_star_point": "bool", + "in_service": "bool" + }, + "orient": "split" + }, + "impedance": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"from_bus\",\"to_bus\",\"rft_pu\",\"xft_pu\",\"rtf_pu\",\"xtf_pu\",\"sn_mva\",\"in_service\"],\"index\":[],\"data\":[]}", + "dtype": { + "name": "object", + "from_bus": "uint32", + "to_bus": "uint32", + "rft_pu": "float64", + "xft_pu": "float64", + "rtf_pu": "float64", + "xtf_pu": "float64", + "sn_mva": "float64", + "in_service": "bool" + }, + "orient": "split" + }, + "dcline": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"from_bus\",\"to_bus\",\"p_mw\",\"loss_percent\",\"loss_mw\",\"vm_from_pu\",\"vm_to_pu\",\"max_p_mw\",\"min_q_from_mvar\",\"min_q_to_mvar\",\"max_q_from_mvar\",\"max_q_to_mvar\",\"in_service\"],\"index\":[],\"data\":[]}", + "dtype": { + "name": "object", + "from_bus": "uint32", + "to_bus": "uint32", + "p_mw": "float64", + "loss_percent": "float64", + "loss_mw": "float64", + "vm_from_pu": "float64", + "vm_to_pu": "float64", + "max_p_mw": "float64", + "min_q_from_mvar": "float64", + "min_q_to_mvar": "float64", + "max_q_from_mvar": "float64", + "max_q_to_mvar": "float64", + "in_service": "bool" + }, + "orient": "split" + }, + "ward": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"bus\",\"ps_mw\",\"qs_mvar\",\"qz_mvar\",\"pz_mw\",\"in_service\"],\"index\":[],\"data\":[]}", + "dtype": { + "name": "object", + "bus": "uint32", + "ps_mw": "float64", + "qs_mvar": "float64", + "qz_mvar": "float64", + "pz_mw": "float64", + "in_service": "bool" + }, + "orient": "split" + }, + "xward": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"bus\",\"ps_mw\",\"qs_mvar\",\"qz_mvar\",\"pz_mw\",\"r_ohm\",\"x_ohm\",\"vm_pu\",\"in_service\"],\"index\":[],\"data\":[]}", + "dtype": { + "name": "object", + "bus": "uint32", + "ps_mw": "float64", + "qs_mvar": "float64", + "qz_mvar": "float64", + "pz_mw": "float64", + "r_ohm": "float64", + "x_ohm": "float64", + "vm_pu": "float64", + "in_service": "bool" + }, + "orient": "split" + }, + "measurement": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"measurement_type\",\"element_type\",\"element\",\"value\",\"std_dev\",\"side\"],\"index\":[],\"data\":[]}", + "dtype": { + "name": "object", + "measurement_type": "object", + "element_type": "object", + "element": "uint32", + "value": "float64", + "std_dev": "float64", + "side": "object" + }, + "orient": "split" + }, + "pwl_cost": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"power_type\",\"element\",\"et\",\"points\"],\"index\":[],\"data\":[]}", + "dtype": { + "power_type": "object", + "element": "object", + "et": "object", + 
"points": "object" + }, + "orient": "split" + }, + "poly_cost": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"element\",\"et\",\"cp0_eur\",\"cp1_eur_per_mw\",\"cp2_eur_per_mw2\",\"cq0_eur\",\"cq1_eur_per_mvar\",\"cq2_eur_per_mvar2\"],\"index\":[],\"data\":[]}", + "dtype": { + "element": "object", + "et": "object", + "cp0_eur": "float64", + "cp1_eur_per_mw": "float64", + "cp2_eur_per_mw2": "float64", + "cq0_eur": "float64", + "cq1_eur_per_mvar": "float64", + "cq2_eur_per_mvar2": "float64" + }, + "orient": "split" + }, + "line_geodata": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"coords\"],\"index\":[0,1,2,3,4,5,6,7],\"data\":[[[[0,0],[0,4]]],[[[0,0],[2,4]]],[[[0,0],[4,4]]],[[[0,0],[4,0]]],[[[0,4],[2,4]]],[[[2,4],[3,4.2],[4,4]]],[[[2,4],[3,3.8],[4,4]]],[[[4,4],[4,0]]]]}", + "dtype": { + "coords": "object" + }, + "orient": "split" + }, + "bus_geodata": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"x\",\"y\",\"coords\"],\"index\":[0,1,2,3,4],\"data\":[[0.0,0.0,null],[0.0,4.0,null],[2.0,4.0,null],[4.0,4.0,null],[4.0,0.0,null]]}", + "dtype": { + "x": "float64", + "y": "float64", + "coords": "object" + }, + "orient": "split" + }, + "version": "2.1.0", + "converged": true, + "name": "5bus", + "f_hz": 50.0, + "sn_mva": 1, + "std_types": { + "line": { + "NAYY 4x50 SE": { + "c_nf_per_km": 210, + "r_ohm_per_km": 0.642, + "x_ohm_per_km": 0.083, + "max_i_ka": 0.142, + "type": "cs", + "q_mm2": 50, + "alpha": 0.00403 + }, + "NAYY 4x120 SE": { + "c_nf_per_km": 264, + "r_ohm_per_km": 0.225, + "x_ohm_per_km": 0.08, + "max_i_ka": 0.242, + "type": "cs", + "q_mm2": 120, + "alpha": 0.00403 + }, + "NAYY 4x150 SE": { + "c_nf_per_km": 261, + "r_ohm_per_km": 0.208, + "x_ohm_per_km": 0.08, + "max_i_ka": 0.27, + "type": "cs", + "q_mm2": 150, + "alpha": 0.00403 + }, + "NA2XS2Y 1x95 RM/25 12/20 kV": { + "c_nf_per_km": 216, + "r_ohm_per_km": 0.313, + "x_ohm_per_km": 0.132, + "max_i_ka": 0.252, + "type": "cs", + "q_mm2": 95, + "alpha": 0.00403 + }, + "NA2XS2Y 1x185 RM/25 12/20 kV": { + "c_nf_per_km": 273, + "r_ohm_per_km": 0.161, + "x_ohm_per_km": 0.117, + "max_i_ka": 0.362, + "type": "cs", + "q_mm2": 185, + "alpha": 0.00403 + }, + "NA2XS2Y 1x240 RM/25 12/20 kV": { + "c_nf_per_km": 304, + "r_ohm_per_km": 0.122, + "x_ohm_per_km": 0.112, + "max_i_ka": 0.421, + "type": "cs", + "q_mm2": 240, + "alpha": 0.00403 + }, + "NA2XS2Y 1x95 RM/25 6/10 kV": { + "c_nf_per_km": 315, + "r_ohm_per_km": 0.313, + "x_ohm_per_km": 0.123, + "max_i_ka": 0.249, + "type": "cs", + "q_mm2": 95, + "alpha": 0.00403 + }, + "NA2XS2Y 1x185 RM/25 6/10 kV": { + "c_nf_per_km": 406, + "r_ohm_per_km": 0.161, + "x_ohm_per_km": 0.11, + "max_i_ka": 0.358, + "type": "cs", + "q_mm2": 185, + "alpha": 0.00403 + }, + "NA2XS2Y 1x240 RM/25 6/10 kV": { + "c_nf_per_km": 456, + "r_ohm_per_km": 0.122, + "x_ohm_per_km": 0.105, + "max_i_ka": 0.416, + "type": "cs", + "q_mm2": 240, + "alpha": 0.00403 + }, + "NA2XS2Y 1x150 RM/25 12/20 kV": { + "c_nf_per_km": 250, + "r_ohm_per_km": 0.206, + "x_ohm_per_km": 0.116, + "max_i_ka": 0.319, + "type": "cs", + "q_mm2": 150, + "alpha": 0.00403 + }, + "NA2XS2Y 1x120 RM/25 12/20 kV": { + "c_nf_per_km": 230, + "r_ohm_per_km": 0.253, + "x_ohm_per_km": 0.119, + "max_i_ka": 0.283, + "type": "cs", + "q_mm2": 120, + "alpha": 0.00403 + }, + "NA2XS2Y 1x70 RM/25 12/20 kV": { + "c_nf_per_km": 190, + "r_ohm_per_km": 0.443, + "x_ohm_per_km": 0.132, + "max_i_ka": 0.22, + "type": "cs", + "q_mm2": 70, + "alpha": 0.00403 + 
}, + "NA2XS2Y 1x150 RM/25 6/10 kV": { + "c_nf_per_km": 360, + "r_ohm_per_km": 0.206, + "x_ohm_per_km": 0.11, + "max_i_ka": 0.315, + "type": "cs", + "q_mm2": 150, + "alpha": 0.00403 + }, + "NA2XS2Y 1x120 RM/25 6/10 kV": { + "c_nf_per_km": 340, + "r_ohm_per_km": 0.253, + "x_ohm_per_km": 0.113, + "max_i_ka": 0.28, + "type": "cs", + "q_mm2": 120, + "alpha": 0.00403 + }, + "NA2XS2Y 1x70 RM/25 6/10 kV": { + "c_nf_per_km": 280, + "r_ohm_per_km": 0.443, + "x_ohm_per_km": 0.123, + "max_i_ka": 0.217, + "type": "cs", + "q_mm2": 70, + "alpha": 0.00403 + }, + "N2XS(FL)2Y 1x120 RM/35 64/110 kV": { + "c_nf_per_km": 112, + "r_ohm_per_km": 0.153, + "x_ohm_per_km": 0.166, + "max_i_ka": 0.366, + "type": "cs", + "q_mm2": 120, + "alpha": 0.00393 + }, + "N2XS(FL)2Y 1x185 RM/35 64/110 kV": { + "c_nf_per_km": 125, + "r_ohm_per_km": 0.099, + "x_ohm_per_km": 0.156, + "max_i_ka": 0.457, + "type": "cs", + "q_mm2": 185, + "alpha": 0.00393 + }, + "N2XS(FL)2Y 1x240 RM/35 64/110 kV": { + "c_nf_per_km": 135, + "r_ohm_per_km": 0.075, + "x_ohm_per_km": 0.149, + "max_i_ka": 0.526, + "type": "cs", + "q_mm2": 240, + "alpha": 0.00393 + }, + "N2XS(FL)2Y 1x300 RM/35 64/110 kV": { + "c_nf_per_km": 144, + "r_ohm_per_km": 0.06, + "x_ohm_per_km": 0.144, + "max_i_ka": 0.588, + "type": "cs", + "q_mm2": 300, + "alpha": 0.00393 + }, + "15-AL1/3-ST1A 0.4": { + "c_nf_per_km": 11, + "r_ohm_per_km": 1.8769, + "x_ohm_per_km": 0.35, + "max_i_ka": 0.105, + "type": "ol", + "q_mm2": 16, + "alpha": 0.00403 + }, + "24-AL1/4-ST1A 0.4": { + "c_nf_per_km": 11.25, + "r_ohm_per_km": 1.2012, + "x_ohm_per_km": 0.335, + "max_i_ka": 0.14, + "type": "ol", + "q_mm2": 24, + "alpha": 0.00403 + }, + "48-AL1/8-ST1A 0.4": { + "c_nf_per_km": 12.2, + "r_ohm_per_km": 0.5939, + "x_ohm_per_km": 0.3, + "max_i_ka": 0.21, + "type": "ol", + "q_mm2": 48, + "alpha": 0.00403 + }, + "94-AL1/15-ST1A 0.4": { + "c_nf_per_km": 13.2, + "r_ohm_per_km": 0.306, + "x_ohm_per_km": 0.29, + "max_i_ka": 0.35, + "type": "ol", + "q_mm2": 94, + "alpha": 0.00403 + }, + "34-AL1/6-ST1A 10.0": { + "c_nf_per_km": 9.7, + "r_ohm_per_km": 0.8342, + "x_ohm_per_km": 0.36, + "max_i_ka": 0.17, + "type": "ol", + "q_mm2": 34, + "alpha": 0.00403 + }, + "48-AL1/8-ST1A 10.0": { + "c_nf_per_km": 10.1, + "r_ohm_per_km": 0.5939, + "x_ohm_per_km": 0.35, + "max_i_ka": 0.21, + "type": "ol", + "q_mm2": 48, + "alpha": 0.00403 + }, + "70-AL1/11-ST1A 10.0": { + "c_nf_per_km": 10.4, + "r_ohm_per_km": 0.4132, + "x_ohm_per_km": 0.339, + "max_i_ka": 0.29, + "type": "ol", + "q_mm2": 70, + "alpha": 0.00403 + }, + "94-AL1/15-ST1A 10.0": { + "c_nf_per_km": 10.75, + "r_ohm_per_km": 0.306, + "x_ohm_per_km": 0.33, + "max_i_ka": 0.35, + "type": "ol", + "q_mm2": 94, + "alpha": 0.00403 + }, + "122-AL1/20-ST1A 10.0": { + "c_nf_per_km": 11.1, + "r_ohm_per_km": 0.2376, + "x_ohm_per_km": 0.323, + "max_i_ka": 0.41, + "type": "ol", + "q_mm2": 122, + "alpha": 0.00403 + }, + "149-AL1/24-ST1A 10.0": { + "c_nf_per_km": 11.25, + "r_ohm_per_km": 0.194, + "x_ohm_per_km": 0.315, + "max_i_ka": 0.47, + "type": "ol", + "q_mm2": 149, + "alpha": 0.00403 + }, + "34-AL1/6-ST1A 20.0": { + "c_nf_per_km": 9.15, + "r_ohm_per_km": 0.8342, + "x_ohm_per_km": 0.382, + "max_i_ka": 0.17, + "type": "ol", + "q_mm2": 34, + "alpha": 0.00403 + }, + "48-AL1/8-ST1A 20.0": { + "c_nf_per_km": 9.5, + "r_ohm_per_km": 0.5939, + "x_ohm_per_km": 0.372, + "max_i_ka": 0.21, + "type": "ol", + "q_mm2": 48, + "alpha": 0.00403 + }, + "70-AL1/11-ST1A 20.0": { + "c_nf_per_km": 9.7, + "r_ohm_per_km": 0.4132, + "x_ohm_per_km": 0.36, + "max_i_ka": 0.29, + "type": "ol", + "q_mm2": 70, + 
"alpha": 0.00403 + }, + "94-AL1/15-ST1A 20.0": { + "c_nf_per_km": 10, + "r_ohm_per_km": 0.306, + "x_ohm_per_km": 0.35, + "max_i_ka": 0.35, + "type": "ol", + "q_mm2": 94, + "alpha": 0.00403 + }, + "122-AL1/20-ST1A 20.0": { + "c_nf_per_km": 10.3, + "r_ohm_per_km": 0.2376, + "x_ohm_per_km": 0.344, + "max_i_ka": 0.41, + "type": "ol", + "q_mm2": 122, + "alpha": 0.00403 + }, + "149-AL1/24-ST1A 20.0": { + "c_nf_per_km": 10.5, + "r_ohm_per_km": 0.194, + "x_ohm_per_km": 0.337, + "max_i_ka": 0.47, + "type": "ol", + "q_mm2": 149, + "alpha": 0.00403 + }, + "184-AL1/30-ST1A 20.0": { + "c_nf_per_km": 10.75, + "r_ohm_per_km": 0.1571, + "x_ohm_per_km": 0.33, + "max_i_ka": 0.535, + "type": "ol", + "q_mm2": 184, + "alpha": 0.00403 + }, + "243-AL1/39-ST1A 20.0": { + "c_nf_per_km": 11, + "r_ohm_per_km": 0.1188, + "x_ohm_per_km": 0.32, + "max_i_ka": 0.645, + "type": "ol", + "q_mm2": 243, + "alpha": 0.00403 + }, + "48-AL1/8-ST1A 110.0": { + "c_nf_per_km": 8, + "r_ohm_per_km": 0.5939, + "x_ohm_per_km": 0.46, + "max_i_ka": 0.21, + "type": "ol", + "q_mm2": 48, + "alpha": 0.00403 + }, + "70-AL1/11-ST1A 110.0": { + "c_nf_per_km": 8.4, + "r_ohm_per_km": 0.4132, + "x_ohm_per_km": 0.45, + "max_i_ka": 0.29, + "type": "ol", + "q_mm2": 70, + "alpha": 0.00403 + }, + "94-AL1/15-ST1A 110.0": { + "c_nf_per_km": 8.65, + "r_ohm_per_km": 0.306, + "x_ohm_per_km": 0.44, + "max_i_ka": 0.35, + "type": "ol", + "q_mm2": 94, + "alpha": 0.00403 + }, + "122-AL1/20-ST1A 110.0": { + "c_nf_per_km": 8.5, + "r_ohm_per_km": 0.2376, + "x_ohm_per_km": 0.43, + "max_i_ka": 0.41, + "type": "ol", + "q_mm2": 122, + "alpha": 0.00403 + }, + "149-AL1/24-ST1A 110.0": { + "c_nf_per_km": 8.75, + "r_ohm_per_km": 0.194, + "x_ohm_per_km": 0.41, + "max_i_ka": 0.47, + "type": "ol", + "q_mm2": 149, + "alpha": 0.00403 + }, + "184-AL1/30-ST1A 110.0": { + "c_nf_per_km": 8.8, + "r_ohm_per_km": 0.1571, + "x_ohm_per_km": 0.4, + "max_i_ka": 0.535, + "type": "ol", + "q_mm2": 184, + "alpha": 0.00403 + }, + "243-AL1/39-ST1A 110.0": { + "c_nf_per_km": 9, + "r_ohm_per_km": 0.1188, + "x_ohm_per_km": 0.39, + "max_i_ka": 0.645, + "type": "ol", + "q_mm2": 243, + "alpha": 0.00403 + }, + "305-AL1/39-ST1A 110.0": { + "c_nf_per_km": 9.2, + "r_ohm_per_km": 0.0949, + "x_ohm_per_km": 0.38, + "max_i_ka": 0.74, + "type": "ol", + "q_mm2": 305, + "alpha": 0.00403 + }, + "490-AL1/64-ST1A 110.0": { + "c_nf_per_km": 9.75, + "r_ohm_per_km": 0.059, + "x_ohm_per_km": 0.37, + "max_i_ka": 0.96, + "type": "ol", + "q_mm2": 490, + "alpha": 0.00403 + }, + "679-AL1/86-ST1A 110.0": { + "c_nf_per_km": 9.95, + "r_ohm_per_km": 0.042, + "x_ohm_per_km": 0.36, + "max_i_ka": 0.115, + "type": "ol", + "q_mm2": 679, + "alpha": 0.00403 + }, + "490-AL1/64-ST1A 220.0": { + "c_nf_per_km": 10, + "r_ohm_per_km": 0.059, + "x_ohm_per_km": 0.285, + "max_i_ka": 0.96, + "type": "ol", + "q_mm2": 490, + "alpha": 0.00403 + }, + "679-AL1/86-ST1A 220.0": { + "c_nf_per_km": 11.7, + "r_ohm_per_km": 0.042, + "x_ohm_per_km": 0.275, + "max_i_ka": 0.115, + "type": "ol", + "q_mm2": 679, + "alpha": 0.00403 + }, + "490-AL1/64-ST1A 380.0": { + "c_nf_per_km": 11, + "r_ohm_per_km": 0.059, + "x_ohm_per_km": 0.253, + "max_i_ka": 0.96, + "type": "ol", + "q_mm2": 490, + "alpha": 0.00403 + }, + "679-AL1/86-ST1A 380.0": { + "c_nf_per_km": 14.6, + "r_ohm_per_km": 0.042, + "x_ohm_per_km": 0.25, + "max_i_ka": 0.115, + "type": "ol", + "q_mm2": 679, + "alpha": 0.00403 + } + }, + "trafo": { + "160 MVA 380/110 kV": { + "i0_percent": 0.06, + "pfe_kw": 60, + "vkr_percent": 0.25, + "sn_mva": 160, + "vn_lv_kv": 110.0, + "vn_hv_kv": 380.0, + "vk_percent": 
12.2, + "shift_degree": 0, + "vector_group": "Yy0", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -9, + "tap_max": 9, + "tap_step_degree": 0, + "tap_step_percent": 1.5, + "tap_phase_shifter": false + }, + "100 MVA 220/110 kV": { + "i0_percent": 0.06, + "pfe_kw": 55, + "vkr_percent": 0.26, + "sn_mva": 100, + "vn_lv_kv": 110.0, + "vn_hv_kv": 220.0, + "vk_percent": 12.0, + "shift_degree": 0, + "vector_group": "Yy0", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -9, + "tap_max": 9, + "tap_step_degree": 0, + "tap_step_percent": 1.5, + "tap_phase_shifter": false + }, + "63 MVA 110/20 kV": { + "i0_percent": 0.04, + "pfe_kw": 22, + "vkr_percent": 0.32, + "sn_mva": 63, + "vn_lv_kv": 20.0, + "vn_hv_kv": 110.0, + "vk_percent": 18, + "shift_degree": 150, + "vector_group": "YNd5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -9, + "tap_max": 9, + "tap_step_degree": 0, + "tap_step_percent": 1.5, + "tap_phase_shifter": false + }, + "40 MVA 110/20 kV": { + "i0_percent": 0.05, + "pfe_kw": 18, + "vkr_percent": 0.34, + "sn_mva": 40, + "vn_lv_kv": 20.0, + "vn_hv_kv": 110.0, + "vk_percent": 16.2, + "shift_degree": 150, + "vector_group": "YNd5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -9, + "tap_max": 9, + "tap_step_degree": 0, + "tap_step_percent": 1.5, + "tap_phase_shifter": false + }, + "25 MVA 110/20 kV": { + "i0_percent": 0.07, + "pfe_kw": 14, + "vkr_percent": 0.41, + "sn_mva": 25, + "vn_lv_kv": 20.0, + "vn_hv_kv": 110.0, + "vk_percent": 12, + "shift_degree": 150, + "vector_group": "YNd5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -9, + "tap_max": 9, + "tap_step_degree": 0, + "tap_step_percent": 1.5, + "tap_phase_shifter": false + }, + "63 MVA 110/10 kV": { + "sn_mva": 63, + "vn_hv_kv": 110, + "vn_lv_kv": 10, + "vk_percent": 18, + "vkr_percent": 0.32, + "pfe_kw": 22, + "i0_percent": 0.04, + "shift_degree": 150, + "vector_group": "YNd5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -9, + "tap_max": 9, + "tap_step_degree": 0, + "tap_step_percent": 1.5, + "tap_phase_shifter": false + }, + "40 MVA 110/10 kV": { + "sn_mva": 40, + "vn_hv_kv": 110, + "vn_lv_kv": 10, + "vk_percent": 16.2, + "vkr_percent": 0.34, + "pfe_kw": 18, + "i0_percent": 0.05, + "shift_degree": 150, + "vector_group": "YNd5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -9, + "tap_max": 9, + "tap_step_degree": 0, + "tap_step_percent": 1.5, + "tap_phase_shifter": false + }, + "25 MVA 110/10 kV": { + "sn_mva": 25, + "vn_hv_kv": 110, + "vn_lv_kv": 10, + "vk_percent": 12, + "vkr_percent": 0.41, + "pfe_kw": 14, + "i0_percent": 0.07, + "shift_degree": 150, + "vector_group": "YNd5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -9, + "tap_max": 9, + "tap_step_degree": 0, + "tap_step_percent": 1.5, + "tap_phase_shifter": false + }, + "0.25 MVA 20/0.4 kV": { + "sn_mva": 0.25, + "vn_hv_kv": 20, + "vn_lv_kv": 0.4, + "vk_percent": 6, + "vkr_percent": 1.44, + "pfe_kw": 0.8, + "i0_percent": 0.32, + "shift_degree": 150, + "vector_group": "Yzn5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -2, + "tap_max": 2, + "tap_step_degree": 0, + "tap_step_percent": 2.5, + "tap_phase_shifter": false + }, + "0.4 MVA 20/0.4 kV": { + "sn_mva": 0.4, + "vn_hv_kv": 20, + "vn_lv_kv": 0.4, + "vk_percent": 6, + "vkr_percent": 1.425, + "pfe_kw": 1.35, + "i0_percent": 0.3375, + "shift_degree": 150, + "vector_group": "Dyn5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -2, + "tap_max": 2, + "tap_step_degree": 0, + "tap_step_percent": 2.5, + "tap_phase_shifter": false + }, + "0.63 MVA 20/0.4 kV": { 
+ "sn_mva": 0.63, + "vn_hv_kv": 20, + "vn_lv_kv": 0.4, + "vk_percent": 6, + "vkr_percent": 1.206, + "pfe_kw": 1.65, + "i0_percent": 0.2619, + "shift_degree": 150, + "vector_group": "Dyn5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -2, + "tap_max": 2, + "tap_step_degree": 0, + "tap_step_percent": 2.5, + "tap_phase_shifter": false + }, + "0.25 MVA 10/0.4 kV": { + "sn_mva": 0.25, + "vn_hv_kv": 10, + "vn_lv_kv": 0.4, + "vk_percent": 4, + "vkr_percent": 1.2, + "pfe_kw": 0.6, + "i0_percent": 0.24, + "shift_degree": 150, + "vector_group": "Dyn5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -2, + "tap_max": 2, + "tap_step_degree": 0, + "tap_step_percent": 2.5, + "tap_phase_shifter": false + }, + "0.4 MVA 10/0.4 kV": { + "sn_mva": 0.4, + "vn_hv_kv": 10, + "vn_lv_kv": 0.4, + "vk_percent": 4, + "vkr_percent": 1.325, + "pfe_kw": 0.95, + "i0_percent": 0.2375, + "shift_degree": 150, + "vector_group": "Dyn5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -2, + "tap_max": 2, + "tap_step_degree": 0, + "tap_step_percent": 2.5, + "tap_phase_shifter": false + }, + "0.63 MVA 10/0.4 kV": { + "sn_mva": 0.63, + "vn_hv_kv": 10, + "vn_lv_kv": 0.4, + "vk_percent": 4, + "vkr_percent": 1.0794, + "pfe_kw": 1.18, + "i0_percent": 0.1873, + "shift_degree": 150, + "vector_group": "Dyn5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -2, + "tap_max": 2, + "tap_step_degree": 0, + "tap_step_percent": 2.5, + "tap_phase_shifter": false + } + }, + "trafo3w": { + "63/25/38 MVA 110/20/10 kV": { + "sn_hv_mva": 63, + "sn_mv_mva": 25, + "sn_lv_mva": 38, + "vn_hv_kv": 110, + "vn_mv_kv": 20, + "vn_lv_kv": 10, + "vk_hv_percent": 10.4, + "vk_mv_percent": 10.4, + "vk_lv_percent": 10.4, + "vkr_hv_percent": 0.28, + "vkr_mv_percent": 0.32, + "vkr_lv_percent": 0.35, + "pfe_kw": 35, + "i0_percent": 0.89, + "shift_mv_degree": 0, + "shift_lv_degree": 0, + "vector_group": "YN0yn0yn0", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -10, + "tap_max": 10, + "tap_step_percent": 1.2 + }, + "63/25/38 MVA 110/10/10 kV": { + "sn_hv_mva": 63, + "sn_mv_mva": 25, + "sn_lv_mva": 38, + "vn_hv_kv": 110, + "vn_mv_kv": 10, + "vn_lv_kv": 10, + "vk_hv_percent": 10.4, + "vk_mv_percent": 10.4, + "vk_lv_percent": 10.4, + "vkr_hv_percent": 0.28, + "vkr_mv_percent": 0.32, + "vkr_lv_percent": 0.35, + "pfe_kw": 35, + "i0_percent": 0.89, + "shift_mv_degree": 0, + "shift_lv_degree": 0, + "vector_group": "YN0yn0yn0", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -10, + "tap_max": 10, + "tap_step_percent": 1.2 + } + } + }, + "res_bus": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"vm_pu\",\"va_degree\",\"p_mw\",\"q_mvar\"],\"index\":[0,1,2,3,4],\"data\":[[1.02,-0.845445168673926,0.0,-111.791243672370911],[1.02,0.0,-21.729831330858325,116.839935541152954],[1.019214100496144,-0.409103297622625,0.0,0.0],[1.018637116919488,-0.503470352662766,10.0,7.0],[1.017983079721402,-0.653497665026562,10.0,7.0]]}", + "dtype": { + "vm_pu": "float64", + "va_degree": "float64", + "p_mw": "float64", + "q_mvar": "float64" + }, + "orient": "split" + }, + "res_line": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": 
"{\"columns\":[\"p_from_mw\",\"q_from_mvar\",\"p_to_mw\",\"q_to_mvar\",\"pl_mw\",\"ql_mvar\",\"i_from_ka\",\"i_to_ka\",\"i_ka\",\"vm_from_pu\",\"va_from_degree\",\"vm_to_pu\",\"va_to_degree\",\"loading_percent\"],\"index\":[0,1,2,3,4,5,6,7],\"data\":[[-7.167647147657727,57.480079867900443,8.03525639977348,-60.113463233922118,0.867609252115754,-2.633383366021676,0.327874112511858,0.343286326507116,0.343286326507116,1.02,-0.845445168673926,1.02,0.0,57.214387751185988],[-0.657313913963437,25.969126903729045,0.866078469150186,-29.007927174007612,0.208764555186749,-3.038800270278568,0.147040043868819,0.164393305610081,0.164393305610081,1.02,-0.845445168673926,1.019214100496144,-0.409103297622625,74.724229822763931],[1.64566972119938,15.370129751576128,-1.540268914180618,-19.229415550834709,0.105400807018762,-3.859285799258581,0.087496748884432,0.109338903896103,0.109338903896103,1.02,-0.845445168673926,1.018637116919488,-0.503470352662766,68.336814935064211],[6.179291340421495,12.971907266349552,-6.119076735247816,-15.70424981919658,0.060214605173678,-2.732342552847028,0.081330018729726,0.095589209712924,0.095589209712924,1.02,-0.845445168673926,1.017983079721402,-0.653497665026562,59.743256070577175],[13.694574931085771,-56.726472302863066,-13.283848894885464,55.407854241119566,0.410726036200307,-1.3186180617435,0.330312825878128,0.322760996590474,0.330312825878128,1.02,0.0,1.019214100496144,-0.409103297622625,55.052137646354595],[6.208885212872048,-13.199963533555254,-6.184761786109662,11.833197159642042,0.024123426762386,-1.366766373913212,0.082632108556076,0.075677384410291,0.082632108556076,1.019214100496144,-0.409103297622625,1.018637116919488,-0.503470352662766,27.544036185358689],[6.208885212872048,-13.199963533555254,-6.184761786109662,11.833197159642042,0.024123426762386,-1.366766373913212,0.082632108556076,0.075677384410291,0.082632108556076,1.019214100496144,-0.409103297622625,1.018637116919488,-0.503470352662766,27.544036185358689],[3.909792486391969,-11.436978768449999,-3.88092326475316,8.704249819196738,0.028869221638809,-2.732728949253261,0.068506463438984,0.054050881891821,0.068506463438984,1.018637116919488,-0.503470352662766,1.017983079721402,-0.653497665026562,42.816539649365005]]}", + "dtype": { + "p_from_mw": "float64", + "q_from_mvar": "float64", + "p_to_mw": "float64", + "q_to_mvar": "float64", + "pl_mw": "float64", + "ql_mvar": "float64", + "i_from_ka": "float64", + "i_to_ka": "float64", + "i_ka": "float64", + "vm_from_pu": "float64", + "va_from_degree": "float64", + "vm_to_pu": "float64", + "va_to_degree": "float64", + "loading_percent": "float64" + }, + "orient": "split" + }, + "res_trafo": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_hv_mw\",\"q_hv_mvar\",\"p_lv_mw\",\"q_lv_mvar\",\"pl_mw\",\"ql_mvar\",\"i_hv_ka\",\"i_lv_ka\",\"vm_hv_pu\",\"va_hv_degree\",\"vm_lv_pu\",\"va_lv_degree\",\"loading_percent\"],\"index\":[],\"data\":[]}", + "dtype": { + "p_hv_mw": "float64", + "q_hv_mvar": "float64", + "p_lv_mw": "float64", + "q_lv_mvar": "float64", + "pl_mw": "float64", + "ql_mvar": "float64", + "i_hv_ka": "float64", + "i_lv_ka": "float64", + "vm_hv_pu": "float64", + "va_hv_degree": "float64", + "vm_lv_pu": "float64", + "va_lv_degree": "float64", + "loading_percent": "float64" + }, + "orient": "split" + }, + "res_trafo3w": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": 
"{\"columns\":[\"p_hv_mw\",\"q_hv_mvar\",\"p_mv_mw\",\"q_mv_mvar\",\"p_lv_mw\",\"q_lv_mvar\",\"pl_mw\",\"ql_mvar\",\"i_hv_ka\",\"i_mv_ka\",\"i_lv_ka\",\"vm_hv_pu\",\"va_hv_degree\",\"vm_mv_pu\",\"va_mv_degree\",\"vm_lv_pu\",\"va_lv_degree\",\"va_internal_degree\",\"vm_internal_pu\",\"loading_percent\"],\"index\":[],\"data\":[]}", + "dtype": { + "p_hv_mw": "float64", + "q_hv_mvar": "float64", + "p_mv_mw": "float64", + "q_mv_mvar": "float64", + "p_lv_mw": "float64", + "q_lv_mvar": "float64", + "pl_mw": "float64", + "ql_mvar": "float64", + "i_hv_ka": "float64", + "i_mv_ka": "float64", + "i_lv_ka": "float64", + "vm_hv_pu": "float64", + "va_hv_degree": "float64", + "vm_mv_pu": "float64", + "va_mv_degree": "float64", + "vm_lv_pu": "float64", + "va_lv_degree": "float64", + "va_internal_degree": "float64", + "vm_internal_pu": "float64", + "loading_percent": "float64" + }, + "orient": "split" + }, + "res_impedance": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_from_mw\",\"q_from_mvar\",\"p_to_mw\",\"q_to_mvar\",\"pl_mw\",\"ql_mvar\",\"i_from_ka\",\"i_to_ka\"],\"index\":[],\"data\":[]}", + "dtype": { + "p_from_mw": "float64", + "q_from_mvar": "float64", + "p_to_mw": "float64", + "q_to_mvar": "float64", + "pl_mw": "float64", + "ql_mvar": "float64", + "i_from_ka": "float64", + "i_to_ka": "float64" + }, + "orient": "split" + }, + "res_ext_grid": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64" + }, + "orient": "split" + }, + "res_load": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[0,1,2],\"data\":[[10.0,7.0],[10.0,7.0],[10.0,7.0]]}", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64" + }, + "orient": "split" + }, + "res_sgen": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64" + }, + "orient": "split" + }, + "res_storage": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64" + }, + "orient": "split" + }, + "res_shunt": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\",\"vm_pu\"],\"index\":[],\"data\":[]}", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64", + "vm_pu": "float64" + }, + "orient": "split" + }, + "res_gen": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\",\"va_degree\",\"vm_pu\"],\"index\":[0,1],\"data\":[[10.0,118.791243672370911,-0.845445168673926,1.02],[21.729831330858325,-116.839935541152954,0.0,1.02]]}", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64", + "va_degree": "float64", + "vm_pu": "float64" + }, + "orient": "split" + }, + "res_ward": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\",\"vm_pu\"],\"index\":[],\"data\":[]}", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64", + "vm_pu": "float64" + }, + "orient": "split" + }, + "res_xward": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\",\"vm_pu\",\"va_internal_degree\",\"vm_internal_pu\"],\"index\":[],\"data\":[]}", + "dtype": { + 
"p_mw": "float64", + "q_mvar": "float64", + "vm_pu": "float64", + "va_internal_degree": "float64", + "vm_internal_pu": "float64" + }, + "orient": "split" + }, + "res_dcline": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_from_mw\",\"q_from_mvar\",\"p_to_mw\",\"q_to_mvar\",\"pl_mw\",\"vm_from_pu\",\"va_from_degree\",\"vm_to_pu\",\"va_to_degree\"],\"index\":[],\"data\":[]}", + "dtype": { + "p_from_mw": "float64", + "q_from_mvar": "float64", + "p_to_mw": "float64", + "q_to_mvar": "float64", + "pl_mw": "float64", + "vm_from_pu": "float64", + "va_from_degree": "float64", + "vm_to_pu": "float64", + "va_to_degree": "float64" + }, + "orient": "split" + }, + "user_pf_options": {}, + "OPF_converged": false + } +} diff --git a/grid2op/data_test/5bus_example_diff_name/chronics/0/maintenance_forecasted.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/hazards.csv.bz2 similarity index 100% rename from grid2op/data_test/5bus_example_diff_name/chronics/0/maintenance_forecasted.csv.bz2 rename to grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/hazards.csv.bz2 diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/init_state.json b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/init_state.json new file mode 100644 index 000000000..e822a78b8 --- /dev/null +++ b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/init_state.json @@ -0,0 +1,3 @@ +{ + "set_bus": {"lines_or_id": [["0_2_1", 2], ["0_3_2", 1]], "loads_id": [["load_0_0", 2]]} +} \ No newline at end of file diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/load_p.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/load_p.csv.bz2 new file mode 100644 index 000000000..df894b76b Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/load_p.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/load_p_forecasted.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/load_p_forecasted.csv.bz2 new file mode 100644 index 000000000..d91236226 Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/load_p_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/load_q.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/load_q.csv.bz2 new file mode 100644 index 000000000..d401840ad Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/load_q.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/load_q_forecasted.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/load_q_forecasted.csv.bz2 new file mode 100644 index 000000000..494dcd68f Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/load_q_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/maintenance.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/maintenance.csv.bz2 new file mode 100644 index 000000000..5257b64d8 Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/maintenance.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/maintenance_forecasted.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/maintenance_forecasted.csv.bz2 new file mode 100644 index 000000000..5257b64d8 Binary files 
/dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/maintenance_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/maintenance_meta.json b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/maintenance_meta.json new file mode 100644 index 000000000..e6faa8ec2 --- /dev/null +++ b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/maintenance_meta.json @@ -0,0 +1,7 @@ +{ + "maintenance_starting_hour": 9 , + "maintenance_ending_hour": 17, + "line_to_maintenance": ["0_1_0", "2_3_5"], + "daily_proba_per_month_maintenance": [0.0, 0.0, 0.0, 0.02, 0.02, 0.03, 0.05, 0.06, 0.03, 0.02, 0.0, 0.0], + "max_daily_number_per_month_maintenance": [0, 0, 0, 1, 1, 2, 2, 2, 1, 1, 0, 0] +} diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/prod_p.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/prod_p.csv.bz2 new file mode 100644 index 000000000..45ae98a8e Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/prod_p.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/prod_p_forecasted.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/prod_p_forecasted.csv.bz2 new file mode 100644 index 000000000..555c0fdde Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/prod_p_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/prod_v.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/prod_v.csv.bz2 new file mode 100644 index 000000000..9fde13d8a Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/prod_v.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/prod_v_forecasted.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/prod_v_forecasted.csv.bz2 new file mode 100644 index 000000000..c685c39c0 Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/0/prod_v_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/hazards.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/hazards.csv.bz2 new file mode 100644 index 000000000..5257b64d8 Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/hazards.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/init_state.json b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/init_state.json new file mode 100644 index 000000000..9a4c0b72f --- /dev/null +++ b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/init_state.json @@ -0,0 +1,3 @@ +{ + "set_line_status": [["0_2_1", -1]] +} diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/load_p.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/load_p.csv.bz2 new file mode 100644 index 000000000..99b3b13cc Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/load_p.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/load_p_forecasted.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/load_p_forecasted.csv.bz2 new file mode 100644 index 000000000..26dfdb6cd Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/load_p_forecasted.csv.bz2 differ diff --git 
a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/load_q.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/load_q.csv.bz2 new file mode 100644 index 000000000..baa780869 Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/load_q.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/load_q_forecasted.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/load_q_forecasted.csv.bz2 new file mode 100644 index 000000000..8953fd7a1 Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/load_q_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/maintenance.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/maintenance.csv.bz2 new file mode 100644 index 000000000..5257b64d8 Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/maintenance.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/maintenance_forecasted.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/maintenance_forecasted.csv.bz2 new file mode 100644 index 000000000..5257b64d8 Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/maintenance_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/maintenance_meta.json b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/maintenance_meta.json new file mode 100644 index 000000000..e6faa8ec2 --- /dev/null +++ b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/maintenance_meta.json @@ -0,0 +1,7 @@ +{ + "maintenance_starting_hour": 9 , + "maintenance_ending_hour": 17, + "line_to_maintenance": ["0_1_0", "2_3_5"], + "daily_proba_per_month_maintenance": [0.0, 0.0, 0.0, 0.02, 0.02, 0.03, 0.05, 0.06, 0.03, 0.02, 0.0, 0.0], + "max_daily_number_per_month_maintenance": [0, 0, 0, 1, 1, 2, 2, 2, 1, 1, 0, 0] +} diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/prod_p.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/prod_p.csv.bz2 new file mode 100644 index 000000000..180acf18e Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/prod_p.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/prod_p_forecasted.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/prod_p_forecasted.csv.bz2 new file mode 100644 index 000000000..5cb7f768c Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/prod_p_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/prod_v.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/prod_v.csv.bz2 new file mode 100644 index 000000000..9fde13d8a Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/prod_v.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/prod_v_forecasted.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/prod_v_forecasted.csv.bz2 new file mode 100644 index 000000000..c685c39c0 Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/1/prod_v_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/hazards.csv.bz2 
b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/hazards.csv.bz2 new file mode 100644 index 000000000..5257b64d8 Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/hazards.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/init_state.json b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/init_state.json new file mode 100644 index 000000000..f244dbf6f --- /dev/null +++ b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/init_state.json @@ -0,0 +1,3 @@ +{ + "set_bus": {"lines_or_id": [["0_2_1", 2]], "lines_ex_id": [["2_3_5", 2]]} +} diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/load_p.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/load_p.csv.bz2 new file mode 100644 index 000000000..36a5a5db2 Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/load_p.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/load_p_forecasted.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/load_p_forecasted.csv.bz2 new file mode 100644 index 000000000..c639bd528 Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/load_p_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/load_q.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/load_q.csv.bz2 new file mode 100644 index 000000000..ad307afaf Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/load_q.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/load_q_forecasted.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/load_q_forecasted.csv.bz2 new file mode 100644 index 000000000..4106e2b6b Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/load_q_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/maintenance.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/maintenance.csv.bz2 new file mode 100644 index 000000000..5257b64d8 Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/maintenance.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/maintenance_forecasted.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/maintenance_forecasted.csv.bz2 new file mode 100644 index 000000000..5257b64d8 Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/maintenance_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/maintenance_meta.json b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/maintenance_meta.json new file mode 100644 index 000000000..e6faa8ec2 --- /dev/null +++ b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/maintenance_meta.json @@ -0,0 +1,7 @@ +{ + "maintenance_starting_hour": 9 , + "maintenance_ending_hour": 17, + "line_to_maintenance": ["0_1_0", "2_3_5"], + "daily_proba_per_month_maintenance": [0.0, 0.0, 0.0, 0.02, 0.02, 0.03, 0.05, 0.06, 0.03, 0.02, 0.0, 0.0], + "max_daily_number_per_month_maintenance": [0, 0, 0, 1, 1, 2, 2, 2, 1, 1, 0, 0] +} diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/prod_p.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/prod_p.csv.bz2 new file mode 
100644 index 000000000..ce04dcfee Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/prod_p.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/prod_p_forecasted.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/prod_p_forecasted.csv.bz2 new file mode 100644 index 000000000..b191b116d Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/prod_p_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/prod_v.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/prod_v.csv.bz2 new file mode 100644 index 000000000..9fde13d8a Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/prod_v.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/prod_v_forecasted.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/prod_v_forecasted.csv.bz2 new file mode 100644 index 000000000..c685c39c0 Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/prod_v_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/000/start_datetime.info b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/start_datetime.info similarity index 100% rename from grid2op/data_test/multimix/case14_002/chronics/000/start_datetime.info rename to grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/start_datetime.info diff --git a/grid2op/data_test/multimix/case14_002/chronics/000/time_interval.info b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/time_interval.info similarity index 100% rename from grid2op/data_test/multimix/case14_002/chronics/000/time_interval.info rename to grid2op/data_test/5bus_example_act_topo_set_init/chronics/2/time_interval.info diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/hazards.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/hazards.csv.bz2 new file mode 100644 index 000000000..5257b64d8 Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/hazards.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/init_state.json b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/init_state.json new file mode 100644 index 000000000..d46062e0d --- /dev/null +++ b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/init_state.json @@ -0,0 +1,4 @@ +{ + "change_bus": {"lines_or_id": ["0_2_1", "0_3_2"], "loads_id": ["load_0_0"]}, + "change_line_status": ["2_3_5"] +} diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/load_p.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/load_p.csv.bz2 new file mode 100644 index 000000000..c4dcdc721 Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/load_p.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/load_p_forecasted.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/load_p_forecasted.csv.bz2 new file mode 100644 index 000000000..f9a76fa2b Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/load_p_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/load_q.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/load_q.csv.bz2 new file mode 100644 index 
000000000..75859b6ff Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/load_q.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/load_q_forecasted.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/load_q_forecasted.csv.bz2 new file mode 100644 index 000000000..079926332 Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/load_q_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/maintenance.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/maintenance.csv.bz2 new file mode 100644 index 000000000..5257b64d8 Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/maintenance.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/maintenance_forecasted.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/maintenance_forecasted.csv.bz2 new file mode 100644 index 000000000..5257b64d8 Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/maintenance_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/maintenance_meta.json b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/maintenance_meta.json new file mode 100644 index 000000000..e6faa8ec2 --- /dev/null +++ b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/maintenance_meta.json @@ -0,0 +1,7 @@ +{ + "maintenance_starting_hour": 9 , + "maintenance_ending_hour": 17, + "line_to_maintenance": ["0_1_0", "2_3_5"], + "daily_proba_per_month_maintenance": [0.0, 0.0, 0.0, 0.02, 0.02, 0.03, 0.05, 0.06, 0.03, 0.02, 0.0, 0.0], + "max_daily_number_per_month_maintenance": [0, 0, 0, 1, 1, 2, 2, 2, 1, 1, 0, 0] +} diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/prod_p.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/prod_p.csv.bz2 new file mode 100644 index 000000000..b5b19653c Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/prod_p.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/prod_p_forecasted.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/prod_p_forecasted.csv.bz2 new file mode 100644 index 000000000..7e5d59968 Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/prod_p_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/prod_v.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/prod_v.csv.bz2 new file mode 100644 index 000000000..9fde13d8a Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/prod_v.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/prod_v_forecasted.csv.bz2 b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/prod_v_forecasted.csv.bz2 new file mode 100644 index 000000000..c685c39c0 Binary files /dev/null and b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/prod_v_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/001/start_datetime.info b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/start_datetime.info similarity index 100% rename from grid2op/data_test/multimix/case14_002/chronics/001/start_datetime.info rename to 
grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/start_datetime.info diff --git a/grid2op/data_test/multimix/case14_002/chronics/001/time_interval.info b/grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/time_interval.info similarity index 100% rename from grid2op/data_test/multimix/case14_002/chronics/001/time_interval.info rename to grid2op/data_test/5bus_example_act_topo_set_init/chronics/3/time_interval.info diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/config.py b/grid2op/data_test/5bus_example_act_topo_set_init/config.py new file mode 100644 index 000000000..1ec901a06 --- /dev/null +++ b/grid2op/data_test/5bus_example_act_topo_set_init/config.py @@ -0,0 +1,19 @@ +from grid2op.Action import TopologyAction +from grid2op.Reward import L2RPNReward +from grid2op.Rules import DefaultRules +from grid2op.Chronics import Multifolder +from grid2op.Chronics import GridStateFromFileWithForecasts +from grid2op.Backend import PandaPowerBackend + +config = { + "backend": PandaPowerBackend, + "action_class": TopologyAction, + "observation_class": None, + "reward_class": L2RPNReward, + "gamerules_class": DefaultRules, + "chronics_class": Multifolder, + "grid_value_class": GridStateFromFileWithForecasts, + "volagecontroler_class": None, + "thermal_limits": None, + "names_chronics_to_grid": None, +} diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/grid.json b/grid2op/data_test/5bus_example_act_topo_set_init/grid.json new file mode 100644 index 000000000..b94667b9c --- /dev/null +++ b/grid2op/data_test/5bus_example_act_topo_set_init/grid.json @@ -0,0 +1,1772 @@ +{ + "_module": "pandapower.auxiliary", + "_class": "pandapowerNet", + "_object": { + "bus": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"vn_kv\",\"type\",\"zone\",\"in_service\"],\"index\":[0,1,2,3,4],\"data\":[[\"substation_1\",100.0,\"b\",null,true],[\"substation_2\",100.0,\"b\",null,true],[\"substation_3\",100.0,\"b\",null,true],[\"substation_4\",100.0,\"b\",null,true],[\"substation_5\",100.0,\"b\",null,true]]}", + "orient": "split", + "dtype": { + "name": "object", + "vn_kv": "float64", + "type": "object", + "zone": "object", + "in_service": "bool" + } + }, + "load": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"bus\",\"p_mw\",\"q_mvar\",\"const_z_percent\",\"const_i_percent\",\"sn_mva\",\"scaling\",\"in_service\",\"type\"],\"index\":[0,1,2],\"data\":[[\"load_0_0\",0,10.0,7.0,0.0,0.0,null,1.0,true,null],[\"load_3_1\",3,10.0,7.0,0.0,0.0,null,1.0,true,null],[\"load_4_2\",4,10.0,7.0,0.0,0.0,null,1.0,true,null]]}", + "orient": "split", + "dtype": { + "name": "object", + "bus": "uint32", + "p_mw": "float64", + "q_mvar": "float64", + "const_z_percent": "float64", + "const_i_percent": "float64", + "sn_mva": "float64", + "scaling": "float64", + "in_service": "bool", + "type": "object" + } + }, + "sgen": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"bus\",\"p_mw\",\"q_mvar\",\"sn_mva\",\"scaling\",\"in_service\",\"type\",\"current_source\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "bus": "int64", + "p_mw": "float64", + "q_mvar": "float64", + "sn_mva": "float64", + "scaling": "float64", + "in_service": "bool", + "type": "object", + "current_source": "bool" + } + }, + "motor": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": 
"{\"columns\":[\"name\",\"bus\",\"pn_mech_mw\",\"loading_percent\",\"cos_phi\",\"cos_phi_n\",\"efficiency_percent\",\"efficiency_n_percent\",\"lrc_pu\",\"vn_kv\",\"scaling\",\"in_service\",\"rx\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "bus": "int64", + "pn_mech_mw": "float64", + "loading_percent": "float64", + "cos_phi": "float64", + "cos_phi_n": "float64", + "efficiency_percent": "float64", + "efficiency_n_percent": "float64", + "lrc_pu": "float64", + "vn_kv": "float64", + "scaling": "float64", + "in_service": "bool", + "rx": "float64" + } + }, + "asymmetric_load": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"bus\",\"p_a_mw\",\"q_a_mvar\",\"p_b_mw\",\"q_b_mvar\",\"p_c_mw\",\"q_c_mvar\",\"sn_mva\",\"scaling\",\"in_service\",\"type\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "bus": "uint32", + "p_a_mw": "float64", + "q_a_mvar": "float64", + "p_b_mw": "float64", + "q_b_mvar": "float64", + "p_c_mw": "float64", + "q_c_mvar": "float64", + "sn_mva": "float64", + "scaling": "float64", + "in_service": "bool", + "type": "object" + } + }, + "asymmetric_sgen": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"bus\",\"p_a_mw\",\"q_a_mvar\",\"p_b_mw\",\"q_b_mvar\",\"p_c_mw\",\"q_c_mvar\",\"sn_mva\",\"scaling\",\"in_service\",\"type\",\"current_source\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "bus": "int64", + "p_a_mw": "float64", + "q_a_mvar": "float64", + "p_b_mw": "float64", + "q_b_mvar": "float64", + "p_c_mw": "float64", + "q_c_mvar": "float64", + "sn_mva": "float64", + "scaling": "float64", + "in_service": "bool", + "type": "object", + "current_source": "bool" + } + }, + "storage": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"bus\",\"p_mw\",\"q_mvar\",\"sn_mva\",\"soc_percent\",\"min_e_mwh\",\"max_e_mwh\",\"scaling\",\"in_service\",\"type\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "bus": "int64", + "p_mw": "float64", + "q_mvar": "float64", + "sn_mva": "float64", + "soc_percent": "float64", + "min_e_mwh": "float64", + "max_e_mwh": "float64", + "scaling": "float64", + "in_service": "bool", + "type": "object" + } + }, + "gen": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"bus\",\"p_mw\",\"vm_pu\",\"sn_mva\",\"min_q_mvar\",\"max_q_mvar\",\"scaling\",\"slack\",\"in_service\",\"type\",\"slack_weight\"],\"index\":[0,1],\"data\":[[\"gen_0_0\",0,10.0,1.02,null,null,null,1.0,false,true,null,0.0],[\"gen_1_1\",1,20.0,1.02,null,null,null,1.0,true,true,null,1.0]]}", + "orient": "split", + "dtype": { + "name": "object", + "bus": "uint32", + "p_mw": "float64", + "vm_pu": "float64", + "sn_mva": "float64", + "min_q_mvar": "float64", + "max_q_mvar": "float64", + "scaling": "float64", + "slack": "bool", + "in_service": "bool", + "type": "object", + "slack_weight": "float64" + } + }, + "switch": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"bus\",\"element\",\"et\",\"type\",\"closed\",\"name\",\"z_ohm\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "bus": "int64", + "element": "int64", + "et": "object", + "type": "object", + "closed": "bool", + "name": "object", + "z_ohm": "float64" + } + }, + "shunt": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + 
"_object": "{\"columns\":[\"bus\",\"name\",\"q_mvar\",\"p_mw\",\"vn_kv\",\"step\",\"max_step\",\"in_service\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "bus": "uint32", + "name": "object", + "q_mvar": "float64", + "p_mw": "float64", + "vn_kv": "float64", + "step": "uint32", + "max_step": "uint32", + "in_service": "bool" + } + }, + "ext_grid": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"bus\",\"vm_pu\",\"va_degree\",\"in_service\",\"slack_weight\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "bus": "uint32", + "vm_pu": "float64", + "va_degree": "float64", + "in_service": "bool", + "slack_weight": "float64" + } + }, + "line": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"std_type\",\"from_bus\",\"to_bus\",\"length_km\",\"r_ohm_per_km\",\"x_ohm_per_km\",\"c_nf_per_km\",\"g_us_per_km\",\"max_i_ka\",\"df\",\"parallel\",\"type\",\"in_service\"],\"index\":[0,1,2,3,4,5,6,7],\"data\":[[null,\"NAYY 4x50 SE\",0,1,4.0,0.642,0.083,210.0,0.0,0.6,1.0,1,\"cs\",true],[\"0_2_2\",\"NAYY 4x50 SE\",0,2,4.47,0.642,0.083,210.0,0.0,0.22,1.0,1,\"cs\",true],[\"0_3_3\",\"NAYY 4x50 SE\",0,3,5.65,0.642,0.083,210.0,0.0,0.16,1.0,1,\"cs\",true],[\"0_4_4\",\"NAYY 4x50 SE\",0,4,4.0,0.642,0.083,210.0,0.0,0.16,1.0,1,\"cs\",true],[\"1_2_5\",\"NAYY 4x50 SE\",1,2,2.0,0.642,0.083,210.0,0.0,0.6,1.0,1,\"cs\",true],[\"2_3_6\",\"NAYY 4x50 SE\",2,3,2.0,0.642,0.083,210.0,0.0,0.3,1.0,1,\"cs\",true],[\"2_3_7\",\"NAYY 4x50 SE\",2,3,2.0,0.642,0.083,210.0,0.0,0.3,1.0,1,\"cs\",true],[\"3_4_8\",\"NAYY 4x50 SE\",3,4,4.0,0.642,0.083,210.0,0.0,0.16,1.0,1,\"cs\",true]]}", + "orient": "split", + "dtype": { + "name": "object", + "std_type": "object", + "from_bus": "uint32", + "to_bus": "uint32", + "length_km": "float64", + "r_ohm_per_km": "float64", + "x_ohm_per_km": "float64", + "c_nf_per_km": "float64", + "g_us_per_km": "float64", + "max_i_ka": "float64", + "df": "float64", + "parallel": "uint32", + "type": "object", + "in_service": "bool" + } + }, + "trafo": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"std_type\",\"hv_bus\",\"lv_bus\",\"sn_mva\",\"vn_hv_kv\",\"vn_lv_kv\",\"vk_percent\",\"vkr_percent\",\"pfe_kw\",\"i0_percent\",\"shift_degree\",\"tap_side\",\"tap_neutral\",\"tap_min\",\"tap_max\",\"tap_step_percent\",\"tap_step_degree\",\"tap_pos\",\"tap_phase_shifter\",\"parallel\",\"df\",\"in_service\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "std_type": "object", + "hv_bus": "uint32", + "lv_bus": "uint32", + "sn_mva": "float64", + "vn_hv_kv": "float64", + "vn_lv_kv": "float64", + "vk_percent": "float64", + "vkr_percent": "float64", + "pfe_kw": "float64", + "i0_percent": "float64", + "shift_degree": "float64", + "tap_side": "object", + "tap_neutral": "int32", + "tap_min": "int32", + "tap_max": "int32", + "tap_step_percent": "float64", + "tap_step_degree": "float64", + "tap_pos": "int32", + "tap_phase_shifter": "bool", + "parallel": "uint32", + "df": "float64", + "in_service": "bool" + } + }, + "trafo3w": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": 
"{\"columns\":[\"name\",\"std_type\",\"hv_bus\",\"mv_bus\",\"lv_bus\",\"sn_hv_mva\",\"sn_mv_mva\",\"sn_lv_mva\",\"vn_hv_kv\",\"vn_mv_kv\",\"vn_lv_kv\",\"vk_hv_percent\",\"vk_mv_percent\",\"vk_lv_percent\",\"vkr_hv_percent\",\"vkr_mv_percent\",\"vkr_lv_percent\",\"pfe_kw\",\"i0_percent\",\"shift_mv_degree\",\"shift_lv_degree\",\"tap_side\",\"tap_neutral\",\"tap_min\",\"tap_max\",\"tap_step_percent\",\"tap_step_degree\",\"tap_pos\",\"tap_at_star_point\",\"in_service\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "std_type": "object", + "hv_bus": "uint32", + "mv_bus": "uint32", + "lv_bus": "uint32", + "sn_hv_mva": "float64", + "sn_mv_mva": "float64", + "sn_lv_mva": "float64", + "vn_hv_kv": "float64", + "vn_mv_kv": "float64", + "vn_lv_kv": "float64", + "vk_hv_percent": "float64", + "vk_mv_percent": "float64", + "vk_lv_percent": "float64", + "vkr_hv_percent": "float64", + "vkr_mv_percent": "float64", + "vkr_lv_percent": "float64", + "pfe_kw": "float64", + "i0_percent": "float64", + "shift_mv_degree": "float64", + "shift_lv_degree": "float64", + "tap_side": "object", + "tap_neutral": "int32", + "tap_min": "int32", + "tap_max": "int32", + "tap_step_percent": "float64", + "tap_step_degree": "float64", + "tap_pos": "int32", + "tap_at_star_point": "bool", + "in_service": "bool" + } + }, + "impedance": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"from_bus\",\"to_bus\",\"rft_pu\",\"xft_pu\",\"rtf_pu\",\"xtf_pu\",\"sn_mva\",\"in_service\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "from_bus": "uint32", + "to_bus": "uint32", + "rft_pu": "float64", + "xft_pu": "float64", + "rtf_pu": "float64", + "xtf_pu": "float64", + "sn_mva": "float64", + "in_service": "bool" + } + }, + "dcline": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"from_bus\",\"to_bus\",\"p_mw\",\"loss_percent\",\"loss_mw\",\"vm_from_pu\",\"vm_to_pu\",\"max_p_mw\",\"min_q_from_mvar\",\"min_q_to_mvar\",\"max_q_from_mvar\",\"max_q_to_mvar\",\"in_service\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "from_bus": "uint32", + "to_bus": "uint32", + "p_mw": "float64", + "loss_percent": "float64", + "loss_mw": "float64", + "vm_from_pu": "float64", + "vm_to_pu": "float64", + "max_p_mw": "float64", + "min_q_from_mvar": "float64", + "min_q_to_mvar": "float64", + "max_q_from_mvar": "float64", + "max_q_to_mvar": "float64", + "in_service": "bool" + } + }, + "ward": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"bus\",\"ps_mw\",\"qs_mvar\",\"qz_mvar\",\"pz_mw\",\"in_service\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "bus": "uint32", + "ps_mw": "float64", + "qs_mvar": "float64", + "qz_mvar": "float64", + "pz_mw": "float64", + "in_service": "bool" + } + }, + "xward": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"bus\",\"ps_mw\",\"qs_mvar\",\"qz_mvar\",\"pz_mw\",\"r_ohm\",\"x_ohm\",\"vm_pu\",\"in_service\",\"slack_weight\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "bus": "uint32", + "ps_mw": "float64", + "qs_mvar": "float64", + "qz_mvar": "float64", + "pz_mw": "float64", + "r_ohm": "float64", + "x_ohm": "float64", + "vm_pu": "float64", + "in_service": "bool", + "slack_weight": "float64" + } + }, + "measurement": { + "_module": 
"pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"measurement_type\",\"element_type\",\"element\",\"value\",\"std_dev\",\"side\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "measurement_type": "object", + "element_type": "object", + "element": "uint32", + "value": "float64", + "std_dev": "float64", + "side": "object" + } + }, + "pwl_cost": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"power_type\",\"element\",\"et\",\"points\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "power_type": "object", + "element": "uint32", + "et": "object", + "points": "object" + } + }, + "poly_cost": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"element\",\"et\",\"cp0_eur\",\"cp1_eur_per_mw\",\"cp2_eur_per_mw2\",\"cq0_eur\",\"cq1_eur_per_mvar\",\"cq2_eur_per_mvar2\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "element": "uint32", + "et": "object", + "cp0_eur": "float64", + "cp1_eur_per_mw": "float64", + "cp2_eur_per_mw2": "float64", + "cq0_eur": "float64", + "cq1_eur_per_mvar": "float64", + "cq2_eur_per_mvar2": "float64" + } + }, + "characteristic": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"object\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "object": "object" + } + }, + "controller": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"object\",\"in_service\",\"order\",\"level\",\"initial_run\",\"recycle\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "object": "object", + "in_service": "bool", + "order": "float64", + "level": "object", + "initial_run": "bool", + "recycle": "object" + } + }, + "line_geodata": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"coords\"],\"index\":[0,1,2,3,4,5,6,7],\"data\":[[[[0,0],[0,4]]],[[[0,0],[2,4]]],[[[0,0],[4,4]]],[[[0,0],[4,0]]],[[[0,4],[2,4]]],[[[2,4],[3,4.2],[4,4]]],[[[2,4],[3,3.8],[4,4]]],[[[4,4],[4,0]]]]}", + "orient": "split", + "dtype": { + "coords": "object" + } + }, + "bus_geodata": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"x\",\"y\",\"coords\"],\"index\":[0,1,2,3,4],\"data\":[[0.0,0.0,null],[0.0,4.0,null],[2.0,4.0,null],[4.0,4.0,null],[4.0,0.0,null]]}", + "orient": "split", + "dtype": { + "x": "float64", + "y": "float64", + "coords": "object" + } + }, + "version": "2.8.0", + "converged": true, + "name": "5bus", + "f_hz": 50.0, + "sn_mva": 1, + "std_types": { + "line": { + "NAYY 4x50 SE": { + "c_nf_per_km": 210, + "r_ohm_per_km": 0.642, + "x_ohm_per_km": 0.083, + "max_i_ka": 0.142, + "type": "cs", + "q_mm2": 50, + "alpha": 0.00403 + }, + "NAYY 4x120 SE": { + "c_nf_per_km": 264, + "r_ohm_per_km": 0.225, + "x_ohm_per_km": 0.08, + "max_i_ka": 0.242, + "type": "cs", + "q_mm2": 120, + "alpha": 0.00403 + }, + "NAYY 4x150 SE": { + "c_nf_per_km": 261, + "r_ohm_per_km": 0.208, + "x_ohm_per_km": 0.08, + "max_i_ka": 0.27, + "type": "cs", + "q_mm2": 150, + "alpha": 0.00403 + }, + "NA2XS2Y 1x95 RM/25 12/20 kV": { + "c_nf_per_km": 216, + "r_ohm_per_km": 0.313, + "x_ohm_per_km": 0.132, + "max_i_ka": 0.252, + "type": "cs", + "q_mm2": 95, + "alpha": 0.00403 + }, + "NA2XS2Y 1x185 RM/25 12/20 kV": { + "c_nf_per_km": 273, + "r_ohm_per_km": 0.161, + "x_ohm_per_km": 0.117, + "max_i_ka": 0.362, + "type": "cs", + "q_mm2": 185, + "alpha": 0.00403 + }, + "NA2XS2Y 1x240 
RM/25 12/20 kV": { + "c_nf_per_km": 304, + "r_ohm_per_km": 0.122, + "x_ohm_per_km": 0.112, + "max_i_ka": 0.421, + "type": "cs", + "q_mm2": 240, + "alpha": 0.00403 + }, + "NA2XS2Y 1x95 RM/25 6/10 kV": { + "c_nf_per_km": 315, + "r_ohm_per_km": 0.313, + "x_ohm_per_km": 0.123, + "max_i_ka": 0.249, + "type": "cs", + "q_mm2": 95, + "alpha": 0.00403 + }, + "NA2XS2Y 1x185 RM/25 6/10 kV": { + "c_nf_per_km": 406, + "r_ohm_per_km": 0.161, + "x_ohm_per_km": 0.11, + "max_i_ka": 0.358, + "type": "cs", + "q_mm2": 185, + "alpha": 0.00403 + }, + "NA2XS2Y 1x240 RM/25 6/10 kV": { + "c_nf_per_km": 456, + "r_ohm_per_km": 0.122, + "x_ohm_per_km": 0.105, + "max_i_ka": 0.416, + "type": "cs", + "q_mm2": 240, + "alpha": 0.00403 + }, + "NA2XS2Y 1x150 RM/25 12/20 kV": { + "c_nf_per_km": 250, + "r_ohm_per_km": 0.206, + "x_ohm_per_km": 0.116, + "max_i_ka": 0.319, + "type": "cs", + "q_mm2": 150, + "alpha": 0.00403 + }, + "NA2XS2Y 1x120 RM/25 12/20 kV": { + "c_nf_per_km": 230, + "r_ohm_per_km": 0.253, + "x_ohm_per_km": 0.119, + "max_i_ka": 0.283, + "type": "cs", + "q_mm2": 120, + "alpha": 0.00403 + }, + "NA2XS2Y 1x70 RM/25 12/20 kV": { + "c_nf_per_km": 190, + "r_ohm_per_km": 0.443, + "x_ohm_per_km": 0.132, + "max_i_ka": 0.22, + "type": "cs", + "q_mm2": 70, + "alpha": 0.00403 + }, + "NA2XS2Y 1x150 RM/25 6/10 kV": { + "c_nf_per_km": 360, + "r_ohm_per_km": 0.206, + "x_ohm_per_km": 0.11, + "max_i_ka": 0.315, + "type": "cs", + "q_mm2": 150, + "alpha": 0.00403 + }, + "NA2XS2Y 1x120 RM/25 6/10 kV": { + "c_nf_per_km": 340, + "r_ohm_per_km": 0.253, + "x_ohm_per_km": 0.113, + "max_i_ka": 0.28, + "type": "cs", + "q_mm2": 120, + "alpha": 0.00403 + }, + "NA2XS2Y 1x70 RM/25 6/10 kV": { + "c_nf_per_km": 280, + "r_ohm_per_km": 0.443, + "x_ohm_per_km": 0.123, + "max_i_ka": 0.217, + "type": "cs", + "q_mm2": 70, + "alpha": 0.00403 + }, + "N2XS(FL)2Y 1x120 RM/35 64/110 kV": { + "c_nf_per_km": 112, + "r_ohm_per_km": 0.153, + "x_ohm_per_km": 0.166, + "max_i_ka": 0.366, + "type": "cs", + "q_mm2": 120, + "alpha": 0.00393 + }, + "N2XS(FL)2Y 1x185 RM/35 64/110 kV": { + "c_nf_per_km": 125, + "r_ohm_per_km": 0.099, + "x_ohm_per_km": 0.156, + "max_i_ka": 0.457, + "type": "cs", + "q_mm2": 185, + "alpha": 0.00393 + }, + "N2XS(FL)2Y 1x240 RM/35 64/110 kV": { + "c_nf_per_km": 135, + "r_ohm_per_km": 0.075, + "x_ohm_per_km": 0.149, + "max_i_ka": 0.526, + "type": "cs", + "q_mm2": 240, + "alpha": 0.00393 + }, + "N2XS(FL)2Y 1x300 RM/35 64/110 kV": { + "c_nf_per_km": 144, + "r_ohm_per_km": 0.06, + "x_ohm_per_km": 0.144, + "max_i_ka": 0.588, + "type": "cs", + "q_mm2": 300, + "alpha": 0.00393 + }, + "15-AL1/3-ST1A 0.4": { + "c_nf_per_km": 11, + "r_ohm_per_km": 1.8769, + "x_ohm_per_km": 0.35, + "max_i_ka": 0.105, + "type": "ol", + "q_mm2": 16, + "alpha": 0.00403 + }, + "24-AL1/4-ST1A 0.4": { + "c_nf_per_km": 11.25, + "r_ohm_per_km": 1.2012, + "x_ohm_per_km": 0.335, + "max_i_ka": 0.14, + "type": "ol", + "q_mm2": 24, + "alpha": 0.00403 + }, + "48-AL1/8-ST1A 0.4": { + "c_nf_per_km": 12.2, + "r_ohm_per_km": 0.5939, + "x_ohm_per_km": 0.3, + "max_i_ka": 0.21, + "type": "ol", + "q_mm2": 48, + "alpha": 0.00403 + }, + "94-AL1/15-ST1A 0.4": { + "c_nf_per_km": 13.2, + "r_ohm_per_km": 0.306, + "x_ohm_per_km": 0.29, + "max_i_ka": 0.35, + "type": "ol", + "q_mm2": 94, + "alpha": 0.00403 + }, + "34-AL1/6-ST1A 10.0": { + "c_nf_per_km": 9.7, + "r_ohm_per_km": 0.8342, + "x_ohm_per_km": 0.36, + "max_i_ka": 0.17, + "type": "ol", + "q_mm2": 34, + "alpha": 0.00403 + }, + "48-AL1/8-ST1A 10.0": { + "c_nf_per_km": 10.1, + "r_ohm_per_km": 0.5939, + "x_ohm_per_km": 0.35, + "max_i_ka": 
0.21, + "type": "ol", + "q_mm2": 48, + "alpha": 0.00403 + }, + "70-AL1/11-ST1A 10.0": { + "c_nf_per_km": 10.4, + "r_ohm_per_km": 0.4132, + "x_ohm_per_km": 0.339, + "max_i_ka": 0.29, + "type": "ol", + "q_mm2": 70, + "alpha": 0.00403 + }, + "94-AL1/15-ST1A 10.0": { + "c_nf_per_km": 10.75, + "r_ohm_per_km": 0.306, + "x_ohm_per_km": 0.33, + "max_i_ka": 0.35, + "type": "ol", + "q_mm2": 94, + "alpha": 0.00403 + }, + "122-AL1/20-ST1A 10.0": { + "c_nf_per_km": 11.1, + "r_ohm_per_km": 0.2376, + "x_ohm_per_km": 0.323, + "max_i_ka": 0.41, + "type": "ol", + "q_mm2": 122, + "alpha": 0.00403 + }, + "149-AL1/24-ST1A 10.0": { + "c_nf_per_km": 11.25, + "r_ohm_per_km": 0.194, + "x_ohm_per_km": 0.315, + "max_i_ka": 0.47, + "type": "ol", + "q_mm2": 149, + "alpha": 0.00403 + }, + "34-AL1/6-ST1A 20.0": { + "c_nf_per_km": 9.15, + "r_ohm_per_km": 0.8342, + "x_ohm_per_km": 0.382, + "max_i_ka": 0.17, + "type": "ol", + "q_mm2": 34, + "alpha": 0.00403 + }, + "48-AL1/8-ST1A 20.0": { + "c_nf_per_km": 9.5, + "r_ohm_per_km": 0.5939, + "x_ohm_per_km": 0.372, + "max_i_ka": 0.21, + "type": "ol", + "q_mm2": 48, + "alpha": 0.00403 + }, + "70-AL1/11-ST1A 20.0": { + "c_nf_per_km": 9.7, + "r_ohm_per_km": 0.4132, + "x_ohm_per_km": 0.36, + "max_i_ka": 0.29, + "type": "ol", + "q_mm2": 70, + "alpha": 0.00403 + }, + "94-AL1/15-ST1A 20.0": { + "c_nf_per_km": 10, + "r_ohm_per_km": 0.306, + "x_ohm_per_km": 0.35, + "max_i_ka": 0.35, + "type": "ol", + "q_mm2": 94, + "alpha": 0.00403 + }, + "122-AL1/20-ST1A 20.0": { + "c_nf_per_km": 10.3, + "r_ohm_per_km": 0.2376, + "x_ohm_per_km": 0.344, + "max_i_ka": 0.41, + "type": "ol", + "q_mm2": 122, + "alpha": 0.00403 + }, + "149-AL1/24-ST1A 20.0": { + "c_nf_per_km": 10.5, + "r_ohm_per_km": 0.194, + "x_ohm_per_km": 0.337, + "max_i_ka": 0.47, + "type": "ol", + "q_mm2": 149, + "alpha": 0.00403 + }, + "184-AL1/30-ST1A 20.0": { + "c_nf_per_km": 10.75, + "r_ohm_per_km": 0.1571, + "x_ohm_per_km": 0.33, + "max_i_ka": 0.535, + "type": "ol", + "q_mm2": 184, + "alpha": 0.00403 + }, + "243-AL1/39-ST1A 20.0": { + "c_nf_per_km": 11, + "r_ohm_per_km": 0.1188, + "x_ohm_per_km": 0.32, + "max_i_ka": 0.645, + "type": "ol", + "q_mm2": 243, + "alpha": 0.00403 + }, + "48-AL1/8-ST1A 110.0": { + "c_nf_per_km": 8, + "r_ohm_per_km": 0.5939, + "x_ohm_per_km": 0.46, + "max_i_ka": 0.21, + "type": "ol", + "q_mm2": 48, + "alpha": 0.00403 + }, + "70-AL1/11-ST1A 110.0": { + "c_nf_per_km": 8.4, + "r_ohm_per_km": 0.4132, + "x_ohm_per_km": 0.45, + "max_i_ka": 0.29, + "type": "ol", + "q_mm2": 70, + "alpha": 0.00403 + }, + "94-AL1/15-ST1A 110.0": { + "c_nf_per_km": 8.65, + "r_ohm_per_km": 0.306, + "x_ohm_per_km": 0.44, + "max_i_ka": 0.35, + "type": "ol", + "q_mm2": 94, + "alpha": 0.00403 + }, + "122-AL1/20-ST1A 110.0": { + "c_nf_per_km": 8.5, + "r_ohm_per_km": 0.2376, + "x_ohm_per_km": 0.43, + "max_i_ka": 0.41, + "type": "ol", + "q_mm2": 122, + "alpha": 0.00403 + }, + "149-AL1/24-ST1A 110.0": { + "c_nf_per_km": 8.75, + "r_ohm_per_km": 0.194, + "x_ohm_per_km": 0.41, + "max_i_ka": 0.47, + "type": "ol", + "q_mm2": 149, + "alpha": 0.00403 + }, + "184-AL1/30-ST1A 110.0": { + "c_nf_per_km": 8.8, + "r_ohm_per_km": 0.1571, + "x_ohm_per_km": 0.4, + "max_i_ka": 0.535, + "type": "ol", + "q_mm2": 184, + "alpha": 0.00403 + }, + "243-AL1/39-ST1A 110.0": { + "c_nf_per_km": 9, + "r_ohm_per_km": 0.1188, + "x_ohm_per_km": 0.39, + "max_i_ka": 0.645, + "type": "ol", + "q_mm2": 243, + "alpha": 0.00403 + }, + "305-AL1/39-ST1A 110.0": { + "c_nf_per_km": 9.2, + "r_ohm_per_km": 0.0949, + "x_ohm_per_km": 0.38, + "max_i_ka": 0.74, + "type": "ol", + "q_mm2": 305, 
+ "alpha": 0.00403 + }, + "490-AL1/64-ST1A 110.0": { + "c_nf_per_km": 9.75, + "r_ohm_per_km": 0.059, + "x_ohm_per_km": 0.37, + "max_i_ka": 0.96, + "type": "ol", + "q_mm2": 490, + "alpha": 0.00403 + }, + "679-AL1/86-ST1A 110.0": { + "c_nf_per_km": 9.95, + "r_ohm_per_km": 0.042, + "x_ohm_per_km": 0.36, + "max_i_ka": 0.115, + "type": "ol", + "q_mm2": 679, + "alpha": 0.00403 + }, + "490-AL1/64-ST1A 220.0": { + "c_nf_per_km": 10, + "r_ohm_per_km": 0.059, + "x_ohm_per_km": 0.285, + "max_i_ka": 0.96, + "type": "ol", + "q_mm2": 490, + "alpha": 0.00403 + }, + "679-AL1/86-ST1A 220.0": { + "c_nf_per_km": 11.7, + "r_ohm_per_km": 0.042, + "x_ohm_per_km": 0.275, + "max_i_ka": 0.115, + "type": "ol", + "q_mm2": 679, + "alpha": 0.00403 + }, + "490-AL1/64-ST1A 380.0": { + "c_nf_per_km": 11, + "r_ohm_per_km": 0.059, + "x_ohm_per_km": 0.253, + "max_i_ka": 0.96, + "type": "ol", + "q_mm2": 490, + "alpha": 0.00403 + }, + "679-AL1/86-ST1A 380.0": { + "c_nf_per_km": 14.6, + "r_ohm_per_km": 0.042, + "x_ohm_per_km": 0.25, + "max_i_ka": 0.115, + "type": "ol", + "q_mm2": 679, + "alpha": 0.00403 + } + }, + "trafo": { + "160 MVA 380/110 kV": { + "i0_percent": 0.06, + "pfe_kw": 60, + "vkr_percent": 0.25, + "sn_mva": 160, + "vn_lv_kv": 110.0, + "vn_hv_kv": 380.0, + "vk_percent": 12.2, + "shift_degree": 0, + "vector_group": "Yy0", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -9, + "tap_max": 9, + "tap_step_degree": 0, + "tap_step_percent": 1.5, + "tap_phase_shifter": false + }, + "100 MVA 220/110 kV": { + "i0_percent": 0.06, + "pfe_kw": 55, + "vkr_percent": 0.26, + "sn_mva": 100, + "vn_lv_kv": 110.0, + "vn_hv_kv": 220.0, + "vk_percent": 12.0, + "shift_degree": 0, + "vector_group": "Yy0", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -9, + "tap_max": 9, + "tap_step_degree": 0, + "tap_step_percent": 1.5, + "tap_phase_shifter": false + }, + "63 MVA 110/20 kV": { + "i0_percent": 0.04, + "pfe_kw": 22, + "vkr_percent": 0.32, + "sn_mva": 63, + "vn_lv_kv": 20.0, + "vn_hv_kv": 110.0, + "vk_percent": 18, + "shift_degree": 150, + "vector_group": "YNd5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -9, + "tap_max": 9, + "tap_step_degree": 0, + "tap_step_percent": 1.5, + "tap_phase_shifter": false + }, + "40 MVA 110/20 kV": { + "i0_percent": 0.05, + "pfe_kw": 18, + "vkr_percent": 0.34, + "sn_mva": 40, + "vn_lv_kv": 20.0, + "vn_hv_kv": 110.0, + "vk_percent": 16.2, + "shift_degree": 150, + "vector_group": "YNd5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -9, + "tap_max": 9, + "tap_step_degree": 0, + "tap_step_percent": 1.5, + "tap_phase_shifter": false + }, + "25 MVA 110/20 kV": { + "i0_percent": 0.07, + "pfe_kw": 14, + "vkr_percent": 0.41, + "sn_mva": 25, + "vn_lv_kv": 20.0, + "vn_hv_kv": 110.0, + "vk_percent": 12, + "shift_degree": 150, + "vector_group": "YNd5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -9, + "tap_max": 9, + "tap_step_degree": 0, + "tap_step_percent": 1.5, + "tap_phase_shifter": false + }, + "63 MVA 110/10 kV": { + "sn_mva": 63, + "vn_hv_kv": 110, + "vn_lv_kv": 10, + "vk_percent": 18, + "vkr_percent": 0.32, + "pfe_kw": 22, + "i0_percent": 0.04, + "shift_degree": 150, + "vector_group": "YNd5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -9, + "tap_max": 9, + "tap_step_degree": 0, + "tap_step_percent": 1.5, + "tap_phase_shifter": false + }, + "40 MVA 110/10 kV": { + "sn_mva": 40, + "vn_hv_kv": 110, + "vn_lv_kv": 10, + "vk_percent": 16.2, + "vkr_percent": 0.34, + "pfe_kw": 18, + "i0_percent": 0.05, + "shift_degree": 150, + "vector_group": "YNd5", + "tap_side": "hv", + 
"tap_neutral": 0, + "tap_min": -9, + "tap_max": 9, + "tap_step_degree": 0, + "tap_step_percent": 1.5, + "tap_phase_shifter": false + }, + "25 MVA 110/10 kV": { + "sn_mva": 25, + "vn_hv_kv": 110, + "vn_lv_kv": 10, + "vk_percent": 12, + "vkr_percent": 0.41, + "pfe_kw": 14, + "i0_percent": 0.07, + "shift_degree": 150, + "vector_group": "YNd5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -9, + "tap_max": 9, + "tap_step_degree": 0, + "tap_step_percent": 1.5, + "tap_phase_shifter": false + }, + "0.25 MVA 20/0.4 kV": { + "sn_mva": 0.25, + "vn_hv_kv": 20, + "vn_lv_kv": 0.4, + "vk_percent": 6, + "vkr_percent": 1.44, + "pfe_kw": 0.8, + "i0_percent": 0.32, + "shift_degree": 150, + "vector_group": "Yzn5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -2, + "tap_max": 2, + "tap_step_degree": 0, + "tap_step_percent": 2.5, + "tap_phase_shifter": false + }, + "0.4 MVA 20/0.4 kV": { + "sn_mva": 0.4, + "vn_hv_kv": 20, + "vn_lv_kv": 0.4, + "vk_percent": 6, + "vkr_percent": 1.425, + "pfe_kw": 1.35, + "i0_percent": 0.3375, + "shift_degree": 150, + "vector_group": "Dyn5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -2, + "tap_max": 2, + "tap_step_degree": 0, + "tap_step_percent": 2.5, + "tap_phase_shifter": false + }, + "0.63 MVA 20/0.4 kV": { + "sn_mva": 0.63, + "vn_hv_kv": 20, + "vn_lv_kv": 0.4, + "vk_percent": 6, + "vkr_percent": 1.206, + "pfe_kw": 1.65, + "i0_percent": 0.2619, + "shift_degree": 150, + "vector_group": "Dyn5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -2, + "tap_max": 2, + "tap_step_degree": 0, + "tap_step_percent": 2.5, + "tap_phase_shifter": false + }, + "0.25 MVA 10/0.4 kV": { + "sn_mva": 0.25, + "vn_hv_kv": 10, + "vn_lv_kv": 0.4, + "vk_percent": 4, + "vkr_percent": 1.2, + "pfe_kw": 0.6, + "i0_percent": 0.24, + "shift_degree": 150, + "vector_group": "Dyn5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -2, + "tap_max": 2, + "tap_step_degree": 0, + "tap_step_percent": 2.5, + "tap_phase_shifter": false + }, + "0.4 MVA 10/0.4 kV": { + "sn_mva": 0.4, + "vn_hv_kv": 10, + "vn_lv_kv": 0.4, + "vk_percent": 4, + "vkr_percent": 1.325, + "pfe_kw": 0.95, + "i0_percent": 0.2375, + "shift_degree": 150, + "vector_group": "Dyn5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -2, + "tap_max": 2, + "tap_step_degree": 0, + "tap_step_percent": 2.5, + "tap_phase_shifter": false + }, + "0.63 MVA 10/0.4 kV": { + "sn_mva": 0.63, + "vn_hv_kv": 10, + "vn_lv_kv": 0.4, + "vk_percent": 4, + "vkr_percent": 1.0794, + "pfe_kw": 1.18, + "i0_percent": 0.1873, + "shift_degree": 150, + "vector_group": "Dyn5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -2, + "tap_max": 2, + "tap_step_degree": 0, + "tap_step_percent": 2.5, + "tap_phase_shifter": false + } + }, + "trafo3w": { + "63/25/38 MVA 110/20/10 kV": { + "sn_hv_mva": 63, + "sn_mv_mva": 25, + "sn_lv_mva": 38, + "vn_hv_kv": 110, + "vn_mv_kv": 20, + "vn_lv_kv": 10, + "vk_hv_percent": 10.4, + "vk_mv_percent": 10.4, + "vk_lv_percent": 10.4, + "vkr_hv_percent": 0.28, + "vkr_mv_percent": 0.32, + "vkr_lv_percent": 0.35, + "pfe_kw": 35, + "i0_percent": 0.89, + "shift_mv_degree": 0, + "shift_lv_degree": 0, + "vector_group": "YN0yn0yn0", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -10, + "tap_max": 10, + "tap_step_percent": 1.2 + }, + "63/25/38 MVA 110/10/10 kV": { + "sn_hv_mva": 63, + "sn_mv_mva": 25, + "sn_lv_mva": 38, + "vn_hv_kv": 110, + "vn_mv_kv": 10, + "vn_lv_kv": 10, + "vk_hv_percent": 10.4, + "vk_mv_percent": 10.4, + "vk_lv_percent": 10.4, + "vkr_hv_percent": 0.28, + "vkr_mv_percent": 0.32, + 
"vkr_lv_percent": 0.35, + "pfe_kw": 35, + "i0_percent": 0.89, + "shift_mv_degree": 0, + "shift_lv_degree": 0, + "vector_group": "YN0yn0yn0", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -10, + "tap_max": 10, + "tap_step_percent": 1.2 + } + } + }, + "res_bus": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"vm_pu\",\"va_degree\",\"p_mw\",\"q_mvar\"],\"index\":[0,1,2,3,4],\"data\":[[1.02,-0.845445168673926,0.0,-111.791243672370911],[1.02,0.0,-21.729831330858325,116.839935541152954],[1.019214100496144,-0.409103297622625,0.0,0.0],[1.018637116919488,-0.503470352662766,10.0,7.0],[1.017983079721402,-0.653497665026562,10.0,7.0]]}", + "orient": "split", + "dtype": { + "vm_pu": "float64", + "va_degree": "float64", + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_line": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_from_mw\",\"q_from_mvar\",\"p_to_mw\",\"q_to_mvar\",\"pl_mw\",\"ql_mvar\",\"i_from_ka\",\"i_to_ka\",\"i_ka\",\"vm_from_pu\",\"va_from_degree\",\"vm_to_pu\",\"va_to_degree\",\"loading_percent\"],\"index\":[0,1,2,3,4,5,6,7],\"data\":[[-7.167647147657727,57.480079867900443,8.03525639977348,-60.113463233922118,0.867609252115754,-2.633383366021676,0.327874112511858,0.343286326507116,0.343286326507116,1.02,-0.845445168673926,1.02,0.0,57.214387751185988],[-0.657313913963437,25.969126903729045,0.866078469150186,-29.007927174007612,0.208764555186749,-3.038800270278568,0.147040043868819,0.164393305610081,0.164393305610081,1.02,-0.845445168673926,1.019214100496144,-0.409103297622625,74.724229822763931],[1.64566972119938,15.370129751576128,-1.540268914180618,-19.229415550834709,0.105400807018762,-3.859285799258581,0.087496748884432,0.109338903896103,0.109338903896103,1.02,-0.845445168673926,1.018637116919488,-0.503470352662766,68.336814935064211],[6.179291340421495,12.971907266349552,-6.119076735247816,-15.70424981919658,0.060214605173678,-2.732342552847028,0.081330018729726,0.095589209712924,0.095589209712924,1.02,-0.845445168673926,1.017983079721402,-0.653497665026562,59.743256070577175],[13.694574931085771,-56.726472302863066,-13.283848894885464,55.407854241119566,0.410726036200307,-1.3186180617435,0.330312825878128,0.322760996590474,0.330312825878128,1.02,0.0,1.019214100496144,-0.409103297622625,55.052137646354595],[6.208885212872048,-13.199963533555254,-6.184761786109662,11.833197159642042,0.024123426762386,-1.366766373913212,0.082632108556076,0.075677384410291,0.082632108556076,1.019214100496144,-0.409103297622625,1.018637116919488,-0.503470352662766,27.544036185358689],[6.208885212872048,-13.199963533555254,-6.184761786109662,11.833197159642042,0.024123426762386,-1.366766373913212,0.082632108556076,0.075677384410291,0.082632108556076,1.019214100496144,-0.409103297622625,1.018637116919488,-0.503470352662766,27.544036185358689],[3.909792486391969,-11.436978768449999,-3.88092326475316,8.704249819196738,0.028869221638809,-2.732728949253261,0.068506463438984,0.054050881891821,0.068506463438984,1.018637116919488,-0.503470352662766,1.017983079721402,-0.653497665026562,42.816539649365005]]}", + "orient": "split", + "dtype": { + "p_from_mw": "float64", + "q_from_mvar": "float64", + "p_to_mw": "float64", + "q_to_mvar": "float64", + "pl_mw": "float64", + "ql_mvar": "float64", + "i_from_ka": "float64", + "i_to_ka": "float64", + "i_ka": "float64", + "vm_from_pu": "float64", + "va_from_degree": "float64", + "vm_to_pu": "float64", + "va_to_degree": "float64", + "loading_percent": "float64" + } + }, + 
"res_trafo": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_hv_mw\",\"q_hv_mvar\",\"p_lv_mw\",\"q_lv_mvar\",\"pl_mw\",\"ql_mvar\",\"i_hv_ka\",\"i_lv_ka\",\"vm_hv_pu\",\"va_hv_degree\",\"vm_lv_pu\",\"va_lv_degree\",\"loading_percent\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_hv_mw": "float64", + "q_hv_mvar": "float64", + "p_lv_mw": "float64", + "q_lv_mvar": "float64", + "pl_mw": "float64", + "ql_mvar": "float64", + "i_hv_ka": "float64", + "i_lv_ka": "float64", + "vm_hv_pu": "float64", + "va_hv_degree": "float64", + "vm_lv_pu": "float64", + "va_lv_degree": "float64", + "loading_percent": "float64" + } + }, + "res_trafo3w": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_hv_mw\",\"q_hv_mvar\",\"p_mv_mw\",\"q_mv_mvar\",\"p_lv_mw\",\"q_lv_mvar\",\"pl_mw\",\"ql_mvar\",\"i_hv_ka\",\"i_mv_ka\",\"i_lv_ka\",\"vm_hv_pu\",\"va_hv_degree\",\"vm_mv_pu\",\"va_mv_degree\",\"vm_lv_pu\",\"va_lv_degree\",\"va_internal_degree\",\"vm_internal_pu\",\"loading_percent\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_hv_mw": "float64", + "q_hv_mvar": "float64", + "p_mv_mw": "float64", + "q_mv_mvar": "float64", + "p_lv_mw": "float64", + "q_lv_mvar": "float64", + "pl_mw": "float64", + "ql_mvar": "float64", + "i_hv_ka": "float64", + "i_mv_ka": "float64", + "i_lv_ka": "float64", + "vm_hv_pu": "float64", + "va_hv_degree": "float64", + "vm_mv_pu": "float64", + "va_mv_degree": "float64", + "vm_lv_pu": "float64", + "va_lv_degree": "float64", + "va_internal_degree": "float64", + "vm_internal_pu": "float64", + "loading_percent": "float64" + } + }, + "res_impedance": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_from_mw\",\"q_from_mvar\",\"p_to_mw\",\"q_to_mvar\",\"pl_mw\",\"ql_mvar\",\"i_from_ka\",\"i_to_ka\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_from_mw": "float64", + "q_from_mvar": "float64", + "p_to_mw": "float64", + "q_to_mvar": "float64", + "pl_mw": "float64", + "ql_mvar": "float64", + "i_from_ka": "float64", + "i_to_ka": "float64" + } + }, + "res_ext_grid": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_load": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[0,1,2],\"data\":[[10.0,7.0],[10.0,7.0],[10.0,7.0]]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_motor": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_sgen": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_storage": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_shunt": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": 
"{\"columns\":[\"p_mw\",\"q_mvar\",\"vm_pu\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64", + "vm_pu": "float64" + } + }, + "res_gen": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\",\"va_degree\",\"vm_pu\"],\"index\":[0,1],\"data\":[[10.0,118.791243672370911,-0.845445168673926,1.02],[21.729831330858325,-116.839935541152954,0.0,1.02]]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64", + "va_degree": "float64", + "vm_pu": "float64" + } + }, + "res_ward": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\",\"vm_pu\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64", + "vm_pu": "float64" + } + }, + "res_xward": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\",\"vm_pu\",\"va_internal_degree\",\"vm_internal_pu\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64", + "vm_pu": "float64", + "va_internal_degree": "float64", + "vm_internal_pu": "float64" + } + }, + "res_dcline": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_from_mw\",\"q_from_mvar\",\"p_to_mw\",\"q_to_mvar\",\"pl_mw\",\"vm_from_pu\",\"va_from_degree\",\"vm_to_pu\",\"va_to_degree\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_from_mw": "float64", + "q_from_mvar": "float64", + "p_to_mw": "float64", + "q_to_mvar": "float64", + "pl_mw": "float64", + "vm_from_pu": "float64", + "va_from_degree": "float64", + "vm_to_pu": "float64", + "va_to_degree": "float64" + } + }, + "res_asymmetric_load": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_asymmetric_sgen": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_bus_est": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"vm_pu\",\"va_degree\",\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "vm_pu": "float64", + "va_degree": "float64", + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_line_est": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_from_mw\",\"q_from_mvar\",\"p_to_mw\",\"q_to_mvar\",\"pl_mw\",\"ql_mvar\",\"i_from_ka\",\"i_to_ka\",\"i_ka\",\"vm_from_pu\",\"va_from_degree\",\"vm_to_pu\",\"va_to_degree\",\"loading_percent\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_from_mw": "float64", + "q_from_mvar": "float64", + "p_to_mw": "float64", + "q_to_mvar": "float64", + "pl_mw": "float64", + "ql_mvar": "float64", + "i_from_ka": "float64", + "i_to_ka": "float64", + "i_ka": "float64", + "vm_from_pu": "float64", + "va_from_degree": "float64", + "vm_to_pu": "float64", + "va_to_degree": "float64", + "loading_percent": "float64" + } + }, + "res_trafo_est": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": 
"{\"columns\":[\"p_hv_mw\",\"q_hv_mvar\",\"p_lv_mw\",\"q_lv_mvar\",\"pl_mw\",\"ql_mvar\",\"i_hv_ka\",\"i_lv_ka\",\"vm_hv_pu\",\"va_hv_degree\",\"vm_lv_pu\",\"va_lv_degree\",\"loading_percent\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_hv_mw": "float64", + "q_hv_mvar": "float64", + "p_lv_mw": "float64", + "q_lv_mvar": "float64", + "pl_mw": "float64", + "ql_mvar": "float64", + "i_hv_ka": "float64", + "i_lv_ka": "float64", + "vm_hv_pu": "float64", + "va_hv_degree": "float64", + "vm_lv_pu": "float64", + "va_lv_degree": "float64", + "loading_percent": "float64" + } + }, + "res_trafo3w_est": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_hv_mw\",\"q_hv_mvar\",\"p_mv_mw\",\"q_mv_mvar\",\"p_lv_mw\",\"q_lv_mvar\",\"pl_mw\",\"ql_mvar\",\"i_hv_ka\",\"i_mv_ka\",\"i_lv_ka\",\"vm_hv_pu\",\"va_hv_degree\",\"vm_mv_pu\",\"va_mv_degree\",\"vm_lv_pu\",\"va_lv_degree\",\"va_internal_degree\",\"vm_internal_pu\",\"loading_percent\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_hv_mw": "float64", + "q_hv_mvar": "float64", + "p_mv_mw": "float64", + "q_mv_mvar": "float64", + "p_lv_mw": "float64", + "q_lv_mvar": "float64", + "pl_mw": "float64", + "ql_mvar": "float64", + "i_hv_ka": "float64", + "i_mv_ka": "float64", + "i_lv_ka": "float64", + "vm_hv_pu": "float64", + "va_hv_degree": "float64", + "vm_mv_pu": "float64", + "va_mv_degree": "float64", + "vm_lv_pu": "float64", + "va_lv_degree": "float64", + "va_internal_degree": "float64", + "vm_internal_pu": "float64", + "loading_percent": "float64" + } + }, + "res_impedance_est": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_from_mw\",\"q_from_mvar\",\"p_to_mw\",\"q_to_mvar\",\"pl_mw\",\"ql_mvar\",\"i_from_ka\",\"i_to_ka\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_from_mw": "float64", + "q_from_mvar": "float64", + "p_to_mw": "float64", + "q_to_mvar": "float64", + "pl_mw": "float64", + "ql_mvar": "float64", + "i_from_ka": "float64", + "i_to_ka": "float64" + } + }, + "res_bus_sc": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[],\"index\":[],\"data\":[]}", + "orient": "split" + }, + "res_line_sc": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[],\"index\":[],\"data\":[]}", + "orient": "split" + }, + "res_trafo_sc": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[],\"index\":[],\"data\":[]}", + "orient": "split" + }, + "res_trafo3w_sc": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[],\"index\":[],\"data\":[]}", + "orient": "split" + }, + "res_ext_grid_sc": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[],\"index\":[],\"data\":[]}", + "orient": "split" + }, + "res_gen_sc": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[],\"index\":[],\"data\":[]}", + "orient": "split" + }, + "res_sgen_sc": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[],\"index\":[],\"data\":[]}", + "orient": "split" + }, + "res_bus_3ph": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"vm_a_pu\",\"va_a_degree\",\"vm_b_pu\",\"va_b_degree\",\"vm_c_pu\",\"va_c_degree\",\"p_a_mw\",\"q_a_mvar\",\"p_b_mw\",\"q_b_mvar\",\"p_c_mw\",\"q_c_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "vm_a_pu": 
"float64", + "va_a_degree": "float64", + "vm_b_pu": "float64", + "va_b_degree": "float64", + "vm_c_pu": "float64", + "va_c_degree": "float64", + "p_a_mw": "float64", + "q_a_mvar": "float64", + "p_b_mw": "float64", + "q_b_mvar": "float64", + "p_c_mw": "float64", + "q_c_mvar": "float64" + } + }, + "res_line_3ph": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_a_from_mw\",\"q_a_from_mvar\",\"p_b_from_mw\",\"q_b_from_mvar\",\"q_c_from_mvar\",\"p_a_to_mw\",\"q_a_to_mvar\",\"p_b_to_mw\",\"q_b_to_mvar\",\"p_c_to_mw\",\"q_c_to_mvar\",\"p_a_l_mw\",\"q_a_l_mvar\",\"p_b_l_mw\",\"q_b_l_mvar\",\"p_c_l_mw\",\"q_c_l_mvar\",\"i_a_from_ka\",\"i_a_to_ka\",\"i_b_from_ka\",\"i_b_to_ka\",\"i_c_from_ka\",\"i_c_to_ka\",\"i_a_ka\",\"i_b_ka\",\"i_c_ka\",\"i_n_from_ka\",\"i_n_to_ka\",\"i_n_ka\",\"loading_a_percent\",\"loading_b_percent\",\"loading_c_percent\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_a_from_mw": "float64", + "q_a_from_mvar": "float64", + "p_b_from_mw": "float64", + "q_b_from_mvar": "float64", + "q_c_from_mvar": "float64", + "p_a_to_mw": "float64", + "q_a_to_mvar": "float64", + "p_b_to_mw": "float64", + "q_b_to_mvar": "float64", + "p_c_to_mw": "float64", + "q_c_to_mvar": "float64", + "p_a_l_mw": "float64", + "q_a_l_mvar": "float64", + "p_b_l_mw": "float64", + "q_b_l_mvar": "float64", + "p_c_l_mw": "float64", + "q_c_l_mvar": "float64", + "i_a_from_ka": "float64", + "i_a_to_ka": "float64", + "i_b_from_ka": "float64", + "i_b_to_ka": "float64", + "i_c_from_ka": "float64", + "i_c_to_ka": "float64", + "i_a_ka": "float64", + "i_b_ka": "float64", + "i_c_ka": "float64", + "i_n_from_ka": "float64", + "i_n_to_ka": "float64", + "i_n_ka": "float64", + "loading_a_percent": "float64", + "loading_b_percent": "float64", + "loading_c_percent": "float64" + } + }, + "res_trafo_3ph": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_a_hv_mw\",\"q_a_hv_mvar\",\"p_b_hv_mw\",\"q_b_hv_mvar\",\"p_c_hv_mw\",\"q_c_hv_mvar\",\"p_a_lv_mw\",\"q_a_lv_mvar\",\"p_b_lv_mw\",\"q_b_lv_mvar\",\"p_c_lv_mw\",\"q_c_lv_mvar\",\"p_a_l_mw\",\"q_a_l_mvar\",\"p_b_l_mw\",\"q_b_l_mvar\",\"p_c_l_mw\",\"q_c_l_mvar\",\"i_a_hv_ka\",\"i_a_lv_ka\",\"i_b_hv_ka\",\"i_b_lv_ka\",\"i_c_hv_ka\",\"i_c_lv_ka\",\"loading_a_percent\",\"loading_b_percent\",\"loading_c_percent\",\"loading_percent\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_a_hv_mw": "float64", + "q_a_hv_mvar": "float64", + "p_b_hv_mw": "float64", + "q_b_hv_mvar": "float64", + "p_c_hv_mw": "float64", + "q_c_hv_mvar": "float64", + "p_a_lv_mw": "float64", + "q_a_lv_mvar": "float64", + "p_b_lv_mw": "float64", + "q_b_lv_mvar": "float64", + "p_c_lv_mw": "float64", + "q_c_lv_mvar": "float64", + "p_a_l_mw": "float64", + "q_a_l_mvar": "float64", + "p_b_l_mw": "float64", + "q_b_l_mvar": "float64", + "p_c_l_mw": "float64", + "q_c_l_mvar": "float64", + "i_a_hv_ka": "float64", + "i_a_lv_ka": "float64", + "i_b_hv_ka": "float64", + "i_b_lv_ka": "float64", + "i_c_hv_ka": "float64", + "i_c_lv_ka": "float64", + "loading_a_percent": "float64", + "loading_b_percent": "float64", + "loading_c_percent": "float64", + "loading_percent": "float64" + } + }, + "res_ext_grid_3ph": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_a_mw\",\"q_a_mvar\",\"p_b_mw\",\"q_b_mvar\",\"p_c_mw\",\"q_c_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_a_mw": "float64", + "q_a_mvar": "float64", + "p_b_mw": "float64", + "q_b_mvar": 
"float64", + "p_c_mw": "float64", + "q_c_mvar": "float64" + } + }, + "res_shunt_3ph": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[],\"index\":[],\"data\":[]}", + "orient": "split" + }, + "res_load_3ph": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_sgen_3ph": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_storage_3ph": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_asymmetric_load_3ph": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_a_mw\",\"q_a_mvar\",\"p_b_mw\",\"q_b_mvar\",\"p_c_mw\",\"q_c_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_a_mw": "float64", + "q_a_mvar": "float64", + "p_b_mw": "float64", + "q_b_mvar": "float64", + "p_c_mw": "float64", + "q_c_mvar": "float64" + } + }, + "res_asymmetric_sgen_3ph": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_a_mw\",\"q_a_mvar\",\"p_b_mw\",\"q_b_mvar\",\"p_c_mw\",\"q_c_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_a_mw": "float64", + "q_a_mvar": "float64", + "p_b_mw": "float64", + "q_b_mvar": "float64", + "p_c_mw": "float64", + "q_c_mvar": "float64" + } + }, + "user_pf_options": {}, + "OPF_converged": false + } +} \ No newline at end of file diff --git a/grid2op/data_test/5bus_example_act_topo_set_init/prods_charac.csv b/grid2op/data_test/5bus_example_act_topo_set_init/prods_charac.csv new file mode 100644 index 000000000..f47a90595 --- /dev/null +++ b/grid2op/data_test/5bus_example_act_topo_set_init/prods_charac.csv @@ -0,0 +1,3 @@ +Pmax,Pmin,name,type,bus,max_ramp_up,max_ramp_down,min_up_time,min_down_time,marginal_cost,shut_down_cost,start_cost,x,y,V +15,0.0,gen_0_0,wind,5,0,0,0,0,0,0,0,0,0,102. +35,0.0,gen_1_1,thermal,0,15,15,4,4,70,1,2,0,400,102. 
\ No newline at end of file diff --git a/grid2op/data_test/5bus_example_diff_name/chronics/0/hazards.csv.bz2 b/grid2op/data_test/5bus_example_diff_name/chronics/0/hazards.csv.bz2 index 5257b64d8..738ccee0e 100644 Binary files a/grid2op/data_test/5bus_example_diff_name/chronics/0/hazards.csv.bz2 and b/grid2op/data_test/5bus_example_diff_name/chronics/0/hazards.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_diff_name/chronics/0/maintenance.csv.bz2 b/grid2op/data_test/5bus_example_diff_name/chronics/0/maintenance.csv.bz2 index 5257b64d8..738ccee0e 100644 Binary files a/grid2op/data_test/5bus_example_diff_name/chronics/0/maintenance.csv.bz2 and b/grid2op/data_test/5bus_example_diff_name/chronics/0/maintenance.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_diff_name/chronics/0/prod_p.csv.bz2 b/grid2op/data_test/5bus_example_diff_name/chronics/0/prod_p.csv.bz2 index 45ae98a8e..f3169fa0a 100644 Binary files a/grid2op/data_test/5bus_example_diff_name/chronics/0/prod_p.csv.bz2 and b/grid2op/data_test/5bus_example_diff_name/chronics/0/prod_p.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_diff_name/chronics/0/prod_p_forecasted.csv.bz2 b/grid2op/data_test/5bus_example_diff_name/chronics/0/prod_p_forecasted.csv.bz2 index 555c0fdde..21416f274 100644 Binary files a/grid2op/data_test/5bus_example_diff_name/chronics/0/prod_p_forecasted.csv.bz2 and b/grid2op/data_test/5bus_example_diff_name/chronics/0/prod_p_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_diff_name/chronics/0/prod_v.csv.bz2 b/grid2op/data_test/5bus_example_diff_name/chronics/0/prod_v.csv.bz2 index 9fde13d8a..63209b7aa 100644 Binary files a/grid2op/data_test/5bus_example_diff_name/chronics/0/prod_v.csv.bz2 and b/grid2op/data_test/5bus_example_diff_name/chronics/0/prod_v.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_diff_name/chronics/0/prod_v_forecasted.csv.bz2 b/grid2op/data_test/5bus_example_diff_name/chronics/0/prod_v_forecasted.csv.bz2 index c685c39c0..43614717a 100644 Binary files a/grid2op/data_test/5bus_example_diff_name/chronics/0/prod_v_forecasted.csv.bz2 and b/grid2op/data_test/5bus_example_diff_name/chronics/0/prod_v_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/5bus_example_diff_name/grid.json b/grid2op/data_test/5bus_example_diff_name/grid.json index 4ed416f32..427f1b756 100644 --- a/grid2op/data_test/5bus_example_diff_name/grid.json +++ b/grid2op/data_test/5bus_example_diff_name/grid.json @@ -72,7 +72,7 @@ "gen": { "_module": "pandas.core.frame", "_class": "DataFrame", - "_object": "{\"columns\":[\"name\",\"bus\",\"p_mw\",\"vm_pu\",\"sn_mva\",\"min_q_mvar\",\"max_q_mvar\",\"scaling\",\"slack\",\"in_service\",\"type\"],\"index\":[0,1],\"data\":[[\"gen_0_0\",0,10.0,1.02,null,null,null,1.0,false,true,null],[\"gen_1_1\",1,20.0,1.02,null,null,null,1.0,true,true,null]]}", + "_object": "{\"columns\":[\"name\",\"bus\",\"p_mw\",\"vm_pu\",\"sn_mva\",\"min_q_mvar\",\"max_q_mvar\",\"scaling\",\"slack\",\"in_service\",\"type\"],\"index\":[0,1],\"data\":[[\"othername_0_0\",0,10.0,1.02,null,null,null,1.0,false,true,null],[\"othername_1_1\",1,20.0,1.02,null,null,null,1.0,true,true,null]]}", "dtype": { "name": "object", "bus": "uint32", @@ -135,7 +135,7 @@ "line": { "_module": "pandas.core.frame", "_class": "DataFrame", - "_object": 
"{\"columns\":[\"name\",\"std_type\",\"from_bus\",\"to_bus\",\"length_km\",\"r_ohm_per_km\",\"x_ohm_per_km\",\"c_nf_per_km\",\"g_us_per_km\",\"max_i_ka\",\"df\",\"parallel\",\"type\",\"in_service\"],\"index\":[0,1,2,3,4,5,6,7],\"data\":[[null,\"NAYY 4x50 SE\",0,1,4.0,0.642,0.083,210.0,0.0,0.6,1.0,1,\"cs\",true],[\"0_2_2\",\"NAYY 4x50 SE\",0,2,4.47,0.642,0.083,210.0,0.0,0.22,1.0,1,\"cs\",true],[\"0_3_3\",\"NAYY 4x50 SE\",0,3,5.65,0.642,0.083,210.0,0.0,0.16,1.0,1,\"cs\",true],[\"0_4_4\",\"NAYY 4x50 SE\",0,4,4.0,0.642,0.083,210.0,0.0,0.16,1.0,1,\"cs\",true],[\"1_2_5\",\"NAYY 4x50 SE\",1,2,2.0,0.642,0.083,210.0,0.0,0.6,1.0,1,\"cs\",true],[\"2_3_6\",\"NAYY 4x50 SE\",2,3,2.0,0.642,0.083,210.0,0.0,0.3,1.0,1,\"cs\",true],[\"2_3_7\",\"NAYY 4x50 SE\",2,3,2.0,0.642,0.083,210.0,0.0,0.3,1.0,1,\"cs\",true],[\"3_4_8\",\"NAYY 4x50 SE\",3,4,4.0,0.642,0.083,210.0,0.0,0.16,1.0,1,\"cs\",true]]}", + "_object": "{\"columns\":[\"name\",\"std_type\",\"from_bus\",\"to_bus\",\"length_km\",\"r_ohm_per_km\",\"x_ohm_per_km\",\"c_nf_per_km\",\"g_us_per_km\",\"max_i_ka\",\"df\",\"parallel\",\"type\",\"in_service\"],\"index\":[0,1,2,3,4,5,6,7],\"data\":[[\"l_0_1_0\",\"NAYY 4x50 SE\",0,1,4.0,0.642,0.083,210.0,0.0,0.6,1.0,1,\"cs\",true],[\"l_0_2_1\",\"NAYY 4x50 SE\",0,2,4.47,0.642,0.083,210.0,0.0,0.22,1.0,1,\"cs\",true],[\"l_0_3_2\",\"NAYY 4x50 SE\",0,3,5.65,0.642,0.083,210.0,0.0,0.16,1.0,1,\"cs\",true],[\"l_0_4_3\",\"NAYY 4x50 SE\",0,4,4.0,0.642,0.083,210.0,0.0,0.16,1.0,1,\"cs\",true],[\"l_1_2_4\",\"NAYY 4x50 SE\",1,2,2.0,0.642,0.083,210.0,0.0,0.6,1.0,1,\"cs\",true],[\"l_2_3_5\",\"NAYY 4x50 SE\",2,3,2.0,0.642,0.083,210.0,0.0,0.3,1.0,1,\"cs\",true],[\"l_2_3_6\",\"NAYY 4x50 SE\",2,3,2.0,0.642,0.083,210.0,0.0,0.3,1.0,1,\"cs\",true],[\"l_3_4_7\",\"NAYY 4x50 SE\",3,4,4.0,0.642,0.083,210.0,0.0,0.16,1.0,1,\"cs\",true]]}", "dtype": { "name": "object", "std_type": "object", diff --git a/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-12/init_state.json b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-12/init_state.json new file mode 100644 index 000000000..3b9b42ef9 --- /dev/null +++ b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-12/init_state.json @@ -0,0 +1,4 @@ +{ + "shunt": {"shunt_q": [[0, 0.0]]} +} + diff --git a/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-12/load_p.csv.bz2 b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-12/load_p.csv.bz2 new file mode 100644 index 000000000..cb68d0275 Binary files /dev/null and b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-12/load_p.csv.bz2 differ diff --git a/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-12/load_p_forecasted.csv.bz2 b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-12/load_p_forecasted.csv.bz2 new file mode 100644 index 000000000..19c21de8b Binary files /dev/null and b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-12/load_p_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-12/load_q.csv.bz2 b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-12/load_q.csv.bz2 new file mode 100644 index 000000000..35d398131 Binary files /dev/null and b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-12/load_q.csv.bz2 differ diff --git a/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-12/load_q_forecasted.csv.bz2 b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-12/load_q_forecasted.csv.bz2 
new file mode 100644 index 000000000..6c262e2d8 Binary files /dev/null and b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-12/load_q_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-12/prod_p.csv.bz2 b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-12/prod_p.csv.bz2 new file mode 100644 index 000000000..c2f9e0442 Binary files /dev/null and b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-12/prod_p.csv.bz2 differ diff --git a/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-12/prod_p_forecasted.csv.bz2 b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-12/prod_p_forecasted.csv.bz2 new file mode 100644 index 000000000..c79351441 Binary files /dev/null and b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-12/prod_p_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-12/prod_v.csv.bz2 b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-12/prod_v.csv.bz2 new file mode 100644 index 000000000..79c31540f Binary files /dev/null and b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-12/prod_v.csv.bz2 differ diff --git a/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-12/prod_v_forecasted.csv.bz2 b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-12/prod_v_forecasted.csv.bz2 new file mode 100644 index 000000000..79c31540f Binary files /dev/null and b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-12/prod_v_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-12/start_datetime.info b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-12/start_datetime.info new file mode 100644 index 000000000..5e520426f --- /dev/null +++ b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-12/start_datetime.info @@ -0,0 +1 @@ +2019-01-11 23:55 diff --git a/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-12/time_interval.info b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-12/time_interval.info new file mode 100644 index 000000000..beb9b9011 --- /dev/null +++ b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-12/time_interval.info @@ -0,0 +1 @@ +00:05 diff --git a/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-13/init_state.json b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-13/init_state.json new file mode 100644 index 000000000..1993de4f4 --- /dev/null +++ b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-13/init_state.json @@ -0,0 +1,4 @@ +{ + "set_storage": [[0, 5]] +} + diff --git a/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-13/load_p.csv.bz2 b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-13/load_p.csv.bz2 new file mode 100644 index 000000000..488a31b2a Binary files /dev/null and b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-13/load_p.csv.bz2 differ diff --git a/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-13/load_p_forecasted.csv.bz2 b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-13/load_p_forecasted.csv.bz2 new file mode 100644 index 000000000..8e341212e Binary files /dev/null and b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-13/load_p_forecasted.csv.bz2 differ diff --git 
a/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-13/load_q.csv.bz2 b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-13/load_q.csv.bz2 new file mode 100644 index 000000000..887ff90b5 Binary files /dev/null and b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-13/load_q.csv.bz2 differ diff --git a/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-13/load_q_forecasted.csv.bz2 b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-13/load_q_forecasted.csv.bz2 new file mode 100644 index 000000000..74f6595f6 Binary files /dev/null and b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-13/load_q_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-13/prod_p.csv.bz2 b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-13/prod_p.csv.bz2 new file mode 100644 index 000000000..a8f9567a0 Binary files /dev/null and b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-13/prod_p.csv.bz2 differ diff --git a/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-13/prod_p_forecasted.csv.bz2 b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-13/prod_p_forecasted.csv.bz2 new file mode 100644 index 000000000..8a5119858 Binary files /dev/null and b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-13/prod_p_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-13/prod_v.csv.bz2 b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-13/prod_v.csv.bz2 new file mode 100644 index 000000000..79c31540f Binary files /dev/null and b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-13/prod_v.csv.bz2 differ diff --git a/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-13/prod_v_forecasted.csv.bz2 b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-13/prod_v_forecasted.csv.bz2 new file mode 100644 index 000000000..79c31540f Binary files /dev/null and b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-13/prod_v_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-13/start_datetime.info b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-13/start_datetime.info new file mode 100644 index 000000000..d1822dcde --- /dev/null +++ b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-13/start_datetime.info @@ -0,0 +1 @@ +2019-01-12 23:55 diff --git a/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-13/time_interval.info b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-13/time_interval.info new file mode 100644 index 000000000..beb9b9011 --- /dev/null +++ b/grid2op/data_test/educ_case14_storage_init_state/chronics/2019-01-13/time_interval.info @@ -0,0 +1 @@ +00:05 diff --git a/grid2op/data_test/educ_case14_storage_init_state/config.py b/grid2op/data_test/educ_case14_storage_init_state/config.py new file mode 100644 index 000000000..afefe03d4 --- /dev/null +++ b/grid2op/data_test/educ_case14_storage_init_state/config.py @@ -0,0 +1,40 @@ +from grid2op.Action import PowerlineChangeDispatchAndStorageAction +from grid2op.Reward import L2RPNReward +from grid2op.Rules import DefaultRules +from grid2op.Chronics import Multifolder +from grid2op.Chronics import GridStateFromFileWithForecasts +from grid2op.Backend import PandaPowerBackend + +config = { + "backend": 
PandaPowerBackend, + "action_class": PowerlineChangeDispatchAndStorageAction, + "observation_class": None, + "reward_class": L2RPNReward, + "gamerules_class": DefaultRules, + "chronics_class": Multifolder, + "grid_value_class": GridStateFromFileWithForecasts, + "volagecontroler_class": None, + "thermal_limits": [ + 541.0, + 450.0, + 375.0, + 636.0, + 175.0, + 285.0, + 335.0, + 657.0, + 496.0, + 827.0, + 442.0, + 641.0, + 840.0, + 156.0, + 664.0, + 235.0, + 119.0, + 179.0, + 1986.0, + 1572.0, + ], + "names_chronics_to_grid": None, +} diff --git a/grid2op/data_test/educ_case14_storage_init_state/difficulty_levels.json b/grid2op/data_test/educ_case14_storage_init_state/difficulty_levels.json new file mode 100644 index 000000000..da8317445 --- /dev/null +++ b/grid2op/data_test/educ_case14_storage_init_state/difficulty_levels.json @@ -0,0 +1,58 @@ +{ + "0": { + "NO_OVERFLOW_DISCONNECTION": true, + "NB_TIMESTEP_OVERFLOW_ALLOWED": 9999, + "NB_TIMESTEP_COOLDOWN_SUB": 0, + "NB_TIMESTEP_COOLDOWN_LINE": 0, + "HARD_OVERFLOW_THRESHOLD": 9999, + "NB_TIMESTEP_RECONNECTION": 0, + "IGNORE_MIN_UP_DOWN_TIME": true, + "ALLOW_DISPATCH_GEN_SWITCH_OFF": true, + "ENV_DC": false, + "FORECAST_DC": false, + "MAX_SUB_CHANGED": 1, + "MAX_LINE_STATUS_CHANGED": 1 + }, + "1": { + "NO_OVERFLOW_DISCONNECTION": false, + "NB_TIMESTEP_OVERFLOW_ALLOWED": 6, + "NB_TIMESTEP_COOLDOWN_SUB": 0, + "NB_TIMESTEP_COOLDOWN_LINE": 0, + "HARD_OVERFLOW_THRESHOLD": 3.0, + "NB_TIMESTEP_RECONNECTION": 1, + "IGNORE_MIN_UP_DOWN_TIME": true, + "ALLOW_DISPATCH_GEN_SWITCH_OFF": true, + "ENV_DC": false, + "FORECAST_DC": false, + "MAX_SUB_CHANGED": 1, + "MAX_LINE_STATUS_CHANGED": 1 + }, + "2": { + "NO_OVERFLOW_DISCONNECTION": false, + "NB_TIMESTEP_OVERFLOW_ALLOWED": 3, + "NB_TIMESTEP_COOLDOWN_SUB": 1, + "NB_TIMESTEP_COOLDOWN_LINE": 1, + "HARD_OVERFLOW_THRESHOLD": 2.5, + "NB_TIMESTEP_RECONNECTION": 6, + "IGNORE_MIN_UP_DOWN_TIME": true, + "ALLOW_DISPATCH_GEN_SWITCH_OFF": true, + "ENV_DC": false, + "FORECAST_DC": false, + "MAX_SUB_CHANGED": 1, + "MAX_LINE_STATUS_CHANGED": 1 + }, + "competition": { + "NO_OVERFLOW_DISCONNECTION": false, + "NB_TIMESTEP_OVERFLOW_ALLOWED": 3, + "NB_TIMESTEP_COOLDOWN_SUB": 3, + "NB_TIMESTEP_COOLDOWN_LINE": 3, + "HARD_OVERFLOW_THRESHOLD": 2.0, + "NB_TIMESTEP_RECONNECTION": 12, + "IGNORE_MIN_UP_DOWN_TIME": true, + "ALLOW_DISPATCH_GEN_SWITCH_OFF": true, + "ENV_DC": false, + "FORECAST_DC": false, + "MAX_SUB_CHANGED": 1, + "MAX_LINE_STATUS_CHANGED": 1 + } +} diff --git a/grid2op/data_test/educ_case14_storage_init_state/grid.json b/grid2op/data_test/educ_case14_storage_init_state/grid.json new file mode 100644 index 000000000..2f27b3528 --- /dev/null +++ b/grid2op/data_test/educ_case14_storage_init_state/grid.json @@ -0,0 +1,1766 @@ +{ + "_module": "pandapower.auxiliary", + "_class": "pandapowerNet", + "_object": { + "bus": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"vn_kv\",\"type\",\"zone\",\"in_service\",\"min_vm_pu\",\"max_vm_pu\"],\"index\":[0,1,2,3,4,5,6,7,8,9,10,11,12,13],\"data\":[[1,138.0,\"b\",1.0,true,0.94,1.06],[2,138.0,\"b\",1.0,true,0.94,1.06],[3,138.0,\"b\",1.0,true,0.94,1.06],[4,138.0,\"b\",1.0,true,0.94,1.06],[5,138.0,\"b\",1.0,true,0.94,1.06],[6,20.0,\"b\",1.0,true,0.94,1.06],[7,14.0,\"b\",1.0,true,0.94,1.06],[8,12.0,\"b\",1.0,true,0.94,1.06],[9,20.0,\"b\",1.0,true,0.94,1.06],[10,20.0,\"b\",1.0,true,0.94,1.06],[11,20.0,\"b\",1.0,true,0.94,1.06],[12,20.0,\"b\",1.0,true,0.94,1.06],[13,20.0,\"b\",1.0,true,0.94,1.06],[14,20.0,\"b\",1.0,true,0.94,1.06]]}", + 
"orient": "split", + "dtype": { + "name": "object", + "vn_kv": "float64", + "type": "object", + "zone": "object", + "in_service": "bool", + "min_vm_pu": "float64", + "max_vm_pu": "float64" + } + }, + "load": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"bus\",\"p_mw\",\"q_mvar\",\"const_z_percent\",\"const_i_percent\",\"sn_mva\",\"scaling\",\"in_service\",\"type\",\"controllable\"],\"index\":[0,1,2,3,4,5,6,7,8,9,10],\"data\":[[null,1,21.699999999999999,12.699999999999999,0.0,0.0,null,1.0,true,null,false],[null,2,94.200000000000003,19.0,0.0,0.0,null,1.0,true,null,false],[null,3,47.799999999999997,-3.9,0.0,0.0,null,1.0,true,null,false],[null,4,7.6,1.6,0.0,0.0,null,1.0,true,null,false],[null,5,11.199999999999999,7.5,0.0,0.0,null,1.0,true,null,false],[null,8,29.5,16.600000000000001,0.0,0.0,null,1.0,true,null,false],[null,9,9.0,5.8,0.0,0.0,null,1.0,true,null,false],[null,10,3.5,1.8,0.0,0.0,null,1.0,true,null,false],[null,11,6.1,1.6,0.0,0.0,null,1.0,true,null,false],[null,12,13.5,5.8,0.0,0.0,null,1.0,true,null,false],[null,13,14.9,5.0,0.0,0.0,null,1.0,true,null,false]]}", + "orient": "split", + "dtype": { + "name": "object", + "bus": "uint32", + "p_mw": "float64", + "q_mvar": "float64", + "const_z_percent": "float64", + "const_i_percent": "float64", + "sn_mva": "float64", + "scaling": "float64", + "in_service": "bool", + "type": "object", + "controllable": "object" + } + }, + "sgen": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"bus\",\"p_mw\",\"q_mvar\",\"sn_mva\",\"scaling\",\"in_service\",\"type\",\"current_source\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "bus": "int64", + "p_mw": "float64", + "q_mvar": "float64", + "sn_mva": "float64", + "scaling": "float64", + "in_service": "bool", + "type": "object", + "current_source": "bool" + } + }, + "motor": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"bus\",\"pn_mech_mw\",\"loading_percent\",\"cos_phi\",\"cos_phi_n\",\"efficiency_percent\",\"efficiency_n_percent\",\"lrc_pu\",\"vn_kv\",\"scaling\",\"in_service\",\"rx\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "bus": "int64", + "pn_mech_mw": "float64", + "loading_percent": "float64", + "cos_phi": "float64", + "cos_phi_n": "float64", + "efficiency_percent": "float64", + "efficiency_n_percent": "float64", + "lrc_pu": "float64", + "vn_kv": "float64", + "scaling": "float64", + "in_service": "bool", + "rx": "float64" + } + }, + "asymmetric_load": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"bus\",\"p_a_mw\",\"q_a_mvar\",\"p_b_mw\",\"q_b_mvar\",\"p_c_mw\",\"q_c_mvar\",\"sn_mva\",\"scaling\",\"in_service\",\"type\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "bus": "uint32", + "p_a_mw": "float64", + "q_a_mvar": "float64", + "p_b_mw": "float64", + "q_b_mvar": "float64", + "p_c_mw": "float64", + "q_c_mvar": "float64", + "sn_mva": "float64", + "scaling": "float64", + "in_service": "bool", + "type": "object" + } + }, + "asymmetric_sgen": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"bus\",\"p_a_mw\",\"q_a_mvar\",\"p_b_mw\",\"q_b_mvar\",\"p_c_mw\",\"q_c_mvar\",\"sn_mva\",\"scaling\",\"in_service\",\"type\",\"current_source\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "bus": 
"int64", + "p_a_mw": "float64", + "q_a_mvar": "float64", + "p_b_mw": "float64", + "q_b_mvar": "float64", + "p_c_mw": "float64", + "q_c_mvar": "float64", + "sn_mva": "float64", + "scaling": "float64", + "in_service": "bool", + "type": "object", + "current_source": "bool" + } + }, + "storage": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"bus\",\"p_mw\",\"q_mvar\",\"sn_mva\",\"soc_percent\",\"min_e_mwh\",\"max_e_mwh\",\"scaling\",\"in_service\",\"type\"],\"index\":[0,1],\"data\":[[null,5,0.0,0.0,null,null,0.0,15.0,1.0,true,null],[null,7,0.0,0.0,null,null,0.0,7.0,1.0,true,null]]}", + "orient": "split", + "dtype": { + "name": "object", + "bus": "int64", + "p_mw": "float64", + "q_mvar": "float64", + "sn_mva": "float64", + "soc_percent": "float64", + "min_e_mwh": "float64", + "max_e_mwh": "float64", + "scaling": "float64", + "in_service": "bool", + "type": "object" + } + }, + "gen": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"bus\",\"p_mw\",\"vm_pu\",\"sn_mva\",\"min_q_mvar\",\"max_q_mvar\",\"scaling\",\"slack\",\"in_service\",\"type\",\"controllable\",\"min_p_mw\",\"max_p_mw\",\"slack_weight\",\"power_station_trafo\"],\"index\":[0,1,2,3,4,5],\"data\":[[null,1,40.0,1.045,null,-40.0,50.0,1.0,false,true,null,true,0.0,140.0,0.0,null],[null,2,0.0,1.01,null,0.0,40.0,1.0,false,true,null,true,0.0,100.0,0.0,null],[null,5,0.0,1.07,null,-6.0,24.0,1.0,false,true,null,true,0.0,100.0,0.0,null],[null,5,0.0,1.07,null,-6.0,24.0,1.0,false,true,null,true,0.0,100.0,0.0,null],[null,7,0.0,1.09,null,-6.0,24.0,1.0,false,true,null,true,0.0,100.0,0.0,null],[\"gen_0_5\",0,-219.0,1.06,null,-9999.0,9999.0,1.0,true,true,null,true,null,null,1.0,null]]}", + "orient": "split", + "dtype": { + "name": "object", + "bus": "uint32", + "p_mw": "float64", + "vm_pu": "float64", + "sn_mva": "float64", + "min_q_mvar": "float64", + "max_q_mvar": "float64", + "scaling": "float64", + "slack": "bool", + "in_service": "bool", + "type": "object", + "controllable": "object", + "min_p_mw": "float64", + "max_p_mw": "float64", + "slack_weight": "float64", + "power_station_trafo": "float64" + } + }, + "switch": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"bus\",\"element\",\"et\",\"type\",\"closed\",\"name\",\"z_ohm\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "bus": "int64", + "element": "int64", + "et": "object", + "type": "object", + "closed": "bool", + "name": "object", + "z_ohm": "float64" + } + }, + "shunt": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"bus\",\"name\",\"q_mvar\",\"p_mw\",\"vn_kv\",\"step\",\"max_step\",\"in_service\"],\"index\":[0],\"data\":[[8,null,-19.0,0.0,20.0,1,1,true]]}", + "orient": "split", + "dtype": { + "bus": "uint32", + "name": "object", + "q_mvar": "float64", + "p_mw": "float64", + "vn_kv": "float64", + "step": "uint32", + "max_step": "uint32", + "in_service": "bool" + } + }, + "line": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": 
"{\"columns\":[\"name\",\"std_type\",\"from_bus\",\"to_bus\",\"length_km\",\"r_ohm_per_km\",\"x_ohm_per_km\",\"c_nf_per_km\",\"g_us_per_km\",\"max_i_ka\",\"df\",\"parallel\",\"type\",\"in_service\",\"max_loading_percent\"],\"index\":[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],\"data\":[[null,null,0,1,1.0,3.6907272,11.2683348,882.522683811391971,0.0,41.418606267951418,1.0,1,\"ol\",true,100.0],[null,null,0,4,1.0,10.2894732,42.475737599999995,822.350682642433412,0.0,41.418606267951418,1.0,1,\"ol\",true,100.0],[null,null,1,2,1.0,8.948775599999999,37.701406800000001,732.092680888995574,0.0,41.418606267951418,1.0,1,\"ol\",true,100.0],[null,null,1,3,1.0,11.0664684,33.578380799999998,568.29112215127509,0.0,41.418606267951418,1.0,1,\"ol\",true,100.0],[null,null,1,4,1.0,10.845558,33.1137072,578.319789012768069,0.0,41.418606267951418,1.0,1,\"ol\",true,100.0],[null,null,2,3,1.0,12.761384400000001,32.570953199999998,213.94489304518595,0.0,41.418606267951418,1.0,1,\"ol\",true,100.0],[null,null,3,4,1.0,2.542374,8.019428400000001,0.0,0.0,41.418606267951418,1.0,1,\"ol\",true,100.0],[null,null,5,10,1.0,0.37992,0.7956,0.0,0.0,285.788383248864761,1.0,1,\"ol\",true,100.0],[null,null,5,11,1.0,0.49164,1.02324,0.0,0.0,285.788383248864761,1.0,1,\"ol\",true,100.0],[null,null,5,12,1.0,0.2646,0.52108,0.0,0.0,285.788383248864761,1.0,1,\"ol\",true,100.0],[null,null,8,9,1.0,0.12724,0.338,0.0,0.0,285.788383248864761,1.0,1,\"ol\",true,100.0],[null,null,8,13,1.0,0.50844,1.08152,0.0,0.0,285.788383248864761,1.0,1,\"ol\",true,100.0],[null,null,9,10,1.0,0.3282,0.76828,0.0,0.0,285.788383248864761,1.0,1,\"ol\",true,100.0],[null,null,11,12,1.0,0.88368,0.79952,0.0,0.0,285.788383248864761,1.0,1,\"ol\",true,100.0],[null,null,12,13,1.0,0.68372,1.39208,0.0,0.0,285.788383248864761,1.0,1,\"ol\",true,100.0]]}", + "orient": "split", + "dtype": { + "name": "object", + "std_type": "object", + "from_bus": "uint32", + "to_bus": "uint32", + "length_km": "float64", + "r_ohm_per_km": "float64", + "x_ohm_per_km": "float64", + "c_nf_per_km": "float64", + "g_us_per_km": "float64", + "max_i_ka": "float64", + "df": "float64", + "parallel": "uint32", + "type": "object", + "in_service": "bool", + "max_loading_percent": "float64" + } + }, + "trafo": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"std_type\",\"hv_bus\",\"lv_bus\",\"sn_mva\",\"vn_hv_kv\",\"vn_lv_kv\",\"vk_percent\",\"vkr_percent\",\"pfe_kw\",\"i0_percent\",\"shift_degree\",\"tap_side\",\"tap_neutral\",\"tap_min\",\"tap_max\",\"tap_step_percent\",\"tap_step_degree\",\"tap_pos\",\"tap_phase_shifter\",\"parallel\",\"df\",\"in_service\",\"max_loading_percent\"],\"index\":[0,1,2,3,4],\"data\":[[null,null,3,6,9900.0,138.0,14.0,2070.288000000000011,0.0,0.0,0.0,0.0,\"hv\",0,null,null,2.200000000000002,0.0,-1,false,1,1.0,true,100.0],[null,null,3,8,9900.0,138.0,20.0,5506.181999999999789,0.0,0.0,0.0,0.0,\"hv\",0,null,null,3.100000000000003,0.0,-1,false,1,1.0,true,100.0],[null,null,4,5,9900.0,138.0,20.0,2494.998000000000047,0.0,0.0,0.0,0.0,\"hv\",0,null,null,6.799999999999995,0.0,-1,false,1,1.0,true,100.0],[null,null,6,7,9900.0,14.0,12.0,1743.884999999999991,0.0,0.0,0.0,0.0,false,0,null,null,0.0,0.0,0,false,1,1.0,true,100.0],[null,null,8,6,9900.0,20.0,14.0,1089.098999999999933,0.0,0.0,0.0,0.0,false,0,null,null,0.0,0.0,0,false,1,1.0,true,100.0]]}", + "orient": "split", + "dtype": { + "name": "object", + "std_type": "object", + "hv_bus": "uint32", + "lv_bus": "uint32", + "sn_mva": "float64", + "vn_hv_kv": "float64", + "vn_lv_kv": "float64", + "vk_percent": 
"float64", + "vkr_percent": "float64", + "pfe_kw": "float64", + "i0_percent": "float64", + "shift_degree": "float64", + "tap_side": "object", + "tap_neutral": "int32", + "tap_min": "float64", + "tap_max": "float64", + "tap_step_percent": "float64", + "tap_step_degree": "float64", + "tap_pos": "int32", + "tap_phase_shifter": "bool", + "parallel": "uint32", + "df": "float64", + "in_service": "bool", + "max_loading_percent": "float64" + } + }, + "trafo3w": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"std_type\",\"hv_bus\",\"mv_bus\",\"lv_bus\",\"sn_hv_mva\",\"sn_mv_mva\",\"sn_lv_mva\",\"vn_hv_kv\",\"vn_mv_kv\",\"vn_lv_kv\",\"vk_hv_percent\",\"vk_mv_percent\",\"vk_lv_percent\",\"vkr_hv_percent\",\"vkr_mv_percent\",\"vkr_lv_percent\",\"pfe_kw\",\"i0_percent\",\"shift_mv_degree\",\"shift_lv_degree\",\"tap_side\",\"tap_neutral\",\"tap_min\",\"tap_max\",\"tap_step_percent\",\"tap_step_degree\",\"tap_pos\",\"tap_at_star_point\",\"in_service\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "std_type": "object", + "hv_bus": "uint32", + "mv_bus": "uint32", + "lv_bus": "uint32", + "sn_hv_mva": "float64", + "sn_mv_mva": "float64", + "sn_lv_mva": "float64", + "vn_hv_kv": "float64", + "vn_mv_kv": "float64", + "vn_lv_kv": "float64", + "vk_hv_percent": "float64", + "vk_mv_percent": "float64", + "vk_lv_percent": "float64", + "vkr_hv_percent": "float64", + "vkr_mv_percent": "float64", + "vkr_lv_percent": "float64", + "pfe_kw": "float64", + "i0_percent": "float64", + "shift_mv_degree": "float64", + "shift_lv_degree": "float64", + "tap_side": "object", + "tap_neutral": "int32", + "tap_min": "int32", + "tap_max": "int32", + "tap_step_percent": "float64", + "tap_step_degree": "float64", + "tap_pos": "int32", + "tap_at_star_point": "bool", + "in_service": "bool" + } + }, + "impedance": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"from_bus\",\"to_bus\",\"rft_pu\",\"xft_pu\",\"rtf_pu\",\"xtf_pu\",\"sn_mva\",\"in_service\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "from_bus": "uint32", + "to_bus": "uint32", + "rft_pu": "float64", + "xft_pu": "float64", + "rtf_pu": "float64", + "xtf_pu": "float64", + "sn_mva": "float64", + "in_service": "bool" + } + }, + "dcline": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"from_bus\",\"to_bus\",\"p_mw\",\"loss_percent\",\"loss_mw\",\"vm_from_pu\",\"vm_to_pu\",\"max_p_mw\",\"min_q_from_mvar\",\"min_q_to_mvar\",\"max_q_from_mvar\",\"max_q_to_mvar\",\"in_service\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "from_bus": "uint32", + "to_bus": "uint32", + "p_mw": "float64", + "loss_percent": "float64", + "loss_mw": "float64", + "vm_from_pu": "float64", + "vm_to_pu": "float64", + "max_p_mw": "float64", + "min_q_from_mvar": "float64", + "min_q_to_mvar": "float64", + "max_q_from_mvar": "float64", + "max_q_to_mvar": "float64", + "in_service": "bool" + } + }, + "ward": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"bus\",\"ps_mw\",\"qs_mvar\",\"qz_mvar\",\"pz_mw\",\"in_service\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "bus": "uint32", + "ps_mw": "float64", + "qs_mvar": "float64", + "qz_mvar": "float64", + "pz_mw": "float64", + "in_service": "bool" + } + }, + "xward": { + "_module": "pandas.core.frame", + "_class": 
"DataFrame", + "_object": "{\"columns\":[\"name\",\"bus\",\"ps_mw\",\"qs_mvar\",\"qz_mvar\",\"pz_mw\",\"r_ohm\",\"x_ohm\",\"vm_pu\",\"in_service\",\"slack_weight\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "bus": "uint32", + "ps_mw": "float64", + "qs_mvar": "float64", + "qz_mvar": "float64", + "pz_mw": "float64", + "r_ohm": "float64", + "x_ohm": "float64", + "vm_pu": "float64", + "in_service": "bool", + "slack_weight": "float64" + } + }, + "measurement": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"measurement_type\",\"element_type\",\"element\",\"value\",\"std_dev\",\"side\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "measurement_type": "object", + "element_type": "object", + "element": "uint32", + "value": "float64", + "std_dev": "float64", + "side": "object" + } + }, + "pwl_cost": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"power_type\",\"element\",\"et\",\"points\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "power_type": "object", + "element": "uint32", + "et": "object", + "points": "object" + } + }, + "poly_cost": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"element\",\"et\",\"cp0_eur\",\"cp1_eur_per_mw\",\"cp2_eur_per_mw2\",\"cq0_eur\",\"cq1_eur_per_mvar\",\"cq2_eur_per_mvar2\"],\"index\":[0,1,2,3,4],\"data\":[[0,\"ext_grid\",0.0,20.0,0.0430293,0.0,0.0,0.0],[0,\"gen\",0.0,20.0,0.25,0.0,0.0,0.0],[1,\"gen\",0.0,40.0,0.01,0.0,0.0,0.0],[2,\"gen\",0.0,40.0,0.01,0.0,0.0,0.0],[3,\"gen\",0.0,40.0,0.01,0.0,0.0,0.0]]}", + "orient": "split", + "dtype": { + "element": "uint32", + "et": "object", + "cp0_eur": "float64", + "cp1_eur_per_mw": "float64", + "cp2_eur_per_mw2": "float64", + "cq0_eur": "float64", + "cq1_eur_per_mvar": "float64", + "cq2_eur_per_mvar2": "float64" + } + }, + "characteristic": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"object\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "object": "object" + } + }, + "controller": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"object\",\"in_service\",\"order\",\"level\",\"initial_run\",\"recycle\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "object": "object", + "in_service": "bool", + "order": "float64", + "level": "object", + "initial_run": "bool", + "recycle": "object" + } + }, + "line_geodata": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"coords\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "coords": "object" + } + }, + "bus_geodata": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"x\",\"y\",\"coords\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "x": "float64", + "y": "float64", + "coords": "object" + } + }, + "version": "2.8.0", + "converged": false, + "name": "", + "f_hz": 50, + "sn_mva": 1.0, + "std_types": { + "line": { + "NAYY 4x50 SE": { + "c_nf_per_km": 210, + "r_ohm_per_km": 0.642, + "x_ohm_per_km": 0.083, + "max_i_ka": 0.142, + "type": "cs", + "q_mm2": 50, + "alpha": 0.00403 + }, + "NAYY 4x120 SE": { + "c_nf_per_km": 264, + "r_ohm_per_km": 0.225, + "x_ohm_per_km": 0.08, + "max_i_ka": 0.242, + "type": "cs", + "q_mm2": 120, + "alpha": 0.00403 + }, + "NAYY 4x150 SE": { + "c_nf_per_km": 261, + "r_ohm_per_km": 0.208, + 
"x_ohm_per_km": 0.08, + "max_i_ka": 0.27, + "type": "cs", + "q_mm2": 150, + "alpha": 0.00403 + }, + "NA2XS2Y 1x95 RM/25 12/20 kV": { + "c_nf_per_km": 216, + "r_ohm_per_km": 0.313, + "x_ohm_per_km": 0.132, + "max_i_ka": 0.252, + "type": "cs", + "q_mm2": 95, + "alpha": 0.00403 + }, + "NA2XS2Y 1x185 RM/25 12/20 kV": { + "c_nf_per_km": 273, + "r_ohm_per_km": 0.161, + "x_ohm_per_km": 0.117, + "max_i_ka": 0.362, + "type": "cs", + "q_mm2": 185, + "alpha": 0.00403 + }, + "NA2XS2Y 1x240 RM/25 12/20 kV": { + "c_nf_per_km": 304, + "r_ohm_per_km": 0.122, + "x_ohm_per_km": 0.112, + "max_i_ka": 0.421, + "type": "cs", + "q_mm2": 240, + "alpha": 0.00403 + }, + "NA2XS2Y 1x95 RM/25 6/10 kV": { + "c_nf_per_km": 315, + "r_ohm_per_km": 0.313, + "x_ohm_per_km": 0.123, + "max_i_ka": 0.249, + "type": "cs", + "q_mm2": 95, + "alpha": 0.00403 + }, + "NA2XS2Y 1x185 RM/25 6/10 kV": { + "c_nf_per_km": 406, + "r_ohm_per_km": 0.161, + "x_ohm_per_km": 0.11, + "max_i_ka": 0.358, + "type": "cs", + "q_mm2": 185, + "alpha": 0.00403 + }, + "NA2XS2Y 1x240 RM/25 6/10 kV": { + "c_nf_per_km": 456, + "r_ohm_per_km": 0.122, + "x_ohm_per_km": 0.105, + "max_i_ka": 0.416, + "type": "cs", + "q_mm2": 240, + "alpha": 0.00403 + }, + "NA2XS2Y 1x150 RM/25 12/20 kV": { + "c_nf_per_km": 250, + "r_ohm_per_km": 0.206, + "x_ohm_per_km": 0.116, + "max_i_ka": 0.319, + "type": "cs", + "q_mm2": 150, + "alpha": 0.00403 + }, + "NA2XS2Y 1x120 RM/25 12/20 kV": { + "c_nf_per_km": 230, + "r_ohm_per_km": 0.253, + "x_ohm_per_km": 0.119, + "max_i_ka": 0.283, + "type": "cs", + "q_mm2": 120, + "alpha": 0.00403 + }, + "NA2XS2Y 1x70 RM/25 12/20 kV": { + "c_nf_per_km": 190, + "r_ohm_per_km": 0.443, + "x_ohm_per_km": 0.132, + "max_i_ka": 0.22, + "type": "cs", + "q_mm2": 70, + "alpha": 0.00403 + }, + "NA2XS2Y 1x150 RM/25 6/10 kV": { + "c_nf_per_km": 360, + "r_ohm_per_km": 0.206, + "x_ohm_per_km": 0.11, + "max_i_ka": 0.315, + "type": "cs", + "q_mm2": 150, + "alpha": 0.00403 + }, + "NA2XS2Y 1x120 RM/25 6/10 kV": { + "c_nf_per_km": 340, + "r_ohm_per_km": 0.253, + "x_ohm_per_km": 0.113, + "max_i_ka": 0.28, + "type": "cs", + "q_mm2": 120, + "alpha": 0.00403 + }, + "NA2XS2Y 1x70 RM/25 6/10 kV": { + "c_nf_per_km": 280, + "r_ohm_per_km": 0.443, + "x_ohm_per_km": 0.123, + "max_i_ka": 0.217, + "type": "cs", + "q_mm2": 70, + "alpha": 0.00403 + }, + "N2XS(FL)2Y 1x120 RM/35 64/110 kV": { + "c_nf_per_km": 112, + "r_ohm_per_km": 0.153, + "x_ohm_per_km": 0.166, + "max_i_ka": 0.366, + "type": "cs", + "q_mm2": 120, + "alpha": 0.00393 + }, + "N2XS(FL)2Y 1x185 RM/35 64/110 kV": { + "c_nf_per_km": 125, + "r_ohm_per_km": 0.099, + "x_ohm_per_km": 0.156, + "max_i_ka": 0.457, + "type": "cs", + "q_mm2": 185, + "alpha": 0.00393 + }, + "N2XS(FL)2Y 1x240 RM/35 64/110 kV": { + "c_nf_per_km": 135, + "r_ohm_per_km": 0.075, + "x_ohm_per_km": 0.149, + "max_i_ka": 0.526, + "type": "cs", + "q_mm2": 240, + "alpha": 0.00393 + }, + "N2XS(FL)2Y 1x300 RM/35 64/110 kV": { + "c_nf_per_km": 144, + "r_ohm_per_km": 0.06, + "x_ohm_per_km": 0.144, + "max_i_ka": 0.588, + "type": "cs", + "q_mm2": 300, + "alpha": 0.00393 + }, + "15-AL1/3-ST1A 0.4": { + "c_nf_per_km": 11, + "r_ohm_per_km": 1.8769, + "x_ohm_per_km": 0.35, + "max_i_ka": 0.105, + "type": "ol", + "q_mm2": 16, + "alpha": 0.00403 + }, + "24-AL1/4-ST1A 0.4": { + "c_nf_per_km": 11.25, + "r_ohm_per_km": 1.2012, + "x_ohm_per_km": 0.335, + "max_i_ka": 0.14, + "type": "ol", + "q_mm2": 24, + "alpha": 0.00403 + }, + "48-AL1/8-ST1A 0.4": { + "c_nf_per_km": 12.2, + "r_ohm_per_km": 0.5939, + "x_ohm_per_km": 0.3, + "max_i_ka": 0.21, + "type": "ol", + "q_mm2": 48, + 
"alpha": 0.00403 + }, + "94-AL1/15-ST1A 0.4": { + "c_nf_per_km": 13.2, + "r_ohm_per_km": 0.306, + "x_ohm_per_km": 0.29, + "max_i_ka": 0.35, + "type": "ol", + "q_mm2": 94, + "alpha": 0.00403 + }, + "34-AL1/6-ST1A 10.0": { + "c_nf_per_km": 9.7, + "r_ohm_per_km": 0.8342, + "x_ohm_per_km": 0.36, + "max_i_ka": 0.17, + "type": "ol", + "q_mm2": 34, + "alpha": 0.00403 + }, + "48-AL1/8-ST1A 10.0": { + "c_nf_per_km": 10.1, + "r_ohm_per_km": 0.5939, + "x_ohm_per_km": 0.35, + "max_i_ka": 0.21, + "type": "ol", + "q_mm2": 48, + "alpha": 0.00403 + }, + "70-AL1/11-ST1A 10.0": { + "c_nf_per_km": 10.4, + "r_ohm_per_km": 0.4132, + "x_ohm_per_km": 0.339, + "max_i_ka": 0.29, + "type": "ol", + "q_mm2": 70, + "alpha": 0.00403 + }, + "94-AL1/15-ST1A 10.0": { + "c_nf_per_km": 10.75, + "r_ohm_per_km": 0.306, + "x_ohm_per_km": 0.33, + "max_i_ka": 0.35, + "type": "ol", + "q_mm2": 94, + "alpha": 0.00403 + }, + "122-AL1/20-ST1A 10.0": { + "c_nf_per_km": 11.1, + "r_ohm_per_km": 0.2376, + "x_ohm_per_km": 0.323, + "max_i_ka": 0.41, + "type": "ol", + "q_mm2": 122, + "alpha": 0.00403 + }, + "149-AL1/24-ST1A 10.0": { + "c_nf_per_km": 11.25, + "r_ohm_per_km": 0.194, + "x_ohm_per_km": 0.315, + "max_i_ka": 0.47, + "type": "ol", + "q_mm2": 149, + "alpha": 0.00403 + }, + "34-AL1/6-ST1A 20.0": { + "c_nf_per_km": 9.15, + "r_ohm_per_km": 0.8342, + "x_ohm_per_km": 0.382, + "max_i_ka": 0.17, + "type": "ol", + "q_mm2": 34, + "alpha": 0.00403 + }, + "48-AL1/8-ST1A 20.0": { + "c_nf_per_km": 9.5, + "r_ohm_per_km": 0.5939, + "x_ohm_per_km": 0.372, + "max_i_ka": 0.21, + "type": "ol", + "q_mm2": 48, + "alpha": 0.00403 + }, + "70-AL1/11-ST1A 20.0": { + "c_nf_per_km": 9.7, + "r_ohm_per_km": 0.4132, + "x_ohm_per_km": 0.36, + "max_i_ka": 0.29, + "type": "ol", + "q_mm2": 70, + "alpha": 0.00403 + }, + "94-AL1/15-ST1A 20.0": { + "c_nf_per_km": 10, + "r_ohm_per_km": 0.306, + "x_ohm_per_km": 0.35, + "max_i_ka": 0.35, + "type": "ol", + "q_mm2": 94, + "alpha": 0.00403 + }, + "122-AL1/20-ST1A 20.0": { + "c_nf_per_km": 10.3, + "r_ohm_per_km": 0.2376, + "x_ohm_per_km": 0.344, + "max_i_ka": 0.41, + "type": "ol", + "q_mm2": 122, + "alpha": 0.00403 + }, + "149-AL1/24-ST1A 20.0": { + "c_nf_per_km": 10.5, + "r_ohm_per_km": 0.194, + "x_ohm_per_km": 0.337, + "max_i_ka": 0.47, + "type": "ol", + "q_mm2": 149, + "alpha": 0.00403 + }, + "184-AL1/30-ST1A 20.0": { + "c_nf_per_km": 10.75, + "r_ohm_per_km": 0.1571, + "x_ohm_per_km": 0.33, + "max_i_ka": 0.535, + "type": "ol", + "q_mm2": 184, + "alpha": 0.00403 + }, + "243-AL1/39-ST1A 20.0": { + "c_nf_per_km": 11, + "r_ohm_per_km": 0.1188, + "x_ohm_per_km": 0.32, + "max_i_ka": 0.645, + "type": "ol", + "q_mm2": 243, + "alpha": 0.00403 + }, + "48-AL1/8-ST1A 110.0": { + "c_nf_per_km": 8, + "r_ohm_per_km": 0.5939, + "x_ohm_per_km": 0.46, + "max_i_ka": 0.21, + "type": "ol", + "q_mm2": 48, + "alpha": 0.00403 + }, + "70-AL1/11-ST1A 110.0": { + "c_nf_per_km": 8.4, + "r_ohm_per_km": 0.4132, + "x_ohm_per_km": 0.45, + "max_i_ka": 0.29, + "type": "ol", + "q_mm2": 70, + "alpha": 0.00403 + }, + "94-AL1/15-ST1A 110.0": { + "c_nf_per_km": 8.65, + "r_ohm_per_km": 0.306, + "x_ohm_per_km": 0.44, + "max_i_ka": 0.35, + "type": "ol", + "q_mm2": 94, + "alpha": 0.00403 + }, + "122-AL1/20-ST1A 110.0": { + "c_nf_per_km": 8.5, + "r_ohm_per_km": 0.2376, + "x_ohm_per_km": 0.43, + "max_i_ka": 0.41, + "type": "ol", + "q_mm2": 122, + "alpha": 0.00403 + }, + "149-AL1/24-ST1A 110.0": { + "c_nf_per_km": 8.75, + "r_ohm_per_km": 0.194, + "x_ohm_per_km": 0.41, + "max_i_ka": 0.47, + "type": "ol", + "q_mm2": 149, + "alpha": 0.00403 + }, + "184-AL1/30-ST1A 
110.0": { + "c_nf_per_km": 8.8, + "r_ohm_per_km": 0.1571, + "x_ohm_per_km": 0.4, + "max_i_ka": 0.535, + "type": "ol", + "q_mm2": 184, + "alpha": 0.00403 + }, + "243-AL1/39-ST1A 110.0": { + "c_nf_per_km": 9, + "r_ohm_per_km": 0.1188, + "x_ohm_per_km": 0.39, + "max_i_ka": 0.645, + "type": "ol", + "q_mm2": 243, + "alpha": 0.00403 + }, + "305-AL1/39-ST1A 110.0": { + "c_nf_per_km": 9.2, + "r_ohm_per_km": 0.0949, + "x_ohm_per_km": 0.38, + "max_i_ka": 0.74, + "type": "ol", + "q_mm2": 305, + "alpha": 0.00403 + }, + "490-AL1/64-ST1A 110.0": { + "c_nf_per_km": 9.75, + "r_ohm_per_km": 0.059, + "x_ohm_per_km": 0.37, + "max_i_ka": 0.96, + "type": "ol", + "q_mm2": 490, + "alpha": 0.00403 + }, + "679-AL1/86-ST1A 110.0": { + "c_nf_per_km": 9.95, + "r_ohm_per_km": 0.042, + "x_ohm_per_km": 0.36, + "max_i_ka": 1.15, + "type": "ol", + "q_mm2": 679, + "alpha": 0.00403 + }, + "490-AL1/64-ST1A 220.0": { + "c_nf_per_km": 10, + "r_ohm_per_km": 0.059, + "x_ohm_per_km": 0.285, + "max_i_ka": 0.96, + "type": "ol", + "q_mm2": 490, + "alpha": 0.00403 + }, + "679-AL1/86-ST1A 220.0": { + "c_nf_per_km": 11.7, + "r_ohm_per_km": 0.042, + "x_ohm_per_km": 0.275, + "max_i_ka": 1.15, + "type": "ol", + "q_mm2": 679, + "alpha": 0.00403 + }, + "490-AL1/64-ST1A 380.0": { + "c_nf_per_km": 11, + "r_ohm_per_km": 0.059, + "x_ohm_per_km": 0.253, + "max_i_ka": 0.96, + "type": "ol", + "q_mm2": 490, + "alpha": 0.00403 + }, + "679-AL1/86-ST1A 380.0": { + "c_nf_per_km": 14.6, + "r_ohm_per_km": 0.042, + "x_ohm_per_km": 0.25, + "max_i_ka": 1.15, + "type": "ol", + "q_mm2": 679, + "alpha": 0.00403 + } + }, + "trafo": { + "160 MVA 380/110 kV": { + "i0_percent": 0.06, + "pfe_kw": 60, + "vkr_percent": 0.25, + "sn_mva": 160, + "vn_lv_kv": 110.0, + "vn_hv_kv": 380.0, + "vk_percent": 12.2, + "shift_degree": 0, + "vector_group": "Yy0", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -9, + "tap_max": 9, + "tap_step_degree": 0, + "tap_step_percent": 1.5, + "tap_phase_shifter": false + }, + "100 MVA 220/110 kV": { + "i0_percent": 0.06, + "pfe_kw": 55, + "vkr_percent": 0.26, + "sn_mva": 100, + "vn_lv_kv": 110.0, + "vn_hv_kv": 220.0, + "vk_percent": 12.0, + "shift_degree": 0, + "vector_group": "Yy0", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -9, + "tap_max": 9, + "tap_step_degree": 0, + "tap_step_percent": 1.5, + "tap_phase_shifter": false + }, + "63 MVA 110/20 kV": { + "i0_percent": 0.04, + "pfe_kw": 22, + "vkr_percent": 0.32, + "sn_mva": 63, + "vn_lv_kv": 20.0, + "vn_hv_kv": 110.0, + "vk_percent": 18, + "shift_degree": 150, + "vector_group": "YNd5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -9, + "tap_max": 9, + "tap_step_degree": 0, + "tap_step_percent": 1.5, + "tap_phase_shifter": false + }, + "40 MVA 110/20 kV": { + "i0_percent": 0.05, + "pfe_kw": 18, + "vkr_percent": 0.34, + "sn_mva": 40, + "vn_lv_kv": 20.0, + "vn_hv_kv": 110.0, + "vk_percent": 16.2, + "shift_degree": 150, + "vector_group": "YNd5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -9, + "tap_max": 9, + "tap_step_degree": 0, + "tap_step_percent": 1.5, + "tap_phase_shifter": false + }, + "25 MVA 110/20 kV": { + "i0_percent": 0.07, + "pfe_kw": 14, + "vkr_percent": 0.41, + "sn_mva": 25, + "vn_lv_kv": 20.0, + "vn_hv_kv": 110.0, + "vk_percent": 12, + "shift_degree": 150, + "vector_group": "YNd5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -9, + "tap_max": 9, + "tap_step_degree": 0, + "tap_step_percent": 1.5, + "tap_phase_shifter": false + }, + "63 MVA 110/10 kV": { + "sn_mva": 63, + "vn_hv_kv": 110, + "vn_lv_kv": 10, + "vk_percent": 18, + 
"vkr_percent": 0.32, + "pfe_kw": 22, + "i0_percent": 0.04, + "shift_degree": 150, + "vector_group": "YNd5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -9, + "tap_max": 9, + "tap_step_degree": 0, + "tap_step_percent": 1.5, + "tap_phase_shifter": false + }, + "40 MVA 110/10 kV": { + "sn_mva": 40, + "vn_hv_kv": 110, + "vn_lv_kv": 10, + "vk_percent": 16.2, + "vkr_percent": 0.34, + "pfe_kw": 18, + "i0_percent": 0.05, + "shift_degree": 150, + "vector_group": "YNd5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -9, + "tap_max": 9, + "tap_step_degree": 0, + "tap_step_percent": 1.5, + "tap_phase_shifter": false + }, + "25 MVA 110/10 kV": { + "sn_mva": 25, + "vn_hv_kv": 110, + "vn_lv_kv": 10, + "vk_percent": 12, + "vkr_percent": 0.41, + "pfe_kw": 14, + "i0_percent": 0.07, + "shift_degree": 150, + "vector_group": "YNd5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -9, + "tap_max": 9, + "tap_step_degree": 0, + "tap_step_percent": 1.5, + "tap_phase_shifter": false + }, + "0.25 MVA 20/0.4 kV": { + "sn_mva": 0.25, + "vn_hv_kv": 20, + "vn_lv_kv": 0.4, + "vk_percent": 6, + "vkr_percent": 1.44, + "pfe_kw": 0.8, + "i0_percent": 0.32, + "shift_degree": 150, + "vector_group": "Yzn5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -2, + "tap_max": 2, + "tap_step_degree": 0, + "tap_step_percent": 2.5, + "tap_phase_shifter": false + }, + "0.4 MVA 20/0.4 kV": { + "sn_mva": 0.4, + "vn_hv_kv": 20, + "vn_lv_kv": 0.4, + "vk_percent": 6, + "vkr_percent": 1.425, + "pfe_kw": 1.35, + "i0_percent": 0.3375, + "shift_degree": 150, + "vector_group": "Dyn5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -2, + "tap_max": 2, + "tap_step_degree": 0, + "tap_step_percent": 2.5, + "tap_phase_shifter": false + }, + "0.63 MVA 20/0.4 kV": { + "sn_mva": 0.63, + "vn_hv_kv": 20, + "vn_lv_kv": 0.4, + "vk_percent": 6, + "vkr_percent": 1.206, + "pfe_kw": 1.65, + "i0_percent": 0.2619, + "shift_degree": 150, + "vector_group": "Dyn5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -2, + "tap_max": 2, + "tap_step_degree": 0, + "tap_step_percent": 2.5, + "tap_phase_shifter": false + }, + "0.25 MVA 10/0.4 kV": { + "sn_mva": 0.25, + "vn_hv_kv": 10, + "vn_lv_kv": 0.4, + "vk_percent": 4, + "vkr_percent": 1.2, + "pfe_kw": 0.6, + "i0_percent": 0.24, + "shift_degree": 150, + "vector_group": "Dyn5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -2, + "tap_max": 2, + "tap_step_degree": 0, + "tap_step_percent": 2.5, + "tap_phase_shifter": false + }, + "0.4 MVA 10/0.4 kV": { + "sn_mva": 0.4, + "vn_hv_kv": 10, + "vn_lv_kv": 0.4, + "vk_percent": 4, + "vkr_percent": 1.325, + "pfe_kw": 0.95, + "i0_percent": 0.2375, + "shift_degree": 150, + "vector_group": "Dyn5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -2, + "tap_max": 2, + "tap_step_degree": 0, + "tap_step_percent": 2.5, + "tap_phase_shifter": false + }, + "0.63 MVA 10/0.4 kV": { + "sn_mva": 0.63, + "vn_hv_kv": 10, + "vn_lv_kv": 0.4, + "vk_percent": 4, + "vkr_percent": 1.0794, + "pfe_kw": 1.18, + "i0_percent": 0.1873, + "shift_degree": 150, + "vector_group": "Dyn5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -2, + "tap_max": 2, + "tap_step_degree": 0, + "tap_step_percent": 2.5, + "tap_phase_shifter": false + } + }, + "trafo3w": { + "63/25/38 MVA 110/20/10 kV": { + "sn_hv_mva": 63, + "sn_mv_mva": 25, + "sn_lv_mva": 38, + "vn_hv_kv": 110, + "vn_mv_kv": 20, + "vn_lv_kv": 10, + "vk_hv_percent": 10.4, + "vk_mv_percent": 10.4, + "vk_lv_percent": 10.4, + "vkr_hv_percent": 0.28, + "vkr_mv_percent": 0.32, + "vkr_lv_percent": 0.35, + "pfe_kw": 
35, + "i0_percent": 0.89, + "shift_mv_degree": 0, + "shift_lv_degree": 0, + "vector_group": "YN0yn0yn0", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -10, + "tap_max": 10, + "tap_step_percent": 1.2 + }, + "63/25/38 MVA 110/10/10 kV": { + "sn_hv_mva": 63, + "sn_mv_mva": 25, + "sn_lv_mva": 38, + "vn_hv_kv": 110, + "vn_mv_kv": 10, + "vn_lv_kv": 10, + "vk_hv_percent": 10.4, + "vk_mv_percent": 10.4, + "vk_lv_percent": 10.4, + "vkr_hv_percent": 0.28, + "vkr_mv_percent": 0.32, + "vkr_lv_percent": 0.35, + "pfe_kw": 35, + "i0_percent": 0.89, + "shift_mv_degree": 0, + "shift_lv_degree": 0, + "vector_group": "YN0yn0yn0", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -10, + "tap_max": 10, + "tap_step_percent": 1.2 + } + } + }, + "res_bus": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"vm_pu\",\"va_degree\",\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "vm_pu": "float64", + "va_degree": "float64", + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_line": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_from_mw\",\"q_from_mvar\",\"p_to_mw\",\"q_to_mvar\",\"pl_mw\",\"ql_mvar\",\"i_from_ka\",\"i_to_ka\",\"i_ka\",\"vm_from_pu\",\"va_from_degree\",\"vm_to_pu\",\"va_to_degree\",\"loading_percent\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_from_mw": "float64", + "q_from_mvar": "float64", + "p_to_mw": "float64", + "q_to_mvar": "float64", + "pl_mw": "float64", + "ql_mvar": "float64", + "i_from_ka": "float64", + "i_to_ka": "float64", + "i_ka": "float64", + "vm_from_pu": "float64", + "va_from_degree": "float64", + "vm_to_pu": "float64", + "va_to_degree": "float64", + "loading_percent": "float64" + } + }, + "res_trafo": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_hv_mw\",\"q_hv_mvar\",\"p_lv_mw\",\"q_lv_mvar\",\"pl_mw\",\"ql_mvar\",\"i_hv_ka\",\"i_lv_ka\",\"vm_hv_pu\",\"va_hv_degree\",\"vm_lv_pu\",\"va_lv_degree\",\"loading_percent\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_hv_mw": "float64", + "q_hv_mvar": "float64", + "p_lv_mw": "float64", + "q_lv_mvar": "float64", + "pl_mw": "float64", + "ql_mvar": "float64", + "i_hv_ka": "float64", + "i_lv_ka": "float64", + "vm_hv_pu": "float64", + "va_hv_degree": "float64", + "vm_lv_pu": "float64", + "va_lv_degree": "float64", + "loading_percent": "float64" + } + }, + "res_trafo3w": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_hv_mw\",\"q_hv_mvar\",\"p_mv_mw\",\"q_mv_mvar\",\"p_lv_mw\",\"q_lv_mvar\",\"pl_mw\",\"ql_mvar\",\"i_hv_ka\",\"i_mv_ka\",\"i_lv_ka\",\"vm_hv_pu\",\"va_hv_degree\",\"vm_mv_pu\",\"va_mv_degree\",\"vm_lv_pu\",\"va_lv_degree\",\"va_internal_degree\",\"vm_internal_pu\",\"loading_percent\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_hv_mw": "float64", + "q_hv_mvar": "float64", + "p_mv_mw": "float64", + "q_mv_mvar": "float64", + "p_lv_mw": "float64", + "q_lv_mvar": "float64", + "pl_mw": "float64", + "ql_mvar": "float64", + "i_hv_ka": "float64", + "i_mv_ka": "float64", + "i_lv_ka": "float64", + "vm_hv_pu": "float64", + "va_hv_degree": "float64", + "vm_mv_pu": "float64", + "va_mv_degree": "float64", + "vm_lv_pu": "float64", + "va_lv_degree": "float64", + "va_internal_degree": "float64", + "vm_internal_pu": "float64", + "loading_percent": "float64" + } + }, + "res_impedance": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + 
"_object": "{\"columns\":[\"p_from_mw\",\"q_from_mvar\",\"p_to_mw\",\"q_to_mvar\",\"pl_mw\",\"ql_mvar\",\"i_from_ka\",\"i_to_ka\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_from_mw": "float64", + "q_from_mvar": "float64", + "p_to_mw": "float64", + "q_to_mvar": "float64", + "pl_mw": "float64", + "ql_mvar": "float64", + "i_from_ka": "float64", + "i_to_ka": "float64" + } + }, + "res_ext_grid": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_load": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_motor": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_sgen": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_storage": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_shunt": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\",\"vm_pu\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64", + "vm_pu": "float64" + } + }, + "res_gen": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\",\"va_degree\",\"vm_pu\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64", + "va_degree": "float64", + "vm_pu": "float64" + } + }, + "res_ward": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\",\"vm_pu\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64", + "vm_pu": "float64" + } + }, + "res_xward": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\",\"vm_pu\",\"va_internal_degree\",\"vm_internal_pu\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64", + "vm_pu": "float64", + "va_internal_degree": "float64", + "vm_internal_pu": "float64" + } + }, + "res_dcline": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_from_mw\",\"q_from_mvar\",\"p_to_mw\",\"q_to_mvar\",\"pl_mw\",\"vm_from_pu\",\"va_from_degree\",\"vm_to_pu\",\"va_to_degree\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_from_mw": "float64", + "q_from_mvar": "float64", + "p_to_mw": "float64", + "q_to_mvar": "float64", + "pl_mw": "float64", + "vm_from_pu": "float64", + "va_from_degree": "float64", + "vm_to_pu": "float64", + "va_to_degree": "float64" + } + }, + "res_asymmetric_load": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", + "orient": 
"split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_asymmetric_sgen": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_bus_est": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"vm_pu\",\"va_degree\",\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "vm_pu": "float64", + "va_degree": "float64", + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_line_est": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_from_mw\",\"q_from_mvar\",\"p_to_mw\",\"q_to_mvar\",\"pl_mw\",\"ql_mvar\",\"i_from_ka\",\"i_to_ka\",\"i_ka\",\"vm_from_pu\",\"va_from_degree\",\"vm_to_pu\",\"va_to_degree\",\"loading_percent\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_from_mw": "float64", + "q_from_mvar": "float64", + "p_to_mw": "float64", + "q_to_mvar": "float64", + "pl_mw": "float64", + "ql_mvar": "float64", + "i_from_ka": "float64", + "i_to_ka": "float64", + "i_ka": "float64", + "vm_from_pu": "float64", + "va_from_degree": "float64", + "vm_to_pu": "float64", + "va_to_degree": "float64", + "loading_percent": "float64" + } + }, + "res_trafo_est": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_hv_mw\",\"q_hv_mvar\",\"p_lv_mw\",\"q_lv_mvar\",\"pl_mw\",\"ql_mvar\",\"i_hv_ka\",\"i_lv_ka\",\"vm_hv_pu\",\"va_hv_degree\",\"vm_lv_pu\",\"va_lv_degree\",\"loading_percent\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_hv_mw": "float64", + "q_hv_mvar": "float64", + "p_lv_mw": "float64", + "q_lv_mvar": "float64", + "pl_mw": "float64", + "ql_mvar": "float64", + "i_hv_ka": "float64", + "i_lv_ka": "float64", + "vm_hv_pu": "float64", + "va_hv_degree": "float64", + "vm_lv_pu": "float64", + "va_lv_degree": "float64", + "loading_percent": "float64" + } + }, + "res_trafo3w_est": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_hv_mw\",\"q_hv_mvar\",\"p_mv_mw\",\"q_mv_mvar\",\"p_lv_mw\",\"q_lv_mvar\",\"pl_mw\",\"ql_mvar\",\"i_hv_ka\",\"i_mv_ka\",\"i_lv_ka\",\"vm_hv_pu\",\"va_hv_degree\",\"vm_mv_pu\",\"va_mv_degree\",\"vm_lv_pu\",\"va_lv_degree\",\"va_internal_degree\",\"vm_internal_pu\",\"loading_percent\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_hv_mw": "float64", + "q_hv_mvar": "float64", + "p_mv_mw": "float64", + "q_mv_mvar": "float64", + "p_lv_mw": "float64", + "q_lv_mvar": "float64", + "pl_mw": "float64", + "ql_mvar": "float64", + "i_hv_ka": "float64", + "i_mv_ka": "float64", + "i_lv_ka": "float64", + "vm_hv_pu": "float64", + "va_hv_degree": "float64", + "vm_mv_pu": "float64", + "va_mv_degree": "float64", + "vm_lv_pu": "float64", + "va_lv_degree": "float64", + "va_internal_degree": "float64", + "vm_internal_pu": "float64", + "loading_percent": "float64" + } + }, + "res_impedance_est": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_from_mw\",\"q_from_mvar\",\"p_to_mw\",\"q_to_mvar\",\"pl_mw\",\"ql_mvar\",\"i_from_ka\",\"i_to_ka\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_from_mw": "float64", + "q_from_mvar": "float64", + "p_to_mw": "float64", + "q_to_mvar": "float64", + "pl_mw": "float64", + "ql_mvar": "float64", + "i_from_ka": "float64", + "i_to_ka": "float64" + } + }, + 
"res_bus_sc": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[],\"index\":[],\"data\":[]}", + "orient": "split" + }, + "res_line_sc": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[],\"index\":[],\"data\":[]}", + "orient": "split" + }, + "res_trafo_sc": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[],\"index\":[],\"data\":[]}", + "orient": "split" + }, + "res_trafo3w_sc": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[],\"index\":[],\"data\":[]}", + "orient": "split" + }, + "res_ext_grid_sc": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[],\"index\":[],\"data\":[]}", + "orient": "split" + }, + "res_gen_sc": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[],\"index\":[],\"data\":[]}", + "orient": "split" + }, + "res_sgen_sc": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[],\"index\":[],\"data\":[]}", + "orient": "split" + }, + "res_bus_3ph": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"vm_a_pu\",\"va_a_degree\",\"vm_b_pu\",\"va_b_degree\",\"vm_c_pu\",\"va_c_degree\",\"p_a_mw\",\"q_a_mvar\",\"p_b_mw\",\"q_b_mvar\",\"p_c_mw\",\"q_c_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "vm_a_pu": "float64", + "va_a_degree": "float64", + "vm_b_pu": "float64", + "va_b_degree": "float64", + "vm_c_pu": "float64", + "va_c_degree": "float64", + "p_a_mw": "float64", + "q_a_mvar": "float64", + "p_b_mw": "float64", + "q_b_mvar": "float64", + "p_c_mw": "float64", + "q_c_mvar": "float64" + } + }, + "res_line_3ph": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_a_from_mw\",\"q_a_from_mvar\",\"p_b_from_mw\",\"q_b_from_mvar\",\"q_c_from_mvar\",\"p_a_to_mw\",\"q_a_to_mvar\",\"p_b_to_mw\",\"q_b_to_mvar\",\"p_c_to_mw\",\"q_c_to_mvar\",\"p_a_l_mw\",\"q_a_l_mvar\",\"p_b_l_mw\",\"q_b_l_mvar\",\"p_c_l_mw\",\"q_c_l_mvar\",\"i_a_from_ka\",\"i_a_to_ka\",\"i_b_from_ka\",\"i_b_to_ka\",\"i_c_from_ka\",\"i_c_to_ka\",\"i_a_ka\",\"i_b_ka\",\"i_c_ka\",\"i_n_from_ka\",\"i_n_to_ka\",\"i_n_ka\",\"loading_a_percent\",\"loading_b_percent\",\"loading_c_percent\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_a_from_mw": "float64", + "q_a_from_mvar": "float64", + "p_b_from_mw": "float64", + "q_b_from_mvar": "float64", + "q_c_from_mvar": "float64", + "p_a_to_mw": "float64", + "q_a_to_mvar": "float64", + "p_b_to_mw": "float64", + "q_b_to_mvar": "float64", + "p_c_to_mw": "float64", + "q_c_to_mvar": "float64", + "p_a_l_mw": "float64", + "q_a_l_mvar": "float64", + "p_b_l_mw": "float64", + "q_b_l_mvar": "float64", + "p_c_l_mw": "float64", + "q_c_l_mvar": "float64", + "i_a_from_ka": "float64", + "i_a_to_ka": "float64", + "i_b_from_ka": "float64", + "i_b_to_ka": "float64", + "i_c_from_ka": "float64", + "i_c_to_ka": "float64", + "i_a_ka": "float64", + "i_b_ka": "float64", + "i_c_ka": "float64", + "i_n_from_ka": "float64", + "i_n_to_ka": "float64", + "i_n_ka": "float64", + "loading_a_percent": "float64", + "loading_b_percent": "float64", + "loading_c_percent": "float64" + } + }, + "res_trafo_3ph": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": 
"{\"columns\":[\"p_a_hv_mw\",\"q_a_hv_mvar\",\"p_b_hv_mw\",\"q_b_hv_mvar\",\"p_c_hv_mw\",\"q_c_hv_mvar\",\"p_a_lv_mw\",\"q_a_lv_mvar\",\"p_b_lv_mw\",\"q_b_lv_mvar\",\"p_c_lv_mw\",\"q_c_lv_mvar\",\"p_a_l_mw\",\"q_a_l_mvar\",\"p_b_l_mw\",\"q_b_l_mvar\",\"p_c_l_mw\",\"q_c_l_mvar\",\"i_a_hv_ka\",\"i_a_lv_ka\",\"i_b_hv_ka\",\"i_b_lv_ka\",\"i_c_hv_ka\",\"i_c_lv_ka\",\"loading_a_percent\",\"loading_b_percent\",\"loading_c_percent\",\"loading_percent\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_a_hv_mw": "float64", + "q_a_hv_mvar": "float64", + "p_b_hv_mw": "float64", + "q_b_hv_mvar": "float64", + "p_c_hv_mw": "float64", + "q_c_hv_mvar": "float64", + "p_a_lv_mw": "float64", + "q_a_lv_mvar": "float64", + "p_b_lv_mw": "float64", + "q_b_lv_mvar": "float64", + "p_c_lv_mw": "float64", + "q_c_lv_mvar": "float64", + "p_a_l_mw": "float64", + "q_a_l_mvar": "float64", + "p_b_l_mw": "float64", + "q_b_l_mvar": "float64", + "p_c_l_mw": "float64", + "q_c_l_mvar": "float64", + "i_a_hv_ka": "float64", + "i_a_lv_ka": "float64", + "i_b_hv_ka": "float64", + "i_b_lv_ka": "float64", + "i_c_hv_ka": "float64", + "i_c_lv_ka": "float64", + "loading_a_percent": "float64", + "loading_b_percent": "float64", + "loading_c_percent": "float64", + "loading_percent": "float64" + } + }, + "res_ext_grid_3ph": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_a_mw\",\"q_a_mvar\",\"p_b_mw\",\"q_b_mvar\",\"p_c_mw\",\"q_c_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_a_mw": "float64", + "q_a_mvar": "float64", + "p_b_mw": "float64", + "q_b_mvar": "float64", + "p_c_mw": "float64", + "q_c_mvar": "float64" + } + }, + "res_shunt_3ph": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[],\"index\":[],\"data\":[]}", + "orient": "split" + }, + "res_load_3ph": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_sgen_3ph": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_storage_3ph": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_asymmetric_load_3ph": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_a_mw\",\"q_a_mvar\",\"p_b_mw\",\"q_b_mvar\",\"p_c_mw\",\"q_c_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_a_mw": "float64", + "q_a_mvar": "float64", + "p_b_mw": "float64", + "q_b_mvar": "float64", + "p_c_mw": "float64", + "q_c_mvar": "float64" + } + }, + "res_asymmetric_sgen_3ph": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_a_mw\",\"q_a_mvar\",\"p_b_mw\",\"q_b_mvar\",\"p_c_mw\",\"q_c_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_a_mw": "float64", + "q_a_mvar": "float64", + "p_b_mw": "float64", + "q_b_mvar": "float64", + "p_c_mw": "float64", + "q_c_mvar": "float64" + } + }, + "user_pf_options": {} + } +} \ No newline at end of file diff --git a/grid2op/data_test/educ_case14_storage_init_state/grid_layout.json 
b/grid2op/data_test/educ_case14_storage_init_state/grid_layout.json new file mode 100644 index 000000000..e1534647f --- /dev/null +++ b/grid2op/data_test/educ_case14_storage_init_state/grid_layout.json @@ -0,0 +1,58 @@ +{ + "sub_0": [ + -280.0, + -81.0 + ], + "sub_1": [ + -100.0, + -270.0 + ], + "sub_2": [ + 366.0, + -270.0 + ], + "sub_3": [ + 366.0, + -54.0 + ], + "sub_4": [ + -64.0, + -54.0 + ], + "sub_5": [ + -64.0, + 54.0 + ], + "sub_6": [ + 450.0, + 0.0 + ], + "sub_7": [ + 550.0, + 0.0 + ], + "sub_8": [ + 326.0, + 54.0 + ], + "sub_9": [ + 222.0, + 108.0 + ], + "sub_10": [ + 79.0, + 162.0 + ], + "sub_11": [ + -170.0, + 270.0 + ], + "sub_12": [ + -64.0, + 270.0 + ], + "sub_13": [ + 222.0, + 216.0 + ] +} diff --git a/grid2op/data_test/educ_case14_storage_init_state/prods_charac.csv b/grid2op/data_test/educ_case14_storage_init_state/prods_charac.csv new file mode 100644 index 000000000..0c1159a06 --- /dev/null +++ b/grid2op/data_test/educ_case14_storage_init_state/prods_charac.csv @@ -0,0 +1,7 @@ +Pmax,Pmin,name,type,bus,max_ramp_up,max_ramp_down,min_up_time,min_down_time,marginal_cost,shut_down_cost,start_cost,x,y,V +140,0.0,gen_1_0,nuclear,1,5,5,96,96,40,10,20,180,10,142.1 +120,0.0,gen_2_1,thermal,2,10,10,4,4,70,1,2,646,10,142.1 +70,0.0,gen_5_2,wind,5,0,0,0,0,0,0,0,216,334,22.0 +70,0.0,gen_5_3,solar,5,0,0,0,0,0,0,0,216,334,22.0 +40,0.0,gen_7_4,solar,7,0,0,0,0,0,0,0,718,280,13.2 +100,0.0,gen_0_5,hydro,0,15,15,4,4,70,1,2,0,199,142.1 diff --git a/grid2op/data_test/educ_case14_storage_init_state/storage_units_charac.csv b/grid2op/data_test/educ_case14_storage_init_state/storage_units_charac.csv new file mode 100644 index 000000000..0bb5168fb --- /dev/null +++ b/grid2op/data_test/educ_case14_storage_init_state/storage_units_charac.csv @@ -0,0 +1,3 @@ +Emax,Emin,name,type,max_p_prod,max_p_absorb,marginal_cost,power_loss,charging_efficiency,discharging_efficiency +15,0,storage_5_0,battery,5,5,20,0.1,0.95,1 +7,0,storage_7_1,battery,10,10,20,0.1,1,0.9 diff --git a/grid2op/data_test/l2rpn_idf_2023_with_alert/__init__.py b/grid2op/data_test/l2rpn_idf_2023_with_alert/__init__.py new file mode 100644 index 000000000..bd6582d7e --- /dev/null +++ b/grid2op/data_test/l2rpn_idf_2023_with_alert/__init__.py @@ -0,0 +1 @@ +# DO NOT REMOVE, automatically generated by grid2op \ No newline at end of file diff --git a/grid2op/data_test/multimix/case14_002/chronics/0/hazards.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/0/hazards.csv.bz2 new file mode 100644 index 000000000..19f4c400c Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/0/hazards.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/0/load_p.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/0/load_p.csv.bz2 new file mode 100644 index 000000000..5e58c54d6 Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/0/load_p.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/0/load_p_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/0/load_p_forecasted.csv.bz2 new file mode 100644 index 000000000..afa02b2b1 Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/0/load_p_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/0/load_q.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/0/load_q.csv.bz2 new file mode 100644 index 000000000..335611c49 Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/0/load_q.csv.bz2 differ diff --git 
a/grid2op/data_test/multimix/case14_002/chronics/0/load_q_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/0/load_q_forecasted.csv.bz2 new file mode 100644 index 000000000..3183c02d1 Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/0/load_q_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/0/maintenance.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/0/maintenance.csv.bz2 new file mode 100644 index 000000000..19f4c400c Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/0/maintenance.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/0/maintenance_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/0/maintenance_forecasted.csv.bz2 new file mode 100644 index 000000000..19f4c400c Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/0/maintenance_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/0/prod_p.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/0/prod_p.csv.bz2 new file mode 100644 index 000000000..b523af8b6 Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/0/prod_p.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/0/prod_p_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/0/prod_p_forecasted.csv.bz2 new file mode 100644 index 000000000..7ee0bdb2b Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/0/prod_p_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/0/prod_v.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/0/prod_v.csv.bz2 new file mode 100644 index 000000000..2d590080e Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/0/prod_v.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/0/prod_v_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/0/prod_v_forecasted.csv.bz2 new file mode 100644 index 000000000..b7c0d91cc Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/0/prod_v_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/0/start_datetime.info b/grid2op/data_test/multimix/case14_002/chronics/0/start_datetime.info new file mode 100644 index 000000000..bd8d52ab4 --- /dev/null +++ b/grid2op/data_test/multimix/case14_002/chronics/0/start_datetime.info @@ -0,0 +1 @@ +2019-01-05 23:55 diff --git a/grid2op/data_test/multimix/case14_002/chronics/0/time_interval.info b/grid2op/data_test/multimix/case14_002/chronics/0/time_interval.info new file mode 100644 index 000000000..beb9b9011 --- /dev/null +++ b/grid2op/data_test/multimix/case14_002/chronics/0/time_interval.info @@ -0,0 +1 @@ +00:05 diff --git a/grid2op/data_test/multimix/case14_002/chronics/000/load_p.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/000/load_p.csv.bz2 deleted file mode 100644 index 77fd7af71..000000000 Binary files a/grid2op/data_test/multimix/case14_002/chronics/000/load_p.csv.bz2 and /dev/null differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/000/load_p_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/000/load_p_forecasted.csv.bz2 deleted file mode 100644 index ce08ec0e1..000000000 Binary files a/grid2op/data_test/multimix/case14_002/chronics/000/load_p_forecasted.csv.bz2 and /dev/null differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/000/load_q.csv.bz2 
b/grid2op/data_test/multimix/case14_002/chronics/000/load_q.csv.bz2 deleted file mode 100644 index b2b092db7..000000000 Binary files a/grid2op/data_test/multimix/case14_002/chronics/000/load_q.csv.bz2 and /dev/null differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/000/load_q_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/000/load_q_forecasted.csv.bz2 deleted file mode 100644 index 631f0f40b..000000000 Binary files a/grid2op/data_test/multimix/case14_002/chronics/000/load_q_forecasted.csv.bz2 and /dev/null differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/000/prod_p.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/000/prod_p.csv.bz2 deleted file mode 100644 index 84bf12179..000000000 Binary files a/grid2op/data_test/multimix/case14_002/chronics/000/prod_p.csv.bz2 and /dev/null differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/000/prod_p_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/000/prod_p_forecasted.csv.bz2 deleted file mode 100644 index 2d7ef6442..000000000 Binary files a/grid2op/data_test/multimix/case14_002/chronics/000/prod_p_forecasted.csv.bz2 and /dev/null differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/000/prod_v.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/000/prod_v.csv.bz2 deleted file mode 100644 index c300e1563..000000000 Binary files a/grid2op/data_test/multimix/case14_002/chronics/000/prod_v.csv.bz2 and /dev/null differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/000/prod_v_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/000/prod_v_forecasted.csv.bz2 deleted file mode 100644 index 70cb99dbc..000000000 Binary files a/grid2op/data_test/multimix/case14_002/chronics/000/prod_v_forecasted.csv.bz2 and /dev/null differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/001/load_p.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/001/load_p.csv.bz2 deleted file mode 100644 index 30336696c..000000000 Binary files a/grid2op/data_test/multimix/case14_002/chronics/001/load_p.csv.bz2 and /dev/null differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/001/load_p_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/001/load_p_forecasted.csv.bz2 deleted file mode 100644 index 5de1d99be..000000000 Binary files a/grid2op/data_test/multimix/case14_002/chronics/001/load_p_forecasted.csv.bz2 and /dev/null differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/001/load_q.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/001/load_q.csv.bz2 deleted file mode 100644 index 17d69b9a6..000000000 Binary files a/grid2op/data_test/multimix/case14_002/chronics/001/load_q.csv.bz2 and /dev/null differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/001/load_q_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/001/load_q_forecasted.csv.bz2 deleted file mode 100644 index 303dbf42d..000000000 Binary files a/grid2op/data_test/multimix/case14_002/chronics/001/load_q_forecasted.csv.bz2 and /dev/null differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/001/prod_p.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/001/prod_p.csv.bz2 deleted file mode 100644 index 2a1cf249d..000000000 Binary files a/grid2op/data_test/multimix/case14_002/chronics/001/prod_p.csv.bz2 and /dev/null differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/001/prod_p_forecasted.csv.bz2 
b/grid2op/data_test/multimix/case14_002/chronics/001/prod_p_forecasted.csv.bz2 deleted file mode 100644 index c7bc25425..000000000 Binary files a/grid2op/data_test/multimix/case14_002/chronics/001/prod_p_forecasted.csv.bz2 and /dev/null differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/001/prod_v.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/001/prod_v.csv.bz2 deleted file mode 100644 index c300e1563..000000000 Binary files a/grid2op/data_test/multimix/case14_002/chronics/001/prod_v.csv.bz2 and /dev/null differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/001/prod_v_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/001/prod_v_forecasted.csv.bz2 deleted file mode 100644 index 70cb99dbc..000000000 Binary files a/grid2op/data_test/multimix/case14_002/chronics/001/prod_v_forecasted.csv.bz2 and /dev/null differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/1/hazards.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/1/hazards.csv.bz2 new file mode 100644 index 000000000..19f4c400c Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/1/hazards.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/1/load_p.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/1/load_p.csv.bz2 new file mode 100644 index 000000000..1fb2cfedb Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/1/load_p.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/1/load_p_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/1/load_p_forecasted.csv.bz2 new file mode 100644 index 000000000..6ec178d02 Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/1/load_p_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/1/load_q.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/1/load_q.csv.bz2 new file mode 100644 index 000000000..f398706f6 Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/1/load_q.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/1/load_q_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/1/load_q_forecasted.csv.bz2 new file mode 100644 index 000000000..8deb04b51 Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/1/load_q_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/1/maintenance.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/1/maintenance.csv.bz2 new file mode 100644 index 000000000..19f4c400c Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/1/maintenance.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/1/maintenance_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/1/maintenance_forecasted.csv.bz2 new file mode 100644 index 000000000..19f4c400c Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/1/maintenance_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/1/prod_p.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/1/prod_p.csv.bz2 new file mode 100644 index 000000000..c13834eb5 Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/1/prod_p.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/1/prod_p_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/1/prod_p_forecasted.csv.bz2 new file mode 100644 index 000000000..6fed9d123 
Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/1/prod_p_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/1/prod_v.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/1/prod_v.csv.bz2 new file mode 100644 index 000000000..2d590080e Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/1/prod_v.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/1/prod_v_forecasted.csv.bz2 b/grid2op/data_test/multimix/case14_002/chronics/1/prod_v_forecasted.csv.bz2 new file mode 100644 index 000000000..b7c0d91cc Binary files /dev/null and b/grid2op/data_test/multimix/case14_002/chronics/1/prod_v_forecasted.csv.bz2 differ diff --git a/grid2op/data_test/multimix/case14_002/chronics/1/start_datetime.info b/grid2op/data_test/multimix/case14_002/chronics/1/start_datetime.info new file mode 100644 index 000000000..bd8d52ab4 --- /dev/null +++ b/grid2op/data_test/multimix/case14_002/chronics/1/start_datetime.info @@ -0,0 +1 @@ +2019-01-05 23:55 diff --git a/grid2op/data_test/multimix/case14_002/chronics/1/time_interval.info b/grid2op/data_test/multimix/case14_002/chronics/1/time_interval.info new file mode 100644 index 000000000..beb9b9011 --- /dev/null +++ b/grid2op/data_test/multimix/case14_002/chronics/1/time_interval.info @@ -0,0 +1 @@ +00:05 diff --git a/grid2op/data_test/multimix/case14_002/config.py b/grid2op/data_test/multimix/case14_002/config.py index d2e6e585c..1c34314d6 100644 --- a/grid2op/data_test/multimix/case14_002/config.py +++ b/grid2op/data_test/multimix/case14_002/config.py @@ -15,26 +15,26 @@ "grid_value_class": GridStateFromFileWithForecasts, "volagecontroler_class": None, "thermal_limits": [ - 384.900179, - 384.900179, - 380.0, - 380.0, - 157.0, - 380.0, - 380.0, - 1077.7205012, - 461.8802148, - 769.80036, - 269.4301253, - 384.900179, - 760.0, - 380.0, - 760.0, - 384.900179, - 230.9401074, - 170.79945452, - 3402.24266, - 3402.24266, + 3.84900179e02, + 3.84900179e02, + 2.28997102e05, + 2.28997102e05, + 2.28997102e05, + 1.52664735e04, + 2.28997102e05, + 3.84900179e02, + 3.84900179e02, + 1.83285800e02, + 3.84900179e02, + 3.84900179e02, + 2.28997102e05, + 2.28997102e05, + 6.93930612e04, + 3.84900179e02, + 3.84900179e02, + 2.40562612e02, + 3.40224266e03, + 3.40224266e03, ], "names_chronics_to_grid": None, } diff --git a/grid2op/data_test/multimix/case14_002/grid.json b/grid2op/data_test/multimix/case14_002/grid.json index 88699329a..27dacefd7 100644 --- a/grid2op/data_test/multimix/case14_002/grid.json +++ b/grid2op/data_test/multimix/case14_002/grid.json @@ -1,1363 +1,5 @@ { - "_module": "pandapower.auxiliary", - "_class": "pandapowerNet", - "_object": { - "bus": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"name\",\"vn_kv\",\"type\",\"zone\",\"in_service\",\"min_vm_pu\",\"max_vm_pu\"],\"index\":[0,1,2,3,4,5,6,7,8,9,10,11,12,13],\"data\":[[1,138.0,\"b\",1.0,true,0.94,1.06],[2,138.0,\"b\",1.0,true,0.94,1.06],[3,138.0,\"b\",1.0,true,0.94,1.06],[4,138.0,\"b\",1.0,true,0.94,1.06],[5,138.0,\"b\",1.0,true,0.94,1.06],[6,20.0,\"b\",1.0,true,0.94,1.06],[7,14.0,\"b\",1.0,true,0.94,1.06],[8,12.0,\"b\",1.0,true,0.94,1.06],[9,20.0,\"b\",1.0,true,0.94,1.06],[10,20.0,\"b\",1.0,true,0.94,1.06],[11,20.0,\"b\",1.0,true,0.94,1.06],[12,20.0,\"b\",1.0,true,0.94,1.06],[13,20.0,\"b\",1.0,true,0.94,1.06],[14,20.0,\"b\",1.0,true,0.94,1.06]]}", - "orient": "split", - "dtype": { - "name": "object", - "vn_kv": "float64", - "type": "object", - "zone": "object", - 
"in_service": "bool", - "min_vm_pu": "float64", - "max_vm_pu": "float64" - } - }, - "load": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"name\",\"bus\",\"p_mw\",\"q_mvar\",\"const_z_percent\",\"const_i_percent\",\"sn_mva\",\"scaling\",\"in_service\",\"type\",\"controllable\"],\"index\":[0,1,2,3,4,5,6,7,8,9,10],\"data\":[[null,1,21.699999999999999,12.699999999999999,0.0,0.0,null,1.0,true,null,false],[null,2,94.200000000000003,19.0,0.0,0.0,null,1.0,true,null,false],[null,3,47.799999999999997,-3.9,0.0,0.0,null,1.0,true,null,false],[null,4,7.6,1.6,0.0,0.0,null,1.0,true,null,false],[null,5,11.199999999999999,7.5,0.0,0.0,null,1.0,true,null,false],[null,8,29.5,16.600000000000001,0.0,0.0,null,1.0,true,null,false],[null,9,9.0,5.8,0.0,0.0,null,1.0,true,null,false],[null,10,3.5,1.8,0.0,0.0,null,1.0,true,null,false],[null,11,6.1,1.6,0.0,0.0,null,1.0,true,null,false],[null,12,13.5,5.8,0.0,0.0,null,1.0,true,null,false],[null,13,14.9,5.0,0.0,0.0,null,1.0,true,null,false]]}", - "orient": "split", - "dtype": { - "name": "object", - "bus": "uint32", - "p_mw": "float64", - "q_mvar": "float64", - "const_z_percent": "float64", - "const_i_percent": "float64", - "sn_mva": "float64", - "scaling": "float64", - "in_service": "bool", - "type": "object", - "controllable": "object" - } - }, - "sgen": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"name\",\"bus\",\"p_mw\",\"q_mvar\",\"sn_mva\",\"scaling\",\"in_service\",\"type\",\"current_source\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "name": "object", - "bus": "int64", - "p_mw": "float64", - "q_mvar": "float64", - "sn_mva": "float64", - "scaling": "float64", - "in_service": "bool", - "type": "object", - "current_source": "bool" - } - }, - "storage": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"name\",\"bus\",\"p_mw\",\"q_mvar\",\"sn_mva\",\"soc_percent\",\"min_e_mwh\",\"max_e_mwh\",\"scaling\",\"in_service\",\"type\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "name": "object", - "bus": "int64", - "p_mw": "float64", - "q_mvar": "float64", - "sn_mva": "float64", - "soc_percent": "float64", - "min_e_mwh": "float64", - "max_e_mwh": "float64", - "scaling": "float64", - "in_service": "bool", - "type": "object" - } - }, - "gen": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"name\",\"bus\",\"p_mw\",\"vm_pu\",\"sn_mva\",\"min_q_mvar\",\"max_q_mvar\",\"scaling\",\"slack\",\"in_service\",\"type\",\"controllable\",\"min_p_mw\",\"max_p_mw\"],\"index\":[0,1,2,3],\"data\":[[null,1,40.0,1.045,null,-40.0,50.0,1.0,false,true,null,true,0.0,140.0],[null,2,0.0,1.01,null,0.0,40.0,1.0,false,true,null,true,0.0,100.0],[null,5,0.0,1.07,null,-6.0,24.0,1.0,false,true,null,true,0.0,100.0],[null,7,0.0,1.09,null,-6.0,24.0,1.0,false,true,null,true,0.0,100.0]]}", - "orient": "split", - "dtype": { - "name": "object", - "bus": "uint32", - "p_mw": "float64", - "vm_pu": "float64", - "sn_mva": "float64", - "min_q_mvar": "float64", - "max_q_mvar": "float64", - "scaling": "float64", - "slack": "bool", - "in_service": "bool", - "type": "object", - "controllable": "bool", - "min_p_mw": "float64", - "max_p_mw": "float64" - } - }, - "switch": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"bus\",\"element\",\"et\",\"type\",\"closed\",\"name\",\"z_ohm\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "bus": "int64", - "element": 
"int64", - "et": "object", - "type": "object", - "closed": "bool", - "name": "object", - "z_ohm": "float64" - } - }, - "shunt": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"bus\",\"name\",\"q_mvar\",\"p_mw\",\"vn_kv\",\"step\",\"max_step\",\"in_service\"],\"index\":[0],\"data\":[[8,null,-19.0,0.0,20.0,1,1,true]]}", - "orient": "split", - "dtype": { - "bus": "uint32", - "name": "object", - "q_mvar": "float64", - "p_mw": "float64", - "vn_kv": "float64", - "step": "uint32", - "max_step": "uint32", - "in_service": "bool" - } - }, - "ext_grid": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"name\",\"bus\",\"vm_pu\",\"va_degree\",\"in_service\",\"min_p_mw\",\"max_p_mw\",\"min_q_mvar\",\"max_q_mvar\"],\"index\":[0],\"data\":[[null,0,1.06,0.0,true,0.0,332.399999999999977,0.0,10.0]]}", - "orient": "split", - "dtype": { - "name": "object", - "bus": "uint32", - "vm_pu": "float64", - "va_degree": "float64", - "in_service": "bool", - "min_p_mw": "float64", - "max_p_mw": "float64", - "min_q_mvar": "float64", - "max_q_mvar": "float64" - } - }, - "line": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"name\",\"std_type\",\"from_bus\",\"to_bus\",\"length_km\",\"r_ohm_per_km\",\"x_ohm_per_km\",\"c_nf_per_km\",\"g_us_per_km\",\"max_i_ka\",\"df\",\"parallel\",\"type\",\"in_service\",\"max_loading_percent\"],\"index\":[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14],\"data\":[[null,null,0,1,1.0,3.6907272,11.2683348,882.522683811391971,0.0,41.418606267951418,1.0,1,\"ol\",true,100.0],[null,null,0,4,1.0,10.2894732,42.475737599999995,822.350682642433412,0.0,41.418606267951418,1.0,1,\"ol\",true,100.0],[null,null,1,2,1.0,8.948775599999999,37.701406800000001,732.092680888995574,0.0,41.418606267951418,1.0,1,\"ol\",true,100.0],[null,null,1,3,1.0,11.0664684,33.578380799999998,568.29112215127509,0.0,41.418606267951418,1.0,1,\"ol\",true,100.0],[null,null,1,4,1.0,10.845558,33.1137072,578.319789012768069,0.0,41.418606267951418,1.0,1,\"ol\",true,100.0],[null,null,2,3,1.0,12.761384400000001,32.570953199999998,213.94489304518595,0.0,41.418606267951418,1.0,1,\"ol\",true,100.0],[null,null,3,4,1.0,2.542374,8.019428400000001,0.0,0.0,41.418606267951418,1.0,1,\"ol\",true,100.0],[null,null,5,10,1.0,0.37992,0.7956,0.0,0.0,285.788383248864761,1.0,1,\"ol\",true,100.0],[null,null,5,11,1.0,0.49164,1.02324,0.0,0.0,285.788383248864761,1.0,1,\"ol\",true,100.0],[null,null,5,12,1.0,0.2646,0.52108,0.0,0.0,285.788383248864761,1.0,1,\"ol\",true,100.0],[null,null,8,9,1.0,0.12724,0.338,0.0,0.0,285.788383248864761,1.0,1,\"ol\",true,100.0],[null,null,8,13,1.0,0.50844,1.08152,0.0,0.0,285.788383248864761,1.0,1,\"ol\",true,100.0],[null,null,9,10,1.0,0.3282,0.76828,0.0,0.0,285.788383248864761,1.0,1,\"ol\",true,100.0],[null,null,11,12,1.0,0.88368,0.79952,0.0,0.0,285.788383248864761,1.0,1,\"ol\",true,100.0],[null,null,12,13,1.0,0.68372,1.39208,0.0,0.0,285.788383248864761,1.0,1,\"ol\",true,100.0]]}", - "orient": "split", - "dtype": { - "name": "object", - "std_type": "object", - "from_bus": "uint32", - "to_bus": "uint32", - "length_km": "float64", - "r_ohm_per_km": "float64", - "x_ohm_per_km": "float64", - "c_nf_per_km": "float64", - "g_us_per_km": "float64", - "max_i_ka": "float64", - "df": "float64", - "parallel": "uint32", - "type": "object", - "in_service": "bool", - "max_loading_percent": "float64" - } - }, - "trafo": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": 
"{\"columns\":[\"name\",\"std_type\",\"hv_bus\",\"lv_bus\",\"sn_mva\",\"vn_hv_kv\",\"vn_lv_kv\",\"vk_percent\",\"vkr_percent\",\"pfe_kw\",\"i0_percent\",\"shift_degree\",\"tap_side\",\"tap_neutral\",\"tap_min\",\"tap_max\",\"tap_step_percent\",\"tap_step_degree\",\"tap_pos\",\"tap_phase_shifter\",\"parallel\",\"df\",\"in_service\",\"max_loading_percent\"],\"index\":[0,1,2,3,4],\"data\":[[null,null,3,6,9900.0,138.0,14.0,2070.288000000000011,0.0,0.0,0.0,0.0,\"hv\",0.0,null,null,2.200000000000002,null,-1.0,false,1,1.0,true,100.0],[null,null,3,8,9900.0,138.0,20.0,5506.181999999999789,0.0,0.0,0.0,0.0,\"hv\",0.0,null,null,3.100000000000003,null,-1.0,false,1,1.0,true,100.0],[null,null,4,5,9900.0,138.0,20.0,2494.998000000000047,0.0,0.0,0.0,0.0,\"hv\",0.0,null,null,6.799999999999995,null,-1.0,false,1,1.0,true,100.0],[null,null,6,7,9900.0,14.0,12.0,1743.884999999999991,0.0,0.0,0.0,0.0,null,null,null,null,null,null,null,false,1,1.0,true,100.0],[null,null,8,6,9900.0,20.0,14.0,1089.098999999999933,0.0,0.0,0.0,0.0,null,null,null,null,null,null,null,false,1,1.0,true,100.0]]}", - "orient": "split", - "dtype": { - "name": "object", - "std_type": "object", - "hv_bus": "uint32", - "lv_bus": "uint32", - "sn_mva": "float64", - "vn_hv_kv": "float64", - "vn_lv_kv": "float64", - "vk_percent": "float64", - "vkr_percent": "float64", - "pfe_kw": "float64", - "i0_percent": "float64", - "shift_degree": "float64", - "tap_side": "object", - "tap_neutral": "float64", - "tap_min": "float64", - "tap_max": "float64", - "tap_step_percent": "float64", - "tap_step_degree": "float64", - "tap_pos": "float64", - "tap_phase_shifter": "bool", - "parallel": "uint32", - "df": "float64", - "in_service": "bool", - "max_loading_percent": "float64" - } - }, - "trafo3w": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"name\",\"std_type\",\"hv_bus\",\"mv_bus\",\"lv_bus\",\"sn_hv_mva\",\"sn_mv_mva\",\"sn_lv_mva\",\"vn_hv_kv\",\"vn_mv_kv\",\"vn_lv_kv\",\"vk_hv_percent\",\"vk_mv_percent\",\"vk_lv_percent\",\"vkr_hv_percent\",\"vkr_mv_percent\",\"vkr_lv_percent\",\"pfe_kw\",\"i0_percent\",\"shift_mv_degree\",\"shift_lv_degree\",\"tap_side\",\"tap_neutral\",\"tap_min\",\"tap_max\",\"tap_step_percent\",\"tap_step_degree\",\"tap_pos\",\"tap_at_star_point\",\"in_service\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "name": "object", - "std_type": "object", - "hv_bus": "uint32", - "mv_bus": "uint32", - "lv_bus": "uint32", - "sn_hv_mva": "float64", - "sn_mv_mva": "float64", - "sn_lv_mva": "float64", - "vn_hv_kv": "float64", - "vn_mv_kv": "float64", - "vn_lv_kv": "float64", - "vk_hv_percent": "float64", - "vk_mv_percent": "float64", - "vk_lv_percent": "float64", - "vkr_hv_percent": "float64", - "vkr_mv_percent": "float64", - "vkr_lv_percent": "float64", - "pfe_kw": "float64", - "i0_percent": "float64", - "shift_mv_degree": "float64", - "shift_lv_degree": "float64", - "tap_side": "object", - "tap_neutral": "int32", - "tap_min": "int32", - "tap_max": "int32", - "tap_step_percent": "float64", - "tap_step_degree": "float64", - "tap_pos": "int32", - "tap_at_star_point": "bool", - "in_service": "bool" - } - }, - "impedance": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"name\",\"from_bus\",\"to_bus\",\"rft_pu\",\"xft_pu\",\"rtf_pu\",\"xtf_pu\",\"sn_mva\",\"in_service\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "name": "object", - "from_bus": "uint32", - "to_bus": "uint32", - "rft_pu": "float64", - "xft_pu": "float64", - 
"rtf_pu": "float64", - "xtf_pu": "float64", - "sn_mva": "float64", - "in_service": "bool" - } - }, - "dcline": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"name\",\"from_bus\",\"to_bus\",\"p_mw\",\"loss_percent\",\"loss_mw\",\"vm_from_pu\",\"vm_to_pu\",\"max_p_mw\",\"min_q_from_mvar\",\"min_q_to_mvar\",\"max_q_from_mvar\",\"max_q_to_mvar\",\"in_service\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "name": "object", - "from_bus": "uint32", - "to_bus": "uint32", - "p_mw": "float64", - "loss_percent": "float64", - "loss_mw": "float64", - "vm_from_pu": "float64", - "vm_to_pu": "float64", - "max_p_mw": "float64", - "min_q_from_mvar": "float64", - "min_q_to_mvar": "float64", - "max_q_from_mvar": "float64", - "max_q_to_mvar": "float64", - "in_service": "bool" - } - }, - "ward": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"name\",\"bus\",\"ps_mw\",\"qs_mvar\",\"qz_mvar\",\"pz_mw\",\"in_service\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "name": "object", - "bus": "uint32", - "ps_mw": "float64", - "qs_mvar": "float64", - "qz_mvar": "float64", - "pz_mw": "float64", - "in_service": "bool" - } - }, - "xward": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"name\",\"bus\",\"ps_mw\",\"qs_mvar\",\"qz_mvar\",\"pz_mw\",\"r_ohm\",\"x_ohm\",\"vm_pu\",\"in_service\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "name": "object", - "bus": "uint32", - "ps_mw": "float64", - "qs_mvar": "float64", - "qz_mvar": "float64", - "pz_mw": "float64", - "r_ohm": "float64", - "x_ohm": "float64", - "vm_pu": "float64", - "in_service": "bool" - } - }, - "measurement": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"name\",\"measurement_type\",\"element_type\",\"element\",\"value\",\"std_dev\",\"side\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "name": "object", - "measurement_type": "object", - "element_type": "object", - "element": "uint32", - "value": "float64", - "std_dev": "float64", - "side": "object" - } - }, - "pwl_cost": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"power_type\",\"element\",\"et\",\"points\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "power_type": "object", - "element": "uint32", - "et": "object", - "points": "object" - } - }, - "poly_cost": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"element\",\"et\",\"cp0_eur\",\"cp1_eur_per_mw\",\"cp2_eur_per_mw2\",\"cq0_eur\",\"cq1_eur_per_mvar\",\"cq2_eur_per_mvar2\"],\"index\":[0,1,2,3,4],\"data\":[[0,\"ext_grid\",0.0,20.0,0.0430293,0.0,0.0,0.0],[0,\"gen\",0.0,20.0,0.25,0.0,0.0,0.0],[1,\"gen\",0.0,40.0,0.01,0.0,0.0,0.0],[2,\"gen\",0.0,40.0,0.01,0.0,0.0,0.0],[3,\"gen\",0.0,40.0,0.01,0.0,0.0,0.0]]}", - "orient": "split", - "dtype": { - "element": "uint32", - "et": "object", - "cp0_eur": "float64", - "cp1_eur_per_mw": "float64", - "cp2_eur_per_mw2": "float64", - "cq0_eur": "float64", - "cq1_eur_per_mvar": "float64", - "cq2_eur_per_mvar2": "float64" - } - }, - "controller": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"object\",\"in_service\",\"order\",\"level\",\"recycle\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "object": "object", - "in_service": "bool", - "order": "float64", - "level": "object", - "recycle": "bool" - } - }, - 
"line_geodata": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"coords\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "coords": "object" - } - }, - "bus_geodata": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"x\",\"y\",\"coords\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "x": "float64", - "y": "float64", - "coords": "object" - } - }, - "version": "2.2.1", - "converged": false, - "name": "", - "f_hz": 50, - "sn_mva": 100.0, - "std_types": { - "line": { - "NAYY 4x50 SE": { - "c_nf_per_km": 210, - "r_ohm_per_km": 0.642, - "x_ohm_per_km": 0.083, - "max_i_ka": 0.142, - "type": "cs", - "q_mm2": 50, - "alpha": 0.00403 - }, - "NAYY 4x120 SE": { - "c_nf_per_km": 264, - "r_ohm_per_km": 0.225, - "x_ohm_per_km": 0.08, - "max_i_ka": 0.242, - "type": "cs", - "q_mm2": 120, - "alpha": 0.00403 - }, - "NAYY 4x150 SE": { - "c_nf_per_km": 261, - "r_ohm_per_km": 0.208, - "x_ohm_per_km": 0.08, - "max_i_ka": 0.27, - "type": "cs", - "q_mm2": 150, - "alpha": 0.00403 - }, - "NA2XS2Y 1x95 RM/25 12/20 kV": { - "c_nf_per_km": 216, - "r_ohm_per_km": 0.313, - "x_ohm_per_km": 0.132, - "max_i_ka": 0.252, - "type": "cs", - "q_mm2": 95, - "alpha": 0.00403 - }, - "NA2XS2Y 1x185 RM/25 12/20 kV": { - "c_nf_per_km": 273, - "r_ohm_per_km": 0.161, - "x_ohm_per_km": 0.117, - "max_i_ka": 0.362, - "type": "cs", - "q_mm2": 185, - "alpha": 0.00403 - }, - "NA2XS2Y 1x240 RM/25 12/20 kV": { - "c_nf_per_km": 304, - "r_ohm_per_km": 0.122, - "x_ohm_per_km": 0.112, - "max_i_ka": 0.421, - "type": "cs", - "q_mm2": 240, - "alpha": 0.00403 - }, - "NA2XS2Y 1x95 RM/25 6/10 kV": { - "c_nf_per_km": 315, - "r_ohm_per_km": 0.313, - "x_ohm_per_km": 0.123, - "max_i_ka": 0.249, - "type": "cs", - "q_mm2": 95, - "alpha": 0.00403 - }, - "NA2XS2Y 1x185 RM/25 6/10 kV": { - "c_nf_per_km": 406, - "r_ohm_per_km": 0.161, - "x_ohm_per_km": 0.11, - "max_i_ka": 0.358, - "type": "cs", - "q_mm2": 185, - "alpha": 0.00403 - }, - "NA2XS2Y 1x240 RM/25 6/10 kV": { - "c_nf_per_km": 456, - "r_ohm_per_km": 0.122, - "x_ohm_per_km": 0.105, - "max_i_ka": 0.416, - "type": "cs", - "q_mm2": 240, - "alpha": 0.00403 - }, - "NA2XS2Y 1x150 RM/25 12/20 kV": { - "c_nf_per_km": 250, - "r_ohm_per_km": 0.206, - "x_ohm_per_km": 0.116, - "max_i_ka": 0.319, - "type": "cs", - "q_mm2": 150, - "alpha": 0.00403 - }, - "NA2XS2Y 1x120 RM/25 12/20 kV": { - "c_nf_per_km": 230, - "r_ohm_per_km": 0.253, - "x_ohm_per_km": 0.119, - "max_i_ka": 0.283, - "type": "cs", - "q_mm2": 120, - "alpha": 0.00403 - }, - "NA2XS2Y 1x70 RM/25 12/20 kV": { - "c_nf_per_km": 190, - "r_ohm_per_km": 0.443, - "x_ohm_per_km": 0.132, - "max_i_ka": 0.22, - "type": "cs", - "q_mm2": 70, - "alpha": 0.00403 - }, - "NA2XS2Y 1x150 RM/25 6/10 kV": { - "c_nf_per_km": 360, - "r_ohm_per_km": 0.206, - "x_ohm_per_km": 0.11, - "max_i_ka": 0.315, - "type": "cs", - "q_mm2": 150, - "alpha": 0.00403 - }, - "NA2XS2Y 1x120 RM/25 6/10 kV": { - "c_nf_per_km": 340, - "r_ohm_per_km": 0.253, - "x_ohm_per_km": 0.113, - "max_i_ka": 0.28, - "type": "cs", - "q_mm2": 120, - "alpha": 0.00403 - }, - "NA2XS2Y 1x70 RM/25 6/10 kV": { - "c_nf_per_km": 280, - "r_ohm_per_km": 0.443, - "x_ohm_per_km": 0.123, - "max_i_ka": 0.217, - "type": "cs", - "q_mm2": 70, - "alpha": 0.00403 - }, - "N2XS(FL)2Y 1x120 RM/35 64/110 kV": { - "c_nf_per_km": 112, - "r_ohm_per_km": 0.153, - "x_ohm_per_km": 0.166, - "max_i_ka": 0.366, - "type": "cs", - "q_mm2": 120, - "alpha": 0.00393 - }, - "N2XS(FL)2Y 1x185 RM/35 64/110 kV": { - "c_nf_per_km": 125, - 
"r_ohm_per_km": 0.099, - "x_ohm_per_km": 0.156, - "max_i_ka": 0.457, - "type": "cs", - "q_mm2": 185, - "alpha": 0.00393 - }, - "N2XS(FL)2Y 1x240 RM/35 64/110 kV": { - "c_nf_per_km": 135, - "r_ohm_per_km": 0.075, - "x_ohm_per_km": 0.149, - "max_i_ka": 0.526, - "type": "cs", - "q_mm2": 240, - "alpha": 0.00393 - }, - "N2XS(FL)2Y 1x300 RM/35 64/110 kV": { - "c_nf_per_km": 144, - "r_ohm_per_km": 0.06, - "x_ohm_per_km": 0.144, - "max_i_ka": 0.588, - "type": "cs", - "q_mm2": 300, - "alpha": 0.00393 - }, - "15-AL1/3-ST1A 0.4": { - "c_nf_per_km": 11, - "r_ohm_per_km": 1.8769, - "x_ohm_per_km": 0.35, - "max_i_ka": 0.105, - "type": "ol", - "q_mm2": 16, - "alpha": 0.00403 - }, - "24-AL1/4-ST1A 0.4": { - "c_nf_per_km": 11.25, - "r_ohm_per_km": 1.2012, - "x_ohm_per_km": 0.335, - "max_i_ka": 0.14, - "type": "ol", - "q_mm2": 24, - "alpha": 0.00403 - }, - "48-AL1/8-ST1A 0.4": { - "c_nf_per_km": 12.2, - "r_ohm_per_km": 0.5939, - "x_ohm_per_km": 0.3, - "max_i_ka": 0.21, - "type": "ol", - "q_mm2": 48, - "alpha": 0.00403 - }, - "94-AL1/15-ST1A 0.4": { - "c_nf_per_km": 13.2, - "r_ohm_per_km": 0.306, - "x_ohm_per_km": 0.29, - "max_i_ka": 0.35, - "type": "ol", - "q_mm2": 94, - "alpha": 0.00403 - }, - "34-AL1/6-ST1A 10.0": { - "c_nf_per_km": 9.7, - "r_ohm_per_km": 0.8342, - "x_ohm_per_km": 0.36, - "max_i_ka": 0.17, - "type": "ol", - "q_mm2": 34, - "alpha": 0.00403 - }, - "48-AL1/8-ST1A 10.0": { - "c_nf_per_km": 10.1, - "r_ohm_per_km": 0.5939, - "x_ohm_per_km": 0.35, - "max_i_ka": 0.21, - "type": "ol", - "q_mm2": 48, - "alpha": 0.00403 - }, - "70-AL1/11-ST1A 10.0": { - "c_nf_per_km": 10.4, - "r_ohm_per_km": 0.4132, - "x_ohm_per_km": 0.339, - "max_i_ka": 0.29, - "type": "ol", - "q_mm2": 70, - "alpha": 0.00403 - }, - "94-AL1/15-ST1A 10.0": { - "c_nf_per_km": 10.75, - "r_ohm_per_km": 0.306, - "x_ohm_per_km": 0.33, - "max_i_ka": 0.35, - "type": "ol", - "q_mm2": 94, - "alpha": 0.00403 - }, - "122-AL1/20-ST1A 10.0": { - "c_nf_per_km": 11.1, - "r_ohm_per_km": 0.2376, - "x_ohm_per_km": 0.323, - "max_i_ka": 0.41, - "type": "ol", - "q_mm2": 122, - "alpha": 0.00403 - }, - "149-AL1/24-ST1A 10.0": { - "c_nf_per_km": 11.25, - "r_ohm_per_km": 0.194, - "x_ohm_per_km": 0.315, - "max_i_ka": 0.47, - "type": "ol", - "q_mm2": 149, - "alpha": 0.00403 - }, - "34-AL1/6-ST1A 20.0": { - "c_nf_per_km": 9.15, - "r_ohm_per_km": 0.8342, - "x_ohm_per_km": 0.382, - "max_i_ka": 0.17, - "type": "ol", - "q_mm2": 34, - "alpha": 0.00403 - }, - "48-AL1/8-ST1A 20.0": { - "c_nf_per_km": 9.5, - "r_ohm_per_km": 0.5939, - "x_ohm_per_km": 0.372, - "max_i_ka": 0.21, - "type": "ol", - "q_mm2": 48, - "alpha": 0.00403 - }, - "70-AL1/11-ST1A 20.0": { - "c_nf_per_km": 9.7, - "r_ohm_per_km": 0.4132, - "x_ohm_per_km": 0.36, - "max_i_ka": 0.29, - "type": "ol", - "q_mm2": 70, - "alpha": 0.00403 - }, - "94-AL1/15-ST1A 20.0": { - "c_nf_per_km": 10, - "r_ohm_per_km": 0.306, - "x_ohm_per_km": 0.35, - "max_i_ka": 0.35, - "type": "ol", - "q_mm2": 94, - "alpha": 0.00403 - }, - "122-AL1/20-ST1A 20.0": { - "c_nf_per_km": 10.3, - "r_ohm_per_km": 0.2376, - "x_ohm_per_km": 0.344, - "max_i_ka": 0.41, - "type": "ol", - "q_mm2": 122, - "alpha": 0.00403 - }, - "149-AL1/24-ST1A 20.0": { - "c_nf_per_km": 10.5, - "r_ohm_per_km": 0.194, - "x_ohm_per_km": 0.337, - "max_i_ka": 0.47, - "type": "ol", - "q_mm2": 149, - "alpha": 0.00403 - }, - "184-AL1/30-ST1A 20.0": { - "c_nf_per_km": 10.75, - "r_ohm_per_km": 0.1571, - "x_ohm_per_km": 0.33, - "max_i_ka": 0.535, - "type": "ol", - "q_mm2": 184, - "alpha": 0.00403 - }, - "243-AL1/39-ST1A 20.0": { - "c_nf_per_km": 11, - "r_ohm_per_km": 0.1188, - 
"x_ohm_per_km": 0.32, - "max_i_ka": 0.645, - "type": "ol", - "q_mm2": 243, - "alpha": 0.00403 - }, - "48-AL1/8-ST1A 110.0": { - "c_nf_per_km": 8, - "r_ohm_per_km": 0.5939, - "x_ohm_per_km": 0.46, - "max_i_ka": 0.21, - "type": "ol", - "q_mm2": 48, - "alpha": 0.00403 - }, - "70-AL1/11-ST1A 110.0": { - "c_nf_per_km": 8.4, - "r_ohm_per_km": 0.4132, - "x_ohm_per_km": 0.45, - "max_i_ka": 0.29, - "type": "ol", - "q_mm2": 70, - "alpha": 0.00403 - }, - "94-AL1/15-ST1A 110.0": { - "c_nf_per_km": 8.65, - "r_ohm_per_km": 0.306, - "x_ohm_per_km": 0.44, - "max_i_ka": 0.35, - "type": "ol", - "q_mm2": 94, - "alpha": 0.00403 - }, - "122-AL1/20-ST1A 110.0": { - "c_nf_per_km": 8.5, - "r_ohm_per_km": 0.2376, - "x_ohm_per_km": 0.43, - "max_i_ka": 0.41, - "type": "ol", - "q_mm2": 122, - "alpha": 0.00403 - }, - "149-AL1/24-ST1A 110.0": { - "c_nf_per_km": 8.75, - "r_ohm_per_km": 0.194, - "x_ohm_per_km": 0.41, - "max_i_ka": 0.47, - "type": "ol", - "q_mm2": 149, - "alpha": 0.00403 - }, - "184-AL1/30-ST1A 110.0": { - "c_nf_per_km": 8.8, - "r_ohm_per_km": 0.1571, - "x_ohm_per_km": 0.4, - "max_i_ka": 0.535, - "type": "ol", - "q_mm2": 184, - "alpha": 0.00403 - }, - "243-AL1/39-ST1A 110.0": { - "c_nf_per_km": 9, - "r_ohm_per_km": 0.1188, - "x_ohm_per_km": 0.39, - "max_i_ka": 0.645, - "type": "ol", - "q_mm2": 243, - "alpha": 0.00403 - }, - "305-AL1/39-ST1A 110.0": { - "c_nf_per_km": 9.2, - "r_ohm_per_km": 0.0949, - "x_ohm_per_km": 0.38, - "max_i_ka": 0.74, - "type": "ol", - "q_mm2": 305, - "alpha": 0.00403 - }, - "490-AL1/64-ST1A 110.0": { - "c_nf_per_km": 9.75, - "r_ohm_per_km": 0.059, - "x_ohm_per_km": 0.37, - "max_i_ka": 0.96, - "type": "ol", - "q_mm2": 490, - "alpha": 0.00403 - }, - "679-AL1/86-ST1A 110.0": { - "c_nf_per_km": 9.95, - "r_ohm_per_km": 0.042, - "x_ohm_per_km": 0.36, - "max_i_ka": 1.15, - "type": "ol", - "q_mm2": 679, - "alpha": 0.00403 - }, - "490-AL1/64-ST1A 220.0": { - "c_nf_per_km": 10, - "r_ohm_per_km": 0.059, - "x_ohm_per_km": 0.285, - "max_i_ka": 0.96, - "type": "ol", - "q_mm2": 490, - "alpha": 0.00403 - }, - "679-AL1/86-ST1A 220.0": { - "c_nf_per_km": 11.7, - "r_ohm_per_km": 0.042, - "x_ohm_per_km": 0.275, - "max_i_ka": 1.15, - "type": "ol", - "q_mm2": 679, - "alpha": 0.00403 - }, - "490-AL1/64-ST1A 380.0": { - "c_nf_per_km": 11, - "r_ohm_per_km": 0.059, - "x_ohm_per_km": 0.253, - "max_i_ka": 0.96, - "type": "ol", - "q_mm2": 490, - "alpha": 0.00403 - }, - "679-AL1/86-ST1A 380.0": { - "c_nf_per_km": 14.6, - "r_ohm_per_km": 0.042, - "x_ohm_per_km": 0.25, - "max_i_ka": 1.15, - "type": "ol", - "q_mm2": 679, - "alpha": 0.00403 - } - }, - "trafo": { - "160 MVA 380/110 kV": { - "i0_percent": 0.06, - "pfe_kw": 60, - "vkr_percent": 0.25, - "sn_mva": 160, - "vn_lv_kv": 110.0, - "vn_hv_kv": 380.0, - "vk_percent": 12.2, - "shift_degree": 0, - "vector_group": "Yy0", - "tap_side": "hv", - "tap_neutral": 0, - "tap_min": -9, - "tap_max": 9, - "tap_step_degree": 0, - "tap_step_percent": 1.5, - "tap_phase_shifter": false - }, - "100 MVA 220/110 kV": { - "i0_percent": 0.06, - "pfe_kw": 55, - "vkr_percent": 0.26, - "sn_mva": 100, - "vn_lv_kv": 110.0, - "vn_hv_kv": 220.0, - "vk_percent": 12.0, - "shift_degree": 0, - "vector_group": "Yy0", - "tap_side": "hv", - "tap_neutral": 0, - "tap_min": -9, - "tap_max": 9, - "tap_step_degree": 0, - "tap_step_percent": 1.5, - "tap_phase_shifter": false - }, - "63 MVA 110/20 kV": { - "i0_percent": 0.04, - "pfe_kw": 22, - "vkr_percent": 0.32, - "sn_mva": 63, - "vn_lv_kv": 20.0, - "vn_hv_kv": 110.0, - "vk_percent": 18, - "shift_degree": 150, - "vector_group": "YNd5", - "tap_side": 
"hv", - "tap_neutral": 0, - "tap_min": -9, - "tap_max": 9, - "tap_step_degree": 0, - "tap_step_percent": 1.5, - "tap_phase_shifter": false - }, - "40 MVA 110/20 kV": { - "i0_percent": 0.05, - "pfe_kw": 18, - "vkr_percent": 0.34, - "sn_mva": 40, - "vn_lv_kv": 20.0, - "vn_hv_kv": 110.0, - "vk_percent": 16.2, - "shift_degree": 150, - "vector_group": "YNd5", - "tap_side": "hv", - "tap_neutral": 0, - "tap_min": -9, - "tap_max": 9, - "tap_step_degree": 0, - "tap_step_percent": 1.5, - "tap_phase_shifter": false - }, - "25 MVA 110/20 kV": { - "i0_percent": 0.07, - "pfe_kw": 14, - "vkr_percent": 0.41, - "sn_mva": 25, - "vn_lv_kv": 20.0, - "vn_hv_kv": 110.0, - "vk_percent": 12, - "shift_degree": 150, - "vector_group": "YNd5", - "tap_side": "hv", - "tap_neutral": 0, - "tap_min": -9, - "tap_max": 9, - "tap_step_degree": 0, - "tap_step_percent": 1.5, - "tap_phase_shifter": false - }, - "63 MVA 110/10 kV": { - "sn_mva": 63, - "vn_hv_kv": 110, - "vn_lv_kv": 10, - "vk_percent": 18, - "vkr_percent": 0.32, - "pfe_kw": 22, - "i0_percent": 0.04, - "shift_degree": 150, - "vector_group": "YNd5", - "tap_side": "hv", - "tap_neutral": 0, - "tap_min": -9, - "tap_max": 9, - "tap_step_degree": 0, - "tap_step_percent": 1.5, - "tap_phase_shifter": false - }, - "40 MVA 110/10 kV": { - "sn_mva": 40, - "vn_hv_kv": 110, - "vn_lv_kv": 10, - "vk_percent": 16.2, - "vkr_percent": 0.34, - "pfe_kw": 18, - "i0_percent": 0.05, - "shift_degree": 150, - "vector_group": "YNd5", - "tap_side": "hv", - "tap_neutral": 0, - "tap_min": -9, - "tap_max": 9, - "tap_step_degree": 0, - "tap_step_percent": 1.5, - "tap_phase_shifter": false - }, - "25 MVA 110/10 kV": { - "sn_mva": 25, - "vn_hv_kv": 110, - "vn_lv_kv": 10, - "vk_percent": 12, - "vkr_percent": 0.41, - "pfe_kw": 14, - "i0_percent": 0.07, - "shift_degree": 150, - "vector_group": "YNd5", - "tap_side": "hv", - "tap_neutral": 0, - "tap_min": -9, - "tap_max": 9, - "tap_step_degree": 0, - "tap_step_percent": 1.5, - "tap_phase_shifter": false - }, - "0.25 MVA 20/0.4 kV": { - "sn_mva": 0.25, - "vn_hv_kv": 20, - "vn_lv_kv": 0.4, - "vk_percent": 6, - "vkr_percent": 1.44, - "pfe_kw": 0.8, - "i0_percent": 0.32, - "shift_degree": 150, - "vector_group": "Yzn5", - "tap_side": "hv", - "tap_neutral": 0, - "tap_min": -2, - "tap_max": 2, - "tap_step_degree": 0, - "tap_step_percent": 2.5, - "tap_phase_shifter": false - }, - "0.4 MVA 20/0.4 kV": { - "sn_mva": 0.4, - "vn_hv_kv": 20, - "vn_lv_kv": 0.4, - "vk_percent": 6, - "vkr_percent": 1.425, - "pfe_kw": 1.35, - "i0_percent": 0.3375, - "shift_degree": 150, - "vector_group": "Dyn5", - "tap_side": "hv", - "tap_neutral": 0, - "tap_min": -2, - "tap_max": 2, - "tap_step_degree": 0, - "tap_step_percent": 2.5, - "tap_phase_shifter": false - }, - "0.63 MVA 20/0.4 kV": { - "sn_mva": 0.63, - "vn_hv_kv": 20, - "vn_lv_kv": 0.4, - "vk_percent": 6, - "vkr_percent": 1.206, - "pfe_kw": 1.65, - "i0_percent": 0.2619, - "shift_degree": 150, - "vector_group": "Dyn5", - "tap_side": "hv", - "tap_neutral": 0, - "tap_min": -2, - "tap_max": 2, - "tap_step_degree": 0, - "tap_step_percent": 2.5, - "tap_phase_shifter": false - }, - "0.25 MVA 10/0.4 kV": { - "sn_mva": 0.25, - "vn_hv_kv": 10, - "vn_lv_kv": 0.4, - "vk_percent": 4, - "vkr_percent": 1.2, - "pfe_kw": 0.6, - "i0_percent": 0.24, - "shift_degree": 150, - "vector_group": "Dyn5", - "tap_side": "hv", - "tap_neutral": 0, - "tap_min": -2, - "tap_max": 2, - "tap_step_degree": 0, - "tap_step_percent": 2.5, - "tap_phase_shifter": false - }, - "0.4 MVA 10/0.4 kV": { - "sn_mva": 0.4, - "vn_hv_kv": 10, - "vn_lv_kv": 0.4, - 
"vk_percent": 4, - "vkr_percent": 1.325, - "pfe_kw": 0.95, - "i0_percent": 0.2375, - "shift_degree": 150, - "vector_group": "Dyn5", - "tap_side": "hv", - "tap_neutral": 0, - "tap_min": -2, - "tap_max": 2, - "tap_step_degree": 0, - "tap_step_percent": 2.5, - "tap_phase_shifter": false - }, - "0.63 MVA 10/0.4 kV": { - "sn_mva": 0.63, - "vn_hv_kv": 10, - "vn_lv_kv": 0.4, - "vk_percent": 4, - "vkr_percent": 1.0794, - "pfe_kw": 1.18, - "i0_percent": 0.1873, - "shift_degree": 150, - "vector_group": "Dyn5", - "tap_side": "hv", - "tap_neutral": 0, - "tap_min": -2, - "tap_max": 2, - "tap_step_degree": 0, - "tap_step_percent": 2.5, - "tap_phase_shifter": false - } - }, - "trafo3w": { - "63/25/38 MVA 110/20/10 kV": { - "sn_hv_mva": 63, - "sn_mv_mva": 25, - "sn_lv_mva": 38, - "vn_hv_kv": 110, - "vn_mv_kv": 20, - "vn_lv_kv": 10, - "vk_hv_percent": 10.4, - "vk_mv_percent": 10.4, - "vk_lv_percent": 10.4, - "vkr_hv_percent": 0.28, - "vkr_mv_percent": 0.32, - "vkr_lv_percent": 0.35, - "pfe_kw": 35, - "i0_percent": 0.89, - "shift_mv_degree": 0, - "shift_lv_degree": 0, - "vector_group": "YN0yn0yn0", - "tap_side": "hv", - "tap_neutral": 0, - "tap_min": -10, - "tap_max": 10, - "tap_step_percent": 1.2 - }, - "63/25/38 MVA 110/10/10 kV": { - "sn_hv_mva": 63, - "sn_mv_mva": 25, - "sn_lv_mva": 38, - "vn_hv_kv": 110, - "vn_mv_kv": 10, - "vn_lv_kv": 10, - "vk_hv_percent": 10.4, - "vk_mv_percent": 10.4, - "vk_lv_percent": 10.4, - "vkr_hv_percent": 0.28, - "vkr_mv_percent": 0.32, - "vkr_lv_percent": 0.35, - "pfe_kw": 35, - "i0_percent": 0.89, - "shift_mv_degree": 0, - "shift_lv_degree": 0, - "vector_group": "YN0yn0yn0", - "tap_side": "hv", - "tap_neutral": 0, - "tap_min": -10, - "tap_max": 10, - "tap_step_percent": 1.2 - } - } - }, - "res_bus": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"vm_pu\",\"va_degree\",\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "vm_pu": "float64", - "va_degree": "float64", - "p_mw": "float64", - "q_mvar": "float64" - } - }, - "res_line": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"p_from_mw\",\"q_from_mvar\",\"p_to_mw\",\"q_to_mvar\",\"pl_mw\",\"ql_mvar\",\"i_from_ka\",\"i_to_ka\",\"i_ka\",\"vm_from_pu\",\"va_from_degree\",\"vm_to_pu\",\"va_to_degree\",\"loading_percent\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "p_from_mw": "float64", - "q_from_mvar": "float64", - "p_to_mw": "float64", - "q_to_mvar": "float64", - "pl_mw": "float64", - "ql_mvar": "float64", - "i_from_ka": "float64", - "i_to_ka": "float64", - "i_ka": "float64", - "vm_from_pu": "float64", - "va_from_degree": "float64", - "vm_to_pu": "float64", - "va_to_degree": "float64", - "loading_percent": "float64" - } - }, - "res_trafo": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"p_hv_mw\",\"q_hv_mvar\",\"p_lv_mw\",\"q_lv_mvar\",\"pl_mw\",\"ql_mvar\",\"i_hv_ka\",\"i_lv_ka\",\"vm_hv_pu\",\"va_hv_degree\",\"vm_lv_pu\",\"va_lv_degree\",\"loading_percent\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "p_hv_mw": "float64", - "q_hv_mvar": "float64", - "p_lv_mw": "float64", - "q_lv_mvar": "float64", - "pl_mw": "float64", - "ql_mvar": "float64", - "i_hv_ka": "float64", - "i_lv_ka": "float64", - "vm_hv_pu": "float64", - "va_hv_degree": "float64", - "vm_lv_pu": "float64", - "va_lv_degree": "float64", - "loading_percent": "float64" - } - }, - "res_trafo3w": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - 
"_object": "{\"columns\":[\"p_hv_mw\",\"q_hv_mvar\",\"p_mv_mw\",\"q_mv_mvar\",\"p_lv_mw\",\"q_lv_mvar\",\"pl_mw\",\"ql_mvar\",\"i_hv_ka\",\"i_mv_ka\",\"i_lv_ka\",\"vm_hv_pu\",\"va_hv_degree\",\"vm_mv_pu\",\"va_mv_degree\",\"vm_lv_pu\",\"va_lv_degree\",\"va_internal_degree\",\"vm_internal_pu\",\"loading_percent\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "p_hv_mw": "float64", - "q_hv_mvar": "float64", - "p_mv_mw": "float64", - "q_mv_mvar": "float64", - "p_lv_mw": "float64", - "q_lv_mvar": "float64", - "pl_mw": "float64", - "ql_mvar": "float64", - "i_hv_ka": "float64", - "i_mv_ka": "float64", - "i_lv_ka": "float64", - "vm_hv_pu": "float64", - "va_hv_degree": "float64", - "vm_mv_pu": "float64", - "va_mv_degree": "float64", - "vm_lv_pu": "float64", - "va_lv_degree": "float64", - "va_internal_degree": "float64", - "vm_internal_pu": "float64", - "loading_percent": "float64" - } - }, - "res_impedance": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"p_from_mw\",\"q_from_mvar\",\"p_to_mw\",\"q_to_mvar\",\"pl_mw\",\"ql_mvar\",\"i_from_ka\",\"i_to_ka\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "p_from_mw": "float64", - "q_from_mvar": "float64", - "p_to_mw": "float64", - "q_to_mvar": "float64", - "pl_mw": "float64", - "ql_mvar": "float64", - "i_from_ka": "float64", - "i_to_ka": "float64" - } - }, - "res_ext_grid": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "p_mw": "float64", - "q_mvar": "float64" - } - }, - "res_load": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "p_mw": "float64", - "q_mvar": "float64" - } - }, - "res_sgen": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "p_mw": "float64", - "q_mvar": "float64" - } - }, - "res_storage": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "p_mw": "float64", - "q_mvar": "float64" - } - }, - "res_shunt": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"p_mw\",\"q_mvar\",\"vm_pu\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "p_mw": "float64", - "q_mvar": "float64", - "vm_pu": "float64" - } - }, - "res_gen": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"p_mw\",\"q_mvar\",\"va_degree\",\"vm_pu\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "p_mw": "float64", - "q_mvar": "float64", - "va_degree": "float64", - "vm_pu": "float64" - } - }, - "res_ward": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"p_mw\",\"q_mvar\",\"vm_pu\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "p_mw": "float64", - "q_mvar": "float64", - "vm_pu": "float64" - } - }, - "res_xward": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"p_mw\",\"q_mvar\",\"vm_pu\",\"va_internal_degree\",\"vm_internal_pu\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "p_mw": "float64", - "q_mvar": "float64", - "vm_pu": "float64", - "va_internal_degree": "float64", - 
"vm_internal_pu": "float64" - } - }, - "res_dcline": { - "_module": "pandas.core.frame", - "_class": "DataFrame", - "_object": "{\"columns\":[\"p_from_mw\",\"q_from_mvar\",\"p_to_mw\",\"q_to_mvar\",\"pl_mw\",\"vm_from_pu\",\"va_from_degree\",\"vm_to_pu\",\"va_to_degree\"],\"index\":[],\"data\":[]}", - "orient": "split", - "dtype": { - "p_from_mw": "float64", - "q_from_mvar": "float64", - "p_to_mw": "float64", - "q_to_mvar": "float64", - "pl_mw": "float64", - "vm_from_pu": "float64", - "va_from_degree": "float64", - "vm_to_pu": "float64", - "va_to_degree": "float64" - } - }, - "user_pf_options": {} - } -} \ No newline at end of file + "_module": "pandapower.auxiliary", + "_class": "pandapowerNet", + "_object": "{\"OPF_converged\":false,\"bus\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"in_service\\\",\\\"max_vm_pu\\\",\\\"min_vm_pu\\\",\\\"name\\\",\\\"type\\\",\\\"vn_kv\\\",\\\"zone\\\"],\\\"index\\\":[0,1,10,11,12,13,2,3,4,5,6,7,8,9],\\\"data\\\":[[true,1.06,0.94,1,\\\"b\\\",135.0,1.0],[true,1.06,0.94,2,\\\"b\\\",135.0,1.0],[true,1.06,0.94,11,\\\"b\\\",0.208,1.0],[true,1.06,0.94,12,\\\"b\\\",0.208,1.0],[true,1.06,0.94,13,\\\"b\\\",0.208,1.0],[true,1.06,0.94,14,\\\"b\\\",0.208,1.0],[true,1.06,0.94,3,\\\"b\\\",135.0,1.0],[true,1.06,0.94,4,\\\"b\\\",135.0,1.0],[true,1.06,0.94,5,\\\"b\\\",135.0,1.0],[true,1.06,0.94,6,\\\"b\\\",0.208,1.0],[true,1.06,0.94,7,\\\"b\\\",14.0,1.0],[true,1.06,0.94,8,\\\"b\\\",12.0,1.0],[true,1.06,0.94,9,\\\"b\\\",0.208,1.0],[true,1.06,0.94,10,\\\"b\\\",0.208,1.0]]}\",\n \"dtype\": {\n \"in_service\": \"bool\",\n \"max_vm_pu\": \"float64\",\n \"min_vm_pu\": \"float64\",\n \"name\": \"object\",\n \"type\": \"object\",\n \"vn_kv\": \"float64\",\n \"zone\": \"object\"\n },\n \"orient\": \"split\"\n},\"bus_geodata\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"x\\\",\\\"y\\\",\\\"coords\\\"],\\\"index\\\":[0,1,10,11,12,13,2,3,4,5,6,7,8,9],\\\"data\\\":[[1.9673949894,-0.9610198739,null],[2.9779852289,-1.0412882366,null],[1.8366837619,1.0890065149,null],[2.3371166416,2.3091630377,null],[3.3094922817,2.1179802998,null],[4.3962052866,1.6847581464,null],[3.780660539,-1.6066859687,null],[3.8337344898,-0.4914657254,null],[2.6937067209,-0.095882852,null],[2.5321180205,1.2056156419,null],[4.8721406581,-0.2692952825,null],[5.9042747731,-0.5402149495,null],[4.274948799,0.5335379916,null],[3.2723067024,0.9619849305,null]]}\",\n \"dtype\": {\n \"x\": \"float64\",\n \"y\": \"float64\",\n \"coords\": \"object\"\n },\n \"orient\": \"split\"\n},\"converged\":true,\"dcline\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"name\\\",\\\"from_bus\\\",\\\"to_bus\\\",\\\"p_mw\\\",\\\"loss_percent\\\",\\\"loss_mw\\\",\\\"vm_from_pu\\\",\\\"vm_to_pu\\\",\\\"max_p_mw\\\",\\\"min_q_from_mvar\\\",\\\"min_q_to_mvar\\\",\\\"max_q_from_mvar\\\",\\\"max_q_to_mvar\\\",\\\"in_service\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"name\": \"object\",\n \"from_bus\": \"uint32\",\n \"to_bus\": \"uint32\",\n \"p_mw\": \"float64\",\n \"loss_percent\": \"float64\",\n \"loss_mw\": \"float64\",\n \"vm_from_pu\": \"float64\",\n \"vm_to_pu\": \"float64\",\n \"max_p_mw\": \"float64\",\n \"min_q_from_mvar\": \"float64\",\n \"min_q_to_mvar\": \"float64\",\n \"max_q_from_mvar\": \"float64\",\n \"max_q_to_mvar\": \"float64\",\n \"in_service\": \"bool\"\n },\n \"orient\": \"split\"\n},\"ext_grid\":{\n \"_module\": 
\"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"bus\\\",\\\"in_service\\\",\\\"name\\\",\\\"va_degree\\\",\\\"vm_pu\\\",\\\"max_p_mw\\\",\\\"min_p_mw\\\",\\\"max_q_mvar\\\",\\\"min_q_mvar\\\"],\\\"index\\\":[0],\\\"data\\\":[[0,true,null,0.0,1.06,332.400000000000034,0.0,10.0,0.0]]}\",\n \"dtype\": {\n \"bus\": \"uint32\",\n \"in_service\": \"bool\",\n \"name\": \"object\",\n \"va_degree\": \"float64\",\n \"vm_pu\": \"float64\",\n \"max_p_mw\": \"float64\",\n \"min_p_mw\": \"float64\",\n \"max_q_mvar\": \"float64\",\n \"min_q_mvar\": \"float64\"\n },\n \"orient\": \"split\"\n},\"f_hz\":60,\"gen\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"bus\\\",\\\"controllable\\\",\\\"in_service\\\",\\\"name\\\",\\\"p_mw\\\",\\\"scaling\\\",\\\"sn_mva\\\",\\\"type\\\",\\\"vm_pu\\\",\\\"slack\\\",\\\"max_p_mw\\\",\\\"min_p_mw\\\",\\\"max_q_mvar\\\",\\\"min_q_mvar\\\"],\\\"index\\\":[0,1,2,3],\\\"data\\\":[[1,true,true,null,40.0,1.0,null,null,1.045,false,140.0,0.0,50.0,-40.0],[2,true,true,null,0.0,1.0,null,null,1.01,false,100.0,0.0,40.0,0.0],[5,true,true,null,0.0,1.0,null,null,1.07,false,100.0,0.0,24.0,-6.0],[7,true,true,null,0.0,1.0,null,null,1.09,false,100.0,0.0,24.0,-6.0]]}\",\n \"dtype\": {\n \"bus\": \"uint32\",\n \"controllable\": \"bool\",\n \"in_service\": \"bool\",\n \"name\": \"object\",\n \"p_mw\": \"float64\",\n \"scaling\": \"float64\",\n \"sn_mva\": \"float64\",\n \"type\": \"object\",\n \"vm_pu\": \"float64\",\n \"slack\": \"bool\",\n \"max_p_mw\": \"float64\",\n \"min_p_mw\": \"float64\",\n \"max_q_mvar\": \"float64\",\n \"min_q_mvar\": \"float64\"\n },\n \"orient\": \"split\"\n},\"impedance\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"name\\\",\\\"from_bus\\\",\\\"to_bus\\\",\\\"rft_pu\\\",\\\"xft_pu\\\",\\\"rtf_pu\\\",\\\"xtf_pu\\\",\\\"sn_mva\\\",\\\"in_service\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"name\": \"object\",\n \"from_bus\": \"uint32\",\n \"to_bus\": \"uint32\",\n \"rft_pu\": \"float64\",\n \"xft_pu\": \"float64\",\n \"rtf_pu\": \"float64\",\n \"xtf_pu\": \"float64\",\n \"sn_mva\": \"float64\",\n \"in_service\": \"bool\"\n },\n \"orient\": \"split\"\n},\"line\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": 
\"{\\\"columns\\\":[\\\"c_nf_per_km\\\",\\\"df\\\",\\\"from_bus\\\",\\\"g_us_per_km\\\",\\\"in_service\\\",\\\"length_km\\\",\\\"max_i_ka\\\",\\\"max_loading_percent\\\",\\\"name\\\",\\\"parallel\\\",\\\"r_ohm_per_km\\\",\\\"std_type\\\",\\\"to_bus\\\",\\\"type\\\",\\\"x_ohm_per_km\\\"],\\\"index\\\":[0,1,10,11,12,13,14,2,3,4,5,6,7,8,9],\\\"data\\\":[[768.484773228356175,1.0,0,0.0,true,1.0,42.339019740572553,100.0,null,1,3.532005,null,1,\\\"ol\\\",10.783732499999999],[716.088084144604636,1.0,0,0.0,true,1.0,42.339019740572553,100.0,null,1,9.8469675,null,4,\\\"ol\\\",40.649039999999999],[0.0,1.0,8,0.0,true,1.0,27479.65223546776906,100.0,null,1,0.0000137622784,null,9,\\\"ol\\\",0.00003655808],[0.0,1.0,8,0.0,true,1.0,27479.65223546776906,100.0,null,1,0.0000549928704,null,13,\\\"ol\\\",0.0001169772032],[0.0,1.0,9,0.0,true,1.0,27479.65223546776906,100.0,null,1,0.000035498112,null,10,\\\"ol\\\",0.0000830971648],[0.0,1.0,11,0.0,true,1.0,27479.65223546776906,100.0,null,1,0.0000955788288,null,12,\\\"ol\\\",0.0000864760832],[0.0,1.0,12,0.0,true,1.0,27479.65223546776906,100.0,null,1,0.0000739511552,null,13,\\\"ol\\\",0.0001505673728],[637.49305051897727,1.0,1,0.0,true,1.0,42.339019740572553,100.0,null,1,8.5639275,null,2,\\\"ol\\\",36.080032500000002],[494.857619124320308,1.0,1,0.0,true,1.0,42.339019740572553,100.0,null,1,10.5905475,null,3,\\\"ol\\\",32.134320000000002],[503.590400638278879,1.0,1,0.0,true,1.0,42.339019740572553,100.0,null,1,10.379137500000001,null,4,\\\"ol\\\",31.689630000000001],[186.299338964449987,1.0,2,0.0,true,1.0,42.339019740572553,100.0,null,1,12.2125725,null,3,\\\"ol\\\",31.1702175],[0.0,1.0,3,0.0,true,1.0,42.339019740572553,100.0,null,1,2.4330375,null,4,\\\"ol\\\",7.6745475],[0.0,1.0,5,0.0,true,1.0,27479.65223546776906,100.0,null,1,0.0000410921472,null,10,\\\"ol\\\",0.000086052096],[0.0,1.0,5,0.0,true,1.0,27479.65223546776906,100.0,null,1,0.0000531757824,null,11,\\\"ol\\\",0.0001106736384],[0.0,1.0,5,0.0,true,1.0,27479.65223546776906,100.0,null,1,0.000028619136,null,12,\\\"ol\\\",0.0000563600128]]}\",\n \"dtype\": {\n \"c_nf_per_km\": \"float64\",\n \"df\": \"float64\",\n \"from_bus\": \"uint32\",\n \"g_us_per_km\": \"float64\",\n \"in_service\": \"bool\",\n \"length_km\": \"float64\",\n \"max_i_ka\": \"float64\",\n \"max_loading_percent\": \"float64\",\n \"name\": \"object\",\n \"parallel\": \"uint32\",\n \"r_ohm_per_km\": \"float64\",\n \"std_type\": \"object\",\n \"to_bus\": \"uint32\",\n \"type\": \"object\",\n \"x_ohm_per_km\": \"float64\"\n },\n \"orient\": \"split\"\n},\"line_geodata\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"coords\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"coords\": \"object\"\n },\n \"orient\": \"split\"\n},\"load\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": 
\"{\\\"columns\\\":[\\\"bus\\\",\\\"const_i_percent\\\",\\\"const_z_percent\\\",\\\"controllable\\\",\\\"in_service\\\",\\\"name\\\",\\\"p_mw\\\",\\\"q_mvar\\\",\\\"scaling\\\",\\\"sn_mva\\\",\\\"type\\\"],\\\"index\\\":[0,1,10,2,3,4,5,6,7,8,9],\\\"data\\\":[[1,0.0,0.0,false,true,null,21.699999999999999,12.699999999999999,1.0,null,null],[2,0.0,0.0,false,true,null,94.200000000000003,19.0,1.0,null,null],[13,0.0,0.0,false,true,null,14.9,5.0,1.0,null,null],[3,0.0,0.0,false,true,null,47.799999999999997,-3.9,1.0,null,null],[4,0.0,0.0,false,true,null,7.6,1.6,1.0,null,null],[5,0.0,0.0,false,true,null,11.199999999999999,7.5,1.0,null,null],[8,0.0,0.0,false,true,null,29.5,16.600000000000001,1.0,null,null],[9,0.0,0.0,false,true,null,9.0,5.8,1.0,null,null],[10,0.0,0.0,false,true,null,3.5,1.8,1.0,null,null],[11,0.0,0.0,false,true,null,6.1,1.6,1.0,null,null],[12,0.0,0.0,false,true,null,13.5,5.8,1.0,null,null]]}\",\n \"dtype\": {\n \"bus\": \"uint32\",\n \"const_i_percent\": \"float64\",\n \"const_z_percent\": \"float64\",\n \"controllable\": \"bool\",\n \"in_service\": \"bool\",\n \"name\": \"object\",\n \"p_mw\": \"float64\",\n \"q_mvar\": \"float64\",\n \"scaling\": \"float64\",\n \"sn_mva\": \"float64\",\n \"type\": \"object\"\n },\n \"orient\": \"split\"\n},\"measurement\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"name\\\",\\\"measurement_type\\\",\\\"element_type\\\",\\\"element\\\",\\\"value\\\",\\\"std_dev\\\",\\\"side\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"name\": \"object\",\n \"measurement_type\": \"object\",\n \"element_type\": \"object\",\n \"element\": \"uint32\",\n \"value\": \"float64\",\n \"std_dev\": \"float64\",\n \"side\": \"object\"\n },\n \"orient\": \"split\"\n},\"name\":\"\",\"poly_cost\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"element\\\",\\\"et\\\",\\\"cp0_eur\\\",\\\"cp1_eur_per_mw\\\",\\\"cp2_eur_per_mw2\\\",\\\"cq0_eur\\\",\\\"cq1_eur_per_mvar\\\",\\\"cq2_eur_per_mvar2\\\"],\\\"index\\\":[0,1,2,3,4],\\\"data\\\":[[0.0,\\\"ext_grid\\\",0.0,20.0,0.0430293,0.0,0.0,0.0],[0.0,\\\"gen\\\",0.0,20.0,0.25,0.0,0.0,0.0],[1.0,\\\"gen\\\",0.0,40.0,0.01,0.0,0.0,0.0],[2.0,\\\"gen\\\",0.0,40.0,0.01,0.0,0.0,0.0],[3.0,\\\"gen\\\",0.0,40.0,0.01,0.0,0.0,0.0]]}\",\n \"dtype\": {\n \"element\": \"object\",\n \"et\": \"object\",\n \"cp0_eur\": \"float64\",\n \"cp1_eur_per_mw\": \"float64\",\n \"cp2_eur_per_mw2\": \"float64\",\n \"cq0_eur\": \"float64\",\n \"cq1_eur_per_mvar\": \"float64\",\n \"cq2_eur_per_mvar2\": \"float64\"\n },\n \"orient\": \"split\"\n},\"pwl_cost\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"power_type\\\",\\\"element\\\",\\\"et\\\",\\\"points\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"power_type\": \"object\",\n \"element\": \"object\",\n \"et\": \"object\",\n \"points\": \"object\"\n },\n \"orient\": \"split\"\n},\"res_bus\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"vm_pu\\\",\\\"va_degree\\\",\\\"p_mw\\\",\\\"q_mvar\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"vm_pu\": \"float64\",\n \"va_degree\": \"float64\",\n \"p_mw\": \"float64\",\n \"q_mvar\": \"float64\"\n },\n \"orient\": \"split\"\n},\"res_dcline\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": 
\"{\\\"columns\\\":[\\\"p_from_mw\\\",\\\"q_from_mvar\\\",\\\"p_to_mw\\\",\\\"q_to_mvar\\\",\\\"pl_mw\\\",\\\"vm_from_pu\\\",\\\"va_from_degree\\\",\\\"vm_to_pu\\\",\\\"va_to_degree\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"p_from_mw\": \"float64\",\n \"q_from_mvar\": \"float64\",\n \"p_to_mw\": \"float64\",\n \"q_to_mvar\": \"float64\",\n \"pl_mw\": \"float64\",\n \"vm_from_pu\": \"float64\",\n \"va_from_degree\": \"float64\",\n \"vm_to_pu\": \"float64\",\n \"va_to_degree\": \"float64\"\n },\n \"orient\": \"split\"\n},\"res_ext_grid\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"p_mw\\\",\\\"q_mvar\\\"],\\\"index\\\":[0],\\\"data\\\":[[null,null]]}\",\n \"dtype\": {\n \"p_mw\": \"float64\",\n \"q_mvar\": \"float64\"\n },\n \"orient\": \"split\"\n},\"res_gen\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"p_mw\\\",\\\"q_mvar\\\",\\\"va_degree\\\",\\\"vm_pu\\\"],\\\"index\\\":[0,1,2,3],\\\"data\\\":[[null,null,null,null],[null,null,null,null],[null,null,null,null],[null,null,null,null]]}\",\n \"dtype\": {\n \"p_mw\": \"float64\",\n \"q_mvar\": \"float64\",\n \"va_degree\": \"float64\",\n \"vm_pu\": \"float64\"\n },\n \"orient\": \"split\"\n},\"res_impedance\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"p_from_mw\\\",\\\"q_from_mvar\\\",\\\"p_to_mw\\\",\\\"q_to_mvar\\\",\\\"pl_mw\\\",\\\"ql_mvar\\\",\\\"i_from_ka\\\",\\\"i_to_ka\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"p_from_mw\": \"float64\",\n \"q_from_mvar\": \"float64\",\n \"p_to_mw\": \"float64\",\n \"q_to_mvar\": \"float64\",\n \"pl_mw\": \"float64\",\n \"ql_mvar\": \"float64\",\n \"i_from_ka\": \"float64\",\n \"i_to_ka\": \"float64\"\n },\n \"orient\": \"split\"\n},\"res_line\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"p_from_mw\\\",\\\"q_from_mvar\\\",\\\"p_to_mw\\\",\\\"q_to_mvar\\\",\\\"pl_mw\\\",\\\"ql_mvar\\\",\\\"i_from_ka\\\",\\\"i_to_ka\\\",\\\"i_ka\\\",\\\"vm_from_pu\\\",\\\"va_from_degree\\\",\\\"vm_to_pu\\\",\\\"va_to_degree\\\",\\\"loading_percent\\\"],\\\"index\\\":[0,1,10,11,12,13,14,2,3,4,5,6,7,8,9],\\\"data\\\":[[null,null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null,null]]}\",\n \"dtype\": {\n \"p_from_mw\": \"float64\",\n \"q_from_mvar\": \"float64\",\n \"p_to_mw\": \"float64\",\n \"q_to_mvar\": \"float64\",\n \"pl_mw\": 
\"float64\",\n \"ql_mvar\": \"float64\",\n \"i_from_ka\": \"float64\",\n \"i_to_ka\": \"float64\",\n \"i_ka\": \"float64\",\n \"vm_from_pu\": \"float64\",\n \"va_from_degree\": \"float64\",\n \"vm_to_pu\": \"float64\",\n \"va_to_degree\": \"float64\",\n \"loading_percent\": \"float64\"\n },\n \"orient\": \"split\"\n},\"res_load\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"p_mw\\\",\\\"q_mvar\\\"],\\\"index\\\":[0,1,10,2,3,4,5,6,7,8,9],\\\"data\\\":[[null,null],[null,null],[null,null],[null,null],[null,null],[null,null],[null,null],[null,null],[null,null],[null,null],[null,null]]}\",\n \"dtype\": {\n \"p_mw\": \"float64\",\n \"q_mvar\": \"float64\"\n },\n \"orient\": \"split\"\n},\"res_sgen\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"p_mw\\\",\\\"q_mvar\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"p_mw\": \"float64\",\n \"q_mvar\": \"float64\"\n },\n \"orient\": \"split\"\n},\"res_shunt\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"p_mw\\\",\\\"q_mvar\\\",\\\"vm_pu\\\"],\\\"index\\\":[0],\\\"data\\\":[[null,null,null]]}\",\n \"dtype\": {\n \"p_mw\": \"float64\",\n \"q_mvar\": \"float64\",\n \"vm_pu\": \"float64\"\n },\n \"orient\": \"split\"\n},\"res_storage\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"p_mw\\\",\\\"q_mvar\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"p_mw\": \"float64\",\n \"q_mvar\": \"float64\"\n },\n \"orient\": \"split\"\n},\"res_trafo\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"p_hv_mw\\\",\\\"q_hv_mvar\\\",\\\"p_lv_mw\\\",\\\"q_lv_mvar\\\",\\\"pl_mw\\\",\\\"ql_mvar\\\",\\\"i_hv_ka\\\",\\\"i_lv_ka\\\",\\\"vm_hv_pu\\\",\\\"va_hv_degree\\\",\\\"vm_lv_pu\\\",\\\"va_lv_degree\\\",\\\"loading_percent\\\"],\\\"index\\\":[0,1,2,3,4],\\\"data\\\":[[null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null],[null,null,null,null,null,null,null,null,null,null,null,null,null]]}\",\n \"dtype\": {\n \"p_hv_mw\": \"float64\",\n \"q_hv_mvar\": \"float64\",\n \"p_lv_mw\": \"float64\",\n \"q_lv_mvar\": \"float64\",\n \"pl_mw\": \"float64\",\n \"ql_mvar\": \"float64\",\n \"i_hv_ka\": \"float64\",\n \"i_lv_ka\": \"float64\",\n \"vm_hv_pu\": \"float64\",\n \"va_hv_degree\": \"float64\",\n \"vm_lv_pu\": \"float64\",\n \"va_lv_degree\": \"float64\",\n \"loading_percent\": \"float64\"\n },\n \"orient\": \"split\"\n},\"res_trafo3w\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"p_hv_mw\\\",\\\"q_hv_mvar\\\",\\\"p_mv_mw\\\",\\\"q_mv_mvar\\\",\\\"p_lv_mw\\\",\\\"q_lv_mvar\\\",\\\"pl_mw\\\",\\\"ql_mvar\\\",\\\"i_hv_ka\\\",\\\"i_mv_ka\\\",\\\"i_lv_ka\\\",\\\"vm_hv_pu\\\",\\\"va_hv_degree\\\",\\\"vm_mv_pu\\\",\\\"va_mv_degree\\\",\\\"vm_lv_pu\\\",\\\"va_lv_degree\\\",\\\"va_internal_degree\\\",\\\"vm_internal_pu\\\",\\\"loading_percent\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"p_hv_mw\": \"float64\",\n \"q_hv_mvar\": \"float64\",\n \"p_mv_mw\": \"float64\",\n \"q_mv_mvar\": \"float64\",\n \"p_lv_mw\": \"float64\",\n \"q_lv_mvar\": \"float64\",\n \"pl_mw\": 
\"float64\",\n \"ql_mvar\": \"float64\",\n \"i_hv_ka\": \"float64\",\n \"i_mv_ka\": \"float64\",\n \"i_lv_ka\": \"float64\",\n \"vm_hv_pu\": \"float64\",\n \"va_hv_degree\": \"float64\",\n \"vm_mv_pu\": \"float64\",\n \"va_mv_degree\": \"float64\",\n \"vm_lv_pu\": \"float64\",\n \"va_lv_degree\": \"float64\",\n \"va_internal_degree\": \"float64\",\n \"vm_internal_pu\": \"float64\",\n \"loading_percent\": \"float64\"\n },\n \"orient\": \"split\"\n},\"res_ward\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"p_mw\\\",\\\"q_mvar\\\",\\\"vm_pu\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"p_mw\": \"float64\",\n \"q_mvar\": \"float64\",\n \"vm_pu\": \"float64\"\n },\n \"orient\": \"split\"\n},\"res_xward\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"p_mw\\\",\\\"q_mvar\\\",\\\"vm_pu\\\",\\\"va_internal_degree\\\",\\\"vm_internal_pu\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"p_mw\": \"float64\",\n \"q_mvar\": \"float64\",\n \"vm_pu\": \"float64\",\n \"va_internal_degree\": \"float64\",\n \"vm_internal_pu\": \"float64\"\n },\n \"orient\": \"split\"\n},\"sgen\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"name\\\",\\\"bus\\\",\\\"p_mw\\\",\\\"q_mvar\\\",\\\"sn_mva\\\",\\\"scaling\\\",\\\"in_service\\\",\\\"type\\\",\\\"current_source\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"name\": \"object\",\n \"bus\": \"int64\",\n \"p_mw\": \"float64\",\n \"q_mvar\": \"float64\",\n \"sn_mva\": \"float64\",\n \"scaling\": \"float64\",\n \"in_service\": \"bool\",\n \"type\": \"object\",\n \"current_source\": \"bool\"\n },\n \"orient\": \"split\"\n},\"shunt\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"bus\\\",\\\"name\\\",\\\"q_mvar\\\",\\\"p_mw\\\",\\\"vn_kv\\\",\\\"step\\\",\\\"max_step\\\",\\\"in_service\\\"],\\\"index\\\":[0],\\\"data\\\":[[8,null,-19.0,0.0,0.208,1,1,true]]}\",\n \"dtype\": {\n \"bus\": \"uint32\",\n \"name\": \"object\",\n \"q_mvar\": \"float64\",\n \"p_mw\": \"float64\",\n \"vn_kv\": \"float64\",\n \"step\": \"uint32\",\n \"max_step\": \"uint32\",\n \"in_service\": \"bool\"\n },\n \"orient\": \"split\"\n},\"sn_mva\":1.0,\"std_types\":{\n \"line\": {\n \"NAYY 4x150 SE\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.208,\n \"q_mm2\": 150,\n \"x_ohm_per_km\": 0.08,\n \"c_nf_per_km\": 261.0,\n \"max_i_ka\": 0.27\n },\n \"70-AL1/11-ST1A 20.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.4132,\n \"q_mm2\": 70,\n \"x_ohm_per_km\": 0.36,\n \"c_nf_per_km\": 9.7,\n \"max_i_ka\": 0.29\n },\n \"NA2XS2Y 1x70 RM/25 6/10 kV\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.443,\n \"q_mm2\": 70,\n \"x_ohm_per_km\": 0.123,\n \"c_nf_per_km\": 280.0,\n \"max_i_ka\": 0.217\n },\n \"N2XS(FL)2Y 1x300 RM/35 64/110 kV\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.06,\n \"q_mm2\": 300,\n \"x_ohm_per_km\": 0.144,\n \"c_nf_per_km\": 144.0,\n \"max_i_ka\": 0.588\n },\n \"NA2XS2Y 1x120 RM/25 6/10 kV\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.253,\n \"q_mm2\": 120,\n \"x_ohm_per_km\": 0.113,\n \"c_nf_per_km\": 340.0,\n \"max_i_ka\": 0.28\n },\n \"149-AL1/24-ST1A 10.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.194,\n \"q_mm2\": 149,\n \"x_ohm_per_km\": 0.315,\n \"c_nf_per_km\": 11.25,\n \"max_i_ka\": 0.47\n },\n \"15-AL1/3-ST1A 0.4\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 1.8769,\n \"q_mm2\": 16,\n 
\"x_ohm_per_km\": 0.35,\n \"c_nf_per_km\": 11.0,\n \"max_i_ka\": 0.105\n },\n \"NA2XS2Y 1x185 RM/25 6/10 kV\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.161,\n \"q_mm2\": 185,\n \"x_ohm_per_km\": 0.11,\n \"c_nf_per_km\": 406.0,\n \"max_i_ka\": 0.358\n },\n \"NA2XS2Y 1x240 RM/25 6/10 kV\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.122,\n \"q_mm2\": 240,\n \"x_ohm_per_km\": 0.105,\n \"c_nf_per_km\": 456.0,\n \"max_i_ka\": 0.416\n },\n \"N2XS(FL)2Y 1x240 RM/35 64/110 kV\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.075,\n \"q_mm2\": 240,\n \"x_ohm_per_km\": 0.149,\n \"c_nf_per_km\": 135.0,\n \"max_i_ka\": 0.526\n },\n \"NAYY 4x120 SE\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.225,\n \"q_mm2\": 120,\n \"x_ohm_per_km\": 0.08,\n \"c_nf_per_km\": 264.0,\n \"max_i_ka\": 0.242\n },\n \"48-AL1/8-ST1A 10.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.5939,\n \"q_mm2\": 48,\n \"x_ohm_per_km\": 0.35,\n \"c_nf_per_km\": 10.1,\n \"max_i_ka\": 0.21\n },\n \"94-AL1/15-ST1A 10.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.306,\n \"q_mm2\": 94,\n \"x_ohm_per_km\": 0.33,\n \"c_nf_per_km\": 10.75,\n \"max_i_ka\": 0.35\n },\n \"NA2XS2Y 1x70 RM/25 12/20 kV\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.443,\n \"q_mm2\": 70,\n \"x_ohm_per_km\": 0.132,\n \"c_nf_per_km\": 190.0,\n \"max_i_ka\": 0.22\n },\n \"243-AL1/39-ST1A 20.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.1188,\n \"q_mm2\": 243,\n \"x_ohm_per_km\": 0.32,\n \"c_nf_per_km\": 11.0,\n \"max_i_ka\": 0.645\n },\n \"NA2XS2Y 1x150 RM/25 6/10 kV\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.206,\n \"q_mm2\": 150,\n \"x_ohm_per_km\": 0.11,\n \"c_nf_per_km\": 360.0,\n \"max_i_ka\": 0.315\n },\n \"184-AL1/30-ST1A 110.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.1571,\n \"q_mm2\": 184,\n \"x_ohm_per_km\": 0.4,\n \"c_nf_per_km\": 8.8,\n \"max_i_ka\": 0.535\n },\n \"149-AL1/24-ST1A 110.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.194,\n \"q_mm2\": 149,\n \"x_ohm_per_km\": 0.41,\n \"c_nf_per_km\": 8.75,\n \"max_i_ka\": 0.47\n },\n \"NA2XS2Y 1x240 RM/25 12/20 kV\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.122,\n \"q_mm2\": 240,\n \"x_ohm_per_km\": 0.112,\n \"c_nf_per_km\": 304.0,\n \"max_i_ka\": 0.421\n },\n \"122-AL1/20-ST1A 20.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.2376,\n \"q_mm2\": 122,\n \"x_ohm_per_km\": 0.344,\n \"c_nf_per_km\": 10.3,\n \"max_i_ka\": 0.41\n },\n \"48-AL1/8-ST1A 20.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.5939,\n \"q_mm2\": 48,\n \"x_ohm_per_km\": 0.372,\n \"c_nf_per_km\": 9.5,\n \"max_i_ka\": 0.21\n },\n \"34-AL1/6-ST1A 10.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.8342,\n \"q_mm2\": 34,\n \"x_ohm_per_km\": 0.36,\n \"c_nf_per_km\": 9.7,\n \"max_i_ka\": 0.17\n },\n \"24-AL1/4-ST1A 0.4\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 1.2012,\n \"q_mm2\": 24,\n \"x_ohm_per_km\": 0.335,\n \"c_nf_per_km\": 11.25,\n \"max_i_ka\": 0.14\n },\n \"184-AL1/30-ST1A 20.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.1571,\n \"q_mm2\": 184,\n \"x_ohm_per_km\": 0.33,\n \"c_nf_per_km\": 10.75,\n \"max_i_ka\": 0.535\n },\n \"94-AL1/15-ST1A 20.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.306,\n \"q_mm2\": 94,\n \"x_ohm_per_km\": 0.35,\n \"c_nf_per_km\": 10.0,\n \"max_i_ka\": 0.35\n },\n \"NAYY 4x50 SE\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.642,\n \"q_mm2\": 50,\n \"x_ohm_per_km\": 0.083,\n \"c_nf_per_km\": 210.0,\n \"max_i_ka\": 0.142\n },\n \"490-AL1/64-ST1A 380.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.059,\n \"q_mm2\": 490,\n \"x_ohm_per_km\": 0.253,\n \"c_nf_per_km\": 11.0,\n 
\"max_i_ka\": 0.96\n },\n \"48-AL1/8-ST1A 0.4\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.5939,\n \"q_mm2\": 48,\n \"x_ohm_per_km\": 0.3,\n \"c_nf_per_km\": 12.2,\n \"max_i_ka\": 0.21\n },\n \"NA2XS2Y 1x95 RM/25 6/10 kV\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.313,\n \"q_mm2\": 95,\n \"x_ohm_per_km\": 0.123,\n \"c_nf_per_km\": 315.0,\n \"max_i_ka\": 0.249\n },\n \"NA2XS2Y 1x120 RM/25 12/20 kV\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.253,\n \"q_mm2\": 120,\n \"x_ohm_per_km\": 0.119,\n \"c_nf_per_km\": 230.0,\n \"max_i_ka\": 0.283\n },\n \"34-AL1/6-ST1A 20.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.8342,\n \"q_mm2\": 34,\n \"x_ohm_per_km\": 0.382,\n \"c_nf_per_km\": 9.15,\n \"max_i_ka\": 0.17\n },\n \"94-AL1/15-ST1A 0.4\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.306,\n \"q_mm2\": 94,\n \"x_ohm_per_km\": 0.29,\n \"c_nf_per_km\": 13.2,\n \"max_i_ka\": 0.35\n },\n \"NA2XS2Y 1x185 RM/25 12/20 kV\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.161,\n \"q_mm2\": 185,\n \"x_ohm_per_km\": 0.117,\n \"c_nf_per_km\": 273.0,\n \"max_i_ka\": 0.362\n },\n \"NA2XS2Y 1x150 RM/25 12/20 kV\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.206,\n \"q_mm2\": 150,\n \"x_ohm_per_km\": 0.116,\n \"c_nf_per_km\": 250.0,\n \"max_i_ka\": 0.319\n },\n \"243-AL1/39-ST1A 110.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.1188,\n \"q_mm2\": 243,\n \"x_ohm_per_km\": 0.39,\n \"c_nf_per_km\": 9.0,\n \"max_i_ka\": 0.645\n },\n \"490-AL1/64-ST1A 220.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.059,\n \"q_mm2\": 490,\n \"x_ohm_per_km\": 0.285,\n \"c_nf_per_km\": 10.0,\n \"max_i_ka\": 0.96\n },\n \"N2XS(FL)2Y 1x185 RM/35 64/110 kV\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.099,\n \"q_mm2\": 185,\n \"x_ohm_per_km\": 0.156,\n \"c_nf_per_km\": 125.0,\n \"max_i_ka\": 0.457\n },\n \"N2XS(FL)2Y 1x120 RM/35 64/110 kV\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.153,\n \"q_mm2\": 120,\n \"x_ohm_per_km\": 0.166,\n \"c_nf_per_km\": 112.0,\n \"max_i_ka\": 0.366\n },\n \"NA2XS2Y 1x95 RM/25 12/20 kV\": {\n \"type\": \"cs\",\n \"r_ohm_per_km\": 0.313,\n \"q_mm2\": 95,\n \"x_ohm_per_km\": 0.132,\n \"c_nf_per_km\": 216.0,\n \"max_i_ka\": 0.252\n },\n \"122-AL1/20-ST1A 10.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.2376,\n \"q_mm2\": 122,\n \"x_ohm_per_km\": 0.323,\n \"c_nf_per_km\": 11.1,\n \"max_i_ka\": 0.41\n },\n \"149-AL1/24-ST1A 20.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.194,\n \"q_mm2\": 149,\n \"x_ohm_per_km\": 0.337,\n \"c_nf_per_km\": 10.5,\n \"max_i_ka\": 0.47\n },\n \"70-AL1/11-ST1A 10.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.4132,\n \"q_mm2\": 70,\n \"x_ohm_per_km\": 0.339,\n \"c_nf_per_km\": 10.4,\n \"max_i_ka\": 0.29\n },\n \"305-AL1/39-ST1A 110.0\": {\n \"type\": \"ol\",\n \"r_ohm_per_km\": 0.0949,\n \"q_mm2\": 305,\n \"x_ohm_per_km\": 0.38,\n \"c_nf_per_km\": 9.2,\n \"max_i_ka\": 0.74\n }\n },\n \"trafo\": {\n \"0.4 MVA 20/0.4 kV\": {\n \"shift_degree\": 150,\n \"vector_group\": \"Dyn5\",\n \"vn_hv_kv\": 20.0,\n \"pfe_kw\": 1.35,\n \"i0_percent\": 0.3375,\n \"vn_lv_kv\": 0.4,\n \"sn_mva\": 0.4,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -2,\n \"vkr_percent\": 1.425,\n \"tap_step_percent\": 2.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 2,\n \"vk_percent\": 6.0\n },\n \"63 MVA 110/20 kV v1.4.3 and older\": {\n \"shift_degree\": 150,\n \"vector_group\": \"YNd5\",\n \"vn_hv_kv\": 110.0,\n \"pfe_kw\": 33.0,\n \"i0_percent\": 0.086,\n \"vn_lv_kv\": 20.0,\n \"sn_mva\": 63.0,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n 
\"tap_min\": -9,\n \"vkr_percent\": 0.322,\n \"tap_step_percent\": 1.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 9,\n \"vk_percent\": 11.2\n },\n \"63 MVA 110/10 kV v1.4.3 and older\": {\n \"shift_degree\": 150,\n \"vector_group\": \"YNd5\",\n \"vn_hv_kv\": 110.0,\n \"pfe_kw\": 31.51,\n \"i0_percent\": 0.078,\n \"vn_lv_kv\": 10.0,\n \"sn_mva\": 63.0,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -9,\n \"vkr_percent\": 0.31,\n \"tap_step_percent\": 1.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 9,\n \"vk_percent\": 10.04\n },\n \"25 MVA 110/20 kV v1.4.3 and older\": {\n \"shift_degree\": 150,\n \"vector_group\": \"YNd5\",\n \"vn_hv_kv\": 110.0,\n \"pfe_kw\": 29.0,\n \"i0_percent\": 0.071,\n \"vn_lv_kv\": 20.0,\n \"sn_mva\": 25.0,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -9,\n \"vkr_percent\": 0.282,\n \"tap_step_percent\": 1.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 9,\n \"vk_percent\": 11.2\n },\n \"40 MVA 110/20 kV v1.4.3 and older\": {\n \"shift_degree\": 150,\n \"vector_group\": \"YNd5\",\n \"vn_hv_kv\": 110.0,\n \"pfe_kw\": 31.0,\n \"i0_percent\": 0.08,\n \"vn_lv_kv\": 20.0,\n \"sn_mva\": 40.0,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -9,\n \"vkr_percent\": 0.302,\n \"tap_step_percent\": 1.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 9,\n \"vk_percent\": 11.2\n },\n \"0.25 MVA 20/0.4 kV\": {\n \"shift_degree\": 150,\n \"vector_group\": \"Yzn5\",\n \"vn_hv_kv\": 20.0,\n \"pfe_kw\": 0.8,\n \"i0_percent\": 0.32,\n \"vn_lv_kv\": 0.4,\n \"sn_mva\": 0.25,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -2,\n \"vkr_percent\": 1.44,\n \"tap_step_percent\": 2.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 2,\n \"vk_percent\": 6.0\n },\n \"25 MVA 110/10 kV v1.4.3 and older\": {\n \"shift_degree\": 150,\n \"vector_group\": \"YNd5\",\n \"vn_hv_kv\": 110.0,\n \"pfe_kw\": 28.51,\n \"i0_percent\": 0.073,\n \"vn_lv_kv\": 10.0,\n \"sn_mva\": 25.0,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -9,\n \"vkr_percent\": 0.276,\n \"tap_step_percent\": 1.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 9,\n \"vk_percent\": 10.04\n },\n \"0.25 MVA 10/0.4 kV\": {\n \"shift_degree\": 150,\n \"vector_group\": \"Dyn5\",\n \"vn_hv_kv\": 10.0,\n \"pfe_kw\": 0.6,\n \"i0_percent\": 0.24,\n \"vn_lv_kv\": 0.4,\n \"sn_mva\": 0.25,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -2,\n \"vkr_percent\": 1.2,\n \"tap_step_percent\": 2.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 2,\n \"vk_percent\": 4.0\n },\n \"160 MVA 380/110 kV\": {\n \"shift_degree\": 0,\n \"vector_group\": \"Yy0\",\n \"vn_hv_kv\": 380.0,\n \"pfe_kw\": 60.0,\n \"i0_percent\": 0.06,\n \"vn_lv_kv\": 110.0,\n \"sn_mva\": 160.0,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -9,\n \"vkr_percent\": 0.25,\n \"tap_step_percent\": 1.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 9,\n \"vk_percent\": 12.2\n },\n \"63 MVA 110/10 kV\": {\n \"shift_degree\": 150,\n \"vector_group\": \"YNd5\",\n \"vn_hv_kv\": 110.0,\n \"pfe_kw\": 22.0,\n \"i0_percent\": 0.04,\n \"vn_lv_kv\": 10.0,\n \"sn_mva\": 63.0,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -9,\n \"vkr_percent\": 0.32,\n \"tap_step_percent\": 1.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 9,\n \"vk_percent\": 18.0\n },\n 
\"0.63 MVA 20/0.4 kV\": {\n \"shift_degree\": 150,\n \"vector_group\": \"Dyn5\",\n \"vn_hv_kv\": 20.0,\n \"pfe_kw\": 1.65,\n \"i0_percent\": 0.2619,\n \"vn_lv_kv\": 0.4,\n \"sn_mva\": 0.63,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -2,\n \"vkr_percent\": 1.206,\n \"tap_step_percent\": 2.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 2,\n \"vk_percent\": 6.0\n },\n \"0.4 MVA 10/0.4 kV\": {\n \"shift_degree\": 150,\n \"vector_group\": \"Dyn5\",\n \"vn_hv_kv\": 10.0,\n \"pfe_kw\": 0.95,\n \"i0_percent\": 0.2375,\n \"vn_lv_kv\": 0.4,\n \"sn_mva\": 0.4,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -2,\n \"vkr_percent\": 1.325,\n \"tap_step_percent\": 2.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 2,\n \"vk_percent\": 4.0\n },\n \"0.63 MVA 10/0.4 kV\": {\n \"shift_degree\": 150,\n \"vector_group\": \"Dyn5\",\n \"vn_hv_kv\": 10.0,\n \"pfe_kw\": 1.18,\n \"i0_percent\": 0.1873,\n \"vn_lv_kv\": 0.4,\n \"sn_mva\": 0.63,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -2,\n \"vkr_percent\": 1.0794,\n \"tap_step_percent\": 2.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 2,\n \"vk_percent\": 4.0\n },\n \"63 MVA 110/20 kV\": {\n \"shift_degree\": 150,\n \"vector_group\": \"YNd5\",\n \"vn_hv_kv\": 110.0,\n \"pfe_kw\": 22.0,\n \"i0_percent\": 0.04,\n \"vn_lv_kv\": 20.0,\n \"sn_mva\": 63.0,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -9,\n \"vkr_percent\": 0.32,\n \"tap_step_percent\": 1.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 9,\n \"vk_percent\": 18.0\n },\n \"100 MVA 220/110 kV\": {\n \"shift_degree\": 0,\n \"vector_group\": \"Yy0\",\n \"vn_hv_kv\": 220.0,\n \"pfe_kw\": 55.0,\n \"i0_percent\": 0.06,\n \"vn_lv_kv\": 110.0,\n \"sn_mva\": 100.0,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -9,\n \"vkr_percent\": 0.26,\n \"tap_step_percent\": 1.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 9,\n \"vk_percent\": 12.0\n },\n \"25 MVA 110/10 kV\": {\n \"shift_degree\": 150,\n \"vector_group\": \"YNd5\",\n \"vn_hv_kv\": 110.0,\n \"pfe_kw\": 14.0,\n \"i0_percent\": 0.07,\n \"vn_lv_kv\": 10.0,\n \"sn_mva\": 25.0,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -9,\n \"vkr_percent\": 0.41,\n \"tap_step_percent\": 1.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 9,\n \"vk_percent\": 12.0\n },\n \"40 MVA 110/20 kV\": {\n \"shift_degree\": 150,\n \"vector_group\": \"YNd5\",\n \"vn_hv_kv\": 110.0,\n \"pfe_kw\": 18.0,\n \"i0_percent\": 0.05,\n \"vn_lv_kv\": 20.0,\n \"sn_mva\": 40.0,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -9,\n \"vkr_percent\": 0.34,\n \"tap_step_percent\": 1.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 9,\n \"vk_percent\": 16.2\n },\n \"40 MVA 110/10 kV v1.4.3 and older\": {\n \"shift_degree\": 150,\n \"vector_group\": \"YNd5\",\n \"vn_hv_kv\": 110.0,\n \"pfe_kw\": 30.45,\n \"i0_percent\": 0.076,\n \"vn_lv_kv\": 10.0,\n \"sn_mva\": 40.0,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -9,\n \"vkr_percent\": 0.295,\n \"tap_step_percent\": 1.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 9,\n \"vk_percent\": 10.04\n },\n \"25 MVA 110/20 kV\": {\n \"shift_degree\": 150,\n \"vector_group\": \"YNd5\",\n \"vn_hv_kv\": 110.0,\n \"pfe_kw\": 14.0,\n \"i0_percent\": 0.07,\n \"vn_lv_kv\": 20.0,\n \"sn_mva\": 25.0,\n \"tap_step_degree\": 0,\n 
\"tap_neutral\": 0,\n \"tap_min\": -9,\n \"vkr_percent\": 0.41,\n \"tap_step_percent\": 1.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 9,\n \"vk_percent\": 12.0\n },\n \"40 MVA 110/10 kV\": {\n \"shift_degree\": 150,\n \"vector_group\": \"YNd5\",\n \"vn_hv_kv\": 110.0,\n \"pfe_kw\": 18.0,\n \"i0_percent\": 0.05,\n \"vn_lv_kv\": 10.0,\n \"sn_mva\": 40.0,\n \"tap_step_degree\": 0,\n \"tap_neutral\": 0,\n \"tap_min\": -9,\n \"vkr_percent\": 0.34,\n \"tap_step_percent\": 1.5,\n \"tap_side\": \"hv\",\n \"tap_phase_shifter\": false,\n \"tap_max\": 9,\n \"vk_percent\": 16.2\n }\n },\n \"trafo3w\": {\n \"63/25/38 MVA 110/10/10 kV\": {\n \"vector_group\": \"YN0yn0yn0\",\n \"vn_mv_kv\": 10,\n \"vn_lv_kv\": 10,\n \"shift_lv_degree\": 0,\n \"shift_mv_degree\": 0,\n \"pfe_kw\": 35,\n \"vn_hv_kv\": 110,\n \"i0_percent\": 0.89,\n \"sn_lv_mva\": 38.0,\n \"sn_hv_mva\": 63.0,\n \"sn_mv_mva\": 25.0,\n \"vkr_lv_percent\": 0.35,\n \"tap_neutral\": 0,\n \"tap_min\": -10,\n \"vk_mv_percent\": 10.4,\n \"vkr_hv_percent\": 0.28,\n \"vk_lv_percent\": 10.4,\n \"tap_max\": 10,\n \"vkr_mv_percent\": 0.32,\n \"tap_step_percent\": 1.2,\n \"tap_side\": \"hv\",\n \"vk_hv_percent\": 10.4\n },\n \"63/25/38 MVA 110/20/10 kV\": {\n \"vector_group\": \"YN0yn0yn0\",\n \"vn_mv_kv\": 20,\n \"vn_lv_kv\": 10,\n \"shift_lv_degree\": 0,\n \"shift_mv_degree\": 0,\n \"pfe_kw\": 35,\n \"vn_hv_kv\": 110,\n \"i0_percent\": 0.89,\n \"sn_lv_mva\": 38.0,\n \"sn_hv_mva\": 63.0,\n \"sn_mv_mva\": 25.0,\n \"vkr_lv_percent\": 0.35,\n \"tap_neutral\": 0,\n \"tap_min\": -10,\n \"vk_mv_percent\": 10.4,\n \"vkr_hv_percent\": 0.28,\n \"vk_lv_percent\": 10.4,\n \"tap_max\": 10,\n \"vkr_mv_percent\": 0.32,\n \"tap_step_percent\": 1.2,\n \"tap_side\": \"hv\",\n \"vk_hv_percent\": 10.4\n }\n }\n},\"storage\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"name\\\",\\\"bus\\\",\\\"p_mw\\\",\\\"q_mvar\\\",\\\"sn_mva\\\",\\\"soc_percent\\\",\\\"min_e_mwh\\\",\\\"max_e_mwh\\\",\\\"scaling\\\",\\\"in_service\\\",\\\"type\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"name\": \"object\",\n \"bus\": \"int64\",\n \"p_mw\": \"float64\",\n \"q_mvar\": \"float64\",\n \"sn_mva\": \"float64\",\n \"soc_percent\": \"float64\",\n \"min_e_mwh\": \"float64\",\n \"max_e_mwh\": \"float64\",\n \"scaling\": \"float64\",\n \"in_service\": \"bool\",\n \"type\": \"object\"\n },\n \"orient\": \"split\"\n},\"switch\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"bus\\\",\\\"element\\\",\\\"et\\\",\\\"type\\\",\\\"closed\\\",\\\"name\\\",\\\"z_ohm\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"bus\": \"int64\",\n \"element\": \"int64\",\n \"et\": \"object\",\n \"type\": \"object\",\n \"closed\": \"bool\",\n \"name\": \"object\",\n \"z_ohm\": \"float64\"\n },\n \"orient\": \"split\"\n},\"trafo\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": 
\"{\\\"columns\\\":[\\\"df\\\",\\\"hv_bus\\\",\\\"i0_percent\\\",\\\"in_service\\\",\\\"lv_bus\\\",\\\"max_loading_percent\\\",\\\"name\\\",\\\"parallel\\\",\\\"pfe_kw\\\",\\\"shift_degree\\\",\\\"sn_mva\\\",\\\"std_type\\\",\\\"tap_max\\\",\\\"tap_neutral\\\",\\\"tap_min\\\",\\\"tap_phase_shifter\\\",\\\"tap_pos\\\",\\\"tap_side\\\",\\\"tap_step_degree\\\",\\\"tap_step_percent\\\",\\\"vn_hv_kv\\\",\\\"vn_lv_kv\\\",\\\"vk_percent\\\",\\\"vkr_percent\\\"],\\\"index\\\":[0,1,2,3,4],\\\"data\\\":[[1.0,3,0.0,true,6,100.0,null,1,0.0,0.0,9900.0,null,null,0.0,null,false,-1.0,\\\"hv\\\",null,2.2,135.0,14.0,2070.288000000000011,0.0],[1.0,3,0.0,true,8,100.0,null,1,0.0,0.0,9900.0,null,null,0.0,null,false,-1.0,\\\"hv\\\",null,3.1,135.0,0.208,5506.181999999999789,0.0],[1.0,4,0.0,true,5,100.0,null,1,0.0,0.0,9900.0,null,null,0.0,null,false,-1.0,\\\"hv\\\",null,6.8,135.0,0.208,2494.998000000000047,0.0],[1.0,6,0.0,true,7,100.0,null,1,0.0,0.0,9900.0,null,null,null,null,false,null,null,null,null,14.0,12.0,1743.884999999999991,0.0],[1.0,6,0.0,true,8,100.0,null,1,0.0,0.0,9900.0,null,null,null,null,false,null,null,null,null,14.0,0.208,1089.098999999999933,0.0]]}\",\n \"dtype\": {\n \"df\": \"float64\",\n \"hv_bus\": \"uint32\",\n \"i0_percent\": \"float64\",\n \"in_service\": \"bool\",\n \"lv_bus\": \"uint32\",\n \"max_loading_percent\": \"float64\",\n \"name\": \"object\",\n \"parallel\": \"uint32\",\n \"pfe_kw\": \"float64\",\n \"shift_degree\": \"float64\",\n \"sn_mva\": \"float64\",\n \"std_type\": \"object\",\n \"tap_max\": \"float64\",\n \"tap_neutral\": \"float64\",\n \"tap_min\": \"float64\",\n \"tap_phase_shifter\": \"bool\",\n \"tap_pos\": \"float64\",\n \"tap_side\": \"object\",\n \"tap_step_degree\": \"float64\",\n \"tap_step_percent\": \"float64\",\n \"vn_hv_kv\": \"float64\",\n \"vn_lv_kv\": \"float64\",\n \"vk_percent\": \"float64\",\n \"vkr_percent\": \"float64\"\n },\n \"orient\": \"split\"\n},\"trafo3w\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"name\\\",\\\"std_type\\\",\\\"hv_bus\\\",\\\"mv_bus\\\",\\\"lv_bus\\\",\\\"sn_hv_mva\\\",\\\"sn_mv_mva\\\",\\\"sn_lv_mva\\\",\\\"vn_hv_kv\\\",\\\"vn_mv_kv\\\",\\\"vn_lv_kv\\\",\\\"vk_hv_percent\\\",\\\"vk_mv_percent\\\",\\\"vk_lv_percent\\\",\\\"vkr_hv_percent\\\",\\\"vkr_mv_percent\\\",\\\"vkr_lv_percent\\\",\\\"pfe_kw\\\",\\\"i0_percent\\\",\\\"shift_mv_degree\\\",\\\"shift_lv_degree\\\",\\\"tap_side\\\",\\\"tap_neutral\\\",\\\"tap_min\\\",\\\"tap_max\\\",\\\"tap_step_percent\\\",\\\"tap_step_degree\\\",\\\"tap_pos\\\",\\\"tap_at_star_point\\\",\\\"in_service\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"name\": \"object\",\n \"std_type\": \"object\",\n \"hv_bus\": \"uint32\",\n \"mv_bus\": \"uint32\",\n \"lv_bus\": \"uint32\",\n \"sn_hv_mva\": \"float64\",\n \"sn_mv_mva\": \"float64\",\n \"sn_lv_mva\": \"float64\",\n \"vn_hv_kv\": \"float64\",\n \"vn_mv_kv\": \"float64\",\n \"vn_lv_kv\": \"float64\",\n \"vk_hv_percent\": \"float64\",\n \"vk_mv_percent\": \"float64\",\n \"vk_lv_percent\": \"float64\",\n \"vkr_hv_percent\": \"float64\",\n \"vkr_mv_percent\": \"float64\",\n \"vkr_lv_percent\": \"float64\",\n \"pfe_kw\": \"float64\",\n \"i0_percent\": \"float64\",\n \"shift_mv_degree\": \"float64\",\n \"shift_lv_degree\": \"float64\",\n \"tap_side\": \"object\",\n \"tap_neutral\": \"int32\",\n \"tap_min\": \"int32\",\n \"tap_max\": \"int32\",\n \"tap_step_percent\": \"float64\",\n \"tap_step_degree\": \"float64\",\n \"tap_pos\": \"int32\",\n \"tap_at_star_point\": 
\"bool\",\n \"in_service\": \"bool\"\n },\n \"orient\": \"split\"\n},\"user_pf_options\":{},\"version\":\"2.0.1\",\"ward\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"name\\\",\\\"bus\\\",\\\"ps_mw\\\",\\\"qs_mvar\\\",\\\"qz_mvar\\\",\\\"pz_mw\\\",\\\"in_service\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"name\": \"object\",\n \"bus\": \"uint32\",\n \"ps_mw\": \"float64\",\n \"qs_mvar\": \"float64\",\n \"qz_mvar\": \"float64\",\n \"pz_mw\": \"float64\",\n \"in_service\": \"bool\"\n },\n \"orient\": \"split\"\n},\"xward\":{\n \"_module\": \"pandas.core.frame\",\n \"_class\": \"DataFrame\",\n \"_object\": \"{\\\"columns\\\":[\\\"name\\\",\\\"bus\\\",\\\"ps_mw\\\",\\\"qs_mvar\\\",\\\"qz_mvar\\\",\\\"pz_mw\\\",\\\"r_ohm\\\",\\\"x_ohm\\\",\\\"vm_pu\\\",\\\"in_service\\\"],\\\"index\\\":[],\\\"data\\\":[]}\",\n \"dtype\": {\n \"name\": \"object\",\n \"bus\": \"uint32\",\n \"ps_mw\": \"float64\",\n \"qs_mvar\": \"float64\",\n \"qz_mvar\": \"float64\",\n \"pz_mw\": \"float64\",\n \"r_ohm\": \"float64\",\n \"x_ohm\": \"float64\",\n \"vm_pu\": \"float64\",\n \"in_service\": \"bool\"\n },\n \"orient\": \"split\"\n}}\n" +} diff --git a/grid2op/data_test/multimix/case14_002/prods_charac.csv b/grid2op/data_test/multimix/case14_002/prods_charac.csv index 21c8943e5..f27dff8db 100644 --- a/grid2op/data_test/multimix/case14_002/prods_charac.csv +++ b/grid2op/data_test/multimix/case14_002/prods_charac.csv @@ -1,6 +1,6 @@ Pmax,Pmin,name,type,bus,max_ramp_up,max_ramp_down,min_up_time,min_down_time,marginal_cost,shut_down_cost,start_cost,x,y,V 150,0.0,gen_1_0,nuclear,1,5,5,96,96,40,10,20,180,10,142.1 200,0.0,gen_2_1,thermal,2,10,10,4,4,70,1,2,646,10,142.1 -70,0.0,gen_5_2,wind,5,0,0,0,0,0,0,0,216,334,22.0 -50,0.0,gen_7_3,solar,7,0,0,0,0,0,0,0,718,280,13.2 +70,0.0,gen_5_2,wind,5,0,0,0,0,0,0,0,216,334,0.208 +50,0.0,gen_7_3,solar,7,0,0,0,0,0,0,0,718,280,12.0 300,0.0,gen_0_4,thermal,0,10,10,4,4,70,1,2,0,199,142.1 \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.0/00/_parameters.json b/grid2op/data_test/runner_data/res_agent_1.10.0/00/_parameters.json new file mode 100644 index 000000000..ce75edee3 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.0/00/_parameters.json @@ -0,0 +1,23 @@ +{ + "ACTIVATE_STORAGE_LOSS": true, + "ALARM_BEST_TIME": 12, + "ALARM_WINDOW_SIZE": 12, + "ALERT_TIME_WINDOW": 12, + "ALLOW_DISPATCH_GEN_SWITCH_OFF": true, + "ENV_DC": false, + "FORECAST_DC": false, + "HARD_OVERFLOW_THRESHOLD": 2.0, + "IGNORE_MIN_UP_DOWN_TIME": true, + "INIT_STORAGE_CAPACITY": 0.5, + "LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION": false, + "MAX_LINE_STATUS_CHANGED": 1, + "MAX_SIMULATE_PER_EPISODE": -1, + "MAX_SIMULATE_PER_STEP": -1, + "MAX_SUB_CHANGED": 1, + "NB_TIMESTEP_COOLDOWN_LINE": 0, + "NB_TIMESTEP_COOLDOWN_SUB": 0, + "NB_TIMESTEP_OVERFLOW_ALLOWED": 2, + "NB_TIMESTEP_RECONNECTION": 10, + "NO_OVERFLOW_DISCONNECTION": false, + "SOFT_OVERFLOW_THRESHOLD": 1.0 +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.0/00/actions.npz b/grid2op/data_test/runner_data/res_agent_1.10.0/00/actions.npz new file mode 100644 index 000000000..eb9d22c04 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.0/00/actions.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.0/00/agent_exec_times.npz b/grid2op/data_test/runner_data/res_agent_1.10.0/00/agent_exec_times.npz new file mode 100644 index 000000000..1a7422035 Binary files 
/dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.0/00/agent_exec_times.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.0/00/disc_lines_cascading_failure.npz b/grid2op/data_test/runner_data/res_agent_1.10.0/00/disc_lines_cascading_failure.npz new file mode 100644 index 000000000..298421483 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.0/00/disc_lines_cascading_failure.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.0/00/env_modifications.npz b/grid2op/data_test/runner_data/res_agent_1.10.0/00/env_modifications.npz new file mode 100644 index 000000000..263cde47f Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.0/00/env_modifications.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.0/00/episode_meta.json b/grid2op/data_test/runner_data/res_agent_1.10.0/00/episode_meta.json new file mode 100644 index 000000000..0e4133a8c --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.0/00/episode_meta.json @@ -0,0 +1,11 @@ +{ + "agent_seed": null, + "backend_type": "PandaPowerBackend_rte_case5_example", + "chronics_max_timestep": "100", + "chronics_path": "/home/donnotben/Documents/grid2op_dev/grid2op/data/rte_case5_example/chronics/00", + "cumulative_reward": 51.58775329589844, + "env_seed": null, + "env_type": "Environment_rte_case5_example", + "grid_path": "/home/donnotben/Documents/grid2op_dev/grid2op/data/rte_case5_example/grid.json", + "nb_timestep_played": 11 +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.0/00/episode_times.json b/grid2op/data_test/runner_data/res_agent_1.10.0/00/episode_times.json new file mode 100644 index 000000000..58640d881 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.0/00/episode_times.json @@ -0,0 +1,12 @@ +{ + "Agent": { + "total": 0.0003374060324858874 + }, + "Env": { + "apply_act": 0.046520611009327695, + "observation_computation": 0.013889413006836548, + "powerflow_computation": 0.28536968800472096, + "total": 0.3457797120208852 + }, + "total": 0.3526929799991194 +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.0/00/grid2op.info b/grid2op/data_test/runner_data/res_agent_1.10.0/00/grid2op.info new file mode 100644 index 000000000..83661e42c --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.0/00/grid2op.info @@ -0,0 +1,3 @@ +{ + "version": "1.10.0" +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.0/00/observations.npz b/grid2op/data_test/runner_data/res_agent_1.10.0/00/observations.npz new file mode 100644 index 000000000..857cda59b Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.0/00/observations.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.0/00/opponent_attack.npz b/grid2op/data_test/runner_data/res_agent_1.10.0/00/opponent_attack.npz new file mode 100644 index 000000000..e05f26912 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.0/00/opponent_attack.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.0/00/other_rewards.json b/grid2op/data_test/runner_data/res_agent_1.10.0/00/other_rewards.json new file mode 100644 index 000000000..43a486f79 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.0/00/other_rewards.json @@ -0,0 +1,13 @@ +[ + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {} +] \ No newline at end of file diff --git 
a/grid2op/data_test/runner_data/res_agent_1.10.0/00/rewards.npz b/grid2op/data_test/runner_data/res_agent_1.10.0/00/rewards.npz new file mode 100644 index 000000000..c2bca13e4 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.0/00/rewards.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.0/01/_parameters.json b/grid2op/data_test/runner_data/res_agent_1.10.0/01/_parameters.json new file mode 100644 index 000000000..ce75edee3 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.0/01/_parameters.json @@ -0,0 +1,23 @@ +{ + "ACTIVATE_STORAGE_LOSS": true, + "ALARM_BEST_TIME": 12, + "ALARM_WINDOW_SIZE": 12, + "ALERT_TIME_WINDOW": 12, + "ALLOW_DISPATCH_GEN_SWITCH_OFF": true, + "ENV_DC": false, + "FORECAST_DC": false, + "HARD_OVERFLOW_THRESHOLD": 2.0, + "IGNORE_MIN_UP_DOWN_TIME": true, + "INIT_STORAGE_CAPACITY": 0.5, + "LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION": false, + "MAX_LINE_STATUS_CHANGED": 1, + "MAX_SIMULATE_PER_EPISODE": -1, + "MAX_SIMULATE_PER_STEP": -1, + "MAX_SUB_CHANGED": 1, + "NB_TIMESTEP_COOLDOWN_LINE": 0, + "NB_TIMESTEP_COOLDOWN_SUB": 0, + "NB_TIMESTEP_OVERFLOW_ALLOWED": 2, + "NB_TIMESTEP_RECONNECTION": 10, + "NO_OVERFLOW_DISCONNECTION": false, + "SOFT_OVERFLOW_THRESHOLD": 1.0 +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.0/01/actions.npz b/grid2op/data_test/runner_data/res_agent_1.10.0/01/actions.npz new file mode 100644 index 000000000..da81db1be Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.0/01/actions.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.0/01/agent_exec_times.npz b/grid2op/data_test/runner_data/res_agent_1.10.0/01/agent_exec_times.npz new file mode 100644 index 000000000..e88f10a0c Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.0/01/agent_exec_times.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.0/01/disc_lines_cascading_failure.npz b/grid2op/data_test/runner_data/res_agent_1.10.0/01/disc_lines_cascading_failure.npz new file mode 100644 index 000000000..be3a8e021 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.0/01/disc_lines_cascading_failure.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.0/01/env_modifications.npz b/grid2op/data_test/runner_data/res_agent_1.10.0/01/env_modifications.npz new file mode 100644 index 000000000..932771c86 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.0/01/env_modifications.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.0/01/episode_meta.json b/grid2op/data_test/runner_data/res_agent_1.10.0/01/episode_meta.json new file mode 100644 index 000000000..bbea98371 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.0/01/episode_meta.json @@ -0,0 +1,11 @@ +{ + "agent_seed": null, + "backend_type": "PandaPowerBackend_rte_case5_example", + "chronics_max_timestep": "100", + "chronics_path": "/home/donnotben/Documents/grid2op_dev/grid2op/data/rte_case5_example/chronics/01", + "cumulative_reward": 15.008170127868652, + "env_seed": null, + "env_type": "Environment_rte_case5_example", + "grid_path": "/home/donnotben/Documents/grid2op_dev/grid2op/data/rte_case5_example/grid.json", + "nb_timestep_played": 3 +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.0/01/episode_times.json b/grid2op/data_test/runner_data/res_agent_1.10.0/01/episode_times.json new file mode 100644 index 000000000..ee458534b --- /dev/null 
+++ b/grid2op/data_test/runner_data/res_agent_1.10.0/01/episode_times.json @@ -0,0 +1,12 @@ +{ + "Agent": { + "total": 9.590800618752837e-05 + }, + "Env": { + "apply_act": 0.012887014978332445, + "observation_computation": 0.0026607280306052417, + "powerflow_computation": 0.0951700690202415, + "total": 0.11071781202917919 + }, + "total": 0.11303038001642562 +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.0/01/grid2op.info b/grid2op/data_test/runner_data/res_agent_1.10.0/01/grid2op.info new file mode 100644 index 000000000..83661e42c --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.0/01/grid2op.info @@ -0,0 +1,3 @@ +{ + "version": "1.10.0" +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.0/01/observations.npz b/grid2op/data_test/runner_data/res_agent_1.10.0/01/observations.npz new file mode 100644 index 000000000..c1a569bb2 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.0/01/observations.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.0/01/opponent_attack.npz b/grid2op/data_test/runner_data/res_agent_1.10.0/01/opponent_attack.npz new file mode 100644 index 000000000..e05f26912 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.0/01/opponent_attack.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.0/01/other_rewards.json b/grid2op/data_test/runner_data/res_agent_1.10.0/01/other_rewards.json new file mode 100644 index 000000000..ca609c0c6 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.0/01/other_rewards.json @@ -0,0 +1,5 @@ +[ + {}, + {}, + {} +] \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.0/01/rewards.npz b/grid2op/data_test/runner_data/res_agent_1.10.0/01/rewards.npz new file mode 100644 index 000000000..aa51b2760 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.0/01/rewards.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.0/dict_action_space.json b/grid2op/data_test/runner_data/res_agent_1.10.0/dict_action_space.json new file mode 100644 index 000000000..22d803beb --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.0/dict_action_space.json @@ -0,0 +1,220 @@ +{ + "_PATH_ENV": null, + "_init_subtype": "grid2op.Action.topologyAction.TopologyAction", + "alarms_area_lines": [], + "alarms_area_names": [], + "alarms_lines_area": {}, + "alertable_line_ids": [], + "alertable_line_names": [], + "assistant_warning_type": null, + "dim_alarms": 0, + "dim_alerts": 0, + "env_name": "rte_case5_example", + "gen_cost_per_MW": [ + 0.0, + 70.0 + ], + "gen_max_ramp_down": [ + 0.0, + 10.0 + ], + "gen_max_ramp_up": [ + 0.0, + 10.0 + ], + "gen_min_downtime": [ + 0, + 4 + ], + "gen_min_uptime": [ + 0, + 4 + ], + "gen_pmax": [ + 10.0, + 30.0 + ], + "gen_pmin": [ + 0.0, + 0.0 + ], + "gen_pos_topo_vect": [ + 4, + 8 + ], + "gen_redispatchable": [ + false, + true + ], + "gen_renewable": [ + true, + false + ], + "gen_shutdown_cost": [ + 0.0, + 1.0 + ], + "gen_startup_cost": [ + 0.0, + 2.0 + ], + "gen_to_sub_pos": [ + 4, + 2 + ], + "gen_to_subid": [ + 0, + 1 + ], + "gen_type": [ + "wind", + "thermal" + ], + "glop_version": "1.10.0", + "grid_layout": { + "sub_0": [ + 0.0, + 0.0 + ], + "sub_1": [ + 0.0, + 400.0 + ], + "sub_2": [ + 200.0, + 400.0 + ], + "sub_3": [ + 400.0, + 400.0 + ], + "sub_4": [ + 400.0, + 0.0 + ] + }, + "line_ex_pos_topo_vect": [ + 6, + 9, + 13, + 18, + 10, + 14, + 15, + 19 + ], + "line_ex_to_sub_pos": [ 
+ 0, + 0, + 0, + 0, + 1, + 1, + 2, + 1 + ], + "line_ex_to_subid": [ + 1, + 2, + 3, + 4, + 2, + 3, + 3, + 4 + ], + "line_or_pos_topo_vect": [ + 0, + 1, + 2, + 3, + 7, + 11, + 12, + 16 + ], + "line_or_to_sub_pos": [ + 0, + 1, + 2, + 3, + 1, + 2, + 3, + 3 + ], + "line_or_to_subid": [ + 0, + 0, + 0, + 0, + 1, + 2, + 2, + 3 + ], + "load_pos_topo_vect": [ + 5, + 17, + 20 + ], + "load_to_sub_pos": [ + 5, + 4, + 2 + ], + "load_to_subid": [ + 0, + 3, + 4 + ], + "n_busbar_per_sub": "2", + "name_gen": [ + "gen_0_0", + "gen_1_1" + ], + "name_line": [ + "0_1_0", + "0_2_1", + "0_3_2", + "0_4_3", + "1_2_4", + "2_3_5", + "2_3_6", + "3_4_7" + ], + "name_load": [ + "load_0_0", + "load_3_1", + "load_4_2" + ], + "name_shunt": [], + "name_storage": [], + "name_sub": [ + "sub_0", + "sub_1", + "sub_2", + "sub_3", + "sub_4" + ], + "shunt_to_subid": [], + "storage_Emax": [], + "storage_Emin": [], + "storage_charging_efficiency": [], + "storage_discharging_efficiency": [], + "storage_loss": [], + "storage_marginal_cost": [], + "storage_max_p_absorb": [], + "storage_max_p_prod": [], + "storage_pos_topo_vect": [], + "storage_to_sub_pos": [], + "storage_to_subid": [], + "storage_type": [], + "sub_info": [ + 6, + 3, + 4, + 5, + 3 + ] +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.0/dict_attack_space.json b/grid2op/data_test/runner_data/res_agent_1.10.0/dict_attack_space.json new file mode 100644 index 000000000..92a2bce99 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.0/dict_attack_space.json @@ -0,0 +1,220 @@ +{ + "_PATH_ENV": null, + "_init_subtype": "grid2op.Action.dontAct.DontAct", + "alarms_area_lines": [], + "alarms_area_names": [], + "alarms_lines_area": {}, + "alertable_line_ids": [], + "alertable_line_names": [], + "assistant_warning_type": null, + "dim_alarms": 0, + "dim_alerts": 0, + "env_name": "rte_case5_example", + "gen_cost_per_MW": [ + 0.0, + 70.0 + ], + "gen_max_ramp_down": [ + 0.0, + 10.0 + ], + "gen_max_ramp_up": [ + 0.0, + 10.0 + ], + "gen_min_downtime": [ + 0, + 4 + ], + "gen_min_uptime": [ + 0, + 4 + ], + "gen_pmax": [ + 10.0, + 30.0 + ], + "gen_pmin": [ + 0.0, + 0.0 + ], + "gen_pos_topo_vect": [ + 4, + 8 + ], + "gen_redispatchable": [ + false, + true + ], + "gen_renewable": [ + true, + false + ], + "gen_shutdown_cost": [ + 0.0, + 1.0 + ], + "gen_startup_cost": [ + 0.0, + 2.0 + ], + "gen_to_sub_pos": [ + 4, + 2 + ], + "gen_to_subid": [ + 0, + 1 + ], + "gen_type": [ + "wind", + "thermal" + ], + "glop_version": "1.10.0", + "grid_layout": { + "sub_0": [ + 0.0, + 0.0 + ], + "sub_1": [ + 0.0, + 400.0 + ], + "sub_2": [ + 200.0, + 400.0 + ], + "sub_3": [ + 400.0, + 400.0 + ], + "sub_4": [ + 400.0, + 0.0 + ] + }, + "line_ex_pos_topo_vect": [ + 6, + 9, + 13, + 18, + 10, + 14, + 15, + 19 + ], + "line_ex_to_sub_pos": [ + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 1 + ], + "line_ex_to_subid": [ + 1, + 2, + 3, + 4, + 2, + 3, + 3, + 4 + ], + "line_or_pos_topo_vect": [ + 0, + 1, + 2, + 3, + 7, + 11, + 12, + 16 + ], + "line_or_to_sub_pos": [ + 0, + 1, + 2, + 3, + 1, + 2, + 3, + 3 + ], + "line_or_to_subid": [ + 0, + 0, + 0, + 0, + 1, + 2, + 2, + 3 + ], + "load_pos_topo_vect": [ + 5, + 17, + 20 + ], + "load_to_sub_pos": [ + 5, + 4, + 2 + ], + "load_to_subid": [ + 0, + 3, + 4 + ], + "n_busbar_per_sub": "2", + "name_gen": [ + "gen_0_0", + "gen_1_1" + ], + "name_line": [ + "0_1_0", + "0_2_1", + "0_3_2", + "0_4_3", + "1_2_4", + "2_3_5", + "2_3_6", + "3_4_7" + ], + "name_load": [ + "load_0_0", + "load_3_1", + "load_4_2" + ], + "name_shunt": [], + "name_storage": [], + 
"name_sub": [ + "sub_0", + "sub_1", + "sub_2", + "sub_3", + "sub_4" + ], + "shunt_to_subid": [], + "storage_Emax": [], + "storage_Emin": [], + "storage_charging_efficiency": [], + "storage_discharging_efficiency": [], + "storage_loss": [], + "storage_marginal_cost": [], + "storage_max_p_absorb": [], + "storage_max_p_prod": [], + "storage_pos_topo_vect": [], + "storage_to_sub_pos": [], + "storage_to_subid": [], + "storage_type": [], + "sub_info": [ + 6, + 3, + 4, + 5, + 3 + ] +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.0/dict_env_modification_space.json b/grid2op/data_test/runner_data/res_agent_1.10.0/dict_env_modification_space.json new file mode 100644 index 000000000..859a0efd0 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.0/dict_env_modification_space.json @@ -0,0 +1,220 @@ +{ + "_PATH_ENV": null, + "_init_subtype": "grid2op.Action.completeAction.CompleteAction", + "alarms_area_lines": [], + "alarms_area_names": [], + "alarms_lines_area": {}, + "alertable_line_ids": [], + "alertable_line_names": [], + "assistant_warning_type": null, + "dim_alarms": 0, + "dim_alerts": 0, + "env_name": "rte_case5_example", + "gen_cost_per_MW": [ + 0.0, + 70.0 + ], + "gen_max_ramp_down": [ + 0.0, + 10.0 + ], + "gen_max_ramp_up": [ + 0.0, + 10.0 + ], + "gen_min_downtime": [ + 0, + 4 + ], + "gen_min_uptime": [ + 0, + 4 + ], + "gen_pmax": [ + 10.0, + 30.0 + ], + "gen_pmin": [ + 0.0, + 0.0 + ], + "gen_pos_topo_vect": [ + 4, + 8 + ], + "gen_redispatchable": [ + false, + true + ], + "gen_renewable": [ + true, + false + ], + "gen_shutdown_cost": [ + 0.0, + 1.0 + ], + "gen_startup_cost": [ + 0.0, + 2.0 + ], + "gen_to_sub_pos": [ + 4, + 2 + ], + "gen_to_subid": [ + 0, + 1 + ], + "gen_type": [ + "wind", + "thermal" + ], + "glop_version": "1.10.0", + "grid_layout": { + "sub_0": [ + 0.0, + 0.0 + ], + "sub_1": [ + 0.0, + 400.0 + ], + "sub_2": [ + 200.0, + 400.0 + ], + "sub_3": [ + 400.0, + 400.0 + ], + "sub_4": [ + 400.0, + 0.0 + ] + }, + "line_ex_pos_topo_vect": [ + 6, + 9, + 13, + 18, + 10, + 14, + 15, + 19 + ], + "line_ex_to_sub_pos": [ + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 1 + ], + "line_ex_to_subid": [ + 1, + 2, + 3, + 4, + 2, + 3, + 3, + 4 + ], + "line_or_pos_topo_vect": [ + 0, + 1, + 2, + 3, + 7, + 11, + 12, + 16 + ], + "line_or_to_sub_pos": [ + 0, + 1, + 2, + 3, + 1, + 2, + 3, + 3 + ], + "line_or_to_subid": [ + 0, + 0, + 0, + 0, + 1, + 2, + 2, + 3 + ], + "load_pos_topo_vect": [ + 5, + 17, + 20 + ], + "load_to_sub_pos": [ + 5, + 4, + 2 + ], + "load_to_subid": [ + 0, + 3, + 4 + ], + "n_busbar_per_sub": "2", + "name_gen": [ + "gen_0_0", + "gen_1_1" + ], + "name_line": [ + "0_1_0", + "0_2_1", + "0_3_2", + "0_4_3", + "1_2_4", + "2_3_5", + "2_3_6", + "3_4_7" + ], + "name_load": [ + "load_0_0", + "load_3_1", + "load_4_2" + ], + "name_shunt": [], + "name_storage": [], + "name_sub": [ + "sub_0", + "sub_1", + "sub_2", + "sub_3", + "sub_4" + ], + "shunt_to_subid": [], + "storage_Emax": [], + "storage_Emin": [], + "storage_charging_efficiency": [], + "storage_discharging_efficiency": [], + "storage_loss": [], + "storage_marginal_cost": [], + "storage_max_p_absorb": [], + "storage_max_p_prod": [], + "storage_pos_topo_vect": [], + "storage_to_sub_pos": [], + "storage_to_subid": [], + "storage_type": [], + "sub_info": [ + 6, + 3, + 4, + 5, + 3 + ] +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.0/dict_observation_space.json b/grid2op/data_test/runner_data/res_agent_1.10.0/dict_observation_space.json new file mode 100644 
index 000000000..c334d19e0 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.0/dict_observation_space.json @@ -0,0 +1,220 @@ +{ + "_PATH_ENV": null, + "_init_subtype": "grid2op.Observation.completeObservation.CompleteObservation", + "alarms_area_lines": [], + "alarms_area_names": [], + "alarms_lines_area": {}, + "alertable_line_ids": [], + "alertable_line_names": [], + "assistant_warning_type": null, + "dim_alarms": 0, + "dim_alerts": 0, + "env_name": "rte_case5_example", + "gen_cost_per_MW": [ + 0.0, + 70.0 + ], + "gen_max_ramp_down": [ + 0.0, + 10.0 + ], + "gen_max_ramp_up": [ + 0.0, + 10.0 + ], + "gen_min_downtime": [ + 0, + 4 + ], + "gen_min_uptime": [ + 0, + 4 + ], + "gen_pmax": [ + 10.0, + 30.0 + ], + "gen_pmin": [ + 0.0, + 0.0 + ], + "gen_pos_topo_vect": [ + 4, + 8 + ], + "gen_redispatchable": [ + false, + true + ], + "gen_renewable": [ + true, + false + ], + "gen_shutdown_cost": [ + 0.0, + 1.0 + ], + "gen_startup_cost": [ + 0.0, + 2.0 + ], + "gen_to_sub_pos": [ + 4, + 2 + ], + "gen_to_subid": [ + 0, + 1 + ], + "gen_type": [ + "wind", + "thermal" + ], + "glop_version": "1.10.0", + "grid_layout": { + "sub_0": [ + 0.0, + 0.0 + ], + "sub_1": [ + 0.0, + 400.0 + ], + "sub_2": [ + 200.0, + 400.0 + ], + "sub_3": [ + 400.0, + 400.0 + ], + "sub_4": [ + 400.0, + 0.0 + ] + }, + "line_ex_pos_topo_vect": [ + 6, + 9, + 13, + 18, + 10, + 14, + 15, + 19 + ], + "line_ex_to_sub_pos": [ + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 1 + ], + "line_ex_to_subid": [ + 1, + 2, + 3, + 4, + 2, + 3, + 3, + 4 + ], + "line_or_pos_topo_vect": [ + 0, + 1, + 2, + 3, + 7, + 11, + 12, + 16 + ], + "line_or_to_sub_pos": [ + 0, + 1, + 2, + 3, + 1, + 2, + 3, + 3 + ], + "line_or_to_subid": [ + 0, + 0, + 0, + 0, + 1, + 2, + 2, + 3 + ], + "load_pos_topo_vect": [ + 5, + 17, + 20 + ], + "load_to_sub_pos": [ + 5, + 4, + 2 + ], + "load_to_subid": [ + 0, + 3, + 4 + ], + "n_busbar_per_sub": "2", + "name_gen": [ + "gen_0_0", + "gen_1_1" + ], + "name_line": [ + "0_1_0", + "0_2_1", + "0_3_2", + "0_4_3", + "1_2_4", + "2_3_5", + "2_3_6", + "3_4_7" + ], + "name_load": [ + "load_0_0", + "load_3_1", + "load_4_2" + ], + "name_shunt": [], + "name_storage": [], + "name_sub": [ + "sub_0", + "sub_1", + "sub_2", + "sub_3", + "sub_4" + ], + "shunt_to_subid": [], + "storage_Emax": [], + "storage_Emin": [], + "storage_charging_efficiency": [], + "storage_discharging_efficiency": [], + "storage_loss": [], + "storage_marginal_cost": [], + "storage_max_p_absorb": [], + "storage_max_p_prod": [], + "storage_pos_topo_vect": [], + "storage_to_sub_pos": [], + "storage_to_subid": [], + "storage_type": [], + "sub_info": [ + 6, + 3, + 4, + 5, + 3 + ] +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.1/00/_parameters.json b/grid2op/data_test/runner_data/res_agent_1.10.1/00/_parameters.json new file mode 100644 index 000000000..ce75edee3 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.1/00/_parameters.json @@ -0,0 +1,23 @@ +{ + "ACTIVATE_STORAGE_LOSS": true, + "ALARM_BEST_TIME": 12, + "ALARM_WINDOW_SIZE": 12, + "ALERT_TIME_WINDOW": 12, + "ALLOW_DISPATCH_GEN_SWITCH_OFF": true, + "ENV_DC": false, + "FORECAST_DC": false, + "HARD_OVERFLOW_THRESHOLD": 2.0, + "IGNORE_MIN_UP_DOWN_TIME": true, + "INIT_STORAGE_CAPACITY": 0.5, + "LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION": false, + "MAX_LINE_STATUS_CHANGED": 1, + "MAX_SIMULATE_PER_EPISODE": -1, + "MAX_SIMULATE_PER_STEP": -1, + "MAX_SUB_CHANGED": 1, + "NB_TIMESTEP_COOLDOWN_LINE": 0, + "NB_TIMESTEP_COOLDOWN_SUB": 0, + "NB_TIMESTEP_OVERFLOW_ALLOWED": 2, + 
"NB_TIMESTEP_RECONNECTION": 10, + "NO_OVERFLOW_DISCONNECTION": false, + "SOFT_OVERFLOW_THRESHOLD": 1.0 +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.1/00/actions.npz b/grid2op/data_test/runner_data/res_agent_1.10.1/00/actions.npz new file mode 100644 index 000000000..59252e167 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.1/00/actions.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.1/00/agent_exec_times.npz b/grid2op/data_test/runner_data/res_agent_1.10.1/00/agent_exec_times.npz new file mode 100644 index 000000000..5e4ae71ba Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.1/00/agent_exec_times.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.1/00/disc_lines_cascading_failure.npz b/grid2op/data_test/runner_data/res_agent_1.10.1/00/disc_lines_cascading_failure.npz new file mode 100644 index 000000000..298421483 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.1/00/disc_lines_cascading_failure.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.1/00/env_modifications.npz b/grid2op/data_test/runner_data/res_agent_1.10.1/00/env_modifications.npz new file mode 100644 index 000000000..e685d248b Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.1/00/env_modifications.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.1/00/episode_meta.json b/grid2op/data_test/runner_data/res_agent_1.10.1/00/episode_meta.json new file mode 100644 index 000000000..52ca85dcc --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.1/00/episode_meta.json @@ -0,0 +1,11 @@ +{ + "agent_seed": null, + "backend_type": "PandaPowerBackend_rte_case5_example", + "chronics_max_timestep": "100", + "chronics_path": "/home/donnotben/Documents/grid2op_dev/grid2op/data/rte_case5_example/chronics/00", + "cumulative_reward": 18.121200561523438, + "env_seed": null, + "env_type": "Environment_rte_case5_example", + "grid_path": "/home/donnotben/Documents/grid2op_dev/grid2op/data/rte_case5_example/grid.json", + "nb_timestep_played": 5 +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.1/00/episode_times.json b/grid2op/data_test/runner_data/res_agent_1.10.1/00/episode_times.json new file mode 100644 index 000000000..b3a3b1320 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.1/00/episode_times.json @@ -0,0 +1,12 @@ +{ + "Agent": { + "total": 0.00013155499982531182 + }, + "Env": { + "apply_act": 0.01763039100296737, + "observation_computation": 0.005023140000048443, + "powerflow_computation": 0.22569150700110185, + "total": 0.24834503800411767 + }, + "total": 0.25136325499988743 +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.1/00/grid2op.info b/grid2op/data_test/runner_data/res_agent_1.10.1/00/grid2op.info new file mode 100644 index 000000000..bad939ea2 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.1/00/grid2op.info @@ -0,0 +1,3 @@ +{ + "version": "1.10.1" +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.1/00/observations.npz b/grid2op/data_test/runner_data/res_agent_1.10.1/00/observations.npz new file mode 100644 index 000000000..ca66eefd0 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.1/00/observations.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.1/00/opponent_attack.npz 
b/grid2op/data_test/runner_data/res_agent_1.10.1/00/opponent_attack.npz new file mode 100644 index 000000000..e05f26912 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.1/00/opponent_attack.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.1/00/other_rewards.json b/grid2op/data_test/runner_data/res_agent_1.10.1/00/other_rewards.json new file mode 100644 index 000000000..3f83ec75a --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.1/00/other_rewards.json @@ -0,0 +1,7 @@ +[ + {}, + {}, + {}, + {}, + {} +] \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.1/00/rewards.npz b/grid2op/data_test/runner_data/res_agent_1.10.1/00/rewards.npz new file mode 100644 index 000000000..4bbf03b41 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.1/00/rewards.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.1/01/_parameters.json b/grid2op/data_test/runner_data/res_agent_1.10.1/01/_parameters.json new file mode 100644 index 000000000..ce75edee3 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.1/01/_parameters.json @@ -0,0 +1,23 @@ +{ + "ACTIVATE_STORAGE_LOSS": true, + "ALARM_BEST_TIME": 12, + "ALARM_WINDOW_SIZE": 12, + "ALERT_TIME_WINDOW": 12, + "ALLOW_DISPATCH_GEN_SWITCH_OFF": true, + "ENV_DC": false, + "FORECAST_DC": false, + "HARD_OVERFLOW_THRESHOLD": 2.0, + "IGNORE_MIN_UP_DOWN_TIME": true, + "INIT_STORAGE_CAPACITY": 0.5, + "LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION": false, + "MAX_LINE_STATUS_CHANGED": 1, + "MAX_SIMULATE_PER_EPISODE": -1, + "MAX_SIMULATE_PER_STEP": -1, + "MAX_SUB_CHANGED": 1, + "NB_TIMESTEP_COOLDOWN_LINE": 0, + "NB_TIMESTEP_COOLDOWN_SUB": 0, + "NB_TIMESTEP_OVERFLOW_ALLOWED": 2, + "NB_TIMESTEP_RECONNECTION": 10, + "NO_OVERFLOW_DISCONNECTION": false, + "SOFT_OVERFLOW_THRESHOLD": 1.0 +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.1/01/actions.npz b/grid2op/data_test/runner_data/res_agent_1.10.1/01/actions.npz new file mode 100644 index 000000000..4b299530a Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.1/01/actions.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.1/01/agent_exec_times.npz b/grid2op/data_test/runner_data/res_agent_1.10.1/01/agent_exec_times.npz new file mode 100644 index 000000000..6a893674b Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.1/01/agent_exec_times.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.1/01/disc_lines_cascading_failure.npz b/grid2op/data_test/runner_data/res_agent_1.10.1/01/disc_lines_cascading_failure.npz new file mode 100644 index 000000000..45bff0200 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.1/01/disc_lines_cascading_failure.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.1/01/env_modifications.npz b/grid2op/data_test/runner_data/res_agent_1.10.1/01/env_modifications.npz new file mode 100644 index 000000000..08e6436df Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.1/01/env_modifications.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.1/01/episode_meta.json b/grid2op/data_test/runner_data/res_agent_1.10.1/01/episode_meta.json new file mode 100644 index 000000000..6540a50ba --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.1/01/episode_meta.json @@ -0,0 +1,11 @@ +{ + "agent_seed": null, + "backend_type": "PandaPowerBackend_rte_case5_example", + 
"chronics_max_timestep": "100", + "chronics_path": "/home/donnotben/Documents/grid2op_dev/grid2op/data/rte_case5_example/chronics/01", + "cumulative_reward": 32.010162353515625, + "env_seed": null, + "env_type": "Environment_rte_case5_example", + "grid_path": "/home/donnotben/Documents/grid2op_dev/grid2op/data/rte_case5_example/grid.json", + "nb_timestep_played": 6 +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.1/01/episode_times.json b/grid2op/data_test/runner_data/res_agent_1.10.1/01/episode_times.json new file mode 100644 index 000000000..4c475ffb2 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.1/01/episode_times.json @@ -0,0 +1,12 @@ +{ + "Agent": { + "total": 0.00017310699695372023 + }, + "Env": { + "apply_act": 0.02002973099479277, + "observation_computation": 0.006288309999945341, + "powerflow_computation": 0.3264216909992683, + "total": 0.35273973199400643 + }, + "total": 0.35641096600011224 +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.1/01/grid2op.info b/grid2op/data_test/runner_data/res_agent_1.10.1/01/grid2op.info new file mode 100644 index 000000000..bad939ea2 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.1/01/grid2op.info @@ -0,0 +1,3 @@ +{ + "version": "1.10.1" +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.1/01/observations.npz b/grid2op/data_test/runner_data/res_agent_1.10.1/01/observations.npz new file mode 100644 index 000000000..4fa22b71b Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.1/01/observations.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.1/01/opponent_attack.npz b/grid2op/data_test/runner_data/res_agent_1.10.1/01/opponent_attack.npz new file mode 100644 index 000000000..e05f26912 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.1/01/opponent_attack.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.1/01/other_rewards.json b/grid2op/data_test/runner_data/res_agent_1.10.1/01/other_rewards.json new file mode 100644 index 000000000..6c9b492a2 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.1/01/other_rewards.json @@ -0,0 +1,8 @@ +[ + {}, + {}, + {}, + {}, + {}, + {} +] \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.1/01/rewards.npz b/grid2op/data_test/runner_data/res_agent_1.10.1/01/rewards.npz new file mode 100644 index 000000000..5d2f725ed Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.1/01/rewards.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.1/dict_action_space.json b/grid2op/data_test/runner_data/res_agent_1.10.1/dict_action_space.json new file mode 100644 index 000000000..5a03afed0 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.1/dict_action_space.json @@ -0,0 +1,220 @@ +{ + "_PATH_ENV": null, + "_init_subtype": "grid2op.Action.topologyAction.TopologyAction", + "alarms_area_lines": [], + "alarms_area_names": [], + "alarms_lines_area": {}, + "alertable_line_ids": [], + "alertable_line_names": [], + "assistant_warning_type": null, + "dim_alarms": 0, + "dim_alerts": 0, + "env_name": "rte_case5_example", + "gen_cost_per_MW": [ + 0.0, + 70.0 + ], + "gen_max_ramp_down": [ + 0.0, + 10.0 + ], + "gen_max_ramp_up": [ + 0.0, + 10.0 + ], + "gen_min_downtime": [ + 0, + 4 + ], + "gen_min_uptime": [ + 0, + 4 + ], + "gen_pmax": [ + 10.0, + 30.0 + ], + "gen_pmin": [ + 0.0, + 0.0 + ], + 
"gen_pos_topo_vect": [ + 4, + 8 + ], + "gen_redispatchable": [ + false, + true + ], + "gen_renewable": [ + true, + false + ], + "gen_shutdown_cost": [ + 0.0, + 1.0 + ], + "gen_startup_cost": [ + 0.0, + 2.0 + ], + "gen_to_sub_pos": [ + 4, + 2 + ], + "gen_to_subid": [ + 0, + 1 + ], + "gen_type": [ + "wind", + "thermal" + ], + "glop_version": "1.10.1", + "grid_layout": { + "sub_0": [ + 0.0, + 0.0 + ], + "sub_1": [ + 0.0, + 400.0 + ], + "sub_2": [ + 200.0, + 400.0 + ], + "sub_3": [ + 400.0, + 400.0 + ], + "sub_4": [ + 400.0, + 0.0 + ] + }, + "line_ex_pos_topo_vect": [ + 6, + 9, + 13, + 18, + 10, + 14, + 15, + 19 + ], + "line_ex_to_sub_pos": [ + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 1 + ], + "line_ex_to_subid": [ + 1, + 2, + 3, + 4, + 2, + 3, + 3, + 4 + ], + "line_or_pos_topo_vect": [ + 0, + 1, + 2, + 3, + 7, + 11, + 12, + 16 + ], + "line_or_to_sub_pos": [ + 0, + 1, + 2, + 3, + 1, + 2, + 3, + 3 + ], + "line_or_to_subid": [ + 0, + 0, + 0, + 0, + 1, + 2, + 2, + 3 + ], + "load_pos_topo_vect": [ + 5, + 17, + 20 + ], + "load_to_sub_pos": [ + 5, + 4, + 2 + ], + "load_to_subid": [ + 0, + 3, + 4 + ], + "n_busbar_per_sub": "2", + "name_gen": [ + "gen_0_0", + "gen_1_1" + ], + "name_line": [ + "0_1_0", + "0_2_1", + "0_3_2", + "0_4_3", + "1_2_4", + "2_3_5", + "2_3_6", + "3_4_7" + ], + "name_load": [ + "load_0_0", + "load_3_1", + "load_4_2" + ], + "name_shunt": [], + "name_storage": [], + "name_sub": [ + "sub_0", + "sub_1", + "sub_2", + "sub_3", + "sub_4" + ], + "shunt_to_subid": [], + "storage_Emax": [], + "storage_Emin": [], + "storage_charging_efficiency": [], + "storage_discharging_efficiency": [], + "storage_loss": [], + "storage_marginal_cost": [], + "storage_max_p_absorb": [], + "storage_max_p_prod": [], + "storage_pos_topo_vect": [], + "storage_to_sub_pos": [], + "storage_to_subid": [], + "storage_type": [], + "sub_info": [ + 6, + 3, + 4, + 5, + 3 + ] +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.1/dict_attack_space.json b/grid2op/data_test/runner_data/res_agent_1.10.1/dict_attack_space.json new file mode 100644 index 000000000..366bc97c2 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.1/dict_attack_space.json @@ -0,0 +1,220 @@ +{ + "_PATH_ENV": null, + "_init_subtype": "grid2op.Action.dontAct.DontAct", + "alarms_area_lines": [], + "alarms_area_names": [], + "alarms_lines_area": {}, + "alertable_line_ids": [], + "alertable_line_names": [], + "assistant_warning_type": null, + "dim_alarms": 0, + "dim_alerts": 0, + "env_name": "rte_case5_example", + "gen_cost_per_MW": [ + 0.0, + 70.0 + ], + "gen_max_ramp_down": [ + 0.0, + 10.0 + ], + "gen_max_ramp_up": [ + 0.0, + 10.0 + ], + "gen_min_downtime": [ + 0, + 4 + ], + "gen_min_uptime": [ + 0, + 4 + ], + "gen_pmax": [ + 10.0, + 30.0 + ], + "gen_pmin": [ + 0.0, + 0.0 + ], + "gen_pos_topo_vect": [ + 4, + 8 + ], + "gen_redispatchable": [ + false, + true + ], + "gen_renewable": [ + true, + false + ], + "gen_shutdown_cost": [ + 0.0, + 1.0 + ], + "gen_startup_cost": [ + 0.0, + 2.0 + ], + "gen_to_sub_pos": [ + 4, + 2 + ], + "gen_to_subid": [ + 0, + 1 + ], + "gen_type": [ + "wind", + "thermal" + ], + "glop_version": "1.10.1", + "grid_layout": { + "sub_0": [ + 0.0, + 0.0 + ], + "sub_1": [ + 0.0, + 400.0 + ], + "sub_2": [ + 200.0, + 400.0 + ], + "sub_3": [ + 400.0, + 400.0 + ], + "sub_4": [ + 400.0, + 0.0 + ] + }, + "line_ex_pos_topo_vect": [ + 6, + 9, + 13, + 18, + 10, + 14, + 15, + 19 + ], + "line_ex_to_sub_pos": [ + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 1 + ], + "line_ex_to_subid": [ + 1, + 2, + 3, + 4, + 2, + 3, 
+ 3, + 4 + ], + "line_or_pos_topo_vect": [ + 0, + 1, + 2, + 3, + 7, + 11, + 12, + 16 + ], + "line_or_to_sub_pos": [ + 0, + 1, + 2, + 3, + 1, + 2, + 3, + 3 + ], + "line_or_to_subid": [ + 0, + 0, + 0, + 0, + 1, + 2, + 2, + 3 + ], + "load_pos_topo_vect": [ + 5, + 17, + 20 + ], + "load_to_sub_pos": [ + 5, + 4, + 2 + ], + "load_to_subid": [ + 0, + 3, + 4 + ], + "n_busbar_per_sub": "2", + "name_gen": [ + "gen_0_0", + "gen_1_1" + ], + "name_line": [ + "0_1_0", + "0_2_1", + "0_3_2", + "0_4_3", + "1_2_4", + "2_3_5", + "2_3_6", + "3_4_7" + ], + "name_load": [ + "load_0_0", + "load_3_1", + "load_4_2" + ], + "name_shunt": [], + "name_storage": [], + "name_sub": [ + "sub_0", + "sub_1", + "sub_2", + "sub_3", + "sub_4" + ], + "shunt_to_subid": [], + "storage_Emax": [], + "storage_Emin": [], + "storage_charging_efficiency": [], + "storage_discharging_efficiency": [], + "storage_loss": [], + "storage_marginal_cost": [], + "storage_max_p_absorb": [], + "storage_max_p_prod": [], + "storage_pos_topo_vect": [], + "storage_to_sub_pos": [], + "storage_to_subid": [], + "storage_type": [], + "sub_info": [ + 6, + 3, + 4, + 5, + 3 + ] +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.1/dict_env_modification_space.json b/grid2op/data_test/runner_data/res_agent_1.10.1/dict_env_modification_space.json new file mode 100644 index 000000000..f31de3038 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.1/dict_env_modification_space.json @@ -0,0 +1,220 @@ +{ + "_PATH_ENV": null, + "_init_subtype": "grid2op.Action.completeAction.CompleteAction", + "alarms_area_lines": [], + "alarms_area_names": [], + "alarms_lines_area": {}, + "alertable_line_ids": [], + "alertable_line_names": [], + "assistant_warning_type": null, + "dim_alarms": 0, + "dim_alerts": 0, + "env_name": "rte_case5_example", + "gen_cost_per_MW": [ + 0.0, + 70.0 + ], + "gen_max_ramp_down": [ + 0.0, + 10.0 + ], + "gen_max_ramp_up": [ + 0.0, + 10.0 + ], + "gen_min_downtime": [ + 0, + 4 + ], + "gen_min_uptime": [ + 0, + 4 + ], + "gen_pmax": [ + 10.0, + 30.0 + ], + "gen_pmin": [ + 0.0, + 0.0 + ], + "gen_pos_topo_vect": [ + 4, + 8 + ], + "gen_redispatchable": [ + false, + true + ], + "gen_renewable": [ + true, + false + ], + "gen_shutdown_cost": [ + 0.0, + 1.0 + ], + "gen_startup_cost": [ + 0.0, + 2.0 + ], + "gen_to_sub_pos": [ + 4, + 2 + ], + "gen_to_subid": [ + 0, + 1 + ], + "gen_type": [ + "wind", + "thermal" + ], + "glop_version": "1.10.1", + "grid_layout": { + "sub_0": [ + 0.0, + 0.0 + ], + "sub_1": [ + 0.0, + 400.0 + ], + "sub_2": [ + 200.0, + 400.0 + ], + "sub_3": [ + 400.0, + 400.0 + ], + "sub_4": [ + 400.0, + 0.0 + ] + }, + "line_ex_pos_topo_vect": [ + 6, + 9, + 13, + 18, + 10, + 14, + 15, + 19 + ], + "line_ex_to_sub_pos": [ + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 1 + ], + "line_ex_to_subid": [ + 1, + 2, + 3, + 4, + 2, + 3, + 3, + 4 + ], + "line_or_pos_topo_vect": [ + 0, + 1, + 2, + 3, + 7, + 11, + 12, + 16 + ], + "line_or_to_sub_pos": [ + 0, + 1, + 2, + 3, + 1, + 2, + 3, + 3 + ], + "line_or_to_subid": [ + 0, + 0, + 0, + 0, + 1, + 2, + 2, + 3 + ], + "load_pos_topo_vect": [ + 5, + 17, + 20 + ], + "load_to_sub_pos": [ + 5, + 4, + 2 + ], + "load_to_subid": [ + 0, + 3, + 4 + ], + "n_busbar_per_sub": "2", + "name_gen": [ + "gen_0_0", + "gen_1_1" + ], + "name_line": [ + "0_1_0", + "0_2_1", + "0_3_2", + "0_4_3", + "1_2_4", + "2_3_5", + "2_3_6", + "3_4_7" + ], + "name_load": [ + "load_0_0", + "load_3_1", + "load_4_2" + ], + "name_shunt": [], + "name_storage": [], + "name_sub": [ + "sub_0", + "sub_1", + "sub_2", + 
"sub_3", + "sub_4" + ], + "shunt_to_subid": [], + "storage_Emax": [], + "storage_Emin": [], + "storage_charging_efficiency": [], + "storage_discharging_efficiency": [], + "storage_loss": [], + "storage_marginal_cost": [], + "storage_max_p_absorb": [], + "storage_max_p_prod": [], + "storage_pos_topo_vect": [], + "storage_to_sub_pos": [], + "storage_to_subid": [], + "storage_type": [], + "sub_info": [ + 6, + 3, + 4, + 5, + 3 + ] +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.1/dict_observation_space.json b/grid2op/data_test/runner_data/res_agent_1.10.1/dict_observation_space.json new file mode 100644 index 000000000..47fcd8e23 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.1/dict_observation_space.json @@ -0,0 +1,220 @@ +{ + "_PATH_ENV": null, + "_init_subtype": "grid2op.Observation.completeObservation.CompleteObservation", + "alarms_area_lines": [], + "alarms_area_names": [], + "alarms_lines_area": {}, + "alertable_line_ids": [], + "alertable_line_names": [], + "assistant_warning_type": null, + "dim_alarms": 0, + "dim_alerts": 0, + "env_name": "rte_case5_example", + "gen_cost_per_MW": [ + 0.0, + 70.0 + ], + "gen_max_ramp_down": [ + 0.0, + 10.0 + ], + "gen_max_ramp_up": [ + 0.0, + 10.0 + ], + "gen_min_downtime": [ + 0, + 4 + ], + "gen_min_uptime": [ + 0, + 4 + ], + "gen_pmax": [ + 10.0, + 30.0 + ], + "gen_pmin": [ + 0.0, + 0.0 + ], + "gen_pos_topo_vect": [ + 4, + 8 + ], + "gen_redispatchable": [ + false, + true + ], + "gen_renewable": [ + true, + false + ], + "gen_shutdown_cost": [ + 0.0, + 1.0 + ], + "gen_startup_cost": [ + 0.0, + 2.0 + ], + "gen_to_sub_pos": [ + 4, + 2 + ], + "gen_to_subid": [ + 0, + 1 + ], + "gen_type": [ + "wind", + "thermal" + ], + "glop_version": "1.10.1", + "grid_layout": { + "sub_0": [ + 0.0, + 0.0 + ], + "sub_1": [ + 0.0, + 400.0 + ], + "sub_2": [ + 200.0, + 400.0 + ], + "sub_3": [ + 400.0, + 400.0 + ], + "sub_4": [ + 400.0, + 0.0 + ] + }, + "line_ex_pos_topo_vect": [ + 6, + 9, + 13, + 18, + 10, + 14, + 15, + 19 + ], + "line_ex_to_sub_pos": [ + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 1 + ], + "line_ex_to_subid": [ + 1, + 2, + 3, + 4, + 2, + 3, + 3, + 4 + ], + "line_or_pos_topo_vect": [ + 0, + 1, + 2, + 3, + 7, + 11, + 12, + 16 + ], + "line_or_to_sub_pos": [ + 0, + 1, + 2, + 3, + 1, + 2, + 3, + 3 + ], + "line_or_to_subid": [ + 0, + 0, + 0, + 0, + 1, + 2, + 2, + 3 + ], + "load_pos_topo_vect": [ + 5, + 17, + 20 + ], + "load_to_sub_pos": [ + 5, + 4, + 2 + ], + "load_to_subid": [ + 0, + 3, + 4 + ], + "n_busbar_per_sub": "2", + "name_gen": [ + "gen_0_0", + "gen_1_1" + ], + "name_line": [ + "0_1_0", + "0_2_1", + "0_3_2", + "0_4_3", + "1_2_4", + "2_3_5", + "2_3_6", + "3_4_7" + ], + "name_load": [ + "load_0_0", + "load_3_1", + "load_4_2" + ], + "name_shunt": [], + "name_storage": [], + "name_sub": [ + "sub_0", + "sub_1", + "sub_2", + "sub_3", + "sub_4" + ], + "shunt_to_subid": [], + "storage_Emax": [], + "storage_Emin": [], + "storage_charging_efficiency": [], + "storage_discharging_efficiency": [], + "storage_loss": [], + "storage_marginal_cost": [], + "storage_max_p_absorb": [], + "storage_max_p_prod": [], + "storage_pos_topo_vect": [], + "storage_to_sub_pos": [], + "storage_to_subid": [], + "storage_type": [], + "sub_info": [ + 6, + 3, + 4, + 5, + 3 + ] +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.2/00/_parameters.json b/grid2op/data_test/runner_data/res_agent_1.10.2/00/_parameters.json new file mode 100644 index 000000000..46aaa9417 --- /dev/null +++ 
b/grid2op/data_test/runner_data/res_agent_1.10.2/00/_parameters.json @@ -0,0 +1,24 @@ +{ + "ACTIVATE_STORAGE_LOSS": true, + "ALARM_BEST_TIME": 12, + "ALARM_WINDOW_SIZE": 12, + "ALERT_TIME_WINDOW": 12, + "ALLOW_DISPATCH_GEN_SWITCH_OFF": true, + "ENV_DC": false, + "FORECAST_DC": false, + "HARD_OVERFLOW_THRESHOLD": 2.0, + "IGNORE_INITIAL_STATE_TIME_SERIE": 0, + "IGNORE_MIN_UP_DOWN_TIME": true, + "INIT_STORAGE_CAPACITY": 0.5, + "LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION": false, + "MAX_LINE_STATUS_CHANGED": 1, + "MAX_SIMULATE_PER_EPISODE": -1, + "MAX_SIMULATE_PER_STEP": -1, + "MAX_SUB_CHANGED": 1, + "NB_TIMESTEP_COOLDOWN_LINE": 0, + "NB_TIMESTEP_COOLDOWN_SUB": 0, + "NB_TIMESTEP_OVERFLOW_ALLOWED": 2, + "NB_TIMESTEP_RECONNECTION": 10, + "NO_OVERFLOW_DISCONNECTION": false, + "SOFT_OVERFLOW_THRESHOLD": 1.0 +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.2/00/actions.npz b/grid2op/data_test/runner_data/res_agent_1.10.2/00/actions.npz new file mode 100644 index 000000000..85b8288d4 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.2/00/actions.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.2/00/agent_exec_times.npz b/grid2op/data_test/runner_data/res_agent_1.10.2/00/agent_exec_times.npz new file mode 100644 index 000000000..6b8ad1c69 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.2/00/agent_exec_times.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.2/00/disc_lines_cascading_failure.npz b/grid2op/data_test/runner_data/res_agent_1.10.2/00/disc_lines_cascading_failure.npz new file mode 100644 index 000000000..298421483 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.2/00/disc_lines_cascading_failure.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.2/00/env_modifications.npz b/grid2op/data_test/runner_data/res_agent_1.10.2/00/env_modifications.npz new file mode 100644 index 000000000..eb44eab67 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.2/00/env_modifications.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.2/00/episode_meta.json b/grid2op/data_test/runner_data/res_agent_1.10.2/00/episode_meta.json new file mode 100644 index 000000000..9e7ea30b0 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.2/00/episode_meta.json @@ -0,0 +1,11 @@ +{ + "agent_seed": null, + "backend_type": "PandaPowerBackend_rte_case5_example", + "chronics_max_timestep": "100", + "chronics_path": "/home/donnotben/Documents/grid2op_dev/grid2op/data/rte_case5_example/chronics/00", + "cumulative_reward": 22.041643142700195, + "env_seed": null, + "env_type": "Environment_rte_case5_example", + "grid_path": "/home/donnotben/Documents/grid2op_dev/grid2op/data/rte_case5_example/grid.json", + "nb_timestep_played": 6 +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.2/00/episode_times.json b/grid2op/data_test/runner_data/res_agent_1.10.2/00/episode_times.json new file mode 100644 index 000000000..2c0755828 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.2/00/episode_times.json @@ -0,0 +1,12 @@ +{ + "Agent": { + "total": 0.00021546400239458308 + }, + "Env": { + "apply_act": 0.029873117000533966, + "observation_computation": 0.009064515999853029, + "powerflow_computation": 0.22099059000356647, + "total": 0.25992822300395346 + }, + "total": 0.26562659699993674 +} \ No newline at end of file diff --git 
a/grid2op/data_test/runner_data/res_agent_1.10.2/00/grid2op.info b/grid2op/data_test/runner_data/res_agent_1.10.2/00/grid2op.info new file mode 100644 index 000000000..2cfc68e96 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.2/00/grid2op.info @@ -0,0 +1,3 @@ +{ + "version": "1.10.2" +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.2/00/observations.npz b/grid2op/data_test/runner_data/res_agent_1.10.2/00/observations.npz new file mode 100644 index 000000000..63d48ac1c Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.2/00/observations.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.2/00/opponent_attack.npz b/grid2op/data_test/runner_data/res_agent_1.10.2/00/opponent_attack.npz new file mode 100644 index 000000000..e05f26912 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.2/00/opponent_attack.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.2/00/other_rewards.json b/grid2op/data_test/runner_data/res_agent_1.10.2/00/other_rewards.json new file mode 100644 index 000000000..6c9b492a2 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.2/00/other_rewards.json @@ -0,0 +1,8 @@ +[ + {}, + {}, + {}, + {}, + {}, + {} +] \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.2/00/rewards.npz b/grid2op/data_test/runner_data/res_agent_1.10.2/00/rewards.npz new file mode 100644 index 000000000..32b50b06d Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.2/00/rewards.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.2/01/_parameters.json b/grid2op/data_test/runner_data/res_agent_1.10.2/01/_parameters.json new file mode 100644 index 000000000..46aaa9417 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.2/01/_parameters.json @@ -0,0 +1,24 @@ +{ + "ACTIVATE_STORAGE_LOSS": true, + "ALARM_BEST_TIME": 12, + "ALARM_WINDOW_SIZE": 12, + "ALERT_TIME_WINDOW": 12, + "ALLOW_DISPATCH_GEN_SWITCH_OFF": true, + "ENV_DC": false, + "FORECAST_DC": false, + "HARD_OVERFLOW_THRESHOLD": 2.0, + "IGNORE_INITIAL_STATE_TIME_SERIE": 0, + "IGNORE_MIN_UP_DOWN_TIME": true, + "INIT_STORAGE_CAPACITY": 0.5, + "LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION": false, + "MAX_LINE_STATUS_CHANGED": 1, + "MAX_SIMULATE_PER_EPISODE": -1, + "MAX_SIMULATE_PER_STEP": -1, + "MAX_SUB_CHANGED": 1, + "NB_TIMESTEP_COOLDOWN_LINE": 0, + "NB_TIMESTEP_COOLDOWN_SUB": 0, + "NB_TIMESTEP_OVERFLOW_ALLOWED": 2, + "NB_TIMESTEP_RECONNECTION": 10, + "NO_OVERFLOW_DISCONNECTION": false, + "SOFT_OVERFLOW_THRESHOLD": 1.0 +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.2/01/actions.npz b/grid2op/data_test/runner_data/res_agent_1.10.2/01/actions.npz new file mode 100644 index 000000000..286ce32df Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.2/01/actions.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.2/01/agent_exec_times.npz b/grid2op/data_test/runner_data/res_agent_1.10.2/01/agent_exec_times.npz new file mode 100644 index 000000000..eeed009be Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.2/01/agent_exec_times.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.2/01/disc_lines_cascading_failure.npz b/grid2op/data_test/runner_data/res_agent_1.10.2/01/disc_lines_cascading_failure.npz new file mode 100644 index 000000000..c50885e76 Binary files /dev/null and 
b/grid2op/data_test/runner_data/res_agent_1.10.2/01/disc_lines_cascading_failure.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.2/01/env_modifications.npz b/grid2op/data_test/runner_data/res_agent_1.10.2/01/env_modifications.npz new file mode 100644 index 000000000..932771c86 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.2/01/env_modifications.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.2/01/episode_meta.json b/grid2op/data_test/runner_data/res_agent_1.10.2/01/episode_meta.json new file mode 100644 index 000000000..b8a5e55c6 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.2/01/episode_meta.json @@ -0,0 +1,11 @@ +{ + "agent_seed": null, + "backend_type": "PandaPowerBackend_rte_case5_example", + "chronics_max_timestep": "100", + "chronics_path": "/home/donnotben/Documents/grid2op_dev/grid2op/data/rte_case5_example/chronics/01", + "cumulative_reward": 11.293323516845703, + "env_seed": null, + "env_type": "Environment_rte_case5_example", + "grid_path": "/home/donnotben/Documents/grid2op_dev/grid2op/data/rte_case5_example/grid.json", + "nb_timestep_played": 3 +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.2/01/episode_times.json b/grid2op/data_test/runner_data/res_agent_1.10.2/01/episode_times.json new file mode 100644 index 000000000..43da57ca1 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.2/01/episode_times.json @@ -0,0 +1,12 @@ +{ + "Agent": { + "total": 0.00012830499872507062 + }, + "Env": { + "apply_act": 0.015697453998654964, + "observation_computation": 0.00317409500166832, + "powerflow_computation": 0.14402169799905096, + "total": 0.16289324699937424 + }, + "total": 0.16597464500046044 +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.2/01/grid2op.info b/grid2op/data_test/runner_data/res_agent_1.10.2/01/grid2op.info new file mode 100644 index 000000000..2cfc68e96 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.2/01/grid2op.info @@ -0,0 +1,3 @@ +{ + "version": "1.10.2" +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.2/01/observations.npz b/grid2op/data_test/runner_data/res_agent_1.10.2/01/observations.npz new file mode 100644 index 000000000..0838268cb Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.2/01/observations.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.2/01/opponent_attack.npz b/grid2op/data_test/runner_data/res_agent_1.10.2/01/opponent_attack.npz new file mode 100644 index 000000000..e05f26912 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.2/01/opponent_attack.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.2/01/other_rewards.json b/grid2op/data_test/runner_data/res_agent_1.10.2/01/other_rewards.json new file mode 100644 index 000000000..ca609c0c6 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.2/01/other_rewards.json @@ -0,0 +1,5 @@ +[ + {}, + {}, + {} +] \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.2/01/rewards.npz b/grid2op/data_test/runner_data/res_agent_1.10.2/01/rewards.npz new file mode 100644 index 000000000..edf4f23cb Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.2/01/rewards.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.2/dict_action_space.json b/grid2op/data_test/runner_data/res_agent_1.10.2/dict_action_space.json new 
file mode 100644 index 000000000..af421492e --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.2/dict_action_space.json @@ -0,0 +1,220 @@ +{ + "_PATH_GRID_CLASSES": null, + "_init_subtype": "grid2op.Action.topologyAction.TopologyAction", + "alarms_area_lines": [], + "alarms_area_names": [], + "alarms_lines_area": {}, + "alertable_line_ids": [], + "alertable_line_names": [], + "assistant_warning_type": null, + "dim_alarms": 0, + "dim_alerts": 0, + "env_name": "rte_case5_example", + "gen_cost_per_MW": [ + 0.0, + 70.0 + ], + "gen_max_ramp_down": [ + 0.0, + 10.0 + ], + "gen_max_ramp_up": [ + 0.0, + 10.0 + ], + "gen_min_downtime": [ + 0, + 4 + ], + "gen_min_uptime": [ + 0, + 4 + ], + "gen_pmax": [ + 10.0, + 30.0 + ], + "gen_pmin": [ + 0.0, + 0.0 + ], + "gen_pos_topo_vect": [ + 4, + 8 + ], + "gen_redispatchable": [ + false, + true + ], + "gen_renewable": [ + true, + false + ], + "gen_shutdown_cost": [ + 0.0, + 1.0 + ], + "gen_startup_cost": [ + 0.0, + 2.0 + ], + "gen_to_sub_pos": [ + 4, + 2 + ], + "gen_to_subid": [ + 0, + 1 + ], + "gen_type": [ + "wind", + "thermal" + ], + "glop_version": "1.10.2", + "grid_layout": { + "sub_0": [ + 0.0, + 0.0 + ], + "sub_1": [ + 0.0, + 400.0 + ], + "sub_2": [ + 200.0, + 400.0 + ], + "sub_3": [ + 400.0, + 400.0 + ], + "sub_4": [ + 400.0, + 0.0 + ] + }, + "line_ex_pos_topo_vect": [ + 6, + 9, + 13, + 18, + 10, + 14, + 15, + 19 + ], + "line_ex_to_sub_pos": [ + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 1 + ], + "line_ex_to_subid": [ + 1, + 2, + 3, + 4, + 2, + 3, + 3, + 4 + ], + "line_or_pos_topo_vect": [ + 0, + 1, + 2, + 3, + 7, + 11, + 12, + 16 + ], + "line_or_to_sub_pos": [ + 0, + 1, + 2, + 3, + 1, + 2, + 3, + 3 + ], + "line_or_to_subid": [ + 0, + 0, + 0, + 0, + 1, + 2, + 2, + 3 + ], + "load_pos_topo_vect": [ + 5, + 17, + 20 + ], + "load_to_sub_pos": [ + 5, + 4, + 2 + ], + "load_to_subid": [ + 0, + 3, + 4 + ], + "n_busbar_per_sub": "2", + "name_gen": [ + "gen_0_0", + "gen_1_1" + ], + "name_line": [ + "0_1_0", + "0_2_1", + "0_3_2", + "0_4_3", + "1_2_4", + "2_3_5", + "2_3_6", + "3_4_7" + ], + "name_load": [ + "load_0_0", + "load_3_1", + "load_4_2" + ], + "name_shunt": [], + "name_storage": [], + "name_sub": [ + "sub_0", + "sub_1", + "sub_2", + "sub_3", + "sub_4" + ], + "shunt_to_subid": [], + "storage_Emax": [], + "storage_Emin": [], + "storage_charging_efficiency": [], + "storage_discharging_efficiency": [], + "storage_loss": [], + "storage_marginal_cost": [], + "storage_max_p_absorb": [], + "storage_max_p_prod": [], + "storage_pos_topo_vect": [], + "storage_to_sub_pos": [], + "storage_to_subid": [], + "storage_type": [], + "sub_info": [ + 6, + 3, + 4, + 5, + 3 + ] +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.2/dict_attack_space.json b/grid2op/data_test/runner_data/res_agent_1.10.2/dict_attack_space.json new file mode 100644 index 000000000..4087cdfeb --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.2/dict_attack_space.json @@ -0,0 +1,220 @@ +{ + "_PATH_GRID_CLASSES": null, + "_init_subtype": "grid2op.Action.dontAct.DontAct", + "alarms_area_lines": [], + "alarms_area_names": [], + "alarms_lines_area": {}, + "alertable_line_ids": [], + "alertable_line_names": [], + "assistant_warning_type": null, + "dim_alarms": 0, + "dim_alerts": 0, + "env_name": "rte_case5_example", + "gen_cost_per_MW": [ + 0.0, + 70.0 + ], + "gen_max_ramp_down": [ + 0.0, + 10.0 + ], + "gen_max_ramp_up": [ + 0.0, + 10.0 + ], + "gen_min_downtime": [ + 0, + 4 + ], + "gen_min_uptime": [ + 0, + 4 + ], + "gen_pmax": [ + 10.0, + 30.0 + ], 
+ "gen_pmin": [ + 0.0, + 0.0 + ], + "gen_pos_topo_vect": [ + 4, + 8 + ], + "gen_redispatchable": [ + false, + true + ], + "gen_renewable": [ + true, + false + ], + "gen_shutdown_cost": [ + 0.0, + 1.0 + ], + "gen_startup_cost": [ + 0.0, + 2.0 + ], + "gen_to_sub_pos": [ + 4, + 2 + ], + "gen_to_subid": [ + 0, + 1 + ], + "gen_type": [ + "wind", + "thermal" + ], + "glop_version": "1.10.2", + "grid_layout": { + "sub_0": [ + 0.0, + 0.0 + ], + "sub_1": [ + 0.0, + 400.0 + ], + "sub_2": [ + 200.0, + 400.0 + ], + "sub_3": [ + 400.0, + 400.0 + ], + "sub_4": [ + 400.0, + 0.0 + ] + }, + "line_ex_pos_topo_vect": [ + 6, + 9, + 13, + 18, + 10, + 14, + 15, + 19 + ], + "line_ex_to_sub_pos": [ + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 1 + ], + "line_ex_to_subid": [ + 1, + 2, + 3, + 4, + 2, + 3, + 3, + 4 + ], + "line_or_pos_topo_vect": [ + 0, + 1, + 2, + 3, + 7, + 11, + 12, + 16 + ], + "line_or_to_sub_pos": [ + 0, + 1, + 2, + 3, + 1, + 2, + 3, + 3 + ], + "line_or_to_subid": [ + 0, + 0, + 0, + 0, + 1, + 2, + 2, + 3 + ], + "load_pos_topo_vect": [ + 5, + 17, + 20 + ], + "load_to_sub_pos": [ + 5, + 4, + 2 + ], + "load_to_subid": [ + 0, + 3, + 4 + ], + "n_busbar_per_sub": "2", + "name_gen": [ + "gen_0_0", + "gen_1_1" + ], + "name_line": [ + "0_1_0", + "0_2_1", + "0_3_2", + "0_4_3", + "1_2_4", + "2_3_5", + "2_3_6", + "3_4_7" + ], + "name_load": [ + "load_0_0", + "load_3_1", + "load_4_2" + ], + "name_shunt": [], + "name_storage": [], + "name_sub": [ + "sub_0", + "sub_1", + "sub_2", + "sub_3", + "sub_4" + ], + "shunt_to_subid": [], + "storage_Emax": [], + "storage_Emin": [], + "storage_charging_efficiency": [], + "storage_discharging_efficiency": [], + "storage_loss": [], + "storage_marginal_cost": [], + "storage_max_p_absorb": [], + "storage_max_p_prod": [], + "storage_pos_topo_vect": [], + "storage_to_sub_pos": [], + "storage_to_subid": [], + "storage_type": [], + "sub_info": [ + 6, + 3, + 4, + 5, + 3 + ] +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.2/dict_env_modification_space.json b/grid2op/data_test/runner_data/res_agent_1.10.2/dict_env_modification_space.json new file mode 100644 index 000000000..6eedf201d --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.2/dict_env_modification_space.json @@ -0,0 +1,220 @@ +{ + "_PATH_GRID_CLASSES": null, + "_init_subtype": "grid2op.Action.completeAction.CompleteAction", + "alarms_area_lines": [], + "alarms_area_names": [], + "alarms_lines_area": {}, + "alertable_line_ids": [], + "alertable_line_names": [], + "assistant_warning_type": null, + "dim_alarms": 0, + "dim_alerts": 0, + "env_name": "rte_case5_example", + "gen_cost_per_MW": [ + 0.0, + 70.0 + ], + "gen_max_ramp_down": [ + 0.0, + 10.0 + ], + "gen_max_ramp_up": [ + 0.0, + 10.0 + ], + "gen_min_downtime": [ + 0, + 4 + ], + "gen_min_uptime": [ + 0, + 4 + ], + "gen_pmax": [ + 10.0, + 30.0 + ], + "gen_pmin": [ + 0.0, + 0.0 + ], + "gen_pos_topo_vect": [ + 4, + 8 + ], + "gen_redispatchable": [ + false, + true + ], + "gen_renewable": [ + true, + false + ], + "gen_shutdown_cost": [ + 0.0, + 1.0 + ], + "gen_startup_cost": [ + 0.0, + 2.0 + ], + "gen_to_sub_pos": [ + 4, + 2 + ], + "gen_to_subid": [ + 0, + 1 + ], + "gen_type": [ + "wind", + "thermal" + ], + "glop_version": "1.10.2", + "grid_layout": { + "sub_0": [ + 0.0, + 0.0 + ], + "sub_1": [ + 0.0, + 400.0 + ], + "sub_2": [ + 200.0, + 400.0 + ], + "sub_3": [ + 400.0, + 400.0 + ], + "sub_4": [ + 400.0, + 0.0 + ] + }, + "line_ex_pos_topo_vect": [ + 6, + 9, + 13, + 18, + 10, + 14, + 15, + 19 + ], + "line_ex_to_sub_pos": [ + 0, + 
0, + 0, + 0, + 1, + 1, + 2, + 1 + ], + "line_ex_to_subid": [ + 1, + 2, + 3, + 4, + 2, + 3, + 3, + 4 + ], + "line_or_pos_topo_vect": [ + 0, + 1, + 2, + 3, + 7, + 11, + 12, + 16 + ], + "line_or_to_sub_pos": [ + 0, + 1, + 2, + 3, + 1, + 2, + 3, + 3 + ], + "line_or_to_subid": [ + 0, + 0, + 0, + 0, + 1, + 2, + 2, + 3 + ], + "load_pos_topo_vect": [ + 5, + 17, + 20 + ], + "load_to_sub_pos": [ + 5, + 4, + 2 + ], + "load_to_subid": [ + 0, + 3, + 4 + ], + "n_busbar_per_sub": "2", + "name_gen": [ + "gen_0_0", + "gen_1_1" + ], + "name_line": [ + "0_1_0", + "0_2_1", + "0_3_2", + "0_4_3", + "1_2_4", + "2_3_5", + "2_3_6", + "3_4_7" + ], + "name_load": [ + "load_0_0", + "load_3_1", + "load_4_2" + ], + "name_shunt": [], + "name_storage": [], + "name_sub": [ + "sub_0", + "sub_1", + "sub_2", + "sub_3", + "sub_4" + ], + "shunt_to_subid": [], + "storage_Emax": [], + "storage_Emin": [], + "storage_charging_efficiency": [], + "storage_discharging_efficiency": [], + "storage_loss": [], + "storage_marginal_cost": [], + "storage_max_p_absorb": [], + "storage_max_p_prod": [], + "storage_pos_topo_vect": [], + "storage_to_sub_pos": [], + "storage_to_subid": [], + "storage_type": [], + "sub_info": [ + 6, + 3, + 4, + 5, + 3 + ] +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.2/dict_observation_space.json b/grid2op/data_test/runner_data/res_agent_1.10.2/dict_observation_space.json new file mode 100644 index 000000000..c1d7dd7d8 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.2/dict_observation_space.json @@ -0,0 +1,220 @@ +{ + "_PATH_GRID_CLASSES": null, + "_init_subtype": "grid2op.Observation.completeObservation.CompleteObservation", + "alarms_area_lines": [], + "alarms_area_names": [], + "alarms_lines_area": {}, + "alertable_line_ids": [], + "alertable_line_names": [], + "assistant_warning_type": null, + "dim_alarms": 0, + "dim_alerts": 0, + "env_name": "rte_case5_example", + "gen_cost_per_MW": [ + 0.0, + 70.0 + ], + "gen_max_ramp_down": [ + 0.0, + 10.0 + ], + "gen_max_ramp_up": [ + 0.0, + 10.0 + ], + "gen_min_downtime": [ + 0, + 4 + ], + "gen_min_uptime": [ + 0, + 4 + ], + "gen_pmax": [ + 10.0, + 30.0 + ], + "gen_pmin": [ + 0.0, + 0.0 + ], + "gen_pos_topo_vect": [ + 4, + 8 + ], + "gen_redispatchable": [ + false, + true + ], + "gen_renewable": [ + true, + false + ], + "gen_shutdown_cost": [ + 0.0, + 1.0 + ], + "gen_startup_cost": [ + 0.0, + 2.0 + ], + "gen_to_sub_pos": [ + 4, + 2 + ], + "gen_to_subid": [ + 0, + 1 + ], + "gen_type": [ + "wind", + "thermal" + ], + "glop_version": "1.10.2", + "grid_layout": { + "sub_0": [ + 0.0, + 0.0 + ], + "sub_1": [ + 0.0, + 400.0 + ], + "sub_2": [ + 200.0, + 400.0 + ], + "sub_3": [ + 400.0, + 400.0 + ], + "sub_4": [ + 400.0, + 0.0 + ] + }, + "line_ex_pos_topo_vect": [ + 6, + 9, + 13, + 18, + 10, + 14, + 15, + 19 + ], + "line_ex_to_sub_pos": [ + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 1 + ], + "line_ex_to_subid": [ + 1, + 2, + 3, + 4, + 2, + 3, + 3, + 4 + ], + "line_or_pos_topo_vect": [ + 0, + 1, + 2, + 3, + 7, + 11, + 12, + 16 + ], + "line_or_to_sub_pos": [ + 0, + 1, + 2, + 3, + 1, + 2, + 3, + 3 + ], + "line_or_to_subid": [ + 0, + 0, + 0, + 0, + 1, + 2, + 2, + 3 + ], + "load_pos_topo_vect": [ + 5, + 17, + 20 + ], + "load_to_sub_pos": [ + 5, + 4, + 2 + ], + "load_to_subid": [ + 0, + 3, + 4 + ], + "n_busbar_per_sub": "2", + "name_gen": [ + "gen_0_0", + "gen_1_1" + ], + "name_line": [ + "0_1_0", + "0_2_1", + "0_3_2", + "0_4_3", + "1_2_4", + "2_3_5", + "2_3_6", + "3_4_7" + ], + "name_load": [ + "load_0_0", + "load_3_1", + "load_4_2" + 
], + "name_shunt": [], + "name_storage": [], + "name_sub": [ + "sub_0", + "sub_1", + "sub_2", + "sub_3", + "sub_4" + ], + "shunt_to_subid": [], + "storage_Emax": [], + "storage_Emin": [], + "storage_charging_efficiency": [], + "storage_discharging_efficiency": [], + "storage_loss": [], + "storage_marginal_cost": [], + "storage_max_p_absorb": [], + "storage_max_p_prod": [], + "storage_pos_topo_vect": [], + "storage_to_sub_pos": [], + "storage_to_subid": [], + "storage_type": [], + "sub_info": [ + 6, + 3, + 4, + 5, + 3 + ] +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.3/00/_parameters.json b/grid2op/data_test/runner_data/res_agent_1.10.3/00/_parameters.json new file mode 100644 index 000000000..46aaa9417 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.3/00/_parameters.json @@ -0,0 +1,24 @@ +{ + "ACTIVATE_STORAGE_LOSS": true, + "ALARM_BEST_TIME": 12, + "ALARM_WINDOW_SIZE": 12, + "ALERT_TIME_WINDOW": 12, + "ALLOW_DISPATCH_GEN_SWITCH_OFF": true, + "ENV_DC": false, + "FORECAST_DC": false, + "HARD_OVERFLOW_THRESHOLD": 2.0, + "IGNORE_INITIAL_STATE_TIME_SERIE": 0, + "IGNORE_MIN_UP_DOWN_TIME": true, + "INIT_STORAGE_CAPACITY": 0.5, + "LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION": false, + "MAX_LINE_STATUS_CHANGED": 1, + "MAX_SIMULATE_PER_EPISODE": -1, + "MAX_SIMULATE_PER_STEP": -1, + "MAX_SUB_CHANGED": 1, + "NB_TIMESTEP_COOLDOWN_LINE": 0, + "NB_TIMESTEP_COOLDOWN_SUB": 0, + "NB_TIMESTEP_OVERFLOW_ALLOWED": 2, + "NB_TIMESTEP_RECONNECTION": 10, + "NO_OVERFLOW_DISCONNECTION": false, + "SOFT_OVERFLOW_THRESHOLD": 1.0 +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.3/00/actions.npz b/grid2op/data_test/runner_data/res_agent_1.10.3/00/actions.npz new file mode 100644 index 000000000..bc9ea2f51 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.3/00/actions.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.3/00/agent_exec_times.npz b/grid2op/data_test/runner_data/res_agent_1.10.3/00/agent_exec_times.npz new file mode 100644 index 000000000..e57d87966 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.3/00/agent_exec_times.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.3/00/disc_lines_cascading_failure.npz b/grid2op/data_test/runner_data/res_agent_1.10.3/00/disc_lines_cascading_failure.npz new file mode 100644 index 000000000..298421483 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.3/00/disc_lines_cascading_failure.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.3/00/env_modifications.npz b/grid2op/data_test/runner_data/res_agent_1.10.3/00/env_modifications.npz new file mode 100644 index 000000000..6e06d3e28 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.3/00/env_modifications.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.3/00/episode_meta.json b/grid2op/data_test/runner_data/res_agent_1.10.3/00/episode_meta.json new file mode 100644 index 000000000..ef2d68f7d --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.3/00/episode_meta.json @@ -0,0 +1,11 @@ +{ + "agent_seed": null, + "backend_type": "PandaPowerBackend_rte_case5_example", + "chronics_max_timestep": "100", + "chronics_path": "/home/donnotben/Documents/grid2op_dev/grid2op/data/rte_case5_example/chronics/00", + "cumulative_reward": 7.773218154907227, + "env_seed": null, + "env_type": "Environment_rte_case5_example", + "grid_path": 
"/home/donnotben/Documents/grid2op_dev/grid2op/data/rte_case5_example/grid.json", + "nb_timestep_played": 3 +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.3/00/episode_times.json b/grid2op/data_test/runner_data/res_agent_1.10.3/00/episode_times.json new file mode 100644 index 000000000..5d45e02a3 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.3/00/episode_times.json @@ -0,0 +1,12 @@ +{ + "Agent": { + "total": 7.87390000027699e-05 + }, + "Env": { + "apply_act": 0.01019013399996993, + "observation_computation": 0.002153088000000025, + "powerflow_computation": 0.053345044999957736, + "total": 0.06568826699992769 + }, + "total": 0.06722843900001862 +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.3/00/grid2op.info b/grid2op/data_test/runner_data/res_agent_1.10.3/00/grid2op.info new file mode 100644 index 000000000..0a5b47a30 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.3/00/grid2op.info @@ -0,0 +1,3 @@ +{ + "version": "1.10.3" +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.3/00/observations.npz b/grid2op/data_test/runner_data/res_agent_1.10.3/00/observations.npz new file mode 100644 index 000000000..b1d0ef112 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.3/00/observations.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.3/00/opponent_attack.npz b/grid2op/data_test/runner_data/res_agent_1.10.3/00/opponent_attack.npz new file mode 100644 index 000000000..e05f26912 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.3/00/opponent_attack.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.3/00/other_rewards.json b/grid2op/data_test/runner_data/res_agent_1.10.3/00/other_rewards.json new file mode 100644 index 000000000..ca609c0c6 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.3/00/other_rewards.json @@ -0,0 +1,5 @@ +[ + {}, + {}, + {} +] \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.3/00/rewards.npz b/grid2op/data_test/runner_data/res_agent_1.10.3/00/rewards.npz new file mode 100644 index 000000000..b1c0ccf8f Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.3/00/rewards.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.3/01/_parameters.json b/grid2op/data_test/runner_data/res_agent_1.10.3/01/_parameters.json new file mode 100644 index 000000000..46aaa9417 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.3/01/_parameters.json @@ -0,0 +1,24 @@ +{ + "ACTIVATE_STORAGE_LOSS": true, + "ALARM_BEST_TIME": 12, + "ALARM_WINDOW_SIZE": 12, + "ALERT_TIME_WINDOW": 12, + "ALLOW_DISPATCH_GEN_SWITCH_OFF": true, + "ENV_DC": false, + "FORECAST_DC": false, + "HARD_OVERFLOW_THRESHOLD": 2.0, + "IGNORE_INITIAL_STATE_TIME_SERIE": 0, + "IGNORE_MIN_UP_DOWN_TIME": true, + "INIT_STORAGE_CAPACITY": 0.5, + "LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION": false, + "MAX_LINE_STATUS_CHANGED": 1, + "MAX_SIMULATE_PER_EPISODE": -1, + "MAX_SIMULATE_PER_STEP": -1, + "MAX_SUB_CHANGED": 1, + "NB_TIMESTEP_COOLDOWN_LINE": 0, + "NB_TIMESTEP_COOLDOWN_SUB": 0, + "NB_TIMESTEP_OVERFLOW_ALLOWED": 2, + "NB_TIMESTEP_RECONNECTION": 10, + "NO_OVERFLOW_DISCONNECTION": false, + "SOFT_OVERFLOW_THRESHOLD": 1.0 +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.3/01/actions.npz b/grid2op/data_test/runner_data/res_agent_1.10.3/01/actions.npz new file mode 
100644 index 000000000..628314c15 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.3/01/actions.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.3/01/agent_exec_times.npz b/grid2op/data_test/runner_data/res_agent_1.10.3/01/agent_exec_times.npz new file mode 100644 index 000000000..7ac791996 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.3/01/agent_exec_times.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.3/01/disc_lines_cascading_failure.npz b/grid2op/data_test/runner_data/res_agent_1.10.3/01/disc_lines_cascading_failure.npz new file mode 100644 index 000000000..028559449 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.3/01/disc_lines_cascading_failure.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.3/01/env_modifications.npz b/grid2op/data_test/runner_data/res_agent_1.10.3/01/env_modifications.npz new file mode 100644 index 000000000..33e0daca8 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.3/01/env_modifications.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.3/01/episode_meta.json b/grid2op/data_test/runner_data/res_agent_1.10.3/01/episode_meta.json new file mode 100644 index 000000000..518205185 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.3/01/episode_meta.json @@ -0,0 +1,11 @@ +{ + "agent_seed": null, + "backend_type": "PandaPowerBackend_rte_case5_example", + "chronics_max_timestep": "100", + "chronics_path": "/home/donnotben/Documents/grid2op_dev/grid2op/data/rte_case5_example/chronics/01", + "cumulative_reward": 19.197418212890625, + "env_seed": null, + "env_type": "Environment_rte_case5_example", + "grid_path": "/home/donnotben/Documents/grid2op_dev/grid2op/data/rte_case5_example/grid.json", + "nb_timestep_played": 4 +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.3/01/episode_times.json b/grid2op/data_test/runner_data/res_agent_1.10.3/01/episode_times.json new file mode 100644 index 000000000..f54e7013d --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.3/01/episode_times.json @@ -0,0 +1,12 @@ +{ + "Agent": { + "total": 0.00010630600002059509 + }, + "Env": { + "apply_act": 0.011138778999907117, + "observation_computation": 0.0032093399999553185, + "powerflow_computation": 0.08027153299991596, + "total": 0.0946196519997784 + }, + "total": 0.0964781280000011 +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.3/01/grid2op.info b/grid2op/data_test/runner_data/res_agent_1.10.3/01/grid2op.info new file mode 100644 index 000000000..0a5b47a30 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.3/01/grid2op.info @@ -0,0 +1,3 @@ +{ + "version": "1.10.3" +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.3/01/observations.npz b/grid2op/data_test/runner_data/res_agent_1.10.3/01/observations.npz new file mode 100644 index 000000000..af56b6e9b Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.3/01/observations.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.3/01/opponent_attack.npz b/grid2op/data_test/runner_data/res_agent_1.10.3/01/opponent_attack.npz new file mode 100644 index 000000000..e05f26912 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.3/01/opponent_attack.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.3/01/other_rewards.json 
b/grid2op/data_test/runner_data/res_agent_1.10.3/01/other_rewards.json new file mode 100644 index 000000000..99bb44b9b --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.3/01/other_rewards.json @@ -0,0 +1,6 @@ +[ + {}, + {}, + {}, + {} +] \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.3/01/rewards.npz b/grid2op/data_test/runner_data/res_agent_1.10.3/01/rewards.npz new file mode 100644 index 000000000..265413287 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.10.3/01/rewards.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.10.3/dict_action_space.json b/grid2op/data_test/runner_data/res_agent_1.10.3/dict_action_space.json new file mode 100644 index 000000000..9fa976c0b --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.3/dict_action_space.json @@ -0,0 +1,220 @@ +{ + "_PATH_GRID_CLASSES": null, + "_init_subtype": "grid2op.Action.topologyAction.TopologyAction", + "alarms_area_lines": [], + "alarms_area_names": [], + "alarms_lines_area": {}, + "alertable_line_ids": [], + "alertable_line_names": [], + "assistant_warning_type": null, + "dim_alarms": 0, + "dim_alerts": 0, + "env_name": "rte_case5_example", + "gen_cost_per_MW": [ + 0.0, + 70.0 + ], + "gen_max_ramp_down": [ + 0.0, + 10.0 + ], + "gen_max_ramp_up": [ + 0.0, + 10.0 + ], + "gen_min_downtime": [ + 0, + 4 + ], + "gen_min_uptime": [ + 0, + 4 + ], + "gen_pmax": [ + 10.0, + 30.0 + ], + "gen_pmin": [ + 0.0, + 0.0 + ], + "gen_pos_topo_vect": [ + 4, + 8 + ], + "gen_redispatchable": [ + false, + true + ], + "gen_renewable": [ + true, + false + ], + "gen_shutdown_cost": [ + 0.0, + 1.0 + ], + "gen_startup_cost": [ + 0.0, + 2.0 + ], + "gen_to_sub_pos": [ + 4, + 2 + ], + "gen_to_subid": [ + 0, + 1 + ], + "gen_type": [ + "wind", + "thermal" + ], + "glop_version": "1.10.3", + "grid_layout": { + "sub_0": [ + 0.0, + 0.0 + ], + "sub_1": [ + 0.0, + 400.0 + ], + "sub_2": [ + 200.0, + 400.0 + ], + "sub_3": [ + 400.0, + 400.0 + ], + "sub_4": [ + 400.0, + 0.0 + ] + }, + "line_ex_pos_topo_vect": [ + 6, + 9, + 13, + 18, + 10, + 14, + 15, + 19 + ], + "line_ex_to_sub_pos": [ + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 1 + ], + "line_ex_to_subid": [ + 1, + 2, + 3, + 4, + 2, + 3, + 3, + 4 + ], + "line_or_pos_topo_vect": [ + 0, + 1, + 2, + 3, + 7, + 11, + 12, + 16 + ], + "line_or_to_sub_pos": [ + 0, + 1, + 2, + 3, + 1, + 2, + 3, + 3 + ], + "line_or_to_subid": [ + 0, + 0, + 0, + 0, + 1, + 2, + 2, + 3 + ], + "load_pos_topo_vect": [ + 5, + 17, + 20 + ], + "load_to_sub_pos": [ + 5, + 4, + 2 + ], + "load_to_subid": [ + 0, + 3, + 4 + ], + "n_busbar_per_sub": "2", + "name_gen": [ + "gen_0_0", + "gen_1_1" + ], + "name_line": [ + "0_1_0", + "0_2_1", + "0_3_2", + "0_4_3", + "1_2_4", + "2_3_5", + "2_3_6", + "3_4_7" + ], + "name_load": [ + "load_0_0", + "load_3_1", + "load_4_2" + ], + "name_shunt": [], + "name_storage": [], + "name_sub": [ + "sub_0", + "sub_1", + "sub_2", + "sub_3", + "sub_4" + ], + "shunt_to_subid": [], + "storage_Emax": [], + "storage_Emin": [], + "storage_charging_efficiency": [], + "storage_discharging_efficiency": [], + "storage_loss": [], + "storage_marginal_cost": [], + "storage_max_p_absorb": [], + "storage_max_p_prod": [], + "storage_pos_topo_vect": [], + "storage_to_sub_pos": [], + "storage_to_subid": [], + "storage_type": [], + "sub_info": [ + 6, + 3, + 4, + 5, + 3 + ] +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.3/dict_attack_space.json b/grid2op/data_test/runner_data/res_agent_1.10.3/dict_attack_space.json 
new file mode 100644 index 000000000..31f90cf6e --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.3/dict_attack_space.json @@ -0,0 +1,220 @@ +{ + "_PATH_GRID_CLASSES": null, + "_init_subtype": "grid2op.Action.dontAct.DontAct", + "alarms_area_lines": [], + "alarms_area_names": [], + "alarms_lines_area": {}, + "alertable_line_ids": [], + "alertable_line_names": [], + "assistant_warning_type": null, + "dim_alarms": 0, + "dim_alerts": 0, + "env_name": "rte_case5_example", + "gen_cost_per_MW": [ + 0.0, + 70.0 + ], + "gen_max_ramp_down": [ + 0.0, + 10.0 + ], + "gen_max_ramp_up": [ + 0.0, + 10.0 + ], + "gen_min_downtime": [ + 0, + 4 + ], + "gen_min_uptime": [ + 0, + 4 + ], + "gen_pmax": [ + 10.0, + 30.0 + ], + "gen_pmin": [ + 0.0, + 0.0 + ], + "gen_pos_topo_vect": [ + 4, + 8 + ], + "gen_redispatchable": [ + false, + true + ], + "gen_renewable": [ + true, + false + ], + "gen_shutdown_cost": [ + 0.0, + 1.0 + ], + "gen_startup_cost": [ + 0.0, + 2.0 + ], + "gen_to_sub_pos": [ + 4, + 2 + ], + "gen_to_subid": [ + 0, + 1 + ], + "gen_type": [ + "wind", + "thermal" + ], + "glop_version": "1.10.3", + "grid_layout": { + "sub_0": [ + 0.0, + 0.0 + ], + "sub_1": [ + 0.0, + 400.0 + ], + "sub_2": [ + 200.0, + 400.0 + ], + "sub_3": [ + 400.0, + 400.0 + ], + "sub_4": [ + 400.0, + 0.0 + ] + }, + "line_ex_pos_topo_vect": [ + 6, + 9, + 13, + 18, + 10, + 14, + 15, + 19 + ], + "line_ex_to_sub_pos": [ + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 1 + ], + "line_ex_to_subid": [ + 1, + 2, + 3, + 4, + 2, + 3, + 3, + 4 + ], + "line_or_pos_topo_vect": [ + 0, + 1, + 2, + 3, + 7, + 11, + 12, + 16 + ], + "line_or_to_sub_pos": [ + 0, + 1, + 2, + 3, + 1, + 2, + 3, + 3 + ], + "line_or_to_subid": [ + 0, + 0, + 0, + 0, + 1, + 2, + 2, + 3 + ], + "load_pos_topo_vect": [ + 5, + 17, + 20 + ], + "load_to_sub_pos": [ + 5, + 4, + 2 + ], + "load_to_subid": [ + 0, + 3, + 4 + ], + "n_busbar_per_sub": "2", + "name_gen": [ + "gen_0_0", + "gen_1_1" + ], + "name_line": [ + "0_1_0", + "0_2_1", + "0_3_2", + "0_4_3", + "1_2_4", + "2_3_5", + "2_3_6", + "3_4_7" + ], + "name_load": [ + "load_0_0", + "load_3_1", + "load_4_2" + ], + "name_shunt": [], + "name_storage": [], + "name_sub": [ + "sub_0", + "sub_1", + "sub_2", + "sub_3", + "sub_4" + ], + "shunt_to_subid": [], + "storage_Emax": [], + "storage_Emin": [], + "storage_charging_efficiency": [], + "storage_discharging_efficiency": [], + "storage_loss": [], + "storage_marginal_cost": [], + "storage_max_p_absorb": [], + "storage_max_p_prod": [], + "storage_pos_topo_vect": [], + "storage_to_sub_pos": [], + "storage_to_subid": [], + "storage_type": [], + "sub_info": [ + 6, + 3, + 4, + 5, + 3 + ] +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.3/dict_env_modification_space.json b/grid2op/data_test/runner_data/res_agent_1.10.3/dict_env_modification_space.json new file mode 100644 index 000000000..1b27c9a41 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.3/dict_env_modification_space.json @@ -0,0 +1,220 @@ +{ + "_PATH_GRID_CLASSES": null, + "_init_subtype": "grid2op.Action.completeAction.CompleteAction", + "alarms_area_lines": [], + "alarms_area_names": [], + "alarms_lines_area": {}, + "alertable_line_ids": [], + "alertable_line_names": [], + "assistant_warning_type": null, + "dim_alarms": 0, + "dim_alerts": 0, + "env_name": "rte_case5_example", + "gen_cost_per_MW": [ + 0.0, + 70.0 + ], + "gen_max_ramp_down": [ + 0.0, + 10.0 + ], + "gen_max_ramp_up": [ + 0.0, + 10.0 + ], + "gen_min_downtime": [ + 0, + 4 + ], + "gen_min_uptime": [ + 0, + 4 + ], + 
"gen_pmax": [ + 10.0, + 30.0 + ], + "gen_pmin": [ + 0.0, + 0.0 + ], + "gen_pos_topo_vect": [ + 4, + 8 + ], + "gen_redispatchable": [ + false, + true + ], + "gen_renewable": [ + true, + false + ], + "gen_shutdown_cost": [ + 0.0, + 1.0 + ], + "gen_startup_cost": [ + 0.0, + 2.0 + ], + "gen_to_sub_pos": [ + 4, + 2 + ], + "gen_to_subid": [ + 0, + 1 + ], + "gen_type": [ + "wind", + "thermal" + ], + "glop_version": "1.10.3", + "grid_layout": { + "sub_0": [ + 0.0, + 0.0 + ], + "sub_1": [ + 0.0, + 400.0 + ], + "sub_2": [ + 200.0, + 400.0 + ], + "sub_3": [ + 400.0, + 400.0 + ], + "sub_4": [ + 400.0, + 0.0 + ] + }, + "line_ex_pos_topo_vect": [ + 6, + 9, + 13, + 18, + 10, + 14, + 15, + 19 + ], + "line_ex_to_sub_pos": [ + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 1 + ], + "line_ex_to_subid": [ + 1, + 2, + 3, + 4, + 2, + 3, + 3, + 4 + ], + "line_or_pos_topo_vect": [ + 0, + 1, + 2, + 3, + 7, + 11, + 12, + 16 + ], + "line_or_to_sub_pos": [ + 0, + 1, + 2, + 3, + 1, + 2, + 3, + 3 + ], + "line_or_to_subid": [ + 0, + 0, + 0, + 0, + 1, + 2, + 2, + 3 + ], + "load_pos_topo_vect": [ + 5, + 17, + 20 + ], + "load_to_sub_pos": [ + 5, + 4, + 2 + ], + "load_to_subid": [ + 0, + 3, + 4 + ], + "n_busbar_per_sub": "2", + "name_gen": [ + "gen_0_0", + "gen_1_1" + ], + "name_line": [ + "0_1_0", + "0_2_1", + "0_3_2", + "0_4_3", + "1_2_4", + "2_3_5", + "2_3_6", + "3_4_7" + ], + "name_load": [ + "load_0_0", + "load_3_1", + "load_4_2" + ], + "name_shunt": [], + "name_storage": [], + "name_sub": [ + "sub_0", + "sub_1", + "sub_2", + "sub_3", + "sub_4" + ], + "shunt_to_subid": [], + "storage_Emax": [], + "storage_Emin": [], + "storage_charging_efficiency": [], + "storage_discharging_efficiency": [], + "storage_loss": [], + "storage_marginal_cost": [], + "storage_max_p_absorb": [], + "storage_max_p_prod": [], + "storage_pos_topo_vect": [], + "storage_to_sub_pos": [], + "storage_to_subid": [], + "storage_type": [], + "sub_info": [ + 6, + 3, + 4, + 5, + 3 + ] +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.10.3/dict_observation_space.json b/grid2op/data_test/runner_data/res_agent_1.10.3/dict_observation_space.json new file mode 100644 index 000000000..e341acfff --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.10.3/dict_observation_space.json @@ -0,0 +1,220 @@ +{ + "_PATH_GRID_CLASSES": null, + "_init_subtype": "grid2op.Observation.completeObservation.CompleteObservation", + "alarms_area_lines": [], + "alarms_area_names": [], + "alarms_lines_area": {}, + "alertable_line_ids": [], + "alertable_line_names": [], + "assistant_warning_type": null, + "dim_alarms": 0, + "dim_alerts": 0, + "env_name": "rte_case5_example", + "gen_cost_per_MW": [ + 0.0, + 70.0 + ], + "gen_max_ramp_down": [ + 0.0, + 10.0 + ], + "gen_max_ramp_up": [ + 0.0, + 10.0 + ], + "gen_min_downtime": [ + 0, + 4 + ], + "gen_min_uptime": [ + 0, + 4 + ], + "gen_pmax": [ + 10.0, + 30.0 + ], + "gen_pmin": [ + 0.0, + 0.0 + ], + "gen_pos_topo_vect": [ + 4, + 8 + ], + "gen_redispatchable": [ + false, + true + ], + "gen_renewable": [ + true, + false + ], + "gen_shutdown_cost": [ + 0.0, + 1.0 + ], + "gen_startup_cost": [ + 0.0, + 2.0 + ], + "gen_to_sub_pos": [ + 4, + 2 + ], + "gen_to_subid": [ + 0, + 1 + ], + "gen_type": [ + "wind", + "thermal" + ], + "glop_version": "1.10.3", + "grid_layout": { + "sub_0": [ + 0.0, + 0.0 + ], + "sub_1": [ + 0.0, + 400.0 + ], + "sub_2": [ + 200.0, + 400.0 + ], + "sub_3": [ + 400.0, + 400.0 + ], + "sub_4": [ + 400.0, + 0.0 + ] + }, + "line_ex_pos_topo_vect": [ + 6, + 9, + 13, + 18, + 10, + 14, + 15, + 19 + ], 
+ "line_ex_to_sub_pos": [ + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 1 + ], + "line_ex_to_subid": [ + 1, + 2, + 3, + 4, + 2, + 3, + 3, + 4 + ], + "line_or_pos_topo_vect": [ + 0, + 1, + 2, + 3, + 7, + 11, + 12, + 16 + ], + "line_or_to_sub_pos": [ + 0, + 1, + 2, + 3, + 1, + 2, + 3, + 3 + ], + "line_or_to_subid": [ + 0, + 0, + 0, + 0, + 1, + 2, + 2, + 3 + ], + "load_pos_topo_vect": [ + 5, + 17, + 20 + ], + "load_to_sub_pos": [ + 5, + 4, + 2 + ], + "load_to_subid": [ + 0, + 3, + 4 + ], + "n_busbar_per_sub": "2", + "name_gen": [ + "gen_0_0", + "gen_1_1" + ], + "name_line": [ + "0_1_0", + "0_2_1", + "0_3_2", + "0_4_3", + "1_2_4", + "2_3_5", + "2_3_6", + "3_4_7" + ], + "name_load": [ + "load_0_0", + "load_3_1", + "load_4_2" + ], + "name_shunt": [], + "name_storage": [], + "name_sub": [ + "sub_0", + "sub_1", + "sub_2", + "sub_3", + "sub_4" + ], + "shunt_to_subid": [], + "storage_Emax": [], + "storage_Emin": [], + "storage_charging_efficiency": [], + "storage_discharging_efficiency": [], + "storage_loss": [], + "storage_marginal_cost": [], + "storage_max_p_absorb": [], + "storage_max_p_prod": [], + "storage_pos_topo_vect": [], + "storage_to_sub_pos": [], + "storage_to_subid": [], + "storage_type": [], + "sub_info": [ + 6, + 3, + 4, + 5, + 3 + ] +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.9.8/00/_parameters.json b/grid2op/data_test/runner_data/res_agent_1.9.8/00/_parameters.json new file mode 100644 index 000000000..ce75edee3 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.9.8/00/_parameters.json @@ -0,0 +1,23 @@ +{ + "ACTIVATE_STORAGE_LOSS": true, + "ALARM_BEST_TIME": 12, + "ALARM_WINDOW_SIZE": 12, + "ALERT_TIME_WINDOW": 12, + "ALLOW_DISPATCH_GEN_SWITCH_OFF": true, + "ENV_DC": false, + "FORECAST_DC": false, + "HARD_OVERFLOW_THRESHOLD": 2.0, + "IGNORE_MIN_UP_DOWN_TIME": true, + "INIT_STORAGE_CAPACITY": 0.5, + "LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION": false, + "MAX_LINE_STATUS_CHANGED": 1, + "MAX_SIMULATE_PER_EPISODE": -1, + "MAX_SIMULATE_PER_STEP": -1, + "MAX_SUB_CHANGED": 1, + "NB_TIMESTEP_COOLDOWN_LINE": 0, + "NB_TIMESTEP_COOLDOWN_SUB": 0, + "NB_TIMESTEP_OVERFLOW_ALLOWED": 2, + "NB_TIMESTEP_RECONNECTION": 10, + "NO_OVERFLOW_DISCONNECTION": false, + "SOFT_OVERFLOW_THRESHOLD": 1.0 +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.9.8/00/actions.npz b/grid2op/data_test/runner_data/res_agent_1.9.8/00/actions.npz new file mode 100644 index 000000000..0aba5e957 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.9.8/00/actions.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.9.8/00/agent_exec_times.npz b/grid2op/data_test/runner_data/res_agent_1.9.8/00/agent_exec_times.npz new file mode 100644 index 000000000..b090cd369 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.9.8/00/agent_exec_times.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.9.8/00/disc_lines_cascading_failure.npz b/grid2op/data_test/runner_data/res_agent_1.9.8/00/disc_lines_cascading_failure.npz new file mode 100644 index 000000000..7113b4a16 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.9.8/00/disc_lines_cascading_failure.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.9.8/00/env_modifications.npz b/grid2op/data_test/runner_data/res_agent_1.9.8/00/env_modifications.npz new file mode 100644 index 000000000..eb44eab67 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.9.8/00/env_modifications.npz differ diff --git 
a/grid2op/data_test/runner_data/res_agent_1.9.8/00/episode_meta.json b/grid2op/data_test/runner_data/res_agent_1.9.8/00/episode_meta.json new file mode 100644 index 000000000..ad6412386 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.9.8/00/episode_meta.json @@ -0,0 +1,11 @@ +{ + "agent_seed": null, + "backend_type": "PandaPowerBackend_rte_case5_example", + "chronics_max_timestep": "100", + "chronics_path": "/home/donnotben/Documents/grid2op_dev/grid2op/data/rte_case5_example/chronics/00", + "cumulative_reward": 21.58446502685547, + "env_seed": null, + "env_type": "Environment_rte_case5_example", + "grid_path": "/home/donnotben/Documents/grid2op_dev/grid2op/data/rte_case5_example/grid.json", + "nb_timestep_played": 6 +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.9.8/00/episode_times.json b/grid2op/data_test/runner_data/res_agent_1.9.8/00/episode_times.json new file mode 100644 index 000000000..88d92b134 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.9.8/00/episode_times.json @@ -0,0 +1,12 @@ +{ + "Agent": { + "total": 0.0001740990001053433 + }, + "Env": { + "apply_act": 0.026373431998763408, + "observation_computation": 0.005404876999818953, + "powerflow_computation": 0.14027613799953542, + "total": 0.17205444699811778 + }, + "total": 0.17500740600007703 +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.9.8/00/grid2op.info b/grid2op/data_test/runner_data/res_agent_1.9.8/00/grid2op.info new file mode 100644 index 000000000..8abd352fc --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.9.8/00/grid2op.info @@ -0,0 +1,3 @@ +{ + "version": "1.9.8" +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.9.8/00/observations.npz b/grid2op/data_test/runner_data/res_agent_1.9.8/00/observations.npz new file mode 100644 index 000000000..019e531f5 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.9.8/00/observations.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.9.8/00/opponent_attack.npz b/grid2op/data_test/runner_data/res_agent_1.9.8/00/opponent_attack.npz new file mode 100644 index 000000000..e05f26912 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.9.8/00/opponent_attack.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.9.8/00/other_rewards.json b/grid2op/data_test/runner_data/res_agent_1.9.8/00/other_rewards.json new file mode 100644 index 000000000..6c9b492a2 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.9.8/00/other_rewards.json @@ -0,0 +1,8 @@ +[ + {}, + {}, + {}, + {}, + {}, + {} +] \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.9.8/00/rewards.npz b/grid2op/data_test/runner_data/res_agent_1.9.8/00/rewards.npz new file mode 100644 index 000000000..f3199c1c7 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.9.8/00/rewards.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.9.8/01/_parameters.json b/grid2op/data_test/runner_data/res_agent_1.9.8/01/_parameters.json new file mode 100644 index 000000000..ce75edee3 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.9.8/01/_parameters.json @@ -0,0 +1,23 @@ +{ + "ACTIVATE_STORAGE_LOSS": true, + "ALARM_BEST_TIME": 12, + "ALARM_WINDOW_SIZE": 12, + "ALERT_TIME_WINDOW": 12, + "ALLOW_DISPATCH_GEN_SWITCH_OFF": true, + "ENV_DC": false, + "FORECAST_DC": false, + "HARD_OVERFLOW_THRESHOLD": 2.0, + "IGNORE_MIN_UP_DOWN_TIME": true, + 
"INIT_STORAGE_CAPACITY": 0.5, + "LIMIT_INFEASIBLE_CURTAILMENT_STORAGE_ACTION": false, + "MAX_LINE_STATUS_CHANGED": 1, + "MAX_SIMULATE_PER_EPISODE": -1, + "MAX_SIMULATE_PER_STEP": -1, + "MAX_SUB_CHANGED": 1, + "NB_TIMESTEP_COOLDOWN_LINE": 0, + "NB_TIMESTEP_COOLDOWN_SUB": 0, + "NB_TIMESTEP_OVERFLOW_ALLOWED": 2, + "NB_TIMESTEP_RECONNECTION": 10, + "NO_OVERFLOW_DISCONNECTION": false, + "SOFT_OVERFLOW_THRESHOLD": 1.0 +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.9.8/01/actions.npz b/grid2op/data_test/runner_data/res_agent_1.9.8/01/actions.npz new file mode 100644 index 000000000..a645006a0 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.9.8/01/actions.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.9.8/01/agent_exec_times.npz b/grid2op/data_test/runner_data/res_agent_1.9.8/01/agent_exec_times.npz new file mode 100644 index 000000000..be08c9245 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.9.8/01/agent_exec_times.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.9.8/01/disc_lines_cascading_failure.npz b/grid2op/data_test/runner_data/res_agent_1.9.8/01/disc_lines_cascading_failure.npz new file mode 100644 index 000000000..c923cede4 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.9.8/01/disc_lines_cascading_failure.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.9.8/01/env_modifications.npz b/grid2op/data_test/runner_data/res_agent_1.9.8/01/env_modifications.npz new file mode 100644 index 000000000..e20bcda66 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.9.8/01/env_modifications.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.9.8/01/episode_meta.json b/grid2op/data_test/runner_data/res_agent_1.9.8/01/episode_meta.json new file mode 100644 index 000000000..e098a76ab --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.9.8/01/episode_meta.json @@ -0,0 +1,11 @@ +{ + "agent_seed": null, + "backend_type": "PandaPowerBackend_rte_case5_example", + "chronics_max_timestep": "100", + "chronics_path": "/home/donnotben/Documents/grid2op_dev/grid2op/data/rte_case5_example/chronics/01", + "cumulative_reward": 5.492160797119141, + "env_seed": null, + "env_type": "Environment_rte_case5_example", + "grid_path": "/home/donnotben/Documents/grid2op_dev/grid2op/data/rte_case5_example/grid.json", + "nb_timestep_played": 2 +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.9.8/01/episode_times.json b/grid2op/data_test/runner_data/res_agent_1.9.8/01/episode_times.json new file mode 100644 index 000000000..7c189ca7b --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.9.8/01/episode_times.json @@ -0,0 +1,12 @@ +{ + "Agent": { + "total": 4.793700009031454e-05 + }, + "Env": { + "apply_act": 0.009099756000068737, + "observation_computation": 0.0010339299997212947, + "powerflow_computation": 0.07825034700090328, + "total": 0.08838403300069331 + }, + "total": 0.08947097999953257 +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.9.8/01/grid2op.info b/grid2op/data_test/runner_data/res_agent_1.9.8/01/grid2op.info new file mode 100644 index 000000000..8abd352fc --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.9.8/01/grid2op.info @@ -0,0 +1,3 @@ +{ + "version": "1.9.8" +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.9.8/01/observations.npz 
b/grid2op/data_test/runner_data/res_agent_1.9.8/01/observations.npz new file mode 100644 index 000000000..352d05a83 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.9.8/01/observations.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.9.8/01/opponent_attack.npz b/grid2op/data_test/runner_data/res_agent_1.9.8/01/opponent_attack.npz new file mode 100644 index 000000000..e05f26912 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.9.8/01/opponent_attack.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.9.8/01/other_rewards.json b/grid2op/data_test/runner_data/res_agent_1.9.8/01/other_rewards.json new file mode 100644 index 000000000..a12aa7872 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.9.8/01/other_rewards.json @@ -0,0 +1,4 @@ +[ + {}, + {} +] \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.9.8/01/rewards.npz b/grid2op/data_test/runner_data/res_agent_1.9.8/01/rewards.npz new file mode 100644 index 000000000..d79af1545 Binary files /dev/null and b/grid2op/data_test/runner_data/res_agent_1.9.8/01/rewards.npz differ diff --git a/grid2op/data_test/runner_data/res_agent_1.9.8/dict_action_space.json b/grid2op/data_test/runner_data/res_agent_1.9.8/dict_action_space.json new file mode 100644 index 000000000..4b9f7aa3b --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.9.8/dict_action_space.json @@ -0,0 +1,219 @@ +{ + "_PATH_ENV": null, + "_init_subtype": "grid2op.Action.topologyAction.TopologyAction", + "alarms_area_lines": [], + "alarms_area_names": [], + "alarms_lines_area": {}, + "alertable_line_ids": [], + "alertable_line_names": [], + "assistant_warning_type": null, + "dim_alarms": 0, + "dim_alerts": 0, + "env_name": "rte_case5_example", + "gen_cost_per_MW": [ + 0.0, + 70.0 + ], + "gen_max_ramp_down": [ + 0.0, + 10.0 + ], + "gen_max_ramp_up": [ + 0.0, + 10.0 + ], + "gen_min_downtime": [ + 0, + 4 + ], + "gen_min_uptime": [ + 0, + 4 + ], + "gen_pmax": [ + 10.0, + 30.0 + ], + "gen_pmin": [ + 0.0, + 0.0 + ], + "gen_pos_topo_vect": [ + 4, + 8 + ], + "gen_redispatchable": [ + false, + true + ], + "gen_renewable": [ + true, + false + ], + "gen_shutdown_cost": [ + 0.0, + 1.0 + ], + "gen_startup_cost": [ + 0.0, + 2.0 + ], + "gen_to_sub_pos": [ + 4, + 2 + ], + "gen_to_subid": [ + 0, + 1 + ], + "gen_type": [ + "wind", + "thermal" + ], + "glop_version": "1.9.8", + "grid_layout": { + "sub_0": [ + 0.0, + 0.0 + ], + "sub_1": [ + 0.0, + 400.0 + ], + "sub_2": [ + 200.0, + 400.0 + ], + "sub_3": [ + 400.0, + 400.0 + ], + "sub_4": [ + 400.0, + 0.0 + ] + }, + "line_ex_pos_topo_vect": [ + 6, + 9, + 13, + 18, + 10, + 14, + 15, + 19 + ], + "line_ex_to_sub_pos": [ + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 1 + ], + "line_ex_to_subid": [ + 1, + 2, + 3, + 4, + 2, + 3, + 3, + 4 + ], + "line_or_pos_topo_vect": [ + 0, + 1, + 2, + 3, + 7, + 11, + 12, + 16 + ], + "line_or_to_sub_pos": [ + 0, + 1, + 2, + 3, + 1, + 2, + 3, + 3 + ], + "line_or_to_subid": [ + 0, + 0, + 0, + 0, + 1, + 2, + 2, + 3 + ], + "load_pos_topo_vect": [ + 5, + 17, + 20 + ], + "load_to_sub_pos": [ + 5, + 4, + 2 + ], + "load_to_subid": [ + 0, + 3, + 4 + ], + "name_gen": [ + "gen_0_0", + "gen_1_1" + ], + "name_line": [ + "0_1_0", + "0_2_1", + "0_3_2", + "0_4_3", + "1_2_4", + "2_3_5", + "2_3_6", + "3_4_7" + ], + "name_load": [ + "load_0_0", + "load_3_1", + "load_4_2" + ], + "name_shunt": [], + "name_storage": [], + "name_sub": [ + "sub_0", + "sub_1", + "sub_2", + "sub_3", + "sub_4" + ], + "shunt_to_subid": [], + "storage_Emax": [], 
+ "storage_Emin": [], + "storage_charging_efficiency": [], + "storage_discharging_efficiency": [], + "storage_loss": [], + "storage_marginal_cost": [], + "storage_max_p_absorb": [], + "storage_max_p_prod": [], + "storage_pos_topo_vect": [], + "storage_to_sub_pos": [], + "storage_to_subid": [], + "storage_type": [], + "sub_info": [ + 6, + 3, + 4, + 5, + 3 + ] +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.9.8/dict_attack_space.json b/grid2op/data_test/runner_data/res_agent_1.9.8/dict_attack_space.json new file mode 100644 index 000000000..ab32ad6f2 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.9.8/dict_attack_space.json @@ -0,0 +1,219 @@ +{ + "_PATH_ENV": null, + "_init_subtype": "grid2op.Action.dontAct.DontAct", + "alarms_area_lines": [], + "alarms_area_names": [], + "alarms_lines_area": {}, + "alertable_line_ids": [], + "alertable_line_names": [], + "assistant_warning_type": null, + "dim_alarms": 0, + "dim_alerts": 0, + "env_name": "rte_case5_example", + "gen_cost_per_MW": [ + 0.0, + 70.0 + ], + "gen_max_ramp_down": [ + 0.0, + 10.0 + ], + "gen_max_ramp_up": [ + 0.0, + 10.0 + ], + "gen_min_downtime": [ + 0, + 4 + ], + "gen_min_uptime": [ + 0, + 4 + ], + "gen_pmax": [ + 10.0, + 30.0 + ], + "gen_pmin": [ + 0.0, + 0.0 + ], + "gen_pos_topo_vect": [ + 4, + 8 + ], + "gen_redispatchable": [ + false, + true + ], + "gen_renewable": [ + true, + false + ], + "gen_shutdown_cost": [ + 0.0, + 1.0 + ], + "gen_startup_cost": [ + 0.0, + 2.0 + ], + "gen_to_sub_pos": [ + 4, + 2 + ], + "gen_to_subid": [ + 0, + 1 + ], + "gen_type": [ + "wind", + "thermal" + ], + "glop_version": "1.9.8", + "grid_layout": { + "sub_0": [ + 0.0, + 0.0 + ], + "sub_1": [ + 0.0, + 400.0 + ], + "sub_2": [ + 200.0, + 400.0 + ], + "sub_3": [ + 400.0, + 400.0 + ], + "sub_4": [ + 400.0, + 0.0 + ] + }, + "line_ex_pos_topo_vect": [ + 6, + 9, + 13, + 18, + 10, + 14, + 15, + 19 + ], + "line_ex_to_sub_pos": [ + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 1 + ], + "line_ex_to_subid": [ + 1, + 2, + 3, + 4, + 2, + 3, + 3, + 4 + ], + "line_or_pos_topo_vect": [ + 0, + 1, + 2, + 3, + 7, + 11, + 12, + 16 + ], + "line_or_to_sub_pos": [ + 0, + 1, + 2, + 3, + 1, + 2, + 3, + 3 + ], + "line_or_to_subid": [ + 0, + 0, + 0, + 0, + 1, + 2, + 2, + 3 + ], + "load_pos_topo_vect": [ + 5, + 17, + 20 + ], + "load_to_sub_pos": [ + 5, + 4, + 2 + ], + "load_to_subid": [ + 0, + 3, + 4 + ], + "name_gen": [ + "gen_0_0", + "gen_1_1" + ], + "name_line": [ + "0_1_0", + "0_2_1", + "0_3_2", + "0_4_3", + "1_2_4", + "2_3_5", + "2_3_6", + "3_4_7" + ], + "name_load": [ + "load_0_0", + "load_3_1", + "load_4_2" + ], + "name_shunt": [], + "name_storage": [], + "name_sub": [ + "sub_0", + "sub_1", + "sub_2", + "sub_3", + "sub_4" + ], + "shunt_to_subid": [], + "storage_Emax": [], + "storage_Emin": [], + "storage_charging_efficiency": [], + "storage_discharging_efficiency": [], + "storage_loss": [], + "storage_marginal_cost": [], + "storage_max_p_absorb": [], + "storage_max_p_prod": [], + "storage_pos_topo_vect": [], + "storage_to_sub_pos": [], + "storage_to_subid": [], + "storage_type": [], + "sub_info": [ + 6, + 3, + 4, + 5, + 3 + ] +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.9.8/dict_env_modification_space.json b/grid2op/data_test/runner_data/res_agent_1.9.8/dict_env_modification_space.json new file mode 100644 index 000000000..a62b0e0d6 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.9.8/dict_env_modification_space.json @@ -0,0 +1,219 @@ +{ + "_PATH_ENV": null, + "_init_subtype": 
"grid2op.Action.completeAction.CompleteAction", + "alarms_area_lines": [], + "alarms_area_names": [], + "alarms_lines_area": {}, + "alertable_line_ids": [], + "alertable_line_names": [], + "assistant_warning_type": null, + "dim_alarms": 0, + "dim_alerts": 0, + "env_name": "rte_case5_example", + "gen_cost_per_MW": [ + 0.0, + 70.0 + ], + "gen_max_ramp_down": [ + 0.0, + 10.0 + ], + "gen_max_ramp_up": [ + 0.0, + 10.0 + ], + "gen_min_downtime": [ + 0, + 4 + ], + "gen_min_uptime": [ + 0, + 4 + ], + "gen_pmax": [ + 10.0, + 30.0 + ], + "gen_pmin": [ + 0.0, + 0.0 + ], + "gen_pos_topo_vect": [ + 4, + 8 + ], + "gen_redispatchable": [ + false, + true + ], + "gen_renewable": [ + true, + false + ], + "gen_shutdown_cost": [ + 0.0, + 1.0 + ], + "gen_startup_cost": [ + 0.0, + 2.0 + ], + "gen_to_sub_pos": [ + 4, + 2 + ], + "gen_to_subid": [ + 0, + 1 + ], + "gen_type": [ + "wind", + "thermal" + ], + "glop_version": "1.9.8", + "grid_layout": { + "sub_0": [ + 0.0, + 0.0 + ], + "sub_1": [ + 0.0, + 400.0 + ], + "sub_2": [ + 200.0, + 400.0 + ], + "sub_3": [ + 400.0, + 400.0 + ], + "sub_4": [ + 400.0, + 0.0 + ] + }, + "line_ex_pos_topo_vect": [ + 6, + 9, + 13, + 18, + 10, + 14, + 15, + 19 + ], + "line_ex_to_sub_pos": [ + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 1 + ], + "line_ex_to_subid": [ + 1, + 2, + 3, + 4, + 2, + 3, + 3, + 4 + ], + "line_or_pos_topo_vect": [ + 0, + 1, + 2, + 3, + 7, + 11, + 12, + 16 + ], + "line_or_to_sub_pos": [ + 0, + 1, + 2, + 3, + 1, + 2, + 3, + 3 + ], + "line_or_to_subid": [ + 0, + 0, + 0, + 0, + 1, + 2, + 2, + 3 + ], + "load_pos_topo_vect": [ + 5, + 17, + 20 + ], + "load_to_sub_pos": [ + 5, + 4, + 2 + ], + "load_to_subid": [ + 0, + 3, + 4 + ], + "name_gen": [ + "gen_0_0", + "gen_1_1" + ], + "name_line": [ + "0_1_0", + "0_2_1", + "0_3_2", + "0_4_3", + "1_2_4", + "2_3_5", + "2_3_6", + "3_4_7" + ], + "name_load": [ + "load_0_0", + "load_3_1", + "load_4_2" + ], + "name_shunt": [], + "name_storage": [], + "name_sub": [ + "sub_0", + "sub_1", + "sub_2", + "sub_3", + "sub_4" + ], + "shunt_to_subid": [], + "storage_Emax": [], + "storage_Emin": [], + "storage_charging_efficiency": [], + "storage_discharging_efficiency": [], + "storage_loss": [], + "storage_marginal_cost": [], + "storage_max_p_absorb": [], + "storage_max_p_prod": [], + "storage_pos_topo_vect": [], + "storage_to_sub_pos": [], + "storage_to_subid": [], + "storage_type": [], + "sub_info": [ + 6, + 3, + 4, + 5, + 3 + ] +} \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.9.8/dict_observation_space.json b/grid2op/data_test/runner_data/res_agent_1.9.8/dict_observation_space.json new file mode 100644 index 000000000..7194a0223 --- /dev/null +++ b/grid2op/data_test/runner_data/res_agent_1.9.8/dict_observation_space.json @@ -0,0 +1,219 @@ +{ + "_PATH_ENV": null, + "_init_subtype": "grid2op.Observation.completeObservation.CompleteObservation", + "alarms_area_lines": [], + "alarms_area_names": [], + "alarms_lines_area": {}, + "alertable_line_ids": [], + "alertable_line_names": [], + "assistant_warning_type": null, + "dim_alarms": 0, + "dim_alerts": 0, + "env_name": "rte_case5_example", + "gen_cost_per_MW": [ + 0.0, + 70.0 + ], + "gen_max_ramp_down": [ + 0.0, + 10.0 + ], + "gen_max_ramp_up": [ + 0.0, + 10.0 + ], + "gen_min_downtime": [ + 0, + 4 + ], + "gen_min_uptime": [ + 0, + 4 + ], + "gen_pmax": [ + 10.0, + 30.0 + ], + "gen_pmin": [ + 0.0, + 0.0 + ], + "gen_pos_topo_vect": [ + 4, + 8 + ], + "gen_redispatchable": [ + false, + true + ], + "gen_renewable": [ + true, + false + ], + "gen_shutdown_cost": [ + 0.0, + 1.0 
+ ], + "gen_startup_cost": [ + 0.0, + 2.0 + ], + "gen_to_sub_pos": [ + 4, + 2 + ], + "gen_to_subid": [ + 0, + 1 + ], + "gen_type": [ + "wind", + "thermal" + ], + "glop_version": "1.9.8", + "grid_layout": { + "sub_0": [ + 0.0, + 0.0 + ], + "sub_1": [ + 0.0, + 400.0 + ], + "sub_2": [ + 200.0, + 400.0 + ], + "sub_3": [ + 400.0, + 400.0 + ], + "sub_4": [ + 400.0, + 0.0 + ] + }, + "line_ex_pos_topo_vect": [ + 6, + 9, + 13, + 18, + 10, + 14, + 15, + 19 + ], + "line_ex_to_sub_pos": [ + 0, + 0, + 0, + 0, + 1, + 1, + 2, + 1 + ], + "line_ex_to_subid": [ + 1, + 2, + 3, + 4, + 2, + 3, + 3, + 4 + ], + "line_or_pos_topo_vect": [ + 0, + 1, + 2, + 3, + 7, + 11, + 12, + 16 + ], + "line_or_to_sub_pos": [ + 0, + 1, + 2, + 3, + 1, + 2, + 3, + 3 + ], + "line_or_to_subid": [ + 0, + 0, + 0, + 0, + 1, + 2, + 2, + 3 + ], + "load_pos_topo_vect": [ + 5, + 17, + 20 + ], + "load_to_sub_pos": [ + 5, + 4, + 2 + ], + "load_to_subid": [ + 0, + 3, + 4 + ], + "name_gen": [ + "gen_0_0", + "gen_1_1" + ], + "name_line": [ + "0_1_0", + "0_2_1", + "0_3_2", + "0_4_3", + "1_2_4", + "2_3_5", + "2_3_6", + "3_4_7" + ], + "name_load": [ + "load_0_0", + "load_3_1", + "load_4_2" + ], + "name_shunt": [], + "name_storage": [], + "name_sub": [ + "sub_0", + "sub_1", + "sub_2", + "sub_3", + "sub_4" + ], + "shunt_to_subid": [], + "storage_Emax": [], + "storage_Emin": [], + "storage_charging_efficiency": [], + "storage_discharging_efficiency": [], + "storage_loss": [], + "storage_marginal_cost": [], + "storage_max_p_absorb": [], + "storage_max_p_prod": [], + "storage_pos_topo_vect": [], + "storage_to_sub_pos": [], + "storage_to_subid": [], + "storage_type": [], + "sub_info": [ + 6, + 3, + 4, + 5, + 3 + ] +} \ No newline at end of file diff --git a/grid2op/data_test/test_detailed_topo/test_topo_connections1.txt b/grid2op/data_test/test_detailed_topo/test_topo_connections1.txt new file mode 100644 index 000000000..512c0acb3 --- /dev/null +++ b/grid2op/data_test/test_detailed_topo/test_topo_connections1.txt @@ -0,0 +1,59 @@ +switch_id node1 node2 open +0 0 10 0 +1 0 32 1 +2 0 34 1 +3 0 16 1 +4 0 4 1 +5 0 18 0 +6 0 39 0 +7 0 36 1 +8 0 26 0 +9 21 0 0 +10 1 12 0 +11 1 6 1 +12 1 14 1 +13 1 30 1 +14 1 8 0 +15 1 28 1 +16 1 24 0 +17 38 1 0 +18 23 1 0 +19 2 20 0 +20 2 10 1 +21 2 32 0 +22 2 34 0 +23 2 16 0 +24 2 4 0 +25 2 18 1 +26 2 41 0 +27 2 36 0 +28 2 26 1 +29 3 12 1 +30 3 6 0 +31 3 14 0 +32 3 30 0 +33 3 8 1 +34 3 22 0 +35 3 28 0 +36 3 24 1 +37 40 3 0 +38 4 5 0 +39 6 7 0 +40 8 9 0 +41 10 11 0 +42 12 13 0 +43 14 15 0 +44 16 17 0 +45 18 19 0 +46 20 21 1 +47 22 23 1 +48 24 25 0 +49 26 27 0 +50 28 29 0 +51 30 31 0 +52 32 33 0 +53 34 35 0 +54 36 37 0 +55 39 38 1 +56 41 40 0 + diff --git a/grid2op/data_test/test_detailed_topo/test_topo_elements1.txt b/grid2op/data_test/test_detailed_topo/test_topo_elements1.txt new file mode 100644 index 000000000..771f9d1a7 --- /dev/null +++ b/grid2op/data_test/test_detailed_topo/test_topo_elements1.txt @@ -0,0 +1,44 @@ +node element_id +0 'bbs' +1 'bbs' +2 'bbs' +3 'bbs' +4 '' +5 'el' +6 '' +7 'el' +8 '' +9 'el' +10 '' +11 'el' +12 '' +13 'el' +14 '' +15 'el' +16 '' +17 'el' +18 '' +19 'el' +20 '' +21 '' +22 '' +23 '' +24 '' +25 'el' +26 '' +27 'el' +28 '' +29 'el' +30 '' +31 'el' +32 '' +33 'el' +34 '' +35 'el' +36 '' +37 'el' +38 '' +39 '' +40 '' +41 '' + diff --git a/grid2op/data_test/test_detailed_topo/test_topo_valid1.txt b/grid2op/data_test/test_detailed_topo/test_topo_valid1.txt new file mode 100644 index 000000000..5d02eefda --- /dev/null +++ b/grid2op/data_test/test_detailed_topo/test_topo_valid1.txt @@ -0,0 +1,44 
@@ +topo_id bus_id node +0 0 0 +0 0 39 +0 0 10 +0 0 11 +0 0 18 +0 0 19 +0 0 21 +0 0 26 +0 0 27 +0 1 1 +0 1 38 +0 1 8 +0 1 9 +0 1 12 +0 1 13 +0 1 23 +0 1 24 +0 1 25 +0 2 2 +0 2 3 +0 2 4 +0 2 5 +0 2 6 +0 2 7 +0 2 14 +0 2 15 +0 2 16 +0 2 17 +0 2 20 +0 2 22 +0 2 28 +0 2 29 +0 2 30 +0 2 31 +0 2 32 +0 2 33 +0 2 34 +0 2 35 +0 2 36 +0 2 37 +0 2 40 +0 2 41 + diff --git a/grid2op/gym_compat/box_gym_actspace.py b/grid2op/gym_compat/box_gym_actspace.py index 1838a4f33..0516fcf70 100644 --- a/grid2op/gym_compat/box_gym_actspace.py +++ b/grid2op/gym_compat/box_gym_actspace.py @@ -6,7 +6,7 @@ # SPDX-License-Identifier: MPL-2.0 # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. -from typing import Tuple +from typing import Literal, Dict, Tuple, Any, Optional import copy import warnings import numpy as np @@ -28,6 +28,18 @@ GYM_AVAILABLE, GYMNASIUM_AVAILABLE) +POSSIBLE_KEYS = Literal["redispatch", + "curtail", + "curtail_mw", + "set_storage", + "set_bus", + "change_bus", + "set_line_status", + "change_line_status", + "raise_alert", + "raise_alarm" + ] + class __AuxBoxGymActSpace: """ @@ -77,17 +89,17 @@ class __AuxBoxGymActSpace: .. code-block:: python - gym_env.observation_space = BoxGymActSpace(env.observation_space, + gym_env.action_space = BoxGymActSpace(env.action_space, attr_to_keep=['redispatch', "curtail"]) You can also apply some basic transformation to the attribute of the action. This can be done with: .. code-block:: python - gym_env.observation_space = BoxGymActSpace(env.observation_space, - attr_to_keep=['redispatch', "curtail"], - multiply={"redispatch": env.gen_max_ramp_up}, - add={"redispatch": 0.5 * env.gen_max_ramp_up}) + gym_env.action_space = BoxGymActSpace(env.action_space, + attr_to_keep=['redispatch', "curtail"], + multiply={"redispatch": env.gen_max_ramp_up}, + add={"redispatch": 0.5 * env.gen_max_ramp_up}) In the above example, the resulting "redispatch" part of the vector will be given by the following formula: `grid2op_act = gym_act * multiply + add` @@ -190,11 +202,21 @@ def from_gym(self, gym_action): def __init__( self, - grid2op_action_space, - attr_to_keep=ALL_ATTR_CONT, - add=None, - multiply=None, - functs=None, + grid2op_action_space: ActionSpace, + attr_to_keep: Optional[Tuple[Literal["set_line_status"], + Literal["change_line_status"], + Literal["set_bus"], + Literal["change_bus"], + Literal["redispatch"], + Literal["set_storage"], + Literal["curtail"], + Literal["curtail_mw"], + Literal["raise_alarm"], + Literal["raise_alert"], + ]]=ALL_ATTR_CONT, + add: Optional[Dict[str, Any]]=None, + multiply: Optional[Dict[str, Any]]=None, + functs: Optional[Dict[str, Any]]=None, ): if not isinstance(grid2op_action_space, ActionSpace): raise RuntimeError( @@ -225,45 +247,45 @@ def __init__( self._attr_to_keep = sorted(attr_to_keep) - act_sp = grid2op_action_space + act_sp_cls = type(grid2op_action_space) self._act_space = copy.deepcopy(grid2op_action_space) - low_gen = -1.0 * act_sp.gen_max_ramp_down[act_sp.gen_redispatchable] - high_gen = 1.0 * act_sp.gen_max_ramp_up[act_sp.gen_redispatchable] - nb_redisp = act_sp.gen_redispatchable.sum() - nb_curtail = act_sp.gen_renewable.sum() + low_gen = -1.0 * act_sp_cls.gen_max_ramp_down[act_sp_cls.gen_redispatchable] + high_gen = 1.0 * act_sp_cls.gen_max_ramp_up[act_sp_cls.gen_redispatchable] + nb_redisp = act_sp_cls.gen_redispatchable.sum() + nb_curtail = act_sp_cls.gen_renewable.sum() curtail = np.full(shape=(nb_curtail,), fill_value=0.0, dtype=dt_float) curtail_mw = 
np.full(shape=(nb_curtail,), fill_value=0.0, dtype=dt_float) self._dict_properties = { "set_line_status": ( - np.full(shape=(act_sp.n_line,), fill_value=-1, dtype=dt_int), - np.full(shape=(act_sp.n_line,), fill_value=1, dtype=dt_int), - (act_sp.n_line,), + np.full(shape=(act_sp_cls.n_line,), fill_value=-1, dtype=dt_int), + np.full(shape=(act_sp_cls.n_line,), fill_value=1, dtype=dt_int), + (act_sp_cls.n_line,), dt_int, ), "change_line_status": ( - np.full(shape=(act_sp.n_line,), fill_value=0, dtype=dt_int), - np.full(shape=(act_sp.n_line,), fill_value=1, dtype=dt_int), - (act_sp.n_line,), + np.full(shape=(act_sp_cls.n_line,), fill_value=0, dtype=dt_int), + np.full(shape=(act_sp_cls.n_line,), fill_value=1, dtype=dt_int), + (act_sp_cls.n_line,), dt_int, ), "set_bus": ( - np.full(shape=(act_sp.dim_topo,), fill_value=-1, dtype=dt_int), - np.full(shape=(act_sp.dim_topo,), fill_value=1, dtype=dt_int), - (act_sp.dim_topo,), + np.full(shape=(act_sp_cls.dim_topo,), fill_value=-1, dtype=dt_int), + np.full(shape=(act_sp_cls.dim_topo,), fill_value=act_sp_cls.n_busbar_per_sub, dtype=dt_int), + (act_sp_cls.dim_topo,), dt_int, ), "change_bus": ( - np.full(shape=(act_sp.dim_topo,), fill_value=0, dtype=dt_int), - np.full(shape=(act_sp.dim_topo,), fill_value=1, dtype=dt_int), - (act_sp.dim_topo,), + np.full(shape=(act_sp_cls.dim_topo,), fill_value=0, dtype=dt_int), + np.full(shape=(act_sp_cls.dim_topo,), fill_value=1, dtype=dt_int), + (act_sp_cls.dim_topo,), dt_int, ), "redispatch": (low_gen, high_gen, (nb_redisp,), dt_float), "set_storage": ( - -1.0 * act_sp.storage_max_p_prod, - 1.0 * act_sp.storage_max_p_absorb, - (act_sp.n_storage,), + -1.0 * act_sp_cls.storage_max_p_prod, + 1.0 * act_sp_cls.storage_max_p_absorb, + (act_sp_cls.n_storage,), dt_float, ), "curtail": ( @@ -274,20 +296,20 @@ def __init__( ), "curtail_mw": ( curtail_mw, - 1.0 * act_sp.gen_pmax[act_sp.gen_renewable], + 1.0 * act_sp_cls.gen_pmax[act_sp_cls.gen_renewable], (nb_curtail,), dt_float, ), "raise_alarm": ( - np.full(shape=(act_sp.dim_alarms,), fill_value=0, dtype=dt_int), - np.full(shape=(act_sp.dim_alarms,), fill_value=1, dtype=dt_int), - (act_sp.dim_alarms,), + np.full(shape=(act_sp_cls.dim_alarms,), fill_value=0, dtype=dt_int), + np.full(shape=(act_sp_cls.dim_alarms,), fill_value=1, dtype=dt_int), + (act_sp_cls.dim_alarms,), dt_int, ), "raise_alert": ( - np.full(shape=(act_sp.dim_alerts,), fill_value=0, dtype=dt_int), - np.full(shape=(act_sp.dim_alerts,), fill_value=1, dtype=dt_int), - (act_sp.dim_alerts,), + np.full(shape=(act_sp_cls.dim_alerts,), fill_value=0, dtype=dt_int), + np.full(shape=(act_sp_cls.dim_alerts,), fill_value=1, dtype=dt_int), + (act_sp_cls.dim_alerts,), dt_int, ), } @@ -449,7 +471,7 @@ def _get_info(self, functs): if el in self._multiply: # special case if a 0 were entered arr_ = 1.0 * self._multiply[el] - is_nzero = arr_ != 0.0 + is_nzero = np.abs(arr_) >= 1e-7 low_ = 1.0 * low_.astype(dtype) high_ = 1.0 * high_.astype(dtype) @@ -520,7 +542,7 @@ def _handle_attribute(self, res, gym_act_this, attr_nm): setattr(res, attr_nm, gym_act_this) return res - def get_indexes(self, key: str) -> Tuple[int, int]: + def get_indexes(self, key: POSSIBLE_KEYS) -> Tuple[int, int]: """Allows to retrieve the indexes of the gym action that are concerned by the attribute name `key` given in input. 
@@ -563,7 +585,7 @@ def get_indexes(self, key: str) -> Tuple[int, int]: prev = where_to_put raise Grid2OpException(error_msg) - def from_gym(self, gym_act): + def from_gym(self, gym_act: np.ndarray) -> BaseAction: """ This is the function that is called to transform a gym action (in this case a numpy array!) sent by the agent @@ -607,10 +629,14 @@ def from_gym(self, gym_act): prev = where_to_put return res - def close(self): + def close(self) -> None: + """If you override this class, this function is called when the GymEnv is deleted. + + You can use it to free some memory if needed, but there is nothing to do in the general case. + """ pass - def normalize_attr(self, attr_nm: str): + def normalize_attr(self, attr_nm: POSSIBLE_KEYS)-> None: """ This function normalizes the part of the space that corresponds to the attribute `attr_nm`. @@ -654,7 +680,7 @@ def normalize_attr(self, attr_nm: str): both_finite &= curr_high > curr_low if (~both_finite).any(): - warnings.warn(f"The normalization of attribute \"{both_finite}\" cannot be performed entirely as " + warnings.warn(f"The normalization of attribute \"{attr_tmp}\" cannot be performed entirely as " f"there are some non finite value, or `high == `low` " f"for some components.") diff --git a/grid2op/gym_compat/box_gym_obsspace.py b/grid2op/gym_compat/box_gym_obsspace.py index d0aecf761..eefe71893 100644 --- a/grid2op/gym_compat/box_gym_obsspace.py +++ b/grid2op/gym_compat/box_gym_obsspace.py @@ -214,9 +214,12 @@ def __init__( self._attr_to_keep = sorted(attr_to_keep) ob_sp = grid2op_observation_space - tol_redisp = ( - ob_sp.obs_env._tol_poly - ) # add to gen_p otherwise ... well it can crash + ob_sp_cls = type(grid2op_observation_space) + # add to gen_p otherwise ... well it can crash + if ob_sp.obs_env is not None: + tol_redisp = ob_sp.obs_env._tol_poly + else: + tol_redisp = 1e-2 extra_for_losses = _compute_extra_power_for_losses(ob_sp) self._dict_properties = { @@ -263,113 +266,113 @@ def __init__( dt_int, ), "gen_p": ( - np.full(shape=(ob_sp.n_gen,), fill_value=0.0, dtype=dt_float) + np.full(shape=(ob_sp_cls.n_gen,), fill_value=0.0, dtype=dt_float) - tol_redisp - extra_for_losses, - ob_sp.gen_pmax + tol_redisp + extra_for_losses, - (ob_sp.n_gen,), + ob_sp_cls.gen_pmax + tol_redisp + extra_for_losses, + (ob_sp_cls.n_gen,), dt_float, ), "gen_q": ( - np.full(shape=(ob_sp.n_gen,), fill_value=-np.inf, dtype=dt_float), - np.full(shape=(ob_sp.n_gen,), fill_value=np.inf, dtype=dt_float), - (ob_sp.n_gen,), + np.full(shape=(ob_sp_cls.n_gen,), fill_value=-np.inf, dtype=dt_float), + np.full(shape=(ob_sp_cls.n_gen,), fill_value=np.inf, dtype=dt_float), + (ob_sp_cls.n_gen,), dt_float, ), "gen_v": ( - np.full(shape=(ob_sp.n_gen,), fill_value=0.0, dtype=dt_float), - np.full(shape=(ob_sp.n_gen,), fill_value=np.inf, dtype=dt_float), - (ob_sp.n_gen,), + np.full(shape=(ob_sp_cls.n_gen,), fill_value=0.0, dtype=dt_float), + np.full(shape=(ob_sp_cls.n_gen,), fill_value=np.inf, dtype=dt_float), + (ob_sp_cls.n_gen,), dt_float, ), "gen_margin_up": ( - np.full(shape=(ob_sp.n_gen,), fill_value=0.0, dtype=dt_float), - 1.0 * ob_sp.gen_max_ramp_up, - (ob_sp.n_gen,), + np.full(shape=(ob_sp_cls.n_gen,), fill_value=0.0, dtype=dt_float), + 1.0 * ob_sp_cls.gen_max_ramp_up, + (ob_sp_cls.n_gen,), dt_float, ), "gen_margin_down": ( - np.full(shape=(ob_sp.n_gen,), fill_value=0.0, dtype=dt_float), - 1.0 * ob_sp.gen_max_ramp_down, - (ob_sp.n_gen,), + np.full(shape=(ob_sp_cls.n_gen,), fill_value=0.0, dtype=dt_float), + 1.0 * ob_sp_cls.gen_max_ramp_down, + (ob_sp_cls.n_gen,), 
dt_float, ), "gen_theta": ( - np.full(shape=(ob_sp.n_gen,), fill_value=-180., dtype=dt_float), - np.full(shape=(ob_sp.n_gen,), fill_value=180., dtype=dt_float), - (ob_sp.n_gen,), + np.full(shape=(ob_sp_cls.n_gen,), fill_value=-180., dtype=dt_float), + np.full(shape=(ob_sp_cls.n_gen,), fill_value=180., dtype=dt_float), + (ob_sp_cls.n_gen,), dt_float, ), "load_p": ( - np.full(shape=(ob_sp.n_load,), fill_value=-np.inf, dtype=dt_float), - np.full(shape=(ob_sp.n_load,), fill_value=+np.inf, dtype=dt_float), - (ob_sp.n_load,), + np.full(shape=(ob_sp_cls.n_load,), fill_value=-np.inf, dtype=dt_float), + np.full(shape=(ob_sp_cls.n_load,), fill_value=+np.inf, dtype=dt_float), + (ob_sp_cls.n_load,), dt_float, ), "load_q": ( - np.full(shape=(ob_sp.n_load,), fill_value=-np.inf, dtype=dt_float), - np.full(shape=(ob_sp.n_load,), fill_value=+np.inf, dtype=dt_float), - (ob_sp.n_load,), + np.full(shape=(ob_sp_cls.n_load,), fill_value=-np.inf, dtype=dt_float), + np.full(shape=(ob_sp_cls.n_load,), fill_value=+np.inf, dtype=dt_float), + (ob_sp_cls.n_load,), dt_float, ), "load_v": ( - np.full(shape=(ob_sp.n_load,), fill_value=0.0, dtype=dt_float), - np.full(shape=(ob_sp.n_load,), fill_value=np.inf, dtype=dt_float), - (ob_sp.n_load,), + np.full(shape=(ob_sp_cls.n_load,), fill_value=0.0, dtype=dt_float), + np.full(shape=(ob_sp_cls.n_load,), fill_value=np.inf, dtype=dt_float), + (ob_sp_cls.n_load,), dt_float, ), "load_theta": ( - np.full(shape=(ob_sp.n_load,), fill_value=-180., dtype=dt_float), - np.full(shape=(ob_sp.n_load,), fill_value=180., dtype=dt_float), - (ob_sp.n_load,), + np.full(shape=(ob_sp_cls.n_load,), fill_value=-180., dtype=dt_float), + np.full(shape=(ob_sp_cls.n_load,), fill_value=180., dtype=dt_float), + (ob_sp_cls.n_load,), dt_float, ), "p_or": ( - np.full(shape=(ob_sp.n_line,), fill_value=-np.inf, dtype=dt_float), - np.full(shape=(ob_sp.n_line,), fill_value=np.inf, dtype=dt_float), - (ob_sp.n_line,), + np.full(shape=(ob_sp_cls.n_line,), fill_value=-np.inf, dtype=dt_float), + np.full(shape=(ob_sp_cls.n_line,), fill_value=np.inf, dtype=dt_float), + (ob_sp_cls.n_line,), dt_float, ), "q_or": ( - np.full(shape=(ob_sp.n_line,), fill_value=-np.inf, dtype=dt_float), - np.full(shape=(ob_sp.n_line,), fill_value=np.inf, dtype=dt_float), - (ob_sp.n_line,), + np.full(shape=(ob_sp_cls.n_line,), fill_value=-np.inf, dtype=dt_float), + np.full(shape=(ob_sp_cls.n_line,), fill_value=np.inf, dtype=dt_float), + (ob_sp_cls.n_line,), dt_float, ), "a_or": ( - np.full(shape=(ob_sp.n_line,), fill_value=0.0, dtype=dt_float), - np.full(shape=(ob_sp.n_line,), fill_value=np.inf, dtype=dt_float), - (ob_sp.n_line,), + np.full(shape=(ob_sp_cls.n_line,), fill_value=0.0, dtype=dt_float), + np.full(shape=(ob_sp_cls.n_line,), fill_value=np.inf, dtype=dt_float), + (ob_sp_cls.n_line,), dt_float, ), "v_or": ( - np.full(shape=(ob_sp.n_line,), fill_value=0.0, dtype=dt_float), - np.full(shape=(ob_sp.n_line,), fill_value=np.inf, dtype=dt_float), - (ob_sp.n_line,), + np.full(shape=(ob_sp_cls.n_line,), fill_value=0.0, dtype=dt_float), + np.full(shape=(ob_sp_cls.n_line,), fill_value=np.inf, dtype=dt_float), + (ob_sp_cls.n_line,), dt_float, ), "theta_or": ( - np.full(shape=(ob_sp.n_line,), fill_value=-180., dtype=dt_float), - np.full(shape=(ob_sp.n_line,), fill_value=180., dtype=dt_float), - (ob_sp.n_line,), + np.full(shape=(ob_sp_cls.n_line,), fill_value=-180., dtype=dt_float), + np.full(shape=(ob_sp_cls.n_line,), fill_value=180., dtype=dt_float), + (ob_sp_cls.n_line,), dt_float, ), "p_ex": ( - np.full(shape=(ob_sp.n_line,), 
fill_value=-np.inf, dtype=dt_float), - np.full(shape=(ob_sp.n_line,), fill_value=np.inf, dtype=dt_float), - (ob_sp.n_line,), + np.full(shape=(ob_sp_cls.n_line,), fill_value=-np.inf, dtype=dt_float), + np.full(shape=(ob_sp_cls.n_line,), fill_value=np.inf, dtype=dt_float), + (ob_sp_cls.n_line,), dt_float, ), "q_ex": ( - np.full(shape=(ob_sp.n_line,), fill_value=-np.inf, dtype=dt_float), - np.full(shape=(ob_sp.n_line,), fill_value=np.inf, dtype=dt_float), - (ob_sp.n_line,), + np.full(shape=(ob_sp_cls.n_line,), fill_value=-np.inf, dtype=dt_float), + np.full(shape=(ob_sp_cls.n_line,), fill_value=np.inf, dtype=dt_float), + (ob_sp_cls.n_line,), dt_float, ), "a_ex": ( - np.full(shape=(ob_sp.n_line,), fill_value=0.0, dtype=dt_float), - np.full(shape=(ob_sp.n_line,), fill_value=np.inf, dtype=dt_float), - (ob_sp.n_line,), + np.full(shape=(ob_sp_cls.n_line,), fill_value=0.0, dtype=dt_float), + np.full(shape=(ob_sp_cls.n_line,), fill_value=np.inf, dtype=dt_float), + (ob_sp_cls.n_line,), dt_float, ), "v_ex": ( @@ -379,135 +382,135 @@ def __init__( dt_float, ), "theta_ex": ( - np.full(shape=(ob_sp.n_line,), fill_value=-180., dtype=dt_float), - np.full(shape=(ob_sp.n_line,), fill_value=180., dtype=dt_float), - (ob_sp.n_line,), + np.full(shape=(ob_sp_cls.n_line,), fill_value=-180., dtype=dt_float), + np.full(shape=(ob_sp_cls.n_line,), fill_value=180., dtype=dt_float), + (ob_sp_cls.n_line,), dt_float, ), "rho": ( - np.full(shape=(ob_sp.n_line,), fill_value=0.0, dtype=dt_float), - np.full(shape=(ob_sp.n_line,), fill_value=np.inf, dtype=dt_float), - (ob_sp.n_line,), + np.full(shape=(ob_sp_cls.n_line,), fill_value=0.0, dtype=dt_float), + np.full(shape=(ob_sp_cls.n_line,), fill_value=np.inf, dtype=dt_float), + (ob_sp_cls.n_line,), dt_float, ), "line_status": ( - np.full(shape=(ob_sp.n_line,), fill_value=0, dtype=dt_int), - np.full(shape=(ob_sp.n_line,), fill_value=1, dtype=dt_int), - (ob_sp.n_line,), + np.full(shape=(ob_sp_cls.n_line,), fill_value=0, dtype=dt_int), + np.full(shape=(ob_sp_cls.n_line,), fill_value=1, dtype=dt_int), + (ob_sp_cls.n_line,), dt_int, ), "timestep_overflow": ( np.full( - shape=(ob_sp.n_line,), fill_value=np.iinfo(dt_int).min, dtype=dt_int + shape=(ob_sp_cls.n_line,), fill_value=np.iinfo(dt_int).min, dtype=dt_int ), np.full( - shape=(ob_sp.n_line,), fill_value=np.iinfo(dt_int).max, dtype=dt_int + shape=(ob_sp_cls.n_line,), fill_value=np.iinfo(dt_int).max, dtype=dt_int ), - (ob_sp.n_line,), + (ob_sp_cls.n_line,), dt_int, ), "topo_vect": ( - np.full(shape=(ob_sp.dim_topo,), fill_value=-1, dtype=dt_int), - np.full(shape=(ob_sp.dim_topo,), fill_value=2, dtype=dt_int), - (ob_sp.dim_topo,), + np.full(shape=(ob_sp_cls.dim_topo,), fill_value=-1, dtype=dt_int), + np.full(shape=(ob_sp_cls.dim_topo,), fill_value=ob_sp_cls.n_busbar_per_sub, dtype=dt_int), + (ob_sp_cls.dim_topo,), dt_int, ), "time_before_cooldown_line": ( - np.full(shape=(ob_sp.n_line,), fill_value=0, dtype=dt_int), + np.full(shape=(ob_sp_cls.n_line,), fill_value=0, dtype=dt_int), np.full( - shape=(ob_sp.n_line,), fill_value=np.iinfo(dt_int).max, dtype=dt_int + shape=(ob_sp_cls.n_line,), fill_value=np.iinfo(dt_int).max, dtype=dt_int ), - (ob_sp.n_line,), + (ob_sp_cls.n_line,), dt_int, ), "time_before_cooldown_sub": ( - np.full(shape=(ob_sp.n_sub,), fill_value=0, dtype=dt_int), + np.full(shape=(ob_sp_cls.n_sub,), fill_value=0, dtype=dt_int), np.full( - shape=(ob_sp.n_sub,), fill_value=np.iinfo(dt_int).max, dtype=dt_int + shape=(ob_sp_cls.n_sub,), fill_value=np.iinfo(dt_int).max, dtype=dt_int ), - (ob_sp.n_sub,), + (ob_sp_cls.n_sub,), 
dt_int, ), "time_next_maintenance": ( - np.full(shape=(ob_sp.n_line,), fill_value=-1, dtype=dt_int), + np.full(shape=(ob_sp_cls.n_line,), fill_value=-1, dtype=dt_int), np.full( - shape=(ob_sp.n_line,), fill_value=np.iinfo(dt_int).max, dtype=dt_int + shape=(ob_sp_cls.n_line,), fill_value=np.iinfo(dt_int).max, dtype=dt_int ), - (ob_sp.n_line,), + (ob_sp_cls.n_line,), dt_int, ), "duration_next_maintenance": ( - np.full(shape=(ob_sp.n_line,), fill_value=0, dtype=dt_int), + np.full(shape=(ob_sp_cls.n_line,), fill_value=0, dtype=dt_int), np.full( - shape=(ob_sp.n_line,), fill_value=np.iinfo(dt_int).max, dtype=dt_int + shape=(ob_sp_cls.n_line,), fill_value=np.iinfo(dt_int).max, dtype=dt_int ), - (ob_sp.n_line,), + (ob_sp_cls.n_line,), dt_int, ), "target_dispatch": ( - np.minimum(ob_sp.gen_pmin, -ob_sp.gen_pmax), - np.maximum(-ob_sp.gen_pmin, +ob_sp.gen_pmax), - (ob_sp.n_gen,), + np.minimum(ob_sp_cls.gen_pmin, -ob_sp_cls.gen_pmax), + np.maximum(-ob_sp_cls.gen_pmin, +ob_sp_cls.gen_pmax), + (ob_sp_cls.n_gen,), dt_float, ), "actual_dispatch": ( - np.minimum(ob_sp.gen_pmin, -ob_sp.gen_pmax), - np.maximum(-ob_sp.gen_pmin, +ob_sp.gen_pmax), - (ob_sp.n_gen,), + np.minimum(ob_sp_cls.gen_pmin, -ob_sp_cls.gen_pmax), + np.maximum(-ob_sp_cls.gen_pmin, +ob_sp_cls.gen_pmax), + (ob_sp_cls.n_gen,), dt_float, ), "storage_charge": ( - np.full(shape=(ob_sp.n_storage,), fill_value=0, dtype=dt_float), - 1.0 * ob_sp.storage_Emax, - (ob_sp.n_storage,), + np.full(shape=(ob_sp_cls.n_storage,), fill_value=0, dtype=dt_float), + 1.0 * ob_sp_cls.storage_Emax, + (ob_sp_cls.n_storage,), dt_float, ), "storage_power_target": ( - -1.0 * ob_sp.storage_max_p_prod, - 1.0 * ob_sp.storage_max_p_absorb, - (ob_sp.n_storage,), + -1.0 * ob_sp_cls.storage_max_p_prod, + 1.0 * ob_sp_cls.storage_max_p_absorb, + (ob_sp_cls.n_storage,), dt_float, ), "storage_power": ( - -1.0 * ob_sp.storage_max_p_prod, - 1.0 * ob_sp.storage_max_p_absorb, - (ob_sp.n_storage,), + -1.0 * ob_sp_cls.storage_max_p_prod, + 1.0 * ob_sp_cls.storage_max_p_absorb, + (ob_sp_cls.n_storage,), dt_float, ), "storage_theta": ( - np.full(shape=(ob_sp.n_storage,), fill_value=-180., dtype=dt_float), - np.full(shape=(ob_sp.n_storage,), fill_value=180., dtype=dt_float), - (ob_sp.n_storage,), + np.full(shape=(ob_sp_cls.n_storage,), fill_value=-180., dtype=dt_float), + np.full(shape=(ob_sp_cls.n_storage,), fill_value=180., dtype=dt_float), + (ob_sp_cls.n_storage,), dt_float, ), "curtailment": ( - np.full(shape=(ob_sp.n_gen,), fill_value=0.0, dtype=dt_float), - np.full(shape=(ob_sp.n_gen,), fill_value=1.0, dtype=dt_float), - (ob_sp.n_gen,), + np.full(shape=(ob_sp_cls.n_gen,), fill_value=0.0, dtype=dt_float), + np.full(shape=(ob_sp_cls.n_gen,), fill_value=1.0, dtype=dt_float), + (ob_sp_cls.n_gen,), dt_float, ), "curtailment_limit": ( - np.full(shape=(ob_sp.n_gen,), fill_value=0.0, dtype=dt_float), - np.full(shape=(ob_sp.n_gen,), fill_value=1.0, dtype=dt_float), - (ob_sp.n_gen,), + np.full(shape=(ob_sp_cls.n_gen,), fill_value=0.0, dtype=dt_float), + np.full(shape=(ob_sp_cls.n_gen,), fill_value=1.0, dtype=dt_float), + (ob_sp_cls.n_gen,), dt_float, ), "curtailment_mw": ( - np.full(shape=(ob_sp.n_gen,), fill_value=0.0, dtype=dt_float), - 1.0 * ob_sp.gen_pmax, - (ob_sp.n_gen,), + np.full(shape=(ob_sp_cls.n_gen,), fill_value=0.0, dtype=dt_float), + 1.0 * ob_sp_cls.gen_pmax, + (ob_sp_cls.n_gen,), dt_float, ), "curtailment_limit_mw": ( - np.full(shape=(ob_sp.n_gen,), fill_value=0.0, dtype=dt_float), - 1.0 * ob_sp.gen_pmax, - (ob_sp.n_gen,), + np.full(shape=(ob_sp_cls.n_gen,), fill_value=0.0, 
dtype=dt_float), + 1.0 * ob_sp_cls.gen_pmax, + (ob_sp_cls.n_gen,), dt_float, ), "thermal_limit": ( - np.full(shape=(ob_sp.n_line,), fill_value=0.0, dtype=dt_float), - np.full(shape=(ob_sp.n_line,), fill_value=np.inf, dtype=dt_float), - (ob_sp.n_line,), + np.full(shape=(ob_sp_cls.n_line,), fill_value=0.0, dtype=dt_float), + np.full(shape=(ob_sp_cls.n_line,), fill_value=np.inf, dtype=dt_float), + (ob_sp_cls.n_line,), dt_float, ), "is_alarm_illegal": ( @@ -523,13 +526,13 @@ def __init__( dt_int, ), "last_alarm": ( - np.full(shape=(ob_sp.dim_alarms,), fill_value=-1, dtype=dt_int), + np.full(shape=(ob_sp_cls.dim_alarms,), fill_value=-1, dtype=dt_int), np.full( - shape=(ob_sp.dim_alarms,), + shape=(ob_sp_cls.dim_alarms,), fill_value=np.iinfo(dt_int).max, dtype=dt_int, ), - (ob_sp.dim_alarms,), + (ob_sp_cls.dim_alarms,), dt_int, ), "attention_budget": ( @@ -552,45 +555,45 @@ def __init__( ), # alert stuff "active_alert": ( - np.full(shape=(ob_sp.dim_alerts,), fill_value=False, dtype=dt_bool), - np.full(shape=(ob_sp.dim_alerts,), fill_value=True, dtype=dt_bool), - (ob_sp.dim_alerts,), + np.full(shape=(ob_sp_cls.dim_alerts,), fill_value=False, dtype=dt_bool), + np.full(shape=(ob_sp_cls.dim_alerts,), fill_value=True, dtype=dt_bool), + (ob_sp_cls.dim_alerts,), dt_bool, ), "time_since_last_alert": ( - np.full(shape=(ob_sp.dim_alerts,), fill_value=-1, dtype=dt_int), - np.full(shape=(ob_sp.dim_alerts,), fill_value=np.iinfo(dt_int).max, dtype=dt_int), - (ob_sp.dim_alerts,), + np.full(shape=(ob_sp_cls.dim_alerts,), fill_value=-1, dtype=dt_int), + np.full(shape=(ob_sp_cls.dim_alerts,), fill_value=np.iinfo(dt_int).max, dtype=dt_int), + (ob_sp_cls.dim_alerts,), dt_int, ), "alert_duration": ( - np.full(shape=(ob_sp.dim_alerts,), fill_value=-1, dtype=dt_int), - np.full(shape=(ob_sp.dim_alerts,), fill_value=np.iinfo(dt_int).max, dtype=dt_int), - (ob_sp.dim_alerts,), + np.full(shape=(ob_sp_cls.dim_alerts,), fill_value=-1, dtype=dt_int), + np.full(shape=(ob_sp_cls.dim_alerts,), fill_value=np.iinfo(dt_int).max, dtype=dt_int), + (ob_sp_cls.dim_alerts,), dt_int, ), "total_number_of_alert": ( - np.full(shape=(1 if ob_sp.dim_alerts else 0,), fill_value=-1, dtype=dt_int), - np.full(shape=(1 if ob_sp.dim_alerts else 0,), fill_value=np.iinfo(dt_int).max, dtype=dt_int), - (1 if ob_sp.dim_alerts else 0,), + np.full(shape=(1 if ob_sp_cls.dim_alerts else 0,), fill_value=-1, dtype=dt_int), + np.full(shape=(1 if ob_sp_cls.dim_alerts else 0,), fill_value=np.iinfo(dt_int).max, dtype=dt_int), + (1 if ob_sp_cls.dim_alerts else 0,), dt_int, ), "time_since_last_attack": ( - np.full(shape=(ob_sp.dim_alerts,), fill_value=-1, dtype=dt_int), - np.full(shape=(ob_sp.dim_alerts,), fill_value=np.iinfo(dt_int).max, dtype=dt_int), - (ob_sp.dim_alerts,), + np.full(shape=(ob_sp_cls.dim_alerts,), fill_value=-1, dtype=dt_int), + np.full(shape=(ob_sp_cls.dim_alerts,), fill_value=np.iinfo(dt_int).max, dtype=dt_int), + (ob_sp_cls.dim_alerts,), dt_int, ), "was_alert_used_after_attack": ( - np.full(shape=(ob_sp.dim_alerts,), fill_value=-1, dtype=dt_int), - np.full(shape=(ob_sp.dim_alerts,), fill_value=1, dtype=dt_int), - (ob_sp.dim_alerts,), + np.full(shape=(ob_sp_cls.dim_alerts,), fill_value=-1, dtype=dt_int), + np.full(shape=(ob_sp_cls.dim_alerts,), fill_value=1, dtype=dt_int), + (ob_sp_cls.dim_alerts,), dt_int, ), "attack_under_alert": ( - np.full(shape=(ob_sp.dim_alerts,), fill_value=-1, dtype=dt_int), - np.full(shape=(ob_sp.dim_alerts,), fill_value=1, dtype=dt_int), - (ob_sp.dim_alerts,), + np.full(shape=(ob_sp_cls.dim_alerts,), fill_value=-1, 
dtype=dt_int), + np.full(shape=(ob_sp_cls.dim_alerts,), fill_value=1, dtype=dt_int), + (ob_sp_cls.dim_alerts,), dt_int, ), } @@ -908,7 +911,7 @@ def normalize_attr(self, attr_nm: str): both_finite &= curr_high > curr_low if (~both_finite).any(): - warnings.warn(f"The normalization of attribute \"{both_finite}\" cannot be performed entirely as " + warnings.warn(f"The normalization of attribute \"{attr_nm}\" cannot be performed entirely as " f"there are some non finite value, or `high == `low` " f"for some components.") diff --git a/grid2op/gym_compat/discrete_gym_actspace.py b/grid2op/gym_compat/discrete_gym_actspace.py index e059a04b8..4e89c448a 100644 --- a/grid2op/gym_compat/discrete_gym_actspace.py +++ b/grid2op/gym_compat/discrete_gym_actspace.py @@ -8,10 +8,10 @@ import copy import warnings -# from gym.spaces import Discrete +from typing import Literal, Dict, Tuple, Any, Optional from grid2op.Exceptions import Grid2OpException -from grid2op.Action import ActionSpace +from grid2op.Action import ActionSpace, BaseAction from grid2op.Converter import IdToAct from grid2op.gym_compat.utils import (ALL_ATTR_FOR_DISCRETE, @@ -19,12 +19,6 @@ GYM_AVAILABLE, GYMNASIUM_AVAILABLE) -# TODO test that it works normally -# TODO test the casting in dt_int or dt_float depending on the data -# TODO test the scaling -# TODO doc -# TODO test the function part - class __AuxDiscreteActSpace: """ @@ -215,9 +209,18 @@ class __AuxDiscreteActSpace: def __init__( self, - grid2op_action_space, - attr_to_keep=ALL_ATTR_FOR_DISCRETE, - nb_bins=None, + grid2op_action_space : ActionSpace, + attr_to_keep: Optional[Tuple[Literal["set_line_status"], + Literal["set_line_status_simple"], + Literal["change_line_status"], + Literal["set_bus"], + Literal["change_bus"], + Literal["redispatch"], + Literal["set_storage"], + Literal["curtail"], + Literal["curtail_mw"], + ]]=ALL_ATTR_FOR_DISCRETE, + nb_bins : Dict[Literal["redispatch", "set_storage", "curtail", "curtail_mw"], int]=None, action_list=None, ): @@ -274,8 +277,6 @@ def __init__( "set_storage": act_sp.get_all_unitary_storage, "curtail": act_sp.get_all_unitary_curtail, "curtail_mw": act_sp.get_all_unitary_curtail, - # "raise_alarm": act_sp.get_all_unitary_alarm, - # "raise_alert": act_sp.get_all_unitary_alert, "set_line_status_simple": act_sp.get_all_unitary_line_set_simple, } @@ -319,7 +320,7 @@ def _get_info(self): self.converter = converter return self.converter.n - def from_gym(self, gym_act): + def from_gym(self, gym_act: int) -> BaseAction: """ This is the function that is called to transform a gym action (in this case a numpy array!) sent by the agent @@ -339,7 +340,11 @@ def from_gym(self, gym_act): res = self.converter.all_actions[int(gym_act)] return res - def close(self): + def close(self) -> None: + """If you override this class, this function is called when the GymEnv is deleted. + + You can use it to free some memory if needed, but there is nothing to do in the general case. 
+ """ pass @@ -363,7 +368,7 @@ def close(self): from gymnasium.spaces import Discrete from grid2op.gym_compat.box_gym_actspace import BoxGymnasiumActSpace from grid2op.gym_compat.continuous_to_discrete import ContinuousToDiscreteConverterGymnasium - DiscreteActSpaceGymnasium = type("MultiDiscreteActSpaceGymnasium", + DiscreteActSpaceGymnasium = type("DiscreteActSpaceGymnasium", (__AuxDiscreteActSpace, Discrete, ), {"_gymnasium": True, "_DiscreteType": Discrete, diff --git a/grid2op/gym_compat/gym_act_space.py b/grid2op/gym_compat/gym_act_space.py index 8bc428e2e..984de4127 100644 --- a/grid2op/gym_compat/gym_act_space.py +++ b/grid2op/gym_compat/gym_act_space.py @@ -18,7 +18,7 @@ from grid2op.Action import BaseAction, ActionSpace from grid2op.dtypes import dt_int, dt_bool, dt_float from grid2op.Converter.Converters import Converter -from grid2op.gym_compat.utils import GYM_AVAILABLE, GYMNASIUM_AVAILABLE +from grid2op.gym_compat.utils import GYM_AVAILABLE, GYMNASIUM_AVAILABLE, ActType class __AuxGymActionSpace: @@ -126,24 +126,35 @@ def __init__(self, env, converter=None, dict_variables=None): env, (Environment, MultiMixEnvironment, BaseMultiProcessEnvironment) ): # action_space is an environment - self.initial_act_space = env.action_space - self._init_env = env + # self.initial_act_space = env.action_space + # self._init_env = env + self._template_act = env.action_space() + self._converter = None + self.__is_converter = False elif isinstance(env, ActionSpace) and converter is None: warnings.warn( "It is now deprecated to initialize an Converter with an " "action space. Please use an environment instead." ) - self.initial_act_space = env - self._init_env = None + self._converter = None + self._template_act = env() + self.__is_converter = False + elif isinstance(env, type(self)): + self._template_act = env._template_act.copy() + self._converter = env._converter + self.__is_converter = env.__is_converter else: raise RuntimeError( "GymActionSpace must be created with an Environment or an ActionSpace (or a Converter)" ) dict_ = {} + # TODO Make sure it works well ! if converter is not None and isinstance(converter, Converter): # a converter allows to ... convert the data so they have specific gym space - self.initial_act_space = converter + # self.initial_act_space = converter + self._converter = converter + self._template_act = converter.init_action_space() dict_ = converter.get_gym_dict(type(self)) self.__is_converter = True elif converter is not None: @@ -155,7 +166,7 @@ def __init__(self, env, converter=None, dict_variables=None): ) else: self._fill_dict_act_space( - dict_, self.initial_act_space, dict_variables=dict_variables + dict_, dict_variables=dict_variables ) dict_ = self._fix_dict_keys(dict_) self.__is_converter = False @@ -194,11 +205,11 @@ def reencode_space(self, key, fun): If an attribute has been ignored, for example by :func`GymEnv.keep_only_obs_attr` or and is now present here, it will be re added in the final observation """ - if self._init_env is None: - raise RuntimeError( - "Impossible to reencode a space that has been initialized with an " - "action space as input. Please provide a valid" - ) + # if self._init_env is None: + # raise RuntimeError( + # "Impossible to reencode a space that has been initialized with an " + # "action space as input. Please provide a valid" + # ) if self.__is_converter: raise RuntimeError( "Impossible to reencode a space that is a converter space." 
@@ -224,13 +235,15 @@ def reencode_space(self, key, fun): else: raise RuntimeError(f"Impossible to find key {key} in your action space") my_dict[key2] = fun - res = type(self)(env=self._init_env, dict_variables=my_dict) + res = type(self)(env=self, dict_variables=my_dict) return res - def _fill_dict_act_space(self, dict_, action_space, dict_variables): + def _fill_dict_act_space(self, dict_, dict_variables): # TODO what about dict_variables !!! for attr_nm, sh, dt in zip( - action_space.attr_list_vect, action_space.shape, action_space.dtype + type(self._template_act).attr_list_vect, + self._template_act.shapes(), + self._template_act.dtypes() ): if sh == 0: # do not add "empty" (=0 dimension) arrays to gym otherwise it crashes @@ -248,7 +261,9 @@ def _fill_dict_act_space(self, dict_, action_space, dict_variables): if attr_nm == "_set_line_status": my_type = type(self)._BoxType(low=-1, high=1, shape=shape, dtype=dt) elif attr_nm == "_set_topo_vect": - my_type = type(self)._BoxType(low=-1, high=2, shape=shape, dtype=dt) + my_type = type(self)._BoxType(low=-1, + high=type(self._template_act).n_busbar_per_sub, + shape=shape, dtype=dt) elif dt == dt_bool: # boolean observation space my_type = self._boolean_type(sh) @@ -261,28 +276,28 @@ def _fill_dict_act_space(self, dict_, action_space, dict_variables): SpaceType = type(self)._BoxType if attr_nm == "prod_p": - low = action_space.gen_pmin - high = action_space.gen_pmax + low = type(self._template_act).gen_pmin + high = type(self._template_act).gen_pmax shape = None elif attr_nm == "prod_v": # voltages can't be negative low = 0.0 elif attr_nm == "_redispatch": # redispatch - low = -1.0 * action_space.gen_max_ramp_down - high = 1.0 * action_space.gen_max_ramp_up - low[~action_space.gen_redispatchable] = 0.0 - high[~action_space.gen_redispatchable] = 0.0 + low = -1.0 * type(self._template_act).gen_max_ramp_down + high = 1.0 * type(self._template_act).gen_max_ramp_up + low[~type(self._template_act).gen_redispatchable] = 0.0 + high[~type(self._template_act).gen_redispatchable] = 0.0 elif attr_nm == "_curtail": # curtailment - low = np.zeros(action_space.n_gen, dtype=dt_float) - high = np.ones(action_space.n_gen, dtype=dt_float) - low[~action_space.gen_renewable] = -1.0 - high[~action_space.gen_renewable] = -1.0 + low = np.zeros(type(self._template_act).n_gen, dtype=dt_float) + high = np.ones(type(self._template_act).n_gen, dtype=dt_float) + low[~type(self._template_act).gen_renewable] = -1.0 + high[~type(self._template_act).gen_renewable] = -1.0 elif attr_nm == "_storage_power": # storage power - low = -1.0 * action_space.storage_max_p_prod - high = 1.0 * action_space.storage_max_p_absorb + low = -1.0 * type(self._template_act).storage_max_p_prod + high = 1.0 * type(self._template_act).storage_max_p_absorb my_type = SpaceType(low=low, high=high, shape=shape, dtype=dt) if my_type is None: @@ -315,10 +330,10 @@ def from_gym(self, gymlike_action: OrderedDict) -> object: if self.__is_converter: # case where the action space comes from a converter, in this case the converter takes the # delegation to convert the action to openai gym - res = self.initial_act_space.convert_action_from_gym(gymlike_action) + res = self._converter.convert_action_from_gym(gymlike_action) else: # case where the action space is a "simple" action space - res = self.initial_act_space() + res = self._template_act.copy() for k, v in gymlike_action.items(): internal_k = self.keys_human_2_grid2op[k] if internal_k in self._keys_encoding: @@ -345,7 +360,7 @@ def to_gym(self, action: 
object) -> OrderedDict: """ if self.__is_converter: - gym_action = self.initial_act_space.convert_action_to_gym(action) + gym_action = self._converter.convert_action_to_gym(action) else: # in that case action should be an instance of grid2op BaseAction assert isinstance( diff --git a/grid2op/gym_compat/gym_obs_space.py b/grid2op/gym_compat/gym_obs_space.py index d427f4230..170435d05 100644 --- a/grid2op/gym_compat/gym_obs_space.py +++ b/grid2op/gym_compat/gym_obs_space.py @@ -85,17 +85,46 @@ class __AuxGymObservationSpace: `env.gen_pmin` and `env.gen_pmax` are not always ensured in grid2op) """ - + ALLOWED_ENV_CLS = (Environment, MultiMixEnvironment, BaseMultiProcessEnvironment) def __init__(self, env, dict_variables=None): if not isinstance( - env, (Environment, MultiMixEnvironment, BaseMultiProcessEnvironment) + env, type(self).ALLOWED_ENV_CLS + (type(self), ) ): raise RuntimeError( "GymActionSpace must be created with an Environment of an ActionSpace (or a Converter)" ) - self._init_env = env - self.initial_obs_space = self._init_env.observation_space + # self._init_env = env + if isinstance(env, type(self).ALLOWED_ENV_CLS): + init_env_cls = type(env) + if init_env_cls._CLS_DICT_EXTENDED is None: + # make sure the _CLS_DICT_EXTENDED exists + tmp_ = {} + init_env_cls._make_cls_dict_extended(init_env_cls, res=tmp_, as_list=False, copy_=False, _topo_vect_only=False) + self.init_env_cls_dict = init_env_cls._CLS_DICT_EXTENDED.copy() + # retrieve an empty observation an disable the forecast feature + self.initial_obs = env.observation_space.get_empty_observation() + self.initial_obs._obs_env = None + self.initial_obs._ptr_kwargs_env = None + + if env.observation_space.obs_env is not None: + self._tol_poly = env.observation_space.obs_env._tol_poly + else: + self._tol_poly = 1e-2 + self._env_params = env.parameters + self._opp_attack_max_duration = env._oppSpace.attack_max_duration + elif isinstance(env, type(self)): + self.init_env_cls_dict = env.init_env_cls_dict.copy() + + # retrieve an empty observation an disable the forecast feature + self.initial_obs = env.initial_obs + + self._tol_poly = env._tol_poly + self._env_params = env._env_params + self._opp_attack_max_duration = env._opp_attack_max_duration + else: + raise RuntimeError("Unknown way to build a gym observation space") + dict_ = {} # will represent the gym.Dict space if dict_variables is None: @@ -105,48 +134,48 @@ def __init__(self, env, dict_variables=None): type(self)._BoxType( low=0., high=np.inf, - shape=(self._init_env.n_line, ), + shape=(self.init_env_cls_dict["n_line"], ), dtype=dt_float, ), "theta_or": type(self)._BoxType( low=-180., high=180., - shape=(self._init_env.n_line, ), + shape=(self.init_env_cls_dict["n_line"], ), dtype=dt_float, ), "theta_ex": type(self)._BoxType( low=-180., high=180., - shape=(self._init_env.n_line, ), + shape=(self.init_env_cls_dict["n_line"], ), dtype=dt_float, ), "load_theta": type(self)._BoxType( low=-180., high=180., - shape=(self._init_env.n_load, ), + shape=(self.init_env_cls_dict["n_load"], ), dtype=dt_float, ), "gen_theta": type(self)._BoxType( low=-180., high=180., - shape=(self._init_env.n_gen, ), + shape=(self.init_env_cls_dict["n_gen"], ), dtype=dt_float, ) } - if self._init_env.n_storage: + if self.init_env_cls_dict["n_storage"]: dict_variables["storage_theta"] = type(self)._BoxType( low=-180., high=180., - shape=(self._init_env.n_storage, ), + shape=(self.init_env_cls_dict["n_storage"], ), dtype=dt_float, ) self._fill_dict_obs_space( - dict_, env.observation_space, 
env.parameters, env._oppSpace, dict_variables + dict_, dict_variables ) super().__init__(dict_, dict_variables=dict_variables) # super should point to _BaseGymSpaceConverter @@ -202,11 +231,11 @@ def reencode_space(self, key, fun): f"Impossible to find key {key} in your observation space" ) my_dict[key] = fun - res = type(self)(self._init_env, my_dict) + res = type(self)(self, my_dict) return res def _fill_dict_obs_space( - self, dict_, observation_space, env_params, opponent_space, dict_variables={} + self, dict_, dict_variables={} ): for attr_nm in dict_variables: # case where the user specified a dedicated encoding @@ -214,17 +243,17 @@ def _fill_dict_obs_space( # none is by default to disable this feature continue if isinstance(dict_variables[attr_nm], type(self)._SpaceType): - if hasattr(observation_space._template_obj, attr_nm): + if hasattr(self.initial_obs, attr_nm): # add it only if attribute exists in the observation dict_[attr_nm] = dict_variables[attr_nm] else: dict_[attr_nm] = dict_variables[attr_nm].my_space - + # by default consider all attributes that are vectorized for attr_nm, sh, dt in zip( - observation_space.attr_list_vect, - observation_space.shape, - observation_space.dtype, + type(self.initial_obs).attr_list_vect, + self.initial_obs.shapes(), + self.initial_obs.dtypes(), ): if sh == 0: # do not add "empty" (=0 dimension) arrays to gym otherwise it crashes @@ -252,14 +281,16 @@ def _fill_dict_obs_space( elif attr_nm == "day_of_week": my_type = type(self)._DiscreteType(n=8) elif attr_nm == "topo_vect": - my_type = type(self)._BoxType(low=-1, high=2, shape=shape, dtype=dt) + my_type = type(self)._BoxType(low=-1, + high=self.init_env_cls_dict["n_busbar_per_sub"], + shape=shape, dtype=dt) elif attr_nm == "time_before_cooldown_line": my_type = type(self)._BoxType( low=0, high=max( - env_params.NB_TIMESTEP_COOLDOWN_LINE, - env_params.NB_TIMESTEP_RECONNECTION, - opponent_space.attack_max_duration, + self._env_params.NB_TIMESTEP_COOLDOWN_LINE, + self._env_params.NB_TIMESTEP_RECONNECTION, + self._opp_attack_max_duration, ), shape=shape, dtype=dt, @@ -267,7 +298,7 @@ def _fill_dict_obs_space( elif attr_nm == "time_before_cooldown_sub": my_type = type(self)._BoxType( low=0, - high=env_params.NB_TIMESTEP_COOLDOWN_SUB, + high=self._env_params.NB_TIMESTEP_COOLDOWN_SUB, shape=shape, dtype=dt, ) @@ -312,17 +343,17 @@ def _fill_dict_obs_space( shape = (sh,) SpaceType = type(self)._BoxType if attr_nm == "gen_p" or attr_nm == "gen_p_before_curtail": - low = copy.deepcopy(observation_space.gen_pmin) - high = copy.deepcopy(observation_space.gen_pmax) + low = copy.deepcopy(self.init_env_cls_dict["gen_pmin"]) + high = copy.deepcopy(self.init_env_cls_dict["gen_pmax"]) shape = None # for redispatching - low -= observation_space.obs_env._tol_poly - high += observation_space.obs_env._tol_poly + low -= self._tol_poly + high += self._tol_poly # for "power losses" that are not properly computed in the original data extra_for_losses = _compute_extra_power_for_losses( - observation_space + self.init_env_cls_dict ) low -= extra_for_losses high += extra_for_losses @@ -341,17 +372,17 @@ def _fill_dict_obs_space( elif attr_nm == "target_dispatch" or attr_nm == "actual_dispatch": # TODO check that to be sure low = np.minimum( - observation_space.gen_pmin, -observation_space.gen_pmax + self.init_env_cls_dict["gen_pmin"], -self.init_env_cls_dict["gen_pmax"] ) high = np.maximum( - -observation_space.gen_pmin, +observation_space.gen_pmax + -self.init_env_cls_dict["gen_pmin"], 
+self.init_env_cls_dict["gen_pmax"] ) elif attr_nm == "storage_power" or attr_nm == "storage_power_target": - low = -observation_space.storage_max_p_prod - high = observation_space.storage_max_p_absorb + low = -self.init_env_cls_dict["storage_max_p_prod"] + high = self.init_env_cls_dict["storage_max_p_absorb"] elif attr_nm == "storage_charge": - low = np.zeros(observation_space.n_storage, dtype=dt_float) - high = observation_space.storage_Emax + low = np.zeros(self.init_env_cls_dict["n_storage"], dtype=dt_float) + high = self.init_env_cls_dict["storage_Emax"] elif ( attr_nm == "curtailment" or attr_nm == "curtailment_limit" @@ -367,10 +398,10 @@ def _fill_dict_obs_space( high = np.inf elif attr_nm == "gen_margin_up": low = 0.0 - high = observation_space.gen_max_ramp_up + high = self.init_env_cls_dict["gen_max_ramp_up"] elif attr_nm == "gen_margin_down": low = 0.0 - high = observation_space.gen_max_ramp_down + high = self.init_env_cls_dict["gen_max_ramp_down"] # curtailment, curtailment_limit, gen_p_before_curtail my_type = SpaceType(low=low, high=high, shape=shape, dtype=dt) @@ -394,7 +425,7 @@ def from_gym(self, gymlike_observation: spaces.dict.OrderedDict) -> BaseObservat grid2oplike_observation: :class:`grid2op.Observation.BaseObservation` The corresponding grid2op observation """ - res = self.initial_obs_space.get_empty_observation() + res = self.initial_obs.copy() for k, v in gymlike_observation.items(): try: res._assign_attr_from_name(k, v) diff --git a/grid2op/gym_compat/gymenv.py b/grid2op/gym_compat/gymenv.py index 5a000ffc1..db6c59a40 100644 --- a/grid2op/gym_compat/gymenv.py +++ b/grid2op/gym_compat/gymenv.py @@ -7,11 +7,18 @@ # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. import numpy as np +from typing import Literal, Dict, Tuple, Any, Optional, Union, Generic from grid2op.dtypes import dt_int from grid2op.Chronics import Multifolder -from grid2op.gym_compat.utils import GYM_AVAILABLE, GYMNASIUM_AVAILABLE -from grid2op.gym_compat.utils import (check_gym_version, sample_seed) +from grid2op.Environment import Environment +from grid2op.typing_variables import STEP_INFO_TYPING, RESET_OPTIONS_TYPING +from grid2op.gym_compat.utils import (GYM_AVAILABLE, + GYMNASIUM_AVAILABLE, + check_gym_version, + sample_seed, + ObsType, + ActType) def conditional_decorator(condition): @@ -22,8 +29,10 @@ def decorator(func): return NotImplementedError() # anything that is not a callbe anyway return decorator +_TIME_SERIE_ID = "time serie id" +RESET_INFO_GYM_TYPING = Dict[Literal["time serie id", "seed", "grid2op_env_seed", "underlying_env_seeds"], Any] -class __AuxGymEnv: +class __AuxGymEnv(Generic[ObsType, ActType]): """ fully implements the openAI gym API by using the :class:`GymActionSpace` and :class:`GymObservationSpace` for compliance with openAI gym. @@ -45,7 +54,7 @@ class behave differently depending on the version of gym you have installed ! - :class:`GymEnv_Modern` for gym >= 0.26 .. warning:: - Depending on the presence absence of gymnasium and gym packages this class might behave differently. + Depending on the presence absence of `gymnasium` and `gym` packages this class might behave differently. In grid2op we tried to maintain compatibility both with gymnasium (newest) and gym (legacy, no more maintained) RL packages. The behaviour is the following: @@ -95,29 +104,45 @@ class behave differently depending on the version of gym you have installed ! 
an action is represented through an OrderedDict (`from collection import OrderedDict`) """ - def __init__(self, env_init, shuffle_chronics=True, render_mode="rgb_array"): - check_gym_version(type(self)._gymnasium) + def __init__(self, + env_init: Environment, + shuffle_chronics:Optional[bool]=True, + render_mode: Literal["rgb_array"]="rgb_array", + with_forecast: bool=False): + cls = type(self) + check_gym_version(cls._gymnasium) + self.action_space = cls._ActionSpaceType(env_init) + self.observation_space = cls._ObservationSpaceType(env_init) + self.reward_range = env_init.reward_range + self.metadata = env_init.metadata self.init_env = env_init.copy() - self.action_space = type(self)._ActionSpaceType(self.init_env) - self.observation_space = type(self)._ObservationSpaceType(self.init_env) - self.reward_range = self.init_env.reward_range - self.metadata = self.init_env.metadata self.init_env.render_mode = render_mode self._shuffle_chronics = shuffle_chronics + if not with_forecast: + # default in grid2op 1.10.3 + # to improve pickle compatibility and speed + self.init_env.deactivate_forecast() + self.init_env._observation_space.obs_env.close() + self.init_env._observation_space.obs_env = None + self.init_env._observation_space._ObsEnv_class = None + self.init_env._last_obs._obs_env = None + self.init_env._last_obs._ptr_kwargs_env = False + self.init_env.current_obs._obs_env = None + self.init_env.current_obs._ptr_kwargs_env = False super().__init__() # super should reference either gym.Env or gymnasium.Env if not hasattr(self, "_np_random"): # for older version of gym it does not exist self._np_random = np.random.RandomState() - def _aux_step(self, gym_action): + def _aux_step(self, gym_action: ActType) -> Tuple[ObsType, float, bool, STEP_INFO_TYPING]: # used for gym < 0.26 g2op_act = self.action_space.from_gym(gym_action) g2op_obs, reward, done, info = self.init_env.step(g2op_act) gym_obs = self.observation_space.to_gym(g2op_obs) return gym_obs, float(reward), done, info - def _aux_step_new(self, gym_action): + def _aux_step_new(self, gym_action: ActType) -> Tuple[ObsType, float, bool, bool, STEP_INFO_TYPING]: # used for gym >= 0.26 # TODO refacto with _aux_step g2op_act = self.action_space.from_gym(gym_action) @@ -126,7 +151,10 @@ def _aux_step_new(self, gym_action): truncated = False # see https://github.com/openai/gym/pull/2752 return gym_obs, float(reward), terminated, truncated, info - def _aux_reset(self, seed=None, return_info=None, options=None): + def _aux_reset(self, + seed: Optional[int]=None, + return_info: Optional[bool]=None, + options: RESET_OPTIONS_TYPING=None) -> Union[ObsType, Tuple[ObsType, RESET_INFO_GYM_TYPING]]: # used for gym < 0.26 if self._shuffle_chronics and isinstance( self.init_env.chronics_handler.real_data, Multifolder @@ -136,12 +164,12 @@ def _aux_reset(self, seed=None, return_info=None, options=None): if seed is not None: seed_, next_seed, underlying_env_seeds = self._aux_seed(seed) - g2op_obs = self.init_env.reset() + g2op_obs = self.init_env.reset(options=options) gym_obs = self.observation_space.to_gym(g2op_obs) if return_info: chron_id = self.init_env.chronics_handler.get_id() - info = {"time serie id": chron_id} + info = {_TIME_SERIE_ID: chron_id} if seed is not None: info["seed"] = seed info["grid2op_env_seed"] = next_seed @@ -150,23 +178,27 @@ def _aux_reset(self, seed=None, return_info=None, options=None): else: return gym_obs - def _aux_reset_new(self, seed=None, options=None): + def _aux_reset_new(self, + seed: Optional[int]=None, + options: 
RESET_OPTIONS_TYPING=None) -> Tuple[ObsType,RESET_INFO_GYM_TYPING]: # used for gym > 0.26 - if self._shuffle_chronics and isinstance( - self.init_env.chronics_handler.real_data, Multifolder - ): + if (self._shuffle_chronics and + isinstance(self.init_env.chronics_handler.real_data, Multifolder) and + (options is not None and _TIME_SERIE_ID not in options)): self.init_env.chronics_handler.sample_next_chronics() - super().reset(seed=seed) + super().reset(seed=seed) # seed gymnasium env if seed is not None: self._aux_seed_spaces() seed, next_seed, underlying_env_seeds = self._aux_seed_g2op(seed) - - g2op_obs = self.init_env.reset() + + # we don't seed grid2op with reset as it is done + # earlier + g2op_obs = self.init_env.reset(seed=None, options=options) gym_obs = self.observation_space.to_gym(g2op_obs) chron_id = self.init_env.chronics_handler.get_id() - info = {"time serie id": chron_id} + info = {_TIME_SERIE_ID: chron_id} if seed is not None: info["seed"] = seed info["grid2op_env_seed"] = next_seed @@ -177,7 +209,7 @@ def render(self): """for compatibility with open ai gym render function""" return self.init_env.render() - def close(self): + def close(self) -> None: if hasattr(self, "init_env") and self.init_env is not None: self.init_env.close() del self.init_env @@ -199,13 +231,13 @@ def _aux_seed_spaces(self): self.observation_space.seed(next_seed) def _aux_seed_g2op(self, seed): - # then seed the underlying grid2op env - max_ = np.iinfo(dt_int).max - next_seed = sample_seed(max_, self._np_random) - underlying_env_seeds = self.init_env.seed(next_seed) - return seed, next_seed, underlying_env_seeds + # then seed the underlying grid2op env + max_ = np.iinfo(dt_int).max + next_seed = sample_seed(max_, self._np_random) + underlying_env_seeds = self.init_env.seed(next_seed) + return seed, next_seed, underlying_env_seeds - def _aux_seed(self, seed=None): + def _aux_seed(self, seed: Optional[int]=None): # deprecated in gym >=0.26 if seed is not None: # seed the gym env @@ -232,13 +264,13 @@ def __del__(self): _AuxGymEnv.__doc__ = __AuxGymEnv.__doc__ class GymEnv_Legacy(_AuxGymEnv): # for old version of gym - def reset(self, *args, **kwargs): + def reset(self, *args, **kwargs) -> ObsType: return self._aux_reset(*args, **kwargs) - def step(self, action): + def step(self, action: ActType) -> Tuple[ObsType, float, bool, STEP_INFO_TYPING]: return self._aux_step(action) - def seed(self, seed): + def seed(self, seed: Optional[int]) -> None: # defined only on some cases return self._aux_seed(seed) @@ -246,12 +278,15 @@ def seed(self, seed): class GymEnv_Modern(_AuxGymEnv): # for new version of gym def reset(self, - *, - seed=None, - options=None,): + *, + seed: Optional[int]=None, + options: RESET_OPTIONS_TYPING = None) -> Tuple[ + ObsType, + RESET_INFO_GYM_TYPING + ]: return self._aux_reset_new(seed, options) - def step(self, action): + def step(self, action : ActType) -> Tuple[ObsType, float, bool, bool, STEP_INFO_TYPING]: return self._aux_step_new(action) GymEnv_Legacy.__doc__ = __AuxGymEnv.__doc__ GymEnv_Modern.__doc__ = __AuxGymEnv.__doc__ @@ -270,13 +305,60 @@ def step(self, action): _AuxGymnasiumEnv.__doc__ = __AuxGymEnv.__doc__ class GymnasiumEnv(_AuxGymnasiumEnv): - # for new version of gym + # for gymnasium def reset(self, - *, - seed=None, - options=None,): + *, + seed: Optional[int]=None, + options: RESET_OPTIONS_TYPING = None) -> Tuple[ + ObsType, + RESET_INFO_GYM_TYPING + ]: + """This function will reset the underlying grid2op environment + and return the next state of the grid (as the 
gymnasium observation) + and some other information. + + Parameters + ---------- + seed : Optional[int], optional + The seed for this new environment, by default None + options : RESET_OPTIONS_TYPING, optional + See the documentation of :func:`grid2op.Environment.Environment.reset` + for more information about it, by default None + + Returns + ------- + Tuple[ ObsType, RESET_INFO_GYM_TYPING ] + _description_ + """ return self._aux_reset_new(seed, options) - def step(self, action): + def step(self, action: ActType) -> Tuple[ObsType, float, bool, bool, STEP_INFO_TYPING]: + """Run one timestep of the environment’s dynamics using the agent actions. + + When the end of an episode is reached (terminated or truncated), + it is necessary to call reset() to reset this environment’s state for the next episode. + + Parameters + ---------- + action : ``ActType`` + An action that can be process by the :func:`grid2op.gym_compat.gym_act_space.GymActionSpace.from_gym` + (given in the form of a gymnasium action belonging to a gymnasium space.). + + For example it can be a sorted dictionary if you are using default + :class:`grid2op.gym_compat.gym_act_space.GymActionSpace` + or a numpy array if you are using :class:`grid2op.gym_compat.box_gym_actspace.BoxGymnasiumActSpace` + + Returns + ------- + Tuple[ObsType, float, bool, bool, STEP_INFO_TYPING] + + - observation: an instance of the current observation space (can be a dictionary, a numpy array etc.) + - reward: the reward for the previous action + - truncated: whether the environment was terminated + - done: whether the environment is done + - info: other information, see :func:`grid2op.Environment.BaseEnv.step` for more + information about the available informations. + + """ return self._aux_step_new(action) - GymnasiumEnv.__doc__ = __AuxGymEnv.__doc__ \ No newline at end of file + GymnasiumEnv.__doc__ = __AuxGymEnv.__doc__ diff --git a/grid2op/gym_compat/multidiscrete_gym_actspace.py b/grid2op/gym_compat/multidiscrete_gym_actspace.py index a92620389..60999fd9c 100644 --- a/grid2op/gym_compat/multidiscrete_gym_actspace.py +++ b/grid2op/gym_compat/multidiscrete_gym_actspace.py @@ -9,9 +9,11 @@ import copy import warnings import numpy as np +from typing import Literal, Dict, Tuple, Any, Optional from grid2op.Action import ActionSpace from grid2op.dtypes import dt_int, dt_bool, dt_float +from grid2op.Exceptions import Grid2OpException from grid2op.gym_compat.utils import (ALL_ATTR, ATTR_DISCRETE, @@ -39,7 +41,7 @@ class __AuxMultiDiscreteActSpace: - "change_line_status": `n_line` dimensions, each containing 2 elements "CHANGE", "DONT CHANGE" and affecting the powerline status (connected / disconnected) - "set_bus": `dim_topo` dimensions, each containing 4 choices: "DISCONNECT", "DONT AFFECT", "CONNECT TO BUSBAR 1", - or "CONNECT TO BUSBAR 2" and affecting to which busbar an object is connected + or "CONNECT TO BUSBAR 2", "CONNECT TO BUSBAR 3", ... and affecting to which busbar an object is connected - "change_bus": `dim_topo` dimensions, each containing 2 choices: "CHANGE", "DONT CHANGE" and affect to which busbar an element is connected - "redispatch": `sum(env.gen_redispatchable)` dimensions, each containing a certain number of choices depending on the value @@ -66,6 +68,12 @@ class __AuxMultiDiscreteActSpace: - "one_sub_set": 1 single dimension. This type of representation differs from the previous one only by the fact that each step you can perform only one single action on a single substation (so unlikely to be illegal). 
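# Illustrative usage sketch for the gymnasium wrapper documented above
# (gymenv.py). It assumes gymnasium is installed and that GymnasiumEnv is
# exposed by grid2op.gym_compat; the environment name and the time-series id
# are placeholders.
import grid2op
from grid2op.gym_compat import GymnasiumEnv

env = grid2op.make("l2rpn_case14_sandbox")
gym_env = GymnasiumEnv(env, with_forecast=False)  # forecast disabled, matching the new default

# seed the gymnasium spaces and the underlying grid2op env, and pick a given
# time series instead of sampling one at random
obs, info = gym_env.reset(seed=0, options={"time serie id": 0})
print(info["time serie id"], info["grid2op_env_seed"])

# gymnasium API: 5-tuple with terminated / truncated
obs, reward, terminated, truncated, info = gym_env.step(gym_env.action_space.sample())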
- "one_sub_change": 1 single dimension. Same as above. + - "one_line_set": 1 single dimension. In this type of representation, you have one dimension with `1 + 2 * n_line` + elements: first is "do nothing", then next elements control the force connection or disconnection + of the powerlines (new in version 1.10.0) + - "one_line_change": 1 single dimension. In this type of representation, you have `1 + n_line` possibility + for this element. First one is "do nothing" then it controls the change of status of + any given line (new in version 1.10.0). .. warning:: @@ -169,7 +177,25 @@ class __AuxMultiDiscreteActSpace: ATTR_NEEDBUILD = 2 ATTR_NEEDBINARIZED = 3 - def __init__(self, grid2op_action_space, attr_to_keep=ALL_ATTR, nb_bins=None): + def __init__(self, + grid2op_action_space: ActionSpace, + attr_to_keep: Optional[Tuple[Literal["set_line_status"], + Literal["change_line_status"], + Literal["set_bus"], + Literal["sub_set_bus"], + Literal["one_sub_set"], + Literal["change_bus"], + Literal["sub_change_bus"], + Literal["one_sub_change"], + Literal["redispatch"], + Literal["set_storage"], + Literal["curtail"], + Literal["curtail_mw"], + Literal["one_line_set"], + Literal["one_line_change"], + ]]=ALL_ATTR, + nb_bins: Dict[Literal["redispatch", "set_storage", "curtail", "curtail_mw"], int]=None + ): check_gym_version(type(self)._gymnasium) if not isinstance(grid2op_action_space, ActionSpace): raise RuntimeError( @@ -188,7 +214,6 @@ def __init__(self, grid2op_action_space, attr_to_keep=ALL_ATTR, nb_bins=None): attr_to_keep = { el for el in attr_to_keep if grid2op_action_space.supports_type(el) } - for el in attr_to_keep: if el not in ATTR_DISCRETE: warnings.warn( @@ -201,7 +226,7 @@ def __init__(self, grid2op_action_space, attr_to_keep=ALL_ATTR, nb_bins=None): self._attr_to_keep = sorted(attr_to_keep) - act_sp = grid2op_action_space + act_sp = type(grid2op_action_space) self._act_space = copy.deepcopy(grid2op_action_space) low_gen = -1.0 * act_sp.gen_max_ramp_down @@ -214,96 +239,108 @@ def __init__(self, grid2op_action_space, attr_to_keep=ALL_ATTR, nb_bins=None): "set_line_status": ( [3 for _ in range(act_sp.n_line)], act_sp.n_line, - self.ATTR_SET, + type(self).ATTR_SET, ), "change_line_status": ( [2 for _ in range(act_sp.n_line)], act_sp.n_line, - self.ATTR_CHANGE, + type(self).ATTR_CHANGE, ), "set_bus": ( - [4 for _ in range(act_sp.dim_topo)], + [2 + act_sp.n_busbar_per_sub for _ in range(act_sp.dim_topo)], act_sp.dim_topo, - self.ATTR_SET, + type(self).ATTR_SET, ), "change_bus": ( [2 for _ in range(act_sp.dim_topo)], act_sp.dim_topo, - self.ATTR_CHANGE, + type(self).ATTR_CHANGE, ), "raise_alarm": ( [2 for _ in range(act_sp.dim_alarms)], act_sp.dim_alarms, - self.ATTR_CHANGE, + type(self).ATTR_CHANGE, ), "raise_alert": ( [2 for _ in range(act_sp.dim_alerts)], act_sp.dim_alerts, - self.ATTR_CHANGE, + type(self).ATTR_CHANGE, ), "sub_set_bus": ( None, act_sp.n_sub, - self.ATTR_NEEDBUILD, - ), # dimension will be computed on the fly, if the stuff is used + type(self).ATTR_NEEDBUILD, + ), # dimension will be computed on the fly, if the kwarg is used "sub_change_bus": ( None, act_sp.n_sub, - self.ATTR_NEEDBUILD, - ), # dimension will be computed on the fly, if the stuff is used + type(self).ATTR_NEEDBUILD, + ), # dimension will be computed on the fly, if the kwarg is used "one_sub_set": ( None, 1, - self.ATTR_NEEDBUILD, - ), # dimension will be computed on the fly, if the stuff is used + type(self).ATTR_NEEDBUILD, + ), # dimension will be computed on the fly, if the kwarg is used "one_sub_change": 
( None, 1, - self.ATTR_NEEDBUILD, - ), # dimension will be computed on the fly, if the stuff is used + type(self).ATTR_NEEDBUILD, + ), # dimension will be computed on the fly, if the kwarg is used + "one_line_set": ( + None, + 1, + type(self).ATTR_NEEDBUILD, + ), # dimension will be computed on the fly, if the kwarg is used + "one_line_change": ( + None, + 1, + type(self).ATTR_NEEDBUILD, + ), # dimension will be computed on the fly, if the kwarg is used } self._nb_bins = nb_bins for el in ["redispatch", "set_storage", "curtail", "curtail_mw"]: - if el in attr_to_keep: - if el not in nb_bins: - raise RuntimeError( - f'The attribute you want to keep "{el}" is not present in the ' - f'"nb_bins". This attribute is continuous, you have to specify in how ' - f"how to convert it to a discrete space. See the documentation " - f"for more information." - ) - nb_redispatch = act_sp.gen_redispatchable.sum() - nb_renew = act_sp.gen_renewable.sum() - if el == "redispatch": - self.dict_properties[el] = ( - [nb_bins[el] for _ in range(nb_redispatch)], - nb_redispatch, - self.ATTR_NEEDBINARIZED, - ) - elif el == "curtail" or el == "curtail_mw": - self.dict_properties[el] = ( - [nb_bins[el] for _ in range(nb_renew)], - nb_renew, - self.ATTR_NEEDBINARIZED, - ) - elif el == "set_storage": - self.dict_properties[el] = ( - [nb_bins[el] for _ in range(act_sp.n_storage)], - act_sp.n_storage, - self.ATTR_NEEDBINARIZED, - ) - else: - raise RuntimeError(f'Unknown attribute "{el}"') - + self._aux_check_continuous_elements(el, attr_to_keep, nb_bins, act_sp) + self._dims = None self._functs = None # final functions that is applied to the gym action to map it to a grid2Op action - self._binarizers = None # contains all the stuff to binarize the data + self._binarizers = None # contains all the kwarg to binarize the data self._types = None nvec = self._get_info() - # initialize the base container type(self)._MultiDiscreteType.__init__(self, nvec=nvec) + def _aux_check_continuous_elements(self, el, attr_to_keep, nb_bins, act_sp): + if el in attr_to_keep: + if el not in nb_bins: + raise RuntimeError( + f'The attribute you want to keep "{el}" is not present in the ' + f'"nb_bins". This attribute is continuous, you have to specify in how ' + f"how to convert it to a discrete space. See the documentation " + f"for more information." 
+ ) + nb_redispatch = act_sp.gen_redispatchable.sum() + nb_renew = act_sp.gen_renewable.sum() + if el == "redispatch": + self.dict_properties[el] = ( + [nb_bins[el] for _ in range(nb_redispatch)], + nb_redispatch, + self.ATTR_NEEDBINARIZED, + ) + elif el == "curtail" or el == "curtail_mw": + self.dict_properties[el] = ( + [nb_bins[el] for _ in range(nb_renew)], + nb_renew, + self.ATTR_NEEDBINARIZED, + ) + elif el == "set_storage": + self.dict_properties[el] = ( + [nb_bins[el] for _ in range(act_sp.n_storage)], + act_sp.n_storage, + self.ATTR_NEEDBINARIZED, + ) + else: + raise Grid2OpException(f'Unknown attribute "{el}"') + @staticmethod def _funct_set(vect): # gym encodes: @@ -415,22 +452,41 @@ def _get_info(self): funct = self._funct_substations elif el == "one_sub_set": # an action change only one substation, using "set" - self._sub_modifiers[ - el - ] = self._act_space.get_all_unitary_topologies_set( + self._sub_modifiers[el] = [self._act_space()] + self._sub_modifiers[el] += self._act_space.get_all_unitary_topologies_set( self._act_space ) funct = self._funct_one_substation nvec_ = [len(self._sub_modifiers[el])] elif el == "one_sub_change": # an action change only one substation, using "change" + self._sub_modifiers[el] = [self._act_space()] self._sub_modifiers[ el - ] = self._act_space.get_all_unitary_topologies_change( + ] += self._act_space.get_all_unitary_topologies_change( self._act_space ) funct = self._funct_one_substation nvec_ = [len(self._sub_modifiers[el])] + elif el == "one_line_set": + # an action change only one substation, using "change" + self._sub_modifiers[el] = [self._act_space()] + tmp = [] + for l_id in range(type(self._act_space).n_line): + tmp.append(self._act_space({"set_line_status": [(l_id, +1)]})) + tmp.append(self._act_space({"set_line_status": [(l_id, -1)]})) + self._sub_modifiers[el] += tmp + funct = self._funct_one_substation + nvec_ = [len(self._sub_modifiers[el])] + elif el == "one_line_change": + # an action change only one substation, using "change" + self._sub_modifiers[el] = [self._act_space()] + tmp = [] + for l_id in range(type(self._act_space).n_line): + tmp.append(self._act_space({"change_line_status": [l_id]})) + self._sub_modifiers[el] += tmp + funct = self._funct_one_substation + nvec_ = [len(self._sub_modifiers[el])] else: raise RuntimeError( f'Unsupported attribute "{el}" when dealing with ' @@ -473,7 +529,7 @@ def _handle_attribute(self, res, gym_act_this, attr_nm, funct, type_): """ # TODO code that ! 
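# Illustrative usage sketch for the new "one_line_set" attribute handled in
# _get_info above. The environment name and the bin count are assumptions for
# the example only.
import grid2op
from grid2op.gym_compat import GymEnv, MultiDiscreteActSpace

env = grid2op.make("l2rpn_case14_sandbox")
gym_env = GymEnv(env)

# one extra dimension with 1 + 2 * n_line choices (do nothing, or force the
# reconnection / disconnection of exactly one powerline), plus binned redispatch
gym_env.action_space = MultiDiscreteActSpace(
    env.action_space,
    attr_to_keep=["one_line_set", "redispatch"],
    nb_bins={"redispatch": 7},
)
print(gym_env.action_space.nvec)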
vect = 1 * gym_act_this - if type_ == self.ATTR_NEEDBUILD: + if type_ == type(self).ATTR_NEEDBUILD: funct(res, attr_nm, vect) else: tmp = funct(vect) diff --git a/grid2op/gym_compat/utils.py b/grid2op/gym_compat/utils.py index 2e42adac1..4374ae4a1 100644 --- a/grid2op/gym_compat/utils.py +++ b/grid2op/gym_compat/utils.py @@ -29,7 +29,13 @@ GYMNASIUM_AVAILABLE = True except ImportError: GYMNASIUM_AVAILABLE = False - + +try: + from gymnasium.core import ObsType, ActType +except ImportError: + from typing import TypeVar + ObsType = TypeVar("ObsType") + ActType = TypeVar("ActType") _MIN_GYM_VERSION = version.parse("0.17.2") # this is the last gym version to use the "old" numpy prng @@ -69,6 +75,8 @@ "sub_change_bus", "one_sub_set", "one_sub_change", + "one_line_set", + "one_line_change" ) ALL_ATTR_CONT = ( @@ -96,18 +104,19 @@ def _compute_extra_power_for_losses(gridobj): to handle the "because of the power losses gen_pmin and gen_pmax can be slightly altered" """ import numpy as np - + if isinstance(gridobj, dict): + return 0.3*np.abs(gridobj["gen_pmax"]).sum() return 0.3 * np.abs(gridobj.gen_pmax).sum() def sample_seed(max_, np_random): """sample a seed based on gym version (np_random has not always the same behaviour)""" if GYM_VERSION <= _MAX_GYM_VERSION_RANDINT: - if hasattr(np_random, "randint"): + if hasattr(np_random, "integers"): + seed_ = int(np_random.integers(0, max_)) + else: # old gym behaviour seed_ = np_random.randint(max_) - else: - seed_ = int(np_random.integers(0, max_)) else: # gym finally use most recent numpy random generator seed_ = int(np_random.integers(0, max_)) diff --git a/grid2op/l2rpn_utils/wcci_2020.py b/grid2op/l2rpn_utils/wcci_2020.py index 9293326f6..3636fbfa0 100644 --- a/grid2op/l2rpn_utils/wcci_2020.py +++ b/grid2op/l2rpn_utils/wcci_2020.py @@ -18,50 +18,48 @@ class ActionWCCI2020(PlayableAction): "redispatch", } - attr_list_vect = [ - "_set_line_status", - "_switch_line_status", - "_set_topo_vect", - "_change_bus_vect", - '_redispatch' - ] + attr_list_vect = ['_set_line_status', + '_set_topo_vect', + '_change_bus_vect', + '_switch_line_status', + '_redispatch'] attr_list_set = set(attr_list_vect) pass class ObservationWCCI2020(CompleteObservation): attr_list_vect = [ - 'year', - 'month', - 'day', - 'hour_of_day', - 'minute_of_hour', - 'day_of_week', - "gen_p", - "gen_q", - "gen_v", - 'load_p', - 'load_q', - 'load_v', - 'p_or', - 'q_or', - 'v_or', - 'a_or', - 'p_ex', - 'q_ex', - 'v_ex', - 'a_ex', - 'rho', - 'line_status', - 'timestep_overflow', - 'topo_vect', - 'time_before_cooldown_line', - 'time_before_cooldown_sub', - 'time_next_maintenance', - 'duration_next_maintenance', - 'target_dispatch', - 'actual_dispatch' - ] + "year", + "month", + "day", + "hour_of_day", + "minute_of_hour", + "day_of_week", + "gen_p", + "gen_q", + "gen_v", + "load_p", + "load_q", + "load_v", + "p_or", + "q_or", + "v_or", + "a_or", + "p_ex", + "q_ex", + "v_ex", + "a_ex", + "rho", + "line_status", + "timestep_overflow", + "topo_vect", + "time_before_cooldown_line", + "time_before_cooldown_sub", + "time_next_maintenance", + "duration_next_maintenance", + "target_dispatch", + "actual_dispatch" + ] attr_list_json = [ "storage_charge", "storage_power_target", diff --git a/grid2op/simulator/simulator.py b/grid2op/simulator/simulator.py index 41dd719e9..8f5ba6943 100644 --- a/grid2op/simulator/simulator.py +++ b/grid2op/simulator/simulator.py @@ -7,6 +7,11 @@ # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. 
import copy from typing import Optional, Tuple +try: + from typing import Self +except ImportError: + from typing_extensions import Self + import numpy as np import os from scipy.optimize import minimize @@ -70,7 +75,7 @@ def __init__( f"inheriting from BaseEnv" ) if env.backend._can_be_copied: - self.backend = env.backend.copy() + self.backend: Backend = env.backend.copy() else: raise SimulatorError("Impossible to make a Simulator when you " "cannot copy the backend of the environment.") @@ -100,7 +105,7 @@ def converged(self) -> bool: def converged(self, values): raise SimulatorError("Cannot set this property.") - def copy(self) -> "Simulator": + def copy(self) -> Self: """Allows to perform a (deep) copy of the simulator. Returns @@ -126,7 +131,7 @@ def copy(self) -> "Simulator": res._highres_sim_counter = self._highres_sim_counter return res - def change_backend(self, backend: Backend): + def change_backend(self, backend: Backend) -> None: """You can use this function in case you want to change the "solver" use to perform the computation. For example, you could use a machine learning based model to do the computation (to accelerate them), provided @@ -311,7 +316,7 @@ def _adjust_controlable_gen( # which generators needs to be "optimized" -> the one where # the target function matter - gen_in_target = target_dispatch[self.current_obs.gen_redispatchable] != 0.0 + gen_in_target = np.abs(target_dispatch[self.current_obs.gen_redispatchable]) >= 1e-7 # compute the upper / lower bounds for the generators dispatchable = new_gen_p[self.current_obs.gen_redispatchable] @@ -339,7 +344,7 @@ def _adjust_controlable_gen( scale_objective = np.round(scale_objective, decimals=4) tmp_zeros = np.zeros((1, nb_dispatchable), dtype=float) - + # wrap everything into the proper scipy form def target(actual_dispatchable): # define my real objective @@ -398,7 +403,7 @@ def f(init): # the idea here is to chose a initial point that would be close to the # desired solution (split the (sum of the) dispatch to the available generators) x0 = 1.0 * target_dispatch_redisp - can_adjust = x0 == 0.0 + can_adjust = np.abs(x0) <= 1e-7 if (can_adjust).any(): init_sum = x0.sum() denom_adjust = (1.0 / weights[can_adjust]).sum() @@ -407,7 +412,7 @@ def f(init): denom_adjust = 1.0 x0[can_adjust] = -init_sum / (weights[can_adjust] * denom_adjust) - res = f(x0) + res = f(x0.astype(float)) if res.success: return res.x else: @@ -475,8 +480,8 @@ def _fix_redisp_curtailment_storage( target_dispatch = self.current_obs.target_dispatch + act.redispatch # if previous setpoint was say -2 and at this step I redispatch of # say + 4 then the real setpoint should be +2 (and not +4) - new_vect_redisp = (act.redispatch != 0.0) & ( - self.current_obs.target_dispatch == 0.0 + new_vect_redisp = (np.abs(act.redispatch) >= 1e-7) & ( + np.abs(self.current_obs.target_dispatch) <= 1e-7 ) target_dispatch[new_vect_redisp] += self.current_obs.actual_dispatch[ new_vect_redisp diff --git a/grid2op/tests/BaseBackendTest.py b/grid2op/tests/BaseBackendTest.py index ad24c2ca6..3ffbea5d8 100644 --- a/grid2op/tests/BaseBackendTest.py +++ b/grid2op/tests/BaseBackendTest.py @@ -63,7 +63,7 @@ def comb(n, k): from grid2op.Rules import RulesChecker from grid2op.Rules import AlwaysLegal from grid2op.Action._backendAction import _BackendAction -from grid2op.Backend import Backend, PandaPowerBackend +from grid2op.Backend import PandaPowerBackend import pdb @@ -97,6 +97,7 @@ def get_casefile(self): return "test_case14.json" def test_load_file(self): + self.skip_if_needed() 
backend = self.make_backend_with_glue_code() path_matpower = self.get_path() case_file = self.get_casefile() @@ -177,8 +178,8 @@ def test_load_file(self): assert np.all(backend.get_topo_vect() == np.ones(np.sum(backend.sub_info))) - conv = backend.runpf() - assert conv, "powerflow diverge it is not supposed to!" + conv, *_ = backend.runpf() + assert conv, f"powerflow diverge it is not supposed to! Error {_}" with warnings.catch_warnings(): warnings.filterwarnings("ignore") @@ -199,8 +200,8 @@ def test_assert_grid_correct(self): backend.load_grid(path_matpower, case_file) type(backend).set_env_name("TestLoadingCase_env2_test_assert_grid_correct") backend.assert_grid_correct() - conv = backend.runpf() - assert conv, "powerflow diverge it is not supposed to!" + conv, *_ = backend.runpf() + assert conv, f"powerflow diverge it is not supposed to! Error {_}" backend.assert_grid_correct_after_powerflow() @@ -262,8 +263,8 @@ def test_theta_ok(self): def test_runpf_dc(self): self.skip_if_needed() - conv = self.backend.runpf(is_dc=True) - assert conv + conv, *_ = self.backend.runpf(is_dc=True) + assert conv, f"powerflow diverge with error {_}" true_values_dc = np.array( [ 147.83859556, @@ -317,7 +318,8 @@ def test_runpf(self): 2.80741759e01, ] ) - conv = self.backend.runpf(is_dc=False) + conv, *_ = self.backend.runpf(is_dc=False) + assert conv, f"powerflow diverge with error {_}" assert conv p_or, *_ = self.backend.lines_or_info() assert self.compare_vect(p_or, true_values_ac) @@ -325,8 +327,8 @@ def test_runpf(self): def test_voltage_convert_powerlines(self): self.skip_if_needed() # i have the correct voltages in powerlines if the formula to link mw, mvar, kv and amps is correct - conv = self.backend.runpf(is_dc=False) - assert conv, "powerflow diverge at loading" + conv, *_ = self.backend.runpf(is_dc=False) + assert conv, f"powerflow diverge at loading with error {_}" p_or, q_or, v_or, a_or = self.backend.lines_or_info() a_th = np.sqrt(p_or**2 + q_or**2) * 1e3 / (np.sqrt(3) * v_or) @@ -341,15 +343,15 @@ def test_voltages_correct_load_gen(self): # i have the right voltages to generators and load, if it's the same as the voltage (correct from the above test) # of the powerline connected to it. 
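Throughout these tests, `conv = backend.runpf()` becomes `conv, *_ = backend.runpf()` so that the exception returned alongside the convergence flag can be surfaced in the assert message. A standalone sketch of the pattern (the `runpf` stub is illustrative; per the backend interface tests later in this diff, a real backend returns `(converged, exception_or_None)`):

    def runpf(is_dc=False):
        # illustrative stub mimicking a grid2op Backend: (converged, exception_or_None)
        return True, None

    conv, *_ = runpf(is_dc=True)   # `_` collects the trailing element(s) as a list
    assert conv, f"powerflow diverged with error {_}"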
- conv = self.backend.runpf(is_dc=False) - assert conv, "powerflow diverge at loading" + conv, *_ = self.backend.runpf(is_dc=False) + assert conv, f"powerflow diverge at loading with error {_}" load_p, load_q, load_v = self.backend.loads_info() gen_p, gen__q, gen_v = self.backend.generators_info() p_or, q_or, v_or, a_or = self.backend.lines_or_info() p_ex, q_ex, v_ex, a_ex = self.backend.lines_ex_info() for c_id, sub_id in enumerate(self.backend.load_to_subid): - l_ids = np.where(self.backend.line_or_to_subid == sub_id)[0] + l_ids = np.nonzero(self.backend.line_or_to_subid == sub_id)[0] if len(l_ids): l_id = l_ids[0] assert ( @@ -357,7 +359,7 @@ def test_voltages_correct_load_gen(self): ), "problem for load {}".format(c_id) continue - l_ids = np.where(self.backend.line_ex_to_subid == sub_id)[0] + l_ids = np.nonzero(self.backend.line_ex_to_subid == sub_id)[0] if len(l_ids): l_id = l_ids[0] assert ( @@ -367,7 +369,7 @@ def test_voltages_correct_load_gen(self): assert False, "load {} has not been checked".format(c_id) for g_id, sub_id in enumerate(self.backend.gen_to_subid): - l_ids = np.where(self.backend.line_or_to_subid == sub_id)[0] + l_ids = np.nonzero(self.backend.line_or_to_subid == sub_id)[0] if len(l_ids): l_id = l_ids[0] assert ( @@ -375,7 +377,7 @@ def test_voltages_correct_load_gen(self): ), "problem for generator {}".format(g_id) continue - l_ids = np.where(self.backend.line_ex_to_subid == sub_id)[0] + l_ids = np.nonzero(self.backend.line_ex_to_subid == sub_id)[0] if len(l_ids): l_id = l_ids[0] assert ( @@ -384,33 +386,37 @@ def test_voltages_correct_load_gen(self): continue assert False, "generator {} has not been checked".format(g_id) - def test_copy(self): + def test_copy_ac(self, is_dc=False): self.skip_if_needed() - conv = self.backend.runpf(is_dc=False) - assert conv, "powerflow diverge at loading" + conv, *_ = self.backend.runpf(is_dc=is_dc) + assert conv, f"powerflow diverge at loading with error {_}" l_id = 3 p_or_orig, *_ = self.backend.lines_or_info() - adn_backend_cpy = self.backend.copy() + backend_cpy = self.backend.copy() self.backend._disconnect_line(l_id) - conv = self.backend.runpf(is_dc=False) - assert conv - conv2 = adn_backend_cpy.runpf(is_dc=False) - assert conv2 + conv, *_ = self.backend.runpf(is_dc=is_dc) + assert conv, f"original backend diverged with error {_}" + conv2 = backend_cpy.runpf(is_dc=is_dc) + assert conv2, f"copied backend diverged with error {_}" p_or_ref, *_ = self.backend.lines_or_info() - p_or, *_ = adn_backend_cpy.lines_or_info() + p_or, *_ = backend_cpy.lines_or_info() assert self.compare_vect( p_or_orig, p_or ), "the copied object affects its original 'parent'" assert ( np.abs(p_or_ref[l_id]) <= self.tol_one - ), "powerline {} has not been disconnected".format(l_id) + ), "powerline {} has not been disconnected in orig backend".format(l_id) + + def test_copy_dc(self): + self.skip_if_needed() + self.test_copy_ac(True) def test_copy2(self): self.skip_if_needed() self.backend._disconnect_line(8) - conv = self.backend.runpf(is_dc=False) + conv, *_ = self.backend.runpf(is_dc=False) p_or_orig, *_ = self.backend.lines_or_info() adn_backend_cpy = self.backend.copy() @@ -520,12 +526,12 @@ def test_pf_ac_dc(self): 5.77869057, ] ) - conv = self.backend.runpf(is_dc=True) - assert conv + conv, *_ = self.backend.runpf(is_dc=True) + assert conv, f"error {_}" p_or_orig, q_or_orig, *_ = self.backend.lines_or_info() assert np.all(q_or_orig == 0.0), "in dc mode all q must be zero" - conv = self.backend.runpf(is_dc=False) - assert conv + conv, *_ = 
self.backend.runpf(is_dc=False) + assert conv, f"error {_}" p_or_orig, q_or_orig, *_ = self.backend.lines_or_info() assert self.compare_vect(q_or_orig, true_values_ac) @@ -567,11 +573,11 @@ def test_disconnect_line(self): continue backend_cpy = self.backend.copy() backend_cpy._disconnect_line(i) - conv = backend_cpy.runpf() + conv, *_ = backend_cpy.runpf() assert ( conv - ), "Power flow computation does not converge if line {} is removed".format( - i + ), "Power flow computation does not converge if line {} is removed with error ".format( + i, _ ) flows = backend_cpy.get_line_status() assert not flows[i] @@ -579,7 +585,8 @@ def test_disconnect_line(self): def test_donothing_action(self): self.skip_if_needed() - conv = self.backend.runpf() + conv, *_ = self.backend.runpf() + assert conv, f"error {_}" init_flow = self.backend.get_line_flow() init_lp, *_ = self.backend.loads_info() init_gp, *_ = self.backend.generators_info() @@ -596,8 +603,8 @@ def test_donothing_action(self): # assert self.compare_vect(init_gp, after_gp) # check i didn't modify the generators # TODO here !!! problem with steady state P=C+L assert np.all(init_ls == after_ls) # check i didn't disconnect any powerlines - conv = self.backend.runpf() - assert conv, "Cannot perform a powerflow after doing nothing" + conv, *_ = self.backend.runpf() + assert conv, f"Cannot perform a powerflow after doing nothing with error {_}" after_flow = self.backend.get_line_flow() assert self.compare_vect(init_flow, after_flow) @@ -608,8 +615,8 @@ def test_apply_action_active_value(self): # also multiply by 2 # i set up the stuff to have exactly 0 losses - conv = self.backend.runpf(is_dc=True) - assert conv, "powergrid diverge after loading (even in DC)" + conv, *_ = self.backend.runpf(is_dc=True) + assert conv, f"powergrid diverge after loading (even in DC) with error {_}" init_flow, *_ = self.backend.lines_or_info() init_lp, init_l_q, *_ = self.backend.loads_info() init_gp, *_ = self.backend.generators_info() @@ -623,7 +630,8 @@ def test_apply_action_active_value(self): bk_action = self.bkact_class() bk_action += action self.backend.apply_action(bk_action) - conv = self.backend.runpf(is_dc=True) + conv, *_ = self.backend.runpf(is_dc=True) + assert conv, f"powergrid diverge with error {_}" # now the system has exactly 0 losses (ie sum load = sum gen) # i check that if i divide by 2, then everything is divided by 2 @@ -641,8 +649,8 @@ def test_apply_action_active_value(self): bk_action = self.bkact_class() bk_action += action self.backend.apply_action(bk_action) - conv = self.backend.runpf(is_dc=True) - assert conv, "Cannot perform a powerflow after doing nothing" + conv, *_ = self.backend.runpf(is_dc=True) + assert conv, "Cannot perform a powerflow after doing nothing (dc)" after_lp, after_lq, *_ = self.backend.loads_info() after_gp, *_ = self.backend.generators_info() @@ -656,10 +664,10 @@ def test_apply_action_active_value(self): # i'm in DC mode, i can't check for reactive values... 
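The repeated `np.where(mask)[0]` to `np.nonzero(mask)[0]` swaps in these tests are behaviour-preserving: for a single boolean mask the two calls return the same index array, and `nonzero` avoids any ambiguity with the three-argument form of `where`. A quick check (array values are made up):

    import numpy as np

    line_or_to_subid = np.array([0, 1, 1, 3, 1])
    sub_id = 1
    assert np.array_equal(np.where(line_or_to_subid == sub_id)[0],
                          np.nonzero(line_or_to_subid == sub_id)[0])   # both give [1, 2, 4]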
assert ( np.max(np.abs(p_subs)) <= self.tolvect - ), "problem with active values, at substation" + ), "problem with active values, at substation (kirchoff for DC)" assert ( np.max(np.abs(p_bus.flatten())) <= self.tolvect - ), "problem with active values, at a bus" + ), "problem with active values, at a bus (kirchoff for DC)" assert self.compare_vect( new_pp, after_gp @@ -673,8 +681,8 @@ def test_apply_action_active_value(self): def test_apply_action_prod_v(self): self.skip_if_needed() - conv = self.backend.runpf(is_dc=False) - assert conv, "powergrid diverge after loading" + conv, *_ = self.backend.runpf(is_dc=False) + assert conv, f"powergrid diverge after loading with error {_}" prod_p_init, prod_q_init, prod_v_init = self.backend.generators_info() ratio = 1.05 action = self.action_env( @@ -683,8 +691,8 @@ def test_apply_action_prod_v(self): bk_action = self.bkact_class() bk_action += action self.backend.apply_action(bk_action) - conv = self.backend.runpf(is_dc=False) - assert conv, "Cannot perform a powerflow after modifying the powergrid" + conv, *_ = self.backend.runpf(is_dc=False) + assert conv, f"Cannot perform a powerflow after modifying the powergrid with error {_}" prod_p_after, prod_q_after, prod_v_after = self.backend.generators_info() assert self.compare_vect( @@ -694,7 +702,8 @@ def test_apply_action_prod_v(self): def test_apply_action_maintenance(self): self.skip_if_needed() # retrieve some initial data to be sure only a subpart of the _grid is modified - conv = self.backend.runpf() + conv, *_ = self.backend.runpf() + assert conv, f"powerflow diverge with , error: {_}" init_lp, *_ = self.backend.loads_info() init_gp, *_ = self.backend.generators_info() @@ -709,8 +718,8 @@ def test_apply_action_maintenance(self): self.backend.apply_action(bk_action) # compute a load flow an performs more tests - conv = self.backend.runpf() - assert conv, "Power does not converge if line {} is removed".format(19) + conv, *_ = self.backend.runpf() + assert conv, "Power does not converge if line {} is removed with error {}".format(19, _) # performs basic check after_lp, *_ = self.backend.loads_info() @@ -728,8 +737,8 @@ def test_apply_action_maintenance(self): def test_apply_action_hazard(self): self.skip_if_needed() - conv = self.backend.runpf() - assert conv, "powerflow did not converge at iteration 0" + conv, *_ = self.backend.runpf() + assert conv, f"powerflow did not converge at iteration 0, with error {_}" init_lp, *_ = self.backend.loads_info() init_gp, *_ = self.backend.generators_info() @@ -743,8 +752,8 @@ def test_apply_action_hazard(self): self.backend.apply_action(bk_action) # compute a load flow an performs more tests - conv = self.backend.runpf() - assert conv, "Power does not converge if line {} is removed".format(19) + conv, *_ = self.backend.runpf() + assert conv, "Power does not converge if line {} is removed with error {}".format(19, _) # performs basic check after_lp, *_ = self.backend.loads_info() @@ -759,7 +768,8 @@ def test_apply_action_hazard(self): def test_apply_action_disconnection(self): self.skip_if_needed() # retrieve some initial data to be sure only a subpart of the _grid is modified - conv = self.backend.runpf() + conv, *_ = self.backend.runpf() + assert conv, f"powerflow diverge with , error: {_}" init_lp, *_ = self.backend.loads_info() init_gp, *_ = self.backend.generators_info() @@ -779,10 +789,10 @@ def test_apply_action_disconnection(self): self.backend.apply_action(bk_action) # compute a load flow an performs more tests - conv = self.backend.runpf() + conv, 
*_ = self.backend.runpf() assert ( conv - ), "Powerflow does not converge if lines {} and {} are removed".format(17, 19) + ), "Powerflow does not converge if lines {} and {} are removed with error {}".format(17, 19, _) # performs basic check after_lp, *_ = self.backend.loads_info() @@ -846,7 +856,6 @@ def _check_kirchoff(self): assert ( np.max(np.abs(p_bus.flatten())) <= self.tolvect ), "problem with active values, at a bus" - if self.backend.shunts_data_available: assert ( np.max(np.abs(q_subs)) <= self.tolvect @@ -858,7 +867,8 @@ def _check_kirchoff(self): def test_get_topo_vect_speed(self): # retrieve some initial data to be sure only a subpart of the _grid is modified self.skip_if_needed() - conv = self.backend.runpf() + conv, *_ = self.backend.runpf() + assert conv, f"powerflow diverge with , error: {_}" init_amps_flow = self.backend.get_line_flow() # check that maintenance vector is properly taken into account @@ -869,8 +879,8 @@ def test_get_topo_vect_speed(self): bk_action += action # apply the action here self.backend.apply_action(bk_action) - conv = self.backend.runpf() - assert conv + conv, *_ = self.backend.runpf() + assert conv, f"powerflow diverge with , error: {_}" after_amps_flow = self.backend.get_line_flow() topo_vect = self.backend.get_topo_vect() @@ -940,7 +950,8 @@ def test_get_topo_vect_speed(self): def test_topo_set1sub(self): # retrieve some initial data to be sure only a subpart of the _grid is modified self.skip_if_needed() - conv = self.backend.runpf() + conv, *_ = self.backend.runpf() + assert conv, f"powerflow diverge with , error: {_}" init_amps_flow = self.backend.get_line_flow() # check that maintenance vector is properly taken into account @@ -952,8 +963,8 @@ def test_topo_set1sub(self): # apply the action here self.backend.apply_action(bk_action) - conv = self.backend.runpf() - assert conv + conv, *_ = self.backend.runpf() + assert conv, f"powerflow diverge with , error: {_}" after_amps_flow = self.backend.get_line_flow() topo_vect = self.backend.get_topo_vect() @@ -961,22 +972,22 @@ def test_topo_set1sub(self): assert np.max(topo_vect) == 2, "no buses have been changed" # check that the objects have been properly moved - load_ids = np.where(self.backend.load_to_subid == id_)[0] + load_ids = np.nonzero(self.backend.load_to_subid == id_)[0] assert np.all( topo_vect[self.backend.load_pos_topo_vect[load_ids]] == arr[self.backend.load_to_sub_pos[load_ids]] ) - lor_ids = np.where(self.backend.line_or_to_subid == id_)[0] + lor_ids = np.nonzero(self.backend.line_or_to_subid == id_)[0] assert np.all( topo_vect[self.backend.line_or_pos_topo_vect[lor_ids]] == arr[self.backend.line_or_to_sub_pos[lor_ids]] ) - lex_ids = np.where(self.backend.line_ex_to_subid == id_)[0] + lex_ids = np.nonzero(self.backend.line_ex_to_subid == id_)[0] assert np.all( topo_vect[self.backend.line_ex_pos_topo_vect[lex_ids]] == arr[self.backend.line_ex_to_sub_pos[lex_ids]] ) - gen_ids = np.where(self.backend.gen_to_subid == id_)[0] + gen_ids = np.nonzero(self.backend.gen_to_subid == id_)[0] assert np.all( topo_vect[self.backend.gen_pos_topo_vect[gen_ids]] == arr[self.backend.gen_to_sub_pos[gen_ids]] @@ -1037,7 +1048,8 @@ def test_topo_set1sub(self): def test_topo_change1sub(self): # check that switching the bus of 3 object is equivalent to set them to bus 2 (as above) self.skip_if_needed() - conv = self.backend.runpf() + conv, *_ = self.backend.runpf() + assert conv, f"powerflow diverge with , error: {_}" init_amps_flow = self.backend.get_line_flow() # check that maintenance vector is 
properly taken into account @@ -1050,8 +1062,8 @@ def test_topo_change1sub(self): self.backend.apply_action(bk_action) # run the powerflow - conv = self.backend.runpf() - assert conv + conv, *_ = self.backend.runpf() + assert conv, f"powerflow diverge with , error: {_}" after_amps_flow = self.backend.get_line_flow() topo_vect = self.backend.get_topo_vect() @@ -1059,22 +1071,22 @@ def test_topo_change1sub(self): assert np.max(topo_vect) == 2, "no buses have been changed" # check that the objects have been properly moved - load_ids = np.where(self.backend.load_to_subid == id_)[0] + load_ids = np.nonzero(self.backend.load_to_subid == id_)[0] assert np.all( topo_vect[self.backend.load_pos_topo_vect[load_ids]] == 1 + arr[self.backend.load_to_sub_pos[load_ids]] ) - lor_ids = np.where(self.backend.line_or_to_subid == id_)[0] + lor_ids = np.nonzero(self.backend.line_or_to_subid == id_)[0] assert np.all( topo_vect[self.backend.line_or_pos_topo_vect[lor_ids]] == 1 + arr[self.backend.line_or_to_sub_pos[lor_ids]] ) - lex_ids = np.where(self.backend.line_ex_to_subid == id_)[0] + lex_ids = np.nonzero(self.backend.line_ex_to_subid == id_)[0] assert np.all( topo_vect[self.backend.line_ex_pos_topo_vect[lex_ids]] == 1 + arr[self.backend.line_ex_to_sub_pos[lex_ids]] ) - gen_ids = np.where(self.backend.gen_to_subid == id_)[0] + gen_ids = np.nonzero(self.backend.gen_to_subid == id_)[0] assert np.all( topo_vect[self.backend.gen_pos_topo_vect[gen_ids]] == 1 + arr[self.backend.gen_to_sub_pos[gen_ids]] @@ -1111,7 +1123,8 @@ def test_topo_change_1sub_twice(self): # check that switching the bus of 3 object is equivalent to set them to bus 2 (as above) # and that setting it again is equivalent to doing nothing self.skip_if_needed() - conv = self.backend.runpf() + conv, *_ = self.backend.runpf() + assert conv, f"powerflow diverge with , error: {_}" init_amps_flow = copy.deepcopy(self.backend.get_line_flow()) # check that maintenance vector is properly taken into account @@ -1123,9 +1136,9 @@ def test_topo_change_1sub_twice(self): # apply the action here self.backend.apply_action(bk_action) - conv = self.backend.runpf() + conv, *_ = self.backend.runpf() bk_action.reset() - assert conv + assert conv, f"powerflow diverge with , error: {_}" after_amps_flow = self.backend.get_line_flow() topo_vect = self.backend.get_topo_vect() @@ -1133,22 +1146,22 @@ def test_topo_change_1sub_twice(self): assert np.max(topo_vect) == 2, "no buses have been changed" # check that the objects have been properly moved - load_ids = np.where(self.backend.load_to_subid == id_)[0] + load_ids = np.nonzero(self.backend.load_to_subid == id_)[0] assert np.all( topo_vect[self.backend.load_pos_topo_vect[load_ids]] == 1 + arr[self.backend.load_to_sub_pos[load_ids]] ) - lor_ids = np.where(self.backend.line_or_to_subid == id_)[0] + lor_ids = np.nonzero(self.backend.line_or_to_subid == id_)[0] assert np.all( topo_vect[self.backend.line_or_pos_topo_vect[lor_ids]] == 1 + arr[self.backend.line_or_to_sub_pos[lor_ids]] ) - lex_ids = np.where(self.backend.line_ex_to_subid == id_)[0] + lex_ids = np.nonzero(self.backend.line_ex_to_subid == id_)[0] assert np.all( topo_vect[self.backend.line_ex_pos_topo_vect[lex_ids]] == 1 + arr[self.backend.line_ex_to_sub_pos[lex_ids]] ) - gen_ids = np.where(self.backend.gen_to_subid == id_)[0] + gen_ids = np.nonzero(self.backend.gen_to_subid == id_)[0] assert np.all( topo_vect[self.backend.gen_pos_topo_vect[gen_ids]] == 1 + arr[self.backend.gen_to_sub_pos[gen_ids]] @@ -1186,8 +1199,8 @@ def test_topo_change_1sub_twice(self): # 
apply the action here self.backend.apply_action(bk_action) - conv = self.backend.runpf() - assert conv + conv, *_ = self.backend.runpf() + assert conv, f"powerflow diverge with error: {_}" after_amps_flow = self.backend.get_line_flow() assert self.compare_vect(after_amps_flow, init_amps_flow) @@ -1214,8 +1227,8 @@ def test_topo_change_2sub(self): # apply the action here self.backend.apply_action(bk_action) - conv = self.backend.runpf() - assert conv, "powerflow diverge it should not" + conv, *_ = self.backend.runpf() + assert conv, f"powerflow diverge it should not, error: {_}" # check the _grid is correct topo_vect = self.backend.get_topo_vect() @@ -1223,44 +1236,44 @@ def test_topo_change_2sub(self): assert np.max(topo_vect) == 2, "no buses have been changed" # check that the objects have been properly moved - load_ids = np.where(self.backend.load_to_subid == id_1)[0] + load_ids = np.nonzero(self.backend.load_to_subid == id_1)[0] assert np.all( topo_vect[self.backend.load_pos_topo_vect[load_ids]] == 1 + arr1[self.backend.load_to_sub_pos[load_ids]] ) - lor_ids = np.where(self.backend.line_or_to_subid == id_1)[0] + lor_ids = np.nonzero(self.backend.line_or_to_subid == id_1)[0] assert np.all( topo_vect[self.backend.line_or_pos_topo_vect[lor_ids]] == 1 + arr1[self.backend.line_or_to_sub_pos[lor_ids]] ) - lex_ids = np.where(self.backend.line_ex_to_subid == id_1)[0] + lex_ids = np.nonzero(self.backend.line_ex_to_subid == id_1)[0] assert np.all( topo_vect[self.backend.line_ex_pos_topo_vect[lex_ids]] == 1 + arr1[self.backend.line_ex_to_sub_pos[lex_ids]] ) - gen_ids = np.where(self.backend.gen_to_subid == id_1)[0] + gen_ids = np.nonzero(self.backend.gen_to_subid == id_1)[0] assert np.all( topo_vect[self.backend.gen_pos_topo_vect[gen_ids]] == 1 + arr1[self.backend.gen_to_sub_pos[gen_ids]] ) - load_ids = np.where(self.backend.load_to_subid == id_2)[0] + load_ids = np.nonzero(self.backend.load_to_subid == id_2)[0] # TODO check the topology symmetry assert np.all( topo_vect[self.backend.load_pos_topo_vect[load_ids]] == arr2[self.backend.load_to_sub_pos[load_ids]] ) - lor_ids = np.where(self.backend.line_or_to_subid == id_2)[0] + lor_ids = np.nonzero(self.backend.line_or_to_subid == id_2)[0] assert np.all( topo_vect[self.backend.line_or_pos_topo_vect[lor_ids]] == arr2[self.backend.line_or_to_sub_pos[lor_ids]] ) - lex_ids = np.where(self.backend.line_ex_to_subid == id_2)[0] + lex_ids = np.nonzero(self.backend.line_ex_to_subid == id_2)[0] assert np.all( topo_vect[self.backend.line_ex_pos_topo_vect[lex_ids]] == arr2[self.backend.line_ex_to_sub_pos[lex_ids]] ) - gen_ids = np.where(self.backend.gen_to_subid == id_2)[0] + gen_ids = np.nonzero(self.backend.gen_to_subid == id_2)[0] assert np.all( topo_vect[self.backend.gen_pos_topo_vect[gen_ids]] == arr2[self.backend.gen_to_sub_pos[gen_ids]] @@ -1684,8 +1697,8 @@ def test_next_grid_state_1overflow_envNoCF(self): self.backend.load_grid(self.path_matpower, case_file) type(self.backend).set_no_storage() self.backend.assert_grid_correct() - conv = self.backend.runpf() - assert conv, "powerflow should converge at loading" + conv, *_ = self.backend.runpf() + assert conv, f"powerflow should converge at loading, error: {_}" lines_flows_init = self.backend.get_line_flow() thermal_limit = 10 * lines_flows_init thermal_limit[self.id_first_line_disco] = ( @@ -1728,8 +1741,8 @@ def test_nb_timestep_overflow_disc0(self): self.backend.load_grid(self.path_matpower, case_file) type(self.backend).set_no_storage() self.backend.assert_grid_correct() - conv = self.backend.runpf() 
- assert conv, "powerflow should converge at loading" + conv, *_ = self.backend.runpf() + assert conv, f"powerflow should converge at loading, error: {_}" lines_flows_init = self.backend.get_line_flow() thermal_limit = 10 * lines_flows_init @@ -2178,11 +2191,13 @@ def tearDown(self): def test_reset_equals_reset(self): self.skip_if_needed() - # Reset backend1 with reset - self.env1.reset() - # Reset backend2 with reset - self.env2.reset() - self._compare_backends() + with warnings.catch_warnings(): + warnings.filterwarnings("error") + # Reset backend1 with reset + self.env1.reset() + # Reset backend2 with reset + self.env2.reset() + self._compare_backends() def _compare_backends(self): # Compare @@ -2728,7 +2743,7 @@ def test_issue_134(self): } ) obs, reward, done, info = env.step(action) - assert not done + assert not done, f"Episode should not have ended here, error : {info['exception']}" assert obs.line_status[LINE_ID] == False assert obs.topo_vect[obs.line_or_pos_topo_vect[LINE_ID]] == -1 assert obs.topo_vect[obs.line_ex_pos_topo_vect[LINE_ID]] == -1 diff --git a/grid2op/tests/BaseRedispTest.py b/grid2op/tests/BaseRedispTest.py index b6a4b6567..3fe3ea4e6 100644 --- a/grid2op/tests/BaseRedispTest.py +++ b/grid2op/tests/BaseRedispTest.py @@ -803,7 +803,7 @@ def test_dispatch_still_not_zero(self): assert np.all( obs.prod_p[0:2] <= obs.gen_pmax[0:2] ), "above pmax for ts {}".format(i) - except: + except Exception as exc_: pass assert np.all( obs.prod_p[0:2] >= -obs.gen_pmin[0:2] diff --git a/grid2op/tests/__init__.py b/grid2op/tests/__init__.py index 74d8f6a50..6885eeb44 100644 --- a/grid2op/tests/__init__.py +++ b/grid2op/tests/__init__.py @@ -5,5 +5,3 @@ # you can obtain one at http://mozilla.org/MPL/2.0/. # SPDX-License-Identifier: MPL-2.0 # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. - -__all__ = ["BaseBackendTest", "BaseIssuesTest", "BaseRedispTest"] diff --git a/grid2op/tests/_aux_test_gym_compat.py b/grid2op/tests/_aux_test_gym_compat.py index 66dcd9710..6f574b370 100644 --- a/grid2op/tests/_aux_test_gym_compat.py +++ b/grid2op/tests/_aux_test_gym_compat.py @@ -18,7 +18,7 @@ from grid2op.Action import PlayableAction from grid2op.gym_compat import GymActionSpace, GymObservationSpace -from grid2op.gym_compat import GymEnv +from grid2op.gym_compat import GymEnv # TODO GYMENV from grid2op.gym_compat import ContinuousToDiscreteConverter from grid2op.gym_compat import ScalerAttrConverter from grid2op.gym_compat import MultiToTupleConverter @@ -793,9 +793,8 @@ def setUp(self) -> None: action_class=PlayableAction, _add_to_name=type(self).__name__ ) - self.env.seed(0) - self.env.reset() # seed part ! - self.obs_env = self.env.reset() + self.env.reset() + self.obs_env = self.env.reset(seed=0, options={"time serie id": 0}) self.env_gym = self._aux_GymEnv_cls()(self.env) def test_assert_raises_creation(self): @@ -888,7 +887,7 @@ def test_scaling(self): assert observation_space._attr_to_keep == kept_attr assert len(obs_gym) == 17 # the substract are calibrated so that the maximum is really close to 0 - assert obs_gym.max() <= 0 + assert obs_gym.max() <= 0, f"{obs_gym.max()} should be 0." 
assert obs_gym.max() >= -0.5 def test_functs(self): @@ -1871,8 +1870,41 @@ def test_supported_keys_discrete(self): raise RuntimeError( f"Some property of the actions are not modified for attr {attr_nm}" ) - - + + def test_discrete_multidiscrete_set(self): + """test that discrete with only set_bus has the same number of actions as mmultidiscrete with one_sub_set""" + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env_gym.action_space = self._aux_DiscreteActSpace_cls()( + self.env.action_space, attr_to_keep=["set_bus"] + ) + n_disc = 1 * self.env_gym.action_space.n + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env_gym.action_space = self._aux_MultiDiscreteActSpace_cls()( + self.env.action_space, attr_to_keep=["one_sub_set"] + ) + n_multidisc = 1 * self.env_gym.action_space.nvec[0] + assert n_disc == n_multidisc, f"discrepency between discrete[set_bus] (size : {n_disc}) and multidisc[one_sub_set] (size {n_multidisc})" + + + def test_discrete_multidiscrete_change(self): + """test that discrete with only change_bus has the same number of actions as mmultidiscrete with one_sub_change""" + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env_gym.action_space = self._aux_DiscreteActSpace_cls()( + self.env.action_space, attr_to_keep=["change_bus"] + ) + n_disc = 1 * self.env_gym.action_space.n + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env_gym.action_space = self._aux_MultiDiscreteActSpace_cls()( + self.env.action_space, attr_to_keep=["one_sub_change"] + ) + n_multidisc = 1 * self.env_gym.action_space.nvec[0] + assert n_disc == n_multidisc, f"discrepency between discrete[change_bus] (size : {n_disc}) and multidisc[one_sub_change] (size {n_multidisc})" + + class _AuxTestGOObsInRange: def setUp(self) -> None: self._skip_if_no_gym() diff --git a/grid2op/tests/_aux_test_some_gym_issues.py b/grid2op/tests/_aux_test_some_gym_issues.py index 5534865f4..c1c065da3 100644 --- a/grid2op/tests/_aux_test_some_gym_issues.py +++ b/grid2op/tests/_aux_test_some_gym_issues.py @@ -19,7 +19,7 @@ from test_issue_379 import Issue379Tester from test_issue_407 import Issue407Tester from test_issue_418 import Issue418Tester -from test_gym_compat import (TestGymCompatModule, +from test_defaultgym_compat import (TestGymCompatModule, TestBoxGymObsSpace, TestBoxGymActSpace, TestMultiDiscreteGymActSpace, @@ -38,6 +38,15 @@ ) from test_timeOutEnvironment import TestTOEnvGym from test_pickling import TestMultiProc +from test_alert_gym_compat import * +from test_basic_env_ls import TestBasicEnvironmentGym +from test_gym_asynch_env import * +from test_l2rpn_idf_2023 import TestL2RPNIDF2023Tester +from test_MaskedEnvironment import TestMaskedEnvironmentGym +from test_multidiscrete_act_space import * +from test_n_busbar_per_sub import TestGym_3busbars, TestGym_1busbar +from test_timeOutEnvironment import TestTOEnvGym + if __name__ == "__main__": unittest.main() diff --git a/grid2op/tests/aaa_test_backend_interface.py b/grid2op/tests/aaa_test_backend_interface.py index e45361b04..b45bd3796 100644 --- a/grid2op/tests/aaa_test_backend_interface.py +++ b/grid2op/tests/aaa_test_backend_interface.py @@ -11,6 +11,7 @@ import warnings import grid2op from grid2op.Backend import Backend +from grid2op.dtypes import dt_int from grid2op.tests.helper_path_test import HelperTests, MakeBackend, PATH_DATA from grid2op.Exceptions import BackendError, Grid2OpException @@ -38,9 +39,9 @@ def aux_get_env_name(self): """do not run nor 
modify ! (used for this test class only)""" return "BasicTest_load_grid_" + type(self).__name__ - def aux_make_backend(self) -> Backend: + def aux_make_backend(self, n_busbar=2) -> Backend: """do not run nor modify ! (used for this test class only)""" - backend = self.make_backend_with_glue_code() + backend = self.make_backend_with_glue_code(n_busbar=n_busbar) backend.load_grid(self.get_path(), self.get_casefile()) backend.load_redispacthing_data("tmp") # pretend there is no generator backend.load_storage_data(self.get_path()) @@ -86,17 +87,45 @@ def test_01load_grid(self): backend.close() backend = self.make_backend() + backend.env_name = "BasicTest_load_grid2_" + type(self).__name__ backend.load_grid(os.path.join(self.get_path(), self.get_casefile())) # first argument filled, second None backend.load_redispacthing_data(self.get_path()) backend.load_storage_data(self.get_path()) - backend.env_name = "BasicTest_load_grid2_" + type(self).__name__ backend.assert_grid_correct() backend.close() backend = self.make_backend() with self.assertRaises(Exception): backend.load_grid() # should raise if nothing is loaded + + if backend.shunts_data_available and not cls.shunts_data_available: + raise RuntimeError("You backend object inform grid2op that it supports shunt, but the class apparently does not. " + "Have you called `self._compute_pos_big_topo()` at the end of `load_grid` implementation ?") + if not backend.shunts_data_available and cls.shunts_data_available: + raise RuntimeError("You backend object inform grid2op that it does not support shunt, but the class apparently does. " + "Have you called `self._compute_pos_big_topo()` at the end of `load_grid` implementation ?") + if not backend.shunts_data_available: + # object does not support shunts + assert not cls.shunts_data_available + assert cls.n_shunt is None, f"Your backend does not support shunt, the class should not define `n_shunt` (cls.n_shunt should be None and not {cls.n_shunt})" + assert cls.name_shunt is None, f"Your backend does not support shunt, the class should not define `name_shunt` (cls.name_shunt should be None and not {cls.name_shunt})" + assert cls.shunt_to_subid is None, f"Your backend does not support shunt, the class should not define `shunt_to_subid` (cls.shunt_to_subid should be None and not {cls.shunt_to_subid})" + assert backend.n_shunt is None, f"Your backend does not support shunt, backend.n_shunt should be None and not {backend.n_shunt}" + assert backend.name_shunt is None, f"Your backend does not support shunt, backend.name_shunt should be None {backend.name_shunt}" + assert backend.shunt_to_subid is None, f"Your backend does not support shunt, backend.shunt_to_subid should be None {backend.shunt_to_subid}" + else: + # object does support shunts + assert cls.shunts_data_available + assert isinstance(cls.n_shunt, (int, dt_int)), f"Your backend does not support shunt, the class should define `n_shunt`as an int, found {cls.n_shunt} ({type(cls.n_shunt)})" + assert cls.name_shunt is not None, f"Your backend does not support shunt, the class should define `name_shunt` (cls.name_shunt should not be None)" + assert cls.shunt_to_subid is not None, f"Your backend does not support shunt, the class should define `shunt_to_subid` (cls.shunt_to_subid should not be None)" + # these attributes are "deleted" from the backend instance + # and only stored in the class + # assert isinstance(backend.n_shunt, (int, dt_int)), f"Your backend does support shunt, `backend.n_shunt` should be an int, found {backend.n_shunt} 
({type(backend.n_shunt)})" + # assert backend.name_shunt is not None, f"Your backend does not support shunt, backend.name_shunt should not be None" + # assert backend.shunt_to_subid is not None, f"Your backend does not support shunt, backend.shunt_to_subid should not be None" + def test_02modify_load(self): """Tests the loads can be modified @@ -405,7 +434,7 @@ def test_11_modify_load_pf_getter(self): backend.apply_action(bk_act) # modification of load_p, load_q and gen_p res2 = backend.runpf(is_dc=False) - assert res2[0], "backend should not have diverge after such a little perturbation" + assert res2[0], f"backend should not have diverged after such a little perturbation. It diverges with error {res2[1]}" tmp2 = backend.loads_info() assert len(tmp) == 3, "loads_info() should return 3 elements: load_p, load_q, load_v (see doc)" load_p_after, load_q_after, load_v_after = tmp2 @@ -428,7 +457,8 @@ def test_11_modify_load_pf_getter(self): bk_act += action backend.apply_action(bk_act) # modification of load_p, load_q and gen_p res_tmp = backend.runpf(is_dc=False) - assert res_tmp[0], "backend should not have diverge after such a little perturbation" + assert res_tmp[0], (f"backend should not have diverged after such a little perturbation. " + f"It diverges with error {res_tmp[1]} for load {load_id}") tmp = backend.loads_info() assert np.abs(tmp[0][load_id] - load_p_init[load_id]) >= delta_mw / 2., f"error when trying to modify load {load_id}: check the consistency between backend.loads_info() and backend.apply_action for load_p" assert np.abs(tmp[1][load_id] - load_q_init[load_id]) >= delta_mvar / 2., f"error when trying to modify load {load_id}: check the consistency between backend.loads_info() and backend.apply_action for load_q" @@ -463,12 +493,16 @@ def test_12_modify_gen_pf_getter(self): backend.apply_action(bk_act) # modification of load_p, load_q and gen_p res2 = backend.runpf(is_dc=False) - assert res2[0], "backend should not have diverge after such a little perturbation" + assert res2[0], f"backend should not have diverged after such a little perturbation. 
It diverges with error {res2[1]}" tmp2 = backend.generators_info() assert len(tmp) == 3, "generators_info() should return 3 elements: gen_p, gen_q, gen_v (see doc)" gen_p_after, gen_q_after, gen_v_after = tmp2 - assert not np.allclose(gen_p_after, gen_p_init), f"gen_p does not seemed to be modified by apply_action when generators are impacted (active value): check `apply_action` for gen_p / prod_p" - assert not np.allclose(gen_v_after, gen_v_init), f"gen_v does not seemed to be modified by apply_action when generators are impacted (voltage setpoint value): check `apply_action` for gen_v / prod_v" + assert not np.allclose(gen_p_after, gen_p_init), (f"gen_p does not seemed to be modified by apply_action when " + "generators are impacted (active value): check `apply_action` " + "for gen_p / prod_p") + assert not np.allclose(gen_v_after, gen_v_init), (f"gen_v does not seemed to be modified by apply_action when " + "generators are impacted (voltage setpoint value): check `apply_action` " + "for gen_v / prod_v") # now a basic check for "one gen at a time" # NB this test cannot be done like this for "prod_v" / gen_v because two generators might be connected to the same @@ -486,7 +520,8 @@ def test_12_modify_gen_pf_getter(self): bk_act += action backend.apply_action(bk_act) res_tmp = backend.runpf(is_dc=False) - assert res_tmp[0], "backend should not have diverge after such a little perturbation" + assert res_tmp[0], (f"backend should not have diverged after such a little " + f"perturbation. It diverges with error {res_tmp[1]} for gen {gen_id}") tmp = backend.generators_info() if np.abs(tmp[0][gen_id] - gen_p_init[gen_id]) <= delta_mw / 2.: # in case of non distributed slack, backend cannot control the generator acting as the slack. @@ -541,7 +576,8 @@ def test_13_disco_reco_lines_pf_getter(self): bk_act += action1 backend.apply_action(bk_act) # disconnection of line 0 only res_disco = backend.runpf(is_dc=False) - assert res_disco[0], f"your backend diverge after disconnection of line {line_id}, which should not be the case" + # backend._grid.tell_solver_need_reset() + assert res_disco[0], f"your backend diverges after disconnection of line {line_id}, which should not be the case" tmp_or_disco = backend.lines_or_info() tmp_ex_disco = backend.lines_ex_info() assert not np.allclose(tmp_or_disco[0], p_or), f"p_or does not seemed to be modified by apply_action when a powerline is disconnected (active value): check `apply_action` for line connection disconnection" @@ -565,7 +601,7 @@ def test_13_disco_reco_lines_pf_getter(self): bk_act += action2 backend.apply_action(bk_act) # disconnection of line 0 only res_disco = backend.runpf(is_dc=False) - assert res_disco[0], f"your backend diverge after disconnection of line {line_id}, which should not be the case" + assert res_disco[0], f"your backend diverges after disconnection of line {line_id}, which should not be the case" tmp_or_reco = backend.lines_or_info() tmp_ex_reco = backend.lines_ex_info() assert not np.allclose(tmp_or_disco[0], tmp_or_reco[0]), f"p_or does not seemed to be modified by apply_action when a powerline is reconnected (active value): check `apply_action` for line connection reconnection" @@ -587,7 +623,7 @@ def _aux_check_topo_vect(self, backend : Backend): assert len(topo_vect) == dim_topo, (f"backend.get_topo_vect() should return a vector of size 'dim_topo' " f"({dim_topo}) but found size is {len(topo_vect)}. 
" f"Remember: shunt are not part of the topo_vect") - assert np.all(topo_vect <= 2), (f"For simple environment, we suppose there are 2 buses per substation / voltage levels. " + assert np.all(topo_vect <= type(backend).n_busbar_per_sub), (f"For simple environment, we suppose there are 2 buses per substation / voltage levels. " f"topo_vect is supposed to give the id of the busbar (in the substation) to " f"which the element is connected. This cannot be {np.max(topo_vect)}." f"NB: this test is expected to fail if you test on a grid where more " @@ -648,7 +684,8 @@ def test_14change_topology(self): bk_act += action1 backend.apply_action(bk_act) # everything on busbar 2 at sub 0 res = backend.runpf(is_dc=False) - assert res[0], "Your powerflow has diverged after the loading of the file, which should not happen" + assert res[0], (f"Your powerflow has diverged after a topological change at substation {sub_id} with error {res[1]}." + f"\nCheck `apply_action` for topology.") if not cls.shunts_data_available: warnings.warn(f"{type(self).__name__} test_14change_topology: This test is not performed in depth as your backend does not support shunts") @@ -1080,7 +1117,7 @@ def test_22_islanded_grid_stops_computation(self): bk_act += action backend.apply_action(bk_act) # mix of bus 1 and 2 on substation 1 res = backend.runpf(is_dc=False) - assert not res[0], "It is expected that your backend return `False` in case of non connected grid in AC." + assert not res[0], f"It is expected that your backend return `(False, _)` in case of non connected grid in AC." error = res[1] assert isinstance(error, Grid2OpException), f"When your backend return `False`, we expect it throws an exception inheriting from Grid2OpException (second return value), backend returned {type(error)}" if not isinstance(error, BackendError): @@ -1096,7 +1133,7 @@ def test_22_islanded_grid_stops_computation(self): bk_act += action backend.apply_action(bk_act) # mix of bus 1 and 2 on substation 1 res = backend.runpf(is_dc=True) - assert not res[0], "It is expected that your backend throws an exception inheriting from BackendError in case of non connected grid in DC." + assert not res[0], f"It is expected that your backend return `(False, _)` in case of non connected grid in DC." error = res[1] assert isinstance(error, Grid2OpException), f"When your backend return `False`, we expect it throws an exception inheriting from Grid2OpException (second return value), backend returned {type(error)}" if not isinstance(error, BackendError): @@ -1125,6 +1162,7 @@ def test_23_disco_line_v_null(self): backend.apply_action(bk_act) res = backend.runpf(is_dc=False) + assert res[0], f"Your backend diverged in AC after a line disconnection, error was {res[1]}" p_or, q_or, v_or, a_or = backend.lines_or_info() p_ex, q_ex, v_ex, a_ex = backend.lines_ex_info() assert np.allclose(v_or[line_id], 0.), f"v_or should be 0. for disconnected line, but is currently {v_or[line_id]} (AC)" @@ -1141,6 +1179,7 @@ def test_23_disco_line_v_null(self): backend.apply_action(bk_act) res = backend.runpf(is_dc=True) + assert res[0], f"Your backend diverged in DC after a line disconnection, error was {res[1]}" p_or, q_or, v_or, a_or = backend.lines_or_info() p_ex, q_ex, v_ex, a_ex = backend.lines_ex_info() assert np.allclose(v_or[line_id], 0.), f"v_or should be 0. 
for disconnected line, but is currently {v_or[line_id]} (DC)" @@ -1177,6 +1216,7 @@ def test_24_disco_shunt_v_null(self): bk_act += action backend.apply_action(bk_act) res = backend.runpf(is_dc=False) + assert res[0], f"Your backend diverged in AC after a shunt disconnection, error was {res[1]}" p_, q_, v_, bus_ = backend.shunt_info() assert np.allclose(v_[shunt_id], 0.), f"v should be 0. for disconnected shunt, but is currently {v_[shunt_id]} (AC)" assert bus_[shunt_id] == -1, f"bus_ should be -1 for disconnected shunt, but is currently {bus_[shunt_id]} (AC)" @@ -1189,6 +1229,7 @@ def test_24_disco_shunt_v_null(self): bk_act += action backend.apply_action(bk_act) res = backend.runpf(is_dc=True) + assert res[0], f"Your backend diverged in DC after a shunt disconnection, error was {res[1]}" p_, q_, v_, bus_ = backend.shunt_info() assert np.allclose(v_[shunt_id], 0.), f"v should be 0. for disconnected shunt, but is currently {v_[shunt_id]} (DC)" assert bus_[shunt_id] == -1, f"bus_ should be -1 for disconnected shunt, but is currently {bus_[shunt_id]} (DC)" @@ -1221,6 +1262,7 @@ def test_25_disco_storage_v_null(self): backend.apply_action(bk_act) res = backend.runpf(is_dc=False) + assert res[0], f"Your backend diverged in AC after a storage disconnection, error was {res[1]}" p_, q_, v_ = backend.storages_info() assert np.allclose(v_[storage_id], 0.), f"v should be 0. for disconnected storage, but is currently {v_[storage_id]} (AC)" @@ -1232,6 +1274,7 @@ def test_25_disco_storage_v_null(self): bk_act += action backend.apply_action(bk_act) res = backend.runpf(is_dc=True) + assert res[0], f"Your backend diverged in DC after a storage disconnection, error was {res[1]}" p_, q_, v_ = backend.storages_info() assert np.allclose(v_[storage_id], 0.), f"v should be 0. for disconnected storage, but is currently {v_[storage_id]} (AC)" @@ -1261,7 +1304,8 @@ def test_26_copy(self): # backend can be copied backend_cpy = backend.copy() assert isinstance(backend_cpy, type(backend)), f"backend.copy() is supposed to return an object of the same type as your backend. 
Check backend.copy()" - backend.runpf(is_dc=False) + res = backend.runpf(is_dc=False) + assert res[0], f"Your backend diverged in AC after a copy, error was {res[1]}" # now modify original one init_gen_p, *_ = backend.generators_info() init_load_p, *_ = backend.loads_info() @@ -1274,6 +1318,7 @@ def test_26_copy(self): backend.apply_action(bk_act) res = backend.runpf(is_dc=True) res_cpy = backend_cpy.runpf(is_dc=True) + assert res_cpy[0], f"Your backend diverged in DC after a copy, error was {res_cpy[1]}" p_or, *_ = backend.lines_or_info() p_or_cpy, *_ = backend_cpy.lines_or_info() @@ -1302,6 +1347,7 @@ def test_27_topo_vect_disconnect(self): cls = type(backend) res = backend.runpf(is_dc=False) + assert res[0], f"Your backend diverged in AC after loading, error was {res[1]}" topo_vect_orig = self._aux_check_topo_vect(backend) # disconnect line @@ -1313,6 +1359,7 @@ def test_27_topo_vect_disconnect(self): bk_act += action backend.apply_action(bk_act) res = backend.runpf(is_dc=False) + assert res[0], f"Your backend diverged in AC after a line disconnection, error was {res[1]}" topo_vect = self._aux_check_topo_vect(backend) error_msg = (f"Line {line_id} has been disconnected, yet according to 'topo_vect' " f"is still connected (origin side) to busbar {topo_vect[cls.line_or_pos_topo_vect[line_id]]}") @@ -1331,6 +1378,7 @@ def test_27_topo_vect_disconnect(self): bk_act += action backend.apply_action(bk_act) res = backend.runpf(is_dc=False) + assert res[0], f"Your backend diverged in AC after a storage disconnection, error was {res[1]}" topo_vect = self._aux_check_topo_vect(backend) error_msg = (f"Storage {sto_id} has been disconnected, yet according to 'topo_vect' " f"is still connected (origin side) to busbar {topo_vect[cls.storage_pos_topo_vect[line_id]]}") @@ -1353,6 +1401,7 @@ def test_27_topo_vect_disconnect(self): bk_act += action backend.apply_action(bk_act) res = backend.runpf(is_dc=False) + assert res[0], f"Your backend diverged in AC after a shunt disconnection, error was {res[1]}" topo_vect = self._aux_check_topo_vect(backend) error_msg = (f"Disconnecting a shunt should have no impact on the topo_vect vector " f"as shunt are not taken into account in this") @@ -1361,9 +1410,9 @@ def test_27_topo_vect_disconnect(self): def _aux_aux_get_line(self, el_id, el_to_subid, line_xx_to_subid): sub_id = el_to_subid[el_id] if (line_xx_to_subid == sub_id).sum() >= 2: - return True, np.where(line_xx_to_subid == sub_id)[0][0] + return True, np.nonzero(line_xx_to_subid == sub_id)[0][0] elif (line_xx_to_subid == sub_id).sum() == 1: - return False, np.where(line_xx_to_subid == sub_id)[0][0] + return False, np.nonzero(line_xx_to_subid == sub_id)[0][0] else: return None @@ -1439,6 +1488,7 @@ def _aux_check_el_generic(self, backend, busbar_id, bk_act += action backend.apply_action(bk_act) # apply the action res = backend.runpf(is_dc=False) + assert res[0], f"Your backend diverged in AC after setting a {el_nm} on busbar {busbar_id}, error was {res[1]}" # now check the topology vector topo_vect = self._aux_check_topo_vect(backend) error_msg = (f"{el_nm} {el_id} has been moved to busbar {busbar_id}, yet according to 'topo_vect' " @@ -1464,6 +1514,7 @@ def test_28_topo_vect_set(self): cls = type(backend) res = backend.runpf(is_dc=False) + assert res[0], f"Your backend diverged in AC after loading the grid state, error was {res[1]}" topo_vect_orig = self._aux_check_topo_vect(backend) # line or @@ -1476,6 +1527,7 @@ def test_28_topo_vect_set(self): bk_act += action backend.apply_action(bk_act) res = 
backend.runpf(is_dc=False) + assert res[0], f"Your backend diverged in AC after setting a line (or side) on busbar 2, error was {res[1]}" topo_vect = self._aux_check_topo_vect(backend) error_msg = (f"Line {line_id} (or. side) has been moved to busbar {busbar_id}, yet according to 'topo_vect' " f"is still connected (origin side) to busbar {topo_vect[cls.line_or_pos_topo_vect[line_id]]}") @@ -1491,6 +1543,7 @@ def test_28_topo_vect_set(self): bk_act += action backend.apply_action(bk_act) res = backend.runpf(is_dc=False) + assert res[0], f"Your backend diverged in AC after setting a line (ex side) on busbar 2, error was {res[1]}" topo_vect = self._aux_check_topo_vect(backend) error_msg = (f"Line {line_id} (ex. side) has been moved to busbar {busbar_id}, yet according to 'topo_vect' " f"is still connected (ext side) to busbar {topo_vect[cls.line_ex_pos_topo_vect[line_id]]}") @@ -1531,4 +1584,119 @@ def test_28_topo_vect_set(self): el_nm, el_key, el_pos_topo_vect) else: warnings.warn(f"{type(self).__name__} test_28_topo_vect_set: This test is not performed in depth as your backend does not support storage units (or there are none on the grid)") - \ No newline at end of file + + def test_29_xxx_handle_more_than_2_busbar_called(self): + """Tests that at least one of the function: + + - :func:`grid2op.Backend.Backend.can_handle_more_than_2_busbar` + - :func:`grid2op.Backend.Backend.cannot_handle_more_than_2_busbar` + + has been implemented in the :func:`grid2op.Backend.Backend.load_grid` + implementation. + + This test supposes that : + + - backend.load_grid(...) is implemented + + .. versionadded:: 1.10.0 + + """ + self.skip_if_needed() + backend = self.aux_make_backend() + assert not backend._missing_two_busbars_support_info + + def test_30_n_busbar_per_sub_ok(self): + """Tests that your backend can properly handle more than + 3 busbars (only applies if your backend supports the feature): basically that + objects can be moved to busbar 3 without trouble. + + This test supposes that : + + - backend.load_grid(...) is implemented + - backend.runpf() (AC mode) is implemented + - backend.apply_action() for all types of action + - backend.reset() is implemented + - backend.get_topo_vect() is implemented + + .. versionadded:: 1.10.0 + + """ + self.skip_if_needed() + n_busbar = 3 + backend = self.aux_make_backend(n_busbar=n_busbar) + cls = type(backend) + if cls.n_busbar_per_sub != n_busbar: + self.skipTest("Your backend does not support more than 2 busbars.") + + res = backend.runpf(is_dc=False) + assert res[0], f"Your backend diverged in AC after loading the grid state, error was {res[1]}" + topo_vect_orig = self._aux_check_topo_vect(backend) + + # line or + line_id = 0 + busbar_id = n_busbar + backend.reset(self.get_path(), self.get_casefile()) + action = cls._complete_action_class() + action.update({"set_bus": {"lines_or_id": [(line_id, busbar_id)]}}) + bk_act = cls.my_bk_act_class() + bk_act += action + backend.apply_action(bk_act) + res = backend.runpf(is_dc=False) + assert res[0], f"Your backend diverged in AC after setting a line (or side) on busbar 3, error was {res[1]}" + topo_vect = self._aux_check_topo_vect(backend) + error_msg = (f"Line {line_id} (or. 
side) has been moved to busbar {busbar_id}, yet according to 'topo_vect' " + f"is still connected (origin side) to busbar {topo_vect[cls.line_or_pos_topo_vect[line_id]]}") + assert topo_vect[cls.line_or_pos_topo_vect[line_id]] == busbar_id, error_msg + + # line ex + line_id = 0 + busbar_id = n_busbar + backend.reset(self.get_path(), self.get_casefile()) + action = cls._complete_action_class() + action.update({"set_bus": {"lines_ex_id": [(line_id, busbar_id)]}}) + bk_act = cls.my_bk_act_class() + bk_act += action + backend.apply_action(bk_act) + res = backend.runpf(is_dc=False) + assert res[0], f"Your backend diverged in AC after setting a line (ex side) on busbar 3, error was {res[1]}" + topo_vect = self._aux_check_topo_vect(backend) + error_msg = (f"Line {line_id} (ex. side) has been moved to busbar {busbar_id}, yet according to 'topo_vect' " + f"is still connected (ext side) to busbar {topo_vect[cls.line_ex_pos_topo_vect[line_id]]}") + assert topo_vect[cls.line_ex_pos_topo_vect[line_id]] == busbar_id, error_msg + + # load + backend.reset(self.get_path(), self.get_casefile()) + busbar_id = n_busbar + nb_el = cls.n_load + el_to_subid = cls.load_to_subid + el_nm = "load" + el_key = "loads_id" + el_pos_topo_vect = cls.load_pos_topo_vect + self._aux_check_el_generic(backend, busbar_id, nb_el, el_to_subid, + el_nm, el_key, el_pos_topo_vect) + + # generator + backend.reset(self.get_path(), self.get_casefile()) + busbar_id = n_busbar + nb_el = cls.n_gen + el_to_subid = cls.gen_to_subid + el_nm = "generator" + el_key = "generators_id" + el_pos_topo_vect = cls.gen_pos_topo_vect + self._aux_check_el_generic(backend, busbar_id, nb_el, el_to_subid, + el_nm, el_key, el_pos_topo_vect) + + # storage + if cls.n_storage > 0: + backend.reset(self.get_path(), self.get_casefile()) + busbar_id = n_busbar + nb_el = cls.n_storage + el_to_subid = cls.storage_to_subid + el_nm = "storage" + el_key = "storages_id" + el_pos_topo_vect = cls.storage_pos_topo_vect + self._aux_check_el_generic(backend, busbar_id, nb_el, el_to_subid, + el_nm, el_key, el_pos_topo_vect) + else: + warnings.warn(f"{type(self).__name__} test_30_n_busbar_per_sub_ok: This test is not performed in depth as your backend does not support storage units (or there are none on the grid)") + \ No newline at end of file diff --git a/grid2op/tests/automatic_classes.py b/grid2op/tests/automatic_classes.py new file mode 100644 index 000000000..f68c6f51b --- /dev/null +++ b/grid2op/tests/automatic_classes.py @@ -0,0 +1,799 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# See AUTHORS.txt +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. 
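The two tests added just above (`test_29_xxx_handle_more_than_2_busbar_called` and `test_30_n_busbar_per_sub_ok`) exercise the multi-busbar support introduced in grid2op 1.10. As a rough environment-level counterpart, and assuming the `n_busbar` keyword of `grid2op.make` from that same release (an assumption to verify against your grid2op version), a smoke test could look like:

    import warnings
    import grid2op

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        # ask for 3 busbars per substation instead of the default 2 (grid2op >= 1.10, assumed kwarg)
        env = grid2op.make("l2rpn_case14_sandbox", test=True, n_busbar=3)

    assert type(env).n_busbar_per_sub == 3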
+ +import os +import multiprocessing as mp +from typing import Optional +import warnings +import unittest +import importlib +import numpy as np +from gymnasium.vector import AsyncVectorEnv + + +import grid2op +from grid2op._glop_platform_info import _IS_WINDOWS +from grid2op.Runner import Runner +from grid2op.Agent import BaseAgent +from grid2op.Action import BaseAction +from grid2op.Observation.baseObservation import BaseObservation +from grid2op.Action.actionSpace import ActionSpace +from grid2op.Environment import (Environment, + MaskedEnvironment, + TimedOutEnvironment, + SingleEnvMultiProcess, + MultiMixEnvironment) +from grid2op.Exceptions import NoForecastAvailable +from grid2op.gym_compat import (BoxGymActSpace, + BoxGymObsSpace, + DiscreteActSpace, + MultiDiscreteActSpace) +from grid2op.gym_compat import GymEnv # TODO GYMENV + +# TODO test the runner saved classes and reload + +# TODO two envs same name => now diff classes + +# TODO test add_to_name +# TODO test noshunt +# TODO grid2op compat version + +# TODO test backend converters +# TODO test all type of backend in the observation space, including the deactivate forecast, reactivate forecast, the different backend etc. + +class _ThisAgentTest(BaseAgent): + def __init__(self, + action_space: ActionSpace, + _read_from_local_dir, + _name_cls_obs, + _name_cls_act, + ): + super().__init__(action_space) + self._read_from_local_dir = _read_from_local_dir + self._name_cls_obs = _name_cls_obs + self._name_cls_act = _name_cls_act + + def act(self, observation: BaseObservation, reward: float, done: bool = False) -> BaseAction: + supermodule_nm, module_nm = os.path.split(self._read_from_local_dir) + super_module = importlib.import_module(module_nm, supermodule_nm) + + # check observation + this_module = importlib.import_module(f"{module_nm}.{self._name_cls_obs}_file", super_module) + if hasattr(this_module, self._name_cls_obs): + this_class_obs = getattr(this_module, self._name_cls_obs) + else: + raise RuntimeError(f"class {self._name_cls_obs} not found") + assert isinstance(observation, this_class_obs) + + # check action + this_module = importlib.import_module(f"{module_nm}.{self._name_cls_act}_file", super_module) + if hasattr(this_module, self._name_cls_act): + this_class_act = getattr(this_module, self._name_cls_act) + else: + raise RuntimeError(f"class {self._name_cls_act} not found") + res = super().act(observation, reward, done) + assert isinstance(res, this_class_act) + return res + + +class AutoClassMakeTester(unittest.TestCase): + """test that the kwargs `class_in_file=False` erase the default behaviour """ + def test_in_make(self): + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make("l2rpn_case14_sandbox", test=True, class_in_file=False) + assert env._read_from_local_dir is None + assert not env.classes_are_in_files() + + +class AutoClassInFileTester(unittest.TestCase): + def get_env_name(self): + return "l2rpn_case14_sandbox" + + def setUp(self) -> None: + self.max_iter = 10 + return super().setUp() + + def _do_test_runner(self): + # false for multi process env + return True + + def _do_test_copy(self): + # for for multi process env + return True + + def _do_test_obs_env(self): + return True + + def _aux_make_env(self, env: Optional[Environment]=None): + if env is None: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make(self.get_env_name(), test=True, class_in_file=True) + assert env.classes_are_in_files() + return env + + def 
_aux_get_obs_cls(self): + return "CompleteObservation_{}" + + def _aux_get_act_cls(self): + return "PlayableAction_{}" + + def test_all_classes_from_file(self, + env: Optional[Environment]=None, + classes_name=None, + name_complete_obs_cls="CompleteObservation_{}", + name_observation_cls=None, + name_action_cls=None): + if classes_name is None: + classes_name = self.get_env_name() + if name_observation_cls is None: + name_observation_cls = self._aux_get_obs_cls().format(classes_name) + if name_action_cls is None: + name_action_cls = self._aux_get_act_cls().format(classes_name) + + name_action_cls = name_action_cls.format(classes_name) + env = self._aux_make_env(env) + names_cls = [f"ActionSpace_{classes_name}", + f"_BackendAction_{classes_name}", + f"CompleteAction_{classes_name}", + name_observation_cls.format(classes_name), + name_complete_obs_cls.format(classes_name), + f"DontAct_{classes_name}", + f"_ObsEnv_{classes_name}", + f"ObservationSpace_{classes_name}", + f"PandaPowerBackend_{classes_name}", + name_action_cls, + f"VoltageOnlyAction_{classes_name}" + ] + names_attr = ["action_space", + "_backend_action_class", + "_complete_action_cls", + "_observationClass", + None, # Complete Observation in the forecast ! + None, # DONT ACT not int ENV directly + None, # ObsEnv NOT IN ENV, + "observation_space", + "backend", + "_actionClass", + None, # VoltageOnlyAction not in env + ] + + # NB: these imports needs to be consistent with what is done in + # base_env.generate_classes() and gridobj.init_grid(...) + supermodule_nm, module_nm = os.path.split(env._read_from_local_dir) + super_module = importlib.import_module(module_nm, supermodule_nm) + for name_cls, name_attr in zip(names_cls, names_attr): + this_module = importlib.import_module(f"{module_nm}.{name_cls}_file", super_module) + if hasattr(this_module, name_cls): + this_class = getattr(this_module, name_cls) + else: + raise RuntimeError(f"class {name_cls} not found") + if name_attr is not None: + the_attr = getattr(env, name_attr) + if isinstance(the_attr, type): + assert the_attr is this_class, f"error for {the_attr} vs {this_class} env.{name_attr}" + else: + assert type(the_attr) is this_class, f"error for {type(the_attr)} vs {this_class} (env.{name_attr})" + assert this_class._CLS_DICT is not None, f'error for {name_cls}' + assert this_class._CLS_DICT_EXTENDED is not None, f'error for {name_cls}' + + # additional check for some attributes + if name_cls == f"ActionSpace_{classes_name}": + assert type(env._helper_action_env) is this_class + if env.observation_space.obs_env is not None: + # not in _ObsEnv + assert type(env.observation_space.obs_env._helper_action_env) is this_class, f"{type(env.observation_space.obs_env._helper_action_env)}" + if env._voltage_controler is not None: + # not in _ObsEnv + assert type(env._voltage_controler.action_space) is this_class + if env.chronics_handler.action_space is not None: + # not in _ObsEnv + assert type(env.chronics_handler.action_space) is this_class + assert env.chronics_handler.action_space is env._helper_action_env + elif name_cls == f"_BackendAction_{classes_name}": + assert env.backend.my_bk_act_class is this_class + assert isinstance(env._backend_action, this_class) + if env.observation_space.obs_env is not None: + # not in _ObsEnv + assert env.observation_space.obs_env._backend_action_class is this_class + assert env.observation_space.obs_env.backend.my_bk_act_class is this_class + assert isinstance(env.observation_space.obs_env._backend_action, this_class) + elif name_cls == 
f"CompleteAction_{classes_name}": + assert env.backend._complete_action_class is this_class + + if env.observation_space.obs_env is not None: + # not in _ObsEnv + assert env.observation_space.obs_env._complete_action_cls is this_class + assert env.observation_space.obs_env.backend._complete_action_class is this_class + + assert env.observation_space.obs_env._actionClass is this_class + + assert env._helper_action_env.subtype is this_class + elif name_cls == name_observation_cls.format(classes_name): + # observation of the env + assert env._observation_space.subtype is this_class + if env.current_obs is not None: + # not in _ObsEnv + assert isinstance(env.current_obs, this_class) + if env._last_obs is not None: + # not in _ObsEnv + assert isinstance(env._last_obs, this_class) + elif name_cls == name_complete_obs_cls.format(classes_name): + # observation of the forecast + if env.observation_space.obs_env is not None: + # not in _ObsEnv + assert env._observation_space.obs_env._observation_space.subtype is this_class + if env.observation_space.obs_env.current_obs is not None: + # not in _ObsEnv + assert isinstance(env.observation_space.obs_env.current_obs, this_class) + if env.observation_space.obs_env._last_obs is not None: + # not in _ObsEnv + assert isinstance(env.observation_space.obs_env._last_obs, this_class) + elif name_cls == f"DontAct_{classes_name}": + assert env._oppSpace.action_space.subtype is this_class + assert env._opponent.action_space.subtype is this_class + elif name_cls == f"_ObsEnv_{classes_name}": + if env.observation_space.obs_env is not None: + # not in _ObsEnv + assert type(env.observation_space.obs_env) is this_class + assert isinstance(env.observation_space.obs_env, this_class) + if env.current_obs is not None and env.current_obs._obs_env is not None: + # not in _ObsEnv + assert type(env.current_obs._obs_env) is this_class, f"{type(env.current_obs._obs_env)}" + assert isinstance(env.observation_space.obs_env, this_class) + if env._last_obs is not None and env._last_obs._obs_env is not None: + # not in _ObsEnv + assert type(env._last_obs._obs_env) is this_class, f"{type(env._last_obs._obs_env)}" + if env.observation_space.obs_env is not None: + # not in _ObsEnv + assert env.current_obs._obs_env is env.observation_space.obs_env + assert env._last_obs._obs_env is env.observation_space.obs_env + elif name_cls == f"ObservationSpace_{classes_name}": + if env.observation_space.obs_env is not None: + # not in _ObsEnv + assert type(env.observation_space.obs_env._observation_space) is this_class + assert type(env.observation_space.obs_env._ptr_orig_obs_space) is this_class, f"{type(env.observation_space.obs_env._ptr_orig_obs_space)}" + + assert env.observation_space.obs_env._ptr_orig_obs_space is env._observation_space, f"{type(env.observation_space.obs_env._ptr_orig_obs_space)}" + elif name_cls == name_action_cls: + assert env._action_space.subtype is this_class + # assert env.observation_space.obs_env._actionClass is this_class # no it's a complete action apparently + elif name_cls == f"VoltageOnlyAction_{classes_name}": + if env._voltage_controler is not None: + # not in _ObsEnv + assert env._voltage_controler.action_space.subtype is this_class + # TODO test current_obs and _last_obs + + def test_all_classes_from_file_env_after_reset(self, env: Optional[Environment]=None): + """test classes are still consistent even after a call to env.reset() and obs.simulate()""" + env = self._aux_make_env(env) + obs = env.reset() + self.test_all_classes_from_file(env=env) + try: + 
obs.simulate(env.action_space()) + self.test_all_classes_from_file(env=env) + except NoForecastAvailable: + # cannot do this test if the "original" env is a _Forecast env: + # for l2rpn_case14_sandbox only 1 step ahead forecast are available + pass + + def test_all_classes_from_file_obsenv(self, env: Optional[Environment]=None): + """test the files are correctly generated for the "forecast env" in the + environment even after a call to obs.reset() and obs.simulate()""" + if not self._do_test_obs_env(): + self.skipTest("ObsEnv is not tested") + env = self._aux_make_env(env) + + self.test_all_classes_from_file(env=env.observation_space.obs_env, + name_action_cls="CompleteAction_{}", + name_observation_cls="CompleteObservation_{}") + + # reset and check the same + obs = env.reset() + self.test_all_classes_from_file(env=env.observation_space.obs_env, + name_action_cls="CompleteAction_{}", + name_observation_cls="CompleteObservation_{}") + self.test_all_classes_from_file(env=obs._obs_env, + name_action_cls="CompleteAction_{}", + name_observation_cls="CompleteObservation_{}") + + # forecast and check the same + try: + obs.simulate(env.action_space()) + self.test_all_classes_from_file(env=env.observation_space.obs_env, + name_action_cls="CompleteAction_{}", + name_observation_cls="CompleteObservation_{}") + self.test_all_classes_from_file(env=obs._obs_env, + name_action_cls="CompleteAction_{}", + name_observation_cls="CompleteObservation_{}") + except NoForecastAvailable: + # cannot do this test if the "original" env is a _Forecast env: + # for l2rpn_case14_sandbox only 1 step ahead forecast are available + pass + + def test_all_classes_from_file_env_cpy(self, env: Optional[Environment]=None): + """test that when an environment is copied, then the copied env is consistent, + that it is consistent after a reset and that the forecast env is consistent""" + if not self._do_test_copy(): + self.skipTest("Copy is not tested") + env = self._aux_make_env(env) + env_cpy = env.copy() + self.test_all_classes_from_file(env=env_cpy) + self.test_all_classes_from_file_env_after_reset(env=env_cpy) + self.test_all_classes_from_file(env=env_cpy.observation_space.obs_env, + name_action_cls="CompleteAction_{}", + name_observation_cls="CompleteObservation_{}" + ) + self.test_all_classes_from_file_obsenv(env=env_cpy) + + def test_all_classes_from_file_env_runner(self, env: Optional[Environment]=None): + """this test, using the defined functions above that the runner is able to create a valid env""" + if not self._do_test_runner(): + self.skipTest("Runner not tested") + env = self._aux_make_env(env) + runner = Runner(**env.get_params_for_runner()) + env_runner = runner.init_env() + self.test_all_classes_from_file(env=env_runner) + self.test_all_classes_from_file_env_after_reset(env=env_runner) + self.test_all_classes_from_file(env=env_runner.observation_space.obs_env, + name_action_cls="CompleteAction_{}", + name_observation_cls="CompleteObservation_{}") + self.test_all_classes_from_file_obsenv(env=env_runner) + + # test the runner prevents the deletion of the tmp file where the classes are stored + # path_cls = env._local_dir_cls + # del env + # assert os.path.exists(path_cls.name) + env_runner = runner.init_env() + self.test_all_classes_from_file(env=env_runner) + self.test_all_classes_from_file_env_after_reset(env=env_runner) + self.test_all_classes_from_file(env=env_runner.observation_space.obs_env, + name_action_cls="CompleteAction_{}", + name_observation_cls="CompleteObservation_{}") + 
self.test_all_classes_from_file_obsenv(env=env_runner) + + def test_all_classes_from_file_runner_1ep(self, env: Optional[Environment]=None): + """this tests that the runner is able to "run" (one type of run), but the tests on the classes + are much lighter than in test_all_classes_from_file_env_runner""" + if not self._do_test_runner(): + self.skipTest("Runner not tested") + env = self._aux_make_env(env) + this_agent = _ThisAgentTest(env.action_space, + env._read_from_local_dir, + self._aux_get_obs_cls().format(self.get_env_name()), + self._aux_get_act_cls().format(self.get_env_name()), + ) + runner = Runner(**env.get_params_for_runner(), + agentClass=None, + agentInstance=this_agent) + runner.run(nb_episode=1, + max_iter=self.max_iter, + env_seeds=[0], + episode_id=[0]) + + def test_all_classes_from_file_runner_2ep_seq(self, env: Optional[Environment]=None): + """this tests that the runner is able to "run" (one other type of run), but the tests on the classes + are much lighter than in test_all_classes_from_file_env_runner""" + if not self._do_test_runner(): + self.skipTest("Runner not tested") + env = self._aux_make_env(env) + this_agent = _ThisAgentTest(env.action_space, + env._read_from_local_dir, + self._aux_get_obs_cls().format(self.get_env_name()), + self._aux_get_act_cls().format(self.get_env_name()), + ) + runner = Runner(**env.get_params_for_runner(), + agentClass=None, + agentInstance=this_agent) + res = runner.run(nb_episode=2, + max_iter=self.max_iter, + env_seeds=[0, 0], + episode_id=[0, 1]) + assert res[0][4] == self.max_iter + assert res[1][4] == self.max_iter + + def test_all_classes_from_file_runner_2ep_par_fork(self, env: Optional[Environment]=None): + """this tests that the runner is able to "run" (one other type of run), but the tests on the classes + are much lighter than in test_all_classes_from_file_env_runner""" + if not self._do_test_runner(): + self.skipTest("Runner not tested") + if _IS_WINDOWS: + self.skipTest("no fork on windows") + env = self._aux_make_env(env) + this_agent = _ThisAgentTest(env.action_space, + env._read_from_local_dir, + self._aux_get_obs_cls().format(self.get_env_name()), + self._aux_get_act_cls().format(self.get_env_name()), + ) + ctx = mp.get_context('fork') + runner = Runner(**env.get_params_for_runner(), + agentClass=None, + agentInstance=this_agent, + mp_context=ctx) + res = runner.run(nb_episode=2, + nb_process=2, + max_iter=self.max_iter, + env_seeds=[0, 0], + episode_id=[0, 1]) + assert res[0][4] == self.max_iter + assert res[1][4] == self.max_iter + + def test_all_classes_from_file_runner_2ep_par_spawn(self, env: Optional[Environment]=None): + """this tests that the runner is able to "run" (one other type of run), but the tests on the classes + are much lighter than in test_all_classes_from_file_env_runner""" + if not self._do_test_runner(): + self.skipTest("Runner not tested") + env = self._aux_make_env(env) + this_agent = _ThisAgentTest(env.action_space, + env._read_from_local_dir, + self._aux_get_obs_cls().format(self.get_env_name()), + self._aux_get_act_cls().format(self.get_env_name()), + ) + ctx = mp.get_context('spawn') + runner = Runner(**env.get_params_for_runner(), + agentClass=None, + agentInstance=this_agent, + mp_context=ctx) + res = runner.run(nb_episode=2, + nb_process=2, + max_iter=self.max_iter, + env_seeds=[0, 0], + episode_id=[0, 1]) + assert res[0][4] == self.max_iter + assert res[1][4] == self.max_iter + + +class MaskedEnvAutoClassTester(AutoClassInFileTester): + + def _aux_make_env(self, env: 
Optional[Environment]=None): + if env is None: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = MaskedEnvironment(super()._aux_make_env(), + lines_of_interest=np.array([True, True, True, True, True, True, + False, False, False, False, False, False, + False, False, False, False, False, False, + False, False])) + return env + + +class TOEnvAutoClassTester(AutoClassInFileTester): + + def _aux_make_env(self, env: Optional[Environment]=None): + if env is None: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = TimedOutEnvironment(super()._aux_make_env(), + time_out_ms=1e-3) + return env + + +class ForEnvAutoClassTester(AutoClassInFileTester): + + def _aux_make_env(self, env: Optional[Environment]=None): + if env is None: + # we create the reference environment and prevent grid2op to + # to delete it (because it stores the files to the class) + self.ref_env = super()._aux_make_env() + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + obs = self.ref_env.get_obs() + res = obs.get_forecast_env() + self.max_iter = res._max_iter # otherwise it fails in the runner + else: + res = env + return res + + def tearDown(self): + if hasattr(self, "ref_env"): + self.ref_env.close() + + +# class SEMPAUtoClassTester(AutoClassInFileTester): +# """means i need to completely recode `test_all_classes_from_file` to take into account the return +# values which is a list now... and i'm not ready for it yet TODO""" +# def _do_test_runner(self): +# # false for multi process env +# return False + +# def _do_test_copy(self): +# # for for multi process env +# return False + +# def _do_test_obs_env(self): +# return False + +# def _aux_make_env(self, env: Optional[Environment]=None): +# if env is None: +# # we create the reference environment and prevent grid2op to +# # to delete it (because it stores the files to the class) +# self.ref_env = super()._aux_make_env() +# with warnings.catch_warnings(): +# warnings.filterwarnings("ignore") +# res = SingleEnvMultiProcess(self.ref_env, nb_env=2) +# else: +# res = env +# return res + +class GymEnvAutoClassTester(unittest.TestCase): + def setUp(self) -> None: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = grid2op.make("l2rpn_case14_sandbox", + test=True, + class_in_file=True) + self.line_id = 3 + th_lim = self.env.get_thermal_limit() * 2. # avoid all problem in general + th_lim[self.line_id] /= 10. 
# make sure to get trouble in line 3 + self.env.set_thermal_limit(th_lim) + + GymEnvAutoClassTester._init_env(self.env) + + @staticmethod + def _init_env(env): + env.set_id(0) + env.seed(0) + env.reset() + + def tearDown(self) -> None: + self.env.close() + return super().tearDown() + + def _aux_run_envs(self, act, env_gym): + for i in range(10): + obs_in, reward, done, truncated, info = env_gym.step(act) + if i < 2: # 2 : 2 full steps already + assert obs_in["timestep_overflow"][self.line_id] == i + 1, f"error for step {i}: {obs_in['timestep_overflow'][self.line_id]}" + else: + # cooldown applied for line 3: + # - it disconnect stuff in `self.env_in` + # - it does not affect anything in `self.env_out` + assert not obs_in["line_status"][self.line_id] + + def test_gym_with_step(self): + """test the step function also disconnects (or not) the lines""" + env_gym = GymEnv(self.env) + act = {} + self._aux_run_envs(act, env_gym) + env_gym.reset() + self._aux_run_envs(act, env_gym) + + def test_gym_normal(self): + """test I can create the gym env""" + env_gym = GymEnv(self.env) + env_gym.reset() + + def test_gym_box(self): + """test I can create the gym env with box ob space and act space""" + env_gym = GymEnv(self.env) + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env_gym.action_space = BoxGymActSpace(self.env.action_space) + env_gym.observation_space = BoxGymObsSpace(self.env.observation_space) + env_gym.reset() + + def test_gym_discrete(self): + """test I can create the gym env with discrete act space""" + env_gym = GymEnv(self.env) + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env_gym.action_space = DiscreteActSpace(self.env.action_space) + env_gym.reset() + act = 0 + self._aux_run_envs(act, env_gym) + + def test_gym_multidiscrete(self): + """test I can create the gym env with multi discrete act space""" + env_gym = GymEnv(self.env) + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env_gym.action_space = MultiDiscreteActSpace(self.env.action_space) + env_gym.reset() + act = env_gym.action_space.sample() + act[:] = 0 + self._aux_run_envs(act, env_gym) + + def test_asynch_fork(self): + if _IS_WINDOWS: + self.skipTest("no fork on windows") + async_vect_env = AsyncVectorEnv((lambda: GymEnv(self.env), lambda: GymEnv(self.env)), + context="fork") + obs = async_vect_env.reset() + + def test_asynch_spawn(self): + async_vect_env = AsyncVectorEnv((lambda: GymEnv(self.env), lambda: GymEnv(self.env)), + context="spawn") + obs = async_vect_env.reset() + + +class MultiMixEnvAutoClassTester(AutoClassInFileTester): + def _aux_get_obs_cls(self): + return "ObservationNeurips2020_{}" + + def _aux_get_act_cls(self): + return "ActionNeurips2020_{}" + + def get_env_name(self): + return "l2rpn_neurips_2020_track2" + # TODO gym for that too + + # def _do_test_runner(self): + # return False + + def test_all_classes_from_file(self, + env: Optional[Environment]=None, + classes_name=None, + name_complete_obs_cls="CompleteObservation_{}", + name_observation_cls=None, + name_action_cls=None): + env_orig = env + env = self._aux_make_env(env) + try: + super().test_all_classes_from_file(env, + classes_name=classes_name, + name_complete_obs_cls=name_complete_obs_cls, + name_observation_cls=name_observation_cls, + name_action_cls=name_action_cls + ) + if isinstance(env, MultiMixEnvironment): + # test each mix of a multi mix + for mix in env: + super().test_all_classes_from_file(mix, + classes_name=classes_name, + 
name_complete_obs_cls=name_complete_obs_cls, + name_observation_cls=name_observation_cls, + name_action_cls=name_action_cls + ) + finally: + if env_orig is None: + # need to clean the env I created + env.close() + + def test_all_classes_from_file_env_after_reset(self, env: Optional[Environment]=None): + env_orig = env + env = self._aux_make_env(env) + try: + super().test_all_classes_from_file_env_after_reset(env) + if isinstance(env, MultiMixEnvironment): + # test each mix of a multimix + for mix in env: + super().test_all_classes_from_file_env_after_reset(mix) + finally: + if env_orig is None: + # need to clean the env I created + env.close() + + def test_all_classes_from_file_obsenv(self, env: Optional[Environment]=None): + env_orig = env + env = self._aux_make_env(env) + try: + super().test_all_classes_from_file_obsenv(env) + if isinstance(env, MultiMixEnvironment): + # test each mix of a multimix + for mix in env: + super().test_all_classes_from_file_obsenv(mix) + finally: + if env_orig is None: + # need to clean the env I created + env.close() + + def test_all_classes_from_file_env_cpy(self, env: Optional[Environment]=None): + env_orig = env + env = self._aux_make_env(env) + try: + super().test_all_classes_from_file_env_cpy(env) + if isinstance(env, MultiMixEnvironment): + # test each mix of a multimix + for mix in env: + super().test_all_classes_from_file_env_cpy(mix) + finally: + if env_orig is None: + # need to clean the env I created + env.close() + + def test_all_classes_from_file_env_runner(self, env: Optional[Environment]=None): + env_orig = env + env = self._aux_make_env(env) + try: + if isinstance(env, MultiMixEnvironment): + # test each mix of a multimix + for mix in env: + super().test_all_classes_from_file_env_runner(mix) + else: + # runner does not handle multimix + super().test_all_classes_from_file_env_runner(env) + finally: + if env_orig is None: + # need to clean the env I created + env.close() + + def test_all_classes_from_file_runner_1ep(self, env: Optional[Environment]=None): + env_orig = env + env = self._aux_make_env(env) + try: + if isinstance(env, MultiMixEnvironment): + # test each mix of a multimix + for mix in env: + super().test_all_classes_from_file_runner_1ep(mix) + else: + # runner does not handle multimix + super().test_all_classes_from_file_runner_1ep(env) + finally: + if env_orig is None: + # need to clean the env I created + env.close() + + def test_all_classes_from_file_runner_2ep_seq(self, env: Optional[Environment]=None): + env_orig = env + env = self._aux_make_env(env) + try: + if isinstance(env, MultiMixEnvironment): + # test each mix of a multimix + for mix in env: + super().test_all_classes_from_file_runner_2ep_seq(mix) + else: + # runner does not handle multimix + super().test_all_classes_from_file_runner_2ep_seq(env) + finally: + if env_orig is None: + # need to clean the env I created + env.close() + + def test_all_classes_from_file_runner_2ep_par_fork(self, env: Optional[Environment]=None): + if _IS_WINDOWS: + self.skipTest("no fork on windows") + env_orig = env + env = self._aux_make_env(env) + try: + if isinstance(env, MultiMixEnvironment): + # test each mix of a multimix + for mix in env: + super().test_all_classes_from_file_runner_2ep_par_fork(mix) + else: + # runner does not handle multimix + super().test_all_classes_from_file_runner_2ep_par_fork(env) + finally: + if env_orig is None: + # need to clean the env I created + env.close() + + def test_all_classes_from_file_runner_2ep_par_spawn(self, env: Optional[Environment]=None): + 
env_orig = env + env = self._aux_make_env(env) + try: + if isinstance(env, MultiMixEnvironment): + # test each mix of a multimix + for mix in env: + super().test_all_classes_from_file_runner_2ep_par_spawn(mix) + else: + # runner does not handle multimix + super().test_all_classes_from_file_runner_2ep_par_spawn(env) + finally: + if env_orig is None: + # need to clean the env I created + env.close() + + def test_forecast_env_basic(self, env: Optional[Environment]=None): + env_orig = env + env = self._aux_make_env(env) + try: + if isinstance(env, MultiMixEnvironment): + # test each mix of a multimix + for mix in env: + obs = mix.reset() + for_env = obs.get_forecast_env() + super().test_all_classes_from_file(for_env) + finally: + if env_orig is None: + # need to clean the env I created + env.close() + + +if __name__ == "__main__": + unittest.main() diff --git a/grid2op/tests/test_fromChronix2grid.py b/grid2op/tests/fromChronix2grid.py similarity index 100% rename from grid2op/tests/test_fromChronix2grid.py rename to grid2op/tests/fromChronix2grid.py diff --git a/grid2op/tests/helper_path_test.py b/grid2op/tests/helper_path_test.py index 683b65bd8..e9f5efc3d 100644 --- a/grid2op/tests/helper_path_test.py +++ b/grid2op/tests/helper_path_test.py @@ -10,6 +10,7 @@ # root package directory # Grid2Op subdirectory # Grid2Op/tests subdirectory + import sys import os import numpy as np @@ -24,7 +25,10 @@ data_test_dir = os.path.abspath(os.path.join(grid2op_dir, "data_test")) data_dir = os.path.abspath(os.path.join(grid2op_dir, "data")) -sys.path.insert(0, grid2op_dir) +# sys.path.insert(0, grid2op_dir) # cause https://github.com/rte-france/Grid2Op/issues/577 +# because the addition of `from grid2op._create_test_suite import create_test_suite` +# in grid2op "__init__.py" + PATH_DATA = data_dir PATH_DATA_TEST = data_test_dir @@ -63,11 +67,12 @@ class MakeBackend(ABC, HelperTests): def make_backend(self, detailed_infos_for_cascading_failures=False) -> Backend: pass - def make_backend_with_glue_code(self, detailed_infos_for_cascading_failures=False, extra_name="") -> Backend: + def make_backend_with_glue_code(self, detailed_infos_for_cascading_failures=False, extra_name="", n_busbar=2) -> Backend: Backend._clear_class_attribute() bk = self.make_backend(detailed_infos_for_cascading_failures=detailed_infos_for_cascading_failures) type(bk)._clear_grid_dependant_class_attributes() type(bk).set_env_name(type(self).__name__ + extra_name) + type(bk).set_n_busbar_per_sub(n_busbar) return bk def get_path(self) -> str: diff --git a/grid2op/tests/test_Action.py b/grid2op/tests/test_Action.py index 5de72f7b9..059686f08 100644 --- a/grid2op/tests/test_Action.py +++ b/grid2op/tests/test_Action.py @@ -28,7 +28,9 @@ def _get_action_grid_class(): + GridObjects._clear_class_attribute() GridObjects.env_name = "test_action_env" + GridObjects.n_busbar_per_sub = 2 GridObjects.n_gen = 5 GridObjects.name_gen = np.array(["gen_{}".format(i) for i in range(5)]) GridObjects.n_load = 11 @@ -100,10 +102,11 @@ def _get_action_grid_class(): np.arange(GridObjects.n_sub), repeats=GridObjects.sub_info ) GridObjects.glop_version = grid2op.__version__ - GridObjects._PATH_ENV = None + GridObjects._PATH_GRID_CLASSES = None json_ = { "glop_version": grid2op.__version__, + "n_busbar_per_sub": "2", "name_gen": ["gen_0", "gen_1", "gen_2", "gen_3", "gen_4"], "name_load": [ "load_0", @@ -331,11 +334,12 @@ def _get_action_grid_class(): "dim_alerts": 0, "alertable_line_names": [], "alertable_line_ids": [], - "_PATH_ENV": None, + "_PATH_GRID_CLASSES": 
None, "assistant_warning_type": None } GridObjects.shunts_data_available = False my_cls = GridObjects.init_grid(GridObjects, force=True) + GridObjects._clear_class_attribute() return my_cls, json_ @@ -872,7 +876,7 @@ def test_to_vect(self): tmp[-action.n_gen :] = -1 # compute the "set_bus" vect - id_set = np.where(np.array(type(action).attr_list_vect) == "_set_topo_vect")[0][0] + id_set = np.nonzero(np.array(type(action).attr_list_vect) == "_set_topo_vect")[0][0] size_before = 0 for el in type(action).attr_list_vect[:id_set]: arr_ = action._get_array_from_attr_name(el) @@ -939,7 +943,7 @@ def test_to_vect(self): 0, ] ) - id_change = np.where(np.array(type(action).attr_list_vect) == "_change_bus_vect")[0][ + id_change = np.nonzero(np.array(type(action).attr_list_vect) == "_change_bus_vect")[0][ 0 ] size_before = 0 diff --git a/grid2op/tests/test_ActionProperties.py b/grid2op/tests/test_ActionProperties.py index 86d908e5a..5a38c3218 100644 --- a/grid2op/tests/test_ActionProperties.py +++ b/grid2op/tests/test_ActionProperties.py @@ -69,60 +69,60 @@ def test_load_set_bus_array(self): assert np.all(act.load_set_bus == tmp) # array too short - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act = self.helper_action() act.load_set_bus = tmp[:-1] assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" # array too big - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act = self.helper_action() tmp2 = np.concatenate((tmp, (1,))) act.load_set_bus = tmp2 assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" # float vect - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act = self.helper_action() tmp3 = np.array(li_orig).astype(dt_float) act.load_set_bus = tmp3 assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" # one of the value too small - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act = self.helper_action() tmp4 = np.array(li_orig) tmp4[2] = -2 act.load_set_bus = tmp4 assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" # one of the value too large - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act = self.helper_action() tmp5 = np.array(li_orig) tmp5[2] = 3 act.load_set_bus = tmp5 assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" # wrong type - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act = self.helper_action() tmp6 = np.array(li_orig).astype(str) tmp6[2] = "toto" act.load_set_bus = tmp6 assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" def test_load_set_bus_tuple(self): # second set of tests, with tuple @@ -132,67 +132,67 @@ def test_load_set_bus_tuple(self): # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.load_set_bus = (3.0, 1) assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an 
ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.load_set_bus = (False, 1) assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.load_set_bus = ("toto", 1) assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.load_set_bus = (1, "toto") assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" # id too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.load_set_bus = (11, 1) assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" # id too low act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.load_set_bus = (-1, 1) assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" # not enough element in the tuple act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.load_set_bus = (1,) assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" # too much element in the tuple act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.load_set_bus = (1, 2, 3) assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" def test_load_set_bus_list_asarray(self): """test the set attribute when list are given (list convertible to array)""" @@ -206,57 +206,57 @@ def test_load_set_bus_list_asarray(self): # list too short act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp0 = copy.deepcopy(li_orig) tmp0.pop(0) act.load_set_bus = tmp0 assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" # list too big act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp1 = copy.deepcopy(li_orig) tmp1.append(2) act.load_set_bus = tmp1 assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" # list of float act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp3 = [float(el) for el in li_orig] act.load_set_bus = tmp3 assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" # one of the value too small act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp4 = copy.deepcopy(li_orig) tmp4[2] = -2 act.load_set_bus = tmp4 assert 
np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" # one of the value too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp5 = copy.deepcopy(li_orig) tmp5[2] = 3 act.load_set_bus = tmp5 assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = [str(el) for el in li_orig] tmp6[2] = "toto" act.load_set_bus = tmp6 assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" def test_load_set_bus_list_oftuple(self): """test the set attribute when list are given (list of tuple)""" @@ -268,66 +268,66 @@ def test_load_set_bus_list_oftuple(self): # list of float (for the el_id) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp3 = [(float(id_), new_bus) for id_, new_bus in li_orig] act.load_set_bus = tmp3 assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" # one of the bus value too small act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp4 = copy.deepcopy(li_orig) tmp4[2] = (3, -2) act.load_set_bus = tmp4 assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" # one of the bus value too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp5 = copy.deepcopy(li_orig) tmp5[2] = (3, 3) act.load_set_bus = tmp5 assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" # wrong type (element id) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = copy.deepcopy(li_orig) tmp6[2] = ("toto", 1) act.load_set_bus = tmp6 assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" # wrong type (bus value) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp7 = copy.deepcopy(li_orig) tmp7[2] = (3, "toto") act.load_set_bus = tmp7 assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" # el_id too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp8 = copy.deepcopy(li_orig) tmp8.append((11, 1)) act.load_set_bus = tmp8 assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" # el_id too low act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp9 = copy.deepcopy(li_orig) tmp9.append((-1, 1)) act.load_set_bus = tmp9 assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" # last test, when we give a list of tuple of 
exactly the right size act = self.helper_action() @@ -344,66 +344,66 @@ def test_load_set_bus_dict_with_id(self): # list of float act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp3 = {float(id_): new_bus for id_, new_bus in dict_orig.items()} act.load_set_bus = tmp3 assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" # one of the bus value too small act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp4 = copy.deepcopy(dict_orig) tmp4[2] = -2 act.load_set_bus = tmp4 assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" # one of the bus value too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp5 = copy.deepcopy(dict_orig) tmp5[2] = 3 act.load_set_bus = tmp5 assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" # wrong type (element id) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = copy.deepcopy(dict_orig) tmp6["toto"] = 1 act.load_set_bus = tmp6 assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" # wrong type (bus value) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp7 = copy.deepcopy(dict_orig) tmp7[3] = "tata" act.load_set_bus = tmp7 assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" # el_id too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp8 = copy.deepcopy(dict_orig) tmp8[11] = 1 act.load_set_bus = tmp8 assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" # el_id too low act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp9 = copy.deepcopy(dict_orig) tmp9[-1] = 1 act.load_set_bus = tmp9 assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" def test_load_set_bus_dict_with_name(self): """test the set attribute when list are given (list of tuple)""" @@ -414,13 +414,13 @@ def test_load_set_bus_dict_with_name(self): assert np.all(act.load_set_bus == [1, 0, -1, 0, 0, 2, 0, 0, 0, 0, 0]) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = copy.deepcopy(dict_orig) tmp6["toto"] = 1 # unknown load act.load_set_bus = tmp6 assert np.all( act.load_set_bus == 0 - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" def test_gen_set_bus_array(self): li_orig = [1, 2, -1, 2, 1] # because i have 5 gens @@ -432,60 +432,60 @@ def test_gen_set_bus_array(self): assert np.all(act.gen_set_bus == tmp) # array too short - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act = self.helper_action() act.gen_set_bus = tmp[:-1] assert np.all( act.gen_set_bus == 0 - ), "a 
gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" # array too big - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act = self.helper_action() tmp2 = np.concatenate((tmp, (1,))) act.gen_set_bus = tmp2 assert np.all( act.gen_set_bus == 0 - ), "a gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" # float vect - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act = self.helper_action() tmp3 = np.array(li_orig).astype(dt_float) act.gen_set_bus = tmp3 assert np.all( act.gen_set_bus == 0 - ), "a gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" # one of the value too small - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act = self.helper_action() tmp4 = np.array(li_orig) tmp4[2] = -2 act.gen_set_bus = tmp4 assert np.all( act.gen_set_bus == 0 - ), "a gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" # one of the value too large - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act = self.helper_action() tmp5 = np.array(li_orig) tmp5[2] = 3 act.gen_set_bus = tmp5 assert np.all( act.gen_set_bus == 0 - ), "a gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" # wrong type - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act = self.helper_action() tmp6 = np.array(li_orig).astype(str) tmp6[2] = "toto" act.gen_set_bus = tmp6 assert np.all( act.gen_set_bus == 0 - ), "a gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" def test_gen_set_bus_tuple(self): # second set of tests, with tuple @@ -495,67 +495,67 @@ def test_gen_set_bus_tuple(self): # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.gen_set_bus = (3.0, 1) assert np.all( act.gen_set_bus == 0 - ), "a gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.gen_set_bus = (False, 1) assert np.all( act.gen_set_bus == 0 - ), "a gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.gen_set_bus = ("toto", 1) assert np.all( act.gen_set_bus == 0 - ), "a gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.gen_set_bus = (1, "toto") assert np.all( act.gen_set_bus == 0 - ), "a gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" # id too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.gen_set_bus = (6, 1) assert np.all( act.gen_set_bus == 0 - ), "a gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" # id too low act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.gen_set_bus = (-1, 1) assert np.all( 
act.gen_set_bus == 0 - ), "a gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" # not enough element in the tuple act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.gen_set_bus = (1,) assert np.all( act.gen_set_bus == 0 - ), "a gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" # too much element in the tuple act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.gen_set_bus = (1, 2, 3) assert np.all( act.gen_set_bus == 0 - ), "a gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" def test_gen_set_bus_list_asarray(self): """test the set attribute when list are given (list convertible to array)""" @@ -569,57 +569,57 @@ def test_gen_set_bus_list_asarray(self): # list too short act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp0 = copy.deepcopy(li_orig) tmp0.pop(0) act.gen_set_bus = tmp0 assert np.all( act.gen_set_bus == 0 - ), "a gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" # list too big act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp1 = copy.deepcopy(li_orig) tmp1.append(2) act.gen_set_bus = tmp1 assert np.all( act.gen_set_bus == 0 - ), "a gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" # list of float act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp3 = [float(el) for el in li_orig] act.gen_set_bus = tmp3 assert np.all( act.gen_set_bus == 0 - ), "a gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" # one of the value too small act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp4 = copy.deepcopy(li_orig) tmp4[2] = -2 act.gen_set_bus = tmp4 assert np.all( act.gen_set_bus == 0 - ), "a gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" # one of the value too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp5 = copy.deepcopy(li_orig) tmp5[2] = 3 act.gen_set_bus = tmp5 assert np.all( act.gen_set_bus == 0 - ), "a gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = [str(el) for el in li_orig] tmp6[2] = "toto" act.gen_set_bus = tmp6 assert np.all( act.gen_set_bus == 0 - ), "a gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" def test_gen_set_bus_list_oftuple(self): """test the set attribute when list are given (list of tuple)""" @@ -631,66 +631,66 @@ def test_gen_set_bus_list_oftuple(self): # list of float (for the el_id) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp3 = [(float(id_), new_bus) for id_, new_bus in li_orig] act.gen_set_bus = tmp3 assert np.all( act.gen_set_bus == 0 - ), "a gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" # one of the bus value too small act = 
self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp4 = copy.deepcopy(li_orig) tmp4[2] = (3, -2) act.gen_set_bus = tmp4 assert np.all( act.gen_set_bus == 0 - ), "a gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" # one of the bus value too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp5 = copy.deepcopy(li_orig) tmp5[2] = (3, 3) act.gen_set_bus = tmp5 assert np.all( act.gen_set_bus == 0 - ), "a gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" # wrong type (element id) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = copy.deepcopy(li_orig) tmp6[2] = ("toto", 1) act.gen_set_bus = tmp6 assert np.all( act.gen_set_bus == 0 - ), "a gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" # wrong type (bus value) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp7 = copy.deepcopy(li_orig) tmp7[2] = (3, "toto") act.gen_set_bus = tmp7 assert np.all( act.gen_set_bus == 0 - ), "a gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" # el_id too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp8 = copy.deepcopy(li_orig) tmp8.append((5, 1)) act.gen_set_bus = tmp8 assert np.all( act.gen_set_bus == 0 - ), "a gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" # el_id too low act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp9 = copy.deepcopy(li_orig) tmp9.append((-1, 1)) act.gen_set_bus = tmp9 assert np.all( act.gen_set_bus == 0 - ), "a gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" # when the list has exactly the same size act = self.helper_action() @@ -707,66 +707,66 @@ def test_gen_set_bus_dict_with_id(self): # list of float act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp3 = {float(id_): new_bus for id_, new_bus in dict_orig.items()} act.gen_set_bus = tmp3 assert np.all( act.gen_set_bus == 0 - ), "a gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" # one of the bus value too small act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp4 = copy.deepcopy(dict_orig) tmp4[2] = -2 act.gen_set_bus = tmp4 assert np.all( act.gen_set_bus == 0 - ), "a gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" # one of the bus value too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp5 = copy.deepcopy(dict_orig) tmp5[2] = 3 act.gen_set_bus = tmp5 assert np.all( act.gen_set_bus == 0 - ), "a gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" # wrong type (element id) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = copy.deepcopy(dict_orig) tmp6["toto"] = 1 act.gen_set_bus = tmp6 assert np.all( act.gen_set_bus == 0 - ), "a gen has been modified by an illegal 
action" + ), "a gen has been modified by an ambiguous action" # wrong type (bus value) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp7 = copy.deepcopy(dict_orig) tmp7[3] = "tata" act.gen_set_bus = tmp7 assert np.all( act.gen_set_bus == 0 - ), "a gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" # el_id too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp8 = copy.deepcopy(dict_orig) tmp8[11] = 1 act.gen_set_bus = tmp8 assert np.all( act.gen_set_bus == 0 - ), "a gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" # el_id too low act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp9 = copy.deepcopy(dict_orig) tmp9[-1] = 1 act.gen_set_bus = tmp9 assert np.all( act.gen_set_bus == 0 - ), "a gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" def test_gen_set_bus_dict_with_name(self): """test the set attribute when dict are given with key = names""" @@ -777,13 +777,13 @@ def test_gen_set_bus_dict_with_name(self): assert np.all(act.gen_set_bus == [1, 0, -1, 0, 2]) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = copy.deepcopy(dict_orig) tmp6["toto"] = 1 # unknown gen act.gen_set_bus = tmp6 assert np.all( act.gen_set_bus == 0 - ), "a gen has been modified by an illegal action" + ), "a gen has been modified by an ambiguous action" def test_storage_set_bus_array(self): li_orig = [1, 2] # because i have 2 loads @@ -797,59 +797,59 @@ def test_storage_set_bus_array(self): # array too short act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.storage_set_bus = tmp[0] assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been modified by an illegal action" + ), "a storage unit has been modified by an ambiguous action" # array too big act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp2 = np.concatenate((tmp, (1,))) act.storage_set_bus = tmp2 assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been modified by an illegal action" + ), "a storage unit has been modified by an ambiguous action" # float vect act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp3 = np.array(li_orig).astype(dt_float) act.storage_set_bus = tmp3 assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been modified by an illegal action" + ), "a storage unit has been modified by an ambiguous action" # one of the value too small act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp4 = np.array(li_orig) tmp4[1] = -2 act.storage_set_bus = tmp4 assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been modified by an illegal action" + ), "a storage unit has been modified by an ambiguous action" # one of the value too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp5 = np.array(li_orig) tmp5[1] = 3 act.storage_set_bus = tmp5 assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been modified by an illegal action" + ), "a storage unit has been modified by an ambiguous 
action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = np.array(li_orig).astype(str) tmp6[1] = "toto" act.storage_set_bus = tmp6 assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been modified by an illegal action" + ), "a storage unit has been modified by an ambiguous action" def test_storage_set_bus_tuple(self): # second set of tests, with tuple @@ -859,67 +859,67 @@ def test_storage_set_bus_tuple(self): # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.storage_set_bus = (1.0, 1) assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been modified by an illegal action" + ), "a storage unit has been modified by an ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.storage_set_bus = (False, 1) assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been modified by an illegal action" + ), "a storage unit has been modified by an ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.storage_set_bus = ("toto", 1) assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been modified by an illegal action" + ), "a storage unit has been modified by an ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.storage_set_bus = (1, "toto") assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been modified by an illegal action" + ), "a storage unit has been modified by an ambiguous action" # id too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.storage_set_bus = (11, 1) assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been modified by an illegal action" + ), "a storage unit has been modified by an ambiguous action" # id too low act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.storage_set_bus = (-1, 1) assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been modified by an illegal action" + ), "a storage unit has been modified by an ambiguous action" # not enough element in the tuple act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.storage_set_bus = (1,) assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been modified by an illegal action" + ), "a storage unit has been modified by an ambiguous action" # too much element in the tuple act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.storage_set_bus = (1, 2, 3) assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been modified by an illegal action" + ), "a storage unit has been modified by an ambiguous action" def test_storage_set_bus_list_asarray(self): """test the set attribute when list are given (list convertible to array)""" @@ -933,57 +933,57 @@ def test_storage_set_bus_list_asarray(self): # list too short act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp0 = copy.deepcopy(li_orig) tmp0.pop(0) act.storage_set_bus = tmp0 assert np.all( act.storage_set_bus == 0 - ), "a storage unit 
has been modified by an illegal action" + ), "a storage unit has been modified by an ambiguous action" # list too big act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp1 = copy.deepcopy(li_orig) tmp1.append(2) act.storage_set_bus = tmp1 assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been modified by an illegal action" + ), "a storage unit has been modified by an ambiguous action" # list of float act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp3 = [float(el) for el in li_orig] act.storage_set_bus = tmp3 assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been modified by an illegal action" + ), "a storage unit has been modified by an ambiguous action" # one of the value too small act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp4 = copy.deepcopy(li_orig) tmp4[1] = -2 act.storage_set_bus = tmp4 assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been modified by an illegal action" + ), "a storage unit has been modified by an ambiguous action" # one of the value too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp5 = copy.deepcopy(li_orig) tmp5[1] = 3 act.storage_set_bus = tmp5 assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been modified by an illegal action" + ), "a storage unit has been modified by an ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = [str(el) for el in li_orig] tmp6[1] = "toto" act.storage_set_bus = tmp6 assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been modified by an illegal action" + ), "a storage unit has been modified by an ambiguous action" def test_storage_set_bus_list_oftuple(self): """test the set attribute when list are given (list of tuple)""" @@ -995,66 +995,66 @@ def test_storage_set_bus_list_oftuple(self): # list of float (for the el_id) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp3 = [(float(id_), new_bus) for id_, new_bus in li_orig] act.storage_set_bus = tmp3 assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been modified by an illegal action" + ), "a storage unit has been modified by an ambiguous action" # one of the bus value too small act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp4 = copy.deepcopy(li_orig) tmp4[1] = (1, -2) act.storage_set_bus = tmp4 assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been modified by an illegal action" + ), "a storage unit has been modified by an ambiguous action" # one of the bus value too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp5 = copy.deepcopy(li_orig) tmp5[1] = (1, 3) act.storage_set_bus = tmp5 assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been modified by an illegal action" + ), "a storage unit has been modified by an ambiguous action" # wrong type (element id) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = copy.deepcopy(li_orig) tmp6[1] = ("toto", 1) act.storage_set_bus = tmp6 assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been 
modified by an illegal action" + ), "a storage unit has been modified by an ambiguous action" # wrong type (bus value) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp7 = copy.deepcopy(li_orig) tmp7[1] = (3, "toto") act.storage_set_bus = tmp7 assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been modified by an illegal action" + ), "a storage unit has been modified by an ambiguous action" # el_id too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp8 = copy.deepcopy(li_orig) tmp8.append((2, 1)) act.storage_set_bus = tmp8 assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been modified by an illegal action" + ), "a storage unit has been modified by an ambiguous action" # el_id too low act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp9 = copy.deepcopy(li_orig) tmp9.append((-1, 1)) act.storage_set_bus = tmp9 assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been modified by an illegal action" + ), "a storage unit has been modified by an ambiguous action" # last test, when we give a list of tuple of exactly the right size act = self.helper_action() @@ -1071,66 +1071,66 @@ def test_storage_set_bus_dict_with_id(self): # list of float act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp3 = {float(id_): new_bus for id_, new_bus in dict_orig.items()} act.storage_set_bus = tmp3 assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been modified by an illegal action" + ), "a storage unit has been modified by an ambiguous action" # one of the bus value too small act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp4 = copy.deepcopy(dict_orig) tmp4[1] = -2 act.storage_set_bus = tmp4 assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been modified by an illegal action" + ), "a storage unit has been modified by an ambiguous action" # one of the bus value too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp5 = copy.deepcopy(dict_orig) tmp5[1] = 3 act.storage_set_bus = tmp5 assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been modified by an illegal action" + ), "a storage unit has been modified by an ambiguous action" # wrong type (element id) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = copy.deepcopy(dict_orig) tmp6["toto"] = 1 act.storage_set_bus = tmp6 assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been modified by an illegal action" + ), "a storage unit has been modified by an ambiguous action" # wrong type (bus value) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp7 = copy.deepcopy(dict_orig) tmp7[1] = "tata" act.storage_set_bus = tmp7 assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been modified by an illegal action" + ), "a storage unit has been modified by an ambiguous action" # el_id too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp8 = copy.deepcopy(dict_orig) tmp8[2] = 1 act.storage_set_bus = tmp8 assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been modified by 
an illegal action" + ), "a storage unit has been modified by an ambiguous action" # el_id too low act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp9 = copy.deepcopy(dict_orig) tmp9[-1] = 1 act.storage_set_bus = tmp9 assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been modified by an illegal action" + ), "a storage unit has been modified by an ambiguous action" def test_storage_set_bus_dict_with_name(self): """test the set attribute when list are given (list of tuple)""" @@ -1141,13 +1141,13 @@ def test_storage_set_bus_dict_with_name(self): assert np.all(act.storage_set_bus == [1, 0]) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = copy.deepcopy(dict_orig) tmp6["toto"] = 1 # unknown load act.storage_set_bus = tmp6 assert np.all( act.storage_set_bus == 0 - ), "a storage unit has been modified by an illegal action" + ), "a storage unit has been modified by an ambiguous action" def test_line_or_set_bus_array(self): li_orig = [ @@ -1182,59 +1182,59 @@ def test_line_or_set_bus_array(self): # array too short act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.line_or_set_bus = tmp[0] assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" # array too big act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp2 = np.concatenate((tmp, (1,))) act.line_or_set_bus = tmp2 assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" # float vect act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp3 = np.array(li_orig).astype(dt_float) act.line_or_set_bus = tmp3 assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" # one of the value too small act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp4 = np.array(li_orig) tmp4[1] = -2 act.line_or_set_bus = tmp4 assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" # one of the value too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp5 = np.array(li_orig) tmp5[1] = 3 act.line_or_set_bus = tmp5 assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = np.array(li_orig).astype(str) tmp6[1] = "toto" act.line_or_set_bus = tmp6 assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" def test_line_or_set_bus_tuple(self): # second set of tests, with tuple @@ -1244,67 +1244,67 @@ def test_line_or_set_bus_tuple(self): # wrong type act = self.helper_action() - with 
self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.line_or_set_bus = (1.0, 1) assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.line_or_set_bus = (False, 1) assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.line_or_set_bus = ("toto", 1) assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.line_or_set_bus = (1, "toto") assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" # id too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.line_or_set_bus = (21, 1) assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" # id too low act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.line_or_set_bus = (-1, 1) assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" # not enough element in the tuple act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.line_or_set_bus = (1,) assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" # too much element in the tuple act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.line_or_set_bus = (1, 2, 3) assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" def test_line_or_set_bus_list_asarray(self): """test the set attribute when list are given (list convertible to array)""" @@ -1318,57 +1318,57 @@ def test_line_or_set_bus_list_asarray(self): # list too short act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp0 = copy.deepcopy(li_orig) tmp0.pop(0) act.line_or_set_bus = tmp0 assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" # list too big act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp1 = copy.deepcopy(li_orig) tmp1.append(2) act.line_or_set_bus = tmp1 assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been 
modified by an ambiguous action" # list of float act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp3 = [float(el) for el in li_orig] act.line_or_set_bus = tmp3 assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" # one of the value too small act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp4 = copy.deepcopy(li_orig) tmp4[1] = -2 act.line_or_set_bus = tmp4 assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" # one of the value too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp5 = copy.deepcopy(li_orig) tmp5[1] = 3 act.line_or_set_bus = tmp5 assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = [str(el) for el in li_orig] tmp6[1] = "toto" act.line_or_set_bus = tmp6 assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" def test_line_or_set_bus_list_oftuple(self): """test the set attribute when list are given (list of tuple)""" @@ -1380,66 +1380,66 @@ def test_line_or_set_bus_list_oftuple(self): # list of float (for the el_id) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp3 = [(float(id_), new_bus) for id_, new_bus in li_orig] act.line_or_set_bus = tmp3 assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" # one of the bus value too small act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp4 = copy.deepcopy(li_orig) tmp4[1] = (1, -2) act.line_or_set_bus = tmp4 assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" # one of the bus value too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp5 = copy.deepcopy(li_orig) tmp5[1] = (1, 3) act.line_or_set_bus = tmp5 assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" # wrong type (element id) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = copy.deepcopy(li_orig) tmp6[1] = ("toto", 1) act.line_or_set_bus = tmp6 assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" # wrong type (bus value) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp7 = copy.deepcopy(li_orig) tmp7[1] = (3, "toto") act.line_or_set_bus = tmp7 assert np.all( 
act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" # el_id too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp8 = copy.deepcopy(li_orig) tmp8.append((21, 1)) act.line_or_set_bus = tmp8 assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" # el_id too low act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp9 = copy.deepcopy(li_orig) tmp9.append((-1, 1)) act.line_or_set_bus = tmp9 assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" # last test, when we give a list of tuple of exactly the right size act = self.helper_action() @@ -1456,66 +1456,66 @@ def test_line_or_set_bus_dict_with_id(self): # list of float act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp3 = {float(id_): new_bus for id_, new_bus in dict_orig.items()} act.line_or_set_bus = tmp3 assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" # one of the bus value too small act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp4 = copy.deepcopy(dict_orig) tmp4[1] = -2 act.line_or_set_bus = tmp4 assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" # one of the bus value too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp5 = copy.deepcopy(dict_orig) tmp5[1] = 3 act.line_or_set_bus = tmp5 assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" # wrong type (element id) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = copy.deepcopy(dict_orig) tmp6["toto"] = 1 act.line_or_set_bus = tmp6 assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" # wrong type (bus value) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp7 = copy.deepcopy(dict_orig) tmp7[1] = "tata" act.line_or_set_bus = tmp7 assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" # el_id too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp8 = copy.deepcopy(dict_orig) tmp8[21] = 1 act.line_or_set_bus = tmp8 assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" # el_id too low act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp9 
= copy.deepcopy(dict_orig) tmp9[-1] = 1 act.line_or_set_bus = tmp9 assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" def test_line_or_set_bus_dict_with_name(self): """test the set attribute when list are given (list of tuple)""" @@ -1526,13 +1526,13 @@ def test_line_or_set_bus_dict_with_name(self): assert np.all(act.line_or_set_bus == [1, 0] + [0 for _ in range(18)]) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = copy.deepcopy(dict_orig) tmp6["toto"] = 1 # unknown load act.line_or_set_bus = tmp6 assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" def test_line_ex_set_bus_array(self): li_orig = [ @@ -1567,59 +1567,59 @@ def test_line_ex_set_bus_array(self): # array too short act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.line_ex_set_bus = tmp[0] assert np.all( act.line_ex_set_bus == 0 - ), "a line (ext) unit has been modified by an illegal action" + ), "a line (ext) unit has been modified by an ambiguous action" # array too big act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp2 = np.concatenate((tmp, (1,))) act.line_ex_set_bus = tmp2 assert np.all( act.line_ex_set_bus == 0 - ), "a line (ext) unit has been modified by an illegal action" + ), "a line (ext) unit has been modified by an ambiguous action" # float vect act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp3 = np.array(li_orig).astype(dt_float) act.line_ex_set_bus = tmp3 assert np.all( act.line_ex_set_bus == 0 - ), "a line (ext) unit has been modified by an illegal action" + ), "a line (ext) unit has been modified by an ambiguous action" # one of the value too small act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp4 = np.array(li_orig) tmp4[1] = -2 act.line_ex_set_bus = tmp4 assert np.all( act.line_ex_set_bus == 0 - ), "a line (ext) unit has been modified by an illegal action" + ), "a line (ext) unit has been modified by an ambiguous action" # one of the value too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp5 = np.array(li_orig) tmp5[1] = 3 act.line_ex_set_bus = tmp5 assert np.all( act.line_ex_set_bus == 0 - ), "a line (ext) unit has been modified by an illegal action" + ), "a line (ext) unit has been modified by an ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = np.array(li_orig).astype(str) tmp6[1] = "toto" act.line_ex_set_bus = tmp6 assert np.all( act.line_ex_set_bus == 0 - ), "a line (ext) unit has been modified by an illegal action" + ), "a line (ext) unit has been modified by an ambiguous action" def test_line_ex_set_bus_tuple(self): # second set of tests, with tuple @@ -1629,67 +1629,67 @@ def test_line_ex_set_bus_tuple(self): # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.line_ex_set_bus = (1.0, 1) assert np.all( act.line_ex_set_bus == 0 - ), "a line (ex) unit has been modified by an illegal action" + 
), "a line (ex) unit has been modified by an ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.line_ex_set_bus = (False, 1) assert np.all( act.line_ex_set_bus == 0 - ), "a line (ex) unit has been modified by an illegal action" + ), "a line (ex) unit has been modified by an ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.line_ex_set_bus = ("toto", 1) assert np.all( act.line_ex_set_bus == 0 - ), "a line (ex) unit has been modified by an illegal action" + ), "a line (ex) unit has been modified by an ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.line_ex_set_bus = (1, "toto") assert np.all( act.line_ex_set_bus == 0 - ), "a line (ex) unit has been modified by an illegal action" + ), "a line (ex) unit has been modified by an ambiguous action" # id too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.line_ex_set_bus = (21, 1) assert np.all( act.line_ex_set_bus == 0 - ), "a line (ex) unit has been modified by an illegal action" + ), "a line (ex) unit has been modified by an ambiguous action" # id too low act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.line_ex_set_bus = (-1, 1) assert np.all( act.line_ex_set_bus == 0 - ), "a line (ex) unit has been modified by an illegal action" + ), "a line (ex) unit has been modified by an ambiguous action" # not enough element in the tuple act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.line_ex_set_bus = (1,) assert np.all( act.line_ex_set_bus == 0 - ), "a line (ex) unit has been modified by an illegal action" + ), "a line (ex) unit has been modified by an ambiguous action" # too much element in the tuple act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.line_ex_set_bus = (1, 2, 3) assert np.all( act.line_ex_set_bus == 0 - ), "a line (ex) unit has been modified by an illegal action" + ), "a line (ex) unit has been modified by an ambiguous action" def test_line_ex_set_bus_list_asarray(self): """test the set attribute when list are given (list convertible to array)""" @@ -1703,57 +1703,57 @@ def test_line_ex_set_bus_list_asarray(self): # list too short act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp0 = copy.deepcopy(li_orig) tmp0.pop(0) act.line_ex_set_bus = tmp0 assert np.all( act.line_ex_set_bus == 0 - ), "a line (ex) unit has been modified by an illegal action" + ), "a line (ex) unit has been modified by an ambiguous action" # list too big act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp1 = copy.deepcopy(li_orig) tmp1.append(2) act.line_ex_set_bus = tmp1 assert np.all( act.line_ex_set_bus == 0 - ), "a line (ex) unit has been modified by an illegal action" + ), "a line (ex) unit has been modified by an ambiguous action" # list of float act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp3 = [float(el) for el in li_orig] act.line_ex_set_bus = tmp3 assert np.all( act.line_ex_set_bus == 0 - ), "a line (ex) unit has been 
modified by an illegal action" + ), "a line (ex) unit has been modified by an ambiguous action" # one of the value too small act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp4 = copy.deepcopy(li_orig) tmp4[1] = -2 act.line_ex_set_bus = tmp4 assert np.all( act.line_ex_set_bus == 0 - ), "a line (ex) unit has been modified by an illegal action" + ), "a line (ex) unit has been modified by an ambiguous action" # one of the value too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp5 = copy.deepcopy(li_orig) tmp5[1] = 3 act.line_ex_set_bus = tmp5 assert np.all( act.line_ex_set_bus == 0 - ), "a line (ex) unit has been modified by an illegal action" + ), "a line (ex) unit has been modified by an ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = [str(el) for el in li_orig] tmp6[1] = "toto" act.line_ex_set_bus = tmp6 assert np.all( act.line_ex_set_bus == 0 - ), "a line (ex) unit has been modified by an illegal action" + ), "a line (ex) unit has been modified by an ambiguous action" def test_line_ex_set_bus_list_oftuple(self): """test the set attribute when list are given (list of tuple)""" @@ -1765,66 +1765,66 @@ def test_line_ex_set_bus_list_oftuple(self): # list of float (for the el_id) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp3 = [(float(id_), new_bus) for id_, new_bus in li_orig] act.line_ex_set_bus = tmp3 assert np.all( act.line_ex_set_bus == 0 - ), "a line (ex) unit has been modified by an illegal action" + ), "a line (ex) unit has been modified by an ambiguous action" # one of the bus value too small act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp4 = copy.deepcopy(li_orig) tmp4[1] = (1, -2) act.line_ex_set_bus = tmp4 assert np.all( act.line_ex_set_bus == 0 - ), "a line (ex) unit has been modified by an illegal action" + ), "a line (ex) unit has been modified by an ambiguous action" # one of the bus value too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp5 = copy.deepcopy(li_orig) tmp5[1] = (1, 3) act.line_ex_set_bus = tmp5 assert np.all( act.line_ex_set_bus == 0 - ), "a line (ex) unit has been modified by an illegal action" + ), "a line (ex) unit has been modified by an ambiguous action" # wrong type (element id) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = copy.deepcopy(li_orig) tmp6[1] = ("toto", 1) act.line_ex_set_bus = tmp6 assert np.all( act.line_ex_set_bus == 0 - ), "a line (ex) unit has been modified by an illegal action" + ), "a line (ex) unit has been modified by an ambiguous action" # wrong type (bus value) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp7 = copy.deepcopy(li_orig) tmp7[1] = (3, "toto") act.line_ex_set_bus = tmp7 assert np.all( act.line_ex_set_bus == 0 - ), "a line (ex) unit has been modified by an illegal action" + ), "a line (ex) unit has been modified by an ambiguous action" # el_id too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp8 = copy.deepcopy(li_orig) tmp8.append((21, 1)) act.line_ex_set_bus = tmp8 assert 
np.all( act.line_ex_set_bus == 0 - ), "a line (ex) unit has been modified by an illegal action" + ), "a line (ex) unit has been modified by an ambiguous action" # el_id too low act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp9 = copy.deepcopy(li_orig) tmp9.append((-1, 1)) act.line_ex_set_bus = tmp9 assert np.all( act.line_ex_set_bus == 0 - ), "a line (ex) unit has been modified by an illegal action" + ), "a line (ex) unit has been modified by an ambiguous action" # last test, when we give a list of tuple of exactly the right size act = self.helper_action() @@ -1841,66 +1841,66 @@ def test_line_ex_set_bus_dict_with_id(self): # list of float act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp3 = {float(id_): new_bus for id_, new_bus in dict_orig.items()} act.line_ex_set_bus = tmp3 assert np.all( act.line_ex_set_bus == 0 - ), "a line (ex) unit has been modified by an illegal action" + ), "a line (ex) unit has been modified by an ambiguous action" # one of the bus value too small act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp4 = copy.deepcopy(dict_orig) tmp4[1] = -2 act.line_ex_set_bus = tmp4 assert np.all( act.line_ex_set_bus == 0 - ), "a line (ex) unit has been modified by an illegal action" + ), "a line (ex) unit has been modified by an ambiguous action" # one of the bus value too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp5 = copy.deepcopy(dict_orig) tmp5[1] = 3 act.line_ex_set_bus = tmp5 assert np.all( act.line_ex_set_bus == 0 - ), "a line (ex) unit has been modified by an illegal action" + ), "a line (ex) unit has been modified by an ambiguous action" # wrong type (element id) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = copy.deepcopy(dict_orig) tmp6["toto"] = 1 act.line_ex_set_bus = tmp6 assert np.all( act.line_ex_set_bus == 0 - ), "a line (ex) unit has been modified by an illegal action" + ), "a line (ex) unit has been modified by an ambiguous action" # wrong type (bus value) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp7 = copy.deepcopy(dict_orig) tmp7[1] = "tata" act.line_ex_set_bus = tmp7 assert np.all( act.line_ex_set_bus == 0 - ), "a line (ex) unit has been modified by an illegal action" + ), "a line (ex) unit has been modified by an ambiguous action" # el_id too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp8 = copy.deepcopy(dict_orig) tmp8[21] = 1 act.line_ex_set_bus = tmp8 assert np.all( act.line_ex_set_bus == 0 - ), "a line (ex) unit has been modified by an illegal action" + ), "a line (ex) unit has been modified by an ambiguous action" # el_id too low act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp9 = copy.deepcopy(dict_orig) tmp9[-1] = 1 act.line_ex_set_bus = tmp9 assert np.all( act.line_ex_set_bus == 0 - ), "a line (ex) unit has been modified by an illegal action" + ), "a line (ex) unit has been modified by an ambiguous action" def test_line_ex_set_bus_dict_with_name(self): """test the set attribute when list are given (list of tuple)""" @@ -1911,13 +1911,13 @@ def test_line_ex_set_bus_dict_with_name(self): assert 
np.all(act.line_or_set_bus == [1, 0] + [0 for _ in range(18)]) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = copy.deepcopy(dict_orig) tmp6["toto"] = 1 # unknown load act.line_or_set_bus = tmp6 assert np.all( act.line_or_set_bus == 0 - ), "a line (origin) unit has been modified by an illegal action" + ), "a line (origin) unit has been modified by an ambiguous action" def test_set_by_sub(self): # TODO more thorough testing !!! @@ -1927,18 +1927,18 @@ def test_set_by_sub(self): assert aff_subs[1] assert np.sum(aff_subs) == 1 - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.sub_set_bus = (1, (1, 1, -1, 1, 2, 3, -1)) # one too high - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.sub_set_bus = (1, (1, 1, -1, 1, 2, -2, -1)) # one too low - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.sub_set_bus = (1, (1, 1, -1, 1, 2, -1)) # too short - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.sub_set_bus = (1, (1, 1, -1, 1, 2, 1, 2, 2)) # too big - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.sub_set_bus = np.zeros(act.dim_topo + 1, dtype=int) # too long - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.sub_set_bus = np.zeros(act.dim_topo - 1, dtype=int) # too short # ok @@ -1958,24 +1958,24 @@ def test_change_by_sub(self): assert aff_subs[1] assert np.sum(aff_subs) == 1 - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.sub_change_bus = ( 1, (True, True, True, False, False, True), ) # too short - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.sub_change_bus = ( 1, (True, True, True, False, False, True, False, True), ) # too big - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.sub_change_bus = np.zeros(act.dim_topo + 1, dtype=int) # too long - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.sub_change_bus = np.zeros(act.dim_topo - 1, dtype=int) # too short - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.sub_change_bus = np.zeros(act.dim_topo - 1, dtype=int) # wrong type - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.sub_change_bus = np.zeros(act.dim_topo - 1, dtype=float) # wrong type # ok @@ -2060,59 +2060,59 @@ def test_line_set_status_array(self): # array too short act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.line_set_status = tmp[0] assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # array too big act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp2 = np.concatenate((tmp, (1,))) act.line_set_status = tmp2 assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # float vect act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp3 = np.array(li_orig).astype(dt_float) act.line_set_status = tmp3 assert np.all( act.line_set_status == 0 - ), "a line status has been 
modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # one of the value too small act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp4 = np.array(li_orig) tmp4[1] = -2 act.line_set_status = tmp4 assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # one of the value too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp5 = np.array(li_orig) tmp5[1] = 2 act.line_set_status = tmp5 assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = np.array(li_orig).astype(str) tmp6[1] = "toto" - act.line_ex_set_bus = tmp6 + act.line_set_status = tmp6 assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" def test_line_set_status_tuple(self): # second set of tests, with tuple @@ -2122,67 +2122,67 @@ def test_line_set_status_tuple(self): # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.line_set_status = (1.0, 1) assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.line_set_status = (False, 1) assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.line_set_status = ("toto", 1) assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.line_set_status = (1, "toto") assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # id too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.line_set_status = (21, 1) assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # id too low act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.line_set_status = (-1, 1) assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # not enough element in the tuple act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.line_set_status = (1,) assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line 
status has been modified by an ambiguous action" # too much element in the tuple act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): act.line_set_status = (1, 2, 3) assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" def test_line_set_status_list_asarray(self): """test the set attribute when list are given (list convertible to array)""" @@ -2196,57 +2196,57 @@ def test_line_set_status_list_asarray(self): # list too short act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp0 = copy.deepcopy(li_orig) tmp0.pop(0) act.line_set_status = tmp0 assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # list too big act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp1 = copy.deepcopy(li_orig) tmp1.append(1) act.line_set_status = tmp1 assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # list of float act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp3 = [float(el) for el in li_orig] act.line_set_status = tmp3 assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # one of the value too small act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp4 = copy.deepcopy(li_orig) tmp4[1] = -2 act.line_set_status = tmp4 assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # one of the value too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp5 = copy.deepcopy(li_orig) tmp5[1] = 2 act.line_set_status = tmp5 assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = [str(el) for el in li_orig] tmp6[1] = "toto" act.line_set_status = tmp6 assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" def test_line_set_status_list_oftuple(self): """test the set attribute when list are given (list of tuple)""" @@ -2258,66 +2258,66 @@ def test_line_set_status_list_oftuple(self): # list of float (for the el_id) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp3 = [(float(id_), new_bus) for id_, new_bus in li_orig] act.line_set_status = tmp3 assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # one of the bus value too small act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp4 = copy.deepcopy(li_orig) tmp4[1] = (1, -2) act.line_set_status = tmp4 assert 
np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # one of the bus value too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp5 = copy.deepcopy(li_orig) tmp5[1] = (1, 2) act.line_set_status = tmp5 assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # wrong type (element id) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = copy.deepcopy(li_orig) tmp6[1] = ("toto", 1) act.line_set_status = tmp6 assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # wrong type (bus value) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp7 = copy.deepcopy(li_orig) tmp7[1] = (3, "toto") act.line_set_status = tmp7 assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # el_id too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp8 = copy.deepcopy(li_orig) tmp8.append((21, 1)) act.line_set_status = tmp8 assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # el_id too low act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp9 = copy.deepcopy(li_orig) tmp9.append((-1, 1)) act.line_set_status = tmp9 assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # last test, when we give a list of tuple of exactly the right size act = self.helper_action() @@ -2334,66 +2334,66 @@ def test_line_set_status_dict_with_id(self): # list of float act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp3 = {float(id_): new_bus for id_, new_bus in dict_orig.items()} act.line_set_status = tmp3 assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # one of the bus value too small act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp4 = copy.deepcopy(dict_orig) tmp4[1] = -2 act.line_set_status = tmp4 assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # one of the bus value too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp5 = copy.deepcopy(dict_orig) tmp5[1] = 3 act.line_set_status = tmp5 assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # wrong type (element id) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = copy.deepcopy(dict_orig) tmp6["toto"] = 1 act.line_set_status = tmp6 assert 
np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # wrong type (bus value) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp7 = copy.deepcopy(dict_orig) tmp7[1] = "tata" act.line_set_status = tmp7 assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # el_id too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp8 = copy.deepcopy(dict_orig) tmp8[21] = 1 act.line_set_status = tmp8 assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # el_id too low act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp9 = copy.deepcopy(dict_orig) tmp9[-1] = 1 act.line_set_status = tmp9 assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" def test_line_set_status_dict_with_name(self): """test the set attribute when list are given (list of tuple)""" @@ -2404,13 +2404,13 @@ def test_line_set_status_dict_with_name(self): assert np.all(act.line_set_status == [1, 0] + [0 for _ in range(18)]) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = copy.deepcopy(dict_orig) tmp6["toto"] = 1 # unknown load act.line_set_status = tmp6 assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" class TestChangeBus(unittest.TestCase): @@ -2462,51 +2462,51 @@ def _aux_change_bus_int(self, name_el, nb_el, prop="change_bus"): # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): setattr(act, prop_name, 3.0) assert np.all( ~getattr(act, prop_name) - ), f"a {name_el} has been modified by an illegal action" + ), f"a {name_el} has been modified by an ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): setattr(act, prop_name, False) assert np.all( ~getattr(act, prop_name) - ), f"a {name_el} has been modified by an illegal action" + ), f"a {name_el} has been modified by an ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): setattr(act, prop_name, "toto") assert np.all( ~getattr(act, prop_name) - ), f"a {name_el} has been modified by an illegal action" + ), f"a {name_el} has been modified by an ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): setattr(act, prop_name, (1, "toto")) assert np.all( ~getattr(act, prop_name) - ), f"a {name_el} has been modified by an illegal action" + ), f"a {name_el} has been modified by an ambiguous action" # id too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): setattr(act, prop_name, nb_el + 1) assert np.all( ~getattr(act, prop_name) - ), f"a {name_el} has been modified by an illegal action" + ), f"a {name_el} has been 
modified by an ambiguous action" # id too low act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): setattr(act, prop_name, -1) assert np.all( ~getattr(act, prop_name) - ), f"a {name_el} has been modified by an illegal action" + ), f"a {name_el} has been modified by an ambiguous action" def test_load_change_bus_int(self): self._aux_change_bus_int("load", self.helper_action.n_load) @@ -2532,23 +2532,23 @@ def _aux_change_bus_tuple(self, name_el, nb_el, prop="change_bus"): """first set of test by giving the a tuple: should be deactivated!""" act = self.helper_action() prop_name = f"{name_el}_{prop}" - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): setattr(act, prop_name, (1,)) assert np.all( ~getattr(act, prop_name) - ), f"a {name_el} has been modified by an illegal action" + ), f"a {name_el} has been modified by an ambiguous action" act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): setattr(act, prop_name, (1, False)) assert np.all( ~getattr(act, prop_name) - ), f"a {name_el} has been modified by an illegal action" + ), f"a {name_el} has been modified by an ambiguous action" act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): setattr(act, prop_name, (1, False, 3)) assert np.all( ~getattr(act, prop_name) - ), f"a {name_el} has been modified by an illegal action" + ), f"a {name_el} has been modified by an ambiguous action" def test_load_change_bus_tuple(self): self._aux_change_bus_tuple("load", self.helper_action.n_load) @@ -2598,20 +2598,20 @@ def _aux_change_bus_arraybool(self, name_el, nb_el, prop="change_bus"): # list too short act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): setattr(act, prop_name, tmp[:-1]) assert np.all( ~getattr(act, prop_name) - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" # list too big act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp_1 = np.concatenate((tmp, (False,))) setattr(act, prop_name, tmp_1) assert np.all( ~getattr(act, prop_name) - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" def test_load_change_bus_arraybool(self): self._aux_change_bus_arraybool("load", self.helper_action.n_load) @@ -2661,21 +2661,21 @@ def _aux_change_bus_arrayint(self, name_el, nb_el, prop="change_bus"): # one id too low act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp2 = np.concatenate((tmp, (-1,))) setattr(act, prop_name, tmp2) assert np.all( ~getattr(act, prop_name) - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" # one id too high act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp3 = np.concatenate((tmp, (nb_el,))) setattr(act, prop_name, tmp3) assert np.all( ~getattr(act, prop_name) - ), "a load has been modified by an illegal action" + ), "a load has been modified by an ambiguous action" def test_load_change_bus_arrayint(self): self._aux_change_bus_arrayint("load", self.helper_action.n_load) @@ -2707,59 +2707,59 @@ def _aux_change_bus_listbool(self, name_el, nb_el, prop="change_bus"): li_orig = [False, True] + [False for _ in range(nb_el)] 
act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): setattr(act, prop_name, li_orig) assert np.all( ~getattr(act, prop_name) - ), f"a {name_el} has been modified by an illegal action" + ), f"a {name_el} has been modified by an ambiguous action" # list too short act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): setattr(act, prop_name, li_orig[:-1]) assert np.all( ~getattr(act, prop_name) - ), f"a {name_el} has been modified by an illegal action" + ), f"a {name_el} has been modified by an ambiguous action" # list too big act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): li_2 = copy.deepcopy(li_orig) li_2.append(True) setattr(act, prop_name, li_2) assert np.all( ~getattr(act, prop_name) - ), f"a {name_el} has been modified by an illegal action" + ), f"a {name_el} has been modified by an ambiguous action" # list mixed types (str) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): li_3 = copy.deepcopy(li_orig) li_3.append("toto") setattr(act, prop_name, li_3) assert np.all( ~getattr(act, prop_name) - ), f"a {name_el} has been modified by an illegal action" + ), f"a {name_el} has been modified by an ambiguous action" # list mixed types (float) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): li_4 = copy.deepcopy(li_orig) li_4.append(1.0) setattr(act, prop_name, li_4) assert np.all( ~getattr(act, prop_name) - ), f"a {name_el} has been modified by an illegal action" + ), f"a {name_el} has been modified by an ambiguous action" # list mixed types (int) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): li_5 = copy.deepcopy(li_orig) li_5.append(1) setattr(act, prop_name, li_5) assert np.all( ~getattr(act, prop_name) - ), f"a {name_el} has been modified by an illegal action" + ), f"a {name_el} has been modified by an ambiguous action" def test_load_change_bus_listbool(self): self._aux_change_bus_listbool("load", nb_el=self.helper_action.n_load) @@ -2795,48 +2795,48 @@ def _aux_change_bus_listint(self, name_el, nb_el, prop="change_bus"): # one id too low act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp2 = copy.deepcopy(li_orig) tmp2.append(-1) setattr(act, prop_name, tmp2) assert np.all( ~getattr(act, prop_name) - ), f"a {name_el} has been modified by an illegal action" + ), f"a {name_el} has been modified by an ambiguous action" # one id too high act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp3 = copy.deepcopy(li_orig) tmp3.append(nb_el) setattr(act, prop_name, tmp3) assert np.all( ~getattr(act, prop_name) - ), f"a {name_el} has been modified by an illegal action" + ), f"a {name_el} has been modified by an ambiguous action" # one string act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp4 = copy.deepcopy(li_orig) tmp4.append("toto") setattr(act, prop_name, tmp4) assert np.all( ~getattr(act, prop_name) - ), f"a {name_el} has been modified by an illegal action" + ), f"a {name_el} has been modified by an ambiguous action" # one float act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp5 = 
copy.deepcopy(li_orig) tmp5.append(1.0) setattr(act, prop_name, tmp5) assert np.all( ~getattr(act, prop_name) - ), f"a {name_el} has been modified by an illegal action" + ), f"a {name_el} has been modified by an ambiguous action" # test it revert back to proper thing act = self.helper_action() setattr(act, prop_name, li_orig) - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp5 = copy.deepcopy(li_orig) tmp5.append(1.0) setattr(act, prop_name, tmp5) @@ -2933,80 +2933,80 @@ def _aux_change_val_tuple(self, name_el, nb_el, prop_name): # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): setattr(act, prop_name, (3.0, 1.0)) assert np.all( getattr(act, prop_name) == this_zero - ), f"a {name_el} has been modified by an illegal action" + ), f"a {name_el} has been modified by an ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): setattr(act, prop_name, (False, 1.0)) assert np.all( getattr(act, prop_name) == this_zero - ), f"a {name_el} has been modified by an illegal action" + ), f"a {name_el} has been modified by an ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): setattr(act, prop_name, ("toto", 1.0)) assert np.all( getattr(act, prop_name) == this_zero - ), f"a {name_el} has been modified by an illegal action" + ), f"a {name_el} has been modified by an ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): setattr(act, prop_name, (1, "toto")) assert np.all( getattr(act, prop_name) == this_zero - ), f"a {name_el} has been modified by an illegal action" + ), f"a {name_el} has been modified by an ambiguous action" # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): setattr(act, prop_name, (1, False)) assert np.all( getattr(act, prop_name) == this_zero - ), f"a {name_el} has been modified by an illegal action" + ), f"a {name_el} has been modified by an ambiguous action" # id too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): setattr(act, prop_name, (nb_el + 1, 1.0)) assert np.all( getattr(act, prop_name) == this_zero - ), f"a {name_el} has been modified by an illegal action" + ), f"a {name_el} has been modified by an ambiguous action" # id too low act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): setattr(act, prop_name, (-1, 1.0)) assert np.all( getattr(act, prop_name) == this_zero - ), f"a {name_el} has been modified by an illegal action" + ), f"a {name_el} has been modified by an ambiguous action" # tuple wrong size act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): setattr(act, prop_name, (1,)) assert np.all( getattr(act, prop_name) == this_zero - ), f"a {name_el} has been modified by an illegal action" + ), f"a {name_el} has been modified by an ambiguous action" # tuple wrong size act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): setattr(act, prop_name, (1, 1.0, 1)) assert np.all( getattr(act, prop_name) == this_zero - ), f"a {name_el} has been modified by an illegal action" + ), f"a {name_el} has been 
modified by an ambiguous action" # test correct canceling act = self.helper_action() setattr(act, prop_name, (1, 1.0)) - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): setattr(act, prop_name, (1, 1.0, 1)) assert np.all( getattr(act, prop_name) == [0.0, 1.0] + [0.0 for _ in range(nb_el - 2)] @@ -3040,34 +3040,34 @@ def _aux_set_val_array(self, name_el, nb_el, prop_name): # array too short act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): setattr(act, prop_name, tmp[0]) assert np.all(getattr(act, prop_name) == 0) # array too big act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp2 = np.concatenate((tmp, (1,))) setattr(act, prop_name, tmp2) assert np.all(getattr(act, prop_name) == 0) # bool vect act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp3 = np.array(li_orig).astype(dt_bool) setattr(act, prop_name, tmp3) assert np.all(getattr(act, prop_name) == 0) # int vect act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp4 = np.array(li_orig).astype(dt_int) setattr(act, prop_name, tmp4) assert np.all(getattr(act, prop_name) == 0) # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = np.array(li_orig).astype(str) tmp6[1] = "toto" setattr(act, prop_name, tmp6) @@ -3076,7 +3076,7 @@ def _aux_set_val_array(self, name_el, nb_el, prop_name): # test reset ok act = self.helper_action() setattr(act, prop_name, tmp) # ok - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = np.array(li_orig).astype(str) tmp6[1] = "toto" setattr(act, prop_name, tmp6) @@ -3100,7 +3100,7 @@ def _aux_set_val_list_asarray(self, name_el, nb_el, prop_name): # list too short act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp0 = copy.deepcopy(li_orig) tmp0.pop(0) setattr(act, prop_name, tmp0) @@ -3108,7 +3108,7 @@ def _aux_set_val_list_asarray(self, name_el, nb_el, prop_name): # list too big act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp1 = copy.deepcopy(li_orig) tmp1.append(1.0) setattr(act, prop_name, tmp1) @@ -3116,14 +3116,14 @@ def _aux_set_val_list_asarray(self, name_el, nb_el, prop_name): # list of float act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp3 = [int(el) for el in li_orig] setattr(act, prop_name, tmp3) assert np.all(getattr(act, prop_name) == 0) # wrong type act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = [str(el) for el in li_orig] tmp6[1] = "toto" setattr(act, prop_name, tmp6) @@ -3132,7 +3132,7 @@ def _aux_set_val_list_asarray(self, name_el, nb_el, prop_name): # reset ok act = self.helper_action() setattr(act, prop_name, li_orig) - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp3 = [int(el) for el in li_orig] setattr(act, prop_name, tmp3) assert np.all(getattr(act, prop_name) == tmp) @@ -3157,35 +3157,35 @@ def _aux_set_val_list_oftuple(self, name_el, nb_el, prop_name): # list of float (for the el_id) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp3 = 
[(float(id_), new_bus) for id_, new_bus in li_orig] setattr(act, prop_name, tmp3) assert np.all(getattr(act, prop_name) == 0) # wrong type (element id) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = copy.deepcopy(li_orig) tmp6[1] = ("toto", 1) setattr(act, prop_name, tmp6) assert np.all(getattr(act, prop_name) == 0) # wrong type (bus value) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp7 = copy.deepcopy(li_orig) tmp7[1] = (3, "toto") setattr(act, prop_name, tmp7) assert np.all(getattr(act, prop_name) == 0) # el_id too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp8 = copy.deepcopy(li_orig) tmp8.append((21, 1)) setattr(act, prop_name, tmp8) assert np.all(getattr(act, prop_name) == 0) # el_id too low act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp9 = copy.deepcopy(li_orig) tmp9.append((-1, 1)) setattr(act, prop_name, tmp9) @@ -3214,66 +3214,66 @@ def todo_line_set_status_dict_with_id(self): # list of float act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp3 = {float(id_): new_bus for id_, new_bus in dict_orig.items()} act.line_set_status = tmp3 assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # one of the bus value too small act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp4 = copy.deepcopy(dict_orig) tmp4[1] = -2 act.line_set_status = tmp4 assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # one of the bus value too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp5 = copy.deepcopy(dict_orig) tmp5[1] = 3 act.line_set_status = tmp5 assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # wrong type (element id) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = copy.deepcopy(dict_orig) tmp6["toto"] = 1 act.line_set_status = tmp6 assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # wrong type (bus value) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp7 = copy.deepcopy(dict_orig) tmp7[1] = "tata" act.line_set_status = tmp7 assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # el_id too large act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp8 = copy.deepcopy(dict_orig) tmp8[21] = 1 act.line_set_status = tmp8 assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" # el_id too low act = self.helper_action() - with self.assertRaises(IllegalAction): + with 
self.assertRaises(AmbiguousAction): tmp9 = copy.deepcopy(dict_orig) tmp9[-1] = 1 act.line_set_status = tmp9 assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" def todo_line_set_status_dict_with_name(self): """test the set attribute when list are given (list of tuple)""" @@ -3284,13 +3284,13 @@ def todo_line_set_status_dict_with_name(self): assert np.all(act.line_set_status == [1, 0] + [0 for _ in range(18)]) act = self.helper_action() - with self.assertRaises(IllegalAction): + with self.assertRaises(AmbiguousAction): tmp6 = copy.deepcopy(dict_orig) tmp6["toto"] = 1 # unknown load act.line_set_status = tmp6 assert np.all( act.line_set_status == 0 - ), "a line status has been modified by an illegal action" + ), "a line status has been modified by an ambiguous action" if __name__ == "__main__": diff --git a/grid2op/tests/test_Action_iadd.py b/grid2op/tests/test_Action_iadd.py index 10f203cab..1de5fe8fa 100644 --- a/grid2op/tests/test_Action_iadd.py +++ b/grid2op/tests/test_Action_iadd.py @@ -1,3 +1,12 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# See AUTHORS.txt +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. + + import unittest import warnings from abc import ABC, abstractmethod diff --git a/grid2op/tests/test_Agent.py b/grid2op/tests/test_Agent.py index 30195af39..007a0fbb7 100644 --- a/grid2op/tests/test_Agent.py +++ b/grid2op/tests/test_Agent.py @@ -142,6 +142,9 @@ def test_2_busswitch(self): expected_reward = dt_float(12075.389) expected_reward = dt_float(12277.632) expected_reward = dt_float(12076.35644531 / 12.) + # 1006.363037109375 + #: Breaking change in 1.10.0: topology are not in the same order + expected_reward = dt_float(1006.34924) assert ( np.abs(cum_reward - expected_reward) <= self.tol_one ), f"The reward has not been properly computed {cum_reward} instead of {expected_reward}" diff --git a/grid2op/tests/test_AlertReward.py b/grid2op/tests/test_AlertReward.py index f95f3a568..28fff0415 100644 --- a/grid2op/tests/test_AlertReward.py +++ b/grid2op/tests/test_AlertReward.py @@ -640,10 +640,12 @@ def test_raise_illicit_alert(self) -> None: try : act = env.action_space({"raise_alert": [attackable_line_id]}) except Grid2OpException as exc_ : - assert exc_.args[0] == ('Impossible to modify the alert with your input. Please consult the ' - 'documentation. The error was:\n"Grid2OpException IllegalAction ' - '"Impossible to change a raise alert id 10 because there are only ' - '10 on the grid (and in python id starts at 0)""') + assert exc_.args[0] == ('Impossible to modify the alert with your input. ' + 'Please consult the documentation. 
The error ' + 'was:\n"Grid2OpException AmbiguousAction ' + '"Impossible to change a raise alert id 10 ' + 'because there are only 10 on the grid (and in ' + 'python id starts at 0)""') class TestAlertBlackout(unittest.TestCase): diff --git a/grid2op/tests/test_ChronicsHandler.py b/grid2op/tests/test_ChronicsHandler.py index c19ad2164..1fefb2bc0 100644 --- a/grid2op/tests/test_ChronicsHandler.py +++ b/grid2op/tests/test_ChronicsHandler.py @@ -1122,7 +1122,7 @@ def setUp(self): with warnings.catch_warnings(): warnings.filterwarnings("ignore") self.env = grid2op.make("rte_case14_realistic", test=True, _add_to_name=type(self).__name__) - self.env.chronics_handler.set_max_iter(self.max_iter) + self.env.set_max_iter(self.max_iter) def tearDown(self): self.env.close() @@ -1183,7 +1183,7 @@ def test_load_still(self): ) as env: # test a first time without chunks env.set_id(0) - env.chronics_handler.set_max_iter(max_iter) + env.set_max_iter(max_iter) obs = env.reset() # check that simulate is working diff --git a/grid2op/tests/test_CompactEpisodeData.py b/grid2op/tests/test_CompactEpisodeData.py new file mode 100644 index 000000000..11f9dec78 --- /dev/null +++ b/grid2op/tests/test_CompactEpisodeData.py @@ -0,0 +1,299 @@ +# Copyright (c) 2019-2020, RTE (https://www.rte-france.com) +# See AUTHORS.txt +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. + +import tempfile +import warnings +import pdb +import unittest + +import grid2op +from grid2op.Agent import OneChangeThenNothing +from grid2op.tests.helper_path_test import * +from grid2op.Chronics import Multifolder +from grid2op.Reward import L2RPNReward +from grid2op.Backend import PandaPowerBackend +from grid2op.Runner import Runner +from grid2op.Episode import CompactEpisodeData, EpisodeData +from grid2op.dtypes import dt_float +from grid2op.Agent import BaseAgent +from grid2op.Action import TopologyAction +from grid2op.Parameters import Parameters +from grid2op.Opponent.baseActionBudget import BaseActionBudget +from grid2op.Opponent import RandomLineOpponent + + +DEBUG = True +PATH_ADN_CHRONICS_FOLDER = os.path.abspath( + os.path.join(PATH_CHRONICS, "test_multi_chronics") +) + + +class TestCompactEpisodeData(unittest.TestCase): + def setUp(self): + """ + The case file is a representation of the case14 as found in the ieee14 powergrid. 
+ :return: + """ + self.tolvect = dt_float(1e-2) + self.tol_one = dt_float(1e-5) + self.max_iter = 10 + self.real_reward = dt_float(179.99818) + + self.init_grid_path = os.path.join(PATH_DATA_TEST_PP, "test_case14.json") + self.path_chron = PATH_ADN_CHRONICS_FOLDER + self.parameters_path = None + self.names_chronics_to_backend = { + "loads": { + "2_C-10.61": "load_1_0", + "3_C151.15": "load_2_1", + "14_C63.6": "load_13_2", + "4_C-9.47": "load_3_3", + "5_C201.84": "load_4_4", + "6_C-6.27": "load_5_5", + "9_C130.49": "load_8_6", + "10_C228.66": "load_9_7", + "11_C-138.89": "load_10_8", + "12_C-27.88": "load_11_9", + "13_C-13.33": "load_12_10", + }, + "lines": { + "1_2_1": "0_1_0", + "1_5_2": "0_4_1", + "9_10_16": "8_9_2", + "9_14_17": "8_13_3", + "10_11_18": "9_10_4", + "12_13_19": "11_12_5", + "13_14_20": "12_13_6", + "2_3_3": "1_2_7", + "2_4_4": "1_3_8", + "2_5_5": "1_4_9", + "3_4_6": "2_3_10", + "4_5_7": "3_4_11", + "6_11_11": "5_10_12", + "6_12_12": "5_11_13", + "6_13_13": "5_12_14", + "4_7_8": "3_6_15", + "4_9_9": "3_8_16", + "5_6_10": "4_5_17", + "7_8_14": "6_7_18", + "7_9_15": "6_8_19", + }, + "prods": { + "1_G137.1": "gen_0_4", + "3_G36.31": "gen_2_1", + "6_G63.29": "gen_5_2", + "2_G-56.47": "gen_1_0", + "8_G40.43": "gen_7_3", + }, + } + self.gridStateclass = Multifolder + self.backendClass = PandaPowerBackend + self.runner = Runner( + init_grid_path=self.init_grid_path, + init_env_path=self.init_grid_path, + path_chron=self.path_chron, + parameters_path=self.parameters_path, + names_chronics_to_backend=self.names_chronics_to_backend, + gridStateclass=self.gridStateclass, + backendClass=self.backendClass, + rewardClass=L2RPNReward, + other_rewards={"test": L2RPNReward}, + max_iter=self.max_iter, + name_env="test_episodedata_env", + use_compact_episode_data=True, + ) + + def test_load_ambiguous(self): + f = tempfile.mkdtemp() + + class TestSuitAgent(BaseAgent): + def __init__(self, *args, **kwargs): + BaseAgent.__init__(self, *args, **kwargs) + + def act(self, observation, reward, done=False): + # do a ambiguous action + return self.action_space( + {"set_line_status": [(0, 1)], "change_line_status": [0]} + ) + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + with grid2op.make("rte_case14_test", test=True, _add_to_name=type(self).__name__) as env: + my_agent = TestSuitAgent(env.action_space) + runner = Runner( + **env.get_params_for_runner(), + agentClass=None, + agentInstance=my_agent, + use_compact_episode_data=True, + ) + + # test that the right seeds are assigned to the agent + res = runner.run(nb_episode=1, max_iter=self.max_iter, path_save=f) + episode_data = CompactEpisodeData.from_disk(path=f, ep_id=res[0][1]) + assert int(episode_data.meta["chronics_max_timestep"]) == self.max_iter + assert len(episode_data.actions) == self.max_iter + assert len(episode_data.observations) == self.max_iter + 1 + assert len(episode_data.env_actions) == self.max_iter + assert len(episode_data.attacks) == self.max_iter + + def test_one_episode_with_saving(self): + f = tempfile.mkdtemp() + ( + ep_id, + episode_name, + cum_reward, + timestep, + max_ts + ) = self.runner.run_one_episode(path_save=f) + episode_data = CompactEpisodeData.from_disk(path=f, ep_id=episode_name) + assert int(episode_data.meta["chronics_max_timestep"]) == self.max_iter + assert len(episode_data.other_rewards) == self.max_iter + other_reward_idx = episode_data.other_reward_names.index("test") + other_reward = episode_data.other_rewards[:, other_reward_idx] + assert np.all(np.abs(other_reward - 
episode_data.rewards) <= self.tol_one) + assert np.abs(episode_data.meta["cumulative_reward"] - self.real_reward) <= self.tol_one + + + def test_collection_wrapper_after_run(self): + OneChange = OneChangeThenNothing.gen_next( + {"set_bus": {"lines_or_id": [(1, -1)]}} + ) + runner = Runner( + init_grid_path=self.init_grid_path, + init_env_path=self.init_grid_path, + path_chron=self.path_chron, + parameters_path=self.parameters_path, + names_chronics_to_backend=self.names_chronics_to_backend, + gridStateclass=self.gridStateclass, + backendClass=self.backendClass, + rewardClass=L2RPNReward, + other_rewards={"test": L2RPNReward}, + max_iter=self.max_iter, + name_env="test_episodedata_env", + agentClass=OneChange, + use_compact_episode_data=True, + ) + ep_id, ep_name, cum_reward, timestep, max_ts, episode_data = runner.run_one_episode( + max_iter=self.max_iter, detailed_output=True + ) + # Check that the type of first action is set bus + assert episode_data.action_space.from_vect(episode_data.actions[0]).get_types()[2] + + def test_len(self): + """test i can use the function "len" of the episode data""" + f = tempfile.mkdtemp() + ( + ep_id, + episode_name, + cum_reward, + timestep, + max_ts + ) = self.runner.run_one_episode(path_save=f) + episode_data = CompactEpisodeData.from_disk(path=f, ep_id=episode_name) + len(episode_data) + + def test_3_episode_with_saving(self): + f = tempfile.mkdtemp() + res = self.runner._run_sequential(nb_episode=3, path_save=f) + for i, episode_name, cum_reward, timestep, total_ts in res: + episode_data = CompactEpisodeData.from_disk(path=f, ep_id=episode_name) + assert int(episode_data.meta["chronics_max_timestep"]) == self.max_iter + assert np.abs(episode_data.meta["cumulative_reward"] - self.real_reward) <= self.tol_one + + def test_3_episode_3process_with_saving(self): + f = tempfile.mkdtemp() + nb_episode = 2 + res = self.runner._run_parrallel( + nb_episode=nb_episode, nb_process=2, path_save=f, + ) + assert len(res) == nb_episode + for i, episode_name, cum_reward, timestep, total_ts in res: + episode_data = CompactEpisodeData.from_disk(path=f, ep_id=episode_name) + assert int(episode_data.meta["chronics_max_timestep"]) == self.max_iter + assert np.abs(episode_data.meta["cumulative_reward"] - self.real_reward) <= self.tol_one + + def test_with_opponent(self): + init_budget = 1000 + opponent_attack_duration = 15 + opponent_attack_cooldown = 30 + opponent_budget_per_ts = 0.0 + opponent_action_class = TopologyAction + + LINES_ATTACKED = ["1_3_3", "1_4_4", "3_6_15", "9_10_12", "11_12_13", "12_13_14"] + + p = Parameters() + p.NO_OVERFLOW_DISCONNECTION = True + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make( + "rte_case14_realistic", + test=True, + param=p, + opponent_init_budget=init_budget, + opponent_budget_per_ts=opponent_budget_per_ts, + opponent_attack_cooldown=opponent_attack_cooldown, + opponent_attack_duration=opponent_attack_duration, + opponent_action_class=opponent_action_class, + opponent_budget_class=BaseActionBudget, + opponent_class=RandomLineOpponent, + kwargs_opponent={"lines_attacked": LINES_ATTACKED}, + _add_to_name=type(self).__name__, + ) + env.seed(0) + runner = Runner(**env.get_params_for_runner(), use_compact_episode_data=True) + + f = tempfile.mkdtemp() + res = runner.run( + nb_episode=1, + env_seeds=[4], + agent_seeds=[0], + max_iter=opponent_attack_cooldown - 1, + path_save=f, + ) + + episode_data = CompactEpisodeData.from_disk(path=f, ep_id=res[0][1]) + lines_impacted, subs_impacted = 
episode_data.attack_space.from_vect(episode_data.attacks[0]).get_topological_impact() + assert lines_impacted[3] + + def test_can_return_ep_data(self): + # One episode + res = self.runner.run(nb_episode=1, + episode_id=[0], + env_seeds=[0], + max_iter=self.max_iter, + add_detailed_output=True, + nb_process=1 + ) + for el in res: + assert isinstance(el[-1], CompactEpisodeData) + + # 2 episodes, sequential mode + res = self.runner.run(nb_episode=2, + episode_id=[0, 1], + env_seeds=[0, 1], + max_iter=self.max_iter, + add_detailed_output=True, + nb_process=1 + ) + for el in res: + assert isinstance(el[-1], CompactEpisodeData) + + # 2 episodes, parrallel mode + res = self.runner.run(nb_episode=2, + episode_id=[0, 1], + env_seeds=[0, 1], + max_iter=self.max_iter, + add_detailed_output=True, + nb_process=2 + ) + for el in res: + assert isinstance(el[-1], CompactEpisodeData) + + +if __name__ == "__main__": + unittest.main() diff --git a/grid2op/tests/test_Environment.py b/grid2op/tests/test_Environment.py index ac1e96df5..61106baae 100644 --- a/grid2op/tests/test_Environment.py +++ b/grid2op/tests/test_Environment.py @@ -266,13 +266,15 @@ def test_reward(self): i = 0 self.chronics_handler.next_chronics() + ch = copy.deepcopy(self.chronics_handler) + ch.cleanup_action_space() with warnings.catch_warnings(): warnings.filterwarnings("ignore") self.env = Environment( init_grid_path=os.path.join(self.path_matpower, self.case_file), backend=self.get_backend(), init_env_path=os.path.join(self.path_matpower, self.case_file), - chronics_handler=self.chronics_handler, + chronics_handler=ch, parameters=self.env_params, rewardClass=L2RPNReward, names_chronics_to_backend=self.names_chronics_to_backend, @@ -845,7 +847,7 @@ def _check_env_param(self, env, param): # type of power flow to play # if True, then it will not disconnect lines above their thermal limits assert env._no_overflow_disconnection == param.NO_OVERFLOW_DISCONNECTION - assert env._hard_overflow_threshold == param.HARD_OVERFLOW_THRESHOLD + assert (env._hard_overflow_threshold == param.HARD_OVERFLOW_THRESHOLD).all() # store actions "cooldown" assert ( diff --git a/grid2op/tests/test_EpisodeData.py b/grid2op/tests/test_EpisodeData.py index 15f231979..1b1e29535 100644 --- a/grid2op/tests/test_EpisodeData.py +++ b/grid2op/tests/test_EpisodeData.py @@ -143,6 +143,7 @@ def act(self, observation, reward, done=False): def test_one_episode_with_saving(self): f = tempfile.mkdtemp() ( + ep_id, episode_name, cum_reward, timestep, @@ -176,7 +177,7 @@ def test_collection_wrapper_after_run(self): name_env="test_episodedata_env", agentClass=OneChange, ) - _, cum_reward, timestep, max_ts, episode_data = runner.run_one_episode( + _, _, cum_reward, timestep, max_ts, episode_data = runner.run_one_episode( max_iter=self.max_iter, detailed_output=True ) # Check that the type of first action is set bus @@ -186,6 +187,7 @@ def test_len(self): """test i can use the function "len" of the episode data""" f = tempfile.mkdtemp() ( + ep_id, episode_name, cum_reward, timestep, diff --git a/grid2op/tests/test_GridObjects.py b/grid2op/tests/test_GridObjects.py index a5ee0a493..62c6ace6b 100644 --- a/grid2op/tests/test_GridObjects.py +++ b/grid2op/tests/test_GridObjects.py @@ -15,7 +15,7 @@ import grid2op from grid2op.Backend.educPandaPowerBackend import EducPandaPowerBackend -from grid2op.Exceptions import EnvError +from grid2op.Exceptions import Grid2OpException class TestAuxFunctions(unittest.TestCase): @@ -72,8 +72,8 @@ def test_auxilliary_func(self): 
bk_cls.line_or_pos_topo_vect = None bk_cls.line_ex_pos_topo_vect = None - # test that the grid is not correct now - with self.assertRaises(EnvError): + # test that the grid should not be correct at this stage + with self.assertRaises(Grid2OpException): bk_cls.assert_grid_correct_cls() # fill the _compute_sub_elements @@ -152,7 +152,45 @@ def test_auxilliary_func(self): ) # this should pass bk_cls.assert_grid_correct_cls() - + + def test_topo_vect_element(self): + """ + .. newinversion:: 1.10.0 + Test this utilitary function + """ + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make( + "educ_case14_storage", + test=True, + _add_to_name=type(self).__name__+"test_gridobjects_testauxfunctions", + ) + cls = type(env) + for el_id, el_pos_topo_vect in enumerate(cls.load_pos_topo_vect): + res = cls.topo_vect_element(el_pos_topo_vect) + assert "load_id" in res + assert res["load_id"] == el_id + for el_id, el_pos_topo_vect in enumerate(cls.gen_pos_topo_vect): + res = cls.topo_vect_element(el_pos_topo_vect) + assert "gen_id" in res + assert res["gen_id"] == el_id + for el_id, el_pos_topo_vect in enumerate(cls.storage_pos_topo_vect): + res = cls.topo_vect_element(el_pos_topo_vect) + assert "storage_id" in res + assert res["storage_id"] == el_id + for el_id, el_pos_topo_vect in enumerate(cls.line_or_pos_topo_vect): + res = cls.topo_vect_element(el_pos_topo_vect) + assert "line_id" in res + assert res["line_id"] == {"or": el_id} + assert "line_or_id" in res + assert res["line_or_id"] == el_id + for el_id, el_pos_topo_vect in enumerate(cls.line_ex_pos_topo_vect): + res = cls.topo_vect_element(el_pos_topo_vect) + assert "line_id" in res + assert res["line_id"] == {"ex": el_id} + assert "line_ex_id" in res + assert res["line_ex_id"] == el_id + if __name__ == "__main__": unittest.main() diff --git a/grid2op/tests/test_MaskedEnvironment.py b/grid2op/tests/test_MaskedEnvironment.py new file mode 100644 index 000000000..ef8505d78 --- /dev/null +++ b/grid2op/tests/test_MaskedEnvironment.py @@ -0,0 +1,229 @@ +# Copyright (c) 2019-2023, RTE (https://www.rte-france.com) +# See AUTHORS.txt +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. + +import warnings +import unittest +import numpy as np + +import grid2op +from grid2op.Environment import MaskedEnvironment +from grid2op.Runner import Runner +from grid2op.gym_compat import (GymEnv, + BoxGymActSpace, + BoxGymObsSpace, + DiscreteActSpace, + MultiDiscreteActSpace) + + +class TestMaskedEnvironment(unittest.TestCase): + @staticmethod + def get_mask(): + mask = np.full(20, fill_value=False, dtype=bool) + mask[[0, 1, 4, 2, 3, 6, 5]] = True # THT part + return mask + + def setUp(self) -> None: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env_in = MaskedEnvironment(grid2op.make("l2rpn_case14_sandbox", test=True, _add_to_name=type(self).__name__), + lines_of_interest=TestMaskedEnvironment.get_mask()) + self.env_out = MaskedEnvironment(grid2op.make("l2rpn_case14_sandbox", test=True, _add_to_name=type(self).__name__), + lines_of_interest=~TestMaskedEnvironment.get_mask()) + self.line_id = 3 + th_lim = self.env_in.get_thermal_limit() * 2. 
# avoid all problems in general + th_lim[self.line_id] /= 10. # make sure to get trouble on line 3 + # env_in: line is in the area + self.env_in.set_thermal_limit(th_lim) + # env_out: line is out of the area + self.env_out.set_thermal_limit(th_lim) + + TestMaskedEnvironment._init_env(self.env_in) + TestMaskedEnvironment._init_env(self.env_out) + + @staticmethod + def _init_env(env): + env.set_id(0) + env.seed(0) + env.reset() + + def tearDown(self) -> None: + self.env_in.close() + self.env_out.close() + return super().tearDown() + + def test_right_type(self): + assert isinstance(self.env_in, MaskedEnvironment) + assert isinstance(self.env_out, MaskedEnvironment) + assert hasattr(self.env_in, "_lines_of_interest") + assert hasattr(self.env_out, "_lines_of_interest") + assert self.env_in._lines_of_interest[self.line_id], "line_id should be in env_in" + assert not self.env_out._lines_of_interest[self.line_id], "line_id should not be in env_out" + + def test_ok(self): + act = self.env_in.action_space() + for i in range(10): + obs_in, reward, done, info = self.env_in.step(act) + obs_out, reward, done, info = self.env_out.step(act) + if i < 2: # 2 : 2 full steps already + assert obs_in.timestep_overflow[self.line_id] == i + 1, f"error for step {i}: {obs_in.timestep_overflow[self.line_id]}" + assert obs_out.timestep_overflow[self.line_id] == i + 1, f"error for step {i}: {obs_out.timestep_overflow[self.line_id]}" + else: + # cooldown applied for line 3: + # - it disconnects stuff in `self.env_in` + # - it does not affect anything in `self.env_out` + assert not obs_in.line_status[self.line_id] + assert obs_out.timestep_overflow[self.line_id] == i + 1, f"error for step {i}: {obs_out.timestep_overflow[self.line_id]}" + + def test_reset(self): + # timestep_overflow should be 0 initially even if the flow is too high + obs = self.env_in.reset() + assert obs.timestep_overflow[self.line_id] == 0 + assert obs.rho[self.line_id] > 1.
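# ---------------------------------------------------------------------------
# Illustrative aside (not part of the patch): a quick usage sketch of the API
# exercised by TestMaskedEnvironment above.  A MaskedEnvironment wraps a
# regular environment; only the lines flagged True in `lines_of_interest` are
# subject to overflow-based disconnection, while the other lines keep
# accumulating `timestep_overflow` without being disconnected.  The
# environment name and the mask below are assumptions chosen for illustration.
import numpy as np
import grid2op
from grid2op.Environment import MaskedEnvironment

base_env = grid2op.make("l2rpn_case14_sandbox", test=True)
mask = np.full(base_env.n_line, fill_value=False, dtype=bool)
mask[[0, 1, 2, 3, 4, 5, 6]] = True  # the lines "of interest"
env = MaskedEnvironment(base_env, lines_of_interest=mask)
obs = env.reset()
obs, reward, done, info = env.step(env.action_space())
# ---------------------------------------------------------------------------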
+ + +class TestMaskedEnvironmentCpy(TestMaskedEnvironment): + def setUp(self) -> None: + super().setUp() + init_int = self.env_in + init_out = self.env_out + self.env_in = self.env_in.copy() + self.env_out = self.env_out.copy() + init_int.close() + init_out.close() + + +class TestMaskedEnvironmentRunner(unittest.TestCase): + def setUp(self) -> None: + TestMaskedEnvironment.setUp(self) + self.max_iter = 10 + + def tearDown(self) -> None: + self.env_in.close() + self.env_out.close() + return super().tearDown() + + def test_runner_can_make(self): + runner = Runner(**self.env_in.get_params_for_runner()) + env2 = runner.init_env() + assert isinstance(env2, MaskedEnvironment) + assert (env2._lines_of_interest == self.env_in._lines_of_interest).all() + + def test_runner(self): + # create the runner + runner_in = Runner(**self.env_in.get_params_for_runner()) + runner_out = Runner(**self.env_out.get_params_for_runner()) + res_in, *_ = runner_in.run(nb_episode=1, max_iter=self.max_iter, env_seeds=[0], episode_id=[0], add_detailed_output=True) + res_out, *_ = runner_out.run(nb_episode=1, max_iter=self.max_iter, env_seeds=[0], episode_id=[0], add_detailed_output=True) + res_in2, *_ = runner_in.run(nb_episode=1, max_iter=self.max_iter, env_seeds=[0], episode_id=[0]) + # check correct results are obtained when agregated + assert res_in[3] == 10 + assert res_in2[3] == 10 + assert res_out[3] == 10 + assert np.allclose(res_in[2], 645.4992065) + assert np.allclose(res_in2[2], 645.4992065) + assert np.allclose(res_out[2], 645.7020874) + + # check detailed results + ep_data_in = res_in[-1] + ep_data_out = res_out[-1] + for i in range(self.max_iter + 1): + obs_in = ep_data_in.observations[i] + obs_out = ep_data_out.observations[i] + if i < 3: + assert obs_in.timestep_overflow[self.line_id] == i, f"error for step {i}: {obs_in.timestep_overflow[self.line_id]}" + assert obs_out.timestep_overflow[self.line_id] == i, f"error for step {i}: {obs_out.timestep_overflow[self.line_id]}" + else: + # cooldown applied for line 3: + # - it disconnect stuff in `self.env_in` + # - it does not affect anything in `self.env_out` + assert not obs_in.line_status[self.line_id], f"error for step {i}: line is not disconnected" + assert obs_out.timestep_overflow[self.line_id] == i, f"error for step {i}: {obs_out.timestep_overflow[self.line_id]}" + + + +class TestMaskedEnvironmentGym(unittest.TestCase): + def setUp(self) -> None: + TestMaskedEnvironment.setUp(self) + + def tearDown(self) -> None: + self.env_in.close() + self.env_out.close() + return super().tearDown() + + def _aux_run_envs(self, act, env_gym_in, env_gym_out): + for i in range(10): + obs_in, reward, done, truncated, info = env_gym_in.step(act) + obs_out, reward, done, truncated, info = env_gym_out.step(act) + if i < 2: # 2 : 2 full steps already + assert obs_in["timestep_overflow"][self.line_id] == i + 1, f"error for step {i}: {obs_in['timestep_overflow'][self.line_id]}" + assert obs_out['timestep_overflow'][self.line_id] == i + 1, f"error for step {i}: {obs_out['timestep_overflow'][self.line_id]}" + else: + # cooldown applied for line 3: + # - it disconnect stuff in `self.env_in` + # - it does not affect anything in `self.env_out` + assert not obs_in["line_status"][self.line_id] + assert obs_out["timestep_overflow"][self.line_id] == i + 1, f"error for step {i}: {obs_out['timestep_overflow'][self.line_id]}" + + def test_gym_with_step(self): + """test the step function also disconnects (or not) the lines""" + env_gym_in = GymEnv(self.env_in) + env_gym_out = 
GymEnv(self.env_out) + act = {} + self._aux_run_envs(act, env_gym_in, env_gym_out) + env_gym_in.reset() + env_gym_out.reset() + self._aux_run_envs(act, env_gym_in, env_gym_out) + + def test_gym_normal(self): + """test I can create the gym env""" + env_gym = GymEnv(self.env_in) + env_gym.reset() + + def test_gym_box(self): + """test I can create the gym env with box ob space and act space""" + env_gym_in = GymEnv(self.env_in) + env_gym_out = GymEnv(self.env_out) + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env_gym_in.action_space = BoxGymActSpace(self.env_in.action_space) + env_gym_in.observation_space = BoxGymObsSpace(self.env_in.observation_space) + env_gym_out.action_space = BoxGymActSpace(self.env_out.action_space) + env_gym_out.observation_space = BoxGymObsSpace(self.env_out.observation_space) + env_gym_in.reset() + env_gym_out.reset() + + def test_gym_discrete(self): + """test I can create the gym env with discrete act space""" + env_gym_in = GymEnv(self.env_in) + env_gym_out = GymEnv(self.env_out) + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env_gym_in.action_space = DiscreteActSpace(self.env_in.action_space) + env_gym_out.action_space = DiscreteActSpace(self.env_out.action_space) + env_gym_in.reset() + env_gym_out.reset() + act = 0 + self._aux_run_envs(act, env_gym_in, env_gym_out) + + def test_gym_multidiscrete(self): + """test I can create the gym env with multi discrete act space""" + env_gym_in = GymEnv(self.env_in) + env_gym_out = GymEnv(self.env_out) + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env_gym_in.action_space = MultiDiscreteActSpace(self.env_in.action_space) + env_gym_out.action_space = MultiDiscreteActSpace(self.env_out.action_space) + env_gym_in.reset() + env_gym_out.reset() + act = env_gym_in.action_space.sample() + act[:] = 0 + self._aux_run_envs(act, env_gym_in, env_gym_out) + + +if __name__ == "__main__": + unittest.main() diff --git a/grid2op/tests/test_MultiMix.py b/grid2op/tests/test_MultiMix.py index 9ee05802f..0f66ed0b0 100644 --- a/grid2op/tests/test_MultiMix.py +++ b/grid2op/tests/test_MultiMix.py @@ -297,11 +297,10 @@ def test_forecast_toggle(self): def test_bracket_access_by_name(self): mme = MultiMixEnvironment(PATH_DATA_MULTIMIX, _test=True) - mix1_env = mme["case14_001"] - assert mix1_env.name == "case14_001" + assert mix1_env.multimix_mix_name == "case14_001" mix2_env = mme["case14_002"] - assert mix2_env.name == "case14_002" + assert mix2_env.multimix_mix_name == "case14_002" with self.assertRaises(KeyError): unknown_env = mme["unknown_raise"] @@ -312,7 +311,7 @@ def test_keys_access(self): mix = mme[k] assert mix is not None assert isinstance(mix, BaseEnv) - assert mix.name == k + assert mix.multimix_mix_name == k def test_values_access(self): mme = MultiMixEnvironment(PATH_DATA_MULTIMIX, _test=True) @@ -320,7 +319,7 @@ def test_values_access(self): for v in mme.values(): assert v is not None assert isinstance(v, BaseEnv) - assert v == mme[v.name] + assert v == mme[v.multimix_mix_name] def test_values_unique(self): mme = MultiMixEnvironment(PATH_DATA_MULTIMIX, _test=True) diff --git a/grid2op/tests/test_Observation.py b/grid2op/tests/test_Observation.py index 7019c87fc..dff0b2051 100644 --- a/grid2op/tests/test_Observation.py +++ b/grid2op/tests/test_Observation.py @@ -52,6 +52,7 @@ def setUp(self): self.dict_ = { "name_gen": ["gen_1_0", "gen_2_1", "gen_5_2", "gen_7_3", "gen_0_4"], + "n_busbar_per_sub": "2", "name_load": [ "load_1_0", "load_2_1", @@ -297,7 +298,7 @@ 
def setUp(self): "alertable_line_names": [], "alertable_line_ids": [], "assistant_warning_type": None, - "_PATH_ENV": None, + "_PATH_GRID_CLASSES": None, } self.json_ref = { @@ -1785,7 +1786,7 @@ def aux_test_conn_mat2(self, as_csr=False): obs, reward, done, info = self.env.step( self.env.action_space({"set_bus": {"lines_or_id": [(13, 2), (14, 2)]}}) ) - assert not done + assert not done, f"failed with error {info['exception']}" assert obs.bus_connectivity_matrix(as_csr).shape == (15, 15) assert ( obs.bus_connectivity_matrix(as_csr)[14, 11] == 1.0 @@ -2199,9 +2200,9 @@ def test_space_to_dict(self): val = dict_[el] val_res = self.dict_[el] if val is None and val_res is not None: - raise AssertionError(f"val is None and val_res is not None: val_res: {val_res}") + raise AssertionError(f"{el}: val is None and val_res is not None: val_res: {val_res}") if val is not None and val_res is None: - raise AssertionError(f"val is not None and val_res is None: val {val}") + raise AssertionError(f"{el}: val is not None and val_res is None: val {val}") if val is None and val_res is None: continue @@ -2974,9 +2975,13 @@ def setUp(self): "educ_case14_storage", test=True, action_class=PlayableAction, _add_to_name=type(self).__name__ ) + self.env.reset(seed=0, options={"time serie id": 0}) self.obs = self._make_forecast_perfect(self.env) self.sim_obs = None self.step_obs = None + + def tearDown(self): + self.env.close() def test_storage_act(self): """test i can do storage actions in simulate""" diff --git a/grid2op/tests/test_PandaPowerBackendDefaultFunc.py b/grid2op/tests/test_PandaPowerBackendDefaultFunc.py index 847f1d3bb..33a290119 100644 --- a/grid2op/tests/test_PandaPowerBackendDefaultFunc.py +++ b/grid2op/tests/test_PandaPowerBackendDefaultFunc.py @@ -64,7 +64,7 @@ def get_topo_vect(self): """ otherwise there are some infinite recursions """ - res = np.full(self.dim_topo, fill_value=np.NaN, dtype=dt_int) + res = np.full(self.dim_topo, fill_value=-1, dtype=dt_int) line_status = np.concatenate( ( diff --git a/grid2op/tests/test_Runner.py b/grid2op/tests/test_Runner.py index 1d8dcd233..fbc1cbb62 100644 --- a/grid2op/tests/test_Runner.py +++ b/grid2op/tests/test_Runner.py @@ -11,6 +11,9 @@ import json import unittest import pdb +import packaging +from packaging import version +import inspect from grid2op.tests.helper_path_test import * @@ -399,9 +402,8 @@ def test_nomaxiter(self): with warnings.catch_warnings(): warnings.filterwarnings("ignore") with grid2op.make("rte_case14_test", test=True, _add_to_name=type(self).__name__) as env: + env.set_max_iter(2 * self.max_iter) runner = Runner(**env.get_params_for_runner()) - runner.gridStateclass_kwargs["max_iter"] = 2 * self.max_iter - runner.chronics_handler.set_max_iter(2 * self.max_iter) res = runner.run(nb_episode=1) for i, _, cum_reward, timestep, total_ts in res: assert int(timestep) == 2 * self.max_iter @@ -456,8 +458,15 @@ def _aux_backward(self, base_path, g2op_version_txt, g2op_version): ) except Exception as exc_: raise exc_ - - if g2op_version <= "1.4.0": + g2op_ver = "" + try: + g2op_ver = version.parse(g2op_version) + except packaging.version.InvalidVersion: + if g2op_version != "test_version": + g2op_ver = version.parse("0.0.1") + else: + g2op_ver = version.parse("1.4.1") + if g2op_ver <= version.parse("1.4.0"): assert ( EpisodeData.get_grid2op_version(full_episode_path) == "<=1.4.0" ), "wrong grid2op version stored (grid2op version <= 1.4.0)" @@ -507,6 +516,11 @@ def test_backward_compatibility(self): "1.9.5", "1.9.6", "1.9.7", + "1.9.8", + 
"1.10.0", + "1.10.1", + "1.10.2", + "1.10.3", ] curr_version = "test_version" assert ( @@ -625,6 +639,28 @@ def test_legal_ambiguous_nofaststorage(self): assert ep_data.ambiguous[1] assert not ep_data.ambiguous[2] assert not ep_data.ambiguous[3] + + def test_get_params(self): + """test the runner._get_params() function (used in multiprocessing context) + can indeed make a runner with all its arguments modified (proper 'copy' of the runner) + """ + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make("l2rpn_case14_sandbox", test=True, chronics_class=ChangeNothing, + _add_to_name=type(self).__name__) + + runner = Runner(**env.get_params_for_runner(), agentClass=AgentTestLegalAmbiguous) + made_params = runner._get_params() + ok_params = inspect.signature(Runner.__init__).parameters + for k in made_params.keys(): + assert k in ok_params, f"params {k} is returned in runner._get_params() but cannot be used to make a runner" + + for k in ok_params.keys(): + if k == "self": + continue + assert k in made_params, f"params {k} is used to make a runner but is not returned in runner._get_params()" + + if __name__ == "__main__": diff --git a/grid2op/tests/test_RunnerFast.py b/grid2op/tests/test_RunnerFast.py index 1da9d05f4..b82a2017a 100644 --- a/grid2op/tests/test_RunnerFast.py +++ b/grid2op/tests/test_RunnerFast.py @@ -20,8 +20,6 @@ from grid2op.Runner import Runner from grid2op.dtypes import dt_float -warnings.simplefilter("error") - class TestRunner(HelperTests, unittest.TestCase): def setUp(self): @@ -56,7 +54,7 @@ def setUp(self): def test_one_episode(self): with warnings.catch_warnings(): warnings.filterwarnings("ignore") - _, cum_reward, timestep, max_ts = self.runner.run_one_episode( + _, _, cum_reward, timestep, max_ts = self.runner.run_one_episode( max_iter=self.max_iter ) assert int(timestep) == self.max_iter @@ -65,7 +63,7 @@ def test_one_episode(self): def test_one_episode_detailed(self): with warnings.catch_warnings(): warnings.filterwarnings("ignore") - _, cum_reward, timestep, max_ts, episode_data = self.runner.run_one_episode( + _, _, cum_reward, timestep, max_ts, episode_data = self.runner.run_one_episode( max_iter=self.max_iter, detailed_output=True ) assert int(timestep) == self.max_iter diff --git a/grid2op/tests/test_act_as_serializable_dict.py b/grid2op/tests/test_act_as_serializable_dict.py index f15f6fae1..3ac3df599 100644 --- a/grid2op/tests/test_act_as_serializable_dict.py +++ b/grid2op/tests/test_act_as_serializable_dict.py @@ -24,6 +24,7 @@ def _get_action_grid_class(): + GridObjects._clear_class_attribute() GridObjects.env_name = "test_action_serial_dict" GridObjects.n_gen = 5 GridObjects.name_gen = np.array(["gen_{}".format(i) for i in range(5)]) @@ -96,7 +97,7 @@ def _get_action_grid_class(): np.arange(GridObjects.n_sub), repeats=GridObjects.sub_info ) GridObjects.glop_version = grid2op.__version__ - GridObjects._PATH_ENV = None + GridObjects._PATH_GRID_CLASSES = None GridObjects.shunts_data_available = True GridObjects.n_shunt = 2 @@ -108,6 +109,7 @@ def _get_action_grid_class(): GridObjects.alarms_lines_area = {el: ["all"] for el in GridObjects.name_line} GridObjects.dim_alarms = 1 my_cls = GridObjects.init_grid(GridObjects, force=True) + GridObjects._clear_class_attribute() return my_cls diff --git a/grid2op/tests/test_action_set_orig_state.py b/grid2op/tests/test_action_set_orig_state.py new file mode 100644 index 000000000..228320227 --- /dev/null +++ b/grid2op/tests/test_action_set_orig_state.py @@ -0,0 +1,704 @@ +# Copyright 
(c) 2019-2020, RTE (https://www.rte-france.com) +# See AUTHORS.txt +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. + +import tempfile +import numpy as np +import warnings +import unittest +try: + from lightsim2grid import LightSimBackend + LS_AVAIL = True +except ImportError: + LS_AVAIL = False + +import grid2op +from grid2op.Environment import (TimedOutEnvironment, + MaskedEnvironment, + SingleEnvMultiProcess) +from grid2op.Backend import PandaPowerBackend +from grid2op.Episode import EpisodeData +from grid2op.Opponent import FromEpisodeDataOpponent +from grid2op.Runner import Runner +from grid2op.Action import TopologyAction, DispatchAction +from grid2op.tests.helper_path_test import * +from grid2op.Chronics import (FromHandlers, + Multifolder, + MultifolderWithCache, + GridStateFromFileWithForecasts, + GridStateFromFile, + GridStateFromFileWithForecastsWithMaintenance, + GridStateFromFileWithForecastsWithoutMaintenance, + FromOneEpisodeData, + FromMultiEpisodeData, + FromNPY) +from grid2op.Chronics.handlers import CSVHandler, JSONInitStateHandler + + +# TODO test "change" is deactivated +# TODO test with "names_orig_to_backend" + + +class TestSetActOrigDefault(unittest.TestCase): + def _get_act_cls(self): + return TopologyAction + + def _get_ch_cls(self): + return Multifolder + + def _get_c_cls(self): + return GridStateFromFileWithForecasts + + def _env_path(self): + return os.path.join( + PATH_DATA_TEST, "5bus_example_act_topo_set_init" + ) + + def _names_ch_to_bk(self): + return None + + def _get_backend(self): + return PandaPowerBackend() + + def _get_gridpath(self): + return None + + def _get_envparams(self, env): + return None + + def setUp(self) -> None: + self.env_nm = self._env_path() + tmp_path = self._get_gridpath() + env_params = dict(test=True, + backend=self._get_backend(), + action_class=self._get_act_cls(), + chronics_class=self._get_ch_cls(), + data_feeding_kwargs={"gridvalueClass": self._get_c_cls()}, + _add_to_name=type(self).__name__ + ) + if tmp_path is not None: + env_params["grid_path"] = tmp_path + ch_to_bk = self._names_ch_to_bk() + if ch_to_bk is not None: + env_params["names_chronics_to_grid"] = ch_to_bk + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = grid2op.make(self.env_nm, **env_params) + env_params = self._get_envparams(self.env) + if env_params is not None: + self.env.change_parameters(env_params) + self.env.change_forecast_parameters(env_params) + if issubclass(self._get_ch_cls(), MultifolderWithCache): + self.env.chronics_handler.set_filter(lambda x: True) + self.env.chronics_handler.reset() + self.env.reset(seed=0) + # some test to make sure the tests are correct + assert issubclass(self.env.action_space.subtype, self._get_act_cls()) + assert isinstance(self.env.chronics_handler.real_data, self._get_ch_cls()) + assert isinstance(self.env.chronics_handler.real_data.data, self._get_c_cls()) + assert isinstance(self.env.backend, type(self._get_backend())) + + def tearDown(self) -> None: + self.env.close() + return super().tearDown() + + def _aux_reset_env(self, seed, ep_id): + obs = self.env.reset(seed=seed, options={"time serie id": ep_id}) + return obs + + def _aux_make_step(self, 
act=None): + if act is None: + act = self.env.action_space() + return self.env.step(act) + + def _aux_get_init_act(self): + return self.env.chronics_handler.get_init_action(names_chronics_to_backend=self._names_ch_to_bk()) + + def _aux_get_act_valid(self): + # check the action in the time series folder is valid + with warnings.catch_warnings(): + warnings.filterwarnings("error") + act_init = self._aux_get_init_act() + + def test_working_setbus(self): + # ts id 0 => set_bus + self.obs = self._aux_reset_env(seed=0, ep_id=0) + + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[1]] == 2 + assert self.obs.topo_vect[self.obs.load_pos_topo_vect[0]] == 2 + assert (self.obs.time_before_cooldown_line == 0).all() + assert (self.obs.time_before_cooldown_sub == 0).all() + + obs, reward, done, info = self._aux_make_step() + assert not done + assert obs.topo_vect[self.obs.line_or_pos_topo_vect[1]] == 2 + assert obs.topo_vect[self.obs.load_pos_topo_vect[0]] == 2 + assert (obs.time_before_cooldown_line == 0).all() + assert (obs.time_before_cooldown_sub == 0).all() + # check the action in the time series folder is valid + self._aux_get_act_valid() + + def test_working_setstatus(self): + # ts id 1 => set_status + self.obs = self._aux_reset_env(seed=0, ep_id=1) + + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[1]] == -1 + assert self.obs.topo_vect[self.obs.line_ex_pos_topo_vect[1]] == -1 + assert not self.obs.line_status[1] + assert (self.obs.time_before_cooldown_line == 0).all() + assert (self.obs.time_before_cooldown_sub == 0).all() + + obs, reward, done, info = self._aux_make_step() + assert not done + assert obs.topo_vect[self.obs.line_or_pos_topo_vect[1]] == -1 + assert obs.topo_vect[self.obs.line_ex_pos_topo_vect[1]] == -1 + assert not obs.line_status[1] + assert (obs.time_before_cooldown_line == 0).all() + assert (obs.time_before_cooldown_sub == 0).all() + # check the action in the time series folder is valid + self._aux_get_act_valid() + + def test_rules_ok(self): + """test that even if the action to set is illegal, it works (case of ts id 2)""" + self.obs = self._aux_reset_env(seed=0, ep_id=2) + + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[1]] == 2 + assert self.obs.topo_vect[self.obs.line_ex_pos_topo_vect[5]] == 2 + assert (self.obs.time_before_cooldown_line == 0).all() + assert (self.obs.time_before_cooldown_sub == 0).all() + act_init = self._aux_get_init_act() + if act_init is None: + # test not correct for multiprocessing, I stop here + return + obs, reward, done, info = self._aux_make_step(act_init) + assert info["exception"] is not None + assert info["is_illegal"] + assert obs.topo_vect[self.obs.line_or_pos_topo_vect[1]] == 2 + assert obs.topo_vect[self.obs.line_ex_pos_topo_vect[5]] == 2 + assert (obs.time_before_cooldown_line == 0).all() + assert (obs.time_before_cooldown_sub == 0).all() + # check the action in the time series folder is valid + self._aux_get_act_valid() + + def test_change_bus_ignored(self, catch_warning=True): + """test that if the action to set uses change_bus then nothing is done""" + if catch_warning: + with self.assertWarns(UserWarning): + # it raises the warning "be carefull, change stuff are ignored" + self.obs = self._aux_reset_env(seed=0, ep_id=3) + else: + # no warning in the main process in multiprocessing + self.obs = self._aux_reset_env(seed=0, ep_id=3) + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[1]] == 1 + assert self.obs.topo_vect[self.obs.line_ex_pos_topo_vect[1]] == 1 + assert 
self.obs.topo_vect[self.obs.line_or_pos_topo_vect[2]] == 1 + assert self.obs.topo_vect[self.obs.line_ex_pos_topo_vect[2]] == 1 + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[5]] == 1 + assert self.obs.topo_vect[self.obs.line_ex_pos_topo_vect[5]] == 1 + assert self.obs.line_status[1] == 1 + assert self.obs.line_status[2] == 1 + assert self.obs.line_status[5] == 1 + # check the action in the time series folder is valid + self._aux_get_act_valid() + + +class TestSetActOrigDifferentActionCLS(TestSetActOrigDefault): + def _get_act_cls(self): + return DispatchAction + + +class TestSetAcOrigtMultiFolderWithCache(TestSetActOrigDefault): + def _get_ch_cls(self): + return MultifolderWithCache + + def test_two_reset_same(self): + """test it does not crash when the same time series is used twice""" + self.test_working_setstatus() + obs, reward, done, info = self.env.step(self.env.action_space()) + self.test_working_setstatus() + obs, reward, done, info = self.env.step(self.env.action_space()) + + +class TestSetActOrigGridStateFromFile(TestSetActOrigDefault): + def _get_c_cls(self): + return GridStateFromFile + + +class TestSetActOrigGSFFWFWM(TestSetActOrigDefault): + def _get_c_cls(self): + return GridStateFromFileWithForecastsWithMaintenance + + +class TestSetActOrigGSFFWFWoM(TestSetActOrigDefault): + def _get_c_cls(self): + return GridStateFromFileWithForecastsWithoutMaintenance + + +class TestSetActOrigFromOneEpisodeData(TestSetActOrigDefault): + def _aux_make_ep_data(self, ep_id): + runner = Runner(**self.env.get_params_for_runner()) + runner.run(nb_episode=1, + episode_id=[ep_id], + path_save=self.fn.name, + max_iter=10) + self.env.close() + + li_episode = EpisodeData.list_episode(self.fn.name) + ep_data = li_episode[0] + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = grid2op.make(self._env_path(), + chronics_class=FromOneEpisodeData, + data_feeding_kwargs={"ep_data": ep_data}, + opponent_class=FromEpisodeDataOpponent, + opponent_attack_cooldown=1, + _add_to_name=type(self).__name__ + ) + + def setUp(self) -> None: + self.fn = tempfile.TemporaryDirectory() + super().setUp() + + def tearDown(self) -> None: + self.fn.cleanup() + return super().tearDown() + + def test_working_setbus(self): + self._aux_make_ep_data(0) # episode id 0 is used for this test + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + super().test_working_setbus() + + def test_working_setstatus(self): + self._aux_make_ep_data(1) # episode id 1 is used for this test + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + super().test_working_setstatus() + + def test_rules_ok(self): + self._aux_make_ep_data(2) # episode id 2 is used for this test + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + super().test_rules_ok() + + def test_change_bus_ignored(self): + self.skipTest("This make no sense for this class (change is not used internally)") + + +class TestSetActOrigFromMultiEpisodeData(TestSetActOrigDefault): + def setUp(self) -> None: + super().setUp() + self.fn = tempfile.TemporaryDirectory() + runner = Runner(**self.env.get_params_for_runner()) + runner.run(nb_episode=3, + episode_id=[0, 1, 2], + path_save=self.fn.name, + max_iter=10) + self.env.close() + + li_episode = EpisodeData.list_episode(self.fn.name) + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = grid2op.make(self._env_path(), + chronics_class=FromMultiEpisodeData, + data_feeding_kwargs={"li_ep_data": li_episode}, + 
opponent_class=FromEpisodeDataOpponent, + opponent_attack_cooldown=1, + _add_to_name=type(self).__name__ + ) + + + def tearDown(self) -> None: + self.fn.cleanup() + return super().tearDown() + + def test_two_reset_same(self): + """test it does not crash when the same time series is used twice""" + self.test_working_setstatus() + obs, reward, done, info = self.env.step(self.env.action_space()) + self.test_working_setstatus() + obs, reward, done, info = self.env.step(self.env.action_space()) + + def test_change_bus_ignored(self): + self.skipTest("This make no sense for this class (change is not used internally)") + + +class TestSetActOrigFromNPY(TestSetActOrigDefault): + def _aux_make_env(self, ch_id): + self.obs = self.env.reset(seed=0, options={"time serie id": ch_id}) + load_p = 1.0 * self.env.chronics_handler._real_data.data.load_p[:self.max_iter,:] + load_q = 1.0 * self.env.chronics_handler._real_data.data.load_q[:self.max_iter,:] + gen_p = 1.0 * self.env.chronics_handler._real_data.data.prod_p[:self.max_iter,:] + gen_v = np.repeat(self.obs.gen_v.reshape(1, -1), self.max_iter, axis=0) + act = self.env.action_space({"set_bus": self.obs.topo_vect}) + self.env.close() + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = grid2op.make(self._env_path(), + chronics_class=FromNPY, + data_feeding_kwargs={"load_p": load_p, + "load_q": load_q, + "prod_p": gen_p, + "prod_v": gen_v, + "init_state": act + }, + _add_to_name=type(self).__name__) + def setUp(self) -> None: + self.max_iter = 5 + super().setUp() + + def tearDown(self) -> None: + return super().tearDown() + + def test_working_setbus(self): + self._aux_make_env(0) # episode id 0 is used for this test + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + super().test_working_setbus() + + def test_working_setstatus(self): + self._aux_make_env(1) # episode id 1 is used for this test + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + super().test_working_setstatus() + + def test_rules_ok(self): + self._aux_make_env(2) # episode id 2 is used for this test + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + super().test_rules_ok() + + def test_change_bus_ignored(self): + self._aux_make_env(3) # episode id 3 is used for this test + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + super().test_change_bus_ignored() + + +class TestSetActOrigEnvCopy(TestSetActOrigDefault): + def setUp(self) -> None: + super().setUp() + env_cpy = self.env.copy() + self.env.close() + self.env = env_cpy + + +class TestSetActOrigFromHandlers(TestSetActOrigDefault): + def setUp(self) -> None: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = grid2op.make(self._env_path(), + data_feeding_kwargs={"gridvalueClass": FromHandlers, + "gen_p_handler": CSVHandler("prod_p"), + "load_p_handler": CSVHandler("load_p"), + "gen_v_handler": CSVHandler("prod_v"), + "load_q_handler": CSVHandler("load_q"), + "init_state_handler": JSONInitStateHandler("init_state_handler") + }, + _add_to_name=type(self).__name__ + ) + + +class TestSetActOrigLightsim(TestSetActOrigDefault): + def _get_backend(self): + if not LS_AVAIL: + self.skipTest("LightSimBackend is not available") + return LightSimBackend() + + +class TestSetActOrigDiffNames(TestSetActOrigDefault): + def _get_gridpath(self): + # just to have a grid with different names + return os.path.join(PATH_DATA_TEST, "5bus_example_diff_name", "grid.json") + + def _names_ch_to_bk(self): + res = {"loads": 
{'load_0_0': 'tutu', 'load_3_1': 'toto', 'load_4_2': 'tata'}, + "prods": {"gen_0_0": "othername_0_0", "gen_1_1": "othername_1_1"}, + "lines": {"0_1_0": 'l_0_1_0', + "0_2_1": 'l_0_2_1', + "0_3_2": 'l_0_3_2', + "0_4_3": 'l_0_4_3', + "1_2_4": 'l_1_2_4', + "2_3_5": 'l_2_3_5', + "2_3_6": 'l_2_3_6', + "3_4_7": 'l_3_4_7'} + } + return res + + +class TestSetActOrigTOEnv(TestSetActOrigDefault): + def setUp(self) -> None: + super().setUp() + env_init = self.env + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = TimedOutEnvironment(self.env) + env_init.close() + + +class TestSetActOrigMaskedEnv(TestSetActOrigDefault): + def setUp(self) -> None: + super().setUp() + env_init = self.env + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = MaskedEnvironment(self.env, + lines_of_interest=np.array([1, 1, 1, 1, 0, 0, 0, 0])) + env_init.close() + + +def always_true(x): + # I can't use lambda in set_filter (lambda cannot be pickled) + return True + + +class TestSetActOrigMultiProcEnv(TestSetActOrigDefault): + def _aux_reset_env(self, seed, ep_id): + # self.env.seed(seed) + self.env.set_id(ep_id) + obs = self.env.reset() + return obs[0] + + def _aux_get_init_act(self): + return None + + def _aux_make_step(self): + obs, reward, done, info = self.env.step([self.env_init.action_space(), self.env_init.action_space()]) + return obs[0], reward[0], done[0], info[0] + + def setUp(self) -> None: + super().setUp() + self.env_init = self.env + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = SingleEnvMultiProcess(self.env, 2) + self.env.set_filter(always_true) + + def tearDown(self) -> None: + self.env_init.close() + return super().tearDown() + + def test_change_bus_ignored(self): + super().test_change_bus_ignored(catch_warning=False) + + +class TestSetActOrigForcastEnv(TestSetActOrigDefault): + def test_working_setbus(self): + super().test_working_setbus() + for_env = self.env.get_obs().get_forecast_env() + obs, reward, done, info = for_env.step(self.env.action_space()) + + def test_working_setstatus(self): + super().test_working_setstatus() + for_env = self.env.get_obs().get_forecast_env() + obs, reward, done, info = for_env.step(self.env.action_space()) + + def test_rules_ok(self): + super().test_rules_ok() + for_env = self.env.get_obs().get_forecast_env() + obs, reward, done, info = for_env.step(self.env.action_space()) + + def test_change_bus_ignored(self): + super().test_change_bus_ignored() + for_env = self.env.get_obs().get_forecast_env() + obs, reward, done, info = for_env.step(self.env.action_space()) + + +class TestSetActOrigRunner(unittest.TestCase): + def _env_path(self): + return TestSetActOrigDefault._env_path(self) + + def setUp(self) -> None: + self.env_nm = self._env_path() + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = grid2op.make(self.env_nm, + test=True, + _add_to_name=type(self).__name__ + ) + def tearDown(self) -> None: + self.env.close() + return super().tearDown() + + def test_right_init_act(self): + runner = Runner(**self.env.get_params_for_runner()) + res = runner.run(nb_episode=3, + episode_id=[0, 1, 2], + max_iter=10, + add_detailed_output=True) + for i, el in enumerate(res): + ep_data = el[-1] + init_obs = ep_data.observations[0] + if i == 0: + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == 2 + assert init_obs.topo_vect[init_obs.load_pos_topo_vect[0]] == 2 + assert (init_obs.time_before_cooldown_line == 0).all() + 
assert (init_obs.time_before_cooldown_sub == 0).all() + elif i == 1: + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == -1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[1]] == -1 + assert not init_obs.line_status[1] + assert (init_obs.time_before_cooldown_line == 0).all() + assert (init_obs.time_before_cooldown_sub == 0).all() + elif i == 2: + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == 2 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[5]] == 2 + assert (init_obs.time_before_cooldown_line == 0).all() + assert (init_obs.time_before_cooldown_sub == 0).all() + else: + raise RuntimeError("Test is not coded correctly: the runner returned more episodes than expected") + + +class _PPNoShunt_Test(PandaPowerBackend): + shunts_data_available = False + + +class TestSetShuntState(unittest.TestCase): + def _env_path(self): + return os.path.join( + PATH_DATA_TEST, "educ_case14_storage_init_state" + ) + + def _get_backend(self): + return PandaPowerBackend() + + def setUp(self) -> None: + self.env_nm = self._env_path() + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = grid2op.make(self.env_nm, + test=True, + _add_to_name=type(self).__name__ + ) + self.env_noshunt = grid2op.make(self.env_nm, + test=True, + backend=_PPNoShunt_Test(), + _add_to_name=type(self).__name__ + ) + self.env_nostor = grid2op.make(self.env_nm, + test=True, + _add_to_name=type(self).__name__, + _compat_glop_version="neurips_2020_compat" + ) + assert type(self.env_noshunt).shunts_data_available is False + assert type(self.env_nostor).n_storage == 0 + assert type(self.env).n_storage == 2 + + def test_set_shunt_state(self): + """test that the action that acts on the shunt works (when shunts are supported) + or has no impact if the backend does not support shunts""" + obs_shunt = self.env.reset(seed=0, options={"time serie id": 0}) + obs_noshunt = self.env_noshunt.reset(seed=0, options={"time serie id": 0}) + assert obs_shunt._shunt_q[0] == 0. # the action puts the shunt to 0. + # in the backend with no shunt, the shunt is active and the generator + # does not produce the same q + assert abs(obs_shunt.gen_q[4] - obs_noshunt.gen_q[4]) > 5. + + def test_set_storage_state(self): + obs_stor = self.env.reset(seed=0, options={"time serie id": 1}) + obs_nostor = self.env_nostor.reset(seed=0, options={"time serie id": 1}) + slack_id = -1 + # the storage action is taken into account + assert obs_stor.storage_power[0] == 5. 
# the action sets this + + # the original grid (without storage) + # and the grid with storage action have the same "gen_p" + # if I remove the impact of the storage unit + deltagen_p_th = ((obs_stor.gen_p - obs_stor.actual_dispatch) - obs_nostor.gen_p) + assert (np.abs(deltagen_p_th[:slack_id]) <= 1e-6).all() + + +class TestSetActOrigIgnoredParams(TestSetActOrigDefault): + """This class tests that the new feature (setting the initial state in the time series) + is properly ignored if the parameter says so""" + + def _get_envparams(self, env): + param = env.parameters + param.IGNORE_INITIAL_STATE_TIME_SERIE = True + return param + + def test_working_setbus(self): + """test that it's ignored even if the action is set_bus""" + self.obs = self._aux_reset_env(seed=0, ep_id=0) + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[1]] == 1 + assert self.obs.topo_vect[self.obs.load_pos_topo_vect[0]] == 1 + assert (self.obs.time_before_cooldown_line == 0).all() + assert (self.obs.time_before_cooldown_sub == 0).all() + + obs, reward, done, info = self._aux_make_step() + assert not done + assert obs.topo_vect[self.obs.line_or_pos_topo_vect[1]] == 1 + assert obs.topo_vect[self.obs.load_pos_topo_vect[0]] == 1 + assert (obs.time_before_cooldown_line == 0).all() + assert (obs.time_before_cooldown_sub == 0).all() + # check the action in the time series folder is valid + self._aux_get_act_valid() + + def test_working_setstatus(self): + """test that it's ignored even if the action is set_status""" + self.obs = self._aux_reset_env(seed=0, ep_id=1) + + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[1]] == 1 + assert self.obs.topo_vect[self.obs.line_ex_pos_topo_vect[1]] == 1 + assert self.obs.line_status[1] + assert (self.obs.time_before_cooldown_line == 0).all() + assert (self.obs.time_before_cooldown_sub == 0).all() + + obs, reward, done, info = self._aux_make_step() + assert not done + assert obs.topo_vect[self.obs.line_or_pos_topo_vect[1]] == 1 + assert obs.topo_vect[self.obs.line_ex_pos_topo_vect[1]] == 1 + assert obs.line_status[1] + assert (obs.time_before_cooldown_line == 0).all() + assert (obs.time_before_cooldown_sub == 0).all() + # check the action in the time series folder is valid + self._aux_get_act_valid() + + def test_rules_ok(self): + """test that it's ignored even if the action is illegal""" + self.obs = self._aux_reset_env(seed=0, ep_id=2) + + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[1]] == 1 + assert self.obs.topo_vect[self.obs.line_ex_pos_topo_vect[5]] == 1 + assert (self.obs.time_before_cooldown_line == 0).all() + assert (self.obs.time_before_cooldown_sub == 0).all() + act_init = self._aux_get_init_act() + if act_init is None: + # test not correct for multiprocessing, I stop here + return + obs, reward, done, info = self._aux_make_step(act_init) + assert info["exception"] is not None + assert info["is_illegal"] + assert obs.topo_vect[self.obs.line_or_pos_topo_vect[1]] == 1 + assert obs.topo_vect[self.obs.line_ex_pos_topo_vect[5]] == 1 + assert (obs.time_before_cooldown_line == 0).all() + assert (obs.time_before_cooldown_sub == 0).all() + # check the action in the time series folder is valid + self._aux_get_act_valid() + + def test_change_bus_ignored(self, catch_warning=True): + """test that if the action to set uses change_bus then nothing is done""" + # no warning in the main process in multiprocessing + self.obs = self._aux_reset_env(seed=0, ep_id=3) + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[1]] == 1 + assert 
self.obs.topo_vect[self.obs.line_ex_pos_topo_vect[1]] == 1 + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[2]] == 1 + assert self.obs.topo_vect[self.obs.line_ex_pos_topo_vect[2]] == 1 + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[5]] == 1 + assert self.obs.topo_vect[self.obs.line_ex_pos_topo_vect[5]] == 1 + assert self.obs.line_status[1] == 1 + assert self.obs.line_status[2] == 1 + assert self.obs.line_status[5] == 1 + # check the action in the time series folder is valid + self._aux_get_act_valid() + + +if __name__ == "__main__": + unittest.main() diff --git a/grid2op/tests/test_action_set_orig_state_options.py b/grid2op/tests/test_action_set_orig_state_options.py new file mode 100644 index 000000000..e42dcf680 --- /dev/null +++ b/grid2op/tests/test_action_set_orig_state_options.py @@ -0,0 +1,512 @@ +# Copyright (c) 2019-2020, RTE (https://www.rte-france.com) +# See AUTHORS.txt +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. + + +import warnings +import unittest + +import grid2op +from grid2op.Runner import Runner +from grid2op.tests.helper_path_test import * + + +class TestSetActOptionDefault(unittest.TestCase): + def _env_path(self): + return os.path.join( + PATH_DATA_TEST, "5bus_example_act_topo_set_init" + ) + + def setUp(self) -> None: + self.env_nm = self._env_path() + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = grid2op.make(self.env_nm, + test=True + ) + + def tearDown(self) -> None: + self.env.close() + return super().tearDown() + + def _aux_reset_env(self, seed, ep_id, init_state): + obs = self.env.reset(seed=seed, options={"time serie id": ep_id, + "init state": init_state}) + return obs + + def _aux_make_step(self, act=None): + if act is None: + act = self.env.action_space() + return self.env.step(act) + + def _aux_get_init_act(self): + return self.env.chronics_handler.get_init_action() + + def test_combine_ts_set_bus_opt_setbus_nopb(self): + # ts id 0 => set_bus (in the time series) + self.obs = self._aux_reset_env(seed=0, ep_id=0, init_state={"set_bus": {"lines_or_id": [(0, 2)]}}) + # in the time series + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[1]] == 2 + assert self.obs.topo_vect[self.obs.load_pos_topo_vect[0]] == 2 + # in the action + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[0]] == 2 + + def test_combine_ts_set_bus_opt_setbus_collision(self): + # ts id 0 => set_bus (in the time series) + self.obs = self._aux_reset_env(seed=0, ep_id=0, init_state={"set_bus": {"lines_or_id": [(1, 1)], + "loads_id": [(0, 1)]}}) + + # in the option (totally erase the time series) + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[1]] == 1 + assert self.obs.topo_vect[self.obs.load_pos_topo_vect[0]] == 1 + + def test_combine_ts_set_bus_opt_setstat_nopb(self): + # ts id 0 => set_bus (in the time series) + self.obs = self._aux_reset_env(seed=0, ep_id=0, + init_state={"set_line_status": [(5, -1)]}) + + # in the time series + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[1]] == 2 + assert self.obs.topo_vect[self.obs.load_pos_topo_vect[0]] == 2 + # in the action + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[5]] == -1 + + def 
test_combine_ts_set_bus_opt_setstat_collision(self): + # ts id 0 => set_bus (in the time series) + self.obs = self._aux_reset_env(seed=0, ep_id=0, + init_state={"set_bus": {"loads_id": [(0, 1)]}, + "set_line_status": [(1, -1)]}) + # in the act + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[1]] == -1 + assert self.obs.topo_vect[self.obs.line_ex_pos_topo_vect[1]] == -1 + assert not self.obs.line_status[1] + # in the time series + assert self.obs.topo_vect[self.obs.load_pos_topo_vect[0]] == 1 + + def test_combine_ts_set_status_opt_setbus_nopb(self): + # ts id 1 => set_status + self.obs = self._aux_reset_env(seed=0, ep_id=1, init_state={"set_bus": {"lines_or_id": [(5, 2)]}}) + + # in the time series + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[1]] == -1 + assert self.obs.topo_vect[self.obs.line_ex_pos_topo_vect[1]] == -1 + assert not self.obs.line_status[1] + # in the action + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[5]] == 2 + + def test_combine_ts_set_status_opt_setbus_collision(self): + # ts id 1 => set_status + self.obs = self._aux_reset_env(seed=0, ep_id=1, init_state={"set_bus": {"lines_or_id": [(1, 1)]}}) + # in the time series (erased by the action, or side) + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[1]] == 1 + assert self.obs.topo_vect[self.obs.line_ex_pos_topo_vect[1]] == 1 + assert self.obs.line_status[1] + + # ts id 1 => set_status + self.obs = self._aux_reset_env(seed=0, ep_id=1, init_state={"set_bus": {"lines_ex_id": [(1, 2)]}}) + # in the time series (erased by the action, ex side) + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[1]] == 1 + assert self.obs.topo_vect[self.obs.line_ex_pos_topo_vect[1]] == 2 + assert self.obs.line_status[1] + + def test_combine_ts_set_status_opt_setstat_nopb(self): + # ts id 1 => set_status + self.obs = self._aux_reset_env(seed=0, ep_id=1, init_state={"set_line_status": [(5, -1)]}) + + # in the time series + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[1]] == -1 + assert self.obs.topo_vect[self.obs.line_ex_pos_topo_vect[1]] == -1 + assert not self.obs.line_status[1] + # in the action + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[5]] == -1 + assert self.obs.topo_vect[self.obs.line_ex_pos_topo_vect[5]] == -1 + assert not self.obs.line_status[5] + + def test_combine_ts_set_status_opt_setstat_collision(self): + # ts id 1 => set_status + self.obs = self._aux_reset_env(seed=0, ep_id=1, init_state={"set_line_status": [(1, 1)]}) + + # in the time series (bus overriden by the action) + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[1]] == 1 + assert self.obs.topo_vect[self.obs.line_ex_pos_topo_vect[1]] == 1 + assert self.obs.line_status[1] + return self.env.chronics_handler.get_init_action() + + def test_ignore_ts_set_bus_opt_setbus_nopb(self): + # ts id 0 => set_bus (in the time series) + self.obs = self._aux_reset_env(seed=0, ep_id=0, init_state={"set_bus": {"lines_or_id": [(5, 2)]}, "method": "ignore"}) + + # in the time series + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[1]] == 1 + assert self.obs.topo_vect[self.obs.load_pos_topo_vect[0]] == 1 + # in the action + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[5]] == 2 + + def test_ignore_ts_set_bus_opt_setbus_collision(self): + # TODO not tested for method = ignore (because action here totally erased action in ts) + # ts id 0 => set_bus (in the time series) + self.obs = self._aux_reset_env(seed=0, ep_id=0, init_state={"set_bus": {"lines_or_id": [(1, 1)], + "loads_id": [(0, 1)]}, 
+ "method": "ignore"}) + + # in the option (totally erase the time series) + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[1]] == 1 + assert self.obs.topo_vect[self.obs.load_pos_topo_vect[0]] == 1 + + def test_ignore_ts_set_bus_opt_setstat_nopb(self): + # ts id 0 => set_bus (in the time series) + self.obs = self._aux_reset_env(seed=0, ep_id=0, + init_state={"set_line_status": [(5, -1)], + "method": "ignore"}) + + # in the time series + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[1]] == 1 + assert self.obs.topo_vect[self.obs.load_pos_topo_vect[0]] == 1 + # in the action + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[5]] == -1 + assert self.obs.topo_vect[self.obs.line_ex_pos_topo_vect[5]] == -1 + assert not self.obs.line_status[5] + + def test_ignore_ts_set_bus_opt_setstat_collision(self): + # ts id 0 => set_bus (in the time series) + self.obs = self._aux_reset_env(seed=0, ep_id=0, + init_state={"set_bus": {"loads_id": [(0, 1)]}, + "set_line_status": [(1, -1)], + "method": "ignore"}) + # in the act + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[1]] == -1 + assert self.obs.topo_vect[self.obs.line_ex_pos_topo_vect[1]] == -1 + assert not self.obs.line_status[1] + # in the time series (ignored) + assert self.obs.topo_vect[self.obs.load_pos_topo_vect[0]] == 1 + + def test_ignore_ts_set_status_opt_setbus_nopb(self): + # ts id 1 => set_status + self.obs = self._aux_reset_env(seed=0, ep_id=1, init_state={"set_bus": {"lines_or_id": [(5, 2)]}, + "method": "ignore"}) + + # in the time series (ignored) + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[1]] == 1 + assert self.obs.topo_vect[self.obs.line_ex_pos_topo_vect[1]] == 1 + assert self.obs.line_status[1] + # in the action + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[5]] == 2 + + def test_ignore_ts_set_status_opt_setbus_collision(self): + # TODO not tested for method = ignore (because action here totally erased action in ts) + # ts id 1 => set_status + self.obs = self._aux_reset_env(seed=0, ep_id=1, init_state={"set_bus": {"lines_or_id": [(1, 1)]}, + "method": "ignore"}) + # in the time series (erased by the action, or side) + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[1]] == 1 + assert self.obs.topo_vect[self.obs.line_ex_pos_topo_vect[1]] == 1 + assert self.obs.line_status[1] + + # ts id 1 => set_status + self.obs = self._aux_reset_env(seed=0, ep_id=1, init_state={"set_bus": {"lines_ex_id": [(1, 2)]}, + "method": "ignore"}) + # in the time series (erased by the action, ex side) + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[1]] == 1 + assert self.obs.topo_vect[self.obs.line_ex_pos_topo_vect[1]] == 2 + assert self.obs.line_status[1] + + def test_ignore_ts_set_status_opt_setstat_nopb(self): + # ts id 1 => set_status + self.obs = self._aux_reset_env(seed=0, ep_id=1, init_state={"set_line_status": [(5, -1)], + "method": "ignore"}) + + # in the time series (ignored) + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[1]] == 1 + assert self.obs.topo_vect[self.obs.line_ex_pos_topo_vect[1]] == 1 + assert self.obs.line_status[1] + # in the action + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[5]] == -1 + assert self.obs.topo_vect[self.obs.line_ex_pos_topo_vect[5]] == -1 + assert not self.obs.line_status[5] + + def test_ignore_ts_set_status_opt_setstat_collision(self): + # TODO not tested for method = ignore (because action here totally erased action in ts) + + # ts id 1 => set_status + self.obs = self._aux_reset_env(seed=0, ep_id=1, 
init_state={"set_line_status": [(1, 1)], + "method": "ignore"}) + + # in the time series (bus overriden by the action) + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[1]] == 1 + assert self.obs.topo_vect[self.obs.line_ex_pos_topo_vect[1]] == 1 + assert self.obs.line_status[1] + + def test_byact(self): + # ts id 1 => set_status + act = self.env.action_space({"set_line_status": [(1, 1)]}) + self.obs = self._aux_reset_env(seed=0, ep_id=1, init_state=act) + + # in the time series (bus overriden by the action) + assert self.obs.topo_vect[self.obs.line_or_pos_topo_vect[1]] == 1 + assert self.obs.topo_vect[self.obs.line_ex_pos_topo_vect[1]] == 1 + assert self.obs.line_status[1] + return self.env.chronics_handler.get_init_action() + + +class TestSetInitRunner(unittest.TestCase): + def _env_path(self): + return os.path.join( + PATH_DATA_TEST, "5bus_example_act_topo_set_init" + ) + + def setUp(self) -> None: + self.env_nm = self._env_path() + self.max_iter = 5 + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = grid2op.make(self.env_nm, + test=True + ) + self.runner = Runner(**self.env.get_params_for_runner()) + + def tearDown(self) -> None: + self.env.close() + self.runner._clean_up() + return super().tearDown() + + def test_run_one_episode(self): + res = self.runner.run_one_episode(init_state={"set_line_status": [(1, 1)], "method": "ignore"}, + episode_id=1, + max_iter=self.max_iter, + detailed_output=True + ) + ep_data = res[-1] + init_obs = ep_data.observations[0] + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == 1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[1]] == 1 + assert init_obs.line_status[1] + + def test_run_onesingle_ep_onesingle_act(self): + # one action + res = self.runner.run(nb_episode=1, + init_states={"set_line_status": [(1, 1)], "method": "ignore"}, + episode_id=[1], + max_iter=self.max_iter, + add_detailed_output=True + ) + ep_data = res[0][-1] + init_obs = ep_data.observations[0] + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == 1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[1]] == 1 + assert init_obs.line_status[1] + + # one list (of one element here) + res = self.runner.run(nb_episode=1, + init_states=[{"set_line_status": [(1, 1)], "method": "ignore"}], + episode_id=[1], + max_iter=self.max_iter, + add_detailed_output=True + ) + ep_data = res[0][-1] + init_obs = ep_data.observations[0] + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == 1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[1]] == 1 + assert init_obs.line_status[1] + + # one tuple (of one element here) + res = self.runner.run(nb_episode=1, + init_states=({"set_line_status": [(1, 1)], "method": "ignore"}, ), + episode_id=[1], + max_iter=self.max_iter, + add_detailed_output=True + ) + ep_data = res[0][-1] + init_obs = ep_data.observations[0] + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == 1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[1]] == 1 + assert init_obs.line_status[1] + + def test_run_two_eps_seq_onesingle_act(self, nb_process=1): + # one action + res = self.runner.run(nb_episode=2, + init_states={"set_line_status": [(1, 1)], "method": "ignore"}, + episode_id=[1, 1], + max_iter=self.max_iter, + add_detailed_output=True, + nb_process=nb_process + ) + for el in res: + ep_data = el[-1] + init_obs = ep_data.observations[0] + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == 1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[1]] == 
1 + assert init_obs.line_status[1] + + # one list + res = self.runner.run(nb_episode=2, + init_states=[{"set_line_status": [(1, 1)], "method": "ignore"}, + {"set_line_status": [(1, 1)], "method": "ignore"}], + episode_id=[1, 1], + max_iter=self.max_iter, + add_detailed_output=True, + nb_process=nb_process + ) + for el in res: + ep_data = el[-1] + init_obs = ep_data.observations[0] + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == 1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[1]] == 1 + assert init_obs.line_status[1] + + # one tuple + res = self.runner.run(nb_episode=2, + init_states=({"set_line_status": [(1, 1)], "method": "ignore"}, + {"set_line_status": [(1, 1)], "method": "ignore"}), + episode_id=[1, 1], + max_iter=self.max_iter, + add_detailed_output=True, + nb_process=nb_process + ) + for el in res: + ep_data = el[-1] + init_obs = ep_data.observations[0] + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == 1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[1]] == 1 + assert init_obs.line_status[1] + + def test_run_two_eps_seq_two_acts(self, nb_process=1): + # given as list + res = self.runner.run(nb_episode=2, + init_states=[{"set_bus": {"loads_id": [(0, 1)]}, "set_line_status": [(1, -1)], "method": "ignore"}, + {"set_line_status": [(1, 1)], "method": "ignore"}], + episode_id=[0, 1], + max_iter=self.max_iter, + add_detailed_output=True, # TODO HERE HERE + nb_process=nb_process + ) + # check for ep 0 + ep_data = res[0][-1] + init_obs = ep_data.observations[0] + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == -1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[1]] == -1 + assert not init_obs.line_status[1] + assert init_obs.topo_vect[init_obs.load_pos_topo_vect[0]] == 1 + # check for ep 1 + ep_data = res[1][-1] + init_obs = ep_data.observations[0] + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == 1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[1]] == 1 + assert init_obs.line_status[1] + + # one tuple + res = self.runner.run(nb_episode=2, + init_states=({"set_bus": {"loads_id": [(0, 1)]}, "set_line_status": [(1, -1)], "method": "ignore"}, + {"set_line_status": [(1, 1)], "method": "ignore"}), + episode_id=[0, 1], + max_iter=self.max_iter, + add_detailed_output=True, + nb_process=nb_process + ) + # check for ep 0 + ep_data = res[0][-1] + init_obs = ep_data.observations[0] + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == -1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[1]] == -1 + assert not init_obs.line_status[1] + assert init_obs.topo_vect[init_obs.load_pos_topo_vect[0]] == 1 + # check for ep 1 + ep_data = res[1][-1] + init_obs = ep_data.observations[0] + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == 1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[1]] == 1 + assert init_obs.line_status[1] + + def test_run_two_eps_par_onesingle_act(self): + self.test_run_two_eps_seq_onesingle_act(nb_process=2) + + def test_run_two_eps_par_two_acts(self): + self.test_run_two_eps_seq_two_acts(nb_process=2) + + def test_fail_when_needed(self): + # wrong type + with self.assertRaises(RuntimeError): + res = self.runner.run(nb_episode=2, + init_states=1, + episode_id=[0, 1], + max_iter=self.max_iter, + add_detailed_output=True, + ) + with self.assertRaises(RuntimeError): + res = self.runner.run(nb_episode=2, + init_states=[1, {"set_line_status": [(1, 1)], "method": "ignore"}], + episode_id=[0, 1], + max_iter=self.max_iter, + 
add_detailed_output=True, + ) + with self.assertRaises(RuntimeError): + res = self.runner.run(nb_episode=2, + init_states=[{"set_line_status": [(1, 1)], "method": "ignore"}, 1], + episode_id=[0, 1], + max_iter=self.max_iter, + add_detailed_output=True, + ) + + # wrong size (too big) + with self.assertRaises(RuntimeError): + res = self.runner.run(nb_episode=2, + init_states=[{"set_line_status": [(1, 1)], "method": "ignore"}, + {"set_line_status": [(1, 1)], "method": "ignore"}, + {"set_line_status": [(1, 1)], "method": "ignore"}], + episode_id=[0, 1], + max_iter=self.max_iter, + add_detailed_output=True, + ) + # wrong size (too small) + with self.assertRaises(RuntimeError): + res = self.runner.run(nb_episode=2, + init_states=[{"set_line_status": [(1, 1)], "method": "ignore"}], + episode_id=[0, 1], + max_iter=self.max_iter, + add_detailed_output=True, + ) + + +class TestSetActOptionDefaultComplexAction(unittest.TestCase): + def setUp(self) -> None: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = grid2op.make("educ_case14_storage", test=True, _add_to_name=type(self).__name__) + return super().setUp() + + def tearDown(self) -> None: + self.env.close() + return super().tearDown() + + def test_storage(self): + obs = self.env.reset(seed=0, + options={"time serie id": 0, + "init state": {"set_storage": [(0, 5.)]}}) + assert abs(obs.storage_power[0] - 5.) <= 1e-6 + obs, reward, done, info = self.env.step(self.env.action_space()) + assert abs(obs.storage_power[0] - 0.) <= 1e-6 + + def test_curtail(self): + obs = self.env.reset(seed=0, + options={"time serie id": 0, + "init state": {"curtail": [(3, 0.1)]}}) + assert abs(obs.curtailment_limit[3] - 0.1) <= 1e-6 + obs, reward, done, info = self.env.step(self.env.action_space()) + assert abs(obs.curtailment_limit[3] - 0.1) <= 1e-6 + + def test_redispatching(self): + obs = self.env.reset(seed=0, + options={"time serie id": 0, + "init state": {"redispatch": [(0, -1)]}}) + assert abs(obs.target_dispatch[0] - -1.) <= 1e-6 + assert abs(obs.actual_dispatch[0] - -1.) <= 1e-6 + obs, reward, done, info = self.env.step(self.env.action_space()) + assert abs(obs.target_dispatch[0] - -1.) <= 1e-6 + assert abs(obs.actual_dispatch[0] - -1.) <= 1e-6 + + +if __name__ == "__main__": + unittest.main() diff --git a/grid2op/tests/test_alert_obs_act.py b/grid2op/tests/test_alert_obs_act.py index e2fbc9dfa..fe2e782fb 100644 --- a/grid2op/tests/test_alert_obs_act.py +++ b/grid2op/tests/test_alert_obs_act.py @@ -148,11 +148,13 @@ def test_ambiguous_illicit_alert_action(self) -> None : act2 = self.env.action_space() try: act2.raise_alert = [self.env.dim_alerts] - except Exception as e: - assert e.args[0] == 'Impossible to modify the alert with your input. Please consult the documentation. The error was:\n"Grid2OpException IllegalAction "Impossible to change a raise alert id 10 because there are only 10 on the grid (and in python id starts at 0)""' - - # TODO : is it really illicit or rather ambiguous ? - #assert act.is_ambiguous()[0] + except Exception as exc_: + assert exc_.args[0] == ('Impossible to modify the alert with your input. ' + 'Please consult the documentation. 
The error ' + 'was:\n"Grid2OpException AmbiguousAction ' + '"Impossible to change a raise alert id 10 ' + 'because there are only 10 on the grid (and in ' + 'python id starts at 0)""') diff --git a/grid2op/tests/test_alert_trust_score.py b/grid2op/tests/test_alert_trust_score.py index 58a18e48d..32e0494f1 100644 --- a/grid2op/tests/test_alert_trust_score.py +++ b/grid2op/tests/test_alert_trust_score.py @@ -14,7 +14,7 @@ from grid2op.Observation import BaseObservation from grid2op.tests.helper_path_test import * -from grid2op import make +import grid2op from grid2op.Reward import _AlertTrustScore from grid2op.Parameters import Parameters from grid2op.Exceptions import Grid2OpException @@ -68,7 +68,7 @@ def test_assistant_trust_score_no_blackout_no_attack_no_alert(self) -> None : Raises: Grid2OpException: raise an exception if an attack occur """ - with make( + with grid2op.make( self.env_nm, test=True, difficulty="1", @@ -111,7 +111,7 @@ def test_assistant_trust_score_no_blackout_attack_alert(self) -> None : kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE], duration=3, steps_attack=[2]) - with make(self.env_nm, + with grid2op.make(self.env_nm, test=True, difficulty="1", opponent_attack_cooldown=0, @@ -173,7 +173,7 @@ def test_assistant_trust_score_no_blackout_2_attack_same_time_1_alert(self) -> N kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE] + ['48_53_141'], duration=3, steps_attack=[2]) - with make(self.env_nm, + with grid2op.make(self.env_nm, test=True, difficulty="1", reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE), @@ -237,7 +237,7 @@ def test_assistant_trust_score_no_blackout_no_attack_alert(self) -> None: Raises: Grid2OpException: raise an exception if an attack occur """ - with make( + with grid2op.make( self.env_nm, test=True, difficulty="1", @@ -289,7 +289,7 @@ def test_assistant_trust_score_no_blackout_attack_no_alert(self) -> None: kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE], duration=3, steps_attack=[1]) - with make(self.env_nm, + with grid2op.make(self.env_nm, test=True, difficulty="1", opponent_attack_cooldown=0, @@ -342,7 +342,7 @@ def test_assistant_trust_score_no_blackout_attack_alert_too_late(self) -> None : kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE], duration=3, steps_attack=[2]) - with make(self.env_nm, + with grid2op.make(self.env_nm, test=True, difficulty="1", reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE), @@ -398,7 +398,7 @@ def test_assistant_trust_score_no_blackout_attack_alert_too_early(self)-> None : kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE], duration=3, steps_attack=[2]) - with make(self.env_nm, + with grid2op.make(self.env_nm, test=True, difficulty="1", reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE), @@ -458,7 +458,7 @@ def test_assistant_trust_score_no_blackout_2_attack_same_time_no_alert(self) -> kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE]+['48_53_141'], duration=3, steps_attack=[1]) - with make(self.env_nm, + with grid2op.make(self.env_nm, test=True, difficulty="1", opponent_attack_cooldown=0, @@ -512,7 +512,7 @@ def test_assistant_trust_score_no_blackout_2_attack_same_time_2_alert(self) -> N kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE]+['48_53_141'], duration=3, steps_attack=[2]) - with make(self.env_nm, + with grid2op.make(self.env_nm, test=True, difficulty="1", reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE), @@ -575,7 +575,7 @@ def test_assistant_trust_score_no_blackout_2_attack_diff_time_no_alert(self) -> kwargs_opponent = 
dict(lines_attacked=[ATTACKED_LINE]+['48_53_141'], duration=[1, 1], steps_attack=[1, 2]) - with make(self.env_nm, + with grid2op.make(self.env_nm, test=True, difficulty="1", opponent_attack_cooldown=0, @@ -629,7 +629,7 @@ def test_assistant_trust_score_no_blackout_2_attack_diff_time_2_alert(self) -> N kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE]+['48_53_141'], duration=[1,1], steps_attack=[2, 3]) - with make(self.env_nm, + with grid2op.make(self.env_nm, test=True, difficulty="1", opponent_attack_cooldown=0, @@ -693,7 +693,7 @@ def test_assistant_trust_score_no_blackout_2_attack_diff_time_alert_first_attack kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE]+['48_53_141'], duration=[1,1], steps_attack=[2, 3]) - with make(self.env_nm, + with grid2op.make(self.env_nm, test=True, difficulty="1", opponent_attack_cooldown=0, @@ -760,7 +760,7 @@ def test_assistant_trust_score_no_blackout_2_attack_diff_time_alert_second_attac kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE]+['48_53_141'], duration=[1,1], steps_attack=[2, 3]) - with make(self.env_nm, + with grid2op.make(self.env_nm, test=True, difficulty="1", opponent_attack_cooldown=0, @@ -842,7 +842,7 @@ def test_assistant_trust_score_blackout_attack_nocause_blackout_no_alert(self) - kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE], duration=3, steps_attack=[3]) - with make(self.env_nm, + with grid2op.make(self.env_nm, test=True, difficulty="1", opponent_attack_cooldown=0, @@ -904,7 +904,7 @@ def test_assistant_trust_score_blackout_attack_nocause_blackout_raise_alert(self kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE], duration=3, steps_attack=[3]) - with make(self.env_nm, + with grid2op.make(self.env_nm, test=True, difficulty="1", opponent_attack_cooldown=0, @@ -966,7 +966,7 @@ def test_assistant_trust_score_blackout_no_attack_alert(self) -> None: """Even if there is a blackout, an we raise an alert we expect a score of 0 because there is no attack and we don't finish the scenario""" - with make( + with grid2op.make( self.env_nm, test=True, difficulty="1", @@ -1012,7 +1012,7 @@ def test_assistant_trust_score_blackout_no_attack_alert(self) -> None: def test_assistant_trust_score_blackout_no_attack_no_alert(self) -> None: """Even if there is a blackout, an we don't raise an alert we expect a score of 0 because there is no attack and we don't finish the scenario""" - with make( + with grid2op.make( self.env_nm, test=True, difficulty="1", @@ -1056,7 +1056,7 @@ def test_assistant_trust_score_blackout_no_attack_no_alert(self) -> None: def test_assistant_trust_score_blackout_no_attack_before_window_alert(self) -> None: """Even if there is a blackout, an we raise an alert too early we expect a score of 0 because there is no attack and we don't finish the scenario""" - with make( + with grid2op.make( self.env_nm, test=True, difficulty="1", @@ -1102,7 +1102,7 @@ def test_assistant_trust_score_blackout_no_attack_before_window_alert(self) -> N def test_assistant_trust_score_blackout_no_attack_before_window_no_alert(self) -> None: """Even if there is a blackout, an we raise an alert too late we expect a score of 0 because there is no attack and we don't finish the scenario""" - with make( + with grid2op.make( self.env_nm, test=True, difficulty="1", @@ -1174,7 +1174,7 @@ def test_assistant_trust_score_blackout_attack_raise_good_alert(self) -> None : kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE], duration=3, steps_attack=[3]) - with make(self.env_nm, + with grid2op.make(self.env_nm, test=True, difficulty="1", 
opponent_attack_cooldown=0, @@ -1239,7 +1239,7 @@ def test_assistant_trust_score_blackout_attack_raise_alert_just_before_blackout( kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE], duration=3, steps_attack=[3]) - with make(self.env_nm, + with grid2op.make(self.env_nm, test=True, difficulty="1", opponent_attack_cooldown=0, @@ -1304,7 +1304,7 @@ def test_assistant_trust_score_blackout_2_lines_attacked_simulaneous_only_1_aler kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE] + ['48_53_141'], duration=3, steps_attack=[3, 3]) - with make(self.env_nm, + with grid2op.make(self.env_nm, test=True, difficulty="1", reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE), @@ -1376,7 +1376,7 @@ def test_assistant_trust_score_blackout_attack_no_alert(self) -> None: kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE], duration=3, steps_attack=[3]) - with make(self.env_nm, + with grid2op.make(self.env_nm, test=True, difficulty="1", opponent_attack_cooldown=0, @@ -1437,7 +1437,7 @@ def test_assistant_trust_score_blackout_attack_raise_alert_too_early(self) -> No kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE], duration=3, steps_attack=[3]) - with make(self.env_nm, + with grid2op.make(self.env_nm, test=True, difficulty="1", opponent_attack_cooldown=0, @@ -1502,7 +1502,7 @@ def test_assistant_trust_score_blackout_2_lines_same_step_in_window_good_alerts kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE]+['48_53_141'], duration=3, steps_attack=[3, 3]) - with make(self.env_nm, + with grid2op.make(self.env_nm, test=True, difficulty="1", opponent_attack_cooldown=0, @@ -1570,7 +1570,7 @@ def test_assistant_trust_score_blackout_2_lines_different_step_in_window_good_a kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE]+['48_53_141'], duration=[1,1], steps_attack=[3, 4]) - with make(self.env_nm, + with grid2op.make(self.env_nm, test=True, difficulty="1", opponent_attack_cooldown=0, @@ -1641,7 +1641,7 @@ def test_assistant_trust_score_blackout_2_lines_attacked_different_step_in_windo kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE]+['48_53_141'], duration=[1,1], steps_attack=[3, 4]) - with make(self.env_nm, + with grid2op.make(self.env_nm, test=True, difficulty="1", opponent_attack_cooldown=0, @@ -1706,7 +1706,7 @@ def test_assistant_trust_score_blackout_2_lines_attacked_different_step_in_windo kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE]+['48_53_141'], duration=[1,1], steps_attack=[3, 4]) - with make(self.env_nm, + with grid2op.make(self.env_nm, test=True, difficulty="1", opponent_attack_cooldown=0, @@ -1770,7 +1770,7 @@ def test_assistant_trust_score_blackout_2_lines_attacked_different_1_in_window_1 kwargs_opponent = dict(lines_attacked=[ATTACKED_LINE]+['48_53_141'], duration=[1, 1], steps_attack=[3, 6]) - with make(self.env_nm, + with grid2op.make(self.env_nm, test=True, difficulty="1", opponent_attack_cooldown=0, @@ -1831,7 +1831,7 @@ def setUp(self) -> None: self.env_nm = os.path.join( PATH_DATA_TEST, "l2rpn_idf_2023_with_alert" ) - self.env = make(self.env_nm, test=True, difficulty="1", + self.env = grid2op.make(self.env_nm, test=True, difficulty="1", reward_class=_AlertTrustScore(**DEFAULT_PARAMS_TRUSTSCORE)) self.env.seed(0) return super().setUp() diff --git a/grid2op/tests/test_attached_envs.py b/grid2op/tests/test_attached_envs.py index d9c0742bc..0451dfb52 100644 --- a/grid2op/tests/test_attached_envs.py +++ b/grid2op/tests/test_attached_envs.py @@ -12,11 +12,12 @@ from grid2op.Action import (PowerlineSetAction, PlayableAction, DontAct) from grid2op.Observation import 
CompleteObservation -from grid2op.Opponent import GeometricOpponent +from grid2op.Opponent import GeometricOpponent, GeometricOpponentMultiArea import pdb # TODO refactor to have 1 base class, maybe +# TODO: test runner, gym_compat and EpisodeData class TestL2RPNNEURIPS2020_Track1(unittest.TestCase): @@ -28,11 +29,11 @@ def setUp(self) -> None: _ = self.env.reset() def test_elements(self): - assert self.env.n_sub == 36 - assert self.env.n_line == 59 - assert self.env.n_load == 37 - assert self.env.n_gen == 22 - assert self.env.n_storage == 0 + assert type(self.env).n_sub == 36 + assert type(self.env).n_line == 59 + assert type(self.env).n_load == 37 + assert type(self.env).n_gen == 22 + assert type(self.env).n_storage == 0 def test_opponent(self): assert issubclass(self.env._opponent_action_class, PowerlineSetAction) @@ -72,11 +73,11 @@ def setUp(self) -> None: _ = self.env.reset() def test_elements(self): - assert self.env.n_sub == 36 - assert self.env.n_line == 59 - assert self.env.n_load == 37 - assert self.env.n_gen == 22 - assert self.env.n_storage == 0 + assert type(self.env).n_sub == 36 + assert type(self.env).n_line == 59 + assert type(self.env).n_load == 37 + assert type(self.env).n_gen == 22 + assert type(self.env).n_storage == 0 def test_opponent(self): assert issubclass(self.env._opponent_action_class, PowerlineSetAction) @@ -121,11 +122,11 @@ def setUp(self) -> None: _ = self.env.reset() def test_elements(self): - assert self.env.n_sub == 118 - assert self.env.n_line == 186 - assert self.env.n_load == 99 - assert self.env.n_gen == 62 - assert self.env.n_storage == 0 + assert type(self.env).n_sub == 118 + assert type(self.env).n_line == 186 + assert type(self.env).n_load == 99 + assert type(self.env).n_gen == 62 + assert type(self.env).n_storage == 0 def test_opponent(self): assert issubclass(self.env._opponent_action_class, DontAct) @@ -165,11 +166,11 @@ def setUp(self) -> None: _ = self.env.reset() def test_elements(self): - assert self.env.n_sub == 14 - assert self.env.n_line == 20 - assert self.env.n_load == 11 - assert self.env.n_gen == 6 - assert self.env.n_storage == 0 + assert type(self.env).n_sub == 14 + assert type(self.env).n_line == 20 + assert type(self.env).n_load == 11 + assert type(self.env).n_gen == 6 + assert type(self.env).n_storage == 0 def test_opponent(self): assert issubclass(self.env._opponent_action_class, DontAct) @@ -209,11 +210,11 @@ def setUp(self) -> None: _ = self.env.reset() def test_elements(self): - assert self.env.n_sub == 14 - assert self.env.n_line == 20 - assert self.env.n_load == 11 - assert self.env.n_gen == 6 - assert self.env.n_storage == 0 + assert type(self.env).n_sub == 14 + assert type(self.env).n_line == 20 + assert type(self.env).n_load == 11 + assert type(self.env).n_gen == 6 + assert type(self.env).n_storage == 0 def test_opponent(self): assert issubclass(self.env._opponent_action_class, DontAct) @@ -253,11 +254,11 @@ def setUp(self) -> None: _ = self.env.reset() def test_elements(self): - assert self.env.n_sub == 14 - assert self.env.n_line == 20 - assert self.env.n_load == 11 - assert self.env.n_gen == 6 - assert self.env.n_storage == 2 + assert type(self.env).n_sub == 14 + assert type(self.env).n_line == 20 + assert type(self.env).n_load == 11 + assert type(self.env).n_gen == 6 + assert type(self.env).n_storage == 2 def test_opponent(self): assert issubclass(self.env._opponent_action_class, DontAct) @@ -288,5 +289,102 @@ def test_random_action(self): ) + +class TestL2RPNWCCI2022(unittest.TestCase): + def setUp(self) -> None: 
+ with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = grid2op.make("l2rpn_wcci_2022", test=True, _add_to_name=type(self).__name__) + _ = self.env.reset(seed=0) + + def test_elements(self): + assert type(self.env).n_sub == 118, f"{type(self.env).n_sub} vs 118" + assert type(self.env).n_line == 186, f"{type(self.env).n_line} vs 186" + assert type(self.env).n_load == 91, f"{type(self.env).n_load} vs 91" + assert type(self.env).n_gen == 62, f"{type(self.env).n_gen} vs 62" + assert type(self.env).n_storage == 7, f"{type(self.env).n_storage} vs 7" + + def test_opponent(self): + assert issubclass(self.env._opponent_action_class, PowerlineSetAction) + assert isinstance(self.env._opponent, GeometricOpponent) + assert self.env._opponent_action_space.n == type(self.env).n_line + + def test_action_space(self): + assert issubclass(self.env.action_space.subtype, PlayableAction) + assert self.env.action_space.n == 1567, ( + f"act space size is {self.env.action_space.n}, should be {1567}" + ) + + def test_observation_space(self): + assert issubclass(self.env.observation_space.subtype, CompleteObservation) + size_th = 4295 + assert self.env.observation_space.n == size_th, ( + f"obs space size is " + f"{self.env.observation_space.n}, " + f"should be {size_th}" + ) + + def test_random_action(self): + """test i can perform some step (random)""" + i = 0 + for i in range(10): + act = self.env.action_space.sample() + obs, reward, done, info = self.env.step(act) + if done: + break + assert i >= 1, ( + "could not perform the random action test because it games over first time step. " + "Please fix the test and try again" + ) + + +class TestL2RPNIDF2023(unittest.TestCase): + def setUp(self) -> None: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = grid2op.make("l2rpn_idf_2023", test=True, _add_to_name=type(self).__name__) + _ = self.env.reset(seed=0) + + def test_elements(self): + assert type(self.env).n_sub == 118, f"{type(self.env).n_sub} vs 118" + assert type(self.env).n_line == 186, f"{type(self.env).n_line} vs 186" + assert type(self.env).n_load == 99, f"{type(self.env).n_load} vs 99" + assert type(self.env).n_gen == 62, f"{type(self.env).n_gen} vs 62" + assert type(self.env).n_storage == 7, f"{type(self.env).n_storage} vs 7" + + def test_opponent(self): + assert issubclass(self.env._opponent_action_class, PowerlineSetAction) + assert isinstance(self.env._opponent, GeometricOpponentMultiArea) + assert self.env._opponent_action_space.n == type(self.env).n_line + + def test_action_space(self): + assert issubclass(self.env.action_space.subtype, PlayableAction) + assert self.env.action_space.n == 1605, ( + f"act space size is {self.env.action_space.n}, should be {1605}" + ) + + def test_observation_space(self): + assert issubclass(self.env.observation_space.subtype, CompleteObservation) + size_th = 4460 + assert self.env.observation_space.n == size_th, ( + f"obs space size is " + f"{self.env.observation_space.n}, " + f"should be {size_th}" + ) + + def test_random_action(self): + """test i can perform some step (random)""" + i = 0 + for i in range(10): + act = self.env.action_space.sample() + obs, reward, done, info = self.env.step(act) + if done: + break + assert i >= 1, ( + "could not perform the random action test because it games over first time step. 
" + "Please fix the test and try again" + ) + + if __name__ == "__main__": unittest.main() diff --git a/grid2op/tests/test_attached_envs_compat.py b/grid2op/tests/test_attached_envs_compat.py index 65b170cd2..9b7904970 100644 --- a/grid2op/tests/test_attached_envs_compat.py +++ b/grid2op/tests/test_attached_envs_compat.py @@ -34,15 +34,15 @@ def setUp(self) -> None: self.env.seed(0) def test_elements(self): - assert self.env.n_sub == 36 - assert self.env.n_line == 59 - assert self.env.n_load == 37 - assert self.env.n_gen == 22 - assert self.env.n_storage == 0 + assert type(self.env).n_sub == 36 + assert type(self.env).n_line == 59 + assert type(self.env).n_load == 37 + assert type(self.env).n_gen == 22 + assert type(self.env).n_storage == 0 def test_opponent(self): assert issubclass(self.env._opponent_action_class, PowerlineSetAction) - assert self.env._opponent_action_space.n == self.env.n_line + assert self.env._opponent_action_space.n == type(self.env).n_line def test_action_space(self): assert issubclass(self.env.action_space.subtype, PlayableAction) @@ -79,11 +79,11 @@ def setUp(self) -> None: self.env.seed(0) def test_elements(self): - assert self.env.n_sub == 118 - assert self.env.n_line == 186 - assert self.env.n_load == 99 - assert self.env.n_gen == 62 - assert self.env.n_storage == 0 + assert type(self.env).n_sub == 118 + assert type(self.env).n_line == 186 + assert type(self.env).n_load == 99 + assert type(self.env).n_gen == 62 + assert type(self.env).n_storage == 0 def test_opponent(self): assert issubclass(self.env._opponent_action_class, DontAct) @@ -127,11 +127,11 @@ def setUp(self) -> None: self.env.seed(42) def test_elements(self): - assert self.env.n_sub == 14 - assert self.env.n_line == 20 - assert self.env.n_load == 11 - assert self.env.n_gen == 6 - assert self.env.n_storage == 0 + assert type(self.env).n_sub == 14 + assert type(self.env).n_line == 20 + assert type(self.env).n_load == 11 + assert type(self.env).n_gen == 6 + assert type(self.env).n_storage == 0 def test_opponent(self): assert issubclass(self.env._opponent_action_class, DontAct) @@ -172,11 +172,11 @@ def setUp(self) -> None: self.env.seed(0) def test_elements(self): - assert self.env.n_sub == 14 - assert self.env.n_line == 20 - assert self.env.n_load == 11 - assert self.env.n_gen == 6 - assert self.env.n_storage == 0 + assert type(self.env).n_sub == 14 + assert type(self.env).n_line == 20 + assert type(self.env).n_load == 11 + assert type(self.env).n_gen == 6 + assert type(self.env).n_storage == 0 def test_opponent(self): assert issubclass(self.env._opponent_action_class, DontAct) @@ -214,14 +214,14 @@ def setUp(self) -> None: _compat_glop_version=GridObjects.BEFORE_COMPAT_VERSION, _add_to_name=type(self).__name__+"test_attached_compat_4", ) - self.env.seed(0) + self.env.seed(3) # 0, 1 and 2 leads to "wrong action" (games over) def test_elements(self): - assert self.env.n_sub == 14 - assert self.env.n_line == 20 - assert self.env.n_load == 11 - assert self.env.n_gen == 6 - assert self.env.n_storage == 0 + assert type(self.env).n_sub == 14 + assert type(self.env).n_line == 20 + assert type(self.env).n_load == 11 + assert type(self.env).n_gen == 6 + assert type(self.env).n_storage == 0 def test_opponent(self): assert issubclass(self.env._opponent_action_class, DontAct) @@ -239,7 +239,9 @@ def test_same_env_as_no_storage(self): res = 0 with warnings.catch_warnings(): warnings.filterwarnings("ignore") - env = grid2op.make("educ_case14_redisp", test=True, 
_add_to_name=type(self).__name__+"test_same_env_as_no_storage") + env = grid2op.make("educ_case14_redisp", + test=True, + _add_to_name=type(self).__name__+"test_same_env_as_no_storage") for attr in self.env.observation_space.attr_list_vect: tmp = getattr(self.env.observation_space._template_obj, attr).shape tmp2 = getattr(env.observation_space._template_obj, attr).shape @@ -272,7 +274,6 @@ def test_random_action(self): act = self.env.action_space.sample() obs, reward, done, info = self.env.step(act) if done: - pdb.set_trace() break assert i >= 1, ( "could not perform the random action test because it games over first time step. " diff --git a/grid2op/tests/test_backend_shunt_deactivated.py b/grid2op/tests/test_backend_shunt_deactivated.py new file mode 100644 index 000000000..c9db09253 --- /dev/null +++ b/grid2op/tests/test_backend_shunt_deactivated.py @@ -0,0 +1,25 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# See AUTHORS.txt +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. + +import unittest + +from grid2op.Backend import PandaPowerBackend + +class PandaPowerNoShunt(PandaPowerBackend): + shunts_data_available = False + +from grid2op._create_test_suite import create_test_suite +from grid2op.tests.aaa_test_backend_interface import AAATestBackendAPI +class TestBackendAPI_PPNoShuntTester(AAATestBackendAPI, unittest.TestCase): + def make_backend(self, detailed_infos_for_cascading_failures=False): + return PandaPowerNoShunt(detailed_infos_for_cascading_failures=detailed_infos_for_cascading_failures) + + +# and run it with `python -m unittest gridcal_backend_tests.py` +if __name__ == "__main__": + unittest.main() \ No newline at end of file diff --git a/grid2op/tests/test_basic_env_ls.py b/grid2op/tests/test_basic_env_ls.py new file mode 100644 index 000000000..1e1496ae1 --- /dev/null +++ b/grid2op/tests/test_basic_env_ls.py @@ -0,0 +1,353 @@ +# Copyright (c) 2019-2023, RTE (https://www.rte-france.com) +# See AUTHORS.txt +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. 
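For context, the "init state" reset option and the Runner "init_states" argument exercised by TestSetActOptionDefault and TestSetInitRunner earlier in this patch can be summarised with a minimal, illustrative usage sketch. Only the option and keyword names come from the tests above; the environment name "l2rpn_case14_sandbox" and the element ids are placeholders chosen for illustration:

import grid2op
from grid2op.Runner import Runner

# illustrative sketch: environment name and element ids are placeholders
env = grid2op.make("l2rpn_case14_sandbox", test=True)

# apply an extra action on top of whatever initial state the time series defines (if any)
obs = env.reset(seed=0,
                options={"time serie id": 0,
                         "init state": {"set_bus": {"lines_or_id": [(0, 2)]}}})

# "method": "ignore" discards the initial state stored in the time series
# and only applies the action given here
obs = env.reset(options={"init state": {"set_line_status": [(1, -1)],
                                        "method": "ignore"}})

# the Runner accepts the same dictionaries through `init_states`
# (one per episode, or a single one reused for every episode)
runner = Runner(**env.get_params_for_runner())
res = runner.run(nb_episode=1,
                 init_states={"set_line_status": [(1, -1)], "method": "ignore"},
                 max_iter=5,
                 add_detailed_output=True)
ep_data = res[0][-1]  # EpisodeData; its first observation reflects the initial state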
+ +import warnings +import unittest +import numpy as np +import tempfile +import os +import json +import packaging +from packaging import version + +import grid2op +from grid2op.Environment import Environment +from grid2op.Runner import Runner +from grid2op.gym_compat import (GymEnv, + BoxGymActSpace, + BoxGymObsSpace, + DiscreteActSpace, + MultiDiscreteActSpace) +from grid2op.Action import PlayableAction +from grid2op.Parameters import Parameters +from grid2op.Observation import CompleteObservation +from grid2op.Agent import RandomAgent +from grid2op.tests.helper_path_test import data_test_dir +from grid2op.Episode import EpisodeData + +try: + from lightsim2grid import LightSimBackend + LS_AVAIL = True +except ImportError: + LS_AVAIL = False + pass + + +class TestEnvironmentBasic(unittest.TestCase): + def setUp(self) -> None: + if not LS_AVAIL: + self.skipTest("lightsim not installed") + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = grid2op.make("l2rpn_case14_sandbox", + test=True, + _add_to_name=type(self).__name__, + backend=LightSimBackend()) + self.line_id = 3 + th_lim = self.env.get_thermal_limit() * 2. # avoid all problem in general + th_lim[self.line_id] /= 10. # make sure to get trouble in line 3 + self.env.set_thermal_limit(th_lim) + + TestEnvironmentBasic._init_env(self.env) + + @staticmethod + def _init_env(env): + env.set_id(0) + env.seed(0) + env.reset() + + def tearDown(self) -> None: + self.env.close() + return super().tearDown() + + def test_right_type(self): + assert isinstance(self.env, Environment) + + def test_ok(self): + act = self.env.action_space() + for i in range(10): + obs_in, reward, done, info = self.env.step(act) + if i < 2: # 2 : 2 full steps already + assert obs_in.timestep_overflow[self.line_id] == i + 1, f"error for step {i}: {obs_in.timestep_overflow[self.line_id]}" + else: + # cooldown applied for line 3: + # - it disconnect stuff in `self.env_in` + # - it does not affect anything in `self.env_out` + assert not obs_in.line_status[self.line_id] + + def test_reset(self): + # timestep_overflow should be 0 initially even if the flow is too high + obs = self.env.reset() + assert obs.timestep_overflow[self.line_id] == 0 + assert obs.rho[self.line_id] > 1. 
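The overflow assertions in this file all hinge on the thermal limits set in setUp: every line gets a comfortable limit except line 3, whose limit is divided by 10, so it is overloaded from the very first observation and ends up disconnected by the protections after the allowed number of overflowing steps (the tests expect the disconnection from the third step onward). A standalone sketch of that setup, assuming lightsim2grid is available:

import warnings
import grid2op
from lightsim2grid import LightSimBackend

line_id = 3
with warnings.catch_warnings():
    warnings.filterwarnings("ignore")
    env = grid2op.make("l2rpn_case14_sandbox", test=True, backend=LightSimBackend())
th_lim = env.get_thermal_limit() * 2.   # roomy limits everywhere ...
th_lim[line_id] /= 10.                  # ... except on line 3, forced into overflow
env.set_thermal_limit(th_lim)

obs = env.reset()
assert obs.rho[line_id] > 1.            # overloaded right away, but not yet disconnected
obs, *_ = env.step(env.action_space())  # timestep_overflow[line_id] starts counting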
+ + def test_can_make_2_envs(self): + env_name = "l2rpn_case14_sandbox" + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make(env_name, test=True, backend=LightSimBackend()) + + param = Parameters() + param.NO_OVERFLOW_DISCONNECTION = True + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env1 = grid2op.make("educ_case14_storage", + test=True, + action_class=PlayableAction, + param=param, + backend=LightSimBackend()) + + +class TestEnvironmentBasicCpy(TestEnvironmentBasic): + def setUp(self) -> None: + super().setUp() + init_int = self.env + self.env = self.env.copy() + init_int.close() + + +class TestBasicEnvironmentRunner(unittest.TestCase): + def setUp(self) -> None: + TestEnvironmentBasic.setUp(self) + self.max_iter = 10 + + def tearDown(self) -> None: + self.env.close() + return super().tearDown() + + def test_runner_can_make(self): + runner = Runner(**self.env.get_params_for_runner()) + env2 = runner.init_env() + assert isinstance(env2, Environment) + + def test_runner(self): + # create the runner + runner_in = Runner(**self.env.get_params_for_runner()) + res_in, *_ = runner_in.run(nb_episode=1, max_iter=self.max_iter, env_seeds=[0], episode_id=[0], add_detailed_output=True) + res_in2, *_ = runner_in.run(nb_episode=1, max_iter=self.max_iter, env_seeds=[0], episode_id=[0]) + # check correct results are obtained when agregated + assert res_in[3] == self.max_iter, f"{res_in[3]} vs {self.max_iter}" + assert res_in2[3] == self.max_iter, f"{res_in[3]} vs {self.max_iter}" + assert np.allclose(res_in[2], 645.4992065) + assert np.allclose(res_in2[2], 645.4992065) + + # check detailed results + ep_data_in = res_in[-1] + for i in range(self.max_iter + 1): + obs_in = ep_data_in.observations[i] + if i < 3: + assert obs_in.timestep_overflow[self.line_id] == i, f"error for step {i}: {obs_in.timestep_overflow[self.line_id]}" + else: + # cooldown applied for line 3: + # - it disconnect stuff in `self.env_in` + # - it does not affect anything in `self.env_out` + assert not obs_in.line_status[self.line_id], f"error for step {i}: line is not disconnected" + + def test_backward_compatibility(self): + # TODO copy paste from test_Runner + backward_comp_version = [ + "1.6.4", # minimum version for lightsim2grid + "1.6.5", + "1.7.0", + "1.7.1", + "1.7.2", + "1.8.1", + # "1.9.0", # this one is bugy I don"t know why + "1.9.1", + "1.9.2", + "1.9.3", + "1.9.4", + "1.9.5", + "1.9.6", + "1.9.7", + "1.9.8", + "1.10.0", + "1.10.1", + ] + # first check a normal run + curr_version = "test_version" + PATH_PREVIOUS_RUNNER = os.path.join(data_test_dir, "runner_data") + assert ( + "curtailment" in CompleteObservation.attr_list_vect + ), "error at the beginning" + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + with grid2op.make( + "rte_case5_example", test=True, + _add_to_name=type(self).__name__, + backend=LightSimBackend() + ) as env, tempfile.TemporaryDirectory() as path: + runner = Runner(**env.get_params_for_runner(), agentClass=RandomAgent) + runner.run( + nb_episode=2, + path_save=os.path.join(path, curr_version), + pbar=False, + max_iter=100, + env_seeds=[1, 0], + agent_seeds=[42, 69], + ) + # check that i can read this data generate for this runner + try: + self._aux_backward(path, curr_version, curr_version) + except Exception as exc_: + raise RuntimeError(f"error for {curr_version}") from exc_ + assert ( + "curtailment" in CompleteObservation.attr_list_vect + ), "error after the first runner" + + # check that it raises a warning 
if loaded on the compatibility version + grid2op_version = backward_comp_version[0] + with self.assertWarns(UserWarning, msg=f"error for {grid2op_version}"): + self._aux_backward( + PATH_PREVIOUS_RUNNER, f"res_agent_{grid2op_version}", grid2op_version + ) + + # now check the compat versions + for grid2op_version in backward_comp_version: + # check that i can read previous data stored from previous grid2Op version + # can be loaded properly + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + try: + self._aux_backward( + PATH_PREVIOUS_RUNNER, + f"res_agent_{grid2op_version}", + grid2op_version, + ) + except Exception as exc_: + raise RuntimeError(f"error for {grid2op_version}") from exc_ + assert "curtailment" in CompleteObservation.attr_list_vect, ( + f"error after the legacy version " f"{grid2op_version}" + ) + + def _aux_backward(self, base_path, g2op_version_txt, g2op_version): + # TODO copy paste from test_Runner + episode_studied = EpisodeData.list_episode( + os.path.join(base_path, g2op_version_txt) + ) + for base_path, episode_path in episode_studied: + assert "curtailment" in CompleteObservation.attr_list_vect, ( + f"error after the legacy version " f"{g2op_version}" + ) + this_episode = EpisodeData.from_disk(base_path, episode_path) + assert "curtailment" in CompleteObservation.attr_list_vect, ( + f"error after the legacy version " f"{g2op_version}" + ) + full_episode_path = os.path.join(base_path, episode_path) + with open( + os.path.join(full_episode_path, "episode_meta.json"), + "r", + encoding="utf-8", + ) as f: + meta_data = json.load(f) + nb_ts = int(meta_data["nb_timestep_played"]) + try: + assert len(this_episode.actions) == nb_ts, ( + f"wrong number of elements for actions for version " + f"{g2op_version_txt}: {len(this_episode.actions)} vs {nb_ts}" + ) + assert len(this_episode.observations) == nb_ts + 1, ( + f"wrong number of elements for observations " + f"for version {g2op_version_txt}: " + f"{len(this_episode.observations)} vs {nb_ts}" + ) + assert len(this_episode.env_actions) == nb_ts, ( + f"wrong number of elements for env_actions for " + f"version {g2op_version_txt}: " + f"{len(this_episode.env_actions)} vs {nb_ts}" + ) + except Exception as exc_: + raise exc_ + g2op_ver = "" + try: + g2op_ver = version.parse(g2op_version) + except packaging.version.InvalidVersion: + if g2op_version != "test_version": + g2op_ver = version.parse("0.0.1") + else: + g2op_ver = version.parse("1.4.1") + if g2op_ver <= version.parse("1.4.0"): + assert ( + EpisodeData.get_grid2op_version(full_episode_path) == "<=1.4.0" + ), "wrong grid2op version stored (grid2op version <= 1.4.0)" + elif g2op_version == "test_version": + assert ( + EpisodeData.get_grid2op_version(full_episode_path) + == grid2op.__version__ + ), "wrong grid2op version stored (test_version)" + else: + assert ( + EpisodeData.get_grid2op_version(full_episode_path) == g2op_version + ), "wrong grid2op version stored (>=1.5.0)" + +class TestBasicEnvironmentGym(unittest.TestCase): + def setUp(self) -> None: + TestEnvironmentBasic.setUp(self) + + def tearDown(self) -> None: + self.env.close() + return super().tearDown() + + def _aux_run_envs(self, act, env_gym): + for i in range(10): + obs_in, reward, done, truncated, info = env_gym.step(act) + if i < 2: # 2 : 2 full steps already + assert obs_in["timestep_overflow"][self.line_id] == i + 1, f"error for step {i}: {obs_in['timestep_overflow'][self.line_id]}" + else: + # cooldown applied for line 3: + # - it disconnect stuff in `self.env_in` + # - it does not 
affect anything in `self.env_out` + assert not obs_in["line_status"][self.line_id] + + def test_gym_with_step(self): + """test the step function also disconnects (or not) the lines""" + env_gym = GymEnv(self.env) + act = {} + self._aux_run_envs(act, env_gym) + env_gym.reset() + self._aux_run_envs(act, env_gym) + + def test_gym_normal(self): + """test I can create the gym env""" + env_gym = GymEnv(self.env) + env_gym.reset() + + def test_gym_box(self): + """test I can create the gym env with box ob space and act space""" + env_gym = GymEnv(self.env) + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env_gym.action_space = BoxGymActSpace(self.env.action_space) + env_gym.observation_space = BoxGymObsSpace(self.env.observation_space) + env_gym.reset() + + def test_gym_discrete(self): + """test I can create the gym env with discrete act space""" + env_gym = GymEnv(self.env) + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env_gym.action_space = DiscreteActSpace(self.env.action_space) + env_gym.reset() + act = 0 + self._aux_run_envs(act, env_gym) + + def test_gym_multidiscrete(self): + """test I can create the gym env with multi discrete act space""" + env_gym = GymEnv(self.env) + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env_gym.action_space = MultiDiscreteActSpace(self.env.action_space) + env_gym.reset() + act = env_gym.action_space.sample() + act[:] = 0 + self._aux_run_envs(act, env_gym) + + +if __name__ == "__main__": + unittest.main() diff --git a/grid2op/tests/test_chronics_npy.py b/grid2op/tests/test_chronics_npy.py index f1173a980..7bf98ee11 100644 --- a/grid2op/tests/test_chronics_npy.py +++ b/grid2op/tests/test_chronics_npy.py @@ -29,7 +29,9 @@ def setUp(self): self.env_name = "l2rpn_case14_sandbox" with warnings.catch_warnings(): warnings.filterwarnings("ignore") - self.env_ref = grid2op.make(self.env_name, test=True, _add_to_name=type(self).__name__) + self.env_ref = grid2op.make(self.env_name, + test=True, + _add_to_name=type(self).__name__) self.load_p = 1.0 * self.env_ref.chronics_handler.real_data.data.load_p self.load_q = 1.0 * self.env_ref.chronics_handler.real_data.data.load_q @@ -105,7 +107,7 @@ def test_proper_start_end_2(self): ), f"error at iteration {ts}" obs, *_ = env.step(env.action_space()) assert np.all(obs_ref.gen_p == obs.gen_p), f"error at iteration {ts}" - assert obs.max_step == END + assert obs.max_step == END - LAG, f"{obs.max_step} vs {END - LAG}" with self.assertRaises(Grid2OpException): env.step( env.action_space() diff --git a/grid2op/tests/test_compute_switch_pos.py b/grid2op/tests/test_compute_switch_pos.py new file mode 100644 index 000000000..68ecfa96c --- /dev/null +++ b/grid2op/tests/test_compute_switch_pos.py @@ -0,0 +1,181 @@ +# Copyright (c) 2023, RTE (https://www.rte-france.com) +# See AUTHORS.txt +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. 
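The checks in this new file validate `compute_switches_position` indirectly: a switch state is accepted when the graph made of the closed switches splits the connectivity nodes into exactly the buses that were requested, which `_aux_test_switch_topo` verifies through networkx connected components. A minimal sketch of that idea on a hypothetical 3-node, 2-switch layout (the real tests read their topologies from the `test_detailed_topo` data files):

import networkx as nx
import numpy as np

# hypothetical toy data: columns are (sub id, node1, node2), one row per switch
switches = np.array([[0, 0, 1],
                     [0, 1, 2]])
closed = np.array([True, False])   # only the first switch is closed

graph = nx.Graph()
graph.add_nodes_from(range(3))     # isolated nodes must show up as their own component
graph.add_edges_from((n1, n2) for _, n1, n2 in switches[closed])
buses = list(nx.connected_components(graph))
assert {0, 1} in buses             # nodes 0 and 1 end up on the same electrical bus
assert {2} in buses                # node 2 is alone, i.e. disconnected from them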
+ +import unittest +import pandas as pd +import os +import numpy as np +import networkx as nx + +from grid2op.tests.helper_path_test import * +import grid2op + +from grid2op.Space import DetailedTopoDescription +from grid2op.Exceptions import ImpossibleTopology +import pdb + + +class TestComputeSwitchPos(unittest.TestCase): + # TODO detailed topo: not tested in case of shunt + def _aux_read_case(self, case_id): + path_data = os.path.join(PATH_DATA_TEST, "test_detailed_topo") + switches = pd.read_csv(os.path.join(path_data, f"test_topo_connections{case_id}.txt"), + sep=" ") + elements = pd.read_csv(os.path.join(path_data, f"test_topo_elements{case_id}.txt"), + sep=" ") + target_bus = pd.read_csv(os.path.join(path_data, f"test_topo_valid{case_id}.txt"), + sep=" ") + dtd = DetailedTopoDescription() + dtd._n_sub = 1 + all_nodes = np.unique(np.concatenate((switches["node1"].values, switches["node2"].values))) + nb_switch = switches.shape[0] + dtd.conn_node_name = np.array([None for _ in all_nodes], dtype=str) + dtd.conn_node_to_subid = np.zeros(len(all_nodes), dtype=int) + dtd.switches = np.zeros((nb_switch, 3), dtype=int) + dtd.switches[:, 0] = 0 + dtd.switches[:, 1] = switches["node1"].values + dtd.switches[:, 2] = switches["node2"].values + # fill the elements + # we do as if everything is a line here + dtd.load_to_conn_node_id = np.array([], dtype=int) + dtd.gen_to_conn_node_id = np.array([], dtype=int) + dtd.line_ex_to_conn_node_id = np.array([], dtype=int) + dtd.storage_to_conn_node_id = np.array([], dtype=int) + dtd.shunt_to_conn_node_id = np.array([], dtype=int) + # now fill the line part + mask_el = elements["element_id"] == "'el'" + dtd.line_or_to_conn_node_id = elements["node"].loc[mask_el].values + # assign the topo vect infoconn_node_to_shunt_id + dtd.conn_node_to_topovect_id = np.zeros(len(all_nodes), dtype=int) - 1 + dtd.conn_node_to_topovect_id[dtd.line_or_to_conn_node_id] = np.arange(dtd.line_or_to_conn_node_id.shape[0]) + dtd.conn_node_to_shunt_id = np.array([]) + + # fill the busbars + mask_el = elements["element_id"] == "'bbs'" + dtd.busbar_section_to_conn_node_id = elements["node"].loc[mask_el].values + dtd.busbar_section_to_subid = np.zeros(dtd.busbar_section_to_conn_node_id.shape[0], dtype=int) + dtd._from_ieee_grid = False + + # now get the results + small_df = target_bus.loc[np.isin(target_bus["node"], dtd.line_or_to_conn_node_id)] + results = np.zeros(dtd.line_or_to_conn_node_id.shape[0], dtype=int) -1 + for line_id in range(dtd.line_or_to_conn_node_id.shape[0]): + results[line_id] = small_df.loc[small_df["node"] == dtd.line_or_to_conn_node_id[line_id], "bus_id"].values[0] + results[results >= 0] += 1 # encoding starts at 0 for input data + + # specific because it's not checked + dtd._dim_topo = dtd.line_or_to_conn_node_id.shape[0] + dtd._n_shunt = 0 + dtd._n_sub = 1 + return dtd, results + + def setUp(self): + super().setUp() + + def _aux_test_switch_topo(self, dtd, results, switches, extra_str=""): + graph = nx.Graph() + graph.add_edges_from([(el[1], el[2], {"id": switch_id}) for switch_id, el in enumerate(dtd.switches) if switches[switch_id]]) + tmp = list(nx.connected_components(graph)) + expected_buses = np.unique(results[results != -1]) + assert len(tmp) == expected_buses.shape[0], f"found {len(tmp)} buses when asking for {np.unique(results).shape[0]}" + # check that element in results connected together are connected together + # and check that the elements that are not connected together are not + for el_1 in range(results.shape[0]): + th_bus_1 = results[el_1] 
+ conn_bus_1 = dtd.line_or_to_conn_node_id[el_1] + conn_comp1 = np.array([conn_bus_1 in el for el in tmp]).nonzero()[0] + if th_bus_1 == -1: + assert conn_comp1.shape[0] == 0, f"Error for element {el_1}: it should be disconnected but does not appear to be" + continue + for el_2 in range(el_1 + 1, results.shape[0]): + th_bus_2 = results[el_2] + conn_bus_2 = dtd.line_or_to_conn_node_id[el_2] + conn_comp2 = np.array([conn_bus_2 in el for el in tmp]).nonzero()[0] + if th_bus_2 == -1: + assert conn_comp2.shape[0] == 0, f"Error for element {el_2}: it should be disconnected but does not appear to be" + elif th_bus_1 == th_bus_2: + # disconnected element should not be together + assert conn_comp1 == conn_comp2, f"Error for elements: {el_1} and {el_2}: they should be on the same bus but are not, {extra_str}" + else: + assert conn_comp1 != conn_comp2, f"Error for elements: {el_1} and {el_2}: they should NOT be on the same bus but they are, {extra_str}" + + def test_case1_standard(self): + """test I can compute this for the reference test case""" + dtd, results = self._aux_read_case("1") + dtd._aux_compute_busbars_sections() + switches = dtd.compute_switches_position(results) + self._aux_test_switch_topo(dtd, results, switches) + + def test_case1_all_samebus(self): + """test I can connect every element to the same bus, even if the said bus is not 1""" + dtd, results = self._aux_read_case("1") + dtd._aux_compute_busbars_sections() + for bus in range(dtd.busbar_section_to_subid.shape[0]): + results[:] = bus + 1 + switches = dtd.compute_switches_position(results) + self._aux_test_switch_topo(dtd, results, switches) + + def test_case1_impossible_toomuch_buses(self): + """test that when someone ask to connect something to a bus too high (too many buses) then it does not work""" + dtd, results = self._aux_read_case("1") + dtd._aux_compute_busbars_sections() + bus_id_too_high = dtd.busbar_section_to_subid.shape[0] + 1 + for el_id in range(len(results)): + els = np.array(list(dtd._conn_node_to_bbs_conn_node_id[dtd.line_or_to_conn_node_id[el_id]])) + results[el_id] = (dtd.busbar_section_to_conn_node_id == els[el_id % len(els)]).nonzero()[0][0] + 1 + # test that it works in general case with all possible buses + switches = dtd.compute_switches_position(results) + self._aux_test_switch_topo(dtd, results, switches) + + # now test that it breaks if the index of a bus it too high + for el_id in range(len(results)): + tmp = 1 * results + tmp[el_id] = bus_id_too_high + with self.assertRaises(ImpossibleTopology): + switches = dtd.compute_switches_position(tmp) + + def test_case1_impossible_connectivity(self): + """test for some more cases where it would be impossible (forced to connect busbar breaker + for some elements but not for others)""" + dtd, results = self._aux_read_case("1") + dtd._aux_compute_busbars_sections() + results[0] = 1 # to force busbar sec 0 + results[1] = 2 # to force busbar sec 1 + results[2] = 3 # to force busbar sec 3 + results[3] = 4 # to force busbar sec 4 + results[4] = 2 # is directly connected to busbar sec 1 or 3, in this first example I force it to 1 + + # now i force every element to a busbar to which it is directly connected + # so as to make sure it works + for el_id in range(4, len(results)): + els = np.array(list(dtd._conn_node_to_bbs_conn_node_id[dtd.line_or_to_conn_node_id[el_id]])) + results[el_id] = (dtd.busbar_section_to_conn_node_id == els[0]).nonzero()[0][0] + 1 + # should work + switches = dtd.compute_switches_position(results) + self._aux_test_switch_topo(dtd, results, 
switches) + + # here I force to connect bbs 1 or 3 to bbs 0 + # which contradicts the 4 other constraints above + results[4] = 1 + with self.assertRaises(ImpossibleTopology): + switches = dtd.compute_switches_position(results) + + def test_case1_with_disconnected_element(self): + dtd, results = self._aux_read_case("1") + dtd._aux_compute_busbars_sections() + # disconnect element one by one and check it works + for el_id in range(len(results)): + tmp = 1 * results + tmp[el_id] = -1 + switches = dtd.compute_switches_position(tmp) + self._aux_test_switch_topo(dtd, tmp, switches, f"when disconnecting element {el_id}") + + +if __name__ == "__main__": + unittest.main() + \ No newline at end of file diff --git a/grid2op/tests/test_detailed_topo.py b/grid2op/tests/test_detailed_topo.py index de5880d00..1f264fc61 100644 --- a/grid2op/tests/test_detailed_topo.py +++ b/grid2op/tests/test_detailed_topo.py @@ -12,75 +12,121 @@ import hashlib import grid2op +from grid2op.dtypes import dt_bool from grid2op.Action import BaseAction, CompleteAction from grid2op.Observation import BaseObservation from grid2op.Runner import Runner from grid2op.Backend import PandaPowerBackend -from grid2op.Space import DetailedTopoDescription +from grid2op.Space import AddDetailedTopoIEEE, DetailedTopoDescription from grid2op.Agent import BaseAgent - +from grid2op.Exceptions import AmbiguousAction import pdb -REF_HASH = '7d79e8debc7403dae95bd95a023d5627a8a760e34bb26e3adfd2b842446830d455b53aeb5d89276b0e431f9022dc1c73e77ff3ecb10df0f60aaaf65754bbdf87' +REF_HASH = 'e5ccf7cbe54cb567eec33bfd738052f81dc5ac9a1ea2cd391d98f95f804a1273d0efac3d4e00aed9a43abf6ce8bf3fc3487a6c870bd6386dd7a84c3fa8344d99' -def _aux_test_correct(detailed_topo_desc, dim_topo): - assert detailed_topo_desc is not None - assert detailed_topo_desc.load_to_busbar_id == [ - (1, 15), (2, 16), (3, 17), (4, 18), (5, 19), (8, 22), (9, 23), (10, 24), (11, 25), (12, 26), (13, 27) - ] - assert detailed_topo_desc.gen_to_busbar_id == [(1, 15), (2, 16), (5, 19), (5, 19), (7, 21), (0, 14)] +def _aux_test_correct(detailed_topo_desc : DetailedTopoDescription, gridobj, nb_bb_per_sub): + if nb_bb_per_sub == 2: + assert detailed_topo_desc is not None + assert (detailed_topo_desc.load_to_conn_node_id == np.array([28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38], dtype=np.int32)).all() + assert (detailed_topo_desc.gen_to_conn_node_id == np.array([39, 40, 41, 42, 43, 44], dtype=np.int32)).all() + assert (detailed_topo_desc.line_or_to_conn_node_id == np.array([45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 59, 60, 61, 62, 63, 64], dtype=np.int32)).all() + + # test the switches (but i don't want to copy this huge data here) + assert (detailed_topo_desc.switches.sum(axis=0) == np.array([1159, 17732, 8730])).all() + ref_1 = np.array([ 1, 6, 11, 16, 21, 26, 31, 36, 41, 46, 51, 56, 61, + 66, 117, 91, 92, 120, 95, 96, 123, 99, 100, 126, 103, 104, + 129, 107, 108, 134, 117, 118, 137, 121, 122, 140, 125, 126, 143, + 129, 130, 146, 133, 134, 149, 137, 138, 139, 102, 103, 142, 106, + 107, 147, 116, 117, 149, 117, 118, 153, 124, 125, 148, 104, 105, + 150, 105, 106, 152, 106, 107, 155, 110, 111, 157, 111, 112, 159, + 112, 113, 162, 116, 117, 165, 120, 121, 169, 127, 128, 171, 128, + 129, 173, 129, 130, 178, 139, 140, 180, 140, 141, 183, 144, 145, + 187, 151, 152, 190, 155, 156, 183, 129, 130, 185, 130, 131, 188, + 134, 135, 192, 141, 142, 196, 148, 149, 191, 128, 129, 196, 138, + 139, 196, 133, 134, 199, 137, 138, 202, 141, 142, 203, 139, 140, + 206, 143, 144, 214, 162, 163, 217, 166, 167, 
220, 170, 171, 219, + 162, 163, 225, 175, 176, 224, 167, 168, 228, 174, 175, 231, 178, + 179, 226, 158, 159, 230, 165, 166, 229, 157, 158, 233, 164, 165, + 234, 162, 163, 235, 160, 161, 239, 167, 168, 242, 171, 172]) + assert (detailed_topo_desc.switches.sum(axis=1) == ref_1).all() + hash_ = hashlib.blake2b((detailed_topo_desc.switches.tobytes())).hexdigest() + assert hash_ == REF_HASH, f"{hash_}" - # test the switches (but i don't want to copy this huge data here) - assert (detailed_topo_desc.switches.sum(axis=0) == np.array([712, 310, 902, 180])).all() - ref_1 = np.array([ 7, 8, 4, 5, 5, 6, 2, 3, 3, 4, 7, 8, 8, 9, 9, 10, 6, - 7, 4, 5, 5, 6, 11, 12, 9, 10, 6, 7, 13, 14, 22, 23, 23, 24, - 11, 12, 13, 14, 8, 9, 25, 26, 10, 11, 13, 14, 15, 16, 10, 11, 9, - 10, 10, 11, 16, 17, 17, 18, 18, 19, 27, 28, 8, 9, 28, 29, 26, 27, - 30, 31, 13, 14, 30, 31, 11, 12, 14, 15, 22, 23, 23, 24, 31, 32, 29, - 30, 14, 15, 16, 17, 25, 26, 24, 25, 18, 19, 22, 23, 27, 28, 20, 21, - 28, 29, 24, 25, 22, 23, 30, 31, 26, 27, 30, 31, 24, 25, 29, 30, 32, - 33]) - assert (detailed_topo_desc.switches.sum(axis=1) == ref_1).all() - assert hashlib.blake2b((detailed_topo_desc.switches.tobytes())).hexdigest() == REF_HASH, f"{hashlib.blake2b((detailed_topo_desc.switches.tobytes())).hexdigest()}" + assert detailed_topo_desc.switches.shape[0] == (nb_bb_per_sub + 1) * (gridobj.dim_topo + gridobj.n_shunt) + gridobj.n_sub * (nb_bb_per_sub * (nb_bb_per_sub - 1) // 2) - # siwtches to pos topo vect - ref_switches_pos_topo_vect = np.array([ 2, 2, 0, 0, 1, 1, 8, 8, 7, 7, 4, 4, 5, 5, 6, 6, 3, - 3, 12, 12, 11, 11, 10, 10, 9, 9, 18, 18, 15, 15, 16, 16, 17, 17, - 13, 13, 14, 14, 23, 23, 22, 22, 19, 19, 20, 20, 21, 21, 30, 30, 28, - 28, 29, 29, 24, 24, 25, 25, 26, 26, 27, 27, 31, 31, 33, 33, 32, 32, - 34, 34, 36, 36, 35, 35, 37, 37, 42, 42, 38, 38, 39, 39, 41, 41, 40, - 40, -1, -1, 45, 45, 44, 44, 43, 43, 48, 48, 46, 46, 47, 47, 51, 51, - 50, 50, 49, 49, 55, 55, 54, 54, 52, 52, 53, 53, 58, 58, 56, 56, 57, - 57], dtype=np.int32) - for i in range(-1, dim_topo): - assert np.sum(ref_switches_pos_topo_vect == i).sum() == 2, f"error for topo_vect_id = {i}" - assert np.all(detailed_topo_desc.switches_to_topovect_id == ref_switches_pos_topo_vect) + # test the names + cls = type(detailed_topo_desc) + dtd = detailed_topo_desc + n_bb_per_sub = nb_bb_per_sub + el_nm = "load" + nb_el = gridobj.n_load + prev_el = gridobj.n_sub * (nb_bb_per_sub * (nb_bb_per_sub - 1) // 2) + for el_nm, nb_el in zip(["load", "gen", "line_or", "line_ex", "storage", "shunt"], + [gridobj.n_load, gridobj.n_gen, gridobj.n_line, gridobj.n_line, gridobj.n_storage, gridobj.n_shunt]): + next_el = prev_el + nb_el * (1 + n_bb_per_sub) + for i, el in enumerate(dtd.conn_node_name[dtd.switches[prev_el : next_el : (1 + n_bb_per_sub), cls.CONN_NODE_1_ID_COL]]): + assert f"conn_node_{el_nm}_{i}" in el, f"error for what should be the switch connecting conn node to {el_nm} {i} to its conn node breaker" + for i, el in enumerate(dtd.conn_node_name[dtd.switches[prev_el : next_el : (1 + n_bb_per_sub), cls.CONN_NODE_2_ID_COL]]): + assert f"conn_node_breaker_{el_nm}_{i}" in el, f"error for what should be the switch connecting conn node to {el_nm} {i} to its conn node breaker" + + for bb_i in range(1, n_bb_per_sub + 1): + assert (dtd.conn_node_name[dtd.switches[prev_el : next_el : (1 + n_bb_per_sub), cls.CONN_NODE_2_ID_COL]] == + dtd.conn_node_name[dtd.switches[(prev_el + bb_i) : next_el : (1 + n_bb_per_sub), cls.CONN_NODE_1_ID_COL]]).all(), ( + f"Error for what should connect a {el_nm} breaker 
connection node to busbar {bb_i}") + + for i, el in enumerate(dtd.conn_node_name[dtd.switches[(prev_el + bb_i) : next_el : (1 + n_bb_per_sub), cls.CONN_NODE_2_ID_COL]]): + assert f"busbar_{bb_i-1}" in el, f"error for what should be the switch connecting conn node {el_nm} {i} (its breaker) to busbar {bb_i}" + prev_el = next_el + + # siwtches to pos topo vect + # TODO detailed topo + # ref_switches_pos_topo_vect = np.array([ 2, 2, 0, 0, 1, 1, 8, 8, 7, 7, 4, 4, 5, 5, 6, 6, 3, + # 3, 12, 12, 11, 11, 10, 10, 9, 9, 18, 18, 15, 15, 16, 16, 17, 17, + # 13, 13, 14, 14, 23, 23, 22, 22, 19, 19, 20, 20, 21, 21, 30, 30, 28, + # 28, 29, 29, 24, 24, 25, 25, 26, 26, 27, 27, 31, 31, 33, 33, 32, 32, + # 34, 34, 36, 36, 35, 35, 37, 37, 42, 42, 38, 38, 39, 39, 41, 41, 40, + # 40, -1, -1, 45, 45, 44, 44, 43, 43, 48, 48, 46, 46, 47, 47, 51, 51, + # 50, 50, 49, 49, 55, 55, 54, 54, 52, 52, 53, 53, 58, 58, 56, 56, 57, + # 57], dtype=np.int32) + # for i in range(-1, dim_topo): + # assert np.sum(ref_switches_pos_topo_vect == i).sum() == 2, f"error for topo_vect_id = {i}" + # assert np.all(detailed_topo_desc.switches_to_topovect_id == ref_switches_pos_topo_vect) -class _PPBkForTestDetTopo(PandaPowerBackend): - def load_grid(self, path=None, filename=None): - super().load_grid(path, filename) - self.detailed_topo_desc = DetailedTopoDescription.from_init_grid(self) + +class _PPBkForTestDetTopo(AddDetailedTopoIEEE, PandaPowerBackend): + pass class TestDTDAgent(BaseAgent): def act(self, observation: BaseObservation, reward: float, done: bool = False) -> BaseAction: - _aux_test_correct(type(observation).detailed_topo_desc, type(observation).dim_topo) + _aux_test_correct(type(observation).detailed_topo_desc, type(observation), type(observation).n_busbar_per_sub) return super().act(observation, reward, done) class DetailedTopoTester(unittest.TestCase): + def _aux_n_bb_per_sub(self): + return 2 + def setUp(self) -> None: + n_bb_per_sub = self._aux_n_bb_per_sub() with warnings.catch_warnings(): warnings.filterwarnings("ignore") self.env = grid2op.make( - "educ_case14_storage", - test=True, - backend=_PPBkForTestDetTopo(), - action_class=CompleteAction, - _add_to_name="DetailedTopoTester", - ) - return super().setUp() + "educ_case14_storage", + n_busbar=n_bb_per_sub, + test=True, + backend=_PPBkForTestDetTopo(), + action_class=CompleteAction, + _add_to_name=f"{type(self).__name__}_{n_bb_per_sub}", + ) + if isinstance(self, DetailedTopoTester): + # weird hack I am doing: I reuse the same method + # from another class + # to initialize the same env, but by doing so, I cannot + # "super" + return super().setUp() def tearDown(self) -> None: self.env.close() @@ -88,13 +134,13 @@ def tearDown(self) -> None: def test_init_ok(self): obs = self.env.reset() - _aux_test_correct(type(obs).detailed_topo_desc, type(obs).dim_topo) + _aux_test_correct(type(obs).detailed_topo_desc, type(obs), self._aux_n_bb_per_sub()) def test_work_simulate(self): obs = self.env.reset() - _aux_test_correct(type(obs).detailed_topo_desc, type(obs).dim_topo) + _aux_test_correct(type(obs).detailed_topo_desc, type(obs), self._aux_n_bb_per_sub()) sim_o, *_ = obs.simulate(self.env.action_space()) - _aux_test_correct(type(sim_o).detailed_topo_desc, type(sim_o).dim_topo) + _aux_test_correct(type(sim_o).detailed_topo_desc, type(sim_o), self._aux_n_bb_per_sub()) def test_runner_seq(self): obs = self.env.reset() @@ -112,174 +158,597 @@ def test_env_cpy(self): obs = self.env.reset() env_cpy = self.env.copy() obs_cpy = env_cpy.reset() - 
_aux_test_correct(type(obs_cpy).detailed_topo_desc, type(obs_cpy).dim_topo) + _aux_test_correct(type(obs_cpy).detailed_topo_desc, type(obs_cpy), self._aux_n_bb_per_sub()) - def test_get_loads_bus_switches(self): - """test I can acess the loads and also that the results is correctly computed by _backendaction._aux_get_bus_detailed_topo""" - obs = self.env.reset() - bk_act = self.env._backend_action - # nothing modified - loads_switches = bk_act.get_loads_bus_switches() - assert loads_switches == [] - - # I modified the position of a load - bk_act += self.env.action_space({"set_bus": {"loads_id": [(1, 2)]}}) - loads_switches = bk_act.get_loads_bus_switches() - assert loads_switches == [(1, (False, True))] # modified load 1, first switch is opened (False) second one is closed (True) - - # I modified the position of a load - bk_act += self.env.action_space({"set_bus": {"loads_id": [(1, 1)]}}) - loads_switches = bk_act.get_loads_bus_switches() - assert loads_switches == [(1, (True, False))] # modified load 1, first switch is closed (True) second one is opened (False) - - # I disconnect a load - bk_act += self.env.action_space({"set_bus": {"loads_id": [(1, -1)]}}) - loads_switches = bk_act.get_loads_bus_switches() - assert loads_switches == [(1, (False, False))] # modified load 1, first switch is closed (False) second one is opened (False) - - def test_get_xxx_bus_switches(self): - """test I can retrieve the switch of all the element types""" - - # generators - obs = self.env.reset() - bk_act = self.env._backend_action - els_switches = bk_act.get_gens_bus_switches() - assert els_switches == [] - bk_act += self.env.action_space({"set_bus": {"generators_id": [(1, 1)]}}) - els_switches = bk_act.get_gens_bus_switches() - assert els_switches == [(1, (True, False))] # modified gen 1, first switch is closed (True) second one is opened (False) - - # line or - obs = self.env.reset() - bk_act = self.env._backend_action - els_switches = bk_act.get_lines_or_bus_switches() - assert els_switches == [] - bk_act += self.env.action_space({"set_bus": {"lines_or_id": [(1, 1)]}}) - els_switches = bk_act.get_lines_or_bus_switches() - assert els_switches == [(1, (True, False))] # modified line or 1, first switch is closed (True) second one is opened (False) - - # line ex - obs = self.env.reset() - bk_act = self.env._backend_action - els_switches = bk_act.get_lines_ex_bus_switches() - assert els_switches == [] - bk_act += self.env.action_space({"set_bus": {"lines_ex_id": [(1, 1)]}}) - els_switches = bk_act.get_lines_ex_bus_switches() - assert els_switches == [(1, (True, False))] # modified line ex 1, first switch is closed (True) second one is opened (False) - - # storage - obs = self.env.reset() - bk_act = self.env._backend_action - els_switches = bk_act.get_storages_bus_switches() - assert els_switches == [] - bk_act += self.env.action_space({"set_bus": {"storages_id": [(1, 1)]}}) - els_switches = bk_act.get_storages_bus_switches() - assert els_switches == [(1, (True, False))] # modified storage 1, first switch is closed (True) second one is opened (False) - - # shunt - obs = self.env.reset() - bk_act = self.env._backend_action - els_switches = bk_act.get_shunts_bus_switches() - assert els_switches == [] - bk_act += self.env.action_space({"shunt": {"set_bus": [(0, 1)]}}) - els_switches = bk_act.get_shunts_bus_switches() - assert els_switches == [(0, (True, False))] # modified shunt 0, first switch is closed (True) second one is opened (False) - def test_compute_switches_position(self): + nb_busbar = 
self._aux_n_bb_per_sub() + start_id = (nb_busbar * (nb_busbar - 1) // 2) * type(self.env).n_sub + obs = self.env.reset() - busbar_connectors_state, switches_state = type(obs).detailed_topo_desc.compute_switches_position(obs.topo_vect, obs._shunt_bus) - assert np.sum(switches_state) == 60 - assert switches_state[::2].all() # all on bus 1 - assert (~switches_state[1::2]).all() # nothing on busbar 2 + dtd = type(obs).detailed_topo_desc + switches_state = dtd.compute_switches_position(obs.topo_vect, obs._shunt_bus) + assert switches_state.sum() == 120 + assert switches_state[start_id::(nb_busbar + 1)].all() # all connected + assert switches_state[(start_id + 1)::(nb_busbar + 1)].all() # all on bus 1 + assert (~switches_state[(start_id + 2)::(nb_busbar + 1)]).all() # nothing on busbar 2 # move everything to bus 2 - busbar_connectors_state, switches_state = type(obs).detailed_topo_desc.compute_switches_position(np.full(obs.topo_vect.shape, fill_value=2), - np.full(obs._shunt_bus.shape, fill_value=2)) - assert np.sum(switches_state) == 60 - assert switches_state[1::2].all() - assert (~switches_state[::2]).all() + switches_state = dtd.compute_switches_position(np.full(obs.topo_vect.shape, fill_value=2), + np.full(obs._shunt_bus.shape, fill_value=2)) + assert switches_state.sum() == 120 + assert switches_state[start_id::(nb_busbar + 1)].all() # all connected + assert switches_state[(start_id + 2)::(nb_busbar + 1)].all() # all on busbar 2 + assert (~switches_state[(start_id + 1)::(nb_busbar + 1)]).all() # nothing on busbar 1 - # now check some disconnected elements (line id 0) + # now check some disconnected elements (*eg* line id 0) topo_vect = 1 * obs.topo_vect topo_vect[type(obs).line_or_pos_topo_vect[0]] = -1 topo_vect[type(obs).line_ex_pos_topo_vect[0]] = -1 - busbar_connectors_state, switches_state = type(obs).detailed_topo_desc.compute_switches_position(topo_vect, obs._shunt_bus) - assert np.sum(switches_state) == 58 - assert switches_state[::2].sum() == 58 - assert switches_state[1::2].sum() == 0 - assert (~switches_state[type(obs).detailed_topo_desc.switches_to_topovect_id == type(obs).line_or_pos_topo_vect[0]]).all() - assert (~switches_state[type(obs).detailed_topo_desc.switches_to_topovect_id == type(obs).line_ex_pos_topo_vect[0]]).all() + switches_state = dtd.compute_switches_position(topo_vect, obs._shunt_bus) + # quickly check other elements + assert switches_state.sum() == 116 + assert switches_state[start_id::(nb_busbar + 1)].sum() == 58 + assert switches_state[(start_id + 1)::(nb_busbar + 1)].sum() == 58 # busbar 1 + assert switches_state[(start_id + 2)::(nb_busbar + 1)].sum() == 0 # busbar 2 + id_switch_or = dtd.get_switch_id_ieee(dtd.line_or_to_conn_node_id[0]) + id_switch_ex = dtd.get_switch_id_ieee(dtd.line_ex_to_conn_node_id[0]) + assert (~switches_state[id_switch_or:(id_switch_or + nb_busbar + 1)]).all() + assert (~switches_state[id_switch_ex:(id_switch_ex + nb_busbar + 1)]).all() # and now elements per elements # load 3 to bus 2 topo_vect = 1 * obs.topo_vect topo_vect[type(obs).load_pos_topo_vect[3]] = 2 - busbar_connectors_state, switches_state = type(obs).detailed_topo_desc.compute_switches_position(topo_vect, obs._shunt_bus) - assert np.sum(switches_state) == 60 - assert switches_state[::2].sum() == 59 - assert switches_state[1::2].sum() == 1 - assert not switches_state[type(obs).detailed_topo_desc.switches_to_topovect_id == type(obs).load_pos_topo_vect[3]][0] - assert switches_state[type(obs).detailed_topo_desc.switches_to_topovect_id == 
type(obs).load_pos_topo_vect[3]][1] + switches_state = dtd.compute_switches_position(topo_vect, obs._shunt_bus) + assert switches_state.sum() == 120 + assert switches_state[start_id::(nb_busbar + 1)].sum() == 60 + assert switches_state[(start_id + 1)::(nb_busbar + 1)].sum() == 59 # busbar 1 + assert switches_state[(start_id + 2)::(nb_busbar + 1)].sum() == 1 # busbar 2 + id_switch = dtd.get_switch_id_ieee(dtd.load_to_conn_node_id[3]) + assert switches_state[id_switch:(id_switch + nb_busbar + 1)].sum() == 2 # only 2 switches closed + assert switches_state[id_switch + 2] # busbar 2 # gen 1 to bus 2 topo_vect = 1 * obs.topo_vect topo_vect[type(obs).gen_pos_topo_vect[1]] = 2 - busbar_connectors_state, switches_state = type(obs).detailed_topo_desc.compute_switches_position(topo_vect, obs._shunt_bus) - assert np.sum(switches_state) == 60 - assert switches_state[::2].sum() == 59 - assert switches_state[1::2].sum() == 1 - assert not switches_state[type(obs).detailed_topo_desc.switches_to_topovect_id == type(obs).gen_pos_topo_vect[1]][0] - assert switches_state[type(obs).detailed_topo_desc.switches_to_topovect_id == type(obs).gen_pos_topo_vect[1]][1] + switches_state = dtd.compute_switches_position(topo_vect, obs._shunt_bus) + assert switches_state.sum() == 120 + assert switches_state[start_id::(nb_busbar + 1)].sum() == 60 + assert switches_state[(start_id + 1)::(nb_busbar + 1)].sum() == 59 # busbar 1 + assert switches_state[(start_id + 2)::(nb_busbar + 1)].sum() == 1 # busbar 2 + id_switch = dtd.get_switch_id_ieee(dtd.gen_to_conn_node_id[1]) + assert switches_state[id_switch:(id_switch + nb_busbar + 1)].sum() == 2 # only 2 switches closed + assert switches_state[id_switch + 2] # busbar 2 # line or 6 to bus 2 topo_vect = 1 * obs.topo_vect el_id = 6 topo_vect[type(obs).line_or_pos_topo_vect[el_id]] = 2 - busbar_connectors_state, switches_state = type(obs).detailed_topo_desc.compute_switches_position(topo_vect, obs._shunt_bus) - assert np.sum(switches_state) == 60 - assert switches_state[::2].sum() == 59 - assert switches_state[1::2].sum() == 1 - assert not switches_state[type(obs).detailed_topo_desc.switches_to_topovect_id == type(obs).line_or_pos_topo_vect[el_id]][0] - assert switches_state[type(obs).detailed_topo_desc.switches_to_topovect_id == type(obs).line_or_pos_topo_vect[el_id]][1] + switches_state = dtd.compute_switches_position(topo_vect, obs._shunt_bus) + assert switches_state.sum() == 120 + assert switches_state[start_id::(nb_busbar + 1)].sum() == 60 + assert switches_state[(start_id + 1)::(nb_busbar + 1)].sum() == 59 # busbar 1 + assert switches_state[(start_id + 2)::(nb_busbar + 1)].sum() == 1 # busbar 2 + id_switch = dtd.get_switch_id_ieee(dtd.line_or_to_conn_node_id[el_id]) + assert switches_state[id_switch:(id_switch + nb_busbar + 1)].sum() == 2 # only 2 switches closed + assert switches_state[id_switch + 2] # busbar 2 # line ex 9 to bus 2 topo_vect = 1 * obs.topo_vect el_id = 9 topo_vect[type(obs).line_ex_pos_topo_vect[el_id]] = 2 - busbar_connectors_state, switches_state = type(obs).detailed_topo_desc.compute_switches_position(topo_vect, obs._shunt_bus) - assert np.sum(switches_state) == 60 - assert switches_state[::2].sum() == 59 - assert switches_state[1::2].sum() == 1 - assert not switches_state[type(obs).detailed_topo_desc.switches_to_topovect_id == type(obs).line_ex_pos_topo_vect[el_id]][0] - assert switches_state[type(obs).detailed_topo_desc.switches_to_topovect_id == type(obs).line_ex_pos_topo_vect[el_id]][1] + switches_state = dtd.compute_switches_position(topo_vect, 
obs._shunt_bus) + assert switches_state.sum() == 120 + assert switches_state[start_id::(nb_busbar + 1)].sum() == 60 + assert switches_state[(start_id + 1)::(nb_busbar + 1)].sum() == 59 # busbar 1 + assert switches_state[(start_id + 2)::(nb_busbar + 1)].sum() == 1 # busbar 2 + id_switch = dtd.get_switch_id_ieee(dtd.line_ex_to_conn_node_id[el_id]) + assert switches_state[id_switch:(id_switch + nb_busbar + 1)].sum() == 2 # only 2 switches closed + assert switches_state[id_switch + 2] # busbar 2 # storage 0 to bus 2 topo_vect = 1 * obs.topo_vect el_id = 0 topo_vect[type(obs).storage_pos_topo_vect[el_id]] = 2 - busbar_connectors_state, switches_state = type(obs).detailed_topo_desc.compute_switches_position(topo_vect, obs._shunt_bus) - assert np.sum(switches_state) == 60 - assert switches_state[::2].sum() == 59 - assert switches_state[1::2].sum() == 1 - assert not switches_state[type(obs).detailed_topo_desc.switches_to_topovect_id == type(obs).storage_pos_topo_vect[el_id]][0] - assert switches_state[type(obs).detailed_topo_desc.switches_to_topovect_id == type(obs).storage_pos_topo_vect[el_id]][1] + switches_state = dtd.compute_switches_position(topo_vect, obs._shunt_bus) + assert switches_state.sum() == 120 + assert switches_state[start_id::(nb_busbar + 1)].sum() == 60 + assert switches_state[(start_id + 1)::(nb_busbar + 1)].sum() == 59 # busbar 1 + assert switches_state[(start_id + 2)::(nb_busbar + 1)].sum() == 1 # busbar 2 + id_switch = dtd.get_switch_id_ieee(dtd.storage_to_conn_node_id[el_id]) + assert switches_state[id_switch:(id_switch + nb_busbar + 1)].sum() == 2 # only 2 switches closed + assert switches_state[id_switch + 2] # busbar 2 # shunt 0 to bus 2 shunt_bus = 1 * obs._shunt_bus el_id = 0 shunt_bus[el_id] = 2 - busbar_connectors_state, switches_state = type(obs).detailed_topo_desc.compute_switches_position(obs.topo_vect, shunt_bus) - assert np.sum(switches_state) == 60 - assert switches_state[::2].sum() == 59 - assert switches_state[1::2].sum() == 1 - assert not switches_state[type(obs).detailed_topo_desc.switches_to_shunt_id == el_id][0] - assert switches_state[type(obs).detailed_topo_desc.switches_to_shunt_id == el_id][1] + switches_state = dtd.compute_switches_position(obs.topo_vect, shunt_bus) + assert switches_state.sum() == 120 + assert switches_state[start_id::(nb_busbar + 1)].sum() == 60 + assert switches_state[(start_id + 1)::(nb_busbar + 1)].sum() == 59 # busbar 1 + assert switches_state[(start_id + 2)::(nb_busbar + 1)].sum() == 1 # busbar 2 + id_switch = dtd.get_switch_id_ieee(dtd.shunt_to_conn_node_id[el_id]) + assert switches_state[id_switch:(id_switch + nb_busbar + 1)].sum() == 2 # only 2 switches closed + assert switches_state[id_switch + 2] # busbar 2 + + def test_get_all_switches(self): + """test I can use bkact.get_all_switches""" + nb_busbar = self._aux_n_bb_per_sub() + start_id = (nb_busbar * (nb_busbar - 1) // 2) * type(self.env).n_sub - # TODO detailed topo + obs = self.env.reset() + bk_act = self.env._backend_action + dtd = type(obs).detailed_topo_desc + + # nothing modified + switches_state = bk_act.get_all_switches() + assert switches_state.sum() == 120 + assert switches_state[start_id::(nb_busbar + 1)].all() # all connected + assert switches_state[(start_id + 1)::(nb_busbar + 1)].all() # all on bus 1 + assert (~switches_state[(start_id + 2)::(nb_busbar + 1)]).all() # nothing on busbar 2 + + # I modified the position of a "regular" element load 1 for the sake of the example + bk_act += self.env.action_space({"set_bus": {"loads_id": [(1, 2)]}}) + 
switches_state = bk_act.get_all_switches() + assert switches_state.sum() == 120 + assert switches_state[start_id::(nb_busbar + 1)].sum() == 60 + assert switches_state[(start_id + 1)::(nb_busbar + 1)].sum() == 59 # busbar 1 + assert switches_state[(start_id + 2)::(nb_busbar + 1)].sum() == 1 # busbar 2 + id_switch = dtd.get_switch_id_ieee(dtd.load_to_conn_node_id[1]) + assert switches_state[id_switch:(id_switch + nb_busbar + 1)].sum() == 2 # only 2 switches closed + assert switches_state[id_switch + 2] # busbar 2 + + # I disconnect it + bk_act += self.env.action_space({"set_bus": {"loads_id": [(1, -1)]}}) + switches_state = bk_act.get_all_switches() + assert switches_state.sum() == 118 + assert switches_state[start_id::(nb_busbar + 1)].sum() == 59 + assert switches_state[(start_id + 1)::(nb_busbar + 1)].sum() == 59 # busbar 1 + assert switches_state[(start_id + 2)::(nb_busbar + 1)].sum() == 0 # busbar 2 + id_switch = dtd.get_switch_id_ieee(dtd.load_to_conn_node_id[1]) + assert switches_state[id_switch:(id_switch + nb_busbar + 1)].sum() == 0 # only 2 switches closed + bk_act += self.env.action_space({"set_bus": {"loads_id": [(1, 1)]}}) + + # I modify the position of a shunt (a bit special) + bk_act += self.env.action_space({"shunt": {"set_bus": [(0, 2)]}}) + switches_state = bk_act.get_all_switches() + assert switches_state.sum() == 120 + assert switches_state[start_id::(nb_busbar + 1)].sum() == 60 + assert switches_state[(start_id + 1)::(nb_busbar + 1)].sum() == 59 # busbar 1 + assert switches_state[(start_id + 2)::(nb_busbar + 1)].sum() == 1 # busbar 2 + id_switch = dtd.get_switch_id_ieee(dtd.shunt_to_conn_node_id[0]) + assert switches_state[id_switch:(id_switch + nb_busbar + 1)].sum() == 2 # only 2 switches closed + assert switches_state[id_switch + 2] # busbar 2 + + # I disconnect it + bk_act += self.env.action_space({"shunt": {"set_bus": [(0, -1)]}}) + switches_state = bk_act.get_all_switches() + assert switches_state.sum() == 118 + assert switches_state[start_id::(nb_busbar + 1)].sum() == 59 + assert switches_state[(start_id + 1)::(nb_busbar + 1)].sum() == 59 # busbar 1 + assert switches_state[(start_id + 2)::(nb_busbar + 1)].sum() == 0 # busbar 2 + id_switch = dtd.get_switch_id_ieee(dtd.shunt_to_conn_node_id[0]) + assert switches_state[id_switch:(id_switch + nb_busbar + 1)].sum() == 0 # only 2 switches closed + + # set back it back to its original position + bk_act += self.env.action_space({"shunt": {"set_bus": [(0, 1)]}}) + switches_state = bk_act.get_all_switches() + assert switches_state.sum() == 120 + assert switches_state[start_id::(nb_busbar + 1)].sum() == 60 + assert switches_state[(start_id + 1)::(nb_busbar + 1)].sum() == 60 # busbar 1 + assert switches_state[(start_id + 2)::(nb_busbar + 1)].sum() == 0 # busbar 2 + id_switch = dtd.get_switch_id_ieee(dtd.shunt_to_conn_node_id[0]) + assert switches_state[id_switch:(id_switch + nb_busbar + 1)].sum() == 2 # only 2 switches closed + assert switches_state[id_switch + 1] # busbar 1 + + # then I disconnect a powerline (check that both ends are disconnected) + bk_act += self.env.action_space({"set_bus": {"lines_or_id": [(3, -1)]}}) + switches_state = bk_act.get_all_switches() + assert switches_state.sum() == 116 + assert switches_state[start_id::(nb_busbar + 1)].sum() == 58 + assert switches_state[(start_id + 1)::(nb_busbar + 1)].sum() == 58 # busbar 1 + assert switches_state[(start_id + 2)::(nb_busbar + 1)].sum() == 0 # busbar 2 + id_switch_or = dtd.get_switch_id_ieee(dtd.line_or_to_conn_node_id[3]) + id_switch_ex = 
dtd.get_switch_id_ieee(dtd.line_ex_to_conn_node_id[3]) + assert (~switches_state[id_switch_or:(id_switch_or + nb_busbar + 1)]).all() + assert (~switches_state[id_switch_ex:(id_switch_ex + nb_busbar + 1)]).all() + + def test_from_switches_position_basic(self): + nb_busbar = self._aux_n_bb_per_sub() + start_id = (nb_busbar * (nb_busbar - 1) // 2) * type(self.env).n_sub + dtd = type(self.env).detailed_topo_desc + + # all connected + switches_state = np.ones(dtd.switches.shape[0], dtype=dt_bool) + topo_vect, shunt_bus = dtd.from_switches_position(switches_state) + assert (topo_vect == 1).all() + assert (shunt_bus == 1).all() + + # connect all to bus 1 + switches_state = np.ones(dtd.switches.shape[0], dtype=dt_bool) + switches_state[:start_id] = False + for bb_id in range(1, nb_busbar + 1): + if bb_id == 1: + # busbar 1 + switches_state[(start_id + bb_id)::(nb_busbar + 1)] = True + else: + # busbar 2 or more + switches_state[(start_id + bb_id)::(nb_busbar + 1)] = False + topo_vect, shunt_bus = dtd.from_switches_position(switches_state) + assert (topo_vect == 1).all() + assert (shunt_bus == 1).all() + + # connect all to bus 2 + switches_state = np.ones(dtd.switches.shape[0], dtype=dt_bool) + switches_state[:start_id] = False + for bb_id in range(1, nb_busbar + 1): + if bb_id == 2: + # busbar 2 + switches_state[(start_id + bb_id)::(nb_busbar + 1)] = True + else: + # other busbars + switches_state[(start_id + bb_id)::(nb_busbar + 1)] = False + topo_vect, shunt_bus = dtd.from_switches_position(switches_state) + assert (topo_vect == 2).all() + assert (shunt_bus == 2).all() + + # connect all el to busbar 2, but connect all busbar together + switches_state = np.ones(dtd.switches.shape[0], dtype=dt_bool) + switches_state[:start_id] = True # connect all busbars together + for bb_id in range(1, nb_busbar + 1): + if bb_id == 2: + # busbar 2 + switches_state[(start_id + bb_id)::(nb_busbar + 1)] = True + else: + # other busbars + switches_state[(start_id + bb_id)::(nb_busbar + 1)] = False + topo_vect, shunt_bus = dtd.from_switches_position(switches_state) + assert (topo_vect == 1).all() + assert (shunt_bus == 1).all() + + # connect all el to busbar 1, but disconnect the element with their breaker + switches_state = np.ones(dtd.switches.shape[0], dtype=dt_bool) + switches_state[:start_id] = True # connect all busbars together + switches_state[(start_id)::(nb_busbar + 1)] = False # breaker + for bb_id in range(1, nb_busbar + 1): + if bb_id == 2: + # busbar 2 + switches_state[(start_id + bb_id)::(nb_busbar + 1)] = True + else: + # other busbars + switches_state[(start_id + bb_id)::(nb_busbar + 1)] = False + topo_vect, shunt_bus = dtd.from_switches_position(switches_state) + assert (topo_vect == -1).all() + assert (shunt_bus == -1).all() + + def test_from_switches_position_more_advanced(self): + nb_busbar = self._aux_n_bb_per_sub() + start_id = (nb_busbar * (nb_busbar - 1) // 2) * type(self.env).n_sub + dtd = type(self.env).detailed_topo_desc + + # if you change the env it will change... 
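+        # the hard-coded ids below are elements expected to be at substation `sub_id` of the
+        # "educ_case14_storage" test grid (one load, one generator, one line origin and one
+        # line extremity): changing the environment, or its layout, invalidates them.
+        # `bbs_switch_bb1_bb2` relies on the switch layout assumed throughout these tests:
+        # the nb_busbar * (nb_busbar - 1) // 2 busbar-coupler switches of every substation
+        # are stored first (one block per substation), then each element gets one breaker
+        # switch followed by one switch per busbar, hence the (nb_busbar + 1) stride.
+        # `get_switch_id_ieee(conn_node_id)` is used here as the index of an element's
+        # breaker switch; its busbar switches follow it directly.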
+ sub_id = 1 + mask_el_this = type(self.env).grid_objects_types[:,0] == sub_id + load_this = [0] + gen_this = [0] + line_or_this = [2] # , 3, 4] + line_ex_this = [0] + + bbs_switch_bb1_bb2 = sub_id * (nb_busbar * (nb_busbar - 1) // 2) # switch between busbar 1 and busbar 2 at this substation + load_id_switch = dtd.get_switch_id_ieee(dtd.load_to_conn_node_id[load_this[0]]) + gen_id_switch = dtd.get_switch_id_ieee(dtd.gen_to_conn_node_id[gen_this[0]]) + lor_id_switch = dtd.get_switch_id_ieee(dtd.line_or_to_conn_node_id[line_or_this[0]]) + lex_id_switch = dtd.get_switch_id_ieee(dtd.line_ex_to_conn_node_id[line_ex_this[0]]) + + el_id_switch = load_id_switch + el_this = load_this + vect_topo_vect = type(self.env).load_pos_topo_vect + for el_id_switch, el_this, vect_topo_vect, tag in zip([load_id_switch, gen_id_switch, lor_id_switch, lex_id_switch], + [load_this, gen_this, line_or_this, line_ex_this], + [type(self.env).load_pos_topo_vect, + type(self.env).gen_pos_topo_vect, + type(self.env).line_or_pos_topo_vect, + type(self.env).line_ex_pos_topo_vect], + ["load", "gen", "lor", "lex"]): + # all connected + switches_state = np.ones(dtd.switches.shape[0], dtype=dt_bool) + switches_state[:start_id] = False # deactivate all busbar coupler + # assign all element to busbar 1 + for bb_id in range(1, nb_busbar + 1): + if bb_id == 1: + # busbar 1 + switches_state[(start_id + bb_id)::(nb_busbar + 1)] = True + else: + # other busbars + switches_state[(start_id + bb_id)::(nb_busbar + 1)] = False + + # disconnect the load with the breaker + switches_state[el_id_switch] = False + topo_vect, shunt_bus = dtd.from_switches_position(switches_state) + assert topo_vect[vect_topo_vect[el_this]] == -1, f"error for {tag}" + assert (topo_vect == 1).sum() == 58, f"error for {tag}" + switches_state[el_id_switch] = True + + # disconnect the load by disconnecting it of all the busbars + switches_state[(el_id_switch + 1):(el_id_switch + nb_busbar +1)] = False + topo_vect, shunt_bus = dtd.from_switches_position(switches_state) + assert topo_vect[vect_topo_vect[el_this]] == -1, f"error for {tag}" + assert (topo_vect == 1).sum() == 58, f"error for {tag}" + switches_state[(el_id_switch + 1)] = True # busbar 1 + + # now connect the load to busbar 2 + switches_state[(el_id_switch + 1)] = False # busbar 1 + switches_state[(el_id_switch + 2)] = True # busbar 2 + topo_vect, shunt_bus = dtd.from_switches_position(switches_state) + assert topo_vect[vect_topo_vect[el_this]] == 2, f"error for {tag}" + assert (topo_vect == 1).sum() == 58, f"error for {tag}" + + # load still on busbar 2, but disconnected + switches_state[(el_id_switch)] = False # busbar 1 + topo_vect, shunt_bus = dtd.from_switches_position(switches_state) + assert topo_vect[vect_topo_vect[el_this]] == -1, f"error for {tag}" + assert (topo_vect == 1).sum() == 58, f"error for {tag}" + # reset to orig state + switches_state[(el_id_switch)] = True # busbar 1 + switches_state[(el_id_switch + 1)] = True # busbar 1 + switches_state[(el_id_switch + 2)] = False # busbar 2 + + # load on busbar 2, but busbars connected + switches_state[(el_id_switch + 1)] = False # busbar 1 + switches_state[(el_id_switch + 2)] = True # busbar 2 + switches_state[bbs_switch_bb1_bb2] = True # switch between busbars + topo_vect, shunt_bus = dtd.from_switches_position(switches_state) + assert topo_vect[vect_topo_vect[el_this]] == 1, f"error for {tag}" + assert (topo_vect == 1).sum() == 59, f"error for {tag}" + + # TODO detailed topo : test_from_switches_position_more_advanced_shunt (shunts not tested 
above) + # TODO detailed topo add more tests + + +class DetailedTopoTester_3bb(DetailedTopoTester): + def _aux_n_bb_per_sub(self): + return 3 + + +class DetailedTopoTester_Action(unittest.TestCase): + def _aux_n_bb_per_sub(self): + return 2 + + def setUp(self) -> None: + DetailedTopoTester.setUp(self) + self.li_flag_nm = [ + "_modif_inj", + "_modif_set_bus", + "_modif_change_bus", + "_modif_set_status", + "_modif_change_status", + "_modif_redispatch", + "_modif_storage", + "_modif_curtailment", + "_modif_alarm", + "_modif_alert", + "_modif_set_switch", + "_modif_change_switch", + ] + type(self.env.action_space._template_obj).ISSUE_WARNING_SWITCH_SET_CHANGE = "never" + return super().setUp() + + def test_can_do_set(self): + act = self.env.action_space({"set_switch": [(0, 1)]}) + assert act._modif_set_switch + for flag_nm in self.li_flag_nm: + if flag_nm == "_modif_set_switch": + continue + assert not getattr(act, flag_nm) + assert act._set_switch_status[0] == 1 + assert (act._set_switch_status[1:] == 0).all() + + act = self.env.action_space({"set_switch": [(1, -1)]}) + assert act._modif_set_switch + assert act._set_switch_status[1] == -1 + assert (act._set_switch_status[0] == 0).all() + assert (act._set_switch_status[2:] == 0).all() + + # with the property + act = self.env.action_space() + act.set_switch = [(0, 1)] + assert act._modif_set_switch + for flag_nm in self.li_flag_nm: + if flag_nm == "_modif_set_switch": + continue + assert not getattr(act, flag_nm) + assert act._set_switch_status[0] == 1 + assert (act._set_switch_status[1:] == 0).all() + + act = self.env.action_space() + act.set_switch = [(1, -1)] + assert act._modif_set_switch + assert act._set_switch_status[1] == -1 + assert (act._set_switch_status[0] == 0).all() + assert (act._set_switch_status[2:] == 0).all() + + def test_can_do_change(self): + act = self.env.action_space({"change_switch": [0]}) + assert act._modif_change_switch + for flag_nm in self.li_flag_nm: + if flag_nm == "_modif_change_switch": + continue + assert not getattr(act, flag_nm) + assert act._change_switch_status[0] + assert (~act._change_switch_status[1:]).all() + # with the property + act = self.env.action_space() + act.change_switch = [0] + assert act._modif_change_switch + for flag_nm in self.li_flag_nm: + if flag_nm == "_modif_change_switch": + continue + assert not getattr(act, flag_nm) + assert act._change_switch_status[0] + assert (~act._change_switch_status[1:]).all() + + def test_ambiguous_set_switch(self): + with self.assertRaises(AmbiguousAction): + act = self.env.action_space({"set_switch": [(-1, 1)]}) + with self.assertRaises(AmbiguousAction): + act = self.env.action_space({"set_switch": [(type(self.env).detailed_topo_desc.switches.shape[0], 1)]}) + with self.assertRaises(AmbiguousAction): + act = self.env.action_space({"set_switch": [(0, -2)]}) + with self.assertRaises(AmbiguousAction): + act = self.env.action_space({"set_switch": [(0, 2)]}) + + # same sub with set_bus and set switch + act = self.env.action_space() + nb_bb = self._aux_n_bb_per_sub() + act.set_switch = [ (nb_bb * (nb_bb - 1) // 2, + 1)] + act.load_set_bus = [(0, 1)] + with self.assertRaises(AmbiguousAction): + act._check_for_ambiguity() + + # same sub with change_bus and set switch + act = self.env.action_space() + nb_bb = self._aux_n_bb_per_sub() + act.set_switch = [ (nb_bb * (nb_bb - 1) // 2, + 1)] + act.load_change_bus = [0] + with self.assertRaises(AmbiguousAction): + act._check_for_ambiguity() + + # set switch and change switch + act = self.env.action_space() + 
act.set_switch = [(0, 1)] + act.change_switch = [0] + with self.assertRaises(AmbiguousAction): + act._check_for_ambiguity() + + def test_ambiguous_change_switch(self): + with self.assertRaises(AmbiguousAction): + act = self.env.action_space({"change_switch": [-1]}) + with self.assertRaises(AmbiguousAction): + act = self.env.action_space({"change_switch": [type(self.env).detailed_topo_desc.switches.shape[0]]}) + + # same sub with set_bus and set switch + act = self.env.action_space() + nb_bb = self._aux_n_bb_per_sub() + act.change_switch = [nb_bb * (nb_bb - 1) // 2] + act.load_set_bus = [(0, 1)] + with self.assertRaises(AmbiguousAction): + act._check_for_ambiguity() + + # same sub with change_bus and set switch + act = self.env.action_space() + nb_bb = self._aux_n_bb_per_sub() + act.change_switch = [nb_bb * (nb_bb - 1) // 2] + act.load_change_bus = [0] + with self.assertRaises(AmbiguousAction): + act._check_for_ambiguity() + + # set switch and change switch + act = self.env.action_space() + act.set_switch = [(0, 1)] + act.change_switch = [0] + with self.assertRaises(AmbiguousAction): + act._check_for_ambiguity() + + def test_backend_action_set_switch(self): + nb_busbar = self._aux_n_bb_per_sub() + dtd = type(self.env).detailed_topo_desc + + # if you change the env it will change... + sub_id = 1 + load_this = [0] + gen_this = [0] + line_or_this = [2] + line_ex_this = [0] + + bbs_switch_bb1_bb2 = sub_id * (nb_busbar * (nb_busbar - 1) // 2) # switch between busbar 1 and busbar 2 at this substation + load_id_switch = dtd.get_switch_id_ieee(dtd.load_to_conn_node_id[load_this[0]]) + gen_id_switch = dtd.get_switch_id_ieee(dtd.gen_to_conn_node_id[gen_this[0]]) + lor_id_switch = dtd.get_switch_id_ieee(dtd.line_or_to_conn_node_id[line_or_this[0]]) + lex_id_switch = dtd.get_switch_id_ieee(dtd.line_ex_to_conn_node_id[line_ex_this[0]]) + + el_id_switch = load_id_switch + el_this = load_this + vect_topo_vect = type(self.env).load_pos_topo_vect + for el_id_switch, el_this, vect_topo_vect, tag in zip([load_id_switch, gen_id_switch, lor_id_switch, lex_id_switch], + [load_this, gen_this, line_or_this, line_ex_this], + [type(self.env).load_pos_topo_vect, + type(self.env).gen_pos_topo_vect, + type(self.env).line_or_pos_topo_vect, + type(self.env).line_ex_pos_topo_vect], + ["load", "gen", "lor", "lex"]): + nb_when_disco = 58 # number of unaffected element + if tag == "lor" or tag == "lex": + # the other extremity is impacted in case I disconnect a line + nb_when_disco = 57 + # disconnect the load with the breaker + act = self.env.action_space({"set_switch": [(el_id_switch, -1)]}) + bk_act = self.env.backend.my_bk_act_class() + bk_act += act + topo_vect = bk_act()[2].values + assert topo_vect[vect_topo_vect[el_this]] == -1, f"error for {tag}" + assert (topo_vect == 1).sum() == nb_when_disco, f"error for {tag} : {(topo_vect == 1).sum()} vs {nb_when_disco}" + + # disconnect the load by disconnecting it of all the busbars + act = self.env.action_space({"set_switch": [(el, -1) + for el in range(el_id_switch + 1, el_id_switch + nb_busbar +1)] + }) + bk_act = self.env.backend.my_bk_act_class() + bk_act += act + topo_vect = bk_act()[2].values + assert topo_vect[vect_topo_vect[el_this]] == -1, f"error for {tag}" + assert (topo_vect == 1).sum() == nb_when_disco, f"error for {tag} : {(topo_vect == 1).sum()} vs {nb_when_disco}" + + # now connect the load to busbar 2 + act = self.env.action_space({"set_switch": [(el, -1 if el != el_id_switch + 2 else 1) + for el in range(el_id_switch + 1, el_id_switch + nb_busbar +1)] + }) 
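+            # reminder on the switch layout exercised here: el_id_switch is the element's
+            # breaker switch, and el_id_switch + k (for k = 1 .. nb_busbar) is its selector
+            # switch towards busbar k, hence the range used to build the action just above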
+ bk_act = self.env.backend.my_bk_act_class() + bk_act += act + topo_vect = bk_act()[2].values + assert topo_vect[vect_topo_vect[el_this]] == 2, f"error for {tag}" + assert (topo_vect == 1).sum() == 58, f"error for {tag} : {(topo_vect == 1).sum()} vs 58" + + # load still on busbar 2, but disconnected + act = self.env.action_space({"set_switch": ([(el, -1 if el != el_id_switch + 2 else 1) + for el in range(el_id_switch + 1, el_id_switch + nb_busbar +1)]+ + [(el_id_switch, -1)]) + }) + bk_act = self.env.backend.my_bk_act_class() + bk_act += act + topo_vect = bk_act()[2].values + assert topo_vect[vect_topo_vect[el_this]] == -1, f"error for {tag}" + assert (topo_vect == 1).sum() == nb_when_disco, f"error for {tag} : {(topo_vect == 1).sum()} vs {nb_when_disco}" + + # load on busbar 2, but busbars connected + act = self.env.action_space({"set_switch": ([(el, -1 if el != el_id_switch + 2 else 1) + for el in range(el_id_switch + 1, el_id_switch + nb_busbar +1)] + + [(bbs_switch_bb1_bb2, 1)]) + }) + bk_act = self.env.backend.my_bk_act_class() + bk_act += act + topo_vect = bk_act()[2].values + assert topo_vect[vect_topo_vect[el_this]] == 1, f"error for {tag}" + assert (topo_vect == 1).sum() == 59, f"error for {tag} : {(topo_vect == 1).sum()} vs 59" + + # TODO detailed topo test print + # TODO detailed topo test to_dict + # TODO detailed topo test as_serializable_dict + # TODO detailed topo test from_dict + # TODO detailed topo test from_json (? does it exists) + + # then detailed topo + # TODO detailed topo test from_switches_position when there is a mask in the substation + # TODO detailed topo test env.step only switch + # TODO detailed topo test env.step switch and set_bus - # TODO test no shunt too - # TODO implement and test compute_switches_position !!! 
- # TODO test "_get_full_cls_str"(experimental_read_from_local_dir) +# TODO detailed topo test no shunt too +# TODO detailed topo test "_get_full_cls_str"(experimental_read_from_local_dir) +# TODO detailed topo test with different n_busbar_per_sub +# TODO detailed topo test action +# TODO detailed topo test observation +# TODO detailed topo test agent that do both actions on switches and with set_bus / change_bus +# TODO detailed topo test agent that act on switches but with an opponent that disconnect lines if __name__ == "__main__": unittest.main() diff --git a/grid2op/tests/test_generate_classes.py b/grid2op/tests/test_generate_classes.py index f88cdcfd8..981592485 100644 --- a/grid2op/tests/test_generate_classes.py +++ b/grid2op/tests/test_generate_classes.py @@ -20,11 +20,12 @@ class TestGenerateFile(unittest.TestCase): def _aux_assert_exists_then_delete(self, env): if isinstance(env, MultiMixEnvironment): - for mix in env: - self._aux_assert_exists_then_delete(mix) + # for mix in env: + # self._aux_assert_exists_then_delete(mix) + self._aux_assert_exists_then_delete(env.mix_envs[0]) elif isinstance(env, Environment): path = Path(env.get_path_env()) / "_grid2op_classes" - assert path.exists() + assert path.exists(), f"path {path} does not exists" shutil.rmtree(path, ignore_errors=True) else: raise RuntimeError("Unknown env type") @@ -37,33 +38,37 @@ def list_env(self): def test_can_generate(self): for env_nm in self.list_env(): - with warnings.catch_warnings(): - warnings.filterwarnings("ignore") - env = grid2op.make(env_nm, test=True, _add_to_name=type(self).__name__+"test_generate") - env.generate_classes() - self._aux_assert_exists_then_delete(env) - env.close() + try: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make(env_nm, test=True, _add_to_name=type(self).__name__+"test_generate") + env.generate_classes() + finally: + self._aux_assert_exists_then_delete(env) + env.close() def test_can_load(self): + _add_to_name = type(self).__name__+"test_load" for env_nm in self.list_env(): with warnings.catch_warnings(): warnings.filterwarnings("ignore") - env = grid2op.make(env_nm, test=True, _add_to_name=type(self).__name__+"_TestGenerateFile") + env = grid2op.make(env_nm, + test=True, + _add_to_name=_add_to_name) env.generate_classes() - with warnings.catch_warnings(): warnings.filterwarnings("ignore") try: env2 = grid2op.make(env_nm, test=True, experimental_read_from_local_dir=True, - _add_to_name=type(self).__name__) + _add_to_name=_add_to_name) env2.close() except RuntimeError as exc_: raise RuntimeError(f"Error for {env_nm}") from exc_ self._aux_assert_exists_then_delete(env) env.close() + if __name__ == "__main__": unittest.main() - \ No newline at end of file diff --git a/grid2op/tests/test_gym_asynch_env.py b/grid2op/tests/test_gym_asynch_env.py new file mode 100644 index 000000000..c9eb7eb1d --- /dev/null +++ b/grid2op/tests/test_gym_asynch_env.py @@ -0,0 +1,186 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# See AUTHORS.txt and https://github.com/rte-france/Grid2Op/pull/319 +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. 
+ +import unittest +from gymnasium.spaces import Box, Discrete, MultiDiscrete, Dict +from gymnasium.vector import AsyncVectorEnv +import warnings +import numpy as np +from multiprocessing import set_start_method + +import grid2op +from grid2op.Action import PlayableAction +from grid2op.gym_compat import GymEnv, BoxGymActSpace, BoxGymObsSpace, DiscreteActSpace, MultiDiscreteActSpace + + +class AsyncGymEnvTester_Fork(unittest.TestCase): + def _aux_start_method(self): + return "fork" + + def setUp(self) -> None: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + # this needs to be tested with pandapower backend + self.env = grid2op.make("educ_case14_storage", + test=True, + _add_to_name=type(self).__name__, + action_class=PlayableAction, + experimental_read_from_local_dir=False) + obs = self.env.reset(seed=0, options={"time serie id": 0}) + return super().setUp() + + def test_default_space_obs_act(self): + template_env = GymEnv(self.env) + template_env.action_space.seed(0) + obs = template_env.reset(seed=0, options={"time serie id": 0}) + async_vect_env = AsyncVectorEnv((lambda: GymEnv(self.env), lambda: GymEnv(self.env)), + context=self._aux_start_method()) + assert isinstance(async_vect_env.action_space, Dict) + assert isinstance(async_vect_env.observation_space, Dict) + obs, infos = async_vect_env.reset(seed=[0, 1], + options={"time serie id": 0}) + + dn_act_single = template_env.action_space.sample() + for k, v in dn_act_single.items(): + v[:] = 0 + dn_acts = {k: np.tile(v, reps=[2, 1]) for k, v in dn_act_single.items()} + obs2 = async_vect_env.step(dn_acts) + + rnd_acts_li = [template_env.action_space.sample(), template_env.action_space.sample()] + rnd_acts = {k: np.concatenate((rnd_acts_li[0][k], rnd_acts_li[1][k])) for k in rnd_acts_li[0].keys()} + obs3 = async_vect_env.step(rnd_acts) + + obs, infos = async_vect_env.reset(seed=[2, 3], + options={"time serie id": 0}, + ) + + def _aux_obs_act_vect(self, ts_id=0): + gym_env = GymEnv(self.env) + gym_env.action_space.close() + gym_env.action_space = BoxGymActSpace(self.env.action_space, attr_to_keep=["redispatch", "curtail"]) + gym_env.observation_space.close() + gym_env.observation_space = BoxGymObsSpace(self.env.observation_space, attr_to_keep=["rho"]) + gym_env.action_space.seed(0) + _ = gym_env.reset(seed=0, options={"time serie id": ts_id}) + return gym_env + + def test_space_obs_act_vect(self): + template_env = self._aux_obs_act_vect(0) + async_vect_env = AsyncVectorEnv((lambda: self._aux_obs_act_vect(1), + lambda: self._aux_obs_act_vect(2)), + context=self._aux_start_method()) + try: + assert isinstance(async_vect_env.action_space, Box) + assert isinstance(async_vect_env.observation_space, Box) + obs, infos = async_vect_env.reset(seed=[0, 1], + options={"time serie id": 0}) + + dn_act_single = template_env.action_space.sample() + dn_act_single[:] = 0 + dn_acts = np.tile(dn_act_single, reps=[2, 1]) + obs2 = async_vect_env.step(dn_acts) + + rnd_acts_li = [template_env.action_space.sample().reshape(1,-1), template_env.action_space.sample().reshape(1,-1)] + rnd_acts = np.concatenate(rnd_acts_li) + obs3 = async_vect_env.step(rnd_acts) + + obs, infos = async_vect_env.reset(seed=[2, 3], + options={"time serie id": 0}, + ) + finally: + async_vect_env.close() + template_env.close() + + def _aux_obs_vect_act_discrete(self, ts_id=0): + gym_env = GymEnv(self.env) + gym_env.observation_space.close() + gym_env.observation_space = BoxGymObsSpace(self.env.observation_space, attr_to_keep=["rho"]) + gym_env.action_space.close() + 
gym_env.action_space = DiscreteActSpace(self.env.action_space, attr_to_keep=["set_bus"]) + gym_env.action_space.seed(0) + _ = gym_env.reset(seed=0, options={"time serie id": ts_id}) + return gym_env + + def test_space_obs_vect_act_discrete(self): + template_env = self._aux_obs_vect_act_discrete(0) + assert isinstance(template_env.action_space, Discrete) + async_vect_env = AsyncVectorEnv((lambda: self._aux_obs_vect_act_discrete(1), + lambda: self._aux_obs_vect_act_discrete(2)), + context=self._aux_start_method()) + try: + assert isinstance(async_vect_env.action_space, MultiDiscrete) # converted to MultiDiscrete by gymnasium + assert isinstance(async_vect_env.observation_space, Box) + obs, infos = async_vect_env.reset(seed=[0, 1], + options={"time serie id": 0}) + + dn_act_single = 0 + dn_acts = np.tile(dn_act_single, reps=[2, 1]) + obs2 = async_vect_env.step(dn_acts) + + rnd_acts_li = [template_env.action_space.sample().reshape(1,-1), template_env.action_space.sample().reshape(1,-1)] + rnd_acts = np.concatenate(rnd_acts_li) + obs3 = async_vect_env.step(rnd_acts) + + obs, infos = async_vect_env.reset(seed=[2, 3], + options={"time serie id": 0}, + ) + finally: + async_vect_env.close() + template_env.close() + + def _aux_obs_vect_act_multidiscrete(self, ts_id=0): + gym_env = GymEnv(self.env) + gym_env.observation_space.close() + gym_env.observation_space = BoxGymObsSpace(self.env.observation_space, attr_to_keep=["rho"]) + gym_env.action_space.close() + gym_env.action_space = MultiDiscreteActSpace(self.env.action_space, attr_to_keep=["one_sub_set", "one_line_set"]) + gym_env.action_space.seed(0) + _ = gym_env.reset(seed=0, options={"time serie id": ts_id}) + return gym_env + + def test_space_obs_vect_act_multidiscrete(self): + template_env = self._aux_obs_vect_act_multidiscrete(0) + assert isinstance(template_env.action_space, MultiDiscrete) + async_vect_env = AsyncVectorEnv((lambda: self._aux_obs_vect_act_multidiscrete(1), + lambda: self._aux_obs_vect_act_multidiscrete(2)), + context=self._aux_start_method()) + try: + assert isinstance(async_vect_env.action_space, Box) # converted to Box by gymnasium + assert isinstance(async_vect_env.observation_space, Box) + obs, infos = async_vect_env.reset(seed=[0, 1], + options={"time serie id": 0}) + + dn_act_single = template_env.action_space.sample() + dn_act_single[:] = 0 + dn_acts = np.tile(dn_act_single, reps=[2, 1]) + obs2 = async_vect_env.step(dn_acts) + + rnd_acts_li = [template_env.action_space.sample().reshape(1,-1), template_env.action_space.sample().reshape(1,-1)] + rnd_acts = np.concatenate(rnd_acts_li) + obs3 = async_vect_env.step(rnd_acts) + + obs, infos = async_vect_env.reset(seed=[2, 3], + options={"time serie id": 0}, + ) + finally: + async_vect_env.close() + template_env.close() + + +class AsyncGymEnvTester_Spawn(AsyncGymEnvTester_Fork): + # Will be working when branch class_in_files will be merged + def _aux_start_method(self): + return "spawn" + + def setUp(self) -> None: + self.skipTest("Not handled at the moment") + return super().setUp() + + +if __name__ == "__main__": + unittest.main() diff --git a/grid2op/tests/test_gym_env_renderer.py b/grid2op/tests/test_gym_env_renderer.py index 4b26d89be..7a7a68fc4 100644 --- a/grid2op/tests/test_gym_env_renderer.py +++ b/grid2op/tests/test_gym_env_renderer.py @@ -12,7 +12,6 @@ import grid2op from grid2op.gym_compat import GymEnv -import numpy as np class TestGymEnvRenderer(unittest.TestCase): diff --git a/grid2op/tests/test_gymnasium_compat.py b/grid2op/tests/test_gymnasium_compat.py index 
c7417e26b..dd06153b3 100644 --- a/grid2op/tests/test_gymnasium_compat.py +++ b/grid2op/tests/test_gymnasium_compat.py @@ -93,7 +93,12 @@ class TestMultiDiscreteGymnasiumActSpace(_AuxTestMultiDiscreteGymActSpace, Auxil pass class TestDiscreteGymnasiumActSpace(_AuxTestDiscreteGymActSpace, AuxilliaryForTestGymnasium, unittest.TestCase): - pass + def test_class_different_from_multi_discrete(self): + from grid2op.gym_compat import (DiscreteActSpaceGymnasium, + MultiDiscreteActSpaceGymnasium) + assert DiscreteActSpaceGymnasium is not MultiDiscreteActSpaceGymnasium + assert DiscreteActSpaceGymnasium.__doc__ != MultiDiscreteActSpaceGymnasium.__doc__ + assert DiscreteActSpaceGymnasium.__name__ != MultiDiscreteActSpaceGymnasium.__name__ class TestAllGymnasiumActSpaceWithAlarm(_AuxTestAllGymActSpaceWithAlarm, AuxilliaryForTestGymnasium, unittest.TestCase): pass diff --git a/grid2op/tests/test_issue_196.py b/grid2op/tests/test_issue_196.py index c6a4b815d..08f5987d5 100644 --- a/grid2op/tests/test_issue_196.py +++ b/grid2op/tests/test_issue_196.py @@ -49,3 +49,7 @@ def test_issue_196_genp(self): # not great test as it passes with the bug... but just in the case... cannot hurt obs, *_ = self.env_gym.reset() assert obs in self.env_gym.observation_space + + +if __name__ == "__main__": + unittest.main() diff --git a/grid2op/tests/test_issue_446.py b/grid2op/tests/test_issue_446.py index dd0278a0b..a194b1caa 100644 --- a/grid2op/tests/test_issue_446.py +++ b/grid2op/tests/test_issue_446.py @@ -11,12 +11,15 @@ from grid2op.gym_compat import BoxGymObsSpace import numpy as np import unittest +import warnings class Issue446Tester(unittest.TestCase): def test_box_action_space(self): # We considers only redispatching actions - env = grid2op.make("l2rpn_case14_sandbox", test=True, _add_to_name=type(self).__name__) + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make("l2rpn_case14_sandbox", test=True, _add_to_name=type(self).__name__) divide = {"hour_of_day": np.ones(1)} subtract = {"hour_of_day": np.zeros(1)} diff --git a/grid2op/tests/test_issue_511.py b/grid2op/tests/test_issue_511.py index 5ff3db8de..4d5f558bf 100644 --- a/grid2op/tests/test_issue_511.py +++ b/grid2op/tests/test_issue_511.py @@ -36,7 +36,6 @@ def test_issue_set_bus(self): topo_action = self.env.action_space(act) as_dict = topo_action.as_dict() - print(as_dict) assert len(as_dict['set_bus_vect']['0']) == 2 # two objects modified def test_issue_change_bus(self): diff --git a/grid2op/tests/test_issue_591.py b/grid2op/tests/test_issue_591.py new file mode 100644 index 000000000..b50a74977 --- /dev/null +++ b/grid2op/tests/test_issue_591.py @@ -0,0 +1,62 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# See AUTHORS.txt +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. 
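This new test file (test_issue_591.py) scores an agent on a filtered subset of chronics. Condensed, the pattern it relies on looks like this (an editor's sketch, not part of the patch; the environment name and indices mirror the test below):

import warnings

import grid2op
from grid2op.Agent import DoNothingAgent
from grid2op.utils import ScoreL2RPN2020

with warnings.catch_warnings():
    warnings.filterwarnings("ignore")
    env = grid2op.make("rte_case5_example", test=True)

# keep only two scenarios: install a path filter, then reset() so it takes effect
kept = env.chronics_handler.reset().tolist()[17:19]
env.chronics_handler.set_filter(lambda path: path in kept)
env.chronics_handler.reset()

scorer = ScoreL2RPN2020(env, max_step=10, nb_scenario=len(kept),
                        env_seeds=[0, 0], agent_seeds=[0, 0])
try:
    scores, ts_survived, total_ts = scorer.get(DoNothingAgent(env.action_space))
finally:
    scorer.clear_all()  # remove the statistics cached by the scorer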
+ +import os +import warnings +import unittest +import grid2op +from grid2op.utils import ScoreL2RPN2020 +from grid2op.Agent import DoNothingAgent + + +class Issue591Tester(unittest.TestCase): + def setUp(self) -> None: + self.max_iter = 10 + return super().setUp() + + def test_issue_591(self): + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make("rte_case5_example", test=True) + + ch_patterns = env.chronics_handler.reset() + ch_patterns = ch_patterns.tolist() + ch_patterns = ch_patterns[17:19] + + nb_scenario = len(ch_patterns) + agent = DoNothingAgent(env.action_space) + handler = env.chronics_handler + handler.set_filter(lambda path: path in ch_patterns) + chronics = handler.reset() + + + scorer_2020 = ScoreL2RPN2020( + env, + max_step=1, + nb_scenario=1, + env_seeds=[0 for _ in range(1)], + agent_seeds=[0 for _ in range(1)], + ) + scorer_2020.clear_all() + scorer_2020 = ScoreL2RPN2020( + env, + max_step=self.max_iter, + nb_scenario=nb_scenario, + env_seeds=[0 for _ in range(nb_scenario)], + agent_seeds=[0 for _ in range(nb_scenario)], + ) + try: + score_2020 = scorer_2020.get(agent) + finally: + scorer_2020.clear_all() + for scen_path, score, ts_survived, total_ts in zip(ch_patterns, *score_2020): + assert total_ts == self.max_iter, f"wrong number of ts {total_ts} vs {self.max_iter}" + + +if __name__ == "__main__": + unittest.main() diff --git a/grid2op/tests/test_issue_593.py b/grid2op/tests/test_issue_593.py new file mode 100644 index 000000000..f4a07a392 --- /dev/null +++ b/grid2op/tests/test_issue_593.py @@ -0,0 +1,38 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# See AUTHORS.txt +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. + +import grid2op +import warnings +import unittest +from grid2op.Chronics import GridStateFromFile + +class Issue593Tester(unittest.TestCase): + def test_issue_593(self): + # parameters is read from the config file, + # it should be removed "automatically" + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make("l2rpn_idf_2023", + test=True, + data_feeding_kwargs={"gridvalueClass": GridStateFromFile, + }) + + def test_issue_593_should_break(self): + # user did something wrong + # there should be an error + with self.assertRaises(TypeError): + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make("l2rpn_idf_2023", + test=True, + data_feeding_kwargs={"gridvalueClass": GridStateFromFile, + "h_forecast": [5] + }) + +if __name__ == "__main__": + unittest.main() \ No newline at end of file diff --git a/grid2op/tests/test_issue_598.py b/grid2op/tests/test_issue_598.py new file mode 100644 index 000000000..7260e8c4e --- /dev/null +++ b/grid2op/tests/test_issue_598.py @@ -0,0 +1,162 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# See AUTHORS.txt +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. 
+# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. + +import numpy as np +import warnings + +import grid2op +import unittest +from grid2op.Chronics import FromHandlers +from grid2op.Chronics.handlers import PerfectForecastHandler, CSVHandler + + +class Issue598Tester(unittest.TestCase): + def setUp(self) -> None: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = grid2op.make("educ_case14_storage", + test=True, + data_feeding_kwargs={"gridvalueClass": FromHandlers, + "gen_p_handler": CSVHandler("prod_p"), + "load_p_handler": CSVHandler("load_p"), + "gen_v_handler": CSVHandler("prod_v"), + "load_q_handler": CSVHandler("load_q"), + "gen_p_for_handler": PerfectForecastHandler("prod_p_forecasted"), + "load_p_for_handler": PerfectForecastHandler("load_p_forecasted"), + "load_q_for_handler": PerfectForecastHandler("load_q_forecasted"), + "h_forecast": (5, 10, 15, 20, 25, 30), + } + ) + params = self.env.parameters + params.ACTIVATE_STORAGE_LOSS = False + self.env.change_parameters(params) + self.env.change_forecast_parameters(params) + self.env.reset(seed=0, options={"time serie id": 0}) + self.dn = self.env.action_space() + return super().setUp() + + def test_issue_598_dn_same_res(self): + """no redisp: simu = step""" + obs, *_ = self.env.step(self.dn) + obs_simulate, *_ = obs.simulate(self.dn, time_step=0) + #no redispatch action yet, the productions are the same after simulation on the same state + assert (np.abs(obs_simulate.prod_p - obs.prod_p) <= 1e-6).all() + + def test_issue_598_dn_redisp(self, redisp_amout=2., storage_amount=None): + """one small redispatch action before simulation and then a single simulation of do nothing + on current step (default args)""" + self.skipTest("Not sure it should be equal, waiting for the model in the issue 598") + act = self.env.action_space() + if redisp_amout is not None: + act.redispatch = {"gen_2_1": redisp_amout} + if storage_amount is not None: + act.storage_p = [(1, 10.)] + + obs, reward, done, info = self.env.step(act) + obs, reward, done, info = self.env.step(self.dn) + assert not done + assert not info["is_ambiguous"] + assert not info["is_illegal"] + print("here here here") + obs_simulate, *_ = obs.simulate(self.dn, time_step=0) + # obs.gen_p.sum() no redisp: 262.86395 + assert (np.abs(obs_simulate.prod_p - obs.prod_p) <= 1e-6).all(), f"{obs_simulate.prod_p} vs {obs.prod_p}" + assert (np.abs(obs_simulate.storage_charge - obs.storage_charge) <= 1e-6).all() + assert (np.abs(obs_simulate.storage_power - obs.storage_power) <= 1e-6).all() + assert (np.abs(obs_simulate.storage_power_target - obs.storage_power_target) <= 1e-6).all() + + def test_simulate_ok_current_step_redisp_large(self): + self.skipTest("Does not pass: redisp is not 'limited' by the simulate in this case") + self.test_issue_598_dn_redisp(redisp_amout=-10.) + + def test_simulate_ok_current_step_storage(self): + """doing a storage action. Then a simulate on current step lead to the same result""" + self.skipTest("Does not pass: storage is not the same in this case (nothing in simulate, but something in the env)") + # self.skipTest("Does not pass: curtailment is not 'limited' by the simulate in this case") + self.test_issue_598_dn_redisp(redisp_amout=None, storage_amount=10.) 
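+    # Note on the forecast API used throughout this class: obs.simulate(act, time_step=0)
+    # runs the powerflow on the *current* snapshot, while time_step=h (h >= 1) uses the
+    # h-steps-ahead forecast. With PerfectForecastHandler and no redispatch or storage
+    # action, the simulated state is expected to match what env.step() later returns,
+    # which is exactly what the methods below assert.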
+ + def test_simulate_step_redisp_before(self, redisp_amout=2., storage_amount=None): + """one small redispatch before simulation and then lots of simulation of do nothing (default args)""" + act = self.env.action_space() + if redisp_amout is not None: + act.redispatch = {"gen_2_1": redisp_amout} + if storage_amount is not None: + act.storage_p = [(1, 10.)] + obs, *_ = self.env.step(act) + next_obs = obs + for time_step in [1, 2, 3, 4, 5, 6]: + obs_simulate, *_ = next_obs.simulate(self.dn, time_step=1) + next_obs, *_ = self.env.step(self.dn) + assert (np.abs(obs_simulate.prod_p - next_obs.prod_p) <= 1e-6).all(), f"for h={time_step}: {obs_simulate.prod_p} vs {next_obs.prod_p}" + assert (np.abs(obs_simulate.storage_charge - next_obs.storage_charge) <= 1e-6).all() + assert (np.abs(obs_simulate.storage_power - next_obs.storage_power) <= 1e-6).all() + assert (np.abs(obs_simulate.storage_power_target - next_obs.storage_power_target) <= 1e-6).all() + + def test_simulate_step_redisp_before_from_init(self, redisp_amout=2., storage_amount=None): + """one small redispatch and then lots of simulation of do nothing (increasing the forecast horizon) + (default args) + """ + act = self.env.action_space() + if redisp_amout is not None: + act.redispatch = {"gen_2_1": redisp_amout} + if storage_amount is not None: + act.storage_p = [(1, 10.)] + obs, *_ = self.env.step(act) + for time_step in [1, 2, 3, 4, 5, 6]: + obs_simulate, *_ = obs.simulate(self.dn, time_step=time_step) + next_obs, *_ = self.env.step(self.dn) + assert (np.abs(obs_simulate.prod_p - next_obs.prod_p) <= 1e-6).all(), f"for h={time_step}: {obs_simulate.prod_p} vs {next_obs.prod_p}" + assert (np.abs(obs_simulate.storage_charge - next_obs.storage_charge) <= 1e-6).all() + assert (np.abs(obs_simulate.storage_power - next_obs.storage_power) <= 1e-6).all() + assert (np.abs(obs_simulate.storage_power_target - next_obs.storage_power_target) <= 1e-6).all() + + def test_simulate_step_redisp_before_chain(self, redisp_amout=2., storage_amount=None): + """one small redispatch and then lots of simulation of do nothing (chaining simulate on the forecasts) + (default args)""" + act = self.env.action_space() + if redisp_amout is not None: + act.redispatch = {"gen_2_1": redisp_amout} + if storage_amount is not None: + act.storage_p = [(1, 10.)] + obs, *_ = self.env.step(act) + obs_simulate = obs + for time_step in [1, 2, 3, 4, 5, 6]: + obs_simulate, *_ = obs_simulate.simulate(self.dn, time_step=1) + next_obs, *_ = self.env.step(self.dn) + assert (np.abs(obs_simulate.prod_p - next_obs.prod_p) <= 1e-6).all(), f"for h={time_step}: {obs_simulate.prod_p} vs {next_obs.prod_p}" + assert (np.abs(obs_simulate.storage_charge - next_obs.storage_charge) <= 1e-6).all() + assert (np.abs(obs_simulate.storage_power - next_obs.storage_power) <= 1e-6).all() + assert (np.abs(obs_simulate.storage_power_target - next_obs.storage_power_target) <= 1e-6).all() + + def test_simulate_step_redisp_before_large(self): + """one large redispatch before simulation and then lots of simulation of do nothing""" + self.test_simulate_step_redisp_before(redisp_amout=-10.) # -10. so that redisp cannot be satisfied at first + + def test_simulate_step_redisp_before_from_init_large(self): + """one large redispatch and then lots of simulation of do nothing (increasing the forecast horizon)""" + self.test_simulate_step_redisp_before_from_init(redisp_amout=-10.) # -10. 
so that redisp cannot be satisfied at first + + def test_simulate_step_redisp_before_chain_large(self): + """one large redispatch and then lots of simulation of do nothing (increasing the forecast horizon)""" + self.test_simulate_step_redisp_before_chain(redisp_amout=-10.) # -10. so that redisp cannot be satisfied at first + + def test_simulate_step_storage_before_large(self): + """one action on storage unit before simulation and then lots of simulation of do nothing""" + self.test_simulate_step_redisp_before(redisp_amout=None, storage_amount=10.) + + def test_simulate_step_redisp_storage_from_init_large(self): + """one action on storage unit and then lots of simulation of do nothing (increasing the forecast horizon)""" + self.test_simulate_step_redisp_before_from_init(redisp_amout=None, storage_amount=10.) + + def test_simulate_step_redisp_storage_chain_large(self): + """one action on storage unit and then lots of simulation of do nothing (increasing the forecast horizon)""" + self.test_simulate_step_redisp_before_chain(redisp_amout=None, storage_amount=10.) + + +# TODO when a redisp is done in the forecast +# TODO with curtailment \ No newline at end of file diff --git a/grid2op/tests/test_issue_616.py b/grid2op/tests/test_issue_616.py new file mode 100644 index 000000000..6a779da33 --- /dev/null +++ b/grid2op/tests/test_issue_616.py @@ -0,0 +1,320 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# See AUTHORS.txt and https://github.com/rte-france/Grid2Op/pull/319 +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. 
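This new test file (test_issue_616.py) checks that the randomness of generated maintenance and of noisy forecasts is driven entirely by env.reset(seed=...). Condensed, the property under test is (an editor's sketch, not part of the patch; it assumes an environment whose chronics draw random maintenance, as set up in the tests below):

obs = env.reset(seed=0, options={"time serie id": 0})
maint_a = 1.0 * env.chronics_handler.real_data.data.maintenance
env.reset(seed=1, options={"time serie id": 0})
maint_b = 1.0 * env.chronics_handler.real_data.data.maintenance
env.reset(seed=0, options={"time serie id": 0})
maint_c = 1.0 * env.chronics_handler.real_data.data.maintenance

assert (maint_a == maint_c).all()  # same seed and same time serie: identical schedule
assert (maint_a != maint_b).any()  # different seed: different schedule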
+ +import unittest +import grid2op +import tempfile +import numpy as np +import re +import os +import json +import warnings + +from grid2op.Chronics import (MultifolderWithCache, + GridStateFromFileWithForecastsWithMaintenance, + FromHandlers) +from grid2op.Chronics.handlers import (CSVHandler, + NoisyForecastHandler, + LoadQFromPHandler, + JSONMaintenanceHandler) + +from grid2op.Runner import Runner + + +class Issue616Tester(unittest.TestCase): + def setUp(self): + self.env_name = "l2rpn_case14_sandbox" + # create first env + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make(self.env_name, + test=True) + + # hack for adding maintenance + dict_maint = { + "maintenance_starting_hour": 1, + "maintenance_ending_hour": 2, + "line_to_maintenance": ["1_2_2", "1_4_4", "9_10_12", "12_13_14"], + "daily_proba_per_month_maintenance": [0.7 for _ in range(12)], + "max_daily_number_per_month_maintenance": [1 for _ in range(12)], + "maintenance_day_of_week": list(range(7)) + } + self.tmp_files = [os.path.join(env.get_path_env(), + "chronics", "0000", "maintenance_meta.json"), + os.path.join(env.get_path_env(), + "chronics", "0001", "maintenance_meta.json"), + os.path.join(env.get_path_env(), + "chronics", "0000", "maintenance_meta.json"), + ] + for path in self.tmp_files: + with open(path, "w", encoding="utf-8") as f: + json.dump(fp=f, obj=dict_maint) + env.close() + # create the env with the maintenance + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env_bug = grid2op.make(self.env_name, + chronics_class=MultifolderWithCache, + data_feeding_kwargs={"gridvalueClass": GridStateFromFileWithForecastsWithMaintenance}, + test=True + ) + self.env_bug.chronics_handler.reset() + + # store the normal maintenance schedule: + self.maint_ref = (np.array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, + 298, 299]) + 12, + np.array([4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2])) + + def tearDown(self) -> None: + self.env_bug.close() + for el in self.tmp_files: + if os.path.exists(el): + os.remove(el) + return super().tearDown() + + def test_reset(self): + """test that the seed is used correctly in env.reset""" + obs = self.env_bug.reset(seed=0, options={"time serie id": 0}) + maint_ref = 1. * self.env_bug.chronics_handler.real_data.data.maintenance + + obs = self.env_bug.reset(seed=1, options={"time serie id": 0}) + maint_1 = 1. * self.env_bug.chronics_handler.real_data.data.maintenance + + obs = self.env_bug.reset(seed=0, options={"time serie id": 0}) + maint_0 = 1. 
* self.env_bug.chronics_handler.real_data.data.maintenance + + assert (maint_ref == maint_0).all() + assert (maint_ref != maint_1).any() + assert (maint_ref.nonzero()[0] == self.maint_ref[0]).all() + assert (maint_ref.nonzero()[1] == self.maint_ref[1]).all() + + def test_runner(self): + """test the runner behaves correctly""" + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + runner = Runner(**self.env_bug.get_params_for_runner()) + res = runner.run(nb_episode=3, + env_seeds=[0, 1, 0], + max_iter=5, + add_detailed_output=True) + + maint_ref = np.array([ -1, -1, 300, -1, 12, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], + dtype=np.int32) + assert (res[0][-1].observations[0].time_next_maintenance == maint_ref).all() + assert (res[0][-1].observations[0].time_next_maintenance != res[1][-1].observations[0].time_next_maintenance).any() + assert (res[0][-1].observations[0].time_next_maintenance == res[2][-1].observations[0].time_next_maintenance).all() + + def test_chronics_handler_twice_reset(self): + """test the same results is obtained if the chronics handler is reset twice""" + obs = self.env_bug.reset(seed=0, options={"time serie id": 0}) + maint_ref = 1. * self.env_bug.chronics_handler.real_data.data.maintenance + assert (maint_ref.nonzero()[0] == self.maint_ref[0]).all() + assert (maint_ref.nonzero()[1] == self.maint_ref[1]).all() + + self.env_bug.chronics_handler.reset() + maint_ref = 1. * self.env_bug.chronics_handler.real_data.data.maintenance + assert (maint_ref.nonzero()[0] == self.maint_ref[0]).all() + assert (maint_ref.nonzero()[1] == self.maint_ref[1]).all() + + +class Issue616WithHandlerTester(unittest.TestCase): + def setUp(self): + self.env_name = "l2rpn_case14_sandbox" + hs_ = [5*(i+1) for i in range(12)] + + # create first env + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make(self.env_name, + test=True) + + # hack for adding maintenance + dict_maint = { + "maintenance_starting_hour": 1, + "maintenance_ending_hour": 2, + "line_to_maintenance": ["1_2_2", "1_4_4", "9_10_12", "12_13_14"], + "daily_proba_per_month_maintenance": [0.7 for _ in range(12)], + "max_daily_number_per_month_maintenance": [1 for _ in range(12)], + "maintenance_day_of_week": list(range(7)) + } + self.tmp_json = tempfile.NamedTemporaryFile(dir=os.path.join(env.get_path_env(), "chronics", "0000"), + prefix="maintenance_meta", + suffix=".json") + with open(self.tmp_json.name, "w", encoding="utf-8") as f: + json.dump(fp=f, obj=dict_maint) + + # uses the default noise: sqrt(horizon) * 0.01 : error of 8% 1h ahead + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env_bug = grid2op.make(self.env_name, + chronics_class=MultifolderWithCache, + data_feeding_kwargs={"gridvalueClass": FromHandlers, + "gen_p_handler": CSVHandler("prod_p"), + "load_p_handler": CSVHandler("load_p"), + "gen_v_handler": CSVHandler("prod_v"), + "load_q_handler": LoadQFromPHandler("load_q"), + "h_forecast": hs_, + "maintenance_handler": JSONMaintenanceHandler(json_file_name=self.tmp_json.name), + "gen_p_for_handler": NoisyForecastHandler("prod_p_forecasted"), + "load_p_for_handler": NoisyForecastHandler("load_p_forecasted"), + "load_q_for_handler": NoisyForecastHandler("load_q_forecasted"), + }, + test=True + ) + self.env_bug.chronics_handler.reset() + + # store the normal maintenance schedule: + self.maint_ref = (np.array([ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, + 23, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, + 310, 311]), + 
np.array([12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14])) + + self.load_p_ref = np.array([[22. , 87. , 45.79999924, 7. , 12. , + 28.20000076, 8.69999981, 3.5 , 5.5 , 12.69999981, + 14.80000019], + [22.44357109, 90.38361359, 46.61357117, 7.00726891, 12.49121857, + 28.84151268, 8.93680668, 3.45285726, 5.58550406, 13.10054588, + 15.43630219], + [22.48419762, 89.22782135, 45.57607269, 6.98833132, 12.35618019, + 28.45972633, 9.01393414, 3.44352579, 5.57040882, 12.96386147, + 15.2933054 ], + [21.85004234, 86.51035309, 44.29330063, 6.82195902, 11.86427689, + 28.2765255 , 8.79933834, 3.36154509, 5.33892441, 12.65522861, + 14.92921543], + [21.61282349, 86.64777374, 44.50276947, 6.68032742, 11.88705349, + 27.90019035, 8.84160995, 3.34016371, 5.30496597, 12.57473373, + 14.63777542], + [23.22621727, 92.27429962, 47.29320145, 7.25162458, 12.71661758, + 30.16255379, 9.24844837, 3.57326436, 5.57008839, 13.34719276, + 15.97459316], + [20.23793983, 81.04374695, 42.03972244, 6.25536346, 10.85489559, + 26.03334999, 8.0951767 , 3.12768173, 5.05948496, 11.49882984, + 13.89058685], + [19.92967606, 81.96430206, 41.73068237, 6.54965878, 11.13441944, + 26.10506821, 8.04672432, 3.08769631, 4.95902777, 11.50868607, + 13.94141674], + [20.64870644, 83.94567871, 42.16581726, 6.56127167, 11.38573551, + 27.0170002 , 8.39456749, 3.1841464 , 5.21042156, 11.96467113, + 14.37690353], + [19.72007751, 79.25064087, 40.82889175, 6.11044645, 10.83215523, + 25.83052444, 7.77693176, 3.05522323, 4.814291 , 11.5728159 , + 13.9799614 ], + [21.79347801, 87.17391205, 42.77978897, 6.76001358, 11.70390511, + 28.14990807, 8.67703247, 3.32955885, 5.24657774, 12.30927849, + 14.83167171], + [19.81615639, 78.61643982, 40.09531021, 6.11152506, 10.64886951, + 25.27948952, 7.87090397, 2.96316385, 4.72254229, 11.20446301, + 13.88982964], + [19.3391819 , 77.26506805, 39.22829056, 6.04922247, 10.44865608, + 24.83847427, 7.8823204 , 2.93295646, 4.76605368, 11.18189621, + 13.19830322]]) + + self.load_q_ref = np.array([15.4 , 60.899998 , 32.059998 , 4.9 , 8.4 , + 19.74 , 6.0899997, 2.45 , 3.85 , 8.889999 , + 10.36 ], dtype=np.float32) + + def tearDown(self) -> None: + self.env_bug.close() + self.tmp_json.close() + return super().tearDown() + + def test_reset(self): + """test that the seed is used correctly in env.reset""" + obs = self.env_bug.reset(seed=0, options={"time serie id": 0}) + maint_ref = 1. * self.env_bug.chronics_handler.real_data.data.maintenance_handler.maintenance + load_q_ref = 1. * obs.load_q + load_p_ref = 1. * obs.get_forecast_arrays()[0] + + obs = self.env_bug.reset(seed=1, options={"time serie id": 0}) + maint_1 = 1. * self.env_bug.chronics_handler.real_data.data.maintenance_handler.maintenance + load_q_1 = 1. * obs.load_q + load_p_1= 1. * obs.get_forecast_arrays()[0] + + obs = self.env_bug.reset(seed=0, options={"time serie id": 0}) + maint_0 = 1. * self.env_bug.chronics_handler.real_data.data.maintenance_handler.maintenance + load_q_0 = 1. * obs.load_q + load_p_0 = 1. * obs.get_forecast_arrays()[0] + + # maintenance, so JSONMaintenanceHandler + assert (maint_ref == maint_0).all() + assert (maint_ref != maint_1).any() + assert (maint_ref.nonzero()[0] == self.maint_ref[0]).all() + assert (maint_ref.nonzero()[1] == self.maint_ref[1]).all() + + # load_q, so LoadQFromPHandler + assert (load_q_ref == load_q_0).all() + # assert (load_q_ref != load_q_1).any() # it's normal it works as this is not random ! 
+ assert (load_q_ref == self.load_q_ref).all() + + # load_p_forecasted, so NoisyForecastHandler + assert (load_p_ref == load_p_0).all() + assert (load_p_ref != load_p_1).any() + assert (np.abs(load_p_ref - self.load_p_ref) <= 1e-6).all() + + def test_runner(self): + """test the runner behaves correctly""" + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + runner = Runner(**self.env_bug.get_params_for_runner()) + res = runner.run(nb_episode=3, + env_seeds=[0, 1, 0], + max_iter=5, + add_detailed_output=True) + obs = res[0][-1].observations[0] + maint_ref = 1. * obs.time_next_maintenance + load_q_ref = 1. * obs.load_q + # load_p_ref = 1. * obs.get_forecast_arrays()[0] not present in episodeData + + obs = res[1][-1].observations[0] + maint_1 = 1. * obs.time_next_maintenance + load_q_1 = 1. * obs.load_q + # load_p_1 = 1. * obs.get_forecast_arrays()[0] not present in episodeData + + obs = res[2][-1].observations[0] + maint_0 = 1. * obs.time_next_maintenance + load_q_0 = 1. * obs.load_q + # load_p_0 = 1. * obs.get_forecast_arrays()[0] not present in episodeData + + # maintenance, so JSONMaintenanceHandler + assert (maint_ref == maint_0).all() + assert (maint_ref != maint_1).any() + # TODO test against a reference data stored in the file + + # load_q, so LoadQFromPHandler + assert (load_q_ref == load_q_0).all() + # assert (load_q_ref != load_q_1).any() # it's normal it works as this is not random ! + assert (load_q_ref == self.load_q_ref).all() + + # load_p_forecasted, so NoisyForecastHandler + # assert (load_p_ref == load_p_0).all() + # assert (load_p_ref != load_p_1).any() + # TODO test that with an agent + + def test_chronics_handler_twice_reset(self): + """test the same results is obtained if the chronics handler is reset twice""" + obs = self.env_bug.reset(seed=0, options={"time serie id": 0}) + maint_ref = 1. * obs.time_next_maintenance + load_q_ref = 1. * obs.load_q + load_p_ref = 1. * obs.get_forecast_arrays()[0] + + self.env_bug.chronics_handler.reset() + maint_1 = 1. * obs.time_next_maintenance + load_q_1 = 1. * obs.load_q + load_p_1 = 1. * obs.get_forecast_arrays()[0] + + assert (np.abs(maint_ref - maint_1) <= 1e-6).all() + assert (np.abs(load_q_ref - load_q_1) <= 1e-6).all() + assert (np.abs(load_p_ref - load_p_1) <= 1e-6).all() + + +if __name__ == "__main__": + unittest.main() diff --git a/grid2op/tests/test_issue_617.py b/grid2op/tests/test_issue_617.py new file mode 100644 index 000000000..e9072a688 --- /dev/null +++ b/grid2op/tests/test_issue_617.py @@ -0,0 +1,102 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# See AUTHORS.txt and https://github.com/rte-france/Grid2Op/pull/319 +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. 
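This new test file (test_issue_617.py) builds a throw-away environment directly from a pandapower grid. The minimal on-disk layout grid2op.make needs for such a local environment is a folder containing grid.json (the pandapower network written with pp.to_json) and a config.py exposing a dictionary named config. A sketch of such a config.py (an editor's illustration, not part of the patch; the keys mirror the create_config helper of the test below and the thermal limit value is a placeholder):

# config.py
from grid2op.Backend.pandaPowerBackend import PandaPowerBackend
from grid2op.Action.playableAction import PlayableAction

config = {
    "backend": PandaPowerBackend,
    "action": PlayableAction,
    "thermal_limits": [450.0],  # one value per powerline, in amps
}

Passing chronics_class=ChangeNothing to grid2op.make, as the test does, avoids the need for any time-series files.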
+ +import unittest +import pandapower as pp +import tempfile +import os +from pathlib import Path +import warnings +import copy +import numpy as np + + +from helper_path_test import PATH_DATA_TEST +import grid2op +from grid2op.Backend.pandaPowerBackend import PandaPowerBackend +from grid2op.Action.playableAction import PlayableAction +from grid2op.Observation.completeObservation import CompleteObservation +from grid2op.Reward.flatReward import FlatReward +from grid2op.Rules.DefaultRules import DefaultRules +from grid2op.Chronics.multiFolder import Multifolder +from grid2op.Chronics.gridStateFromFileWithForecasts import GridStateFromFileWithForecasts +from grid2op.Chronics import ChangeNothing + + +class Issue617Tester(unittest.TestCase): + def setUp(self): + self.env_name = "l2rpn_case14_sandbox" + # create first env + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + root_path = Path(os.path.abspath(PATH_DATA_TEST)) + self.env_path = tempfile.TemporaryDirectory(dir=root_path) + self.tol = 1e-6 + + def tearDown(self) -> None: + self.env_path.cleanup() + return super().tearDown() + + def create_config(self, env_path:Path, network, **kwargs): + thermal_limits = [10_000. * el for el in network.line.max_i_ka] # Thermal Limit in Amps (A) + with open(Path(env_path.name) / "config.py", "w") as config: + # Import Statements + config.writelines( + [f"from {value.__module__} import {value.__name__}\n" for value in kwargs.values() if hasattr(value, "__module__")] + ) + + # Config Dictionary + config.writelines( + ["config = {\n"] + + [f"'{k}':{getattr(v,'__name__', 'None')},\n" for k,v in kwargs.items()] + + [f"'thermal_limits':{thermal_limits}\n"] + + ["}\n"] + ) + return thermal_limits + + def create_pp_net(self): + network = pp.create_empty_network() + pp.create_buses(network, nr_buses=2, vn_kv=20.0) + pp.create_gen(network, bus=0, p_mw=10.0, min_p_mw=-1e9, max_p_mw=1e9, slack=True, slack_weight=1.0) + pp.create_line(network, from_bus=0, to_bus=1, length_km=10.0, std_type="NAYY 4x50 SE") + pp.create_load(network, bus=1, p_mw=10.0, controllable=False) + pp.to_json(network, Path(self.env_path.name) / "grid.json") + return network + + def test_can_make_env(self): + network = self.create_pp_net() + thermal_limits = self.create_config(self.env_path, + network, + backend=PandaPowerBackend, + action=PlayableAction, + observation_class=CompleteObservation, + reward_class=FlatReward, + gamerules_class=DefaultRules, + chronics_class=Multifolder, + grid_value_class=GridStateFromFileWithForecasts, + voltagecontroler_class=None, + names_chronics_to_grid=None) + + pp.runpp(network, numba=True, lightsim2grid=False, max_iteration=10, distributed_slack=False, init="dc", check_connectivity=False) + assert network.converged + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make(self.env_path.name, chronics_class=ChangeNothing) + assert (np.abs(env.get_thermal_limit() - thermal_limits) <= 1e-6).all() + obs = env.reset() + assert (np.abs(obs.p_or - network.res_line["p_from_mw"]) <= self.tol).all() + assert (np.abs(obs.q_or - network.res_line["q_from_mvar"]) <= self.tol).all() + assert (np.abs(obs.a_or - 1000. * network.res_line["i_from_ka"]) <= self.tol).all() + obs, reward, done, info = env.step(env.action_space()) + assert (np.abs(obs.p_or - network.res_line["p_from_mw"]) <= self.tol).all() + assert (np.abs(obs.q_or - network.res_line["q_from_mvar"]) <= self.tol).all() + assert (np.abs(obs.a_or - 1000. 
* network.res_line["i_from_ka"]) <= self.tol).all() + + +if __name__ == "__main__": + unittest.main() diff --git a/grid2op/tests/test_multi_steps_forecasts.py b/grid2op/tests/test_multi_steps_forecasts.py index 2608f3cb0..0dc7ac685 100644 --- a/grid2op/tests/test_multi_steps_forecasts.py +++ b/grid2op/tests/test_multi_steps_forecasts.py @@ -80,7 +80,7 @@ def test_chunk_size(self): def test_max_iter(self): max_iter = 4 - self.env.chronics_handler.set_max_iter(max_iter) + self.env.set_max_iter(max_iter) obs = self.env.reset() self.aux_test_for_consistent(obs) diff --git a/grid2op/tests/test_multidiscrete_act_space.py b/grid2op/tests/test_multidiscrete_act_space.py new file mode 100644 index 000000000..760c7ac91 --- /dev/null +++ b/grid2op/tests/test_multidiscrete_act_space.py @@ -0,0 +1,163 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# See AUTHORS.txt +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. +import unittest +import warnings +import numpy as np + +import grid2op +from grid2op.Backend import PandaPowerBackend +from grid2op.Action import CompleteAction +from grid2op.gym_compat import MultiDiscreteActSpace, GymEnv + + +class TestMultiDiscreteActSpaceOneLineChangeSet(unittest.TestCase): + def get_env_nm(self): + return "educ_case14_storage" + + def get_reset_kwargs(self) -> dict: + # seed has been tuned for the tests to pass + return dict(seed=self.seed, options={"time serie id": 0}) + + def setUp(self) -> None: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = grid2op.make(self.get_env_nm(), + backend=PandaPowerBackend(), + action_class=CompleteAction, + test=True, + _add_to_name=type(self).__name__) + self.seed = 0 + self.gym_env = GymEnv(self.env) + + def tearDown(self) -> None: + self.env.close() + self.gym_env.close() + return super().tearDown() + + def test_kwargs_ok(self): + with warnings.catch_warnings(): + warnings.filterwarnings("error") + act_space = MultiDiscreteActSpace(self.env.action_space, attr_to_keep=["one_line_set"]) + assert act_space.nvec[0] == 1 + 2 * type(self.env).n_line + with warnings.catch_warnings(): + warnings.filterwarnings("error") + act_space = MultiDiscreteActSpace(self.env.action_space, attr_to_keep=["one_line_change"]) + assert act_space.nvec[0] == 1 + type(self.env).n_line + + def _aux_assert_flags(self, glop_act): + assert not glop_act._modif_alarm + assert not glop_act._modif_alert + assert not glop_act._modif_curtailment + assert not glop_act._modif_storage + assert not glop_act._modif_redispatch + assert not glop_act._modif_set_bus + assert not glop_act._modif_change_bus + + def test_action_ok_set(self): + act_space = MultiDiscreteActSpace(self.env.action_space, attr_to_keep=["one_line_set"]) + act_space.seed(self.seed) + for _ in range(10): + act = act_space.sample() + glop_act = act_space.from_gym(act) + self._aux_assert_flags(glop_act) + assert not glop_act._modif_change_status + lines_, subs_ = glop_act.get_topological_impact() + assert (~subs_).all() + if act[0] >= 1: # 0 is for do nothing + # 1 is connect line 0, 2 is disconnect line 0 + # 3 is connect line 1, etc. 
+ assert glop_act._modif_set_status + assert lines_[(act[0]- 1) // 2 ] + else: + assert not glop_act._modif_set_status + assert (~lines_).all() + + glop_act = act_space.from_gym(np.array([0])) + lines_, subs_ = glop_act.get_topological_impact() + assert (~subs_).all() + assert (~lines_).all() + self._aux_assert_flags(glop_act) + assert not glop_act._modif_change_status + assert not glop_act._modif_set_status + + for i in range(1, 2 * type(self.env).n_line + 1): + glop_act = act_space.from_gym(np.array([i])) + lines_, subs_ = glop_act.get_topological_impact() + assert (~subs_).all() + self._aux_assert_flags(glop_act) + assert not glop_act._modif_change_status + assert glop_act._modif_set_status + l_id = (i- 1) // 2 + assert lines_[l_id] + assert glop_act._set_line_status[l_id] == ((i-1) % 2 == 0) * 2 - 1, f"error for {i}" + + def test_action_ok_change(self): + act_space = MultiDiscreteActSpace(self.env.action_space, attr_to_keep=["one_line_change"]) + act_space.seed(self.seed) + for _ in range(10): + act = act_space.sample() + glop_act = act_space.from_gym(act) + self._aux_assert_flags(glop_act) + assert not glop_act._modif_set_status + lines_, subs_ = glop_act.get_topological_impact() + assert (~subs_).all() + if act[0] >= 1: # 0 is for do nothing + assert glop_act._modif_change_status + assert lines_[(act[0]- 1)] + else: + assert (~lines_).all() + assert not glop_act._modif_change_status + + glop_act = act_space.from_gym(np.array([0])) + lines_, subs_ = glop_act.get_topological_impact() + assert (~subs_).all() + assert (~lines_).all() + self._aux_assert_flags(glop_act) + assert not glop_act._modif_change_status + assert not glop_act._modif_set_status + + for i in range(1, type(self.env).n_line + 1): + glop_act = act_space.from_gym(np.array([i])) + lines_, subs_ = glop_act.get_topological_impact() + assert (~subs_).all() + self._aux_assert_flags(glop_act) + assert glop_act._modif_change_status + assert not glop_act._modif_set_status + l_id = (i- 1) + assert lines_[l_id] + assert glop_act._switch_line_status[l_id], f"error for {i}" + + def test_can_combine_topo_line_set(self): + act_space = MultiDiscreteActSpace(self.env.action_space, + attr_to_keep=["one_line_set", "one_sub_set"]) + act_space.seed(self.seed) + for _ in range(10): + act = act_space.sample() + glop_act = act_space.from_gym(act) + lines_, subs_ = glop_act.get_topological_impact() + if act[0]: + assert lines_.sum() == 1 + if act[1]: + assert subs_.sum() == 1 + + def test_can_combine_topo_line_change(self): + act_space = MultiDiscreteActSpace(self.env.action_space, + attr_to_keep=["one_line_change", "one_sub_change"]) + act_space.seed(self.seed) + for _ in range(10): + act = act_space.sample() + glop_act = act_space.from_gym(act) + lines_, subs_ = glop_act.get_topological_impact() + if act[0]: + assert lines_.sum() == 1 + if act[1]: + assert subs_.sum() == 1 + + +if __name__ == "__main__": + unittest.main() diff --git a/grid2op/tests/test_n_busbar_per_sub.py b/grid2op/tests/test_n_busbar_per_sub.py new file mode 100644 index 000000000..cf94ad33a --- /dev/null +++ b/grid2op/tests/test_n_busbar_per_sub.py @@ -0,0 +1,2015 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# See AUTHORS.txt +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. 
+# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. + +import warnings +import unittest +from grid2op.tests.helper_path_test import * + +import grid2op +from grid2op.Agent import BaseAgent +from grid2op.Environment import MaskedEnvironment, TimedOutEnvironment +from grid2op.Runner import Runner +from grid2op.Backend import PandaPowerBackend +from grid2op.Space import DEFAULT_N_BUSBAR_PER_SUB +from grid2op.Action import ActionSpace, BaseAction, CompleteAction +from grid2op.Observation import BaseObservation +from grid2op.Exceptions import Grid2OpException, EnvError, AmbiguousAction +from grid2op.gym_compat import GymEnv, DiscreteActSpace, BoxGymActSpace, BoxGymObsSpace, MultiDiscreteActSpace +import pdb + + +# test on a big computer only with lots of RAM, and lots of time available... +HAS_TIME_AND_MEMORY = False + + +class _AuxFakeBackendSupport(PandaPowerBackend): + def cannot_handle_more_than_2_busbar(self): + """dont do it at home !""" + return self.can_handle_more_than_2_busbar() + + +class _AuxFakeBackendNoSupport(PandaPowerBackend): + def can_handle_more_than_2_busbar(self): + """dont do it at home !""" + return self.cannot_handle_more_than_2_busbar() + + +class _AuxFakeBackendNoCalled(PandaPowerBackend): + def can_handle_more_than_2_busbar(self): + """dont do it at home !""" + pass + def cannot_handle_more_than_2_busbar(self): + """dont do it at home !""" + pass + + +class TestRightNumberNbBus(unittest.TestCase): + """This test that, when changing n_busbar in make it is + back propagated where it needs in the class attribute (this includes + testing that the observation_space, action_space, runner, environment etc. + are all 'informed' about this feature) + + This class also tests than when the implementation of the backend does not + use the new `can_handle_more_than_2_busbar` or `cannot_handle_more_than_2_busbar` + then the legacy behaviour is used (only 2 busbar per substation even if the + user asked for a different number) + """ + def _aux_fun_test(self, env, n_busbar): + assert type(env).n_busbar_per_sub == n_busbar, f"type(env).n_busbar_per_sub = {type(env).n_busbar_per_sub} != {n_busbar}" + assert type(env.backend).n_busbar_per_sub == n_busbar, f"env.backend).n_busbar_per_sub = {type(env.backend).n_busbar_per_sub} != {n_busbar}" + assert type(env.action_space).n_busbar_per_sub == n_busbar, f"type(env.action_space).n_busbar_per_sub = {type(env.action_space).n_busbar_per_sub} != {n_busbar}" + assert type(env.observation_space).n_busbar_per_sub == n_busbar, f"type(env.observation_space).n_busbar_per_sub = {type(env.observation_space).n_busbar_per_sub} != {n_busbar}" + obs = env.reset(seed=0, options={"time serie id": 0}) + assert type(obs).n_busbar_per_sub == n_busbar, f"type(obs).n_busbar_per_sub = {type(obs).n_busbar_per_sub} != {n_busbar}" + act = env.action_space() + assert type(act).n_busbar_per_sub == n_busbar, f"type(act).n_busbar_per_sub = {type(act).n_busbar_per_sub} != {n_busbar}" + + def test_fail_if_not_int(self): + with self.assertRaises(Grid2OpException): + env = grid2op.make("l2rpn_case14_sandbox", backend=_AuxFakeBackendSupport(), test=True, n_busbar="froiy", _add_to_name=type(self).__name__+"_wrong_str") + with self.assertRaises(Grid2OpException): + env = grid2op.make("l2rpn_case14_sandbox", backend=_AuxFakeBackendSupport(), test=True, n_busbar=3.5, _add_to_name=type(self).__name__+"_wrong_float") + + def test_regular_env(self): + with warnings.catch_warnings(): + 
warnings.filterwarnings("ignore") + env = grid2op.make("l2rpn_case14_sandbox", backend=_AuxFakeBackendSupport(), test=True, _add_to_name=type(self).__name__+"_2") + self._aux_fun_test(env, DEFAULT_N_BUSBAR_PER_SUB) + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make("l2rpn_case14_sandbox", backend=_AuxFakeBackendSupport(), test=True, n_busbar=3, _add_to_name=type(self).__name__+"_3") + self._aux_fun_test(env, 3) + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make("l2rpn_case14_sandbox", backend=_AuxFakeBackendSupport(), test=True, n_busbar=1, _add_to_name=type(self).__name__+"_3") + self._aux_fun_test(env, 1) + + def test_multimix_env(self): + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make("l2rpn_neurips_2020_track2", backend=_AuxFakeBackendSupport(), test=True, _add_to_name=type(self).__name__+"_2") + self._aux_fun_test(env, DEFAULT_N_BUSBAR_PER_SUB) + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make("l2rpn_neurips_2020_track2", backend=_AuxFakeBackendSupport(), test=True, n_busbar=3, _add_to_name=type(self).__name__+"_3") + self._aux_fun_test(env, 3) + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make("l2rpn_neurips_2020_track2", backend=_AuxFakeBackendSupport(), test=True, n_busbar=1, _add_to_name=type(self).__name__+"_3") + self._aux_fun_test(env, 1) + + def test_masked_env(self): + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = MaskedEnvironment(grid2op.make("l2rpn_case14_sandbox", backend=_AuxFakeBackendSupport(), test=True, _add_to_name=type(self).__name__+"_mask_2"), + lines_of_interest=np.ones(shape=20, dtype=bool)) + self._aux_fun_test(env, DEFAULT_N_BUSBAR_PER_SUB) + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = MaskedEnvironment(grid2op.make("l2rpn_case14_sandbox", backend=_AuxFakeBackendSupport(), test=True, n_busbar=3, _add_to_name=type(self).__name__+"_mask_3"), + lines_of_interest=np.ones(shape=20, dtype=bool)) + self._aux_fun_test(env, 3) + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = MaskedEnvironment(grid2op.make("l2rpn_case14_sandbox", backend=_AuxFakeBackendSupport(), test=True, n_busbar=1, _add_to_name=type(self).__name__+"_mask_1"), + lines_of_interest=np.ones(shape=20, dtype=bool)) + self._aux_fun_test(env, 1) + + def test_to_env(self): + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = TimedOutEnvironment(grid2op.make("l2rpn_case14_sandbox", backend=_AuxFakeBackendSupport(), test=True, _add_to_name=type(self).__name__+"_to_2"), + time_out_ms=3000) + self._aux_fun_test(env, DEFAULT_N_BUSBAR_PER_SUB) + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = TimedOutEnvironment(grid2op.make("l2rpn_case14_sandbox", backend=_AuxFakeBackendSupport(), test=True, n_busbar=3, _add_to_name=type(self).__name__+"_to_3"), + time_out_ms=3000) + self._aux_fun_test(env, 3) + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = TimedOutEnvironment(grid2op.make("l2rpn_case14_sandbox", backend=_AuxFakeBackendSupport(), test=True, n_busbar=1, _add_to_name=type(self).__name__+"_to_1"), + time_out_ms=3000) + self._aux_fun_test(env, 1) + + def test_xxxhandle_more_than_2_busbar_not_called(self): + """when using a backend that did not called the `can_handle_more_than_2_busbar_not_called` + nor the 
`cannot_handle_more_than_2_busbar_not_called` then it's equivalent + to not support this new feature.""" + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make("l2rpn_case14_sandbox", backend=_AuxFakeBackendNoCalled(), test=True, _add_to_name=type(self).__name__+"_nocall_2") + self._aux_fun_test(env, DEFAULT_N_BUSBAR_PER_SUB) + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make("l2rpn_case14_sandbox", backend=_AuxFakeBackendNoCalled(), test=True, n_busbar=3, _add_to_name=type(self).__name__+"_nocall_3") + self._aux_fun_test(env, DEFAULT_N_BUSBAR_PER_SUB) + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make("l2rpn_case14_sandbox", backend=_AuxFakeBackendNoCalled(), test=True, n_busbar=1, _add_to_name=type(self).__name__+"_nocall_1") + self._aux_fun_test(env, DEFAULT_N_BUSBAR_PER_SUB) + + def test_cannot_handle_more_than_2_busbar_not_called(self): + """when using a backend that called `cannot_handle_more_than_2_busbar_not_called` then it's equivalent + to not support this new feature.""" + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make("l2rpn_case14_sandbox", backend=_AuxFakeBackendNoSupport(), test=True, _add_to_name=type(self).__name__+"_dontcalled_2") + self._aux_fun_test(env, DEFAULT_N_BUSBAR_PER_SUB) + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make("l2rpn_case14_sandbox", backend=_AuxFakeBackendNoSupport(), test=True, n_busbar=3, _add_to_name=type(self).__name__+"_dontcalled_3") + self._aux_fun_test(env, DEFAULT_N_BUSBAR_PER_SUB) + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make("l2rpn_case14_sandbox", backend=_AuxFakeBackendNoSupport(), test=True, n_busbar=1, _add_to_name=type(self).__name__+"_dontcalled_1") + self._aux_fun_test(env, DEFAULT_N_BUSBAR_PER_SUB) + + def test_env_copy(self): + """test env copy does work correctly""" + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make("l2rpn_case14_sandbox", backend=_AuxFakeBackendSupport(), test=True, _add_to_name=type(self).__name__+"_copy_2") + self._aux_fun_test(env, DEFAULT_N_BUSBAR_PER_SUB) + env_cpy = env.copy() + self._aux_fun_test(env_cpy, DEFAULT_N_BUSBAR_PER_SUB) + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make("l2rpn_case14_sandbox", backend=_AuxFakeBackendSupport(), test=True, n_busbar=3, _add_to_name=type(self).__name__+"_copy_3") + self._aux_fun_test(env, 3) + env_cpy = env.copy() + self._aux_fun_test(env_cpy, 3) + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = grid2op.make("l2rpn_case14_sandbox", backend=_AuxFakeBackendSupport(), test=True, n_busbar=1, _add_to_name=type(self).__name__+"_copy_1") + self._aux_fun_test(env, 1) + env_cpy = env.copy() + self._aux_fun_test(env_cpy, 1) + + def test_two_env_same_name(self): + """test i can load 2 env with the same name but different n_busbar""" + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env_2 = grid2op.make("l2rpn_case14_sandbox", backend=_AuxFakeBackendSupport(), test=True, _add_to_name=type(self).__name__+"_same_name") + self._aux_fun_test(env_2, DEFAULT_N_BUSBAR_PER_SUB) + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env_3 = grid2op.make("l2rpn_case14_sandbox", backend=_AuxFakeBackendSupport(), test=True, n_busbar=3, _add_to_name=type(self).__name__+"_same_name") + 
self._aux_fun_test(env_3, 3) # check env_3 has indeed 3 buses + self._aux_fun_test(env_2, DEFAULT_N_BUSBAR_PER_SUB) # check env_2 is not modified + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env_1 = grid2op.make("l2rpn_case14_sandbox", backend=_AuxFakeBackendSupport(), test=True, n_busbar=1, _add_to_name=type(self).__name__+"_same_name") + self._aux_fun_test(env_1, 1) # check env_1 has indeed 1 bus + self._aux_fun_test(env_3, 3) # check env_3 is not modified + self._aux_fun_test(env_2, DEFAULT_N_BUSBAR_PER_SUB) # check env_2 is not modified + + +class _TestAgentRightNbBus(BaseAgent): + def __init__(self, action_space: ActionSpace, nb_bus : int): + super().__init__(action_space) + self.nb_bus = nb_bus + assert type(self.action_space).n_busbar_per_sub == self.nb_bus + + def act(self, observation: BaseObservation, reward: float, done: bool = False) -> BaseAction: + assert type(observation).n_busbar_per_sub == self.nb_bus + return self.action_space() + + +class TestRunnerNbBus(unittest.TestCase): + """Test that the runner is compatible with the feature""" + def test_single_process(self): + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + # 3 busbars as asked + env_3 = grid2op.make("l2rpn_case14_sandbox", backend=_AuxFakeBackendSupport(), test=True, n_busbar=3, _add_to_name=type(self).__name__+"_3") + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + # 2 busbars only because backend does not support it + env_2 = grid2op.make("l2rpn_case14_sandbox", backend=_AuxFakeBackendNoSupport(), test=True, n_busbar=3, _add_to_name=type(self).__name__+"_2") + + agent_3 = _TestAgentRightNbBus(env_3.action_space, 3) + agent_2 = _TestAgentRightNbBus(env_2.action_space, 2) + + runner_3 = Runner(**env_3.get_params_for_runner(), agentClass=None, agentInstance=agent_3) + res = runner_3.run(nb_episode=1, max_iter=5) + + runner_2 = Runner(**env_2.get_params_for_runner(), agentClass=None, agentInstance=agent_2) + res = runner_2.run(nb_episode=1, max_iter=5) + + with self.assertRaises(AssertionError): + runner_3_ko = Runner(**env_3.get_params_for_runner(), agentClass=None, agentInstance=agent_2) + res = runner_3_ko.run(nb_episode=1, max_iter=5) + + with self.assertRaises(AssertionError): + runner_2_ko = Runner(**env_2.get_params_for_runner(), agentClass=None, agentInstance=agent_3) + res = runner_2_ko.run(nb_episode=1, max_iter=5) + + def test_two_env_same_name(self): + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env_2 = grid2op.make("l2rpn_case14_sandbox", backend=_AuxFakeBackendSupport(), test=True, _add_to_name=type(self).__name__+"_same_name") + + agent_2 = _TestAgentRightNbBus(env_2.action_space, 2) + runner_2 = Runner(**env_2.get_params_for_runner(), agentClass=None, agentInstance=agent_2) + res = runner_2.run(nb_episode=1, max_iter=5) + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env_3 = grid2op.make("l2rpn_case14_sandbox", backend=_AuxFakeBackendSupport(), test=True, n_busbar=3, _add_to_name=type(self).__name__+"_same_name") + agent_3 = _TestAgentRightNbBus(env_3.action_space, 3) + runner_3 = Runner(**env_3.get_params_for_runner(), agentClass=None, agentInstance=agent_3) + res = runner_3.run(nb_episode=1, max_iter=5) + + with self.assertRaises(AssertionError): + runner_3_ko = Runner(**env_3.get_params_for_runner(), agentClass=None, agentInstance=agent_2) + res = runner_3_ko.run(nb_episode=1, max_iter=5) + + with self.assertRaises(AssertionError): + runner_2_ko = 
Runner(**env_2.get_params_for_runner(), agentClass=None, agentInstance=agent_3) + res = runner_2_ko.run(nb_episode=1, max_iter=5) + + def test_two_process(self): + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + # 3 busbars as asked + env_3 = grid2op.make("l2rpn_case14_sandbox", backend=_AuxFakeBackendSupport(), test=True, n_busbar=3, _add_to_name=type(self).__name__+"_3_twocores") + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + # 2 busbars only because backend does not support it + env_2 = grid2op.make("l2rpn_case14_sandbox", backend=_AuxFakeBackendNoSupport(), test=True, n_busbar=3, _add_to_name=type(self).__name__+"_2_twocores") + + agent_3 = _TestAgentRightNbBus(env_3.action_space, 3) + agent_2 = _TestAgentRightNbBus(env_2.action_space, 2) + + runner_3 = Runner(**env_3.get_params_for_runner(), agentClass=None, agentInstance=agent_3) + res = runner_3.run(nb_episode=2, nb_process=2, max_iter=5) + + runner_2 = Runner(**env_2.get_params_for_runner(), agentClass=None, agentInstance=agent_2) + res = runner_2.run(nb_episode=2, nb_process=2, max_iter=5) + + # with self.assertRaises(multiprocessing.pool.RemoteTraceback): + with self.assertRaises(AssertionError): + runner_3_ko = Runner(**env_3.get_params_for_runner(), agentClass=None, agentInstance=agent_2) + res = runner_3_ko.run(nb_episode=2, nb_process=2, max_iter=5) + + +class TestGridObjtNbBus(unittest.TestCase): + """Test that the GridObj class is fully compatible with this feature""" + def setUp(self) -> None: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = grid2op.make("l2rpn_case14_sandbox", + backend=_AuxFakeBackendSupport(), + test=True, + n_busbar=3, + _add_to_name=type(self).__name__) + return super().setUp() + + def tearDown(self) -> None: + self.env.close() + return super().tearDown() + + def test_global_bus_to_local_int(self): + """test the function :func:`grid2op.Space.GridObjects.global_bus_to_local_int` """ + cls_env = type(self.env) + # easy case: everything on bus 1 + res = cls_env.global_bus_to_local_int(cls_env.gen_to_subid[0], cls_env.gen_to_subid[0]) + assert res == 1 + + # bit less easy: one generator is disconnected + gen_off = 2 + res = cls_env.global_bus_to_local_int(-1, cls_env.gen_to_subid[gen_off]) + assert res == -1 + + # still a bit more complex: one gen on busbar 2 + gen_on_2 = 3 + res = cls_env.global_bus_to_local_int(cls_env.gen_to_subid[gen_on_2] + cls_env.n_sub, cls_env.gen_to_subid[gen_on_2]) + assert res == 2 + + # and now a generator on busbar 3 + gen_on_3 = 4 + res = cls_env.global_bus_to_local_int(cls_env.gen_to_subid[gen_on_3] + 2 * cls_env.n_sub, cls_env.gen_to_subid[gen_on_3]) + assert res == 3 + + with self.assertRaises(EnvError): + gen_on_4 = 4 + res = cls_env.global_bus_to_local_int(cls_env.gen_to_subid[gen_on_4] + 3 * cls_env.n_sub, cls_env.gen_to_subid[gen_on_4]) + + def test_global_bus_to_local(self): + """test the function :func:`grid2op.Space.GridObjects.global_bus_to_local` """ + cls_env = type(self.env) + # easy case: everything on bus 1 + res = cls_env.global_bus_to_local(cls_env.gen_to_subid, cls_env.gen_to_subid) + assert (res == np.ones(cls_env.n_gen, dtype=int)).all() + + # bit less easy: one generator is disconnected + gen_off = 2 + inp_vect = 1 * cls_env.gen_to_subid + inp_vect[gen_off] = -1 + res = cls_env.global_bus_to_local(inp_vect, cls_env.gen_to_subid) + vect = np.ones(cls_env.n_gen, dtype=int) + vect[gen_off] = -1 + assert (res == vect).all() + + # still a bit more complex: one gen on busbar 2 
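+ # reminder of the convention used in these tests (and in the assertions below): the global id of local busbar k at substation sub_id is sub_id + (k - 1) * n_sub, hence the n_sub and 2 * n_sub offsets for busbars 2 and 3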
+ gen_on_2 = 3 + inp_vect = 1 * cls_env.gen_to_subid + inp_vect[gen_on_2] = cls_env.gen_to_subid[gen_on_2] + cls_env.n_sub + res = cls_env.global_bus_to_local(inp_vect, cls_env.gen_to_subid) + vect = np.ones(cls_env.n_gen, dtype=int) + vect[gen_on_2] = 2 + assert (res == vect).all() + + # and now a generator on busbar 3 + gen_on_3 = 4 + inp_vect = 1 * cls_env.gen_to_subid + inp_vect[gen_on_3] = cls_env.gen_to_subid[gen_on_3] + 2 * cls_env.n_sub + res = cls_env.global_bus_to_local(inp_vect, cls_env.gen_to_subid) + vect = np.ones(cls_env.n_gen, dtype=int) + vect[gen_on_3] = 3 + assert (res == vect).all() + + # and now we mix all + inp_vect = 1 * cls_env.gen_to_subid + inp_vect[gen_off] = -1 + inp_vect[gen_on_2] = cls_env.gen_to_subid[gen_on_2] + cls_env.n_sub + inp_vect[gen_on_3] = cls_env.gen_to_subid[gen_on_3] + 2 * cls_env.n_sub + res = cls_env.global_bus_to_local(inp_vect, cls_env.gen_to_subid) + vect = np.ones(cls_env.n_gen, dtype=int) + vect[gen_off] = -1 + vect[gen_on_2] = 2 + vect[gen_on_3] = 3 + assert (res == vect).all() + + def test_local_bus_to_global_int(self): + """test the function :func:`grid2op.Space.GridObjects.local_bus_to_global_int` """ + cls_env = type(self.env) + # easy case: everything on bus 1 + res = cls_env.local_bus_to_global_int(1, cls_env.gen_to_subid[0]) + assert res == cls_env.gen_to_subid[0] + + # bit less easy: one generator is disconnected + gen_off = 2 + res = cls_env.local_bus_to_global_int(-1, cls_env.gen_to_subid[gen_off]) + assert res == -1 + + # still a bit more complex: one gen on busbar 2 + gen_on_2 = 3 + res = cls_env.local_bus_to_global_int(2, cls_env.gen_to_subid[gen_on_2]) + assert res == cls_env.gen_to_subid[gen_on_2] + cls_env.n_sub + + # and now a generator on busbar 3 + gen_on_3 = 4 + res = cls_env.local_bus_to_global_int(3, cls_env.gen_to_subid[gen_on_3]) + assert res == cls_env.gen_to_subid[gen_on_3] + 2 * cls_env.n_sub + + def test_local_bus_to_global(self): + """test the function :func:`grid2op.Space.GridObjects.local_bus_to_global` """ + cls_env = type(self.env) + # easy case: everything on bus 1 + res = cls_env.local_bus_to_global(np.ones(cls_env.n_gen, dtype=int), cls_env.gen_to_subid) + assert (res == cls_env.gen_to_subid).all() + + # bit less easy: one generator is disconnected + gen_off = 2 + vect = np.ones(cls_env.n_gen, dtype=int) + vect[gen_off] = -1 + res = cls_env.local_bus_to_global(vect, cls_env.gen_to_subid) + assert (res == cls_env.gen_to_subid).sum() == cls_env.n_gen - 1 + assert res[gen_off] == -1 + + # still a bit more complex: one gen on busbar 2 + gen_on_2 = 3 + vect = np.ones(cls_env.n_gen, dtype=int) + vect[gen_on_2] = 2 + res = cls_env.local_bus_to_global(vect, cls_env.gen_to_subid) + assert (res == cls_env.gen_to_subid).sum() == cls_env.n_gen - 1 + assert res[gen_on_2] == cls_env.gen_to_subid[gen_on_2] + cls_env.n_sub + + # and now a generator on busbar 3 + gen_on_3 = 4 + vect = np.ones(cls_env.n_gen, dtype=int) + vect[gen_on_3] = 3 + res = cls_env.local_bus_to_global(vect, cls_env.gen_to_subid) + assert (res == cls_env.gen_to_subid).sum() == cls_env.n_gen - 1 + assert res[gen_on_3] == cls_env.gen_to_subid[gen_on_3] + 2 * cls_env.n_sub + + # and now we mix all + vect = np.ones(cls_env.n_gen, dtype=int) + vect[gen_off] = -1 + vect[gen_on_2] = 2 + vect[gen_on_3] = 3 + res = cls_env.local_bus_to_global(vect, cls_env.gen_to_subid) + assert res[gen_off] == -1 + assert res[gen_on_2] == cls_env.gen_to_subid[gen_on_2] + cls_env.n_sub + assert res[gen_on_3] == cls_env.gen_to_subid[gen_on_3] + 2 * cls_env.n_sub + + +class 
TestAction_3busbars(unittest.TestCase): + """This class test the Agent can perform actions (and that actions are properly working) + even if there are 3 busbars per substation + """ + def get_nb_bus(self): + return 3 + + def setUp(self) -> None: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = grid2op.make("educ_case14_storage", + backend=_AuxFakeBackendSupport(), + action_class=CompleteAction, + test=True, + n_busbar=self.get_nb_bus(), + _add_to_name=type(self).__name__ + f'_{self.get_nb_bus()}') + return super().setUp() + + def tearDown(self) -> None: + self.env.close() + return super().tearDown() + + def _aux_test_act_consistent_as_dict(self, act_as_dict, name_xxx, el_id, bus_val): + if name_xxx is not None: + # regular element in the topo_vect + assert "set_bus_vect" in act_as_dict + tmp = act_as_dict["set_bus_vect"] + assert len(tmp['modif_subs_id']) == 1 + sub_id = tmp['modif_subs_id'][0] + assert name_xxx[el_id] in tmp[sub_id] + assert tmp[sub_id][name_xxx[el_id]]["new_bus"] == bus_val + else: + # el not in topo vect (eg shunt) + assert "shunt" in act_as_dict + tmp = act_as_dict["shunt"]["shunt_bus"] + assert tmp[el_id] == bus_val + + def _aux_test_act_consistent_as_serializable_dict(self, act_as_dict, el_nms, el_id, bus_val): + if el_nms is not None: + # regular element + assert "set_bus" in act_as_dict + assert el_nms in act_as_dict["set_bus"] + tmp = act_as_dict["set_bus"][el_nms] + assert tmp == [(el_id, bus_val)] + else: + # shunts of other things not in the topo vect + assert "shunt" in act_as_dict + tmp = act_as_dict["shunt"]["shunt_bus"] + assert tmp == [(el_id, bus_val)] + + def _aux_test_action(self, act : BaseAction, name_xxx, el_id, bus_val, el_nms): + assert act.can_affect_something() + assert not act.is_ambiguous()[0] + tmp = f"{act}" # test the print does not crash + tmp = act.as_dict() # test I can convert to dict + self._aux_test_act_consistent_as_dict(tmp, name_xxx, el_id, bus_val) + tmp = act.as_serializable_dict() # test I can convert to another type of dict + self._aux_test_act_consistent_as_serializable_dict(tmp, el_nms, el_id, bus_val) + + def _aux_test_set_bus_onebus(self, nm_prop, el_id, bus_val, name_xxx, el_nms): + act = self.env.action_space() + setattr(act, nm_prop, [(el_id, bus_val)]) + self._aux_test_action(act, name_xxx, el_id, bus_val, el_nms) + + def test_set_load_bus(self): + self._aux_test_set_bus_onebus("load_set_bus", 0, -1, type(self.env).name_load, 'loads_id') + for bus in range(type(self.env).n_busbar_per_sub): + self._aux_test_set_bus_onebus("load_set_bus", 0, bus + 1, type(self.env).name_load, 'loads_id') + act = self.env.action_space() + with self.assertRaises(AmbiguousAction): + act.load_set_bus = [(0, type(self.env).n_busbar_per_sub + 1)] + + def test_set_gen_bus(self): + self._aux_test_set_bus_onebus("gen_set_bus", 0, -1, type(self.env).name_gen, 'generators_id') + for bus in range(type(self.env).n_busbar_per_sub): + self._aux_test_set_bus_onebus("gen_set_bus", 0, bus + 1, type(self.env).name_gen, 'generators_id') + act = self.env.action_space() + with self.assertRaises(AmbiguousAction): + act.gen_set_bus = [(0, type(self.env).n_busbar_per_sub + 1)] + + def test_set_storage_bus(self): + self._aux_test_set_bus_onebus("storage_set_bus", 0, -1, type(self.env).name_storage, 'storages_id') + for bus in range(type(self.env).n_busbar_per_sub): + self._aux_test_set_bus_onebus("storage_set_bus", 0, bus + 1, type(self.env).name_storage, 'storages_id') + act = self.env.action_space() + with 
self.assertRaises(AmbiguousAction): + act.storage_set_bus = [(0, type(self.env).n_busbar_per_sub + 1)] + + def test_set_lineor_bus(self): + self._aux_test_set_bus_onebus("line_or_set_bus", 0, -1, type(self.env).name_line, 'lines_or_id') + for bus in range(type(self.env).n_busbar_per_sub): + self._aux_test_set_bus_onebus("line_or_set_bus", 0, bus + 1, type(self.env).name_line, 'lines_or_id') + act = self.env.action_space() + with self.assertRaises(AmbiguousAction): + act.line_or_set_bus = [(0, type(self.env).n_busbar_per_sub + 1)] + + def test_set_lineex_bus(self): + self._aux_test_set_bus_onebus("line_ex_set_bus", 0, -1, type(self.env).name_line, 'lines_ex_id') + for bus in range(type(self.env).n_busbar_per_sub): + self._aux_test_set_bus_onebus("line_ex_set_bus", 0, bus + 1, type(self.env).name_line, 'lines_ex_id') + act = self.env.action_space() + with self.assertRaises(AmbiguousAction): + act.line_ex_set_bus = [(0, type(self.env).n_busbar_per_sub + 1)] + + def _aux_test_set_bus_onebus_sub_setbus(self, nm_prop, sub_id, el_id_sub, bus_val, name_xxx, el_nms): + # for now works only with lines_ex (in other words, the name_xxx and name_xxx should be + # provided by the user and it's probably not a good idea to use something + # else than type(self.env).name_line and lines_ex_id + act = self.env.action_space() + buses_val = np.zeros(type(self.env).sub_info[sub_id], dtype=int) + buses_val[el_id_sub] = bus_val + setattr(act, nm_prop, [(sub_id, buses_val)]) + el_id_in_topo_vect = np.where(act._set_topo_vect == bus_val)[0][0] + el_type = np.where(type(self.env).grid_objects_types[el_id_in_topo_vect][1:] != -1)[0][0] + el_id = type(self.env).grid_objects_types[el_id_in_topo_vect][el_type + 1] + self._aux_test_action(act, name_xxx, el_id, bus_val, el_nms) + + def test_sub_set_bus(self): + self._aux_test_set_bus_onebus_sub_setbus("sub_set_bus", 1, 0, -1, type(self.env).name_line, 'lines_ex_id') + for bus in range(type(self.env).n_busbar_per_sub): + self._aux_test_set_bus_onebus_sub_setbus("sub_set_bus", 1, 0, bus + 1, type(self.env).name_line, 'lines_ex_id') + act = self.env.action_space() + with self.assertRaises(AmbiguousAction): + act.line_ex_set_bus = [(0, type(self.env).n_busbar_per_sub + 1)] + + def test_change_deactivated(self): + assert "set_bus" in type(self.env.action_space()).authorized_keys + assert self.env.action_space.supports_type("set_bus") + + assert "change_bus" not in type(self.env.action_space()).authorized_keys + assert not self.env.action_space.supports_type("change_bus") + + def _aux_test_action_shunt(self, act : BaseAction, el_id, bus_val): + name_xxx = None + el_nms = None + # self._aux_test_action(act, type(self.env).name_shunt, el_id, bus_val, None) # does not work for a lot of reasons + assert not act.is_ambiguous()[0] + tmp = f"{act}" # test the print does not crash + tmp = act.as_dict() # test I can convert to dict + self._aux_test_act_consistent_as_dict(tmp, name_xxx, el_id, bus_val) + tmp = act.as_serializable_dict() # test I can convert to another type of dict + self._aux_test_act_consistent_as_serializable_dict(tmp, el_nms, el_id, bus_val) + + def test_shunt(self): + el_id = 0 + bus_val = -1 + act = self.env.action_space({"shunt": {"set_bus": [(el_id, bus_val)]}}) + self._aux_test_action_shunt(act, el_id, bus_val) + + for bus_val in range(type(self.env).n_busbar_per_sub): + act = self.env.action_space({"shunt": {"set_bus": [(el_id, bus_val + 1)]}}) + self._aux_test_action_shunt(act, el_id, bus_val + 1) + + act = self.env.action_space() + with 
self.assertRaises(AmbiguousAction): + act = self.env.action_space({"shunt": {"set_bus": [(el_id, type(self.env).n_busbar_per_sub + 1)]}}) + + +class TestAction_1busbar(TestAction_3busbars): + """This class test the Agent can perform actions (and that actions are properly working) + even if there is only 1 busbar per substation + """ + def get_nb_bus(self): + return 1 + + +class TestActionSpaceNbBus(unittest.TestCase): + """This function test the action space, basically the counting + of unique possible topologies per substation + """ + def get_nb_bus(self): + return 3 + + def get_env_nm(self): + return "educ_case14_storage" + + def setUp(self) -> None: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = grid2op.make(self.get_env_nm(), + backend=_AuxFakeBackendSupport(), + action_class=CompleteAction, + test=True, + n_busbar=self.get_nb_bus(), + _add_to_name=type(self).__name__ + f'_{self.get_nb_bus()}') + return super().setUp() + + def tearDown(self) -> None: + self.env.close() + return super().tearDown() + + def test_legacy_all_unitary_topologies_set_behaviour(self): + """make sure nothing broke for 2 busbars per substation even if the implementation changes""" + class SubMe(TestActionSpaceNbBus): + def get_nb_bus(self): + return 2 + + tmp = SubMe() + tmp.setUp() + res = tmp.env.action_space.get_all_unitary_topologies_set(tmp.env.action_space, _count_only=True) + res_noalone = tmp.env.action_space.get_all_unitary_topologies_set(tmp.env.action_space, + add_alone_line=False, + _count_only=True) + tmp.tearDown() + assert res == [3, 29, 5, 31, 15, 113, 4, 0, 15, 3, 3, 3, 7, 3], f"found: {res}" + assert res_noalone == [0, 25, 3, 26, 11, 109, 0, 0, 11, 0, 0, 0, 4, 0], f"found: {res_noalone}" + + class SubMe2(TestActionSpaceNbBus): + def get_nb_bus(self): + return 2 + def get_env_nm(self): + return "l2rpn_idf_2023" + tmp2 = SubMe2() + tmp2.setUp() + res = tmp2.env.action_space.get_all_unitary_topologies_set(tmp2.env.action_space, _count_only=True) + res_noalone = tmp2.env.action_space.get_all_unitary_topologies_set(tmp2.env.action_space, + add_alone_line=False, + _count_only=True) + tmp2.tearDown() + assert res == [3, 3, 7, 9, 16, 3, 3, 13, 2, 0, 57, 253, 3, 3, 241, 3, 63, 5, 29, 3, + 3, 3, 29, 7, 7, 3, 57, 3, 3, 8, 7, 31, 3, 29, 3, 3, 32, 4, 3, 29, 3, + 113, 3, 3, 13, 13, 7, 3, 65505, 3, 7, 3, 3, 125, 13, 497, 3, 3, 505, + 13, 15, 57, 2, 4, 15, 61, 3, 8, 63, 121, 4, 3, 0, 3, 31, 5, 1009, 3, + 3, 1017, 2, 7, 13, 3, 61, 3, 0, 3, 63, 25, 3, 253, 3, 31, 3, 61, 3, + 3, 3, 2033, 3, 3, 15, 13, 61, 7, 5, 3, 3, 15, 0, 0, 9, 3, 3, 0, 0, 3], f"found: {res}" + assert res_noalone == [0, 0, 4, 7, 11, 0, 0, 10, 0, 0, 53, 246, 0, 0, 236, 0, 57, 3, + 25, 0, 0, 0, 25, 4, 4, 0, 53, 0, 0, 4, 4, 26, 0, 25, 0, 0, 26, + 0, 0, 25, 0, 109, 0, 0, 10, 10, 4, 0, 65493, 0, 4, 0, 0, 119, + 10, 491, 0, 0, 498, 10, 11, 53, 0, 0, 11, 56, 0, 4, 57, 116, + 0, 0, 0, 0, 26, 3, 1002, 0, 0, 1009, 0, 4, 10, 0, 56, 0, 0, + 0, 57, 22, 0, 246, 0, 26, 0, 56, 0, 0, 0, 2025, 0, 0, 11, 10, + 56, 4, 3, 0, 0, 11, 0, 0, 7, 0, 0, 0, 0, 0], f"found: {res_noalone}" + + def test_is_ok_symmetry(self): + """test the :func:`grid2op.Action.SerializableActionSpace._is_ok_symmetry`""" + ok = np.array([1, 1, 1, 1]) + assert type(self.env.action_space)._is_ok_symmetry(2, ok), f"should not break for {ok}" + ok = np.array([1, 2, 1, 1]) + assert type(self.env.action_space)._is_ok_symmetry(2, ok), f"should not break for {ok}" + ok = np.array([1, 2, 3, 1]) + assert type(self.env.action_space)._is_ok_symmetry(3, ok), f"should not 
break for {ok}" + ok = np.array([1, 1, 2, 3]) + assert type(self.env.action_space)._is_ok_symmetry(3, ok), f"should not break for {ok}" + ok = np.array([1, 1, 2, 2]) + assert type(self.env.action_space)._is_ok_symmetry(4, ok), f"should not break for {ok}" + + ko = np.array([1, 3, 2, 1]) # relabel 3 -> 2, so this topology is not valid + assert not type(self.env.action_space)._is_ok_symmetry(3, ko), f"should break for {ko}" + ko = np.array([1, 1, 3, 2]) # relabel 3 -> 2, so this topology is not valid + assert not type(self.env.action_space)._is_ok_symmetry(3, ko), f"should break for {ko}" + + ko = np.array([1, 3, 2, 1]) # relabel 3 -> 2, so this topology is not valid + assert not type(self.env.action_space)._is_ok_symmetry(4, ko), f"should break for {ko}" + ko = np.array([1, 1, 3, 2]) # relabel 3 -> 2, so this topology is not valid + assert not type(self.env.action_space)._is_ok_symmetry(4, ko), f"should break for {ko}" + + def test_is_ok_line(self): + """test the :func:`grid2op.Action.SerializableActionSpace._is_ok_line`""" + lines_id = np.array([1, 3]) + n_busbar_per_sub = 2 + ok = np.array([1, 1, 1, 1]) + assert type(self.env.action_space)._is_ok_line(n_busbar_per_sub, ok, lines_id), f"should not break for {ok}" + ok = np.array([1, 2, 2, 1]) + assert type(self.env.action_space)._is_ok_line(n_busbar_per_sub, ok, lines_id), f"should not break for {ok}" + ko = np.array([1, 2, 1, 2]) # no lines on bus 1 + assert not type(self.env.action_space)._is_ok_line(n_busbar_per_sub, ko, lines_id), f"should break for {ko}" + + n_busbar_per_sub = 3 # should have no impact + ok = np.array([1, 1, 1, 1]) + assert type(self.env.action_space)._is_ok_line(n_busbar_per_sub, ok, lines_id), f"should not break for {ok}" + ok = np.array([1, 2, 2, 1]) + assert type(self.env.action_space)._is_ok_line(n_busbar_per_sub, ok, lines_id), f"should not break for {ok}" + ko = np.array([1, 2, 1, 2]) # no lines on bus 1 + assert not type(self.env.action_space)._is_ok_line(n_busbar_per_sub, ko, lines_id), f"should break for {ko}" + + def test_2_obj_per_bus(self): + """test the :func:`grid2op.Action.SerializableActionSpace._is_ok_2`""" + n_busbar_per_sub = 2 + ok = np.array([1, 1, 1, 1]) + assert type(self.env.action_space)._is_ok_2(n_busbar_per_sub, ok), f"should not break for {ok}" + ok = np.array([1, 2, 2, 1]) + assert type(self.env.action_space)._is_ok_2(n_busbar_per_sub, ok), f"should not break for {ok}" + ok = np.array([1, 2, 1, 2]) + assert type(self.env.action_space)._is_ok_2(n_busbar_per_sub, ok), f"should not break for {ok}" + + ko = np.array([1, 2, 2, 2]) # only 1 element on bus 1 + assert not type(self.env.action_space)._is_ok_2(n_busbar_per_sub, ko), f"should break for {ko}" + ko = np.array([1, 2, 1, 1]) # only 1 element on bus 2 + assert not type(self.env.action_space)._is_ok_2(n_busbar_per_sub, ko), f"should break for {ko}" + ko = np.array([1, 1, 2, 2, 3]) # only 1 element on bus 3 + assert not type(self.env.action_space)._is_ok_2(n_busbar_per_sub, ko), f"should break for {ko}" + + n_busbar_per_sub = 3 + ok = np.array([1, 1, 1, 1]) + assert type(self.env.action_space)._is_ok_2(n_busbar_per_sub, ok), f"should not break for {ok}" + ok = np.array([1, 2, 2, 1]) + assert type(self.env.action_space)._is_ok_2(n_busbar_per_sub, ok), f"should not break for {ok}" + ok = np.array([1, 2, 1, 2]) + assert type(self.env.action_space)._is_ok_2(n_busbar_per_sub, ok), f"should not break for {ok}" + + ko = np.array([1, 2, 2, 2]) # only 1 element on bus 1 + assert not type(self.env.action_space)._is_ok_2(n_busbar_per_sub, ko), 
f"should break for {ko}" + ko = np.array([1, 2, 1, 1]) # only 1 element on bus 2 + assert not type(self.env.action_space)._is_ok_2(n_busbar_per_sub, ko), f"should break for {ko}" + ko = np.array([1, 1, 2, 2, 3]) # only 1 element on bus 3 + assert not type(self.env.action_space)._is_ok_2(n_busbar_per_sub, ko), f"should break for {ko}" + + def test_1_busbar(self): + """test :func:`grid2op.Action.SerializableActionSpace.get_all_unitary_topologies_set` + when there are only 1 busbar per substation""" + class SubMe(TestActionSpaceNbBus): + def get_nb_bus(self): + return 1 + + tmp = SubMe() + tmp.setUp() + res = [len(tmp.env.action_space.get_all_unitary_topologies_set(tmp.env.action_space, + sub_id)) + for sub_id in range(type(tmp.env).n_sub)] + res_noalone = [len(tmp.env.action_space.get_all_unitary_topologies_set(tmp.env.action_space, + sub_id, + add_alone_line=False)) + for sub_id in range(type(tmp.env).n_sub)] + tmp.tearDown() + assert res == [0] * 14, f"found: {res}" + assert res_noalone == [0] * 14, f"found: {res_noalone}" + + class SubMe2(TestActionSpaceNbBus): + def get_nb_bus(self): + return 1 + def get_env_nm(self): + return "l2rpn_idf_2023" + + tmp2 = SubMe2() + tmp2.setUp() + res = [len(tmp2.env.action_space.get_all_unitary_topologies_set(tmp2.env.action_space, + sub_id)) + for sub_id in range(type(tmp2.env).n_sub)] + res_noalone = [len(tmp2.env.action_space.get_all_unitary_topologies_set(tmp2.env.action_space, + sub_id, + add_alone_line=False)) + for sub_id in range(type(tmp2.env).n_sub)] + tmp2.tearDown() + assert res == [0] * 118, f"found: {res}" + assert res_noalone == [0] * 118, f"found: {res_noalone}" + + def test_3_busbars(self): + """test :func:`grid2op.Action.SerializableActionSpace.get_all_unitary_topologies_set` + when there are 3 busbars per substation""" + res = self.env.action_space.get_all_unitary_topologies_set(self.env.action_space, + _count_only=True) + res_noalone = self.env.action_space.get_all_unitary_topologies_set(self.env.action_space, + add_alone_line=False, + _count_only=True) + assert res == [3, 83, 5, 106, 33, 599, 5, 0, 33, 3, 3, 3, 10, 3], f"found: {res}" + assert res_noalone == [0, 37, 3, 41, 11, 409, 0, 0, 11, 0, 0, 0, 4, 0], f"found: {res_noalone}" + class SubMe2(TestActionSpaceNbBus): + def get_nb_bus(self): + return 3 + def get_env_nm(self): + return "l2rpn_idf_2023" + tmp2 = SubMe2() + tmp2.setUp() + th_vals = [0, 0, 4, 7, 11, 0, 0, 10, 0, 0, 125, 2108, 0, 0, 1711, 0, 162, 3, 37, 0, 0, 0, 37, + 4, 4, 0, 125, 0, 0, 4, 4, 41, 0, 37, 0, 0, 41, 0, 0, 37, 0, 409, 0, 0, 10, 10, 4, 0] + for sub_id, th_val in zip(list(range(48)), th_vals): + res_noalone = tmp2.env.action_space.get_all_unitary_topologies_set(tmp2.env.action_space, + sub_id=sub_id, + add_alone_line=False, + _count_only=True) + assert res_noalone[0] == th_val, f"error for sub_id {sub_id}: {res_noalone} vs {th_val}" + + if HAS_TIME_AND_MEMORY: + # takes 850s (13 minutes) + res_noalone = tmp2.env.action_space.get_all_unitary_topologies_set(tmp2.env.action_space, + sub_id=48, + add_alone_line=False, + _count_only=True) + assert res_noalone == 20698545, f"error for sub_id {48}: {res_noalone}" + tmp2.tearDown() + + def test_legacy_all_unitary_line_set_behaviour(self): + """make sure nothing broke for 2 busbars per substation even if the implementation changes""" + class SubMe(TestActionSpaceNbBus): + def get_nb_bus(self): + return 2 + + tmp = SubMe() + tmp.setUp() + res = len(tmp.env.action_space.get_all_unitary_line_set(tmp.env.action_space)) + res_simple = 
len(tmp.env.action_space.get_all_unitary_line_set_simple(tmp.env.action_space)) + tmp.tearDown() + assert res == 5 * 20, f"found: {res}" + assert res_simple == 2 * 20, f"found: {res_simple}" + + class SubMe2(TestActionSpaceNbBus): + def get_nb_bus(self): + return 2 + def get_env_nm(self): + return "l2rpn_idf_2023" + + tmp2 = SubMe2() + tmp2.setUp() + res = len(tmp2.env.action_space.get_all_unitary_line_set(tmp2.env.action_space)) + res_simple = len(tmp2.env.action_space.get_all_unitary_line_set_simple(tmp2.env.action_space)) + tmp2.tearDown() + assert res == 5 * 186, f"found: {res}" + assert res_simple == 2 * 186, f"found: {res_simple}" + + def test_get_all_unitary_line_set(self): + """test the :func:`grid2op.Action.SerializableActionSpace.get_all_unitary_line_set` when 3 busbars""" + res = len(self.env.action_space.get_all_unitary_line_set(self.env.action_space)) + assert res == (1 + 3*3) * 20, f"found: {res}" + res = len(self.env.action_space.get_all_unitary_line_set_simple(self.env.action_space)) + assert res == 2 * 20, f"found: {res}" + class SubMe2(TestActionSpaceNbBus): + def get_nb_bus(self): + return 3 + def get_env_nm(self): + return "l2rpn_idf_2023" + + tmp2 = SubMe2() + tmp2.setUp() + res = len(tmp2.env.action_space.get_all_unitary_line_set(tmp2.env.action_space)) + res_simple = len(tmp2.env.action_space.get_all_unitary_line_set_simple(tmp2.env.action_space)) + tmp2.tearDown() + assert res == (1 + 3 * 3) * 186, f"found: {res}" + assert res_simple == 2 * 186, f"found: {res_simple}" + + +class TestBackendActionNbBus(unittest.TestCase): + def get_nb_bus(self): + return 3 + + def get_env_nm(self): + return "educ_case14_storage" + + def setUp(self) -> None: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = grid2op.make(self.get_env_nm(), + backend=_AuxFakeBackendSupport(), + action_class=CompleteAction, + test=True, + n_busbar=self.get_nb_bus(), + _add_to_name=type(self).__name__ + f'_{self.get_nb_bus()}') + return super().setUp() + + def tearDown(self) -> None: + self.env.close() + return super().tearDown() + + def test_correct_last_topo(self): + line_id = 0 + id_topo_or = type(self.env).line_or_pos_topo_vect[line_id] + id_topo_ex = type(self.env).line_ex_pos_topo_vect[line_id] + + backend_action = self.env._backend_action + act = self.env.action_space({"set_bus": {"lines_or_id": [(line_id, -1)]}}) + backend_action += act + backend_action.reset() + assert backend_action.current_topo.values[id_topo_or] == -1, f"{backend_action.current_topo.values[id_topo_or]} vs -1" + assert backend_action.current_topo.values[id_topo_ex] == -1, f"{backend_action.current_topo.values[id_topo_ex]} vs -1" + assert backend_action.last_topo_registered.values[id_topo_or] == 1, f"{backend_action.last_topo_registered.values[id_topo_or]} vs 1" + assert backend_action.last_topo_registered.values[id_topo_ex] == 1, f"{backend_action.last_topo_registered.values[id_topo_or]} vs 1" + + act = self.env.action_space({"set_bus": {"lines_or_id": [(line_id, 2)]}}) + backend_action += act + backend_action.reset() + assert backend_action.current_topo.values[id_topo_or] == 2, f"{backend_action.current_topo.values[id_topo_or]} vs 2" + assert backend_action.current_topo.values[id_topo_ex] == 1, f"{backend_action.current_topo.values[id_topo_ex]} vs 1" + assert backend_action.last_topo_registered.values[id_topo_or] == 2, f"{backend_action.last_topo_registered.values[id_topo_or]} vs 2" + assert backend_action.last_topo_registered.values[id_topo_ex] == 1, 
f"{backend_action.last_topo_registered.values[id_topo_or]} vs 1" + + act = self.env.action_space({"set_bus": {"lines_or_id": [(line_id, -1)]}}) + backend_action += act + backend_action.reset() + assert backend_action.current_topo.values[id_topo_or] == -1, f"{backend_action.current_topo.values[id_topo_or]} vs -1" + assert backend_action.current_topo.values[id_topo_ex] == -1, f"{backend_action.current_topo.values[id_topo_ex]} vs -1" + assert backend_action.last_topo_registered.values[id_topo_or] == 2, f"{backend_action.last_topo_registered.values[id_topo_or]} vs 2" + assert backend_action.last_topo_registered.values[id_topo_ex] == 1, f"{backend_action.last_topo_registered.values[id_topo_or]} vs 1" + + act = self.env.action_space({"set_bus": {"lines_ex_id": [(line_id, 3)]}}) + backend_action += act + backend_action.reset() + assert backend_action.current_topo.values[id_topo_or] == 2, f"{backend_action.current_topo.values[id_topo_or]} vs 2" + assert backend_action.current_topo.values[id_topo_ex] == 3, f"{backend_action.current_topo.values[id_topo_ex]} vs 3" + assert backend_action.last_topo_registered.values[id_topo_or] == 2, f"{backend_action.last_topo_registered.values[id_topo_or]} vs 2" + assert backend_action.last_topo_registered.values[id_topo_ex] == 3, f"{backend_action.last_topo_registered.values[id_topo_or]} vs 3" + + act = self.env.action_space({"set_bus": {"lines_or_id": [(line_id, -1)]}}) + backend_action += act + backend_action.reset() + assert backend_action.current_topo.values[id_topo_or] == -1, f"{backend_action.current_topo.values[id_topo_or]} vs -1" + assert backend_action.current_topo.values[id_topo_ex] == -1, f"{backend_action.current_topo.values[id_topo_ex]} vs -1" + assert backend_action.last_topo_registered.values[id_topo_or] == 2, f"{backend_action.last_topo_registered.values[id_topo_or]} vs 2" + assert backend_action.last_topo_registered.values[id_topo_ex] == 3, f"{backend_action.last_topo_registered.values[id_topo_or]} vs 3" + + act = self.env.action_space({"set_bus": {"lines_or_id": [(line_id, -1)]}}) + backend_action += act + backend_action.reset() + assert backend_action.current_topo.values[id_topo_or] == -1, f"{backend_action.current_topo.values[id_topo_or]} vs -1" + assert backend_action.current_topo.values[id_topo_ex] == -1, f"{backend_action.current_topo.values[id_topo_ex]} vs -1" + assert backend_action.last_topo_registered.values[id_topo_or] == 2, f"{backend_action.last_topo_registered.values[id_topo_or]} vs 2" + assert backend_action.last_topo_registered.values[id_topo_ex] == 3, f"{backend_action.last_topo_registered.values[id_topo_or]} vs 3" + + act = self.env.action_space({"set_bus": {"lines_or_id": [(line_id, 1)]}}) + backend_action += act + backend_action.reset() + assert backend_action.current_topo.values[id_topo_or] == 1, f"{backend_action.current_topo.values[id_topo_or]} vs 1" + assert backend_action.current_topo.values[id_topo_ex] == 3, f"{backend_action.current_topo.values[id_topo_ex]} vs 3" + assert backend_action.last_topo_registered.values[id_topo_or] == 1, f"{backend_action.last_topo_registered.values[id_topo_or]} vs 1" + assert backend_action.last_topo_registered.values[id_topo_ex] == 3, f"{backend_action.last_topo_registered.values[id_topo_or]} vs 3" + + def test_call(self): + cls = type(self.env) + line_id = 0 + id_topo_or = cls.line_or_pos_topo_vect[line_id] + id_topo_ex = cls.line_ex_pos_topo_vect[line_id] + + backend_action = self.env._backend_action + act = self.env.action_space({"set_bus": {"lines_or_id": [(line_id, -1)]}}) + 
backend_action += act + ( + active_bus, + (prod_p, prod_v, load_p, load_q, storage), + topo__, + shunts__, + ) = backend_action() + assert topo__.values[cls.line_or_pos_topo_vect[line_id]] == -1 + assert topo__.values[cls.line_ex_pos_topo_vect[line_id]] == -1 + backend_action.reset() + + act = self.env.action_space({"set_bus": {"lines_or_id": [(line_id, 2)]}}) + backend_action += act + ( + active_bus, + (prod_p, prod_v, load_p, load_q, storage), + topo__, + shunts__, + ) = backend_action() + assert topo__.values[cls.line_or_pos_topo_vect[line_id]] == 2 + assert topo__.values[cls.line_ex_pos_topo_vect[line_id]] == 1 + backend_action.reset() + + act = self.env.action_space({"set_bus": {"lines_or_id": [(line_id, -1)]}}) + backend_action += act + ( + active_bus, + (prod_p, prod_v, load_p, load_q, storage), + topo__, + shunts__, + ) = backend_action() + assert topo__.values[cls.line_or_pos_topo_vect[line_id]] == -1 + assert topo__.values[cls.line_ex_pos_topo_vect[line_id]] == -1 + backend_action.reset() + + act = self.env.action_space({"set_bus": {"lines_ex_id": [(line_id, 3)]}}) + backend_action += act + ( + active_bus, + (prod_p, prod_v, load_p, load_q, storage), + topo__, + shunts__, + ) = backend_action() + assert topo__.values[cls.line_or_pos_topo_vect[line_id]] == 2 + assert topo__.values[cls.line_ex_pos_topo_vect[line_id]] == 3 + backend_action.reset() + + act = self.env.action_space({"set_bus": {"lines_or_id": [(line_id, -1)]}}) + backend_action += act + ( + active_bus, + (prod_p, prod_v, load_p, load_q, storage), + topo__, + shunts__, + ) = backend_action() + assert topo__.values[cls.line_or_pos_topo_vect[line_id]] == -1 + assert topo__.values[cls.line_ex_pos_topo_vect[line_id]] == -1 + backend_action.reset() + + act = self.env.action_space({"set_bus": {"lines_or_id": [(line_id, 1)]}}) + backend_action += act + ( + active_bus, + (prod_p, prod_v, load_p, load_q, storage), + topo__, + shunts__, + ) = backend_action() + assert topo__.values[cls.line_or_pos_topo_vect[line_id]] == 1 + assert topo__.values[cls.line_ex_pos_topo_vect[line_id]] == 3 + backend_action.reset() + + +class TestPandapowerBackend_3busbars(unittest.TestCase): + def get_nb_bus(self): + return 3 + + def get_env_nm(self): + return "educ_case14_storage" + + def setUp(self) -> None: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = grid2op.make(self.get_env_nm(), + backend=PandaPowerBackend(), + action_class=CompleteAction, + test=True, + n_busbar=self.get_nb_bus(), + _add_to_name=type(self).__name__ + f'_{self.get_nb_bus()}') + self.list_loc_bus = [-1] + list(range(1, type(self.env).n_busbar_per_sub + 1)) + return super().setUp() + + def tearDown(self) -> None: + self.env.close() + return super().tearDown() + + def test_right_bus_made(self): + assert self.env.backend._grid.bus.shape[0] == self.get_nb_bus() * type(self.env).n_sub + assert (~self.env.backend._grid.bus.iloc[type(self.env).n_sub:]["in_service"]).all() + + @staticmethod + def _aux_find_sub(env, obj_col): + """find a sub with 4 elements, the type of elements and at least 2 lines""" + cls = type(env) + res = None + for sub_id in range(cls.n_sub): + this_sub_mask = cls.grid_objects_types[:,cls.SUB_COL] == sub_id + this_sub = cls.grid_objects_types[this_sub_mask, :] + if this_sub.shape[0] <= 3: + # not enough element + continue + if (this_sub[:, obj_col] == -1).all(): + # no load + continue + if ((this_sub[:, cls.LOR_COL] != -1) | (this_sub[:, cls.LEX_COL] != -1)).sum() <= 1: + # only 1 line + continue + el_id = this_sub[this_sub[:, 
obj_col] != -1, obj_col][0] + if (this_sub[:, cls.LOR_COL] != -1).any(): + line_or_id = this_sub[this_sub[:, cls.LOR_COL] != -1, cls.LOR_COL][0] + line_ex_id = None + else: + line_or_id = None + line_ex_id = this_sub[this_sub[:, cls.LEX_COL] != -1, cls.LEX_COL][0] + res = (sub_id, el_id, line_or_id, line_ex_id) + break + return res + + @staticmethod + def _aux_find_sub_shunt(env): + """find a sub with 4 elements, the type of elements and at least 2 lines""" + cls = type(env) + res = None + for el_id in range(cls.n_shunt): + sub_id = cls.shunt_to_subid[el_id] + this_sub_mask = cls.grid_objects_types[:,cls.SUB_COL] == sub_id + this_sub = cls.grid_objects_types[this_sub_mask, :] + if this_sub.shape[0] <= 3: + # not enough element + continue + if ((this_sub[:, cls.LOR_COL] != -1) | (this_sub[:, cls.LEX_COL] != -1)).sum() <= 1: + # only 1 line + continue + if (this_sub[:, cls.LOR_COL] != -1).any(): + line_or_id = this_sub[this_sub[:, cls.LOR_COL] != -1, cls.LOR_COL][0] + line_ex_id = None + else: + line_or_id = None + line_ex_id = this_sub[this_sub[:, cls.LEX_COL] != -1, cls.LEX_COL][0] + res = (sub_id, el_id, line_or_id, line_ex_id) + break + return res + + def test_move_load(self): + cls = type(self.env) + res = self._aux_find_sub(self.env, cls.LOA_COL) + if res is None: + raise RuntimeError(f"Cannot carry the test 'test_move_load' as " + "there are no suitable subastation in your grid.") + (sub_id, el_id, line_or_id, line_ex_id) = res + for new_bus in self.list_loc_bus: + if line_or_id is not None: + act = self.env.action_space({"set_bus": {"loads_id": [(el_id, new_bus)], "lines_or_id": [(line_or_id, new_bus)]}}) + else: + act = self.env.action_space({"set_bus": {"loads_id": [(el_id, new_bus)], "lines_ex_id": [(line_ex_id, new_bus)]}}) + bk_act = self.env._backend_action_class() + bk_act += act + self.env.backend.apply_action(bk_act) + global_bus = sub_id + (new_bus -1) * cls.n_sub + if new_bus >= 1: + assert self.env.backend._grid.load.iloc[el_id]["bus"] == global_bus + if line_or_id is not None: + assert self.env.backend._grid.line.iloc[line_or_id]["from_bus"] == global_bus + else: + assert self.env.backend._grid.line.iloc[line_ex_id]["to_bus"] == global_bus + assert self.env.backend._grid.bus.loc[global_bus]["in_service"] + else: + assert not self.env.backend._grid.load.iloc[el_id]["in_service"] + if line_or_id is not None: + assert not self.env.backend._grid.line.iloc[line_or_id]["in_service"] + else: + assert not self.env.backend._grid.line.iloc[line_ex_id]["in_service"] + topo_vect = self.env.backend._get_topo_vect() + assert topo_vect[cls.load_pos_topo_vect[el_id]] == new_bus, f"{topo_vect[cls.load_pos_topo_vect[el_id]]} vs {new_bus}" + + def test_move_gen(self): + cls = type(self.env) + res = self._aux_find_sub(self.env, cls.GEN_COL) + if res is None: + raise RuntimeError(f"Cannot carry the test 'test_move_gen' as " + "there are no suitable subastation in your grid.") + (sub_id, el_id, line_or_id, line_ex_id) = res + for new_bus in self.list_loc_bus: + if line_or_id is not None: + act = self.env.action_space({"set_bus": {"generators_id": [(el_id, new_bus)], "lines_or_id": [(line_or_id, new_bus)]}}) + else: + act = self.env.action_space({"set_bus": {"generators_id": [(el_id, new_bus)], "lines_ex_id": [(line_ex_id, new_bus)]}}) + bk_act = self.env._backend_action_class() + bk_act += act + self.env.backend.apply_action(bk_act) + global_bus = sub_id + (new_bus -1) * cls.n_sub + if new_bus >= 1: + assert self.env.backend._grid.gen.iloc[el_id]["bus"] == global_bus + if line_or_id is not 
None: + assert self.env.backend._grid.line.iloc[line_or_id]["from_bus"] == global_bus + else: + assert self.env.backend._grid.line.iloc[line_ex_id]["to_bus"] == global_bus + assert self.env.backend._grid.bus.loc[global_bus]["in_service"] + else: + assert not self.env.backend._grid.gen.iloc[el_id]["in_service"] + if line_or_id is not None: + assert not self.env.backend._grid.line.iloc[line_or_id]["in_service"] + else: + assert not self.env.backend._grid.line.iloc[line_ex_id]["in_service"] + topo_vect = self.env.backend._get_topo_vect() + assert topo_vect[cls.gen_pos_topo_vect[el_id]] == new_bus, f"{topo_vect[cls.gen_pos_topo_vect[el_id]]} vs {new_bus}" + + def test_move_storage(self): + cls = type(self.env) + res = self._aux_find_sub(self.env, cls.STORAGE_COL) + if res is None: + raise RuntimeError(f"Cannot carry the test 'test_move_storage' as " + "there are no suitable subastation in your grid.") + (sub_id, el_id, line_or_id, line_ex_id) = res + for new_bus in self.list_loc_bus: + if line_or_id is not None: + act = self.env.action_space({"set_bus": {"storages_id": [(el_id, new_bus)], "lines_or_id": [(line_or_id, new_bus)]}}) + else: + act = self.env.action_space({"set_bus": {"storages_id": [(el_id, new_bus)], "lines_ex_id": [(line_ex_id, new_bus)]}}) + bk_act = self.env._backend_action_class() + bk_act += act + self.env.backend.apply_action(bk_act) + global_bus = sub_id + (new_bus -1) * cls.n_sub + if new_bus >= 1: + assert self.env.backend._grid.storage.iloc[el_id]["bus"] == global_bus + assert self.env.backend._grid.storage.iloc[el_id]["in_service"], f"storage should not be deactivated" + if line_or_id is not None: + assert self.env.backend._grid.line.iloc[line_or_id]["from_bus"] == global_bus + else: + assert self.env.backend._grid.line.iloc[line_ex_id]["to_bus"] == global_bus + assert self.env.backend._grid.bus.loc[global_bus]["in_service"] + else: + assert not self.env.backend._grid.storage.iloc[el_id]["in_service"], f"storage should be deactivated" + if line_or_id is not None: + assert not self.env.backend._grid.line.iloc[line_or_id]["in_service"] + else: + assert not self.env.backend._grid.line.iloc[line_ex_id]["in_service"] + topo_vect = self.env.backend._get_topo_vect() + assert topo_vect[cls.storage_pos_topo_vect[el_id]] == new_bus, f"{topo_vect[cls.storage_pos_topo_vect[el_id]]} vs {new_bus}" + + def test_move_line_or(self): + cls = type(self.env) + line_id = 0 + for new_bus in self.list_loc_bus: + act = self.env.action_space({"set_bus": {"lines_or_id": [(line_id, new_bus)]}}) + bk_act = self.env._backend_action_class() + bk_act += act + self.env.backend.apply_action(bk_act) + global_bus = cls.line_or_to_subid[line_id] + (new_bus -1) * cls.n_sub + if new_bus >= 1: + assert self.env.backend._grid.line.iloc[line_id]["from_bus"] == global_bus + assert self.env.backend._grid.bus.loc[global_bus]["in_service"] + else: + assert not self.env.backend._grid.line.iloc[line_id]["in_service"] + self.env.backend.line_status[:] = self.env.backend._get_line_status() # otherwise it's not updated + topo_vect = self.env.backend._get_topo_vect() + assert topo_vect[cls.line_or_pos_topo_vect[line_id]] == new_bus, f"{topo_vect[cls.line_or_pos_topo_vect[line_id]]} vs {new_bus}" + + def test_move_line_ex(self): + cls = type(self.env) + line_id = 0 + for new_bus in self.list_loc_bus: + act = self.env.action_space({"set_bus": {"lines_ex_id": [(line_id, new_bus)]}}) + bk_act = self.env._backend_action_class() + bk_act += act + self.env.backend.apply_action(bk_act) + global_bus = 
cls.line_ex_to_subid[line_id] + (new_bus -1) * cls.n_sub + if new_bus >= 1: + assert self.env.backend._grid.line.iloc[line_id]["to_bus"] == global_bus + assert self.env.backend._grid.bus.loc[global_bus]["in_service"] + else: + assert not self.env.backend._grid.line.iloc[line_id]["in_service"] + self.env.backend.line_status[:] = self.env.backend._get_line_status() # otherwise it's not updated + topo_vect = self.env.backend._get_topo_vect() + assert topo_vect[cls.line_ex_pos_topo_vect[line_id]] == new_bus, f"{topo_vect[cls.line_ex_pos_topo_vect[line_id]]} vs {new_bus}" + + def test_move_shunt(self): + cls = type(self.env) + res = self._aux_find_sub_shunt(self.env) + if res is None: + raise RuntimeError(f"Cannot carry the test 'test_move_load' as " + "there are no suitable subastation in your grid.") + (sub_id, el_id, line_or_id, line_ex_id) = res + for new_bus in self.list_loc_bus: + if line_or_id is not None: + act = self.env.action_space({"shunt": {"set_bus": [(el_id, new_bus)]}, "set_bus": {"lines_or_id": [(line_or_id, new_bus)]}}) + else: + act = self.env.action_space({"shunt": {"set_bus": [(el_id, new_bus)]}, "set_bus": {"lines_ex_id": [(line_ex_id, new_bus)]}}) + bk_act = self.env._backend_action_class() + bk_act += act + self.env.backend.apply_action(bk_act) + global_bus = sub_id + (new_bus -1) * cls.n_sub + if new_bus >= 1: + assert self.env.backend._grid.shunt.iloc[el_id]["bus"] == global_bus + if line_or_id is not None: + assert self.env.backend._grid.line.iloc[line_or_id]["from_bus"] == global_bus + else: + assert self.env.backend._grid.line.iloc[line_ex_id]["to_bus"] == global_bus + assert self.env.backend._grid.bus.loc[global_bus]["in_service"] + else: + assert not self.env.backend._grid.shunt.iloc[el_id]["in_service"] + if line_or_id is not None: + assert not self.env.backend._grid.line.iloc[line_or_id]["in_service"] + else: + assert not self.env.backend._grid.line.iloc[line_ex_id]["in_service"] + + def test_check_kirchoff(self): + cls = type(self.env) + res = self._aux_find_sub(self.env, cls.LOA_COL) + if res is None: + raise RuntimeError("Cannot carry the test 'test_move_load' as " + "there are no suitable subastation in your grid.") + (sub_id, el_id, line_or_id, line_ex_id) = res + for new_bus in self.list_loc_bus: + if new_bus <= -1: + continue + if line_or_id is not None: + act = self.env.action_space({"set_bus": {"loads_id": [(el_id, new_bus)], "lines_or_id": [(line_or_id, new_bus)]}}) + else: + act = self.env.action_space({"set_bus": {"loads_id": [(el_id, new_bus)], "lines_ex_id": [(line_ex_id, new_bus)]}}) + bk_act = self.env._backend_action_class() + bk_act += act + self.env.backend.apply_action(bk_act) + conv, maybe_exc = self.env.backend.runpf() + assert conv, f"error : {maybe_exc}" + p_subs, q_subs, p_bus, q_bus, diff_v_bus = self.env.backend.check_kirchoff() + # assert laws are met + assert np.abs(p_subs).max() <= 1e-5, f"error for busbar {new_bus}: {np.abs(p_subs).max():.2e}" + assert np.abs(q_subs).max() <= 1e-5, f"error for busbar {new_bus}: {np.abs(q_subs).max():.2e}" + assert np.abs(p_bus).max() <= 1e-5, f"error for busbar {new_bus}: {np.abs(p_bus).max():.2e}" + assert np.abs(q_bus).max() <= 1e-5, f"error for busbar {new_bus}: {np.abs(q_bus).max():.2e}" + assert np.abs(diff_v_bus).max() <= 1e-5, f"error for busbar {new_bus}: {np.abs(diff_v_bus).max():.2e}" + + +class TestPandapowerBackend_1busbar(TestPandapowerBackend_3busbars): + def get_nb_bus(self): + return 1 + + +class TestObservation_3busbars(unittest.TestCase): + def get_nb_bus(self): + return 3 + + def 
get_env_nm(self): + return "educ_case14_storage" + + def get_reset_kwargs(self) -> dict: + return dict(seed=0, options={"time serie id": 0}) + + def setUp(self) -> None: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = grid2op.make(self.get_env_nm(), + backend=PandaPowerBackend(), + action_class=CompleteAction, + test=True, + n_busbar=self.get_nb_bus(), + _add_to_name=type(self).__name__ + f'_{self.get_nb_bus()}') + param = self.env.parameters + param.NB_TIMESTEP_COOLDOWN_SUB = 0 + param.NB_TIMESTEP_COOLDOWN_LINE = 0 + param.MAX_LINE_STATUS_CHANGED = 99999 + param.MAX_SUB_CHANGED = 99999 + self.env.change_parameters(param) + self.env.change_forecast_parameters(param) + self.env.reset(**self.get_reset_kwargs()) + self.list_loc_bus = list(range(1, type(self.env).n_busbar_per_sub + 1)) + return super().setUp() + + def tearDown(self) -> None: + self.env.close() + return super().tearDown() + + def test_get_simulator(self): + obs = self.env.reset(**self.get_reset_kwargs()) + sim = obs.get_simulator() + assert type(sim.backend).n_busbar_per_sub == self.get_nb_bus() + res = TestPandapowerBackend_3busbars._aux_find_sub(self.env, type(self.env).LOA_COL) + if res is None: + raise RuntimeError(f"Cannot carry the test 'test_get_simulator' as " + "there are no suitable subastation in your grid.") + (sub_id, el_id, line_or_id, line_ex_id) = res + for new_bus in self.list_loc_bus: + if line_or_id is not None: + act = self.env.action_space({"set_bus": {"loads_id": [(el_id, new_bus)], "lines_or_id": [(line_or_id, new_bus)]}}) + else: + act = self.env.action_space({"set_bus": {"loads_id": [(el_id, new_bus)], "lines_ex_id": [(line_ex_id, new_bus)]}}) + sim2 = sim.predict(act) + global_bus = sub_id + (new_bus -1) * type(self.env).n_sub + assert sim2.backend._grid.load["bus"].iloc[el_id] == global_bus + + def _aux_build_act(self, res, new_bus, el_keys): + """res: output of TestPandapowerBackend_3busbars._aux_find_sub""" + if res is None: + raise RuntimeError(f"Cannot carry the test as " + "there are no suitable subastation in your grid.") + (sub_id, el_id, line_or_id, line_ex_id) = res + if line_or_id is not None: + act = self.env.action_space({"set_bus": {el_keys: [(el_id, new_bus)], "lines_or_id": [(line_or_id, new_bus)]}}) + else: + act = self.env.action_space({"set_bus": {el_keys: [(el_id, new_bus)], "lines_ex_id": [(line_ex_id, new_bus)]}}) + return act + + @staticmethod + def _aux_aux_build_act(env, res, new_bus, el_keys): + """res: output of TestPandapowerBackend_3busbars._aux_find_sub""" + if res is None: + raise RuntimeError(f"Cannot carry the test as " + "there are no suitable subastation in your grid.") + (sub_id, el_id, line_or_id, line_ex_id) = res + if line_or_id is not None: + act = env.action_space({"set_bus": {el_keys: [(el_id, new_bus)], "lines_or_id": [(line_or_id, new_bus)]}}) + else: + act = env.action_space({"set_bus": {el_keys: [(el_id, new_bus)], "lines_ex_id": [(line_ex_id, new_bus)]}}) + return act + + def test_get_forecasted_env(self): + obs = self.env.reset(**self.get_reset_kwargs()) + for_env = obs.get_forecast_env() + assert type(for_env).n_busbar_per_sub == self.get_nb_bus() + for_obs = for_env.reset() + assert type(for_obs).n_busbar_per_sub == self.get_nb_bus() + res = TestPandapowerBackend_3busbars._aux_find_sub(self.env, type(self.env).LOA_COL) + (sub_id, el_id, line_or_id, line_ex_id) = res + for new_bus in self.list_loc_bus: + for_env = obs.get_forecast_env() + act = self._aux_build_act(res, new_bus, "loads_id") + sim_obs, sim_r, sim_d, 
sim_info = for_env.step(act) + assert not sim_d, f"{sim_info['exception']}" + assert sim_obs.load_bus[el_id] == new_bus, f"{sim_obs.load_bus[el_id]} vs {new_bus}" + + def test_add(self): + obs = self.env.reset(**self.get_reset_kwargs()) + res = TestPandapowerBackend_3busbars._aux_find_sub(self.env, type(self.env).LOA_COL) + (sub_id, el_id, line_or_id, line_ex_id) = res + for new_bus in self.list_loc_bus: + act = self._aux_build_act(res, new_bus, "loads_id") + obs_pus_act = obs + act + assert obs_pus_act.load_bus[el_id] == new_bus, f"{obs_pus_act.load_bus[el_id]} vs {new_bus}" + + def test_simulate(self): + obs = self.env.reset(**self.get_reset_kwargs()) + res = TestPandapowerBackend_3busbars._aux_find_sub(self.env, type(self.env).LOA_COL) + (sub_id, el_id, line_or_id, line_ex_id) = res + for new_bus in self.list_loc_bus: + act = self._aux_build_act(res, new_bus, "loads_id") + sim_obs, sim_r, sim_d, sim_info = obs.simulate(act) + assert not sim_d, f"{sim_info['exception']}" + assert sim_obs.load_bus[el_id] == new_bus, f"{sim_obs.load_bus[el_id]} vs {new_bus}" + + def test_action_space_get_back_to_ref_state(self): + """test the :func:`grid2op.Action.SerializableActionSpace.get_back_to_ref_state` + when 3 busbars which could not be tested without observation""" + obs = self.env.reset(**self.get_reset_kwargs()) + res = TestPandapowerBackend_3busbars._aux_find_sub(self.env, type(self.env).LOA_COL) + (sub_id, el_id, line_or_id, line_ex_id) = res + for new_bus in self.list_loc_bus: + if new_bus == 1: + # nothing to do if everything is moved to bus 1 + continue + act = self._aux_build_act(res, new_bus, "loads_id") + obs, reward, done, info = self.env.step(act) + assert not done + acts = self.env.action_space.get_back_to_ref_state(obs) + assert "substation" in acts + assert len(acts["substation"]) == 1 + act_to_ref = acts["substation"][0] + assert act_to_ref.load_set_bus[el_id] == 1 + if line_or_id is not None: + assert act_to_ref.line_or_set_bus[line_or_id] == 1 + if line_ex_id is not None: + assert act_to_ref.line_ex_set_bus[line_ex_id] == 1 + + def test_connectivity_matrix(self): + cls = type(self.env) + obs = self.env.reset(**self.get_reset_kwargs()) + res = TestPandapowerBackend_3busbars._aux_find_sub(self.env, type(self.env).LOA_COL) + (sub_id, el_id, line_or_id, line_ex_id) = res + for new_bus in self.list_loc_bus: + act = self._aux_build_act(res, new_bus, "loads_id") + obs, reward, done, info = self.env.step(act) + assert not done + assert not info["exception"], "there should not have any exception (action should be legal)" + conn_mat = obs.connectivity_matrix() + assert conn_mat.shape == (cls.dim_topo, cls.dim_topo) + if new_bus == 1: + min_sub = np.sum(cls.sub_info[:sub_id]) + max_sub = min_sub + cls.sub_info[sub_id] + assert (conn_mat[min_sub:max_sub, min_sub:max_sub] == 1.).all() + else: + el_topov = cls.load_pos_topo_vect[el_id] + line_pos_topov = cls.line_or_pos_topo_vect[line_or_id] if line_or_id is not None else cls.line_ex_pos_topo_vect[line_ex_id] + line_pos_topo_other = cls.line_ex_pos_topo_vect[line_or_id] if line_or_id is not None else cls.line_or_pos_topo_vect[line_ex_id] + assert conn_mat[el_topov, line_pos_topov] == 1. + assert conn_mat[line_pos_topov, el_topov] == 1. 
+ for el in range(cls.dim_topo): + if el == line_pos_topov: + continue + if el == el_topov: + continue + if el == line_pos_topo_other: + # other side of the line is connected to it + continue + assert conn_mat[el_topov, el] == 0., f"error for {new_bus}: ({el_topov}, {el}) appears to be connected: {conn_mat[el_topov, el]}" + assert conn_mat[el, el_topov] == 0., f"error for {new_bus}: ({el}, {el_topov}) appears to be connected: {conn_mat[el, el_topov]}" + assert conn_mat[line_pos_topov, el] == 0., f"error for {new_bus}: ({line_pos_topov}, {el}) appears to be connected: {conn_mat[line_pos_topov, el]}" + assert conn_mat[el, line_pos_topov] == 0., f"error for {new_bus}: ({el}, {line_pos_topov}) appears to be connected: {conn_mat[el, line_pos_topov]}" + + def test_bus_connectivity_matrix(self): + cls = type(self.env) + obs = self.env.reset(**self.get_reset_kwargs()) + res = TestPandapowerBackend_3busbars._aux_find_sub(self.env, type(self.env).LOA_COL) + (sub_id, el_id, line_or_id, line_ex_id) = res + for new_bus in self.list_loc_bus: + act = self._aux_build_act(res, new_bus, "loads_id") + obs, reward, done, info = self.env.step(act) + assert not done + assert not info["exception"], "there should not have any exception (action should be legal)" + conn_mat, (lor_ind, lex_ind) = obs.bus_connectivity_matrix(return_lines_index=True) + if new_bus == 1: + assert conn_mat.shape == (cls.n_sub, cls.n_sub) + else: + assert conn_mat.shape == (cls.n_sub + 1, cls.n_sub + 1) + new_bus_id = lor_ind[line_or_id] if line_or_id else lex_ind[line_ex_id] + bus_other = lex_ind[line_or_id] if line_or_id else lor_ind[line_ex_id] + assert conn_mat[new_bus_id, bus_other] == 1. + assert conn_mat[bus_other, new_bus_id] == 1. + assert conn_mat[new_bus_id, sub_id] == 0. + assert conn_mat[sub_id, new_bus_id] == 0. + + def test_flow_bus_matrix(self): + cls = type(self.env) + obs = self.env.reset(**self.get_reset_kwargs()) + res = TestPandapowerBackend_3busbars._aux_find_sub(self.env, type(self.env).LOA_COL) + (sub_id, el_id, line_or_id, line_ex_id) = res + for new_bus in self.list_loc_bus: + act = self._aux_build_act(res, new_bus, "loads_id") + obs, reward, done, info = self.env.step(act) + assert not done + assert not info["exception"], "there should not have any exception (action should be legal)" + conn_mat, (load_bus, prod_bus, stor_bus, lor_ind, lex_ind) = obs.flow_bus_matrix() + if new_bus == 1: + assert conn_mat.shape == (cls.n_sub, cls.n_sub) + else: + assert conn_mat.shape == (cls.n_sub + 1, cls.n_sub + 1) + new_bus_id = lor_ind[line_or_id] if line_or_id else lex_ind[line_ex_id] + bus_other = lex_ind[line_or_id] if line_or_id else lor_ind[line_ex_id] + assert conn_mat[new_bus_id, bus_other] != 0. # there are some flows from these 2 buses + assert conn_mat[bus_other, new_bus_id] != 0. # there are some flows from these 2 buses + assert conn_mat[new_bus_id, sub_id] == 0. + assert conn_mat[sub_id, new_bus_id] == 0. 
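For context, the three observation helpers exercised above (connectivity_matrix, bus_connectivity_matrix and flow_bus_matrix) simply gain one extra row and column once an element is moved to an additional busbar. A minimal usage sketch, assuming the same educ_case14_storage test environment and reset options used in these tests (variable names are illustrative only):

    import grid2op

    env = grid2op.make("educ_case14_storage", test=True, n_busbar=3)
    obs = env.reset(seed=0, options={"time serie id": 0})
    # one row / column per connected bus: shape stays (n_sub, n_sub)
    # while every element remains on busbar 1
    flow_mat, (load_bus, prod_bus, stor_bus, lor_ind, lex_ind) = obs.flow_bus_matrix()
    conn_mat, (lor_ind2, lex_ind2) = obs.bus_connectivity_matrix(return_lines_index=True)
    print(flow_mat.shape, conn_mat.shape)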
+ + def test_get_energy_graph(self): + cls = type(self.env) + obs = self.env.reset(**self.get_reset_kwargs()) + res = TestPandapowerBackend_3busbars._aux_find_sub(self.env, type(self.env).LOA_COL) + (sub_id, el_id, line_or_id, line_ex_id) = res + for new_bus in self.list_loc_bus: + act = self._aux_build_act(res, new_bus, "loads_id") + obs, reward, done, info = self.env.step(act) + assert not done + assert not info["exception"], "there should not have any exception (action should be legal)" + graph = obs.get_energy_graph() + if new_bus == 1: + assert len(graph.nodes) == cls.n_sub + continue + # if I end up here it's because new_bus >= 2 + assert len(graph.nodes) == cls.n_sub + 1 + new_bus_id = cls.n_sub # this bus has been added + bus_other = cls.line_ex_to_subid[line_or_id] if line_or_id else cls.line_or_to_subid[line_ex_id] + assert (new_bus_id, bus_other) in graph.edges + edge = graph.edges[(new_bus_id, bus_other)] + node = graph.nodes[new_bus_id] + assert node["local_bus_id"] == new_bus + assert node["global_bus_id"] == sub_id + (new_bus - 1) * cls.n_sub + if line_or_id is not None: + assert edge["bus_or"] == new_bus + assert edge["global_bus_or"] == sub_id + (new_bus - 1) * cls.n_sub + else: + assert edge["bus_ex"] == new_bus + assert edge["global_bus_ex"] == sub_id + (new_bus - 1) * cls.n_sub + + def test_get_elements_graph(self): + cls = type(self.env) + obs = self.env.reset(**self.get_reset_kwargs()) + res = TestPandapowerBackend_3busbars._aux_find_sub(self.env, type(self.env).LOA_COL) + (sub_id, el_id, line_or_id, line_ex_id) = res + for new_bus in self.list_loc_bus: + act = self._aux_build_act(res, new_bus, "loads_id") + obs, reward, done, info = self.env.step(act) + assert not done + assert not info["exception"], "there should not have any exception (action should be legal)" + graph = obs.get_elements_graph() + global_bus_id = sub_id + (new_bus - 1) * cls.n_sub + node_bus_id = graph.graph['bus_nodes_id'][global_bus_id] + node_load_id = graph.graph['load_nodes_id'][el_id] + node_line_id = graph.graph['line_nodes_id'][line_or_id] if line_or_id is not None else graph.graph['line_nodes_id'][line_ex_id] + node_load = graph.nodes[node_load_id] + node_line = graph.nodes[node_line_id] + assert len(graph.graph["bus_nodes_id"]) == cls.n_busbar_per_sub * cls.n_sub + + # check the bus + for node_id in graph.graph["bus_nodes_id"]: + assert "global_id" in graph.nodes[node_id], "key 'global_id' should be in the node" + if new_bus == 1: + for node_id in graph.graph["bus_nodes_id"][cls.n_sub:]: + assert not graph.nodes[node_id]["connected"], f"bus (global id {graph.nodes[node_id]['global_id']}) represented by node {node_id} should not be connected" + else: + for node_id in graph.graph["bus_nodes_id"][cls.n_sub:]: + if graph.nodes[node_id]['global_id'] != global_bus_id: + assert not graph.nodes[node_id]["connected"], f"bus (global id {graph.nodes[node_id]['global_id']}) represented by node {node_id} should not be connected" + else: + assert graph.nodes[node_id]["connected"], f"bus (global id {graph.nodes[node_id]['global_id']}) represented by node {node_id} should be connected" + + # check the load + edge_load_id = node_load["bus_node_id"] + assert node_load["local_bus"] == new_bus + assert node_load["global_bus"] == global_bus_id + assert (node_load_id, edge_load_id) in graph.edges + + # check lines + side = "or" if line_or_id is not None else "ex" + edge_line_id = node_line[f"bus_node_id_{side}"] + assert node_line[f"local_bus_{side}"] == new_bus + assert node_line[f"global_bus_{side}"] == 
global_bus_id
+        assert (node_line_id, edge_line_id) in graph.edges
+
+
+class TestObservation_1busbar(TestObservation_3busbars):
+    def get_nb_bus(self):
+        return 1
+
+
+class TestEnv_3busbars(unittest.TestCase):
+    def get_nb_bus(self):
+        return 3
+
+    def get_env_nm(self):
+        return "educ_case14_storage"
+
+    def get_reset_kwargs(self) -> dict:
+        return dict(seed=0, options={"time serie id": 0})
+
+    def setUp(self) -> None:
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore")
+            self.env = grid2op.make(self.get_env_nm(),
+                                    backend=PandaPowerBackend(),
+                                    action_class=CompleteAction,
+                                    test=True,
+                                    n_busbar=self.get_nb_bus(),
+                                    _add_to_name=type(self).__name__ + f'_{self.get_nb_bus()}')
+        param = self.env.parameters
+        param.NB_TIMESTEP_COOLDOWN_SUB = 0
+        param.NB_TIMESTEP_COOLDOWN_LINE = 0
+        param.MAX_LINE_STATUS_CHANGED = 99999
+        param.MAX_SUB_CHANGED = 99999
+        self.env.change_parameters(param)
+        self.env.change_forecast_parameters(param)
+        self.env.reset(**self.get_reset_kwargs())
+        self.list_loc_bus = list(range(1, type(self.env).n_busbar_per_sub + 1))
+        self.max_iter = 10
+        return super().setUp()
+
+    def tearDown(self) -> None:
+        self.env.close()
+        return super().tearDown()
+
+    def test_go_to_end(self):
+        self.env.set_max_iter(self.max_iter)
+        obs = self.env.reset(**self.get_reset_kwargs())
+        i = 0
+        done = False
+        while not done:
+            obs, reward, done, info = self.env.step(self.env.action_space())
+            i += 1
+        assert i == 10, f"{i} instead of 10"
+
+    def test_can_put_on_3(self):
+        self.env.set_max_iter(self.max_iter)
+        obs = self.env.reset(**self.get_reset_kwargs())
+        res = TestPandapowerBackend_3busbars._aux_find_sub(self.env, type(self.env).LOA_COL)
+        (sub_id, el_id, line_or_id, line_ex_id) = res
+        act = TestObservation_3busbars._aux_aux_build_act(self.env, res, self.get_nb_bus(), "loads_id")
+        i = 0
+        done = False
+        while not done:
+            if i == 0:
+                obs, reward, done, info = self.env.step(act)
+            else:
+                obs, reward, done, info = self.env.step(self.env.action_space())
+            i += 1
+        assert i == 10, f"{i} instead of 10"
+
+    def test_can_move_from_3(self):
+        if self.get_nb_bus() <= 2:
+            self.skipTest("Need at least 3 busbars")
+        self.env.set_max_iter(self.max_iter)
+        obs = self.env.reset(**self.get_reset_kwargs())
+        res = TestPandapowerBackend_3busbars._aux_find_sub(self.env, type(self.env).LOA_COL)
+        (sub_id, el_id, line_or_id, line_ex_id) = res
+        act = TestObservation_3busbars._aux_aux_build_act(self.env, res, self.get_nb_bus(), "loads_id")
+        i = 0
+        done = False
+        while not done:
+            if i == 0:
+                # do the action that moves the element to busbar 3
+                obs, reward, done, info = self.env.step(act)
+                assert not done
+                assert not info["exception"]
+            elif i == 1:
+                # do the opposite action
+                dict_act = obs.get_back_to_ref_state()
+                assert "substation" in dict_act
+                li_act = dict_act["substation"]
+                assert len(li_act) == 1
+                act = li_act[0]
+                obs, reward, done, info = self.env.step(act)
+                assert not done
+                assert not info["exception"]
+            else:
+                obs, reward, done, info = self.env.step(self.env.action_space())
+            i += 1
+        assert i == 10, f"{i} instead of 10"
+
+    def _aux_alone_done(self, key="loads_id"):
+        if self.get_nb_bus() <= 2:
+            self.skipTest("Need at least 3 busbars")
+        obs = self.env.reset(**self.get_reset_kwargs())
+        act = self.env.action_space({"set_bus": {key: [(0, self.get_nb_bus())]}})
+        obs, reward, done, info = self.env.step(act)
+        assert done
+
+    def test_load_alone_done(self):
+        self._aux_alone_done("loads_id")
+
+    def test_gen_alone_done(self):
+        self._aux_alone_done("generators_id")
+
+
def test_simulate(self): + """test the obs.simulate(...) works with different number of busbars""" + obs = self.env.reset(**self.get_reset_kwargs()) + res = TestPandapowerBackend_3busbars._aux_find_sub(self.env, type(self.env).LOA_COL) + (sub_id, el_id, line_or_id, line_ex_id) = res + act = TestObservation_3busbars._aux_aux_build_act(self.env, res, self.get_nb_bus(), "loads_id") + sim_obs, sim_r, sim_d, sim_i = obs.simulate(act) + assert not sim_d + assert not sim_i["exception"] + + +class TestEnv_1busbar(TestEnv_3busbars): + def get_nb_bus(self): + return 1 + + +class TestGym_3busbars(unittest.TestCase): + """Test the environment can be converted to gym, with proper min / max + for all type of action / observation space + """ + def get_nb_bus(self): + return 3 + + def get_env_nm(self): + return "educ_case14_storage" + + def get_reset_kwargs(self) -> dict: + # seed has been tuned for the tests to pass + return dict(seed=self.seed, options={"time serie id": 0}) + + def setUp(self) -> None: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = grid2op.make(self.get_env_nm(), + backend=PandaPowerBackend(), + action_class=CompleteAction, + test=True, + n_busbar=self.get_nb_bus(), + _add_to_name=type(self).__name__ + f'_{self.get_nb_bus()}') + param = self.env.parameters + param.NB_TIMESTEP_COOLDOWN_SUB = 0 + param.NB_TIMESTEP_COOLDOWN_LINE = 0 + param.MAX_LINE_STATUS_CHANGED = 9999999 + param.MAX_SUB_CHANGED = 99999999 + self.env.change_parameters(param) + self.env.change_forecast_parameters(param) + self.seed = 0 + self.env.reset(**self.get_reset_kwargs()) + self.list_loc_bus = list(range(1, type(self.env).n_busbar_per_sub + 1)) + self.max_iter = 10 + return super().setUp() + + def tearDown(self) -> None: + self.env.close() + return super().tearDown() + + def _aux_test_env(self, gym_env): + obs, info = gym_env.reset(**self.get_reset_kwargs()) + assert obs in gym_env.observation_space + act = gym_env.action_space.sample() + assert act in gym_env.action_space + obs, reward, done, truncated, info = gym_env.step(act) + if done: + print(gym_env.action_space.from_gym(act)) + print(info["exception"]) + assert not done + assert not truncated + assert obs in gym_env.observation_space + act = gym_env.action_space.sample() + assert act in gym_env.action_space + obs, reward, done, truncated, info = gym_env.step(act) + assert not done + assert not truncated + assert obs in gym_env.observation_space + + def test_gym_env(self): + gym_env = GymEnv(self.env) + self._aux_test_env(gym_env) + + def test_discrete_act(self): + gym_env = GymEnv(self.env) + gym_env.action_space.close() + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + gym_env.action_space = DiscreteActSpace(self.env.action_space) + self.seed = 5 + self._aux_test_env(gym_env) + gym_env.action_space.close() + gym_env.action_space = DiscreteActSpace(self.env.action_space, + attr_to_keep=('set_bus', )) + self.seed = 1 + self._aux_test_env(gym_env) + + def test_box_act(self): + gym_env = GymEnv(self.env) + gym_env.action_space.close() + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + gym_env.action_space = BoxGymActSpace(self.env.action_space) + self._aux_test_env(gym_env) + + def test_multidiscrete_act(self): + # BoxGymObsSpace, + gym_env = GymEnv(self.env) + gym_env.action_space.close() + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + gym_env.action_space = MultiDiscreteActSpace(self.env.action_space) + self._aux_test_env(gym_env) + 
gym_env.action_space.close() + gym_env.action_space = MultiDiscreteActSpace(self.env.action_space, + attr_to_keep=('set_bus', )) + self._aux_test_env(gym_env) + gym_env.action_space.close() + gym_env.action_space = MultiDiscreteActSpace(self.env.action_space, + attr_to_keep=('sub_set_bus', )) + # no seed below 1000 works, so I force illegal actions... + param = self.env.parameters + param.MAX_LINE_STATUS_CHANGED = 1 + param.MAX_SUB_CHANGED = 1 + gym_env.init_env.change_parameters(param) + gym_env.init_env.change_forecast_parameters(param) + self.seed = 1 + self._aux_test_env(gym_env) + gym_env.action_space.close() + # remove illegal actions for this test + param.MAX_LINE_STATUS_CHANGED = 99999 + param.MAX_SUB_CHANGED = 99999 + gym_env.init_env.change_parameters(param) + gym_env.init_env.change_forecast_parameters(param) + gym_env.action_space = MultiDiscreteActSpace(self.env.action_space, + attr_to_keep=('one_sub_set', )) + self.seed = 1 + self._aux_test_env(gym_env) + + def test_box_obs(self): + gym_env = GymEnv(self.env) + gym_env.observation_space.close() + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + gym_env.observation_space = BoxGymObsSpace(self.env.observation_space) + self._aux_test_env(gym_env) + + +class TestGym_1busbar(TestGym_3busbars): + def get_nb_bus(self): + return 1 + + +class TestRulesNbBus(unittest.TestCase): + """test the rules for the reco / deco of line works also when >= 3 busbars, + also ttests the act.get_impact()... + """ + def get_nb_bus(self): + return 3 + + def get_env_nm(self): + return "educ_case14_storage" + + def get_reset_kwargs(self) -> dict: + # seed has been tuned for the tests to pass + return dict(seed=self.seed, options={"time serie id": 0}) + + def setUp(self) -> None: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = grid2op.make(self.get_env_nm(), + backend=PandaPowerBackend(), + action_class=CompleteAction, + test=True, + n_busbar=self.get_nb_bus(), + _add_to_name=type(self).__name__ + f'_{self.get_nb_bus()}') + self.seed = 0 + self.env.reset(**self.get_reset_kwargs()) + self.list_loc_bus = list(range(1, type(self.env).n_busbar_per_sub + 1)) + self.max_iter = 10 + return super().setUp() + + def tearDown(self) -> None: + self.env.close() + return super().tearDown() + + def _aux_get_disco_line(self, line_id, dn_act): + obs = self.env.reset(**self.get_reset_kwargs()) + obs, reward, done, info = self.env.step(self.env.action_space({"set_line_status": [(line_id, -1)]})) + obs, reward, done, info = self.env.step(dn_act) + obs, reward, done, info = self.env.step(dn_act) + obs, reward, done, info = self.env.step(dn_act) + assert obs.time_before_cooldown_line[line_id] == 0 + + def test_cooldowns(self): + """check the tables of https://grid2op.readthedocs.io/en/latest/action.html#note-on-powerline-status in order + """ + line_id = 0 + subor_id = type(self.env).line_or_to_subid[line_id] + subex_id = type(self.env).line_ex_to_subid[line_id] + dn_act = self.env.action_space() + + # first row + obs = self.env.reset(**self.get_reset_kwargs()) + obs, reward, done, info = self.env.step(self.env.action_space({"set_line_status": [(line_id, -1)]})) + assert obs.time_before_cooldown_line[line_id] == 3 + assert obs.time_before_cooldown_sub[subor_id] == 0 + assert obs.time_before_cooldown_sub[subex_id] == 0 + + # 2nd row + obs = self.env.reset(**self.get_reset_kwargs()) + obs, reward, done, info = self.env.step(self.env.action_space({"set_line_status": [(line_id, +1)]})) + assert 
obs.time_before_cooldown_line[line_id] == 3 + assert obs.time_before_cooldown_sub[subor_id] == 0 + assert obs.time_before_cooldown_sub[subex_id] == 0 + + # 3rd row + self._aux_get_disco_line(line_id, dn_act) + obs, reward, done, info = self.env.step(self.env.action_space({"set_line_status": [(line_id, -1)]})) + assert obs.time_before_cooldown_line[line_id] == 3 + assert obs.time_before_cooldown_sub[subor_id] == 0 + assert obs.time_before_cooldown_sub[subex_id] == 0 + + # 4th row + self._aux_get_disco_line(line_id, dn_act) + obs, reward, done, info = self.env.step(self.env.action_space({"set_line_status": [(line_id, +1)]})) + assert obs.time_before_cooldown_line[line_id] == 3 + assert obs.time_before_cooldown_sub[subor_id] == 0 + assert obs.time_before_cooldown_sub[subex_id] == 0 + + # 5th row + obs = self.env.reset(**self.get_reset_kwargs()) + obs, reward, done, info = self.env.step(self.env.action_space({"change_line_status": [line_id]})) + assert obs.time_before_cooldown_line[line_id] == 3 + assert obs.time_before_cooldown_sub[subor_id] == 0 + assert obs.time_before_cooldown_sub[subex_id] == 0 + + # 6th row + self._aux_get_disco_line(line_id, dn_act) + obs, reward, done, info = self.env.step(self.env.action_space({"change_line_status": [line_id]})) + assert obs.time_before_cooldown_line[line_id] == 3 + assert obs.time_before_cooldown_sub[subor_id] == 0 + assert obs.time_before_cooldown_sub[subex_id] == 0 + + # 7th + obs = self.env.reset(**self.get_reset_kwargs()) + obs, reward, done, info = self.env.step(self.env.action_space({"set_bus": {"lines_or_id": [(line_id, -1)]}})) + assert obs.time_before_cooldown_line[line_id] == 3 + assert obs.time_before_cooldown_sub[subor_id] == 0 + assert obs.time_before_cooldown_sub[subex_id] == 0 + + # 8th + self._aux_get_disco_line(line_id, dn_act) + obs, reward, done, info = self.env.step(self.env.action_space({"set_bus": {"lines_or_id": [(line_id, -1)]}})) + assert obs.time_before_cooldown_line[line_id] == 0 + assert obs.time_before_cooldown_sub[subor_id] == 3 + assert obs.time_before_cooldown_sub[subex_id] == 0 + + # 9th + obs = self.env.reset(**self.get_reset_kwargs()) + obs, reward, done, info = self.env.step(self.env.action_space({"set_bus": {"lines_or_id": [(line_id, 3)]}})) + assert obs.time_before_cooldown_line[line_id] == 0 + assert obs.time_before_cooldown_sub[subor_id] == 3 + assert obs.time_before_cooldown_sub[subex_id] == 0 + + # 10th + self._aux_get_disco_line(line_id, dn_act) + obs, reward, done, info = self.env.step(self.env.action_space({"set_bus": {"lines_or_id": [(line_id, 3)]}})) + assert obs.time_before_cooldown_line[line_id] == 3 + assert obs.time_before_cooldown_sub[subor_id] == 0 + assert obs.time_before_cooldown_sub[subex_id] == 0 + + # 11th and 12th => no "change bus" when nb_bus is not 2 + + +if __name__ == "__main__": + unittest.main() + \ No newline at end of file diff --git a/grid2op/tests/test_new_reset.py b/grid2op/tests/test_new_reset.py new file mode 100644 index 000000000..9977ffb80 --- /dev/null +++ b/grid2op/tests/test_new_reset.py @@ -0,0 +1,82 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# See AUTHORS.txt +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. 
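The new file test_new_reset.py below checks that the seed and the time serie id can be passed directly to env.reset(). A minimal sketch of the API under test, assuming the same l2rpn_case14_sandbox test environment (shown here outside of unittest):

    import grid2op

    env = grid2op.make("l2rpn_case14_sandbox", test=True)
    # same effect as: env.set_id(0); env.seed(0); obs = env.reset()
    obs = env.reset(seed=0, options={"time serie id": 0})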
+ +import grid2op +import unittest +import warnings +import numpy as np +from grid2op.Exceptions import EnvError +from grid2op.gym_compat import GymEnv + + +class TestNewReset(unittest.TestCase): + """ + This class tests the possibility to set the seed and the time + serie id directly when calling `env.reset` + """ + + def setUp(self): + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = grid2op.make("l2rpn_case14_sandbox", test=True, _add_to_name=type(self).__name__) + + def test_normal_env(self): + # original way + self.env.set_id(0) + self.env.seed(0) + obs = self.env.reset() + + # test with seed in reset + self.env.set_id(0) + obs_seed = self.env.reset(seed=0) + + # test with ts_id in reset + self.env.seed(0) + obs_ts = self.env.reset(options={"time serie id": 0}) + + # test with both + obs_both = self.env.reset(seed=0, options={"time serie id": 0}) + assert obs_seed == obs + assert obs_ts == obs + assert obs_both == obs + + def test_raise_if_wrong_key(self): + with self.assertRaises(EnvError): + obs_ts = self.env.reset(options={"time series id": 0}) + + with self.assertRaises(EnvError): + obs_ts = self.env.reset(options={"chronics id": 0}) + + def _aux_obs_equals(self, obs1, obs2): + assert obs1.keys() == obs2.keys(), f"not the same keys" + for el in obs1: + assert np.array_equal(obs1[el], obs2[el]), f"obs not equal for attribute {el}" + + def test_gym_env(self): + gym_env = GymEnv(self.env) + + # original way + gym_env.init_env.set_id(0) + gym_env.init_env.seed(0) + obs, *_ = gym_env.reset() + + # test with seed in reset + gym_env.init_env.set_id(0) + obs_seed, *_ = gym_env.reset(seed=0) + + # test with ts_id in reset + gym_env.init_env.seed(0) + obs_ts, *_ = gym_env.reset(options={"time serie id": 0}) + + # test with both + obs_both, *_ = gym_env.reset(seed=0, options={"time serie id": 0}) + + self._aux_obs_equals(obs_seed, obs) + self._aux_obs_equals(obs_ts, obs) + self._aux_obs_equals(obs_both, obs) + \ No newline at end of file diff --git a/grid2op/tests/test_pickling.py b/grid2op/tests/test_pickling.py index ea262d583..c8114d93e 100644 --- a/grid2op/tests/test_pickling.py +++ b/grid2op/tests/test_pickling.py @@ -20,13 +20,17 @@ ScalerAttrConverter, ) +_NAME_FOR_THIS_TEST = __name__ + "for_mp_test" + with warnings.catch_warnings(): # this needs to be imported in the main module for multiprocessing to work "approximately" warnings.filterwarnings("ignore") - _ = grid2op.make("l2rpn_case14_sandbox", test=True, _add_to_name=__name__+"for_mp_test") - - + _ = grid2op.make("l2rpn_case14_sandbox", + test=True, + _add_to_name=_NAME_FOR_THIS_TEST) + + class TestMultiProc(unittest.TestCase): @staticmethod def f(env_gym): @@ -41,7 +45,9 @@ def test_basic(self): with warnings.catch_warnings(): warnings.filterwarnings("ignore") env = grid2op.make( - "l2rpn_case14_sandbox", test=True, _add_to_name=__name__+"for_mp_test" + "l2rpn_case14_sandbox", + test=True, + _add_to_name=_NAME_FOR_THIS_TEST ) env_gym = GymEnv(env) @@ -71,15 +77,15 @@ def test_basic(self): ["rho", "gen_p", "load_p", "topo_vect", "actual_dispatch"] ) ob_space = ob_space.reencode_space( - "actual_dispatch", ScalerAttrConverter(substract=0.0, divide=env.gen_pmax) + "actual_dispatch", ScalerAttrConverter(substract=0.0, divide=1. * type(env).gen_pmax) ) ob_space = ob_space.reencode_space( - "gen_p", ScalerAttrConverter(substract=0.0, divide=env.gen_pmax) + "gen_p", ScalerAttrConverter(substract=0.0, divide=1. 
* type(env).gen_pmax) ) ob_space = ob_space.reencode_space( "load_p", ScalerAttrConverter( - substract=obs_gym["load_p"], divide=0.5 * obs_gym["load_p"] + substract=1. * obs_gym["load_p"], divide=0.5 * obs_gym["load_p"] ), ) env_gym.observation_space = ob_space @@ -95,4 +101,11 @@ def test_basic(self): if __name__ == "__main__": + with warnings.catch_warnings(): + # this needs to be imported in the main module for multiprocessing to work "approximately" + warnings.filterwarnings("ignore") + _ = grid2op.make("l2rpn_case14_sandbox", + test=True, + _add_to_name=__name__+"for_mp_test") + unittest.main() diff --git a/grid2op/tests/test_resest_options.py b/grid2op/tests/test_resest_options.py new file mode 100644 index 000000000..5a5d6b2b0 --- /dev/null +++ b/grid2op/tests/test_resest_options.py @@ -0,0 +1,291 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# See AUTHORS.txt +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. + +import warnings +import grid2op +from grid2op.Exceptions import Grid2OpException +import unittest +import pdb + + +class InitTSOptions(unittest.TestCase): + """test the "init ts" options in env.reset() """ + def setUp(self) -> None: + self.env_name = "l2rpn_case14_sandbox" + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = grid2op.make(self.env_name, test=True, + _add_to_name=type(self).__name__) + + def test_function_ok(self): + obs = self.env.reset() # normal reset + assert obs.year == 2019 + assert obs.month == 1 + assert obs.day == 6 + assert obs.hour_of_day == 0 + assert obs.minute_of_hour == 0 + + obs = self.env.reset(options={"init ts": 1}) # skip the first step, start at 5 minutes + assert obs.year == 2019 + assert obs.month == 1 + assert obs.day == 6 + assert obs.hour_of_day == 0 + assert obs.minute_of_hour == 5, f"{ obs.minute_of_hour} vs 5" + + obs = self.env.reset(options={"init ts": 2}) # start after 10 minutes, 2 steps + assert obs.year == 2019 + assert obs.month == 1 + assert obs.day == 6 + assert obs.hour_of_day == 0 + assert obs.minute_of_hour == 10, f"{ obs.minute_of_hour} vs 10" + + obs = self.env.reset(options={"init ts": 6}) # start after 6steps (30 minutes) + assert obs.year == 2019 + assert obs.month == 1 + assert obs.day == 6 + assert obs.hour_of_day == 0 + assert obs.minute_of_hour == 30, f"{ obs.minute_of_hour} vs 30" + + obs = self.env.reset(options={"init ts": 12}) # start at the 12th step + assert obs.year == 2019 + assert obs.month == 1 + assert obs.day == 6 + assert obs.hour_of_day == 1, f"{ obs.minute_of_hour} vs 1" + assert obs.minute_of_hour == 0, f"{ obs.minute_of_hour} vs 0" + + obs = self.env.reset(options={"init ts": 12 * 24}) # start after exactly 1 day + assert obs.year == 2019 + assert obs.month == 1 + assert obs.day == 7, f"{ obs.day} vs 7" + assert obs.hour_of_day == 0, f"{ obs.hour_of_day} vs 1" + assert obs.minute_of_hour == 0, f"{ obs.minute_of_hour} vs 0" + + def test_soft_overflow(self): + """check that the lines are not on soft overflow (obs.timestep_overflow == 0 just after reset)""" + line_id = 3 + obs = self.env.reset(options={"time serie id": 0}) + th_lim = 1. 
* self.env.get_thermal_limit() + th_lim[line_id] = 0.6 * obs.a_or[line_id] + self.env.set_thermal_limit(th_lim) + obs = self.env.reset(options={"time serie id": 0}) + assert (obs.timestep_overflow == 0).all() + assert obs.rho[line_id] > 1. + assert obs.line_status[line_id] + + obs = self.env.reset(options={"time serie id": 0}) + assert (obs.timestep_overflow == 0).all() + assert obs.rho[line_id] > 1. + assert obs.line_status[line_id] + + obs = self.env.reset(options={"time serie id": 0, "init ts": 1}) + assert (obs.timestep_overflow == 0).all() + assert obs.rho[line_id] > 1. + assert obs.line_status[line_id] + + obs = self.env.reset(options={"time serie id": 0, "init ts": 2}) + assert (obs.timestep_overflow == 0).all() + assert obs.rho[line_id] > 1. + assert obs.line_status[line_id] + + obs = self.env.reset(options={"time serie id": 0, "init ts": 6}) + assert (obs.timestep_overflow == 0).all() + assert obs.rho[line_id] > 1. + assert obs.line_status[line_id] + + def test_hard_overflow(self): + """check lines are disconnected if on hard overflow at the beginning""" + line_id = 3 + obs = self.env.reset(options={"time serie id": 0}) + th_lim = 1. * self.env.get_thermal_limit() + th_lim[line_id] = 0.4 * obs.a_or[line_id] + self.env.set_thermal_limit(th_lim) + obs = self.env.reset(options={"time serie id": 0}) + assert (obs.timestep_overflow == 0).all() + assert obs.rho[line_id] == 0. + assert not obs.line_status[line_id] + assert obs.time_before_cooldown_line[line_id] == 0 + + obs = self.env.reset(options={"time serie id": 0}) + assert (obs.timestep_overflow == 0).all() + assert obs.rho[line_id] == 0. + assert not obs.line_status[line_id] + assert obs.time_before_cooldown_line[line_id] == 0 + + obs = self.env.reset(options={"time serie id": 0, "init ts": 1}) + assert (obs.timestep_overflow == 0).all() + assert obs.rho[line_id] == 0. + assert not obs.line_status[line_id] + assert obs.time_before_cooldown_line[line_id] == 0 + + obs = self.env.reset(options={"time serie id": 0, "init ts": 2}) + assert (obs.timestep_overflow == 0).all() + assert obs.rho[line_id] == 0. + assert not obs.line_status[line_id] + assert obs.time_before_cooldown_line[line_id] == 0 + + obs = self.env.reset(options={"time serie id": 0, "init ts": 6}) + assert (obs.timestep_overflow == 0).all() + assert obs.rho[line_id] == 0. 
+ assert not obs.line_status[line_id] + assert obs.time_before_cooldown_line[line_id] == 0 + + + def test_raise_if_args_not_correct(self): + with self.assertRaises(Grid2OpException): + # string and not int + obs = self.env.reset(options={"init ts": "treliug"}) + with self.assertRaises(Grid2OpException): + # float which is not an int + obs = self.env.reset(options={"init ts": 1.5}) + with self.assertRaises(Grid2OpException): + # value too small + obs = self.env.reset(options={"init ts": 0}) + + # should work with a float convertible to an int + obs = self.env.reset(options={"time serie id": 0, "init ts": 6.}) + + +class MaxStepOptions(unittest.TestCase): + """test the "max step" options in env.reset() """ + def setUp(self) -> None: + self.env_name = "l2rpn_case14_sandbox" + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = grid2op.make(self.env_name, test=True, + _add_to_name=type(self).__name__) + + def test_raise_if_args_not_correct(self): + with self.assertRaises(Grid2OpException): + # string and not int + obs = self.env.reset(options={"max step": "treliug"}) + with self.assertRaises(Grid2OpException): + # float which is not an int + obs = self.env.reset(options={"max step": 1.5}) + + with self.assertRaises(Grid2OpException): + # value too small + obs = self.env.reset(options={"max step": 0}) + + # should work with a float convertible to an int + obs = self.env.reset(options={"time serie id": 0, "max step": 6.}) + + def test_function_ok(self): + obs = self.env.reset() # normal reset + assert obs.max_step == 575, f"{obs.max_step} vs 575" + + # enough data to be limited + obs = self.env.reset(options={"max step": 5}) + assert obs.max_step == 5, f"{obs.max_step} vs 5" + + # limit has no effect: not enough data anyway + obs = self.env.reset(options={"max step": 800}) + assert obs.max_step == 575, f"{obs.max_step} vs 575" + + def test_no_impact_next_reset(self): + obs = self.env.reset() # normal reset + assert obs.max_step == 575, f"{obs.max_step} vs 575" + + # enough data to be limited + obs = self.env.reset(options={"max step": 5}) + assert obs.max_step == 5, f"{obs.max_step} vs 5" + + obs = self.env.reset() # normal reset + assert obs.max_step == 575, f"{obs.max_step} vs 575" + + def test_remember_previous_max_iter(self): + obs = self.env.reset() # normal reset + assert obs.max_step == 575, f"{obs.max_step} vs 575" + + self.env.set_max_iter(200) + obs = self.env.reset() # normal reset + assert obs.max_step == 200, f"{obs.max_step} vs 200" + + # use the option to limit + obs = self.env.reset(options={"max step": 5}) + assert obs.max_step == 5, f"{obs.max_step} vs 5" + + # check it remembers the previous limit + obs = self.env.reset() # normal reset (but 200 were set) + assert obs.max_step == 200, f"{obs.max_step} vs 200" + + # set back the limit to "maximum in the time serie" + self.env.set_max_iter(-1) + obs = self.env.reset() # normal reset + assert obs.max_step == 575, f"{obs.max_step} vs 575" + + # limit for this reset only + obs = self.env.reset(options={"max step": 5}) + assert obs.max_step == 5, f"{obs.max_step} vs 5" + + # check again the right limit was applied + obs = self.env.reset() # normal reset (but 575 were set back) + assert obs.max_step == 575, f"{obs.max_step} vs 575" + + def test_max_step_and_init_ts(self): + """test that episode duration is properly computed and updated in + the observation when both max step and init ts are set at the same time""" + obs = self.env.reset() # normal reset + assert obs.max_step == 575, f"{obs.max_step} vs 
575" + assert obs.year == 2019 + assert obs.month == 1 + assert obs.day == 6 + assert obs.hour_of_day == 0 + assert obs.minute_of_hour == 0 + + obs = self.env.reset(options={"init ts": 12 * 24, "max step": 24}) # start after exactly 1 day for 2 hours + assert obs.year == 2019 + assert obs.month == 1 + assert obs.day == 7, f"{ obs.day} vs 7" + assert obs.hour_of_day == 0, f"{ obs.hour_of_day} vs 1" + assert obs.minute_of_hour == 0, f"{ obs.minute_of_hour} vs 0" + assert obs.max_step == 24, f"{obs.max_step} vs 24" + + obs = self.env.reset(options={"init ts": 12 * 24}) # start after exactly 1 day without any max + assert obs.year == 2019 + assert obs.month == 1 + assert obs.day == 7, f"{ obs.day} vs 7" + assert obs.hour_of_day == 0, f"{ obs.hour_of_day} vs 1" + assert obs.minute_of_hour == 0, f"{ obs.minute_of_hour} vs 0" + assert obs.max_step == 575, f"{obs.max_step} vs 575" + + obs = self.env.reset() # normal reset + assert obs.max_step == 575, f"{obs.max_step} vs 575" + assert obs.year == 2019 + assert obs.month == 1 + assert obs.day == 6 + assert obs.hour_of_day == 0 + assert obs.minute_of_hour == 0 + + obs = self.env.reset(options={"max step": 288}) # don't skip anything, but last only 1 day + assert obs.year == 2019 + assert obs.month == 1 + assert obs.day == 6, f"{ obs.day} vs 6" + assert obs.hour_of_day == 0, f"{ obs.hour_of_day} vs 1" + assert obs.minute_of_hour == 0, f"{ obs.minute_of_hour} vs 0" + assert obs.max_step == 288, f"{obs.max_step} vs 288" + + obs = self.env.reset(options={"init ts": 12 * 24, "max step": 700}) # start after exactly 1 day for too much steps + assert obs.year == 2019 + assert obs.month == 1 + assert obs.day == 7, f"{ obs.day} vs 7" + assert obs.hour_of_day == 0, f"{ obs.hour_of_day} vs 1" + assert obs.minute_of_hour == 0, f"{ obs.minute_of_hour} vs 0" + # 288 here because the limit is the time series ! + assert obs.max_step == 287, f"{obs.max_step} vs 287" + + obs = self.env.reset() # normal reset + assert obs.max_step == 575, f"{obs.max_step} vs 575" + assert obs.year == 2019 + assert obs.month == 1 + assert obs.day == 6 + assert obs.hour_of_day == 0 + assert obs.minute_of_hour == 0 + + +if __name__ == "__main__": + unittest.main() diff --git a/grid2op/tests/test_reset_options_runner.py b/grid2op/tests/test_reset_options_runner.py new file mode 100644 index 000000000..94da9ada1 --- /dev/null +++ b/grid2op/tests/test_reset_options_runner.py @@ -0,0 +1,910 @@ +# Copyright (c) 2024, RTE (https://www.rte-france.com) +# See AUTHORS.txt +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. 
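The new file test_reset_options_runner.py below covers the Runner side of the same feature: reset_options can be a single dict applied to every episode, or a list/tuple with one dict per episode, combining the "time serie id", "max step" and "init state" keys. A condensed sketch of the calls being tested (the option values here are illustrative only):

    import grid2op
    from grid2op.Runner import Runner

    env = grid2op.make("l2rpn_case14_sandbox", test=True)
    runner = Runner(**env.get_params_for_runner())
    # one dict applied to every episode
    res = runner.run(nb_episode=2, reset_options={"time serie id": 1}, max_iter=5)
    # or one dict per episode
    res = runner.run(nb_episode=2,
                     reset_options=({"time serie id": 0, "max step": 6},
                                    {"time serie id": 1,
                                     "init state": {"set_line_status": [(0, -1)], "method": "ignore"}}))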
+ + +import warnings +import unittest + +import grid2op +from grid2op.Runner import Runner +from grid2op.tests.helper_path_test import * + + +class TestResetOptionRunner(unittest.TestCase): + def _env_path(self): + return "l2rpn_case14_sandbox" + + def setUp(self) -> None: + self.env_nm = self._env_path() + self.max_iter = 5 + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = grid2op.make(self.env_nm, + test=True + ) + self.runner = Runner(**self.env.get_params_for_runner()) + + def tearDown(self) -> None: + self.env.close() + self.runner._clean_up() + return super().tearDown() + + def test_run_one_episode_ts_id(self): + with warnings.catch_warnings(): + warnings.filterwarnings("error") # check it does not raise any error + res = self.runner.run_one_episode(reset_options={"time serie id": 1}, + max_iter=self.max_iter, + detailed_output=True + ) + assert res[1]== '0001' + assert res[3] == self.max_iter + assert res[4] == self.max_iter + + with warnings.catch_warnings(): + warnings.filterwarnings("error") # check it does not raise any error + res = self.runner.run_one_episode(reset_options={}, + episode_id=1, + max_iter=self.max_iter, + detailed_output=True + ) + assert res[1]== '0001' + assert res[3] == self.max_iter + assert res[4] == self.max_iter + + # check the correct episode id is used + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + res = self.runner.run_one_episode(reset_options={"time serie id": 1}, + max_iter=self.max_iter, + episode_id=0, + detailed_output=True + ) + assert res[1]== '0000' + assert res[3] == self.max_iter + assert res[4] == self.max_iter + + def test_run_one_episode_warning_raised_ts_id(self): + # check it does raise an error + with self.assertRaises(UserWarning): + with warnings.catch_warnings(): + warnings.filterwarnings("error") + res = self.runner.run_one_episode(reset_options={"time serie id": 1}, + max_iter=self.max_iter, + episode_id=3, + detailed_output=True + ) + + def test_run_onesingle_ep_ts_id(self): + # one reset option + res = self.runner.run(nb_episode=1, + reset_options={"time serie id": 1}, + max_iter=self.max_iter + ) + assert res[0][1]== '0001' + assert res[0][3] == self.max_iter + assert res[0][4] == self.max_iter + + # one list (of one element here) + res = self.runner.run(nb_episode=1, + reset_options=[{"time serie id": 1}], + max_iter=self.max_iter + ) + assert res[0][1]== '0001' + assert res[0][3] == self.max_iter + assert res[0][4] == self.max_iter + + # one tuple (of one element here) + res = self.runner.run(nb_episode=1, + reset_options=({"time serie id": 1}, ), + max_iter=self.max_iter + ) + assert res[0][1]== '0001' + assert res[0][3] == self.max_iter + assert res[0][4] == self.max_iter + + # check the correct episode id is used + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + res = self.runner.run(nb_episode=1, + reset_options={"time serie id": 1}, + max_iter=self.max_iter, + episode_id=[0] + ) + assert res[0][1]== '0000' + assert res[0][3] == self.max_iter + assert res[0][4] == self.max_iter + + # check the warning is raised + with self.assertRaises(UserWarning): + with warnings.catch_warnings(): + warnings.filterwarnings("error") + res = self.runner.run(nb_episode=1, + reset_options={"time serie id": 1}, + max_iter=self.max_iter, + episode_id=[0] + ) + + def test_run_two_eps_seq_ts_id(self, nb_process=1): + # one reset option + res = self.runner.run(nb_episode=2, + reset_options={"time serie id": 1}, + max_iter=self.max_iter, + nb_process=nb_process + ) + 
for el in res: + assert el[1]== '0001' + assert el[3] == self.max_iter + assert el[4] == self.max_iter + + # one list (of one element here) + res = self.runner.run(nb_episode=2, + reset_options=[{"time serie id": 1}, {"time serie id": 1}], + max_iter=self.max_iter, + nb_process=nb_process + ) + for el in res: + assert el[1]== '0001' + assert el[3] == self.max_iter + assert el[4] == self.max_iter + + # one tuple (of one element here) + res = self.runner.run(nb_episode=2, + reset_options=({"time serie id": 1}, {"time serie id": 1}), + max_iter=self.max_iter, + nb_process=nb_process + ) + for el in res: + assert el[1]== '0001' + assert el[3] == self.max_iter + assert el[4] == self.max_iter + + # check the correct episode id is used + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + res = self.runner.run(nb_episode=2, + reset_options={"time serie id": 1}, + max_iter=self.max_iter, + episode_id=[0, 1], + nb_process=nb_process + ) + assert res[0][1]== '0000' + assert res[1][1]== '0001' + for el in res: + assert el[3] == self.max_iter + assert el[4] == self.max_iter + + # check the warning is raised + with self.assertRaises(UserWarning): + with warnings.catch_warnings(): + warnings.filterwarnings("error") + res = self.runner.run(nb_episode=2, + reset_options={"time serie id": 1}, + max_iter=self.max_iter, + episode_id=[0, 1], + nb_process=nb_process + ) + + def test_run_two_eps_seq_two_options_ts_id(self, nb_process=1): + # one list (of one element here) + res = self.runner.run(nb_episode=2, + reset_options=[{"time serie id": 0}, {"time serie id": 1}], + max_iter=self.max_iter, + nb_process=nb_process + ) + assert res[0][1]== '0000' + assert res[1][1]== '0001' + for el in res: + assert el[3] == self.max_iter + assert el[4] == self.max_iter + + # one tuple (of one element here) + res = self.runner.run(nb_episode=2, + reset_options=({"time serie id": 0}, {"time serie id": 1}), + max_iter=self.max_iter, + nb_process=nb_process + ) + assert res[0][1]== '0000' + assert res[1][1]== '0001' + for el in res: + assert el[3] == self.max_iter + assert el[4] == self.max_iter + + # check the correct episode id is used + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + res = self.runner.run(nb_episode=2, + reset_options={"time serie id": 1}, + max_iter=self.max_iter, + episode_id=[0, 1], + nb_process=nb_process + ) + assert res[0][1]== '0000' + assert res[1][1]== '0001' + for el in res: + assert el[3] == self.max_iter + assert el[4] == self.max_iter + + # check the warning is raised + with self.assertRaises(UserWarning): + with warnings.catch_warnings(): + warnings.filterwarnings("error") + res = self.runner.run(nb_episode=2, + reset_options={"time serie id": 1}, + max_iter=self.max_iter, + episode_id=[0, 1], + nb_process=nb_process + ) + + def test_run_two_eps_par_ts_id(self): + self.test_run_two_eps_seq_ts_id(nb_process=2) + + def test_run_two_eps_par_two_opts_ts_id(self): + self.test_run_two_eps_seq_two_options_ts_id(nb_process=2) + + def test_fail_when_needed(self): + # wrong type + with self.assertRaises(RuntimeError): + res = self.runner.run(nb_episode=2, + reset_options=1, + episode_id=[0, 1], + max_iter=self.max_iter, + add_detailed_output=True, + ) + with self.assertRaises(RuntimeError): + res = self.runner.run(nb_episode=2, + reset_options=[1, {"time serie id": 1}], + episode_id=[0, 1], + max_iter=self.max_iter, + add_detailed_output=True, + ) + with self.assertRaises(RuntimeError): + res = self.runner.run(nb_episode=2, + reset_options=[{"time serie id": 1}, 
1], + episode_id=[0, 1], + max_iter=self.max_iter, + add_detailed_output=True, + ) + + # wrong size (too big) + with self.assertRaises(RuntimeError): + res = self.runner.run(nb_episode=2, + reset_options=[{"time serie id": 1}, + {"time serie id": 1}, + {"time serie id": 1}], + episode_id=[0, 1], + max_iter=self.max_iter, + add_detailed_output=True, + ) + # wrong size (too small) + with self.assertRaises(RuntimeError): + res = self.runner.run(nb_episode=2, + reset_options=[{"time serie id": 1}], + episode_id=[0, 1], + max_iter=self.max_iter, + add_detailed_output=True, + ) + # wrong key (beginning) + with self.assertRaises(RuntimeError): + res = self.runner.run(nb_episode=2, + reset_options=[{"bleurk": 1}, {"time serie id": 1}], + episode_id=[0, 1], + max_iter=self.max_iter, + add_detailed_output=True, + ) + # wrong key (end) + with self.assertRaises(RuntimeError): + res = self.runner.run(nb_episode=2, + reset_options=[{"time serie id": 1}, {"bleurk": 1}], + episode_id=[0, 1], + max_iter=self.max_iter, + add_detailed_output=True, + ) + # wrong key (when alone) + with self.assertRaises(RuntimeError): + res = self.runner.run(nb_episode=2, + reset_options={"bleurk": 1}, + episode_id=[0, 1], + max_iter=self.max_iter, + add_detailed_output=True, + ) + + def test_run_one_episode_max_it(self): + with warnings.catch_warnings(): + warnings.filterwarnings("error") # check it does not raise any error + res = self.runner.run_one_episode(reset_options={"max step": self.max_iter, "time serie id": 1}, + detailed_output=True + ) + assert res[1]== '0001' + assert res[3] == self.max_iter + assert res[4] == self.max_iter + + with warnings.catch_warnings(): + warnings.filterwarnings("error") # check it does not raise any error + res = self.runner.run_one_episode(reset_options={"time serie id": 1}, + max_iter=self.max_iter, + detailed_output=True + ) + assert res[1]== '0001' + assert res[3] == self.max_iter + assert res[4] == self.max_iter + + # check the correct max iter is used + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + res = self.runner.run_one_episode(reset_options={"time serie id": 1, "max step": self.max_iter + 1}, + max_iter=self.max_iter, + episode_id=0, + detailed_output=True + ) + assert res[1]== '0000' + assert res[3] == self.max_iter + assert res[4] == self.max_iter + + def test_run_one_episode_warning_raised_max_it(self): + # check it does raise an error + with self.assertRaises(UserWarning): + with warnings.catch_warnings(): + warnings.filterwarnings("error") + res = self.runner.run_one_episode(reset_options={"time serie id": 1, "max step": self.max_iter + 3}, + max_iter=self.max_iter + ) + + def test_run_onesingle_ep_max_it(self): + # one reset option + res = self.runner.run(nb_episode=1, + reset_options={"time serie id": 1, "max step": self.max_iter}, + ) + assert res[0][1]== '0001' + assert res[0][3] == self.max_iter + assert res[0][4] == self.max_iter + + # one list (of one element here) + res = self.runner.run(nb_episode=1, + reset_options=[{"time serie id": 1, "max step": self.max_iter}], + ) + assert res[0][1]== '0001' + assert res[0][3] == self.max_iter + assert res[0][4] == self.max_iter + + # one tuple (of one element here) + res = self.runner.run(nb_episode=1, + reset_options=({"time serie id": 1, "max step": self.max_iter}, ), + ) + assert res[0][1]== '0001' + assert res[0][3] == self.max_iter + assert res[0][4] == self.max_iter + + # check the correct episode id is used + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + res = 
self.runner.run(nb_episode=1, + reset_options={"time serie id": 0, "max step": self.max_iter + 3}, + max_iter=self.max_iter, + ) + assert res[0][1]== '0000' + assert res[0][3] == self.max_iter + assert res[0][4] == self.max_iter + + # check the warning is raised + with self.assertRaises(UserWarning): + with warnings.catch_warnings(): + warnings.filterwarnings("error") + res = self.runner.run(nb_episode=1, + reset_options={"time serie id": 0, "max step": self.max_iter + 3}, + max_iter=self.max_iter + ) + + def test_run_two_eps_seq_max_it(self, nb_process=1): + # one reset option + res = self.runner.run(nb_episode=2, + reset_options={"time serie id": 1, "max step": self.max_iter }, + nb_process=nb_process + ) + for el in res: + assert el[1]== '0001' + assert el[3] == self.max_iter + assert el[4] == self.max_iter + + # one list (of the same element here) + res = self.runner.run(nb_episode=2, + reset_options=[{"time serie id": 1, "max step": self.max_iter}, + {"time serie id": 1, "max step": self.max_iter}], + nb_process=nb_process + ) + for el in res: + assert el[1]== '0001' + assert el[3] == self.max_iter + assert el[4] == self.max_iter + + # one tuple (of the same element here) + res = self.runner.run(nb_episode=2, + reset_options=({"time serie id": 1, "max step": self.max_iter}, + {"time serie id": 1, "max step": self.max_iter}), + nb_process=nb_process + ) + for el in res: + assert el[1]== '0001' + assert el[3] == self.max_iter + assert el[4] == self.max_iter + + # check the correct "max iter" is used + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + res = self.runner.run(nb_episode=2, + reset_options={"max step": self.max_iter + 3}, + max_iter=self.max_iter, + episode_id=[0, 1], + nb_process=nb_process + ) + assert res[0][1]== '0000' + assert res[1][1]== '0001' + for el in res: + assert el[3] == self.max_iter + assert el[4] == self.max_iter + + # check the warning is raised + with self.assertRaises(UserWarning): + with warnings.catch_warnings(): + warnings.filterwarnings("error") + res = self.runner.run(nb_episode=2, + reset_options={"max step": self.max_iter + 3}, + max_iter=self.max_iter, + episode_id=[0, 1], + nb_process=nb_process + ) + + def test_run_two_eps_seq_two_options_max_it(self, nb_process=1): + # one list (of two different elements here) + res = self.runner.run(nb_episode=2, + reset_options=[{"time serie id": 0, "max step": self.max_iter + 1}, + {"time serie id": 1, "max step": self.max_iter + 2}], + nb_process=nb_process + ) + assert res[0][1]== '0000' + assert res[0][3] == self.max_iter + 1 + assert res[0][4] == self.max_iter + 1 + assert res[1][1]== '0001' + assert res[1][3] == self.max_iter + 2 + assert res[1][4] == self.max_iter + 2 + + # one tuple (of two different elements here) + res = self.runner.run(nb_episode=2, + reset_options=({"time serie id": 0, "max step": self.max_iter + 1}, + {"time serie id": 1, "max step": self.max_iter + 2}), + nb_process=nb_process + ) + assert res[0][1]== '0000' + assert res[0][3] == self.max_iter + 1 + assert res[0][4] == self.max_iter + 1 + assert res[1][1]== '0001' + assert res[1][3] == self.max_iter + 2 + assert res[1][4] == self.max_iter + 2 + + # check the correct max iter is used + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + res = self.runner.run(nb_episode=2, + reset_options={"max step": self.max_iter + 1}, + max_iter=self.max_iter, + episode_id=[0, 1], + nb_process=nb_process + ) + assert res[0][1]== '0000' + assert res[1][1]== '0001' + for el in res: + assert el[3] == 
self.max_iter + assert el[4] == self.max_iter + + # check the warning is raised + with self.assertRaises(UserWarning): + with warnings.catch_warnings(): + warnings.filterwarnings("error") + res = self.runner.run(nb_episode=2, + reset_options={"max step": self.max_iter + 1}, + max_iter=self.max_iter, + episode_id=[0, 1], + nb_process=nb_process + ) + + def test_run_two_eps_par_max_it(self): + self.test_run_two_eps_seq_max_it(nb_process=2) + + def test_run_two_eps_par_two_opts_max_it(self): + self.test_run_two_eps_seq_two_options_max_it(nb_process=2) + + def test_run_one_episode_init_act(self): + with warnings.catch_warnings(): + warnings.filterwarnings("error") # check it does not raise any error + res = self.runner.run_one_episode(reset_options={"max step": self.max_iter, "time serie id": 1, + "init state": {"set_line_status": [(1, -1)], "method": "ignore"}}, + detailed_output=True + ) + assert res[1]== '0001' + assert res[3] == self.max_iter + assert res[4] == self.max_iter + + ep_data = res[-1] + init_obs = ep_data.observations[0] + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == -1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[1]] == -1 + assert not init_obs.line_status[1] + + with warnings.catch_warnings(): + warnings.filterwarnings("error") # check it does not raise any error + res = self.runner.run_one_episode(reset_options={"time serie id": 1}, + max_iter=self.max_iter, + init_state={"set_line_status": [(1, -1)], "method": "ignore"}, + detailed_output=True + ) + assert res[1]== '0001' + assert res[3] == self.max_iter + assert res[4] == self.max_iter + + ep_data = res[-1] + init_obs = ep_data.observations[0] + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == -1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[1]] == -1 + assert not init_obs.line_status[1] + + # check the correct init state is used + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + res = self.runner.run_one_episode(reset_options={"time serie id": 1, + "max step": self.max_iter + 1, + "init state": {"set_line_status": [(0, -1)], "method": "ignore"} + }, + max_iter=self.max_iter, + episode_id=0, + init_state={"set_line_status": [(1, -1)], "method": "ignore"}, + detailed_output=True + ) + assert res[1]== '0000' + assert res[3] == self.max_iter + assert res[4] == self.max_iter + + ep_data = res[-1] + init_obs = ep_data.observations[0] + # line 1 is disco + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == -1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[1]] == -1 + assert not init_obs.line_status[1] + # line 0 should not + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[0]] == 1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[0]] == 1 + assert init_obs.line_status[0] + + def test_run_one_episode_warning_raised_init_act(self): + # check it does raise an error + with self.assertRaises(UserWarning): + with warnings.catch_warnings(): + warnings.filterwarnings("error") + res = self.runner.run_one_episode(reset_options={"time serie id": 1, + "max step": self.max_iter + 3, + "init state": {"set_line_status": [(0, -1)], "method": "ignore"}}, + init_state={"set_line_status": [(1, -1)], "method": "ignore"}, + ) + + def test_run_onesingle_ep_init_act(self): + # one reset option + res = self.runner.run(nb_episode=1, + reset_options={"time serie id": 1, + "max step": self.max_iter, + "init state": {"set_line_status": [(0, -1)], "method": "ignore"} + }, + add_detailed_output=True + ) + assert res[0][1]== '0001' + 
assert res[0][3] == self.max_iter + assert res[0][4] == self.max_iter + ep_data = res[0][-1] + init_obs = ep_data.observations[0] + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[0]] == -1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[0]] == -1 + assert not init_obs.line_status[0] + + # one list (of one element here) + res = self.runner.run(nb_episode=1, + reset_options=[{"time serie id": 1, + "max step": self.max_iter, + "init state": {"set_line_status": [(0, -1)], "method": "ignore"} + }], + add_detailed_output=True + ) + assert res[0][1]== '0001' + assert res[0][3] == self.max_iter + assert res[0][4] == self.max_iter + ep_data = res[0][-1] + init_obs = ep_data.observations[0] + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[0]] == -1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[0]] == -1 + assert not init_obs.line_status[0] + + # one tuple (of one element here) + res = self.runner.run(nb_episode=1, + reset_options=({"time serie id": 1, + "max step": self.max_iter, + "init state": {"set_line_status": [(0, -1)], "method": "ignore"} + }, ), + add_detailed_output=True + ) + assert res[0][1]== '0001' + assert res[0][3] == self.max_iter + assert res[0][4] == self.max_iter + ep_data = res[0][-1] + init_obs = ep_data.observations[0] + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[0]] == -1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[0]] == -1 + assert not init_obs.line_status[0] + + # check the correct init action is used + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + res = self.runner.run(nb_episode=1, + reset_options={"time serie id": 0, + "max step": self.max_iter + 3, + "init state": {"set_line_status": [(0, -1)], "method": "ignore"}}, + max_iter=self.max_iter, + init_states={"set_line_status": [(1, -1)], "method": "ignore"}, + add_detailed_output=True + ) + assert res[0][1]== '0000' + assert res[0][3] == self.max_iter + assert res[0][4] == self.max_iter + ep_data = res[0][-1] + init_obs = ep_data.observations[0] + # line 1 is disco + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == -1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[1]] == -1 + assert not init_obs.line_status[1] + # line 0 should not + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[0]] == 1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[0]] == 1 + assert init_obs.line_status[0] + + # check the warning is raised + with self.assertRaises(UserWarning): + with warnings.catch_warnings(): + warnings.filterwarnings("error") + res = self.runner.run(nb_episode=1, + reset_options={"time serie id": 0, + "init state": {"set_line_status": [(0, -1)], "method": "ignore"}}, + max_iter=self.max_iter, + init_states={"set_line_status": [(1, -1)], "method": "ignore"}, + add_detailed_output=True + ) + + def test_run_two_eps_seq_init_act(self, nb_process=1): + # one reset option + res = self.runner.run(nb_episode=2, + reset_options={"time serie id": 1, + "max step": self.max_iter, + "init state": {"set_line_status": [(0, -1)], "method": "ignore"} + }, + nb_process=nb_process, + add_detailed_output=True + ) + for el in res: + assert el[1]== '0001' + assert el[3] == self.max_iter + assert el[4] == self.max_iter + ep_data = el[-1] + init_obs = ep_data.observations[0] + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[0]] == -1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[0]] == -1 + assert not init_obs.line_status[0] + + # one list (of the same element here) + res = 
self.runner.run(nb_episode=2, + reset_options=[{"time serie id": 1, + "max step": self.max_iter, + "init state": {"set_line_status": [(0, -1)], "method": "ignore"}}, + {"time serie id": 1, + "max step": self.max_iter, + "init state": {"set_line_status": [(0, -1)], "method": "ignore"}}], + nb_process=nb_process, + add_detailed_output=True + ) + for el in res: + assert el[1]== '0001' + assert el[3] == self.max_iter + assert el[4] == self.max_iter + ep_data = el[-1] + init_obs = ep_data.observations[0] + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[0]] == -1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[0]] == -1 + assert not init_obs.line_status[0] + + # one tuple (of the same element here) + res = self.runner.run(nb_episode=2, + reset_options=({"time serie id": 1, + "max step": self.max_iter, + "init state": {"set_line_status": [(0, -1)], "method": "ignore"}}, + {"time serie id": 1, + "max step": self.max_iter, + "init state": {"set_line_status": [(0, -1)], "method": "ignore"}}), + nb_process=nb_process, + add_detailed_output=True + ) + for el in res: + assert el[1]== '0001' + assert el[3] == self.max_iter + assert el[4] == self.max_iter + ep_data = el[-1] + init_obs = ep_data.observations[0] + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[0]] == -1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[0]] == -1 + assert not init_obs.line_status[0] + + # check the correct "init state" is used + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + res = self.runner.run(nb_episode=2, + reset_options={"init state": {"set_line_status": [(0, -1)], "method": "ignore"}}, + max_iter=self.max_iter, + episode_id=[0, 1], + nb_process=nb_process, + init_states={"set_line_status": [(1, -1)], "method": "ignore"}, + add_detailed_output=True + ) + assert res[0][1]== '0000' + assert res[1][1]== '0001' + for el in res: + assert el[3] == self.max_iter + assert el[4] == self.max_iter + ep_data = el[-1] + init_obs = ep_data.observations[0] + # line 1 is disco + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == -1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[1]] == -1 + assert not init_obs.line_status[1] + # line 0 should not + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[0]] == 1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[0]] == 1 + assert init_obs.line_status[0] + + # check the warning is raised + with self.assertRaises(UserWarning): + with warnings.catch_warnings(): + warnings.filterwarnings("error") + res = self.runner.run(nb_episode=2, + reset_options={"init state": {"set_line_status": [(0, -1)], "method": "ignore"}}, + max_iter=self.max_iter, + episode_id=[0, 1], + nb_process=nb_process, + init_states={"set_line_status": [(1, -1)], "method": "ignore"}, + add_detailed_output=True + ) + + def test_run_two_eps_seq_two_options_init_act(self, nb_process=1): + # one list (of two different elements here) + res = self.runner.run(nb_episode=2, + reset_options=[{"time serie id": 0, + "max step": self.max_iter + 1, + "init state": {"set_line_status": [(0, -1)], "method": "ignore"}}, + {"time serie id": 1, + "max step": self.max_iter + 2, + "init state": {"set_line_status": [(1, -1)], "method": "ignore"}}], + nb_process=nb_process, + add_detailed_output=True + ) + assert res[0][1]== '0000' + assert res[0][3] == self.max_iter + 1 + assert res[0][4] == self.max_iter + 1 + ep_data = res[0][-1] + init_obs = ep_data.observations[0] + # line 0 is disco + assert 
init_obs.topo_vect[init_obs.line_or_pos_topo_vect[0]] == -1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[0]] == -1 + assert not init_obs.line_status[0] + # line 1 should not + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == 1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[1]] == 1 + assert init_obs.line_status[1] + + assert res[1][1]== '0001' + assert res[1][3] == self.max_iter + 2 + assert res[1][4] == self.max_iter + 2 + ep_data = res[1][-1] + init_obs = ep_data.observations[0] + # line 1 is disco + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == -1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[1]] == -1 + assert not init_obs.line_status[1] + # line 0 should not + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[0]] == 1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[0]] == 1 + assert init_obs.line_status[0] + + # one tuple (of two different elements here) + res = self.runner.run(nb_episode=2, + reset_options=({"time serie id": 0, + "max step": self.max_iter + 1, + "init state": {"set_line_status": [(0, -1)], "method": "ignore"}}, + {"time serie id": 1, + "max step": self.max_iter + 2, + "init state": {"set_line_status": [(1, -1)], "method": "ignore"}}), + nb_process=nb_process, + add_detailed_output=True + ) + assert res[0][1]== '0000' + assert res[0][3] == self.max_iter + 1 + assert res[0][4] == self.max_iter + 1 + ep_data = res[0][-1] + init_obs = ep_data.observations[0] + # line 0 is disco + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[0]] == -1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[0]] == -1 + assert not init_obs.line_status[0] + # line 1 should not + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == 1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[1]] == 1 + assert init_obs.line_status[1] + + assert res[1][1]== '0001' + assert res[1][3] == self.max_iter + 2 + assert res[1][4] == self.max_iter + 2 + ep_data = res[1][-1] + init_obs = ep_data.observations[0] + # line 1 is disco + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == -1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[1]] == -1 + assert not init_obs.line_status[1] + # line 0 should not + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[0]] == 1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[0]] == 1 + assert init_obs.line_status[0] + + # check the correct init state is used + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + res = self.runner.run(nb_episode=2, + reset_options={"init state": {"set_line_status": [(0, -1)], "method": "ignore"}}, + max_iter=self.max_iter, + episode_id=[0, 1], + nb_process=nb_process, + add_detailed_output=True, + init_states={"set_line_status": [(1, -1)], "method": "ignore"} + ) + assert res[0][1]== '0000' + assert res[1][1]== '0001' + for el in res: + assert el[3] == self.max_iter + assert el[4] == self.max_iter + ep_data = el[-1] + init_obs = ep_data.observations[0] + # line 1 is disco + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[1]] == -1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[1]] == -1 + assert not init_obs.line_status[1] + # line 0 should not + assert init_obs.topo_vect[init_obs.line_or_pos_topo_vect[0]] == 1 + assert init_obs.topo_vect[init_obs.line_ex_pos_topo_vect[0]] == 1 + assert init_obs.line_status[0] + + # check the warning is raised + with self.assertRaises(UserWarning): + with warnings.catch_warnings(): + 
warnings.filterwarnings("error") + res = self.runner.run(nb_episode=2, + reset_options={"init state": {"set_line_status": [(0, -1)], "method": "ignore"}}, + max_iter=self.max_iter, + episode_id=[0, 1], + nb_process=nb_process, + add_detailed_output=True, + init_states={"set_line_status": [(1, -1)], "method": "ignore"} + ) + + def test_run_two_eps_par_init_act(self): + self.test_run_two_eps_seq_init_act(nb_process=2) + + def test_run_two_eps_par_two_opts_init_act(self): + self.test_run_two_eps_seq_two_options_init_act(nb_process=2) + + +if __name__ == "__main__": + unittest.main() diff --git a/grid2op/typing_variables.py b/grid2op/typing_variables.py new file mode 100644 index 000000000..0d0c03968 --- /dev/null +++ b/grid2op/typing_variables.py @@ -0,0 +1,64 @@ +# Copyright (c) 2019-2020, RTE (https://www.rte-france.com) +# See AUTHORS.txt +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. + +from typing import Dict, Literal, Any, Union, List +import numpy as np + +#: type hints corresponding to the "info" part of the env.step return value +STEP_INFO_TYPING = Dict[Literal["disc_lines", + "is_illegal", + "is_ambiguous", + "is_dispatching_illegal", + "is_illegal_reco", + "reason_alarm_illegal", + "reason_alert_illegal", + "opponent_attack_line", + "opponent_attack_sub", + "exception", + "detailed_infos_for_cascading_failures", + "rewards", + "time_series_id"], + Any] + +#: Dict representing an action +DICT_ACT_TYPING = Dict[Literal["set_line_status", + "change_line_status", + "set_bus", + "change_bus", + "redispatch", + "set_storage", + "curtail", + "raise_alarm", + "raise_alert", + "injection", + "hazards", + "maintenance", + "shunt"], + Any] +# TODO improve that (especially the Any part) + +#: type hints for the "options" flag of reset function +RESET_OPTIONS_TYPING = Union[Dict[Literal["time serie id"], int], + Dict[Literal["init state"], DICT_ACT_TYPING], + Dict[Literal["init ts"], int], + Dict[Literal["max step"], int], + None] + +#: type hints for a "GridObject" when converted to a dictionary +CLS_AS_DICT_TYPING = Dict[str, + Union[int, # eg n_sub, or n_line + str, # eg name_shunt, name_load + np.ndarray, # eg load_to_subid, gen_pos_topo_vect + List[Union[int, str, float, bool]]] + ] + +#: n_busbar_per_sub +N_BUSBAR_PER_SUB_TYPING = Union[int, # one for all substation + List[int], # give info for all substations + Dict[str, int] # give information for some substation + ] diff --git a/grid2op/utils/l2rpn_idf_2023_scores.py b/grid2op/utils/l2rpn_idf_2023_scores.py index 307cf3881..6655de254 100644 --- a/grid2op/utils/l2rpn_idf_2023_scores.py +++ b/grid2op/utils/l2rpn_idf_2023_scores.py @@ -114,13 +114,13 @@ def __init__( score_names=score_names, add_nb_highres_sim=add_nb_highres_sim, ) - weights=np.array([weight_op_score,weight_assistant_score,weight_nres_score]) + weights=np.array([weight_op_score, weight_assistant_score, weight_nres_score]) total_weights = weights.sum() - if total_weights != 1.0: + if abs(total_weights - 1.0) >= 1e-8: raise Grid2OpException( 'The weights of each component of the score shall sum to 1' ) - if np.any(weights <0): + if np.any(weights < 0.): raise Grid2OpException( 'All weights should be positive' ) diff --git a/setup.py 
b/setup.py index 68b3586f9..db3c36bf2 100644 --- a/setup.py +++ b/setup.py @@ -23,7 +23,7 @@ def my_test_suite(): pkgs = { "required": [ - "numpy>=1.20", + "numpy>=1.20,<2", # disable numpy 2 for now "scipy>=1.4.1", "pandas>=1.0.3", "pandapower>=2.2.2", @@ -73,7 +73,7 @@ def my_test_suite(): "numba", "gym>=0.26", "gymnasium", - "stable-baselines3>=2.0", + # "stable-baselines3>=2.0", "nbconvert", "jinja2" ], @@ -84,7 +84,6 @@ def my_test_suite(): } pkgs["extras"]["test"] += pkgs["extras"]["optional"] pkgs["extras"]["test"] += pkgs["extras"]["plot"] -pkgs["extras"]["test"] += pkgs["extras"]["chronix2grid"] pkgs["extras"]["test"] += pkgs["extras"]["gymnasium"] if sys.version_info.minor <= 7: diff --git a/utils/make_release.py b/utils/make_release.py index 057f9342c..c91346dd1 100644 --- a/utils/make_release.py +++ b/utils/make_release.py @@ -84,7 +84,7 @@ def modify_and_push_docker(version, # grid2op version version)) # TODO re.search(reg_, "0.0.4-rc1").group("prerelease") -> rc1 (if regex_version is the official one) - if re.search(f".*\.(rc|pre|dev)[0-9]+$", version) is not None: + if re.search(f".*(\\.|-)(rc|pre|dev)[0-9]+$", version) is not None: is_prerelease = True print("This is a pre release, docker will NOT be pushed, github tag will NOT be made") time.sleep(2)
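The broadened pre-release check in `utils/make_release.py` now accepts either a dot or a dash as the separator in front of the `rc`, `pre` or `dev` suffix, so a tag such as `0.0.4-rc1` is also detected and the docker push is skipped. A standalone sketch of the same pattern follows; the version strings are illustrative only, and the f-string prefix used in the script is harmless since nothing is interpolated.

```python
import re

PRERELEASE_RE = r".*(\.|-)(rc|pre|dev)[0-9]+$"   # same pattern as the updated make_release.py

for version in ("0.0.4-rc1", "1.10.0.dev2", "1.10.0.pre1", "1.10.0"):
    is_prerelease = re.search(PRERELEASE_RE, version) is not None
    print(version, "->", "pre-release" if is_prerelease else "official release")
# only "1.10.0" should be reported as an official release
```

Finally, the aliases introduced in the new `grid2op/typing_variables.py` module are there to document call signatures rather than to enforce anything at runtime. A hedged sketch of how `RESET_OPTIONS_TYPING` could annotate user code is shown below; the `run_scenario` helper is hypothetical and not part of grid2op.

```python
import grid2op
from grid2op.typing_variables import RESET_OPTIONS_TYPING

def run_scenario(env_name: str, options: RESET_OPTIONS_TYPING = None) -> int:
    """Hypothetical helper: reset with the given options and count the completed steps."""
    env = grid2op.make(env_name)
    obs = env.reset(options=options)          # e.g. {"time serie id": 0, "max step": 50}
    done, nb_step = False, 0
    while not done:
        obs, reward, done, info = env.step(env.action_space({}))  # do-nothing action
        nb_step += 1
    return nb_step
```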