diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 83781f12e..4f2eb32f3 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -52,3 +52,7 @@ jobs: run: "python -m nox -s build" env: PLATFORM: ${{ matrix.os.image }} + - name: "Install the package" + run: "python -m nox -s install" + env: + PLATFORM: ${{ matrix.os.image }} diff --git a/README.rst b/README.rst index 7b9c0663e..3a5f9bb4b 100644 --- a/README.rst +++ b/README.rst @@ -19,7 +19,7 @@ eTraGo Optimization of flexibility options for transmission grids based on PyPSA A speciality in this context is that transmission grids are described by the -380, 220 and 110 kV in Germany. Conventionally the 110kV grid is part of the +380, 220 and 110 kV in Germany. Conventionally, the 110kV grid is part of the distribution grid. The integration of the transmission and 'upper' distribution grid is part of eTraGo. @@ -27,6 +27,8 @@ The focus of optimization are flexibility options with a special focus on energy storages. Grid expansion measures are not part of this tool and will be instead part of 'eGo' https://github.com/openego/eGo +eTraGo is documented on `readthedocs `_. + .. warning:: From now on eTraGo depends on a sector coupled data-model. This is not published on the oedb yet, the data has to be created using @@ -35,7 +37,6 @@ instead part of 'eGo' https://github.com/openego/eGo When you want to use eTraGo for optimizations, please use the latest release 0.8.0. - Installation ============ eTraGo is designed as a Python package therefore it is mandatory to have @@ -88,7 +89,7 @@ you create a virtual environment (where you like it) and activate it: .. code-block:: bash - $ virtualenv venv --clear -p python3.7 + $ virtualenv venv --clear -p python3.8 $ source venv/bin/activate $ cd venv diff --git a/doc/about.rst b/doc/about.rst index 3bad066df..a7b91cf1e 100644 --- a/doc/about.rst +++ b/doc/about.rst @@ -15,15 +15,17 @@ energy storage and grid expansion measures. -The open_eGo project +Research projects ==================== -This software project is part of the research project +This software project was initially developed in the research project `open_eGo `_. +It is constantly further developed in different research projects, +e.g. `eGon `_ and `PoWerD `_. The OpenEnergy Platform ======================= -Within this project we developed the OpenEnergy Platform which this software +Within the open_eGo project we developed the OpenEnergy Platform which this software is using in order to get and store the in- and output data. Before you start to calculate a registration on the platform is needed. For more information see `openenergy-platform `_ and login. @@ -39,12 +41,12 @@ this platform. -Model overview -============== +Tool overview +============= -.. figure:: images/eTraGo_model.png +.. figure:: images/ego_tools.svg :align: center :scale: 75% @@ -63,17 +65,18 @@ eGo The python package eGo is a toolbox and application which connects the tool eTraGo (optimization of flexibility options at transmission grid level) and eDisGo (optimization of distribution grids). All those python -packages are part of the research project +packages were initially developed in the research project `open_eGo `_. `Learn more here `_.
-Dataprocessing -============== +Data model creation +=================== +For the eGon project the python-tool `eGon-data `_ was implemented, which creates input data for the optimization tools `eTraGo `_, `ding0 `_ and `eDisGo `_ and delivers for example data on grid topologies, demands/demand curves and generation capacities in a high spatial resolution. The outputs of eGon-data are published under open source and open data licenses. + +eGon-data is a further development of the `Data processing `_ developed in the former research project `open_eGo `_. It aims for an extension of the data models as well as for a better replicability and manageability of the data preparation and processing. -For the open_eGo project several python packages are developed which are feeded -by the input data of the data processing. The Dataprocessing is writen in -SQL and Python. `Learn more here `_. ego.io ====== @@ -97,7 +100,7 @@ medium and low voltage power distribution grids based on open LICENSE ======= -© Copyright 2015-2018 +© Copyright 2015-2023 Flensburg University of Applied Sciences, Europa-Universität Flensburg, Centre for Sustainable Energy Systems and diff --git a/doc/api.rst b/doc/api.rst index 83b7a11a8..d65337975 100644 --- a/doc/api.rst +++ b/doc/api.rst @@ -1,8 +1,11 @@ -.. make doc-string generated documentation appear here +API +==== .. toctree:: :maxdepth: 7 :glob: :titlesonly: - API + api/etrago.cluster + api/etrago.tools + api/appl.rst diff --git a/doc/api/appl.rst b/doc/api/appl.rst new file mode 100644 index 000000000..fe45967b0 --- /dev/null +++ b/doc/api/appl.rst @@ -0,0 +1,7 @@ +etrago.appl module +------------------- + +.. automodule:: etrago.appl + :members: + :undoc-members: + :show-inheritance: \ No newline at end of file diff --git a/doc/api/etrago.cluster.rst b/doc/api/etrago.cluster.rst index 1e2fc858b..40fc8db3c 100644 --- a/doc/api/etrago.cluster.rst +++ b/doc/api/etrago.cluster.rst @@ -1,18 +1,31 @@ -etrago\.cluster package +etrago.cluster package ======================= -Submodules ---------- +etrago.cluster.disaggregation module +----------------------------------------- -etrago\.cluster\.networkclustering module +.. automodule:: etrago.cluster.disaggregation + :members: + :undoc-members: + :show-inheritance: + +etrago.cluster.electrical module ----------------------------------------- -.. automodule:: etrago.cluster.networkclustering +.. automodule:: etrago.cluster.electrical :members: :undoc-members: :show-inheritance: -etrago\.cluster\.snapshot module +etrago.cluster.gas module +----------------------------------------- + +.. automodule:: etrago.cluster.gas + :members: + :undoc-members: + :show-inheritance: + +etrago.cluster.snapshot module -------------------------------- .. automodule:: etrago.cluster.snapshot :members: :undoc-members: :show-inheritance: -Module contents ---------------- +etrago.cluster.spatial module +-------------------------------- -.. automodule:: etrago.cluster +..
automodule:: etrago.cluster.spatial :members: :undoc-members: :show-inheritance: - diff --git a/doc/api/etrago.rst b/doc/api/etrago.rst deleted file mode 100644 index a09ffc783..000000000 --- a/doc/api/etrago.rst +++ /dev/null @@ -1,30 +0,0 @@ -etrago package -============== - -Subpackages ----------- - -.. toctree:: - - etrago.cluster - etrago.tools - -Submodules ---------- - -etrago\.appl module ------------------- - -.. automodule:: etrago.appl - :members: - :undoc-members: - :show-inheritance: - - -Module contents ---------------- - -.. automodule:: etrago - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/api/etrago.tools.rst b/doc/api/etrago.tools.rst index 33586cbf9..2b5495d38 100644 --- a/doc/api/etrago.tools.rst +++ b/doc/api/etrago.tools.rst @@ -1,56 +1,66 @@ -etrago\.tools package +etrago.tools package ===================== -Submodules ---------- +etrago.tools.calc\_results module ---------------------------------- -etrago\.tools\.io module ------------------------- +.. automodule:: etrago.tools.calc_results + :members: + :undoc-members: + :show-inheritance: -.. automodule:: etrago.tools.io +etrago.tools.constraints module +---------------------------------- + +.. automodule:: etrago.tools.constraints :members: :undoc-members: :show-inheritance: +etrago.tools.execute module +---------------------------------- -etrago\.tools\.extendable module ------------------------- +.. automodule:: etrago.tools.execute + :members: + :undoc-members: + :show-inheritance: + +etrago.tools.extendable module +---------------------------------- .. automodule:: etrago.tools.extendable :members: :undoc-members: :show-inheritance: +etrago.tools.io module +------------------------ -etrago\.tools\.plot module --------------------------- - -.. automodule:: etrago.tools.plot +.. automodule:: etrago.tools.io :members: :undoc-members: :show-inheritance: -etrago\.tools\.snapshot\_clustering module ------------------------------------------- +etrago.tools.network module +----------------------------- -.. automodule:: etrago.tools.snapshot_clustering +.. automodule:: etrago.tools.network :members: :undoc-members: :show-inheritance: -etrago\.tools\.utilities module -------------------------------- +etrago.tools.plot module --------------------------- -.. automodule:: etrago.tools.utilities +.. automodule:: etrago.tools.plot :members: :undoc-members: :show-inheritance: +etrago.tools.utilities module +------------------------------- -Module contents ---------------- - -.. automodule:: etrago.tools +.. automodule:: etrago.tools.utilities :members: :undoc-members: :show-inheritance: diff --git a/doc/api/modules.rst b/doc/api/modules.rst deleted file mode 100644 index fda8d67db..000000000 --- a/doc/api/modules.rst +++ /dev/null @@ -1,7 +0,0 @@ -etrago -====== - -.. toctree:: - :maxdepth: 4 - - etrago diff --git a/doc/conf.py b/doc/conf.py index 313e191cb..5fc0bd671 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -122,7 +122,7 @@ # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -language = None +language = "en" # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: diff --git a/doc/developer_notes.rst b/doc/developer_notes.rst index 37a3bc4ff..a4a3d036a 100644 --- a/doc/developer_notes.rst +++ b/doc/developer_notes.rst @@ -15,7 +15,7 @@ Installation for Developers ..
code-block:: bash - $ virtualenv --clear -p python3.7 etrago`` + $ virtualenv --clear -p python3.8 etrago $ cd etrago/ $ source bin/activate diff --git a/doc/howToUse.rst b/doc/howToUse.rst index c1f782a4b..918fc3c85 100644 --- a/doc/howToUse.rst +++ b/doc/howToUse.rst @@ -5,7 +5,7 @@ How to use eTraGo? After you installed eTraGo you would typically start optimization runs by executing the ‘appl.py’ which is situated in -``./eTrago/etrago/`` (e.g by ``python3 appl.py``). +``./eTrago/etrago/`` (e.g. by ``python3 appl.py`` from the terminal). eTraGo doesn't have a graphical user interface, the ‘appl.py’ is used as a simple user interface which can be edited with @@ -13,7 +13,7 @@ the preferred python-editor. Here parameters, calculation methods and scenario settings are set in a python dictionary called 'args'. To run the desired calculation, it is crucial to understand these parameters. -In addition, some of them contradict the usage of others. +In addition, some of them contradict the usage of others. You find the documentation of all defined parameters from the 'args' here: :func:`etrago.appl.run_etrago`. @@ -35,7 +35,7 @@ For more specific or extensive changes you are highly invited to write code and add new functionalities. Once the calculation has finished the PyPSA network of the Etrago-object will -contain all results. Som main results (e.g. anuual system costs) are calculated +contain all results. Some main results (e.g. annual system costs) are calculated by :meth:`etrago.calc_results` and can be accessed via 'etrago.results'. You can use several plotting functions from the :meth:`etrago.tools.plot` in order to visualize the results. For example @@ -52,7 +52,7 @@ Examples and tutorial notebooks -.. toctree:: - :maxdepth: 7 +**eTraGo version 0.5.1:** +`etrago_OpenMod_Zuerich18 `_. - OpenMod +**eTraGo version 0.9:** Tutorial notebook will be available in the next release.
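Putting this section together, a minimal sketch of such a run could look as follows. It only uses what is documented here (the 'args' dictionary, :func:`etrago.appl.run_etrago` and the result helpers named above); that ``run_etrago`` returns the Etrago-object and that ``json_path=None`` skips reading settings from a json file are assumptions made for illustration:

.. code-block:: python

    from etrago.appl import run_etrago

    # override only the 'args' entries that differ from the defaults in
    # appl.py (arguments can be partially provided, see release notes)
    args = {
        "start_snapshot": 1,
        "end_snapshot": 10,
        "solver": "gurobi",
        "csv_export": "results",  # write result csv files to ./results
    }

    etrago = run_etrago(args, json_path=None)  # assumed to return the Etrago-object

    # the PyPSA network of the Etrago-object now contains all results
    etrago.calc_results()
    print(etrago.results)  # main results, e.g. annual system costs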
diff --git a/doc/images/eTraGo_model.png b/doc/images/eTraGo_model.png deleted file mode 100644 index 1be7f3a97..000000000 Binary files a/doc/images/eTraGo_model.png and /dev/null differ diff --git a/doc/images/ego_tools.svg b/doc/images/ego_tools.svg new file mode 100644 index 000000000..37c6edb3b --- /dev/null +++ b/doc/images/ego_tools.svg @@ -0,0 +1,679 @@ + [679 lines of SVG markup omitted] diff --git a/doc/images/modelling_concept.png b/doc/images/modelling_concept.png new file mode 100644 index 000000000..d0f756e8b Binary files /dev/null and b/doc/images/modelling_concept.png differ diff --git a/doc/installation.rst b/doc/installation.rst index 75128c3ca..a62a615a3 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -20,7 +20,7 @@ you create a virtual environment (where you like it) and activate it: .. code-block:: bash - $ virtualenv venv --clear -p python3.7 + $ virtualenv venv --clear -p python3.8 $ source venv/bin/activate $ cd venv @@ -30,7 +30,7 @@ install eTraGo with the pip command, as previously explained. Linux and Ubuntu ================ -The Package eTraGo is tested with Ubuntu 16.04, 18.04 and 20.04 inside the virtual +The Package eTraGo is tested with Ubuntu 16.04, 18.04, 20.04 and 22.04 inside the virtual environments of `virtualenv `_. The installation is shown above. @@ -40,14 +40,15 @@ Windows or Mac OSX users ======================== For Windows and/or Mac OSX user we highly recommend to install and use Anaconda -for you Python3 installation. First install anaconda including python 3.5 or +for your Python3 installation. First install Anaconda including Python 3.8 or higher version from https://www.anaconda.com/download/ and open an anaconda prompt as administrator and run: .. code-block:: bash $ conda config --add channels conda-forge - $ conda install shapely + $ conda create -n etrago_env python=3.8 + $ conda activate etrago_env $ pip install eTraGo @@ -73,7 +74,7 @@ Python package for the REST-API used by the OpenEnergy Platform (OEP). In order to connect eTraGo via the oedialect with the oedb you have to create an account at `openenergy-platform.org/login `_.
-You can name the `'db' `_ +You can name the `'db' `_ argument of the 'args' of the :func:`etrago.appl.etrago` as you wish. Once the :func:`etrago.appl.etrago` is executed you will be asked to enter how you want to connect to which database. If you want to use @@ -89,7 +90,7 @@ oedialect, a local postgresql database or the old psycopg2 developer connection. Once you have created a connection (which is saved in the config.ini) you do not have to enter the connection parameter again. The software will take the connection parameter -which corresponds to the entry at the `'db' `_ argument. +which corresponds to the entry at the `'db' `_ argument. oedialect connection diff --git a/doc/theoretical_background.rst b/doc/theoretical_background.rst index 73e4d96ce..6df61b821 100644 --- a/doc/theoretical_background.rst +++ b/doc/theoretical_background.rst @@ -7,158 +7,112 @@ Theoretical Background Definitions and Units ===================== -eTraGo executes the Open Source software PyPSA to perform power flow -simulations and uses its definitions and -`units `_. - +eTraGo is based on the open source tool `PyPSA `_ and uses its definitions and units. Assumptions on Data =================== -eTraGo fetches its necessary input data from the OpenEnergy Platform including -load, generation, grid and scenario-related data. More details can be found in -the `Data-Processing `_. +eTraGo fetches the input data from the `OpenEnergy Platform `_. The data includes electricity and gas grid topology as well as data on energy supply and load for the considered sectors (electricity, gas, heat and e-mobility) plus data on flexibility potential deriving from those sectors, e.g. Dynamic Line Rating, Demand Side Management and flexibility potentials arising from e-mobility. More details on the data model can be found in the documentation of `eGon-data `_. + +At the moment, there are two scenarios available based on scenario C2035 of the network expansion plan ([NEP]_), version 2021. The base one is called eGon2035. To analyse the effect of flexibility options, there is an eGon2035_lowflex scenario available which depicts a lower penetration of flexibilities. More scenarios are being developed. The eGon100RE scenario is being implemented which is characterised by 100% renewable generation. Analogously to the scenarios above, an eGon100RE_lowflex scenario will be available. + +You can see the modeling concepts of the scenarios in the figure below. The components marked green have exogenous capacity and endogenous dispatch whereas the components marked in red are optimised endogenously in capacity and dispatch. -As overview, the Open Source grid structure is developed by processing data -from `OpenStreetMap (OSM) `_ to obtain -geo-referenced locations of substations and links equal or above the 110 kV -voltage level. OSM also provides information about residential, retail, -industrial and agricultural areas which is used with standardized profiles to -obtain load data. Generation data of solar and wind rely on weather data from -[coastdat-2]_. Both, load and generation data, match the annual amount for the -year 2011. eTraGo enables the investigation of three scenarios - Status Quo, -NEP 2035 and eGo100. Status Quo corresponds to the actual grid, NEP2035 -follows assumptions for the year 2035 by [NEP2015]_ and eGo100 assumes to -operate the future energy system completely by renewables [ehighway2050]_. +..
figure:: images/modelling_concept.png + :align: center + :scale: 75% Methods -=========== - -PyPSA ------ -The power flow simulations are performed by the Open Source tool -`PyPSA `_ with a linear approximation for the -optimization of power flows in general. Expecting that eTraGo fulfills the -assumptions to perfom a LOPF (small voltage angle differences, branch -resistances negligible to their reactances, voltage magnitudes can be kept at -nominal values) since it focuses on the extra-high and high voltage levels. As -objective value of the optimization, the overall system costs are considered. - - -Clustering approaches ---------------- - -EHV-Clustering -^^^^^^^^^^^^^^ - -This method maps an input network to an output network with the nodes of the -extra-high voltage level. All nodes with a voltage level below the extra-high -voltage level are mapped to their nearest neighboring node in the extra-high -voltage level with the -`dijkstra algorithm `_ -(110 kV ---> 220,380 kV). - -K-Means Clustering -^^^^^^^^^^^^^^^^^^ - -This `method `_ maps an input -network to a new output network with an adjustable number of nodes and new -coordinates. The algorithm sets these coordinates randomly and minimizes a -certain parameter like for example the distances between old coordinates and -their nearest neighbor in the set of new coordinates. The method was -implemented by `Hoersch et al. `_ within -PyPSA. - -Snapshot skipping -^^^^^^^^^^^^^^^^^ -This method simplifies the simulation temporally by considering every n-th -snapshot of a given time series. The regarded snapshots are weighted by the -number of neglected snapshots to ensure a comparable calculation of costs. -This method assumes the chosen snapshots to be represenative for the next n-th -snapshots in the time series. - -Snapshot-Clustering -^^^^^^^^^^^^^^^^^^^ -This method aggregate given time series for various time intervals like i.e. -days using the `tsam `_ package. Contrary to -snapshot skipping, this approach averages a certain period of snapshots -instead of choosing a representative snapshot. - - -Storage expansion ------------------ -To evaluate the amount of storage units in future energy systems, the possible -installation of new storage units at every node in the network is allowed. The -size and operation of these storages are part of the optimization problem. - -Two types of storage technologies are considered - batteries and hydrogen in -underground caverns. Li-Ion battery storages as representatives for short-term -(several hours) technologies, which can be installed at every node. -Underground hydrogen storages represent long-term or seasonal (weeks) -technologies and can be build at every node with appropriate salt formations -in the underground. The storage parameters for both types are reached by -[Acatech2015]_, the information about salt formations are given by [BGR]_. - - -Grid expansion --------------- -The grid expansion is realized by extending the capacities of existing -lines and substations. These capacities are regarded as part of the -optimization problem, whereby the possible extension is unlimited. With respect -to the different voltage levels and lengths MVA-specific costs are considered -in the linear optimization of the power flow. Besides, several planned grid -expansion scenarios from the German grid development plan can be considered as -possible additional power lines by using the 'scn_extension' argument. 
+======= + + +Optimisation with PyPSA +----------------------- + +Within eTraGo, the fetched data model is translated into a `PyPSA `_-network. The optimisation is performed with a linear approximation assuming eTraGo fulfills the assumptions to perform a LOPF (i.e. small voltage angle differences, branch resistances negligible compared to their reactances, voltage magnitudes kept at nominal values) since it focuses on the extra-high and high voltage levels. As objective value of the optimisation, the overall system costs are considered. + +With the argument ‘pf_post_lopf’, after the LOPF a non-linear power flow simulation can be conducted. + + +Complexity Reduction +--------------------- + +The data model is characterised by a high spatial (about 8,000 electrical and 600 gas nodes) and temporal resolution (8,760 timesteps). To reduce the complexity of the resulting optimisation problem, several methods can be applied. + + +Reduction in spatial dimension: +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The **ehv clustering** maps all electrical nodes with a voltage level below the extra-high voltage level to their nearest neighboring node in the extra-high voltage level using Dijkstra's algorithm (110 kV —> 220 / 380 kV). + +The **k-means Clustering** reduces the electrical or gas network to an adjustable number of nodes by considering the geographical position of the respective nodes. This method has been implemented within PyPSA by [Hoersch]_. + +The **k-medoids Dijkstra Clustering** aggregates nodes considering the network topology. First, a k-medoids Clustering is used, dividing the original nodes of the network into groups by their geographical positions while identifying the geographical medoid nodes per cluster. Afterwards, the original nodes in the original network are assigned to the previously identified medoids based on the original network's topology, applying Dijkstra's algorithm on the line lengths. Finally, the original nodes are represented by one aggregated node per cluster at the position of the previously identified medoid node. + +In general, the clustering of the **sector-coupled system** is divided into two steps: +First, the electrical and gas grids are clustered independently using one of the methods described above. Afterwards, nodes of the other sectors (hydrogen, heat, e-mobility and DSM nodes) are mapped according to their connection to electricity or gas buses and aggregated to one node per carrier. + +After optimising the spatially reduced network, a **spatial disaggregation** can be conducted. + + +Reduction in temporal dimension: +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The method **Skip Snapshots** implies a downsampling to every nth time step. The considered snapshots are weighted respectively to account for the analysis of one whole year. + +By using the method called **Segmentation**, a hierarchical clustering of consecutive timesteps to segments with variable lengths is applied [Pineda]_. + +The **Snapshot Clustering on Typical Periods** implies a hierarchical clustering of time periods with a predefined length (e.g. days or weeks) to typical periods. Those typical periods are weighted according to the number of periods in their cluster. This method optionally includes the linkage of the typical periods in a second time layer to account for the intertemporal dependencies following [Kotzur]_. + +By applying a 2-level-approach, a **temporal disaggregation** can be conducted.
This means optimising dispatch using the fully complex time series in the second step after having optimised grid and storage expansion using the complexity-reduced time series in the first step. + + +Grid and Storage / Store expansion +----------------------------------- + +The grid expansion is realized by extending the capacities of existing lines and substations. These capacities are considered as part of the optimisation problem whereby the possible extension is unlimited. With respect to the different voltage levels and lengths, MVA-specific costs are considered in the optimisation. + +As shown in the figure above, several options to store energy are part of the modeling concept. Extendable batteries (modeled as storage units) are assigned to every node in the electrical grid. A minimum installed capacity is being considered to account for home batteries ([NEP]_). The expansion and operation is part of the optimisation. Furthermore, two types of hydrogen stores (modeled as stores) are available. Overground stores are optimised in operation and dispatch without limitations whereas underground stores representing salt caverns are limited by geographical conditions ([BGR]_). Additionally, heat stores are part of the optimisation in terms of power and energy, without upper limits. Miscellaneous Features --------- -Several features were developed to enhance the functionality of eTraGo. As -appropriate computer setting, the 'solver_options' and a 'generator_noise' are -possible arguments. The latter adds a reproducible small random noise to the -marginal costs of each generator in order to prevent an optima plateau. The -specific solver options depend on the applied solver like for example Gurobi, -CPLEX or GLPK. Considering reproducibility, the 'load_cluster' argument -enables to load a former calculated clustered network. Besides, -'line_grouping' provides a grouping of lines which connect the same buses. -The 'branch_capacity_factor' adds a factor to adapt all line capacities in order -to consider (n-1) security. Because the average number of HV systems is much -smaller than the one of eHV lines, you can choose factors for 'HV' and 'eHV'. -The 'load_shedding' argument is used for debugging complex grids in order to avoid -infeasibilities. It introduces a very expensive generator at each bus to meet -the demand. When optimizing storage units and grid expansion without limiting -constraints, the need for load shedding should not be existent. The -'minimize_loading' argument forces to minimize the loading of the lines next -to the costs. 'Parallelization' provides the opportunity to devide the -optimization problem into a given number of sub-problems. For a group of -snapshots the problem will be solved separately. This functionality can -only be used for problems which do not have dependencies from one snapshot -to another. Therefore this option can not be used -with the optimization of storage units due to their state of charge. +---------------------- +Several features were developed to enhance the functionality of eTraGo. -References -========== +To customize computation settings, ‘solver_options’ and ‘generator_noise’ should be adapted. The latter adds a reproducible small random noise to the marginal costs of each generator in order to prevent an optima plateau. The specific solver options depend on the applied solver (e.g. Gurobi, CPLEX or GLPK).
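For illustration, the corresponding 'args' entries could be set as follows; the values simply mirror the Gurobi defaults from ``appl.py`` and would have to be adapted for other solvers:

.. code-block:: python

    args["generator_noise"] = 789456  # seed for reproducible noise, or False
    args["solver"] = "gurobi"  # glpk, cplex or gurobi
    args["solver_options"] = {  # Gurobi-specific settings
        "BarConvTol": 1.0e-5,
        "FeasibilityTol": 1.0e-5,
        "method": 2,  # barrier method
        "crossover": 0,
        "logFile": "solver_etrago.log",
        "threads": 4,
    }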
+ +In ‘extendable’ you can adapt the type of components you want to be optimised in capacity and set upper limits for grid expansion inside Germany and of lines to foreign countries. -.. [NEP2015] Übertragungsnetzbetreiber Deutschland. (2015).: - *Netzentwicklungsplan Strom 2025*, Version 2015, 1. Entwurf, 2015. - (``_) +The ‘extra_functionality’ argument allows considering extra constraints like limits for energy import and export or minimal renewable shares in generation. -.. [coastdat-2] coastDat-2 (2017).: - Hindcast model ``_ +‘branch_capacity_factor’ adds a factor to adapt all line capacities in order to consider (n-1) security. Because the average number of HV systems is much smaller than the one of eHV lines, you can choose factors for ‘HV’ and ‘eHV’ separately. -.. [ehighway2050] e-Highway2050. (2015).: - e-HIGHWAY 2050 Modular Development Plan of the Pan-European Transmission - System 2050 - database per country. Retrieved from - (``_) +The ‘load_shedding’ argument is used for debugging complex grids in order to avoid infeasibilities. It introduces a very expensive generator at each bus to meet the demand. When optimising storage units and grid expansion without limiting constraints, there should be no need for load shedding. -.. [Acatech2015] 'Flexibilitätskonzepte für die Stromversorgung 2050 - ``_' +With ‘foreign_lines’ you can adapt the foreign lines to be modeled as DC-links (e.g. to avoid loop flows). + + +References +========== -.. [BGR] 'Salzstruktur in Norddeutschland <>'_. 2015.: - Data provided by the Federal Institute for Geosciences and Natural - Resources (Bundesanstalt für Geowissenschaften und Rohstoffe, BGR) +.. [NEP] Übertragungsnetzbetreiber Deutschland (2021): + *Netzentwicklungsplan Strom 2035*, Version 2021, 1. Entwurf. 2021. + +.. [Hoersch] Jonas Hoersch et al. (2017): + *The role of spatial scale in joint optimisations of generation and transmission for European highly renewable scenarios*. 2017. + ``_ + +.. [Pineda] Salvador Pineda et al. (2018): + *Chronological Time-Period Clustering for Optimal Capacity Expansion Planning With Storage*. 2018. + ``_ + +.. [Kotzur] Leander Kotzur et al. (2018): + *Time series aggregation for energy system design: Modeling seasonal storage*. 2018. + ``_ + +.. [BGR] Bundesanstalt fuer Geowissenschaften und Rohstoffe et al. (2020): + *nSpEE-DS - Teilprojekt Bewertungskriterien und Potenzialabschätzung*. 2020. + ``_ diff --git a/doc/whatsnew.rst b/doc/whatsnew.rst index c7894943c..87ed81be6 100644 --- a/doc/whatsnew.rst +++ b/doc/whatsnew.rst @@ -8,6 +8,7 @@ These are new features and improvements of note in each release. :local: :backlinks: top +.. include:: whatsnew/v0_9_0.rst .. include:: whatsnew/v0_8_0.rst .. include:: whatsnew/v0_7_2.rst ..
include:: whatsnew/v0_7_1.rst diff --git a/doc/whatsnew/v0_5_1.rst b/doc/whatsnew/v0_5_1.rst index ac1f83a9f..ad37004f1 100644 --- a/doc/whatsnew/v0_5_1.rst +++ b/doc/whatsnew/v0_5_1.rst @@ -1,5 +1,5 @@ Release 0.5.1 (February 01, 2018) -++++++++++++++++++++++++++++++++ +++++++++++++++++++++++++++++++++++ eTraGo works with ego.io 0.3.0 diff --git a/doc/whatsnew/v0_6_1.rst b/doc/whatsnew/v0_6_1.rst index 5b9ec69d5..630828a3b 100644 --- a/doc/whatsnew/v0_6_1.rst +++ b/doc/whatsnew/v0_6_1.rst @@ -1,5 +1,5 @@ Release 0.6.1 (Juli 18, 2018) -++++++++++++++++++++++++++++ +++++++++++++++++++++++++++++++ eTraGo works with pypi and is suitable for eGo 0.2.0 Added features diff --git a/doc/whatsnew/v0_7_0.rst b/doc/whatsnew/v0_7_0.rst index e690bd5e2..abedaaf9d 100644 --- a/doc/whatsnew/v0_7_0.rst +++ b/doc/whatsnew/v0_7_0.rst @@ -1,9 +1,9 @@ Release 0.7.0 (September 6, 2018) -++++++++++++++++++++++++++++ +++++++++++++++++++++++++++++++++++ eTraGo is able to produce feasible non-linear power flows based on optimization results and allows the disaggregation of clustered results to original spatial complexities. Added features --------------- +--------------- * The pf_post_lopf function was improved. Due to changes in the data set now the non-linear power flow (pf) creates feasible solutions. If network optimization is turned on, a second lopf which regards the updated reactances and optimizes only dispatch is performed before the pf is executed. * The disaggregation method was included. When using a network clustering method to reduce the spatial complexity of the given network, a disaggregation method can be used afterwards to distribute the nodal results (generation and storage timeseries) to the original complexity. The method 'disaggregation': 'uniform' can be used as an interface functionality for distribution grid planning tools like eDisGo. diff --git a/doc/whatsnew/v0_7_1.rst b/doc/whatsnew/v0_7_1.rst index 53eebb24c..d7e692677 100644 --- a/doc/whatsnew/v0_7_1.rst +++ b/doc/whatsnew/v0_7_1.rst @@ -1,5 +1,5 @@ Release 0.7.1 (October 25, 2018) -++++++++++++++++++++++++++++ +++++++++++++++++++++++++++++++++ A minor release adding new options for additional constraints, modelling assumptions and plotting. Added features diff --git a/doc/whatsnew/v0_7_2.rst b/doc/whatsnew/v0_7_2.rst index 58ee28ca6..c972f76bc 100644 --- a/doc/whatsnew/v0_7_2.rst +++ b/doc/whatsnew/v0_7_2.rst @@ -1,5 +1,5 @@ Release 0.7.2 (Juni 15, 2020) -++++++++++++++++++++++++++++ +++++++++++++++++++++++++++++++ A minor release adding the following features. Added features diff --git a/doc/whatsnew/v0_8_0.rst b/doc/whatsnew/v0_8_0.rst index 420ce9e08..b4f28df3e 100644 --- a/doc/whatsnew/v0_8_0.rst +++ b/doc/whatsnew/v0_8_0.rst @@ -1,5 +1,5 @@ Release 0.8.0 (April 8, 2021) -++++++++++++++++++++++++++++ +++++++++++++++++++++++++++++++ eTraGo has now a more object-oriented programming design. 
Added features diff --git a/doc/whatsnew/v0_9_0.rst b/doc/whatsnew/v0_9_0.rst new file mode 100644 index 000000000..5fb2fdf5b --- /dev/null +++ b/doc/whatsnew/v0_9_0.rst @@ -0,0 +1,28 @@ +Release 0.9.0 (XXXXX, 2023) +++++++++++++++++++++++++++++ + +Added features +-------------- + +* eTraGo is now compatible with Python 3.8 +* eTraGo can now import and optimize networks that include other energy sectors such as gas, heating and mobility +* Various flexibility options from different energy sectors can be considered in the optimization: +- Weather dependent capacity of transmission lines (Dynamic Line Rating) +- Demand Side Management +- Flexible charging of electric vehicles +- Heat and hydrogen stores +- Power2Hydrogen, Hydrogen2Power +- Methanation and Steam Methane Reforming +* eTraGo arguments can now be partially provided and updated +* eTraGo can now import data models from databases without using ego.io +* Existing clustering methods were adapted to be able to reduce the complexity of non-electrical sectors +* Improvement of the ehv clustering (much faster now) +* A new clustering method named "k-medoids Dijkstra Clustering" (can be called by "kmedoids-dijkstra") was implemented. This method considers the electrical distance between the buses in the network. It is also available for the methane grid. +* It is possible to select if foreign buses are considered or not during the clustering process. +* The number of CPUs used to perform the clustering can be provided by the user. +* Some more options are available to conduct a reduction in temporal dimension: +- segmentation: clustering of adjacent hours to segments of variable length +- clustering to typical periods extended to cluster on weeks and months +* A temporal disaggregation is available through a 2-level-approach including a dispatch optimization on the temporally fully complex model. To limit the RAM usage, you can optionally divide the optimisation problem into a chosen number of slices. +* New plotting functions to visualize the optimization results from all the included energy sectors were implemented +* Functions to analyze results were updated to consider new sectors diff --git a/etrago/appl.py b/etrago/appl.py index 73e4cb200..cdf3f3ad8 100644 --- a/etrago/appl.py +++ b/etrago/appl.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2016-2018 Flensburg University of Applied Sciences, +# Copyright 2016-2023 Flensburg University of Applied Sciences, # Europa-Universität Flensburg, # Centre for Sustainable Energy Systems, # DLR-Institute for Networked Energy Systems @@ -21,13 +21,14 @@ """ This is the application file for the tool eTraGo. Define your connection parameters and power flow settings before executing -the function etrago. +the function run_etrago. """ import datetime import os import os.path + import numpy as np __copyright__ = ( @@ -36,8 +37,10 @@ "DLR-Institute for Networked Energy Systems" ) __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" -__author__ = "ulfmueller, lukasol, wolfbunke, mariusves, s3pp" - +__author__ = ( + "ulfmueller, lukasol, wolfbunke, mariusves, s3pp, ClaraBuettner, " + "CarlosEpia, KathiEsterl, fwitte, gnn, pieterhexen, AmeliaNadal" +) if "READTHEDOCS" not in os.environ: # Sphinx does not run this code.
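The temporal options named in these release notes map onto the 'args' dictionary defined in the following hunk. A sketch of the three mechanisms, using only keys and default values that appear below (they are alternative reductions and are combined here merely for compactness):

.. code-block:: python

    # downsampling: keep every 5th snapshot, weighted accordingly
    args["skip_snapshots"] = 5

    # or: cluster consecutive hours to segments of variable length
    args["snapshot_clustering"] = {
        "active": True,
        "method": "segmentation",  # 'typical_periods' or 'segmentation'
        "extreme_periods": None,
        "how": "daily",  # only relevant for 'typical_periods'
        "storage_constraints": "soc_constraints",
        "n_clusters": 5,  # number of periods - only for 'typical_periods'
        "n_segments": 5,  # number of segments - only for 'segmentation'
    }

    # 2-level-approach: re-optimise dispatch on the fully complex
    # time series, split into slices to limit RAM usage
    args["temporal_disaggregation"] = {"active": True, "no_slices": 8}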
@@ -52,26 +55,26 @@ "method": { # Choose method and settings for optimization "type": "lopf", # type of optimization, currently only 'lopf' "n_iter": 4, # abort criterion of iterative optimization, 'n_iter' or 'threshold' - "pyomo": True, - }, # set if pyomo is used for model building + "pyomo": True, # set if pyomo is used for model building + }, "pf_post_lopf": { - "active": True, # choose if perform a pf after a lopf simulation + "active": True, # choose if a pf is performed after the lopf "add_foreign_lopf": True, # keep results of lopf for foreign DC-links - "q_allocation": "p_nom", - }, # allocate reactive power via 'p_nom' or 'p' + "q_allocation": "p_nom", # allocate reactive power via 'p_nom' or 'p' + }, "start_snapshot": 1, - "end_snapshot": 2, + "end_snapshot": 10, "solver": "gurobi", # glpk, cplex or gurobi "solver_options": { "BarConvTol": 1.0e-5, "FeasibilityTol": 1.0e-5, "method": 2, "crossover": 0, - "logFile": "solver_etragos.log", + "logFile": "solver_etrago.log", "threads": 4, }, "model_formulation": "kirchhoff", # angles or kirchhoff - "scn_name": "eGon2035", # a scenario: eGon2035 or eGon100RE + "scn_name": "eGon2035", # scenario: eGon2035, eGon100RE or status2019 # Scenario variations: "scn_extension": None, # None or array of extension scenarios "scn_decommissioning": None, # None or decommissioning scenario @@ -80,7 +83,9 @@ "csv_export": "results", # save results as csv: False or /path/tofolder # Settings: "extendable": { - "extendable_components": ["as_in_db"], # Array of components to optimize + "extendable_components": [ + "as_in_db" + ], # Array of components to optimize "upper_bounds_grid": { # Set upper bounds for grid expansion # lines in Germany "grid_max_D": None, # relative to existing capacity @@ -98,8 +103,8 @@ "generator_noise": 789456, # apply generator noise, False or seed number "extra_functionality": {}, # Choose function name or {} # Spatial Complexity: + "network_clustering_ehv": False, # clustering of HV buses to EHV buses "network_clustering": { - "random_state": 42, # random state for replicability of kmeans results "active": True, # choose if clustering is activated "method": "kmedoids-dijkstra", # choose clustering method: kmeans or kmedoids-dijkstra "n_clusters_AC": 30, # total number of resulting AC nodes (DE+foreign) @@ -109,330 +114,514 @@ "cluster_foreign_gas": False, # take foreign CH4 buses into account, True or False "k_elec_busmap": False, # False or path/to/busmap.csv "k_gas_busmap": False, # False or path/to/ch4_busmap.csv - "line_length_factor": 1, # - "remove_stubs": False, # remove stubs bevore kmeans clustering - "use_reduced_coordinates": False, # "bus_weight_tocsv": None, # None or path/to/bus_weight.csv "bus_weight_fromcsv": None, # None or path/to/bus_weight.csv "gas_weight_tocsv": None, # None or path/to/gas_bus_weight.csv "gas_weight_fromcsv": None, # None or path/to/gas_bus_weight.csv + "line_length_factor": 1, # Factor to multiply distance between new buses for new line lengths + "remove_stubs": False, # remove stubs before kmeans clustering + "use_reduced_coordinates": False, # If True, do not average cluster coordinates + "random_state": 42, # random state for replicability of clustering results "n_init": 10, # affects clustering algorithm, only change when necessary "max_iter": 100, # affects clustering algorithm, only change when necessary "tol": 1e-6, # affects clustering algorithm, only change when necessary
+ "CPU_cores": 4, # number of cores used during clustering, "max" for all cores available. }, "sector_coupled_clustering": { "active": True, # choose if clustering is activated "carrier_data": { # select carriers affected by sector coupling - "central_heat": {"base": ["CH4", "AC"], "strategy": "simultaneous"}, + "central_heat": { + "base": ["CH4", "AC"], + "strategy": "simultaneous", # select strategy to cluster other sectors + }, }, }, - "network_clustering_ehv": False, # clustering of HV buses to EHV buses. - "disaggregation": "uniform", # None, 'mini' or 'uniform' + "disaggregation": None, # None or 'uniform' # Temporal Complexity: "snapshot_clustering": { "active": False, # choose if clustering is activated "method": "segmentation", # 'typical_periods' or 'segmentation' "extreme_periods": None, # consideration of extreme timesteps; e.g. 'append' - "how": "daily", # type of period, currently only 'daily' - only relevant for 'typical_periods' + "how": "daily", # type of period - only relevant for 'typical_periods' "storage_constraints": "soc_constraints", # additional constraints for storages - only relevant for 'typical_periods' "n_clusters": 5, # number of periods - only relevant for 'typical_periods' - "n_segments": 5, - }, # number of segments - only relevant for segmentation + "n_segments": 5, # number of segments - only relevant for segmentation + }, "skip_snapshots": 5, # False or number of snapshots to skip - "dispatch_disaggregation": False, # choose if full complex dispatch optimization should be conducted + "temporal_disaggregation": { + "active": False, # choose if temporally full complex dispatch optimization should be conducted + "no_slices": 8, # number of subproblems optimization is divided into + }, # Simplifications: "branch_capacity_factor": {"HV": 0.5, "eHV": 0.7}, # p.u. branch derating - "load_shedding": False, # meet the demand at value of loss load cost + "load_shedding": True, # meet the demand at value of loss load cost "foreign_lines": { "carrier": "AC", # 'DC' for modeling foreign lines as links - "capacity": "osmTGmod", - }, # 'osmTGmod', 'tyndp2020', 'ntc_acer' or 'thermal_acer' + "capacity": "osmTGmod", # 'osmTGmod', 'tyndp2020', 'ntc_acer' or 'thermal_acer' + }, "comments": None, } def run_etrago(args, json_path): - """The etrago function works with following arguments: - - - Parameters - ---------- - - db : str - ``'oedb'``, - Name of Database session setting stored in *config.ini* of *.egoio* - - gridversion : NoneType or str - ``'v0.4.6'``, - Name of the data version number of oedb: state ``'None'`` for - model_draft (sand-box) or an explicit version number - (e.g. 'v0.4.6') for the grid schema. - - method : dict - {'type': 'lopf', 'n_iter': 5, 'pyomo': True}, - Choose 'lopf' for 'type'. In case of extendable lines, several lopfs - have to be performed. Choose either 'n_init' and a fixed number of - iterations or 'thershold' and a threashold of the objective function as - abort criteria. - Set 'pyomo' to False for big optimization problems, currently only - possible when solver is 'gurobi'. - - pf_post_lopf :dict - {'active': True, 'add_foreign_lopf': True, 'q_allocation': 'p_nom'}, - Option to run a non-linear power flow (pf) directly after the - linear optimal power flow (and thus the dispatch) has finished. - If foreign lines are modeled as DC-links (see foreign_lines), results - of the lopf can be added by setting 'add_foreign_lopf'. - Reactive power can be distributed either by 'p_nom' or 'p'. 
- - start_snapshot : int - 1, - Start hour of the scenario year to be calculated. - - end_snapshot : int - 2, - End hour of the scenario year to be calculated. - If temporal clustering is used, the selected snapshots should cover - whole days. - - solver : str - 'glpk', - Choose your preferred solver. Current options: 'glpk' (open-source), - 'cplex' or 'gurobi'. - - solver_options: dict - Choose settings of solver to improve simulation time and result. - Options are described in documentation of choosen solver. - - model_formulation: str - 'angles' - Choose formulation of pyomo-model. - Current options: angles, cycles, kirchhoff, ptdf - - scn_name : str - 'eGon2035', + """Function to conduct optimization considering the following arguments. + + Parameters + ---------- + db : str + Name of Database session setting stored in *config.ini* of *.egoio*, e.g. + ``'oedb'``. + gridversion : None or str + Name of the data version number of oedb: state ``'None'`` for + model_draft (sand-box) or an explicit version number + (e.g. 'v0.4.6') for the grid schema. + method : dict + Choose method and settings for optimization. + The provided dictionary can have the following entries: + + * "type" : str + Type of optimization, currently only "lopf". Default: "lopf". + * "n_iter" : int + In case of extendable lines, several LOPFs have to be performed. + You can either set "n_iter" and specify a fixed number of iterations + or set "threshold" and specify a threshold of the objective function as + abort criteria of the iterative optimization. + Default: 4. + * "threshold" : int + In case of extendable lines, several LOPFs have to be performed. + You can either set "n_iter" and specify a fixed number of iterations + or set "threshold" and specify a threshold of the objective function as + abort criteria of the iterative optimization. Per default, "n_iter" of 4 + is set. + * "pyomo" : bool + Set to True if pyomo is used for model building. + Set to False for big optimization problems - currently only + possible when solver is "gurobi". + + pf_post_lopf : dict + Settings for option to run a non-linear power flow (PF) directly after the + linear optimal power flow (LOPF), and thus the dispatch optimisation, has + finished. + The provided dictionary can have the following entries: + + * "active" : bool + If True, a PF is performed after the LOPF. Default: True. + * "add_foreign_lopf" : bool + If foreign lines are modeled as DC-links (see parameter `foreign_lines`), + results of the LOPF can be added by setting "add_foreign_lopf" to True. + Default: True. + * "q_allocation" : str + Allocate reactive power to all generators at the same bus either by "p_nom" + or "p". + Default: "p_nom". + + start_snapshot : int + Start hour of the scenario year to be calculated. Default: 1. + end_snapshot : int + End hour of the scenario year to be calculated. If snapshot clustering is used + (see parameter `snapshot_clustering`), the selected snapshots should cover + the number of periods / segments. Default: 2. + solver : str + Choose your preferred solver. Current options: "glpk" (open-source), + "cplex" or "gurobi". Default: "gurobi". + solver_options : dict + Choose settings of solver to improve simulation time and result. + Options are described in documentation of the chosen solver.
Per default, the + following dictionary is set: + + { + "BarConvTol": 1.0e-5, + "FeasibilityTol": 1.0e-5, + "method": 2, + "crossover": 0, + "logFile": "solver_etrago.log", + "threads": 4, + } + + Make sure to reset or adapt these settings when using another solver! Otherwise, + you may run into errors. + model_formulation : str + Choose formulation of pyomo-model. + Current options are: "angles", "cycles", "kirchhoff", "ptdf". + "angles" works best for small networks, while "kirchhoff" works best for larger + networks. + Default: "kirchhoff". + scn_name : str Choose your scenario. Currently, there are two different - scenarios: 'eGon2035', 'eGon100RE'. + scenarios: "eGon2035", "eGon100RE". Default: "eGon2035". + scn_extension : None or str + This option currently does not work! - scn_extension : NoneType or list - None, Choose extension-scenarios which will be added to the existing network container. Data of the extension scenarios are located in extension-tables (e.g. model_draft.ego_grid_pf_hv_extension_bus) - with the prefix 'extension_'. - Currently there are three overlay networks: - 'nep2035_confirmed' includes all planed new lines confirmed by the - Bundesnetzagentur - 'nep2035_b2' includes all new lines planned by the - Netzentwicklungsplan 2025 in scenario 2035 B2 - 'BE_NO_NEP 2035' includes planned lines to Belgium and Norway and - adds BE and NO as electrical neighbours - - scn_decommissioning : str - None, - Choose an extra scenario which includes lines you want to decommise + with the prefix 'extension\_'. + There are three overlay networks: + + * 'nep2035_confirmed' includes all planned new lines confirmed by the + Bundesnetzagentur + * 'nep2035_b2' includes all new lines planned by the + Netzentwicklungsplan 2025 in scenario 2035 B2 + * 'BE_NO_NEP 2035' includes planned lines to Belgium and Norway and + adds BE and NO as electrical neighbours + + Default: None. + scn_decommissioning : NoneType or str + This option currently does not work! + + Choose an extra scenario which includes lines you want to decommission from the existing network. Data of the decommissioning scenarios are located in extension-tables (e.g. model_draft.ego_grid_pf_hv_extension_bus) with the prefix - 'decommissioning_'. + 'decommissioning\_'. Currently, there are two decommissioning_scenarios which are linked to extension-scenarios: - 'nep2035_confirmed' includes all lines that will be replaced in - confirmed projects - 'nep2035_b2' includes all lines that will be replaced in - NEP-scenario 2035 B2 - lpfile : obj - False, - State if and where you want to save pyomo's lp file. Options: - False or '/path/tofile.lp' + * 'nep2035_confirmed' includes all lines that will be replaced in + confirmed projects + * 'nep2035_b2' includes all lines that will be replaced in + NEP-scenario 2035 B2 - csv_export : obj - False, - State if and where you want to save results as csv files.Options: - False or '/path/tofolder'. + Default: None. + lpfile : bool or str + State if and where you want to save pyomo's lp file. Options: + False or '/path/tofile.lp'. Default: False. + csv_export : bool or str + State if and where you want to save results as csv files. Options: + False or '/path/tofolder'. Default: False.
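For orientation, the `extendable` argument documented next could, for example, be set as follows; keys and values are taken from the defaults listed in this docstring and in ``appl.py``:

.. code-block:: python

    args["extendable"] = {
        "extendable_components": ["as_in_db"],  # or e.g. ['network', 'storages']
        "upper_bounds_grid": {
            "grid_max_D": None,  # relative limit for lines in Germany
            "grid_max_abs_D": {  # absolute limits per voltage level
                "380": {"i": 1020, "wires": 4, "circuits": 4},
                "220": {"i": 1020, "wires": 4, "circuits": 4},
                "110": {"i": 1020, "wires": 4, "circuits": 2},
                "dc": 0,
            },
            "grid_max_foreign": 4,  # relative limit for border-crossing lines
            "grid_max_abs_foreign": None,
        },
    }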
extendable : dict - {'extendable_components': ['as_in_db'], - 'upper_bounds_grid': { - 'grid_max_D': None, - 'grid_max_abs_D': { - '380':{'i':1020, 'wires':4, 'circuits':4}, - '220':{'i':1020, 'wires':4, 'circuits':4}, - '110':{'i':1020, 'wires':4, 'circuits':2}, - 'dc':0}, - 'grid_max_foreign': 4, - 'grid_max_abs_foreign': None}}, - ['network', 'storages'], Choose components you want to optimize and set upper bounds for grid expansion. - The list 'extendable_components' defines a set of components to optimize. - Settings can be added in /tools/extendable.py. - The most important possibilities: - 'as_in_db': leaves everything as it is defined in the data coming - from the database - 'network': set all lines, links and transformers in electrical - grid extendable - 'german_network': set lines and transformers in German electrical - grid extendable - 'foreign_network': set foreign lines and transformers in electrical - grid extendable - 'transformers': set all transformers extendable - 'storages' / 'stores': allow to install extendable storages - (unlimited in size) at each grid node in order to meet - the flexibility demand. - 'overlay_network': set all components of the 'scn_extension' - extendable - 'network_preselection': set only preselected lines extendable, - method is chosen in function call - Upper bounds for electrical grid expansion can be defined for lines in - Germany relative to the existing capacity using 'grid_max_D'. - Alternatively, absolute maximum capacities between two electrical buses - can be defined per voltage level using 'grid_max_abs_D'. - Upper bounds for bordercrossing electrical lines can be defined accrodingly - using 'grid_max_foreign' or 'grid_max_abs_foreign'. + The provided dictionary can have the following entries: + + * "extendable_components" : list(str) + The list defines a set of components to optimize. + Settings can be added in /tools/extendable.py. + The most important possibilities: + + * 'as_in_db' + leaves everything as it is defined in the data coming from the database + * 'network' + set all lines, links and transformers in electrical grid extendable + * 'german_network' + set lines and transformers in German electrical grid extendable + * 'foreign_network' + set foreign lines and transformers in electrical grid extendable + * 'transformers' + set all transformers extendable + * 'storages' / 'stores' + allow to install extendable storages (unlimited in size) at each grid + node in order to meet the flexibility demand + + Default: "as_in_db". + + * "upper_bounds_grid" : dict + Dictionary can have the following entries: + + * 'grid_max_D' + Upper bounds for electrical grid expansion can be defined for lines in + Germany relative to the existing capacity. Alternatively, + 'grid_max_abs_D' can be used. Per default, this is set to None and + 'grid_max_abs_D' is set. + + * 'grid_max_abs_D' + Upper bounds for electrical grid expansion can be defined for lines in + Germany as absolute maximum capacities between two electrical buses + per voltage level. Per default the following dictionary is set: + + { + "380": {"i": 1020, "wires": 4, "circuits": 4}, + "220": {"i": 1020, "wires": 4, "circuits": 4}, + "110": {"i": 1020, "wires": 4, "circuits": 2}, + "dc": 0, + } + * 'grid_max_foreign' + Upper bounds for border-crossing electrical lines can be defined + relative to the existing capacity. Alternatively, 'grid_max_abs_foreign' + can be set. + Default: 4. 
+ * 'grid_max_abs_foreign' + Upper bounds for border-crossing electrical lines can be defined equally + to 'grid_max_abs_D' as absolute capacity per voltage level. + Default: None. generator_noise : bool or int State if you want to apply a small random noise to the marginal costs of each generator in order to prevent an optima plateau. To reproduce - a noise, choose the same integer (seed number). - + a noise, choose the same integer (seed number). Default: 789456. extra_functionality : dict or None - None, - Choose extra functionalities and their parameters for PyPSA-model. + Choose extra functionalities and their parameters. Settings can be added in /tools/constraints.py. Current options are: - 'max_line_ext': float - Maximal share of network extension in p.u. - 'min_renewable_share': float - Minimal share of renewable generation in p.u. - 'cross_border_flow': array of two floats - Limit AC cross-border-flows between Germany and its neigbouring - countries, set values in MWh for all snapshots, e.g. [-x, y] - (with x Import, y Export, positiv: export from Germany) - 'cross_border_flows_per_country': dict of cntr and array of floats - Limit AC cross-border-flows between Germany and its neigbouring - countries, set values in in MWh for each country, e.g. [-x, y] - (with x Import, y Export, positiv: export from Germany) - 'capacity_factor': dict of arrays - Limit overall energy production for each carrier, - set upper/lower limit in p.u. - 'capacity_factor_per_gen': dict of arrays - Limit overall energy production for each generator by carrier, - set upper/lower limit in p.u. - 'capacity_factor_per_cntr': dict of dict of arrays - Limit overall energy production country-wise for each carrier, - set upper/lower limit in p.u. - 'capacity_factor_per_gen_cntr': dict of dict of arrays - Limit overall energy production country-wise for each generator - by carrier, set upper/lower limit in p.u. - network_clustering : dict - {'active': True, method: 'kmedoids-dijkstra', 'n_clusters_AC': 30, - 'cluster_foreign_AC': False, method_gas: 'kmeans', - 'n_clusters_gas': 30, 'cluster_foreign_gas': False, - 'k_elec_busmap': False, 'k_ch4_busmap': False, 'line_length_factor': 1, - 'remove_stubs': False, 'use_reduced_coordinates': False, - 'bus_weight_tocsv': None, 'bus_weight_fromcsv': None, - 'gas_weight_tocsv': None, 'gas_weight_fromcsv': None, 'n_init': 10, - 'max_iter': 100, 'tol': 1e-6}, - State if you want to apply a clustering of all network buses. - When ``'active'`` is set to True, the AC buses are clustered down to - ``'n_clusters_AC'`` and ``'n_clusters_gas'``buses. If ``'cluster_foreign_AC'`` is set to False, - the AC buses outside Germany are not clustered, and the buses inside - Germany are clustered to complete ``'n_clusters'`` buses. - The weighting takes place considering generation and load at each node. CH-4 nodes also take - non-transport capacities into account. - ``'cluster_foreign_gas'`` controls whether gas buses of Germanies - neighboring countries are considered for clustering. - With ``'method'`` you can choose between two clustering methods: - k-means Clustering considering geopraphical locations of buses or - k-medoids Dijkstra Clustering considering electrical distances between buses. - With ``'k_elec_busmap'`` or ``'k_ch4_busmap'``you can choose if you - want to load cluster coordinates from a previous run for the respecting carrier. - It should be considered that once this option is set to True, the - provided number of clusters will be ignored. 
- Option ``'remove_stubs'`` reduces the overestimating of line meshes. - The other options affect the kmeans algorithm and should only be - changed carefully, documentation and possible settings are described - in sklearn-package (sklearn/cluster/k_means_.py). - This function doesn't work together with - ``'network_clustering_kmedoids_dijkstra`` and ``'line_grouping = True'``. - - sector_coupled_clustering : nested dict - {'active': True, 'carrier_data': { - 'central_heat': {'base': ['CH4', 'AC'], 'strategy': "simultaneous"}, - } - State if you want to apply clustering of sector coupled carriers, such - as central_heat. The approach builds on already clustered - buses (e.g. CH4 and AC) and builds clusters around the topology of the - buses with carrier ``'base'`` for all buses of a specific carrier, e.g. - ``'central_heat'``. With ``'strategy'`` it is possible to apply either - ``'consecutive'`` or ``'simultaneous'`` clustering. The consecutive - strategy clusters around the buses of the first carrier in the list. - The links to other buses are preserved. All buses, that have no - connection to the first carrier will then be clustered around the buses - of the second carrier in the list. The simultanous strategy looks for - links connecting the buses of the carriers in the list and aggregates - buses in case they have the same set of links connected. For example, - a heat bus connected to CH4 via gas boiler and to AC via heat pump will - only form a cluster with other buses, if these have the same links to - the same clusters of CH4 and AC. + * 'max_line_ext' : float + Maximal share of network extension in p.u. + * 'min_renewable_share' : float + Minimal share of renewable generation in p.u. + * 'cross_border_flow' : array of two floats + Limit AC cross-border-flows between Germany and its neighbouring + countries. Set values in MWh for all snapshots, e.g. [-x, y] + (with x Import, y Export, positive: export from Germany). + * 'cross_border_flows_per_country' : dict of cntr and array of floats + Limit AC cross-border-flows between Germany and its neighbouring + countries. Set values in MWh for each country, e.g. [-x, y] + (with x Import, y Export, positive: export from Germany). + * 'capacity_factor' : dict of arrays + Limit overall energy production for each carrier. + Set upper/lower limit in p.u. + * 'capacity_factor_per_gen' : dict of arrays + Limit overall energy production for each generator by carrier. + Set upper/lower limit in p.u. + * 'capacity_factor_per_cntr' : dict of dict of arrays + Limit overall energy production country-wise for each carrier. + Set upper/lower limit in p.u. + * 'capacity_factor_per_gen_cntr' : dict of dict of arrays + Limit overall energy production country-wise for each generator + by carrier. Set upper/lower limit in p.u. network_clustering_ehv : bool - False, Choose if you want to cluster the full HV/EHV dataset down to only the EHV buses. In that case, all HV buses are assigned to their closest EHV - sub-station, taking into account the shortest distance on power lines. - + substation, taking into account the shortest distance on power lines. + Default: False. + network_clustering : dict + Choose if you want to apply a clustering of all network buses and specify + settings. + The provided dictionary can have the following entries: + + * "active" : bool + If True, the AC buses are clustered down to ``'n_clusters_AC'`` + and the gas buses are clustered down to ``'n_clusters_gas'``. + Default: True. + * "method" : str + Method used for AC clustering.
You can choose between two clustering + methods: + * "kmeans": considers geographical locations of buses + * "kmedoids-dijkstra": considers electrical distances between buses + + Default: "kmedoids-dijkstra". + * "n_clusters_AC" : int + Defines the total number of resulting AC nodes including DE and foreign nodes + if `cluster_foreign_AC` is set to True, otherwise only DE nodes. + Default: 30. + * "cluster_foreign_AC" : bool + If set to False, the AC buses outside Germany are not clustered + and the buses inside Germany are clustered to complete ``'n_clusters_AC'``. + If set to True, foreign AC buses are clustered as well and included + in the number of clusters specified through ``'n_clusters_AC'``. + Default: False. + * "method_gas" : str + Method used for gas clustering. You can choose between two clustering + methods: + * "kmeans": considers geographical locations of buses + * "kmedoids-dijkstra": considers 'electrical' distances between buses + + Default: "kmedoids-dijkstra". + * "n_clusters_gas" : int + Defines the total number of resulting CH4 nodes including DE and foreign nodes + if `cluster_foreign_gas` is set to True, otherwise only DE nodes. + Default: 17. + * "cluster_foreign_gas" : bool + If set to False, the gas buses outside Germany are not clustered + and the buses inside Germany are clustered to complete ``'n_clusters_gas'``. + If set to True, foreign gas buses are clustered as well and included + in the number of clusters specified through ``'n_clusters_gas'``. + Default: False. + * "k_elec_busmap" : bool or str + With this option you can load cluster coordinates from a previous AC + clustering run. Options are False, in which case no previous busmap is + loaded, and path/to/busmap.csv in which case the busmap is loaded from the + specified file. Please note that when a path is provided, the set number + of clusters will be ignored. + Default: False. + * "k_gas_busmap" : bool or str + With this option you can load cluster coordinates from a previous gas + clustering run. Options are False, in which case no previous busmap is + loaded, and path/to/busmap.csv in which case the busmap is loaded from the + specified file. Please note that when a path is provided, the set number + of clusters will be ignored. + Default: False. + * "bus_weight_fromcsv" : None or str + In general, the weighting of AC buses takes place considering generation and + load at each node. With this option, you can load your own weighting for + the AC buses by providing a path to a csv file. If None, weighting is + conducted as described above. + Default: None. + * "bus_weight_tocsv" : None or str + Specifies whether to store the weighting of AC buses to csv or not. If + None, it is not stored. Otherwise, it is stored to the provided + path/to/bus_weight.csv. + Default: None. + * "gas_weight_fromcsv" : None or str + In general, the weighting of CH4 nodes takes place considering generation + and load at each node, as well as non-transport capacities at each node. + With this option, you can load your own weighting for the CH4 buses by + providing a path to a csv file. If None, weighting is conducted as + described above. + Default: None. + * "gas_weight_tocsv" : None or str + Specifies whether to store the weighting of gas buses to csv or not. If + None, it is not stored. Otherwise, it is stored to the provided + path/to/gas_bus_weight.csv. + Default: None. + * "line_length_factor" : float + Defines the factor to multiply the crow-flies distance + between new buses by, in order to get new line lengths. + Default: 1.
+ * "remove_stubs" : bool + If True, remove stubs before k-means clustering, which reduces the + overestimating of line meshes. + This option is only used within the k-means clustering. + Default: False. + * "use_reduced_coordinates" : bool + If True, do not average cluster coordinates, but take from busmap. + This option is only used within the k-means clustering. + Default: False. + * "random_state" : int + Random state for replicability of clustering results. Default: 42. + * "n_init" : int + Affects clustering algorithm, only change when necessary! Documentation + and possible settings are described in sklearn-package + (sklearn/cluster/kmeans.py). + Default: 10. + * "max_iter" : int + Affects clustering algorithm, only change when necessary! Documentation + and possible settings are described in sklearn-package + (sklearn/cluster/kmeans.py). + Default: 100. + * "tol" : float + Affects clustering algorithm, only change when necessary! Documentation + and possible settings are described in sklearn-package + (sklearn/cluster/kmeans.py). + Default: 1e-6. + * "CPU_cores" : int or str + Number of cores used in clustering. Specify a concrete number or "max" to + use all cores available. + Default: 4. + + sector_coupled_clustering : dict + Choose if you want to apply a clustering of sector coupled carriers, such + as central_heat, and specify settings. + The provided dictionary can have the following entries: + + * "active" : bool + State if you want to apply clustering of sector coupled carriers, such + as central_heat. + Default: True. + * "carrier_data" : dict[str, dict] + Keys of the dictionary specify carriers affected by sector coupling, e.g. + "central_heat". The corresponding dictionaries specify, how the carrier + should be clustered. This dictionary must contain the following entries: + + * "base" : list(str) + The approach bases on already clustered buses (AC and CH4) and builds + clusters around the topology of those buses. With this option, you can + specify the carriers to use as base. See `strategy` for more + information. + * "strategy" : str + Strategy to use in the clustering. Possible options are: + + * "consecutive" + This strategy clusters around the buses of the first carrier in the + `'base'`` list. The links to other buses are preserved. All buses, + that have no connection to the first carrier will then be clustered + around the buses of the second carrier in the list. + * "simultaneous" + This strategy looks for links connecting the buses of the carriers + in the ``'base'`` list and aggregates buses in case they have the + same set of links connected. For example, a heat bus connected to + CH4 via gas boiler and to AC via heat pump will only form a cluster + with other buses, if these have the same links to + the same clusters of CH4 and AC. + + Per default, the following dictionary is set: + { + "central_heat": { + "base": ["CH4", "AC"], + "strategy": "simultaneous", + }, + } + + disaggregation : None or str + Specify None, in order to not perform a spatial disaggregation, or the + method you want to use for the spatial disaggregation. Only possible + option is currently "uniform". snapshot_clustering : dict - {'active': False, 'method':'typical_periods', 'how': 'daily', - 'extreme_periods': None, 'storage_constraints': '', 'n_clusters': 5, 'n_segments': 5}, State if you want to apply a temporal clustering and run the optimization - only on a subset of snapshot periods. - You can choose between a method clustering to typical periods, e.g. 
days - or a method clustering to segments of adjacent hours. - With ``'extreme_periods'`` you define the consideration of timesteps with - extreme residual load while temporal aggregation. - With ``'how'``, ``'storage_constraints'`` and ``'n_clusters'`` you choose - the length of the periods, constraints considering the storages and the number - of clusters for the usage of the method typical_periods. - With ``'n_segments'`` you choose the number of segments for the usage of - the method segmentation. + only on a subset of snapshot periods, and specify settings. + The provided dictionary can have the following entries: + + * "active" : bool + Choose if clustering is activated or not. If True, it is activated. + Default: False. + * "method" : str + Method to apply. Possible options are "typical_periods" and "segmentation". + Default: "segmentation". + * "extreme_periods" : None or str + Method used to consider extreme snapshots (time steps with extreme residual + load) in the reduced timeseries. + Possible options are None, "append", "new_cluster_center", and + "replace_cluster_center". The default is None, in which case extreme periods + are not considered. + * "how" : str + Definition of period in case `method` is set to "typical_periods". + Possible options are "daily", "weekly", and "monthly". Default: "daily". + * "storage_constraints" : str + Defines additional constraints for storage units in case `method` is set to + "typical_periods". Possible options are "daily_bounds", "soc_constraints" + and "soc_constraints_simplified". Default: "soc_constraints". + * "n_clusters" : int + Number of clusters in case `method` is set to "typical_periods". Default: 5. + * "n_segments" : int + Number of segments in case `method` is set to "segmentation". Default: 5. skip_snapshots : bool or int - State if you only want to consider every n-th timestep - to reduce temporal complexity. - - dispatch_disaggregation : bool - State if you to apply a second lopf considering dispatch only - to disaggregate the dispatch to the whole temporal complexity. - - branch_capacity_factor : dict - {'HV': 0.5, 'eHV' : 0.7}, + State None if you want to use all time steps, or provide a number + if you only want to consider every n-th timestep to reduce + temporal complexity. Default: 5. + temporal_disaggregation : dict + State if you want to apply a second LOPF considering dispatch only (no capacity + optimization) to disaggregate the dispatch to the whole temporal complexity. + Be aware that load shedding will be applied in this optimization. + The provided dictionary must have the following entries: + + * "active" : bool + Choose if temporal disaggregation is activated or not. If True, it is + activated. + Default: False. + * "no_slices" : int + With "no_slices" the optimization problem will be calculated as a given + number of sub-problems while using some information on the state of charge + of storage units and stores from the former optimization (at the moment + this is only possible in combination with skip_snapshots; + extra_functionalities are disregarded). + Default: 8. + + branch_capacity_factor : dict[str, float] Add a factor here if you want to globally change line capacities - (e.g. to "consider" an (n-1) criterion or for debugging purposes). - + (e.g. to "consider" an (n-1) criterion or for debugging purposes). The factor + specifies the p.u. branch rating, e.g. 0.5 to allow half the line capacity. + Per default, it is set to {'HV': 0.5, 'eHV' : 0.7}.
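The semantics of ``skip_snapshots`` documented above can be illustrated with a few lines of plain pandas. The sketch below is illustrative only and rests on assumptions (an hourly year, a uniform weighting of one hour per snapshot); it is not eTraGo's implementation, but it shows why the weighting of the kept snapshots has to be scaled when snapshots are skipped.

.. code-block:: python

    import pandas as pd

    # Hypothetical hourly year with uniform snapshot weightings.
    snapshots = pd.date_range("2035-01-01", periods=8760, freq="H")
    weightings = pd.Series(1.0, index=snapshots)

    n = 5  # the documented default for skip_snapshots
    kept = snapshots[::n]
    # Scale the weightings of the kept snapshots by n so that sums
    # over the year (e.g. energy) remain comparable.
    kept_weightings = weightings.loc[kept] * n

    assert len(kept) == 1752  # 8760 / 5
    assert kept_weightings.sum() == weightings.sum()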
load_shedding : bool - False, State here if you want to make use of the load shedding function which is helpful when debugging: a very expensive generator is set to each - bus and meets the demand when regular - generators cannot do so. - + bus and meets the demand when regular generators cannot do so. + Default: False. foreign_lines : dict - {'carrier':'AC', 'capacity': 'osmTGmod}' Choose transmission technology and capacity of foreign lines: - 'carrier': 'AC' or 'DC' - 'capacity': 'osmTGmod', 'ntc_acer' or 'thermal_acer' + + * 'carrier': 'AC' or 'DC' + * 'capacity': 'osmTGmod', 'tyndp2020', 'ntc_acer' or 'thermal_acer' + + Per default, it is set to {'carrier':'AC', 'capacity': 'osmTGmod'}. comments : str - None + Can be any comment you wish to make. Returns ------- - network : `pandas.DataFrame` - eTraGo result network based on `PyPSA network + etrago : etrago object + eTraGo containing all network information and a PyPSA network `_ """ @@ -441,34 +630,7 @@ def run_etrago(args, json_path): # import network from database etrago.build_network_from_db() - etrago.network.lines.type = "" - etrago.network.storage_units.lifetime = np.inf - etrago.network.transformers.lifetime = 40 # only temporal fix - etrago.network.lines.lifetime = 40 # only temporal fix until either the - # PyPSA network clustering function - # is changed (taking the mean) or our - # data model is altered, which will - # happen in the next data creation run - - etrago.network.lines_t.s_max_pu = etrago.network.lines_t.s_max_pu.transpose()[ - etrago.network.lines_t.s_max_pu.columns.isin(etrago.network.lines.index) - ].transpose() - - # Set gas grid links bidirectional - etrago.network.links.loc[ - etrago.network.links[etrago.network.links.carrier == "CH4"].index, "p_min_pu" - ] = -1.0 - - # Set efficiences of CHP - etrago.network.links.loc[ - etrago.network.links[etrago.network.links.carrier.str.contains("CHP")].index, - "efficiency", - ] = 0.43 - - etrago.network.links_t.p_min_pu.fillna(0.0, inplace=True) - etrago.network.links_t.p_max_pu.fillna(1.0, inplace=True) - etrago.network.links_t.efficiency.fillna(1.0, inplace=True) - + # adjust network regarding eTraGo setting etrago.adjust_network() # ehv network clustering @@ -476,12 +638,8 @@ def run_etrago(args, json_path): # spatial clustering etrago.spatial_clustering() - etrago.spatial_clustering_gas() - etrago.args["load_shedding"] = True - etrago.load_shedding() - # snapshot clustering etrago.snapshot_clustering() @@ -489,7 +647,6 @@ def run_etrago(args, json_path): etrago.skip_snapshots() # start linear optimal powerflow calculations - # needs to be adjusted for new sectors etrago.lopf() # conduct lopf with full complex timeseries for dispatch disaggregation @@ -514,11 +671,10 @@ def run_etrago(args, json_path): etrago = run_etrago(args, json_path=None) print(datetime.datetime.now()) etrago.session.close() - # plots + # plots: more in tools/plot.py # make a line loading plot - # plot_line_loading(network) - # plot stacked sum of nominal power for each generator type and timestep - # plot_stacked_gen(network, resolution="MW") - # plot to show extendable storages - # storage_distribution(network) - # extension_overlay_network(network) + # etrago.plot_grid(line_colors='line_loading', bus_sizes=0.0001, timesteps=range(2)) + # network and storage + # etrago.plot_grid(line_colors='expansion_abs', bus_colors='storage_expansion', bus_sizes=0.0001) + # flexibility usage + # etrago.flexibility_usage('DSM') diff --git a/etrago/args.json b/etrago/args.json index 08f707c58..1ebcf89a1 
100644 --- a/etrago/args.json +++ b/etrago/args.json @@ -51,6 +51,7 @@ }, "generator_noise": 789456, "extra_functionality": {}, + "network_clustering_ehv": false, "network_clustering": { "active": true, "method": "kmedoids-dijkstra", @@ -58,15 +59,16 @@ "cluster_foreign_AC": false, "n_clusters_gas": 30, "cluster_foreign_gas": false, - "k_busmap": false, - "kmeans_gas_busmap": false, - "line_length_factor": 1, - "remove_stubs": false, - "use_reduced_coordinates": false, + "k_elec_busmap": false, + "k_gas_busmap": false, "bus_weight_tocsv": null, "bus_weight_fromcsv": null, "gas_weight_tocsv": null, "gas_weight_fromcsv": null, + "line_length_factor": 1, + "remove_stubs": false, + "use_reduced_coordinates": false, + "random_state": 42, "n_init": 10, "max_iter": 100, "tol": 1e-06 @@ -74,39 +76,28 @@ "sector_coupled_clustering": { "active": true, "carrier_data": { - "H2_ind_load": { - "base": [ - "H2_grid" - ], - "strategy": "consecutive" - }, "central_heat": { "base": [ "CH4", "AC" ], - "strategy": "consecutive" + "strategy": "simultaneous" - }, - "rural_heat": { - "base": [ - "CH4", - "AC" - ], - "strategy": "consecutive" } } }, - "network_clustering_ehv": false, - "disaggregation": "uniform", + "disaggregation": null, "snapshot_clustering": { "active": false, - "method": "typical_periods", + "method": "segmentation", "how": "daily", - "storage_constraints": "", + "storage_constraints": "soc_constraints", "n_clusters": 5, "n_segments": 5 }, - "skip_snapshots": 3, + "skip_snapshots": 5, + "temporal_disaggregation": { + "active": true, + "no_slices": 8 + }, "branch_capacity_factor": { "HV": 0.5, "eHV": 0.7 diff --git a/etrago/cluster/disaggregation.py b/etrago/cluster/disaggregation.py index 0b4378836..13c811e1a 100644 --- a/etrago/cluster/disaggregation.py +++ b/etrago/cluster/disaggregation.py @@ -4,10 +4,12 @@ import cProfile import time +from loguru import logger as log from pyomo.environ import Constraint from pypsa import Network import pandas as pd +from etrago.tools import noops from etrago.tools.utilities import residual_load @@ -27,7 +29,7 @@ def __init__( self.buses = pd.merge( original_network.buses, - clustering.busmap.to_frame(name="cluster"), + self.clustering.busmap.to_frame(name="cluster"), left_index=True, right_index=True, ) @@ -65,9 +67,9 @@ def construct_partial_network(self, cluster, scenario): :param cluster: Index of the cluster to disaggregate :return: Tuple of (partial_network, external_buses) where - `partial_network` is the result of the partial decomposition - and `external_buses` represent clusters adjacent to `cluster` that may - be influenced by calculations done on the partial network. + `partial_network` is the result of the partial decomposition + and `external_buses` represent clusters adjacent to `cluster` that + may be influenced by calculations done on the partial network.
""" # Create an empty network @@ -76,7 +78,7 @@ def construct_partial_network(self, cluster, scenario): # find all lines that have at least one bus inside the cluster busflags = self.buses["cluster"] == cluster - def is_bus_in_cluster(conn): + def is_bus_in_cluster(conn, busflags=busflags): return busflags[conn] # Copy configurations to new network @@ -92,33 +94,34 @@ def is_bus_in_cluster(conn): line_types = ["lines", "links", "transformers"] for line_type in line_types: + rows: pd.DataFrame = getattr(self.original_network, line_type) + timeseries: dict[str, pd.DataFrame] = getattr( + self.original_network, line_type + "_t" + ) # Copy all lines that reside entirely inside the cluster ... setattr( partial_network, line_type, - filter_internal_connector( - getattr(self.original_network, line_type), - is_bus_in_cluster, - ), + filter_internal_connector(rows, is_bus_in_cluster), ) # ... and their time series # TODO: These are all time series, not just the ones from lines - # residing entirely in side the cluster. + # residing entirely inside the cluster. # Is this a problem? - setattr( - partial_network, - line_type + "_t", - getattr(self.original_network, line_type + "_t"), - ) + # I hope not, because neither is `rows.index` a subset + # of the columns of one of the values of `timeseries`, + # nor the other way around, so it's not clear how to + # align both. + setattr(partial_network, line_type + "_t", timeseries) # Copy all lines whose `bus0` lies within the cluster left_external_connectors = filter_left_external_connector( - getattr(self.original_network, line_type), is_bus_in_cluster + rows, is_bus_in_cluster ) def from_busmap(x): - return self.idx_prefix + self.clustering.busmap.loc[x] + return self.idx_prefix + self.buses.loc[x, "cluster"] if not left_external_connectors.empty: ca_option = pd.get_option("mode.chained_assignment") @@ -133,7 +136,7 @@ def from_busmap(x): # Copy all lines whose `bus1` lies within the cluster right_external_connectors = filter_right_external_connector( - getattr(self.original_network, line_type), is_bus_in_cluster + rows, is_bus_in_cluster ) if not right_external_connectors.empty: ca_option = pd.get_option("mode.chained_assignment") @@ -179,8 +182,8 @@ def from_busmap(x): self.reindex_with_prefix(externals_to_insert) # .. and insert them as well as their time series - partial_network.buses = partial_network.buses.append( - externals_to_insert + partial_network.buses = pd.concat( + [partial_network.buses, externals_to_insert] ) partial_network.buses_t = self.original_network.buses_t @@ -212,7 +215,9 @@ def from_busmap(x): setattr( partial_network, bustype, - getattr(partial_network, bustype).append(buses_to_insert), + pd.concat( + [getattr(partial_network, bustype), buses_to_insert] + ), ) # Also copy their time series @@ -228,29 +233,28 @@ def from_busmap(x): # series accordingly, but there must be bug somewhere because # using it, the time series in the clusters and sums of the # time series after disaggregation don't match up. 
- """ - series = getattr(self.original_network, bustype + '_t') - partial_series = type(series)() - for s in series: - partial_series[s] = series[s].loc[ - :, - getattr(partial_network, bustype) - .index.intersection(series[s].columns)] - setattr(partial_network, bustype + '_t', partial_series) - """ + + # series = getattr(self.original_network, bustype + '_t') + # partial_series = type(series)() + # for s in series: + # partial_series[s] = series[s].loc[ + # :, + # getattr(partial_network, bustype) + # .index.intersection(series[s].columns)] + # setattr(partial_network, bustype + '_t', partial_series) # Just a simple sanity check # TODO: Remove when sure that disaggregation will not go insane anymore for line_type in line_types: - assert ( - getattr(partial_network, line_type) - .bus0.isin(partial_network.buses.index) - .all() - ) - assert ( - getattr(partial_network, line_type) - .bus1.isin(partial_network.buses.index) - .all() + rows = getattr(partial_network, line_type) + + left = rows.bus0.isin(partial_network.buses.index) + right = rows.bus1.isin(partial_network.buses.index) + assert rows.loc[~(left | right), :].empty, ( + f"Not all `partial_network.{line_type}` have an endpoint," + " i.e. `bus0` or `bus1`," + f" contained in `partial_network.buses.index`." + f" Spurious additional rows:\nf{rows.loc[~(left | right), :]}" ) return partial_network, external_buses @@ -262,10 +266,11 @@ def solve(self, scenario, solver): """ Decompose each cluster into separate units and try to optimize them separately + :param scenario: :param solver: Solver that may be used to optimize partial networks """ - clusters = set(self.clustering.busmap.values) + clusters = set(self.buses.loc[:, "cluster"].values) n = len(clusters) self.stats = { "clusters": pd.DataFrame( @@ -274,9 +279,9 @@ def solve(self, scenario, solver): ) } profile = cProfile.Profile() + profile = noops for i, cluster in enumerate(sorted(clusters)): - print("---") - print("Decompose cluster %s (%d/%d)" % (cluster, i + 1, n)) + log.info(f"Decompose {cluster=} ({i + 1}/{n})") profile.enable() t = time.time() partial_network, externals = self.construct_partial_network( @@ -284,9 +289,9 @@ def solve(self, scenario, solver): ) profile.disable() self.stats["clusters"].loc[cluster, "decompose"] = time.time() - t - print( - "Decomposed in ", - self.stats["clusters"].loc[cluster, "decompose"], + log.info( + "Decomposed in " + f'{self.stats["clusters"].loc[cluster, "decompose"]}' ) t = time.time() profile.enable() @@ -295,32 +300,32 @@ def solve(self, scenario, solver): ) profile.disable() self.stats["clusters"].loc[cluster, "spread"] = time.time() - t - print( - "Result distributed in ", - self.stats["clusters"].loc[cluster, "spread"], + log.info( + "Result distributed in " + f'{self.stats["clusters"].loc[cluster, "spread"]}' ) profile.enable() t = time.time() self.transfer_results(partial_network, externals) profile.disable() self.stats["clusters"].loc[cluster, "transfer"] = time.time() - t - print( - "Results transferred in ", - self.stats["clusters"].loc[cluster, "transfer"], + log.info( + "Results transferred in " + f'{self.stats["clusters"].loc[cluster, "transfer"]}' ) profile.enable() t = time.time() - print("---") fs = (mc("sum"), mc("sum")) for bt, ts in ( ("generators", {"p": fs, "q": fs}), ("storage_units", {"p": fs, "state_of_charge": fs, "q": fs}), + ("links", {"p0": fs, "p1": fs}), ): - print("Attribute sums, {}, clustered - disaggregated:".format(bt)) + log.info(f"Attribute sums, {bt}, clustered - disaggregated:") cnb = 
getattr(self.clustered_network, bt) onb = getattr(self.original_network, bt) - print( + log.info( "{:>{}}: {}".format( "p_nom_opt", 4 + len("state_of_charge"), @@ -329,11 +334,11 @@ def solve(self, scenario, solver): ) ) - print("Series sums, {}, clustered - disaggregated:".format(bt)) + log.info(f"Series sums, {bt}, clustered - disaggregated:") cnb = getattr(self.clustered_network, bt + "_t") onb = getattr(self.original_network, bt + "_t") for s in ts: - print( + log.info( "{:>{}}: {}".format( s, 4 + len("state_of_charge"), @@ -343,9 +348,9 @@ def solve(self, scenario, solver): ) profile.disable() self.stats["check"] = time.time() - t - print("Checks computed in ", self.stats["check"]) + log.info(f"Checks computed in {self.stats['check']}s.") - # profile.print_stats(sort='cumtime') + profile.print_stats(sort="cumtime") def transfer_results( self, @@ -420,7 +425,7 @@ def _validate_disaggregation_generators(self, cluster, f): def extra_functionality(network, snapshots): f(network, snapshots) generators = self.original_network.generators.assign( - bus=lambda df: df.bus.map(self.clustering.busmap) + bus=lambda df: df.bus.map(self.buses.loc[:, "cluster"]) ) def construct_constraint(model, snapshot, carrier): @@ -472,7 +477,9 @@ def extra_functionality(network, snapshots): ]: generators = getattr( self.original_network, bustype_pypsa - ).assign(bus=lambda df: df.bus.map(self.clustering.busmap)) + ).assign( + bus=lambda df: df.bus.map(self.buses.loc[:, "cluster"]) + ) for suffix in suffixes: def construct_constraint(model, snapshot): @@ -519,12 +526,21 @@ class UniformDisaggregation(Disaggregation): def solve_partial_network( self, cluster, partial_network, scenario, solver=None ): + log.debug("Solving partial network.") bustypes = { + "links": { + "group_by": ("carrier", "bus1"), + "series": ("p0", "p1"), + }, "generators": {"group_by": ("carrier",), "series": ("p", "q")}, "storage_units": { "group_by": ("carrier", "max_hours"), "series": ("p", "state_of_charge", "q"), }, + "stores": { + "group_by": ("carrier",), + "series": ("e", "p"), + }, } weights = { "p": ("p_nom_opt", "p_max_pu"), @@ -536,47 +552,93 @@ def solve_partial_network( ) else ("p_nom_opt", "p_max_pu") ), + "p0": ("p_nom_opt",), + "p1": ("p_nom_opt",), "state_of_charge": ("p_nom_opt",), + "e": ("e_nom_opt",), } filters = {"q": lambda o: o.control == "PV"} + for bustype in bustypes: + # Define attributeof components which are available + if bustype == "stores": + extendable_flag = "e_nom_extendable" + nominal_capacity = "e_nom" + optimal_capacity = "e_nom_opt" + maximal_capacity = "e_nom_max" + weights["p"] = ("e_nom_opt", "e_max_pu") + else: + extendable_flag = "p_nom_extendable" + nominal_capacity = "p_nom" + optimal_capacity = "p_nom_opt" + maximal_capacity = "p_nom_max" + weights["p"] = ("p_nom_opt", "p_max_pu") + + log.debug(f"Decomposing {bustype}.") pn_t = getattr(partial_network, bustype + "_t") cl_t = getattr(self.clustered_network, bustype + "_t") pn_buses = getattr(partial_network, bustype) - cl_buses = getattr(self.clustered_network, bustype) + cl_buses = getattr(self.clustered_network, bustype)[ + lambda df: df.loc[:, "bus" if "bus" in df.columns else "bus0"] + == cluster + ] groups = product( *[ [ {"key": key, "value": value} - for value in set(pn_buses.loc[:, key]) + for value in set(cl_buses.loc[:, key]) ] for key in bustypes[bustype]["group_by"] ] ) for group in groups: - clb = cl_buses[cl_buses.bus == cluster] query = " & ".join( ["({key} == {value!r})".format(**axis) for axis in group] ) - clb = clb.query(query) 
+ clb = cl_buses.query(query) if len(clb) == 0: continue assert len(clb) == 1, ( - "Cluster {} has {} buses for group {}.\n".format( - cluster, len(clb), group - ) - + "Should be exactly one." + f"Cluster {cluster} has {len(clb)} buses for {group=}." + "\nShould be exactly one." ) # Remove buses not belonging to the partial network pnb = pn_buses.iloc[ [ i for i, row in enumerate(pn_buses.itertuples()) - if not row.bus.startswith(self.idx_prefix) + for bus in [ + row.bus if hasattr(row, "bus") else row.bus0 + ] + if not bus.startswith(self.idx_prefix) ] ] + if bustype == "links": + index = self.buses[ + self.buses.loc[:, "cluster"] == group[1]["value"] + ].index.tolist() + query = ( + f"(carrier == {group[0]['value']!r})" + f" & (bus1 in {index})" + ) pnb = pnb.query(query) - assert not pnb.empty, ( + assert not pnb.empty or ( + # In some cases, a district heating grid is connected to a + # substation only via a resistive_heater but not e.g. by a + # heat_pump or one of the other listed `carrier`s. + # In the clustered network, there are both. + # In these cases, the `pnb` can actually be empty. + group[0]["value"] + in [ + "central_gas_boiler", + "central_heat_pump", + "central_gas_CHP_heat", + "central_gas_CHP", + "CH4", + "DC", + "OCGT", + ] + ), ( "Cluster has a bus for:" + "\n ".join( ["{key}: {value!r}".format(**axis) for axis in group] @@ -584,10 +646,12 @@ def solve_partial_network( + "\nbut no matching buses in its corresponding " + "partial network." ) + if pnb.empty: + continue if not ( - pnb.loc[:, "p_nom_extendable"].all() - or not pnb.loc[:, "p_nom_extendable"].any() + pnb.loc[:, extendable_flag].all() + or not pnb.loc[:, extendable_flag].any() ): raise NotImplementedError( "The `'p_nom_extendable'` flag for buses in the" @@ -603,56 +667,59 @@ def solve_partial_network( ) else: assert ( - pnb.loc[:, "p_nom_extendable"] - == clb.iloc[0].at["p_nom_extendable"] + pnb.loc[:, extendable_flag] + == clb.iloc[0].at[extendable_flag] ).all(), ( - "The `'p_nom_extendable'` flag for the current " - + "cluster's bus does not have the same value " - + "it has on the buses of it's partial network." + "The `'p_nom_extendable'` flag for the current" + " cluster's bus does not have the same value" + " it has on the buses of its partial network." ) - if clb.iloc[0].at["p_nom_extendable"]: + if clb.iloc[0].at[extendable_flag]: # That means, `p_nom` got computed via optimization and we # have to distribute it into the subnetwork first. - pnb_p_nom_max = pnb.loc[:, "p_nom_max"] + pnb_p_nom_max = pnb.loc[:, maximal_capacity] + + # If upper limit is infinite, replace it by a very large + # number to avoid NaN values in the calculation + pnb_p_nom_max.replace(float("inf"), 10000000, inplace=True) + p_nom_max_global = pnb_p_nom_max.sum(axis="index") - pnb.loc[:, "p_nom_opt"] = ( - clb.iloc[0].at["p_nom_opt"] + + pnb.loc[:, optimal_capacity] = ( + clb.iloc[0].at[optimal_capacity] * pnb_p_nom_max / p_nom_max_global ) getattr(self.original_network, bustype).loc[ - pnb.index, "p_nom_opt" - ] = pnb.loc[:, "p_nom_opt"] - pnb.loc[:, "p_nom"] = pnb.loc[:, "p_nom_opt"] + pnb.index, optimal_capacity + ] = pnb.loc[:, optimal_capacity] + pnb.loc[:, nominal_capacity] = pnb.loc[:, optimal_capacity] else: # That means 'p_nom_opt' didn't get computed and is # potentially not present in the dataframe. But we want to # always use 'p_nom_opt' in the remaining code, so save a # view of the computed 'p_nom' values under 'p_nom_opt'.
- pnb.loc[:, "p_nom_opt"] = pnb.loc[:, "p_nom"] + pnb.loc[:, optimal_capacity] = pnb.loc[:, nominal_capacity] # This probably shouldn't be here, but rather in # `transfer_results`, but it's easier to do it this way right # now. getattr(self.original_network, bustype).loc[ - pnb.index, "p_nom_opt" - ] = pnb.loc[:, "p_nom_opt"] - timed = ( - lambda key, series=set( - s - for s in cl_t - if not cl_t[s].empty - if not pn_t[s].columns.intersection(pnb.index).empty - ): key - in series - ) + pnb.index, optimal_capacity + ] = pnb.loc[:, optimal_capacity] + timed = lambda key, series={ # noqa: 731 + s + for s in cl_t + if not cl_t[s].empty + if not pn_t[s].columns.intersection(pnb.index).empty + }: (key in series) for s in bustypes[bustype]["series"]: if s in self.skip: continue filtered = pnb.loc[filters.get(s, slice(None))] - clt = cl_t[s].loc[:, next(clb.itertuples()).Index] + clt = cl_t[s].loc[:, clb.index[0]] weight = reduce( multiply, ( @@ -669,15 +736,27 @@ def solve_partial_network( else () ) ws = weight.sum(axis=len(loc)) - for bus_id in filtered.index: - values = clt * weight.loc[loc + (bus_id,)] / ws - pn_t[s].insert(len(pn_t[s].columns), bus_id, values) + new_columns = pd.DataFrame( + { + bus_id: clt * weight.loc[loc + (bus_id,)] / ws + for bus_id in filtered.index + } + ) + delta = abs((new_columns.sum(axis=1) - clt).sum()) + epsilon = 1e-5 + assert delta < epsilon, ( + "Sum of disaggregated time series does not match" + f" aggregated timeseries: {delta=} > {epsilon=}." + ) + pn_t[s].loc[:, new_columns.columns] = new_columns def transfer_results(self, *args, **kwargs): - kwargs["bustypes"] = ["generators", "storage_units"] + kwargs["bustypes"] = ["generators", "links", "storage_units", "stores"] kwargs["series"] = { "generators": {"p"}, + "links": {"p0", "p1"}, "storage_units": {"p", "state_of_charge"}, + "stores": {"e", "p"}, } return super().transfer_results(*args, **kwargs) @@ -688,7 +767,7 @@ def swap_series(s): def filter_internal_connector(conn, is_bus_in_cluster): return conn[ - conn.bus0.apply(is_bus_in_cluster) & conn.bus1.apply(is_bus_in_cluster) + conn.bus0.apply(is_bus_in_cluster) | conn.bus1.apply(is_bus_in_cluster) ] @@ -719,6 +798,7 @@ def update_constraints(network, externals): def run_disaggregation(self): + log.debug("Running disaggregation.") if self.clustering: disagg = self.args.get("disaggregation") skip = () if self.args["pf_post_lopf"]["active"] else ("q",) @@ -733,9 +813,9 @@ def run_disaggregation(self): ) elif disagg == "uniform": disaggregation = UniformDisaggregation( - self.disaggregated_network, - self.network, - self.clustering, + original_network=self.disaggregated_network, + clustered_network=self.network, + clustering=self.clustering, skip=skip, ) @@ -748,8 +828,7 @@ def run_disaggregation(self): self.disaggregated_network.generators_t.p.fillna(0, inplace=True) self.disaggregated_network.generators_t.q.fillna(0, inplace=True) - self.disaggregated_network.results = self.network.results - print( + log.info( "Time for overall desaggregation [min]: {:.2}".format( (time.time() - t) / 60 ) diff --git a/etrago/cluster/electrical.py b/etrago/cluster/electrical.py index 767cf5c36..e46d6b0ff 100755 --- a/etrago/cluster/electrical.py +++ b/etrago/cluster/electrical.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2016-2018 Flensburg University of Applied Sciences, +# Copyright 2016-2023 Flensburg University of Applied Sciences, # Europa-Universität Flensburg, # Centre for Sustainable Energy Systems, # DLR-Institute for Networked Energy Systems @@ -26,9 
+26,6 @@ if "READTHEDOCS" not in os.environ: import logging - import numpy as np - import pandas as pd - import pypsa.io as io from pypsa import Network from pypsa.networkclustering import ( aggregatebuses, @@ -37,6 +34,9 @@ get_clustering_from_busmap, ) from six import iteritems + import numpy as np + import pandas as pd + import pypsa.io as io from etrago.cluster.spatial import ( busmap_from_psql, @@ -57,13 +57,34 @@ "DLR-Institute for Networked Energy Systems" ) __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" -__author__ = "s3pp, wolfbunke, ulfmueller, lukasol" +__author__ = ( + "MGlauer, MarlonSchlemminger, mariusves, BartelsJ, gnn, lukasoldi, " + "ulfmueller, lukasol, ClaraBuettner, CarlosEpia, KathiEsterl, " + "pieterhexen, fwitte, AmeliaNadal, cjbernal071421" +) + # TODO: Workaround because of agg def _leading(busmap, df): - """ """ + """ + Returns a function that computes the leading bus_id for a given mapped + list of buses. + + Parameters + ----------- + busmap : dict + A dictionary that maps old bus_ids to new bus_ids. + df : pandas.DataFrame + A DataFrame containing network.buses data. Each row corresponds + to a unique bus + + Returns + -------- + leader : function + A function that returns the leading bus_id for the argument `x`. + """ def leader(x): ix = busmap[x.index[0]] @@ -73,10 +94,32 @@ def leader(x): def adjust_no_electric_network(etrago, busmap, cluster_met): + """ + Adjusts the non-electric network based on the electrical network + (esp. eHV network), adds the gas buses to the busmap, and creates the + new buses for the non-electric network. + + Parameters + ---------- + etrago : Etrago + An instance of the Etrago class. + busmap : dict + A dictionary that maps old bus_ids to new bus_ids. + cluster_met : str + A string indicating the clustering method to be used. - network = etrago.network.copy() - # network2 is supposed to contain all the not electrical or gas buses and links - network2 = network.copy() + Returns + ------- + network : pypsa.Network + Container for all network components of the clustered network. + busmap : dict + Maps old bus_ids to new bus_ids including all sectors. 
+ + """ + network = etrago.network + # network2 is supposed to contain all the not electrical or gas buses + # and links + network2 = network.copy(with_time=False) network2.buses = network2.buses[ (network2.buses["carrier"] != "AC") & (network2.buses["carrier"] != "CH4") @@ -89,6 +132,7 @@ def adjust_no_electric_network(etrago, busmap, cluster_met): "H2_saltcavern": "power_to_H2", "dsm": "dsm", "Li ion": "BEV charger", + "Li_ion": "BEV_charger", "rural_heat": "rural_heat_pump", } @@ -105,14 +149,14 @@ def adjust_no_electric_network(etrago, busmap, cluster_met): busmap2 = {} # Map crossborder AC buses in case that they were not part of the k-mean clustering - if not (etrago.args["network_clustering"]["cluster_foreign_AC"]) & ( + if (not etrago.args["network_clustering"]["cluster_foreign_AC"]) & ( cluster_met in ["kmeans", "kmedoids-dijkstra"] ): buses_orig = network.buses.copy() ac_buses_out = buses_orig[ (buses_orig["country"] != "DE") & (buses_orig["carrier"] == "AC") ].dropna(subset=["country", "carrier"]) - + for bus_out in ac_buses_out.index: busmap2[bus_out] = bus_out @@ -147,7 +191,6 @@ def adjust_no_electric_network(etrago, busmap, cluster_met): (no_elec_to_cluster.cluster == busmap[bus_hv]) & (no_elec_to_cluster.carrier == carry) ).any(): - bus_cluster = no_elec_to_cluster[ (no_elec_to_cluster.cluster == busmap[bus_hv]) & (no_elec_to_cluster.carrier == carry) @@ -190,12 +233,12 @@ def adjust_no_electric_network(etrago, busmap, cluster_met): | (network.buses["carrier"] == "central_heat") | (network.buses["carrier"] == "central_heat_store") ].index: - busmap2[gas_bus] = gas_bus busmap = {**busmap, **busmap2, **busmap3} - # The new buses based on the eHV network for not electrical buses are created + # The new buses based on the eHV network for not electrical buses are + # created if cluster_met in ["kmeans", "kmedoids-dijkstra"]: network.madd( "Bus", @@ -218,18 +261,17 @@ def adjust_no_electric_network(etrago, busmap, cluster_met): def cluster_on_extra_high_voltage(etrago, busmap, with_time=True): - """Main function of the EHV-Clustering approach. Creates a new clustered + """ + Main function of the EHV-Clustering approach. Creates a new clustered pypsa.Network given a busmap mapping all bus_ids to other bus_ids of the same network. Parameters ---------- - network : pypsa.Network - Container for all network components. - + etrago : Etrago + An instance of the Etrago class busmap : dict Maps old bus_ids to new bus_ids. - with_time : bool If true time-varying data will also be aggregated. @@ -237,7 +279,6 @@ def cluster_on_extra_high_voltage(etrago, busmap, with_time=True): ------- network : pypsa.Network Container for all network components of the clustered network. - busmap : dict Maps old bus_ids to new bus_ids including all sectors. """ @@ -340,18 +381,20 @@ def cluster_on_extra_high_voltage(etrago, busmap, with_time=True): network_c.determine_network_topology() - return (network_c.copy(), busmap) + return (network_c, busmap) def delete_ehv_buses_no_lines(network): """ - When there are AC buses totally isolated, this function deletes them in order - to make possible the creation of busmaps based on electrical connections - and other purposes. Additionally, it throws a warning to inform the user - in case that any correction should be done. + When there are AC buses totally isolated, this function deletes them in + order to make possible the creation of busmaps based on electrical + connections and other purposes. 
Additionally, it throws a warning to + inform the user in case that any correction should be done. + Parameters ---------- network : pypsa.network + Returns ------- None @@ -378,13 +421,13 @@ def delete_ehv_buses_no_lines(network): logger.info( f""" - ----------------------- WARNING --------------------------- - THE FOLLOWING BUSES WERE DELETED BECAUSE THEY WERE ISOLATED: - {delete_buses.to_list()}. - IT IS POTENTIALLY A SIGN OF A PROBLEM IN THE DATASET - ----------------------- WARNING --------------------------- + ----------------------- WARNING --------------------------- + THE FOLLOWING BUSES WERE DELETED BECAUSE THEY WERE ISOLATED: + {delete_buses.to_list()}. + IT IS POTENTIALLY A SIGN OF A PROBLEM IN THE DATASET + ----------------------- WARNING --------------------------- - """ + """ ) network.mremove("Bus", delete_buses) @@ -406,9 +449,23 @@ def delete_ehv_buses_no_lines(network): def ehv_clustering(self): + """ + Cluster the network based on Extra High Voltage (EHV) grid. - if self.args["network_clustering_ehv"]: + If `network_clustering_ehv` argument is True, the function clusters the + network based on the EHV grid. + + Parameters + ---------- + self: Etrago object pointer + The object pointer for an Etrago object. + + Returns + ------- + None + """ + if self.args["network_clustering_ehv"]: logger.info("Start ehv clustering") self.network.generators.control = "PV" @@ -423,11 +480,28 @@ def ehv_clustering(self): self.update_busmap(busmap) self.buses_by_country() + logger.info("Network clustered to EHV-grid") def select_elec_network(etrago): + """ + Selects the electric network based on the clustering settings specified + in the Etrago object. + Parameters + ---------- + etrago : Etrago + An instance of the Etrago class + + Returns + ------- + Tuple containing: + elec_network : pypsa.Network + Contains the electric network + n_clusters : int + number of clusters used in the clustering process. + """ elec_network = etrago.network.copy() settings = etrago.args["network_clustering"] if settings["cluster_foreign_AC"]: @@ -511,52 +585,93 @@ def select_elec_network(etrago): return elec_network, n_clusters -def preprocessing(etrago): - def unify_foreign_buses(etrago): - - network = etrago.network.copy() +def unify_foreign_buses(etrago): + """ + Unifies foreign AC buses into clusters using the k-medoids algorithm with + Dijkstra distance as a similarity measure. - foreign_buses = network.buses[ - (network.buses.country != "DE") & (network.buses.carrier == "AC") - ] - foreign_buses_load = foreign_buses[ - (foreign_buses.index.isin(network.loads.bus)) - & (foreign_buses.carrier == "AC") - ] + Parameters + ---------- + etrago : Etrago + An instance of the Etrago class - lines_col = network.lines.columns - # The Dijkstra clustering works using the shortest electrical path between - # buses. In some cases, a bus has just DC connections, which are considered - # links. Therefore it is necessary to include temporarily the DC links - # into the lines table. - dc = network.links[network.links.carrier == "DC"] - str1 = "DC_" - dc.index = f"{str1}" + dc.index - lines_plus_dc = lines_plus_dc = pd.concat([network.lines, dc]) - lines_plus_dc = lines_plus_dc[lines_col] - lines_plus_dc["carrier"] = "AC" + Returns + ------- + busmap_foreign : pd.Series + A pandas series that maps the foreign buses to their respective + clusters. The series index is the bus ID and the values are the + corresponding cluster medoid IDs. 
+ """ + network = etrago.network.copy(with_time=False) - busmap_foreign = pd.Series(dtype=str) - medoids_foreign = pd.Series(dtype=str) + foreign_buses = network.buses[ + (network.buses.country != "DE") & (network.buses.carrier == "AC") + ] + foreign_buses_load = foreign_buses[ + (foreign_buses.index.isin(network.loads.bus)) + & (foreign_buses.carrier == "AC") + ] - for country, df in foreign_buses.groupby(by="country"): - weight = df.apply( - lambda x: 1 if x.name in foreign_buses_load.index else 0, - axis=1, + lines_col = network.lines.columns + # The Dijkstra clustering works using the shortest electrical path between + # buses. In some cases, a bus has just DC connections, which are considered + # links. Therefore it is necessary to include temporarily the DC links + # into the lines table. + dc = network.links[network.links.carrier == "DC"] + str1 = "DC_" + dc.index = f"{str1}" + dc.index + lines_plus_dc = lines_plus_dc = pd.concat([network.lines, dc]) + lines_plus_dc = lines_plus_dc[lines_col] + lines_plus_dc["carrier"] = "AC" + + busmap_foreign = pd.Series(dtype=str) + + for country, df in foreign_buses.groupby(by="country"): + weight = df.apply( + lambda x: 1 if x.name in foreign_buses_load.index else 0, + axis=1, + ) + n_clusters = (foreign_buses_load.country == country).sum() + if n_clusters < len(df): + ( + busmap_country, + medoid_idx_country, + ) = kmedoids_dijkstra_clustering( + etrago, df, lines_plus_dc, weight, n_clusters ) - n_clusters = (foreign_buses_load.country == country).sum() - - busmap_country, medoid_idx_country = kmedoids_dijkstra_clustering( - etrago, df, lines_plus_dc, weight, n_clusters) medoid_idx_country.index = medoid_idx_country.index.astype(str) busmap_country = busmap_country.map(medoid_idx_country) - busmap_foreign = pd.concat([busmap_foreign, busmap_country]) - medoids_foreign = pd.concat([medoids_foreign, medoid_idx_country]) - busmap_foreign.name = "foreign" - busmap_foreign.index.name = "bus" + else: + for bus in df.index: + busmap_foreign[bus] = bus + + busmap_foreign.name = "foreign" + busmap_foreign.index.name = "bus" - return busmap_foreign + return busmap_foreign + + +def preprocessing(etrago): + """ + Preprocesses an Etrago object to prepare it for network clustering. + + Parameters + ---------- + etrago : Etrago + An instance of the Etrago class + + Returns + ------- + network_elec : pypsa.Network + Container for all network components of the electrical network. + weight : pandas.Series + A pandas.Series with the bus weighting data. + n_clusters : int + The number of clusters to use for network clustering. + busmap_foreign : pandas.Series + The Series object with the foreign bus mapping data. + """ network = etrago.network settings = etrago.args["network_clustering"] @@ -633,18 +748,17 @@ def unify_foreign_buses(etrago): logger.info( f""" - ----------------------- WARNING --------------------------- - THE FOLLOWING BUSES HAVE NOT COUNTRY DATA: - {network.buses[network.buses.country.isna()].index.to_list()}. - THEY WILL BE ASSIGNED TO GERMANY, BUT IT IS POTENTIALLY A - SIGN OF A PROBLEM IN THE DATASET. - ----------------------- WARNING --------------------------- + ----------------------- WARNING --------------------------- + THE FOLLOWING BUSES HAVE NOT COUNTRY DATA: + {network.buses[network.buses.country.isna()].index.to_list()}. + THEY WILL BE ASSIGNED TO GERMANY, BUT IT IS POTENTIALLY A + SIGN OF A PROBLEM IN THE DATASET. 
+ ----------------------- WARNING --------------------------- - """ + """ ) network.buses.country.loc[network.buses.country.isna()] = "DE" - - + if settings["k_elec_busmap"] is False: busmap_foreign = unify_foreign_buses(etrago) else: @@ -655,10 +769,10 @@ def unify_foreign_buses(etrago): if settings["method"] == "kmedoids-dijkstra": lines_col = network_elec.lines.columns - # The Dijkstra clustering works using the shortest electrical path between - # buses. In some cases, a bus has just DC connections, which are considered - # links. Therefore it is necessary to include temporarily the DC links - # into the lines table. + # The Dijkstra clustering works using the shortest electrical path + # between buses. In some cases, a bus has just DC connections, which + # are considered links. Therefore it is necessary to include + # temporarily the DC links into the lines table. dc = network.links[network.links.carrier == "DC"] str1 = "DC_" dc.index = f"{str1}" + dc.index @@ -685,12 +799,34 @@ def unify_foreign_buses(etrago): def postprocessing(etrago, busmap, busmap_foreign, medoid_idx=None): + """ + Postprocessing function for network clustering. + + Parameters + ---------- + etrago : Etrago + An instance of the Etrago class + busmap : pandas.Series + mapping between buses and clusters + busmap_foreign : pandas.DataFrame + mapping between foreign buses and clusters + medoid_idx : pandas.DataFrame + mapping between cluster indices and medoids + + Returns + ------- + Tuple containing: + clustering : pypsa.network + Network object containing the clustered network + busmap : pandas.Series + Updated mapping between buses and clusters + """ settings = etrago.args["network_clustering"] method = settings["method"] num_clusters = settings["n_clusters_AC"] if settings["k_elec_busmap"] == False: - busmap.name = 'cluster' + busmap.name = "cluster" busmap_elec = pd.DataFrame(busmap.copy(), dtype="string") busmap_elec.index.name = "bus" busmap_elec = busmap_elec.join(busmap_foreign, how="outer") @@ -800,6 +936,33 @@ def weighting_for_scenario(network, save=None): """ def calc_availability_factor(gen): + + """ + Calculate the availability factor for a given generator. + + Parameters + ----------- + gen : pandas.DataFrame + A `pypsa.Network.generators` DataFrame. + + Returns + ------- + cf : float + The availability factor of the generator. + + Notes + ----- + Availability factor is defined as the ratio of the average power + output of the generator over the maximum power output capacity of + the generator. If the generator is time-dependent, its average power + output is calculated using the `network.generators_t` DataFrame. + Otherwise, its availability factor is obtained from the + `fixed_capacity_fac` dictionary, which contains pre-defined factors + for fixed capacity generators. If the generator's availability factor + cannot be found in the dictionary, it is assumed to be 1. 
+ + """ + if gen["carrier"] in time_dependent: cf = network.generators_t["p_max_pu"].loc[:, gen.name].mean() else: @@ -819,9 +982,12 @@ def calc_availability_factor(gen): fixed_capacity_fac = { # A value of 1 is given to power plants where its availability # does not depend on the weather + "industrial_gas_CHP": 1, "industrial_biomass_CHP": 1, "biomass": 1, "central_biomass_CHP": 1, + "central_gas_CHP": 1, + "OCGT": 1, "other_non_renewable": 1, "run_of_river": 0.50, "reservoir": 1, @@ -833,7 +999,9 @@ def calc_availability_factor(gen): "nuclear": 1, } - gen = network.generators[["bus", "carrier", "p_nom"]].copy() + gen = network.generators[network.generators.carrier != "load shedding"][ + ["bus", "carrier", "p_nom"] + ].copy() gen["cf"] = gen.apply(calc_availability_factor, axis=1) gen["weight"] = gen["p_nom"] * gen["cf"] @@ -870,38 +1038,49 @@ def calc_availability_factor(gen): def run_spatial_clustering(self): + """ + Main method for running spatial clustering on the electrical network. + Allows for clustering based on k-means and k-medoids dijkstra. - if self.args["network_clustering"]["active"]: + Parameters + ----------- + self + The object pointer for an Etrago object containing all relevant + parameters and data + Returns + ------- + None + """ + if self.args["network_clustering"]["active"]: self.network.generators.control = "PV" elec_network, weight, n_clusters, busmap_foreign = preprocessing(self) if self.args["network_clustering"]["method"] == "kmeans": - if self.args["network_clustering"]["k_elec_busmap"] == False: - logger.info("Start k-means Clustering") - busmap = kmean_clustering(self, elec_network, weight, n_clusters) + busmap = kmean_clustering( + self, elec_network, weight, n_clusters + ) medoid_idx = pd.Series(dtype=str) else: busmap = pd.Series(dtype=str) medoid_idx = pd.Series(dtype=str) elif self.args["network_clustering"]["method"] == "kmedoids-dijkstra": - if self.args["network_clustering"]["k_elec_busmap"] == False: - logger.info("Start k-medoids Dijkstra Clustering") - + busmap, medoid_idx = kmedoids_dijkstra_clustering( self, elec_network.buses, elec_network.lines, weight, - n_clusters + n_clusters, ) + else: busmap = pd.Series(dtype=str) medoid_idx = pd.Series(dtype=str) @@ -916,7 +1095,7 @@ def run_spatial_clustering(self): else: self.disaggregated_network = self.network.copy(with_time=False) - self.network = self.clustering.network.copy() + self.network = self.clustering.network self.buses_by_country() diff --git a/etrago/cluster/gas.py b/etrago/cluster/gas.py index 2c10f175c..f0dfcbf42 100644 --- a/etrago/cluster/gas.py +++ b/etrago/cluster/gas.py @@ -1,4 +1,22 @@ # -*- coding: utf-8 -*- +# Copyright 2016-2023 Flensburg University of Applied Sciences, +# Europa-Universität Flensburg, +# Centre for Sustainable Energy Systems, +# DLR-Institute for Networked Energy Systems + +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation; either version 3 of the +# License, or (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ # File description for read-the-docs """ gas.py defines the methods to cluster gas grid networks spatially for applications within the tool eTraGo.""" @@ -6,9 +24,6 @@ import os if "READTHEDOCS" not in os.environ: - import numpy as np - import pandas as pd - import pypsa.io as io from pypsa import Network from pypsa.networkclustering import ( aggregatebuses, @@ -16,29 +31,52 @@ busmap_by_kmeans, ) from six import iteritems + import numpy as np + import pandas as pd + import pypsa.io as io from etrago.cluster.spatial import ( - sum_with_inf, group_links, kmedoids_dijkstra_clustering, + sum_with_inf, ) from etrago.tools.utilities import * +__copyright__ = ( + "Flensburg University of Applied Sciences, " + "Europa-Universität Flensburg, " + "Centre for Sustainable Energy Systems, " + "DLR-Institute for Networked Energy Systems" +) +__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" +__author__ = ( + "MGlauer, MarlonSchlemminger, mariusves, BartelsJ, gnn, lukasoldi, " + "ulfmueller, lukasol, ClaraBuettner, CarlosEpia, KathiEsterl, " + "pieterhexen, fwitte, AmeliaNadal, cjbernal071421" +) + def preprocessing(etrago): """ - Create a bus map from the clustering of buses in space with a weighting. + Preprocesses the gas network data from the given Etrago object for the + spatial clustering process of the CH4 grid. Parameters ---------- - network : pypsa.Network - The buses must have coordinates x,y. + etrago : Etrago + An instance of the Etrago class + Returns ------- - busmap : pandas.Series - Mapping of network.buses to k-means clusters (indexed by - non-negative integers). + None + + Raises + ------ + ValueError + If `settings["n_clusters_gas"]` is less than or equal to the number of + neighboring country gas buses. """ + # Create network_ch4 (grid nodes in order to create the busmap basis) network_ch4 = Network() @@ -56,6 +94,13 @@ def preprocessing(etrago): ch4_filter & (network_ch4.buses["country"] != "DE") ).sum() + network_ch4.links = network_ch4.links.loc[ + network_ch4.links["bus0"].isin(network_ch4.buses.loc[ch4_filter].index) + & network_ch4.links["bus1"].isin( + network_ch4.buses.loc[ch4_filter].index + ) + ] + # select buses dependent on whether they should be clustered in (only DE or DE+foreign) if not settings["cluster_foreign_gas"]: network_ch4.buses = network_ch4.buses.loc[ @@ -66,7 +111,8 @@ def preprocessing(etrago): msg = ( "The number of clusters for the gas sector (" + str(settings["n_clusters_gas"]) - + ") must be higher than the number of neighboring country gas buses (" + + ") must be higher than the number of neighboring country " + + "gas buses (" + str(num_neighboring_country) + ")." ) @@ -75,10 +121,6 @@ def preprocessing(etrago): else: network_ch4.buses = network_ch4.buses.loc[ch4_filter] n_clusters = settings["n_clusters_gas"] - network_ch4.links = network_ch4.links.loc[ - network_ch4.links["bus0"].isin(network_ch4.buses.index) - & network_ch4.links["bus1"].isin(network_ch4.buses.index) - ] def weighting_for_scenario(ch4_buses, save=None): """ @@ -90,15 +132,17 @@ def weighting_for_scenario(ch4_buses, save=None): ---------- ch4_buses : pandas.DataFrame Dataframe with CH4 etrago.network.buses to weight. 
- save: path + save : str or bool Path to save weightings to as .csv + Returns ------- weightings : pandas.Series Integer weighting for each ch4_buses.index """ - MAX_WEIGHT = 1e5 # relevant only for foreign nodes with extra high CH4 generation capacity + MAX_WEIGHT = 1e5 # relevant only for foreign nodes with extra high + # CH4 generation capacity to_neglect = [ "CH4", @@ -119,8 +163,12 @@ def weighting_for_scenario(ch4_buses, save=None): ].index # get all generators and loads related to ch4_buses generators_ = pd.Series( - etrago.network.generators.index, - index=etrago.network.generators.bus, + etrago.network.generators[ + etrago.network.generators.carrier != "load shedding" + ].index, + index=etrago.network.generators[ + etrago.network.generators.carrier != "load shedding" + ].bus, ) buses_CH4_gen = generators_.index.intersection(rel_links.keys()) loads_ = pd.Series( @@ -171,6 +219,30 @@ def weighting_for_scenario(ch4_buses, save=None): def kmean_clustering_gas(etrago, network_ch4, weight, n_clusters): + """ + Performs K-means clustering on the gas network data in the given + `network_ch4` pypsa.Network object. + + Parameters + ---------- + etrago : Etrago + An instance of the Etrago class + network_ch4 : pypsa.Network + A Network object containing the gas network data. + weight : str or None + The name of the bus weighting column to use for clustering. If None, + unweighted clustering is performed. + n_clusters : int + The number of clusters to create. + + Returns + ------- + busmap : pandas.Series + A pandas.Series object mapping each bus in the CH4 network to its + corresponding cluster ID + None + None is returned because k-means clustering makes no use of medoids + """ settings = etrago.args["network_clustering"] busmap = busmap_by_kmeans( @@ -187,6 +259,23 @@ def kmean_clustering_gas(etrago, network_ch4, weight, n_clusters): def get_h2_clusters(etrago, busmap_ch4): + """ + Maps H2 buses to CH4 cluster IDds and creates unique H2 cluster IDs. + + Parameters + ---------- + etrago : Etrago + An instance of the Etrago class + busmap_ch4 : pd.Series + A Pandas Series mapping each bus in the CH4 network to its + corresponding cluster ID. + + Returns + ------- + busmap : pd.Series + A Pandas Series mapping each bus in the combined CH4 and H2 network + to its corresponding cluster ID. + """ # Mapping of H2 buses to new CH4 cluster IDs busmap_h2 = pd.Series( busmap_ch4.loc[etrago.ch4_h2_mapping.index].values, @@ -205,9 +294,31 @@ def get_h2_clusters(etrago, busmap_ch4): def gas_postprocessing(etrago, busmap, medoid_idx=None): + """ + Performs the postprocessing for the gas grid clustering based on the + provided busmap + and returns the clustered network. + + Parameters + ---------- + etrago : Etrago + An instance of the Etrago class + busmap : pd.Series + A Pandas Series mapping each bus to its corresponding cluster ID. + medoid_idx : pd.Series + A pandas.Series object containing the medoid indices for the gas + network. + + Returns + ------- + network_gasgrid_c : pypsa.Network + A pypsa.Network containing the clustered network. + busmap : pd.Series + A Pandas Series mapping each bus to its corresponding cluster ID. 
+ """ settings = etrago.args["network_clustering"] - if settings["k_gas_busmap"] == False: + if settings["k_gas_busmap"] is False: if settings["method_gas"] == "kmeans": busmap.index.name = "bus_id" busmap.name = "cluster" @@ -233,8 +344,9 @@ def gas_postprocessing(etrago, busmap, medoid_idx=None): + str(settings["n_clusters_gas"]) + "_result.csv" ) - - busmap = get_h2_clusters(etrago, busmap) + + if 'H2' in etrago.network.buses.carrier.unique(): + busmap = get_h2_clusters(etrago, busmap) # Add all other buses to busmap missing_idx = list( @@ -319,7 +431,8 @@ def gas_postprocessing(etrago, busmap, medoid_idx=None): # Overwrite p_nom of links with carrier "H2_feedin" (eGon2035 only) if etrago.args["scn_name"] == "eGon2035": - H2_energy_share = 0.05053 # H2 energy share via volumetric share outsourced in a mixture of H2 and CH4 with 15 %vol share + H2_energy_share = 0.05053 # H2 energy share via volumetric share + # outsourced in a mixture of H2 and CH4 with 15 %vol share feed_in = network_gasgrid_c.links.loc[ network_gasgrid_c.links.carrier == "H2_feedin" ] @@ -334,7 +447,8 @@ def gas_postprocessing(etrago, busmap, medoid_idx=None): | (pipeline_capacities["bus1"] == bus), "p_nom", ].sum() - # multiply total pipeline capacity with H2 energy share corresponding to volumetric share + # multiply total pipeline capacity with H2 energy share + # corresponding to volumetric share network_gasgrid_c.links.loc[ (network_gasgrid_c.links["bus1"].values == bus) & (network_gasgrid_c.links["carrier"].values == "H2_feedin"), @@ -408,12 +522,12 @@ def highestInteger(potentially_numbers): Parameters ---------- - potentially_numbers : pandas.core.series.Series + potentially_numbers : pandas.Series Series with mixed dtypes, potentially containing numbers. Returns ------- - int + highest : int Highest integer found in series. """ highest = 0 @@ -430,7 +544,8 @@ def highestInteger(potentially_numbers): def simultaneous_sector_coupling( network, busmap, carrier_based, carrier_to_cluster ): - """Cluster sector coupling technology based on multiple connected carriers. + """ + Cluster sector coupling technology based on multiple connected carriers. The topology of the sector coupling technology must be in a way, that the links connected to other sectors do only point inwards. E.g. for the heat @@ -527,7 +642,8 @@ def simultaneous_sector_coupling( def consecutive_sector_coupling( network, busmap, carrier_based, carrier_to_cluster ): - """Cluster sector coupling technology based on single connected carriers. + """ + Cluster sector coupling technology based on single connected carriers. The topology of the sector coupling technology must be in a way, that the links connected to other sectors do only point inwards. E.g. for the heat @@ -547,8 +663,8 @@ def consecutive_sector_coupling( Returns ------- - dict - Busmap for the sector coupling cluster. + busmap_sc : dict + Busmap for the sector coupled cluster. """ next_bus_id = highestInteger(busmap.values) + 1 buses_to_skip = network.buses[ @@ -639,7 +755,8 @@ def consecutive_sector_coupling( def sc_multi_carrier_based(buses_to_cluster, connected_links): - """Create busmap for sector coupled carrier based on multiple other carriers. + """ + Create busmap for sector coupled carrier based on multiple other carriers. Parameters ---------- @@ -652,8 +769,8 @@ def sc_multi_carrier_based(buses_to_cluster, connected_links): Returns ------- - dict - Busmap for the sector cupled carrier. + busmap : dict + Busmap for the sector coupled carrier. 
""" clusters = pd.Series() for bus_id in buses_to_cluster.index: @@ -676,7 +793,8 @@ def sc_multi_carrier_based(buses_to_cluster, connected_links): def sc_single_carrier_based(connected_links): - """Create busmap for sector coupled carrier based on single other carrier. + """ + Create busmap for sector coupled carrier based on single other carrier. Parameters ---------- @@ -686,8 +804,8 @@ def sc_single_carrier_based(connected_links): Returns ------- - dict - Busmap for the sector cupled carrier. + busmap : dict + Busmap for the sector coupled carrier. """ busmap = {} clusters = connected_links["bus0_clustered"].unique() @@ -707,6 +825,35 @@ def get_clustering_from_busmap( bus_strategies=dict(), one_port_strategies=dict(), ): + """ + Aggregates components of the given network based on a bus mapping and + returns a clustered gas grid pypsa.Network. + + Parameters + ---------- + network : pypsa.Network + The input pypsa.Network object + busmap : pandas.Sereies : + A mapping of buses to clusters + line_length_factor : float + A factor used to adjust the length of new links created during + aggregation. Default is 1.0. + with_time : bool + Determines whether to copy the time-dependent properties of the input + network to the output network. Default is True. + bus_strategies : dict + A dictionary of custom strategies to use during the aggregation step. + Default is an empty dictionary. + one_port_strategies : dict + A dictionary of custom strategies to use during the one-port component + aggregation step. Default is an empty dictionary. + + Returns + ------- + network_gasgrid_c : pypsa.Network + A new gas grid pypsa.Network object with aggregated components based + on the bus mapping. + """ network_gasgrid_c = Network() # Aggregate buses @@ -779,74 +926,90 @@ def get_clustering_from_busmap( def run_spatial_clustering_gas(self): - settings = self.args["network_clustering"] - - if settings["active"]: - self.network.generators.control = "PV" - method = settings["method_gas"] - logger.info(f"Start {method} clustering GAS") - - gas_network, weight, n_clusters = preprocessing(self) - - if method == "kmeans": - if settings["k_gas_busmap"]: - busmap = pd.read_csv( - settings["k_gas_busmap"], - index_col="bus_id", - dtype=pd.StringDtype(), - ).squeeze() - medoid_idx = None - else: - busmap, medoid_idx = kmean_clustering_gas( - self, gas_network, weight, n_clusters - ) + """ + Performs spatial clustering on the gas network using either K-means or + K-medoids-Dijkstra algorithm. Updates the network topology by aggregating + buses and links, and then performs postprocessing to finalize the changes. - elif method == "kmedoids-dijkstra": - if settings["k_gas_busmap"]: - busmap = pd.read_csv( - settings["k_gas_busmap"], - index_col="bus_id", - dtype=pd.StringDtype(), - ) - medoid_idx = pd.Series( - busmap["medoid_idx"].unique(), - index=busmap["cluster"].unique(), - dtype=pd.StringDtype(), - ) - busmap = busmap["cluster"] + Returns + -------- + None + + Raises + ------- + ValueError: If the selected method is not "kmeans" or "kmedoids-dijkstra". 
+ + """ + if "CH4" in self.network.buses.carrier.values: + settings = self.args["network_clustering"] + + if settings["active"]: + self.network.generators.control = "PV" + method = settings["method_gas"] + logger.info(f"Start {method} clustering GAS") + + gas_network, weight, n_clusters = preprocessing(self) + + if method == "kmeans": + if settings["k_gas_busmap"]: + busmap = pd.read_csv( + settings["k_gas_busmap"], + index_col="bus_id", + dtype=pd.StringDtype(), + ).squeeze() + medoid_idx = None + else: + busmap, medoid_idx = kmean_clustering_gas( + self, gas_network, weight, n_clusters + ) + + elif method == "kmedoids-dijkstra": + if settings["k_gas_busmap"]: + busmap = pd.read_csv( + settings["k_gas_busmap"], + index_col="bus_id", + dtype=pd.StringDtype(), + ) + medoid_idx = pd.Series( + busmap["medoid_idx"].unique(), + index=busmap["cluster"].unique(), + dtype=pd.StringDtype(), + ) + busmap = busmap["cluster"] + + else: + busmap, medoid_idx = kmedoids_dijkstra_clustering( + self, + gas_network.buses, + gas_network.links, + weight, + n_clusters, + ) else: - busmap, medoid_idx = kmedoids_dijkstra_clustering( - self, - gas_network.buses, - gas_network.links, - weight, - n_clusters, + msg = ( + 'Please select "kmeans" or "kmedoids-dijkstra" as ' + "spatial clustering method for the gas network" + ) + raise ValueError(msg) + self.network, busmap = gas_postprocessing(self, busmap, medoid_idx) + + self.update_busmap(busmap) + + logger.info( + "GAS Network clustered to {} DE-buses and {} foreign buses with {} algorithm.".format( + len( + self.network.buses.loc[ + (self.network.buses.carrier == "CH4") + & (self.network.buses.country == "DE") + ] + ), + len( + self.network.buses.loc[ + (self.network.buses.carrier == "CH4") + & (self.network.buses.country != "DE") + ] + ), + method, ) - - else: - msg = ( - 'Please select "kmeans" or "kmedoids-dijkstra" as ' - "spatial clustering method for the gas network" - ) - raise ValueError(msg) - self.network, busmap = gas_postprocessing(self, busmap, medoid_idx) - - self.update_busmap(busmap) - logger.info( - "GAS Network clustered to {} DE-buses and {} foreign buses with {} algorithm.".format( - len( - self.network.buses.loc[ - (self.network.buses.carrier == "CH4") - & (self.network.buses.country == "DE") - ] - ), - len( - self.network.buses.loc[ - (self.network.buses.carrier == "CH4") - & (self.network.buses.country != "DE") - ] - ), - method, ) - ) diff --git a/etrago/cluster/snapshot.py b/etrago/cluster/snapshot.py index 3d8f7a215..fbbd1f68e 100644 --- a/etrago/cluster/snapshot.py +++ b/etrago/cluster/snapshot.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2016-2018 Flensburg University of Applied Sciences, +# Copyright 2016-2023 Flensburg University of Applied Sciences, # Europa-Universität Flensburg, # Centre for Sustainable Energy Systems @@ -18,116 +18,140 @@ # along with this program. If not, see . # File description for read-the-docs -""" This module contains functions for calculating representative days/weeks -based on a pyPSA network object. It is designed to be used for the `lopf` -method. Essentially the tsam package -( https://github.com/FZJ-IEK3-VSA/tsam ), which is developed by -Leander Kotzur is used. - -Remaining questions/tasks: - -- Does it makes sense to cluster normed values? 
-- Include scaling method for yearly sums +""" This module contains functions for reducing the complexity of a PyPSA network in temporal dimension by +a) downsampling to every n-th snapshot +b) clustering to typical periods (eg days, weeks) +c) clustering to segments of variable length +Essentially used is the tsam package +( https://github.com/FZJ-IEK3-VSA/tsam ) developed by Leander Kotzur et al. """ import pandas as pd import os -if 'READTHEDOCS' not in os.environ: + +if "READTHEDOCS" not in os.environ: import pyomo.environ as po import tsam.timeseriesaggregation as tsam -__copyright__ = ("Flensburg University of Applied Sciences, " - "Europa-Universität Flensburg, " - "Centre for Sustainable Energy Systems") +__copyright__ = ( + "Flensburg University of Applied Sciences, " + "Europa-Universität Flensburg, " + "Centre for Sustainable Energy Systems" +) __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" -__author__ = "Simon Hilpert" +__author__ = "ClaraBuettner, ulfmueller, KathiEsterl, simnh, wheitkoetter, BartelsJ, AmeliaNadal" def snapshot_clustering(self): """ + Function to call the snapshot clustering function with the respecting method and settings. + + Raises + ------ + ValueError + When calling a non-available function. + + Returns + ------- + None. + """ - if self.args['snapshot_clustering']['active'] == True: + if self.args["snapshot_clustering"]["active"] == True: # save second network for optional dispatch disaggregation - if self.args["dispatch_disaggregation"] == True: + if self.args["temporal_disaggregation"]["active"] == True: self.network_tsa = self.network.copy() - if self.args['snapshot_clustering']['method'] == 'segmentation' : - - self.network = run(network=self.network.copy(), - n_clusters=1, - segmented_to = self.args['snapshot_clustering']['n_segments'], - extreme_periods = self.args['snapshot_clustering']['extreme_periods'] - ) - - elif self.args['snapshot_clustering']['method'] == 'typical_periods' : - - self.network = run(network=self.network.copy(), - n_clusters=self.args['snapshot_clustering']['n_clusters'], - how=self.args['snapshot_clustering']['how'], - extreme_periods = self.args['snapshot_clustering']['extreme_periods'] - ) - else : - raise ValueError("Type of clustering should be 'typical_periods' or 'segmentation'") - -def tsam_cluster(timeseries_df, - typical_periods=10, - how='daily', - extremePeriodMethod = 'None', - segmentation = False, - segment_no = 10, - segm_hoursperperiod = 24): + if self.args["snapshot_clustering"]["method"] == "segmentation": + self.network = run( + network=self.network.copy(), + n_clusters=1, + segmented_to=self.args["snapshot_clustering"]["n_segments"], + extreme_periods=self.args["snapshot_clustering"][ + "extreme_periods" + ], + ) + + elif self.args["snapshot_clustering"]["method"] == "typical_periods": + self.network = run( + network=self.network.copy(), + n_clusters=self.args["snapshot_clustering"]["n_clusters"], + how=self.args["snapshot_clustering"]["how"], + extreme_periods=self.args["snapshot_clustering"][ + "extreme_periods" + ], + ) + else: + raise ValueError( + "Type of clustering should be 'typical_periods' or 'segmentation'" + ) + + +def tsam_cluster( + timeseries_df, + typical_periods=10, + how="daily", + extremePeriodMethod="None", + segmentation=False, + segment_no=10, + segm_hoursperperiod=24, +): """ + Conducts the clustering of the snapshots for temporal aggregation with the respecting method. 
+
     Parameters
     ----------
-    df : pd.DataFrame
-        DataFrame with timeseries to cluster
-    typical_periods: Number of typical Periods (or clusters)
-    how: {'daily', 'weekly'}
-    extremePeriodMethod: {'None','append','new_cluster_center',
-                          'replace_cluster_center'}, default: 'None'
-        Method how to integrate extreme Periods into to the typical period profiles.
-        'None': No integration at all.
-        'append': append typical Periods to cluster centers
-        'new_cluster_center': add the extreme period as additional cluster
-            center. It is checked then for all Periods if they fit better
-            to the this new center or their original cluster center.
-        'replace_cluster_center': replaces the cluster center of the
-            cluster where the extreme period belongs to with the periodly
-            profile of the extreme period. (Worst case system design)
-    segmentation: Is given by the run-function, can be True or False
-    segment_no: Only used when segmentation is true, the number of segments
-    segm_hoursperperiod: Only used when segmentation is true, defines the length of a cluster period
+    timeseries_df : pd.DataFrame
+        Dataframe with timeseries to cluster.
+    typical_periods : int, optional
+        Number of clusters for typical_periods. The default is 10.
+    how : {'daily', 'weekly', 'monthly'}, optional
+        Definition of the period length for typical_periods. The default is
+        'daily'.
+    extremePeriodMethod : {'None','append','new_cluster_center', 'replace_cluster_center'}, optional
+        Method to consider extreme snapshots in the reduced timeseries.
+        The default is 'None'.
+    segmentation : boolean, optional
+        Argument to activate the segmentation method. The default is False.
+    segment_no : int, optional
+        Number of segments for segmentation. The default is 10.
+    segm_hoursperperiod : int, optional
+        Only used if segmentation is True; ensures that the segments cover
+        all snapshots. The default is 24.

     Returns
     -------
-    df_cluster
-    cluster_weights
-    dates
-    hours
-    df_i_h
+    df_cluster : pd.DataFrame
+        Cluster information after clustering to typical periods.
+    cluster_weights : dict
+        Weightings per cluster after clustering to typical periods.
+    dates : DatetimeIndex
+        Dates of the clusters after clustering to typical periods.
+    hours : int
+        Hours per typical period.
+    df_i_h : pd.DataFrame
+        Mapping of each snapshot to its candidate period.
     timeseries : pd.DataFrame
-        Clustered timeseries, only used if segmentation is True
+        Information on the segments after segmentation.
+ """ - if how == 'daily': + if how == "daily": hours = 24 - period = ' days' + period = " days" - elif how == 'weekly': + elif how == "weekly": hours = 168 - period = ' weeks' + period = " weeks" - elif how == 'monthly': + elif how == "monthly": hours = 720 - period = ' months' + period = " months" - elif how == 'hourly': + elif how == "hourly": hours = 1 - period = ' hours' + period = " hours" if segmentation: hoursPerPeriod = segm_hoursperperiod + hours = 1 else: hoursPerPeriod = hours @@ -135,81 +159,96 @@ def tsam_cluster(timeseries_df, # residual load should not impact cluster findings, # but only be the optional parameter to choose an extreme period weight = pd.Series(data=1, index=timeseries_df.columns) - weight['residual_load'] = 0 + weight["residual_load"] = 0 weight = weight.to_dict() aggregation = tsam.TimeSeriesAggregation( timeseries_df, noTypicalPeriods=typical_periods, - extremePeriodMethod = extremePeriodMethod, - addPeakMin = ['residual_load'], - addPeakMax = ['residual_load'], + extremePeriodMethod=extremePeriodMethod, + addPeakMin=["residual_load"], + addPeakMax=["residual_load"], rescaleClusterPeriods=False, hoursPerPeriod=hoursPerPeriod, - clusterMethod='hierarchical', - segmentation = segmentation, - noSegments = segment_no, - weightDict = weight) + clusterMethod="hierarchical", + segmentation=segmentation, + noSegments=segment_no, + weightDict=weight, + ) if segmentation: - print('Snapshot clustering to ' + str(segment_no) + ' segments' + '\n' + - 'Using extreme period method: ' + extremePeriodMethod) + print( + "Snapshot clustering to " + + str(segment_no) + + " segments" + + "\n" + + "Using extreme period method: " + + extremePeriodMethod + ) else: - print('Snapshot clustering to ' + str(typical_periods) + period + '\n' + - 'Using extreme period method: ' + extremePeriodMethod) + print( + "Snapshot clustering to " + + str(typical_periods) + + period + + "\n" + + "Using extreme period method: " + + extremePeriodMethod + ) timeseries_creator = aggregation.createTypicalPeriods() timeseries = timeseries_creator.copy() - #If Segmentation is True, insert 'Dates' and 'SegmentNo' column in timeseries + # If Segmentation is True, insert 'Dates' and 'SegmentNo' column in timeseries if segmentation == True: - - weights=timeseries.index.get_level_values(2) - dates_df= timeseries_df.index.get_level_values(0) - dates=[] - segmentno=[] - wcount=0 - count=0 + weights = timeseries.index.get_level_values(2) + dates_df = timeseries_df.index.get_level_values(0) + dates = [] + segmentno = [] + wcount = 0 + count = 0 for weight in weights: dates.append(dates_df[wcount]) wcount = wcount + weight segmentno.append(count) - count = count +1 + count = count + 1 timeseries.insert(0, "dates", dates, True) timeseries.insert(1, "SegmentNo", segmentno, True) timeseries.insert(2, "SegmentDuration", weights, True) - timeseries.set_index(['dates', 'SegmentNo', 'SegmentDuration'], inplace=True) + timeseries.set_index( + ["dates", "SegmentNo", "SegmentDuration"], inplace=True + ) - if 'Unnamed: 0' in timeseries.columns: - del timeseries['Unnamed: 0'] - if 'Segment Step' in timeseries.columns: - del timeseries['Segment Step'] - #print(timeseries) + if "Unnamed: 0" in timeseries.columns: + del timeseries["Unnamed: 0"] + if "Segment Step" in timeseries.columns: + del timeseries["Segment Step"] + # print(timeseries) cluster_weights = aggregation.clusterPeriodNoOccur - clusterOrder =aggregation.clusterOrder - clusterCenterIndices= aggregation.clusterCenterIndices + clusterOrder = aggregation.clusterOrder 
+    clusterCenterIndices = aggregation.clusterCenterIndices

     if segmentation == True:
-
-        if extremePeriodMethod != 'None':
-
-            timeseries = segmentation_extreme_periods(timeseries_df, timeseries, extremePeriodMethod)
+        if extremePeriodMethod != "None":
+            timeseries = segmentation_extreme_periods(
+                timeseries_df, timeseries, extremePeriodMethod
+            )

     else:
-
-        if extremePeriodMethod == 'new_cluster_center':
+        if extremePeriodMethod == "new_cluster_center":
             for i in aggregation.extremePeriods.keys():
                 clusterCenterIndices.insert(
-                    aggregation.extremePeriods[i]['newClusterNo'],
-                    aggregation.extremePeriods[i]['stepNo'])
+                    aggregation.extremePeriods[i]["newClusterNo"],
+                    aggregation.extremePeriods[i]["stepNo"],
+                )

-        if extremePeriodMethod == 'append':
+        if extremePeriodMethod == "append":
             for i in aggregation.extremePeriods.keys():
                 clusterCenterIndices.insert(
-                    aggregation.extremePeriods[i]['clusterNo'],
-                    aggregation.extremePeriods[i]['stepNo'])
+                    aggregation.extremePeriods[i]["clusterNo"],
+                    aggregation.extremePeriods[i]["stepNo"],
+                )

     # get all index for every hour of that day of the clusterCenterIndices
     start = []
@@ -230,218 +269,357 @@ def tsam_cluster(timeseries_df,
     # get the original Datetimeindex
     dates = timeseries_df.iloc[nrhours].index

-    #get list of representative days
-    representative_day=[]
+    # get list of representative days
+    representative_day = []

-    #cluster:medoid des jeweiligen Clusters
+    # cluster: medoid of the respective cluster
     dic_clusterCenterIndices = dict(enumerate(clusterCenterIndices))
     for i in clusterOrder:
         representative_day.append(dic_clusterCenterIndices[i])

-    #get list of last and first hour of representative days
-    last_hour_datetime=[]
+    # get list of last and first hour of representative days
+    last_hour_datetime = []
     for i in representative_day:
         last_hour = i * hours + hours - 1
         last_hour_datetime.append(timeseries_df.index[last_hour])

-    #create a dataframe (index=nr.
of day in a year/candidate) + df_cluster = pd.DataFrame( + { + "Cluster": clusterOrder, # Cluster of the day + "RepresentativeDay": representative_day, # representative day of the cluster + "last_hour_RepresentativeDay": last_hour_datetime, + } + ) # last hour of the cluster df_cluster.index = df_cluster.index + 1 - df_cluster.index.name = 'Candidate' + df_cluster.index.name = "Candidate" - #create a dataframe each timeseries (h) and its candiddate day (i) df_i_h + # create a dataframe each timeseries (h) and its candiddate day (i) df_i_h nr_day = [] - x = len(timeseries_df.index)/hours+1 + x = len(timeseries_df.index) / hours + 1 - for i in range(1,int(x)): - j=1 + for i in range(1, int(x)): + j = 1 while j <= hours: nr_day.append(i) - j=j+1 - df_i_h = pd.DataFrame({'Timeseries': timeseries_df.index, - 'Candidate_day': nr_day}) - df_i_h.set_index('Timeseries',inplace=True) + j = j + 1 + df_i_h = pd.DataFrame( + {"Timeseries": timeseries_df.index, "Candidate_day": nr_day} + ) + df_i_h.set_index("Timeseries", inplace=True) return df_cluster, cluster_weights, dates, hours, df_i_h, timeseries -def segmentation_extreme_periods(timeseries_df, timeseries, extremePeriodMethod): - - # find maximum / minimum value in residual load - maxi = timeseries_df['residual_load'].idxmax() - mini = timeseries_df['residual_load'].idxmin() - # add timestep if it is not already calculated - if maxi not in timeseries.index.get_level_values('dates'): +def segmentation_extreme_periods( + timeseries_df, timeseries, extremePeriodMethod +): + """ + Function to consider extreme snapshots while using segmentation. - # identifiy timestep, adapt it to timeseries-df and add it - max_val = timeseries_df.loc[maxi].copy() - max_val['SegmentNo'] = len(timeseries) - max_val['SegmentDuration'] = 1 - max_val['dates'] = max_val.name - max_val = pd.DataFrame(max_val).transpose() + Parameters + ---------- + timeseries_df : pd.DataFrame + Dataframe wit timeseries to cluster. + timeseries : pd.DataFrame + Information on segments after segmentation. + extremePeriodMethod : {'None','append','new_cluster_center', 'replace_cluster_center'}, optional + Method to consider extreme snapshots in reduced timeseries. The default is 'None'. - if extremePeriodMethod == 'append': + Raises + ------ + ValueError + When calling wrong method to consider extreme values. - max_val.set_index(['dates', 'SegmentNo', 'SegmentDuration'],inplace=True) - timeseries = timeseries.append(max_val) - timeseries = timeseries.sort_values(by='dates') + Returns + ------- + timeseries : pd.DataFrame + Information on segments including extreme snapshots after segmentation. 
+ """ - # split up segment in which the extreme timestep was added - i=-1 - for date in timeseries.index.get_level_values('dates'): - if date < maxi: - i = i+1 - else: - timeseries['SegmentDuration_Extreme']=timeseries.index.get_level_values('SegmentDuration') - old_row = timeseries.iloc[i].copy() - old_row = pd.DataFrame(old_row).transpose() - - delta_t = timeseries.index.get_level_values('dates')[i+1]-timeseries.index.get_level_values('dates')[i] - delta_t = delta_t.total_seconds()/3600 - timeseries['SegmentDuration_Extreme'].iloc[i]=delta_t - - timeseries_df['row_no']=range(0,len(timeseries_df)) - new_row = int(timeseries_df.loc[maxi]['row_no'])+1 - new_date = timeseries_df[timeseries_df.row_no==new_row].index - - if new_date.isin(timeseries.index.get_level_values('dates')): - timeseries['dates'] = timeseries.index.get_level_values('dates') - timeseries['SegmentNo'] = timeseries.index.get_level_values('SegmentNo') - timeseries['SegmentDuration'] = timeseries['SegmentDuration_Extreme'] - timeseries.drop('SegmentDuration_Extreme', axis=1, inplace=True) - timeseries.set_index(['dates', 'SegmentNo', 'SegmentDuration'],inplace=True) - break - else: - new_row = timeseries_df.iloc[new_row].copy() - new_row.drop('row_no', inplace=True) - new_row['SegmentNo'] = len(timeseries) - new_row['SegmentDuration'] = old_row['SegmentDuration_Extreme'][0] - delta_t - 1 - new_row['dates'] = new_row.name - new_row = pd.DataFrame(new_row).transpose() - new_row.set_index(['dates', 'SegmentNo', 'SegmentDuration'],inplace=True) - for col in new_row.columns: - new_row[col][0] = old_row[col][0] - - timeseries['dates'] = timeseries.index.get_level_values('dates') - timeseries['SegmentNo'] = timeseries.index.get_level_values('SegmentNo') - timeseries['SegmentDuration'] = timeseries['SegmentDuration_Extreme'] - timeseries.drop('SegmentDuration_Extreme', axis=1, inplace=True) - timeseries.set_index(['dates', 'SegmentNo', 'SegmentDuration'],inplace=True) - timeseries = timeseries.append(new_row) - timeseries = timeseries.sort_values(by='dates') - break - - elif extremePeriodMethod == 'replace_cluster_center': - - # replace segment in which the extreme timestep was added - i=-1 - for date in timeseries.index.get_level_values('dates'): - if date < maxi: - i = i+1 - else: - if i ==-1: - i=0 - max_val['SegmentDuration'] = timeseries.index.get_level_values('SegmentDuration')[i] - max_val.set_index(['dates', 'SegmentNo', 'SegmentDuration'],inplace=True) - timeseries.drop(timeseries.index[i], inplace=True) - timeseries = timeseries.append(max_val) - timeseries = timeseries.sort_values(by='dates') + # find maximum / minimum value in residual load + maxi = timeseries_df["residual_load"].idxmax() + mini = timeseries_df["residual_load"].idxmin() + + # add timestep if it is not already calculated + if maxi not in timeseries.index.get_level_values("dates"): + # identifiy timestep, adapt it to timeseries-df and add it + max_val = timeseries_df.loc[maxi].copy() + max_val["SegmentNo"] = len(timeseries) + max_val["SegmentDuration"] = 1 + max_val["dates"] = max_val.name + max_val = pd.DataFrame(max_val).transpose() + + if extremePeriodMethod == "append": + max_val.set_index( + ["dates", "SegmentNo", "SegmentDuration"], inplace=True + ) + timeseries = timeseries.append(max_val) + timeseries = timeseries.sort_values(by="dates") + + # split up segment in which the extreme timestep was added + i = -1 + for date in timeseries.index.get_level_values("dates"): + if date < maxi: + i = i + 1 + else: + timeseries[ + "SegmentDuration_Extreme" + ] 
= timeseries.index.get_level_values("SegmentDuration") + old_row = timeseries.iloc[i].copy() + old_row = pd.DataFrame(old_row).transpose() + + delta_t = ( + timeseries.index.get_level_values("dates")[i + 1] + - timeseries.index.get_level_values("dates")[i] + ) + delta_t = delta_t.total_seconds() / 3600 + timeseries["SegmentDuration_Extreme"].iloc[i] = delta_t + + timeseries_df["row_no"] = range(0, len(timeseries_df)) + new_row = int(timeseries_df.loc[maxi]["row_no"]) + 1 + new_date = timeseries_df[ + timeseries_df.row_no == new_row + ].index + + if new_date.isin( + timeseries.index.get_level_values("dates") + ): + timeseries[ + "dates" + ] = timeseries.index.get_level_values("dates") + timeseries[ + "SegmentNo" + ] = timeseries.index.get_level_values("SegmentNo") + timeseries["SegmentDuration"] = timeseries[ + "SegmentDuration_Extreme" + ] + timeseries.drop( + "SegmentDuration_Extreme", axis=1, inplace=True + ) + timeseries.set_index( + ["dates", "SegmentNo", "SegmentDuration"], + inplace=True, + ) break - - else: - raise ValueError("Choose 'append' or 'replace_cluster_center' for consideration of extreme periods with segmentation method") - - # add timestep if it is not already calculated - if mini not in timeseries.index.get_level_values('dates'): - - # identifiy timestep, adapt it to timeseries-df and add it - min_val = timeseries_df.loc[mini].copy() - min_val['SegmentNo'] = len(timeseries)+1 - min_val['SegmentDuration'] = 1 - min_val['dates'] = min_val.name - min_val = pd.DataFrame(min_val).transpose() - - if extremePeriodMethod == 'append': - - min_val.set_index(['dates', 'SegmentNo', 'SegmentDuration'],inplace=True) - timeseries = timeseries.append(min_val) - timeseries = timeseries.sort_values(by='dates') - - # split up segment in which the extreme timestep was added - i=-1 - for date in timeseries.index.get_level_values('dates'): - if date < mini: - i = i+1 else: - timeseries['SegmentDuration_Extreme']=timeseries.index.get_level_values('SegmentDuration') - old_row = timeseries.iloc[i].copy() - old_row = pd.DataFrame(old_row).transpose() - - delta_t = timeseries.index.get_level_values('dates')[i+1]-timeseries.index.get_level_values('dates')[i] - delta_t = delta_t.total_seconds()/3600 - timeseries['SegmentDuration_Extreme'].iloc[i]=delta_t - - timeseries_df['row_no']=range(0,len(timeseries_df)) - new_row = int(timeseries_df.loc[mini]['row_no'])+1 - new_date = timeseries_df[timeseries_df.row_no==new_row].index - - if new_date.isin(timeseries.index.get_level_values('dates')): - timeseries['dates'] = timeseries.index.get_level_values('dates') - timeseries['SegmentNo'] = timeseries.index.get_level_values('SegmentNo') - timeseries['SegmentDuration'] = timeseries['SegmentDuration_Extreme'] - timeseries.drop('SegmentDuration_Extreme', axis=1, inplace=True) - timeseries.set_index(['dates', 'SegmentNo', 'SegmentDuration'],inplace=True) - break - else: - new_row = timeseries_df.iloc[new_row].copy() - new_row.drop('row_no', inplace=True) - new_row['SegmentNo'] = len(timeseries)+1 - new_row['SegmentDuration'] = old_row['SegmentDuration_Extreme'][0] - delta_t - 1 - new_row['dates'] = new_row.name - new_row = pd.DataFrame(new_row).transpose() - new_row.set_index(['dates', 'SegmentNo', 'SegmentDuration'],inplace=True) - for col in new_row.columns: - new_row[col][0] = old_row[col][0] - timeseries['dates'] = timeseries.index.get_level_values('dates') - timeseries['SegmentNo'] = timeseries.index.get_level_values('SegmentNo') - timeseries['SegmentDuration'] = timeseries['SegmentDuration_Extreme'] - 
timeseries.drop('SegmentDuration_Extreme', axis=1, inplace=True) - timeseries.set_index(['dates', 'SegmentNo', 'SegmentDuration'],inplace=True) - timeseries = timeseries.append(new_row) - timeseries = timeseries.sort_values(by='dates') + new_row = timeseries_df.iloc[new_row].copy() + new_row.drop("row_no", inplace=True) + new_row["SegmentNo"] = len(timeseries) + new_row["SegmentDuration"] = ( + old_row["SegmentDuration_Extreme"][0] - delta_t - 1 + ) + new_row["dates"] = new_row.name + new_row = pd.DataFrame(new_row).transpose() + new_row.set_index( + ["dates", "SegmentNo", "SegmentDuration"], + inplace=True, + ) + for col in new_row.columns: + new_row[col][0] = old_row[col][0] + + timeseries[ + "dates" + ] = timeseries.index.get_level_values("dates") + timeseries[ + "SegmentNo" + ] = timeseries.index.get_level_values("SegmentNo") + timeseries["SegmentDuration"] = timeseries[ + "SegmentDuration_Extreme" + ] + timeseries.drop( + "SegmentDuration_Extreme", axis=1, inplace=True + ) + timeseries.set_index( + ["dates", "SegmentNo", "SegmentDuration"], + inplace=True, + ) + timeseries = timeseries.append(new_row) + timeseries = timeseries.sort_values(by="dates") break - elif extremePeriodMethod == 'replace_cluster_center': - - # replace segment in which the extreme timestep was added - i=-1 - for date in timeseries.index.get_level_values('dates'): - if date < mini: - i = i+1 - else: - if i ==-1: - i=0 - min_val['SegmentDuration'] = timeseries.index.get_level_values('SegmentDuration')[i] - min_val.set_index(['dates', 'SegmentNo', 'SegmentDuration'],inplace=True) - timeseries.drop(timeseries.index[i], inplace=True) - timeseries = timeseries.append(min_val) - timeseries = timeseries.sort_values(by='dates') + elif extremePeriodMethod == "replace_cluster_center": + # replace segment in which the extreme timestep was added + i = -1 + for date in timeseries.index.get_level_values("dates"): + if date < maxi: + i = i + 1 + else: + if i == -1: + i = 0 + max_val[ + "SegmentDuration" + ] = timeseries.index.get_level_values("SegmentDuration")[i] + max_val.set_index( + ["dates", "SegmentNo", "SegmentDuration"], inplace=True + ) + timeseries.drop(timeseries.index[i], inplace=True) + timeseries = timeseries.append(max_val) + timeseries = timeseries.sort_values(by="dates") + break + + else: + raise ValueError( + "Choose 'append' or 'replace_cluster_center' for consideration of extreme periods with segmentation method" + ) + + # add timestep if it is not already calculated + if mini not in timeseries.index.get_level_values("dates"): + # identifiy timestep, adapt it to timeseries-df and add it + min_val = timeseries_df.loc[mini].copy() + min_val["SegmentNo"] = len(timeseries) + 1 + min_val["SegmentDuration"] = 1 + min_val["dates"] = min_val.name + min_val = pd.DataFrame(min_val).transpose() + + if extremePeriodMethod == "append": + min_val.set_index( + ["dates", "SegmentNo", "SegmentDuration"], inplace=True + ) + timeseries = timeseries.append(min_val) + timeseries = timeseries.sort_values(by="dates") + + # split up segment in which the extreme timestep was added + i = -1 + for date in timeseries.index.get_level_values("dates"): + if date < mini: + i = i + 1 + else: + timeseries[ + "SegmentDuration_Extreme" + ] = timeseries.index.get_level_values("SegmentDuration") + old_row = timeseries.iloc[i].copy() + old_row = pd.DataFrame(old_row).transpose() + + delta_t = ( + timeseries.index.get_level_values("dates")[i + 1] + - timeseries.index.get_level_values("dates")[i] + ) + delta_t = delta_t.total_seconds() / 3600 + 
timeseries["SegmentDuration_Extreme"].iloc[i] = delta_t + + timeseries_df["row_no"] = range(0, len(timeseries_df)) + new_row = int(timeseries_df.loc[mini]["row_no"]) + 1 + new_date = timeseries_df[ + timeseries_df.row_no == new_row + ].index + + if new_date.isin( + timeseries.index.get_level_values("dates") + ): + timeseries[ + "dates" + ] = timeseries.index.get_level_values("dates") + timeseries[ + "SegmentNo" + ] = timeseries.index.get_level_values("SegmentNo") + timeseries["SegmentDuration"] = timeseries[ + "SegmentDuration_Extreme" + ] + timeseries.drop( + "SegmentDuration_Extreme", axis=1, inplace=True + ) + timeseries.set_index( + ["dates", "SegmentNo", "SegmentDuration"], + inplace=True, + ) break + else: + new_row = timeseries_df.iloc[new_row].copy() + new_row.drop("row_no", inplace=True) + new_row["SegmentNo"] = len(timeseries) + 1 + new_row["SegmentDuration"] = ( + old_row["SegmentDuration_Extreme"][0] - delta_t - 1 + ) + new_row["dates"] = new_row.name + new_row = pd.DataFrame(new_row).transpose() + new_row.set_index( + ["dates", "SegmentNo", "SegmentDuration"], + inplace=True, + ) + for col in new_row.columns: + new_row[col][0] = old_row[col][0] + timeseries[ + "dates" + ] = timeseries.index.get_level_values("dates") + timeseries[ + "SegmentNo" + ] = timeseries.index.get_level_values("SegmentNo") + timeseries["SegmentDuration"] = timeseries[ + "SegmentDuration_Extreme" + ] + timeseries.drop( + "SegmentDuration_Extreme", axis=1, inplace=True + ) + timeseries.set_index( + ["dates", "SegmentNo", "SegmentDuration"], + inplace=True, + ) + timeseries = timeseries.append(new_row) + timeseries = timeseries.sort_values(by="dates") + break + + elif extremePeriodMethod == "replace_cluster_center": + # replace segment in which the extreme timestep was added + i = -1 + for date in timeseries.index.get_level_values("dates"): + if date < mini: + i = i + 1 + else: + if i == -1: + i = 0 + min_val[ + "SegmentDuration" + ] = timeseries.index.get_level_values("SegmentDuration")[i] + min_val.set_index( + ["dates", "SegmentNo", "SegmentDuration"], inplace=True + ) + timeseries.drop(timeseries.index[i], inplace=True) + timeseries = timeseries.append(min_val) + timeseries = timeseries.sort_values(by="dates") + break + + else: + raise ValueError( + "Choose 'append' or 'replace_cluster_center' for consideration of extreme periods with segmentation method" + ) + + if "row_no" in timeseries.columns: + timeseries.drop("row_no", axis=1, inplace=True) + + return timeseries + + +def run( + network, + n_clusters=None, + how="daily", + segmented_to=False, + extreme_periods="None", +): + """ + Function to call the respecting snapshot clustering function and export the result to a csv-file. - else: - raise ValueError("Choose 'append' or 'replace_cluster_center' for consideration of extreme periods with segmentation method") - - if 'row_no' in timeseries.columns: - timeseries.drop('row_no', axis=1, inplace=True) + Parameters + ---------- + network : pypsa.Network object + Container for all network components. + n_clusters : int, optional + Number of clusters for typical_periods. The default is None. + how : {'daily', 'weekly', 'monthly'}, optional + Definition of period for typical_periods. The default is 'daily'. + segmented_to : int, optional + Number of segments for segmentation. The default is False. + extremePeriodMethod : {'None','append','new_cluster_center', 'replace_cluster_center'}, optional + Method to consider extreme snapshots in reduced timeseries. The default is 'None'. 
- return timeseries + Returns + ------- + network : pypsa.Network object + Container for all network components. -def run(network, n_clusters=None, how='daily', segmented_to=False, extreme_periods='None'): - """ """ + if segmented_to is not False: segment_no = segmented_to segmentation = True @@ -451,92 +629,136 @@ def run(network, n_clusters=None, how='daily', segmented_to=False, extreme_perio segmentation = False if not extreme_periods: - extreme_periods = 'None' + extreme_periods = "None" # calculate clusters - df_cluster, cluster_weights, dates, hours, df_i_h, timeseries = tsam_cluster( - prepare_pypsa_timeseries(network), - typical_periods = n_clusters, - how='daily', - extremePeriodMethod = extreme_periods, - segmentation = segmentation, - segment_no = segment_no, - segm_hoursperperiod = network.snapshots.size) + ( + df_cluster, + cluster_weights, + dates, + hours, + df_i_h, + timeseries, + ) = tsam_cluster( + prepare_pypsa_timeseries(network), + typical_periods=n_clusters, + how="daily", + extremePeriodMethod=extreme_periods, + segmentation=segmentation, + segment_no=segment_no, + segm_hoursperperiod=network.snapshots.size, + ) if segmentation != False: - timeseries.to_csv('timeseries_segmentation=' + str(segment_no) + '.csv') + pd.DataFrame( + timeseries.reset_index(), + columns=["dates", "SegmentNo", "SegmentDuration"], + ).set_index("SegmentNo").to_csv( + "timeseries_segmentation=" + str(segment_no) + ".csv" + ) else: - if how=='daily': - howie='days' - path='typical_days' - elif how=='weekly': - howie='weeks' - path='typical_weeks' - elif how=='monthly': - howie='months' - path='typical_months' - elif how=='hourly': - howie='hours' - path='typical_hours' - df_cluster.to_csv('cluster_typical-periods=' + str(n_clusters) + howie + '.csv') + if how == "daily": + howie = "days" + path = "typical_days" + elif how == "weekly": + howie = "weeks" + path = "typical_weeks" + elif how == "monthly": + howie = "months" + path = "typical_months" + elif how == "hourly": + howie = "hours" + path = "typical_hours" + df_cluster.to_csv( + "cluster_typical-periods=" + str(n_clusters) + howie + ".csv" + ) network.cluster = df_cluster network.cluster_ts = df_i_h - update_data_frames(network, cluster_weights, dates, hours, timeseries, segmentation) + update_data_frames( + network, cluster_weights, dates, hours, timeseries, segmentation + ) return network -def prepare_pypsa_timeseries(network, normed=False): +def prepare_pypsa_timeseries(network): """ + Prepares timeseries and residual load timeseries for clustering. + + Parameters + ---------- + network : pypsa.Network object + Container for all network components. + + Returns + ------- + df : pd.DataFrame + Timeseries to be considered when clustering. 
+ """ loads = network.loads_t.p_set.copy() - loads.columns = 'L' + loads.columns + loads.columns = "L" + loads.columns renewables = network.generators_t.p_max_pu.mul( - network.generators.p_nom[ - network.generators_t.p_max_pu.columns], axis = 1).copy() - renewables.columns = 'G' + renewables.columns + network.generators.p_nom[network.generators_t.p_max_pu.columns], axis=1 + ).copy() + renewables.columns = "G" + renewables.columns - residual_load=pd.DataFrame() - residual_load['residual_load']=loads.sum(axis=1)-renewables.sum(axis=1) + residual_load = pd.DataFrame() + residual_load["residual_load"] = loads.sum(axis=1) - renewables.sum(axis=1) df = pd.concat([renewables, loads, residual_load], axis=1) return df -def update_data_frames(network, cluster_weights, dates, hours, timeseries, segmentation): - """ Updates the snapshots, snapshots weights and the dataframes based on +def update_data_frames( + network, cluster_weights, dates, hours, timeseries, segmentation +): + """ + Updates the snapshots, snapshot weightings and the dataframes based on the original data in the network and the medoids created by clustering these original data. Parameters - ----------- - network : pyPSA network object - cluster_weights: dictionary - dates: Datetimeindex - + ---------- + network : pypsa.Network object + Container for all network components. + cluster_weights : dict + Weightings per cluster after clustering to typical periods. + dates : DatetimeIndex + Dates of clusters after clustering to typical periods. + hours : int + Hours per typical period. + timeseries : pd.DataFrame + Information on segments after segmentation. + segmentation : boolean + Checks if segmentation of clustering to typical periods has been used. Returns ------- - network + network : pypsa.Network object + Container for all network components. 
""" - if segmentation: - network.snapshots = timeseries.index.get_level_values(0) - network.snapshot_weightings['objective'] = pd.Series(data = timeseries.index.get_level_values(2).values, - index = timeseries.index.get_level_values(0)) - network.snapshot_weightings['stores'] = pd.Series(data = timeseries.index.get_level_values(2).values, - index = timeseries.index.get_level_values(0)) - network.snapshot_weightings['generators'] = pd.Series(data = timeseries.index.get_level_values(2).values, - index = timeseries.index.get_level_values(0)) + network.snapshot_weightings["objective"] = pd.Series( + data=timeseries.index.get_level_values(2).values, + index=timeseries.index.get_level_values(0), + ) + network.snapshot_weightings["stores"] = pd.Series( + data=timeseries.index.get_level_values(2).values, + index=timeseries.index.get_level_values(0), + ) + network.snapshot_weightings["generators"] = pd.Series( + data=timeseries.index.get_level_values(2).values, + index=timeseries.index.get_level_values(0), + ) else: - network.snapshots = dates network.snapshot_weightings = network.snapshot_weightings.loc[dates] @@ -547,9 +769,13 @@ def update_data_frames(network, cluster_weights, dates, hours, timeseries, segme snapshot_weightings.append(i) x += 1 for i in range(len(network.snapshot_weightings)): - network.snapshot_weightings['objective'][i] = snapshot_weightings[i] - network.snapshot_weightings['stores'][i] = snapshot_weightings[i] - network.snapshot_weightings['generators'][i] = snapshot_weightings[i] + network.snapshot_weightings["objective"][i] = snapshot_weightings[ + i + ] + network.snapshot_weightings["stores"][i] = snapshot_weightings[i] + network.snapshot_weightings["generators"][i] = snapshot_weightings[ + i + ] # put the snapshot in the right order network.snapshots.sort_values() @@ -561,36 +787,49 @@ def update_data_frames(network, cluster_weights, dates, hours, timeseries, segme def skip_snapshots(self): + """ + Conducts the downsapling to every n-th snapshot. + + Returns + ------- + None. 
+ + """ # save second network for optional dispatch disaggregation - if self.args["dispatch_disaggregation"] == True and self.args['snapshot_clustering']['active'] == False: + if ( + self.args["temporal_disaggregation"]["active"] == True + and self.args["snapshot_clustering"]["active"] == False + ): self.network_tsa = self.network.copy() - n_skip = self.args['skip_snapshots'] + n_skip = self.args["skip_snapshots"] if n_skip: + last_weight = ( + int( + ( + self.network.snapshots[-1] + - self.network.snapshots[::n_skip][-1] + ).seconds + / 3600 + ) + + 1 + ) self.network.snapshots = self.network.snapshots[::n_skip] - self.network.snapshot_weightings['objective'] = n_skip - self.network.snapshot_weightings['stores'] = n_skip - self.network.snapshot_weightings['generators'] = n_skip - -#################################### -def manipulate_storage_invest(network, costs=None, wacc=0.05, lifetime=15): - # default: 4500 € / MW, high 300 €/MW - crf = (1 / wacc) - (wacc / ((1 + wacc) ** lifetime)) - network.storage_units.capital_cost = costs / crf - - -def write_lpfile(network=None, path=None): - network.model.write(path, - io_options={'symbolic_solver_labels': True}) - - -def fix_storage_capacity(network, resultspath, n_clusters): # "network" added - path = resultspath.strip('daily') - values = pd.read_csv(path + 'storage_capacity.csv')[n_clusters].values - network.storage_units.p_nom_max = values - network.storage_units.p_nom_min = values - resultspath = 'compare-' + resultspath \ No newline at end of file + self.network.snapshot_weightings["objective"] = n_skip + self.network.snapshot_weightings["stores"] = n_skip + self.network.snapshot_weightings["generators"] = n_skip + + if last_weight < n_skip: + self.network.snapshot_weightings.loc[ + self.network.snapshot_weightings.index[-1] + ]["objective"] = last_weight + self.network.snapshot_weightings.loc[ + self.network.snapshot_weightings.index[-1] + ]["stores"] = last_weight + self.network.snapshot_weightings.loc[ + self.network.snapshot_weightings.index[-1] + ]["generators"] = last_weight diff --git a/etrago/cluster/spatial.py b/etrago/cluster/spatial.py index 3dbf57d98..43af10b9b 100755 --- a/etrago/cluster/spatial.py +++ b/etrago/cluster/spatial.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2016-2018 Flensburg University of Applied Sciences, +# Copyright 2016-2023 Flensburg University of Applied Sciences, # Europa-Universität Flensburg, # Centre for Sustainable Energy Systems, # DLR-Institute for Networked Energy Systems @@ -23,15 +23,12 @@ import os if "READTHEDOCS" not in os.environ: - import logging - import multiprocessing as mp from itertools import product from math import ceil from pickle import dump + import logging + import multiprocessing as mp - import networkx as nx - import numpy as np - import pandas as pd from networkx import NetworkXNoPath from pypsa.networkclustering import ( _flatten_multiindex, @@ -40,6 +37,10 @@ get_clustering_from_busmap, ) from sklearn.cluster import KMeans + from threadpoolctl import threadpool_limits + import networkx as nx + import numpy as np + import pandas as pd from etrago.tools.utilities import * @@ -52,12 +53,31 @@ "DLR-Institute for Networked Energy Systems" ) __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" -__author__ = "s3pp, wolfbunke, ulfmueller, lukasol" +__author__ = ( + "MGlauer, MarlonSchlemminger, mariusves, BartelsJ, gnn, lukasoldi, " + "ulfmueller, lukasol, ClaraBuettner, CarlosEpia, KathiEsterl, " + "pieterhexen, fwitte, AmeliaNadal, cjbernal071421" +) 
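To make the skip_snapshots change above concrete before turning to the spatial helpers below: downsampling keeps every n-th snapshot and gives it a weight of n hours, and the new last_weight logic shrinks the weight of the final kept snapshot when the horizon is not divisible by n. A hedged sketch of that arithmetic, independent of eTraGo; the 8760-hour index and n_skip value are made up:

```python
import pandas as pd

# invented example horizon: one year of hourly snapshots, keep every 7th
snapshots = pd.date_range("2011-01-01", periods=8760, freq="h")
n_skip = 7

kept = snapshots[::n_skip]
weights = pd.Series(n_skip, index=kept, dtype=float)

# hours represented by the last kept snapshot, up to and including the
# original last snapshot
last_weight = int((snapshots[-1] - kept[-1]).total_seconds() / 3600) + 1
if last_weight < n_skip:
    weights.iloc[-1] = last_weight

# every original hour is represented exactly once
assert weights.sum() == len(snapshots)
```

One caveat worth flagging: assignments of the form snapshot_weightings.loc[idx]["objective"] = value, as in the hunk above, rely on pandas chained indexing and may not write through to the underlying frame; snapshot_weightings.loc[idx, "objective"] = value is the safer spelling.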
# TODO: Workaround because of agg def _make_consense_links(x): + """ + Ensure that all elements in the input Series `x` are identical, or that + they are all NaN. + + Parameters + ---------- + x : pandas.Series + A Series containing the values to be checked for consensus. + + Returns + ------- + object + The value of the first element in the Series `x`. + """ + v = x.iat[0] assert ( x == v @@ -159,21 +179,18 @@ def group_links(network, with_time=True, carriers=None, cus_strateg=dict()): with_time : bool says if the network object contains timedependent series. carriers : list of strings - Describe which typed of carriers should be aggregated. The default is None. + Describe which type of carriers should be aggregated. The default is + None. strategies : dictionary custom strategies to perform the aggregation Returns ------- - new_df : links aggregated based on bus0, bus1 and carrier - new_pnl : links time series aggregated + new_df : + links aggregated based on bus0, bus1 and carrier + new_pnl : + links time series aggregated """ - if carriers is None: - carriers = network.links.carrier.unique() - - links_agg_b = network.links.carrier.isin(carriers) - links = network.links.loc[links_agg_b] - grouper = [links.bus0, links.bus1, links.carrier] def normed_or_uniform(x): return ( @@ -182,6 +199,32 @@ def normed_or_uniform(x): else pd.Series(1.0 / len(x), x.index) ) + def arrange_dc_bus0_bus1(network): + dc_links = network.links[network.links.carrier == "DC"].copy() + dc_links["n0"] = dc_links.apply( + lambda x: x.bus0 if x.bus0 < x.bus1 else x.bus1, axis=1 + ) + dc_links["n1"] = dc_links.apply( + lambda x: x.bus0 if x.bus0 > x.bus1 else x.bus1, axis=1 + ) + dc_links["bus0"] = dc_links["n0"] + dc_links["bus1"] = dc_links["n1"] + dc_links.drop(columns=["n0", "n1"], inplace=True) + + network.links.drop(index=dc_links.index, inplace=True) + network.links = pd.concat([network.links, dc_links]) + + return network + + network = arrange_dc_bus0_bus1(network) + + if carriers is None: + carriers = network.links.carrier.unique() + + links_agg_b = network.links.carrier.isin(carriers) + links = network.links.loc[links_agg_b] + grouper = [links.bus0, links.bus1, links.carrier] + weighting = links.p_nom.groupby(grouper, axis=0).transform( normed_or_uniform ) @@ -224,7 +267,8 @@ def normed_or_uniform(x): def graph_from_edges(edges): - """Constructs an undirected multigraph from a list containing data on + """ + Constructs an undirected multigraph from a list containing data on weighted edges. Parameters @@ -234,13 +278,12 @@ def graph_from_edges(edges): Returns ------- - M : :class:`networkx.classes.multigraph.MultiGraph + M : :class:`networkx.classes.multigraph.MultiGraph` """ M = nx.MultiGraph() for e in edges: - n0, n1, weight, key = e M.add_edge(n0, n1, weight=weight, key=key) @@ -251,17 +294,16 @@ def graph_from_edges(edges): def gen(nodes, n, graph): # TODO There could be a more convenient way of doing this. This generators # single purpose is to prepare data for multiprocessing's starmap function. - """Generator for applying multiprocessing. + """ + Generator for applying multiprocessing. Parameters ---------- nodes : list List of nodes in the system. - n : int Number of desired multiprocessing units. - - graph : :class:`networkx.classes.multigraph.MultiGraph + graph : :class:`networkx.classes.multigraph.MultiGraph` Graph representation of an electrical grid. 
Returns @@ -276,14 +318,14 @@ def gen(nodes, n, graph): def shortest_path(paths, graph): - """Finds the minimum path lengths between node pairs defined in paths. + """ + Finds the minimum path lengths between node pairs defined in paths. Parameters ---------- paths : list List of pairs containing a source and a target node - - graph : :class:`networkx.classes.multigraph.MultiGraph + graph : :class:`networkx.classes.multigraph.MultiGraph` Graph representation of an electrical grid. Returns @@ -315,7 +357,8 @@ def shortest_path(paths, graph): def busmap_by_shortest_path(etrago, scn_name, fromlvl, tolvl, cpu_cores=4): - """Creates a busmap for the EHV-Clustering between voltage levels based + """ + Creates a busmap for the EHV-Clustering between voltage levels based on dijkstra shortest path. The result is automatically written to the `model_draft` on the [www.openenergy-platform.org] database with the name `ego_grid_pf_hv_busmap` and the attributes scn_name @@ -326,21 +369,16 @@ def busmap_by_shortest_path(etrago, scn_name, fromlvl, tolvl, cpu_cores=4): Parameters ---------- - network : pypsa.Network object + network : pypsa.Network Container for all network components. - session : sqlalchemy.orm.session.Session object Establishes interactions with the database. - scn_name : str Name of the scenario. - fromlvl : list List of voltage-levels to cluster. - tolvl : list List of voltage-levels to remain. - cpu_cores : int Number of CPU-cores. @@ -447,18 +485,17 @@ def busmap_by_shortest_path(etrago, scn_name, fromlvl, tolvl, cpu_cores=4): def busmap_from_psql(etrago): - """Retrieves busmap from `model_draft.ego_grid_pf_hv_busmap` on the + """ + Retrieves busmap from `model_draft.ego_grid_pf_hv_busmap` on the [www.openenergy-platform.org] by a given scenario name. If this busmap does not exist, it is created with default values. Parameters ---------- - network : pypsa.Network object + network : pypsa.Network Container for all network components. - session : sqlalchemy.orm.session.Session object Establishes interactions with the database. - scn_name : str Name of the scenario. @@ -483,7 +520,6 @@ def busmap_from_psql(etrago): filter_version = "testcase" def fetch(): - query = ( etrago.session.query( egon_etrago_hv_busmap.bus0, egon_etrago_hv_busmap.bus1 @@ -519,109 +555,107 @@ def fetch(): def kmean_clustering(etrago, selected_network, weight, n_clusters): - """Main function of the k-mean clustering approach. Maps an original + """ + Main function of the k-mean clustering approach. Maps an original network to a new one with adjustable number of nodes and new coordinates. Parameters ---------- - network : :class:`pypsa.Network + network : pypsa.Network Container for all network components. - n_clusters : int Desired number of clusters. - load_cluster : boolean Loads cluster coordinates from a former calculation. - line_length_factor : float Factor to multiply the crow-flies distance between new buses in order to get new line lengths. - remove_stubs: boolean Removes stubs and stubby trees (i.e. sequentially reducing dead-ends). - use_reduced_coordinates: boolean If True, do not average cluster coordinates, but take from busmap. - bus_weight_tocsv : str Creates a bus weighting based on conventional generation and load and save it to a csv file. - bus_weight_fromcsv : str Loads a bus weighting from a csv file to apply it to the clustering algorithm. Returns ------- - network : pypsa.Network object + network : pypsa.Network Container for all network components. 
""" network = etrago.network kmean_settings = etrago.args["network_clustering"] - # remove stubs - if kmean_settings["remove_stubs"]: - network.determine_network_topology() - busmap = busmap_by_stubs(network) - network.generators["weight"] = network.generators["p_nom"] - aggregate_one_ports = network.one_port_components.copy() - aggregate_one_ports.discard("Generator") - - # reset coordinates to the new reduced guys, rather than taking an - # average (copied from pypsa.networkclustering) - if kmean_settings["use_reduced_coordinates"]: - # TODO : FIX THIS HACK THAT HAS UNEXPECTED SIDE-EFFECTS, - # i.e. network is changed in place!! - network.buses.loc[busmap.index, ["x", "y"]] = network.buses.loc[ - busmap, ["x", "y"] - ].values - - clustering = get_clustering_from_busmap( - network, - busmap, - aggregate_generators_weighted=True, - one_port_strategies=strategies_one_ports(), - generator_strategies=strategies_generators(), - aggregate_one_ports=aggregate_one_ports, - line_length_factor=kmean_settings["line_length_factor"], + + with threadpool_limits(limits=kmean_settings["CPU_cores"], user_api=None): + # remove stubs + if kmean_settings["remove_stubs"]: + network.determine_network_topology() + busmap = busmap_by_stubs(network) + network.generators["weight"] = network.generators["p_nom"] + aggregate_one_ports = network.one_port_components.copy() + aggregate_one_ports.discard("Generator") + + # reset coordinates to the new reduced guys, rather than taking an + # average (copied from pypsa.networkclustering) + if kmean_settings["use_reduced_coordinates"]: + # TODO : FIX THIS HACK THAT HAS UNEXPECTED SIDE-EFFECTS, + # i.e. network is changed in place!! + network.buses.loc[ + busmap.index, ["x", "y"] + ] = network.buses.loc[busmap, ["x", "y"]].values + + clustering = get_clustering_from_busmap( + network, + busmap, + aggregate_generators_weighted=True, + one_port_strategies=strategies_one_ports(), + generator_strategies=strategies_generators(), + aggregate_one_ports=aggregate_one_ports, + line_length_factor=kmean_settings["line_length_factor"], + ) + etrago.network = clustering.network + + weight = weight.groupby(busmap.values).sum() + + # k-mean clustering + busmap = busmap_by_kmeans( + selected_network, + bus_weightings=pd.Series(weight), + n_clusters=n_clusters, + n_init=kmean_settings["n_init"], + max_iter=kmean_settings["max_iter"], + tol=kmean_settings["tol"], + random_state=kmean_settings["random_state"], ) - etrago.network = clustering.network - - weight = weight.groupby(busmap.values).sum() - - # k-mean clustering - busmap = busmap_by_kmeans( - selected_network, - bus_weightings=pd.Series(weight), - n_clusters=n_clusters, - n_init=kmean_settings["n_init"], - max_iter=kmean_settings["max_iter"], - tol=kmean_settings["tol"], - random_state=kmean_settings["random_state"], - ) return busmap def dijkstras_algorithm(buses, connections, medoid_idx, cpu_cores): - """Function for combination of k-medoids Clustering and Dijkstra's algorithm. - Creates a busmap assigning the nodes of a original network - to the nodes of a clustered network - considering the electrical distances based on Dijkstra's shortest path. - Parameters - centers - ---------- - network : pypsa.Network object - Container for all network components. 
- - medoid_idx : pd.Series - Indices of k-medoids - busmap_kmedoid: pd.Series - Busmap based on k-medoids clustering - cpu_cores: string - numbers of cores used during multiprocessing - Returns - ------- - busmap (format: with labels) + """ + Function for combination of k-medoids Clustering and Dijkstra's algorithm. + Creates a busmap assigning the nodes of an original network to the nodes + of a clustered network considering the electrical distances based on + Dijkstra's shortest path. + + Parameters + ---------- + buses : pandas.DataFrame + Buses of the original network. + connections : pandas.DataFrame + Connections of the original network (lines or links). + medoid_idx : pandas.Series + Indices of the k-medoids. + cpu_cores : string + Number of cores used during multiprocessing. + + Returns + ------- + busmap : pandas.Series + Mapping from bus ids to medoid ids """ # original data @@ -646,8 +680,8 @@ def dijkstras_algorithm(buses, connections, medoid_idx, cpu_cores): else: cpu_cores = int(cpu_cores) - # calculation of shortest path between original points and k-medoids centers - # using multiprocessing + # calculation of shortest path between original points and k-medoids + # centers using multiprocessing p = mp.Pool(cpu_cores) chunksize = ceil(len(ppathss) / cpu_cores) container = p.starmap(shortest_path, gen(ppathss, chunksize, M)) @@ -679,57 +713,89 @@ def dijkstras_algorithm(buses, connections, medoid_idx, cpu_cores): return busmap -def kmedoids_dijkstra_clustering(etrago, buses, connections, weight, n_clusters): +def kmedoids_dijkstra_clustering( + etrago, buses, connections, weight, n_clusters +): + """ + Applies a k-medoids clustering to the given network and afterwards calls + Dijkstra's algorithm to take the network's topology into account in the + spatial clustering. + + Parameters + ---------- + etrago : Etrago + An instance of the Etrago class + buses : pandas.DataFrame + DataFrame with information about the buses of the network. + connections : pandas.DataFrame + DataFrame with information about the connections of the network + (links or lines). + weight : pandas.Series + Series with the weight for each bus. + n_clusters : int + The number of clusters to create. + + Returns + ------- + Tuple containing: + busmap : pandas.Series + Series containing the mapping of buses to their respective medoids + medoid_idx : pandas.Series + Series containing the medoid indices + """ settings = etrago.args["network_clustering"] - # remove stubs - if settings["remove_stubs"]: + # n_jobs was deprecated for the function fit(). 
scikit-learn recommends + # to use threadpool_limits: + # https://scikit-learn.org/stable/computing/parallelism.html + with threadpool_limits(limits=settings["CPU_cores"], user_api=None): + # remove stubs + if settings["remove_stubs"]: + logger.info( + """options remove_stubs and use_reduced_coordinates not + reasonable for k-medoids Dijkstra Clustering""" + ) - logger.info( - "options remove_stubs and use_reduced_coordinates not reasonable for k-medoids Dijkstra Clustering" + bus_weightings = pd.Series(weight) + buses_i = buses.index + points = buses.loc[buses_i, ["x", "y"]].values.repeat( + bus_weightings.reindex(buses_i).astype(int), axis=0 ) - bus_weightings = pd.Series(weight) - buses_i = buses.index - points = buses.loc[buses_i, ["x", "y"]].values.repeat( - bus_weightings.reindex(buses_i).astype(int), axis=0 - ) - - kmeans = KMeans( - init="k-means++", - n_clusters=n_clusters, - n_init=settings["n_init"], - max_iter=settings["max_iter"], - tol=settings["tol"], - random_state=settings["random_state"], - ) - kmeans.fit(points) + kmeans = KMeans( + init="k-means++", + n_clusters=n_clusters, + n_init=settings["n_init"], + max_iter=settings["max_iter"], + tol=settings["tol"], + random_state=settings["random_state"], + ) + kmeans.fit(points) - busmap = pd.Series( - data=kmeans.predict(buses.loc[buses_i, ["x", "y"]]), - index=buses_i, - dtype=object, - ) + busmap = pd.Series( + data=kmeans.predict(buses.loc[buses_i, ["x", "y"]]), + index=buses_i, + dtype=object, + ) - # identify medoids per cluster -> k-medoids clustering + # identify medoids per cluster -> k-medoids clustering - distances = pd.DataFrame( - data=kmeans.transform(buses.loc[buses_i, ["x", "y"]].values), - index=buses_i, - dtype=object, - ) - distances = distances.apply(pd.to_numeric) + distances = pd.DataFrame( + data=kmeans.transform(buses.loc[buses_i, ["x", "y"]].values), + index=buses_i, + dtype=object, + ) + distances = distances.apply(pd.to_numeric) - medoid_idx = distances.idxmin() + medoid_idx = distances.idxmin() - # dijkstra's algorithm - busmap = dijkstras_algorithm( - buses, - connections, - medoid_idx, - etrago.args["network_clustering"]["CPU_cores"], - ) - busmap.index.name = "bus_id" + # dijkstra's algorithm + busmap = dijkstras_algorithm( + buses, + connections, + medoid_idx, + etrago.args["network_clustering"]["CPU_cores"], + ) + busmap.index.name = "bus_id" return busmap, medoid_idx diff --git a/etrago/tools/__init__.py b/etrago/tools/__init__.py index 497ec4c5a..170b87144 100644 --- a/etrago/tools/__init__.py +++ b/etrago/tools/__init__.py @@ -1,8 +1,38 @@ +"""Multi purpose tools that don't fit anywhere else in eTraGo. """ -""" -__copyright__ = "tba" -__license__ = "tba" -__author__ = "tba" +__copyright__ = ( + "Copyright (C) 2023" + " Otto-von-Guericke-University Magdeburg," + " Research group for theoretical computer science" +) +__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" +__author__ = "gnn " + + +def noop(*ignored_arguments, **ignored_keyword_arguments): + """Do nothing. + + Accept all kinds of arguments, ignore them and do nothing. + """ + pass + +class Noops: + """Provide arbitrarily named methods that do nothing. + Any attribute access will return a method that does nothing, i.e. + all methods of this object are :py:func:`noop`s. Normally you don't + need to instantiate this class. All instances behave the same, so + the containing module provides one called :py:obj:`noops` which you + can import and use. 
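Returning to the k-medoids step a few hunks up: the medoid of each cluster is recovered from plain KMeans output by taking, per cluster centre, the bus with the smallest transformed distance. A sketch with invented coordinates in place of buses[["x", "y"]]:

import numpy as np
import pandas as pd
from sklearn.cluster import KMeans

coords = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0], [5.1, 5.2]])
km = KMeans(n_clusters=2, n_init=10, random_state=0).fit(coords)

# KMeans.transform returns each point's distance to every cluster centre;
# idxmin per column picks the closest bus, i.e. the medoid.
distances = pd.DataFrame(km.transform(coords), index=["b0", "b1", "b2", "b3"])
medoid_idx = distances.idxmin()
print(medoid_idx)  # one bus label per cluster centre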
+ """ + + @classmethod + def __getattribute__(cls, ignored_name): + return noop + + +noops = Noops() +"""A default :py:class:`Noops` instance so you don't have to create one. +""" diff --git a/etrago/tools/constraints.py b/etrago/tools/constraints.py index 2fe25f2ef..453b076d4 100755 --- a/etrago/tools/constraints.py +++ b/etrago/tools/constraints.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2016-2018 Flensburg University of Applied Sciences, +# Copyright 2016-2023 Flensburg University of Applied Sciences, # Europa-Universität Flensburg, # Centre for Sustainable Energy Systems, # DLR-Institute for Networked Energy Systems @@ -21,16 +21,19 @@ """ Constraints.py includes additional constraints for eTraGo-optimizations """ +import os import logging -import numpy as np -import pandas as pd -import pyomo.environ as po -from egoio.tools import db from pyomo.environ import Constraint from pypsa.descriptors import expand_series from pypsa.linopt import define_constraints, define_variables, get_var, linexpr from pypsa.pf import get_switchable_as_dense as get_as_dense +import numpy as np +import pandas as pd +import pyomo.environ as po + +if "READTHEDOCS" not in os.environ: + from egoio.tools import db logger = logging.getLogger(__name__) @@ -41,7 +44,8 @@ "DLR-Institute for Networked Energy Systems" ) __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" -__author__ = "ulfmueller, s3pp, wolfbunke, mariusves, lukasol" +__author__ = """ulfmueller, s3pp, wolfbunke, mariusves, lukasol, AmeliaNadal, +CarlosEpia, ClaraBuettner, KathiEsterl""" def _get_crossborder_components(network, cntr="all"): @@ -80,20 +84,26 @@ def _get_crossborder_components(network, cntr="all"): buses_for = network.buses.index[network.buses.country == cntr] cb0 = network.lines.index[ - (network.lines.bus0.isin(buses_for)) & (network.lines.bus1.isin(buses_de)) + (network.lines.bus0.isin(buses_for)) + & (network.lines.bus1.isin(buses_de)) ] cb1 = network.lines.index[ - (network.lines.bus1.isin(buses_for)) & (network.lines.bus0.isin(buses_de)) + (network.lines.bus1.isin(buses_for)) + & (network.lines.bus0.isin(buses_de)) ] cb0_link = network.links.index[ - (network.links.bus0.isin(buses_for)) & (network.links.bus1.isin(buses_de)) - & (network.links.carrier=='DC')] + (network.links.bus0.isin(buses_for)) + & (network.links.bus1.isin(buses_de)) + & (network.links.carrier == "DC") + ] cb1_link = network.links.index[ - (network.links.bus0.isin(buses_de)) & (network.links.bus1.isin(buses_for)) - & (network.links.carrier=='DC')] + (network.links.bus0.isin(buses_de)) + & (network.links.bus1.isin(buses_for)) + & (network.links.carrier == "DC") + ] return buses_de, buses_for, cb0, cb1, cb0_link, cb1_link @@ -118,21 +128,21 @@ def _max_line_ext(self, network, snapshots): lines_snom = network.lines.s_nom_min.sum() - links_elec = network.links[network.links.carrier=='DC'] + links_elec = network.links[network.links.carrier == "DC"] links_index = links_elec.index links_pnom = links_elec.p_nom_min.sum() def _rule(m): - lines_opt = sum( - m.passive_branch_s_nom[index] for index in m.passive_branch_s_nom_index + m.passive_branch_s_nom[index] + for index in m.passive_branch_s_nom_index ) links_opt = sum(m.link_p_nom[index] for index in links_index) - return (lines_opt + links_opt) <= (lines_snom + links_pnom) * self.args[ - "extra_functionality" - ]["max_line_ext"] + return (lines_opt + links_opt) <= ( + lines_snom + links_pnom + ) * self.args["extra_functionality"]["max_line_ext"] network.model.max_line_ext = 
Constraint(rule=_rule) @@ -157,7 +167,7 @@ def _max_line_ext_nmp(self, network, snapshots): lines_snom = network.lines.s_nom.sum() - links_elec = network.links[network.links.carrier=='DC'] + links_elec = network.links[network.links.carrier == "DC"] links_index = links_elec.index links_pnom = links_elec.p_nom_min.sum() @@ -165,14 +175,15 @@ def _max_line_ext_nmp(self, network, snapshots): def _rule(m): lines_opt = sum( - m.passive_branch_s_nom[index] for index in m.passive_branch_s_nom_index + m.passive_branch_s_nom[index] + for index in m.passive_branch_s_nom_index ) links_opt = sum(m.link_p_nom[index] for index in links_index) - return (lines_opt + links_opt) <= (lines_snom + links_pnom) * self.args[ - "extra_functionality" - ]["max_line_ext"] + return (lines_opt + links_opt) <= ( + lines_snom + links_pnom + ) * self.args["extra_functionality"]["max_line_ext"] network.model.max_line_ext = Constraint(rule=_rule) @@ -196,10 +207,20 @@ def _min_renewable_share_nmp(self, network, snapshots): """ - renewables = ['biomass', 'central_biomass_CHP', 'industrial_biomass_CHP', - 'solar', 'solar_rooftop', 'wind_offshore', 'wind_onshore', - 'run_of_river', 'other_renewable', - 'central_biomass_CHP_heat', 'solar_thermal_collector', 'geo_thermal'] + renewables = [ + "biomass", + "central_biomass_CHP", + "industrial_biomass_CHP", + "solar", + "solar_rooftop", + "wind_offshore", + "wind_onshore", + "run_of_river", + "other_renewable", + "central_biomass_CHP_heat", + "solar_thermal_collector", + "geo_thermal", + ] res = network.generators.index[network.generators.carrier.isin(renewables)] @@ -214,7 +235,9 @@ def _min_renewable_share_nmp(self, network, snapshots): renew_production = linexpr((1, renew)).sum().sum() total_production = ( - linexpr((-self.args["extra_functionality"]["min_renewable_share"], total)) + linexpr( + (-self.args["extra_functionality"]["min_renewable_share"], total) + ) .sum() .sum() ) @@ -243,17 +266,29 @@ def _min_renewable_share(self, network, snapshots): """ - renewables = ['biomass', 'central_biomass_CHP', 'industrial_biomass_CHP', - 'solar', 'solar_rooftop', 'wind_offshore', 'wind_onshore', - 'run_of_river', 'other_renewable', - 'central_biomass_CHP_heat', 'solar_thermal_collector', 'geo_thermal'] + renewables = [ + "biomass", + "central_biomass_CHP", + "industrial_biomass_CHP", + "solar", + "solar_rooftop", + "wind_offshore", + "wind_onshore", + "run_of_river", + "other_renewable", + "CH4_biogas", + "central_biomass_CHP_heat", + "solar_thermal_collector", + "geo_thermal", + ] - res = list(network.generators.index[network.generators.carrier.isin(renewables)]) + res = list( + network.generators.index[network.generators.carrier.isin(renewables)] + ) total = list(network.generators.index) def _rule(m): - renewable_production = sum( m.generator_p[gen, sn] * network.snapshot_weightings.generators[sn] for gen in res @@ -279,23 +314,31 @@ def _cross_border_flow(self, network, snapshots): Extra_functionality that limits overall AC crossborder flows from/to Germany. Add key 'cross_border_flow' and array with minimal and maximal import/export Example: {'cross_border_flow': [-x, y]} (with x Import, y Export) + + Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA snapshots : pandas.DatetimeIndex List of timesteps considered in the optimization + Returns ------- None. 
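All of these extra functionalities share one pyomo idiom: capture the relevant data in a closure, write a _rule(m) that returns an (in)equality, and attach it to network.model as a Constraint. A self-contained toy version of the max_line_ext idea, with invented numbers:

import pyomo.environ as po

m = po.ConcreteModel()
m.s_nom = po.Var(["l1", "l2"], within=po.NonNegativeReals)

existing = 100.0  # stands in for lines_snom + links_pnom
max_ext = 1.2     # stands in for args["extra_functionality"]["max_line_ext"]

def _rule(m):
    # total optimised capacity may not exceed the allowed expansion factor
    return sum(m.s_nom[i] for i in ["l1", "l2"]) <= existing * max_ext

m.max_line_ext = po.Constraint(rule=_rule)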
""" - buses_de, buses_for, cb0, cb1, cb0_link, cb1_link = _get_crossborder_components( - network - ) - - export = ( - pd.Series(data=self.args["extra_functionality"]["cross_border_flow"]) + ( + buses_de, + buses_for, + cb0, + cb1, + cb0_link, + cb1_link, + ) = _get_crossborder_components(network) + + export = pd.Series( + data=self.args["extra_functionality"]["cross_border_flow"] ) def _rule_min(m): @@ -361,23 +404,31 @@ def _cross_border_flow_nmp(self, network, snapshots): Extra_functionality that limits overall crossborder flows from/to Germany. Add key 'cross_border_flow' and array with minimal and maximal import/export Example: {'cross_border_flow': [-x, y]} (with x Import, y Export) + + Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA snapshots : pandas.DatetimeIndex List of timesteps considered in the optimization + Returns ------- None. """ - buses_de, buses_for, cb0, cb1, cb0_link, cb1_link = _get_crossborder_components( - network - ) - - export = ( - pd.Series(data=self.args["extra_functionality"]["cross_border_flow"]) + ( + buses_de, + buses_for, + cb0, + cb1, + cb0_link, + cb1_link, + ) = _get_crossborder_components(network) + + export = pd.Series( + data=self.args["extra_functionality"]["cross_border_flow"] ) cb0_flow = ( @@ -422,11 +473,14 @@ def _cross_border_flow_per_country_nmp(self, network, snapshots): Add key 'cross_border_flow_per_country' to args.extra_functionality and define dictionary of country keys and desired limitations of im/exports in MWh Example: {'cross_border_flow_per_country': {'DK':[-X, Y], 'FR':[0,0]}} + + Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA snapshots : pandas.DatetimeIndex List of timesteps considered in the optimization + Returns ------- None. @@ -437,11 +491,9 @@ def _cross_border_flow_per_country_nmp(self, network, snapshots): countries = network.buses.country.unique() - export_per_country = ( - pd.DataFrame( - data=self.args["extra_functionality"]["cross_border_flow_per_country"] - ).transpose() - ) + export_per_country = pd.DataFrame( + data=self.args["extra_functionality"]["cross_border_flow_per_country"] + ).transpose() for cntr in export_per_country.index: if cntr in countries: @@ -510,11 +562,14 @@ def _cross_border_flow_per_country(self, network, snapshots): Add key 'cross_border_flow_per_country' to args.extra_functionality and define dictionary of country keys and desired limitations of im/exports in MWh Example: {'cross_border_flow_per_country': {'DK':[-X, Y], 'FR':[0,0]}} + + Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA snapshots : pandas.DatetimeIndex List of timesteps considered in the optimization + Returns ------- None. 
@@ -525,11 +580,9 @@ def _cross_border_flow_per_country(self, network, snapshots): countries = network.buses.country.unique() - export_per_country = ( - pd.DataFrame( - data=self.args["extra_functionality"]["cross_border_flow_per_country"] - ).transpose() - ) + export_per_country = pd.DataFrame( + data=self.args["extra_functionality"]["cross_border_flow_per_country"] + ).transpose() for cntr in export_per_country.index: if cntr in countries: @@ -557,12 +610,14 @@ def _rule_min(m): for sn in snapshots ) - sum( - m.link_p[link, sn] * network.snapshot_weightings.objective[sn] + m.link_p[link, sn] + * network.snapshot_weightings.objective[sn] for link in cb0_link for sn in snapshots ) + sum( - m.link_p[link, sn] * network.snapshot_weightings.objective[sn] + m.link_p[link, sn] + * network.snapshot_weightings.objective[sn] for link in cb1_link for sn in snapshots ) @@ -590,12 +645,14 @@ def _rule_max(m): for sn in snapshots ) - sum( - m.link_p[link, sn] * network.snapshot_weightings.objective[sn] + m.link_p[link, sn] + * network.snapshot_weightings.objective[sn] for link in cb0_link for sn in snapshots ) + sum( - m.link_p[link, sn] * network.snapshot_weightings.objective[sn] + m.link_p[link, sn] + * network.snapshot_weightings.objective[sn] for link in cb1_link for sn in snapshots ) @@ -670,7 +727,6 @@ def _capacity_factor(self, network, snapshots): a dictonary as a fraction of generation potential. Example: 'capacity_factor': {'run_of_river': [0, 0.5], 'solar': [0.1, 1]} - Parameters ---------- network : :class:`pypsa.Network @@ -691,9 +747,9 @@ def _capacity_factor(self, network, snapshots): gens, potential = _generation_potential(network, c, cntr="all") def _rule_max(m): - dispatch = sum( - m.generator_p[gen, sn] * network.snapshot_weightings.generators[sn] + m.generator_p[gen, sn] + * network.snapshot_weightings.generators[sn] for gen in gens for sn in snapshots ) @@ -703,9 +759,9 @@ def _rule_max(m): setattr(network.model, "max_flh_" + c, Constraint(rule=_rule_max)) def _rule_min(m): - dispatch = sum( - m.generator_p[gen, sn] * network.snapshot_weightings.generators[sn] + m.generator_p[gen, sn] + * network.snapshot_weightings.generators[sn] for gen in gens for sn in snapshots ) @@ -723,7 +779,6 @@ def _capacity_factor_nmp(self, network, snapshots): a dictonary as a fraction of generation potential. Example: 'capacity_factor': {'run_of_river': [0, 0.5], 'solar': [0.1, 1]} - Parameters ---------- network : :class:`pypsa.Network @@ -799,9 +854,9 @@ def _capacity_factor_per_cntr(self, network, snapshots): if len(gens) > 0: def _rule_max(m): - dispatch = sum( - m.generator_p[gen, sn] * network.snapshot_weightings.generators[sn] + m.generator_p[gen, sn] + * network.snapshot_weightings.generators[sn] for gen in gens for sn in snapshots ) @@ -809,13 +864,15 @@ def _rule_max(m): return dispatch <= factor[1] * potential setattr( - network.model, "max_flh_" + cntr + "_" + c, Constraint(rule=_rule_max) + network.model, + "max_flh_" + cntr + "_" + c, + Constraint(rule=_rule_max), ) def _rule_min(m): - dispatch = sum( - m.generator_p[gen, sn] * network.snapshot_weightings.generators[sn] + m.generator_p[gen, sn] + * network.snapshot_weightings.generators[sn] for gen in gens for sn in snapshots ) @@ -823,11 +880,19 @@ def _rule_min(m): return dispatch >= factor[0] * potential setattr( - network.model, "min_flh_" + cntr + "_" + c, Constraint(rule=_rule_min) + network.model, + "min_flh_" + cntr + "_" + c, + Constraint(rule=_rule_min), ) else: - print('Carrier '+c+' is not available in '+cntr+'. 
Skipping this constraint.') + print( + "Carrier " + + c + + " is not available in " + + cntr + + ". Skipping this constraint." + ) def _capacity_factor_per_cntr_nmp(self, network, snapshots): @@ -860,7 +925,6 @@ def _capacity_factor_per_cntr_nmp(self, network, snapshots): gens, potential = _generation_potential(network, c, cntr) if len(gens) > 0: - generation = ( get_var(network, "Generator", "p") .loc[snapshots, gens] @@ -885,7 +949,13 @@ def _capacity_factor_per_cntr_nmp(self, network, snapshots): ) else: - print('Carrier '+c+' is not available in '+cntr+'. Skipping this constraint.') + print( + "Carrier " + + c + + " is not available in " + + cntr + + ". Skipping this constraint." + ) def _capacity_factor_per_gen(self, network, snapshots): @@ -897,7 +967,6 @@ def _capacity_factor_per_gen(self, network, snapshots): Example: 'capacity_factor_per_gen': {'run_of_river': [0, 0.5], 'solar': [0.1, 1]} - Parameters ---------- network : :class:`pypsa.Network @@ -935,9 +1004,9 @@ def _capacity_factor_per_gen(self, network, snapshots): ) def _rule_max(m): - dispatch = sum( - m.generator_p[g, sn] * network.snapshot_weightings.generators[sn] + m.generator_p[g, sn] + * network.snapshot_weightings.generators[sn] for sn in snapshots ) @@ -946,9 +1015,9 @@ def _rule_max(m): setattr(network.model, "max_flh_" + g, Constraint(rule=_rule_max)) def _rule_min(m): - dispatch = sum( - m.generator_p[g, sn] * network.snapshot_weightings.generators[sn] + m.generator_p[g, sn] + * network.snapshot_weightings.generators[sn] for sn in snapshots ) @@ -966,7 +1035,6 @@ def _capacity_factor_per_gen_nmp(self, network, snapshots): Example: 'capacity_factor_per_gen': {'run_of_river': [0, 0.5], 'solar': [0.1, 1]} - Parameters ---------- network : :class:`pypsa.Network @@ -1051,7 +1119,6 @@ def _capacity_factor_per_gen_cntr(self, network, snapshots): """ arg = self.args["extra_functionality"]["capacity_factor_per_gen_cntr"] for cntr in arg.keys(): - carrier = arg[cntr].keys() snapshots = network.snapshots for c in carrier: @@ -1066,14 +1133,14 @@ def _capacity_factor_per_gen_cntr(self, network, snapshots): ] if len(gens) > 0: - for g in gens: if c in ["wind_onshore", "wind_offshore", "solar"]: potential = ( ( network.generators.p_nom[g] * network.generators_t.p_max_pu[g].mul( - network.snapshot_weightings.generators, axis=0 + network.snapshot_weightings.generators, + axis=0, ) ) .sum() @@ -1086,9 +1153,9 @@ def _capacity_factor_per_gen_cntr(self, network, snapshots): ) def _rule_max(m): - dispatch = sum( - m.generator_p[g, sn] * network.snapshot_weightings.generators[sn] + m.generator_p[g, sn] + * network.snapshot_weightings.generators[sn] for sn in snapshots ) return dispatch <= factor[1] * potential @@ -1100,9 +1167,9 @@ def _rule_max(m): ) def _rule_min(m): - dispatch = sum( - m.generator_p[g, sn] * network.snapshot_weightings.generators[sn] + m.generator_p[g, sn] + * network.snapshot_weightings.generators[sn] for sn in snapshots ) return dispatch >= factor[0] * potential @@ -1114,7 +1181,13 @@ def _rule_min(m): ) else: - print('Carrier '+c+' is not available in '+cntr+'. Skipping this constraint.') + print( + "Carrier " + + c + + " is not available in " + + cntr + + ". Skipping this constraint." 
+ ) def _capacity_factor_per_gen_cntr_nmp(self, network, snapshots): @@ -1142,7 +1215,6 @@ def _capacity_factor_per_gen_cntr_nmp(self, network, snapshots): """ arg = self.args["extra_functionality"]["capacity_factor_per_gen_cntr"] for cntr in arg.keys(): - carrier = arg[cntr].keys() for c in carrier: @@ -1156,14 +1228,14 @@ def _capacity_factor_per_gen_cntr_nmp(self, network, snapshots): ] if len(gens) > 0: - for g in gens: if c in ["wind_onshore", "wind_offshore", "solar"]: potential = ( ( network.generators.p_nom[g] * network.generators_t.p_max_pu[g].mul( - network.snapshot_weightings.generators, axis=0 + network.snapshot_weightings.generators, + axis=0, ) ) .sum() @@ -1199,7 +1271,13 @@ def _capacity_factor_per_gen_cntr_nmp(self, network, snapshots): ) else: - print('Carrier '+c+' is not available in '+cntr+'. Skipping this constraint.') + print( + "Carrier " + + c + + " is not available in " + + cntr + + ". Skipping this constraint." + ) def read_max_gas_generation(self): @@ -1230,7 +1308,6 @@ def read_max_gas_generation(self): "biogas": 14450103 }, # [MWh] Value from reference p-e-s run used in eGon-data } - engine = db.connection(section=self.args["db"]) try: sql = f""" @@ -1257,7 +1334,7 @@ def add_ch4_constraints(self, network, snapshots): Parameters ---------- - network : :class:`pypsa.Network + network : :class:`pypsa.Network` Overall container of PyPSA snapshots : pandas.DatetimeIndex List of timesteps considered in the optimization @@ -1292,16 +1369,18 @@ def add_ch4_constraints(self, network, snapshots): factor = arg[c] def _rule_max(m): - dispatch = sum( - m.generator_p[gen, sn] * network.snapshot_weightings.generators[sn] + m.generator_p[gen, sn] + * network.snapshot_weightings.generators[sn] for gen in gens for sn in snapshots ) return dispatch <= factor * (n_snapshots / 8760) - setattr(network.model, "max_flh_DE_" + c, Constraint(rule=_rule_max)) + setattr( + network.model, "max_flh_DE_" + c, Constraint(rule=_rule_max) + ) # Add contraints for neigbouring countries gen_abroad = network.generators[ @@ -1317,9 +1396,9 @@ def _rule_max(m): factor = network.generators.e_nom_max[g] def _rule_max(m): - dispatch = sum( - m.generator_p[g, sn] * network.snapshot_weightings.generators[sn] + m.generator_p[g, sn] + * network.snapshot_weightings.generators[sn] for sn in snapshots ) @@ -1344,7 +1423,7 @@ def add_ch4_constraints_nmp(self, network, snapshots): Parameters ---------- - network : :class:`pypsa.Network + network : :class:`pypsa.Network` Overall container of PyPSA snapshots : pandas.DatetimeIndex List of timesteps considered in the optimization @@ -1423,7 +1502,21 @@ def add_ch4_constraints_nmp(self, network, snapshots): def snapshot_clustering_daily_bounds(self, network, snapshots): - # This will bound the storage level to 0.5 max_level every 24th hour. + """ + Bound the storage level to 0.5 max_level every 24th hour. + + Parameters + ---------- + network : :class:`pypsa.Network` + Overall container of PyPSA + snapshots : pandas.DatetimeIndex + List of timesteps that will be constrained + + Returns + ------- + None + + """ sus = network.storage_units # take every first hour of the clustered days network.model.period_starts = network.snapshot_weightings.index[0::24] @@ -1438,7 +1531,8 @@ def day_rule(m, s, p): soc of the last hour of the day (i.e. 
+ 23 hours) """ return ( - m.state_of_charge[s, p] == m.state_of_charge[s, p + pd.Timedelta(hours=23)] + m.state_of_charge[s, p] + == m.state_of_charge[s, p + pd.Timedelta(hours=23)] ) network.model.period_bound = Constraint( @@ -1447,6 +1541,21 @@ def day_rule(m, s, p): def snapshot_clustering_daily_bounds_nmp(self, network, snapshots): + """ + Bound the storage level to 0.5 max_level every 24th hour. + + Parameters + ---------- + network : :class:`pypsa.Network` + Overall container of PyPSA + snapshots : pandas.DatetimeIndex + List of timesteps that will be constrained + + Returns + ------- + None + + """ c = "StorageUnit" @@ -1454,20 +1563,28 @@ def snapshot_clustering_daily_bounds_nmp(self, network, snapshots): period_ends = period_starts + pd.Timedelta(hours=23) eh = expand_series( - network.snapshot_weightings.objective[period_ends], network.storage_units.index + network.snapshot_weightings.objective[period_ends], + network.storage_units.index, ) # elapsed hours eff_stand = expand_series(1 - network.df(c).standing_loss, period_ends).T - eff_dispatch = expand_series(network.df(c).efficiency_dispatch, period_ends).T + eff_dispatch = expand_series( + network.df(c).efficiency_dispatch, period_ends + ).T eff_store = expand_series(network.df(c).efficiency_store, period_ends).T soc = get_var(network, c, "state_of_charge").loc[period_ends, :] - soc_peroid_start = get_var(network, c, "state_of_charge").loc[period_starts] + soc_peroid_start = get_var(network, c, "state_of_charge").loc[ + period_starts + ] coeff_var = [ (-1, soc), - (-1 / eff_dispatch * eh, get_var(network, c, "p_dispatch").loc[period_ends, :]), + ( + -1 / eff_dispatch * eh, + get_var(network, c, "p_dispatch").loc[period_ends, :], + ), (eff_store * eh, get_var(network, c, "p_store").loc[period_ends, :]), ] @@ -1480,14 +1597,42 @@ def masked_term(coeff, var, cols): .values ) - lhs += masked_term(eff_stand, soc_peroid_start, network.storage_units.index) + lhs += masked_term( + eff_stand, soc_peroid_start, network.storage_units.index + ) rhs = -get_as_dense(network, c, "inflow", period_ends).mul(eh) define_constraints(network, lhs, "==", rhs, "daily_bounds") -def snapshot_clustering_seasonal_storage(self, network, snapshots, simplified=False): +def snapshot_clustering_seasonal_storage( + self, network, snapshots, simplified=False +): + """ + Depicts intertemporal dependencies of storage units and stores when using + snapshot clustering to typical periods for temporal complexity reduction. + + According to: + L. Kotzur et al: 'Time series aggregation for energy + system design: + Modeling seasonal storage', 2018 + + Parameters + ---------- + network : :class:`pypsa.Network` + Overall container of PyPSA + snapshots : list + A list of datetime objects representing the timestamps of the snapshots + to be clustered. + simplified : bool, optional + A flag indicating whether to use a simplified version of the model that + does not include intra-temporal constraints and variables. 
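The soc_constraints implemented below follow the Kotzur decomposition: the total state of charge at an hour is an intra-period deviation plus an inter-period level decayed by the standing loss. A numeric sanity check with invented values for the daily case:

standing_loss = 0.001
hrs = 24
soc_inter_d = 500.0  # inter-period level carried between candidate days
soc_intra_h = -20.0  # intra-period deviation within the representative day

soc_total = soc_intra_h + soc_inter_d * (1 - standing_loss) ** hrs
print(round(soc_total, 2))  # 468.14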
+ + Returns + ------- + None + """ sus = network.storage_units sto = network.stores @@ -1508,13 +1653,15 @@ def snapshot_clustering_seasonal_storage(self, network, snapshots, simplified=Fa network.model.candidates = po.Set(initialize=candidates, ordered=True) if simplified == False: - # create intra soc variable for each storage/store and each hour - network.model.state_of_charge_intra = po.Var(sus.index, network.snapshots) - network.model.state_of_charge_intra_store = po.Var(sto.index, network.snapshots) + network.model.state_of_charge_intra = po.Var( + sus.index, network.snapshots + ) + network.model.state_of_charge_intra_store = po.Var( + sto.index, network.snapshots + ) else: - network.model.state_of_charge_intra_max = po.Var( sus.index, network.model.candidates ) @@ -1529,8 +1676,12 @@ def snapshot_clustering_seasonal_storage(self, network, snapshots, simplified=Fa ) # create intra soc variable for each storage and each hour - network.model.state_of_charge_intra = po.Var(sus.index, network.snapshots) - network.model.state_of_charge_intra_store = po.Var(sto.index, network.snapshots) + network.model.state_of_charge_intra = po.Var( + sus.index, network.snapshots + ) + network.model.state_of_charge_intra_store = po.Var( + sto.index, network.snapshots + ) def intra_max(model, st, h): cand = network.cluster_ts["Candidate_day"][h] @@ -1597,7 +1748,9 @@ def intra_soc_rule(m, s, h): and h in network.snapshot_weightings[0::720].index ): expr = m.state_of_charge_intra[s, h] == 0 - elif self.args["snapshot_clustering"]["how"] == "daily" and h.hour == 0: + elif ( + self.args["snapshot_clustering"]["how"] == "daily" and h.hour == 0 + ): expr = m.state_of_charge_intra[s, h] == 0 else: expr = m.state_of_charge_intra[s, h] == m.state_of_charge_intra[ @@ -1611,7 +1764,6 @@ def intra_soc_rule(m, s, h): return expr def intra_soc_rule_store(m, s, h): - if ( self.args["snapshot_clustering"]["how"] == "weekly" and h in network.snapshot_weightings[0::168].index @@ -1622,7 +1774,9 @@ def intra_soc_rule_store(m, s, h): and h in network.snapshot_weightings[0::720].index ): expr = m.state_of_charge_intra_store[s, h] == 0 - elif self.args["snapshot_clustering"]["how"] == "daily" and h.hour == 0: + elif ( + self.args["snapshot_clustering"]["how"] == "daily" and h.hour == 0 + ): expr = m.state_of_charge_intra_store[s, h] == 0 else: expr = ( @@ -1672,9 +1826,9 @@ def inter_storage_soc_rule(m, s, i): hrs = 720 else: hrs = 24 - expr = m.state_of_charge_inter[s, i + 1] == m.state_of_charge_inter[ - s, i - ] * ( + expr = m.state_of_charge_inter[ + s, i + 1 + ] == m.state_of_charge_inter[s, i] * ( 1 - network.storage_units.at[s, "standing_loss"] ) ** hrs + m.state_of_charge_intra[ s, last_hour @@ -1689,7 +1843,6 @@ def inter_storage_soc_rule(m, s, i): return expr def inter_store_soc_rule(m, s, i): - if i == network.model.candidates[-1]: last_hour = network.cluster["last_hour_RepresentativeDay"][i] expr = po.Constraint.Skip @@ -1743,15 +1896,18 @@ def total_state_of_charge(m, s, h): return ( m.state_of_charge[s, h] == m.state_of_charge_intra[s, h] - + m.state_of_charge_inter[s, network.cluster_ts["Candidate_day"][h]] + + m.state_of_charge_inter[ + s, network.cluster_ts["Candidate_day"][h] + ] ) def total_state_of_charge_store(m, s, h): - return ( m.store_e[s, h] == m.state_of_charge_intra_store[s, h] - + m.state_of_charge_inter_store[s, network.cluster_ts["Candidate_day"][h]] + + m.state_of_charge_inter_store[ + s, network.cluster_ts["Candidate_day"][h] + ] ) network.model.total_storage_constraint = po.Constraint( @@ 
-1785,7 +1941,9 @@ def state_of_charge_lower(m, s, h): if self.args["snapshot_clustering"]["how"] == "weekly": hrs = 168 candidate = network.cluster_ts["Candidate_day"][h] - last_hour = network.cluster.loc[candidate]["last_hour_RepresentativeDay"] + last_hour = network.cluster.loc[candidate][ + "last_hour_RepresentativeDay" + ] first_hour = last_hour - pd.DateOffset(hours=167) period_start = network.cluster_ts.index[0::168][candidate - 1] delta_t = h - period_start @@ -1793,7 +1951,9 @@ def state_of_charge_lower(m, s, h): elif self.args["snapshot_clustering"]["how"] == "monthly": hrs = 720 candidate = network.cluster_ts["Candidate_day"][h] - last_hour = network.cluster.loc[candidate]["last_hour_RepresentativeDay"] + last_hour = network.cluster.loc[candidate][ + "last_hour_RepresentativeDay" + ] first_hour = last_hour - pd.DateOffset(hours=719) period_start = network.cluster_ts.index[0::720][candidate - 1] delta_t = h - period_start @@ -1811,18 +1971,21 @@ def state_of_charge_lower(m, s, h): return ( m.state_of_charge_intra[s, intra_hour] - + m.state_of_charge_inter[s, network.cluster_ts["Candidate_day"][h]] + + m.state_of_charge_inter[ + s, network.cluster_ts["Candidate_day"][h] + ] * (1 - network.storage_units.at[s, "standing_loss"]) ** hrs ### >= 0 ) def state_of_charge_lower_store(m, s, h): - # Choose datetime of representive day if self.args["snapshot_clustering"]["how"] == "weekly": hrs = 168 candidate = network.cluster_ts["Candidate_day"][h] - last_hour = network.cluster.loc[candidate]["last_hour_RepresentativeDay"] + last_hour = network.cluster.loc[candidate][ + "last_hour_RepresentativeDay" + ] first_hour = last_hour - pd.DateOffset(hours=167) period_start = network.cluster_ts.index[0::168][candidate - 1] delta_t = h - period_start @@ -1830,7 +1993,9 @@ def state_of_charge_lower_store(m, s, h): elif self.args["snapshot_clustering"]["how"] == "monthly": hrs = 720 candidate = network.cluster_ts["Candidate_day"][h] - last_hour = network.cluster.loc[candidate]["last_hour_RepresentativeDay"] + last_hour = network.cluster.loc[candidate][ + "last_hour_RepresentativeDay" + ] first_hour = last_hour - pd.DateOffset(hours=719) period_start = network.cluster_ts.index[0::720][candidate - 1] delta_t = h - period_start @@ -1847,13 +2012,18 @@ def state_of_charge_lower_store(m, s, h): intra_hour = pd.to_datetime(date + " " + hour) if "DSM" in s: - low = network.stores.e_nom[s] * network.stores_t.e_min_pu.at[intra_hour, s] + low = ( + network.stores.e_nom[s] + * network.stores_t.e_min_pu.at[intra_hour, s] + ) else: low = 0 return ( m.state_of_charge_intra_store[s, intra_hour] - + m.state_of_charge_inter_store[s, network.cluster_ts["Candidate_day"][h]] + + m.state_of_charge_inter_store[ + s, network.cluster_ts["Candidate_day"][h] + ] * (1 - network.stores.at[s, "standing_loss"]) ** hrs ### >= low ) @@ -1875,14 +2045,17 @@ def state_of_charge_lower_simplified(m, s, h): hrs = 24 # 0 return ( - m.state_of_charge_intra_min[s, network.cluster_ts["Candidate_day"][h]] - + m.state_of_charge_inter[s, network.cluster_ts["Candidate_day"][h]] + m.state_of_charge_intra_min[ + s, network.cluster_ts["Candidate_day"][h] + ] + + m.state_of_charge_inter[ + s, network.cluster_ts["Candidate_day"][h] + ] * (1 - network.storage_units.at[s, "standing_loss"]) ** hrs ### >= 0 ) def state_of_charge_lower_store_simplified(m, s, h): - if self.args["snapshot_clustering"]["how"] == "weekly": hrs = 168 elif self.args["snapshot_clustering"]["how"] == "monthly": @@ -1918,20 +2091,29 @@ def state_of_charge_lower_store_simplified(m, 
s, h): ).split(" ")[0] hour = str(h).split(" ")[1] intra_hour = pd.to_datetime(date + " " + hour) - low = network.stores.e_nom[s] * network.stores_t.e_min_pu.at[intra_hour, s] + low = ( + network.stores.e_nom[s] + * network.stores_t.e_min_pu.at[intra_hour, s] + ) else: low = 0 return ( - m.state_of_charge_intra_store_min[s, network.cluster_ts["Candidate_day"][h]] - + m.state_of_charge_inter_store[s, network.cluster_ts["Candidate_day"][h]] + m.state_of_charge_intra_store_min[ + s, network.cluster_ts["Candidate_day"][h] + ] + + m.state_of_charge_inter_store[ + s, network.cluster_ts["Candidate_day"][h] + ] * (1 - network.stores.at[s, "standing_loss"]) ** hrs ### >= low ) if simplified: network.model.state_of_charge_lower = po.Constraint( - sus.index, network.cluster_ts.index, rule=state_of_charge_lower_simplified + sus.index, + network.cluster_ts.index, + rule=state_of_charge_lower_simplified, ) network.model.state_of_charge_lower_store = po.Constraint( sto.index, @@ -1944,7 +2126,9 @@ def state_of_charge_lower_store_simplified(m, s, h): sus.index, network.cluster_ts.index, rule=state_of_charge_lower ) network.model.state_of_charge_lower_store = po.Constraint( - sto.index, network.cluster_ts.index, rule=state_of_charge_lower_store + sto.index, + network.cluster_ts.index, + rule=state_of_charge_lower_store, ) network.model.del_component("state_of_charge_upper") @@ -1958,12 +2142,13 @@ def state_of_charge_lower_store_simplified(m, s, h): network.model.del_component("store_e_upper_index_1") def state_of_charge_upper(m, s, h): - # Choose datetime of representive day if self.args["snapshot_clustering"]["how"] == "weekly": hrs = 168 candidate = network.cluster_ts["Candidate_day"][h] - last_hour = network.cluster.loc[candidate]["last_hour_RepresentativeDay"] + last_hour = network.cluster.loc[candidate][ + "last_hour_RepresentativeDay" + ] first_hour = last_hour - pd.DateOffset(hours=167) period_start = network.cluster_ts.index[0::168][candidate - 1] delta_t = h - period_start @@ -1971,7 +2156,9 @@ def state_of_charge_upper(m, s, h): elif self.args["snapshot_clustering"]["how"] == "monthly": hrs = 720 candidate = network.cluster_ts["Candidate_day"][h] - last_hour = network.cluster.loc[candidate]["last_hour_RepresentativeDay"] + last_hour = network.cluster.loc[candidate][ + "last_hour_RepresentativeDay" + ] first_hour = last_hour - pd.DateOffset(hours=719) period_start = network.cluster_ts.index[0::720][candidate - 1] delta_t = h - period_start @@ -1994,18 +2181,21 @@ def state_of_charge_upper(m, s, h): return ( m.state_of_charge_intra[s, intra_hour] - + m.state_of_charge_inter[s, network.cluster_ts["Candidate_day"][h]] + + m.state_of_charge_inter[ + s, network.cluster_ts["Candidate_day"][h] + ] * (1 - network.storage_units.at[s, "standing_loss"]) ** hrs ### <= p_nom * network.storage_units.at[s, "max_hours"] ) def state_of_charge_upper_store(m, s, h): - # Choose datetime of representive day if self.args["snapshot_clustering"]["how"] == "weekly": hrs = 168 candidate = network.cluster_ts["Candidate_day"][h] - last_hour = network.cluster.loc[candidate]["last_hour_RepresentativeDay"] + last_hour = network.cluster.loc[candidate][ + "last_hour_RepresentativeDay" + ] first_hour = last_hour - pd.DateOffset(hours=167) period_start = network.cluster_ts.index[0::168][candidate - 1] delta_t = h - period_start @@ -2013,7 +2203,9 @@ def state_of_charge_upper_store(m, s, h): elif self.args["snapshot_clustering"]["how"] == "monthly": hrs = 720 candidate = network.cluster_ts["Candidate_day"][h] - last_hour = 
network.cluster.loc[candidate]["last_hour_RepresentativeDay"] + last_hour = network.cluster.loc[candidate][ + "last_hour_RepresentativeDay" + ] first_hour = last_hour - pd.DateOffset(hours=719) period_start = network.cluster_ts.index[0::720][candidate - 1] delta_t = h - period_start @@ -2042,13 +2234,14 @@ def state_of_charge_upper_store(m, s, h): return ( m.state_of_charge_intra_store[s, intra_hour] - + m.state_of_charge_inter_store[s, network.cluster_ts["Candidate_day"][h]] + + m.state_of_charge_inter_store[ + s, network.cluster_ts["Candidate_day"][h] + ] * (1 - network.stores.at[s, "standing_loss"]) ** hrs ### <= e_nom ) def state_of_charge_upper_simplified(m, s, h): - if self.args["snapshot_clustering"]["how"] == "weekly": hrs = 168 elif self.args["snapshot_clustering"]["how"] == "monthly": @@ -2062,14 +2255,17 @@ def state_of_charge_upper_simplified(m, s, h): p_nom = network.storage_units.p_nom[s] return ( - m.state_of_charge_intra_max[s, network.cluster_ts["Candidate_day"][h]] - + m.state_of_charge_inter[s, network.cluster_ts["Candidate_day"][h]] + m.state_of_charge_intra_max[ + s, network.cluster_ts["Candidate_day"][h] + ] + + m.state_of_charge_inter[ + s, network.cluster_ts["Candidate_day"][h] + ] * (1 - network.storage_units.at[s, "standing_loss"]) ** hrs ### <= p_nom * network.storage_units.at[s, "max_hours"] ) def state_of_charge_upper_store_simplified(m, s, h): - if self.args["snapshot_clustering"]["how"] == "weekly": hrs = 168 elif self.args["snapshot_clustering"]["how"] == "monthly": @@ -2087,7 +2283,9 @@ def state_of_charge_upper_store_simplified(m, s, h): "last_hour_RepresentativeDay" ] first_hour = last_hour - pd.DateOffset(hours=167) - period_start = network.cluster_ts.index[0::168][candidate - 1] + period_start = network.cluster_ts.index[0::168][ + candidate - 1 + ] delta_t = h - period_start intra_hour = first_hour + delta_t @@ -2097,7 +2295,9 @@ def state_of_charge_upper_store_simplified(m, s, h): "last_hour_RepresentativeDay" ] first_hour = last_hour - pd.DateOffset(hours=719) - period_start = network.cluster_ts.index[0::720][candidate - 1] + period_start = network.cluster_ts.index[0::720][ + candidate - 1 + ] delta_t = h - period_start intra_hour = first_hour + delta_t @@ -2105,7 +2305,9 @@ def state_of_charge_upper_store_simplified(m, s, h): date = str( network.snapshots[ network.snapshots.dayofyear - 1 - == network.cluster["RepresentativeDay"][h.dayofyear] + == network.cluster["RepresentativeDay"][ + h.dayofyear + ] ][0] ).split(" ")[0] hour = str(h).split(" ")[1] @@ -2119,16 +2321,21 @@ def state_of_charge_upper_store_simplified(m, s, h): e_nom = network.stores.e_nom[s] return ( - m.state_of_charge_intra_store_max[s, network.cluster_ts["Candidate_day"][h]] - + m.state_of_charge_inter_store[s, network.cluster_ts["Candidate_day"][h]] + m.state_of_charge_intra_store_max[ + s, network.cluster_ts["Candidate_day"][h] + ] + + m.state_of_charge_inter_store[ + s, network.cluster_ts["Candidate_day"][h] + ] * (1 - network.stores.at[s, "standing_loss"]) ** hrs ### <= e_nom ) if simplified: - network.model.state_of_charge_upper = po.Constraint( - sus.index, network.cluster_ts.index, rule=state_of_charge_upper_simplified + sus.index, + network.cluster_ts.index, + rule=state_of_charge_upper_simplified, ) network.model.state_of_charge_upper_store = po.Constraint( sto.index, @@ -2137,12 +2344,13 @@ def state_of_charge_upper_store_simplified(m, s, h): ) else: - network.model.state_of_charge_upper = po.Constraint( sus.index, network.cluster_ts.index, rule=state_of_charge_upper ) 
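The upper and lower SOC rules repeatedly derive the same mapping from an hour of the full year to the matching hour of its representative period: step back from the period's last hour to its first, then add the offset of h within the candidate period. Isolated, with invented dates for the weekly case:

import pandas as pd

last_hour = pd.Timestamp("2011-01-07 23:00")       # end of representative week
first_hour = last_hour - pd.DateOffset(hours=167)  # back to its first hour
period_start = pd.Timestamp("2011-03-14 00:00")    # start of candidate week
h = pd.Timestamp("2011-03-16 05:00")               # hour to be mapped

intra_hour = first_hour + (h - period_start)
print(intra_hour)  # 2011-01-03 05:00:00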
network.model.state_of_charge_upper_store = po.Constraint( - sto.index, network.cluster_ts.index, rule=state_of_charge_upper_store + sto.index, + network.cluster_ts.index, + rule=state_of_charge_upper_store, ) def cyclic_state_of_charge(m, s): @@ -2151,7 +2359,9 @@ def cyclic_state_of_charge(m, s): There are small differences to original results. """ last_day = network.cluster.index[-1] - last_calc_hour = network.cluster["last_hour_RepresentativeDay"][last_day] + last_calc_hour = network.cluster["last_hour_RepresentativeDay"][ + last_day + ] last_inter = m.state_of_charge_inter[s, last_day] last_intra = m.state_of_charge_intra[s, last_calc_hour] first_day = network.cluster.index[0] @@ -2181,9 +2391,10 @@ def cyclic_state_of_charge(m, s): ) def cyclic_state_of_charge_store(m, s): - last_day = network.cluster.index[-1] - last_calc_hour = network.cluster["last_hour_RepresentativeDay"][last_day] + last_calc_hour = network.cluster["last_hour_RepresentativeDay"][ + last_day + ] last_inter = m.state_of_charge_inter_store[s, last_day] last_intra = m.state_of_charge_intra_store[s, last_calc_hour] first_day = network.cluster.index[0] @@ -2202,7 +2413,8 @@ def cyclic_state_of_charge_store(m, s): first_intra = m.state_of_charge_intra_store[s, first_calc_hour] expr = first_intra + first_inter == ( - (last_intra + last_inter) * (1 - network.stores.at[s, "standing_loss"]) + (last_intra + last_inter) + * (1 - network.stores.at[s, "standing_loss"]) + m.store_p[s, last_calc_hour] ) @@ -2217,6 +2429,27 @@ def cyclic_state_of_charge_store(m, s): def snapshot_clustering_seasonal_storage_hourly(self, network, snapshots): + """ + Depicts intertemporal dependencies of storage units and stores when using + snapshot clustering to typical periods for temporal complexity reduction. + + According to: + L. Kotzur et al: 'Time series aggregation for energy + system design: + Modeling seasonal storage', 2018 + + Parameters + ---------- + network : :class:`pypsa.Network` + Overall container of PyPSA + snapshots : list + A list of datetime objects representing the timestamps of the snapshots + to be clustered. 
+ + Returns + ------- + None + """ # TODO: updaten mit stores (Sektorkopplung) @@ -2238,7 +2471,6 @@ def snapshot_clustering_seasonal_storage_hourly(self, network, snapshots): network.model.storages = network.storage_units.index def set_soc_all(m, s, h): - if h == self.args["start_snapshot"]: prev = ( network.cluster.index.get_level_values(0)[-1] @@ -2249,11 +2481,13 @@ def set_soc_all(m, s, h): else: prev = h - 1 - cluster_hour = network.cluster["Hour"][h + 1 - self.args["start_snapshot"]] + cluster_hour = network.cluster["Hour"][ + h + 1 - self.args["start_snapshot"] + ] - expr = m.state_of_charge_all[s, h] == m.state_of_charge_all[s, prev] * ( - 1 - network.storage_units.at[s, "standing_loss"] - ) - ( + expr = m.state_of_charge_all[s, h] == m.state_of_charge_all[ + s, prev + ] * (1 - network.storage_units.at[s, "standing_loss"]) - ( m.storage_p_dispatch[s, cluster_hour] / network.storage_units.at[s, "efficiency_dispatch"] - network.storage_units.at[s, "efficiency_store"] @@ -2268,7 +2502,6 @@ def set_soc_all(m, s, h): ) def soc_equals_soc_all(m, s, h): - hour = (h.dayofyear - 1) * 24 + h.hour return m.state_of_charge_all[s, hour] == m.state_of_charge[s, h] @@ -2283,7 +2516,6 @@ def soc_equals_soc_all(m, s, h): network.model.del_component("state_of_charge_upper_index_1") def state_of_charge_upper(m, s, h): - if network.storage_units.p_nom_extendable[s]: p_nom = m.storage_p_nom[s] else: @@ -2302,6 +2534,30 @@ def state_of_charge_upper(m, s, h): def snapshot_clustering_seasonal_storage_nmp(self, n, sns, simplified=False): + """ + Depicts intertemporal dependencies of storage units and stores when using + snapshot clustering to typical periods for temporal complexity reduction. + + According to: + L. Kotzur et al: 'Time series aggregation for energy + system design: + Modeling seasonal storage', 2018 + + Parameters + ---------- + n : :class:`pypsa.Network` + Overall container of PyPSA + sns : list + A list of datetime objects representing the timestamps of the snapshots + to be clustered. + simplified : bool, optional + A flag indicating whether to use a simplified version of the model that + does not include intra-temporal constraints and variables. + + Returns + ------- + None + """ # TODO: so noch nicht korrekt... # TODO: updaten mit stores (Sektorkopplung) @@ -2367,7 +2623,12 @@ def snapshot_clustering_seasonal_storage_nmp(self, n, sns, simplified=False): coeff_var = [ (-1, soc_total), (1, soc_intra), - (1, soc_inter.loc[n.cluster_ts.loc[sns, "Candidate_day"]].set_index(sns)), + ( + 1, + soc_inter.loc[n.cluster_ts.loc[sns, "Candidate_day"]].set_index( + sns + ), + ), ] lhs, *axes = linexpr(*coeff_var, return_axes=True) @@ -2375,6 +2636,91 @@ def snapshot_clustering_seasonal_storage_nmp(self, n, sns, simplified=False): def snapshot_clustering_seasonal_storage_hourly_nmp(self, n, sns): + """ + Depicts intertemporal dependencies of storage units and stores when using + snapshot clustering to typical periods for temporal complexity reduction. + + According to: + L. Kotzur et al: 'Time series aggregation for energy + system design: + Modeling seasonal storage', 2018 + + Parameters + ---------- + n : :class:`pypsa.Network` + Overall container of PyPSA + sns : list + A list of datetime objects representing the timestamps of the snapshots + to be clustered. 
+ + Returns + ------- + None + """ + + print("TODO") + + # TODO: implement + + +def split_dispatch_disaggregation_constraints(self, n, sns): + """ + Add constraints for the state of charge of storage units and stores + when separating the optimization into smaller subproblems + while conducting the dispatch disaggregation in the temporally fully + resolved network. + + The state of charge at the end of each slice is set to the value + calculated in the optimization with the temporally reduced network + to ensure compatibility and to reproduce seasonality. + + Parameters + ---------- + network : :class:`pypsa.Network` + Overall container of PyPSA + snapshots : pandas.DatetimeIndex + List of timesteps considered in the optimization + + Returns + ------- + None. + """ + tsa_hour = sns[sns.isin(self.conduct_dispatch_disaggregation.index)] + if len(tsa_hour) > 1: + tsa_hour = tsa_hour[-1] + else: + tsa_hour = tsa_hour[0] + n.model.soc_values = self.conduct_dispatch_disaggregation.loc[tsa_hour] + + sus = n.storage_units.index + # for stores, exclude emob and dsm because of their special constraints + sto = n.stores[ + ~n.stores.carrier.isin(["battery storage", "battery_storage", "dsm"]) + ].index + + def disaggregation_sus_soc(m, s, h): + """ + Sets the soc at the end of the time slice in the dispatch + disaggregation to the value calculated in the temporally + reduced lopf without slices. + """ + return m.state_of_charge[s, h] == m.soc_values[s] + + n.model.split_dispatch_sus_soc = po.Constraint( + sus, sns[-1:], rule=disaggregation_sus_soc + ) + + def disaggregation_sto_soc(m, s, h): + """ + Sets the soc at the end of the time slice in the dispatch + disaggregation to the value calculated in the temporally + reduced lopf without slices. + """ + return m.store_e[s, h] == m.soc_values[s] + + n.model.split_dispatch_sto_soc = po.Constraint( + sto, sns[-1:], rule=disaggregation_sto_soc + ) + + +def split_dispatch_disaggregation_constraints_nmp(self, n, sns): print("TODO") @@ -2382,8 +2728,9 @@ def snapshot_clustering_seasonal_storage_hourly_nmp(self, n, sns): class Constraints: - def __init__(self, args): + def __init__(self, args, conduct_dispatch_disaggregation): self.args = args + self.conduct_dispatch_disaggregation = conduct_dispatch_disaggregation def functionality(self, network, snapshots): """Add constraints to pypsa-model using extra-functionality. 
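The slice-coupling constraint just added reduces to this pattern: pin each storage's state of charge at the last snapshot of a slice to the value the temporally reduced run produced. A toy sketch (names and numbers invented; the target values live in a plain dict here instead of on the model):

import pyomo.environ as po

soc_values = {"pumped_hydro": 420.0}  # from the temporally reduced run

m = po.ConcreteModel()
m.state_of_charge = po.Var(["pumped_hydro"], [23], within=po.NonNegativeReals)

def disaggregation_sus_soc(m, s, h):
    # fix the soc at the slice's final snapshot to the precomputed value
    return m.state_of_charge[s, h] == soc_values[s]

m.split_dispatch_sus_soc = po.Constraint(
    ["pumped_hydro"], [23], rule=disaggregation_sus_soc
)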
@@ -2392,25 +2739,30 @@ def functionality(self, network, snapshots): Parameters ---------- - network : :class:`pypsa.Network + network : :class:`pypsa.Network` Overall container of PyPSA snapshots : pandas.DatetimeIndex - List of timesteps considered in the optimization + List of timesteps considered in the optimization """ - if self.args["method"]["pyomo"]: - add_chp_constraints(network, snapshots) - add_ch4_constraints(self, network, snapshots) - else: - add_chp_constraints_nmp(network) - add_ch4_constraints_nmp(self, network, snapshots) + if "CH4" in network.buses.carrier.values: + if self.args["method"]["pyomo"]: + add_chp_constraints(network, snapshots) + if self.args["scn_name"] != "status2019": + add_ch4_constraints(self, network, snapshots) + else: + add_chp_constraints_nmp(network) + if self.args["scn_name"] != "status2019": + add_ch4_constraints_nmp(self, network, snapshots) for constraint in self.args["extra_functionality"].keys(): try: type(network.model) try: eval("_" + constraint + "(self, network, snapshots)") - logger.info("Added extra_functionality {}".format(constraint)) + logger.info( + "Added extra_functionality {}".format(constraint) + ) except: logger.warning( "Constraint {} not defined".format(constraint) @@ -2421,33 +2773,35 @@ def functionality(self, network, snapshots): try: eval("_" + constraint + "_nmp(self, network, snapshots)") logger.info( - "Added extra_functionality {} without pyomo".format(constraint) + "Added extra_functionality {} without pyomo".format( + constraint + ) ) except: - logger.warning("Constraint {} not defined".format(constraint)) + logger.warning( + "Constraint {} not defined".format(constraint) + ) if ( self.args["snapshot_clustering"]["active"] and self.args["snapshot_clustering"]["method"] == "typical_periods" ): - if ( self.args["snapshot_clustering"]["storage_constraints"] == "daily_bounds" ): - if self.args["method"]["pyomo"]: snapshot_clustering_daily_bounds(self, network, snapshots) else: - snapshot_clustering_daily_bounds_nmp(self, network, snapshots) + snapshot_clustering_daily_bounds_nmp( + self, network, snapshots + ) elif ( self.args["snapshot_clustering"]["storage_constraints"] == "soc_constraints" ): - if self.args["snapshot_clustering"]["how"] == "hourly": - if self.args["method"]["pyomo"]: snapshot_clustering_seasonal_storage_hourly( self, network, snapshots @@ -2457,9 +2811,10 @@ def functionality(self, network, snapshots): self, network, snapshots ) else: - if self.args["method"]["pyomo"]: - snapshot_clustering_seasonal_storage(self, network, snapshots) + snapshot_clustering_seasonal_storage( + self, network, snapshots + ) else: snapshot_clustering_seasonal_storage_nmp( self, network, snapshots @@ -2469,9 +2824,7 @@ def functionality(self, network, snapshots): self.args["snapshot_clustering"]["storage_constraints"] == "soc_constraints_simplified" ): - if self.args["snapshot_clustering"]["how"] == "hourly": - logger.info( "soc_constraints_simplified not possible while hourly clustering -> changed to soc_constraints" ) @@ -2500,6 +2853,16 @@ def functionality(self, network, snapshots): + " [daily_bounds, soc_constraints, soc_constraints_simplified]" ) + if self.conduct_dispatch_disaggregation is not False: + if self.args["method"]["pyomo"]: + split_dispatch_disaggregation_constraints( + self, network, snapshots + ) + else: + split_dispatch_disaggregation_constraints_nmp( + self, network, snapshots + ) + def add_chp_constraints_nmp(n): """ @@ -2537,7 +2900,6 @@ def add_chp_constraints_nmp(n): ].index.unique() for i in 
ch4_nodes_with_chp:
-
     elec_chp = n.links[
         (n.links.carrier == "central_gas_CHP") & (n.links.bus0 == i)
     ].index
@@ -2550,16 +2912,20 @@ def add_chp_constraints_nmp(n):
 
     # backpressure
     lhs_1 = sum(
-        c_m * n.links.at[h_chp, "efficiency"] * link_p[h_chp] for h_chp in heat_chp
+        c_m * n.links.at[h_chp, "efficiency"] * link_p[h_chp]
+        for h_chp in heat_chp
     )
 
     lhs_2 = sum(
-        n.links.at[e_chp, "efficiency"] * link_p[e_chp] for e_chp in elec_chp
+        n.links.at[e_chp, "efficiency"] * link_p[e_chp]
+        for e_chp in elec_chp
     )
 
     lhs = linexpr((1, lhs_1), (1, lhs_2))
 
-    define_constraints(n, lhs, "<=", 0, "chplink_" + str(i), "backpressure")
+    define_constraints(
+        n, lhs, "<=", 0, "chplink_" + str(i), "backpressure"
+    )
 
     # top_iso_fuel_line
     lhs, *ax = linexpr(
@@ -2618,9 +2984,9 @@ def add_chp_constraints(network, snapshots):
     ].index.unique()
 
     for i in ch4_nodes_with_chp:
-
         elec_chp = network.links[
-            (network.links.carrier == "central_gas_CHP") & (network.links.bus0 == i)
+            (network.links.carrier == "central_gas_CHP")
+            & (network.links.bus0 == i)
         ].index
 
         heat_chp = network.links[
@@ -2638,7 +3004,8 @@ def backpressure(model, snapshot):
         )
 
         rhs = sum(
-            network.links.at[e_chp, "efficiency"] * model.link_p[e_chp, snapshot]
+            network.links.at[e_chp, "efficiency"]
+            * model.link_p[e_chp, snapshot]
             for e_chp in elec_chp
         )
 
@@ -2652,13 +3019,13 @@ def backpressure(model, snapshot):
 
     # Guarantees p_g1 +c_v p_b1 \leq p_g1_nom
     def top_iso_fuel_line(model, snapshot):
-
-        lhs = sum(model.link_p[h_chp, snapshot] for h_chp in heat_chp) + sum(
-            model.link_p[e_chp, snapshot] for e_chp in elec_chp
-        )
+        lhs = sum(
+            model.link_p[h_chp, snapshot] for h_chp in heat_chp
+        ) + sum(model.link_p[e_chp, snapshot] for e_chp in elec_chp)
 
         rhs = network.links[
-            (network.links.carrier == "central_gas_CHP") & (network.links.bus0 == i)
+            (network.links.carrier == "central_gas_CHP")
+            & (network.links.bus0 == i)
         ].p_nom.sum()
 
         return lhs <= rhs
diff --git a/etrago/tools/execute.py b/etrago/tools/execute.py
index 0e8c688cc..f7a13412a 100755
--- a/etrago/tools/execute.py
+++ b/etrago/tools/execute.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2016-2018 Flensburg University of Applied Sciences,
+# Copyright 2016-2023 Flensburg University of Applied Sciences,
 # Europa-Universität Flensburg,
 # Centre for Sustainable Energy Systems,
 # DLR-Institute for Networked Energy Systems
@@ -19,7 +19,7 @@
 
 # File description
 """
-execute.py defines optimization and simulation methods for Etrago object.
+execute.py defines optimization and simulation methods for the etrago object.
 """
 import os
@@ -45,23 +45,30 @@
     "DLR-Institute for Networked Energy Systems"
)
 __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)"
-__author__ = "ulfmueller, s3pp, wolfbunke, mariusves, lukasol"
+__author__ = (
+    "ulfmueller, s3pp, wolfbunke, mariusves, lukasol, KathiEsterl, "
+    "ClaraBuettner, CarlosEpia, AmeliaNadal"
+)
 
 
 def update_electrical_parameters(network, l_snom_pre, t_snom_pre):
-
     """
     Update electrical parameters of active branch components
-    considering s_nom of previous iteration
+    considering s_nom of previous iteration.
 
     Parameters
     ----------
-    network : :class:`pypsa.Network
-        Overall container of PyPSA
+    network : pypsa.Network object
+        Container for all network components.
     l_snom_pre: pandas.Series
-        s_nom of ac-lines in previous iteration
+        s_nom of ac-lines in previous iteration.
     t_snom_pre: pandas.Series
-        s_nom of transformers in previous iteration
+        s_nom of transformers in previous iteration.
+
+    Returns
+    -------
+    l_snom_pre : pandas.Series
+        s_nom of ac-lines to be used as reference in the next iteration.
+    t_snom_pre : pandas.Series
+        s_nom of transformers to be used as reference in the next iteration.
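+
+    Examples
+    --------
+    Illustrative usage after one solved iteration (``s_nom_opt`` must be
+    filled; the variable names follow the loop in ``iterate_lopf``)::
+
+        l_snom_pre, t_snom_pre = update_electrical_parameters(
+            network, l_snom_pre, t_snom_pre
+        )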
+
     """
 
     network.lines.x[network.lines.s_nom_extendable] = (
@@ -103,22 +110,20 @@ def update_electrical_parameters(network, l_snom_pre, t_snom_pre):
 
     return l_snom_pre, t_snom_pre
 
 
-def run_lopf(
-    etrago, extra_functionality, method, dispatch_disaggregation=False
-):
-    """Function that performs lopf with or without pyomo
-
+def run_lopf(etrago, extra_functionality, method):
+    """
+    Function that performs lopf with or without pyomo.
 
     Parameters
     ----------
-    etrago : :class:`etrago.Etrago
-        Transmission grid object
-    extra_functionality: str
+    etrago : etrago object
+        eTraGo containing all network information and a PyPSA network.
+    extra_functionality: dict
         Define extra constranits.
     method: dict
         Choose 'n_iter' and integer for fixed number of iterations or
         'threshold' and derivation of objective in percent for variable number
-        of iteration until the threshold of the objective function is reached
+        of iteration until the threshold of the objective function is reached.
 
     Returns
     -------
@@ -128,35 +133,70 @@ def run_lopf(
 
     x = time.time()
 
-    if dispatch_disaggregation:
+    if etrago.conduct_dispatch_disaggregation is not False:
+        # parameters defining the start and end of each slice
+        no_slices = etrago.args["temporal_disaggregation"]["no_slices"]
+        skipped = etrago.network.snapshot_weightings.iloc[0].objective
+        transits = np.where(
+            etrago.network_tsa.snapshots.isin(
+                etrago.conduct_dispatch_disaggregation.index
+            )
+        )[0]
 
         if method["pyomo"]:
-            etrago.network_tsa.lopf(
-                etrago.network_tsa.snapshots,
-                solver_name=etrago.args["solver"],
-                solver_options=etrago.args["solver_options"],
-                pyomo=True,
-                extra_functionality=extra_functionality,
-                formulation=etrago.args["model_formulation"],
-            )
+            # repeat the optimization for all slices
+            for i in range(0, no_slices):
+                # keep information on the initial state of charge for the
+                # respective slice
+                initial = transits[i - 1]
+                soc_initial = etrago.conduct_dispatch_disaggregation.loc[
+                    [etrago.network_tsa.snapshots[initial]]
+                ].transpose()
+                etrago.network_tsa.storage_units.state_of_charge_initial = (
+                    soc_initial
+                )
+                etrago.network_tsa.stores.e_initial = soc_initial
+                etrago.network_tsa.stores.e_initial.fillna(0, inplace=True)
+                # the state of charge at the end of each slice is set within
+                # split_dispatch_disaggregation_constraints in constraints.py
+
+                # adapt start and end snapshot of the respective slice
+                start = transits[i - 1] + skipped
+                end = transits[i] + (skipped - 1)
+                if i == 0:
+                    start = 0
+                if i == no_slices - 1:
+                    end = len(etrago.network_tsa.snapshots)
+
+                etrago.network_tsa.lopf(
+                    etrago.network_tsa.snapshots[start : end + 1],
+                    solver_name=etrago.args["solver"],
+                    solver_options=etrago.args["solver_options"],
+                    pyomo=True,
+                    extra_functionality=extra_functionality,
+                    formulation=etrago.args["model_formulation"],
+                )
 
-        if etrago.network.results["Solver"][0]["Status"] != "ok":
-            raise Exception("LOPF not solved.")
+                if etrago.network_tsa.results["Solver"][0]["Status"] != "ok":
+                    raise Exception("LOPF not solved.")
 
         else:
-            status, termination_condition = network_lopf(
-                etrago.network_tsa,
-                solver_name=etrago.args["solver"],
-                solver_options=etrago.args["solver_options"],
-                extra_functionality=extra_functionality,
-                formulation=etrago.args["model_formulation"],
-            )
+            for i in range(0, no_slices):
+                # set the initial state of charge and the slice bounds as in
+                # the pyomo case above
+                initial = transits[i - 1]
+                soc_initial = etrago.conduct_dispatch_disaggregation.loc[
+                    [etrago.network_tsa.snapshots[initial]]
+                ].transpose()
+                etrago.network_tsa.storage_units.state_of_charge_initial = (
+                    soc_initial
+                )
+                etrago.network_tsa.stores.e_initial = soc_initial
+                etrago.network_tsa.stores.e_initial.fillna(0, inplace=True)
+                start = transits[i - 1] + skipped
+                end = transits[i] + (skipped - 1)
+                if i == 0:
+                    start = 0
+                if i == no_slices - 1:
+                    end = len(etrago.network_tsa.snapshots)
+
+                status, termination_condition = network_lopf(
+                    etrago.network_tsa,
+                    etrago.network_tsa.snapshots[start : end + 1],
+                    solver_name=etrago.args["solver"],
+                    solver_options=etrago.args["solver_options"],
+                    extra_functionality=extra_functionality,
+                    
formulation=etrago.args["model_formulation"], + ) - if status != "ok": - raise Exception("LOPF not solved.") + if status != "ok": + raise Exception("LOPF not solved.") - else: + etrago.network_tsa.storage_units.state_of_charge_initial = 0 + etrago.network_tsa.stores.e_initial = 0 + else: if method["pyomo"]: etrago.network.lopf( etrago.network.snapshots, @@ -192,33 +232,48 @@ def iterate_lopf( etrago, extra_functionality, method={"n_iter": 4, "pyomo": True}, - dispatch_disaggregation=False, ): - """ Run optimization of lopf. If network extension is included, the specified number of iterations is calculated to consider reactance changes. Parameters ---------- - etrago : :class:`etrago.Etrago - Transmission grid object - extra_functionality: str + etrago : etrago object + eTraGo containing all network information and a PyPSA network. + extra_functionality: dict Define extra constranits. method: dict Choose 'n_iter' and integer for fixed number of iterations or 'threshold' and derivation of objective in percent for variable number - of iteration until the threshold of the objective function is reached + of iteration until the threshold of the objective function is reached. """ args = etrago.args + path = args["csv_export"] + lp_path = args["lpfile"] + + if ( + args["temporal_disaggregation"]["active"] is True + and etrago.conduct_dispatch_disaggregation is False + ): + if not args["csv_export"] is False: + path = path + "/temporally_reduced" - if dispatch_disaggregation: + if not args["lpfile"] is False: + lp_path = lp_path[0:-3] + "_temporally_reduced.lp" + + if etrago.conduct_dispatch_disaggregation is not False: + if not args["lpfile"] is False: + lp_path = lp_path[0:-3] + "_dispatch_disaggregation.lp" etrago.network_tsa.lines["s_nom"] = etrago.network.lines["s_nom_opt"] etrago.network_tsa.lines["s_nom_extendable"] = False + etrago.network_tsa.links["p_nom"] = etrago.network.links["p_nom_opt"] + etrago.network_tsa.links["p_nom_extendable"] = False + etrago.network_tsa.transformers["s_nom"] = etrago.network.transformers[ "s_nom_opt" ] @@ -232,33 +287,21 @@ def iterate_lopf( etrago.network_tsa.stores["e_nom"] = etrago.network.stores["e_nom_opt"] etrago.network_tsa.stores["e_nom_extendable"] = False - etrago.network_tsa.links["p_nom"] = etrago.network.links["p_nom_opt"] - etrago.network_tsa.links["p_nom_extendable"] = False + etrago.network_tsa.storage_units.cyclic_state_of_charge = False + etrago.network_tsa.stores.e_cyclic = False args["snapshot_clustering"]["active"] = False args["skip_snapshots"] = False args["extendable"] = [] - if args["csv_export"] != False: - args["csv_export"] = ( - args["csv_export"] + "/dispatch_disaggregation" - ) - - if not args["lpfile"] is False: - args["lpfile"] = ( - args["lpfile"][0:-3] + "_dispatch_disaggregation.lp" - ) - network = etrago.network_tsa else: - network = etrago.network # if network is extendable, iterate lopf # to include changes of electrical parameters if network.lines.s_nom_extendable.any(): - # Initialise s_nom_pre (s_nom_opt of previous iteration) # to s_nom for first lopf: l_snom_pre = network.lines.s_nom.copy() @@ -269,12 +312,11 @@ def iterate_lopf( n_iter = method["n_iter"] for i in range(1, (1 + n_iter)): - run_lopf(etrago, extra_functionality, method) if args["csv_export"] != False: - path = args["csv_export"] + "/lopf_iteration_" + str(i) - etrago.export_to_csv(path) + path_it = path + "/lopf_iteration_" + str(i) + etrago.export_to_csv(path_it) if i < n_iter: l_snom_pre, t_snom_pre = update_electrical_parameters( @@ -285,7 +327,6 
@@ def iterate_lopf(
     # function is reached
 
     if "threshold" in method:
-
         run_lopf(etrago, extra_functionality, method)
 
         diff_obj = network.objective * method["threshold"] / 100
@@ -294,7 +335,6 @@ def iterate_lopf(
 
         # Stop after 100 iterations to aviod unending loop
         while i <= 100:
-
             if i == 100:
                 print("Maximum number of iterations reached.")
                 break
@@ -309,32 +349,25 @@ def iterate_lopf(
             i += 1
 
             if args["csv_export"] != False:
-                path = args["csv_export"] + "/lopf_iteration_" + str(i)
-                etrago.export_to_csv(path)
+                path_it = path + "/lopf_iteration_" + str(i)
+                etrago.export_to_csv(path_it)
 
             if abs(pre - network.objective) <= diff_obj:
                 print("Threshold reached after " + str(i) + " iterations.")
                 break
 
     else:
-        run_lopf(
-            etrago, extra_functionality, method, dispatch_disaggregation=True
-        )
-
-        if args["csv_export"] != False:
-            path = args["csv_export"]
-            etrago.export_to_csv(path)
+        run_lopf(etrago, extra_functionality, method)
 
     if not args["lpfile"] is False:
-        network.model.write(
-            args["lpfile"], io_options={"symbolic_solver_labels": True}
-        )
+        network.model.write(lp_path)
 
     return network
 
 
 def lopf(self):
-    """Functions that runs lopf accordning to arguments
+    """
+    Function that runs lopf according to the arguments.
 
     Returns
     -------
@@ -344,35 +377,116 @@ def lopf(self):
 
     x = time.time()
 
+    self.conduct_dispatch_disaggregation = False
+
     iterate_lopf(
-        self, Constraints(self.args).functionality, method=self.args["method"]
+        self,
+        Constraints(
+            self.args, self.conduct_dispatch_disaggregation
+        ).functionality,
+        method=self.args["method"],
     )
 
     y = time.time()
     z = (y - x) / 60
     logger.info("Time for LOPF [min]: {}".format(round(z, 2)))
 
+    if self.args["csv_export"] != False:
+        path = self.args["csv_export"]
+        if self.args["temporal_disaggregation"]["active"] is True:
+            path = path + "/temporally_reduced"
+        self.export_to_csv(path)
+
 
 def dispatch_disaggregation(self):
+    """
+    Function running the temporal disaggregation, meaning the optimization
+    of dispatch in the temporally fully resolved network; therefore, the
+    problem is reduced to smaller subproblems by slicing the whole considered
+    time span while keeping information on the state of charge of storage
+    units and stores to ensure compatibility and to reproduce seasonality.
 
-    if self.args["dispatch_disaggregation"] == True:
+    Returns
+    -------
+    None.
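+
+    Examples
+    --------
+    Illustrative call sequence on an Etrago instance (the settings shown
+    are exemplary)::
+
+        etrago.args["temporal_disaggregation"] = {
+            "active": True,
+            "no_slices": 8,
+        }
+        etrago.lopf()                     # optimization on reduced snapshots
+        etrago.dispatch_disaggregation()  # dispatch on full resolution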
+
+    """
+    if self.args["temporal_disaggregation"]["active"]:
         x = time.time()
 
+        if self.args["temporal_disaggregation"]["no_slices"]:
+            # split dispatch_disaggregation into subproblems
+            # keep some information on soc in beginning and end of slices
+            # to ensure compatibility and to reproduce seasonality
+
+            # define number of slices and corresponding slice length
+            no_slices = self.args["temporal_disaggregation"]["no_slices"]
+            slice_len = int(len(self.network.snapshots) / no_slices)
+            # transition snapshots defining start and end of slices
+            transits = self.network.snapshots[0::slice_len]
+            if len(transits) > 1:
+                transits = transits[1:]
+            if transits[-1] != self.network_tsa.snapshots[-1]:
+                transits = transits.insert(
+                    (len(transits)), self.network.snapshots[-1]
+                )
+            # for stores, exclude emob and dsm because of their special constraints
+            sto = self.network.stores[
+                ~self.network.stores.carrier.isin(
+                    ["battery_storage", "battery storage", "dsm"]
+                )
+            ]
+
+            # save state of charge of storage units and stores at those transition snapshots
+            self.conduct_dispatch_disaggregation = pd.DataFrame(
+                columns=self.network.storage_units.index.append(sto.index),
+                index=transits,
+            )
+            for storage in self.network.storage_units.index:
+                self.conduct_dispatch_disaggregation[
+                    storage
+                ] = self.network.storage_units_t.state_of_charge[storage]
+            for store in sto.index:
+                self.conduct_dispatch_disaggregation[
+                    store
+                ] = self.network.stores_t.e[store]
+
+            extra_func = self.args["extra_functionality"]
+            self.args["extra_functionality"] = {}
+
+        load_shedding = self.args["load_shedding"]
+        if not load_shedding:
+            self.args["load_shedding"] = True
+            self.load_shedding(temporal_disaggregation=True)
+
         iterate_lopf(
             self,
-            Constraints(self.args).functionality,
+            Constraints(
+                self.args, self.conduct_dispatch_disaggregation
+            ).functionality,
             method=self.args["method"],
-            dispatch_disaggregation=True,
         )
 
+        # switch to temporally fully resolved network as standard network,
+        # temporally reduced network is stored in network_tsa
         network1 = self.network.copy()
         self.network = self.network_tsa.copy()
         self.network_tsa = network1.copy()
+        network1 = 0
+
+        # keep original settings
+
+        if self.args["temporal_disaggregation"]["no_slices"]:
+            self.args["extra_functionality"] = extra_func
+        self.args["load_shedding"] = load_shedding
 
         self.network.lines["s_nom_extendable"] = self.network_tsa.lines[
             "s_nom_extendable"
         ]
+        self.network.links["p_nom_extendable"] = self.network_tsa.links[
+            "p_nom_extendable"
+        ]
         self.network.transformers.s_nom_extendable = (
             self.network_tsa.transformers.s_nom_extendable
         )
@@ -382,9 +496,15 @@ def dispatch_disaggregation(self):
         self.network.stores["e_nom_extendable"] = self.network_tsa.stores[
             "e_nom_extendable"
         ]
-        self.network.links["p_nom_extendable"] = self.network_tsa.links[
-            "p_nom_extendable"
-        ]
+        self.network.storage_units.cyclic_state_of_charge = (
+            self.network_tsa.storage_units.cyclic_state_of_charge
+        )
+        self.network.stores.e_cyclic = self.network_tsa.stores.e_cyclic
+
+        if self.args["csv_export"] != False:
+            path = self.args["csv_export"]
+            self.export_to_csv(path)
+            self.export_to_csv(path + "/temporal_disaggregation")
 
         y = time.time()
         z = (y - x) / 60
@@ -392,7 +512,8 @@ def dispatch_disaggregation(self):
 
 def run_pf_post_lopf(self):
-    """Functions that runs pf_post_lopf accordning to arguments
+    """
+    Function that runs pf_post_lopf according to arguments.
Returns ------- @@ -401,16 +522,12 @@ def run_pf_post_lopf(self): """ if self.args["pf_post_lopf"]["active"]: - pf_post_lopf(self) def pf_post_lopf(etrago, calc_losses=False): - """ Function that prepares and runs non-linar load flow using PyPSA pf. - - If crossborder lines are DC-links, pf is only applied on german network. Crossborder flows are still considerd due to the active behavior of links. To return a network containing the whole grid, the optimised solution of the @@ -418,11 +535,11 @@ def pf_post_lopf(etrago, calc_losses=False): Parameters ---------- - etrago : :class:`etrago.Etrago - Transmission grid object + etrago : etrago object + eTraGo containing all network information and a PyPSA network. add_foreign_lopf: boolean Choose if foreign results of lopf should be added to the network when - foreign lines are DC + foreign lines are DC. q_allocation: str Choose allocation of reactive power. Possible settings are listed in distribute_q function. @@ -437,19 +554,19 @@ def pf_post_lopf(etrago, calc_losses=False): def drop_foreign_components(network): """ Function that drops foreign components which are only connected via - DC-links and saves their optimization results in pandas.DataFrame - + DC-links and saves their optimization results in pd.DataFrame. Parameters ---------- - network : :class:`pypsa.Network - Overall container of PyPSA + network : pypsa.Network object + Container for all network components. Returns ------- None. """ + # Create series for constant loads constant_loads = network.loads[network.loads.p_set != 0]["p_set"] for load in constant_loads.index: @@ -544,9 +661,20 @@ def drop_foreign_components(network): def import_gen_from_links(network): """ - create gas generators from links in order to not lose them when - dropping non-electric carriers + Creates gas generators from links in order to not lose them when + dropping non-electric carriers. + + Parameters + ---------- + network : pypsa.Network object + Container for all network components. + + Returns + ------- + None. + """ + # Discard all generators < 1kW discard_gen = network.links[network.links["p_nom"] <= 0.001].index network.links.drop(discard_gen, inplace=True) @@ -695,6 +823,11 @@ def import_gen_from_links(network): network.generators.carrier == "load shedding" ] = "PQ" + # Assign storage units control strategy + network.storage_units.control[ + network.storage_units.bus.isin(ac_bus.index) + ] = "PV" + # Find out the name of the main subnetwork main_subnet = str(network.buses.sub_network.value_counts().argmax()) @@ -773,24 +906,25 @@ def import_gen_from_links(network): def distribute_q(network, allocation="p_nom"): - - """Function that distributes reactive power at bus to all installed + """ + Function that distributes reactive power at bus to all installed generators and storages. Parameters ---------- - network : :class:`pypsa.Network - Overall container of PyPSA + network : pypsa.Network object + Container for all network components. allocation: str Choose key to distribute reactive power: 'p_nom' to dirstribute via p_nom - 'p' to distribute via p_set + 'p' to distribute via p_set. Returns ------- - + None. """ + ac_bus = network.buses[network.buses.carrier == "AC"] gen_elec = network.generators[ @@ -907,19 +1041,19 @@ def distribute_q(network, allocation="p_nom"): def calc_line_losses(network, converged): - """Calculate losses per line with PF result data + """ + Calculate losses per line with PF result data. 
 Parameters
     ----------
-    network : :class:`pypsa.Network
-        Overall container of PyPSA
-    converged : pandas.series
-        list of snapshots with their status (converged or not)
-    s0 : series
-        apparent power of line
-    i0 : series
-        current of line
+    network : pypsa.Network object
+        Container for all network components.
+    converged : pd.Series
+        List of snapshots with their status (converged or not).
+
+
     Returns
     -------
+    None.
 
     """
     # Line losses
@@ -973,25 +1107,21 @@ def calc_line_losses(network, converged):
         f"Note: {(~converged).sum()} snapshot(s) was/were excluded "
         + "because the PF did not converge"
     )
-
     return
 
 
 def set_slack(network):
-
     """
-    Function that chosses the bus with the maximum installed power as slack
+    Function that chooses the bus with the maximum installed power as slack.
 
     Parameters
     ----------
-    network : :class:`pypsa.Network
-        Overall container of PyPSA
+    network : pypsa.Network object
+        Container for all network components.
 
     Returns
     -------
-    network : :class:`pypsa.Network
-        Overall container of PyPSA
-
-
+    network : pypsa.Network object
+        Container for all network components.
     """
diff --git a/etrago/tools/extendable.py b/etrago/tools/extendable.py
index 9707ca341..6bb566585 100644
--- a/etrago/tools/extendable.py
+++ b/etrago/tools/extendable.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2016-2018 Flensburg University of Applied Sciences,
+# Copyright 2016-2023 Flensburg University of Applied Sciences,
 # Europa-Universität Flensburg,
 # Centre for Sustainable Energy Systems,
 # DLR-Institute for Networked Energy Systems
@@ -19,7 +19,7 @@
 
 # File description
 """
-Extendable.py defines function to set PyPSA-components extendable.
+Extendable.py defines functions to set PyPSA components extendable.
 """
 from etrago.tools.utilities import convert_capital_costs, find_snapshots
@@ -39,7 +39,7 @@
     "DLR-Institute for Networked Energy Systems"
 )
 __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)"
-__author__ = "ulfmueller, s3pp, wolfbunke, mariusves, lukasol"
+__author__ = "ulfmueller, s3pp, wolfbunke, mariusves, lukasol, ClaraBuettner, KathiEsterl, CarlosEpia"
 
 
 def extendable(
@@ -54,30 +54,26 @@ def extendable(
     grid_max_foreign=4,
     grid_max_abs_foreign=None,
 ):
-
     """
-    Function that sets selected components extendable
-
-    'network' for all lines, links and transformers
-    'german_network' for all lines, links and transformers located in Germany
-    'foreign_network' for all foreign lines, links and transformers
-    'transformers' for all transformers
-    'storages' for extendable storages
-    'overlay_network' for lines, links and trafos in extension scenerio(s)
+    Function that sets selected components extendable.
 
     Parameters
     ----------
-    network : :class:`pypsa.Network
-        Overall container of PyPSA
-    args  : dict
-        Arguments set in appl.py
-
+    grid_max_D : int, optional
+        Upper bounds for electrical grid expansion relative to existing
+        capacity. The default is None.
+    grid_max_abs_D : dict, optional
+        Absolute upper bounds for electrical grid expansion in Germany.
+    grid_max_foreign : int, optional
+        Upper bounds for expansion of electrical foreign lines relative to
+        the existing capacity. The default is 4.
+    grid_max_abs_foreign : dict, optional
+        Absolute upper bounds for expansion of foreign electrical grid.
+        The default is None.
 
     Returns
     -------
-    network : :class:`pypsa.Network
-        Overall container of PyPSA
+    None.
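+
+    Examples
+    --------
+    Illustrative call with absolute German expansion limits (the values are
+    exemplary; the dictionary shape follows ``line_max_abs`` below)::
+
+        etrago.extendable(
+            grid_max_abs_D={
+                "380": {"i": 1020, "wires": 4, "circuits": 4},
+                "220": {"i": 1020, "wires": 4, "circuits": 4},
+                "110": {"i": 1020, "wires": 4, "circuits": 2},
+                "dc": 0,
+            },
+            grid_max_foreign=4,
+        )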
+ """ + network = self.network extendable_settings = self.args["extendable"] @@ -99,11 +95,15 @@ def extendable( network.transformers.s_nom_max = float("inf") if not network.links.empty: - network.links.loc[network.links.carrier == "DC", "p_nom_extendable"] = True + network.links.loc[ + network.links.carrier == "DC", "p_nom_extendable" + ] = True network.links.loc[ network.links.carrier == "DC", "p_nom_min" ] = network.links.p_nom - network.links.loc[network.links.carrier == "DC", "p_nom_max"] = float("inf") + network.links.loc[ + network.links.carrier == "DC", "p_nom_max" + ] = float("inf") if "german_network" in extendable_settings["extendable_components"]: buses = network.buses[network.buses.country == "DE"] @@ -157,15 +157,18 @@ def extendable( if "foreign_network" in extendable_settings["extendable_components"]: buses = network.buses[network.buses.country != "DE"] network.lines.loc[ - network.lines.bus0.isin(buses.index) | network.lines.bus1.isin(buses.index), + network.lines.bus0.isin(buses.index) + | network.lines.bus1.isin(buses.index), "s_nom_extendable", ] = True network.lines.loc[ - network.lines.bus0.isin(buses.index) | network.lines.bus1.isin(buses.index), + network.lines.bus0.isin(buses.index) + | network.lines.bus1.isin(buses.index), "s_nom_min", ] = network.lines.s_nom network.lines.loc[ - network.lines.bus0.isin(buses.index) | network.lines.bus1.isin(buses.index), + network.lines.bus0.isin(buses.index) + | network.lines.bus1.isin(buses.index), "s_nom_max", ] = float("inf") @@ -272,14 +275,14 @@ def extendable( ): for c in ext_stores: network.stores.loc[ - (network.stores.carrier == c) & (network.stores.capital_cost == 0), + (network.stores.carrier == c) + & (network.stores.capital_cost == 0), "capital_cost", ] = network.stores.loc[ (network.stores.carrier == c), "capital_cost" ].max() if "foreign_storage" in extendable_settings["extendable_components"]: - foreign_battery = network.storage_units[ ( network.storage_units.bus.isin( @@ -304,9 +307,9 @@ def extendable( foreign_battery, "p_nom_max" ] = network.storage_units.loc[foreign_battery, "p_nom"] - network.storage_units.loc[foreign_battery, "p_nom"] = network.storage_units.loc[ - foreign_battery, "p_nom_min" - ] + network.storage_units.loc[ + foreign_battery, "p_nom" + ] = network.storage_units.loc[foreign_battery, "p_nom_min"] network.storage_units.loc[ foreign_battery, "capital_cost" @@ -330,7 +333,8 @@ def extendable( == ("extension_" + self.args["scn_extension"][i]), "s_nom_max", ] = network.lines.s_nom[ - network.lines.scn_name == ("extension_" + self.args["scn_extension"][i]) + network.lines.scn_name + == ("extension_" + self.args["scn_extension"][i]) ] network.links.loc[ @@ -449,10 +453,28 @@ def extendable( grid_max_foreign * network.transformers.s_nom ) - return network - def snommax(i=1020, u=380, wires=4, circuits=4): + """ + Function to calculate limitation for capacity expansion. + + Parameters + ---------- + i : int, optional + Current. The default is 1020. + u : int, optional + Voltage level. The default is 380. + wires : int, optional + Number of wires per line. The default is 4. + circuits : int, optional + Number of circuits. The default is 4. + + Returns + ------- + s_nom_max : float + Limitation for capacity expansion. + + """ s_nom_max = (i * u * sqrt(3) * wires * circuits) / 1000 return s_nom_max @@ -467,6 +489,23 @@ def line_max_abs( "dc": 0, }, ): + """ + Function to calculate limitation for capacity expansion of lines in network. 
+ + Parameters + ---------- + network : pypsa.Network object + Container for all network components. + buses : pypsa.Network buses + Considered buses in network. + line_max_abs : dict, optional + Line parameters considered to calculate maximum capacity. + + Returns + ------- + None. + + """ # calculate the cables of the route between two buses cables = network.lines.groupby(["bus0", "bus1"]).cables.sum() cables2 = network.lines.groupby(["bus1", "bus0"]).cables.sum() @@ -547,6 +586,21 @@ def line_max_abs( def transformer_max_abs(network, buses): + """ + Function to calculate limitation for capacity expansion of transformers in network. + + Parameters + ---------- + network : pypsa.Network object + Container for all network components. + buses : pypsa.Network buses + Considered buses in network. + + Returns + ------- + None. + + """ # To determine the maximum extendable capacity of a transformer, the sum of # the maximum capacities of the lines connected to it is calculated for each @@ -571,8 +625,12 @@ def transformer_max_abs(network, buses): trafo_smax_0 = network.transformers.bus0.map(smax_bus["s_nom_max_bus"]) trafo_smax_1 = network.transformers.bus1.map(smax_bus["s_nom_max_bus"]) - trafo_pmax_0 = network.transformers.bus0.map(pmax_links_bus["p_nom_max_bus"]) / 2 - trafo_pmax_1 = network.transformers.bus1.map(pmax_links_bus["p_nom_max_bus"]) / 2 + trafo_pmax_0 = ( + network.transformers.bus0.map(pmax_links_bus["p_nom_max_bus"]) / 2 + ) + trafo_pmax_1 = ( + network.transformers.bus1.map(pmax_links_bus["p_nom_max_bus"]) / 2 + ) trafo_smax = pd.concat( [trafo_smax_0, trafo_smax_1, trafo_pmax_0, trafo_pmax_1], axis=1 ) @@ -596,17 +654,16 @@ def transformer_max_abs(network, buses): def extension_preselection(etrago, method, days=3): - """ Function that preselects lines which are extendend in snapshots leading to overloading to reduce nubmer of extension variables. Parameters ---------- - network : :class:`pypsa.Network - Overall container of PyPSA + network : pypsa.Network object + Container for all network components. args : dict - Arguments set in appl.py + Arguments set in appl.py. method: str Choose method of selection: 'extreme_situations' for remarkable timsteps @@ -617,8 +674,8 @@ def extension_preselection(etrago, method, days=3): Returns ------- - network : :class:`pypsa.Network - Overall container of PyPSA + network : pypsa.Network object + Container for all network components. 
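+
+    Examples
+    --------
+    Illustrative call (argument values are exemplary; 'extreme_situations'
+    is one of the selection methods described above)::
+
+        network = extension_preselection(
+            etrago, method="extreme_situations", days=3
+        )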
""" network = etrago.network args = etrago.args @@ -650,19 +707,27 @@ def extension_preselection(etrago, method, days=3): network.transformers.loc[:, "s_nom_max"] = np.inf network = convert_capital_costs(network, 1, 1) - extended_lines = network.lines.index[network.lines.s_nom_opt > network.lines.s_nom] - extended_links = network.links.index[network.links.p_nom_opt > network.links.p_nom] + extended_lines = network.lines.index[ + network.lines.s_nom_opt > network.lines.s_nom + ] + extended_links = network.links.index[ + network.links.p_nom_opt > network.links.p_nom + ] x = time.time() for i in range(int(snapshots.value_counts().sum())): if i > 0: network.lopf(snapshots[i], solver_name=args["solver"]) extended_lines = extended_lines.append( - network.lines.index[network.lines.s_nom_opt > network.lines.s_nom] + network.lines.index[ + network.lines.s_nom_opt > network.lines.s_nom + ] ) extended_lines = extended_lines.drop_duplicates() extended_links = extended_links.append( - network.links.index[network.links.p_nom_opt > network.links.p_nom] + network.links.index[ + network.links.p_nom_opt > network.links.p_nom + ] ) extended_links = extended_links.drop_duplicates() @@ -671,13 +736,17 @@ def extension_preselection(etrago, method, days=3): network.lines.loc[ ~network.lines.index.isin(extended_lines), "s_nom_extendable" ] = False - network.lines.loc[network.lines.s_nom_extendable, "s_nom_min"] = network.lines.s_nom + network.lines.loc[ + network.lines.s_nom_extendable, "s_nom_min" + ] = network.lines.s_nom network.lines.loc[network.lines.s_nom_extendable, "s_nom_max"] = np.inf network.links.loc[ ~network.links.index.isin(extended_links), "p_nom_extendable" ] = False - network.links.loc[network.links.p_nom_extendable, "p_nom_min"] = network.links.p_nom + network.links.loc[ + network.links.p_nom_extendable, "p_nom_min" + ] = network.links.p_nom network.links.loc[network.links.p_nom_extendable, "p_nom_max"] = np.inf network.snapshot_weightings = weighting @@ -694,13 +763,12 @@ def extension_preselection(etrago, method, days=3): def print_expansion_costs(network): - """Function that prints network and storage investment costs - + """Function that prints network and storage investment costs. Parameters ---------- - network : :class:`pypsa.Network - Overall container of PyPSA + network : pypsa.Network object + Container for all network components. 
Returns ------- @@ -714,11 +782,16 @@ def print_expansion_costs(network): ext_trafos = network.transformers[network.transformers.s_nom_extendable] if not ext_storage.empty: - storage_costs = (ext_storage.p_nom_opt * ext_storage.capital_cost).sum() + storage_costs = ( + ext_storage.p_nom_opt * ext_storage.capital_cost + ).sum() if not ext_lines.empty: network_costs = ( - ((ext_lines.s_nom_opt - ext_lines.s_nom) * ext_lines.capital_cost).sum() + ( + (ext_lines.s_nom_opt - ext_lines.s_nom) + * ext_lines.capital_cost + ).sum() + (ext_links.p_nom_opt - ext_links.p_nom) * ext_links.capital_cost ).sum() @@ -726,7 +799,8 @@ def print_expansion_costs(network): network_costs = ( network_costs + ( - (ext_trafos.s_nom_opt - ext_trafos.s_nom) * ext_trafos.capital_cost + (ext_trafos.s_nom_opt - ext_trafos.s_nom) + * ext_trafos.capital_cost ).sum() ) @@ -740,4 +814,4 @@ def print_expansion_costs(network): print( "Investment costs for all lines and transformers in selected snapshots [EUR]:", round(network_costs, 2), - ) \ No newline at end of file + ) diff --git a/etrago/tools/io.py b/etrago/tools/io.py index bc5109ed8..5bb2b019e 100644 --- a/etrago/tools/io.py +++ b/etrago/tools/io.py @@ -24,10 +24,8 @@ Input/output operations between powerflow schema in the oedb and PyPSA. Additionally oedb wrapper classes to instantiate PyPSA network objects. - Attributes ----------- - +----------- packagename: str Package containing orm class definitions temp_ormclass: str @@ -36,7 +34,7 @@ Orm class name of table with carrier id to carrier name datasets Notes ------ +------- A configuration file connecting the chosen optimization method with components to be queried is needed for NetworkScenario class. """ @@ -48,22 +46,20 @@ "DLR-Institute for Networked Energy Systems" ) __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" -__author__ = "ulfmueller, mariusves" +__author__ = "ulfmueller, mariusves, pieterhexen, ClaraBuettner" -import pypsa from importlib import import_module -import pandas as pd -from collections import OrderedDict -import json import os + import numpy as np +import pandas as pd +import pypsa if "READTHEDOCS" not in os.environ: - from geoalchemy2.shape import to_shape + import logging + from sqlalchemy.orm.exc import NoResultFound - from sqlalchemy import and_, func, or_, create_engine import saio - import logging logger = logging.getLogger(__name__) @@ -76,7 +72,6 @@ class ScenarioBase: Parameters ---------- - session : sqla.orm.session.Session Handles conversations with the database. version : str @@ -84,7 +79,6 @@ class ScenarioBase: """ def __init__(self, engine, session, version=None): - global carr_ormclass saio.register_schema("grid", engine) @@ -120,7 +114,6 @@ def __init__( temp_id=1, **kwargs, ): - self.scn_name = scn_name self.start_snapshot = start_snapshot self.end_snapshot = end_snapshot @@ -139,7 +132,9 @@ def __repr__(self): r = "NetworkScenario: %s" % self.scn_name if not self.network: - r += "\nTo create a PyPSA network call .build_network()." + r += """ + \nTo create a PyPSA network call .build_network(). + """ return r @@ -177,11 +172,8 @@ def configure_timeindex(self): ) self.timeindex = timeindex[self.start_snapshot - 1 : self.end_snapshot] - """ pandas.tseries.index.DateTimeIndex : - Index of snapshots or timesteps. 
""" def id_to_source(self): - ormclass = self._mapped["Source"] query = self.session.query(ormclass) @@ -207,7 +199,6 @@ def fetch_by_relname(self, name): from saio.grid import ( egon_etrago_bus, egon_etrago_generator, - egon_etrago_load, egon_etrago_line, egon_etrago_link, egon_etrago_load, @@ -263,7 +254,6 @@ def series_fetch_by_relname(self, network, name, pypsa_name): from saio.grid import ( egon_etrago_bus_timeseries, egon_etrago_generator_timeseries, - egon_etrago_load_timeseries, egon_etrago_line_timeseries, egon_etrago_link_timeseries, egon_etrago_load_timeseries, @@ -320,11 +310,19 @@ def series_fetch_by_relname(self, network, name, pypsa_name): df_all.index = df_all.index.astype(str) if not df_all.isnull().all().all(): - + # Fill empty lists with default values from pypsa if col in network.component_attrs[pypsa_name].index: - df_all.fillna( - network.component_attrs[pypsa_name].default[col], - inplace=True, + df_all.loc[df_all.anon_1.isnull(), "anon_1"] = df_all.loc[ + df_all.anon_1.isnull(), "anon_1" + ].apply( + lambda x: [ + float( + network.component_attrs[pypsa_name].default[ + col + ] + ) + ] + * len(network.snapshots) ) df = df_all.anon_1.apply(pd.Series).transpose() @@ -339,7 +337,7 @@ def series_fetch_by_relname(self, network, name, pypsa_name): def build_network(self, network=None, *args, **kwargs): """Core method to construct PyPSA Network object.""" - if network != None: + if network is not None: network = network else: @@ -356,7 +354,6 @@ def build_network(self, network=None, *args, **kwargs): "Storage", "Store", ]: - pypsa_comp = "StorageUnit" if comp == "Storage" else comp if comp[-1] == "s": @@ -400,17 +397,17 @@ def clear_results_db(session): from egoio.db_tables.model_draft import ( EgoGridPfHvResultBus as BusResult, EgoGridPfHvResultBusT as BusTResult, - EgoGridPfHvResultStorage as StorageResult, - EgoGridPfHvResultStorageT as StorageTResult, EgoGridPfHvResultGenerator as GeneratorResult, EgoGridPfHvResultGeneratorT as GeneratorTResult, EgoGridPfHvResultLine as LineResult, EgoGridPfHvResultLineT as LineTResult, EgoGridPfHvResultLoad as LoadResult, EgoGridPfHvResultLoadT as LoadTResult, + EgoGridPfHvResultMeta as ResultMeta, + EgoGridPfHvResultStorage as StorageResult, + EgoGridPfHvResultStorageT as StorageTResult, EgoGridPfHvResultTransformer as TransformerResult, EgoGridPfHvResultTransformerT as TransformerTResult, - EgoGridPfHvResultMeta as ResultMeta, ) print("Are you sure that you want to clear all results in the OEDB?") @@ -464,7 +461,7 @@ def results_to_oedb(session, network, args, grid="hv", safe_results=False): """ # Update generator_ids when k_means clustering to get integer ids - if args["network_clustering_kmeans"] != False: + if args["network_clustering_kmeans"]: new_index = pd.DataFrame(index=network.generators.index) new_index["new"] = range(len(network.generators)) @@ -485,17 +482,17 @@ def results_to_oedb(session, network, args, grid="hv", safe_results=False): from egoio.db_tables.model_draft import ( EgoGridPfHvResultBus as BusResult, EgoGridPfHvResultBusT as BusTResult, - EgoGridPfHvResultStorage as StorageResult, - EgoGridPfHvResultStorageT as StorageTResult, EgoGridPfHvResultGenerator as GeneratorResult, EgoGridPfHvResultGeneratorT as GeneratorTResult, EgoGridPfHvResultLine as LineResult, EgoGridPfHvResultLineT as LineTResult, EgoGridPfHvResultLoad as LoadResult, EgoGridPfHvResultLoadT as LoadTResult, + EgoGridPfHvResultMeta as ResultMeta, + EgoGridPfHvResultStorage as StorageResult, + EgoGridPfHvResultStorageT as StorageTResult, 
 EgoGridPfHvResultTransformer as TransformerResult,
             EgoGridPfHvResultTransformerT as TransformerTResult,
-            EgoGridPfHvResultMeta as ResultMeta,
             EgoGridPfHvSource as Source,
         )
     else:
@@ -761,35 +758,46 @@ def run_sql_script(conn, scriptname="results_md2grid.sql"):
 
 def extension(self, **kwargs):
     """
     Function that adds an additional network to the existing network container.
-    The new network can include every PyPSA-component (e.g. buses, lines, links).
+    The new network can include every PyPSA-component (e.g. buses, lines,
+    links).
     To connect it to the existing network, transformers are needed.
 
-    All components and its timeseries of the additional scenario need to be inserted in the fitting 'model_draft.ego_grid_pf_hv_extension_' table.
-    The scn_name in the tables have to be labled with 'extension_' + scn_name (e.g. 'extension_nep2035').
+    All components and their timeseries of the additional scenario need to be
+    inserted in the fitting 'model_draft.ego_grid_pf_hv_extension\_' table.
+    The scn_name in the tables has to be labelled with 'extension\_' + scn_name
+    (e.g. 'extension_nep2035').
 
     Until now, the tables include three additional scenarios:
-    'nep2035_confirmed': all new lines and needed transformers planed in the 'Netzentwicklungsplan 2035' (NEP2035) that have been confirmed by the Bundesnetzagentur (BNetzA)
+    'nep2035_confirmed': all new lines and needed transformers planned in the
+    'Netzentwicklungsplan 2035' (NEP2035) that have been confirmed by the
+    Bundesnetzagentur (BNetzA)
 
-    'nep2035_b2': all new lines and needed transformers planned in the NEP 2035 in the scenario 2035 B2
+    'nep2035_b2': all new lines and needed transformers planned in the NEP 2035
+    in the scenario 2035 B2
 
-    'BE_NO_NEP 2035': DC-lines and transformers to connect the upcomming electrical-neighbours Belgium and Norway
-    Generation, loads and its timeseries in Belgium and Norway for scenario 'NEP 2035'
+    'BE_NO_NEP 2035': DC-lines and transformers to connect the upcoming
+    electrical neighbours Belgium and Norway
+    Generation, loads and their timeseries in Belgium and Norway for scenario
+    'NEP 2035'
 
-
-    Parameters
-    -----
-    network : The existing network container (e.g. scenario 'NEP 2035')
-    session : session-data
-    overlay_scn_name : Name of the additional scenario (WITHOUT 'extension_')
-    start_snapshot, end_snapshot: Simulation time
+    Parameters
+    -----------
+    network :
+        The existing network container (e.g. scenario 'NEP 2035')
+    session :
+        session-data
+    overlay_scn_name :
+        Name of the additional scenario (WITHOUT 'extension\_')
+    start_snapshot :
+    end_snapshot :
+        Simulation time
 
     Returns
-    ------
-    network : Network container including existing and additional network
+    -------
+    Network container including existing and additional network
     """
 
     if self.args["scn_extension"] is not None:
-
         if self.args["gridversion"] is None:
             ormcls_prefix = "EgoGridPfHvExtension"
         else:
@@ -823,25 +831,26 @@ def decommissioning(self, **kwargs):
     Currently, only lines can be decommissioned.
 
     All components of the decommissioning scenario need to be inserted in
-    the fitting 'model_draft.ego_grid_pf_hv_extension_' table.
-    The scn_name in the tables have to be labled with 'decommissioning_'
+    the fitting 'model_draft.ego_grid_pf_hv_extension\_' table.
+    The scn_name in the tables has to be labelled with 'decommissioning\_'
     + scn_name (e.g. 'decommissioning_nep2035').
 
-
     Parameters
-    -----
-    network : The existing network container (e.g. 
scenario 'NEP 2035') - session : session-data - overlay_scn_name : Name of the decommissioning scenario - + ----------- + network : + The existing network container (e.g. scenario 'NEP 2035') + session : + session-data + overlay_scn_name : + Name of the decommissioning scenario Returns ------ - network : Network container including decommissioning + Network container including decommissioning """ if self.args["scn_decommissioning"] is not None: - if self.args["gridversion"] == None: + if self.args["gridversion"] is None: ormclass = getattr( import_module("egoio.db_tables.model_draft"), "EgoGridPfHvExtensionLine", @@ -875,7 +884,7 @@ def decommissioning(self, **kwargs): self.network.lines.index == idx ] = self.network.lines.s_nom_min - ### Drop decommissioning-lines from existing network + # Drop decommissioning-lines from existing network self.network.lines = self.network.lines[ ~self.network.lines.index.isin(df_decommisionning.index) ] @@ -885,18 +894,20 @@ def distance(x0, x1, y0, y1): """ Function that calculates the square of the distance between two points. - Parameters - ----- - x0: x - coordinate of point 0 - x1: x - coordinate of point 1 - y0: y - coordinate of point 0 - y1: y - coordinate of point 1 - + --------- + x0 : + x - coordinate of point 0 + x1 : + x - coordinate of point 1 + y0 : + y - coordinate of point 0 + y1 : + y - coordinate of point 1 Returns - ------ - distance : float + -------- + distance : float square of distance """ @@ -909,20 +920,19 @@ def distance(x0, x1, y0, y1): def calc_nearest_point(bus1, network): """ - Function that finds the geographical nearest point in a network from a given bus. - + Function that finds the geographical nearest point in a network from a + given bus. Parameters - ----- - bus1: float + ----------- + bus1 : float id of bus - network: Pypsa network container + network : Pypsa network container network including the comparable buses - Returns - ------ - bus0 : float + ------- + bus0 : float bus_id of nearest point """ @@ -986,7 +996,9 @@ def add_ch4_h2_correspondence(self): """ - sql = f"""SELECT "bus_H2", "bus_CH4", scn_name FROM grid.egon_etrago_ch4_h2;""" + sql = """ + SELECT "bus_H2", "bus_CH4", scn_name FROM grid.egon_etrago_ch4_h2; + """ table = pd.read_sql(sql, self.engine) diff --git a/etrago/tools/network.py b/etrago/tools/network.py index ebefc8db7..8bd954326 100644 --- a/etrago/tools/network.py +++ b/etrago/tools/network.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2016-2018 Flensburg University of Applied Sciences, +# Copyright 2016-2023 Flensburg University of Applied Sciences, # Europa-Universität Flensburg, # Centre for Sustainable Energy Systems, # DLR-Institute for Networked Energy Systems @@ -23,12 +23,15 @@ """ import logging +import os -from egoio.tools import db from pypsa.components import Network from sqlalchemy.orm import sessionmaker import pandas as pd +if "READTHEDOCS" not in os.environ: + from egoio.tools import db + from etrago import __version__ from etrago.cluster.disaggregation import run_disaggregation from etrago.cluster.electrical import ehv_clustering, run_spatial_clustering @@ -86,6 +89,7 @@ get_args_setting, get_clustering_data, load_shedding, + manual_fixes_datamodel, set_branch_capacity, set_line_costs, set_q_foreign_loads, @@ -97,16 +101,26 @@ logger = logging.getLogger(__name__) +__copyright__ = ( + "Flensburg University of Applied Sciences, " + "Europa-Universität Flensburg, " + "Centre for Sustainable Energy Systems, " + "DLR-Institute for Networked Energy Systems" +) +__license__ 
= "GNU Affero General Public License Version 3 (AGPL-3.0)" +__author__ = """AmeliaNadal, CarlosEpia, ClaraBuettner, KathiEsterl, gnn, + fwitte, ulfmueller, pieterhexen""" + class Etrago: """ - Object containing pypsa.Network including the transmission grid, - input parameters and optimization results. + Object containing pypsa.Network including the transmission grid, not + electric sectors, input parameters and optimization results. Parameters ---------- args : dict - Dictionary including all inpu parameters. + Dictionary including all input parameters. csv_folder_name : string Name of folder from which to import CSVs of network data. name : string, default "" @@ -305,6 +319,8 @@ def __init__( adjust_CH4_gen_carriers = adjust_CH4_gen_carriers + manual_fixes_datamodel = manual_fixes_datamodel + def dc_lines(self): return self.filter_links_by_carrier("DC", like=False) @@ -331,7 +347,8 @@ def build_network_from_db(self): self.decommissioning() - self.add_ch4_h2_correspondence() + if "H2" in self.network.buses.carrier: + self.add_ch4_h2_correspondence() logger.info("Imported network from db") @@ -346,6 +363,8 @@ def adjust_network(self): """ + self.manual_fixes_datamodel() + self.geolocation_buses() self.load_shedding() diff --git a/etrago/tools/plot.py b/etrago/tools/plot.py index 6a227ab54..72197de70 100644 --- a/etrago/tools/plot.py +++ b/etrago/tools/plot.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2016-2018 Flensburg University of Applied Sciences, +# Copyright 2016-2023 Flensburg University of Applied Sciences, # Europa-Universität Flensburg, # Centre for Sustainable Energy Systems, # DLR-Institute for Networked Energy Systems @@ -26,12 +26,10 @@ import os from matplotlib import pyplot as plt -from pyproj import Proj, transform import matplotlib import matplotlib.patches as mpatches import numpy as np import pandas as pd -import tilemapbase cartopy_present = True try: @@ -42,13 +40,15 @@ except ImportError: cartopy_present = False from pypsa.plot import draw_map_cartopy -from shapely.geometry import LineString, MultiPoint, Point, Polygon -import geopandas as gpd logger = logging.getLogger(__name__) if "READTHEDOCS" not in os.environ: from geoalchemy2.shape import to_shape + import geopandas as gpd + from pyproj import Proj, transform + from shapely.geometry import LineString, MultiPoint, Point, Polygon + import tilemapbase __copyright__ = ( "Flensburg University of Applied Sciences, " @@ -57,7 +57,8 @@ "DLR-Institute for Networked Energy Systems" ) __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" -__author__ = "ulfmueller, MarlonSchlemminger, mariusves, lukasol" +__author__ = """ulfmueller, MarlonSchlemminger, mariusves, lukasol, ClaraBuettner, +CarlosEpia, pieterhexen, gnn, fwitte, lukasol, KathiEsterl, BartelsJ""" def set_epsg_network(network): @@ -117,6 +118,17 @@ def plot_osm(x, y, zoom, alpha=0.4): def coloring(): + """ + Return a dictionary with a color assign to each kind of carrier used in + etrago.network. This is used for plotting porpuses. + + Returns + ------- + colors : dict + Color for each kind of carrier. + + """ + colors = { "load": "red", "DC": "blue", @@ -180,6 +192,7 @@ def coloring(): "battery": "blue", "pumped_hydro": "indigo", "BEV charger": "indigo", + "BEV_charger": "indigo", } return colors @@ -209,9 +222,11 @@ def plot_line_loading_diff(networkA, networkB, timestep=0, osm=False): osm : bool or dict, e.g. 
{'x': [1,20], 'y': [47, 56], 'zoom' : 6}
         If not False, osm is set as background with the following settings
         as dict:
-        'x': array of two floats, x axis boundaries (lat)
-        'y': array of two floats, y axis boundaries (long)
-        'zoom' : resolution of osm
+
+        * 'x': array of two floats, x axis boundaries (lat)
+        * 'y': array of two floats, y axis boundaries (long)
+        * 'zoom' : resolution of osm
+
     """
     if osm != False:
         if set_epsg_network.counter == 0:
@@ -228,21 +243,26 @@ def shiftedColorMap(
     data with a negative min and positive max and you want the middle of
     the colormap's dynamic range to be at zero
 
-    Input
-    -----
-    cmap : The matplotlib colormap to be altered
-    start : Offset from lowest point in the colormap's range.
-        Defaults to 0.0 (no lower ofset). Should be between
-        0.0 and `midpoint`.
-    midpoint : The new center of the colormap. Defaults to
-        0.5 (no shift). Should be between 0.0 and 1.0. In
-        general, this should be 1 - vmax/(vmax + abs(vmin))
-        For example if your data range from -15.0 to +5.0 and
-        you want the center of the colormap at 0.0, `midpoint`
-        should be set to 1 - 5/(5 + 15)) or 0.75
-    stop : Offset from highets point in the colormap's range.
-        Defaults to 1.0 (no upper ofset). Should be between
-        `midpoint` and 1.0.
+    Parameters
+    -----------
+    cmap :
+        The matplotlib colormap to be altered
+    start :
+        Offset from lowest point in the colormap's range.
+        Defaults to 0.0 (no lower offset). Should be between
+        0.0 and `midpoint`.
+    midpoint :
+        The new center of the colormap. Defaults to
+        0.5 (no shift). Should be between 0.0 and 1.0. In
+        general, this should be 1 - vmax/(vmax + abs(vmin))
+        For example if your data range from -15.0 to +5.0 and
+        you want the center of the colormap at 0.0, `midpoint`
+        should be set to 1 - 5/(5 + 15) or 0.75
+    stop :
+        Offset from highest point in the colormap's range.
+        Defaults to 1.0 (no upper offset). Should be between
+        `midpoint` and 1.0.
+
     """
 
     cdict = {"red": [], "green": [], "blue": [], "alpha": []}
@@ -339,9 +359,10 @@ def network_expansion_diff(
     osm : bool or dict, e.g. 
{'x': [1,20], 'y': [47, 56], 'zoom' : 6} If not False, osm is set as background with the following settings as dict: - 'x': array of two floats, x axis boundaries (lat) - 'y': array of two floats, y axis boundaries (long) - 'zoom' : resolution of osm + + * 'x': array of two floats, x axis boundaries (lat) + * 'y': array of two floats, y axis boundaries (long) + * 'zoom' : resolution of osm """ if osm != False: @@ -412,6 +433,10 @@ def plot_residual_load(network): Parameters ---------- network : PyPSA network containter + + Returns + ------- + Plot """ renewables = network.generators[ @@ -564,13 +589,12 @@ def plot_gen_diff( """ Plot difference in generation between two networks grouped by carrier type - Parameters ---------- networkA : PyPSA network container with switches networkB : PyPSA network container without switches - leave_out_carriers : list of carriers to leave out (default to all small - carriers) + leave_out_carriers : + list of carriers to leave out (default to all small carriers) Returns ------- @@ -582,7 +606,7 @@ def gen_by_c(network): pd.concat( [ network.generators_t.p.mul( - etwork.snapshot_weightings, axis=0 + network.snapshot_weightings, axis=0 )[ network.generators[ network.generators.control != "Slack" @@ -629,7 +653,6 @@ def plot_voltage(network, boundaries=[], osm=False): """ Plot voltage at buses as hexbin - Parameters ---------- network : PyPSA network container @@ -637,9 +660,10 @@ def plot_voltage(network, boundaries=[], osm=False): osm : bool or dict, e.g. {'x': [1,20], 'y': [47, 56], 'zoom' : 6} If not False, osm is set as background with the following settings as dict: - 'x': array of two floats, x axis boundaries (lat) - 'y': array of two floats, y axis boundaries (long) - 'zoom' : resolution of osm + + * 'x': array of two floats, x axis boundaries (lat) + * 'y': array of two floats, y axis boundaries (long) + * 'zoom' : resolution of osm Returns ------- @@ -916,6 +940,10 @@ def gen_dist_diff( filename : str Specify filename If not given, figure will be show directly + + Returns + ------- + None. """ if techs is None: techs = networkA.generators.carrier.unique() @@ -1026,17 +1054,27 @@ def nodal_gen_dispatch( osm : bool or dict, e.g. {'x': [1,20], 'y': [47, 56], 'zoom' : 6} If not False, osm is set as background with the following settings as dict: - 'x': array of two floats, x axis boundaries (lat) - 'y': array of two floats, y axis boundaries (long) - 'zoom' : resolution of osm + + * 'x': array of two floats, x axis boundaries (lat) + * 'y': array of two floats, y axis boundaries (long) + * 'zoom' : resolution of osm + + Returns + ------- + None. """ if osm != False: if set_epsg_network.counter == 0: set_epsg_network(network) fig, ax = plot_osm(osm["x"], osm["y"], osm["zoom"]) + elif (osm == False) and cartopy_present: + fig, ax = plt.subplots( + subplot_kw={"projection": ccrs.PlateCarree()}, figsize=(5, 5) + ) + else: - fig, ax = plt.subplots(1, 1) + fig, ax = plt.subplots(figsize=(5, 5)) if techs: gens = network.generators[network.generators.carrier.isin(techs)] @@ -1099,10 +1137,9 @@ def nodal_gen_dispatch( } dispatch = dispatch.abs() subcolors = {"negative": "red", "positive": "green"} - import cartopy.crs as ccrs - fig, ax = plt.subplots(subplot_kw={"projection": ccrs.PlateCarree()}) network.plot( + geomap=(cartopy_present | osm), bus_sizes=dispatch * scaling, bus_colors=colors, line_widths=0.2, @@ -1190,6 +1227,10 @@ def storage_p_soc(network, mean="1H", filename=None): Defines over how many snapshots the p and soc values will averaged. 
filename : path to folder + Returns + ------- + None. + """ sbatt = network.storage_units.index[ @@ -1323,6 +1364,10 @@ def storage_soc_sorted(network, filename=None): filename : path to folder + Returns + ------- + None. + """ sbatt = network.storage_units.index[ (network.storage_units.p_nom_opt > 1) @@ -1639,30 +1684,28 @@ def plot_background_grid(network, ax): None. """ - - network.plot( - ax=ax, - line_colors="grey", - link_colors="grey", - bus_sizes=0, - line_widths=0.5, - link_widths=0.3, # 0.55, - geomap=True, - projection=ccrs.PlateCarree(), - color_geomap=True, - ) - - network.plot( - ax=ax, - line_colors="grey", - link_colors="grey", - bus_sizes=0, - line_widths=0.5, - link_widths=0.3, # 0.55, - geomap=True, - projection=ccrs.PlateCarree(), - color_geomap=True, - ) + if cartopy_present: + network.plot( + ax=ax, + line_colors="grey", + link_colors="grey", + bus_sizes=0, + line_widths=0.5, + link_widths=0.3, # 0.55, + geomap=True, + projection=ccrs.PlateCarree(), + color_geomap=True, + ) + else: + network.plot( + ax=ax, + line_colors="grey", + link_colors="grey", + bus_sizes=0, + line_widths=0.5, + link_widths=0.3, # 0.55, + geomap=False, + ) def demand_side_management(self, buses, snapshots, agg="5h", used=False): @@ -1681,7 +1724,7 @@ def demand_side_management(self, buses, snapshots, agg="5h", used=False): Returns ------- - potential : pandas.DataFrame + df : pandas.DataFrame Shifting potential (and usage) of power (MW) and energy (MWh) """ @@ -1761,7 +1804,7 @@ def bev_flexibility_potential( Returns ------- - potential : pandas.DataFrame + df : pandas.DataFrame Shifting potential (and usage) of power (MW) and energy (MWh) """ @@ -1868,7 +1911,7 @@ def heat_stores( Returns ------- - potential : pandas.DataFrame + df : pandas.DataFrame Shifting potential (and usage) of power (MW) and energy (MWh) """ @@ -1948,7 +1991,7 @@ def hydrogen_stores( Returns ------- - potential : pandas.DataFrame + df : pandas.DataFrame Shifting potential (and usage) of power (MW) and energy (MWh) """ @@ -2101,9 +2144,7 @@ def flexibility_usage( fig_e.savefig(pre_path + f"stored_e_{flexibility}") -def plot_carrier( - network, carrier_links=["AC"], carrier_buses=["AC"], cartopy=True -): +def plot_carrier(network, carrier_links=["AC"], carrier_buses=["AC"]): """ Parameters ---------- @@ -2126,7 +2167,7 @@ def plot_carrier( line_colors = "lightblue" # Set background - if cartopy == True: + if cartopy_present: plt.rcParams["figure.autolayout"] = True fig, ax = plt.subplots(subplot_kw={"projection": ccrs.PlateCarree()}) draw_map_cartopy(ax, color_geomap=True) @@ -2155,7 +2196,7 @@ def plot_carrier( title = "" network.plot( - geomap=True, + geomap=cartopy_present, bus_sizes=bus_sizes, link_widths=link_width, line_widths=line_widths, @@ -2196,35 +2237,39 @@ def plot_grid( ): """Function that plots etrago.network and results for lines and buses - - Parameters ---------- line_colors : str Set static line color or attribute to plot e.g. 'expansion_abs' Current options: - 'line_loading': mean line loading in p.u. in selected timesteps - 'v_nom': nominal voltage of lines - 'expansion_abs': absolute network expansion in MVA - 'expansion_rel': network expansion in p.u. of existing capacity - 'q_flow_max': maximal reactive flows + + * 'line_loading': mean line loading in p.u. in selected timesteps + * 'v_nom': nominal voltage of lines + * 'expansion_abs': absolute network expansion in MVA + * 'expansion_rel': network expansion in p.u. 
of existing capacity + * 'q_flow_max': maximal reactive flows + bus_sizes : float, optional Size of buses. The default is 0.001. bus_colors : str, optional Set static bus color or attribute to plot. The default is 'grey'. Current options: - 'nodal_production_balance': net producer/consumer in selected timeteps - 'storage_expansion': storage expansion per bus and technology - 'storage_distribution': installed storage units per bus - 'gen_dist': dispatch per carrier in selected timesteps + + * 'nodal_production_balance': net producer/consumer in selected timesteps + * 'storage_expansion': storage expansion per bus and technology + * 'storage_distribution': installed storage units per bus + * 'gen_dist': dispatch per carrier in selected timesteps + timesteps : array, optional Timesteps considered in time-dependent plots. The default is range(2). osm : bool or dict, e.g. {'x': [1,20], 'y': [47, 56], 'zoom' : 6} If not False, osm is set as background with the following settings as dict: - 'x': array of two floats, x axis boundaries (lat) - 'y': array of two floats, y axis boundaries (long) - 'zoom' : resolution of osm. The default is False. + + * 'x': array of two floats, x axis boundaries (lat) + * 'y': array of two floats, y axis boundaries (long) + * 'zoom' : resolution of osm. The default is False. + boundaries: array Set fixed boundaries of heatmap axis. The default is None. filename: str or None
@@ -2263,11 +2308,14 @@ def plot_grid( set_epsg_network(network) fig, ax = plot_osm(osm["x"], osm["y"], osm["zoom"]) - else: + elif (osm == False) and cartopy_present: fig, ax = plt.subplots( subplot_kw={"projection": ccrs.PlateCarree()}, figsize=(5, 5) ) + else: + fig, ax = plt.subplots(figsize=(5, 5)) + # Set line colors if line_colors == "line_loading": title = (
@@ -2374,21 +2422,36 @@ else: logger.warning("bus_color {} undefined".format(bus_colors)) - ll = network.plot( - line_colors=line_colors, - link_colors=link_colors, - line_cmap=plt.cm.jet, - link_cmap=plt.cm.jet, - bus_sizes=bus_sizes, - bus_colors=bus_colors, - line_widths=line_widths, - link_widths=0, # link_widths, - flow=flow, - title=title, - geomap=False, - projection=ccrs.PlateCarree(), - color_geomap=True, - ) + if cartopy_present: + ll = network.plot( + line_colors=line_colors, + link_colors=link_colors, + line_cmap=plt.cm.jet, + link_cmap=plt.cm.jet, + bus_sizes=bus_sizes, + bus_colors=bus_colors, + line_widths=line_widths, + link_widths=0, # link_widths, + flow=flow, + title=title, + geomap=False, + projection=ccrs.PlateCarree(), + color_geomap=True, + ) + else: + ll = network.plot( + line_colors=line_colors, + link_colors=link_colors, + line_cmap=plt.cm.jet, + link_cmap=plt.cm.jet, + bus_sizes=bus_sizes, + bus_colors=bus_colors, + line_widths=line_widths, + link_widths=0, # link_widths, + flow=flow, + title=title, + geomap=False, + ) # legends for bus sizes and colors if type(bus_sizes) != float:
@@ -2518,7 +2581,6 @@ def plot_clusters( self, carrier="AC", save_path=False, - cartopy=True, transmission_lines=False, gas_pipelines=False, ):
@@ -2539,12 +2601,11 @@ gas_pipelines : bool, optional The default is False. Define if the original gas pipelines are plotted or not. + Returns ------- None.
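A hedged usage sketch of the options documented above, assuming an already-solved Etrago object named etrago (argument names follow the docstring; the values are illustrative, not recommendations):

    etrago.plot_grid(
        line_colors="expansion_abs",      # absolute network expansion in MVA
        bus_colors="storage_expansion",   # storage expansion per bus and technology
        bus_sizes=0.001,
        timesteps=range(2),
        osm=False,  # or {'x': [1, 20], 'y': [47, 56], 'zoom': 6}
    )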
""" - # TODO: Make this function available for other carriers - # Create geometries new_geom = self.network.buses[ [ "carrier", @@ -2572,7 +2633,7 @@ def plot_clusters( ) # Set background - if cartopy == True: + if cartopy_present: plt.rcParams["figure.autolayout"] = True fig, ax = plt.subplots(subplot_kw={"projection": ccrs.PlateCarree()}) draw_map_cartopy(ax, color_geomap=True) diff --git a/etrago/tools/utilities.py b/etrago/tools/utilities.py index 36cedb51c..348c232a6 100755 --- a/etrago/tools/utilities.py +++ b/etrago/tools/utilities.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2016-2018 Flensburg University of Applied Sciences, +# Copyright 2016-2023 Flensburg University of Applied Sciences, # Europa-Universität Flensburg, # Centre for Sustainable Energy Systems, # DLR-Institute for Networked Energy Systems @@ -29,15 +29,17 @@ import math import os -from egoio.tools import db from pyomo.environ import Constraint, PositiveReals, Var -from shapely.geometry import LineString, Point -import geopandas as gpd import numpy as np import pandas as pd import pypsa import sqlalchemy.exc +if "READTHEDOCS" not in os.environ: + from egoio.tools import db + from shapely.geometry import Point + import geopandas as gpd + logger = logging.getLogger(__name__) @@ -48,10 +50,31 @@ "DLR-Institute for Networked Energy Systems" ) __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" -__author__ = "ulfmueller, s3pp, wolfbunke, mariusves, lukasol" +__author__ = """ulfmueller, s3pp, wolfbunke, mariusves, lukasol, ClaraBuettner, +CarlosEpia, gnn, pieterhexen, fwitte, KathiEsterl, MGlauer, birgits, + AmeliaNadal, MarlonSchlemminger, wheitkoetter, jankaeh""" def filter_links_by_carrier(self, carrier, like=True): + """ + + Parameters + ---------- + carrier : list or str + name of the carriers of interest. Can be a list of carriers or single + sting. + like : bool, optional + When like set to True, the links with carrier names that includes the + carrier(s) supplied are returned, Not just exact matches. + The default is True. + + Returns + ------- + df : pandas.DataFrame object + Dataframe that contains just links with carriers of the types given + in the argument carrier. 
+ + """ if isinstance(carrier, str): if like: df = self.network.links[ @@ -422,21 +445,21 @@ def foreign_links(self): self.geolocation_buses() -def set_q_national_loads(self, cos_phi=1): +def set_q_national_loads(self, cos_phi): """ Set q component of national loads based on the p component and cos_phi Parameters ---------- - network : :class:`pypsa.Network - Overall container of PyPSA + network : :class:`pypsa.Network` + Overall container of PyPSA cos_phi : float - Choose ration of active and reactive power of foreign loads + Choose ration of active and reactive power of foreign loads Returns ------- - network : :class:`pypsa.Network - Overall container of PyPSA + network : :class:`pypsa.Network` + Overall container of PyPSA """ network = self.network @@ -445,41 +468,45 @@ def set_q_national_loads(self, cos_phi=1): (network.buses.country == "DE") & (network.buses.carrier == "AC") ] - network.loads_t["q_set"].loc[ + # Calculate q national loads based on p and cos_phi + new_q_loads = network.loads_t["p_set"].loc[ :, network.loads.index[ - network.loads.bus.astype(str).isin(national_buses.index) - ].astype(int), - ] = network.loads_t["p_set"].loc[ - :, - network.loads.index[ - network.loads.bus.astype(str).isin(national_buses.index) + (network.loads.bus.astype(str).isin(national_buses.index)) + & (network.loads.carrier.astype(str) == "AC") ], - ] * math.tan( - math.acos(cos_phi) + ] * math.tan(math.acos(cos_phi)) + + # insert the calculated q in loads_t. Only loads without previous + # assignment are affected + network.loads_t.q_set = pd.merge( + network.loads_t.q_set, + new_q_loads, + how="inner", + right_index=True, + left_index=True, + suffixes=("", "delete_"), ) - # To avoid a problem when the index of the load is the weather year, - # the column names were temporarily set to `int` and changed back to - # `str`. 
- network.loads_t["q_set"].columns = network.loads_t["q_set"].columns.astype( str + network.loads_t.q_set.drop( [i for i in network.loads_t.q_set.columns if "delete" in i], axis=1, inplace=True, ) -def set_q_foreign_loads(self, cos_phi=1): +def set_q_foreign_loads(self, cos_phi): """Set reactive power timeseries of loads in neighbouring countries Parameters ---------- - network : :class:`pypsa.Network - Overall container of PyPSA + etrago : :class:`etrago.Etrago + Transmission grid object cos_phi: float Choose ratio of active and reactive power of foreign loads Returns ------- - network : :class:`pypsa.Network - Overall container of PyPSA + None """ network = self.network
@@ -555,7 +582,7 @@ def connected_transformer(network, busids): return network.transformers[mask] -def load_shedding(self, **kwargs): +def load_shedding(self, temporal_disaggregation=False, **kwargs): """Implement load shedding in existing network to identify feasibility problems Parameters ---------- marginal_cost : int Marginal costs for load shedding p_nom : int Installed capacity of load shedding generator + Returns ------- """ + logger.debug("Shedding the load.") if self.args["load_shedding"]: + if temporal_disaggregation: + network = self.network_tsa + else: + network = self.network + marginal_cost_def = 10000 # network.generators.marginal_cost.max()*2 - p_nom_def = self.network.loads_t.p_set.max().max() + p_nom_def = network.loads_t.p_set.max().max() marginal_cost = kwargs.get("marginal_cost", marginal_cost_def) p_nom = kwargs.get("p_nom", p_nom_def) - self.network.add("Carrier", "load") + network.add("Carrier", "load") start = ( - self.network.generators.index.to_series() + network.generators.index.to_series() .str.rsplit(" ") .str[0] .astype(int)
@@ -592,14 +626,14 @@ if start != start: start = 0 - index = list(range(start, start + len(self.network.buses.index))) - self.network.import_components_from_dataframe( + index = list(range(start, start + len(network.buses.index))) + network.import_components_from_dataframe( pd.DataFrame( dict( marginal_cost=marginal_cost, p_nom=p_nom, carrier="load shedding", - bus=self.network.buses.index, + bus=network.buses.index, ), index=index, ),
@@ -615,7 +649,9 @@ def data_manipulation_sh(network): network : :class:`pypsa.Network Overall container of PyPSA - + Returns + ------- + None """ from geoalchemy2.shape import from_shape, to_shape
@@ -705,7 +741,7 @@ def export_to_csv(self, path): Parameters ---------- - network : :class:`pypsa.Network + network : :class:`pypsa.Network` Overall container of PyPSA args: dict Contains calculation settings of appl.py
@@ -713,6 +749,10 @@ Choose path for csv-files. Specify `""`, `False` or `None` to not do anything. + Returns + ------- + None + """ if not path: pass
@@ -762,6 +802,22 @@ def loading_minimization(network, snapshots): + """ + Minimizes the summed absolute loading of the passive branches + of the model.
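The number1/number2 pair this docstring refers to is the standard LP linearisation of an absolute value: with n1 - n2 == flow and both variables non-negative, minimising n1 + n2 yields |flow|. A minimal Pyomo sketch of the trick on one fixed value (illustrative, not eTraGo code; actually solving it requires any LP solver):

    from pyomo.environ import (
        ConcreteModel, Constraint, NonNegativeReals, Objective, Var, minimize,
    )

    m = ConcreteModel()
    flow = -7.0  # example branch flow, fixed for illustration
    m.n1 = Var(within=NonNegativeReals)
    m.n2 = Var(within=NonNegativeReals)
    m.split = Constraint(expr=m.n1 - m.n2 == flow)
    m.obj = Objective(expr=m.n1 + m.n2, sense=minimize)
    # An LP solver returns n1 = 0, n2 = 7, objective 7 == abs(flow).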
+ + Parameters + ---------- + network : :class:`pypsa.Network` + Overall container of PyPSA + snapshots : 'pandas.core.indexes.datetimes.DatetimeIndex' + snapshots to perform the minimization + + Returns + ------- + None + + """ network.model.number1 = Var( network.model.passive_branch_p_index, within=PositiveReals )
@@ -769,10 +825,10 @@ network.model.passive_branch_p_index, within=PositiveReals ) - def cRule(model, c, l, t): + def cRule(model, c, l0, t): return ( - model.number1[c, l, t] - model.number2[c, l, t] - == model.passive_branch_p[c, l, t] + model.number1[c, l0, t] - model.number2[c, l0, t] + == model.passive_branch_p[c, l0, t] ) network.model.cRule = Constraint(
@@ -786,6 +842,30 @@ def _make_consense(component, attr): + """ + Returns a function `consense` that will be used to generate a consensus + value for the attribute `attr` of the given `component`. This consensus + value is derived from the input DataFrame `x`. If all values in the + DataFrame are equal, the consensus value will be that common value. + If all values are missing (NaN), the consensus value will be NaN. + Otherwise, an assertion error will be raised. + + Parameters + ---------- + component : str + specify the name of the component being clustered. + attr : str + specify the name of the attribute of the component being considered. + + Returns + ------- + function + A function that takes a DataFrame as input and returns a single value + as output when all the elements of the component attribute are the + same. + + """ + def consense(x): v = x.iat[0] assert (x == v).all() or x.isnull().all(), (
@@ -798,6 +878,22 @@ def consense(x): def _normed(s): + """ + Given a pandas Series `s`, normalizes the series by dividing each element + by the sum of the series. If the sum of the series is zero, returns 1.0 to + avoid division by zero errors. + + Parameters + ---------- + s : pandas.Series + A pandas Series. + + Returns + ------- + pandas.Series + A normalized pandas Series. + + """ tot = s.sum() if tot == 0: return 1.0
@@ -805,7 +901,27 @@ -def agg_series_lines(l, network): +def agg_series_lines(l0, network): + """ + Given a pandas DataFrame `l0` containing information about lines in a + network and a network object, aggregates the data in `l0` for all its + attributes. Returns a pandas Series containing the aggregated data. + + + Parameters + ---------- + l0: pandas.DataFrame + contains information about lines in a network. + network : :class:`pypsa.Network` + Overall container of PyPSA + + Returns + ------- + pandas.Series + A pandas Series containing aggregated data for the lines in the + network.
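_normed is what makes aggregations such as agg_series_lines capacity-weighted; a small self-contained example of the weighting applied to capital_cost (toy numbers):

    import pandas as pd

    def _normed(s):
        # mirror of the helper above: normalize, or 1.0 if the sum is zero
        tot = s.sum()
        return s / tot if tot != 0 else 1.0

    s_nom = pd.Series([100.0, 300.0])       # line ratings in MVA
    capital_cost = pd.Series([40.0, 20.0])  # cost per MVA
    weighted = (_normed(s_nom) * capital_cost).sum()
    # weights are 0.25 and 0.75, so weighted == 25.0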
+ + """ attrs = network.components["Line"]["attrs"] columns = set( attrs.index[attrs.static & attrs.status.str.startswith("Input")] @@ -832,27 +948,27 @@ def agg_series_lines(l, network): ) } - Line = l["Line"].iloc[0] + Line = l0["Line"].iloc[0] data = dict( - r=l["r"].sum(), - x=l["x"].sum(), - g=1.0 / (1.0 / l["g"]).sum(), - b=1.0 / (1.0 / l["b"]).sum(), - terrain_factor=l["terrain_factor"].mean(), - s_max_pu=(l["s_max_pu"] * _normed(l["s_nom"])).sum(), - s_nom=l["s_nom"].iloc[0], - s_nom_min=l["s_nom_min"].max(), - s_nom_max=l["s_nom_max"].min(), - s_nom_extendable=l["s_nom_extendable"].any(), - num_parallel=l["num_parallel"].max(), - capital_cost=(_normed(l["s_nom"]) * l["capital_cost"]).sum(), - length=l["length"].sum(), - v_ang_min=l["v_ang_min"].max(), - v_ang_max=l["v_ang_max"].min(), + r=l0["r"].sum(), + x=l0["x"].sum(), + g=1.0 / (1.0 / l0["g"]).sum(), + b=1.0 / (1.0 / l0["b"]).sum(), + terrain_factor=l0["terrain_factor"].mean(), + s_max_pu=(l0["s_max_pu"] * _normed(l0["s_nom"])).sum(), + s_nom=l0["s_nom"].iloc[0], + s_nom_min=l0["s_nom_min"].max(), + s_nom_max=l0["s_nom_max"].min(), + s_nom_extendable=l0["s_nom_extendable"].any(), + num_parallel=l0["num_parallel"].max(), + capital_cost=(_normed(l0["s_nom"]) * l0["capital_cost"]).sum(), + length=l0["length"].sum(), + v_ang_min=l0["v_ang_min"].max(), + v_ang_max=l0["v_ang_max"].min(), ) - data.update((f, consense[f](l[f])) for f in columns.difference(data)) + data.update((f, consense[f](l0[f])) for f in columns.difference(data)) return pd.Series( - data, index=[f for f in l.columns if f in columns], name=Line + data, index=[f for f in l0.columns if f in columns], name=Line ) @@ -872,7 +988,7 @@ def group_parallel_lines(network): """ - def agg_parallel_lines(l): + def agg_parallel_lines(l0): attrs = network.components["Line"]["attrs"] columns = set( attrs.index[attrs.static & attrs.status.str.startswith("Input")] @@ -904,27 +1020,27 @@ def agg_parallel_lines(l): } data = dict( - Line=l["Line"].iloc[0], - r=1.0 / (1.0 / l["r"]).sum(), - x=1.0 / (1.0 / l["x"]).sum(), - g=l["g"].sum(), - b=l["b"].sum(), - terrain_factor=l["terrain_factor"].mean(), - s_max_pu=(l["s_max_pu"] * _normed(l["s_nom"])).sum(), - s_nom=l["s_nom"].sum(), - s_nom_min=l["s_nom_min"].sum(), - s_nom_max=l["s_nom_max"].sum(), - s_nom_extendable=l["s_nom_extendable"].any(), - num_parallel=l["num_parallel"].sum(), - capital_cost=(_normed(l["s_nom"]) * l["capital_cost"]).sum(), - length=l["length"].mean(), - sub_network=consense["sub_network"](l["sub_network"]), - v_ang_min=l["v_ang_min"].max(), - v_ang_max=l["v_ang_max"].min(), - geom=l["geom"].iloc[0], + Line=l0["Line"].iloc[0], + r=1.0 / (1.0 / l0["r"]).sum(), + x=1.0 / (1.0 / l0["x"]).sum(), + g=l0["g"].sum(), + b=l0["b"].sum(), + terrain_factor=l0["terrain_factor"].mean(), + s_max_pu=(l0["s_max_pu"] * _normed(l0["s_nom"])).sum(), + s_nom=l0["s_nom"].sum(), + s_nom_min=l0["s_nom_min"].sum(), + s_nom_max=l0["s_nom_max"].sum(), + s_nom_extendable=l0["s_nom_extendable"].any(), + num_parallel=l0["num_parallel"].sum(), + capital_cost=(_normed(l0["s_nom"]) * l0["capital_cost"]).sum(), + length=l0["length"].mean(), + sub_network=consense["sub_network"](l0["sub_network"]), + v_ang_min=l0["v_ang_min"].max(), + v_ang_max=l0["v_ang_max"].min(), + geom=l0["geom"].iloc[0], ) - data.update((f, consense[f](l[f])) for f in columns.difference(data)) - return pd.Series(data, index=[f for f in l.columns if f in columns]) + data.update((f, consense[f](l0[f])) for f in columns.difference(data)) + return pd.Series(data, index=[f for f 
in l0.columns if f in columns]) # Make bus0 always the greatest to identify repeated lines lines_2 = network.lines.copy()
@@ -956,6 +1072,7 @@ Parameters ---------- etrago : etrago object + Returns ------- None.
@@ -974,7 +1091,16 @@ def delete_buses(delete_buses, network): (network.storage_units.bus.isin(drop_buses)) ].to_list() network.storage_units.drop(drop_storage_units, inplace=True) - return (network.buses, network.lines, network.storage_units) + drop_generators = network.generators.index[ + (network.generators.bus.isin(drop_buses)) + ].to_list() + network.generators.drop(drop_generators, inplace=True) + return ( + network.buses, + network.lines, + network.storage_units, + network.generators, + ) def count_lines(lines): buses_in_lines = lines[["bus0", "bus1"]].drop_duplicates()
@@ -1008,7 +1134,9 @@ def count(bus): b_trafo = pd.concat( [network.transformers.bus0, network.transformers.bus1] ).unique() - b_gen = network.generators.bus.unique() + b_gen = network.generators[ + network.generators.carrier != "load shedding" + ].bus.unique() b_load = network.loads.bus.unique() b_store = network.stores[network.stores.e_nom > 0].bus.unique() b_store_unit = network.storage_units[
@@ -1047,10 +1175,10 @@ def count(bus): delete_bus = [] for bus in ac_buses[ac_buses["n_lines"] == 2].index: - l = lines_cap[(lines_cap.bus0 == bus) | (lines_cap.bus1 == bus)][ + l0 = lines_cap[(lines_cap.bus0 == bus) | (lines_cap.bus1 == bus)][ "s_nom" ].unique() - if len(l) != 1: + if len(l0) != 1: delete_bus.append(bus) ac_buses.drop(delete_bus, inplace=True)
@@ -1120,36 +1248,27 @@ def count(bus): new_lines_df = pd.DataFrame(columns=lines.columns).rename_axis("Lines") - for l in new_lines.index: + for l0 in new_lines.index: lines_group = ( - lines[lines.index.isin(new_lines.at[l, "lines"])] + lines[lines.index.isin(new_lines.at[l0, "lines"])] .copy() .reset_index() ) l_new = agg_series_lines(lines_group, network) - l_new["bus0"] = new_lines.at[l, "bus0"] - l_new["bus1"] = new_lines.at[l, "bus1"] - l_new["geom"] = LineString( [ ( network.buses.at[l_new["bus0"], "x"], network.buses.at[l_new["bus0"], "y"], ), ( network.buses.at[l_new["bus1"], "x"], network.buses.at[l_new["bus1"], "y"], ), ] ) + l_new["bus0"] = new_lines.at[l0, "bus0"] + l_new["bus1"] = new_lines.at[l0, "bus1"] new_lines_df["s_nom_extendable"] = new_lines_df[ "s_nom_extendable" ].astype(bool) new_lines_df.loc[l_new.name] = l_new # Delete all the dispensable buses - (network.buses, network.lines, network.storage_units) = delete_buses( - ac_buses, network - ) + ( + network.buses, + network.lines, + network.storage_units, + network.generators, + ) = delete_buses(ac_buses, network) # exclude from the new lines the ones connected to deleted buses new_lines_df = new_lines_df[
@@ -1159,6 +1278,15 @@ etrago.network.lines = pd.concat([etrago.network.lines, new_lines_df]) + # Drop s_max_pu timeseries for deleted lines + etrago.network.lines_t.s_max_pu = ( + etrago.network.lines_t.s_max_pu.transpose()[ + etrago.network.lines_t.s_max_pu.columns.isin( + etrago.network.lines.index + ) + ].transpose() + ) + return
@@ -1169,20 +1297,24 @@ ---------- network : :class:`pypsa.Network Overall container of PyPSA - args: dict containing settings from appl.py - cost110 : capital costs per km for 110kV lines and cables - default: 230€/MVA/km, source: costs for extra circuit in - dena Verteilnetzstudie, p.
146) - cost220 : capital costs per km for 220kV lines and cables - default: 280€/MVA/km, source: costs for extra circuit in - NEP 2025, capactity from most used 220 kV lines in model - cost380 : capital costs per km for 380kV lines and cables - default: 85€/MVA/km, source: costs for extra circuit in - NEP 2025, capactity from most used 380 kV lines in NEP - costDC : capital costs per km for DC-lines - default: 375€/MVA/km, source: costs for DC transmission line - in NEP 2035 - ------- + args: dict + containing settings from appl.py + cost110 : + capital costs per km for 110kV lines and cables + default: 230€/MVA/km, source: costs for extra circuit in + dena Verteilnetzstudie, p. 146) + cost220 : + capital costs per km for 220kV lines and cables + default: 290€/MVA/km, source: costs for extra circuit in + NEP 2025, capacity from most used 220 kV lines in model + cost380 : + capital costs per km for 380kV lines and cables + default: 85€/MVA/km, source: costs for extra circuit in + NEP 2025, capacity from most used 380 kV lines in NEP + costDC : + capital costs per km for DC-lines + default: 375€/MVA/km, source: costs for DC transmission line + in NEP 2035 """
@@ -1221,13 +1353,16 @@ def set_trafo_costs( ---------- network : :class:`pypsa.Network Overall container of PyPSA - cost110_220 : capital costs for 110/220kV transformer - default: 7500€/MVA, source: costs for extra trafo in - dena Verteilnetzstudie, p. 146; S of trafo used in osmTGmod - cost110_380 : capital costs for 110/380kV transformer - default: 17333€/MVA, source: NEP 2025 - cost220_380 : capital costs for 220/380kV transformer - default: 14166€/MVA, source: NEP 2025 + cost110_220 : + capital costs for 110/220kV transformer + default: 7500€/MVA, source: costs for extra trafo in + dena Verteilnetzstudie, p. 146; S of trafo used in osmTGmod + cost110_380 : + capital costs for 110/380kV transformer + default: 17333€/MVA, source: NEP 2025 + cost220_380 : + capital costs for 220/380kV transformer + default: 14166€/MVA, source: NEP 2025 """
@@ -1261,10 +1396,6 @@ def set_trafo_costs( def add_missing_components(self): - # Munich - # TODO: Manually adds lines between hard-coded buses. Has to be - # changed for the next dataversion and should be moved to data - # processing """ Add a missing transformer at Heizkraftwerk Nord in Munich and a missing transformer in Stuttgart.
@@ -1281,6 +1412,11 @@ """ + # Munich + # TODO: Manually adds lines between hard-coded buses. Has to be + # changed for the next dataversion and should be moved to data + # processing + """ "https://www.swm.de/privatkunden/unternehmen/energieerzeugung" + "/heizkraftwerke.html?utm_medium=301"
@@ -1510,7 +1646,6 @@ def convert_capital_costs(self): ---------- etrago : :class:`etrago.Etrago Transmission grid object - ------- """
@@ -1687,7 +1822,22 @@ def get_args_setting(self, jsonpath="scenario_setting.json"): def merge_dicts(dict1, dict2): - """Return a new dictionary by merging two dictionaries recursively.""" + """ + Return a new dictionary by merging two dictionaries recursively. + + Parameters + ---------- + dict1 : dict + dictionary 1. + dict2 : dict + dictionary 2. + + Returns + ------- + result : dict + Union of dict1 and dict2 + + """ result = deepcopy(dict1)
@@ -1708,6 +1858,11 @@ def get_clustering_data(self, path): ---------- path : str Name of folder from which to import CSVs of network data.
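A sketch of what the per-km defaults above amount to per line, assuming (as the function does) that capital costs scale linearly with length; the toy frame is illustrative, while the real function writes to network.lines.capital_cost:

    import pandas as pd

    lines = pd.DataFrame(
        {"v_nom": [110, 220, 380], "length": [30.0, 75.0, 120.0]}  # km
    )
    cost_per_km = {110: 230, 220: 290, 380: 85}  # €/MVA/km, defaults above
    lines["capital_cost"] = lines.v_nom.map(cost_per_km) * lines.length
    # e.g. a 120 km 380 kV line gets 120 * 85 = 10200 €/MVA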
+ + Returns + ------- + None + """ if (self.args["network_clustering_ehv"]) | (
@@ -1759,10 +1914,8 @@ def set_random_noise(self, sigma=0.01): ---------- etrago : :class:`etrago.Etrago Transmission grid object - seed: int seed number, needed to reproduce results - sigma: float Default: 0.01 standard deviation, small values reduce impact on dispatch
@@ -1819,7 +1972,6 @@ def set_line_country_tags(network): network : :class:`pypsa.Network Overall container of PyPSA - """ transborder_lines_0 = network.lines[
@@ -1875,6 +2027,20 @@ def crossborder_capacity_tyndp2020(): + """ + This function downloads and extracts a scenario datafile for the TYNDP 2020 + (Ten-Year Network Development Plan), reads a specific sheet from the file, + filters it based on certain criteria, and then calculates the minimum + cross-border capacities for a list of European countries. The minimum + cross-border capacity is the minimum of the export and import capacities + between two countries. + + Returns + ------- + dict + Dictionary with crossborder capacities. + + """ from urllib.request import urlretrieve import zipfile
@@ -2179,7 +2345,14 @@ def check_args(etrago): """ - names = ["eGon2035", "eGon100RE", "eGon2035_lowflex", "eGon100RE_lowflex"] + names = [ + "eGon2035", + "eGon100RE", + "eGon2035_lowflex", + "eGon100RE_lowflex", + "status2019", + ] + assert ( etrago.args["scn_name"] in names ), f"'scn_name' has to be in {names} but is {etrago.args['scn_name']}."
@@ -2318,12 +2491,16 @@ def drop_sectors(self, drop_carriers): """ - Manually drop secors from eTraGo network, used for debugging + Manually drop sectors from the network. + Makes sure the network can be calculated without the dropped sectors. Parameters ---------- drop_carriers : array List of sectors that will be dropped. + e.g. ['dsm', 'CH4', 'H2_saltcavern', 'H2_grid', + 'central_heat', 'rural_heat', 'central_heat_store', + 'rural_heat_store', 'Li ion'] means everything but AC Returns -------
@@ -2331,6 +2508,43 @@ """ + if self.scenario.scn_name == "eGon2035": + if "CH4" in drop_carriers: + # create gas generators from links + # in order to not lose them when dropping non-electric carriers + gas_to_add = ["central_gas_CHP", "industrial_gas_CHP", "OCGT"] + gen = self.network.generators + + for i in gas_to_add: + gen_empty = gen.drop(gen.index) + gen_empty.bus = self.network.links[ + self.network.links.carrier == i + ].bus1 + gen_empty.p_nom = ( + self.network.links[self.network.links.carrier == i].p_nom + * self.network.links[ + self.network.links.carrier == i + ].efficiency + ) + gen_empty.marginal_cost = ( + self.network.links[ + self.network.links.carrier == i + ].marginal_cost + + 35.851 + ) # add fuel costs (source: NEP) + gen_empty.efficiency = 1 + gen_empty.carrier = i + gen_empty.scn_name = "eGon2035" + gen_empty.p_nom_extendable = False + gen_empty.sign = 1 + gen_empty.p_min_pu = 0 + gen_empty.p_max_pu = 1 + gen_empty.control = "PV" + gen_empty.fillna(0, inplace=True) + self.network.import_components_from_dataframe( + gen_empty, "Generator" + ) + self.network.mremove( "Bus", self.network.buses[
@@ -2363,14 +2577,18 @@ ].index, ) + logger.info("The following sectors are dropped: " + str(drop_carriers)) + def update_busmap(self, new_busmap): """ Update busmap after any clustering process + Parameters ---------- new_busmap : dictionary busmap used to cluster the network. + Returns ------- None.
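The CH4 branch above replaces each gas link by an equivalent electric generator; the arithmetic, spelled out on one toy link (values illustrative; 35.851 €/MWh is the NEP fuel-cost adder quoted in the diff):

    p_nom_link = 200.0        # MW of CH4 input capacity on the link
    efficiency = 0.6          # link conversion efficiency
    marginal_cost_link = 4.0  # variable cost on the link, €/MWh

    # the substitute generator sized on the electric side
    p_nom_gen = p_nom_link * efficiency            # 120 MW electric
    marginal_cost_gen = marginal_cost_link + 35.851  # fuel costs added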
@@ -2417,12 +2635,7 @@ FROM scenario.egon_scenario_parameters WHERE name = '{self.args["scn_name"]}';""" df = pd.read_sql(sql, engine) - # TODO: There might be a bug in here raising a `KeyError`. - # If you encounter it, that means you have live data - # to test against. Please do a `git blame` on these - # lines and follow the hints in the commit message to - # fix the bug. - marginal_cost = df["marginal_cost"] + marginal_cost = df["gas_parameters"][0]["marginal_cost"] except sqlalchemy.exc.ProgrammingError: marginal_cost = marginal_cost_def
@@ -2536,3 +2749,52 @@ def residual_load(network, sector="electricity"): ) return loads_per_bus - renewable_dispatch + + +def manual_fixes_datamodel(etrago): + """Apply temporary fixes to the data model until a new egon-data run + is available + + Parameters + ---------- + etrago : :class:`Etrago` + Overall container of Etrago + + Returns + ------- + None. + + """ + # Set line type + etrago.network.lines.type = "" + + # Set lifetime of storage_units, transformers and lines + etrago.network.storage_units.lifetime = 27.5 + etrago.network.transformers.lifetime = 40 + etrago.network.lines.lifetime = 40 + + # Set efficiencies of CHP + etrago.network.links.loc[ + etrago.network.links[ + etrago.network.links.carrier.str.contains("CHP") + ].index, + "efficiency", + ] = 0.43 + + # Enlarge gas boilers as backup heat supply + etrago.network.links.loc[ + etrago.network.links[ + etrago.network.links.carrier.str.contains("gas_boiler") + ].index, + "p_nom", + ] *= 1000 + + # Set p_max_pu for run of river and reservoir + etrago.network.generators.loc[ + etrago.network.generators[ + etrago.network.generators.carrier.isin( + ["run_of_river", "reservoir"] + ) + ].index, + "p_max_pu", + ] = 0.65
diff --git a/noxfile.py b/noxfile.py index 69cf698e1..7fabe5f7e 100644 --- a/noxfile.py +++ b/noxfile.py
@@ -1,3 +1,4 @@ +from pathlib import Path from pprint import pformat import nox
@@ -59,3 +60,12 @@ def build(session): session.install("twine") session.run("python", "setup.py", "bdist", "bdist_wheel") session.run("twine", "check", "dist/eTraGo*") + + +@nox.session(python=["3", "3.8", "3.9", "3.10", "3.11"]) +def install(session): + """Install the package.""" + setdefaults(session) + session.env["SKLEARN_ALLOW_DEPRECATED_SKLEARN_PACKAGE_INSTALL"] = "False" + session.run("python", "-mpip", "install", "--upgrade", "pip") + session.run("python", "-mpip", "install", *Path("dist").glob("*.whl"))
diff --git a/requirements-doc.txt b/requirements-doc.txt index abb50f991..383cf8367 100644 --- a/requirements-doc.txt +++ b/requirements-doc.txt
@@ -2,11 +2,11 @@ # Using single requirements for docs, see: # https://github.com/rtfd/readthedocs.org/issues/2070 sphinx_rtd_theme -pandas >= 0.25, <= 0.25 -pypsa >= 0.11.0, <= 0.11.0 -numpy == 1.16.2 -numpydoc == 0.7.0 +pypsa == 0.20.1 +numpydoc sqlalchemy geoalchemy2 matplotlib nbsphinx +saio +pyomo != 6.4.3 \ No newline at end of file
diff --git a/setup.py b/setup.py index 4ea9b7ab6..3d9203aa9 100755 --- a/setup.py +++ b/setup.py
@@ -14,7 +14,12 @@ __author__ = ( "ulfmueller, wolfbunke, BartelsJ, ClaraBuettner, gnn, " "simnh, lukasol, s3pp, MGlauer, kimvk, MarlonSchlemminger, " - "mariusves" + "mariusves", + "CarlosEpia", + "KathiEsterl", + "pieterhexen", + "fwitte", + "AmeliaNadal", )
@@ -46,11 +51,13 @@ def read(*names, **kwargs): "egoio == 0.4.7", "geoalchemy2 >= 0.3.0", "geopandas", + "loguru", "matplotlib >= 3.0.3", "oedialect", # PyPSA uses a deprecated import that errors with Pyomo 6.4.3.
- # Upstream has a fix but it's not yet released. - "pyomo != 6.4.3", + # Fix upper version limits for pyomo and pandas + # Related to problems with old pypsa version + "pandas < 2", + "pyomo>6.4, <6.6, !=6.4.3", "pypsa == 0.20.1", "rtree", "saio", @@ -59,10 +66,15 @@ def read(*names, **kwargs): "shapely", "sqlalchemy < 2", "tilemapbase == 0.4.5", - "tsam == 1.1.0", + "tsam", ], extras_require={ - "docs": ["sphinx >= 1.4", "sphinx_rtd_theme"], + "docs": [ + "nbsphinx", + "numpydoc", + "sphinx >= 1.4", + "sphinx_rtd_theme", + ], "gurobipy": ["gurobipy"], "cartopy": ["cartopy", "requests"], },