diff --git a/README.rst b/README.rst index 098db7417..3a5f9bb4b 100644 --- a/README.rst +++ b/README.rst @@ -19,7 +19,7 @@ eTraGo Optimization of flexibility options for transmission grids based on PyPSA A speciality in this context is that transmission grids are described by the -380, 220 and 110 kV in Germany. Conventionally the 110kV grid is part of the +380, 220 and 110 kV in Germany. Conventionally, the 110kV grid is part of the distribution grid. The integration of the transmission and 'upper' distribution grid is part of eTraGo. @@ -27,6 +27,8 @@ The focus of optimization are flexibility options with a special focus on energy storages. Grid expansion measures are not part of this tool and will be instead part of 'eGo' https://github.com/openego/eGo +eTraGo is documented on `readthedocs `_. + .. warning:: From now on eTraGo depends on a sector coupled data-model. This is not published on the oedb yet, the data has to be created using @@ -35,7 +37,6 @@ instead part of 'eGo' https://github.com/openego/eGo When you want to use eTraGo for optimizations, please use the latest release 0.8.0. - Installation ============ eTraGo is designed as a Python package therefore it is mandatory to have diff --git a/doc/api.rst b/doc/api.rst index 83b7a11a8..d65337975 100644 --- a/doc/api.rst +++ b/doc/api.rst @@ -1,8 +1,11 @@ -.. make doc-string generated documentation appear here +API +==== .. toctree:: :maxdepth: 7 :glob: :titlesonly: - API + api/etrago.cluster + api/etrago.tools + api/appl.rst diff --git a/doc/api/appl.rst b/doc/api/appl.rst new file mode 100644 index 000000000..fe45967b0 --- /dev/null +++ b/doc/api/appl.rst @@ -0,0 +1,7 @@ +etrago.appl module +------------------- + +.. automodule:: etrago.appl + :members: + :undoc-members: + :show-inheritance: \ No newline at end of file diff --git a/doc/api/etrago.cluster.rst b/doc/api/etrago.cluster.rst index 1e2fc858b..40fc8db3c 100644 --- a/doc/api/etrago.cluster.rst +++ b/doc/api/etrago.cluster.rst @@ -1,18 +1,31 @@ -etrago\.cluster package +etrago.cluster package ======================= -Submodules ----------- +etrago.cluster.disaggregation module +----------------------------------------- -etrago\.cluster\.networkclustering module +.. automodule:: etrago.cluster.disaggregation + :members: + :undoc-members: + :show-inheritance: + +etrago.cluster.electrical module ----------------------------------------- -.. automodule:: etrago.cluster.networkclustering +.. automodule:: etrago.cluster.electrical :members: :undoc-members: :show-inheritance: -etrago\.cluster\.snapshot module +etrago.cluster.gas module +----------------------------------------- + +.. automodule:: etrago.cluster.gas + :members: + :undoc-members: + :show-inheritance: + +etrago.cluster.snapshot module -------------------------------- .. automodule:: etrago.cluster.snapshot @@ -20,11 +33,10 @@ etrago\.cluster\.snapshot module :undoc-members: :show-inheritance: -Module contents ---------------- +etrago.cluster.spatial module +-------------------------------- -.. automodule:: etrago.cluster +.. automodule:: etrago.cluster.spatial :members: :undoc-members: :show-inheritance: - diff --git a/doc/api/etrago.rst b/doc/api/etrago.rst deleted file mode 100644 index a09ffc783..000000000 --- a/doc/api/etrago.rst +++ /dev/null @@ -1,30 +0,0 @@ -etrago package -============== - -Subpackages ------------ - -.. toctree:: - - etrago.cluster - etrago.tools - -Submodules ----------- - -etrago\.appl module -------------------- - -.. 
automodule:: etrago.appl - :members: - :undoc-members: - :show-inheritance: - - -Module contents ---------------- - -.. automodule:: etrago - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/api/etrago.tools.rst b/doc/api/etrago.tools.rst index 33586cbf9..2b5495d38 100644 --- a/doc/api/etrago.tools.rst +++ b/doc/api/etrago.tools.rst @@ -1,56 +1,66 @@ -etrago\.tools package +etrago.tools package ===================== -Submodules ----------- +etrago.tools.calc\_results module +---------------------------------- -etrago\.tools\.io module ------------------------- +.. automodule:: etrago.tools.calc_results + :members: + :undoc-members: + :show-inheritance: -.. automodule:: etrago.tools.io +etrago.tools.constraints module +---------------------------------- + +.. automodule:: etrago.tools.constraints :members: :undoc-members: :show-inheritance: +etrago.tools.execute module +---------------------------------- -etrago\.tools\.extendable module ------------------------- +.. automodule:: etrago.tools.execute + :members: + :undoc-members: + :show-inheritance: + +etrago.tools.extendable module +---------------------------------- .. automodule:: etrago.tools.extendable :members: :undoc-members: :show-inheritance: +etrago.tools.io module +------------------------ -etrago\.tools\.plot module --------------------------- - -.. automodule:: etrago.tools.plot +.. automodule:: etrago.tools.io :members: :undoc-members: :show-inheritance: -etrago\.tools\.snapshot\_clustering module ------------------------------------------- +etrago.tools.network module +----------------------------- -.. automodule:: etrago.tools.snapshot_clustering +.. automodule:: etrago.tools.network :members: :undoc-members: :show-inheritance: -etrago\.tools\.utilities module -------------------------------- +etrago.tools.plot module +--------------------------- -.. automodule:: etrago.tools.utilities +.. automodule:: etrago.tools.plot :members: :undoc-members: :show-inheritance: +etrago.tools.utilities module +------------------------------- -Module contents ---------------- - -.. automodule:: etrago.tools +.. automodule:: etrago.tools.utilities :members: :undoc-members: :show-inheritance: diff --git a/doc/api/modules.rst b/doc/api/modules.rst deleted file mode 100644 index fda8d67db..000000000 --- a/doc/api/modules.rst +++ /dev/null @@ -1,7 +0,0 @@ -etrago -====== - -.. toctree:: - :maxdepth: 4 - - etrago diff --git a/doc/conf.py b/doc/conf.py index 313e191cb..5fc0bd671 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -122,7 +122,7 @@ # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -language = None +language = "English" # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: diff --git a/doc/theoretical_background.rst b/doc/theoretical_background.rst index 03bb9eb43..6df61b821 100644 --- a/doc/theoretical_background.rst +++ b/doc/theoretical_background.rst @@ -78,7 +78,7 @@ As shown in the figure above, several options to store energy are part of the mo Miscellaneous Features --------- +---------------------- Several features were developed to enhance the functionality of eTraGo. 
diff --git a/doc/whatsnew/v0_5_1.rst b/doc/whatsnew/v0_5_1.rst index ac1f83a9f..ad37004f1 100644 --- a/doc/whatsnew/v0_5_1.rst +++ b/doc/whatsnew/v0_5_1.rst @@ -1,5 +1,5 @@ Release 0.5.1 (February 01, 2018) -++++++++++++++++++++++++++++++++ +++++++++++++++++++++++++++++++++++ eTraGo works with ego.io 0.3.0 diff --git a/doc/whatsnew/v0_6_1.rst b/doc/whatsnew/v0_6_1.rst index 5b9ec69d5..630828a3b 100644 --- a/doc/whatsnew/v0_6_1.rst +++ b/doc/whatsnew/v0_6_1.rst @@ -1,5 +1,5 @@ Release 0.6.1 (Juli 18, 2018) -++++++++++++++++++++++++++++ +++++++++++++++++++++++++++++++ eTraGo works with pypi and is suitable for eGo 0.2.0 Added features diff --git a/doc/whatsnew/v0_7_0.rst b/doc/whatsnew/v0_7_0.rst index e690bd5e2..abedaaf9d 100644 --- a/doc/whatsnew/v0_7_0.rst +++ b/doc/whatsnew/v0_7_0.rst @@ -1,9 +1,9 @@ Release 0.7.0 (September 6, 2018) -++++++++++++++++++++++++++++ +++++++++++++++++++++++++++++++++++ eTraGo is able to produce feasible non-linear power flows based on optimization results and allows the disaggregation of clustered results to original spatial complexities. Added features --------------- +--------------- * The pf_post_lopf function was improved. Due to changes in the data set now the non-linear power flow (pf) creates feasible solutions. If network optimization is turned on, a second lopf which regards the updated reactances and optimizes only dispatch is performed before the pf is executed. * The disaggregation method was included. When using a network clustering method to reduce the spatial complexity of the given network, a disaggregation method can be used afterwards to distribute the nodal results (generation and storage timeseries) to the original complexity. The method 'disaggregation': 'uniform' can be used as an interface functionality for distribution grid planning tools like eDisGo. diff --git a/doc/whatsnew/v0_7_1.rst b/doc/whatsnew/v0_7_1.rst index 53eebb24c..d7e692677 100644 --- a/doc/whatsnew/v0_7_1.rst +++ b/doc/whatsnew/v0_7_1.rst @@ -1,5 +1,5 @@ Release 0.7.1 (October 25, 2018) -++++++++++++++++++++++++++++ +++++++++++++++++++++++++++++++++ A minor release adding new options for additional constraints, modelling assumptions and plotting. Added features diff --git a/doc/whatsnew/v0_7_2.rst b/doc/whatsnew/v0_7_2.rst index 58ee28ca6..c972f76bc 100644 --- a/doc/whatsnew/v0_7_2.rst +++ b/doc/whatsnew/v0_7_2.rst @@ -1,5 +1,5 @@ Release 0.7.2 (Juni 15, 2020) -++++++++++++++++++++++++++++ +++++++++++++++++++++++++++++++ A minor release adding the following features. Added features diff --git a/doc/whatsnew/v0_8_0.rst b/doc/whatsnew/v0_8_0.rst index 420ce9e08..b4f28df3e 100644 --- a/doc/whatsnew/v0_8_0.rst +++ b/doc/whatsnew/v0_8_0.rst @@ -1,5 +1,5 @@ Release 0.8.0 (April 8, 2021) -++++++++++++++++++++++++++++ +++++++++++++++++++++++++++++++ eTraGo has now a more object-oriented programming design. Added features diff --git a/etrago/appl.py b/etrago/appl.py index 03e1f0319..cdf3f3ad8 100644 --- a/etrago/appl.py +++ b/etrago/appl.py @@ -136,8 +136,7 @@ }, }, }, - "network_clustering_ehv": False, # clustering of HV buses to EHV buses. - "disaggregation": "uniform", # None, 'mini' or 'uniform' + "disaggregation": None, # None or 'uniform' # Temporal Complexity: "snapshot_clustering": { "active": False, # choose if clustering is activated @@ -167,282 +166,457 @@ def run_etrago(args, json_path): """Function to conduct optimization considering the following arguments. 
- Parameters - ---------- - - db : str - ``'oedb'``, - Name of Database session setting stored in *config.ini* of *.egoio* - - gridversion : NoneType or str - ``'v0.4.6'``, - Name of the data version number of oedb: state ``'None'`` for - model_draft (sand-box) or an explicit version number - (e.g. 'v0.4.6') for the grid schema. - - method : dict - {'type': 'lopf', 'n_iter': 4, 'pyomo': True}, - Choose 'lopf' for 'type'. In case of extendable lines, several lopfs - have to be performed. Choose either 'n_init' and a fixed number of - iterations or 'thershold' and a threashold of the objective function as - abort criteria. - Set 'pyomo' to False for big optimization problems, currently only - possible when solver is 'gurobi'. - - pf_post_lopf : dict - {'active': True, 'add_foreign_lopf': True, 'q_allocation': 'p_nom'}, - Option to run a non-linear power flow (pf) directly after the - linear optimal power flow (and thus the dispatch) has finished. - If foreign lines are modeled as DC-links (see foreign_lines), results - of the lopf can be added by setting 'add_foreign_lopf'. - Reactive power can be distributed either by 'p_nom' or 'p'. - - start_snapshot : int - 1, - Start hour of the scenario year to be calculated. - - end_snapshot : int - 2, - End hour of the scenario year to be calculated. - If snapshot clusterung is used, the selected snapshots should cover - the number of periods / segments. - - solver : str - 'glpk', - Choose your preferred solver. Current options: 'glpk' (open-source), - 'cplex' or 'gurobi'. - - solver_options : dict - Choose settings of solver to improve simulation time and result. - Options are described in documentation of chosen solver. - - model_formulation : str - 'kirchoff', - Choose formulation of pyomo-model. - Current options: angles, cycles, kirchhoff, ptdf + Parameters + ---------- + db : str + Name of Database session setting stored in *config.ini* of *.egoio*, e.g. + ``'oedb'``. + gridversion : None or str + Name of the data version number of oedb: state ``'None'`` for + model_draft (sand-box) or an explicit version number + (e.g. 'v0.4.6') for the grid schema. + method : dict + Choose method and settings for optimization. + The provided dictionary can have the following entries: + + * "lopf" : str + Type of optimization, currently only "lopf". Default: "lopf". + * "n_iter" : int + In case of extendable lines, several LOPFs have to be performed. + You can either set "n_iter" and specify a fixed number of iterations + or set "threshold" and specify a threshold of the objective function as + abort criteria of the iterative optimization. + Default: 4. + * "threshold" : int + In case of extendable lines, several LOPFs have to be performed. + You can either set "n_iter" and specify a fixed number of iterations + or set "threshold" and specify a threshold of the objective function as + abort criteria of the iterative optimization. Per default, "n_iter" of 4 + is set. + * "pyomo" : bool + Set to True, if pyomo is used for model building. + Set to False for big optimization problems - currently only + possible when solver is "gurobi". + + pf_post_lopf : dict + Settings for option to run a non-linear power flow (PF) directly after the + linear optimal power flow (LOPF), and thus the dispatch optimisation, has + finished. + The provided dictionary can have the following entries: + + * "active" : bool + If True, a PF is performed after the LOPF. Default: True. 
+ * "add_foreign_lopf" : bool + If foreign lines are modeled as DC-links (see parameter `foreign_lines`), + results of the LOPF can be added by setting "add_foreign_lopf" to True. + Default: True. + * "q_allocation" : bool + Allocate reactive power to all generators at the same bus either by "p_nom" + or "p". + Default: "p_nom". + + start_snapshot : int + Start hour of the scenario year to be calculated. Default: 1. + end_snapshot : int + End hour of the scenario year to be calculated. If snapshot clustering is used + (see parameter `snapshot_clustering`), the selected snapshots should cover + the number of periods / segments. Default: 2. + solver : str + Choose your preferred solver. Current options: "glpk" (open-source), + "cplex" or "gurobi". Default: "gurobi". + solver_options : dict + Choose settings of solver to improve simulation time and result. + Options are described in documentation of chosen solver. Per default, the + following dictionary is set: + + { + "BarConvTol": 1.0e-5, + "FeasibilityTol": 1.0e-5, + "method": 2, + "crossover": 0, + "logFile": "solver_etrago.log", + "threads": 4, + } + Make sure to reset or adapt these settings when using another solver! Otherwise, + you may run into errors. + model_formulation : str + Choose formulation of pyomo-model. + Current options are: "angles", "cycles", "kirchhoff", "ptdf". + "angels" works best for small networks, while "kirchhoff" works best for larger + networks. + Default: "kirchhoff". scn_name : str - 'eGon2035', Choose your scenario. Currently, there are two different - scenarios: 'eGon2035', 'eGon100RE'. + scenarios: "eGon2035", "eGon100RE". Default: "eGon2035". + scn_extension : None or str + This option does currently not work! - scn_extension : NoneType or str - None, Choose extension-scenarios which will be added to the existing network container. Data of the extension scenarios are located in extension-tables (e.g. model_draft.ego_grid_pf_hv_extension_bus) - with the prefix 'extension_'. - Currently there are three overlay networks: - 'nep2035_confirmed' includes all planed new lines confirmed by the - Bundesnetzagentur - 'nep2035_b2' includes all new lines planned by the - Netzentwicklungsplan 2025 in scenario 2035 B2 - 'BE_NO_NEP 2035' includes planned lines to Belgium and Norway and - adds BE and NO as electrical neighbours - - scn_decommissioning : NoneType or str - None, - Choose an extra scenario which includes lines you want to decommise + with the prefix 'extension\_'. + There are three overlay networks: + + * 'nep2035_confirmed' includes all planed new lines confirmed by the + Bundesnetzagentur + * 'nep2035_b2' includes all new lines planned by the + Netzentwicklungsplan 2025 in scenario 2035 B2 + * 'BE_NO_NEP 2035' includes planned lines to Belgium and Norway and + adds BE and NO as electrical neighbours + + Default: None. + scn_decommissioning : NoneType or str + This option does currently not work! + + Choose an extra scenario which includes lines you want to decommission from the existing network. Data of the decommissioning scenarios are located in extension-tables (e.g. model_draft.ego_grid_pf_hv_extension_bus) with the prefix - 'decommissioning_'. + 'decommissioning\_'. 
Currently, there are two decommissioning_scenarios which are linked to extension-scenarios: - 'nep2035_confirmed' includes all lines that will be replaced in - confirmed projects - 'nep2035_b2' includes all lines that will be replaced in - NEP-scenario 2035 B2 + * 'nep2035_confirmed' includes all lines that will be replaced in + confirmed projects + * 'nep2035_b2' includes all lines that will be replaced in + NEP-scenario 2035 B2 + + Default: None. lpfile : bool or str - False, State if and where you want to save pyomo's lp file. Options: - False or '/path/tofile.lp' - + False or '/path/tofile.lp'. Default: False. csv_export : bool or str - False, - State if and where you want to save results as csv files.Options: - False or '/path/tofolder'. + State if and where you want to save results as csv files. Options: + False or '/path/tofolder'. Default: False. extendable : dict - {'extendable_components': ['as_in_db'], - 'upper_bounds_grid': { - 'grid_max_D': None, - 'grid_max_abs_D': { - '380':{'i':1020, 'wires':4, 'circuits':4}, - '220':{'i':1020, 'wires':4, 'circuits':4}, - '110':{'i':1020, 'wires':4, 'circuits':2}, - 'dc':0}, - 'grid_max_foreign': 4, - 'grid_max_abs_foreign': None}}, - ['network', 'storages'], Choose components you want to optimize and set upper bounds for grid expansion. - The list 'extendable_components' defines a set of components to optimize. - Settings can be added in /tools/extendable.py. - The most important possibilities: - 'as_in_db': leaves everything as it is defined in the data coming - from the database - 'network': set all lines, links and transformers in electrical - grid extendable - 'german_network': set lines and transformers in German electrical - grid extendable - 'foreign_network': set foreign lines and transformers in electrical - grid extendable - 'transformers': set all transformers extendable - 'storages' / 'stores': allow to install extendable storages - (unlimited in size) at each grid node in order to meet - the flexibility demand. - Upper bounds for electrical grid expansion can be defined for lines in - Germany relative to the existing capacity using 'grid_max_D'. - Alternatively, absolute maximum capacities between two electrical buses - can be defined per voltage level using 'grid_max_abs_D'. - Upper bounds for bordercrossing electrical lines can be defined accrodingly - using 'grid_max_foreign' or 'grid_max_abs_foreign'. + The provided dictionary can have the following entries: + + * "extendable_components" : list(str) + The list defines a set of components to optimize. + Settings can be added in /tools/extendable.py. + The most important possibilities: + + * 'as_in_db' + leaves everything as it is defined in the data coming from the database + * 'network' + set all lines, links and transformers in electrical grid extendable + * 'german_network' + set lines and transformers in German electrical grid extendable + * 'foreign_network' + set foreign lines and transformers in electrical grid extendable + * 'transformers' + set all transformers extendable + * 'storages' / 'stores' + allow to install extendable storages (unlimited in size) at each grid + node in order to meet the flexibility demand + + Default: "as_in_db". + + * "upper_bounds_grid" : dict + Dictionary can have the following entries: + + * 'grid_max_D' + Upper bounds for electrical grid expansion can be defined for lines in + Germany relative to the existing capacity. Alternatively, + 'grid_max_abs_D' can be used. Per default, this is set to None and + 'grid_max_abs_D' is set. 
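+            As an illustration only (the value is an example, not a recommended
+            setting), 'grid_max_D': 3 would allow German lines to be expanded
+            to at most three times their existing capacity.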
+ + * 'grid_max_abs_D' + Upper bounds for electrical grid expansion can be defined for lines in + Germany as absolute maximum capacities between two electrical buses + per voltage level. Per default the following dictionary is set: + + { + "380": {"i": 1020, "wires": 4, "circuits": 4}, + "220": {"i": 1020, "wires": 4, "circuits": 4}, + "110": {"i": 1020, "wires": 4, "circuits": 2}, + "dc": 0, + } + * 'grid_max_foreign' + Upper bounds for border-crossing electrical lines can be defined + relative to the existing capacity. Alternatively, 'grid_max_abs_foreign' + can be set. + Default: 4. + * 'grid_max_abs_foreign' + Upper bounds for border-crossing electrical lines can be defined equally + to 'grid_max_abs_D' as absolute capacity per voltage level. + Default: None. generator_noise : bool or int State if you want to apply a small random noise to the marginal costs of each generator in order to prevent an optima plateau. To reproduce - a noise, choose the same integer (seed number). - - extra_functionality : dict - None, + a noise, choose the same integer (seed number). Default: 789456. + extra_functionality : dict or None Choose extra functionalities and their parameters. Settings can be added in /tools/constraints.py. Current options are: - 'max_line_ext': float - Maximal share of network extension in p.u. - 'min_renewable_share': float - Minimal share of renewable generation in p.u. - 'cross_border_flow': array of two floats - Limit AC cross-border-flows between Germany and its neigbouring - countries, set values in MWh for all snapshots, e.g. [-x, y] - (with x Import, y Export, positiv: export from Germany) - 'cross_border_flows_per_country': dict of cntr and array of floats - Limit AC cross-border-flows between Germany and its neigbouring - countries, set values in in MWh for each country, e.g. [-x, y] - (with x Import, y Export, positiv: export from Germany) - 'capacity_factor': dict of arrays - Limit overall energy production for each carrier, - set upper/lower limit in p.u. - 'capacity_factor_per_gen': dict of arrays - Limit overall energy production for each generator by carrier, - set upper/lower limit in p.u. - 'capacity_factor_per_cntr': dict of dict of arrays - Limit overall energy production country-wise for each carrier, - set upper/lower limit in p.u. - 'capacity_factor_per_gen_cntr': dict of dict of arrays - Limit overall energy production country-wise for each generator - by carrier, set upper/lower limit in p.u. + + * 'max_line_ext' : float + Maximal share of network extension in p.u. + * 'min_renewable_share' : float + Minimal share of renewable generation in p.u. + * 'cross_border_flow' : array of two floats + Limit AC cross-border-flows between Germany and its neighbouring + countries. Set values in MWh for all snapshots, e.g. [-x, y] + (with x Import, y Export, positive: export from Germany). + * 'cross_border_flows_per_country' : dict of cntr and array of floats + Limit AC cross-border-flows between Germany and its neighbouring + countries. Set values in MWh for each country, e.g. [-x, y] + (with x Import, y Export, positive: export from Germany). + * 'capacity_factor' : dict of arrays + Limit overall energy production for each carrier. + Set upper/lower limit in p.u. + * 'capacity_factor_per_gen' : dict of arrays + Limit overall energy production for each generator by carrier. + Set upper/lower limit in p.u. + * 'capacity_factor_per_cntr': dict of dict of arrays + Limit overall energy production country-wise for each carrier. + Set upper/lower limit in p.u. 
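+            An illustrative entry, assuming country codes as outer keys and
+            carrier names as inner keys (names and limits are examples only):
+
+                'capacity_factor_per_cntr': {'DE': {'wind_onshore': [0, 0.9]}}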
+ * 'capacity_factor_per_gen_cntr': dict of dict of arrays + Limit overall energy production country-wise for each generator + by carrier. Set upper/lower limit in p.u. network_clustering_ehv : bool - False, Choose if you want to cluster the full HV/EHV dataset down to only the EHV buses. In that case, all HV buses are assigned to their closest EHV substation, taking into account the shortest distance on power lines. - + Default: False. network_clustering : dict - {'active': True, method: 'kmedoids-dijkstra', 'n_clusters_AC': 30, - 'cluster_foreign_AC': False, method_gas: 'kmeans', 'n_clusters_gas': 30, - 'cluster_foreign_gas': False, 'k_elec_busmap': False, 'k_gas_busmap': False, - 'bus_weight_tocsv': None, 'bus_weight_fromcsv': None, - 'gas_weight_tocsv': None, 'gas_weight_fromcsv': None, ' - line_length_factor': 1, 'remove_stubs': False, 'use_reduced_coordinates': False, - 'random_state': 42, 'n_init': 10, 'max_iter': 100, 'tol': 1e-6}, - State if you want to apply a clustering of all network buses. - When ``'active'`` is set to True, the AC buses are clustered down to ``'n_clusters_AC'`` - and the gas buses are clustered down to``'n_clusters_gas'``. - If ``'cluster_foreign_AC'`` is set to False, the AC buses outside Germany are not - clustered and the buses inside Germany are clustered to complete ``'n_clusters'``. - ``'cluster_foreign_gas'`` controls whether gas buses of neighboring countries - are considered for clustering. - With ``'method'`` you can choose between two clustering methods: - k-means Clustering considering geopraphical locations of buses or - k-medoids Dijkstra Clustering considering electrical distances between buses. - With ``'k_elec_busmap'`` or ``'k_ch4_busmap'``you can load cluster coordinates - from a previous run for the respecting carrier. It should be considered that - once this option is set to True, the provided number of clusters will be ignored. - In genral, the weighting takes place considering generation and load at each node. - CH4 nodes also take non-transport capacities into account. - With the ``'bus_weight_tocsv'`` you can load an own weighting for the buses - or store the weighting with ``'bus_weight_fromcsv'``. - ``'line_length_factor'`` defines the factor to multiply the crow-flies distance - between new buses in order to get new line lengths. - Option ``'remove_stubs'`` reduces the overestimating of line meshes and - ``'use_reduced_coordinates'`` does not average cluster coordinates, but take - from busmap; both only within the k-means Clustering. - The other options affect the clustering algorithm in both methods and should only be - changed carefully, documentation and possible settings are described - in sklearn-package (sklearn/cluster/k_means_.py). + Choose if you want to apply a clustering of all network buses and specify + settings. + The provided dictionary can have the following entries: + + * "active" : bool + If True, the AC buses are clustered down to ``'n_clusters_AC'`` + and the gas buses are clustered down to``'n_clusters_gas'``. + Default: True. + * "method" : str + Method used for AC clustering. You can choose between two clustering + methods: + * "kmeans": considers geographical locations of buses + * "kmedoids-dijkstra": considers electrical distances between buses + + Default: "kmedoids-dijkstra". + * "n_clusters_AC" : int + Defines total number of resulting AC nodes including DE and foreign nodes + if `cluster_foreign_AC` is set to True, otherwise only DE nodes. + Default: 30. 
+ * "cluster_foreign_AC" : bool + If set to False, the AC buses outside Germany are not clustered + and the buses inside Germany are clustered to complete ``'n_clusters_AC'``. + If set to True, foreign AC buses are clustered as well and included + in number of clusters specified through ``'n_clusters_AC'``. + Default: False. + * "method_gas" : str + Method used for gas clustering. You can choose between two clustering + methods: + * "kmeans": considers geographical locations of buses + * "kmedoids-dijkstra": considers 'electrical' distances between buses + + Default: "kmedoids-dijkstra". + * "n_clusters_gas" : int + Defines total number of resulting CH4 nodes including DE and foreign nodes + if `cluster_foreign_gas` is set to True, otherwise only DE nodes. + Default: 17. + * "cluster_foreign_gas" : bool + If set to False, the gas buses outside Germany are not clustered + and the buses inside Germany are clustered to complete ``'n_clusters_gas'``. + If set to True, foreign gas buses are clustered as well and included + in number of clusters specified through ``'n_clusters_gas'``. + Default: False. + * "k_elec_busmap" : bool or str + With this option you can load cluster coordinates from a previous AC + clustering run. Options are False, in which case no previous busmap is + loaded, and path/to/busmap.csv in which case the busmap is loaded from the + specified file. Please note, that when a path is provided, the set number + of clusters will be ignored. + Default: False. + * "k_gas_busmap" : bool or str + With this option you can load cluster coordinates from a previous gas + clustering run. Options are False, in which case no previous busmap is + loaded, and path/to/busmap.csv in which case the busmap is loaded from the + specified file. Please note, that when a path is provided, the set number + of clusters will be ignored. + Default: False. + * "bus_weight_fromcsv" : None or str + In general, the weighting of AC buses takes place considering generation and + load at each node. With this option, you can load an own weighting for + the AC buses by providing a path to a csv file. If None, weighting is + conducted as described above. + Default: None. + * "bus_weight_tocsv" : None or str + Specifies whether to store the weighting of AC buses to csv or not. If + None, it is not stored. Otherwise, it is stored to the provided + path/to/bus_weight.csv. + Default: None. + * "gas_weight_fromcsv" : None or str + In general, the weighting of CH4 nodes takes place considering generation + and load at each node, as well as non-transport capacities at each node. + With this option, you can load an own weighting for the CH4 buses by + providing a path to a csv file. If None, weighting is conducted as + described above. + Default: None. + * "gas_weight_tocsv" : None or str + Specifies whether to store the weighting of gas buses to csv or not. If + None, it is not stored. Otherwise, it is stored to the provided + path/to/gas_bus_weight.csv. + Default: None. + * "line_length_factor" : float + Defines the factor to multiply the crow-flies distance + between new buses by, in order to get new line lengths. + Default: 1. + * "remove_stubs" : bool + If True, remove stubs before k-means clustering, which reduces the + overestimating of line meshes. + This option is only used within the k-means clustering. + Default: False. + * "use_reduced_coordinates" : bool + If True, do not average cluster coordinates, but take from busmap. + This option is only used within the k-means clustering. + Default: False. 
+ * "random_state" : int + Random state for replicability of clustering results. Default: 42. + * "n_init" : int + Affects clustering algorithm, only change when necessary! Documentation + and possible settings are described in sklearn-package + (sklearn/cluster/kmeans.py). + Default: 10. + * "max_iter" : int + Affects clustering algorithm, only change when necessary! Documentation + and possible settings are described in sklearn-package + (sklearn/cluster/kmeans.py). + Default: 100. + * "tol" : float + Affects clustering algorithm, only change when necessary! Documentation + and possible settings are described in sklearn-package + (sklearn/cluster/kmeans.py). + Default: 1e-6. + * "CPU_cores" : int or str + Number of cores used in clustering. Specify a concrete number or "max" to + use all cores available. + Default: 4. sector_coupled_clustering : dict - {'active': True, 'carrier_data': { - 'central_heat': {'base': ['CH4', 'AC'], 'strategy': "simultaneous"}, - } - State if you want to apply clustering of sector coupled carriers, such - as central_heat. The approach builds on already clustered - buses (AC and CH4) building clusters around the topology of those buses - with carrier ``'base'`` for all buses of a specific carrier, e.g. - ``'central_heat'``. With ``'strategy'`` it is possible to apply either - ``'consecutive'`` or ``'simultaneous'`` clustering. The consecutive - strategy clusters around the buses of the first carrier in the list. - The links to other buses are preserved. All buses, that have no - connection to the first carrier will then be clustered around the buses - of the second carrier in the list. The simultanous strategy looks for - links connecting the buses of the carriers in the list and aggregates - buses in case they have the same set of links connected. For example, - a heat bus connected to CH4 via gas boiler and to AC via heat pump will - only form a cluster with other buses, if these have the same links to - the same clusters of CH4 and AC. - + Choose if you want to apply a clustering of sector coupled carriers, such + as central_heat, and specify settings. + The provided dictionary can have the following entries: + + * "active" : bool + State if you want to apply clustering of sector coupled carriers, such + as central_heat. + Default: True. + * "carrier_data" : dict[str, dict] + Keys of the dictionary specify carriers affected by sector coupling, e.g. + "central_heat". The corresponding dictionaries specify, how the carrier + should be clustered. This dictionary must contain the following entries: + + * "base" : list(str) + The approach bases on already clustered buses (AC and CH4) and builds + clusters around the topology of those buses. With this option, you can + specify the carriers to use as base. See `strategy` for more + information. + * "strategy" : str + Strategy to use in the clustering. Possible options are: + + * "consecutive" + This strategy clusters around the buses of the first carrier in the + `'base'`` list. The links to other buses are preserved. All buses, + that have no connection to the first carrier will then be clustered + around the buses of the second carrier in the list. + * "simultaneous" + This strategy looks for links connecting the buses of the carriers + in the ``'base'`` list and aggregates buses in case they have the + same set of links connected. 
For example, a heat bus connected to + CH4 via gas boiler and to AC via heat pump will only form a cluster + with other buses, if these have the same links to + the same clusters of CH4 and AC. + + Per default, the following dictionary is set: + { + "central_heat": { + "base": ["CH4", "AC"], + "strategy": "simultaneous", + }, + } + + disaggregation : None or str + Specify None, in order to not perform a spatial disaggregation, or the + method you want to use for the spatial disaggregation. Only possible + option is currently "uniform". snapshot_clustering : dict - {'active': False, 'method':'typical_periods', 'extreme_periods': None, 'how': 'daily', - 'storage_constraints': 'soc_constraints', 'n_clusters': 5, 'n_segments': 5}, State if you want to apply a temporal clustering and run the optimization - only on a subset of snapshot periods. - You can choose between a method clustering to typical periods, e.g. days or weeks - or a method clustering to segments of adjacent hours. - With ``'extreme_periods'`` you define the consideration of timesteps with - extreme residual load while temporal aggregation. - With ``'how'``, ``'storage_constraints'`` and ``'n_clusters'`` you choose - the length of the periods, constraints considering the storages and the number - of clusters for the usage of the method typical_periods. - With ``'n_segments'`` you choose the number of segments for the usage of - the method segmentation. + only on a subset of snapshot periods, and specify settings. + The provided dictionary can have the following entries: + + * "active" : bool + Choose, if clustering is activated or not. If True, it is activated. + Default: False. + * "method" : str + Method to apply. Possible options are "typical_periods" and "segmentation". + Default: "segmentation". + * "extreme_periods" : None or str + Method used to consider extreme snapshots (time steps with extreme residual + load) in reduced timeseries. + Possible options are None, "append", "new_cluster_center", and + "replace_cluster_center". The default is None, in which case extreme periods + are not considered. + * "how" : str + Definition of period in case `method` is set to "typical_periods". + Possible options are "daily", "weekly", and "monthly". Default: "daily". + * "storage_constraints" : str + Defines additional constraints for storage units in case `method` is set to + "typical_periods". Possible options are "daily_bounds", "soc_constraints" + and "soc_constraints_simplified". Default: "soc_constraints". + * "n_clusters" : int + Number of clusters in case `method` is set to "typical_periods". Default: 5. + * "n_segments" : int + Number of segments in case `method` is set to "segmentation". Default: 5. skip_snapshots : bool or int - State if you only want to consider every n-th timestep - to reduce temporal complexity. - + State None, if you want to use all time steps, or provide a number, + if you only want to consider every n-th timestep to reduce + temporal complexity. Default: 5. temporal_disaggregation : dict - {'active': False, 'no_slices': 4}, - State if you to apply a second lopf considering dispatch only - to disaggregate the dispatch to the whole temporal complexity. + State if you want to apply a second LOPF considering dispatch only (no capacity + optimization) to disaggregate the dispatch to the whole temporal complexity. Be aware that a load shedding will be applied in this optimization. 
- With "no_slices" the optimization problem will be calculated as a given - number of subproblems while using some information on the state of charge - of storage units and stores from the former optimization (at the moment - only possible with skip_snapshots; extra_functionalities disregarded) - - branch_capacity_factor : dict - {'HV': 0.5, 'eHV' : 0.7}, + The provided dictionary must have the following entries: + + * "active" : bool + Choose, if temporal disaggregation is activated or not. If True, it is + activated. + Default: False. + * "no_slices" : int + With "no_slices" the optimization problem will be calculated as a given + number of sub-problems while using some information on the state of charge + of storage units and stores from the former optimization (at the moment + only possible with skip_snapshots and extra_functionalities are + disregarded). + Default: 8. + + branch_capacity_factor : dict[str, float] Add a factor here if you want to globally change line capacities - (e.g. to "consider" an (n-1) criterion or for debugging purposes). - + (e.g. to "consider" an (n-1) criterion or for debugging purposes). The factor + specifies the p.u. branch rating, e.g. 0.5 to allow half the line capacity. + Per default, it is set to {'HV': 0.5, 'eHV' : 0.7}. load_shedding : bool - False, State here if you want to make use of the load shedding function which is helpful when debugging: a very expensive generator is set to each - bus and meets the demand when regular - generators cannot do so. - + bus and meets the demand when regular generators cannot do so. + Default: False. foreign_lines : dict - {'carrier':'AC', 'capacity': 'osmTGmod}' Choose transmission technology and capacity of foreign lines: - 'carrier': 'AC' or 'DC' - 'capacity': 'osmTGmod', 'ntc_acer' or 'thermal_acer' + + * 'carrier': 'AC' or 'DC' + * 'capacity': 'osmTGmod', 'tyndp2020', 'ntc_acer' or 'thermal_acer' + + Per default, it is set to {'carrier':'AC', 'capacity': 'osmTGmod'}. comments : str - None + Can be any comment you wish to make. Returns ------- @@ -483,7 +657,7 @@ def run_etrago(args, json_path): # spatial disaggregation # needs to be adjusted for new sectors - etrago.disaggregation() + # etrago.disaggregation() # calculate central etrago results etrago.calc_results() diff --git a/etrago/cluster/disaggregation.py b/etrago/cluster/disaggregation.py index 054525fb5..13c811e1a 100644 --- a/etrago/cluster/disaggregation.py +++ b/etrago/cluster/disaggregation.py @@ -67,9 +67,9 @@ def construct_partial_network(self, cluster, scenario): :param cluster: Index of the cluster to disaggregate :return: Tuple of (partial_network, external_buses) where - `partial_network` is the result of the partial decomposition - and `external_buses` represent clusters adjacent to `cluster` that may - be influenced by calculations done on the partial network. + `partial_network` is the result of the partial decomposition + and `external_buses` represent clusters adjacent to `cluster` that + may be influenced by calculations done on the partial network. """ # Create an empty network @@ -233,16 +233,15 @@ def from_busmap(x): # series accordingly, but there must be bug somewhere because # using it, the time series in the clusters and sums of the # time series after disaggregation don't match up. 
- """ - series = getattr(self.original_network, bustype + '_t') - partial_series = type(series)() - for s in series: - partial_series[s] = series[s].loc[ - :, - getattr(partial_network, bustype) - .index.intersection(series[s].columns)] - setattr(partial_network, bustype + '_t', partial_series) - """ + + # series = getattr(self.original_network, bustype + '_t') + # partial_series = type(series)() + # for s in series: + # partial_series[s] = series[s].loc[ + # :, + # getattr(partial_network, bustype) + # .index.intersection(series[s].columns)] + # setattr(partial_network, bustype + '_t', partial_series) # Just a simple sanity check # TODO: Remove when sure that disaggregation will not go insane anymore @@ -267,6 +266,7 @@ def solve(self, scenario, solver): """ Decompose each cluster into separate units and try to optimize them separately + :param scenario: :param solver: Solver that may be used to optimize partial networks """ diff --git a/etrago/cluster/electrical.py b/etrago/cluster/electrical.py index 8a7bf7549..e46d6b0ff 100755 --- a/etrago/cluster/electrical.py +++ b/etrago/cluster/electrical.py @@ -112,9 +112,9 @@ def adjust_no_electric_network(etrago, busmap, cluster_met): ------- network : pypsa.Network Container for all network components of the clustered network. - busmap : dict Maps old bus_ids to new bus_ids including all sectors. + """ network = etrago.network # network2 is supposed to contain all the not electrical or gas buses diff --git a/etrago/cluster/gas.py b/etrago/cluster/gas.py index 4385b55a2..f0dfcbf42 100644 --- a/etrago/cluster/gas.py +++ b/etrago/cluster/gas.py @@ -311,11 +311,10 @@ def gas_postprocessing(etrago, busmap, medoid_idx=None): Returns ------- - Tuple containing: - network_gasgrid_c : pypsa.Network - A pypsa.Network containing the clustered network. - busmap : pd.Series - A Pandas Series mapping each bus to its corresponding cluster ID. + network_gasgrid_c : pypsa.Network + A pypsa.Network containing the clustered network. + busmap : pd.Series + A Pandas Series mapping each bus to its corresponding cluster ID. """ settings = etrago.args["network_clustering"] @@ -933,11 +932,12 @@ def run_spatial_clustering_gas(self): buses and links, and then performs postprocessing to finalize the changes. Returns - None + -------- + None Raises - ValueError: If the selected method is not "kmeans" or - "kmedoids-dijkstra". + ------- + ValueError: If the selected method is not "kmeans" or "kmedoids-dijkstra". """ if "CH4" in self.network.buses.carrier.values: diff --git a/etrago/cluster/snapshot.py b/etrago/cluster/snapshot.py index a5c48936e..fbbd1f68e 100644 --- a/etrago/cluster/snapshot.py +++ b/etrago/cluster/snapshot.py @@ -107,8 +107,7 @@ def tsam_cluster( Number of clusters for typical_periods. The default is 10. how : {'daily', 'weekly', 'monthly'}, optional Definition of period for typical_periods. The default is 'daily'. - extremePeriodMethod : {'None','append','new_cluster_center', - 'replace_cluster_center'}, optional + extremePeriodMethod : {'None','append','new_cluster_center', 'replace_cluster_center'}, optional Method to consider extreme snapshots in reduced timeseries. The default is 'None'. segmentation : boolean, optional Argument to activate segmenation method. The default is False. @@ -324,8 +323,7 @@ def segmentation_extreme_periods( Dataframe wit timeseries to cluster. timeseries : pd.DataFrame Information on segments after segmentation. 
- extremePeriodMethod : {'None','append','new_cluster_center', - 'replace_cluster_center'}, optional + extremePeriodMethod : {'None','append','new_cluster_center', 'replace_cluster_center'}, optional Method to consider extreme snapshots in reduced timeseries. The default is 'None'. Raises @@ -612,8 +610,7 @@ def run( Definition of period for typical_periods. The default is 'daily'. segmented_to : int, optional Number of segments for segmentation. The default is False. - extremePeriodMethod : {'None','append','new_cluster_center', - 'replace_cluster_center'}, optional + extremePeriodMethod : {'None','append','new_cluster_center', 'replace_cluster_center'}, optional Method to consider extreme snapshots in reduced timeseries. The default is 'None'. Returns diff --git a/etrago/cluster/spatial.py b/etrago/cluster/spatial.py index bbf176b36..43af10b9b 100755 --- a/etrago/cluster/spatial.py +++ b/etrago/cluster/spatial.py @@ -186,8 +186,10 @@ def group_links(network, with_time=True, carriers=None, cus_strateg=dict()): Returns ------- - new_df : links aggregated based on bus0, bus1 and carrier - new_pnl : links time series aggregated + new_df : + links aggregated based on bus0, bus1 and carrier + new_pnl : + links time series aggregated """ def normed_or_uniform(x): @@ -276,7 +278,7 @@ def graph_from_edges(edges): Returns ------- - M : :class:`networkx.classes.multigraph.MultiGraph + M : :class:`networkx.classes.multigraph.MultiGraph` """ M = nx.MultiGraph() @@ -301,7 +303,7 @@ def gen(nodes, n, graph): List of nodes in the system. n : int Number of desired multiprocessing units. - graph : :class:`networkx.classes.multigraph.MultiGraph + graph : :class:`networkx.classes.multigraph.MultiGraph` Graph representation of an electrical grid. Returns @@ -323,7 +325,7 @@ def shortest_path(paths, graph): ---------- paths : list List of pairs containing a source and a target node - graph : :class:`networkx.classes.multigraph.MultiGraph + graph : :class:`networkx.classes.multigraph.MultiGraph` Graph representation of an electrical grid. 
Returns diff --git a/etrago/tools/calc_results.py b/etrago/tools/calc_results.py index b12dd79bf..fca593a97 100755 --- a/etrago/tools/calc_results.py +++ b/etrago/tools/calc_results.py @@ -40,272 +40,277 @@ def _calc_storage_expansion(self): - """ Function that calulates storage expansion in MW + """ Function that calulates storage expansion in MW + Returns + ------- + float + storage expansion in MW - Returns - ------- - float - storage expansion in MW + """ + return (self.network.storage_units.p_nom_opt - + self.network.storage_units.p_nom_min + )[self.network.storage_units.p_nom_extendable]\ + .groupby(self.network.storage_units.carrier).sum() - """ - return (self.network.storage_units.p_nom_opt - - self.network.storage_units.p_nom_min - )[self.network.storage_units.p_nom_extendable]\ - .groupby(self.network.storage_units.carrier).sum() def _calc_store_expansion(self): - """ Function that calulates store expansion in MW + """ Function that calulates store expansion in MW - Returns - ------- - float - store expansion in MW + Returns + ------- + float + store expansion in MW + + """ + return (self.network.stores.e_nom_opt - + self.network.stores.e_nom_min + )[self.network.stores.e_nom_extendable] - """ - return (self.network.stores.e_nom_opt - - self.network.stores.e_nom_min - )[self.network.stores.e_nom_extendable] def _calc_sectorcoupling_link_expansion(self): - """ Function that calulates expansion of sectorcoupling links in MW + """ Function that calulates expansion of sectorcoupling links in MW + + Returns + ------- + float + link expansion in MW (differentiating between technologies) - Returns - ------- - float - link expansion in MW (differentiating between technologies) + """ + ext_links = self.network.links[self.network.links.p_nom_extendable] - """ - ext_links = self.network.links[self.network.links.p_nom_extendable] + links = [0, 0, 0, 0] - links = [0, 0, 0, 0] + l1 = ext_links[ext_links.carrier=='H2_to_power'] + l2 = ext_links[ext_links.carrier=='power_to_H2'] + l3 = ext_links[ext_links.carrier=='H2_to_CH4'] + l4 = ext_links[ext_links.carrier=='CH4_to_H2'] - l1 = ext_links[ext_links.carrier=='H2_to_power'] - l2 = ext_links[ext_links.carrier=='power_to_H2'] - l3 = ext_links[ext_links.carrier=='H2_to_CH4'] - l4 = ext_links[ext_links.carrier=='CH4_to_H2'] + links[0] = (l1.p_nom_opt-l1.p_nom_min).sum() + links[1] = (l2.p_nom_opt-l2.p_nom_min).sum() + links[2] = (l3.p_nom_opt-l3.p_nom_min).sum() + links[3] = (l4.p_nom_opt-l4.p_nom_min).sum() - links[0] = (l1.p_nom_opt-l1.p_nom_min).sum() - links[1] = (l2.p_nom_opt-l2.p_nom_min).sum() - links[2] = (l3.p_nom_opt-l3.p_nom_min).sum() - links[3] = (l4.p_nom_opt-l4.p_nom_min).sum() + return links - return links def _calc_network_expansion(self): - """ Function that calulates electrical network expansion in MW + """ Function that calulates electrical network expansion in MW - Returns - ------- - float - network expansion (AC lines and DC links) in MW + Returns + ------- + float + network expansion (AC lines and DC links) in MW - """ + """ - network = self.network + network = self.network - lines = (network.lines.s_nom_opt - - network.lines.s_nom_min - )[network.lines.s_nom_extendable] + lines = (network.lines.s_nom_opt - + network.lines.s_nom_min + )[network.lines.s_nom_extendable] - ext_links = network.links[network.links.p_nom_extendable] - ext_dc_lines = ext_links[ext_links.carrier=='DC'] + ext_links = network.links[network.links.p_nom_extendable] + ext_dc_lines = ext_links[ext_links.carrier=='DC'] - dc_links = (ext_dc_lines.p_nom_opt 
- - ext_dc_lines.p_nom_min) + dc_links = (ext_dc_lines.p_nom_opt - + ext_dc_lines.p_nom_min) + + return lines, dc_links - return lines, dc_links def calc_investment_cost(self): - """ Function that calulates overall annualized investment costs. + """ Function that calulates overall annualized investment costs. - Returns - ------- - network_costs : float - Investments in line expansion (AC+DC) - link_costs : float - Investments in sectorcoupling link expansion - stor_costs : float - Investments in storage and store expansion + Returns + ------- + network_costs : float + Investments in line expansion (AC+DC) + link_costs : float + Investments in sectorcoupling link expansion + stor_costs : float + Investments in storage and store expansion - """ - network = self.network + """ + network = self.network - # electrical grid: AC lines, DC lines + # electrical grid: AC lines, DC lines - network_costs = [0, 0] + network_costs = [0, 0] - ext_lines = network.lines[network.lines.s_nom_extendable] - ext_trafos = network.transformers[network.transformers.s_nom_extendable] - ext_links = network.links[network.links.p_nom_extendable] - ext_dc_lines = ext_links[ext_links.carrier=='DC'] + ext_lines = network.lines[network.lines.s_nom_extendable] + ext_trafos = network.transformers[network.transformers.s_nom_extendable] + ext_links = network.links[network.links.p_nom_extendable] + ext_dc_lines = ext_links[ext_links.carrier=='DC'] - if not ext_lines.empty: - network_costs[0] = ((ext_lines.s_nom_opt-ext_lines.s_nom_min - )*ext_lines.capital_cost).sum() + if not ext_lines.empty: + network_costs[0] = ((ext_lines.s_nom_opt-ext_lines.s_nom_min + )*ext_lines.capital_cost).sum() - if not ext_trafos.empty: - network_costs[0] = network_costs[0]+(( - ext_trafos.s_nom_opt-ext_trafos.s_nom - )*ext_trafos.capital_cost).sum() + if not ext_trafos.empty: + network_costs[0] = network_costs[0]+(( + ext_trafos.s_nom_opt-ext_trafos.s_nom + )*ext_trafos.capital_cost).sum() - if not ext_dc_lines.empty: - network_costs[1] = ((ext_dc_lines.p_nom_opt-ext_dc_lines.p_nom_min - )*ext_dc_lines.capital_cost).sum() + if not ext_dc_lines.empty: + network_costs[1] = ((ext_dc_lines.p_nom_opt-ext_dc_lines.p_nom_min + )*ext_dc_lines.capital_cost).sum() - # links in other sectors / coupling different sectors + # links in other sectors / coupling different sectors - link_costs = 0 + link_costs = 0 - ext_links = ext_links[ext_links.carrier!='DC'] + ext_links = ext_links[ext_links.carrier!='DC'] - if not ext_links.empty: - link_costs = ((ext_links.p_nom_opt-ext_links.p_nom_min - )*ext_links.capital_cost).sum() + if not ext_links.empty: + link_costs = ((ext_links.p_nom_opt-ext_links.p_nom_min + )*ext_links.capital_cost).sum() - # storage and store costs + # storage and store costs - sto_costs = [0, 0] + sto_costs = [0, 0] - ext_storage = network.storage_units[network.storage_units.p_nom_extendable] - ext_store = network.stores[network.stores.e_nom_extendable] + ext_storage = network.storage_units[network.storage_units.p_nom_extendable] + ext_store = network.stores[network.stores.e_nom_extendable] - if not ext_storage.empty: - sto_costs[0] = (ext_storage.p_nom_opt* - ext_storage.capital_cost).sum() + if not ext_storage.empty: + sto_costs[0] = (ext_storage.p_nom_opt* + ext_storage.capital_cost).sum() - if not ext_store.empty: - sto_costs[1] = (ext_store.e_nom_opt* - ext_store.capital_cost).sum() + if not ext_store.empty: + sto_costs[1] = (ext_store.e_nom_opt* + ext_store.capital_cost).sum() + + return network_costs, link_costs, sto_costs - return 
network_costs, link_costs, sto_costs def calc_marginal_cost(self): - """ - Function that caluclates and returns marginal costs, considering - generation and link and storage dispatch costs - - Returns - ------- - marginal_cost : float - Annual marginal cost in EUR - - """ - network = self.network - gen = network.generators_t.p.mul( - network.snapshot_weightings.objective, axis=0).sum(axis=0).mul( - network.generators.marginal_cost).sum() - link = abs(network.links_t.p0).mul( - network.snapshot_weightings.objective, axis=0).sum(axis=0).mul( - network.links.marginal_cost).sum() - stor = network.storage_units_t.p.mul( - network.snapshot_weightings.objective, axis=0).sum(axis=0).mul( - network.storage_units.marginal_cost).sum() - marginal_cost = gen + link + stor - return marginal_cost + """ + Function that caluclates and returns marginal costs, considering + generation and link and storage dispatch costs + + Returns + ------- + marginal_cost : float + Annual marginal cost in EUR + + """ + network = self.network + gen = network.generators_t.p.mul( + network.snapshot_weightings.objective, axis=0).sum(axis=0).mul( + network.generators.marginal_cost).sum() + link = abs(network.links_t.p0).mul( + network.snapshot_weightings.objective, axis=0).sum(axis=0).mul( + network.links.marginal_cost).sum() + stor = network.storage_units_t.p.mul( + network.snapshot_weightings.objective, axis=0).sum(axis=0).mul( + network.storage_units.marginal_cost).sum() + marginal_cost = gen + link + stor + return marginal_cost + def calc_etrago_results(self): - """ Function that calculates main results of grid optimization - and adds them to Etrago object. - - Returns - ------- - None. - - """ - self.results = pd.DataFrame(columns=['unit', 'value'], - index=['annual system costs', - 'annual investment costs', - 'annual marginal costs', - 'annual electrical grid investment costs', - 'annual ac grid investment costs', - 'annual dc grid investment costs', - 'annual links investment costs', - 'annual storage+store investment costs', - 'annual electrical storage investment costs', - 'annual store investment costs', - 'battery storage expansion', - 'store expansion', - 'H2 store expansion', - 'CH4 store expansion', - 'heat store expansion', - 'storage+store expansion', - 'fuel cell links expansion', - 'electrolyzer links expansion', - 'methanisation links expansion', - 'Steam Methane Reformation links expansion', - 'abs. electrical grid expansion', - 'abs. electrical ac grid expansion', - 'abs. electrical dc grid expansion', - 'rel. electrical ac grid expansion', - 'rel. electrical dc grid expansion']) - - self.results.unit[self.results.index.str.contains('cost')] = 'EUR/a' - self.results.unit[self.results.index.str.contains('expansion')] = 'MW' - self.results.unit[self.results.index.str.contains('rel.')] = 'p.u.' 
- - # system costs - - self.results.value['annual ac grid investment costs'] = calc_investment_cost(self)[0][0] - self.results.value['annual dc grid investment costs'] = calc_investment_cost(self)[0][1] - self.results.value['annual electrical grid investment costs'] = sum(calc_investment_cost(self)[0]) - - self.results.value['annual links investment costs'] = calc_investment_cost(self)[1] - - self.results.value['annual electrical storage investment costs'] = calc_investment_cost(self)[2][0] - self.results.value['annual store investment costs'] = calc_investment_cost(self)[2][1] - self.results.value['annual storage+store investment costs'] = sum(calc_investment_cost(self)[2]) - - - self.results.value['annual investment costs'] = \ - sum(calc_investment_cost(self)[0]) + calc_investment_cost(self)[1] + sum(calc_investment_cost(self)[2]) - self.results.value['annual marginal costs'] = calc_marginal_cost(self) - - self.results.value['annual system costs'] = \ - self.results.value['annual investment costs'] + self.results.value['annual marginal costs'] - - # storage and store expansion - - network = self.network - - if not network.storage_units[network.storage_units.p_nom_extendable].empty: - - self.results.value['battery storage expansion'] = \ - _calc_storage_expansion(self).sum() - - store = _calc_store_expansion(self) - self.results.value['store expansion'] = store.sum() - self.results.value['H2 store expansion'] = \ - store[store.index.str.contains('H2')].sum() - self.results.value['CH4 store expansion'] = \ - store[store.index.str.contains('CH4')].sum() - self.results.value['heat store expansion'] = \ - store[store.index.str.contains('heat')].sum() - - self.results.value['storage+store expansion'] = \ - self.results.value['battery storage expansion'] + self.results.value['store expansion'] - - # links expansion - - if not network.links[network.links.p_nom_extendable].empty: - - links = _calc_sectorcoupling_link_expansion(self) - self.results.value['fuel cell links expansion'] = links[0] - self.results.value['electrolyzer links expansion'] = links[1] - self.results.value['methanisation links expansion'] = links[2] - self.results.value['Steam Methane Reformation links expansion'] = links[3] - - # grid expansion - - if not network.lines[network.lines.s_nom_extendable].empty: - - self.results.value['abs. electrical ac grid expansion'] = _calc_network_expansion(self)[0].sum() - self.results.value['abs. electrical dc grid expansion'] = _calc_network_expansion(self)[1].sum() - self.results.value['abs. electrical grid expansion'] = self.results.value['abs. electrical ac grid expansion'] + self.results.value['abs. electrical dc grid expansion'] - - ext_lines = network.lines[network.lines.s_nom_extendable] - ext_links = network.links[network.links.p_nom_extendable] - ext_dc_lines = ext_links[ext_links.carrier=='DC'] - - self.results.value['rel. electrical ac grid expansion'] = (_calc_network_expansion(self)[0].sum() / ext_lines.s_nom.sum()) - self.results.value['rel. electrical dc grid expansion'] = (_calc_network_expansion(self)[1].sum() / ext_dc_lines.p_nom.sum()) \ No newline at end of file + """ Function that calculates main results of grid optimization + and adds them to Etrago object. + + Returns + ------- + None. 
+ + """ + self.results = pd.DataFrame(columns=['unit', 'value'], + index=['annual system costs', + 'annual investment costs', + 'annual marginal costs', + 'annual electrical grid investment costs', + 'annual ac grid investment costs', + 'annual dc grid investment costs', + 'annual links investment costs', + 'annual storage+store investment costs', + 'annual electrical storage investment costs', + 'annual store investment costs', + 'battery storage expansion', + 'store expansion', + 'H2 store expansion', + 'CH4 store expansion', + 'heat store expansion', + 'storage+store expansion', + 'fuel cell links expansion', + 'electrolyzer links expansion', + 'methanisation links expansion', + 'Steam Methane Reformation links expansion', + 'abs. electrical grid expansion', + 'abs. electrical ac grid expansion', + 'abs. electrical dc grid expansion', + 'rel. electrical ac grid expansion', + 'rel. electrical dc grid expansion']) + + self.results.unit[self.results.index.str.contains('cost')] = 'EUR/a' + self.results.unit[self.results.index.str.contains('expansion')] = 'MW' + self.results.unit[self.results.index.str.contains('rel.')] = 'p.u.' + + # system costs + + self.results.value['annual ac grid investment costs'] = calc_investment_cost(self)[0][0] + self.results.value['annual dc grid investment costs'] = calc_investment_cost(self)[0][1] + self.results.value['annual electrical grid investment costs'] = sum(calc_investment_cost(self)[0]) + + self.results.value['annual links investment costs'] = calc_investment_cost(self)[1] + + self.results.value['annual electrical storage investment costs'] = calc_investment_cost(self)[2][0] + self.results.value['annual store investment costs'] = calc_investment_cost(self)[2][1] + self.results.value['annual storage+store investment costs'] = sum(calc_investment_cost(self)[2]) + + + self.results.value['annual investment costs'] = \ + sum(calc_investment_cost(self)[0]) + calc_investment_cost(self)[1] + sum(calc_investment_cost(self)[2]) + self.results.value['annual marginal costs'] = calc_marginal_cost(self) + + self.results.value['annual system costs'] = \ + self.results.value['annual investment costs'] + self.results.value['annual marginal costs'] + + # storage and store expansion + + network = self.network + + if not network.storage_units[network.storage_units.p_nom_extendable].empty: + + self.results.value['battery storage expansion'] = \ + _calc_storage_expansion(self).sum() + + store = _calc_store_expansion(self) + self.results.value['store expansion'] = store.sum() + self.results.value['H2 store expansion'] = \ + store[store.index.str.contains('H2')].sum() + self.results.value['CH4 store expansion'] = \ + store[store.index.str.contains('CH4')].sum() + self.results.value['heat store expansion'] = \ + store[store.index.str.contains('heat')].sum() + + self.results.value['storage+store expansion'] = \ + self.results.value['battery storage expansion'] + self.results.value['store expansion'] + + # links expansion + + if not network.links[network.links.p_nom_extendable].empty: + + links = _calc_sectorcoupling_link_expansion(self) + self.results.value['fuel cell links expansion'] = links[0] + self.results.value['electrolyzer links expansion'] = links[1] + self.results.value['methanisation links expansion'] = links[2] + self.results.value['Steam Methane Reformation links expansion'] = links[3] + + # grid expansion + + if not network.lines[network.lines.s_nom_extendable].empty: + + self.results.value['abs. 
electrical ac grid expansion'] = _calc_network_expansion(self)[0].sum() + self.results.value['abs. electrical dc grid expansion'] = _calc_network_expansion(self)[1].sum() + self.results.value['abs. electrical grid expansion'] = self.results.value['abs. electrical ac grid expansion'] + self.results.value['abs. electrical dc grid expansion'] + + ext_lines = network.lines[network.lines.s_nom_extendable] + ext_links = network.links[network.links.p_nom_extendable] + ext_dc_lines = ext_links[ext_links.carrier=='DC'] + + self.results.value['rel. electrical ac grid expansion'] = (_calc_network_expansion(self)[0].sum() / ext_lines.s_nom.sum()) + self.results.value['rel. electrical dc grid expansion'] = (_calc_network_expansion(self)[1].sum() / ext_dc_lines.p_nom.sum()) \ No newline at end of file diff --git a/etrago/tools/constraints.py b/etrago/tools/constraints.py index 82a5037b9..453b076d4 100755 --- a/etrago/tools/constraints.py +++ b/etrago/tools/constraints.py @@ -21,9 +21,9 @@ """ Constraints.py includes additional constraints for eTraGo-optimizations """ +import os import logging -from egoio.tools import db from pyomo.environ import Constraint from pypsa.descriptors import expand_series from pypsa.linopt import define_constraints, define_variables, get_var, linexpr @@ -32,6 +32,9 @@ import pandas as pd import pyomo.environ as po +if "READTHEDOCS" not in os.environ: + from egoio.tools import db + logger = logging.getLogger(__name__) __copyright__ = ( @@ -311,11 +314,14 @@ def _cross_border_flow(self, network, snapshots): Extra_functionality that limits overall AC crossborder flows from/to Germany. Add key 'cross_border_flow' and array with minimal and maximal import/export Example: {'cross_border_flow': [-x, y]} (with x Import, y Export) + + Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA snapshots : pandas.DatetimeIndex List of timesteps considered in the optimization + Returns ------- None. @@ -398,11 +404,14 @@ def _cross_border_flow_nmp(self, network, snapshots): Extra_functionality that limits overall crossborder flows from/to Germany. Add key 'cross_border_flow' and array with minimal and maximal import/export Example: {'cross_border_flow': [-x, y]} (with x Import, y Export) + + Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA snapshots : pandas.DatetimeIndex List of timesteps considered in the optimization + Returns ------- None. @@ -464,11 +473,14 @@ def _cross_border_flow_per_country_nmp(self, network, snapshots): Add key 'cross_border_flow_per_country' to args.extra_functionality and define dictionary of country keys and desired limitations of im/exports in MWh Example: {'cross_border_flow_per_country': {'DK':[-X, Y], 'FR':[0,0]}} + + Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA snapshots : pandas.DatetimeIndex List of timesteps considered in the optimization + Returns ------- None. @@ -550,11 +562,14 @@ def _cross_border_flow_per_country(self, network, snapshots): Add key 'cross_border_flow_per_country' to args.extra_functionality and define dictionary of country keys and desired limitations of im/exports in MWh Example: {'cross_border_flow_per_country': {'DK':[-X, Y], 'FR':[0,0]}} + + Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA snapshots : pandas.DatetimeIndex List of timesteps considered in the optimization + Returns ------- None. 
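The constraint docstrings above describe keys that go into args['extra_functionality']. A hedged example of how the two cross-border settings documented so far fit together (the bounds are placeholders, not recommended values):

    extra_functionality = {
        # overall AC cross-border balance for Germany: [import limit (negative), export limit]
        "cross_border_flow": [-50000, 50000],
        # per-country limits in MWh, e.g. no exchange with France at all
        "cross_border_flow_per_country": {"DK": [-10000, 10000], "FR": [0, 0]},
    }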
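Several files in this patch (constraints.py above, and network.py, plot.py and utilities.py further below) gain the same guard around imports that are not available on Read the Docs. A sketch of the pattern in isolation; the get_session helper and the section name are hypothetical and only show that the guarded name is first touched at call time:

    import os

    # Read the Docs sets the READTHEDOCS environment variable in its builds,
    # so sphinx can import the module without the database stack installed.
    if "READTHEDOCS" not in os.environ:
        from egoio.tools import db
    else:
        db = None

    def get_session(section="oedb"):
        """Hypothetical helper: fails only when actually used without egoio."""
        if db is None:
            raise RuntimeError("database access is not available in doc builds")
        return db.connection(section=section)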
@@ -712,7 +727,6 @@ def _capacity_factor(self, network, snapshots): a dictonary as a fraction of generation potential. Example: 'capacity_factor': {'run_of_river': [0, 0.5], 'solar': [0.1, 1]} - Parameters ---------- network : :class:`pypsa.Network @@ -765,7 +779,6 @@ def _capacity_factor_nmp(self, network, snapshots): a dictonary as a fraction of generation potential. Example: 'capacity_factor': {'run_of_river': [0, 0.5], 'solar': [0.1, 1]} - Parameters ---------- network : :class:`pypsa.Network @@ -954,7 +967,6 @@ def _capacity_factor_per_gen(self, network, snapshots): Example: 'capacity_factor_per_gen': {'run_of_river': [0, 0.5], 'solar': [0.1, 1]} - Parameters ---------- network : :class:`pypsa.Network @@ -1023,7 +1035,6 @@ def _capacity_factor_per_gen_nmp(self, network, snapshots): Example: 'capacity_factor_per_gen': {'run_of_river': [0, 0.5], 'solar': [0.1, 1]} - Parameters ---------- network : :class:`pypsa.Network @@ -1323,7 +1334,7 @@ def add_ch4_constraints(self, network, snapshots): Parameters ---------- - network : :class:`pypsa.Network + network : :class:`pypsa.Network` Overall container of PyPSA snapshots : pandas.DatetimeIndex List of timesteps considered in the optimization @@ -1412,7 +1423,7 @@ def add_ch4_constraints_nmp(self, network, snapshots): Parameters ---------- - network : :class:`pypsa.Network + network : :class:`pypsa.Network` Overall container of PyPSA snapshots : pandas.DatetimeIndex List of timesteps considered in the optimization @@ -1496,7 +1507,7 @@ def snapshot_clustering_daily_bounds(self, network, snapshots): Parameters ---------- - network : :class:`pypsa.Network + network : :class:`pypsa.Network` Overall container of PyPSA snapshots : pandas.DatetimeIndex List of timesteps that will be constrained @@ -1535,7 +1546,7 @@ def snapshot_clustering_daily_bounds_nmp(self, network, snapshots): Parameters ---------- - network : :class:`pypsa.Network + network : :class:`pypsa.Network` Overall container of PyPSA snapshots : pandas.DatetimeIndex List of timesteps that will be constrained @@ -1609,7 +1620,7 @@ def snapshot_clustering_seasonal_storage( Parameters ---------- - network : :class:`pypsa.Network + network : :class:`pypsa.Network` Overall container of PyPSA snapshots : list A list of datetime objects representing the timestamps of the snapshots @@ -2429,7 +2440,7 @@ def snapshot_clustering_seasonal_storage_hourly(self, network, snapshots): Parameters ---------- - network : :class:`pypsa.Network + network : :class:`pypsa.Network` Overall container of PyPSA snapshots : list A list of datetime objects representing the timestamps of the snapshots @@ -2534,7 +2545,7 @@ def snapshot_clustering_seasonal_storage_nmp(self, n, sns, simplified=False): Parameters ---------- - n : :class:`pypsa.Network + n : :class:`pypsa.Network` Overall container of PyPSA sns : list A list of datetime objects representing the timestamps of the snapshots @@ -2636,7 +2647,7 @@ def snapshot_clustering_seasonal_storage_hourly_nmp(self, n, sns): Parameters ---------- - n : :class:`pypsa.Network + n : :class:`pypsa.Network` Overall container of PyPSA sns : list A list of datetime objects representing the timestamps of the snapshots @@ -2664,7 +2675,7 @@ def split_dispatch_disaggregation_constraints(self, n, sns): Parameters ---------- - network : :class:`pypsa.Network + network : :class:`pypsa.Network` Overall container of PyPSA snapshots : pandas.DatetimeIndex List of timesteps considered in the optimization @@ -2728,10 +2739,10 @@ def functionality(self, network, snapshots): Parameters 
---------- - network : :class:`pypsa.Network + network : :class:`pypsa.Network` Overall container of PyPSA snapshots : pandas.DatetimeIndex - List of timesteps considered in the optimization + List of timesteps considered in the optimization """ if "CH4" in network.buses.carrier.values: diff --git a/etrago/tools/io.py b/etrago/tools/io.py index d1edbbd88..5bb2b019e 100644 --- a/etrago/tools/io.py +++ b/etrago/tools/io.py @@ -24,10 +24,8 @@ Input/output operations between powerflow schema in the oedb and PyPSA. Additionally oedb wrapper classes to instantiate PyPSA network objects. - Attributes ----------- - +----------- packagename: str Package containing orm class definitions temp_ormclass: str @@ -36,7 +34,7 @@ Orm class name of table with carrier id to carrier name datasets Notes ------ +------- A configuration file connecting the chosen optimization method with components to be queried is needed for NetworkScenario class. """ @@ -74,7 +72,6 @@ class ScenarioBase: Parameters ---------- - session : sqla.orm.session.Session Handles conversations with the database. version : str @@ -175,8 +172,6 @@ def configure_timeindex(self): ) self.timeindex = timeindex[self.start_snapshot - 1 : self.end_snapshot] - """ pandas.tseries.index.DateTimeIndex : - Index of snapshots or timesteps. """ def id_to_source(self): ormclass = self._mapped["Source"] @@ -768,8 +763,8 @@ def extension(self, **kwargs): To connect it to the existing network, transformers are needed. All components and its timeseries of the additional scenario need to be - inserted in the fitting 'model_draft.ego_grid_pf_hv_extension_' table. - The scn_name in the tables have to be labled with 'extension_' + scn_name + inserted in the fitting 'model_draft.ego_grid_pf_hv_extension\_' table. + The scn_name in the tables have to be labled with 'extension\_' + scn_name (e.g. 'extension_nep2035'). Until now, the tables include three additional scenarios: @@ -785,18 +780,21 @@ def extension(self, **kwargs): Generation, loads and its timeseries in Belgium and Norway for scenario 'NEP 2035' - - Parameters - ----- - network : The existing network container (e.g. scenario 'NEP 2035') - session : session-data - overlay_scn_name : Name of the additional scenario - (WITHOUT 'extension_') - start_snapshot, end_snapshot: Simulation time + Parameters + ----------- + network : + The existing network container (e.g. scenario 'NEP 2035') + session : + session-data + overlay_scn_name : + Name of the additional scenario (WITHOUT 'extension\_') + start_snapshot : + end_snapshot: + Simulation time Returns - ------ - network : Network container including existing and additional network + ------- + Network container including existing and additional network """ if self.args["scn_extension"] is not None: @@ -833,21 +831,22 @@ def decommissioning(self, **kwargs): Currently, only lines can be decommissioned. All components of the decommissioning scenario need to be inserted in - the fitting 'model_draft.ego_grid_pf_hv_extension_' table. - The scn_name in the tables have to be labled with 'decommissioning_' + the fitting 'model_draft.ego_grid_pf_hv_extension\_' table. + The scn_name in the tables have to be labled with 'decommissioning\_' + scn_name (e.g. 'decommissioning_nep2035'). - Parameters - ----- - network : The existing network container (e.g. scenario 'NEP 2035') - session : session-data - overlay_scn_name : Name of the decommissioning scenario - + ----------- + network : + The existing network container (e.g. 
scenario 'NEP 2035') + session : + session-data + overlay_scn_name : + Name of the decommissioning scenario Returns ------ - network : Network container including decommissioning + Network container including decommissioning """ if self.args["scn_decommissioning"] is not None: @@ -895,18 +894,20 @@ def distance(x0, x1, y0, y1): """ Function that calculates the square of the distance between two points. - Parameters - ----- - x0: x - coordinate of point 0 - x1: x - coordinate of point 1 - y0: y - coordinate of point 0 - y1: y - coordinate of point 1 - + --------- + x0 : + x - coordinate of point 0 + x1 : + x - coordinate of point 1 + y0 : + y - coordinate of point 0 + y1 : + y - coordinate of point 1 Returns - ------ - distance : float + -------- + distance : float square of distance """ @@ -922,18 +923,16 @@ def calc_nearest_point(bus1, network): Function that finds the geographical nearest point in a network from a given bus. - Parameters - ----- - bus1: float + ----------- + bus1 : float id of bus - network: Pypsa network container + network : Pypsa network container network including the comparable buses - Returns - ------ - bus0 : float + ------- + bus0 : float bus_id of nearest point """ diff --git a/etrago/tools/network.py b/etrago/tools/network.py index a5900238f..385bd7a12 100644 --- a/etrago/tools/network.py +++ b/etrago/tools/network.py @@ -23,12 +23,15 @@ """ import logging +import os -from egoio.tools import db from pypsa.components import Network from sqlalchemy.orm import sessionmaker import pandas as pd +if "READTHEDOCS" not in os.environ: + from egoio.tools import db + from etrago import __version__ from etrago.cluster.disaggregation import run_disaggregation from etrago.cluster.electrical import ehv_clustering, run_spatial_clustering diff --git a/etrago/tools/plot.py b/etrago/tools/plot.py index 89573506d..d3ae2068a 100644 --- a/etrago/tools/plot.py +++ b/etrago/tools/plot.py @@ -26,12 +26,10 @@ import os from matplotlib import pyplot as plt -from pyproj import Proj, transform import matplotlib import matplotlib.patches as mpatches import numpy as np import pandas as pd -import tilemapbase cartopy_present = True try: @@ -42,13 +40,15 @@ except ImportError: cartopy_present = False from pypsa.plot import draw_map_cartopy -from shapely.geometry import LineString, MultiPoint, Point, Polygon -import geopandas as gpd logger = logging.getLogger(__name__) if "READTHEDOCS" not in os.environ: from geoalchemy2.shape import to_shape + import geopandas as gpd + from pyproj import Proj, transform + from shapely.geometry import LineString, MultiPoint, Point, Polygon + import tilemapbase __copyright__ = ( "Flensburg University of Applied Sciences, " @@ -220,9 +220,11 @@ def plot_line_loading_diff(networkA, networkB, timestep=0, osm=False): osm : bool or dict, e.g. {'x': [1,20], 'y': [47, 56], 'zoom' : 6} If not False, osm is set as background with the following settings as dict: - 'x': array of two floats, x axis boundaries (lat) - 'y': array of two floats, y axis boundaries (long) - 'zoom' : resolution of osm + + * 'x': array of two floats, x axis boundaries (lat) + * 'y': array of two floats, y axis boundaries (long) + * 'zoom' : resolution of osm + """ if osm != False: if set_epsg_network.counter == 0: @@ -239,21 +241,26 @@ def shiftedColorMap( data with a negative min and positive max and you want the middle of the colormap's dynamic range to be at zero - Input - ----- - cmap : The matplotlib colormap to be altered - start : Offset from lowest point in the colormap's range. 
- Defaults to 0.0 (no lower ofset). Should be between - 0.0 and `midpoint`. - midpoint : The new center of the colormap. Defaults to - 0.5 (no shift). Should be between 0.0 and 1.0. In - general, this should be 1 - vmax/(vmax + abs(vmin)) - For example if your data range from -15.0 to +5.0 and - you want the center of the colormap at 0.0, `midpoint` - should be set to 1 - 5/(5 + 15)) or 0.75 - stop : Offset from highets point in the colormap's range. - Defaults to 1.0 (no upper ofset). Should be between - `midpoint` and 1.0. + Parameters + ----------- + cmap : + The matplotlib colormap to be altered + start : + Offset from lowest point in the colormap's range. + Defaults to 0.0 (no lower ofset). Should be between + 0.0 and `midpoint`. + midpoint : + The new center of the colormap. Defaults to + 0.5 (no shift). Should be between 0.0 and 1.0. In + general, this should be 1 - vmax/(vmax + abs(vmin)) + For example if your data range from -15.0 to +5.0 and + you want the center of the colormap at 0.0, `midpoint` + should be set to 1 - 5/(5 + 15)) or 0.75 + stop : + Offset from highets point in the colormap's range. + Defaults to 1.0 (no upper ofset). Should be between + `midpoint` and 1.0. + """ cdict = {"red": [], "green": [], "blue": [], "alpha": []} @@ -350,9 +357,10 @@ def network_expansion_diff( osm : bool or dict, e.g. {'x': [1,20], 'y': [47, 56], 'zoom' : 6} If not False, osm is set as background with the following settings as dict: - 'x': array of two floats, x axis boundaries (lat) - 'y': array of two floats, y axis boundaries (long) - 'zoom' : resolution of osm + + * 'x': array of two floats, x axis boundaries (lat) + * 'y': array of two floats, y axis boundaries (long) + * 'zoom' : resolution of osm """ if osm != False: @@ -579,13 +587,12 @@ def plot_gen_diff( """ Plot difference in generation between two networks grouped by carrier type - Parameters ---------- networkA : PyPSA network container with switches networkB : PyPSA network container without switches - leave_out_carriers : list of carriers to leave out (default to all small - carriers) + leave_out_carriers : + list of carriers to leave out (default to all small carriers) Returns ------- @@ -644,7 +651,6 @@ def plot_voltage(network, boundaries=[], osm=False): """ Plot voltage at buses as hexbin - Parameters ---------- network : PyPSA network container @@ -652,9 +658,10 @@ def plot_voltage(network, boundaries=[], osm=False): osm : bool or dict, e.g. {'x': [1,20], 'y': [47, 56], 'zoom' : 6} If not False, osm is set as background with the following settings as dict: - 'x': array of two floats, x axis boundaries (lat) - 'y': array of two floats, y axis boundaries (long) - 'zoom' : resolution of osm + + * 'x': array of two floats, x axis boundaries (lat) + * 'y': array of two floats, y axis boundaries (long) + * 'zoom' : resolution of osm Returns ------- @@ -694,7 +701,6 @@ def curtailment(network, carrier="solar", filename=None): """ Plot curtailment of selected carrier - Parameters ---------- network : PyPSA network container @@ -704,7 +710,6 @@ def curtailment(network, carrier="solar", filename=None): filename: str or None Save figure in this direction - Returns ------- Plot @@ -1045,9 +1050,10 @@ def nodal_gen_dispatch( osm : bool or dict, e.g. 
{'x': [1,20], 'y': [47, 56], 'zoom' : 6} If not False, osm is set as background with the following settings as dict: - 'x': array of two floats, x axis boundaries (lat) - 'y': array of two floats, y axis boundaries (long) - 'zoom' : resolution of osm + + * 'x': array of two floats, x axis boundaries (lat) + * 'y': array of two floats, y axis boundaries (long) + * 'zoom' : resolution of osm Returns ------- @@ -1351,7 +1357,6 @@ def storage_soc_sorted(network, filename=None): ---------- network : PyPSA network container Holds topology of grid including results from powerflow analysis - filename : path to folder Returns @@ -1498,7 +1503,6 @@ def calc_ac_loading(network, timesteps): def calc_dc_loading(network, timesteps): """Calculates loading of DC-lines - Parameters ---------- network : :class:`pypsa.Network @@ -2227,35 +2231,39 @@ def plot_grid( ): """Function that plots etrago.network and results for lines and buses - - Parameters ---------- line_colors : str Set static line color or attribute to plot e.g. 'expansion_abs' Current options: - 'line_loading': mean line loading in p.u. in selected timesteps - 'v_nom': nominal voltage of lines - 'expansion_abs': absolute network expansion in MVA - 'expansion_rel': network expansion in p.u. of existing capacity - 'q_flow_max': maximal reactive flows + + * 'line_loading': mean line loading in p.u. in selected timesteps + * 'v_nom': nominal voltage of lines + * 'expansion_abs': absolute network expansion in MVA + * 'expansion_rel': network expansion in p.u. of existing capacity + * 'q_flow_max': maximal reactive flows + bus_sizes : float, optional Size of buses. The default is 0.001. bus_colors : str, optional Set static bus color or attribute to plot. The default is 'grey'. Current options: - 'nodal_production_balance': net producer/consumer in selected timeteps - 'storage_expansion': storage expansion per bus and technology - 'storage_distribution': installed storage units per bus - 'gen_dist': dispatch per carrier in selected timesteps + + * 'nodal_production_balance': net producer/consumer in selected timeteps + * 'storage_expansion': storage expansion per bus and technology + * 'storage_distribution': installed storage units per bus + * 'gen_dist': dispatch per carrier in selected timesteps + timesteps : array, optional Timesteps consideredd in time depended plots. The default is range(2). osm : bool or dict, e.g. {'x': [1,20], 'y': [47, 56], 'zoom' : 6} If not False, osm is set as background with the following settings as dict: - 'x': array of two floats, x axis boundaries (lat) - 'y': array of two floats, y axis boundaries (long) - 'zoom' : resolution of osm. The default is False. + + * 'x': array of two floats, x axis boundaries (lat) + * 'y': array of two floats, y axis boundaries (long) + * 'zoom' : resolution of osm. The default is False. + boundaries: array Set fixed boundaries of heatmap axis. The default is None. filename: str or None @@ -2587,6 +2595,7 @@ def plot_clusters( gas_pipelines : bool, optional The default is False. Define if the original gas pipelines are plotted or not. + Returns ------- None. 
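The shiftedColorMap docstring above gives midpoint = 1 - vmax/(vmax + abs(vmin)); checking it with the numbers quoted in that docstring:

    # data ranging from -15.0 to +5.0 should centre the colormap at 0.75
    vmin, vmax = -15.0, 5.0
    midpoint = 1 - vmax / (vmax + abs(vmin))
    print(midpoint)  # 0.75
    # the result would then be passed on, e.g. shiftedColorMap(cmap, midpoint=midpoint)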
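As a usage note for the plot_grid options documented above, a hypothetical call combining them; it assumes a solved Etrago object named etrago with the plotting helpers attached, so it is a sketch rather than a standalone runnable snippet:

    etrago.plot_grid(
        line_colors="expansion_abs",      # absolute grid expansion in MVA
        bus_colors="storage_expansion",   # storage expansion per bus and technology
        bus_sizes=0.001,
        timesteps=range(2),
        osm={"x": [1, 20], "y": [47, 56], "zoom": 6},
    )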
diff --git a/etrago/tools/utilities.py b/etrago/tools/utilities.py index b141b76a9..348c232a6 100755 --- a/etrago/tools/utilities.py +++ b/etrago/tools/utilities.py @@ -29,15 +29,17 @@ import math import os -from egoio.tools import db from pyomo.environ import Constraint, PositiveReals, Var -from shapely.geometry import Point -import geopandas as gpd import numpy as np import pandas as pd import pypsa import sqlalchemy.exc +if "READTHEDOCS" not in os.environ: + from egoio.tools import db + from shapely.geometry import Point + import geopandas as gpd + logger = logging.getLogger(__name__) @@ -449,15 +451,15 @@ def set_q_national_loads(self, cos_phi): Parameters ---------- - network : :class:`pypsa.Network - Overall container of PyPSA + network : :class:`pypsa.Network` + Overall container of PyPSA cos_phi : float - Choose ration of active and reactive power of foreign loads + Choose ration of active and reactive power of foreign loads Returns ------- - network : :class:`pypsa.Network - Overall container of PyPSA + network : :class:`pypsa.Network` + Overall container of PyPSA """ network = self.network @@ -592,6 +594,7 @@ def load_shedding(self, temporal_disaggregation=False, **kwargs): Marginal costs for load shedding p_nom : int Installed capacity of load shedding generator + Returns ------- @@ -738,13 +741,14 @@ def export_to_csv(self, path): Parameters ---------- - network : :class:`pypsa.Network + network : :class:`pypsa.Network` Overall container of PyPSA args: dict Contains calculation settings of appl.py path: str or False or None Choose path for csv-files. Specify `""`, `False` or `None` to not do anything. + Returns ------- None @@ -853,7 +857,6 @@ def _make_consense(component, attr): attr : str specify the name of the attribute of the commponent being considered. - Returns ------- function @@ -1294,20 +1297,24 @@ def set_line_costs(self, cost110=230, cost220=290, cost380=85, costDC=375): ---------- network : :class:`pypsa.Network Overall container of PyPSA - args: dict containing settings from appl.py - cost110 : capital costs per km for 110kV lines and cables - default: 230€/MVA/km, source: costs for extra circuit in - dena Verteilnetzstudie, p. 146) - cost220 : capital costs per km for 220kV lines and cables - default: 280€/MVA/km, source: costs for extra circuit in - NEP 2025, capactity from most used 220 kV lines in model - cost380 : capital costs per km for 380kV lines and cables - default: 85€/MVA/km, source: costs for extra circuit in - NEP 2025, capactity from most used 380 kV lines in NEP - costDC : capital costs per km for DC-lines - default: 375€/MVA/km, source: costs for DC transmission line - in NEP 2035 - ------- + args: dict + containing settings from appl.py + cost110 : + capital costs per km for 110kV lines and cables + default: 230€/MVA/km, source: costs for extra circuit in + dena Verteilnetzstudie, p. 
146) + cost220 : + capital costs per km for 220kV lines and cables + default: 280€/MVA/km, source: costs for extra circuit in + NEP 2025, capactity from most used 220 kV lines in model + cost380 : + capital costs per km for 380kV lines and cables + default: 85€/MVA/km, source: costs for extra circuit in + NEP 2025, capactity from most used 380 kV lines in NEP + costDC : + capital costs per km for DC-lines + default: 375€/MVA/km, source: costs for DC transmission line + in NEP 2035 """ @@ -1346,13 +1353,16 @@ def set_trafo_costs( ---------- network : :class:`pypsa.Network Overall container of PyPSA - cost110_220 : capital costs for 110/220kV transformer - default: 7500€/MVA, source: costs for extra trafo in - dena Verteilnetzstudie, p. 146; S of trafo used in osmTGmod - cost110_380 : capital costs for 110/380kV transformer - default: 17333€/MVA, source: NEP 2025 - cost220_380 : capital costs for 220/380kV transformer - default: 14166€/MVA, source: NEP 2025 + cost110_220 : + capital costs for 110/220kV transformer + default: 7500€/MVA, source: costs for extra trafo in + dena Verteilnetzstudie, p. 146; S of trafo used in osmTGmod + cost110_380 : + capital costs for 110/380kV transformer + default: 17333€/MVA, source: NEP 2025 + cost220_380 : + capital costs for 220/380kV transformer + default: 14166€/MVA, source: NEP 2025 """ @@ -1386,10 +1396,6 @@ def set_trafo_costs( def add_missing_components(self): - # Munich - # TODO: Manually adds lines between hard-coded buses. Has to be - # changed for the next dataversion and should be moved to data - # processing """ Add a missing transformer at Heizkraftwerk Nord in Munich and a missing transformer in Stuttgart. @@ -1406,6 +1412,11 @@ def add_missing_components(self): """ + # Munich + # TODO: Manually adds lines between hard-coded buses. Has to be + # changed for the next dataversion and should be moved to data + # processing + """ "https://www.swm.de/privatkunden/unternehmen/energieerzeugung" + "/heizkraftwerke.html?utm_medium=301" @@ -1635,7 +1646,6 @@ def convert_capital_costs(self): ---------- etrago : :class:`etrago.Etrago Transmission grid object - ------- """ @@ -1848,9 +1858,11 @@ def get_clustering_data(self, path): ---------- path : str Name of folder from which to import CSVs of network data. + Returns - None ------- + None + """ if (self.args["network_clustering_ehv"]) | ( @@ -1902,10 +1914,8 @@ def set_random_noise(self, sigma=0.01): ---------- etrago : :class:`etrago.Etrago Transmission grid object - seed: int seed number, needed to reproduce results - sigma: float Default: 0.01 standard deviation, small values reduce impact on dispatch @@ -1962,7 +1972,6 @@ def set_line_country_tags(network): network : :class:`pypsa.Network Overall container of PyPSA - """ transborder_lines_0 = network.lines[ @@ -2490,8 +2499,8 @@ def drop_sectors(self, drop_carriers): drop_carriers : array List of sectors that will be dropped. e.g. ['dsm', 'CH4', 'H2_saltcavern', 'H2_grid', - 'central_heat', 'rural_heat', 'central_heat_store', - 'rural_heat_store', 'Li ion'] means everything but AC + 'central_heat', 'rural_heat', 'central_heat_store', + 'rural_heat_store', 'Li ion'] means everything but AC Returns ------- @@ -2574,10 +2583,12 @@ def drop_sectors(self, drop_carriers): def update_busmap(self, new_busmap): """ Update busmap after any clustering process + Parameters ---------- new_busmap : dictionary busmap used to clusted the network. + Returns ------- None. 
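The cos_phi argument of set_q_national_loads above fixes the ratio of reactive to active power. A self-contained check of the standard conversion behind such a power factor (this illustrates the relation, not necessarily the exact implementation):

    from math import acos, tan

    p_mw = 100.0      # active power of a load
    cos_phi = 0.95    # chosen power factor
    q_mvar = p_mw * tan(acos(cos_phi))
    print(round(q_mvar, 1))  # 32.9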
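The set_line_costs defaults above are given in EUR/MVA/km, so a line's capital cost presumably scales with its length. A worked example with the documented 380 kV default (the length is made up, and the later annuity conversion in convert_capital_costs is left out):

    cost380 = 85.0      # EUR/MVA/km, documented default for 380 kV circuits
    length_km = 120.0   # hypothetical line length

    capital_cost = cost380 * length_km
    print(capital_cost)  # 10200.0 EUR/MVA (before annualisation)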
diff --git a/requirements-doc.txt b/requirements-doc.txt
index abb50f991..383cf8367 100644
--- a/requirements-doc.txt
+++ b/requirements-doc.txt
@@ -2,11 +2,11 @@
 # Using single requirments for docs, see:
 # https://github.com/rtfd/readthedocs.org/issues/2070
 sphinx_rtd_theme
-pandas >= 0.25, <= 0.25
-pypsa >= 0.11.0, <= 0.11.0
-numpy == 1.16.2
-numpydoc == 0.7.0
+pypsa == 0.20.1
+numpydoc
 sqlalchemy
 geoalchemy2
 matplotlib
 nbsphinx
+saio
+pyomo != 6.4.3
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 8b6dde20d..6de5c4bd5 100755
--- a/setup.py
+++ b/setup.py
@@ -68,7 +68,12 @@ def read(*names, **kwargs):
         "tsam",
     ],
     extras_require={
-        "docs": ["sphinx >= 1.4", "sphinx_rtd_theme"],
+        "docs": [
+            "nbsphinx",
+            "numpydoc",
+            "sphinx >= 1.4",
+            "sphinx_rtd_theme",
+        ],
         "gurobipy": ["gurobipy"],
         "cartopy": ["cartopy", "requests"],
     },