diff --git a/etrago/appl.py b/etrago/appl.py index cdf3f3ad..083d87b8 100644 --- a/etrago/appl.py +++ b/etrago/appl.py @@ -29,7 +29,6 @@ import os import os.path -import numpy as np __copyright__ = ( "Flensburg University of Applied Sciences, " @@ -144,7 +143,7 @@ "extreme_periods": None, # consideration of extreme timesteps; e.g. 'append' "how": "daily", # type of period - only relevant for 'typical_periods' "storage_constraints": "soc_constraints", # additional constraints for storages - only relevant for 'typical_periods' - "n_clusters": 5, # number of periods - only relevant for 'typical_periods' + "n_clusters": 5, # number of periods - only relevant for 'typical_periods' "n_segments": 5, # number of segments - only relevant for segmentation }, "skip_snapshots": 5, # False or number of snapshots to skip @@ -169,8 +168,8 @@ def run_etrago(args, json_path): Parameters ---------- db : str - Name of Database session setting stored in *config.ini* of *.egoio*, e.g. - ``'oedb'``. + Name of Database session setting stored in *config.ini* of *.egoio*, + e.g. ``'oedb'``. gridversion : None or str Name of the data version number of oedb: state ``'None'`` for model_draft (sand-box) or an explicit version number @@ -183,51 +182,51 @@ def run_etrago(args, json_path): Type of optimization, currently only "lopf". Default: "lopf". * "n_iter" : int In case of extendable lines, several LOPFs have to be performed. - You can either set "n_iter" and specify a fixed number of iterations - or set "threshold" and specify a threshold of the objective function as - abort criteria of the iterative optimization. + You can either set "n_iter" and specify a fixed number of + iterations or set "threshold" and specify a threshold of the + objective function as abort criteria of the iterative optimization. Default: 4. * "threshold" : int In case of extendable lines, several LOPFs have to be performed. - You can either set "n_iter" and specify a fixed number of iterations - or set "threshold" and specify a threshold of the objective function as - abort criteria of the iterative optimization. Per default, "n_iter" of 4 - is set. + You can either set "n_iter" and specify a fixed number of + iterations or set "threshold" and specify a threshold of the + objective function as abort criteria of the iterative optimization. + Per default, "n_iter" of 4 is set. * "pyomo" : bool Set to True, if pyomo is used for model building. Set to False for big optimization problems - currently only possible when solver is "gurobi". pf_post_lopf : dict - Settings for option to run a non-linear power flow (PF) directly after the - linear optimal power flow (LOPF), and thus the dispatch optimisation, has - finished. + Settings for option to run a non-linear power flow (PF) directly after + the linear optimal power flow (LOPF), and thus the dispatch + optimisation, has finished. The provided dictionary can have the following entries: * "active" : bool If True, a PF is performed after the LOPF. Default: True. * "add_foreign_lopf" : bool - If foreign lines are modeled as DC-links (see parameter `foreign_lines`), - results of the LOPF can be added by setting "add_foreign_lopf" to True. - Default: True. + If foreign lines are modeled as DC-links (see parameter + `foreign_lines`), results of the LOPF can be added by setting + "add_foreign_lopf" to True. Default: True. * "q_allocation" : bool - Allocate reactive power to all generators at the same bus either by "p_nom" - or "p". 
+            Allocate reactive power to all generators at the same bus either
+            by "p_nom" or "p". Default: "p_nom".
     start_snapshot : int
         Start hour of the scenario year to be calculated. Default: 1.
     end_snapshot : int
-        End hour of the scenario year to be calculated. If snapshot clustering is used
-        (see parameter `snapshot_clustering`), the selected snapshots should cover
-        the number of periods / segments. Default: 2.
+        End hour of the scenario year to be calculated. If snapshot clustering
+        is used (see parameter `snapshot_clustering`), the selected snapshots
+        should cover the number of periods / segments. Default: 2.
     solver : str
         Choose your preferred solver. Current options: "glpk" (open-source),
         "cplex" or "gurobi". Default: "gurobi".
     solver_options : dict
         Choose settings of solver to improve simulation time and result.
-        Options are described in documentation of chosen solver. Per default, the
-        following dictionary is set:
+        Options are described in documentation of chosen solver. Per default,
+        the following dictionary is set:
 
         {
             "BarConvTol": 1.0e-5,
@@ -238,13 +237,13 @@ def run_etrago(args, json_path):
             "threads": 4,
         }
 
-        Make sure to reset or adapt these settings when using another solver! Otherwise,
-        you may run into errors.
+        Make sure to reset or adapt these settings when using another solver!
+        Otherwise, you may run into errors.
     model_formulation : str
         Choose formulation of pyomo-model.
         Current options are: "angles", "cycles", "kirchhoff", "ptdf".
-        "angels" works best for small networks, while "kirchhoff" works best for larger
-        networks.
+        "angles" works best for small networks, while "kirchhoff" works best
+        for larger networks.
         Default: "kirchhoff".
     scn_name : str
         Choose your scenario. Currently, there are two different
@@ -291,8 +290,8 @@ def run_etrago(args, json_path):
         False or '/path/tofolder'. Default: False.
     extendable : dict
-        Choose components you want to optimize and set upper bounds for grid expansion.
-        The provided dictionary can have the following entries:
+        Choose components you want to optimize and set upper bounds for grid
+        expansion. The provided dictionary can have the following entries:
 
         * "extendable_components" : list(str)
             The list defines a set of components to optimize.
@@ -300,18 +299,21 @@ def run_etrago(args, json_path):
             The most important possibilities:
 
             * 'as_in_db'
-                leaves everything as it is defined in the data coming from the database
+                leaves everything as it is defined in the data coming from the
+                database
             * 'network'
-                set all lines, links and transformers in electrical grid extendable
+                set all lines, links and transformers in electrical grid
+                extendable
             * 'german_network'
                 set lines and transformers in German electrical grid extendable
             * 'foreign_network'
-                set foreign lines and transformers in electrical grid extendable
+                set foreign lines and transformers in electrical grid
+                extendable
             * 'transformers'
                 set all transformers extendable
             * 'storages' / 'stores'
-                allow to install extendable storages (unlimited in size) at each grid
-                node in order to meet the flexibility demand
+                allow to install extendable storages (unlimited in size) at
+                each grid node in order to meet the flexibility demand
 
             Default: "as_in_db".
 
@@ -319,15 +321,16 @@ def run_etrago(args, json_path):
         Dictionary can have the following entries:
 
         * 'grid_max_D'
-            Upper bounds for electrical grid expansion can be defined for lines in
-            Germany relative to the existing capacity. Alternatively,
-            'grid_max_abs_D' can be used.
Per default, this is set to None and - 'grid_max_abs_D' is set. + Upper bounds for electrical grid expansion can be defined for + lines in Germany relative to the existing capacity. + Alternatively, 'grid_max_abs_D' can be used. Per default, this + is set to None and 'grid_max_abs_D' is set. * 'grid_max_abs_D' - Upper bounds for electrical grid expansion can be defined for lines in - Germany as absolute maximum capacities between two electrical buses - per voltage level. Per default the following dictionary is set: + Upper bounds for electrical grid expansion can be defined for + lines in Germany as absolute maximum capacities between two + electrical buses per voltage level. Per default the following + dictionary is set: { "380": {"i": 1020, "wires": 4, "circuits": 4}, @@ -336,13 +339,14 @@ def run_etrago(args, json_path): "dc": 0, } * 'grid_max_foreign' - Upper bounds for border-crossing electrical lines can be defined - relative to the existing capacity. Alternatively, 'grid_max_abs_foreign' - can be set. + Upper bounds for border-crossing electrical lines can be + defined relative to the existing capacity. Alternatively, + 'grid_max_abs_foreign' can be set. Default: 4. * 'grid_max_abs_foreign' - Upper bounds for border-crossing electrical lines can be defined equally - to 'grid_max_abs_D' as absolute capacity per voltage level. + Upper bounds for border-crossing electrical lines can be + defined equally to 'grid_max_abs_D' as absolute capacity per + voltage level. Default: None. generator_noise : bool or int @@ -385,8 +389,8 @@ def run_etrago(args, json_path): substation, taking into account the shortest distance on power lines. Default: False. network_clustering : dict - Choose if you want to apply a clustering of all network buses and specify - settings. + Choose if you want to apply a clustering of all network buses and + specify settings. The provided dictionary can have the following entries: * "active" : bool @@ -394,75 +398,81 @@ def run_etrago(args, json_path): and the gas buses are clustered down to``'n_clusters_gas'``. Default: True. * "method" : str - Method used for AC clustering. You can choose between two clustering - methods: + Method used for AC clustering. You can choose between two + clustering methods: * "kmeans": considers geographical locations of buses - * "kmedoids-dijkstra": considers electrical distances between buses + * "kmedoids-dijkstra": considers electrical distances between + buses Default: "kmedoids-dijkstra". * "n_clusters_AC" : int - Defines total number of resulting AC nodes including DE and foreign nodes - if `cluster_foreign_AC` is set to True, otherwise only DE nodes. + Defines total number of resulting AC nodes including DE and foreign + nodes if `cluster_foreign_AC` is set to True, otherwise only DE + nodes. Default: 30. * "cluster_foreign_AC" : bool If set to False, the AC buses outside Germany are not clustered - and the buses inside Germany are clustered to complete ``'n_clusters_AC'``. - If set to True, foreign AC buses are clustered as well and included - in number of clusters specified through ``'n_clusters_AC'``. + and the buses inside Germany are clustered to complete + ``'n_clusters_AC'``. If set to True, foreign AC buses are clustered + as well and included in number of clusters specified through + ``'n_clusters_AC'``. Default: False. * "method_gas" : str - Method used for gas clustering. You can choose between two clustering - methods: + Method used for gas clustering. 
You can choose between two + clustering methods: * "kmeans": considers geographical locations of buses - * "kmedoids-dijkstra": considers 'electrical' distances between buses + * "kmedoids-dijkstra": considers 'electrical' distances between + buses Default: "kmedoids-dijkstra". * "n_clusters_gas" : int - Defines total number of resulting CH4 nodes including DE and foreign nodes - if `cluster_foreign_gas` is set to True, otherwise only DE nodes. + Defines total number of resulting CH4 nodes including DE and + foreign nodes if `cluster_foreign_gas` is set to True, otherwise + only DE nodes. Default: 17. * "cluster_foreign_gas" : bool If set to False, the gas buses outside Germany are not clustered - and the buses inside Germany are clustered to complete ``'n_clusters_gas'``. - If set to True, foreign gas buses are clustered as well and included - in number of clusters specified through ``'n_clusters_gas'``. + and the buses inside Germany are clustered to complete + ``'n_clusters_gas'``. If set to True, foreign gas buses are + clustered as well and included in number of clusters specified + through ``'n_clusters_gas'``. Default: False. * "k_elec_busmap" : bool or str - With this option you can load cluster coordinates from a previous AC - clustering run. Options are False, in which case no previous busmap is - loaded, and path/to/busmap.csv in which case the busmap is loaded from the - specified file. Please note, that when a path is provided, the set number - of clusters will be ignored. + With this option you can load cluster coordinates from a previous + AC clustering run. Options are False, in which case no previous + busmap is loaded, and path/to/busmap.csv in which case the busmap + is loaded from the specified file. Please note, that when a path is + provided, the set number of clusters will be ignored. Default: False. * "k_gas_busmap" : bool or str - With this option you can load cluster coordinates from a previous gas - clustering run. Options are False, in which case no previous busmap is - loaded, and path/to/busmap.csv in which case the busmap is loaded from the - specified file. Please note, that when a path is provided, the set number - of clusters will be ignored. + With this option you can load cluster coordinates from a previous + gas clustering run. Options are False, in which case no previous + busmap is loaded, and path/to/busmap.csv in which case the busmap + is loaded from the specified file. Please note, that when a path is + provided, the set number of clusters will be ignored. Default: False. * "bus_weight_fromcsv" : None or str - In general, the weighting of AC buses takes place considering generation and - load at each node. With this option, you can load an own weighting for - the AC buses by providing a path to a csv file. If None, weighting is - conducted as described above. + In general, the weighting of AC buses takes place considering + generation and load at each node. With this option, you can load an + own weighting for the AC buses by providing a path to a csv file. + If None, weighting is conducted as described above. Default: None. * "bus_weight_tocsv" : None or str - Specifies whether to store the weighting of AC buses to csv or not. If - None, it is not stored. Otherwise, it is stored to the provided + Specifies whether to store the weighting of AC buses to csv or not. + If None, it is not stored. Otherwise, it is stored to the provided path/to/bus_weight.csv. Default: None. 
* "gas_weight_fromcsv" : None or str - In general, the weighting of CH4 nodes takes place considering generation - and load at each node, as well as non-transport capacities at each node. - With this option, you can load an own weighting for the CH4 buses by - providing a path to a csv file. If None, weighting is conducted as - described above. + In general, the weighting of CH4 nodes takes place considering + generation and load at each node, as well as non-transport + capacities at each node. With this option, you can load an own + weighting for the CH4 buses by providing a path to a csv file. If + None, weighting is conducted as described above. Default: None. * "gas_weight_tocsv" : None or str - Specifies whether to store the weighting of gas buses to csv or not. If - None, it is not stored. Otherwise, it is stored to the provided - path/to/gas_bus_weight.csv. + Specifies whether to store the weighting of gas buses to csv or + not. If None, it is not stored. Otherwise, it is stored to the + provided path/to/gas_bus_weight.csv. Default: None. * "line_length_factor" : float Defines the factor to multiply the crow-flies distance @@ -480,59 +490,62 @@ def run_etrago(args, json_path): * "random_state" : int Random state for replicability of clustering results. Default: 42. * "n_init" : int - Affects clustering algorithm, only change when necessary! Documentation - and possible settings are described in sklearn-package - (sklearn/cluster/kmeans.py). + Affects clustering algorithm, only change when necessary! + Documentation and possible settings are described in + sklearn-package (sklearn/cluster/kmeans.py). Default: 10. * "max_iter" : int - Affects clustering algorithm, only change when necessary! Documentation - and possible settings are described in sklearn-package - (sklearn/cluster/kmeans.py). + Affects clustering algorithm, only change when necessary! + Documentation and possible settings are described in + sklearn-package (sklearn/cluster/kmeans.py). Default: 100. * "tol" : float - Affects clustering algorithm, only change when necessary! Documentation - and possible settings are described in sklearn-package - (sklearn/cluster/kmeans.py). + Affects clustering algorithm, only change when necessary! + Documentation and possible settings are described in + sklearn-package (sklearn/cluster/kmeans.py). Default: 1e-6. * "CPU_cores" : int or str - Number of cores used in clustering. Specify a concrete number or "max" to - use all cores available. + Number of cores used in clustering. Specify a concrete number or + "max" to use all cores available. Default: 4. sector_coupled_clustering : dict - Choose if you want to apply a clustering of sector coupled carriers, such - as central_heat, and specify settings. + Choose if you want to apply a clustering of sector coupled carriers, + such as central_heat, and specify settings. The provided dictionary can have the following entries: * "active" : bool - State if you want to apply clustering of sector coupled carriers, such - as central_heat. + State if you want to apply clustering of sector coupled carriers, + such as central_heat. Default: True. * "carrier_data" : dict[str, dict] - Keys of the dictionary specify carriers affected by sector coupling, e.g. - "central_heat". The corresponding dictionaries specify, how the carrier - should be clustered. This dictionary must contain the following entries: + Keys of the dictionary specify carriers affected by sector + coupling, e.g. "central_heat". 
The corresponding dictionaries
+            specify how the carrier should be clustered. This dictionary must
+            contain the following entries:
 
         * "base" : list(str)
-              The approach bases on already clustered buses (AC and CH4) and builds
-              clusters around the topology of those buses. With this option, you can
-              specify the carriers to use as base. See `strategy` for more
-              information.
+              The approach is based on already clustered buses (AC and CH4)
+              and builds clusters around the topology of those buses. With
+              this option, you can specify the carriers to use as base. See
+              `strategy` for more information.
         * "strategy" : str
               Strategy to use in the clustering. Possible options are:
 
               * "consecutive"
-                  This strategy clusters around the buses of the first carrier in the
-                  `'base'`` list. The links to other buses are preserved. All buses,
-                  that have no connection to the first carrier will then be clustered
-                  around the buses of the second carrier in the list.
+                  This strategy clusters around the buses of the first
+                  carrier in the ``'base'`` list. The links to other buses
+                  are preserved. All buses that have no connection to the
+                  first carrier will then be clustered around the buses of
+                  the second carrier in the list.
               * "simultaneous"
-                  This strategy looks for links connecting the buses of the carriers
-                  in the ``'base'`` list and aggregates buses in case they have the
-                  same set of links connected. For example, a heat bus connected to
-                  CH4 via gas boiler and to AC via heat pump will only form a cluster
-                  with other buses, if these have the same links to
-                  the same clusters of CH4 and AC.
+                  This strategy looks for links connecting the buses of the
+                  carriers in the ``'base'`` list and aggregates buses in
+                  case they have the same set of links connected. For
+                  example, a heat bus connected to CH4 via gas boiler and to
+                  AC via heat pump will only form a cluster with other buses
+                  if these have the same links to the same clusters of CH4
+                  and AC.
 
         Per default, the following dictionary is set:
 
         {
@@ -547,61 +560,68 @@ def run_etrago(args, json_path):
         method you want to use for the spatial disaggregation. Only possible
         option is currently "uniform".
     snapshot_clustering : dict
-        State if you want to apply a temporal clustering and run the optimization
-        only on a subset of snapshot periods, and specify settings.
-        The provided dictionary can have the following entries:
+        State if you want to apply a temporal clustering and run the
+        optimization only on a subset of snapshot periods, and specify
+        settings. The provided dictionary can have the following entries:
 
         * "active" : bool
-            Choose, if clustering is activated or not. If True, it is activated.
+            Choose if clustering is activated or not. If True, it is
+            activated.
            Default: False.
        * "method" : str
-            Method to apply. Possible options are "typical_periods" and "segmentation".
+            Method to apply. Possible options are "typical_periods" and
+            "segmentation".
            Default: "segmentation".
        * "extreme_periods" : None or str
-            Method used to consider extreme snapshots (time steps with extreme residual
-            load) in reduced timeseries.
+            Method used to consider extreme snapshots (time steps with extreme
+            residual load) in reduced timeseries.
            Possible options are None, "append", "new_cluster_center", and
-            "replace_cluster_center". The default is None, in which case extreme periods
-            are not considered.
+            "replace_cluster_center". The default is None, in which case
+            extreme periods are not considered.
* "how" : str Definition of period in case `method` is set to "typical_periods". - Possible options are "daily", "weekly", and "monthly". Default: "daily". + Possible options are "daily", "weekly", and "monthly". + Default: "daily". * "storage_constraints" : str - Defines additional constraints for storage units in case `method` is set to - "typical_periods". Possible options are "daily_bounds", "soc_constraints" - and "soc_constraints_simplified". Default: "soc_constraints". + Defines additional constraints for storage units in case `method` + is set to "typical_periods". Possible options are "daily_bounds", + "soc_constraints" and "soc_constraints_simplified". + Default: "soc_constraints". * "n_clusters" : int - Number of clusters in case `method` is set to "typical_periods". Default: 5. + Number of clusters in case `method` is set to "typical_periods". + Default: 5. * "n_segments" : int - Number of segments in case `method` is set to "segmentation". Default: 5. + Number of segments in case `method` is set to "segmentation". + Default: 5. skip_snapshots : bool or int State None, if you want to use all time steps, or provide a number, if you only want to consider every n-th timestep to reduce temporal complexity. Default: 5. temporal_disaggregation : dict - State if you want to apply a second LOPF considering dispatch only (no capacity - optimization) to disaggregate the dispatch to the whole temporal complexity. - Be aware that a load shedding will be applied in this optimization. - The provided dictionary must have the following entries: + State if you want to apply a second LOPF considering dispatch only + (no capacity optimization) to disaggregate the dispatch to the whole + temporal complexity. Be aware that a load shedding will be applied in + this optimization. The provided dictionary must have the following + entries: * "active" : bool - Choose, if temporal disaggregation is activated or not. If True, it is - activated. + Choose, if temporal disaggregation is activated or not. If True, + it is activated. Default: False. * "no_slices" : int - With "no_slices" the optimization problem will be calculated as a given - number of sub-problems while using some information on the state of charge - of storage units and stores from the former optimization (at the moment - only possible with skip_snapshots and extra_functionalities are - disregarded). + With "no_slices" the optimization problem will be calculated as a + given number of sub-problems while using some information on the + state of charge of storage units and stores from the former + optimization (at the moment only possible with skip_snapshots and + extra_functionalities are disregarded). Default: 8. branch_capacity_factor : dict[str, float] Add a factor here if you want to globally change line capacities - (e.g. to "consider" an (n-1) criterion or for debugging purposes). The factor - specifies the p.u. branch rating, e.g. 0.5 to allow half the line capacity. - Per default, it is set to {'HV': 0.5, 'eHV' : 0.7}. + (e.g. to "consider" an (n-1) criterion or for debugging purposes). + The factor specifies the p.u. branch rating, e.g. 0.5 to allow half the + line capacity. Per default, it is set to {'HV': 0.5, 'eHV' : 0.7}. 
load_shedding : bool State here if you want to make use of the load shedding function which is helpful when debugging: a very expensive generator is set to each @@ -673,8 +693,12 @@ def run_etrago(args, json_path): etrago.session.close() # plots: more in tools/plot.py # make a line loading plot - # etrago.plot_grid(line_colors='line_loading', bus_sizes=0.0001, timesteps=range(2)) + # etrago.plot_grid( + # line_colors='line_loading', bus_sizes=0.0001, timesteps=range(2)) # network and storage - # etrago.plot_grid(line_colors='expansion_abs', bus_colors='storage_expansion', bus_sizes=0.0001) + # etrago.plot_grid( + # line_colors='expansion_abs', + # bus_colors='storage_expansion', + # bus_sizes=0.0001) # flexibility usage # etrago.flexibility_usage('DSM') diff --git a/etrago/cluster/__init__.py b/etrago/cluster/__init__.py index cc95f41d..5dd475b7 100644 --- a/etrago/cluster/__init__.py +++ b/etrago/cluster/__init__.py @@ -5,5 +5,3 @@ __copyright__ = "tba" __license__ = "tba" __author__ = "tba" - - diff --git a/etrago/cluster/electrical.py b/etrago/cluster/electrical.py index e46d6b0f..44fc5526 100755 --- a/etrago/cluster/electrical.py +++ b/etrago/cluster/electrical.py @@ -46,7 +46,6 @@ strategies_generators, strategies_one_ports, ) - from etrago.tools.utilities import * logger = logging.getLogger(__name__) @@ -69,7 +68,7 @@ def _leading(busmap, df): """ - Returns a function that computes the leading bus_id for a given mapped + Returns a function that computes the leading bus_id for a given mapped list of buses. Parameters @@ -95,7 +94,7 @@ def leader(x): def adjust_no_electric_network(etrago, busmap, cluster_met): """ - Adjusts the non-electric network based on the electrical network + Adjusts the non-electric network based on the electrical network (esp. eHV network), adds the gas buses to the busmap, and creates the new buses for the non-electric network. @@ -117,7 +116,7 @@ def adjust_no_electric_network(etrago, busmap, cluster_met): """ network = etrago.network - # network2 is supposed to contain all the not electrical or gas buses + # network2 is supposed to contain all the not electrical or gas buses # and links network2 = network.copy(with_time=False) network2.buses = network2.buses[ @@ -148,7 +147,8 @@ def adjust_no_electric_network(etrago, busmap, cluster_met): # eHV network busmap2 = {} - # Map crossborder AC buses in case that they were not part of the k-mean clustering + # Map crossborder AC buses in case that they were not part of the k-mean + # clustering if (not etrago.args["network_clustering"]["cluster_foreign_AC"]) & ( cluster_met in ["kmeans", "kmedoids-dijkstra"] ): @@ -178,7 +178,8 @@ def adjust_no_electric_network(etrago, busmap, cluster_met): & (network2.links["carrier"] == map_carrier[carry]) ].copy() df["elec"] = df["bus0"].isin(busmap.keys()) - df = df[df["elec"] == True] + + df = df[df["elec"]] if len(df) > 0: bus_hv = df["bus0"][0] @@ -386,9 +387,9 @@ def cluster_on_extra_high_voltage(etrago, busmap, with_time=True): def delete_ehv_buses_no_lines(network): """ - When there are AC buses totally isolated, this function deletes them in + When there are AC buses totally isolated, this function deletes them in order to make possible the creation of busmaps based on electrical - connections and other purposes. Additionally, it throws a warning to + connections and other purposes. Additionally, it throws a warning to inform the user in case that any correction should be done. 
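Several hunks in this file replace redundant or broken boolean comparisons on pandas objects (e.g. `df = df[df["elec"]]` above, and the bus mask in the next hunk). The distinction matters because `== True` is merely redundant, while `is False` compares object identity and never works element-wise; `~mask` is the correct negation. A minimal, self-contained illustration:

    import pandas as pd

    flags = pd.Series([True, False, True])

    flags == False  # element-wise comparison -> Series([False, True, False])
    flags is False  # identity test against the Series object -> always False
    ~flags          # element-wise negation -> mask selecting the False rows

    df = pd.DataFrame({"elec": flags})
    df[df["elec"]]   # rows where "elec" is True (pattern used in this diff)
    df[~df["elec"]]  # rows where "elec" is False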
Parameters
     ----------
     network : pypsa.network
 
     Returns
     -------
     None.
@@ -411,10 +412,10 @@ def delete_ehv_buses_no_lines(network):
     buses_ac["with_gen"] = buses_ac.index.isin(network.generators.bus)
 
     delete_buses = buses_ac[
-        (buses_ac["with_line"] == False)
-        & (buses_ac["with_load"] == False)
-        & (buses_ac["with_link"] == False)
-        & (buses_ac["with_gen"] == False)
+        ~buses_ac["with_line"]
+        & ~buses_ac["with_load"]
+        & ~buses_ac["with_link"]
+        & ~buses_ac["with_gen"]
     ].index
 
     if len(delete_buses):
@@ -750,7 +751,7 @@ def preprocessing(etrago):
 
     ----------------------- WARNING ---------------------------
               THE FOLLOWING BUSES HAVE NOT COUNTRY DATA:
-              {network.buses[network.buses.country.isna()].index.to_list()}.
+    {network.buses[network.buses.country.isna()].index.to_list()}.
               THEY WILL BE ASSIGNED TO GERMANY, BUT IT IS POTENTIALLY A
               SIGN OF A PROBLEM IN THE DATASET.
     ----------------------- WARNING ---------------------------
@@ -769,9 +770,9 @@ def preprocessing(etrago):
     if settings["method"] == "kmedoids-dijkstra":
         lines_col = network_elec.lines.columns
 
-        # The Dijkstra clustering works using the shortest electrical path
+        # The Dijkstra clustering works using the shortest electrical path
         # between buses. In some cases, a bus has just DC connections, which
-        # are considered links. Therefore it is necessary to include
+        # are considered links. Therefore it is necessary to include
         # temporarily the DC links into the lines table.
         dc = network.links[network.links.carrier == "DC"]
         str1 = "DC_"
@@ -825,7 +826,7 @@ def postprocessing(etrago, busmap, busmap_foreign, medoid_idx=None):
     method = settings["method"]
     num_clusters = settings["n_clusters_AC"]
 
-    if settings["k_elec_busmap"] == False:
+    if not settings["k_elec_busmap"]:
         busmap.name = "cluster"
         busmap_elec = pd.DataFrame(busmap.copy(), dtype="string")
         busmap_elec.index.name = "bus"
@@ -871,7 +872,7 @@ def postprocessing(etrago, busmap, busmap_foreign, medoid_idx=None):
     )
 
     # merge busmap for foreign buses with the German buses
-    if settings["cluster_foreign_AC"] == False:
+    if not settings["cluster_foreign_AC"]:
         for bus in busmap_foreign.index:
             busmap[bus] = busmap_foreign[bus]
             if bus == busmap_foreign[bus]:
@@ -936,7 +937,6 @@ def weighting_for_scenario(network, save=None):
     """
 
     def calc_availability_factor(gen):
-
         """
         Calculate the availability factor for a given generator.
 
@@ -952,10 +952,10 @@
 
         Notes
         -----
-        Availability factor is defined as the ratio of the average power
-        output of the generator over the maximum power output capacity of
+        Availability factor is defined as the ratio of the average power
+        output of the generator over the maximum power output capacity of
         the generator. If the generator is time-dependent, its average power
-        output is calculated using the `network.generators_t` DataFrame.
+        output is calculated using the `network.generators_t` DataFrame.
         Otherwise, its availability factor is obtained from the
         `fixed_capacity_fac` dictionary, which contains pre-defined factors
         for fixed capacity generators.
If the generator's availability factor @@ -968,7 +968,7 @@ def calc_availability_factor(gen): else: try: cf = fixed_capacity_fac[gen["carrier"]] - except: + except KeyError: print(gen) cf = 1 return cf @@ -1058,7 +1058,7 @@ def run_spatial_clustering(self): elec_network, weight, n_clusters, busmap_foreign = preprocessing(self) if self.args["network_clustering"]["method"] == "kmeans": - if self.args["network_clustering"]["k_elec_busmap"] == False: + if not self.args["network_clustering"]["k_elec_busmap"]: logger.info("Start k-means Clustering") busmap = kmean_clustering( @@ -1070,7 +1070,7 @@ def run_spatial_clustering(self): medoid_idx = pd.Series(dtype=str) elif self.args["network_clustering"]["method"] == "kmedoids-dijkstra": - if self.args["network_clustering"]["k_elec_busmap"] == False: + if not self.args["network_clustering"]["k_elec_busmap"]: logger.info("Start k-medoids Dijkstra Clustering") busmap, medoid_idx = kmedoids_dijkstra_clustering( @@ -1090,7 +1090,7 @@ def run_spatial_clustering(self): ) self.update_busmap(busmap) - if self.args["disaggregation"] != None: + if self.args["disaggregation"] is not None: self.disaggregated_network = self.network.copy() else: self.disaggregated_network = self.network.copy(with_time=False) diff --git a/etrago/cluster/gas.py b/etrago/cluster/gas.py index f0dfcbf4..b25f64e6 100644 --- a/etrago/cluster/gas.py +++ b/etrago/cluster/gas.py @@ -24,6 +24,8 @@ import os if "READTHEDOCS" not in os.environ: + import logging + from pypsa import Network from pypsa.networkclustering import ( aggregatebuses, @@ -40,7 +42,8 @@ kmedoids_dijkstra_clustering, sum_with_inf, ) - from etrago.tools.utilities import * + +logger = logging.getLogger(__name__) __copyright__ = ( "Flensburg University of Applied Sciences, " @@ -101,7 +104,8 @@ def preprocessing(etrago): ) ] - # select buses dependent on whether they should be clustered in (only DE or DE+foreign) + # select buses dependent on whether they should be clustered in + # (only DE or DE+foreign) if not settings["cluster_foreign_gas"]: network_ch4.buses = network_ch4.buses.loc[ ch4_filter & (network_ch4.buses["country"].values == "DE") @@ -344,8 +348,8 @@ def gas_postprocessing(etrago, busmap, medoid_idx=None): + str(settings["n_clusters_gas"]) + "_result.csv" ) - - if 'H2' in etrago.network.buses.carrier.unique(): + + if "H2" in etrago.network.buses.carrier.unique(): busmap = get_h2_clusters(etrago, busmap) # Add all other buses to busmap @@ -997,7 +1001,8 @@ def run_spatial_clustering_gas(self): self.update_busmap(busmap) logger.info( - "GAS Network clustered to {} DE-buses and {} foreign buses with {} algorithm.".format( + """GAS Network clustered to {} DE-buses and {} foreign buses + with {} algorithm.""".format( len( self.network.buses.loc[ (self.network.buses.carrier == "CH4") diff --git a/etrago/cluster/snapshot.py b/etrago/cluster/snapshot.py index fbbd1f68..a476e93a 100644 --- a/etrago/cluster/snapshot.py +++ b/etrago/cluster/snapshot.py @@ -18,7 +18,8 @@ # along with this program. If not, see . # File description for read-the-docs -""" This module contains functions for reducing the complexity of a PyPSA network in temporal dimension by +""" This module contains functions for reducing the complexity of a PyPSA + network in temporal dimension by a) downsampling to every n-th snapshot b) clustering to typical periods (eg days, weeks) c) clustering to segments of variable length @@ -26,11 +27,11 @@ ( https://github.com/FZJ-IEK3-VSA/tsam ) developed by Leander Kotzur et al. 
""" -import pandas as pd import os +import pandas as pd + if "READTHEDOCS" not in os.environ: - import pyomo.environ as po import tsam.timeseriesaggregation as tsam __copyright__ = ( @@ -39,12 +40,14 @@ "Centre for Sustainable Energy Systems" ) __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" -__author__ = "ClaraBuettner, ulfmueller, KathiEsterl, simnh, wheitkoetter, BartelsJ, AmeliaNadal" +__author__ = """ClaraBuettner, ulfmueller, KathiEsterl, simnh, wheitkoetter, + BartelsJ, AmeliaNadal""" def snapshot_clustering(self): """ - Function to call the snapshot clustering function with the respecting method and settings. + Function to call the snapshot clustering function with the respecting + method and settings. Raises ------ @@ -57,9 +60,9 @@ def snapshot_clustering(self): """ - if self.args["snapshot_clustering"]["active"] == True: + if self.args["snapshot_clustering"]["active"]: # save second network for optional dispatch disaggregation - if self.args["temporal_disaggregation"]["active"] == True: + if self.args["temporal_disaggregation"]["active"]: self.network_tsa = self.network.copy() if self.args["snapshot_clustering"]["method"] == "segmentation": @@ -83,7 +86,8 @@ def snapshot_clustering(self): ) else: raise ValueError( - "Type of clustering should be 'typical_periods' or 'segmentation'" + """Type of clustering should be 'typical_periods' or + 'segmentation'""" ) @@ -97,7 +101,8 @@ def tsam_cluster( segm_hoursperperiod=24, ): """ - Conducts the clustering of the snapshots for temporal aggregation with the respecting method. + Conducts the clustering of the snapshots for temporal aggregation with the + respecting method. Parameters ---------- @@ -107,14 +112,16 @@ def tsam_cluster( Number of clusters for typical_periods. The default is 10. how : {'daily', 'weekly', 'monthly'}, optional Definition of period for typical_periods. The default is 'daily'. - extremePeriodMethod : {'None','append','new_cluster_center', 'replace_cluster_center'}, optional - Method to consider extreme snapshots in reduced timeseries. The default is 'None'. + extremePeriodMethod : {'None','append','new_cluster_center', + 'replace_cluster_center'}, optional Method to consider extreme + snapshots in reduced timeseries. The default is 'None'. segmentation : boolean, optional Argument to activate segmenation method. The default is False. segment_no : int, optional Number of segments for segmentation. The default is 10. segm_hoursperperiod : int, optional - Only for segmentation, ensures to cluster to segments considering all snapshots. The default is 24. + Only for segmentation, ensures to cluster to segments considering all + snapshots. The default is 24. 
Returns
     -------
 
@@ -199,8 +206,9 @@
     timeseries_creator = aggregation.createTypicalPeriods()
     timeseries = timeseries_creator.copy()
 
-    # If Segmentation is True, insert 'Dates' and 'SegmentNo' column in timeseries
-    if segmentation == True:
+    # If Segmentation is True, insert 'Dates' and 'SegmentNo' column in
+    # timeseries
+    if segmentation:
         weights = timeseries.index.get_level_values(2)
         dates_df = timeseries_df.index.get_level_values(0)
         dates = []
@@ -229,7 +237,7 @@
     clusterOrder = aggregation.clusterOrder
     clusterCenterIndices = aggregation.clusterCenterIndices
 
-    if segmentation == True:
+    if segmentation:
         if extremePeriodMethod != "None":
             timeseries = segmentation_extreme_periods(
                 timeseries_df, timeseries, extremePeriodMethod
@@ -287,7 +295,8 @@
     df_cluster = pd.DataFrame(
         {
             "Cluster": clusterOrder,  # Cluster of the day
-            "RepresentativeDay": representative_day,  # representative day of the cluster
+            "RepresentativeDay": representative_day,  # representative day of
+            # the cluster
             "last_hour_RepresentativeDay": last_hour_datetime,
         }
     )  # last hour of the cluster
@@ -323,8 +332,9 @@
         Dataframe wit timeseries to cluster.
     timeseries : pd.DataFrame
         Information on segments after segmentation.
-    extremePeriodMethod : {'None','append','new_cluster_center', 'replace_cluster_center'}, optional
-        Method to consider extreme snapshots in reduced timeseries. The default is 'None'.
+    extremePeriodMethod : {'None','append','new_cluster_center',
+        'replace_cluster_center'}, optional
+        Method to consider extreme snapshots in reduced timeseries.
+        The default is 'None'.
 
     Raises
     ------
@@ -460,7 +470,8 @@
 
             else:
                 raise ValueError(
-                    "Choose 'append' or 'replace_cluster_center' for consideration of extreme periods with segmentation method"
+                    """Choose 'append' or 'replace_cluster_center' for
+                    consideration of extreme periods with segmentation method"""
                 )
 
     # add timestep if it is not already calculated
@@ -581,7 +592,8 @@
 
         else:
             raise ValueError(
-                "Choose 'append' or 'replace_cluster_center' for consideration of extreme periods with segmentation method"
+                """Choose 'append' or 'replace_cluster_center' for
+                consideration of extreme periods with segmentation method"""
             )
 
     if "row_no" in timeseries.columns:
@@ -598,7 +610,8 @@ def run(
     extreme_periods="None",
 ):
     """
-    Function to call the respecting snapshot clustering function and export the result to a csv-file.
+    Function to call the respective snapshot clustering function and export the
+    result to a csv-file.
 
     Parameters
     ----------
@@ -610,8 +623,10 @@
         Definition of period for typical_periods. The default is 'daily'.
     segmented_to : int, optional
         Number of segments for segmentation. The default is False.
-    extremePeriodMethod : {'None','append','new_cluster_center', 'replace_cluster_center'}, optional
-        Method to consider extreme snapshots in reduced timeseries. The default is 'None'.
+    extremePeriodMethod : {'None','append','new_cluster_center',
+        'replace_cluster_center'}, optional
+        Method to consider extreme snapshots in reduced timeseries.
+        The default is 'None'.
Returns
     -------
 
@@ -649,7 +664,7 @@ def run(
         segm_hoursperperiod=network.snapshots.size,
     )
 
-    if segmentation != False:
+    if segmentation:
         pd.DataFrame(
             timeseries.reset_index(),
             columns=["dates", "SegmentNo", "SegmentDuration"],
@@ -659,16 +674,12 @@
     else:
         if how == "daily":
             howie = "days"
-            path = "typical_days"
         elif how == "weekly":
             howie = "weeks"
-            path = "typical_weeks"
         elif how == "monthly":
             howie = "months"
-            path = "typical_months"
         elif how == "hourly":
             howie = "hours"
-            path = "typical_hours"
         df_cluster.to_csv(
             "cluster_typical-periods=" + str(n_clusters) + howie + ".csv"
         )
@@ -798,8 +809,8 @@ def skip_snapshots(self):
 
     # save second network for optional dispatch disaggregation
     if (
-        self.args["temporal_disaggregation"]["active"] == True
-        and self.args["snapshot_clustering"]["active"] == False
+        self.args["temporal_disaggregation"]["active"]
+        and not self.args["snapshot_clustering"]["active"]
     ):
         self.network_tsa = self.network.copy()
 
diff --git a/etrago/cluster/spatial.py b/etrago/cluster/spatial.py
index 43af10b9..be78296a 100755
--- a/etrago/cluster/spatial.py
+++ b/etrago/cluster/spatial.py
@@ -41,8 +41,14 @@
     import networkx as nx
     import numpy as np
     import pandas as pd
+
     import pypsa
-    from etrago.tools.utilities import *
+    from etrago.tools.utilities import (
+        buses_grid_linked,
+        buses_of_vlvl,
+        connected_grid_lines,
+        connected_transformer,
+    )
 
     logger = logging.getLogger(__name__)
 
@@ -92,7 +98,7 @@ def nan_links(x):
 
 def ext_storage(x):
-    v = any(x[x == True])
+    v = any(x[x])
     return v
 
@@ -341,7 +347,7 @@ def shortest_path(paths, graph):
     df_isna = df.isnull()
 
     for s, t in paths:
-        while df_isna.loc[(s, t), "path_length"] == True:
+        while df_isna.loc[(s, t), "path_length"]:
             try:
                 s_to_other = nx.single_source_dijkstra_path_length(graph, s)
                 for t in idx.levels[1]:
@@ -506,7 +512,7 @@ def busmap_from_psql(etrago):
     """
     scn_name = (
         etrago.args["scn_name"]
-        if etrago.args["scn_extension"] == None
+        if etrago.args["scn_extension"] is None
         else etrago.args["scn_name"]
         + "_ext_"
        + "_".join(etrago.args["scn_extension"])
@@ -717,8 +723,9 @@ def kmedoids_dijkstra_clustering(
    etrago, buses, connections, weight, n_clusters
 ):
     """
-    Applies a k-medoids clustering on the given network and calls the function to conduct a Dijkstra's
-    algorithm afterwards for the consideration of the network's topology in the spatial clustering.
+    Applies a k-medoids clustering on the given network and calls the function
+    to conduct a Dijkstra's algorithm afterwards for the consideration of the
+    network's topology in the spatial clustering.
 
     Parameters
     ----------
diff --git a/etrago/tools/constraints.py b/etrago/tools/constraints.py
index b3ab1b7b..9785e580 100755
--- a/etrago/tools/constraints.py
+++ b/etrago/tools/constraints.py
@@ -311,8 +311,9 @@ def _rule(m):
 
 def _cross_border_flow(self, network, snapshots):
     """
-    Extra_functionality that limits overall AC crossborder flows from/to Germany.
-    Add key 'cross_border_flow' and array with minimal and maximal import/export
+    Extra_functionality that limits overall AC crossborder flows from/to
+    Germany. Add key 'cross_border_flow' and array with minimal and maximal
+    import/export.
     Example: {'cross_border_flow': [-x, y]} (with x Import, y Export)
 
     Parameters
@@ -402,7 +403,8 @@ def _rule_max(m):
 
 def _cross_border_flow_nmp(self, network, snapshots):
     """
     Extra_functionality that limits overall crossborder flows from/to Germany.
- Add key 'cross_border_flow' and array with minimal and maximal import/export + Add key 'cross_border_flow' and array with minimal and maximal + import/export Example: {'cross_border_flow': [-x, y]} (with x Import, y Export) Parameters @@ -471,7 +473,8 @@ def _cross_border_flow_per_country_nmp(self, network, snapshots): Extra_functionality that limits AC crossborder flows for each given foreign country from/to Germany. Add key 'cross_border_flow_per_country' to args.extra_functionality and - define dictionary of country keys and desired limitations of im/exports in MWh + define dictionary of country keys and desired limitations of im/exports + in MWh Example: {'cross_border_flow_per_country': {'DK':[-X, Y], 'FR':[0,0]}} Parameters @@ -560,7 +563,8 @@ def _cross_border_flow_per_country(self, network, snapshots): Extra_functionality that limits AC crossborder flows for each given foreign country from/to Germany. Add key 'cross_border_flow_per_country' to args.extra_functionality and - define dictionary of country keys and desired limitations of im/exports in MWh + define dictionary of country keys and desired limitations of im/exports + in MWh Example: {'cross_border_flow_per_country': {'DK':[-X, Y], 'FR':[0,0]}} Parameters @@ -1652,7 +1656,7 @@ def snapshot_clustering_seasonal_storage( # create set for inter-temp constraints and variables network.model.candidates = po.Set(initialize=candidates, ordered=True) - if simplified == False: + if not simplified: # create intra soc variable for each storage/store and each hour network.model.state_of_charge_intra = po.Var( sus.index, network.snapshots @@ -1959,7 +1963,7 @@ def state_of_charge_lower(m, s, h): delta_t = h - period_start intra_hour = first_hour + delta_t else: - hrs = 24 # 0 ### + hrs = 24 date = str( network.snapshots[ network.snapshots.dayofyear - 1 @@ -1974,7 +1978,7 @@ def state_of_charge_lower(m, s, h): + m.state_of_charge_inter[ s, network.cluster_ts["Candidate_day"][h] ] - * (1 - network.storage_units.at[s, "standing_loss"]) ** hrs ### + * (1 - network.storage_units.at[s, "standing_loss"]) ** hrs >= 0 ) @@ -2001,7 +2005,7 @@ def state_of_charge_lower_store(m, s, h): delta_t = h - period_start intra_hour = first_hour + delta_t else: - hrs = 24 # 0 ### + hrs = 24 date = str( network.snapshots[ network.snapshots.dayofyear - 1 @@ -2024,7 +2028,7 @@ def state_of_charge_lower_store(m, s, h): + m.state_of_charge_inter_store[ s, network.cluster_ts["Candidate_day"][h] ] - * (1 - network.stores.at[s, "standing_loss"]) ** hrs ### + * (1 - network.stores.at[s, "standing_loss"]) ** hrs >= low ) @@ -2042,7 +2046,7 @@ def state_of_charge_lower_simplified(m, s, h): elif self.args["snapshot_clustering"]["how"] == "monthly": hrs = 720 else: - hrs = 24 # 0 + hrs = 24 return ( m.state_of_charge_intra_min[ @@ -2051,7 +2055,7 @@ def state_of_charge_lower_simplified(m, s, h): + m.state_of_charge_inter[ s, network.cluster_ts["Candidate_day"][h] ] - * (1 - network.storage_units.at[s, "standing_loss"]) ** hrs ### + * (1 - network.storage_units.at[s, "standing_loss"]) ** hrs >= 0 ) @@ -2061,7 +2065,7 @@ def state_of_charge_lower_store_simplified(m, s, h): elif self.args["snapshot_clustering"]["how"] == "monthly": hrs = 720 else: - hrs = 24 # 0 + hrs = 24 if "DSM" in s: if self.args["snapshot_clustering"]["how"] == "weekly": @@ -2105,7 +2109,7 @@ def state_of_charge_lower_store_simplified(m, s, h): + m.state_of_charge_inter_store[ s, network.cluster_ts["Candidate_day"][h] ] - * (1 - network.stores.at[s, "standing_loss"]) ** hrs ### + * (1 - 
network.stores.at[s, "standing_loss"]) ** hrs
                 >= low
             )
 
@@ -2184,7 +2188,7 @@ def state_of_charge_upper(m, s, h):
                 + m.state_of_charge_inter[
                     s, network.cluster_ts["Candidate_day"][h]
                 ]
-                * (1 - network.storage_units.at[s, "standing_loss"]) ** hrs  ###
+                * (1 - network.storage_units.at[s, "standing_loss"]) ** hrs
                 <= p_nom * network.storage_units.at[s, "max_hours"]
             )
 
@@ -2237,7 +2241,7 @@ def state_of_charge_upper_store(m, s, h):
                 + m.state_of_charge_inter_store[
                     s, network.cluster_ts["Candidate_day"][h]
                 ]
-                * (1 - network.stores.at[s, "standing_loss"]) ** hrs  ###
+                * (1 - network.stores.at[s, "standing_loss"]) ** hrs
                 <= e_nom
             )
 
@@ -2261,7 +2265,7 @@ def state_of_charge_upper_simplified(m, s, h):
                 + m.state_of_charge_inter[
                     s, network.cluster_ts["Candidate_day"][h]
                 ]
-                * (1 - network.storage_units.at[s, "standing_loss"]) ** hrs  ###
+                * (1 - network.storage_units.at[s, "standing_loss"]) ** hrs
                 <= p_nom * network.storage_units.at[s, "max_hours"]
             )
 
@@ -2327,7 +2331,7 @@ def state_of_charge_upper_store_simplified(m, s, h):
                 + m.state_of_charge_inter_store[
                     s, network.cluster_ts["Candidate_day"][h]
                 ]
-                * (1 - network.stores.at[s, "standing_loss"]) ** hrs  ###
+                * (1 - network.stores.at[s, "standing_loss"]) ** hrs
                 <= e_nom
             )
 
@@ -2667,7 +2671,8 @@ def split_dispatch_disaggregation_constraints(self, n, sns):
     """
     Add constraints for state of charge of storage units and stores
     when separating the optimization into smaller subproblems
-    while conducting thedispatch_disaggregation in temporally fully resolved network
+    while conducting the dispatch_disaggregation in temporally fully resolved
+    network
 
     The state of charge at the end of each slice is set to the value
     calculated in the optimization with the temporally reduced network
@@ -2721,7 +2726,6 @@ def disaggregation_sto_soc(m, s, h):
 
 
 def split_dispatch_disaggregation_constraints_nmp(self, n, sns):
-
     print("TODO")
     # TODO: implementieren
 
@@ -2826,7 +2830,8 @@ def functionality(self, network, snapshots):
         ):
             if self.args["snapshot_clustering"]["how"] == "hourly":
                 logger.info(
-                    "soc_constraints_simplified not possible while hourly clustering -> changed to soc_constraints"
+                    """soc_constraints_simplified not possible while hourly
+                    clustering -> changed to soc_constraints"""
                 )
 
             if self.args["method"]["pyomo"]:
@@ -2849,8 +2854,10 @@
 
         else:
             logger.error(
-                "If you want to use constraints considering the storage behaviour, snapshot clustering constraints must be in"
-                + " [daily_bounds, soc_constraints, soc_constraints_simplified]"
+                """If you want to use constraints considering the storage
+                behaviour, snapshot clustering constraints must be in
+                [daily_bounds, soc_constraints,
+                soc_constraints_simplified]"""
             )
 
     if self.conduct_dispatch_disaggregation is not False:
@@ -2867,8 +2874,9 @@
 
 def add_chp_constraints_nmp(n):
     """
     Limits the dispatch of combined heat and power links based on
-    T.Brown et. al : Synergies of sector coupling and transmission reinforcement
-    in a cost-optimised, highly renewable European energy system, 2018
+    T. Brown et al.: Synergies of sector coupling and transmission
+    reinforcement in a cost-optimised, highly renewable European energy
+    system, 2018
 
     Parameters
     ----------
@@ -2948,8 +2956,9 @@
 
 def add_chp_constraints(network, snapshots):
     """
     Limits the dispatch of combined heat and power links based on
-    T.Brown et. al : Synergies of sector coupling and transmission reinforcement
-    in a cost-optimised, highly renewable European energy system, 2018
+    T. Brown et al.: Synergies of sector coupling and transmission
+    reinforcement in a cost-optimised, highly renewable European energy
+    system, 2018
 
     Parameters
     ----------
diff --git a/etrago/tools/execute.py b/etrago/tools/execute.py
index f7a13412..1175ef9e 100755
--- a/etrago/tools/execute.py
+++ b/etrago/tools/execute.py
@@ -146,7 +146,8 @@ def run_lopf(etrago, extra_functionality, method):
     if method["pyomo"]:
         # repeat the optimization for all slices
         for i in range(0, no_slices):
-            # keep information on the initial state of charge for the respectng slice
+            # keep information on the initial state of charge for the
+            # respecting slice
             initial = transits[i - 1]
             soc_initial = etrago.conduct_dispatch_disaggregation.loc[
                 [etrago.network_tsa.snapshots[initial]]
@@ -314,7 +315,7 @@ def iterate_lopf(
         for i in range(1, (1 + n_iter)):
             run_lopf(etrago, extra_functionality, method)
 
-            if args["csv_export"] != False:
+            if args["csv_export"]:
                 path_it = path + "/lopf_iteration_" + str(i)
                 etrago.export_to_csv(path_it)
@@ -348,7 +349,7 @@
                 i += 1
 
-                if args["csv_export"] != False:
+                if args["csv_export"]:
                     path_it = path + "/lopf_iteration_" + str(i)
                     etrago.export_to_csv(path_it)
@@ -391,7 +392,7 @@ def lopf(self):
     z = (y - x) / 60
     logger.info("Time for LOPF [min]: {}".format(round(z, 2)))
 
-    if self.args["csv_export"] != False:
+    if self.args["csv_export"]:
         path = self.args["csv_export"]
         if self.args["temporal_disaggregation"]["active"] is True:
             path = path + "/temporally_reduced"
@@ -431,14 +432,16 @@ def dispatch_disaggregation(self):
             transits = transits.insert(
                 (len(transits)), self.network.snapshots[-1]
             )
-        # for stores, exclude emob and dsm because of their special constraints
+        # for stores, exclude emob and dsm because of their special
+        # constraints
         sto = self.network.stores[
             ~self.network.stores.carrier.isin(
                 ["battery_storage", "battery storage", "dsm"]
             )
         ]
 
-        # save state of charge of storage units and stores at those transition snapshots
+        # save state of charge of storage units and stores at those
+        # transition snapshots
         self.conduct_dispatch_disaggregation = pd.DataFrame(
             columns=self.network.storage_units.index.append(sto.index),
             index=transits,
@@ -501,7 +504,7 @@ def dispatch_disaggregation(self):
         )
         self.network.stores.e_cyclic = self.network_tsa.stores.e_cyclic
 
-        if self.args["csv_export"] != False:
+        if self.args["csv_export"]:
             path = self.args["csv_export"]
             self.export_to_csv(path)
             self.export_to_csv(path + "/temporal_disaggregaton")
@@ -530,8 +533,8 @@
     Function that prepares and runs non-linar load flow using PyPSA pf.
     If crossborder lines are DC-links, pf is only applied on german network.
     Crossborder flows are still considerd due to the active behavior of links.
-    To return a network containing the whole grid, the optimised solution of the
-    foreign components can be added afterwards.
+    To return a network containing the whole grid, the optimised solution of
+    the foreign components can be added afterwards.
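The `csv_export` conditions above stay positive (`if args["csv_export"]:`) because the option is either `False` or a path string, so plain truthiness matches the old `!= False` checks, whereas `not args["csv_export"]` would invert them:

    # csv_export is either False or a path string such as "results/run_1"
    for csv_export in (False, "results/run_1"):
        assert (csv_export != False) == bool(csv_export)  # noqa: E712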
Parameters
     ----------
@@ -807,7 +810,7 @@ def import_gen_from_links(network):
 
     # if foreign lines are DC, execute pf only on sub_network in Germany
     if (args["foreign_lines"]["carrier"] == "DC") or (
-        (args["scn_extension"] != None)
+        (args["scn_extension"] is not None)
         and ("BE_NO_NEP 2035" in args["scn_extension"])
     ):
         foreign_bus, foreign_comp, foreign_series = drop_foreign_components(
@@ -831,7 +834,7 @@ def import_gen_from_links(network):
     # Find out the name of the main subnetwork
     main_subnet = str(network.buses.sub_network.value_counts().argmax())
 
-    # Delete very small p_set and q_set values to avoid problems with the solver
+    # Delete very small p_set and q_set values to avoid problems when solving
     network.generators_t["p_set"][
         np.abs(network.generators_t["p_set"]) < 0.001
     ] = 0
@@ -880,7 +883,7 @@
     if (
         (args["foreign_lines"]["carrier"] == "DC")
         or (
-            (args["scn_extension"] != None)
+            (args["scn_extension"] is not None)
             and ("BE_NO_NEP 2035" in args["scn_extension"])
         )
     ) and etrago.args["pf_post_lopf"]["add_foreign_lopf"]:
         for comp in sorted(foreign_series):
             network.import_components_from_dataframe(
                 foreign_series[comp][attr], comp, attr
             )
 
-    if args["csv_export"] != False:
+    if args["csv_export"]:
         path = args["csv_export"] + "/pf_post_lopf"
         etrago.export_to_csv(path)
         pf_solve.to_csv(os.path.join(path, "pf_solution.csv"), index=True)
@@ -928,7 +931,7 @@ def distribute_q(network, allocation="p_nom"):
 
     ac_bus = network.buses[network.buses.carrier == "AC"]
 
     gen_elec = network.generators[
-        (network.generators.bus.isin(ac_bus.index) == True)
+        (network.generators.bus.isin(ac_bus.index))
         & (network.generators.carrier != "load shedding")
     ].carrier.unique()
@@ -966,9 +969,9 @@
         )
     else:
         print(
-            """WARNING: Distribution of reactive power based on active power is
-             currently outdated for sector coupled models. This process
-             will continue with the option allocation = 'p_nom'"""
+            """WARNING: Distribution of reactive power based on active
+            power is currently outdated for sector coupled models. This
+            process will continue with the option allocation = 'p_nom'"""
         )
         allocation = "p_nom"
@@ -989,7 +992,7 @@
     ac_bus = network.buses[network.buses.carrier == "AC"]
 
     gen_elec = network.generators[
-        (network.generators.bus.isin(ac_bus.index) == True)
+        (network.generators.bus.isin(ac_bus.index))
         & (network.generators.carrier != "load shedding")
         & (network.generators.p_nom > 0)
     ].sort_index()
@@ -1061,8 +1064,9 @@ def calc_line_losses(network, converged):
     s0_lines = (network.lines_t.p0**2 + network.lines_t.q0**2).apply(
         np.sqrt
     )
-    # in case some snapshots did not converge, discard them from the calculation
-    s0_lines.loc[converged[converged == False].index, :] = np.nan
+    # in case some snapshots did not converge, discard them from the
+    # calculation
+    s0_lines.loc[converged[~converged].index, :] = np.nan
     # calculate current I = S / U [in A]
     i0_lines = np.multiply(s0_lines, 1000000) / np.multiply(
         network.lines.v_nom, 1000
diff --git a/etrago/tools/extendable.py b/etrago/tools/extendable.py
index 6bb56658..a1da98b4 100644
--- a/etrago/tools/extendable.py
+++ b/etrago/tools/extendable.py
@@ -21,16 +21,14 @@
 """
 Extendable.py defines function to set PyPSA components extendable.
""" -from etrago.tools.utilities import convert_capital_costs, find_snapshots - -from etrago.cluster.snapshot import snapshot_clustering +from math import sqrt +import time import numpy as np import pandas as pd -import time -from math import sqrt - +from etrago.cluster.snapshot import snapshot_clustering +from etrago.tools.utilities import convert_capital_costs, find_snapshots __copyright__ = ( "Flensburg University of Applied Sciences, " @@ -39,7 +37,8 @@ "DLR-Institute for Networked Energy Systems" ) __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" -__author__ = "ulfmueller, s3pp, wolfbunke, mariusves, lukasol, ClaraBuettner, KathiEsterl, CarlosEpia" +__author__ = """ulfmueller, s3pp, wolfbunke, mariusves, lukasol, ClaraBuettner, + KathiEsterl, CarlosEpia""" def extendable( @@ -60,13 +59,16 @@ def extendable( Parameters ---------- grid_max_D : int, optional - Upper bounds for electrical grid expansion relative to existing capacity. The default is None. + Upper bounds for electrical grid expansion relative to existing + capacity. The default is None. grid_max_abs_D : dict, optional Absolute upper bounds for electrical grid expansion in Germany. grid_max_foreign : int, optional - Upper bounds for expansion of electrical foreign lines relative to the existing capacity. The default is 4. + Upper bounds for expansion of electrical foreign lines relative to the + existing capacity. The default is 4. grid_max_abs_foreign : dict, optional - Absolute upper bounds for expansion of foreign electrical grid. The default is None. + Absolute upper bounds for expansion of foreign electrical grid. + The default is None. Returns ------- @@ -77,7 +79,7 @@ def extendable( network = self.network extendable_settings = self.args["extendable"] - if not "as_in_db" in extendable_settings["extendable_components"]: + if "as_in_db" not in extendable_settings["extendable_components"]: network.lines.s_nom_extendable = False network.transformers.s_nom_extendable = False network.links.p_nom_extendable = False @@ -357,7 +359,7 @@ def extendable( # constrain network expansion to maximum - if not grid_max_abs_D == None: + if grid_max_abs_D is not None: buses = network.buses[ (network.buses.country == "DE") & (network.buses.carrier == "AC") ] @@ -373,7 +375,7 @@ def extendable( "p_nom_max", ] = grid_max_abs_D["dc"] - if not grid_max_abs_foreign == None: + if grid_max_abs_foreign is not None: foreign_buses = network.buses[ (network.buses.country != "DE") & (network.buses.carrier == "AC") ] @@ -395,7 +397,7 @@ def extendable( "p_nom_max", ] = grid_max_abs_foreign["dc"] - if not grid_max_D == None: + if grid_max_D is not None: buses = network.buses[ (network.buses.country == "DE") & (network.buses.carrier == "AC") ] @@ -421,7 +423,7 @@ def extendable( grid_max_D * network.links.p_nom ) - if not grid_max_foreign == None: + if grid_max_foreign is not None: foreign_buses = network.buses[ (network.buses.country != "DE") & (network.buses.carrier == "AC") ] @@ -490,7 +492,8 @@ def line_max_abs( }, ): """ - Function to calculate limitation for capacity expansion of lines in network. + Function to calculate limitation for capacity expansion of lines in + network. 
@@ -535,7 +538,8 @@ def line_max_abs(
         wires=line_max_abs["380"]["wires"],
         circuits=line_max_abs["380"]["circuits"],
     ) * (network.lines["cables"] / network.lines["total_cables"])
-    # set the s_nom_max depending on the voltage level and the share of the route
+    # set the s_nom_max depending on the voltage level
+    # and the share of the route
     network.lines.loc[
         (network.lines.bus0.isin(buses.index))
         & (network.lines.bus1.isin(buses.index))
@@ -587,7 +591,8 @@ def line_max_abs(

 def transformer_max_abs(network, buses):
     """
-    Function to calculate limitation for capacity expansion of transformers in network.
+    Function to calculate limitation for capacity expansion of transformers in
+    network.

     Parameters
     ----------
@@ -603,8 +608,8 @@ def transformer_max_abs(network, buses):
     """

     # To determine the maximum extendable capacity of a transformer, the sum of
-    # the maximum capacities of the lines connected to it is calculated for each
-    # of its 2 sides. The smallest one is selected.
+    # the maximum capacities of the lines connected to it is calculated for
+    # each of its 2 sides. The smallest one is selected.
     smax_bus0 = network.lines.s_nom_max.groupby(network.lines.bus0).sum()
     smax_bus1 = network.lines.s_nom_max.groupby(network.lines.bus1).sum()
     smax_bus = pd.concat([smax_bus0, smax_bus1], axis=1)
@@ -806,12 +811,14 @@ def print_expansion_costs(network):

     if not ext_storage.empty:
         print(
-            "Investment costs for all storage units in selected snapshots [EUR]:",
+            "Investment costs for all storage units in selected "
+            "snapshots [EUR]:",
             round(storage_costs, 2),
         )

     if not ext_lines.empty:
         print(
-            "Investment costs for all lines and transformers in selected snapshots [EUR]:",
+            "Investment costs for all lines and transformers in "
+            "selected snapshots [EUR]:",
             round(network_costs, 2),
         )
diff --git a/etrago/tools/io.py b/etrago/tools/io.py
index 5bb2b019..c87bda98 100644
--- a/etrago/tools/io.py
+++ b/etrago/tools/io.py
@@ -196,7 +196,7 @@ def fetch_by_relname(self, name):
     pd.DataFrame
         Component data.
     """
-    from saio.grid import (
+    from saio.grid import (  # noqa: F401
         egon_etrago_bus,
         egon_etrago_generator,
         egon_etrago_line,
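The ``# noqa: F401`` markers are needed because the ``saio.grid`` classes are
imported into the namespace and then looked up dynamically rather than
referenced literally, so flake8 reports them as unused. A hedged, generic
sketch of that kind of runtime lookup (stand-in module; the actual resolution
mechanism in ``io.py`` may differ):

    import math as _math  # noqa: F401  (only referenced dynamically below)

    def fetch(name):
        # resolve an object from the module namespace at runtime;
        # flake8 cannot see this use, hence the noqa on the import
        return globals()[name]

    print(fetch("_math").sqrt(4))  # 2.0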
""" - from saio.grid import ( + from saio.grid import ( # noqa: F401 egon_etrago_bus_timeseries, egon_etrago_generator_timeseries, egon_etrago_line_timeseries, @@ -500,8 +500,8 @@ def results_to_oedb(session, network, args, grid="hv", safe_results=False): print("Uploading results to db...") # get last result id and get new one - last_res_id = session.query(func.max(ResultMeta.result_id)).scalar() - if last_res_id == None: + last_res_id = session.query(max(ResultMeta.result_id)).scalar() + if last_res_id is None: new_res_id = 1 else: new_res_id = last_res_id + 1 @@ -539,7 +539,7 @@ def results_to_oedb(session, network, args, grid="hv", safe_results=False): if network.generators.carrier[gen] not in sources.name.values: new_source = Source() new_source.source_id = ( - session.query(func.max(Source.source_id)).scalar() + 1 + session.query(max(Source.source_id)).scalar() + 1 ) new_source.name = network.generators.carrier[gen] session.add(new_source) @@ -564,7 +564,7 @@ def results_to_oedb(session, network, args, grid="hv", safe_results=False): if network.storage_units.carrier[stor] not in sources.name.values: new_source = Source() new_source.source_id = ( - session.query(func.max(Source.source_id)).scalar() + 1 + session.query(max(Source.source_id)).scalar() + 1 ) new_source.name = network.storage_units.carrier[stor] session.add(new_source) @@ -875,11 +875,6 @@ def decommissioning(self, **kwargs): row["scn_name"] == "extension_" + self.args["scn_decommissioning"] ): - v_nom_dec = df_decommisionning["v_nom"][ - (df_decommisionning.project == row["project"]) - & (df_decommisionning.project_id == row["project_id"]) - ] - self.network.lines.s_nom_min[ self.network.lines.index == idx ] = self.network.lines.s_nom_min diff --git a/etrago/tools/plot.py b/etrago/tools/plot.py index 72197de7..038d4bbc 100644 --- a/etrago/tools/plot.py +++ b/etrago/tools/plot.py @@ -45,9 +45,9 @@ if "READTHEDOCS" not in os.environ: from geoalchemy2.shape import to_shape - import geopandas as gpd from pyproj import Proj, transform from shapely.geometry import LineString, MultiPoint, Point, Polygon + import geopandas as gpd import tilemapbase __copyright__ = ( diff --git a/noxfile.py b/noxfile.py index 7fabe5f7..07366ba1 100644 --- a/noxfile.py +++ b/noxfile.py @@ -5,7 +5,14 @@ cleaned = [ "etrago/cluster/disaggregation.py", + "etrago/cluster/electrical.py", + "etrago/cluster/gas.py", + "etrago/cluster/snapshot.py", + "etrago/cluster/spatial.py", "etrago/tools/calc_results.py", + "etrago/tools/execute.py", + "etrago/tools/extendable.py", + "etrago/tools/io.py", "etrago/tools/network.py", "etrago/tools/utilities.py", "noxfile.py", @@ -50,7 +57,7 @@ def flake8(session): """Check for happy little style accidents with `flake8`.""" setdefaults(session) session.install("Flake8-pyproject", "flake8") - session.run("flake8", *cleaned) + session.run("flake8", "--ignore=E722, W605", *cleaned) @nox.session(python=["3", "3.8", "3.9", "3.10", "3.11"])