diff --git a/etrago/appl.py b/etrago/appl.py
index da10a4e3..ee851d3a 100644
--- a/etrago/appl.py
+++ b/etrago/appl.py
@@ -134,7 +134,7 @@
         },
     },
     "network_clustering_ehv": False,  # clustering of HV buses to EHV buses.
-    "disaggregation": None,  # None, 'mini' or 'uniform'
+    "disaggregation": "uniform",  # None, 'mini' or 'uniform'
     # Temporal Complexity:
     "snapshot_clustering": {
         "active": False,  # choose if clustering is activated
@@ -517,7 +517,7 @@ def run_etrago(args, json_path):
 
     # spatial disaggregation
     # needs to be adjusted for new sectors
-    # etrago.disaggregation()
+    etrago.disaggregation()
 
     # calculate central etrago results
     etrago.calc_results()
diff --git a/etrago/cluster/disaggregation.py b/etrago/cluster/disaggregation.py
index 0b437883..054525fb 100644
--- a/etrago/cluster/disaggregation.py
+++ b/etrago/cluster/disaggregation.py
@@ -4,10 +4,12 @@
 import cProfile
 import time
 
+from loguru import logger as log
 from pyomo.environ import Constraint
 from pypsa import Network
 import pandas as pd
 
+from etrago.tools import noops
 from etrago.tools.utilities import residual_load
 
 
@@ -27,7 +29,7 @@ def __init__(
 
         self.buses = pd.merge(
             original_network.buses,
-            clustering.busmap.to_frame(name="cluster"),
+            self.clustering.busmap.to_frame(name="cluster"),
             left_index=True,
             right_index=True,
         )
@@ -76,7 +78,7 @@ def construct_partial_network(self, cluster, scenario):
         # find all lines that have at least one bus inside the cluster
         busflags = self.buses["cluster"] == cluster
 
-        def is_bus_in_cluster(conn):
+        def is_bus_in_cluster(conn, busflags=busflags):
            return busflags[conn]
 
         # Copy configurations to new network
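The `busflags=busflags` default argument in the hunk above binds the mask to the predicate at definition time; a plain closure resolves the name again on every call and would silently follow any later rebinding of `busflags`. A minimal sketch of the late-binding behaviour this presumably guards against — the names are illustrative, not from eTraGo:

    flags = {"a": True}

    def late(key):
        return flags[key]  # closure: `flags` is looked up at call time

    def early(key, flags=flags):
        return flags[key]  # default: `flags` is captured at definition time

    flags = {"a": False}  # rebind the name

    print(late("a"))   # False -- sees the rebound mapping
    print(early("a"))  # True  -- keeps the original mapping

As a side effect, the default argument also makes the lookup a local-variable access, which is marginally faster inside the `Series.apply` hot path.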
@@ -92,33 +94,34 @@ def is_bus_in_cluster(conn):
         line_types = ["lines", "links", "transformers"]
         for line_type in line_types:
+            rows: pd.DataFrame = getattr(self.original_network, line_type)
+            timeseries: dict[str, pd.DataFrame] = getattr(
+                self.original_network, line_type + "_t"
+            )
             # Copy all lines that reside entirely inside the cluster ...
             setattr(
                 partial_network,
                 line_type,
-                filter_internal_connector(
-                    getattr(self.original_network, line_type),
-                    is_bus_in_cluster,
-                ),
+                filter_internal_connector(rows, is_bus_in_cluster),
             )
             # ... and their time series
             # TODO: These are all time series, not just the ones from lines
-            #       residing entirely in side the cluster.
+            #       residing entirely inside the cluster.
             #       Is this a problem?
-            setattr(
-                partial_network,
-                line_type + "_t",
-                getattr(self.original_network, line_type + "_t"),
-            )
+            #       I hope not, because neither is `rows.index` a subset
+            #       of the columns of one of the values of `timeseries`,
+            #       nor the other way around, so it's not clear how to
+            #       align both.
+            setattr(partial_network, line_type + "_t", timeseries)
 
             # Copy all lines whose `bus0` lies within the cluster
             left_external_connectors = filter_left_external_connector(
-                getattr(self.original_network, line_type), is_bus_in_cluster
+                rows, is_bus_in_cluster
             )
 
             def from_busmap(x):
-                return self.idx_prefix + self.clustering.busmap.loc[x]
+                return self.idx_prefix + self.buses.loc[x, "cluster"]
 
             if not left_external_connectors.empty:
                 ca_option = pd.get_option("mode.chained_assignment")
@@ -133,7 +136,7 @@ def from_busmap(x):
 
             # Copy all lines whose `bus1` lies within the cluster
             right_external_connectors = filter_right_external_connector(
-                getattr(self.original_network, line_type), is_bus_in_cluster
+                rows, is_bus_in_cluster
             )
             if not right_external_connectors.empty:
                 ca_option = pd.get_option("mode.chained_assignment")
@@ -179,8 +182,8 @@ def from_busmap(x):
                 self.reindex_with_prefix(externals_to_insert)
 
             # .. and insert them as well as their time series
-            partial_network.buses = partial_network.buses.append(
-                externals_to_insert
+            partial_network.buses = pd.concat(
+                [partial_network.buses, externals_to_insert]
             )
 
             partial_network.buses_t = self.original_network.buses_t
@@ -212,7 +215,9 @@ def from_busmap(x):
                 setattr(
                     partial_network,
                     bustype,
-                    getattr(partial_network, bustype).append(buses_to_insert),
+                    pd.concat(
+                        [getattr(partial_network, bustype), buses_to_insert]
+                    ),
                 )
 
             # Also copy their time series
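The two `append` replacements above track pandas itself: `DataFrame.append` was deprecated in pandas 1.4 and removed in 2.0, and `pd.concat` over a list of frames is the drop-in substitute. A minimal sketch with made-up frames:

    import pandas as pd

    buses = pd.DataFrame({"v_nom": [380.0]}, index=["bus0"])
    externals = pd.DataFrame({"v_nom": [110.0]}, index=["bus1"])

    # Formerly: buses = buses.append(externals)
    buses = pd.concat([buses, externals])  # same row-wise stacking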
+ f" Spurious additional rows:\nf{rows.loc[~(left | right), :]}" ) return partial_network, external_buses @@ -265,7 +270,7 @@ def solve(self, scenario, solver): :param scenario: :param solver: Solver that may be used to optimize partial networks """ - clusters = set(self.clustering.busmap.values) + clusters = set(self.buses.loc[:, "cluster"].values) n = len(clusters) self.stats = { "clusters": pd.DataFrame( @@ -274,9 +279,9 @@ def solve(self, scenario, solver): ) } profile = cProfile.Profile() + profile = noops for i, cluster in enumerate(sorted(clusters)): - print("---") - print("Decompose cluster %s (%d/%d)" % (cluster, i + 1, n)) + log.info(f"Decompose {cluster=} ({i + 1}/{n})") profile.enable() t = time.time() partial_network, externals = self.construct_partial_network( @@ -284,9 +289,9 @@ def solve(self, scenario, solver): ) profile.disable() self.stats["clusters"].loc[cluster, "decompose"] = time.time() - t - print( - "Decomposed in ", - self.stats["clusters"].loc[cluster, "decompose"], + log.info( + "Decomposed in " + f'{self.stats["clusters"].loc[cluster, "decompose"]}' ) t = time.time() profile.enable() @@ -295,32 +300,32 @@ def solve(self, scenario, solver): ) profile.disable() self.stats["clusters"].loc[cluster, "spread"] = time.time() - t - print( - "Result distributed in ", - self.stats["clusters"].loc[cluster, "spread"], + log.info( + "Result distributed in " + f'{self.stats["clusters"].loc[cluster, "spread"]}' ) profile.enable() t = time.time() self.transfer_results(partial_network, externals) profile.disable() self.stats["clusters"].loc[cluster, "transfer"] = time.time() - t - print( - "Results transferred in ", - self.stats["clusters"].loc[cluster, "transfer"], + log.info( + "Results transferred in " + f'{self.stats["clusters"].loc[cluster, "transfer"]}' ) profile.enable() t = time.time() - print("---") fs = (mc("sum"), mc("sum")) for bt, ts in ( ("generators", {"p": fs, "q": fs}), ("storage_units", {"p": fs, "state_of_charge": fs, "q": fs}), + ("links", {"p0": fs, "p1": fs}), ): - print("Attribute sums, {}, clustered - disaggregated:".format(bt)) + log.info(f"Attribute sums, {bt}, clustered - disaggregated:") cnb = getattr(self.clustered_network, bt) onb = getattr(self.original_network, bt) - print( + log.info( "{:>{}}: {}".format( "p_nom_opt", 4 + len("state_of_charge"), @@ -329,11 +334,11 @@ def solve(self, scenario, solver): ) ) - print("Series sums, {}, clustered - disaggregated:".format(bt)) + log.info(f"Series sums, {bt}, clustered - disaggregated:") cnb = getattr(self.clustered_network, bt + "_t") onb = getattr(self.original_network, bt + "_t") for s in ts: - print( + log.info( "{:>{}}: {}".format( s, 4 + len("state_of_charge"), @@ -343,9 +348,9 @@ def solve(self, scenario, solver): ) profile.disable() self.stats["check"] = time.time() - t - print("Checks computed in ", self.stats["check"]) + log.info(f"Checks computed in {self.stats['check']}s.") - # profile.print_stats(sort='cumtime') + profile.print_stats(sort="cumtime") def transfer_results( self, @@ -420,7 +425,7 @@ def _validate_disaggregation_generators(self, cluster, f): def extra_functionality(network, snapshots): f(network, snapshots) generators = self.original_network.generators.assign( - bus=lambda df: df.bus.map(self.clustering.busmap) + bus=lambda df: df.bus.map(self.buses.loc[:, "cluster"]) ) def construct_constraint(model, snapshot, carrier): @@ -472,7 +477,9 @@ def extra_functionality(network, snapshots): ]: generators = getattr( self.original_network, bustype_pypsa - ).assign(bus=lambda df: 
@@ -519,12 +526,21 @@ class UniformDisaggregation(Disaggregation):
     def solve_partial_network(
         self, cluster, partial_network, scenario, solver=None
     ):
+        log.debug("Solving partial network.")
         bustypes = {
+            "links": {
+                "group_by": ("carrier", "bus1"),
+                "series": ("p0", "p1"),
+            },
             "generators": {"group_by": ("carrier",), "series": ("p", "q")},
             "storage_units": {
                 "group_by": ("carrier", "max_hours"),
                 "series": ("p", "state_of_charge", "q"),
             },
+            "stores": {
+                "group_by": ("carrier",),
+                "series": ("e", "p"),
+            },
         }
         weights = {
             "p": ("p_nom_opt", "p_max_pu"),
@@ -536,47 +552,93 @@ def solve_partial_network(
                 )
                 else ("p_nom_opt", "p_max_pu")
             ),
+            "p0": ("p_nom_opt",),
+            "p1": ("p_nom_opt",),
             "state_of_charge": ("p_nom_opt",),
+            "e": ("e_nom_opt",),
         }
         filters = {"q": lambda o: o.control == "PV"}
+
         for bustype in bustypes:
+            # Define the capacity attribute names, which differ between
+            # stores and the other component types.
+            if bustype == "stores":
+                extendable_flag = "e_nom_extendable"
+                nominal_capacity = "e_nom"
+                optimal_capacity = "e_nom_opt"
+                maximal_capacity = "e_nom_max"
+                weights["p"] = ("e_nom_opt", "e_max_pu")
+            else:
+                extendable_flag = "p_nom_extendable"
+                nominal_capacity = "p_nom"
+                optimal_capacity = "p_nom_opt"
+                maximal_capacity = "p_nom_max"
+                weights["p"] = ("p_nom_opt", "p_max_pu")
+
+            log.debug(f"Decomposing {bustype}.")
             pn_t = getattr(partial_network, bustype + "_t")
             cl_t = getattr(self.clustered_network, bustype + "_t")
             pn_buses = getattr(partial_network, bustype)
-            cl_buses = getattr(self.clustered_network, bustype)
+            cl_buses = getattr(self.clustered_network, bustype)[
+                lambda df: df.loc[:, "bus" if "bus" in df.columns else "bus0"]
+                == cluster
+            ]
             groups = product(
                 *[
                     [
                         {"key": key, "value": value}
-                        for value in set(pn_buses.loc[:, key])
+                        for value in set(cl_buses.loc[:, key])
                     ]
                     for key in bustypes[bustype]["group_by"]
                 ]
             )
             for group in groups:
-                clb = cl_buses[cl_buses.bus == cluster]
                 query = " & ".join(
                     ["({key} == {value!r})".format(**axis) for axis in group]
                 )
-                clb = clb.query(query)
+                clb = cl_buses.query(query)
                 if len(clb) == 0:
                     continue
                 assert len(clb) == 1, (
-                    "Cluster {} has {} buses for group {}.\n".format(
-                        cluster, len(clb), group
-                    )
-                    + "Should be exactly one."
+                    f"Cluster {cluster} has {len(clb)} buses for {group=}."
+                    "\nShould be exactly one."
                 )
 
                 # Remove buses not belonging to the partial network
                 pnb = pn_buses.iloc[
                     [
                         i
                         for i, row in enumerate(pn_buses.itertuples())
-                        if not row.bus.startswith(self.idx_prefix)
+                        for bus in [
+                            row.bus if hasattr(row, "bus") else row.bus0
+                        ]
+                        if not bus.startswith(self.idx_prefix)
                     ]
                 ]
+                if bustype == "links":
+                    index = self.buses[
+                        self.buses.loc[:, "cluster"] == group[1]["value"]
+                    ].index.tolist()
+                    query = (
+                        f"(carrier == {group[0]['value']!r})"
+                        f" & (bus1 in {index})"
+                    )
                 pnb = pnb.query(query)
-                assert not pnb.empty, (
+                assert not pnb.empty or (
+                    # In some cases, a district heating grid is connected to a
+                    # substation only via a resistive_heater but not e.g. by a
+                    # heat_pump or one of the other listed `carrier`s.
+                    # In the clustered network, there are both.
+                    # In these cases, the `pnb` can actually be empty.
+ group[0]["value"] + in [ + "central_gas_boiler", + "central_heat_pump", + "central_gas_CHP_heat", + "central_gas_CHP", + "CH4", + "DC", + "OCGT", + ] + ), ( "Cluster has a bus for:" + "\n ".join( ["{key}: {value!r}".format(**axis) for axis in group] @@ -584,10 +646,12 @@ def solve_partial_network( + "\nbut no matching buses in its corresponding " + "partial network." ) + if pnb.empty: + continue if not ( - pnb.loc[:, "p_nom_extendable"].all() - or not pnb.loc[:, "p_nom_extendable"].any() + pnb.loc[:, extendable_flag].all() + or not pnb.loc[:, extendable_flag].any() ): raise NotImplementedError( "The `'p_nom_extendable'` flag for buses in the" @@ -603,56 +667,59 @@ def solve_partial_network( ) else: assert ( - pnb.loc[:, "p_nom_extendable"] - == clb.iloc[0].at["p_nom_extendable"] + pnb.loc[:, extendable_flag] + == clb.iloc[0].at[extendable_flag] ).all(), ( - "The `'p_nom_extendable'` flag for the current " - + "cluster's bus does not have the same value " - + "it has on the buses of it's partial network." + "The `'p_nom_extendable'` flag for the current" + " cluster's bus does not have the same value" + " it has on the buses of it's partial network." ) - if clb.iloc[0].at["p_nom_extendable"]: + if clb.iloc[0].at[extendable_flag]: # That means, `p_nom` got computed via optimization and we # have to distribute it into the subnetwork first. - pnb_p_nom_max = pnb.loc[:, "p_nom_max"] + pnb_p_nom_max = pnb.loc[:, maximal_capacity] + + # If upper limit is infinite, replace it by a very large + # number to avoid NaN values in the calculation + pnb_p_nom_max.replace(float("inf"), 10000000, inplace=True) + p_nom_max_global = pnb_p_nom_max.sum(axis="index") - pnb.loc[:, "p_nom_opt"] = ( - clb.iloc[0].at["p_nom_opt"] + + pnb.loc[:, optimal_capacity] = ( + clb.iloc[0].at[optimal_capacity] * pnb_p_nom_max / p_nom_max_global ) getattr(self.original_network, bustype).loc[ - pnb.index, "p_nom_opt" - ] = pnb.loc[:, "p_nom_opt"] - pnb.loc[:, "p_nom"] = pnb.loc[:, "p_nom_opt"] + pnb.index, optimal_capacity + ] = pnb.loc[:, optimal_capacity] + pnb.loc[:, nominal_capacity] = pnb.loc[:, optimal_capacity] else: # That means 'p_nom_opt' didn't get computed and is # potentially not present in the dataframe. But we want to # always use 'p_nom_opt' in the remaining code, so save a # view of the computed 'p_nom' values under 'p_nom_opt'. - pnb.loc[:, "p_nom_opt"] = pnb.loc[:, "p_nom"] + pnb.loc[:, optimal_capacity] = pnb.loc[:, nominal_capacity] # This probably shouldn't be here, but rather in # `transfer_results`, but it's easier to do it this way right # now. 
                     getattr(self.original_network, bustype).loc[
-                        pnb.index, "p_nom_opt"
-                    ] = pnb.loc[:, "p_nom_opt"]
-                timed = (
-                    lambda key, series=set(
-                        s
-                        for s in cl_t
-                        if not cl_t[s].empty
-                        if not pn_t[s].columns.intersection(pnb.index).empty
-                    ): key
-                    in series
-                )
+                        pnb.index, optimal_capacity
+                    ] = pnb.loc[:, optimal_capacity]
+                timed = lambda key, series={  # noqa: E731
+                    s
+                    for s in cl_t
+                    if not cl_t[s].empty
+                    if not pn_t[s].columns.intersection(pnb.index).empty
+                }: (key in series)
 
                 for s in bustypes[bustype]["series"]:
                     if s in self.skip:
                         continue
                     filtered = pnb.loc[filters.get(s, slice(None))]
-                    clt = cl_t[s].loc[:, next(clb.itertuples()).Index]
+                    clt = cl_t[s].loc[:, clb.index[0]]
                     weight = reduce(
                         multiply,
                         (
@@ -669,15 +736,27 @@ def solve_partial_network(
                         else ()
                     )
                     ws = weight.sum(axis=len(loc))
-                    for bus_id in filtered.index:
-                        values = clt * weight.loc[loc + (bus_id,)] / ws
-                        pn_t[s].insert(len(pn_t[s].columns), bus_id, values)
+                    new_columns = pd.DataFrame(
+                        {
+                            bus_id: clt * weight.loc[loc + (bus_id,)] / ws
+                            for bus_id in filtered.index
+                        }
+                    )
+                    delta = abs((new_columns.sum(axis=1) - clt).sum())
+                    epsilon = 1e-5
+                    assert delta < epsilon, (
+                        "Sum of disaggregated time series does not match"
+                        f" aggregated timeseries: {delta=} > {epsilon=}."
+                    )
+                    pn_t[s].loc[:, new_columns.columns] = new_columns
 
     def transfer_results(self, *args, **kwargs):
-        kwargs["bustypes"] = ["generators", "storage_units"]
+        kwargs["bustypes"] = ["generators", "links", "storage_units", "stores"]
         kwargs["series"] = {
             "generators": {"p"},
+            "links": {"p0", "p1"},
             "storage_units": {"p", "state_of_charge"},
+            "stores": {"e", "p"},
         }
         return super().transfer_results(*args, **kwargs)
 
@@ -688,7 +767,7 @@ def swap_series(s):
 
 def filter_internal_connector(conn, is_bus_in_cluster):
     return conn[
-        conn.bus0.apply(is_bus_in_cluster) & conn.bus1.apply(is_bus_in_cluster)
+        conn.bus0.apply(is_bus_in_cluster) | conn.bus1.apply(is_bus_in_cluster)
     ]
 
 
@@ -719,6 +798,7 @@ def update_constraints(network, externals):
 
 
 def run_disaggregation(self):
+    log.debug("Running disaggregation.")
     if self.clustering:
         disagg = self.args.get("disaggregation")
         skip = () if self.args["pf_post_lopf"]["active"] else ("q",)
@@ -733,9 +813,9 @@ def run_disaggregation(self):
             )
         elif disagg == "uniform":
             disaggregation = UniformDisaggregation(
-                self.disaggregated_network,
-                self.network,
-                self.clustering,
+                original_network=self.disaggregated_network,
+                clustered_network=self.network,
+                clustering=self.clustering,
                 skip=skip,
             )
 
@@ -748,8 +828,7 @@ def run_disaggregation(self):
         self.disaggregated_network.generators_t.p.fillna(0, inplace=True)
         self.disaggregated_network.generators_t.q.fillna(0, inplace=True)
 
-        self.disaggregated_network.results = self.network.results
-        print(
+        log.info(
             "Time for overall desaggregation [min]: {:.2}".format(
                 (time.time() - t) / 60
             )
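`UniformDisaggregation`'s central rule, visible in the `solve_partial_network` hunks above, is a proportional split: the optimised capacity of the clustered component is distributed over the matching components of the partial network in proportion to their capacity limits (`p_nom_max`, or `e_nom_max` for stores), with infinite limits capped beforehand so the ratio stays finite. A minimal numeric sketch with made-up values:

    import pandas as pd

    # Capacity limits of two disaggregated generators in one cluster.
    p_nom_max = pd.Series({"gen_a": 100.0, "gen_b": 300.0})
    cluster_p_nom_opt = 60.0  # optimised capacity of the clustered component

    p_nom_opt = cluster_p_nom_opt * p_nom_max / p_nom_max.sum(axis="index")
    print(p_nom_opt.to_dict())  # {'gen_a': 15.0, 'gen_b': 45.0}

The time series are spread with the same kind of weighting, which is why the new `delta < epsilon` assertion can check that the disaggregated columns sum back to the clustered series.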
+ """ + pass + +class Noops: + """Provide arbitrarily named methods that do nothing. + Any attribute access will return a method that does nothing, i.e. + all methods of this object are :py:func:`noop`s. Normally you don't + need to instantiate this class. All instances behave the same, so + the containing module provides one called :py:obj:`noops` which you + can import and use. + """ + + @classmethod + def __getattribute__(cls, ignored_name): + return noop + + +noops = Noops() +"""A default :py:class:`Noops` instance so you don't have to create one. +""" diff --git a/etrago/tools/utilities.py b/etrago/tools/utilities.py index fddaab0e..1524e19d 100755 --- a/etrago/tools/utilities.py +++ b/etrago/tools/utilities.py @@ -571,6 +571,7 @@ def load_shedding(self, temporal_disaggregation=False, **kwargs): ------- """ + logger.debug("Shedding the load.") if self.args["load_shedding"]: if temporal_disaggregation: network = self.network_tsa diff --git a/setup.py b/setup.py index 4ea9b7ab..c9fd3aaa 100755 --- a/setup.py +++ b/setup.py @@ -46,6 +46,7 @@ def read(*names, **kwargs): "egoio == 0.4.7", "geoalchemy2 >= 0.3.0", "geopandas", + "loguru", "matplotlib >= 3.0.3", "oedialect", # PyPSA uses a deprecated import that errors with Pyomo 6.4.3.