diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 83781f12e..4f2eb32f3 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -52,3 +52,7 @@ jobs:
         run: "python -m nox -s build"
         env:
           PLATFORM: ${{ matrix.os.image }}
+      - name: "Install the package"
+        run: "python -m nox -s install"
+        env:
+          PLATFORM: ${{ matrix.os.image }}
diff --git a/etrago/appl.py b/etrago/appl.py
index 4dd577d8a..cdf3f3ad8 100644
--- a/etrago/appl.py
+++ b/etrago/appl.py
@@ -63,7 +63,7 @@
         "q_allocation": "p_nom",  # allocate reactive power via 'p_nom' or 'p'
     },
     "start_snapshot": 1,
-    "end_snapshot": 2,
+    "end_snapshot": 10,
    "solver": "gurobi",  # glpk, cplex or gurobi
     "solver_options": {
         "BarConvTol": 1.0e-5,
@@ -74,7 +74,7 @@
         "threads": 4,
     },
     "model_formulation": "kirchhoff",  # angles or kirchhoff
-    "scn_name": "eGon2035",  # scenario: eGon2035 or eGon100RE
+    "scn_name": "eGon2035",  # scenario: eGon2035, eGon100RE or status2019
     # Scenario variations:
     "scn_extension": None,  # None or array of extension scenarios
     "scn_decommissioning": None,  # None or decommissioning scenario
diff --git a/etrago/cluster/disaggregation.py b/etrago/cluster/disaggregation.py
index a10b19b79..13c811e1a 100644
--- a/etrago/cluster/disaggregation.py
+++ b/etrago/cluster/disaggregation.py
@@ -4,10 +4,12 @@
 import cProfile
 import time
 
+from loguru import logger as log
 from pyomo.environ import Constraint
 from pypsa import Network
 import pandas as pd
 
+from etrago.tools import noops
 from etrago.tools.utilities import residual_load
 
 
@@ -27,7 +29,7 @@ def __init__(
 
         self.buses = pd.merge(
             original_network.buses,
-            clustering.busmap.to_frame(name="cluster"),
+            self.clustering.busmap.to_frame(name="cluster"),
             left_index=True,
             right_index=True,
         )
@@ -76,7 +78,7 @@ def construct_partial_network(self, cluster, scenario):
         # find all lines that have at least one bus inside the cluster
         busflags = self.buses["cluster"] == cluster
 
-        def is_bus_in_cluster(conn):
+        def is_bus_in_cluster(conn, busflags=busflags):
             return busflags[conn]
 
         # Copy configurations to new network
@@ -92,33 +94,34 @@ def is_bus_in_cluster(conn):
 
         line_types = ["lines", "links", "transformers"]
         for line_type in line_types:
+            rows: pd.DataFrame = getattr(self.original_network, line_type)
+            timeseries: dict[str, pd.DataFrame] = getattr(
+                self.original_network, line_type + "_t"
+            )
             # Copy all lines that reside entirely inside the cluster ...
             setattr(
                 partial_network,
                 line_type,
-                filter_internal_connector(
-                    getattr(self.original_network, line_type),
-                    is_bus_in_cluster,
-                ),
+                filter_internal_connector(rows, is_bus_in_cluster),
             )
 
             # ... and their time series
             # TODO: These are all time series, not just the ones from lines
-            #       residing entirely in side the cluster.
+            #       residing entirely inside the cluster.
             #       Is this a problem?
-            setattr(
-                partial_network,
-                line_type + "_t",
-                getattr(self.original_network, line_type + "_t"),
-            )
+            #       I hope not, because neither is `rows.index` a subset
+            #       of the columns of one of the values of `timeseries`,
+            #       nor the other way around, so it's not clear how to
+            #       align both.
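The `busflags=busflags` default added to `is_bus_in_cluster` above freezes the value the name has at the moment the `def` runs. Python closures bind names late, so a helper that merely references `busflags` would read whatever the enclosing scope holds at call time; the default argument makes the capture explicit and safe even if the helper outlives the loop iteration that created it. A minimal, self-contained sketch of the pitfall and the idiom (values invented for illustration):

```python
# Late binding: every closure reads `i` at call time,
# by which point the loop has finished and `i` is 2.
late = [lambda: i for i in range(3)]
assert [f() for f in late] == [2, 2, 2]

# Default-argument binding: `i=i` is evaluated at definition
# time, so each function keeps its own iteration's value.
early = [lambda i=i: i for i in range(3)]
assert [f() for f in early] == [0, 1, 2]
```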
+            setattr(partial_network, line_type + "_t", timeseries)
 
             # Copy all lines whose `bus0` lies within the cluster
             left_external_connectors = filter_left_external_connector(
-                getattr(self.original_network, line_type), is_bus_in_cluster
+                rows, is_bus_in_cluster
             )
 
             def from_busmap(x):
-                return self.idx_prefix + self.clustering.busmap.loc[x]
+                return self.idx_prefix + self.buses.loc[x, "cluster"]
 
             if not left_external_connectors.empty:
                 ca_option = pd.get_option("mode.chained_assignment")
@@ -133,7 +136,7 @@ def from_busmap(x):
 
             # Copy all lines whose `bus1` lies within the cluster
             right_external_connectors = filter_right_external_connector(
-                getattr(self.original_network, line_type), is_bus_in_cluster
+                rows, is_bus_in_cluster
             )
             if not right_external_connectors.empty:
                 ca_option = pd.get_option("mode.chained_assignment")
@@ -179,8 +182,8 @@ def from_busmap(x):
             self.reindex_with_prefix(externals_to_insert)
 
             # .. and insert them as well as their time series
-            partial_network.buses = partial_network.buses.append(
-                externals_to_insert
+            partial_network.buses = pd.concat(
+                [partial_network.buses, externals_to_insert]
             )
             partial_network.buses_t = self.original_network.buses_t
 
@@ -212,7 +215,9 @@ def from_busmap(x):
             setattr(
                 partial_network,
                 bustype,
-                getattr(partial_network, bustype).append(buses_to_insert),
+                pd.concat(
+                    [getattr(partial_network, bustype), buses_to_insert]
+                ),
             )
 
         # Also copy their time series
@@ -241,15 +246,15 @@ def from_busmap(x):
         # Just a simple sanity check
         # TODO: Remove when sure that disaggregation will not go insane anymore
         for line_type in line_types:
-            assert (
-                getattr(partial_network, line_type)
-                .bus0.isin(partial_network.buses.index)
-                .all()
-            )
-            assert (
-                getattr(partial_network, line_type)
-                .bus1.isin(partial_network.buses.index)
-                .all()
+            rows = getattr(partial_network, line_type)
+
+            left = rows.bus0.isin(partial_network.buses.index)
+            right = rows.bus1.isin(partial_network.buses.index)
+            assert rows.loc[~(left | right), :].empty, (
+                f"Not all `partial_network.{line_type}` have an endpoint,"
+                " i.e. `bus0` or `bus1`,"
+                " contained in `partial_network.buses.index`."
+ f" Spurious additional rows:\nf{rows.loc[~(left | right), :]}" ) return partial_network, external_buses @@ -265,7 +270,7 @@ def solve(self, scenario, solver): :param scenario: :param solver: Solver that may be used to optimize partial networks """ - clusters = set(self.clustering.busmap.values) + clusters = set(self.buses.loc[:, "cluster"].values) n = len(clusters) self.stats = { "clusters": pd.DataFrame( @@ -274,9 +279,9 @@ def solve(self, scenario, solver): ) } profile = cProfile.Profile() + profile = noops for i, cluster in enumerate(sorted(clusters)): - print("---") - print("Decompose cluster %s (%d/%d)" % (cluster, i + 1, n)) + log.info(f"Decompose {cluster=} ({i + 1}/{n})") profile.enable() t = time.time() partial_network, externals = self.construct_partial_network( @@ -284,9 +289,9 @@ def solve(self, scenario, solver): ) profile.disable() self.stats["clusters"].loc[cluster, "decompose"] = time.time() - t - print( - "Decomposed in ", - self.stats["clusters"].loc[cluster, "decompose"], + log.info( + "Decomposed in " + f'{self.stats["clusters"].loc[cluster, "decompose"]}' ) t = time.time() profile.enable() @@ -295,32 +300,32 @@ def solve(self, scenario, solver): ) profile.disable() self.stats["clusters"].loc[cluster, "spread"] = time.time() - t - print( - "Result distributed in ", - self.stats["clusters"].loc[cluster, "spread"], + log.info( + "Result distributed in " + f'{self.stats["clusters"].loc[cluster, "spread"]}' ) profile.enable() t = time.time() self.transfer_results(partial_network, externals) profile.disable() self.stats["clusters"].loc[cluster, "transfer"] = time.time() - t - print( - "Results transferred in ", - self.stats["clusters"].loc[cluster, "transfer"], + log.info( + "Results transferred in " + f'{self.stats["clusters"].loc[cluster, "transfer"]}' ) profile.enable() t = time.time() - print("---") fs = (mc("sum"), mc("sum")) for bt, ts in ( ("generators", {"p": fs, "q": fs}), ("storage_units", {"p": fs, "state_of_charge": fs, "q": fs}), + ("links", {"p0": fs, "p1": fs}), ): - print("Attribute sums, {}, clustered - disaggregated:".format(bt)) + log.info(f"Attribute sums, {bt}, clustered - disaggregated:") cnb = getattr(self.clustered_network, bt) onb = getattr(self.original_network, bt) - print( + log.info( "{:>{}}: {}".format( "p_nom_opt", 4 + len("state_of_charge"), @@ -329,11 +334,11 @@ def solve(self, scenario, solver): ) ) - print("Series sums, {}, clustered - disaggregated:".format(bt)) + log.info(f"Series sums, {bt}, clustered - disaggregated:") cnb = getattr(self.clustered_network, bt + "_t") onb = getattr(self.original_network, bt + "_t") for s in ts: - print( + log.info( "{:>{}}: {}".format( s, 4 + len("state_of_charge"), @@ -343,9 +348,9 @@ def solve(self, scenario, solver): ) profile.disable() self.stats["check"] = time.time() - t - print("Checks computed in ", self.stats["check"]) + log.info(f"Checks computed in {self.stats['check']}s.") - # profile.print_stats(sort='cumtime') + profile.print_stats(sort="cumtime") def transfer_results( self, @@ -420,7 +425,7 @@ def _validate_disaggregation_generators(self, cluster, f): def extra_functionality(network, snapshots): f(network, snapshots) generators = self.original_network.generators.assign( - bus=lambda df: df.bus.map(self.clustering.busmap) + bus=lambda df: df.bus.map(self.buses.loc[:, "cluster"]) ) def construct_constraint(model, snapshot, carrier): @@ -472,7 +477,9 @@ def extra_functionality(network, snapshots): ]: generators = getattr( self.original_network, bustype_pypsa - ).assign(bus=lambda df: 
+                    ).assign(
+                        bus=lambda df: df.bus.map(self.buses.loc[:, "cluster"])
+                    )
                     for suffix in suffixes:
 
                         def construct_constraint(model, snapshot):
@@ -519,12 +526,21 @@ class UniformDisaggregation(Disaggregation):
     def solve_partial_network(
         self, cluster, partial_network, scenario, solver=None
     ):
+        log.debug("Solving partial network.")
         bustypes = {
+            "links": {
+                "group_by": ("carrier", "bus1"),
+                "series": ("p0", "p1"),
+            },
             "generators": {"group_by": ("carrier",), "series": ("p", "q")},
             "storage_units": {
                 "group_by": ("carrier", "max_hours"),
                 "series": ("p", "state_of_charge", "q"),
             },
+            "stores": {
+                "group_by": ("carrier",),
+                "series": ("e", "p"),
+            },
         }
         weights = {
             "p": ("p_nom_opt", "p_max_pu"),
             "q": (
@@ -536,47 +552,93 @@ def solve_partial_network(
                 )
                 else ("p_nom_opt", "p_max_pu")
             ),
+            "p0": ("p_nom_opt",),
+            "p1": ("p_nom_opt",),
             "state_of_charge": ("p_nom_opt",),
+            "e": ("e_nom_opt",),
         }
         filters = {"q": lambda o: o.control == "PV"}
+
         for bustype in bustypes:
+            # Define attribute names of the components, which differ for
+            # stores
+            if bustype == "stores":
+                extendable_flag = "e_nom_extendable"
+                nominal_capacity = "e_nom"
+                optimal_capacity = "e_nom_opt"
+                maximal_capacity = "e_nom_max"
+                weights["p"] = ("e_nom_opt", "e_max_pu")
+            else:
+                extendable_flag = "p_nom_extendable"
+                nominal_capacity = "p_nom"
+                optimal_capacity = "p_nom_opt"
+                maximal_capacity = "p_nom_max"
+                weights["p"] = ("p_nom_opt", "p_max_pu")
+
+            log.debug(f"Decomposing {bustype}.")
             pn_t = getattr(partial_network, bustype + "_t")
             cl_t = getattr(self.clustered_network, bustype + "_t")
             pn_buses = getattr(partial_network, bustype)
-            cl_buses = getattr(self.clustered_network, bustype)
+            cl_buses = getattr(self.clustered_network, bustype)[
+                lambda df: df.loc[:, "bus" if "bus" in df.columns else "bus0"]
+                == cluster
+            ]
             groups = product(
                 *[
                     [
                         {"key": key, "value": value}
-                        for value in set(pn_buses.loc[:, key])
+                        for value in set(cl_buses.loc[:, key])
                     ]
                     for key in bustypes[bustype]["group_by"]
                 ]
             )
             for group in groups:
-                clb = cl_buses[cl_buses.bus == cluster]
                 query = " & ".join(
                     ["({key} == {value!r})".format(**axis) for axis in group]
                 )
-                clb = clb.query(query)
+                clb = cl_buses.query(query)
                 if len(clb) == 0:
                     continue
                 assert len(clb) == 1, (
-                    "Cluster {} has {} buses for group {}.\n".format(
-                        cluster, len(clb), group
-                    )
-                    + "Should be exactly one."
+                    f"Cluster {cluster} has {len(clb)} buses for {group=}."
+                    "\nShould be exactly one."
                 )
 
                 # Remove buses not belonging to the partial network
                 pnb = pn_buses.iloc[
                     [
                         i
                         for i, row in enumerate(pn_buses.itertuples())
-                        if not row.bus.startswith(self.idx_prefix)
+                        for bus in [
+                            row.bus if hasattr(row, "bus") else row.bus0
+                        ]
+                        if not bus.startswith(self.idx_prefix)
                     ]
                 ]
+                if bustype == "links":
+                    index = self.buses[
+                        self.buses.loc[:, "cluster"] == group[1]["value"]
+                    ].index.tolist()
+                    query = (
+                        f"(carrier == {group[0]['value']!r})"
+                        f" & (bus1 in {index})"
+                    )
                 pnb = pnb.query(query)
-                assert not pnb.empty, (
+                assert not pnb.empty or (
+                    # In some cases, a district heating grid is connected to a
+                    # substation only via a resistive_heater but not e.g. by a
+                    # heat_pump or one of the other listed `carrier`s.
+                    # In the clustered network, there are both.
+                    # In these cases, the `pnb` can actually be empty.
+                    group[0]["value"]
+                    in [
+                        "central_gas_boiler",
+                        "central_heat_pump",
+                        "central_gas_CHP_heat",
+                        "central_gas_CHP",
+                        "CH4",
+                        "DC",
+                        "OCGT",
+                    ]
+                ), (
                     "Cluster has a bus for:"
                     + "\n    ".join(
                         ["{key}: {value!r}".format(**axis) for axis in group]
@@ -584,10 +646,12 @@ def solve_partial_network(
                     )
                     + "\nbut no matching buses in its corresponding "
                     + "partial network."
                 )
+                if pnb.empty:
+                    continue
 
                 if not (
-                    pnb.loc[:, "p_nom_extendable"].all()
-                    or not pnb.loc[:, "p_nom_extendable"].any()
+                    pnb.loc[:, extendable_flag].all()
+                    or not pnb.loc[:, extendable_flag].any()
                 ):
                     raise NotImplementedError(
                         "The `'p_nom_extendable'` flag for buses in the"
@@ -603,56 +667,59 @@ def solve_partial_network(
                     )
                 else:
                     assert (
-                        pnb.loc[:, "p_nom_extendable"]
-                        == clb.iloc[0].at["p_nom_extendable"]
+                        pnb.loc[:, extendable_flag]
+                        == clb.iloc[0].at[extendable_flag]
                     ).all(), (
-                        "The `'p_nom_extendable'` flag for the current "
-                        + "cluster's bus does not have the same value "
-                        + "it has on the buses of it's partial network."
+                        "The `'p_nom_extendable'` flag for the current"
+                        " cluster's bus does not have the same value"
+                        " it has on the buses of its partial network."
                     )
 
-                if clb.iloc[0].at["p_nom_extendable"]:
+                if clb.iloc[0].at[extendable_flag]:
                     # That means, `p_nom` got computed via optimization and we
                     # have to distribute it into the subnetwork first.
-                    pnb_p_nom_max = pnb.loc[:, "p_nom_max"]
+                    pnb_p_nom_max = pnb.loc[:, maximal_capacity]
+
+                    # If the upper limit is infinite, replace it with a very
+                    # large number to avoid NaN values in the calculation
+                    pnb_p_nom_max.replace(float("inf"), 10000000, inplace=True)
+
                     p_nom_max_global = pnb_p_nom_max.sum(axis="index")
-                    pnb.loc[:, "p_nom_opt"] = (
-                        clb.iloc[0].at["p_nom_opt"]
+
+                    pnb.loc[:, optimal_capacity] = (
+                        clb.iloc[0].at[optimal_capacity]
                         * pnb_p_nom_max
                         / p_nom_max_global
                     )
                     getattr(self.original_network, bustype).loc[
-                        pnb.index, "p_nom_opt"
-                    ] = pnb.loc[:, "p_nom_opt"]
-                    pnb.loc[:, "p_nom"] = pnb.loc[:, "p_nom_opt"]
+                        pnb.index, optimal_capacity
+                    ] = pnb.loc[:, optimal_capacity]
+                    pnb.loc[:, nominal_capacity] = pnb.loc[:, optimal_capacity]
                 else:
                     # That means 'p_nom_opt' didn't get computed and is
                     # potentially not present in the dataframe. But we want to
                     # always use 'p_nom_opt' in the remaining code, so save a
                     # view of the computed 'p_nom' values under 'p_nom_opt'.
-                    pnb.loc[:, "p_nom_opt"] = pnb.loc[:, "p_nom"]
+                    pnb.loc[:, optimal_capacity] = pnb.loc[:, nominal_capacity]
                 # This probably shouldn't be here, but rather in
                 # `transfer_results`, but it's easier to do it this way right
                 # now.
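The extendable branch just above distributes the clustered bus's optimised capacity over its member components in proportion to their upper limits; infinite limits are capped first because an infinite entry would make the share evaluate to `inf / inf = NaN`. A toy recalculation of that arithmetic with invented numbers (only the name `p_nom_max` mirrors the PyPSA attribute):

```python
import pandas as pd

# Hypothetical per-component capacity limits within one cluster.
p_nom_max = pd.Series({"gen_a": 50.0, "gen_b": float("inf"), "gen_c": 150.0})

# Cap the infinite limit with a large finite number, as the diff does,
# so the proportional shares stay well-defined.
p_nom_max = p_nom_max.replace(float("inf"), 10_000_000)

cluster_p_nom_opt = 100.0  # optimised capacity of the clustered component
p_nom_opt = cluster_p_nom_opt * p_nom_max / p_nom_max.sum()

# The disaggregated capacities add up to the clustered optimum again.
assert abs(p_nom_opt.sum() - cluster_p_nom_opt) < 1e-9
```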
                getattr(self.original_network, bustype).loc[
-                    pnb.index, "p_nom_opt"
-                ] = pnb.loc[:, "p_nom_opt"]
-                timed = (
-                    lambda key, series=set(
-                        s
-                        for s in cl_t
-                        if not cl_t[s].empty
-                        if not pn_t[s].columns.intersection(pnb.index).empty
-                    ): key
-                    in series
-                )
+                    pnb.index, optimal_capacity
+                ] = pnb.loc[:, optimal_capacity]
+                timed = lambda key, series={  # noqa: E731
+                    s
+                    for s in cl_t
+                    if not cl_t[s].empty
+                    if not pn_t[s].columns.intersection(pnb.index).empty
+                }: (key in series)
 
                 for s in bustypes[bustype]["series"]:
                     if s in self.skip:
                         continue
                     filtered = pnb.loc[filters.get(s, slice(None))]
-                    clt = cl_t[s].loc[:, next(clb.itertuples()).Index]
+                    clt = cl_t[s].loc[:, clb.index[0]]
                     weight = reduce(
                         multiply,
                         (
@@ -669,15 +736,27 @@ def solve_partial_network(
                         else ()
                     )
                     ws = weight.sum(axis=len(loc))
-                    for bus_id in filtered.index:
-                        values = clt * weight.loc[loc + (bus_id,)] / ws
-                        pn_t[s].insert(len(pn_t[s].columns), bus_id, values)
+                    new_columns = pd.DataFrame(
+                        {
+                            bus_id: clt * weight.loc[loc + (bus_id,)] / ws
+                            for bus_id in filtered.index
+                        }
+                    )
+                    delta = abs((new_columns.sum(axis=1) - clt).sum())
+                    epsilon = 1e-5
+                    assert delta < epsilon, (
+                        "Sum of disaggregated time series does not match"
+                        f" aggregated time series: {delta=} > {epsilon=}."
+                    )
+                    pn_t[s].loc[:, new_columns.columns] = new_columns
 
     def transfer_results(self, *args, **kwargs):
-        kwargs["bustypes"] = ["generators", "storage_units"]
+        kwargs["bustypes"] = ["generators", "links", "storage_units", "stores"]
         kwargs["series"] = {
             "generators": {"p"},
+            "links": {"p0", "p1"},
             "storage_units": {"p", "state_of_charge"},
+            "stores": {"e", "p"},
         }
         return super().transfer_results(*args, **kwargs)
 
@@ -688,7 +767,7 @@ def swap_series(s):
 
 def filter_internal_connector(conn, is_bus_in_cluster):
     return conn[
-        conn.bus0.apply(is_bus_in_cluster) & conn.bus1.apply(is_bus_in_cluster)
+        conn.bus0.apply(is_bus_in_cluster) | conn.bus1.apply(is_bus_in_cluster)
     ]
 
 
@@ -719,6 +798,7 @@ def update_constraints(network, externals):
 
 
 def run_disaggregation(self):
+    log.debug("Running disaggregation.")
     if self.clustering:
         disagg = self.args.get("disaggregation")
         skip = () if self.args["pf_post_lopf"]["active"] else ("q",)
@@ -733,9 +813,9 @@ def run_disaggregation(self):
             )
         elif disagg == "uniform":
             disaggregation = UniformDisaggregation(
-                self.disaggregated_network,
-                self.network,
-                self.clustering,
+                original_network=self.disaggregated_network,
+                clustered_network=self.network,
+                clustering=self.clustering,
                 skip=skip,
             )
 
@@ -748,8 +828,7 @@ def run_disaggregation(self):
         self.disaggregated_network.generators_t.p.fillna(0, inplace=True)
         self.disaggregated_network.generators_t.q.fillna(0, inplace=True)
 
-        self.disaggregated_network.results = self.network.results
-        print(
+        log.info(
             "Time for overall desaggregation [min]: {:.2}".format(
                 (time.time() - t) / 60
             )
diff --git a/etrago/cluster/gas.py b/etrago/cluster/gas.py
index b4e90c3a8..f0dfcbf42 100644
--- a/etrago/cluster/gas.py
+++ b/etrago/cluster/gas.py
@@ -344,8 +344,9 @@ def gas_postprocessing(etrago, busmap, medoid_idx=None):
             + str(settings["n_clusters_gas"])
             + "_result.csv"
         )
-
-    busmap = get_h2_clusters(etrago, busmap)
+
+    if "H2" in etrago.network.buses.carrier.unique():
+        busmap = get_h2_clusters(etrago, busmap)
 
     # Add all other buses to busmap
     missing_idx = list(
diff --git a/etrago/cluster/snapshot.py b/etrago/cluster/snapshot.py
index 5f4e0d1c9..fbbd1f68e 100644
--- a/etrago/cluster/snapshot.py
+++ b/etrago/cluster/snapshot.py
@@ -650,7 +650,10 @@ def run(
     )
 
     if segmentation != False:
-        timeseries.to_csv(
+        pd.DataFrame(
+            timeseries.reset_index(),
+            columns=["dates", "SegmentNo", "SegmentDuration"],
+        ).set_index("SegmentNo").to_csv(
             "timeseries_segmentation=" + str(segment_no) + ".csv"
         )
     else:
@@ -695,7 +698,7 @@ def prepare_pypsa_timeseries(network):
         Timeseries to be considered when clustering.
 
     """
-
+
     loads = network.loads_t.p_set.copy()
     loads.columns = "L" + loads.columns
 
@@ -792,7 +795,7 @@ def skip_snapshots(self):
     None.
 
     """
-
+
     # save second network for optional dispatch disaggregation
     if (
         self.args["temporal_disaggregation"]["active"] == True
@@ -803,7 +806,6 @@ def skip_snapshots(self):
     n_skip = self.args["skip_snapshots"]
 
     if n_skip:
-
         last_weight = (
             int(
                 (
@@ -831,4 +833,3 @@ def skip_snapshots(self):
     self.network.snapshot_weightings.loc[
         self.network.snapshot_weightings.index[-1]
     ]["generators"] = last_weight
-
diff --git a/etrago/tools/__init__.py b/etrago/tools/__init__.py
index 497ec4c5a..170b87144 100644
--- a/etrago/tools/__init__.py
+++ b/etrago/tools/__init__.py
@@ -1,8 +1,38 @@
+"""Multi-purpose tools that don't fit anywhere else in eTraGo.
 """
-"""
-__copyright__ = "tba"
-__license__ = "tba"
-__author__ = "tba"
+__copyright__ = (
+    "Copyright (C) 2023"
+    " Otto-von-Guericke-University Magdeburg,"
+    " Research group for theoretical computer science"
+)
+__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)"
+__author__ = "gnn "
+
+
+def noop(*ignored_arguments, **ignored_keyword_arguments):
+    """Do nothing.
+
+    Accept all kinds of arguments, ignore them and do nothing.
+    """
+    pass
+
+
+class Noops:
+    """Provide arbitrarily named methods that do nothing.
+
+    Any attribute access will return a method that does nothing, i.e.
+    all methods of this object are :py:func:`noop`s. Normally you don't
+    need to instantiate this class. All instances behave the same, so
+    the containing module provides one called :py:obj:`noops` which you
+    can import and use.
+    """
+
+    @classmethod
+    def __getattribute__(cls, ignored_name):
+        return noop
+
+
+noops = Noops()
+"""A default :py:class:`Noops` instance so you don't have to create one.
+"""
diff --git a/etrago/tools/constraints.py b/etrago/tools/constraints.py
index 17a7716cb..453b076d4 100755
--- a/etrago/tools/constraints.py
+++ b/etrago/tools/constraints.py
@@ -1308,7 +1308,6 @@ def read_max_gas_generation(self):
             "biogas": 14450103
         },  # [MWh] Value from reference p-e-s run used in eGon-data
     }
-
     engine = db.connection(section=self.args["db"])
     try:
         sql = f"""
@@ -2749,10 +2748,12 @@ def functionality(self, network, snapshots):
         if "CH4" in network.buses.carrier.values:
             if self.args["method"]["pyomo"]:
                 add_chp_constraints(network, snapshots)
-                add_ch4_constraints(self, network, snapshots)
+                if self.args["scn_name"] != "status2019":
+                    add_ch4_constraints(self, network, snapshots)
             else:
                 add_chp_constraints_nmp(network)
-                add_ch4_constraints_nmp(self, network, snapshots)
+                if self.args["scn_name"] != "status2019":
+                    add_ch4_constraints_nmp(self, network, snapshots)
 
         for constraint in self.args["extra_functionality"].keys():
             try:
diff --git a/etrago/tools/network.py b/etrago/tools/network.py
index 7f427627c..385bd7a12 100644
--- a/etrago/tools/network.py
+++ b/etrago/tools/network.py
@@ -327,7 +327,8 @@ def build_network_from_db(self):
 
         self.decommissioning()
 
-        self.add_ch4_h2_correspondence()
+        if "H2" in self.network.buses.carrier.unique():
+            self.add_ch4_h2_correspondence()
 
         logger.info("Imported network from db")
 
diff --git a/etrago/tools/utilities.py b/etrago/tools/utilities.py
index 77ac45d30..348c232a6 100755
--- a/etrago/tools/utilities.py
+++ b/etrago/tools/utilities.py
@@ -599,6 +599,7 @@ def load_shedding(self, temporal_disaggregation=False, **kwargs):
     -------
 
     """
+    logger.debug("Shedding the load.")
     if self.args["load_shedding"]:
         if temporal_disaggregation:
             network = self.network_tsa
@@ -2344,7 +2345,14 @@ def check_args(etrago):
 
     """
 
-    names = ["eGon2035", "eGon100RE", "eGon2035_lowflex", "eGon100RE_lowflex"]
+    names = [
+        "eGon2035",
+        "eGon100RE",
+        "eGon2035_lowflex",
+        "eGon100RE_lowflex",
+        "status2019",
+    ]
+
     assert (
         etrago.args["scn_name"] in names
     ), f"'scn_name' has to be in {names} but is {etrago.args['scn_name']}."
@@ -2627,12 +2635,7 @@ def adjust_CH4_gen_carriers(self):
                 FROM scenario.egon_scenario_parameters
                 WHERE name = '{self.args["scn_name"]}';"""
             df = pd.read_sql(sql, engine)
-            # TODO: There might be a bug in here raising a `KeyError`.
-            #       If you encounter it, that means you have live data
-            #       to test against. Please do a `git blame` on these
-            #       lines and follow the hints in the commit message to
-            #       fix the bug.
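Both new `H2` guards (here in `network.py`, earlier in `gas.py`) go through the carrier *values*, because `in` applied to a pandas `Series` tests the index labels rather than the data, so `"H2" in buses.carrier` would stay False even when H2 buses exist. A small sketch with made-up bus data:

```python
import pandas as pd

# Hypothetical buses table: bus IDs in the index, carrier as a column.
buses = pd.DataFrame(
    {"carrier": ["AC", "H2", "CH4"]}, index=["bus1", "bus2", "bus3"]
)

assert "H2" not in buses.carrier       # membership checks the index ...
assert "bus2" in buses.carrier         # ... which holds the bus IDs
assert "H2" in buses.carrier.values    # membership in the data
assert "H2" in buses.carrier.unique()  # same, after deduplication
```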
-            marginal_cost = df["marginal_cost"]
+            marginal_cost = df["gas_parameters"][0]["marginal_cost"]
         except sqlalchemy.exc.ProgrammingError:
             marginal_cost = marginal_cost_def
diff --git a/noxfile.py b/noxfile.py
index 98f0c9eb6..9764d83de 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -1,3 +1,4 @@
+from pathlib import Path
 from pprint import pformat
 
 import nox
@@ -58,3 +59,12 @@ def build(session):
     session.install("twine")
     session.run("python", "setup.py", "bdist", "bdist_wheel")
     session.run("twine", "check", "dist/eTraGo*")
+
+
+@nox.session(python=["3", "3.8", "3.9", "3.10", "3.11"])
+def install(session):
+    """Install the package."""
+    setdefaults(session)
+    session.env["SKLEARN_ALLOW_DEPRECATED_SKLEARN_PACKAGE_INSTALL"] = "False"
+    session.run("python", "-mpip", "install", "--upgrade", "pip")
+    session.run("python", "-mpip", "install", *Path("dist").glob("*.whl"))
diff --git a/setup.py b/setup.py
index e84d234d5..6de5c4bd5 100755
--- a/setup.py
+++ b/setup.py
@@ -51,6 +51,7 @@ def read(*names, **kwargs):
         "egoio == 0.4.7",
         "geoalchemy2 >= 0.3.0",
         "geopandas",
+        "loguru",
         "matplotlib >= 3.0.3",
         "oedialect",
         # PyPSA uses a deprecated import that errors with Pyomo 6.4.3.
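The `noops` object that `disaggregation.py` now imports keeps all profiling call sites intact: after `profile = noops`, the subsequent `profile.enable()`, `profile.disable()` and `profile.print_stats(...)` calls resolve to the do-nothing `noop`, which is why `print_stats` could be uncommented. A simplified sketch of the pattern (using `__getattr__` instead of the shipped class's `__getattribute__` hook, and a hypothetical `PROFILE` flag as the toggle):

```python
import cProfile


def noop(*ignored_args, **ignored_kwargs):
    """Accept any arguments and do nothing."""


class Noops:
    """Every attribute lookup yields the do-nothing function."""

    def __getattr__(self, ignored_name):
        return noop


PROFILE = False  # flip to True to collect real timings

profile = cProfile.Profile() if PROFILE else Noops()
profile.enable()
sum(i * i for i in range(10_000))  # stand-in for the real workload
profile.disable()
profile.print_stats(sort="cumtime")  # prints nothing while disabled
```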