Package Reference
+This section contains auto-generated documentation of all modules and functions.
+import dataclasses
+from typing import List, Dict, Iterable, Tuple
+from itertools import chain
+
+import numpy as np
+
+import agentlib as al
+import orjson
+from agentlib.core.module import BaseModuleConfigClass
+
+import agentlib_mpc.data_structures.coordinator_datatypes as cdt
+from agentlib_mpc.data_structures import mpc_datamodels
+
+############################## Uncoordinated ADMM ##################################
+
+ADMM_PREFIX = "admm"
+MULTIPLIER_PREFIX = ADMM_PREFIX + "_lambda"
+LOCAL_PREFIX = ADMM_PREFIX + "_coupling"
+MEAN_PREFIX = ADMM_PREFIX + "_coupling_mean"
+LAG_PREFIX = ADMM_PREFIX + "_lag"
+EXCHANGE_MULTIPLIER_PREFIX = ADMM_PREFIX + "_exchange_lambda"
+EXCHANGE_LOCAL_PREFIX = ADMM_PREFIX + "_exchange"
+EXCHANGE_MEAN_PREFIX = ADMM_PREFIX + "_exchange_mean"
+
+
+@dataclasses.dataclass
+class CouplingEntry:
+ """Holds naming conventions for different optimization variables / parameters
+ associated with a coupling variable in consensus ADMM."""
+
+ name: str
+
+ @property
+ def local(self):
+ return f"{LOCAL_PREFIX}_{self.name}"
+
+ @property
+ def mean(self):
+ return f"{MEAN_PREFIX}_{self.name}"
+
+ @property
+ def multiplier(self):
+ return f"{MULTIPLIER_PREFIX}_{self.name}"
+
+ @property
+ def lagged(self):
+ return f"{LAG_PREFIX}_{self.name}"
+
+
+
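+# Example (illustrative): for a coupling variable named "T_room", the
+# conventions above resolve to:
+#   CouplingEntry(name="T_room").local      -> "admm_coupling_T_room"
+#   CouplingEntry(name="T_room").mean       -> "admm_coupling_mean_T_room"
+#   CouplingEntry(name="T_room").multiplier -> "admm_lambda_T_room"
+#   CouplingEntry(name="T_room").lagged     -> "admm_lag_T_room"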
+
+@dataclasses.dataclass
+class ExchangeEntry:
+ """Holds naming conventions for different optimization variables / parameters
+ associated with a coupling variable in exchange ADMM."""
+
+ name: str
+
+ @property
+ def local(self):
+ return f"{EXCHANGE_LOCAL_PREFIX}_{self.name}"
+
+ @property
+ def mean_diff(self):
+ return f"{EXCHANGE_MEAN_PREFIX}_{self.name}"
+
+ @property
+ def multiplier(self):
+ return f"{EXCHANGE_MULTIPLIER_PREFIX}_{self.name}"
+
+ @property
+ def lagged(self):
+ return f"{LAG_PREFIX}_{self.name}"
+
+ def admm_variables(self):
+ return [self.local, self.mean_diff, self.multiplier, self.lagged]
+
+
+@dataclasses.dataclass
+class VariableReference(mpc_datamodels.FullVariableReference):
+ """Holds info about all variables of an MPC and their role in the optimization
+ problem."""
+
+ couplings: list[CouplingEntry] = dataclasses.field(default_factory=list)
+ exchange: list[ExchangeEntry] = dataclasses.field(default_factory=list)
+
+ @classmethod
+ def from_config(cls, config: BaseModuleConfigClass):
+ """Creates an instance from a pydantic values dict which includes lists of
+ AgentVariables with the keys corresponding to 'states', 'inputs', etc."""
+ var_ref: cls = super().from_config(config)
+ var_ref.couplings = [CouplingEntry(name=c.name) for c in config.couplings]
+ var_ref.exchange = [ExchangeEntry(name=c.name) for c in config.exchange]
+ return var_ref
+
+ def all_variables(self) -> List[str]:
+ """Returns a list of all variables registered in the var_ref"""
+ full_dict = self.__dict__.copy()
+ couplings: List[CouplingEntry] = full_dict.pop("couplings")
+ exchange: List[ExchangeEntry] = full_dict.pop("exchange")
+ coup_vars = []
+ for coup in couplings + exchange:
+ coup_vars.append(coup.name)
+ original_vars = list(chain.from_iterable(full_dict.values()))
+ return original_vars + coup_vars
+
+ def __contains__(self, item):
+ return item in set(self.all_variables())
+
+
+def coupling_alias(alias: str) -> str:
+ """Naming convention for local variables to send and receive."""
+ return f"{LOCAL_PREFIX}_{alias}"
+
+
+def exchange_alias(alias: str) -> str:
+ """Naming convention for local exchange variables to send and receive."""
+ return f"{EXCHANGE_LOCAL_PREFIX}_{alias}"
+
+
+############################## Coordinated ADMM ##################################
+
+# ALIASES
+ADMM_COMMUNICATION = "admm_communication"
+ADMM_SIGNUP_REQUEST = "admm_signup_request"
+
+
+glob_params: List[str] = ["penalty_factor", "prediction_horizon", "time_step"]
+
+PENALTY_FACTOR = "penalty_factor"
+
+
+@dataclasses.dataclass
+class AgentDictEntry(cdt.AgentDictEntry):
+ """Holds participating coupling variables (consensus and exchange) of a single
+ agent in ADMM. Used in the coordinator."""
+
+ coup_vars: List[str] = dataclasses.field(default_factory=lambda: [])
+ exchange_vars: List[str] = dataclasses.field(default_factory=lambda: [])
+
+
+@dataclasses.dataclass
+class ADMMParameters:
+ """Collection of parameters which have to be shared across all agents in ADMM."""
+
+ penalty_factor: float
+ prediction_horizon: int
+ time_step: float
+
+
+@dataclasses.dataclass
+class ADMMParticipation:
+ """Helper class to organize ADMM participants."""
+
+ source: al.Source
+ ready: bool = False
+ participating: bool = False
+
+
+@dataclasses.dataclass
+class CouplingVariable:
+ """Holds information about a coupling variable."""
+
+ local_trajectories: Dict[al.Source, list] = dataclasses.field(default_factory=dict)
+ mean_trajectory: list = dataclasses.field(default_factory=lambda: [0])
+ delta_mean: np.ndarray = dataclasses.field(default_factory=lambda: np.array([0]))
+ primal_residual: np.ndarray = dataclasses.field(
+ default_factory=lambda: np.array([0])
+ )
+
+ def _relevant_sources(self, sources: Iterable[al.Source]) -> set:
+ if sources is None:
+ sources = self.local_trajectories.keys()
+ else:
+ # the remaining sources are only agents that have this variable
+ sources = set(self.local_trajectories.keys()).intersection(sources)
+ return sources
+
+ @property
+ def participants(self):
+ """Returns all agent sources that are registered to this coupling."""
+ return list(self.local_trajectories.keys())
+
+ def flat_locals(self, sources: Iterable[al.Source] = None) -> list[float]:
+ """
+ Returns the flattened list of all local variables.
+
+ Args:
+ sources: list of sources that should be included in the update.
+ By default, all are included.
+
+ Returns:
+ flat list of local variable values
+ """
+ sources = self._relevant_sources(sources)
+ if not sources:
+ return []
+ # chain.from_iterable flattens the per-source trajectories into one list
+ local_vars = list(chain.from_iterable(self.local_trajectories[ls] for ls in sources))
+ return local_vars
+
+ def get_residual(self, rho: float) -> Tuple[np.ndarray, np.ndarray]:
+ """
+ Returns the primal and dual residual of the last iteration as a tuple
+ of flattened arrays.
+
+ Args:
+ rho: penalty parameter
+
+ Returns:
+ (primal residual, dual residual)
+ """
+ primal_residual = self.primal_residual.flatten()
+ dual_residual = (rho * self.delta_mean).flatten()
+ return primal_residual, dual_residual
+
+
+@dataclasses.dataclass
+class ConsensusVariable(CouplingVariable):
+ multipliers: Dict[al.Source, list] = dataclasses.field(default_factory=lambda: {})
+
+ def update_mean_trajectory(self, sources: Iterable[al.Source] = None):
+ """
+ Calculates a new mean of this variable.
+
+ Args:
+ sources: List of sources that should be included in the update.
+ If none is given, use all variables.
+ """
+ sources = self._relevant_sources(sources)
+ if not sources:
+ return
+ lists = [self.local_trajectories[ls] for ls in sources]
+ arr = np.array(lists)
+ mean = np.mean(arr, axis=0)
+ self.delta_mean = self.mean_trajectory - mean # for residual
+ self.mean_trajectory = list(mean)
+
+ def update_multipliers(self, rho: float, sources: Iterable[al.Source] = None):
+ """
+ Performs the multiplier update.
+
+ Args:
+ rho: penalty parameter
+ sources: list of sources that should be included in the update.
+ By default, all are included.
+ """
+ sources = self._relevant_sources(sources)
+ if not sources:
+ return
+
+ # create arrays for all trajectories and multipliers
+ traj_list = [self.local_trajectories[ls] for ls in sources]
+ mul_list = [self.multipliers[ls] for ls in sources]
+ trajectories = np.array(traj_list)
+ multipliers = np.array(mul_list)
+ mean = np.array(self.mean_trajectory)
+
+ # perform the update
+ self.primal_residual = mean - trajectories
+ new_multipliers = multipliers - rho * self.primal_residual
+
+ # cast the updated multipliers back to their sources
+ for i, src in enumerate(sources):
+ self.multipliers[src] = new_multipliers[i, :].tolist()
+
+ def flat_multipliers(self, sources: Iterable[al.Source] = None) -> list[float]:
+ sources = self._relevant_sources(sources)
+ if not sources:
+ return []
+ # chain.from_iterable flattens the per-source multipliers into one list
+ return list(chain.from_iterable(self.multipliers[ls] for ls in sources))
+
+ def shift_values_by_one(self, horizon: int):
+ """Shifts the trajectories"""
+ mean_traj = self.mean_trajectory
+ shift_by = int(len(mean_traj) / horizon)
+ self.mean_trajectory = mean_traj[shift_by:] + mean_traj[-shift_by:]
+ mul_dict = self.multipliers
+ for key, mul in mul_dict.items():
+ mul_dict[key] = mul[shift_by:] + mul[-shift_by:]
+
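+# Minimal sketch (assumed usage, sources shown as plain keys): the coordinator
+# collects local trajectories of one consensus variable from two agents and
+# performs the mean and multiplier updates.
+#
+#   var = ConsensusVariable()
+#   var.local_trajectories = {"agent_a": [20.0, 21.0], "agent_b": [22.0, 23.0]}
+#   var.multipliers = {"agent_a": [0.0, 0.0], "agent_b": [0.0, 0.0]}
+#   var.update_mean_trajectory()  # mean_trajectory becomes [21.0, 22.0]
+#   var.update_multipliers(rho=1.0)
+#   # primal residual of agent_a is mean - local = [1.0, 1.0], so its
+#   # multiplier becomes [0.0, 0.0] - 1.0 * [1.0, 1.0] = [-1.0, -1.0]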
+
+@dataclasses.dataclass
+class ExchangeVariable(CouplingVariable):
+ diff_trajectories: Dict[al.Source, list[float]] = dataclasses.field(
+ default_factory=dict
+ )
+ multiplier: list[float] = dataclasses.field(default_factory=list)
+
+ def update_diff_trajectories(self, sources: Iterable[al.Source] = None):
+ """
+ Calculates a new mean of this variable and updates each source's
+ difference trajectory relative to it.
+
+ Args:
+ sources: List of sources that should be included in the update.
+ If none is given, use all variables.
+ """
+ sources = self._relevant_sources(sources)
+ if not sources:
+ return
+ lists = [self.local_trajectories[ls] for ls in sources]
+ arr = np.array(lists)
+ mean = np.mean(arr, axis=0)
+ self.delta_mean = self.mean_trajectory - mean # for residual
+ self.mean_trajectory = list(mean)
+ for src in sources:
+ self.diff_trajectories[src] = list(self.local_trajectories[src] - mean)
+
+ def update_multiplier(self, rho: float):
+ """
+ Performs the multiplier update.
+
+ Args:
+ rho: penalty parameter
+ """
+
+ # perform the update
+ self.primal_residual = np.array(self.mean_trajectory)
+ self.multiplier = list(self.multiplier + rho * self.primal_residual)
+
+ def shift_values_by_one(self, horizon: int):
+ """Shifts the trajectories"""
+ shift_by = int(len(self.multiplier) / horizon)
+ self.multiplier = self.multiplier[shift_by:] + self.multiplier[-shift_by:]
+ for key, diff in self.diff_trajectories.items():
+ self.diff_trajectories[key] = diff[shift_by:] + diff[-shift_by:]
+
+
+@dataclasses.dataclass
+class StructuredValue:
+ """Base Class to specify the structure of an AgentVariable Value. It will
+ be efficiently sent and deserialized."""
+
+ def to_json(self) -> str:
+ """Serialize self to a json string, which can be used by the communicator."""
+ return orjson.dumps(
+ self, option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_SERIALIZE_DATACLASS
+ ).decode()
+
+
+
+
+@dataclasses.dataclass
+class CoordinatorToAgent(StructuredValue):
+ target: str
+ mean_trajectory: Dict[str, list]
+ multiplier: Dict[str, list]
+ mean_diff_trajectory: Dict[str, list]
+ exchange_multiplier: Dict[str, list]
+ penalty_parameter: float
+
+
+@dataclasses.dataclass
+class AgentToCoordinator(StructuredValue):
+ local_trajectory: Dict[str, np.ndarray]
+ local_exchange_trajectory: Dict[str, np.ndarray]
+
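+# Minimal sketch (assumed usage): an agent serializes its local results for
+# the coordinator. orjson handles the dataclass and numpy arrays natively via
+# the options set in StructuredValue.to_json().
+#
+#   msg = AgentToCoordinator(
+#       local_trajectory={"T_room": np.array([20.0, 21.0])},
+#       local_exchange_trajectory={},
+#   )
+#   payload = msg.to_json()
+#   # -> '{"local_trajectory":{"T_room":[20.0,21.0]},"local_exchange_trajectory":{}}'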
+"""Stores all sorts of Dataclasses, Enums or Factories to help with the
+CasadiBackend."""
+
+import os
+import random
+import subprocess
+from contextlib import contextmanager
+from dataclasses import dataclass, field
+from logging import Logger
+from pathlib import Path
+from typing import Union, List, NamedTuple, Literal
+
+import casadi as ca
+from enum import Enum
+
+from pydantic import ConfigDict, Field, BaseModel
+
+from agentlib_mpc.data_structures import mpc_datamodels
+
+
+CaFuncInputs = Union[ca.MX, ca.SX, ca.Sparsity, ca.DM, float, int]
+DiscreteVars = List[bool]
+GUESS_PREFIX = "guess_"
+LB_PREFIX = "lb_"
+UB_PREFIX = "ub_"
+
+# Casadi Matrices specifying the input of all different types
+# of optimization parameters. Matrices consist of different variable rows
+# and have a column for each time step in the discretization.
+# There are separate matrices for each input type (as defined in the
+# System), and also for the upper and lower boundaries of variables
+# respectively.
+# Example:
+# {"x": [[1, 2], [0, 2]],
+# "lb_x": [[0, 0], [0, 0]],
+# "ub_x": [[0, 0], [0, 0]],
+# "d": [[2, 1], [1, 1]]
+# }
+MPCInputs = dict[str, ca.DM]
+
+
+class DiscretizationMethod(str, Enum):
+ collocation = "collocation"
+ multiple_shooting = "multiple_shooting"
+
+
+
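+# NOTE: the CollocationMethod and Integrators enums referenced by
+# CasadiDiscretizationOptions below were lost in extraction. A minimal
+# reconstruction inferred from their usage; members beyond "legendre",
+# "cvodes" and "idas" are assumptions:
+class CollocationMethod(str, Enum):
+ legendre = "legendre"
+ radau = "radau"
+
+
+class Integrators(str, Enum):
+ cvodes = "cvodes"
+ idas = "idas"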
+
+
+class Solvers(str, Enum):
+ ipopt = "ipopt"
+ qpoases = "qpoases"
+ sqpmethod = "sqpmethod"
+ gurobi = "gurobi"
+ bonmin = "bonmin"
+ fatrop = "fatrop"
+ proxqp = "proxqp"
+ osqp = "osqp"
+
+
+
+
+
+class CasadiDiscretizationOptions(mpc_datamodels.DiscretizationOptions):
+ model_config = ConfigDict(extra="forbid")
+
+ method: DiscretizationMethod = DiscretizationMethod.collocation
+ collocation_order: int = Field(default=3, ge=1, le=9)
+ collocation_method: CollocationMethod = CollocationMethod.legendre
+ integrator: Integrators = Integrators.cvodes
+
+
+class SolverOptions(BaseModel):
+ name: Solvers = "ipopt"
+ options: dict = Field(default={})
+ model_config = ConfigDict(extra="forbid")
+
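+# Example (illustrative): select ipopt and override one of its options. The
+# nested "ipopt" dict is merged with the defaults in _create_ipopt_solver.
+#   SolverOptions(name=Solvers.ipopt, options={"ipopt": {"max_iter": 500}})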
+
+@dataclass
+class OptParMXContainer:
+ """Stores the necessary MX variables created during discretization for
+ OptimizationParameters."""
+
+ var: List[ca.MX] = field(default_factory=list) # res format
+ grid: List[float] = field(default_factory=list) # res format, mpc inputs
+
+
+@dataclass
+class OptVarMXContainer(OptParMXContainer):
+ """Stores the necessary MX variables created during discretization for
+ OptimizationVariables."""
+
+ lb: List[ca.MX] = field(default_factory=list) # res format
+ ub: List[ca.MX] = field(default_factory=list) # res format
+ guess: List[ca.MX] = field(default_factory=list) # res format
+ opt: ca.DM = None # mpc inputs
+
+
+
+
+
+
+
+
+@dataclass
+class SolverFactory:
+ """Creates a solver given an NLP and an options construct."""
+
+ do_jit: bool
+ bat_file: Path = None
+ name: str = None
+ options: SolverOptions = field(default_factory=SolverOptions)
+ logger: Logger = None
+
+ def create_solver(
+ self,
+ nlp: Union[dict, str],
+ discrete: DiscreteVars = None,
+ equalities: list[bool] = None,
+ ) -> ca.Function:
+ options = self.options.options
+ solver_name = self.options.name.casefold()
+
+ if solver_name == Solvers.ipopt:
+ return self._create_ipopt_solver(nlp=nlp, options=options)
+ if solver_name == Solvers.fatrop:
+ return self._create_fatrop_solver(
+ nlp=nlp, options=options, equalities=equalities
+ )
+ if solver_name == Solvers.sqpmethod:
+ return self._create_sqpmethod_solver(nlp=nlp, options=options)
+ if solver_name == Solvers.qpoases:
+ return self._create_qpoases_solver(nlp=nlp, options=options)
+ if solver_name == Solvers.proxqp:
+ return self._create_proxqp_solver(nlp=nlp, options=options)
+ if solver_name == Solvers.osqp:
+ return self._create_osqp_solver(nlp=nlp, options=options)
+ if solver_name == Solvers.gurobi:
+ return self._create_gurobi_solver(
+ nlp=nlp, options=options, discrete=discrete
+ )
+ if solver_name == Solvers.bonmin:
+ return self._create_bonmin_solver(
+ nlp=nlp, options=options, discrete=discrete
+ )
+ raise ValueError(
+ f'Solver "{solver_name}" not recognized. Currently '
+ f"supported: {[s.value for s in Solvers]}"
+ )
+
+ def _create_fatrop_solver(self, nlp: dict, options: dict, equalities: list[bool]):
+ # equality = [True for _ in range(nlp["g"].shape[0])]
+
+ default_opts = {
+ "verbose": False,
+ "print_time": False,
+ "record_time": True,
+ "expand": True,
+ "structure_detection": "auto",
+ "equality": equalities,
+ "fatrop": {
+ "max_iter": 100,
+ "tol": 1e-4,
+ # "mu_init": 1e-2,
+ "print_level": 0,
+ },
+ }
+ default_solver_opts = options.pop("fatrop", {})
+ opts = {**default_opts, **options}
+ opts["fatrop"].update(default_solver_opts)
+ if self.do_jit:
+ opts["expand"] = False # compiled code is better not expanded
+ solver = self.make_casadi_nlp(nlp, "fatrop", opts, "nlp")
+ if not self.do_jit:
+ return solver
+ nlp = compile_solver(bat_file=self.bat_file, optimizer=solver, name=self.name)
+ return self.make_casadi_nlp(nlp, "fatrop", opts, "nlp")
+
+ def _create_ipopt_solver(self, nlp: dict, options: dict):
+ default_opts = {
+ "verbose": False,
+ "print_time": False,
+ "record_time": True,
+ "expand": True,
+ "ipopt": {
+ # "mu_init": 1e-2,
+ "max_iter": 100,
+ "tol": 1e-4,
+ "acceptable_tol": 0.1,
+ "acceptable_constr_viol_tol": 1,
+ "acceptable_iter": 5,
+ "acceptable_compl_inf_tol": 1,
+ "print_level": 0,
+ },
+ }
+ ipopt_ = options.pop("ipopt", {})
+ opts = {**default_opts, **options}
+ opts["ipopt"].update(ipopt_)
+ if self.do_jit:
+ opts["expand"] = False # compiled code is better not expanded
+ solver = self.make_casadi_nlp(nlp, "ipopt", opts, "nlp")
+ if not self.do_jit:
+ return solver
+ nlp = compile_solver(bat_file=self.bat_file, optimizer=solver, name=self.name)
+ return self.make_casadi_nlp(nlp, "ipopt", opts, "nlp")
+
+ def _create_sqpmethod_solver(self, nlp: dict, options: dict):
+ default_opts = {
+ "qpsol": "osqp",
+ "qpsol_options": {"error_on_fail": False},
+ "print_iteration": False,
+ "print_status": False,
+ "print_header": False,
+ "print_time": False,
+ "max_iter": 20,
+ "tol_du": 0.01,
+ "tol_pr": 0.0001,
+ }
+ opts = {**default_opts, **options}
+ return ca.nlpsol("mpc", "sqpmethod", nlp, opts)
+
+ def _create_qpoases_solver(self, nlp: dict, options: dict):
+ default_opts = {
+ "verbose": False,
+ "print_time": False,
+ "record_time": True,
+ "printLevel": "low",
+ }
+ opts = {**default_opts, **options}
+ return ca.qpsol("mpc", "qpoases", nlp, opts)
+
+ def _create_proxqp_solver(self, nlp: dict, options: dict):
+ default_opts = {
+ "verbose": False,
+ "print_time": False,
+ "record_time": True,
+ "proxqp": {"max_iter": 200, "eps_abs": 1e-4, "backend": "sparse"},
+ }
+ opts = {**default_opts, **options}
+ return ca.qpsol("mpc", "proxqp", nlp, opts)
+
+ def _create_osqp_solver(self, nlp: dict, options: dict):
+ default_opts = {
+ "verbose": False,
+ "print_time": False,
+ "record_time": True,
+ "osqp": {"max_iter": 200, "eps_abs": 1e-4, "verbose": False},
+ }
+ opts = {**default_opts, **options}
+ return ca.qpsol("mpc", "osqp", nlp, opts)
+
+ def _create_gurobi_solver(
+ self, nlp: dict, options: dict, discrete: DiscreteVars = None
+ ):
+ default_opts = {}
+ opts = {**default_opts, **options, "discrete": discrete}
+ return ca.qpsol("mpc", "gurobi", nlp, opts)
+
+ def _create_bonmin_solver(
+ self, nlp: dict, options: dict, discrete: DiscreteVars = None
+ ):
+ default_opts = {
+ "bonmin.bb_log_level": 0,
+ "bonmin.bb_log_interval": 1000,
+ "bonmin.nlp_log_level": 0,
+ }
+ opts = {**default_opts, **options, "discrete": discrete}
+ return ca.nlpsol("mpc", "bonmin", nlp, opts)
+
+ def make_casadi_nlp(
+ self,
+ problem: Union[dict, str],
+ solver: str,
+ opts: dict,
+ problem_type: Literal["nlp", "qp"] = "nlp",
+ ):
+ ca_sol = ca.nlpsol if problem_type == "nlp" else ca.qpsol
+ try:
+ solver = ca_sol("mpc", solver, problem, opts)
+ except RuntimeError:
+ solver = ca_sol("mpc", solver, problem, {**opts, "expand": False})
+ if not self.do_jit:
+ self.logger.info(
+ "Tried setting up nlp with 'expand'=True, but your problem "
+ "formulation contains non-expandable elements (e.g. using cvodes "
+ "as integrator, or interpolation tables.)"
+ )
+ return solver
+
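+# Minimal usage sketch (assumed, not part of the module): create an ipopt
+# solver for the toy NLP min (x - 1)^2 s.t. 0 <= x <= 2 and solve it.
+#
+#   x = ca.MX.sym("x")
+#   nlp = {"x": x, "f": (x - 1) ** 2, "g": x}
+#   factory = SolverFactory(do_jit=False, options=SolverOptions(name="ipopt"))
+#   solver = factory.create_solver(nlp)
+#   result = solver(x0=0, lbg=0, ubg=2)  # result["x"] is approximately 1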
+
+@contextmanager
+def temporary_directory(path):
+ old_pwd = os.getcwd()
+ os.chdir(path)
+ try:
+ yield
+ finally:
+ os.chdir(old_pwd)
+
+
+def compile_solver(bat_file: Path, name: str, optimizer: ca.Function) -> str:
+ """
+ Code-generates an ipopt solver and compiles it.
+ Currently only works on Windows! Requires a batch file that knows
+ how to set up the Visual Studio command line and compile the source code.
+
+ Returns:
+ The Path to the .dll file for the compiled solver.
+
+ Raises:
+ TypeError
+ FileNotFoundError
+ RuntimeError
+ """
+ if not name:
+ name = f"nlp_{random.randint(10, 1000)}"
+
+ base_name = name
+ file_name = f"{name}.c"
+ file = Path(file_name)
+ i = 0
+
+ # "build_batch_bat": "D:/ses-tsp/masterarbeit-miocp/02_Work/agentlib_mpc_9_approximate_miocp/examples/prod_cons_nmpc/solver_lib/compile_nlp.bat",
+
+ c_dir = Path(Path(bat_file).parent, "code_gen")
+ c_dir.mkdir(exist_ok=True)
+ batch = str(Path(bat_file).absolute())
+
+ with temporary_directory(c_dir):
+ while file.exists():
+ name = f"{base_name}_{i}"
+ file_name = f"{name}.c"
+ file = Path(file_name)
+ i = i + 1
+
+ with temporary_directory(c_dir):
+ optimizer.generate_dependencies(file_name)
+
+ try:
+ with temporary_directory(c_dir):
+ ret = subprocess.call([batch, file_name])
+ except TypeError as e:
+ # no batch file was provided
+ raise TypeError(
+ "You need to provide a batch file to "
+ "compile the solver in the backend config."
+ ) from e
+ except FileNotFoundError as e:
+ # provided batch file does not exist
+ raise FileNotFoundError(
+ f"The provided batch file {batch} does not exist."
+ ) from e
+ if ret != 0:
+ raise RuntimeError(
+ "The provided batch file did not exit properly, please "
+ "check it properly compiles the provided file."
+ )
+ solver_dll = Path(c_dir).joinpath(f"{name}.dll").absolute()
+ return str(solver_dll)
+
+import dataclasses
+from dataclasses import dataclass, asdict
+from enum import Enum
+
+import numpy as np
+
+from agentlib.core import Source
+
+# Parameter names
+PREDICTION_HORIZON = "prediction_horizon"
+TIME_STEP = "time_step"
+
+
+# Communication names / aliases
+REGISTRATION = "registration"
+REGISTRATION_C2A = "registration_coordinator_to_agent"
+REGISTRATION_A2C = "registration_agent_to_coordinator"
+START_ITERATION = "startIteration"
+START_ITERATION_C2A = "startIteration_coordinator_to_agent"
+START_ITERATION_A2C = "startIteration_agent_to_coordinator"
+OPTIMIZATION = "optimization"
+OPTIMIZATION_C2A = "optimization_coordinator_to_agent"
+OPTIMIZATION_A2C = "optimization_agent_to_coordinator"
+
+
+class CoordinatorStatus(str, Enum):
+ """Enum used to keep track of the status of a DMPC-Coordinator."""
+
+ sleeping = "sleeping"
+ init_iterations = "init_iterations"
+ optimization = "optimization"
+ updating = "updating"
+
+
+class AgentStatus(str, Enum):
+ """Enum used within a DMPC-Coordinator to keep track of the statuses of its
+ participating agents."""
+
+ pending = "pending" # agent is not yet fully registered
+ standby = "standby" # agent is fully registered but not initialized for opt
+ ready = "ready" # agent is ready to start a local optimization
+ busy = "busy" # agent is performing a task
+
+
+@dataclass
+class OptimizationData:
+ """
+ Contains specific variables (or time series) of the agent
+ """
+
+ x: np.ndarray = dataclasses.field(default_factory=lambda: np.array([]))
+ u: np.ndarray = dataclasses.field(default_factory=lambda: np.array([]))
+
+ def to_dict(self) -> dict:
+ inst_dict = asdict(self)
+ for key, val in inst_dict.items():
+ if isinstance(val, np.ndarray):
+ inst_dict[key] = np.array2string(val)
+ return inst_dict
+
+ @classmethod
+ def from_dict(cls, data: dict):
+ for key, val in data.items():
+ try:
+ data[key] = np.frombuffer(val)
+ except (ValueError, TypeError):
+ pass
+ return cls(**data)
+
+
+@dataclass
+class RegistrationMessage:
+ """Dataclass structuring the communication during registration between a
+ participating agent and the coordinator in DMPC."""
+
+ status: AgentStatus = None
+ opts: dict = None
+ agent_id: str = None
+ coupling: list = None
+
+
+# EXECUTION
+@dataclass
+class AgentDictEntry:
+ """Dataclass holding the status of a participating agent in DMPC."""
+
+ name: str
+ # default_factory prevents all instances sharing one OptimizationData object
+ optimization_data: OptimizationData = dataclasses.field(
+ default_factory=OptimizationData
+ )
+ status: AgentStatus = AgentStatus.pending
+
+from enum import Enum
+
+
+
+class InterpolationMethods(str, Enum):
+ linear = "linear"
+ previous = "previous"
+ no_interpolation = "no_interpolation"
+ spline3 = "spline3"
+
+ # this interpolation method is a custom implementation, intended for the case
+ # where the source data is sampled finer than the target data. It takes the
+ # average of all points between two adjacent time steps on the target grid.
+ # Example:
+ # source_grid: [0, 10, 20, 30, 40, 50, 60]
+ # source_data: [a, b, c, d, e, f, g]
+ # target_grid: [15, 35, 55]
+ # Will yield: [(c+d)/2, (e+f)/2, (e+f)/2]
+ # The last value is always duplicated, to get a length consistent with other
+ # interpolation methods.
+ # This is intended for the case where the target data is input for an
+ # integration / prediction between two points.
+ mean_over_interval = "mean_over_interval"
+
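+# A sketch of the averaging described for mean_over_interval (not part of the
+# module; assumes numpy arrays and a sorted target grid, boundary handling is
+# an assumption):
+#
+#   def mean_over_interval(source_grid, source_data, target_grid):
+#       out = []
+#       for t0, t1 in zip(target_grid[:-1], target_grid[1:]):
+#           mask = (source_grid > t0) & (source_grid <= t1)
+#           out.append(float(np.mean(source_data[mask])))
+#       out.append(out[-1])  # duplicate the last value for consistent length
+#       return out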
+
+
+import dataclasses
+from pathlib import Path
+from typing import Literal
+
+import keras.callbacks
+import pandas as pd
+import pydantic
+from enum import Enum
+from agentlib.core.errors import ConfigurationError
+from pydantic import BaseModel
+from pydantic_core.core_schema import FieldValidationInfo
+
+
+
+
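+# NOTE: the Feature base class and the OutputType enum used below were lost in
+# extraction. A minimal reconstruction inferred from their usage; field names
+# and defaults are assumptions:
+class OutputType(str, Enum):
+ absolute = "absolute"
+ difference = "difference"
+
+
+class Feature(BaseModel):
+ name: str
+ lag: int = 1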
+
+
+
+
+class OutputFeature(Feature):
+ output_type: OutputType = pydantic.Field(
+ description="What kind of output this is. If 'absolute', a forward pass of the"
+ " MLModel will yield the absolute value of the feature at the next time"
+ " step. If it is 'difference', the difference to the last time step"
+ " will be generated, so it has to be added again."
+ )
+ recursive: bool = pydantic.Field(
+ default=True,
+ description="If the output feature is recursive, it will also be used as an "
+ "input for the model. This is useful for mpc, where we want to "
+ "model the evolution of states based on their previous value. If "
+ "false, can be used to model algebraic relationships. Default is "
+ "true.",
+ )
+
+ @pydantic.field_validator("recursive")
+ @classmethod
+ def non_recursive_features_have_to_be_absolute(
+ cls, recursive, info: FieldValidationInfo
+ ):
+ output_type = info.data["output_type"]
+ if not recursive and output_type == "difference":
+ raise ConfigurationError(
+ f"Output Feature {info.data['name']} was specified as a non-recursive feature"
+ f" for which the differenc in output should be learned. This "
+ f"combination is not allowed. Please set 'output_type' to "
+ f"'absolute' for non-recursive features."
+ )
+ return recursive
+
+
+@dataclasses.dataclass
+class TrainingData:
+ """Stores the data which is used to train a model."""
+
+ training_inputs: pd.DataFrame
+ training_outputs: pd.DataFrame
+ validation_inputs: pd.DataFrame
+ validation_outputs: pd.DataFrame
+ test_inputs: pd.DataFrame
+ test_outputs: pd.DataFrame
+
+ def save(self, path: Path):
+ """Saves a single csv file in the path location, containing the training,
+ validation and test data."""
+ training = pd.concat(
+ [self.training_inputs, self.training_outputs],
+ keys=["inputs", "outputs"],
+ axis=1,
+ )
+ validation = pd.concat(
+ [self.validation_inputs, self.validation_outputs],
+ keys=["inputs", "outputs"],
+ axis=1,
+ )
+ test = pd.concat(
+ [self.test_inputs, self.test_outputs],
+ keys=["inputs", "outputs"],
+ axis=1,
+ )
+ full = pd.concat(
+ [training, validation, test], keys=["training", "validation", "test"]
+ )
+ full.sort_index(inplace=True)
+ path.mkdir(parents=True, exist_ok=True)
+ full.to_csv(Path(path, "train_test_val_data.csv"))
+
+ @classmethod
+ def load(cls, path: Path):
+ full = pd.read_csv(path, header=[0, 1], index_col=[0, 1])
+ return cls(
+ training_inputs=full.loc["training"]["inputs"],
+ test_inputs=full.loc["test"]["inputs"],
+ validation_inputs=full.loc["validation"]["inputs"],
+ training_outputs=full.loc["training"]["outputs"],
+ test_outputs=full.loc["test"]["outputs"],
+ validation_outputs=full.loc["validation"]["outputs"],
+ )
+
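+# Example (illustrative): building TrainingData from a pre-split DataFrame df
+# with input column "u" and output column "T":
+#
+#   data = TrainingData(
+#       training_inputs=df[["u"]][:700], training_outputs=df[["T"]][:700],
+#       validation_inputs=df[["u"]][700:850], validation_outputs=df[["T"]][700:850],
+#       test_inputs=df[["u"]][850:], test_outputs=df[["T"]][850:],
+#   )
+#   data.save(Path("ml_models"))  # writes ml_models/train_test_val_data.csv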
+
+Activation = Literal[
+ "relu",
+ "sigmoid",
+ "softmax",
+ "softplus",
+ "softsign",
+ "tanh",
+ "selu",
+ "elu",
+ "exponential",
+]
+
+
+def column_order(
+ inputs: dict[str, Feature], outputs: dict[str, OutputFeature]
+) -> list[str]:
+ """Defines the order of the columns in which training data should be passed to
+ keras and saved for serialization."""
+ ordered: list[str] = []
+ for name, feat in inputs.items():
+ for i in range(feat.lag):
+ ordered.append(name_with_lag(name, i))
+ for name, feat in outputs.items():
+ if not feat.recursive:
+ continue
+ for i in range(feat.lag):
+ ordered.append(name_with_lag(name, i))
+ return ordered
+
+
+def name_with_lag(name: str, lag: int) -> str:
+ if lag == 0:
+ return name
+ return f"{name}_{lag}"
+
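+# Example (illustrative): with one input "u" of lag 2 and one recursive output
+# "T" of lag 1, column_order yields ["u", "u_1", "T"].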
+
+class EarlyStoppingCallback(pydantic.BaseModel):
+ patience: int = 1000
+ verbose: Literal[0, 1] = 0
+ restore_best_weights: bool = True
+ activate: bool = False
+
+ def callback(self):
+ return keras.callbacks.EarlyStopping(
+ patience=self.patience,
+ verbose=self.verbose,
+ restore_best_weights=self.restore_best_weights,
+ )
+
+import dataclasses
+from pathlib import Path
+from typing import List, Union, TypeVar, Protocol, Sequence, Iterable
+from itertools import chain
+
+import attrs
+import numpy as np
+import pandas as pd
+import pydantic
+from enum import Enum, auto
+from agentlib.core import AgentVariable
+from agentlib.core.module import BaseModuleConfigClass
+
+from agentlib_mpc.data_structures.interpolation import InterpolationMethods
+from pydantic import ConfigDict
+
+
+class InitStatus(str, Enum):
+ """Keep track of the readiness status of the MPC."""
+
+ pre_module_init = auto()
+ during_update = auto()
+ ready = auto()
+
+
+class DiscretizationOptions(pydantic.BaseModel):
+ """Class defining the options to discretize an MPC. Can be extended for different
+ optimization implementations."""
+
+ model_config = ConfigDict(extra="allow")
+
+ time_step: float = pydantic.Field(
+ default=60,
+ ge=0,
+ description="Time step of the MPC.",
+ )
+ prediction_horizon: int = pydantic.Field(
+ default=5,
+ ge=0,
+ description="Prediction horizon of the MPC.",
+ )
+
+
+class Results(Protocol):
+ df: pd.DataFrame
+
+ def __getitem__(self, item: str) -> Sequence[float]: ...
+
+
+@dataclasses.dataclass
+class BaseVariableReference:
+ def all_variables(self) -> List[str]:
+ """Returns a list of all variables registered in the var_ref"""
+ return list(chain.from_iterable(self.__dict__.values()))
+
+ @classmethod
+ def from_config(cls, config: BaseModuleConfigClass):
+ """Creates an instance from a pydantic values dict which includes lists of
+ AgentVariables with the keys corresponding to 'states', 'inputs', etc."""
+
+ def names_list(ls: List[AgentVariable]):
+ return [item.name for item in ls]
+
+ field_names = set(f.name for f in dataclasses.fields(cls))
+ variables = {
+ k: names_list(v) for k, v in config.__dict__.items() if k in field_names
+ }
+ return cls(**variables)
+
+ def __contains__(self, item):
+ all_variables = set(chain.from_iterable(self.__dict__.values()))
+ return item in all_variables
+
+VariableReferenceT = TypeVar("VariableReferenceT", bound=BaseVariableReference)
+
+
+@dataclasses.dataclass
+class VariableReference(BaseVariableReference):
+ states: List[str] = dataclasses.field(default_factory=list)
+ controls: List[str] = dataclasses.field(default_factory=list)
+ inputs: List[str] = dataclasses.field(default_factory=list)
+ parameters: List[str] = dataclasses.field(default_factory=list)
+ outputs: List[str] = dataclasses.field(default_factory=list)
+
+
+def r_del_u_convention(name: str) -> str:
+ """Turns the name of a control variable into its weight via convention"""
+ return f"r_del_u_{name}"
+
+
+@dataclasses.dataclass
+class FullVariableReference(VariableReference):
+ @property
+ def r_del_u(self) -> List[str]:
+ return [r_del_u_convention(cont) for cont in self.controls]
+
+
+@dataclasses.dataclass
+class MINLPVariableReference(VariableReference):
+ binary_controls: List[str] = dataclasses.field(default_factory=list)
+
+
+@dataclasses.dataclass
+class MHEVariableReference(BaseVariableReference):
+ states: List[str] = dataclasses.field(default_factory=list)
+ measured_states: List[str] = dataclasses.field(default_factory=list)
+ weights_states: List[str] = dataclasses.field(default_factory=list)
+ estimated_inputs: List[str] = dataclasses.field(default_factory=list)
+ estimated_parameters: List[str] = dataclasses.field(default_factory=list)
+ known_inputs: List[str] = dataclasses.field(default_factory=list)
+ known_parameters: List[str] = dataclasses.field(default_factory=list)
+ outputs: List[str] = dataclasses.field(default_factory=list)
+
+ def all_variables(self) -> Iterable[str]:
+ """Returns all variables registered in the var_ref which the MHE can
+ get from the config with get()"""
+ return (
+ set(super().all_variables())
+ - set(self.measured_states)
+ - set(self.weights_states)
+ )
+
+
+@attrs.define(slots=True, weakref_slot=False, kw_only=True)
+class MPCVariable(AgentVariable):
+ """AgentVariable used to define input variables of MPC."""
+
+ interpolation_method: InterpolationMethods = attrs.field(
+ default=InterpolationMethods.linear,
+ metadata={
+ "description": "Defines which method is used for interpolation of "
+ "boundaries or values for this variable. Default is linear.",
+ "title": "Interpolation Method",
+ },
+ )
+
+
+MPCVariables = List[MPCVariable]
+
+
+def stats_path(path: Union[Path, str]) -> Path:
+ res_file = Path(path)
+ return Path(res_file.parent, "stats_" + res_file.name)
+
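+# Example: stats_path("results/mpc.csv") -> Path("results/stats_mpc.csv")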
+
+def cia_relaxed_results_path(path: Union[Path, str]) -> Path:
+ res_file = Path(path)
+ return Path(res_file.parent, "relaxed_" + res_file.name)
+
+
+MPCValue = Union[int, float, list[Union[int, float]], pd.Series, np.ndarray]
+
+"""Holds the classes for CasADi variables and the CasADi model."""
+
+import itertools
+import logging
+from itertools import chain
+from pathlib import Path
+from typing import Dict, Union, List, Iterable, TypeVar, Any, Optional
+
+from agentlib import AgentVariable
+import pandas as pd
+from agentlib.core.errors import ConfigurationError
+from pydantic_core.core_schema import ValidatorFunctionWrapHandler
+import casadi as ca
+from pydantic import (
+ field_validator,
+ FieldValidationInfo,
+ model_validator,
+ Field,
+)
+
+from agentlib_mpc.data_structures import ml_model_datatypes
+from agentlib_mpc.data_structures.ml_model_datatypes import OutputType, name_with_lag
+
+from agentlib_mpc.models.casadi_predictor import CasadiPredictor
+from agentlib_mpc.models.casadi_model import (
+ CasadiModel,
+ CasadiModelConfig,
+ CasadiState,
+ CasadiOutput,
+ CasadiTypes,
+)
+from agentlib_mpc.models.serialized_ml_model import (
+ SerializedMLModel,
+)
+from agentlib_mpc.utils.sampling import sample
+
+logger = logging.getLogger(__name__)
+CASADI_VERSION = float(ca.__version__[:3])
+
+
+T = TypeVar("T")
+
+
+def compute_dupes(collection: Iterable[T]) -> list[T]:
+ """Computes the duplicate elements in a collection"""
+ dupes = []
+ seen = set()
+ for element in collection:
+ if element in seen:
+ dupes.append(element)
+ else:
+ seen.add(element)
+ return dupes
+
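+# Example: compute_dupes(["a", "b", "a"]) returns ["a"].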
+
+# todo
+"""
+2. Algebraic ML-Equations will consider the continuous evaluation of States during integration
+"""
+
+
+class CasadiMLModelConfig(CasadiModelConfig):
+ ml_model_sources: list[Union[SerializedMLModel, Path]] = []
+ dt: Union[float, int] = Field(
+ default=1, title="time increment", validate_default=True
+ )
+
+ @field_validator("ml_model_sources", mode="before")
+ @classmethod
+ def check_or_load_models(cls, ml_model_sources, info: FieldValidationInfo):
+ # load all ANNs that are paths
+ for i, ml_model_src in enumerate(ml_model_sources):
+ if isinstance(ml_model_src, SerializedMLModel):
+ continue
+ serialized = SerializedMLModel.load_serialized_model(ml_model_src)
+ assert_recursive_outputs_are_states(serialized, info.data["outputs"])
+ ml_model_sources[i] = serialized
+
+ ml_model_sources: list[SerializedMLModel]
+ # check that all MLModels have the same time step
+ time_steps = {s_ml_model.dt for s_ml_model in ml_model_sources}
+ if len(time_steps) > 1:
+ raise ConfigurationError(
+ f"Provided MLModel's need to have the same 'dt'. Provided dt are"
+ f" {time_steps}"
+ )
+
+ # Check that model config and provided .json files match and there are no dupes
+ all_outputs = list(
+ chain.from_iterable(
+ [ml_model.output.keys() for ml_model in ml_model_sources]
+ )
+ )
+ all_inputs = list(
+ chain.from_iterable(
+ [ml_model.input.keys() for ml_model in ml_model_sources]
+ )
+ )
+ output_names = [var.name for var in info.data["states"] + info.data["outputs"]]
+ input_names = [var.name for var in info.data["inputs"] + info.data["states"]]
+
+ output_dupes = compute_dupes(all_outputs)
+ if output_dupes:
+ raise ConfigurationError(
+ f"The MLModel's that were provided define the same output multiple times."
+ f" Duplicates are: {output_dupes}"
+ )
+
+ inputs_in_ml_model_but_not_config = set(all_inputs) - set(input_names)
+ outputs_in_ml_model_but_not_config = set(all_outputs) - set(output_names)
+
+ if inputs_in_ml_model_but_not_config:
+ raise ConfigurationError(
+ f"Inputs specified by MLModels do not appear in model: "
+ f"{inputs_in_ml_model_but_not_config}"
+ )
+ if outputs_in_ml_model_but_not_config:
+ raise ConfigurationError(
+ f"Outputs specified by MLModels do not appear in model states / outputs: "
+ f"{outputs_in_ml_model_but_not_config}"
+ )
+
+ return ml_model_sources
+
+ @model_validator(mode="wrap")
+ @classmethod
+ def check_dt(cls, values, handler: ValidatorFunctionWrapHandler):
+ validated: cls = handler(values)
+ ml_model_dt = validated.ml_model_sources[0].dt
+ model_dt = validated.dt
+ if model_dt != ml_model_dt:
+ logger.warning(
+ f"Time step (dt) of model and supplied MLModels does not match. Setting "
+ f"the model time step to {ml_model_dt}."
+ )
+ validated.dt = ml_model_dt
+ return validated
+
+
+def assert_recursive_outputs_are_states(
+ serialized: SerializedMLModel, outputs: dict[str, AgentVariable]
+):
+ """Raises a ConfigurationError if there are recursive ML-models for outputs."""
+ for out_name, out_feat in serialized.output.items():
+ if out_name in outputs and out_feat.recursive:
+ raise ConfigurationError(
+ f"Provided ML-model defines recursive output {out_name}, however in the"
+ f" model config it is listed under 'outputs'. A recursive model output"
+ f" can only be associated with a 'state'."
+ )
+
+
+class CasadiMLModel(CasadiModel):
+ """
+ This class handles one or multiple ML models used to predict the states.
+ Compared to the previous version, it depends only on the trained models,
+ which provide information about the lags via the serialized ML models.
+ This way, there is no need to define the lags again in the model class.
+ """
+
+ config_type: CasadiMLModelConfig
+ config: CasadiMLModelConfig
+
+ def __init__(self, **kwargs):
+ # state variables used and predicted by the MLModel
+
+ super().__init__(**kwargs)
+ # register ARX models
+ ml_model_dict, casadi_ml_model_dict = self.register_ml_models()
+ self.ml_model_dict: Dict[str, SerializedMLModel] = ml_model_dict
+ self.casadi_ml_model_dict: Dict[str, CasadiPredictor] = casadi_ml_model_dict
+
+ # Register lagged variables
+ lags_dict, max_lag = self._create_lags_dict()
+ self.lags_dict: dict[str, int] = lags_dict
+ self.max_lag: int = max_lag
+ self.lags_mx_store: dict[str, ca.MX] = self._create_lags_mx_variables()
+ self._fill_algebraic_equations_with_bb_output()
+
+ self.past_values = self._create_past_values()
+
+ # construct a stage function for optimization and simulation
+ self.sim_step = self._make_unified_predict_function()
+
+
+
+ def update_ml_models(self, *ml_models: SerializedMLModel, time: float):
+ """Updates the internal MLModels with the passed MLModels.
+
+ Warning: This function does not fully check if the result makes sense!
+ Consider the following case:
+ You have two ml_models, with output out1 in ml_model1, and outputs out2 and out3 in ml_model2.
+ You call this function with an ml_model3 that defines out2.
+ This function would replace ml_model2 with ml_model3, leaving out3 undefined and causing
+ an error in subsequent functions. When supplying ml_models that make parts of
+ other ml_models obsolete, make sure they specify all affected outputs.
+ """
+ new_outputs = set(
+ chain.from_iterable([ml_model.output.keys() for ml_model in ml_models])
+ )
+ ml_models_to_keep = []
+ for ml_model in self.config.ml_model_sources:
+ # if the outputs of the currently active ml_models are not part of the new ml_models
+ # we just got, we keep them
+ if set(ml_model.output) - new_outputs:
+ ml_models_to_keep.append(ml_model)
+ self.config.ml_model_sources = ml_models_to_keep + list(ml_models)
+
+ self.lags_dict, self.max_lag = self._create_lags_dict()
+ self._update_past_values(time)
+ self.ml_model_dict, self.casadi_ml_model_dict = self.register_ml_models()
+ self.sim_step = self._make_unified_predict_function()
+ self._assert_outputs_are_defined()
+
+ def _update_past_values(self, time: float):
+ """Generates new columns and deletes old ones in the time series data, when the
+ MLModels are updated."""
+ new_columns = set(self.lags_dict)
+ old_columns = set(self.past_values.columns)
+
+ columns_to_remove = old_columns - new_columns
+ columns_to_add = new_columns - old_columns
+
+ self.past_values.drop(columns_to_remove, inplace=True)
+ index = [time - self.dt * lag for lag in range(self.max_lag)]
+ index.reverse()
+ for col in columns_to_add:
+ value = self.get(col).value
+ for time in index:
+ self.past_values.loc[(time, col)] = value
+
+ def _create_past_values(self) -> pd.DataFrame:
+ """Creates a collection which saves a history of the model's variables that
+ are required in the lags. Must be executed after _create_lags_dict"""
+ last_values = pd.DataFrame(columns=self.lags_dict)
+ index = [-self.config.dt * lag for lag in range(self.max_lag)]
+ index.reverse()
+ values = [self.get(var_name).value for var_name in self.lags_dict]
+ for time in index:
+ last_values.loc[time] = values
+ return last_values
+
+ def _create_lags_dict(self) -> tuple[dict[str, int], int]:
+ """Creates a dictionary which holds the maximum lag of each variable"""
+ lags_dict = {}
+ for ml_model in self.config.ml_model_sources:
+ in_out = ml_model.input | ml_model.output
+ for input_name, feature in in_out.items():
+ current_lag = lags_dict.setdefault(input_name, 1)
+ if feature.lag > current_lag:
+ lags_dict[input_name] = feature.lag
+ max_lag = max(lags_dict.values())
+ return lags_dict, max_lag
+
+ def _create_lags_mx_variables(self) -> dict[str, ca.MX]:
+ """Creates symbolic CasADi MX variables for all the required lags."""
+ lags_mx_dict = {}
+ for var_name, max_lag_of_var in self.lags_dict.items():
+ for lag in range(1, max_lag_of_var):
+ l_name = name_with_lag(var_name, lag)
+ lags_mx_dict[l_name] = ca.MX.sym(l_name)
+ return lags_mx_dict
+
+ def set_with_timestamp(self, name: str, value: Any, timestamp: float):
+ if name in self.past_values.columns:
+ self.past_values.loc[(timestamp, name)] = value
+ self.set(name, value)
+
+ def _fixed_during_integration(
+ self, bb_results: Optional[dict[str, ca.MX]] = None
+ ) -> dict[str, ca.MX]:
+ """Returns all variable names with their corresponding CasADi MX variable that
+ are fixed during integration.
+ Uses a heuristic to approximate blackbox defined states during integration.
+ Currently, the heuristic is to use the arithmetic middle between the start and
+ the end of the integration.
+ If the bb_results are not available, the value at the beginning is used
+
+ Args:
+ bb_results: The results of the evaluation of the blackbox functions
+ """
+ all_inputs = self._all_inputs()
+ exclude = [v.name for v in self.differentials + self.outputs]
+ # take the mean of start/finish values of variables that have already been
+ # integrated by a discrete blackbox function
+ if bb_results:
+ for bb_name, bb_res_mx in bb_results.items():
+ all_inputs[bb_name] = (all_inputs[bb_name] + bb_res_mx) / 2
+ return {name: sym for name, sym in all_inputs.items() if name not in exclude}
+
+ def _make_integrator(self, ignore_algebraics: bool = False) -> ca.Function:
+ """Creates an integrator for the white-box equations in the model. The
+ integrator takes the stacked white box differential states (in order of
+ self.differentials), and the stacked (parameters, inputs, mL_model_states) in that
+ order as the second argument.
+
+ Args:
+ ignore_algebraics: if True, algebraic equations will not be added
+ (default False)
+
+ """
+ if CASADI_VERSION < 3.6:
+ args = ({"t0": 0, "tf": self.dt},)
+ else:
+ args = (0, self.dt, {})
+ # the ml_model outputs cannot be changed during integration, so they are a
+ # parameter here
+ integration_params = self._fixed_during_integration()
+ par = ca.vertcat(*integration_params.values())
+
+ # if we have no differentials and no algebraics, this function should do nothing
+ if (not self.differentials) and (ignore_algebraics or not self.outputs):
+ return ca.Function("empty", [[], par], [[], []], ["x0", "p"], ["xf", "zf"])
+
+ x = ca.vertcat(*[sta.sym for sta in self.differentials])
+ # if we have a pure ode, we can use an ode solver which is more efficient
+ if self.differentials and (ignore_algebraics or not self.outputs):
+ ode = {
+ "x": x,
+ "p": par,
+ "ode": self.system,
+ }
+ return ca.integrator("system", "cvodes", ode, *args)
+
+ # if we have a dae or only algebraic equations, we use a dae solver
+ dae = {
+ "x": x,
+ "p": par,
+ "ode": self.system,
+ "z": ca.vertcat(*[var.sym for var in self.outputs]),
+ "alg": ca.vertcat(*self.output_equations),
+ }
+ # if there are no differential values, we create a dummy to make integrator
+ # callable
+ if not self.differentials:
+ dae.update({"x": ca.MX.sym("dummy", 1), "ode": 0})
+
+ try:
+ return ca.integrator("system", "idas", dae, *args)
+ except RuntimeError as e:
+ free_vars = e.args[0].split("since", 1)[1]
+ raise ConfigurationError(
+ "Could not create model, since some equations are not defined. Please"
+ " check that all states are either defined by an equation, or by a "
+ f"black box model. Currently undefined are: {free_vars}"
+ ) from e
+
+ def initialize(self, **ignored):
+ """
+ Prepare the black- and white-box models for CasADi backend optimization and
+ simulation
+ """
+ # load blackbox models
+ pass
+
+[docs] def register_ml_models(
+ self,
+ ) -> tuple[dict[str, SerializedMLModel], dict[str, CasadiPredictor]]:
+ """
+ Loads the serialized MLModels and finds the output states of each MLModel.
+ Divides the differential states of the model into states determined by the
+ white-box model (self._differentials) and by the black-box models
+ (self._differentials_network).
+ """
+
+ # map all outputs to their respective MLModel
+ output_to_ml_model = {}
+ ml_model_sources_dict = {
+ tuple(ml_model.output.keys()): ml_model
+ for ml_model in self.config.ml_model_sources
+ }
+ ml_model_dict: Dict[str, SerializedMLModel] = {}
+
+ for output in self.config.outputs + self.config.states:
+ for serialized_output_names, ml_model in ml_model_sources_dict.items():
+ if output.name in serialized_output_names:
+ output_to_ml_model[
+ output.name
+ ] = CasadiPredictor.from_serialized_model(ml_model)
+ ml_model_dict[output.name] = ml_model
+ casadi_ml_model_dict: Dict[str, CasadiPredictor] = output_to_ml_model
+ return ml_model_dict, casadi_ml_model_dict
+
+ def _fill_algebraic_equations_with_bb_output(self):
+ """Fills empty algebraic equations with the function defined by the
+ corresponding black box model."""
+ for variable_name, serialized_ml_model in self.ml_model_dict.items():
+ # recursive features are more like an ode, they don't represent outputs
+ if serialized_ml_model.output[variable_name].recursive:
+ continue
+ if self.get(variable_name).alg is not None:
+ raise RuntimeError(
+ f"Output '{variable_name}' already has an algebraic equation "
+ f"assigned, but is also defined by a black-box model."
+ )
+ inputs = ml_model_datatypes.column_order(
+ inputs=serialized_ml_model.input, outputs=serialized_ml_model.output
+ )
+ input_mx = ca.vertcat(*(self._get_lagged_symbolic(name) for name in inputs))
+ index = list(serialized_ml_model.output).index(variable_name)
+ alg = self.casadi_ml_model_dict[variable_name].predict(input_mx)[index]
+ self.get(variable_name).alg = alg
+
+ def _evaluate_bb_models_symbolically(
+ self, bb_inputs_mx: dict[str, ca.MX]
+ ) -> dict[str, ca.MX]:
+ """
+ Returns the CasADi MX-Expressions that result from evaluating all black-box
+ models symbolically.
+ Args:
+ bb_inputs_mx: Dictionary mapping variable names to the symbolic MX
+ expressions of all variables that are used as inputs for the black-box
+ models. Each MX has the dimension of the maximum lag of that variable
+ (the maximum over all black-box models that use the same input with
+ different lags).
+
+ Returns:
+ A dictionary mapping each recursive black-box output of the model to the
+ MX expression that defines the evaluation of its black-box model.
+ """
+
+ bb_result_mx: dict[str, ca.MX] = {}
+ # inputs from all MLModels of the black-box model are considered
+ for output_name, serialized_ml_model in self.ml_model_dict.items():
+ if not serialized_ml_model.output[output_name].recursive:
+ # non-recursive outputs are handled as algebraic equations in the
+ # integrator for simulation, or as constraints in MPC, so we skip them
+ continue
+
+ # for every input variable of the MLModel, create a CasADi symbolic
+ casadi_ml_model = self.casadi_ml_model_dict[output_name]
+ columns_ordered = ml_model_datatypes.column_order(
+ inputs=serialized_ml_model.input, outputs=serialized_ml_model.output
+ )
+ # todo tanja: here, we need to lookup what the user specified for the ANN as input, instead of the original mx variable
+ ca_nn_input = ca.vertcat(*[bb_inputs_mx[name] for name in columns_ordered])
+
+ # predict the result with current MLModel and add the result to the stage function
+ output_index = list(serialized_ml_model.output).index(output_name)
+ result = casadi_ml_model.predict(ca_nn_input)[output_index]
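+ # for difference-type outputs the model predicts the change over one time
+ # step, so the absolute value is recovered by adding the current (lag 0)
+ # value of the output variable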
+ if (
+ serialized_ml_model.output[output_name].output_type
+ == OutputType.difference
+ ):
+ result = result + bb_inputs_mx[output_name][0]
+
+ bb_result_mx[output_name] = result
+ return bb_result_mx
+
+[docs] def make_predict_function_for_mpc(self) -> ca.Function:
+ """Creates a prediction step function which is suitable for MPC with multiple
+ shooting."""
+ return self._make_unified_predict_function(ignore_algebraics=True)
+
+ def _get_lagged_symbolic(self, name: str):
+ """Returns the symbolic ca.MX of a variable, regardless if it is lagged or not."""
+ try:
+ return self.get(name).sym
+ except ValueError:
+ return self.lags_mx_store[name]
+
+ def _black_box_inputs(self) -> dict[str, ca.MX]:
+ """Creates a dictionary with names for all inputs of the black box functions
+ and their corresponding symbolic CasADi-variable."""
+ bb_inputs: dict[str, ca.MX] = {}
+ for name, lag in self.lags_dict.items():
+ for i in range(0, lag):
+ l_name = name_with_lag(name, i)
+ bb_inputs[l_name] = self._get_lagged_symbolic(l_name)
+ return bb_inputs
+
+ def _all_inputs(self) -> dict[str, ca.MX]:
+ """Creates a dictionary with names for all inputs of the full step function and
+ their corresponding symbolic CasADi-variable."""
+ all_variables = {var.name: var.sym for var in self.variables}
+ all_variables.update(self._black_box_inputs())
+ return all_variables
+
+ def _make_unified_predict_function(
+ self, ignore_algebraics: bool = False
+ ) -> ca.Function:
+ """
+ Creates a predict function that combines all available MLModels and the
+ white-box equations into a single step function with a unified output.
+ The constructed stage function takes the MLModel variables with their maximum
+ lag as input and returns the results of the MLModels as output.
+
+ Args:
+ ignore_algebraics: When True, algebraic equations will be ignored and no
+ idas solver is created. Useful for MPC, where equations can be added as
+ constraints and the performance of idas is undesirable
+ """
+ # initiate in- and output dicts for constructing the stage function
+
+ # create symbolic casadi variables for all inputs used in the MLModel. Each variable
+ # has the length of its maximum lag, i.e. if two MLModels use the variable var1,
+ # one with lag 3 and one with 2, we create a symbolic variable with length 3
+
+ bb_inputs = self._black_box_inputs()
+ all_variables = self._all_inputs()
+ # evaluate the black box models
+ bb_result_mx = self._evaluate_bb_models_symbolically(bb_inputs)
+ wb_inputs = self._fixed_during_integration(bb_result_mx)
+
+ # prepare functions that order the integrator inputs and outputs when supplied
+ # with keywords names
+ differentials_dict = {var.name: var.sym for var in self.differentials}
+ if not ignore_algebraics:
+ alg_dict = {var.name: var.sym for var in self.outputs}
+ else:
+ alg_dict = {}
+ stacked_alg = ca.vertcat(*[mx for mx in alg_dict.values()])
+ diff_states = ca.vertcat(*[mx for mx in differentials_dict.values()])
+ names_to_stacked_x = ca.Function(
+ "names_to_stacked_x",
+ list(differentials_dict.values()),
+ [diff_states],
+ list(differentials_dict),
+ ["x0"],
+ )
+ stacked_x_to_names = ca.Function(
+ "stacked_x_to_names",
+ [diff_states],
+ list(differentials_dict.values()),
+ ["x0"],
+ list(differentials_dict),
+ )
+ stacked_z_to_names = ca.Function(
+ "stacked_z_to_names",
+ [stacked_alg],
+ list(alg_dict.values()),
+ ["algs"],
+ list(alg_dict),
+ )
+
+ # perform symbolic evaluation of the white box equations
+ if differentials_dict:
+ int_x0_in = names_to_stacked_x(*differentials_dict.values())
+ else:
+ # have to handle case where differentials are empty separately because
+ # CasADi will return a dict instead of an MX if the input is empty.
+ int_x0_in = ca.DM([])
+
+ int_p_in = ca.vertcat(*wb_inputs.values())
+ integrator = self._make_integrator(ignore_algebraics=ignore_algebraics)
+ int_result = integrator(x0=int_x0_in, p=int_p_in)
+ x_names = stacked_x_to_names(x0=int_result["xf"])
+ z_names = stacked_z_to_names(algs=int_result["zf"])
+
+ opts = {"allow_duplicate_io_names": True} if CASADI_VERSION >= 3.6 else {}
+ return ca.Function(
+ "full_step",
+ list(all_variables.values()),
+ list(x_names.values())
+ + list(z_names.values())
+ + list(bb_result_mx.values()),
+ list(all_variables),
+ list(x_names) + list(z_names) + list(bb_result_mx),
+ opts,
+ )
+
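+ # Illustrative usage sketch (hypothetical variable names, not part of the
+ # library source): the returned ca.Function has named inputs and outputs, so
+ # it can be called with keyword arguments and returns a dict:
+ #
+ #     full_step = model.make_predict_function_for_mpc()
+ #     res = full_step(T=293.15, u=0.5, T_lag1=292.0)
+ #     res["T"]  # white-box state at the end of the step
+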
+[docs] def do_step(self, *, t_start, t_sample=None):
+ """
+ Simulates a time step of the simulation model. In the CasADi MLModel model,
+ black- and white-box models can be combined into a grey-box for simulation.
+ """
+
+ if t_sample is not None:
+ # variable sample times are not supported, the step size is fixed to dt
+ assert t_sample == self.dt
+
+ ml_model_input = self.get_ml_model_values(t_start)
+ full_input = {
+ var.name: var.value for var in self.variables if var.value is not None
+ }
+ full_input.update(ml_model_input)
+
+ result = self.sim_step(**full_input)
+ end_time = t_start + self.dt
+ for var_name, value in result.items():
+ self.set_with_timestamp(var_name, value, end_time)
+
+[docs] def get_ml_model_values(self, time: float):
+ """
+ Gets the input values with the correct lags for all MLModels.
+ """
+ ml_model_inputs: dict[str, list[float]] = {}
+ for inp, lag in self.lags_dict.items():
+ if lag == 1:
+ continue
+ target_grid = [-self.dt * t for t in range(1, lag)]
+ target_grid.reverse()
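+ # e.g. with dt=60 and lag=3 this yields target_grid == [-120, -60], i.e.
+ # the past values needed for the lagged inputs, oldest first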
+ history = self.past_values[inp].dropna()
+ res = sample(history, target_grid, current=time)
+ for i, val in enumerate(res):
+ ml_model_inputs[name_with_lag(inp, i + 1)] = val
+
+ return ml_model_inputs
+
+ @property
+ def bb_states(self) -> List[CasadiState]:
+ """List of all CasadiStates with an associated black box equation."""
+ return [var for var in self.states if var.name in self.ml_model_dict]
+
+ @property
+ def bb_outputs(self) -> List[CasadiOutput]:
+ """List of all CasadiStates with an associated black box equation."""
+ return [var for var in self.outputs if var.name in self.ml_model_dict]
+
+ @property
+ def auxiliaries(self) -> List[CasadiState]:
+ """List of all CasadiStates without an associated equation. Common
+ uses for this are slack variables that appear in cost functions and
+ constraints of optimization models."""
+ return [var for var in self.states if self._is_auxiliary(var)]
+
+ def _is_auxiliary(self, var: CasadiState):
+ """Checks whether a state does not have any function associated with it and
+ belongs to auxiliary variables"""
+ if var.ode is not None:
+ return False
+ if var.name in self.ml_model_dict:
+ return False
+ return True
+
+ def _assert_outputs_are_defined(self):
+ """Raises an Error, if the output variables are not defined with an equation"""
+ all_bb_outputs = [
+ list(ml_model.output) for ml_model in self.config.ml_model_sources
+ ]
+ all_bb_outputs_flat = set(itertools.chain.from_iterable(all_bb_outputs))
+
+ for out in self.outputs:
+ if out.alg is None and out.name not in all_bb_outputs_flat:
+ raise ValueError(
+ f"Output '{out.name}' was not initialized with an "
+ f"equation, nor is it specified by the provied blackbox models. "
+ f"Please sure you specify '{out.name}.alg' in 'setup_system()' or "
+ f"include a model in 'ml_model_sources'."
+ )
+ if out.alg is not None and out.name in all_bb_outputs_flat:
+ raise ValueError(
+ f"Output '{out.name}' is overspecified, as it has an algebraic "
+ f"equation defined in setup_system(), but is also defined by a "
+ f"provided black-box model."
+ )
+
+"""Holds the classes for CasADi variables and the CasADi model."""
+
+import json
+import logging
+import abc
+from itertools import chain
+
+from typing import List, Union, Tuple, Optional
+
+import attrs
+import pandas as pd
+from pydantic import Field, PrivateAttr, ConfigDict
+import casadi as ca
+import numpy as np
+
+from agentlib.core import Model, ModelConfig
+from agentlib.core.datamodels import (
+ ModelVariable,
+ Variability,
+ Causality,
+)
+from agentlib_mpc.data_structures.casadi_utils import ModelConstraint
+
+CasadiTypes = Union[ca.MX, ca.SX, ca.DM, ca.Sparsity]
+
+logger = logging.getLogger(__name__)
+ca_func_inputs = Union[ca.MX, ca.SX, ca.Sparsity, ca.DM]
+ca_all_inputs = Union[ca_func_inputs, np.float64, float]
+ca_constraint = Tuple[ca_all_inputs, ca_func_inputs, ca_all_inputs]
+ca_constraints = List[Tuple[ca_all_inputs, ca_func_inputs, ca_all_inputs]]
+
+
+[docs]@attrs.define(slots=True, weakref_slot=False, kw_only=True)
+class CasadiVariable(ModelVariable):
+ """Base Class for variables used in Casadi Models for simulation and
+ optimization. Implements the standard arithmetic operations,
+ so CasadiVariables can be used in equations.
+ Attributes:
+ sym: The symbolic CasADi variable used to define ode's and
+ optimization problems.
+ """
+
+ _sym: CasadiTypes = attrs.field(default=None, alias="_sym")
+
+ def __attrs_post_init__(self):
+ self._sym = self.create_sym()
+
+[docs] def create_sym(self) -> ca.MX:
+ """Ensures a symbolic MX variable is created with each CasadiVariable
+ instance, and that its dimensions are consistent."""
+ if self.value is not None:
+ if isinstance(self.value, (float, int)):
+ shape = (1, 1)
+ else:
+ shape = np.array(self.value).shape
+ if len(shape) == 1:
+ shape = (shape[0], 1)
+ else:
+ shape = (1, 1)
+ sym = ca.MX.sym(self.name, shape[0], shape[1])
+ return sym
+
+ @property
+ def sym(self) -> ca.MX:
+ return self._sym
+
+ def __add__(self, other):
+ return self._sym + other
+
+ def __radd__(self, other):
+ return other + self._sym
+
+ def __sub__(self, other):
+ return self._sym - other
+
+ def __rsub__(self, other):
+ return other - self._sym
+
+ def __mul__(self, other):
+ return self._sym * other
+
+ def __rmul__(self, other):
+ return other * self._sym
+
+ def __truediv__(self, other):
+ return self._sym / other
+
+ def __rtruediv__(self, other):
+ return other / self._sym
+
+ def __pow__(self, power, modulo=None):
+ return self._sym**power
+
+ def __rpow__(self, other):
+ return other**self._sym
+
+ def __abs__(self):
+ return ca.fabs(self._sym)
+
+ def __matmul__(self, other):
+ return self._sym @ other.sym
+
+ def __neg__(self):
+ return -self._sym
+
+ def __eq__(self, other):
+ try:
+ return self.sym == other.sym
+ except AttributeError:
+ return False
+
+ def __le__(self, other):
+ try:
+ return self.sym <= other.sym
+ except AttributeError as e:
+ raise TypeError(
+ "Cannot compare a CasadiVariable to a Non-CasadiVariable"
+ ) from e
+
+ def __lt__(self, other):
+ try:
+ return self.sym < other.sym
+ except AttributeError as e:
+ raise TypeError(
+ "Cannot compare a CasadiVariable to a Non-CasadiVariable"
+ ) from e
+
+ def __ne__(self, other):
+ try:
+ return self.sym != other.sym
+ except AttributeError:
+ return True
+
+ def __ge__(self, other):
+ try:
+ return self.sym >= other.sym
+ except AttributeError as e:
+ raise TypeError(
+ "Cannot compare a CasadiVariable to a Non-CasadiVariable"
+ ) from e
+
+ def __gt__(self, other):
+ try:
+ return self.sym > other.sym
+ except AttributeError as e:
+ raise TypeError(
+ "Cannot compare a CasadiVariable to a Non-CasadiVariable"
+ ) from e
+
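+ # Illustrative sketch (not part of the library source): the overloaded
+ # operators above let CasadiVariables be used directly in equations, e.g.
+ #
+ #     T = CasadiState(name="T", value=293.15)   # hypothetical variables
+ #     u = CasadiInput(name="u", value=0.0)
+ #     T.ode = -0.1 * (T - 293.15) + 0.5 * u     # builds a ca.MX expression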
+
+[docs]@attrs.define(slots=True, weakref_slot=False, kw_only=True)
+class CasadiParameter(CasadiVariable):
+ """
+ Class that stores various attributes of parameters.
+ """
+
+ def __attrs_post_init__(self):
+ super().__attrs_post_init__()
+ self.causality: Causality = Causality.parameter
+ self.variability: Variability = Variability.tunable
+
+
+[docs]@attrs.define(slots=True, weakref_slot=False, kw_only=True)
+class CasadiState(CasadiVariable):
+ """
+ Class that stores various attributes of CasADi differential variables.
+ """
+
+ _ode: Optional[CasadiTypes] = attrs.field(default=None, alias="_ode")
+
+ def __attrs_post_init__(self):
+ super().__attrs_post_init__()
+ self.causality: Causality = Causality.local
+ self.variability: Variability = Variability.continuous
+
+ @property
+ def alg(self) -> CasadiTypes:
+ raise AttributeError(
+ "Casadi States should not have .alg assignments. If you wish to provide "
+ "algebraic relationships to states, add them in the constraints."
+ )
+
+ @alg.setter
+ def alg(self, equation: Union[CasadiTypes, CasadiVariable]):
+ raise AttributeError(
+ "Casadi States should not have .alg assignments. Consider the following: \n"
+ " 1. If you need equality constraints in your MPC, please add them in the "
+ "constraints. \n"
+ " 2. If you use this to bundle an expression, consider using a regular "
+ "Python variable. \n"
+ " 3. Implicit algebraic equations are currently not supported."
+ )
+
+ @property
+ def ode(self) -> CasadiTypes:
+ return self._ode
+
+ @ode.setter
+ def ode(self, equation: Union[CasadiTypes, CasadiVariable]):
+ self._ode = get_symbolic(equation)
+
+[docs] def json(self, indent: int = 2, **kwargs):
+ data = self.dict(**kwargs)
+ if isinstance(self.value, pd.Series):
+ data["value"] = self.value.to_dict()
+ data.pop("_ode")
+ data.pop("_alg")
+ return json.dumps(data, indent=indent)
+
+
+[docs]@attrs.define(slots=True, weakref_slot=False, kw_only=True)
+class CasadiInput(CasadiVariable):
+ """
+ Class that stores various attributes of control variables.
+ """
+
+ def __attrs_post_init__(self):
+ super().__attrs_post_init__()
+ self.causality: Causality = Causality.input
+ self.variability: Variability = Variability.continuous
+
+ @property
+ def alg(self) -> CasadiTypes:
+ raise AttributeError(
+ "Casadi Inputs should not have .alg assignments. If you wish to provide "
+ "algebraic relationships to states, add them in the constraints."
+ )
+ return -1
+
+ @alg.setter
+ def alg(self, equation: Union[CasadiTypes, CasadiVariable]):
+ raise ValueError(
+ "Cannot assign algebraic equations to inputs. If this is for an MPC, "
+ "try defining a constraint instead."
+ )
+
+
+[docs]@attrs.define(slots=True, weakref_slot=False, kw_only=True)
+class CasadiOutput(CasadiVariable):
+ """
+ Class that stores various attributes of output variables.
+ """
+
+ _alg: CasadiTypes = attrs.field(default=None, alias="_alg")
+
+ def __attrs_post_init__(self):
+ super().__attrs_post_init__()
+ self.causality: Causality = Causality.output
+ self.variability: Variability = Variability.continuous
+
+ @property
+ def alg(self) -> CasadiTypes:
+ return self._alg
+
+ @alg.setter
+ def alg(self, equation: Union[CasadiTypes, CasadiVariable]):
+ if isinstance(equation, CasadiVariable):
+ # Converts CasadiVariables to their symbolic variable. Useful in case
+ # CasadiVariables are assigned in equations as is, i.e. their math methods
+ # are not called.
+ self._alg = equation.sym
+ else:
+ self._alg = equation
+
+[docs] def json(self, **kwargs):
+ data = self.dict(**kwargs)
+ if isinstance(self.value, pd.Series):
+ data["value"] = self.value.to_dict()
+ data.pop("_alg")
+ return json.dumps(data)
+
+
+[docs]class CasadiModelConfig(ModelConfig):
+ system: CasadiTypes = None
+ cost_function: CasadiTypes = None
+
+ inputs: List[CasadiInput] = Field(default=list())
+ outputs: List[CasadiOutput] = Field(default=list())
+ states: List[CasadiState] = Field(default=list())
+ parameters: List[CasadiParameter] = Field(default=list())
+ model_config = ConfigDict(validate_assignment=True, extra="forbid")
+ _types: dict[str, type] = PrivateAttr(
+ default={
+ "inputs": CasadiInput,
+ "outputs": CasadiOutput,
+ "states": CasadiState,
+ "parameters": CasadiParameter,
+ }
+ )
+
+
+[docs]class CasadiModel(Model):
+ """Base Class for CasADi models. To implement your own model, inherit
+ from this class, specify the variables (inputs, outputs, states,
+ parameters) and override the setup_system() method."""
+
+ config: CasadiModelConfig
+
+ def __init__(self, **kwargs):
+ # Initializes the config
+ super().__init__(**kwargs)
+
+ self.constraints = [] # constraint functions
+ # read constraints, assign ode's and return cost function
+ self.cost_func = self.setup_system()
+ self._assert_outputs_are_defined()
+
+ # save system equations as a single casadi vector
+ system = ca.vertcat(*[sta.ode for sta in self.differentials])
+ # prevents errors in case system is empty
+ self.system = ca.reshape(system, system.shape[0], 1)
+ self.integrator = None # set in initialize()
+ self.initialize()
+
+ def _assert_outputs_are_defined(self):
+ """Raises an Error, if the output variables are not defined with an equation"""
+ for out in self.outputs:
+ if out.alg is None:
+ raise ValueError(
+ f"Output '{out.name}' was not initialized with an equation. Make "
+ f"sure you specify '{out.name}.alg' in 'setup_system()'."
+ )
+
+[docs] def do_step(self, *, t_start, t_sample=None):
+ if t_sample is None:
+ t_sample = self.dt
+ pars = self.get_input_values()
+ t_sim = 0
+ if self.differentials:
+ x0 = self.get_differential_values()
+ curr_x = x0
+ while t_sim < t_sample:
+ result = self.integrator(x0=curr_x, p=pars)
+ t_sim += self.dt
+ curr_x = result["xf"]
+ self.set_differential_values(np.array(result["xf"]).flatten())
+ else:
+ result = self.integrator(p=pars)
+ if self.outputs:
+ self.set_output_values(np.array(result["zf"]).flatten())
+
+ def _make_integrator(self) -> ca.Function:
+ """Creates the integrator to be used in do_step(). The integrator takes the
+ current state and input values as input and returns the state values and
+ algebraic values at the end of the interval."""
+ opts = {"t0": 0, "tf": self.dt}
+ par = ca.vertcat(
+ *[inp.sym for inp in chain.from_iterable([self.inputs, self.parameters])]
+ )
+ x = ca.vertcat(*[sta.sym for sta in self.differentials])
+ z = ca.vertcat(*[var.sym for var in self.outputs])
+ algebraic_equations = ca.vertcat(*self.output_equations)
+
+ if not algebraic_equations.shape[0] and self.differentials:
+ # case of pure ode
+ ode = {"x": x, "p": par, "ode": self.system}
+ integrator = ca.integrator("system", "cvodes", ode, opts)
+ elif algebraic_equations.shape[0] and self.differentials:
+ # mixed dae
+ dae = {
+ "x": x,
+ "p": par,
+ "ode": self.system,
+ "z": z,
+ "alg": algebraic_equations,
+ }
+ integrator = ca.integrator("system", "idas", dae, opts)
+
+ else:
+ # only algebraic equations
+ dae = {
+ "x": ca.MX.sym("dummy", 1),
+ "p": par,
+ "ode": 0,
+ "z": z,
+ "alg": algebraic_equations,
+ }
+ integrator_ = ca.integrator("system", "idas", dae, opts)
+ integrator = ca.Function(
+ "system", [par], [integrator_(x0=0, p=par)["zf"]], ["p"], ["zf"]
+ )
+ return integrator
+
+[docs] def initialize(self, **ignored):
+ """
+ Initializes Casadi model. Creates the integrator to be used in
+ do_step(). The integrator takes the current state and input values as
+ input and returns the state values at the end of the interval and the
+ value of the cost function integrated over the interval.
+ """
+ self.integrator = self._make_integrator()
+
+[docs] def get_constraints(self) -> List[ModelConstraint]:
+ """List of constraints of the form (lower, function, upper)."""
+ base_constraints = [
+ ModelConstraint(lb * 1, func * 1, ub * 1)
+ for lb, func, ub in self.constraints
+ ]
+ equality_constraints = [
+ ModelConstraint(0, alg, 0) for alg in self.output_equations
+ ]
+ return base_constraints + equality_constraints
+
+ @property
+ def inputs(self) -> list[CasadiInput]:
+ """Get all model inputs as a list"""
+ return list(self._inputs.values())
+
+ @property
+ def outputs(self) -> list[CasadiOutput]:
+ """Get all model outputs as a list"""
+ return list(self._outputs.values())
+
+ @property
+ def states(self) -> list[CasadiState]:
+ """Get all model states as a list"""
+ return list(self._states.values())
+
+ @property
+ def parameters(self) -> list[CasadiParameter]:
+ """Get all model parameters as a list"""
+ return list(self._parameters.values())
+
+ @property
+ def output_equations(self) -> List[CasadiTypes]:
+ """List of algebraic equations RHS in the form
+ 0 = z - g(x, z, p, ... )"""
+ return [alg_var - alg_var.alg for alg_var in self.outputs]
+
+ @property
+ def differentials(self) -> List[CasadiState]:
+ """List of all CasadiStates with an associated differential equation."""
+ return [var for var in self.states if var.ode is not None]
+
+ @property
+ def auxiliaries(self) -> List[CasadiState]:
+ """List of all CasadiStates without an associated equation. Common
+ uses for this are slack variables that appear in cost functions and
+ constraints of optimization models."""
+ return [var for var in self.states if var.ode is None]
+
+[docs] @abc.abstractmethod
+ def setup_system(self):
+ raise NotImplementedError(
+ "The ode is defined by the actual models " "inheriting from this class."
+ )
+
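+ # Illustrative sketch of a user-defined model (hypothetical variable names,
+ # not part of the library source), assuming "T" and "u" are declared as state
+ # and input in the config:
+ #
+ #     class MyModel(CasadiModel):
+ #         config: CasadiModelConfig
+ #
+ #         def setup_system(self):
+ #             T = self.get("T")                    # CasadiState
+ #             u = self.get("u")                    # CasadiInput
+ #             T.ode = -0.1 * (T - 293.15) + u      # assign the ODE
+ #             self.constraints = [(0, u, 1)]       # (lower, func, upper)
+ #             return (T - 294.15) ** 2             # cost function
+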
+[docs] def get_input_values(self):
+ return ca.vertcat(
+ *[inp.value for inp in chain.from_iterable([self.inputs, self.parameters])]
+ )
+
+[docs] def get_differential_values(self):
+ return ca.vertcat(*[sta.value for sta in self.differentials])
+
+[docs] def set_differential_values(self, values: Union[List, np.ndarray]):
+ """Sets the values for all differential variables. Provided values list MUST
+ match the order in which differentials are saved, there is no check."""
+ for state, value in zip(self.differentials, values):
+ self._states[state.name].value = value
+
+[docs] def set_output_values(self, values: Union[List, np.ndarray]):
+ """Sets the values for all outputs. Provided values list MUST match the order
+ in which outputs are saved, there is no check."""
+ for var, value in zip(self.outputs, values):
+ self._outputs[var.name].value = value
+
+
+
+ def __setattr__(self, key, value):
+ super().__setattr__(key, value)
+ # todo
+
+
+[docs]def get_symbolic(equation):
+ if isinstance(equation, CasadiVariable):
+ # Converts CasadiVariables to their symbolic variable. Useful in case
+ # CasadiVariables are assigned in equations as is, i.e. their math methods
+ # are not called.
+ return equation.sym
+ else:
+ return equation
+
+import abc
+
+import casadi as ca
+import numpy as np
+
+from enum import Enum
+from keras import layers
+
+from typing import Union, TYPE_CHECKING
+
+from agentlib_mpc.models.serialized_ml_model import (
+ SerializedMLModel,
+ SerializedLinReg,
+ SerializedGPR,
+ SerializedANN,
+ MLModels,
+)
+
+if TYPE_CHECKING:
+ from keras import Sequential
+ from agentlib_mpc.models.serialized_ml_model import CustomGPR
+ from sklearn.linear_model import LinearRegression
+
+
+[docs]class CasadiPredictor(abc.ABC):
+ """
+ Protocol for generic Casadi implementation of various ML-Model-based predictors.
+
+ Attributes:
+ serialized_model: Serialized model which will be translated to a casadi model.
+ predictor_model: Predictor model from other libraries, which are translated to
+ casadi syntax.
+ sym_input: Symbolical input of predictor. Has the necessary shape of the input.
+ prediction_function: Symbolical casadi prediction function of the given model.
+ """
+
+ def __init__(self, serialized_model: SerializedMLModel) -> None:
+ """Initialize Predictor class."""
+ self.serialized_model: SerializedMLModel = serialized_model
+ self.predictor_model: Union[Sequential, CustomGPR, LinearRegression] = (
+ serialized_model.deserialize()
+ )
+ self.sym_input: ca.MX = self._get_sym_input()
+ self.prediction_function: ca.Function = self._build_prediction_function()
+
+[docs] @classmethod
+ def from_serialized_model(cls, serialized_model: SerializedMLModel):
+ """Initialize sub predictor class."""
+ model_type = serialized_model.model_type
+ # todo return type[cls]
+ return casadi_predictors[model_type](serialized_model)
+
+ @property
+ @abc.abstractmethod
+ def input_shape(self) -> tuple[int, int]:
+ """Input shape of Predictor."""
+ pass
+
+ @property
+ def output_shape(self) -> tuple[int, int]:
+ """Output shape of Predictor."""
+ return 1, len(self.serialized_model.output)
+
+ def _get_sym_input(self):
+ """Returns symbolical input object in the required shape."""
+ return ca.MX.sym("input", 1, self.input_shape[1])
+
+ @abc.abstractmethod
+ def _build_prediction_function(self) -> ca.Function:
+ """Build the prediction function with casadi and a symbolic input."""
+ pass
+
+[docs] def predict(self, x: Union[np.ndarray, ca.MX]) -> Union[ca.DM, ca.MX]:
+ """
+ Evaluate prediction function with input data.
+ Args:
+ x: input data.
+ Returns:
+ results of evaluation of prediction function with input data.
+ """
+ return self.prediction_function(x)
+
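+ # Illustrative usage sketch (not part of the library source), assuming a
+ # SerializedMLModel instance "serialized" is available:
+ #
+ #     predictor = CasadiPredictor.from_serialized_model(serialized)
+ #     x = np.ones((1, predictor.input_shape[1]))  # numeric input -> ca.DM
+ #     y = predictor.predict(x)
+ #     # symbolic evaluation works the same way with a ca.MX input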
+
+[docs]class CasadiLinReg(CasadiPredictor):
+ """
+ Generic Casadi implementation of scikit-learn LinearRegression.
+ """
+
+ def __init__(self, serialized_model: SerializedLinReg) -> None:
+ """
+ Initializes CasadiLinReg predictor.
+ Args:
+ serialized_model: SerializedLinReg object.
+ """
+ super().__init__(serialized_model)
+
+ @property
+ def input_shape(self) -> tuple[int, int]:
+ """Input shape of Predictor."""
+ return 1, self.predictor_model.coef_.shape[1]
+
+ def _build_prediction_function(self) -> ca.Function:
+ """Build the prediction function with casadi and a symbolic input."""
+ intercept = self.predictor_model.intercept_
+ coef = self.predictor_model.coef_
+ function = intercept + ca.mtimes(self.sym_input, coef.T)
+ return ca.Function("forward", [self.sym_input], [function])
+
+
+[docs]class CasadiGPR(CasadiPredictor):
+ """
+ Generic implementation of scikit-learn Gaussian Process Regressor.
+ """
+
+ def __init__(self, serialized_model: SerializedGPR) -> None:
+ super().__init__(serialized_model)
+
+ @property
+ def input_shape(self) -> tuple[int, int]:
+ """Input shape of Predictor."""
+ return 1, self.predictor_model.X_train_.shape[1]
+
+ def _build_prediction_function(self) -> ca.Function:
+ """Build the prediction function with casadi and a symbolic input."""
+ normalize = self.predictor_model.data_handling.normalize
+ scale = self.predictor_model.data_handling.scale
+ alpha = self.predictor_model.alpha_
+ if normalize:
+ normalized_inp = self._normalize(self.sym_input)
+ k_star = self._kernel(normalized_inp)
+ else:
+ k_star = self._kernel(self.sym_input)
+ f_mean = ca.mtimes(k_star.T, alpha) * scale
+ return ca.Function("forward", [self.sym_input], [f_mean])
+
+ def _kernel(
+ self,
+ x_test: ca.MX,
+ ) -> ca.MX:
+ """
+ Calculates the kernel between the internal training data and the test data.
+
+ shape(x_test) = (n_samples, n_features)
+ shape(x_train) = (n_samples, n_features)
+ """
+
+ square_distance = self._square_distance(x_test)
+ length_scale = self.predictor_model.kernel_.k1.k2.length_scale
+ constant_value = self.predictor_model.kernel_.k1.k1.constant_value
+ return np.exp((-square_distance / (2 * length_scale**2))) * constant_value
+
+ def _square_distance(self, inp: ca.MX):
+ """
+ Calculates the square distance from x_train to x_test.
+
+ shape(x_test) = (n_test_samples, n_features)
+ shape(x_train) = (n_train_samples, n_features)
+ """
+
+ x_train = self.predictor_model.X_train_
+
+ self._check_shapes(inp, x_train)
+
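+ # expand ||x_test - x_train||^2 into sum(x_test^2) + sum(x_train^2)
+ # - 2 * x_train @ x_test.T, evaluated per sample via broadcasting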
+ a = ca.sum2(inp**2)
+
+ b = ca.np.sum(x_train**2, axis=1, dtype=float).reshape(-1, 1)
+
+ c = -2 * ca.mtimes(x_train, inp.T)
+
+ return a + b + c
+
+ def _normalize(self, x: ca.MX):
+ mean = self.predictor_model.data_handling.mean
+ std = self.predictor_model.data_handling.std
+
+ if mean is None or std is None:
+ raise ValueError("Mean and std must be set when normalization is enabled.")
+
+ return (x - ca.DM(mean).T) / ca.DM(std).T
+
+ def _check_shapes(self, x_test: Union[ca.MX, np.ndarray], x_train: np.ndarray):
+ if x_test.shape[1] != x_train.shape[1]:
+ raise ValueError(
+ f"The shape of x_test {x_test.shape}[1] and x_train {x_train.shape}[1] must match."
+ )
+
+
+###################################
+### ANN ###
+###################################
+
+
+[docs]class ANNLayerTypes(str, Enum):
+ DENSE = "dense"
+ FLATTEN = "flatten"
+ BATCHNORMALIZATION = "batch_normalization"
+ LSTM = "lstm"
+ RESCALING = "rescaling"
+
+
+[docs]class Layer:
+ """
+ Single layer of an artificial neural network.
+ """
+
+ def __init__(self, layer: layers.Layer):
+ self.config = layer.get_config()
+
+ # name
+ if "name" in self.config:
+ self.name = self.config["name"]
+
+ # units
+ if "units" in self.config:
+ self.units = self.config["units"]
+
+ # activation function
+ if "activation" in self.config:
+ self.activation = self.get_activation(layer.get_config()["activation"])
+
+ # input / output shape
+ self.input_shape = layer.input.shape[1:]
+ self.output_shape = layer.output.shape[1:]
+
+ # update the dimensions to two dimensions
+ self.update_dimensions()
+
+ # symbolic input layer
+ self.input_layer = ca.MX.sym(
+ "input_layer", self.input_shape[0], self.input_shape[1]
+ )
+
+ def __str__(self):
+ ret = ""
+
+ if hasattr(self, "units"):
+ ret += f"\tunits:\t\t\t\t{self.units}\n"
+ if hasattr(self, "activation"):
+ ret += f"\tactivation:\t\t\t{self.activation.__str__()}\n"
+ if hasattr(self, "recurrent_activation"):
+ ret += f"\trec_activation:\t\t{self.recurrent_activation.__str__()}\n"
+ ret += f"\tinput_shape:\t\t{self.input_shape}\n"
+ ret += f"\toutput_shape:\t\t{self.output_shape}\n"
+
+ return ret
+
+[docs] def update_dimensions(self):
+ """
+ CasADi only works with two-dimensional arrays, so the dimensions must be updated.
+ """
+
+ if len(self.input_shape) == 1:
+ self.input_shape = (1, self.input_shape[0])
+ elif len(self.input_shape) == 2:
+ self.input_shape = (self.input_shape[0], self.input_shape[1])
+ else:
+ raise ValueError("Please check input dimensions.")
+
+ if len(self.output_shape) == 1:
+ self.output_shape = (1, self.output_shape[0])
+ elif len(self.output_shape) == 2:
+ self.output_shape = (self.output_shape[0], self.output_shape[1])
+ else:
+ raise ValueError("Please check output dimensions.")
+
+[docs] @staticmethod
+ def get_activation(function: str):
+ blank = ca.MX.sym("blank")
+
+ if function == "sigmoid":
+ return ca.Function(function, [blank], [1 / (1 + ca.exp(-blank))])
+
+ if function == "tanh":
+ return ca.Function(function, [blank], [ca.tanh(blank)])
+
+ elif function == "relu":
+ return ca.Function(function, [blank], [ca.fmax(0, blank)])
+
+ elif function == "softplus":
+ return ca.Function(function, [blank], [ca.log(1 + ca.exp(blank))])
+
+ elif function == "gaussian":
+ return ca.Function(function, [blank], [ca.exp(-(blank**2))])
+
+ elif function == "linear":
+ return ca.Function(function, [blank], [blank])
+
+ else:
+ raise ValueError(f"Unknown activation function: {function}")
+
+
+[docs]class Dense(Layer):
+ """
+ Fully connected layer.
+ """
+
+ def __init__(self, layer: layers.Dense):
+ super().__init__(layer)
+
+ # weights and biases
+ self.weights, self.biases = layer.get_weights()
+ self.biases = self.biases.reshape(1, self.biases.shape[0])
+
+ # check input dimension
+ if self.input_shape[1] != self.weights.shape[0]:
+ raise ValueError(
+ f"Please check the input dimensions of this layer. Layer with error: {self.name}"
+ )
+
+[docs] def forward(self, input):
+ # return forward pass
+ return self.activation(input @ self.weights + self.biases)
+
+
+[docs]class Flatten(Layer):
+[docs] def forward(self, input):
+ # flattens the input
+ f = input[0, :]
+ for row in range(1, input.shape[0]):
+ f = ca.horzcat(f, input[row, :])
+
+ return f
+
+
+[docs]class BatchNormalization(Layer):
+ """
+ Batch Normalizing layer. Make sure the axis setting is set to two.
+ """
+
+ def __init__(self, layer: layers.BatchNormalization):
+ super(BatchNormalization, self).__init__(layer)
+
+ # weights and biases
+ self.gamma = ca.np.vstack([layer.get_weights()[0]] * self.input_shape[0])
+ self.beta = ca.np.vstack([layer.get_weights()[1]] * self.input_shape[0])
+ self.mean = ca.np.vstack([layer.get_weights()[2]] * self.input_shape[0])
+ self.var = ca.np.vstack([layer.get_weights()[3]] * self.input_shape[0])
+ self.epsilon = layer.get_config()["epsilon"]
+
+ # check Dimensions
+ if self.input_shape != self.gamma.shape:
+ axis = self.config["axis"][0]
+ raise ValueError(f"Dimension mismatch. Normalized axis: {axis}")
+
+ # symbolic input layer
+ self.input_layer = ca.MX.sym(
+ "input_layer", self.input_shape[0], self.input_shape[1]
+ )
+
+[docs] def forward(self, input):
+ # forward pass
+ f = (input - self.mean) / (
+ ca.sqrt(self.var + self.epsilon)
+ ) * self.gamma + self.beta
+
+ return f
+
+
+[docs]class LSTM(Layer):
+ """
+ Long Short Term Memory cell.
+ """
+
+ def __init__(self, layer: layers.LSTM):
+ super(LSTM, self).__init__(layer)
+
+ # recurrent activation
+ self.recurrent_activation = self.get_activation(
+ layer.get_config()["recurrent_activation"]
+ )
+
+ # load weights and biases
+ W = layer.get_weights()[0]
+ U = layer.get_weights()[1]
+ b = layer.get_weights()[2]
+
+ # weights (kernel)
+ self.W_i = W[:, : self.units]
+ self.W_f = W[:, self.units : self.units * 2]
+ self.W_c = W[:, self.units * 2 : self.units * 3]
+ self.W_o = W[:, self.units * 3 :]
+
+ # weights (recurrent kernel)
+ self.U_i = U[:, : self.units]
+ self.U_f = U[:, self.units : self.units * 2]
+ self.U_c = U[:, self.units * 2 : self.units * 3]
+ self.U_o = U[:, self.units * 3 :]
+
+ # biases
+ self.b_i = ca.np.expand_dims(b[: self.units], axis=0)
+ self.b_f = ca.np.expand_dims(b[self.units : self.units * 2], axis=0)
+ self.b_c = ca.np.expand_dims(b[self.units * 2 : self.units * 3], axis=0)
+ self.b_o = ca.np.expand_dims(b[self.units * 3 :], axis=0)
+
+ # initial memory and output
+ self.h_0 = ca.np.zeros((1, self.units))
+ self.c_0 = ca.np.zeros((1, self.units))
+
+[docs] def forward(self, input):
+ # check input shape
+ if input.shape != self.input_shape:
+ raise ValueError("Dimension mismatch!")
+
+ # initial
+ c = self.c_0
+ h = self.h_0
+
+ # number of time steps
+ steps = self.input_shape[0]
+
+ # forward pass
+ for i in range(steps):
+ # input for the current step
+ x = input[i, :]
+
+ # calculate memory(c) and output(h)
+ c, h = self.step(x, c, h)
+
+ # here the output has to be transposed, because of the dense layer implementation
+ return h
+
+[docs] def step(self, x_t, c_prev, h_prev):
+ # gates
+ i_t = self.recurrent_activation(x_t @ self.W_i + h_prev @ self.U_i + self.b_i)
+ f_t = self.recurrent_activation(x_t @ self.W_f + h_prev @ self.U_f + self.b_f)
+ o_t = self.recurrent_activation(x_t @ self.W_o + h_prev @ self.U_o + self.b_o)
+ c_t = self.activation(x_t @ self.W_c + h_prev @ self.U_c + self.b_c)
+
+ # memory and output
+ c_next = f_t * c_prev + i_t * c_t
+ h_next = o_t * self.activation(c_next)
+
+ return c_next, h_next
+
+
+[docs]class CasadiANN(CasadiPredictor):
+ """
+ Generic implementations of sequential Keras models in CasADi.
+ """
+
+ def __init__(self, serialized_model: SerializedANN):
+ """
+ Supported layers:
+ - Dense (Fully connected layer)
+ - Flatten (Reduces the input dimension to 1)
+ - BatchNormalizing (Normalization)
+ - LSTM (Recurrent Cell)
+ - Rescaling
+ Args:
+ serialized_model: SerializedANN Model.
+ """
+ super().__init__(serialized_model)
+
+ @property
+ def input_shape(self) -> tuple[int, int]:
+ """Input shape of Predictor."""
+ return 1, self.predictor_model.input_shape[1]
+
+ def _build_prediction_function(self) -> ca.Function:
+ """Build the prediction function with casadi and a symbolic input."""
+ keras_layers = list(self.predictor_model.layers)
+ casadi_layers = []
+ for keras_layer in keras_layers:
+ name = keras_layer.get_config()["name"]
+ for layer_type in ANNLayerTypes:
+ if layer_type.value in name:
+ casadi_layers.append(ann_layer_types[layer_type.value](keras_layer))
+ break # each keras layer matches at most one layer type
+ function = self.sym_input
+ for casadi_layer in casadi_layers:
+ function = casadi_layer.forward(function)
+ return ca.Function("forward", [self.sym_input], [function])
+
+
+ann_layer_types = {
+ ANNLayerTypes.DENSE: Dense,
+ ANNLayerTypes.FLATTEN: Flatten,
+ ANNLayerTypes.BATCHNORMALIZATION: BatchNormalization,
+ ANNLayerTypes.LSTM: LSTM,
+}
+
+casadi_predictors = {
+ MLModels.ANN: CasadiANN,
+ MLModels.GPR: CasadiGPR,
+ MLModels.LINREG: CasadiLinReg,
+}
+
+import abc
+import json
+import logging
+import subprocess
+
+import numpy as np
+
+from enum import Enum
+from copy import deepcopy
+from keras import Sequential
+from pathlib import Path
+from pydantic import ConfigDict, Field, BaseModel
+from sklearn.gaussian_process import GaussianProcessRegressor
+from sklearn.gaussian_process.kernels import ConstantKernel, WhiteKernel, RBF
+from sklearn.linear_model import LinearRegression
+from typing import Union, Optional
+
+from agentlib_mpc.data_structures.ml_model_datatypes import OutputFeature, Feature
+
+logger = logging.getLogger(__name__)
+
+
+[docs]def get_git_revision_short_hash() -> str:
+ return (
+ subprocess.check_output(["git", "rev-parse", "--short", "HEAD"])
+ .decode("ascii")
+ .strip()
+ )
+
+
+
+
+
+[docs]class SerializedMLModel(BaseModel, abc.ABC):
+ dt: Union[float, int] = Field(
+ title="dt",
+ description="The length of time step of one prediction of Model in seconds.",
+ )
+ input: dict[str, Feature] = Field(
+ default=None,
+ title="input",
+ description="Model input variables with their lag order.",
+ )
+ output: dict[str, OutputFeature] = Field(
+ default=None,
+ title="output",
+ description="Model output variables (which are automatically also inputs, as "
+ "we need them recursively in MPC.) with their lag order.",
+ )
+ agentlib_mpc_hash: str = Field(
+ default_factory=get_git_revision_short_hash,
+ description="The commit hash of the agentlib_mpc version this was created with.",
+ )
+ training_info: Optional[dict] = Field(
+ default=None,
+ title="Training Info",
+ description="Config of Trainer class with all the meta data used for training of the Model.",
+ )
+ model_type: MLModels
+ model_config = ConfigDict(protected_namespaces=())
+
+[docs] @classmethod
+ @abc.abstractmethod
+ def serialize(
+ cls,
+ model: Union[Sequential, GaussianProcessRegressor, LinearRegression],
+ dt: Union[float, int],
+ input: dict[str, Feature],
+ output: dict[str, OutputFeature],
+ training_info: Optional[dict] = None,
+ ):
+ """
+ Args:
+ model: Machine Learning Model.
+ dt: The length of time step of one prediction of Model in seconds.
+ input: Model input variables with their lag order.
+ output: Model output variables (which are automatically also inputs, as
+ we need them recursively in MPC.) with their lag order.
+ training_info: Config of Trainer Class, which trained the Model.
+ Returns:
+ SerializedMLModel version of the passed ML Model.
+ """
+ pass
+
+[docs] @abc.abstractmethod
+ def deserialize(self):
+ """
+ Deserializes SerializedMLModel object and returns a specific Machine Learning Model object.
+ Returns:
+ MLModel: Machine Learning Model.
+ """
+ pass
+
+[docs] def save_serialized_model(self, path: Path):
+ """
+ Saves MLModel object as json string.
+ Args:
+ path: relative/absolute path which determines where the json will be saved.
+ """
+ path.parent.mkdir(parents=True, exist_ok=True)
+ with open(path, "w") as f:
+ f.write(self.model_dump_json())
+
+ # log the file path under which the json file has been saved
+ logger.info(f"Model has been saved under the following path: {path}")
+
+[docs] @classmethod
+ def load_serialized_model_from_file(cls, path: Path):
+ """
+ Loads SerializedMLModel object from a json file and creates a new specific Machine Learning Model object
+ which is returned.
+
+ Args:
+ path: relative/absolute path which determines which json file will be loaded.
+ Returns:
+ SerializedMLModel object with data from json file.
+ """
+ with open(path, "r") as json_file:
+ model_data = json.load(json_file)
+ return cls.load_serialized_model_from_dict(model_data)
+
+[docs] @classmethod
+ def load_serialized_model_from_string(cls, json_string: str):
+ """
+ Loads SerializedMLModel object from a json string and creates a new specific Machine Learning Model object
+ which is returned.
+
+ Args:
+ json_string: json string which will be loaded.
+ Returns:
+ SerializedMLModel object with data from json file.
+ """
+ model_data = json.loads(json_string)
+ return cls.load_serialized_model_from_dict(model_data)
+
+[docs] @classmethod
+ def load_serialized_model_from_dict(cls, model_data: dict):
+ """
+ Loads SerializedMLModel object from a dict and creates a new specific Machine Learning Model object
+ which is returned.
+
+ Args:
+ model_data: dict which contains the model data.
+ Returns:
+ SerializedMLModel object with data from json file.
+ """
+ model_type = model_data["model_type"]
+ return serialized_models[model_type](**model_data)
+
+[docs] @classmethod
+ def load_serialized_model(cls, model_data: Union[dict, str, Path]):
+ """Loads the ML model from a source"""
+ if isinstance(model_data, dict):
+ return cls.load_serialized_model_from_dict(model_data)
+ if isinstance(model_data, (str, Path)):
+ if Path(model_data).exists():
+ return cls.load_serialized_model_from_file(model_data)
+ return cls.load_serialized_model_from_string(model_data)
+
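+ # Illustrative round trip (hypothetical paths, not part of the library source):
+ #
+ #     serialized = SerializedMLModel.load_serialized_model(Path("model.json"))
+ #     ml_model = serialized.deserialize()          # e.g. a Keras Sequential
+ #     serialized.save_serialized_model(Path("copy.json"))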
+
+[docs]class SerializedANN(SerializedMLModel):
+ """
+ Contains Keras ANN in serialized form and offers functions to transform
+ Keras Sequential ANNs to SerializedANN objects (serialize) and vice versa (deserialize).
+
+ Attributes:
+ structure: architecture/structure of ANN saved as json string.
+ weights: weights and biases of all layers saved as lists of np.ndarrays.
+ """
+
+ weights: list[list] = Field(
+ default=None,
+ title="weights",
+ description="The weights of the ANN.",
+ )
+ structure: str = Field(
+ default=None,
+ title="structure",
+ description="The structure of the ANN as json string.",
+ )
+ model_config = ConfigDict(arbitrary_types_allowed=True)
+ model_type: MLModels = MLModels.ANN
+
+[docs] @classmethod
+ def serialize(
+ cls,
+ model: Sequential,
+ dt: Union[float, int],
+ input: dict[str, Feature],
+ output: dict[str, OutputFeature],
+ training_info: Optional[dict] = None,
+ ):
+ """Serializes Keras Sequential ANN and returns SerializedANN object"""
+ structure = model.to_json()
+ weights = []
+ for layer in model.layers:
+ weight_l = layer.get_weights()
+ for idx in range(len(weight_l)):
+ weight_l[idx] = weight_l[idx].tolist()
+ weights.append(weight_l)
+
+ return cls(
+ structure=structure,
+ weights=weights,
+ dt=dt,
+ input=input,
+ output=output,
+ training_info=training_info,
+ )
+
+[docs] def deserialize(self) -> Sequential:
+ """Deserializes SerializedANN object and returns a Keras Sequential ANN."""
+ from keras import models
+
+ ann = models.model_from_json(self.structure)
+ layer_weights = []
+ for layer in self.weights:
+ l_weight = []
+ layer_weights.append(l_weight)
+ for matrix in layer:
+ l_weight.append(np.asarray(matrix))
+
+ for i, layer in enumerate(ann.layers):
+ layer.set_weights(layer_weights[i])
+ return ann
+
+[docs] def to_dict(self) -> dict:
+ """Transforms self to a dictionary and the numpy arrays to lists, so they can
+ be serialized."""
+ ann_dict = deepcopy(self.__dict__)
+ for layer in ann_dict["weights"]:
+ for idx in range(0, len(layer)):
+ layer[idx] = layer[idx].tolist()
+ return ann_dict
+
+
+[docs]class GPRDataHandlingParameters(BaseModel):
+ normalize: bool = Field(
+ default=False,
+ title="normalize",
+ description="Boolean which defines whether the input data will be normalized or not.",
+ )
+ scale: float = Field(
+ default=1.0,
+ title="scale",
+ description="Number by which the y vector is divided before training and multiplied after evaluation.",
+ )
+ mean: Optional[list] = Field(
+ default=None,
+ title="mean",
+ description="Mean values of input data for normalization. None if normalize equals to False.",
+ )
+ std: Optional[list] = Field(
+ default=None,
+ title="standard deviation",
+ description="Standard deviation of input data for normalization. None if normalize equals to False.",
+ )
+
+
+[docs]class CustomGPR(GaussianProcessRegressor):
+ """
+ Extends scikit-learn GaussianProcessRegressor with normalizing and scaling option
+ by adding the attribute data_handling, customizing the predict function accordingly
+ and adding a normalize function.
+ """
+
+ def __init__(
+ self,
+ kernel=None,
+ *,
+ alpha=1e-10,
+ optimizer="fmin_l_bfgs_b",
+ n_restarts_optimizer=0,
+ normalize_y=False,
+ copy_X_train=True,
+ random_state=None,
+ data_handling=GPRDataHandlingParameters(),
+ ):
+ super().__init__(
+ kernel=kernel,
+ alpha=alpha,
+ optimizer=optimizer,
+ n_restarts_optimizer=n_restarts_optimizer,
+ normalize_y=normalize_y,
+ copy_X_train=copy_X_train,
+ random_state=random_state,
+ )
+ self.data_handling: GPRDataHandlingParameters = data_handling
+
+[docs] def predict(self, X, return_std=False, return_cov=False):
+ """
+ Overwrite predict method of GaussianProcessRegressor to include normalization.
+ """
+ if self.data_handling.normalize:
+ X = self._normalize(X)
+ return super().predict(X, return_std, return_cov)
+
+ def _normalize(self, x: np.ndarray):
+ mean = self.data_handling.mean
+ std = self.data_handling.std
+
+ if mean is None or std is None:
+ raise ValueError("Mean and std must be set when normalization is enabled.")
+
+ return (x - mean) / std
+
+
+[docs]class GPRKernelParameters(BaseModel):
+ constant_value: float = Field(
+ default=1.0,
+ title="constant value",
+ description="The constant value which defines the covariance: k(x_1, x_2) = constant_value.",
+ )
+ constant_value_bounds: Union[tuple, str] = Field(
+ default=(1e-5, 1e5),
+ title="constant value bounds",
+ description="The lower and upper bound on constant_value. If set to “fixed”, "
+ "constant_value cannot be changed during hyperparameter tuning.",
+ )
+ length_scale: Union[float, list] = Field(
+ default=1.0,
+ title="length_scale",
+ description="The length scale of the kernel. If a float, an isotropic kernel "
+ "is used. If an array, an anisotropic kernel is used where each "
+ "dimension of l defines the length-scale of the respective feature "
+ "dimension.",
+ )
+ length_scale_bounds: Union[tuple, str] = Field(
+ default=(1e-5, 1e5),
+ title="length_scale_bounds",
+ description="The lower and upper bound on ‘length_scale’. If set to “fixed”, "
+ "‘length_scale’ cannot be changed during hyperparameter tuning.",
+ )
+ noise_level: float = Field(
+ default=1.0,
+ title="noise level",
+ description="Parameter controlling the noise level (variance).",
+ )
+ noise_level_bounds: Union[tuple, str] = Field(
+ default=(1e-5, 1e5),
+ title="noise level bounds",
+ description="The lower and upper bound on ‘noise_level’. If set to “fixed”, "
+ "‘noise_level’ cannot be changed during hyperparameter tuning.",
+ )
+ theta: list = Field(
+ title="theta",
+ description="Returns the (flattened, log-transformed) non-fixed gpr_parameters.",
+ )
+ model_config = ConfigDict(arbitrary_types_allowed=True)
+
+[docs] @classmethod
+ def from_model(cls, model: CustomGPR) -> "GPRKernelParameters":
+ return cls(
+ constant_value=model.kernel_.k1.k1.constant_value,
+ constant_value_bounds=model.kernel_.k1.k1.constant_value_bounds,
+ length_scale=model.kernel_.k1.k2.length_scale,
+ length_scale_bounds=model.kernel_.k1.k2.length_scale_bounds,
+ noise_level=model.kernel_.k2.noise_level,
+ noise_level_bounds=model.kernel_.k2.noise_level_bounds,
+ theta=model.kernel_.theta.tolist(),
+ )
+
+
+[docs]class GPRParameters(BaseModel):
+ alpha: Union[float, list] = Field(
+ default=1e-10,
+ title="alpha",
+ description="Value added to the diagonal of the kernel matrix during fitting. "
+ "This can prevent a potential numerical issue during fitting, by "
+ "ensuring that the calculated values form a positive definite matrix. "
+ "It can also be interpreted as the variance of additional Gaussian "
+ "measurement noise on the training observations. Note that this is "
+ "different from using a WhiteKernel. If an array is passed, it must "
+ "have the same number of entries as the data used for fitting and is "
+ "used as datapoint-dependent noise level. Allowing to specify the "
+ "noise level directly as a parameter is mainly for convenience and "
+ "for consistency with Ridge.",
+ )
+ L: list = Field(
+ title="L",
+ description="Lower-triangular Cholesky decomposition of the kernel in X_train.",
+ )
+ X_train: list = Field(
+ title="X_train",
+ description="Feature vectors or other representations of training data (also "
+ "required for prediction).",
+ )
+ y_train: list = Field(
+ title="y_train",
+ description="Target values in training data (also required for prediction).",
+ )
+ n_features_in: int = Field(
+ title="number of input features",
+ description="Number of features seen during fit.",
+ )
+ log_marginal_likelihood_value: float = Field(
+ title="log marginal likelihood value",
+ description="The log-marginal-likelihood of self.kernel_.theta.",
+ )
+ model_config = ConfigDict(arbitrary_types_allowed=True)
+
+[docs] @classmethod
+ def from_model(cls, model: CustomGPR) -> "GPRParameters":
+ return cls(
+ alpha=model.alpha_.tolist(),
+ L=model.L_.tolist(),
+ X_train=model.X_train_.tolist(),
+ y_train=model.y_train_.tolist(),
+ n_features_in=model.n_features_in_,
+ log_marginal_likelihood_value=model.log_marginal_likelihood_value_,
+ )
+
+
+[docs]class SerializedGPR(SerializedMLModel):
+ """
+ Contains scikit-learn GaussianProcessRegressor and its Kernel and provides functions to transform
+ these to SerializedGPR objects and vice versa.
+ """
+
+ data_handling: GPRDataHandlingParameters = Field(
+ default=None,
+ title="data_handling",
+ description="Information about data handling for GPR.",
+ )
+ kernel_parameters: GPRKernelParameters = Field(
+ default=None,
+ title="kernel parameters",
+ description="Parameters of kernel of the fitted GPR.",
+ )
+ gpr_parameters: GPRParameters = Field(
+ default=None,
+ title="gpr_parameters",
+ description=" GPR parameters of GPR and its Kernel and Data of fitted GPR.",
+ )
+ model_config = ConfigDict(arbitrary_types_allowed=True)
+ model_type: MLModels = MLModels.GPR
+
+[docs] @classmethod
+ def serialize(
+ cls,
+ model: CustomGPR,
+ dt: Union[float, int],
+ input: dict[str, Feature],
+ output: dict[str, OutputFeature],
+ training_info: Optional[dict] = None,
+ ):
+ """
+
+ Args:
+ model: GaussianProcessRegressor from ScikitLearn.
+ dt: The length of time step of one prediction of GPR in seconds.
+ input: GPR input variables with their lag order.
+ output: GPR output variables (which are automatically also inputs, as
+ we need them recursively in MPC.) with their lag order.
+ training_info: Config of Trainer Class, which trained the Model.
+
+ Returns:
+ SerializedGPR version of the passed GPR.
+ """
+ if not all(
+ hasattr(model, attr)
+ for attr in ["kernel_", "alpha_", "L_", "X_train_", "y_train_"]
+ ):
+ raise ValueError(
+ "To serialize a GPR, a fitted GPR must be passed, "
+ "but an unfitted GPR has been passed here."
+ )
+ kernel_parameters = GPRKernelParameters.from_model(model)
+ gpr_parameters = GPRParameters.from_model(model)
+ return cls(
+ dt=dt,
+ input=input,
+ output=output,
+ data_handling=model.data_handling,
+ kernel_parameters=kernel_parameters,
+ gpr_parameters=gpr_parameters,
+ training_info=training_info,
+ )
+
+[docs] def deserialize(self) -> CustomGPR:
+ """
+ Deserializes SerializedGPR object and returns a scikit-learn GaussianProcessRegressor.
+ Returns:
+ gpr_fitted: GPR version of the SerializedGPR
+ """
+ # Create unfitted GPR with standard Kernel and standard Parameters and Hyperparameters.
+ kernel = ConstantKernel() * RBF() + WhiteKernel()
+ gpr_unfitted = CustomGPR(
+ kernel=kernel,
+ copy_X_train=False,
+ )
+ # make basic fit for GPR
+ gpr_fitted = self._basic_fit(gpr=gpr_unfitted)
+ # update kernel parameters
+ gpr_fitted.kernel_.k1.k1.constant_value = self.kernel_parameters.constant_value
+ gpr_fitted.kernel_.k1.k1.constant_value_bounds = (
+ self.kernel_parameters.constant_value_bounds
+ )
+ gpr_fitted.kernel_.k1.k2.length_scale = self.kernel_parameters.length_scale
+ gpr_fitted.kernel_.k1.k2.length_scale_bounds = (
+ self.kernel_parameters.length_scale_bounds
+ )
+ gpr_fitted.kernel_.k2.noise_level = self.kernel_parameters.noise_level
+ gpr_fitted.kernel_.k2.noise_level_bounds = (
+ self.kernel_parameters.noise_level_bounds
+ )
+ gpr_fitted.kernel_.theta = np.array(self.kernel_parameters.theta)
+ # update gpr_parameters
+ gpr_fitted.L_ = np.array(self.gpr_parameters.L)
+ gpr_fitted.X_train_ = np.array(self.gpr_parameters.X_train)
+ gpr_fitted.y_train_ = np.array(self.gpr_parameters.y_train)
+ gpr_fitted.alpha_ = np.array(self.gpr_parameters.alpha)
+ gpr_fitted.n_features_in_ = np.array(self.gpr_parameters.n_features_in)
+ gpr_fitted.log_marginal_likelihood_value_ = np.array(
+ self.gpr_parameters.log_marginal_likelihood_value
+ )
+ # update data handling
+ gpr_fitted.data_handling.normalize = self.data_handling.normalize
+ gpr_fitted.data_handling.scale = self.data_handling.scale
+ if self.data_handling.mean:
+ gpr_fitted.data_handling.mean = np.array(self.data_handling.mean)
+ if self.data_handling.std:
+ gpr_fitted.data_handling.std = np.array(self.data_handling.std)
+ return gpr_fitted
+
+ def _basic_fit(self, gpr: GaussianProcessRegressor):
+ """
+ Runs a dummy fit so the GPR can afterwards be populated with
+ kernel_parameters and gpr_parameters, making it an effectively fitted model.
+ Args:
+ gpr: Unfitted GPR to fit
+ Returns:
+ gpr: fitted GPR
+ """
+ x = np.ones((1, len(self.input)))
+ y = np.ones((1, len(self.output)))
+ gpr.fit(
+ X=x,
+ y=y,
+ )
+ return gpr
+
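+ # Illustrative round trip (hypothetical data, not part of the library source):
+ #
+ #     gpr = CustomGPR(kernel=ConstantKernel() * RBF() + WhiteKernel())
+ #     gpr.fit(X, y)                                   # X, y: training data
+ #     serialized = SerializedGPR.serialize(gpr, dt=60, input=inp, output=out)
+ #     restored = serialized.deserialize()             # fitted CustomGPR again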
+
+[docs]class LinRegParameters(BaseModel):
+ coef: list = Field(
+ title="coefficients",
+ description="Estimated coefficients for the linear regression problem. If multiple targets are passed during the fit (y 2D), this is a 2D array of shape (n_targets, n_features), while if only one target is passed, this is a 1D array of length n_features.",
+ )
+ intercept: Union[float, list] = Field(
+ title="intercept",
+ description="Independent term in the linear model. Set to 0.0 if fit_intercept = False.",
+ )
+ n_features_in: int = Field(
+ title="number of input features",
+ description="Number of features seen during fit.",
+ )
+ rank: int = Field(
+ title="rank",
+ description="Rank of matrix X. Only available when X is dense.",
+ )
+ singular: list = Field(
+ title="singular",
+ description="Singular values of X. Only available when X is dense.",
+ )
+
+
+[docs]class SerializedLinReg(SerializedMLModel):
+ """
+ Contains scikit-learn LinearRegression and provides functions to transform
+ these to SerializedLinReg objects and vice versa.
+
+ Attributes:
+ parameters: Parameters of the fitted linear regression model.
+
+ """
+
+ parameters: LinRegParameters = Field(
+ title="parameters",
+ description="Parameters of kernel of the fitted linear model.",
+ )
+ model_config = ConfigDict(arbitrary_types_allowed=True)
+ model_type: MLModels = MLModels.LINREG
+
+[docs] @classmethod
+ def serialize(
+ cls,
+ model: LinearRegression,
+ dt: Union[float, int],
+ input: dict[str, Feature],
+ output: dict[str, OutputFeature],
+ training_info: Optional[dict] = None,
+ ):
+ """
+
+ Args:
+ model: LinearRegression from ScikitLearn.
+ dt: The length of time step of one prediction of LinReg in seconds.
+ input: LinReg input variables with their lag order.
+ output: LinReg output variables (which are automatically also inputs, as
+ we need them recursively in MPC) with their lag order.
+ training_info: Config of Trainer Class, which trained the Model.
+
+ Returns:
+ SerializedLinReg version of the passed linear model.
+ """
+ if not all(
+ hasattr(model, attr)
+ for attr in ["coef_", "intercept_", "n_features_in_", "rank_", "singular_"]
+ ):
+ raise ValueError(
+ "To serialize a GPR, a fitted GPR must be passed, "
+ "but an unfitted GPR has been passed here."
+ )
+ parameters = {
+ "coef": model.coef_.tolist(),
+ "intercept": model.intercept_.tolist(),
+ "n_features_in": model.n_features_in_,
+ "rank": model.rank_,
+ "singular": model.singular_.tolist(),
+ }
+ parameters = LinRegParameters(**parameters)
+ return cls(
+ dt=dt,
+ input=input,
+ output=output,
+ parameters=parameters,
+ trainer_config=training_info,
+ )
+
+[docs] def deserialize(self) -> LinearRegression:
+ """
+ Deserializes SerializedLinReg object and returns a LinearRegression object of scikit-learn.
+ Returns:
+ linear_model_fitted: LinearRegression version of the SerializedLinReg
+ """
+ linear_model_unfitted = LinearRegression()
+ linear_model_fitted = self._basic_fit(linear_model=linear_model_unfitted)
+ # update parameters
+ linear_model_fitted.coef_ = np.array(self.parameters.coef)
+ linear_model_fitted.intercept_ = np.array(self.parameters.intercept)
+ linear_model_fitted.n_features_in_ = self.parameters.n_features_in
+ linear_model_fitted.rank_ = self.parameters.rank
+ linear_model_fitted.singular_ = np.array(self.parameters.singular)
+ return linear_model_fitted
+
+ def _basic_fit(self, linear_model: LinearRegression):
+ """
+ Runs a minimal dummy fit so the linear model can afterwards be populated
+ with the serialized parameters, making it an effectively fitted model.
+ Args:
+ linear_model: Unfitted linear model to fit.
+ Returns:
+ linear_model: fitted linear model.
+ """
+ x = np.ones((1, len(self.input)))
+ y = np.ones((1, len(self.output)))
+ linear_model.fit(
+ X=x,
+ y=y,
+ )
+ return linear_model
+
+
+serialized_models = {
+ MLModels.ANN: SerializedANN,
+ MLModels.GPR: SerializedGPR,
+ MLModels.LINREG: SerializedLinReg,
+}
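+
+# A minimal usage sketch (illustrative, not part of the library): the registry
+# above resolves the serializer class for a model type, and every serializer
+# offers the same serialize()/deserialize() round trip. The `inputs_` and
+# `outputs_` mappings are placeholders here, since the Feature/OutputFeature
+# constructors are not shown in this listing.
+#
+#     from sklearn.linear_model import LinearRegression
+#     import numpy as np
+#
+#     model = LinearRegression().fit(np.ones((4, 2)), np.ones(4))
+#     serializer_cls = serialized_models[MLModels.LINREG]  # SerializedLinReg
+#     serialized = serializer_cls.serialize(
+#         model=model, dt=600, input=inputs_, output=outputs_
+#     )
+#     restored = serialized.deserialize()  # LinearRegression with same coef_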
+
+"""
+This package contains all modules for
+distributed model predictive control using multi-agent systems.
+
+It contains classes for local optimization and global coordination.
+"""
+
+import importlib
+
+
+[docs]class ModuleImport:
+ def __init__(self, module_path: str, class_name: str):
+ self.module_path = module_path
+ self.class_name = class_name
+
+[docs] def import_class(self):
+ module = importlib.import_module(self.module_path)
+ return getattr(module, self.class_name)
+
+
+MODULE_TYPES = {
+ "data_source": ModuleImport(
+ module_path="agentlib_mpc.modules.data_source", class_name="DataSource"
+ ),
+ "mpc_basic": ModuleImport(
+ module_path="agentlib_mpc.modules.mpc", class_name="BaseMPC"
+ ),
+ "mpc": ModuleImport(module_path="agentlib_mpc.modules.mpc_full", class_name="MPC"),
+ "minlp_mpc": ModuleImport(
+ module_path="agentlib_mpc.modules.minlp_mpc", class_name="MINLPMPC"
+ ),
+ "admm": ModuleImport(
+ module_path="agentlib_mpc.modules.dmpc.admm.admm", class_name="ADMM"
+ ),
+ "admm_local": ModuleImport(
+ module_path="agentlib_mpc.modules.dmpc.admm.admm", class_name="LocalADMM"
+ ),
+ "admm_coordinated": ModuleImport(
+ module_path="agentlib_mpc.modules.dmpc.admm.admm_coordinated",
+ class_name="CoordinatedADMM",
+ ),
+ "admm_coordinator": ModuleImport(
+ module_path="agentlib_mpc.modules.dmpc.admm.admm_coordinator",
+ class_name="ADMMCoordinator",
+ ),
+ "ann_trainer": ModuleImport(
+ module_path="agentlib_mpc.modules.ml_model_training.ml_model_trainer",
+ class_name="ANNTrainer",
+ ),
+ "gpr_trainer": ModuleImport(
+ module_path="agentlib_mpc.modules.ml_model_training.ml_model_trainer",
+ class_name="GPRTrainer",
+ ),
+ "linreg_trainer": ModuleImport(
+ module_path="agentlib_mpc.modules.ml_model_training.ml_model_trainer",
+ class_name="LinRegTrainer",
+ ),
+ "ann_simulator": ModuleImport(
+ module_path="agentlib_mpc.modules.ann_simulator",
+ class_name="MLModelSimulator",
+ ),
+ "set_point_generator": ModuleImport(
+ module_path="agentlib_mpc.modules.ml_model_training.setpoint_generator",
+ class_name="SetPointGenerator",
+ ),
+ "mhe": ModuleImport(
+ module_path="agentlib_mpc.modules.estimation.mhe", class_name="MHE"
+ ),
+}
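+
+# Illustrative sketch: ModuleImport defers each import until the class is
+# actually requested, so optional dependencies are only loaded when needed.
+# Assuming agentlib_mpc is installed, resolving a module type works like this:
+#
+#     mpc_class = MODULE_TYPES["mpc"].import_class()
+#     # -> <class 'agentlib_mpc.modules.mpc_full.MPC'>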
+
+import pandas as pd
+import pydantic
+from agentlib.core import AgentVariables, AgentVariable
+from agentlib.modules.utils.try_sensor import TRYSensorConfig, TRYSensor
+
+
+[docs]class TRYPredictorConfig(TRYSensorConfig):
+ prediction_length: int = pydantic.Field(
+ default=24, description="prediction length in hours"
+ )
+
+ predictions: AgentVariables = [
+ AgentVariable(
+ name="T_oda_prediction",
+ unit="K",
+ description="Air temperature 2m over ground [K]",
+ ),
+ AgentVariable(
+ name="pressure_prediction",
+ unit="hPa",
+ description="Air pressure in standard height [hPa]",
+ ),
+ AgentVariable(
+ name="wind_direction_prediction",
+ unit="°",
+ description="Wind direction 10 m above gorund " "[Grad] {0..360;999}",
+ ),
+ AgentVariable(
+ name="wind_speed_prediction",
+ unit="m/s",
+ description="Wind speed 10 m above ground [m/s]",
+ ),
+ AgentVariable(
+ name="coverage_prediction", unit="eighth", description="[eighth] {0..8;9}"
+ ),
+ AgentVariable(
+ name="absolute_humidity_prediction", unit="g/kg", description="[g/kg]"
+ ),
+ AgentVariable(
+ name="relative_humidity_prediction",
+ unit="%",
+ description="Relative humidity 2 m above ground " "[%] {1..100}",
+ ),
+ AgentVariable(
+ name="beam_direct_prediction",
+ unit="W/m^2",
+ description="Direct beam of sun (hor. plane) "
+ "[W/m^2] downwards: positive",
+ ),
+ AgentVariable(
+ name="beam_diffuse_prediction",
+ unit="/m^2",
+ description="Diffuse beam of sun (hor. plane) "
+ "[W/m^2] downwards: positive",
+ ),
+ AgentVariable(
+ name="beam_atm_prediction",
+ unit="/m^2",
+ description="Beam of atmospheric heat (hor. plane) "
+ "[W/m^2] downwards: positive",
+ ),
+ AgentVariable(
+ name="beam_terr_prediction",
+ unit="/m^2",
+ description="Beam of terrestrial heat " "[W/m^2] upwards: negative",
+ ),
+ ]
+
+
+[docs]class TRYPredictor(TRYSensor):
+ config: TRYPredictorConfig
+ _data: pd.DataFrame
+
+[docs] def process(self):
+ while True:
+ self.send_prediction()
+ self.send_measurement()
+ yield self.env.timeout(self.t_sample)
+
+[docs] def send_prediction(self):
+ # pandas >= 2.0 removed get_loc(method=...); use get_indexer instead
+ start_loc = self._data.index.get_indexer([self.env.now], method="pad")[0]
+ start_time = self._data.index[start_loc]
+ end_time = start_time + self.config.prediction_length * 3600
+ for measurement_name, measurement_data in self._data.items():
+ self.set(measurement_name, measurement_data.loc[start_time:end_time])
+
+[docs] def send_measurement(self):
+ data = self.get_data_now()
+ for key, val in data.items():
+ self.set(name=key, value=val)
+
+from pathlib import Path
+
+import pandas as pd
+import numpy as np
+import logging
+from pydantic import Field, field_validator, FilePath
+from typing import List, Optional, Union
+from datetime import datetime, timedelta
+
+from agentlib.core import BaseModule, BaseModuleConfig, AgentVariable
+from agentlib import Environment, Agent
+from agentlib_mpc.data_structures.interpolation import InterpolationMethods
+
+
+[docs]class DataSourceConfig(BaseModuleConfig):
+ data: Union[pd.DataFrame, FilePath] = Field(
+ title="data",
+ default=pd.DataFrame(),
+ description="Data that should be communicated during execution."
+ "Index should be either numeric or Datetime, numeric values are interpreted as seconds.",
+ validate_default=True,
+ )
+ columns: Optional[List[str]] = Field(
+ title="columns",
+ default=None,
+ description="Optional list of columns of data frame that should be sent."
+ "If ommited, all datapoint in frame are sent.",
+ )
+ t_sample: Union[float, int] = Field(
+ title="t_sample",
+ default=1,
+ description="Sample time of data source. Default is 1 s.",
+ )
+ data_offset: Optional[Union[pd.Timedelta, float]] = Field(
+ title="data_offset",
+ default=0,
+ description="Offset will be subtracted from index.",
+ )
+ interpolation_method: Optional[InterpolationMethods] = Field(
+ title="interpolation_method",
+ default=InterpolationMethods.previous,
+ description="Interpolation method used for resampling of data."
+ "Only 'linear' and 'previous' are allowed.",
+ )
+
+[docs] @field_validator("data")
+ @classmethod
+ def check_data(cls, data):
+ """Makes sure data is a data frame, and loads it if required."""
+ if isinstance(data, (str, Path)) and Path(data).is_file():
+ data = pd.read_csv(data, engine="python", index_col=0)
+ if not isinstance(data, pd.DataFrame):
+ raise ValueError(
+ f"Data {data} is not a valid DataFrame or the path is not found."
+ )
+ if data.empty:
+ raise ValueError("Dataframe 'data' is empty.")
+ return data
+
+[docs] @field_validator("interpolation_method")
+ @classmethod
+ def check_interpolation_method(cls, interpolation_method):
+ if interpolation_method not in {
+ InterpolationMethods.linear,
+ InterpolationMethods.previous,
+ }:
+ raise ValueError(
+ "Only 'linear' and 'previous' are allowed interpolation methods."
+ )
+ return interpolation_method
+
+
+[docs]class DataSource(BaseModule):
+ config: DataSourceConfig
+
+ def __init__(self, config: dict, agent: Agent):
+ super().__init__(config=config, agent=agent)
+ data = self.config.data
+ data = self.transform_index(data)
+
+ # Filter columns if specified
+ if self.config.columns:
+ columns_to_keep = [
+ col for col in self.config.columns if col in data.columns
+ ]
+ if not columns_to_keep:
+ raise ValueError("None of the specified columns exist in the dataframe")
+ data = data[columns_to_keep]
+
+ if data.empty:
+ raise ValueError("Resulting dataframe is empty after processing")
+
+ # persist the processed frame so later lookups use the transformed index
+ self.config.data = data
+
+[docs] def transform_index(self, data: pd.DataFrame) -> pd.DataFrame:
+ """Handles the index and ensures it is numeric, with correct offset"""
+ offset = self.config.data_offset
+ # Convert offset to seconds if it's a Timedelta
+ if isinstance(offset, pd.Timedelta):
+ offset = offset.total_seconds()
+ # Handle different index types
+ if isinstance(data.index, pd.DatetimeIndex):
+ data.index = (data.index - data.index[0]).total_seconds()
+ else:
+ # Try to convert to numeric if it's a string
+ try:
+ data.index = pd.to_numeric(data.index)
+ data.index = data.index - data.index[0]
+ except ValueError:
+ # If conversion to numeric fails, try to convert to datetime
+ try:
+ data.index = pd.to_datetime(data.index)
+ data.index = (data.index - data.index[0]).total_seconds()
+ except ValueError:
+ raise ValueError("Unable to convert index to numeric format")
+
+ data.index = data.index.astype(float) - offset
+ return data
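+
+ # Illustrative example: a DatetimeIndex with minute frequency becomes
+ # [0.0, 60.0, 120.0, ...] relative to its first entry; with data_offset=60
+ # the final index is [-60.0, 0.0, 60.0, ...].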
+
+ def _get_data_at_time(
+ self,
+ timestamp: float,
+ interpolation_method: InterpolationMethods = InterpolationMethods.previous,
+ ) -> pd.Series:
+ df = self.config.data
+ after = df[df.index >= timestamp].first_valid_index()
+ before = df[df.index <= timestamp].last_valid_index()
+ if after is None:
+ self.logger.warning(
+ f"The timestamp {timestamp} is after the range of the data."
+ )
+ return df.iloc[-1]
+ if before is None:
+ self.logger.warning(
+ f"The timestamp {timestamp} is before the range of the data."
+ )
+ return df.iloc[0]
+ if before == after:
+ return df.loc[before]
+ # Extract the two points
+ df_surrounding = df.loc[[before, after]]
+ if interpolation_method == InterpolationMethods.linear:
+ return (
+ df_surrounding.reindex(df_surrounding.index.union([timestamp]))
+ .interpolate(method="index")
+ .loc[timestamp]
+ )
+ elif interpolation_method == InterpolationMethods.previous:
+ return df_surrounding.iloc[0]
+ else:
+ self.logger.warning(
+ f"Interpolation method {interpolation_method} not supported."
+ )
+ return df_surrounding.iloc[0]
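+
+ # Illustrative example: with index [0, 10] and column values 1.0 and 2.0,
+ # a request at timestamp 5 yields 1.5 under InterpolationMethods.linear
+ # and 1.0 under InterpolationMethods.previous.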
+
+[docs] def process(self):
+ """Write the current data values into data_broker every t_sample"""
+ while True:
+ current_data = self._get_data_at_time(
+ self.env.now, self.config.interpolation_method
+ )
+ for index, value in current_data.items():
+ self.logger.debug(
+ f"At {self.env.now}: Sending variable {index} with value {value} to data broker."
+ )
+ variable = AgentVariable(name=index, value=value, shared=True)
+ self.agent.data_broker.send_variable(variable, copy=False)
+ yield self.env.timeout(self.config.t_sample)
+
+[docs] def register_callbacks(self):
+ """Don't do anything as this module is not event-triggered"""
+
+
+if __name__ == "__main__":
+ logging.basicConfig(level=logging.DEBUG)
+ date_today = datetime.now()
+ time = pd.date_range(date_today, date_today + timedelta(minutes=5), freq="min")
+ data1 = np.random.randint(1, high=100, size=len(time)) / 10
+ data2 = np.random.randint(1, high=100, size=len(time)) / 10
+ df = pd.DataFrame({"index": time, "col1": data1, "col2": data2})
+ df.set_index("index", inplace=True)
+ print("Dataframe:")
+ df.to_csv("example_df.csv")
+ print(df)
+ agent_config = {
+ "id": "my_agent_id",
+ "modules": [
+ {
+ "module_id": "My_Data_Source",
+ "type": "agentlib_mpc.data_source",
+ "data": "example_df.csv",
+ # "data_offset": pd.Timedelta("1min"),
+ # "data_offset": 60,
+ "interpolation_method": InterpolationMethods.linear,
+ "columns": ["col1", "col2"],
+ }
+ ],
+ }
+
+ logging.basicConfig(level=logging.INFO)
+ environment_config = {"rt": False, "factor": 1}
+ env = Environment(config=environment_config)
+ agent_ = Agent(config=agent_config, env=env)
+ env.run(65)
+
+from agentlib_mpc.modules.mpc_full import MPC, MPCConfig
+
+
+[docs]class DistributedMPCConfig(MPCConfig):
+ """
+ Base config class with common configurations
+ """
+
+
+[docs]class DistributedMPC(MPC):
+ """Base class which defines common interfaces among all
+ distributed mpc approaches (either optimization based,
+ game theory based or some other)."""
+
+ config: DistributedMPCConfig
+
+"""Holds functionality for ADMM modules."""
+
+import time
+import threading
+from typing import List, Dict, Tuple, Iterable, Optional, TypeVar, Union
+import queue
+from enum import Enum, auto
+
+import numpy as np
+import pandas as pd
+from agentlib.core.errors import ConfigurationError
+from pydantic import field_validator, Field
+
+from agentlib.core import (
+ Source,
+ AgentVariable,
+)
+
+from agentlib_mpc.data_structures.mpc_datamodels import MPCVariable
+from agentlib_mpc.modules.dmpc import DistributedMPC, DistributedMPCConfig
+from agentlib_mpc.optimization_backends.backend import ADMMBackend
+from agentlib.utils.validators import convert_to_list
+from agentlib_mpc.data_structures import mpc_datamodels
+import agentlib_mpc.data_structures.admm_datatypes as adt
+from agentlib_mpc.data_structures.mpc_datamodels import Results
+
+
+# noinspection PyArgumentList
+[docs]class ModuleStatus(Enum):
+ not_started = auto()
+ syncing = auto()
+ at_registration = auto()
+ optimizing = auto()
+ updating = auto()
+ waiting_for_other_agents = auto()
+ sleeping = auto()
+
+
+# noinspection PyArgumentList
+[docs]class ParticipantStatus(Enum):
+ not_participating = auto()
+ available = auto()
+ confirmed = auto()
+ not_available = auto()
+
+
+[docs]class ADMMParticipation:
+ """Holds data for the status of a shared variable of another system."""
+
+ def __init__(self, variable):
+ self.variable: AgentVariable = variable
+ self.status: ParticipantStatus = ParticipantStatus.not_participating
+ # no more than two messages should stack
+ self.received: queue.Queue = queue.Queue(maxsize=5)
+
+[docs] def empty_memory(self):
+ while True:
+ try:
+ self.received.get_nowait()
+ except queue.Empty:
+ break
+
+[docs] def de_register(self):
+ self.status = ParticipantStatus.not_participating
+ self.empty_memory()
+
+
+[docs]class ADMMConfig(DistributedMPCConfig):
+ couplings: List[mpc_datamodels.MPCVariable] = []
+ exchange: List[mpc_datamodels.MPCVariable] = []
+
+ penalty_factor: float = Field(
+ default=10,
+ ge=0,
+ description="Penalty factor of the ADMM algorithm. Should be equal for all "
+ "agents.",
+ )
+ iteration_timeout: float = Field(
+ default=20,
+ ge=0,
+ description="Maximum computation + waiting time for one iteration.",
+ )
+ registration_period: float = Field(
+ default=2,
+ ge=0,
+ description="Time spent on registration before each optimization",
+ )
+ max_iterations: float = Field(
+ default=20,
+ ge=0,
+ description="Maximum number of ADMM iterations before termination of control "
+ "step.",
+ )
+
+[docs] @field_validator(
+ "exchange", "couplings", "parameters", "inputs", "outputs", "controls", "states"
+ )
+ @classmethod
+ def check_prefixes_of_variables(cls, variables: list[AgentVariable]):
+ """Ensures no user provided variable is named with the reserved ADMM prefix."""
+ conf_err = ConfigurationError(
+ f"Do not use variables that start with "
+ f"'{adt.ADMM_PREFIX}' in an ADMM config."
+ )
+ for var in variables:
+ if var.name.startswith(adt.ADMM_PREFIX):
+ raise conf_err
+ return variables
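+
+ # Illustrative example: a coupling named f"{adt.ADMM_PREFIX}_temperature"
+ # would be rejected here, since the prefix is reserved for the variables
+ # this module generates internally.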
+
+
+ADMMConfigT = TypeVar("ADMMConfigT", bound=ADMMConfig)
+
+
+[docs]class ADMM(DistributedMPC):
+ """
+ This class represents a module participating in a fully decentralized
+ Consensus-ADMM optimization for distributed MPC.
+ Agents autonomously send the values of their coupling variables, register
+ other participants and perform update steps.
+ """
+
+ config: ADMMConfig
+ var_ref: adt.VariableReference
+
+ def __init__(self, config: dict, agent):
+ self.var_qu = queue.Queue()
+ self.start_step = threading.Event()
+ self._status: ModuleStatus = ModuleStatus.syncing
+ self._registered_participants = {}
+ self._admm_variables: dict[str, AgentVariable] = {}
+ super().__init__(config=config, agent=agent)
+
+[docs] def collect_couplings_for_optimization(self):
+ """Collects updated AgentVariables only of the coupling variables."""
+ coup_vars = {}
+ for coup in self.var_ref.couplings + self.var_ref.exchange:
+ coup_vars.update(
+ {v: self._admm_variables[v] for v in coup.admm_variables()}
+ )
+ coup_vars["penalty_factor"] = self.penalty_factor_var
+ return coup_vars
+
+[docs] def process(self):
+ # this thread will perform the optimization whenever start_step is set
+ thread = threading.Thread(
+ target=self._admm_loop, daemon=True, name=f"admm_loop_{self.agent.id}"
+ )
+ thread.start()
+ self.agent.register_thread(thread=thread)
+
+ self._status: ModuleStatus = ModuleStatus.syncing
+ yield self._sync_start()
+ self.logger.info("Starting periodic execution of admm algorithm")
+
+ while True:
+ self.start_step.set()
+ yield self.env.timeout(self.config.time_step)
+
+ def _sync_start(self):
+ """Waits until time is a multiple of the time step."""
+ time_step = self.config.time_step
+ wait_time = time_step - (time.time() % time_step)
+ self.logger.info("Waiting %s s to sync admm algorithm", wait_time)
+ return self.env.timeout(wait_time)
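+
+ # Illustrative example: with time_step=600 and time.time() % 600 == 403,
+ # the module waits 197 s, so all agents enter the first round on the same
+ # wall-clock grid.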
+
+ def _admm_loop(self):
+ """Triggers the optimization whenever self.start_step is set."""
+ while True:
+ self._status: ModuleStatus = ModuleStatus.sleeping
+ self.start_step.wait()
+ self.start_step.clear()
+ self.admm_step()
+ if self.start_step.is_set():
+ self.logger.error(
+ "Start of ADMM round was requested before the "
+ "last one finished. Waiting until next cycle."
+ )
+ self.start_step.clear()
+
+[docs] def admm_step(self):
+ """Performs an entire ADMM optimization."""
+
+ self._perform_registration()
+
+ # get optimization inputs
+ self._set_mean_coupling_values()
+ opt_inputs = self.collect_variables_for_optimization()
+ self.pre_computation_hook()
+
+ # reset termination criteria
+ start_iterations = self.env.time
+ admm_iter = 0
+
+ # start the ADMM iteration loop
+ while True:
+ start_opt = time.time()
+
+ # Solve local optimization
+ result = self._solve_local_optimization(
+ opt_inputs=opt_inputs,
+ current_iteration=admm_iter,
+ start_time=start_iterations,
+ )
+
+ # admm coordination step
+ self.send_coupling_values(result)
+ self._status = ModuleStatus.waiting_for_other_agents
+ self._receive_variables(start=start_opt)
+ self._status = ModuleStatus.updating
+ self._set_mean_coupling_values()
+ self.update_lambda()
+ self.reset_participants_ready()
+
+ # check termination
+ admm_iter += 1
+ if self._check_termination(admm_iter, start_iterations):
+ break
+
+ self.deregister_all_participants()
+ self.set_actuation(result)
+
+ def _solve_local_optimization(
+ self,
+ opt_inputs: Dict[str, AgentVariable],
+ current_iteration: int,
+ start_time: float,
+ ) -> Results:
+ """
+ Performs the local optimization and returns the result.
+ Args:
+ opt_inputs: dict with AgentVariables that stay constant between
+ optimizations
+ current_iteration: current iteration number
+ start_time: environment time at start of ADMM algorithm
+
+ Returns:
+ DataFrame of all optimization variables.
+ """
+ updated_couplings = self.collect_couplings_for_optimization()
+ opt_inputs.update(updated_couplings)
+ self.logger.info("Solving local optimization #%s.", current_iteration)
+ self._status: ModuleStatus = ModuleStatus.optimizing
+ result = self.optimization_backend.solve(start_time, opt_inputs)
+ self.logger.info("Solved local optimization #%s.", current_iteration)
+ return result
+
+ def _perform_registration(self):
+ """Registers participants in current round"""
+ self._status: ModuleStatus = ModuleStatus.at_registration
+ self.logger.info("Start registration of round at %s.", self.env.now)
+
+ # shift initial values for multipliers and coupling outputs
+ self._shift_and_send_coupling_outputs()
+ self._shift_multipliers()
+
+ # accept registrations within a fixed time (handled by callbacks)
+ time.sleep(self.config.registration_period)
+ self._status: ModuleStatus = ModuleStatus.updating
+ self.logger.info("%s: Finished registration of round")
+
+ def _check_termination(self, admm_iter: int, start_iteration: float) -> bool:
+ """
+
+ Args:
+ admm_iter: current iteration number
+ start_iteration: environment time at which current optimization
+ began
+
+ Returns:
+ True, if the algorithm should be terminated,
+ False, if it should continue
+ """
+ self.logger.debug("Finished iteration no. %s.", admm_iter)
+
+ # check remaining runtime within this control step
+ available_runtime = self.config.time_step - self.config.registration_period
+ if self.env.now - start_iteration > available_runtime:
+ self.logger.warning(
+ "ADMM did not converge within the specified sampling time "
+ "of %ss. Terminating current control step.",
+ self.config.time_step,
+ )
+ return True
+
+ # check maximum iterations
+ if admm_iter >= self.config.max_iterations:
+ self.logger.warning(
+ "ADMM did not converge within the maximum iteration number "
+ "of %s. Terminating current control step.",
+ self.config.max_iterations,
+ )
+ return True
+
+ return False
+
+ def _receive_variables(self, start):
+ """Wait until all coupling variables arrive from the other systems."""
+
+ timeout = self.config.iteration_timeout
+ remaining_time = max(timeout - (time.time() - start), 0)
+ for participant in self.all_coupling_statuses():
+ if participant.status == ParticipantStatus.not_participating:
+ continue
+ try:
+ var = participant.received.get(timeout=remaining_time)
+ participant.variable = var
+ participant.status = ParticipantStatus.confirmed
+ except queue.Empty:
+ participant.de_register()
+ source = participant.variable.source
+ coup = participant.variable.alias
+ self.logger.info(
+ "De-registered participant %s from "
+ "coupling %s as it was too slow.",
+ source,
+ coup,
+ )
+
+ remaining_time = max(timeout - (time.time() - start), 0)
+
+[docs] def all_coupling_statuses(self) -> Iterable[ADMMParticipation]:
+ """Gives and iterator of all ADMMParticipation that are registered."""
+ for coup_participants in self.registered_participants.values():
+ for participant in coup_participants.values():
+ yield participant
+
+ def _shift(self, sequence: List[float], grid: List[float]) -> List[float]:
+ """
+ Shifts the sequence forward by one sampling time.
+ Args:
+ sequence: Sequence of variable values.
+ grid: Timestamps belonging to the sequence starting from 0.
+
+ Returns:
+ The shifted list with the last values duplicated.
+ """
+ # get index of first grid point greater self.ts
+ index = next(x[0] for x in enumerate(grid) if x[1] >= self.config.time_step)
+ shifted = sequence[index:] + sequence[-index:]
+ return shifted
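+
+ # Illustrative example: with time_step=600, grid=[0, 600, 1200, 1800] and
+ # sequence=[1, 2, 3, 4], index resolves to 1 and the result is
+ # [2, 3, 4] + [4] = [2, 3, 4, 4].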
+
+ def _shift_multipliers(self):
+ """Shifts lagrange multipliers by one sampling interval. If a scalar
+ is given, expands to the correct length."""
+ for coup in self.cons_and_exchange:
+ grid = self.optimization_backend.coupling_grid
+ var = self._admm_variables[coup.multiplier]
+ val = var.value
+ if len(val) == 1:
+ val = val * len(grid)
+ val = self._shift(sequence=val, grid=grid)
+ self._admm_variables[var.name].value = val
+
+ def _shift_and_send_coupling_outputs(self):
+ """Shifts global coupling variables by one sampling interval. If a
+ scalar is given, expands to the correct length.
+ Sets the values as output to the data_broker, sending them."""
+
+ self.logger.info("Sending initial coupling outputs ...")
+ for coupling in self.cons_and_exchange:
+ grid = self.optimization_backend.coupling_grid
+ length = len(grid)
+
+ # shift output variable
+ var = self._admm_variables[coupling.local]
+ val = var.value
+
+ # expand values that were initialized with a scalar to full trajectories
+ if len(val) == 1:
+ val = val * length
+ val = self._shift(sequence=val, grid=grid)
+
+ self.send_coupling_variable(var.name, val)
+
+[docs] def assert_mpc_variables_are_in_model(self):
+ unassigned_model = super().assert_mpc_variables_are_in_model()
+
+ for coup in self.config.couplings + self.config.exchange:
+ if coup.name in unassigned_model["inputs"]:
+ unassigned_model["inputs"] = self.assert_subset(
+ [coup.name], unassigned_model["inputs"], "Couplings"
+ )
+ elif coup.name in unassigned_model["outputs"]:
+ unassigned_model["outputs"] = self.assert_subset(
+ [coup.name], unassigned_model["outputs"], "Couplings"
+ )
+ elif coup.name in unassigned_model["states"]:
+ unassigned_model["states"] = self.assert_subset(
+ [coup.name], unassigned_model["states"], "Couplings"
+ )
+ return unassigned_model
+
+ @property
+ def registered_participants(self) -> Dict[str, Dict[str, ADMMParticipation]]:
+ """Dictionary containing all other agents this agent shares variables with.
+ Ordered in a two-layer form, with variables at the first layer and
+ agents at the second layer. Contains ADMMParticipation objects at
+ the base layer.
+
+ Examples:
+ self.registered_participants =
+ {'coupling_var_1': {'src_of_agent1': status_1,
+ 'src_of_agent2': status_2,
+ 'src_of_agent3': status_3},
+ 'coupling_var_2': {'src_of_agent3': status_a,
+ 'src_of_agent2': status_b,
+ 'src_of_agent4': status_c}
+ }
+ here, <status> refers to an ADMMParticipation object.
+ """
+ return self._registered_participants
+
+ @registered_participants.setter
+ def registered_participants(self, reg_par: Dict):
+ self._registered_participants = reg_par
+
+ @property
+ def cons_and_exchange(self) -> List[Union[adt.ExchangeEntry, adt.CouplingEntry]]:
+ return self.var_ref.exchange + self.var_ref.couplings
+
+[docs] def reset_participants_ready(self):
+ """Sets the ready status of all participating agents to False."""
+ for coup_participants in self.registered_participants.values():
+ for participant in coup_participants.values():
+ if participant.received.qsize():
+ participant.status = ParticipantStatus.available
+ else:
+ participant.status = ParticipantStatus.not_available
+
+[docs] def deregister_all_participants(self):
+ """Sets the participating status of all participating agents to
+ False."""
+ self.logger.info("De-registering all participants for next round.")
+ for coup_participants in self.registered_participants.values():
+ for participant in coup_participants.values():
+ participant.de_register()
+
+[docs] def participant_callback(self, variable: AgentVariable):
+ """Puts received variables in the correct queue, depending on
+ registration status of this agent."""
+ if variable.source.agent_id != self.agent.id:
+ self.receive_participant(variable)
+
+[docs] def receive_participant(self, variable: AgentVariable):
+ """Set the participation to true for the given coupling input."""
+ # Create copy just in case
+ reg_par_of_coupling = self.registered_participants[variable.alias].copy()
+
+ # add variables that were seen the first time
+ if variable.source not in reg_par_of_coupling:
+ self.logger.info(
+ "Initially registered variable '%s' from '%s'.",
+ variable.alias,
+ variable.source,
+ )
+ reg_par_of_coupling[variable.source] = ADMMParticipation(variable=variable)
+ neighbor: ADMMParticipation = reg_par_of_coupling[variable.source]
+
+ # perform registration at start of round
+ if self._status == ModuleStatus.at_registration:
+ self.logger.debug(
+ "Registered variable '%s' from '%s' for this round.",
+ variable.alias,
+ variable.source,
+ )
+ neighbor.empty_memory()
+ neighbor.status = ParticipantStatus.not_available
+ neighbor.variable = variable
+
+ # confirm new trajectory during admm iterations
+ if self._status in (
+ ModuleStatus.waiting_for_other_agents,
+ ModuleStatus.optimizing,
+ ModuleStatus.updating,
+ ):
+ try:
+ neighbor.received.put_nowait(variable)
+ neighbor.status = ParticipantStatus.available
+ self.logger.debug(
+ "Received variable '%s' from '%s' and set to " "ready: 'True'.",
+ variable.alias,
+ variable.source,
+ )
+ except queue.Full:
+ source = neighbor.variable.source
+ coup = neighbor.variable.alias
+ self.logger.error(
+ "Could not store message from participant %s for "
+ "coupling %s; it sends messages too quickly.",
+ source,
+ coup,
+ )
+ if neighbor.received.qsize() > 2:
+ self.logger.error(f"Queue is too full {neighbor.received.qsize()}")
+ neighbor.variable = variable
+
+ # Set the altered copy again
+ self.registered_participants[variable.alias] = reg_par_of_coupling
+
+[docs] def get_participants_values(self, coupling_alias: str) -> List[pd.Series]:
+ """Get the values of all agents for a coupling variable."""
+ values = []
+ for participant in self.registered_participants[coupling_alias].values():
+ if participant.status == ParticipantStatus.confirmed:
+ values.append(participant.variable.value)
+ if not values:
+ self.logger.warning("Did not get participants values for this round")
+ return values
+
+[docs] def send_coupling_values(self, solution: Results):
+ """
+ Sets the coupling outputs to the data_broker, which automatically sends them.
+
+ Args:
+ solution: Output dictionary from optimization_backend.solve().
+ """
+ self.logger.info("Sending optimal values to other agents.")
+ for coup in self.cons_and_exchange:
+ self.send_coupling_variable(coup.local, list(solution[coup.name]))
+
+ def _set_mean_coupling_values(self):
+ """Computes the current global value of a coupling variable and saves
+ it in the data_broker."""
+ for coupling in self.var_ref.couplings:
+ # Get own coupling variable version
+ own_coup_var = self._admm_variables[coupling.local]
+ own_coup_value = own_coup_var.value
+ coup_alias = own_coup_var.alias
+
+ # Get variables values:
+ other_coup_values = self.get_participants_values(coup_alias)
+
+ # Add own value
+ other_coup_values.append(own_coup_value)
+
+ # Build mean over all values
+ other_coup_values = np.array(other_coup_values)
+ mean_coup_value = list(np.mean(other_coup_values, axis=0))
+ self._admm_variables[coupling.mean].value = mean_coup_value
+ self.logger.debug(
+ "Updated mean_%s = %s", own_coup_var.name, mean_coup_value
+ )
+
+ for exchange in self.var_ref.exchange:
+ own_exchange_var = self._admm_variables[exchange.local]
+ own_exchange_value = own_exchange_var.value
+ exchange_alias = own_exchange_var.alias
+
+ # Get variables values:
+ other_coup_values = self.get_participants_values(exchange_alias)
+
+ # Add own value
+ other_coup_values.append(own_exchange_value)
+
+ # Build mean over all values
+ other_coup_values = np.array(other_coup_values)
+ mean_coup_value = np.mean(other_coup_values, axis=0)
+ mean_diff = list(own_exchange_value - mean_coup_value)
+
+ self._admm_variables[exchange.mean_diff].value = mean_diff
+ self.logger.debug(
+ "Updated mean_diff_%s = %s", own_exchange_var.name, mean_diff
+ )
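+
+ # Illustrative example: for a consensus coupling with own trajectory
+ # [1, 1] and one neighbor trajectory [3, 3], the stored mean is
+ # [2.0, 2.0]; for an exchange variable, own value minus mean is stored
+ # instead.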
+
+ def _solve_local_optimization_debug(
+ self,
+ opt_inputs: Dict[str, AgentVariable],
+ current_iteration: int,
+ start_time: float,
+ ) -> pd.DataFrame:
+ """
+ USED FOR DEBUGGING, SKIPS CASADI
+ Performs the local optimization and returns the result.
+ Args:
+ opt_inputs: dict with AgentVariables that stay constant between
+ optimizations
+ current_iteration: current iteration number
+ start_time: environment time at start of ADMM algorithm
+
+ Returns:
+ DataFrame of all optimization variables.
+ """
+ updated_couplings = self.collect_couplings_for_optimization()
+ opt_inputs.update(updated_couplings)
+ self.logger.info("Solving local optimization #%s.", current_iteration)
+ self._status: ModuleStatus = ModuleStatus.optimizing
+ grid = self.optimization_backend.coupling_grid
+ result = {}
+ for coup in self.config.couplings + self.config.controls + self.config.states:
+ result[coup.name] = [coup.value] * len(grid)
+ result = pd.DataFrame(result)
+ self.logger.info("Solved local optimization #%s.", current_iteration)
+ self.logger.debug(
+ "Coupling variable #%s.", list(result[self.config.couplings[0].name])
+ )
+ return result
+
+[docs] def send_coupling_variable(self, name: str, value: mpc_datamodels.MPCValue):
+ """Sends an admm coupling variable through the data_broker and sets its
+ value locally"""
+ var = self._admm_variables[name]
+ var.value = value
+ self.agent.data_broker.send_variable(var)
+
+[docs] def update_lambda(self):
+ """
+ Performs the update of the lagrange multipliers.
+ lambda^k+1 := lambda^k - rho*(z-x_i)
+ """
+ self.logger.info("Updating lambda variables for all couplings")
+ for coupling in self.var_ref.couplings:
+ # Get current lambda value:
+ coup_name = coupling.name
+ lambda_coupling = self._admm_variables[coupling.multiplier].value
+ lambda_coupling = np.array(lambda_coupling)
+ self.logger.debug("Updating lambda_%s = %s", coup_name, lambda_coupling)
+
+ own_coup_value = self._admm_variables[coupling.local].value
+ own_coup_value = np.array(own_coup_value)
+ mean_coup_value = self._admm_variables[coupling.mean].value
+ mean_coup_value = np.array(mean_coup_value)
+
+ # Calc update
+ updated_value = lambda_coupling - self.config.penalty_factor * (
+ mean_coup_value - own_coup_value
+ )
+ updated_value = updated_value.tolist()
+ # Set value to data_broker
+ self._admm_variables[coupling.multiplier].value = updated_value
+ self.logger.info("Updated lambda_%s = %s", coupling.name, updated_value)
+
+ for exchange in self.var_ref.exchange:
+ # Get current lambda value:
+ lambda_coupling = self._admm_variables[exchange.multiplier].value
+ lambda_coupling = np.array(lambda_coupling)
+ self.logger.debug("Updating lambda_%s = %s", exchange.name, lambda_coupling)
+
+ own_coup_value = np.array(self._admm_variables[exchange.local].value)
+ diff_coup_value = np.array(self._admm_variables[exchange.mean_diff].value)
+
+ # Calc update
+ updated_value = lambda_coupling - self.config.penalty_factor * (
+ diff_coup_value - own_coup_value
+ )
+ updated_value = updated_value.tolist()
+ # Set value to data_broker
+ self._admm_variables[exchange.multiplier].value = updated_value
+ self.logger.info("Updated lambda_%s = %s", exchange.name, updated_value)
+
+[docs] def get_results(self) -> Optional[pd.DataFrame]:
+ """Read the results that were saved from the optimization backend and
+ returns them as Dataframe.
+
+ Returns:
+ (results, stats) tuple of Dataframes.
+ """
+ results_file = self.optimization_backend.config.results_file
+ if results_file is None:
+ self.logger.info("No results were saved .")
+ return None
+ try:
+ results, stats = self.read_results_file(results_file)
+ return results
+ except FileNotFoundError:
+ self.logger.error("ADMM results file %s was not found.", results_file)
+ return None
+
+ @property
+ def penalty_factor_var(self) -> MPCVariable:
+ return MPCVariable(name="penalty_factor", value=self.config.penalty_factor)
+
+ def _setup_var_ref(self) -> adt.VariableReference:
+ # Extend var_ref with coupling variables
+ return adt.VariableReference.from_config(self.config)
+
+ def _setup_optimization_backend(self) -> ADMMBackend:
+ self._admm_variables = self._create_couplings()
+ return super()._setup_optimization_backend()
+
+ def _create_couplings(self) -> dict[str, MPCVariable]:
+ """Map coupling variables based on already setup model"""
+ # Check if coupling even exist
+
+ # Map couplings:
+ _couplings = []
+ # and generate new variables for admm:
+ _admm_variables: dict[str, MPCVariable] = {}
+ for coupling in self.config.couplings:
+ coupling.source = Source(agent_id=self.agent.id)
+ coupling.shared = True
+ _couplings.append(coupling)
+
+ # Create four new ADMM variables for each coupling:
+ # multiplier (lambda), local copy, global mean, and lagged value.
+ include = {"unit": coupling.unit, "description": coupling.description}
+ coupling_entry = adt.CouplingEntry(name=coupling.name)
+ alias = adt.coupling_alias(coupling.alias)
+ _admm_variables[coupling_entry.multiplier] = MPCVariable(
+ name=coupling_entry.multiplier,
+ value=[0],
+ type="list",
+ source=Source(module_id=self.id),
+ **include,
+ )
+ _admm_variables[coupling_entry.local] = MPCVariable(
+ name=coupling_entry.local,
+ value=convert_to_list(coupling.value),
+ alias=alias,
+ type="list",
+ source=Source(agent_id=self.agent.id),
+ shared=True,
+ **include,
+ )
+ _admm_variables[coupling_entry.mean] = MPCVariable(
+ name=coupling_entry.mean,
+ type="list",
+ source=Source(module_id=self.id),
+ **include,
+ )
+ lag_val = coupling.value or np.nan_to_num(
+ (coupling.ub + coupling.lb) / 2, posinf=1000, neginf=1000
+ )
+ _admm_variables[coupling_entry.lagged] = MPCVariable(
+ name=coupling_entry.lagged,
+ value=lag_val,
+ source=Source(module_id=self.id),
+ **include,
+ )
+
+ # add callback to receive this value
+ broker_funcs = [
+ self.agent.data_broker.deregister_callback,
+ self.agent.data_broker.register_callback,
+ ]
+
+ for broker_func in broker_funcs:
+ broker_func(
+ alias=alias,
+ source=None,
+ callback=self.participant_callback,
+ )
+ self.registered_participants.update({alias: {}})
+
+ # Exchange variables
+ _exchange_vars = []
+ # and generate new variables for admm:
+ for exchange_var in self.config.exchange:
+ exchange_var.source = Source(agent_id=self.agent.id)
+ exchange_var.shared = True
+ _exchange_vars.append(exchange_var)
+
+ # Create four new ADMM variables for each exchange variable:
+ # multiplier (lambda), local copy, mean difference, and lagged value.
+ include = {
+ "unit": exchange_var.unit,
+ "description": exchange_var.description,
+ }
+
+ exchange_entry = adt.ExchangeEntry(name=exchange_var.name)
+ alias = adt.exchange_alias(exchange_var.alias)
+ _admm_variables[exchange_entry.multiplier] = MPCVariable(
+ name=exchange_entry.multiplier,
+ value=[0],
+ type="list",
+ source=Source(module_id=self.id),
+ **include,
+ )
+ _admm_variables[exchange_entry.local] = MPCVariable(
+ name=exchange_entry.local,
+ value=convert_to_list(exchange_var.value),
+ alias=alias,
+ type="list",
+ source=Source(agent_id=self.agent.id),
+ shared=True,
+ **include,
+ )
+ _admm_variables[exchange_entry.mean_diff] = MPCVariable(
+ name=exchange_entry.mean_diff,
+ type="list",
+ source=Source(module_id=self.id),
+ **include,
+ )
+ lag_val = exchange_var.value or np.nan_to_num(
+ (exchange_var.ub + exchange_var.lb) / 2, posinf=1000, neginf=1000
+ )
+ _admm_variables[exchange_entry.lagged] = MPCVariable(
+ name=exchange_entry.lagged,
+ value=lag_val,
+ source=Source(module_id=self.id),
+ **include,
+ )
+
+ # add callback to receive this value
+ broker_funcs = [
+ self.agent.data_broker.deregister_callback,
+ self.agent.data_broker.register_callback,
+ ]
+
+ for broker_func in broker_funcs:
+ broker_func(
+ alias=alias,
+ source=None,
+ callback=self.participant_callback,
+ )
+ self.registered_participants.update({alias: {}})
+ return _admm_variables
+
+[docs] def collect_variables_for_optimization(
+ self, var_ref: mpc_datamodels.VariableReference = None
+ ) -> dict[str, AgentVariable]:
+ """Gets all variables noted in the var ref and puts them in a flat
+ dictionary."""
+ if var_ref is None:
+ var_ref = self.var_ref
+
+ # config variables
+ variables = {v: self.get(v) for v in var_ref.all_variables()}
+ for coup_entry in var_ref.exchange + var_ref.couplings:
+ lagged_admm_var = coup_entry.lagged
+ original_name = coup_entry.name
+ variable = self.get(original_name)
+ if original_name in self.history:
+ past_values = self.history[original_name]
+ variable = MPCVariable(
+ name=lagged_admm_var, value=pd.Series(past_values)
+ )
+ variables[lagged_admm_var] = variable
+
+ # history variables
+ for hist_var in self._lags_dict_seconds:
+ past_values = self.history[hist_var]
+ if not past_values:
+ # if the history of a variable is empty, fallback to the scalar value
+ continue
+
+ # create copy to not mess up scalar value of original variable in case
+ # fallback is needed
+ updated_var = variables[hist_var].copy(
+ update={"value": pd.Series(past_values)}
+ )
+ variables[hist_var] = updated_var
+
+ return {**variables, **self._internal_variables}
+
+
+[docs]class LocalADMMConfig(ADMMConfig):
+ sync_delay: float = 0.001
+ registration_delay: float = 0.1
+
+
+[docs]class LocalADMM(ADMM):
+ config: LocalADMMConfig
+
+ @property
+ def sync_delay(self) -> float:
+ """Timeout value used to sync local admm processes. Should be very
+ small."""
+ return self.config.sync_delay
+
+ @property
+ def registration_delay(self) -> float:
+ """Timeout value used to wait one on registration. Waits in real time
+ (time.sleep)"""
+ return self.config.registration_delay
+
+[docs] def process(self):
+ first_registration = True
+ while True:
+ start_round = self.env.time
+
+ # Register participants in current round
+ self.logger.info("Start registration of round at %s.", self.env.now)
+ self._status = ModuleStatus.at_registration
+ yield self.env.timeout(self.sync_delay)
+
+ # shift initial values for multipliers and coupling outputs
+ self._shift_and_send_coupling_outputs()
+ self._shift_multipliers()
+ self.pre_computation_hook()
+ yield self.env.timeout(self.sync_delay)
+ self._status = ModuleStatus.optimizing
+ self.logger.info("Finished registration of round")
+ yield self.env.timeout(self.sync_delay)
+
+ if first_registration:
+ time.sleep(self.registration_delay)
+ first_registration = False
+
+ # get optimization inputs
+ self._set_mean_coupling_values()
+ opt_inputs = self.collect_variables_for_optimization()
+ # reset termination criteria
+ start_iterations = self.env.time
+ admm_iter = 0
+
+ # start the ADMM iteration loop
+ while True:
+ # Solve local optimization
+ start_opt = time.time()
+ updated_couplings = self.collect_couplings_for_optimization()
+ opt_inputs.update(updated_couplings)
+ self.logger.info("Solving local optimization #%s.", admm_iter)
+ self._status = ModuleStatus.optimizing
+ result = self.optimization_backend.solve(start_iterations, opt_inputs)
+ self.logger.info("Solved local optimization #%s.", admm_iter)
+
+ # admm coordination step
+ yield self.env.timeout(self.sync_delay)
+ self.send_coupling_values(result)
+ yield self.env.timeout(self.sync_delay)
+ self._status = ModuleStatus.waiting_for_other_agents
+ self._receive_variables(start=start_opt)
+ yield self.env.timeout(self.sync_delay)
+ self._status = ModuleStatus.updating
+ self._set_mean_coupling_values()
+ self.update_lambda()
+ self.reset_participants_ready()
+ yield self.env.timeout(self.sync_delay)
+
+ # check termination
+ admm_iter += 1
+ if self._check_termination(admm_iter, start_iterations):
+ break
+
+ self.deregister_all_participants()
+ self.set_actuation(result)
+ self._status = ModuleStatus.sleeping
+
+ time_spent_on_sync_delay = self.env.time - start_round
+ yield self.env.timeout(self.config.time_step - time_spent_on_sync_delay)
+
+"""Module implementing the coordinated ADMM module, which works together
+with a coordinator."""
+
+from collections import namedtuple
+from typing import Dict, Optional, List
+import pandas as pd
+import pydantic
+
+from agentlib_mpc.data_structures.mpc_datamodels import MPCVariable
+from .admm import ADMM, ADMMConfig
+from agentlib_mpc.modules.dmpc.employee import MiniEmployee, MiniEmployeeConfig
+from agentlib.utils.validators import convert_to_list
+import agentlib_mpc.data_structures.coordinator_datatypes as cdt
+import agentlib_mpc.data_structures.admm_datatypes as adt
+from agentlib.core import AgentVariable, Agent
+
+
+coupInput = namedtuple("coup_input", ["mean", "lam"])
+
+
+[docs]class CoordinatedADMMConfig(MiniEmployeeConfig, ADMMConfig):
+ shared_variable_fields: list[str] = MiniEmployeeConfig.default(
+ "shared_variable_fields"
+ ) + ADMMConfig.default("shared_variable_fields")
+
+[docs] @pydantic.field_validator("couplings", "exchange")
+ def couplings_should_have_values(cls, value: List[AgentVariable]):
+ """Asserts that couplings and exchange have values, as they are needed for
+ initial guess."""
+ for var in value:
+ if var.value is None:
+ raise ValueError(
+ "Couplings and Exchange Variables should have a value, as it is "
+ "required for the initial guess."
+ )
+ return value
+
+
+[docs]class CoordinatedADMM(MiniEmployee, ADMM):
+ """
+ Module to implement an ADMM agent, which is guided by a coordinator.
+ Only optimizes based on callbacks.
+ """
+
+ config: CoordinatedADMMConfig
+
+ def __init__(self, *, config: dict, agent: Agent):
+ self._initial_setup = True # flag to check that we don't compile ipopt twice
+ super().__init__(config=config, agent=agent)
+ self._optimization_inputs: Dict[str, AgentVariable] = {}
+ self._create_coupling_alias_to_name_mapping()
+ self._result: Optional[pd.DataFrame] = None
+
+[docs] def process(self):
+ # send registration request to coordinator
+ timeout = self.config.registration_interval
+
+ while True:
+ if not self._registered_coordinator:
+ guesses, ex_guess = self._initial_coupling_values()
+ answer = adt.AgentToCoordinator(
+ local_trajectory=guesses, local_exchange_trajectory=ex_guess
+ )
+ self.set(cdt.REGISTRATION_A2C, answer.to_json())
+ yield self.env.timeout(timeout)
+
+[docs] def registration_callback(self, variable: AgentVariable):
+ """callback for registration"""
+ if self._registered_coordinator:
+ # ignore if registration has already been done
+ return
+
+ self.logger.debug(
+ f"receiving {variable.name}={variable.value} from {variable.source}"
+ )
+ # global parameters that define the optimization problem
+ value = cdt.RegistrationMessage(**variable.value)
+ if value.agent_id != self.source.agent_id:
+ return
+ options = adt.ADMMParameters(**value.opts)
+ self._set_admm_parameters(options=options)
+ guesses, ex_guess = self._initial_coupling_values()
+ answer = adt.AgentToCoordinator(
+ local_trajectory=guesses, local_exchange_trajectory=ex_guess
+ )
+
+ self._registered_coordinator = variable.source
+ self.set(cdt.REGISTRATION_A2C, answer.to_json())
+
+ def _after_config_update(self):
+ # force do_jit to False on the first call, so ipopt is not compiled twice
+ if (
+ self.config.optimization_backend.get("do_jit", False)
+ and self._initial_setup
+ ):
+ do_jit = True
+ self.config.optimization_backend["do_jit"] = False
+ else:
+ do_jit = False
+ super()._after_config_update()
+ if self._initial_setup:
+ self.config.optimization_backend["do_jit"] = do_jit
+ self._initial_setup = False
+
+[docs] def get_new_measurement(self):
+ """
+ Retrieve new measurement from relevant sensors
+ Returns:
+
+ """
+ opt_inputs = self.collect_variables_for_optimization()
+ opt_inputs[adt.PENALTY_FACTOR] = self.penalty_factor_var
+ self._optimization_inputs = opt_inputs
+
+ def _create_coupling_alias_to_name_mapping(self):
+ """
+ creates a mapping of alias to the variable names for multiplier and
+ global mean that the optimization backend recognizes
+ Returns:
+
+ """
+ alias_to_input_names = {}
+ for coupling in self.var_ref.couplings:
+ coup_variable = self.get(coupling.name)
+ coup_in = coupInput(mean=coupling.mean, lam=coupling.multiplier)
+ alias_to_input_names[coup_variable.alias] = coup_in
+ for coupling in self.var_ref.exchange:
+ coup_variable = self.get(coupling.name)
+ coup_in = coupInput(mean=coupling.mean_diff, lam=coupling.multiplier)
+ alias_to_input_names[coup_variable.alias] = coup_in
+ self._alias_to_input_names = alias_to_input_names
+
+[docs] def optimize(self, variable: AgentVariable):
+ """
+ Performs the optimization given the mean trajectories and multipliers
+ from the coordinator and replies with the local optimal trajectories.
+ """
+ # unpack message
+ updates = adt.CoordinatorToAgent.from_json(variable.value)
+ if updates.target != self.source.agent_id:
+ return
+ self.logger.debug("Received update from Coordinator.")
+
+ # load mpc inputs and current coupling inputs of this iteration
+ opt_inputs = self._optimization_inputs.copy()
+
+ # add the coupling inputs of this iteration to the other mpc inputs
+ for alias, multiplier in updates.multiplier.items():
+ coup_in = self._alias_to_input_names[alias]
+ opt_inputs[coup_in.lam] = MPCVariable(name=coup_in.lam, value=multiplier)
+ opt_inputs[coup_in.mean] = MPCVariable(
+ name=coup_in.mean, value=updates.mean_trajectory[alias]
+ )
+ for alias, multiplier in updates.exchange_multiplier.items():
+ coup_in = self._alias_to_input_names[alias]
+ opt_inputs[coup_in.lam] = MPCVariable(name=coup_in.lam, value=multiplier)
+ opt_inputs[coup_in.mean] = MPCVariable(
+ name=coup_in.mean, value=updates.mean_diff_trajectory[alias]
+ )
+
+ opt_inputs[adt.PENALTY_FACTOR].value = updates.penalty_parameter
+ # perform optimization
+ self._result = self.optimization_backend.solve(
+ now=self._start_optimization_at, current_vars=opt_inputs
+ )
+
+ # send optimizationData back to coordinator to signal finished
+ # optimization. Select only trajectory where index is at least zero, to not
+ # send lags
+ cons_traj = {}
+ exchange_traj = {}
+ for coup in self.config.couplings:
+ cons_traj[coup.alias] = self._result[
+ coup.name
+ ] # we can serialize numpy now, maybe make this easier
+ for exchange in self.config.exchange:
+ exchange_traj[exchange.alias] = self._result[exchange.name]
+
+ opt_return = adt.AgentToCoordinator(
+ local_trajectory=cons_traj, local_exchange_trajectory=exchange_traj
+ )
+ self.logger.debug("Sent optimal solution.")
+ self.set(name=cdt.OPTIMIZATION_A2C, value=opt_return.to_json())
+
+ def _finish_optimization(self):
+ """
+ Finalize an iteration. Usually, this includes setting the actuation.
+ Returns:
+
+ """
+ # this check catches the case, where the agent was not alive / registered at
+ # the start of the round and thus did not participate and has no result
+ # Since the finish-signal of the coordinator is broadcast, it will trigger this
+ # function even if the agent did not participate in the optimization before
+ if self._result is not None:
+ self.set_actuation(self._result)
+ self._result = None
+
+ def _set_admm_parameters(self, options: adt.ADMMParameters):
+ """Sets new admm parameters, re-initializes the optimization problem
+ and returns an initial guess of the coupling variables."""
+
+ # update the config with new parameters
+ new_config_dict = self.config.model_dump()
+ new_config_dict.update(
+ {
+ adt.PENALTY_FACTOR: options.penalty_factor,
+ cdt.TIME_STEP: options.time_step,
+ cdt.PREDICTION_HORIZON: options.prediction_horizon,
+ }
+ )
+ self.config = new_config_dict
+ self.logger.info("%s: Reinitialized optimization problem.", self.agent.id)
+
+ def _initial_coupling_values(self) -> tuple[Dict[str, list], Dict[str, list]]:
+ """Gets the initial coupling values with correct trajectory length."""
+ grid_len = len(self.optimization_backend.coupling_grid)
+ guesses = {}
+ exchange_guesses = {}
+ for var in self.config.couplings:
+ val = convert_to_list(var.value)
+ # this overrides more precise guesses, but is more stable
+ guesses[var.alias] = [val[0]] * grid_len
+ for var in self.config.exchange:
+ val = convert_to_list(var.value)
+ exchange_guesses[var.alias] = [val[0]] * grid_len
+ return guesses, exchange_guesses
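+
+ # Illustrative example: a coupling with value 21.5 on a coupling grid of
+ # length 11 yields the flat initial guess [21.5] * 11.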
+
+[docs] def init_iteration_callback(self, variable: AgentVariable):
+ """Callback that answers the coordinators init_iteration flag."""
+ if self._registered_coordinator:
+ super().init_iteration_callback(variable)
+
+"""
+Defines classes that coordinate an ADMM process.
+"""
+
+import os
+import time
+from ast import literal_eval
+from pathlib import Path
+from typing import Dict, List, Optional
+import queue
+import logging
+from dataclasses import asdict
+import threading
+import math
+
+from pydantic import field_validator, Field
+import numpy as np
+import pandas as pd
+
+from agentlib.core.agent import Agent
+from agentlib.core.datamodels import AgentVariable, Source
+from pydantic_core.core_schema import FieldValidationInfo
+
+from agentlib_mpc.data_structures import coordinator_datatypes as cdt
+from agentlib_mpc.modules.dmpc.coordinator import Coordinator, CoordinatorConfig
+import agentlib_mpc.data_structures.admm_datatypes as adt
+
+logger = logging.getLogger(__name__)
+
+
+[docs]class ADMMCoordinatorConfig(CoordinatorConfig):
+ """Hold the config for ADMMCoordinator"""
+
+ penalty_factor: float = Field(
+ title="penalty_factor",
+ default=10,
+ description="Penalty factor of the ADMM algorithm. Should be equal "
+ "for all agents.",
+ )
+ wait_time_on_start_iters: float = Field(
+ title="wait_on_start_iterations",
+ default=0.1,
+ description="wait_on_start_iterations",
+ )
+ registration_period: float = Field(
+ title="registration_period",
+ default=5,
+ description="Time spent on registration before each optimization",
+ )
+ admm_iter_max: int = Field(
+ title="admm_iter_max",
+ default=20,
+ description="Maximum number of ADMM iterations before termination of control "
+ "step.",
+ )
+ time_step: float = Field(
+ title="time_step",
+ default=600, # seconds
+ description="Sampling interval of between two control steps. Will be used in "
+ "the discretization for MPC.",
+ )
+ sampling_time: Optional[float] = Field(
+ default=None, # seconds
+ description="Sampling interval for control steps. If None, will be the same as"
+ " time step. Does not affect the discretization of the MPC, "
+ "only the interval with which there will be optimization steps.",
+ validate_default=True,
+ )
+ prediction_horizon: int = Field(
+ title="prediction_horizon",
+ default=10,
+ description="Prediction horizon of participating agents.",
+ )
+ abs_tol: float = Field(
+ title="abs_tol",
+ default=1e-3,
+ description="Absolute stopping criterion.",
+ )
+ rel_tol: float = Field(
+ title="rel_tol",
+ default=1e-3,
+ description="Relative stopping criterion.",
+ )
+ primal_tol: float = Field(
+ default=1e-3,
+ description="Absolute primal stopping criterion.",
+ )
+ dual_tol: float = Field(
+ default=1e-3,
+ description="Absolute dual stopping criterion.",
+ )
+ use_relative_tolerances: bool = Field(
+ default=True,
+ description="If True, use abs_tol and rel_tol, if False us prim_tol and "
+ "dual_tol.",
+ )
+ penalty_change_threshold: float = Field(
+ default=-1,
+ description="When the primal residual is x times higher, vary the penalty "
+ "parameter and vice versa.",
+ )
+ penalty_change_factor: float = Field(
+ default=2,
+ description="Factor to vary the penalty parameter with.",
+ )
+ save_solve_stats: bool = Field(
+ default=False,
+ description="When True, saves the solve stats to a file.",
+ )
+ solve_stats_file: str = Field(
+ default="admm_stats.csv", # seconds
+ description="File name for the solve stats.",
+ )
+ save_iter_interval: int = Field(
+ default=1000,
+ description="Interval of iterations after which intermediate solve stats "
+ "are saved during a control step.",
+ )
+
+[docs] @field_validator("solve_stats_file")
+ @classmethod
+ def solve_stats_file_is_csv(cls, file: str):
+ assert file.endswith(".csv")
+ return file
+
+[docs] @field_validator("sampling_time")
+ @classmethod
+ def default_sampling_time(cls, samp_time, info: FieldValidationInfo):
+ if samp_time is None:
+ samp_time = info.data["time_step"]
+ return samp_time
+
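+# Illustrative example (docs only): a possible module entry for this
+# coordinator inside an agent config. The field names come from
+# ADMMCoordinatorConfig above; the values and the "type" identifier are
+# assumptions and may differ in your installation.
+_example_admm_coordinator_module = {
+ "module_id": "coordinator",
+ "type": "admm_coordinator",
+ "penalty_factor": 10,
+ "admm_iter_max": 50,
+ "time_step": 900,
+ "prediction_horizon": 12,
+ "abs_tol": 1e-3,
+ "rel_tol": 1e-3,
+}
+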
+
+[docs]class ADMMCoordinator(Coordinator):
+ config: ADMMCoordinatorConfig
+
+ def __init__(self, *, config: dict, agent: Agent):
+ if agent.env.config.rt:
+ self.process = self._realtime_process
+ self.registration_callback = self._real_time_registration_callback
+ else:
+ self.process = self._fast_process
+ self.registration_callback = self._sequential_registration_callback
+
+ super().__init__(config=config, agent=agent)
+ self._coupling_variables: Dict[str, adt.ConsensusVariable] = {}
+ self._exchange_variables: Dict[str, adt.ExchangeVariable] = {}
+ self._agents_to_register = queue.Queue()
+ self.agent_dict: Dict[str, adt.AgentDictEntry] = {}
+ self._registration_queue: queue.Queue = queue.Queue()
+ self._registration_lock: threading.Lock = threading.Lock()
+ self.penalty_parameter = self.config.penalty_factor
+ self._iteration_stats: pd.DataFrame = pd.DataFrame(
+ columns=["primal_residual", "dual_residual"]
+ )
+ self._primal_residuals_tracker: List[float] = []
+ self._dual_residuals_tracker: List[float] = []
+ self._penalty_tracker: List[float] = []
+ self._performance_tracker: List[float] = []
+ self.start_algorithm_at: float = 0
+ self._performance_counter: float = time.perf_counter()
+
+ def _realtime_process(self):
+ """Starts a thread to run next to the environment (to prevent a long blocking
+ process). Periodically informs the thread of the next optimization."""
+ self._start_algorithm = threading.Event()
+
+ thread_proc = threading.Thread(
+ target=self._realtime_process_thread,
+ name=f"{self.source}_ProcessThread",
+ daemon=True,
+ )
+ thread_proc.start()
+ self.agent.register_thread(thread=thread_proc)
+
+ thread_reg = threading.Thread(
+ target=self._handle_registrations,
+ name=f"{self.source}_RegistrationThread",
+ daemon=True,
+ )
+ thread_reg.start()
+ self.agent.register_thread(thread=thread_reg)
+
+ while True:
+ self._start_algorithm.set()
+ yield self.env.timeout(self.config.sampling_time)
+
+ def _realtime_process_thread(self):
+ while True:
+ self._status = cdt.CoordinatorStatus.sleeping
+ self._start_algorithm.wait()
+ self._start_algorithm.clear()
+ with self._registration_lock:
+ self._realtime_step()
+ if self._start_algorithm.is_set():
+ self.logger.error(
+ "%s: Start of ADMM round was requested before the "
+ "last one finished. Skipping cycle.",
+ self.source,
+ )
+ self._start_algorithm.clear()
+
+ def _realtime_step(self):
+ # ------------------
+ # start iteration
+ # ------------------
+ self.status = cdt.CoordinatorStatus.init_iterations
+ self.start_algorithm_at = self.env.time
+ self._performance_counter = time.perf_counter()
+ # maybe this will hold information instead of "True"
+ self.set(cdt.START_ITERATION_C2A, True)
+ # check for all_finished here
+ time.sleep(self.config.wait_time_on_start_iters)
+ if not list(self._agents_with_status(status=cdt.AgentStatus.ready)):
+ self.logger.info(f"No Agents available at time {self.env.now}.")
+ return # if no agents registered return early
+ self._update_mean_coupling_variables()
+ self._shift_coupling_variables()
+ # ------------------
+ # iteration loop
+ # ------------------
+ admm_iter = 0
+ for admm_iter in range(1, self.config.admm_iter_max + 1):
+ # ------------------
+ # optimization
+ # ------------------
+ # send
+ self.status = cdt.CoordinatorStatus.optimization
+ # set all agents to busy
+ self.trigger_optimizations()
+
+ # check for all finished here
+ self._wait_for_ready()
+
+ # ------------------
+ # perform update steps
+ # ------------------
+ self.status = cdt.CoordinatorStatus.updating
+ self._update_mean_coupling_variables()
+ self._update_multipliers()
+ # ------------------
+ # check convergence
+ # ------------------
+ converged = self._check_convergence(admm_iter)
+ if converged:
+ self.logger.info("Converged within %s iterations. ", admm_iter)
+ break
+ else: # for/else: runs only if the loop was not left via break
+ self.logger.warning(
+ "Did not converge within the maximum number of iterations %s.",
+ self.config.admm_iter_max,
+ )
+ self._wrap_up_algorithm(iterations=admm_iter)
+ self.set(cdt.START_ITERATION_C2A, False) # this signals the finish
+
+ def _wait_non_rt(self):
+ """Returns a triggered event. Cedes control to the simpy event queue for a
+ short moment. This is required in fast-as-possible simulations, to allow
+ other agents to react via callbacks."""
+ return self.env.timeout(0.001)
+
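+ # Illustrative sketch (docs only): the cede-control pattern used by
+ # _fast_process below, reduced to a self-contained simpy example. A
+ # near-zero timeout hands control back to the event queue, so queued
+ # callbacks of other agents can run in between:
+ #
+ # import simpy
+ #
+ # def step(env):
+ # while True:
+ # ... # one coordination step
+ # yield env.timeout(0.001) # let other processes react
+ #
+ # env = simpy.Environment()
+ # env.process(step(env))
+ # env.run(until=1)
+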
+ def _fast_process(self):
+ """Process function for use in fast-as-possible simulations. Regularly yields
+ control back to the environment, to allow the callbacks to run."""
+ yield self._wait_non_rt()
+
+ while True:
+ # ------------------
+ # start iteration
+ # ------------------
+ self.status = cdt.CoordinatorStatus.init_iterations
+ self.start_algorithm_at = self.env.time
+ self._performance_counter = time.perf_counter()
+ self.set(cdt.START_ITERATION_C2A, True)
+ yield self._wait_non_rt()
+ if not list(self._agents_with_status(status=cdt.AgentStatus.ready)):
+ self.logger.info(f"No Agents available at time {self.env.now}.")
+ communication_time = self.env.time - self.start_algorithm_at
+ yield self.env.timeout(self.config.sampling_time - communication_time)
+ continue # if no agents registered return early
+ self._update_mean_coupling_variables()
+ self._shift_coupling_variables()
+ # ------------------
+ # iteration loop
+ # ------------------
+ admm_iter = 0
+ for admm_iter in range(1, self.config.admm_iter_max + 1):
+ # ------------------
+ # optimization
+ # ------------------
+ # send
+ self.status = cdt.CoordinatorStatus.optimization
+ # set all agents to busy
+ self.trigger_optimizations()
+ yield self._wait_non_rt()
+
+ # check for all finished here
+ self._wait_for_ready()
+
+ # ------------------
+ # perform update steps
+ # ------------------
+ self.status = cdt.CoordinatorStatus.updating
+ self._update_mean_coupling_variables()
+ self._update_multipliers()
+ # ------------------
+ # check convergence
+ # ------------------
+ converged = self._check_convergence(admm_iter)
+ if converged:
+ self.logger.info("Converged within %s iterations. ", admm_iter)
+ break
+ else: # for/else: runs only if the loop was not left via break
+ self.logger.warning(
+ "Did not converge within the maximum number of iterations %s.",
+ self.config.admm_iter_max,
+ )
+ self._wrap_up_algorithm(iterations=admm_iter)
+ self.set(cdt.START_ITERATION_C2A, False) # this signals the finish
+ self.status = cdt.CoordinatorStatus.sleeping
+ time_spent_on_communication = self.env.time - self.start_algorithm_at
+ yield self.env.timeout(
+ self.config.sampling_time - time_spent_on_communication
+ )
+
+ def _update_mean_coupling_variables(self):
+ """Calculates a new mean of the coupling variables."""
+
+ active_agents = self._agents_with_status(cdt.AgentStatus.ready)
+ for variable in self._coupling_variables.values():
+ variable.update_mean_trajectory(sources=active_agents)
+ for variable in self._exchange_variables.values():
+ variable.update_diff_trajectories(sources=active_agents)
+
+ def _shift_coupling_variables(self):
+ """"""
+ for variable in self._coupling_variables.values():
+ variable.shift_values_by_one(horizon=self.config.prediction_horizon)
+ for variable in self._exchange_variables.values():
+ variable.shift_values_by_one(horizon=self.config.prediction_horizon)
+
+ def _update_multipliers(self):
+ """Performs the multiplier update for the coupling variables."""
+ rho = self.penalty_parameter
+ active_agents = self._agents_with_status(cdt.AgentStatus.ready)
+ for variable in self._coupling_variables.values():
+ variable.update_multipliers(rho=rho, sources=active_agents)
+ for variable in self._exchange_variables.values():
+ variable.update_multiplier(rho=rho)
+
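+ # Illustrative note (docs only): for consensus variables the update above
+ # is the standard scaled ADMM multiplier step,
+ # lambda_i <- lambda_i + rho * (x_i - x_mean),
+ # e.g. with rho=10, x_i=[1.0, 1.2] and x_mean=[0.9, 1.1] the multiplier
+ # grows by [1.0, 1.0]. The actual implementation lives in admm_datatypes.
+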
+ def _agents_with_status(self, status: cdt.AgentStatus) -> List[Source]:
+ """Returns an iterator with all agents sources that are currently on
+ this status."""
+ active_agents = [s for (s, a) in self.agent_dict.items() if a.status == status]
+ return active_agents
+
+ def _check_convergence(self, iteration) -> bool:
+ """
+ Checks the convergence of the algorithm. Returns True if yes,
+ False if no.
+ Returns:
+ Tuple of (converged, primal residual norm, dual residual norm)
+
+ """
+ primal_residuals = []
+ dual_residuals = []
+ active_agents = self._agents_with_status(cdt.AgentStatus.ready)
+ flat_locals = []
+ flat_means = []
+ flat_multipliers = []
+
+ for var in self._coupling_variables.values():
+ prim, dual = var.get_residual(rho=self.penalty_parameter)
+ primal_residuals.extend(prim)
+ dual_residuals.extend(dual)
+ locs = var.flat_locals(sources=active_agents)
+ muls = var.flat_multipliers(active_agents)
+ flat_locals.extend(locs)
+ flat_multipliers.extend(muls)
+ flat_means.extend(var.mean_trajectory)
+
+ for var in self._exchange_variables.values():
+ prim, dual = var.get_residual(rho=self.penalty_parameter)
+ primal_residuals.extend(prim)
+ dual_residuals.extend(dual)
+ locs = var.flat_locals(sources=active_agents)
+ muls = var.multiplier
+ flat_locals.extend(locs)
+ flat_multipliers.extend(muls)
+ flat_means.extend(var.mean_trajectory)
+
+ # compute residual norms
+ prim_norm = np.linalg.norm(primal_residuals)
+ dual_norm = np.linalg.norm(dual_residuals)
+
+ self._vary_penalty_parameter(primal_residual=prim_norm, dual_residual=dual_norm)
+ self._penalty_tracker.append(self.penalty_parameter)
+ self._primal_residuals_tracker.append(prim_norm)
+ self._dual_residuals_tracker.append(dual_norm)
+ self._performance_tracker.append(
+ time.perf_counter() - self._performance_counter
+ )
+
+ self.logger.debug(
+ "Finished iteration %s . \n Primal residual: %s \n Dual residual: " "%s",
+ iteration,
+ prim_norm,
+ dual_norm,
+ )
+ if iteration % self.config.save_iter_interval == 0:
+ self._save_stats(iterations=iteration)
+
+ if self.config.use_relative_tolerances:
+ # scaling factors for relative criterion
+ primal_scaling = max(
+ np.linalg.norm(flat_locals), # ||Ax||
+ np.linalg.norm(flat_means), # ||Bz||
+ )
+ dual_scaling = np.linalg.norm(flat_multipliers)
+ # compute tolerances for this iteration
+ sqrt_p = math.sqrt(len(flat_multipliers))
+ sqrt_n = math.sqrt(len(flat_locals)) # not actually n, but best we can do
+ eps_pri = (
+ sqrt_p * self.config.abs_tol + self.config.rel_tol * primal_scaling
+ )
+ eps_dual = sqrt_n * self.config.abs_tol + self.config.rel_tol * dual_scaling
+ converged = prim_norm < eps_pri and dual_norm < eps_dual
+ else:
+ converged = (
+ prim_norm < self.config.primal_tol and dual_norm < self.config.dual_tol
+ )
+
+ return converged
+
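+ # Illustrative note (docs only): the relative criterion used above mirrors
+ # the stopping rule from Boyd et al. (2011), eq. (3.12):
+ # eps_pri = sqrt(p) * abs_tol + rel_tol * max(||x||, ||x_mean||)
+ # eps_dual = sqrt(n) * abs_tol + rel_tol * ||lambda||
+ # and the algorithm stops once ||r|| < eps_pri and ||s|| < eps_dual.
+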
+ def _save_stats(self, iterations: int) -> None:
+ """
+ Args:
+ iterations: Which iteration of the ADMM algorithm are we when this function
+ is called?
+ """
+ section_length = len(self._penalty_tracker)
+ section_start = iterations - section_length
+ index = [
+ (self.start_algorithm_at, i + section_start) for i in range(section_length)
+ ]
+
+ path = Path(self.config.solve_stats_file)
+ header = not path.is_file()
+ stats = pd.DataFrame(
+ {
+ "primal_residual": self._primal_residuals_tracker,
+ "dual_residual": self._dual_residuals_tracker,
+ "penalty_parameter": self._penalty_tracker,
+ "wall_time": self._performance_tracker,
+ },
+ index=index,
+ )
+ self._penalty_tracker = []
+ self._dual_residuals_tracker = []
+ self._primal_residuals_tracker = []
+ self._performance_tracker = []
+ path.parent.mkdir(exist_ok=True, parents=True)
+ stats.to_csv(path_or_buf=path, header=header, mode="a")
+
+ def _vary_penalty_parameter(self, primal_residual: float, dual_residual: float):
+ """Determines a new value for the penalty parameter based on residuals."""
+ mu = self.config.penalty_change_threshold
+ tau = self.config.penalty_change_factor
+
+ if mu <= 1:
+ # do not perform varying penalty method if the threshold is set below 1
+ return
+
+ if primal_residual > mu * dual_residual:
+ self.penalty_parameter = self.penalty_parameter * tau
+ elif dual_residual > mu * primal_residual:
+ self.penalty_parameter = self.penalty_parameter / tau
+
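+ # Illustrative note (docs only): with penalty_change_threshold=10 and
+ # penalty_change_factor=2, a residual pair (primal=5.0, dual=0.2) triggers
+ # rho <- 2 * rho, while (primal=0.1, dual=4.0) triggers rho <- rho / 2.
+ # This is the residual-balancing scheme from Boyd et al. (2011), sec. 3.4.1.
+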
+[docs] def trigger_optimizations(self):
+ """
+ Triggers the optimization for all agents with status ready.
+ Returns:
+
+ """
+
+ # create an iterator for all agents which are ready for this round
+ active_agents: Iterator[Tuple[Source, adt.AgentDictEntry]] = (
+ (s, a)
+ for (s, a) in self.agent_dict.items()
+ if a.status == cdt.AgentStatus.ready
+ )
+
+ # aggregate and send trajectories per agent
+ for source, agent in active_agents:
+ # collect mean and multiplier per coupling variable
+ mean_trajectories = {}
+ multipliers = {}
+ for alias in agent.coup_vars:
+ coup_var = self._coupling_variables[alias]
+ mean_trajectories[alias] = coup_var.mean_trajectory
+ multipliers[alias] = coup_var.multipliers[source]
+ diff_trajectories = {}
+ multiplier = {}
+ for alias in agent.exchange_vars:
+ coup_var = self._exchange_variables[alias]
+ diff_trajectories[alias] = coup_var.diff_trajectories[source]
+ multiplier[alias] = coup_var.multiplier
+
+ # package all coupling inputs needed for an agent
+ coordi_to_agent = adt.CoordinatorToAgent(
+ mean_trajectory=mean_trajectories,
+ multiplier=multipliers,
+ exchange_multiplier=multiplier,
+ mean_diff_trajectory=diff_trajectories,
+ target=source.agent_id,
+ penalty_parameter=self.penalty_parameter,
+ )
+
+ self.logger.debug("Sending to %s with source %s", agent.name, source)
+ self.logger.debug("Set %s to busy.", agent.name)
+
+ # send values
+ agent.status = cdt.AgentStatus.busy
+ self.set(cdt.OPTIMIZATION_C2A, coordi_to_agent.to_json())
+
+[docs] def register_agent(self, variable: AgentVariable):
+ """Registers the agent, after it sent its initial guess with correct
+ vector length."""
+ value = adt.AgentToCoordinator.from_json(variable.value)
+ src = variable.source
+ ag_dict_entry = self.agent_dict[variable.source]
+
+ # loop over coupling variables of this agent
+ for alias, traj in value.local_trajectory.items():
+ coup_var = self._coupling_variables.setdefault(
+ alias, adt.ConsensusVariable()
+ )
+
+ # initialize Lagrange-Multipliers and local solution
+ coup_var.multipliers[src] = [0] * len(traj)
+ coup_var.local_trajectories[src] = traj
+ ag_dict_entry.coup_vars.append(alias)
+
+ # loop over exchange variables of this agent
+ for alias, traj in value.local_exchange_trajectory.items():
+ coup_var = self._exchange_variables.setdefault(
+ alias, adt.ExchangeVariable()
+ )
+
+ # initialize Lagrange-Multipliers and local solution
+ coup_var.multiplier = [0] * len(traj)
+ coup_var.local_trajectories[src] = traj
+ ag_dict_entry.exchange_vars.append(alias)
+
+ # set agent from pending to standby
+ ag_dict_entry.status = cdt.AgentStatus.standby
+ self.logger.info(
+ f"Coordinator successfully registered agent {variable.source}."
+ )
+
+[docs] def optim_results_callback(self, variable: AgentVariable):
+ """
+ Saves the results of a local optimization.
+ Args:
+ variable:
+
+ Returns:
+
+ """
+ local_result = adt.AgentToCoordinator.from_json(variable.value)
+ source = variable.source
+ for alias, trajectory in local_result.local_trajectory.items():
+ coup_var = self._coupling_variables[alias]
+ coup_var.local_trajectories[source] = trajectory
+ for alias, trajectory in local_result.local_exchange_trajectory.items():
+ coup_var = self._exchange_variables[alias]
+ coup_var.local_trajectories[source] = trajectory
+
+ self.agent_dict[variable.source].status = cdt.AgentStatus.ready
+ self.received_variable.set()
+
+ def _send_parameters_to_agent(self, variable: AgentVariable):
+ """Sends an agent the global parameters after a signup request."""
+ admm_parameters = adt.ADMMParameters(
+ prediction_horizon=self.config.prediction_horizon,
+ time_step=self.config.time_step,
+ penalty_factor=self.config.penalty_factor,
+ )
+
+ message = cdt.RegistrationMessage(
+ agent_id=variable.source.agent_id, opts=asdict(admm_parameters)
+ )
+ self.set(cdt.REGISTRATION_C2A, asdict(message))
+
+[docs] def registration_callback(self, variable: AgentVariable):
+ self.logger.debug(f"receiving {variable.name} from {variable.source}")
+ if variable.source not in self.agent_dict:
+ self.agent_dict[variable.source] = adt.AgentDictEntry(
+ name=variable.source,
+ status=cdt.AgentStatus.pending,
+ )
+ self._send_parameters_to_agent(variable)
+ self.logger.info(
+ f"Coordinator got request agent {variable.source} and set to "
+ f"'pending'."
+ )
+ return
+ # complete registration of pending agents
+ if self.agent_dict[variable.source].status is cdt.AgentStatus.pending:
+ self.register_agent(variable=variable)
+
+ def _sequential_registration_callback(self, variable: AgentVariable):
+ """Handles the registration for sequential i.e. local coordinators. Variables
+ are handled immediately."""
+ self.logger.debug(f"receiving {variable.name} from {variable.source}")
+ self._initial_registration(variable)
+
+ def _real_time_registration_callback(self, variable: AgentVariable):
+ """Handles the registration for realtime coordinators. Variables are put in a
+ queue and a thread registers them when it is safe to do so."""
+ self.logger.debug(f"receiving {variable.name} from {variable.source}")
+ self._registration_queue.put(variable)
+
+ def _initial_registration(self, variable: AgentVariable):
+ """Handles initial registration of a variable. If it is unknown, add it to
+ the agent_dict and send it the global parameters. If it is sending its
+ confirmation with initial trajectories,
+ refer to the actual registration function."""
+ if variable.source not in self.agent_dict:
+ self.agent_dict[variable.source] = adt.AgentDictEntry(
+ name=variable.source,
+ status=cdt.AgentStatus.pending,
+ )
+ self._send_parameters_to_agent(variable)
+ self.logger.info(
+ f"Coordinator got request agent {variable.source} and set to "
+ f"'pending'."
+ )
+
+ # complete registration of pending agents
+ elif self.agent_dict[variable.source].status is cdt.AgentStatus.pending:
+ self.register_agent(variable=variable)
+
+ def _handle_registrations(self):
+ """Performs registration tasks while the algorithm is on standby."""
+
+ while True:
+ # add new agent to dict and send them global parameters
+ variable = self._registration_queue.get()
+
+ with self._registration_lock:
+ self._initial_registration(variable)
+
+ def _wrap_up_algorithm(self, iterations):
+ self._save_stats(iterations=iterations)
+ self.penalty_parameter = self.config.penalty_factor
+
+[docs] def get_results(self) -> pd.DataFrame:
+ """Reads the results on iteration data if they were saved."""
+ results_file = self.config.solve_stats_file
+ try:
+ df = pd.read_csv(results_file, index_col=0, header=0)
+ new_ind = [literal_eval(i) for i in df.index]
+ df.index = pd.MultiIndex.from_tuples(new_ind)
+ return df
+ except FileNotFoundError:
+ self.logger.error("Results file %s was not found.", results_file)
+ return pd.DataFrame()
+
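+ # Illustrative note (docs only): the returned frame carries a MultiIndex of
+ # (algorithm start time, iteration) tuples, as written by _save_stats, so a
+ # single iteration can be read e.g. via df.loc[(0.0, 5), "primal_residual"].
+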
+[docs] def cleanup_results(self):
+ results_file = self.config.solve_stats_file
+ if not results_file:
+ return
+ os.remove(results_file)
+
+import logging
+import time
+from dataclasses import asdict
+from typing import Dict
+import threading
+
+from pydantic import Field
+
+from agentlib.core import (
+ BaseModule,
+ BaseModuleConfig,
+ AgentVariable,
+ Agent,
+ Source,
+ AgentVariables,
+)
+from agentlib_mpc.data_structures.coordinator_datatypes import (
+ AgentStatus,
+ RegistrationMessage,
+)
+import agentlib_mpc.data_structures.coordinator_datatypes as cdt
+
+
+logger = logging.getLogger(__name__)
+
+
+[docs]class CoordinatorConfig(BaseModuleConfig):
+ maxIter: int = Field(default=10, description="Maximum number of iterations")
+ time_out_non_responders: float = Field(
+ default=1, description="Maximum wait time for subsystems in seconds"
+ )
+ messages_in: AgentVariables = [
+ AgentVariable(name=cdt.REGISTRATION_A2C),
+ AgentVariable(name=cdt.START_ITERATION_A2C),
+ AgentVariable(name=cdt.OPTIMIZATION_A2C),
+ ]
+ messages_out: AgentVariables = [
+ AgentVariable(name=cdt.REGISTRATION_C2A),
+ AgentVariable(name=cdt.START_ITERATION_C2A),
+ AgentVariable(name=cdt.OPTIMIZATION_C2A),
+ ]
+ shared_variable_fields: list[str] = ["messages_out"]
+
+
+[docs]class Coordinator(BaseModule):
+ """Class implementing the base coordination for distributed MPC"""
+
+ config: CoordinatorConfig
+
+ def __init__(self, *, config: dict, agent: Agent):
+ super().__init__(config=config, agent=agent)
+ self.agent_dict: Dict[Source, cdt.AgentDictEntry] = {}
+ self.status: cdt.CoordinatorStatus = cdt.CoordinatorStatus.sleeping
+ self.received_variable = threading.Event()
+
+[docs] def process(self):
+ yield self.env.timeout(0.01)
+
+ while True:
+ # ------------------
+ # start iteration
+ # ------------------
+ self.status = cdt.CoordinatorStatus.init_iterations
+ # maybe this will hold information instead of "True"
+ self.set(cdt.START_ITERATION_C2A, True)
+ # check for all_finished here
+ time.sleep(1)
+ # ------------------
+ # iteration loop
+ # ------------------
+ for iI in range(self.config.maxIter):
+ # ------------------
+ # optimization
+ # ------------------
+ # send
+ self.status = cdt.CoordinatorStatus.optimization
+ # set all agents to busy
+ self.trigger_optimizations()
+
+ # check for all finished here
+ self._wait_for_ready()
+
+ # receive
+ ...
+ # ------------------
+ # perform update steps
+ # ------------------
+ self.status = cdt.CoordinatorStatus.updating
+ ...
+ # ------------------
+ # check convergence
+ # ------------------
+ ...
+
+ yield self.env.timeout(1)
+
+[docs] def trigger_optimizations(self):
+ """
+ Triggers the optimization for all agents with status ready.
+ Returns:
+
+ """
+ send = self.agent.data_broker.send_variable
+ for source, agent in self.agent_dict.items():
+ if agent.status == cdt.AgentStatus.ready:
+ value = agent.optimization_data.to_dict()
+ self.logger.debug("Sending to %s with source %s", agent.name, source)
+ self.logger.debug("Set %s to busy.", agent.name)
+ agent.status = cdt.AgentStatus.busy
+ message = AgentVariable(
+ name=cdt.OPTIMIZATION_C2A,
+ source=source,
+ value=value,
+ )
+ send(message)
+
+[docs] def register_callbacks(self):
+ self.agent.data_broker.register_callback(
+ alias=cdt.REGISTRATION_A2C,
+ source=None,
+ callback=self.registration_callback,
+ )
+ self.agent.data_broker.register_callback(
+ alias=cdt.START_ITERATION_A2C,
+ source=None,
+ callback=self.init_iteration_callback,
+ )
+ self.agent.data_broker.register_callback(
+ alias=cdt.OPTIMIZATION_A2C,
+ source=None,
+ callback=self.optim_results_callback,
+ )
+
+[docs] def optim_results_callback(self, variable: AgentVariable):
+ """
+ Saves the results of a local optimization.
+ Args:
+ variable:
+
+ Returns:
+
+ """
+
+ entry = self.agent_dict[variable.source]
+ entry.optimization_data = cdt.OptimizationData.from_dict(variable.value)
+ self.agent_dict[variable.source].status = cdt.AgentStatus.ready
+ self.received_variable.set()
+
+[docs] def init_iteration_callback(self, variable: AgentVariable):
+ """
+ Processes an agent's InitIteration confirmation.
+ Args:
+ variable:
+
+ Returns:
+
+ """
+ if self.status != cdt.CoordinatorStatus.init_iterations:
+ # maybe set AgentStatus to something meaningful
+ self.logger.error("Agent did not respond in time!")
+ return
+
+ if variable.value is not True:
+ # did not receive acknowledgement
+ return
+
+ try:
+ ag_dict_entry = self.agent_dict[variable.source]
+ except KeyError:
+ # likely did not finish registration of an agent yet, but the agent
+ # already has its end registered and responds to the init_iterations.
+ # Let it wait one round.
+ return
+
+ self.logger.debug(
+ "Received 'StartIteration' confirmation from %s", variable.source
+ )
+ if ag_dict_entry.status != cdt.AgentStatus.standby:
+ # if the status is not standby, the agent might still be in registration
+ # phase, or something else occurred
+ return
+ ag_dict_entry.status = cdt.AgentStatus.ready
+ self.received_variable.set()
+
+ @property
+ def all_finished(self):
+ """
+
+ Returns:
+ True, if there are no busy agents, else False
+
+ """
+ for src, ag_entry in self.agent_dict.items():
+ if ag_entry.status is cdt.AgentStatus.busy:
+ return False
+ return True
+
+[docs] def registration_callback(self, variable: AgentVariable):
+ self.logger.info(
+ f"receiving {variable.name}={variable.value} from {variable.source}"
+ )
+ # use information in message to set up coordinator
+
+ if variable.source not in self.agent_dict: # add agent to dict
+ entry = cdt.AgentDictEntry(
+ name=variable.source,
+ status=AgentStatus.pending,
+ )
+ self.agent_dict[variable.source] = entry
+ OptimOpts = {"Nhor": 10, "dt": 60}
+ message = RegistrationMessage(
+ agent_id=variable.source.agent_id, opts=OptimOpts
+ )
+ self.set(cdt.REGISTRATION_C2A, asdict(message)) # {"source" :
+ # variable.source, "status" : True, "opts" : OptimOpts}
+ self.logger.info(
+ f"Coordinator got request agent {variable.source} and set to "
+ f"'pending'."
+ )
+ else: # process ready-flag
+ message = RegistrationMessage(**variable.value)
+ if message.status == AgentStatus.standby:
+ # change agent status from pending to standby
+ self.agent_dict[variable.source].status = AgentStatus.standby
+ self.logger.info(
+ f"Coordinator successfully registered agent {variable.source}."
+ )
+ else:
+ self.agent_dict.pop(variable.source) # delete agent from dict
+
+ def _wait_for_ready(self):
+ """Wait until all coupling variables arrive from the other systems."""
+
+ self.received_variable.clear()
+ self.logger.info("Start waiting for agents to finish computation.")
+ while True:
+ # check exit conditions
+ if self.all_finished:
+ count = 0
+ for ag in self.agent_dict.values():
+ if ag.status == cdt.AgentStatus.ready:
+ count += 1
+ self.logger.info("Got variables from all (%s) agents.", count)
+ break
+
+ # wait until a new item is put in the queue
+
+ if self.received_variable.wait(timeout=self.config.time_out_non_responders):
+ self.received_variable.clear()
+ else:
+ self._deregister_slow_participants()
+ break
+
+ def _deregister_slow_participants(self):
+ """Sets all agents that are still busy to standby, so they won't be
+ waited on again."""
+ for agent in self.agent_dict.values():
+ if agent.status == cdt.AgentStatus.busy:
+ agent.status = cdt.AgentStatus.standby
+ self.logger.info(
+ "De-registered agent %s as it was too slow.", agent.name
+ )
+
+
+if __name__ == "__main__":
+ pass
+
+import logging
+from dataclasses import asdict
+import abc
+from typing import Optional
+
+from pydantic import Field
+
+from agentlib.core import (
+ BaseModule,
+ BaseModuleConfig,
+ AgentVariable,
+ Agent,
+ AgentVariables,
+)
+from agentlib.core.datamodels import Source
+from agentlib_mpc.data_structures.coordinator_datatypes import RegistrationMessage
+import agentlib_mpc.data_structures.coordinator_datatypes as cdt
+
+
+logger = logging.getLogger(__name__)
+
+
+[docs]class MiniEmployeeConfig(BaseModuleConfig):
+ request_frequency: float = Field(
+ default=1, description="Wait time between signup_requests"
+ )
+ coordinator: Source = Field(description="Define the agent's coordinator")
+ messages_in: AgentVariables = [
+ AgentVariable(name=cdt.REGISTRATION_C2A),
+ AgentVariable(name=cdt.START_ITERATION_C2A),
+ AgentVariable(name=cdt.OPTIMIZATION_C2A),
+ ]
+ messages_out: AgentVariables = [
+ AgentVariable(name=cdt.REGISTRATION_A2C),
+ AgentVariable(name=cdt.START_ITERATION_A2C),
+ AgentVariable(name=cdt.OPTIMIZATION_A2C),
+ ]
+ registration_interval: float = Field(
+ default=10,
+ ge=0,
+ description="Interval in seconds after which a registration attempt is made.",
+ )
+ shared_variable_fields: list[str] = ["messages_out"]
+
+
+[docs]class MiniEmployee(BaseModule):
+ config: MiniEmployeeConfig
+
+ def __init__(self, *, config: dict, agent: Agent):
+ super().__init__(config=config, agent=agent)
+ self._registered_coordinator: Optional[Source] = None
+ self._start_optimization_at: float = 0
+
+[docs] def process(self):
+ # send registration request to coordinator
+ timeout = self.config.registration_interval
+ while True:
+ if not self._registered_coordinator:
+ self.set(cdt.REGISTRATION_A2C, True)
+ yield self.env.timeout(timeout)
+
+[docs] def register_callbacks(self):
+ # callback used for registration process
+ coordinator_agent = Source(agent_id=self.config.coordinator.agent_id)
+ self.agent.data_broker.register_callback(
+ alias=cdt.REGISTRATION_C2A,
+ source=coordinator_agent,
+ callback=self.registration_callback,
+ )
+ # callback for iteration start
+ self.agent.data_broker.register_callback(
+ alias=cdt.START_ITERATION_C2A,
+ source=coordinator_agent,
+ callback=self.init_iteration_callback,
+ )
+ # callback for optimization
+ self.agent.data_broker.register_callback(
+ alias=cdt.OPTIMIZATION_C2A,
+ source=coordinator_agent,
+ callback=self.optimize,
+ )
+
+[docs] def pre_computation_hook(self):
+ """
+ This method is called in every computation step before the optimization
+ starts. Overwrite it in a derived subclass to take actions each time
+ before the optimal control problem is solved.
+ """
+ pass
+
+[docs] def init_iteration_callback(self, variable: AgentVariable):
+ """
+ Callback that processes the coordinator's 'startIteration' flag.
+ Args:
+ variable:
+
+ """
+ # value is True on start
+ if variable.value:
+ self._start_optimization_at = self.env.time
+ # new measurement
+ self.get_new_measurement()
+ # shift trajectories
+ self.shift_trajectories()
+ # custom function which can be overloaded to do stuff before a step
+ self.pre_computation_hook()
+
+ self.set(cdt.START_ITERATION_A2C, True)
+ self.logger.debug("Sent 'StartIteration' True.")
+
+ # value is False on convergence/iteration limit
+ else:
+ self._finish_optimization()
+
+[docs] def get_new_measurement(self):
+ """
+ Retrieve new measurement from relevant sensors
+ Returns:
+
+ """
+ ...
+ # raise NotImplementedError
+
+ # return self.collect_variables_for_optimization()
+
+ @abc.abstractmethod
+ def _finish_optimization(self):
+ """
+ Finalize an iteration. Usually, this includes setting the actuation.
+ Returns:
+
+ """
+
+[docs] @abc.abstractmethod
+ def optimize(self, variable: AgentVariable):
+ """
+ Performs the optimization given the information from the coordinator.
+ Replies with local information.
+ Returns:
+
+ """
+ variables = cdt.OptimizationData.from_dict(variable.value)
+
+ # perform optimization
+ # send optimizationData back to coordinator to signal finished
+ # optimization
+
+ value = variables.to_dict()
+ self.logger.debug("Sent optimal solution.")
+ self.set(name=cdt.OPTIMIZATION_A2C, value=value)
+
+[docs] def shift_trajectories(self):
+ """
+ Shifts algorithm specific trajectories.
+ Returns:
+
+ """
+ ...
+ # raise NotImplementedError
+
+[docs] @abc.abstractmethod
+ def registration_callback(self, variable: AgentVariable):
+ """callback for registration"""
+ self.logger.info(
+ f"receiving {variable.name}={variable.value} from {variable.source}"
+ )
+ # global parameters to define optimisation problem
+ value = RegistrationMessage(**variable.value)
+
+ # Decide if message from coordinator is for this agent
+ if value.agent_id != self.source.agent_id:
+ return
+
+ self.OptimOpts = value.opts
+ answer = RegistrationMessage(status=cdt.AgentStatus.standby)
+ self._registered_coordinator = variable.source
+ self.set("registrationOut", asdict(answer))
+
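+# Illustrative sketch (docs only) of the registration handshake implemented
+# above; all values are examples.
+# 1) The employee broadcasts REGISTRATION_A2C until a coordinator answers.
+# 2) The coordinator replies with the global optimization options, e.g.
+# RegistrationMessage(agent_id="room_1", opts={"Nhor": 10, "dt": 60})
+# 3) The employee checks the agent_id, stores the options and confirms with
+# RegistrationMessage(status=AgentStatus.standby)
+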
+import os
+from typing import Dict, Optional, Tuple
+
+import pandas as pd
+import pydantic
+from agentlib.core import (
+ BaseModuleConfig,
+ BaseModule,
+ Agent,
+ AgentVariable,
+ Source,
+)
+from agentlib.core.errors import ConfigurationError
+from pydantic import Field
+
+from agentlib_mpc.data_structures import mpc_datamodels
+from agentlib_mpc.data_structures.mpc_datamodels import Results
+from agentlib_mpc.modules.mpc import create_optimization_backend
+from agentlib_mpc.optimization_backends.backend import (
+ OptimizationBackendT,
+)
+from agentlib_mpc.utils.analysis import load_mpc, load_mpc_stats
+
+AG_VAR_DICT = dict[str, AgentVariable]
+
+
+[docs]class MHEConfig(BaseModuleConfig):
+ """
+ Pydantic data model for MPC configuration parser
+ """
+
+ optimization_backend: dict
+ time_step: float = Field(
+ default=60,
+ ge=0,
+ description="Time step of the MHE.",
+ )
+ horizon: int = Field(
+ default=5,
+ ge=0,
+ description="Estimation horizon of the MHE.",
+ )
+ known_parameters: mpc_datamodels.MPCVariables = Field(
+ default=[],
+ description="List of known parameters of the MHE. They are "
+ "constant over the horizon. Parameters not listed "
+ "here will have their default from the model file.",
+ )
+ estimated_parameters: mpc_datamodels.MPCVariables = Field(
+ default=[],
+ description="List of unknown parameters of the MHE. They are "
+ "constant over the horizon and will be estimated.",
+ )
+ known_inputs: mpc_datamodels.MPCVariables = Field(
+ default=[],
+ description="List of known input variables of the MHE. Includes "
+ "controls, disturbances, setpoints, dynamic constraint boundaries etc.",
+ )
+ estimated_inputs: mpc_datamodels.MPCVariables = Field(
+ default=[],
+ description="List of unknown input variables of the MHE. Includes "
+ "mainly disturbances.",
+ ) # AgentVariables for the initial condition of states to be optimized
+ states: mpc_datamodels.MPCVariables = Field(
+ default=[],
+ description="List of all differential states of the MHE.",
+ )
+ state_weights: dict[str, float] = Field(
+ title="State Weights",
+ default={},
+ description="Mapping of state names to their weight in the MHE problem. If "
+ "you are certain with your measurement, chose a high value. If "
+ "you dont have a measurement / do not trust it, choose 0. Default "
+ "is 0.",
+ )
+ shared_variable_fields: list[str] = []
+
+[docs] @pydantic.field_validator("state_weights")
+ @classmethod
+ def state_weights_are_in_states(
+ cls, state_weights: dict, info: pydantic.ValidationInfo
+ ):
+ state_names = {s.name for s in info.data["states"]}
+ state_weight_names = set(state_weights)
+
+ missing_names = state_weight_names - state_names
+ if missing_names:
+ raise ValueError(
+ f"The following states defined in state weights do not exist in the "
+ f"states: {', '.join(missing_names)}"
+ )
+ return state_weights
+
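+# Illustrative example (docs only): an excerpt of MHE options with state
+# weights. Field names come from MHEConfig above; the state names, values and
+# the dict-based variable format are assumptions for illustration.
+_example_mhe_options = {
+ "horizon": 10,
+ "time_step": 60,
+ "states": [{"name": "T_room", "value": 293.15}, {"name": "T_wall", "value": 290.0}],
+ "state_weights": {"T_room": 100.0, "T_wall": 0.0},
+}
+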
+
+[docs]class MHE(BaseModule):
+ """
+ A moving horizon estimator.
+ """
+
+ config_type = MHEConfig
+ config: MHEConfig
+ var_ref: mpc_datamodels.MHEVariableReference
+
+ def __init__(self, config: dict, agent: Agent):
+ """
+ Constructor for the moving horizon estimator (MHE).
+ Args:
+ config: config dict of the module
+ agent: agent the module belongs to
+ """
+ super().__init__(config=config, agent=agent)
+
+ measured_states, weights_states = self._create_auxiliary_variables()
+ self.measured_states: AG_VAR_DICT = measured_states
+ self.weights_states: AG_VAR_DICT = weights_states
+
+ # creates a reference of variables, which have to be kept track of in a
+ # dataframe, to provide a past trajectory for the MHE
+ self._history_var_names: list[str] = [
+ v.name for v in self.config.known_inputs
+ ] + list(measured_states)
+
+ self.history: pd.DataFrame = pd.DataFrame(
+ columns=self._history_var_names, dtype=float
+ )
+
+ # construct the optimization problem
+ try:
+ self._init_optimization()
+ except (RuntimeError, ValueError) as err:
+ raise ConfigurationError(
+ f"The optimization backend of Agent {self.source} could not "
+ f"finish its setup!"
+ ) from err
+
+ def _setup_optimization_backend(self) -> OptimizationBackendT:
+ """Performs the setup of the optimization_backend, keeps track of status"""
+ self.init_status = mpc_datamodels.InitStatus.during_update
+ opti_back = create_optimization_backend(
+ self.config.optimization_backend, self.agent.id
+ )
+ opti_back.register_logger(self.logger)
+ disc_opts = opti_back.config.discretization_options
+ disc_opts.time_step = self.config.time_step
+ return opti_back
+
+ def _setup_var_ref(self) -> mpc_datamodels.MHEVariableReference:
+ var_ref = mpc_datamodels.MHEVariableReference.from_config(self.config)
+ var_ref.measured_states = list(self.measured_states)
+ var_ref.weights_states = list(self.weights_states)
+ return var_ref
+
+ def _after_config_update(self):
+ self._create_auxiliary_variables()
+ self.var_ref = self._setup_var_ref()
+ self.optimization_backend = self._setup_optimization_backend()
+ self._init_optimization()
+ self.init_status = mpc_datamodels.InitStatus.ready
+
+ def _init_optimization(self):
+ """Performs the setup of the optimization backend."""
+ self.optimization_backend.setup_optimization(
+ var_ref=self.var_ref,
+ )
+ self.logger.info("%s: Initialized optimization problem.", self.agent.id)
+
+[docs] def process(self):
+ while True:
+ current_vars = self.collect_variables_for_optimization()
+ solution = self.optimization_backend.solve(
+ now=self.env.now, current_vars=current_vars
+ )
+ self._set_estimation(solution)
+ self._remove_old_values_from_history()
+ yield self.env.timeout(self.config.time_step)
+
+ def _remove_old_values_from_history(self):
+ """Clears the history of all entries that are older than current time minus
+ horizon length."""
+ backwards_horizon_seconds = self.config.horizon * self.config.time_step
+ oldest_relevant_time = self.env.now - backwards_horizon_seconds
+ filt = self.history.index >= oldest_relevant_time
+ self.history = self.history[filt]
+
+ def _set_estimation(self, solution: Results):
+ """Sets the estimated variables to the DataBroker."""
+
+ # parameters are scalars defined at the beginning of the problem, so we send
+ # the first value in the parameter trajectory
+ for parameter in self.var_ref.estimated_parameters:
+ par_val = solution[parameter]
+ self.set(parameter, par_val)
+
+ # we want to know the most recent value of states and inputs
+ for var in self.var_ref.states + self.var_ref.estimated_inputs:
+ value = solution[var][-1]
+ self.set(var, float(value))
+
+[docs] def register_callbacks(self):
+ """Registers callbacks which listen to the variables which have to be saved as
+ time series. These callbacks save the values in the history for use in the
+ optimization."""
+
+ for inp in self.var_ref.known_inputs:
+ var = self.get(inp)
+ self.agent.data_broker.register_callback(
+ alias=var.alias,
+ source=var.source,
+ callback=self._callback_hist_vars,
+ name=var.name,
+ )
+
+ # registers callback which listens to alias/source of the state variable, but
+ # gets the name of the measured state as parameter, to correctly save it in the
+ # history
+ for state, meas_state in zip(self.var_ref.states, self.var_ref.measured_states):
+ var = self.get(state)
+ self.agent.data_broker.register_callback(
+ alias=var.alias,
+ source=var.source,
+ callback=self._callback_hist_vars,
+ name=meas_state,
+ )
+
+[docs] def collect_variables_for_optimization(
+ self, var_ref: mpc_datamodels.MHEVariableReference = None
+ ) -> Dict[str, AgentVariable]:
+ """Gets all variables noted in the var ref and puts them in a flat
+ dictionary. The MHE version of this function has to perform some extra
+ checks and lookups, since variables come from different sources, and some need to
+ incorporate trajectories of past values."""
+ if var_ref is None:
+ var_ref = self.var_ref
+
+ # first fetch all variables with get, that are in the config
+ all_variables = {v: self.get(v) for v in var_ref.all_variables()}
+
+ # then, collect the variables for the weights and measured states, that have
+ # been generated and are not in the config
+ for ms_name, ms_var in self.measured_states.items():
+ all_variables[ms_name] = ms_var.copy()
+ for w_name, w_var in self.weights_states.items():
+ all_variables[w_name] = w_var.copy()
+
+ # for values whose past trajectory is required in the optimization, set the
+ # var value to that trajectory
+ for hist_var in self._history_var_names:
+ past_values = self.history[hist_var].dropna()
+ if past_values.empty:
+ # if the history of a variable is empty, fall back to the scalar value
+ continue
+
+ # create copy to not mess up scalar value of original variable in case
+ # fallback is needed
+ all_variables[hist_var].value = past_values
+
+ return all_variables
+
+ def _callback_hist_vars(self, variable: AgentVariable, name: str):
+ """Adds received measured inputs to the past trajectory."""
+ self.history.loc[variable.timestamp, name] = variable.value
+
+ def _create_auxiliary_variables(self) -> tuple[AG_VAR_DICT, AG_VAR_DICT]:
+ """Creates variables holding the weights and measurements of the states"""
+ states: mpc_datamodels.MPCVariables = self.config.states
+ measured_states: dict[str, AgentVariable] = {}
+ weights_states: dict[str, AgentVariable] = {}
+ for state in states:
+ weight_name = "weight_" + state.name
+ measurement_name = "measured_" + state.name
+
+ weights_states[weight_name] = mpc_datamodels.MPCVariable(
+ name=weight_name,
+ value=self.config.state_weights.get(state.name, 0),
+ type="float",
+ source=Source(module_id=self.id),
+ )
+ measured_states[measurement_name] = mpc_datamodels.MPCVariable(
+ name=measurement_name,
+ value=pd.Series(state.value),
+ type="pd.Series",
+ source=state.source,
+ )
+ self.weights_states = weights_states
+ self.measured_states = measured_states
+ return measured_states, weights_states
+
+[docs] def get_results(self) -> Optional[pd.DataFrame]:
+ """Read the results that were saved from the optimization backend and
+ returns them as Dataframe.
+
+ Returns:
+ (results, stats) tuple of Dataframes.
+ """
+ results_file = self.optimization_backend.config.results_file
+ try:
+ results, _ = self.read_results_file(results_file)
+ return results
+ except FileNotFoundError:
+ self.logger.error("Results file %s was not found.", results_file)
+ return None
+
+[docs] @staticmethod
+ def read_results_file(results_file: str) -> Tuple[pd.DataFrame, pd.DataFrame]:
+ """
+ Read the provided csv-file as an MPC results file.
+ Args:
+ results_file: File path
+
+ Returns:
+ results, stats
+ results is the Dataframe with all inputs and outputs of the MPC
+ optimizations.
+ stats is the Dataframe with matching solver stats
+ """
+ results = load_mpc(results_file)
+ stats = load_mpc_stats(results_file)
+ return results, stats
+
+[docs] def cleanup_results(self):
+ results_file = self.optimization_backend.config.results_file
+ if not results_file:
+ return
+ os.remove(results_file)
+ os.remove(mpc_datamodels.stats_path(results_file))
+
+import logging
+
+from pydantic import field_validator, Field
+
+from agentlib_mpc.data_structures import mpc_datamodels
+from agentlib_mpc.data_structures.mpc_datamodels import MINLPVariableReference
+from agentlib_mpc.modules.mpc import BaseMPCConfig, BaseMPC
+
+logger = logging.getLogger(__name__)
+
+
+[docs]class MINLPMPCConfig(BaseMPCConfig):
+ """
+ Pydantic data model for MPC configuration parser
+ """
+
+ # AgentVariables for the controls to be optimized
+ binary_controls: mpc_datamodels.MPCVariables = Field(
+ default=[], description="List of all binary control variables of the MPC. "
+ )
+
+[docs] @field_validator("binary_controls")
+ @classmethod
+ def validate_binary_bounds(cls, binary_controls: mpc_datamodels.MPCVariables):
+ """Assures all binary variables have 0 and 1 as boundaries."""
+ for bc in binary_controls:
+ if bc.ub == 1 and bc.lb == 0:
+ continue
+ logger.warning(
+ f"Binary variable {bc.name} does not have bounds '0, 1'. This will be"
+ f" automatically changed."
+ )
+ bc.ub = 1
+ bc.lb = 0
+ return binary_controls
+
+
+[docs]class MINLPMPC(BaseMPC):
+ config: MINLPMPCConfig
+
+ def _setup_var_ref(self) -> mpc_datamodels.VariableReferenceT:
+ return MINLPVariableReference.from_config(self.config)
+
+[docs] def assert_mpc_variables_are_in_model(self):
+ """
+ Checks whether all variables of var_ref are contained in the model.
+ Returns names of model variables not contained in the var_ref,
+ sorted by keys: 'states', 'inputs', 'outputs', 'parameters'.
+ """
+
+ # arguments for validation function:
+ # (key in var_ref, model names, str for head error message)
+ args = [
+ (
+ "states",
+ self.model.get_state_names(),
+ "Differential variables / States",
+ ),
+ ("controls", self.model.get_input_names(), "Controls"),
+ ("binary_controls", self.model.get_input_names(), "Binary Controls"),
+ ("inputs", self.model.get_input_names(), "Inputs"),
+ ("outputs", self.model.get_output_names(), "Outputs"),
+ ("parameters", self.model.get_parameter_names(), "Parameters"),
+ ]
+
+ # perform validations and make a dictionary of unassigned variables
+ unassigned_by_mpc_var = {
+ key: self.assert_subset(self.var_ref.__dict__[key], names, message)
+ for key, names, message in args
+ }
+
+ # fix unassigned values for inputs
+ intersection_input = set.intersection(
+ unassigned_by_mpc_var["controls"],
+ unassigned_by_mpc_var["inputs"],
+ unassigned_by_mpc_var["binary_controls"],
+ )
+
+ # return dict should have model variables as keys, not mpc variables
+ unassigned_by_model_var = {
+ "states": unassigned_by_mpc_var["states"],
+ "inputs": intersection_input,
+ "outputs": unassigned_by_mpc_var["outputs"],
+ "parameters": unassigned_by_mpc_var["parameters"],
+ }
+
+ return unassigned_by_model_var
+
+[docs] def set_actuation(self, solution):
+ """Takes the solution from optimization backend and sends the first
+ step to AgentVariables."""
+ super().set_actuation(solution)
+ for b_control in self.var_ref.binary_controls:
+ # take the first entry of the control trajectory
+ actuation = solution[b_control][0]
+ self.set(b_control, actuation)
+
+"""Code stolen from Max Berktold"""
+
+import numpy as np
+from sklearn import kernel_approximation
+from abc import ABC, abstractmethod
+from sklearn.gaussian_process.kernels import Kernel, RBF
+
+
+[docs]class InducingPoints(ABC):
+ """
+ The idea is to reduce the effective number of input data points x to the GP
+ from n to m, with m<n, where the set of m points are called inducing points.
+ Since this makes the effective covariance matrix K smaller, many inducing
+ point approaches reduce the computational complexity from O(n^3) to O(n*m^2).
+ The smaller m is, the bigger the speed-up.
+
+ Source: https://bwengals.github.io/inducing-point-methods-to-speed-up-gps.html
+ """
+
+ def __init__(self):
+ pass
+
+[docs] @abstractmethod
+ def reduce(
+ self,
+ x: np.ndarray,
+ y: np.ndarray,
+ plot_distance_matrix: bool = True,
+ ) -> tuple[np.ndarray, np.ndarray]:
+ pass
+
+
+[docs]class NystroemReducer(InducingPoints):
+ def __init__(self, n_components: int, kernel: Kernel = None):
+ super(NystroemReducer, self).__init__()
+
+ if kernel is None:
+ kernel = RBF()
+
+ self.nystroem = kernel_approximation.Nystroem(
+ kernel=kernel, n_components=n_components
+ )
+
+[docs] def reduce(
+ self,
+ x: np.ndarray,
+ y: np.ndarray,
+ plot_distance_matrix: bool = True,
+ ) -> tuple[np.ndarray, np.ndarray]:
+ self.nystroem.fit(x, y)
+
+ return self.nystroem.components_, y[self.nystroem.component_indices_]
+
+
+if __name__ == "__main__":
+ from sklearn.gaussian_process import GaussianProcessRegressor
+ import matplotlib.pyplot as plt
+
+ n_components = 25
+
+ def f(x):
+ "some more or less complex output function"
+
+ return x[:, 0] * 2 + x[:, 1] * x[:, 0] + x[:, 1] * 3 + 1 / x[:, 1] * x[:, 1]
+
+ def get_score(x, y, x_test, y_test, message):
+ gpr = GaussianProcessRegressor(RBF(), normalize_y=True)
+ gpr.fit(x, y)
+ s = gpr.score(x_test, y_test)
+
+ print(message, s, "samples:", x.shape, y.shape)
+
+ return s
+
+ scores = list()
+
+ for i in range(100):
+ np.random.seed(i)
+
+ x_train = np.random.normal(size=(800, 2), loc=0, scale=10)
+ y_train = f(x_train)
+
+ x_test = np.random.normal(size=(200, 2), loc=0, scale=10)
+ y_test = f(x_test)
+
+ x_red, y_red = NystroemReducer(n_components=n_components).reduce(
+ x_train, y_train
+ )
+
+ score_before = get_score(x_train, y_train, x_test, y_test, "before")
+ score_after = get_score(x_red, y_red, x_test, y_test, "after")
+
+ plt.scatter(x_train.T[0], x_train.T[1], c=y_train)
+ plt.scatter(x_red.T[0], x_red.T[1], c="black", label="reduced_data")
+ plt.legend()
+ plt.title(
+ f"score_before: {round(score_before, 4)}, score_after: {round(score_after, 4)}\n n_components: {n_components}"
+ )
+ plt.show()
+
+import abc
+import logging
+import math
+from pathlib import Path
+from typing import Type, TYPE_CHECKING
+
+import numpy as np
+import pandas as pd
+import pydantic
+from agentlib.core import (
+ BaseModuleConfig,
+ Agent,
+ BaseModule,
+ AgentVariables,
+ AgentVariable,
+ Source,
+)
+from agentlib.core.errors import ConfigurationError
+from pydantic_core.core_schema import FieldValidationInfo
+
+from agentlib_mpc.data_structures.ml_model_datatypes import name_with_lag
+from agentlib_mpc.models.casadi_predictor import CasadiPredictor
+from agentlib_mpc.utils.analysis import load_sim
+from agentlib_mpc.models.serialized_ml_model import (
+ SerializedMLModel,
+ SerializedANN,
+ SerializedGPR,
+ SerializedLinReg,
+)
+from agentlib_mpc.models.serialized_ml_model import CustomGPR, MLModels
+from agentlib_mpc.data_structures import ml_model_datatypes
+from agentlib_mpc.data_structures.interpolation import InterpolationMethods
+from agentlib_mpc.utils.plotting.ml_model_test import evaluate_model
+from agentlib_mpc.utils.sampling import sample_values_to_target_grid
+
+from keras import Sequential
+
+
+logger = logging.getLogger(__name__)
+
+
+[docs]class MLModelTrainerConfig(BaseModuleConfig, abc.ABC):
+ """
+ Abstract Base Class for all Trainer Configs.
+ """
+
+ step_size: float
+ retrain_delay: float = pydantic.Field(
+ default=10000000000,
+ description="Time in seconds, after which retraining is triggered in regular"
+ " intervals",
+ )
+ inputs: AgentVariables = pydantic.Field(
+ default=[],
+ description="Variables which are inputs of the ML Model that should be trained.",
+ )
+ outputs: AgentVariables = pydantic.Field(
+ default=[],
+ description="Variables which are outputs of the ML Model that should be trained.",
+ )
+ lags: dict[str, int] = pydantic.Field(
+ default={},
+ description="Dictionary specifying the lags of each input and output variable. "
+ "If not specified, will be set to one.",
+ validate_default=True,
+ )
+ output_types: dict[str, ml_model_datatypes.OutputType] = pydantic.Field(
+ default={},
+ description="Dictionary specifying the output types of output variables. "
+ "If not specified, will be set to 'difference'.",
+ validate_default=True,
+ )
+ interpolations: dict[str, InterpolationMethods] = pydantic.Field(
+ default={},
+ description="Dictionary specifying the interpolation types of output variables. "
+ "If not specified, will be set to 'linear'.",
+ validate_default=True,
+ )
+ recursive_outputs: dict[str, bool] = pydantic.Field(
+ default={},
+ description="Dictionary specifying whether output variables are recursive, i.e."
+ " automatically appear as an input as well. If not specified, will"
+ " be set to 'recursive'.",
+ validate_default=True,
+ )
+ train_share: float = 0.7
+ validation_share: float = 0.15
+ test_share: float = 0.15
+ save_directory: Path = pydantic.Field(
+ default=None, description="Path, where created ML Models should be saved."
+ )
+ save_data: bool = pydantic.Field(
+ default=False, description="Whether the training data should be saved."
+ )
+ save_ml_model: bool = pydantic.Field(
+ default=False, description="Whether the created ML Models should be saved."
+ )
+ save_plots: bool = pydantic.Field(
+ default=False,
+ description="Whether a plot of the created ML Models performance should be saved.",
+ )
+ MLModel: AgentVariable = pydantic.Field(
+ default=AgentVariable(name="MLModel", value=None),
+ description="Serialized ML Model which can be sent to other Agents.",
+ )
+ time_series_memory_size: int = pydantic.Field(
+ default=1_000_000_000,
+ description="Maximum size of the data which is kept in memory for the ML Model "
+ "training. If saved data exceeds this value, the oldest data is "
+ "deleted.",
+ )
+ time_series_length: float = pydantic.Field(
+ default=10 * 365 * 24 * 3600,
+ description="Maximum time window of data which is kept for the ML Model training. If"
+ " saved data is older than current time minus time_series_length, "
+ "it will be deleted.",
+ )
+ use_values_for_incomplete_data: bool = pydantic.Field(
+ default=False,
+ description="Default False. If True, the values of inputs and outputs which are"
+ " defined in the config will be used for training, in case historic"
+ " data has not reached the trainer. If False, an Error will be "
+ "raised when the data is not sufficient.",
+ )
+ data_sources: list[Path] = pydantic.Field(
+ default=[],
+ description="List of paths to time series data, which can be loaded on "
+ "initialization of the agent.",
+ )
+ shared_variable_fields: list[str] = ["MLModel"]
+
+
+
+[docs] @pydantic.field_validator("lags")
+ @classmethod
+ def fill_lags(cls, lags, info: FieldValidationInfo):
+ """Adds lag one to all unspecified lags."""
+ all_features = {var.name for var in info.data["inputs"] + info.data["outputs"]}
+ lag_to_var_diff = set(lags).difference(all_features)
+ if lag_to_var_diff:
+ raise ConfigurationError(
+ f"Specified lags do not appear in variables. The following lags do not"
+ f" appear in the inputs or outputs of the ML Model: '{lag_to_var_diff}'"
+ )
+ all_lags = {feat: 1 for feat in all_features}
+ all_lags.update(lags)
+ return all_lags
+
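+ # Illustrative note (docs only): with inputs [u] and outputs [T_room],
+ # passing lags={"T_room": 4} validates to {"u": 1, "T_room": 4}, while an
+ # unknown name such as lags={"T_out": 2} raises a ConfigurationError.
+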
+[docs] @pydantic.field_validator("output_types")
+ @classmethod
+ def fill_output_types(cls, output_types, info: FieldValidationInfo):
+ """Adds output type one to all unspecified output types."""
+ output_names = {out.name for out in info.data["outputs"]}
+ type_to_var_diff = set(output_types).difference(output_names)
+ if type_to_var_diff:
+ raise ConfigurationError(
+ f"Specified outputs for output_types do not appear in variables. The "
+ f"following lags do not appear in the inputs or outputs of the ML Model: "
+ f"'{type_to_var_diff}'"
+ )
+ all_output_types = {feat: "absolute" for feat in output_names}
+ all_output_types.update(output_types)
+ return all_output_types
+
+[docs] @pydantic.field_validator("interpolations")
+ @classmethod
+ def fill_interpolations(cls, interpolations, info: FieldValidationInfo):
+ """Adds interpolation method to all unspecified methods."""
+ all_features = {var.name for var in info.data["inputs"] + info.data["outputs"]}
+ interp_to_var_diff = set(interpolations).difference(all_features)
+ if interp_to_var_diff:
+ raise ConfigurationError(
+ f"Specified outputs for output_types do not appear in variables. The "
+ f"following features do not appear in the inputs or outputs of the ML Model: "
+ f"'{interp_to_var_diff}'"
+ )
+ all_interp_methods = {feat: "linear" for feat in all_features}
+ all_interp_methods.update(interpolations)
+ return all_interp_methods
+
+[docs] @pydantic.field_validator("recursive_outputs")
+ @classmethod
+ def fill_recursive_outputs(cls, recursives, info: FieldValidationInfo):
+ """Adds recursive flag to all unspecified outputs."""
+ output_names = {var.name for var in info.data["outputs"]}
+ recursives_to_var_diff = set(recursives).difference(output_names)
+ if recursives_to_var_diff:
+ raise ConfigurationError(
+ f"Specified outputs for recursive_outputs do not appear in variables. The "
+ f"following features do not appear in the inputs or outputs of the ML Model: "
+ f"'{recursives_to_var_diff}'"
+ )
+ all_recursive_flags = {feat: True for feat in output_names}
+ all_recursive_flags.update(recursives)
+ return all_recursive_flags
+
+[docs] @pydantic.field_validator("data_sources")
+ @classmethod
+ def check_data_sources_exist(cls, data_sources: list[Path]):
+ """Checks if all given data sources exist"""
+ existing_data = []
+ for data_src in data_sources:
+ if data_src.exists():
+ existing_data.append(data_src)
+ else:
+ logger.error(f"Given data source file {data_src} does not exist.")
+ return existing_data
+
+[docs] @pydantic.field_validator("save_data", "save_ml_model")
+ @classmethod
+ def check_if_save_path_is_there(cls, save_on: bool, info: FieldValidationInfo):
+ save_path = info.data["save_directory"]
+ if save_on and save_path is None:
+ raise ConfigurationError(
+ "ML Model saving is on, but no save_directory was specified."
+ )
+ return save_on
+
+
+[docs]class MLModelTrainer(BaseModule, abc.ABC):
+ """
+ Abstract Base Class for all Trainer classes.
+ """
+
+ config: MLModelTrainerConfig
+ model_type: Type[SerializedMLModel]
+
+ def __init__(self, config: dict, agent: Agent):
+ """
+        Constructor for the ML model trainer.
+ """
+ super().__init__(config=config, agent=agent)
+ self.time_series_data = self._initialize_time_series_data()
+        history_type = dict[str, tuple[list[float], list[float]]]
+ self.history_dict: history_type = {
+ col: ([], []) for col in self.time_series_data.columns
+ }
+ self._data_sources: dict[str, Source] = {
+ var: None for var in self.time_series_data.columns
+ }
+ self.ml_model = self.build_ml_model()
+ self.input_features, self.output_features = self._define_features()
+
+ @property
+ def training_info(self) -> dict:
+ """Returns a dict with relevant config parameters regarding the training."""
+        # We exclude all fields of the base trainer, as they concern data
+        # handling etc., and the relevant information from the base trainer is
+        # already part of the serialized model. Parameters added by child
+        # classes, however, are relevant to the training of that model and are
+        # included.
+ exclude = set(MLModelTrainerConfig.model_fields)
+ return self.config.model_dump(exclude=exclude)
+
+[docs] def register_callbacks(self):
+ for feat in self.config.inputs + self.config.outputs:
+ var = self.get(feat.name)
+ self.agent.data_broker.register_callback(
+ alias=var.alias,
+ source=var.source,
+ callback=self._callback_data,
+ name=var.name,
+ )
+
+[docs] def process(self):
+ while True:
+ yield self.env.timeout(self.config.retrain_delay)
+ self._update_time_series_data()
+ serialized_ml_model = self.retrain_model()
+ self.set(self.config.MLModel.name, serialized_ml_model)
+
+ def _initialize_time_series_data(self) -> pd.DataFrame:
+ """Loads simulation data to initialize the time_series data"""
+ feature_names = list(self.config.lags.keys())
+ time_series_data = {name: pd.Series(dtype=float) for name in feature_names}
+ for ann_src in self.config.data_sources:
+ loaded_time_series = load_sim(ann_src)
+ for column in loaded_time_series.columns:
+ if column in feature_names:
+ srs = loaded_time_series[column]
+ time_series_data[column] = pd.concat(
+ [time_series_data[column], srs]
+ )
+
+ return pd.DataFrame(time_series_data)
+
+[docs] def retrain_model(self):
+ """Trains the model based on the current historic data."""
+ sampled = self.resample()
+ inputs, outputs = self.create_inputs_and_outputs(sampled)
+ training_data = self.divide_in_tvt(inputs, outputs)
+ self.fit_ml_model(training_data)
+ serialized_ml_model = self.serialize_ml_model()
+ self.save_all(serialized_ml_model, training_data)
+ return serialized_ml_model
+
+[docs] def save_all(
+ self,
+ serialized_ml_model: SerializedMLModel,
+ training_data: ml_model_datatypes.TrainingData,
+ ):
+ """Saves all relevant data and results of the training process if desired."""
+ path = Path(self.config.save_directory, self.agent_and_time)
+ if self.config.save_data:
+ training_data.save(path)
+ if self.config.save_ml_model:
+ self.save_ml_model(serialized_ml_model, path=path)
+ if self.config.save_plots:
+ evaluate_model(
+ training_data,
+ CasadiPredictor.from_serialized_model(serialized_ml_model),
+ save_path=path,
+ show_plot=False,
+ )
+
+ def _callback_data(self, variable: AgentVariable, name: str):
+ """Adds received measured inputs to the past trajectory."""
+ # check that only data from the same source is used
+ if self._data_sources[name] is None:
+ self._data_sources[name] = variable.source
+ elif self._data_sources[name] != variable.source:
+ raise ValueError(
+ f"The trainer module got data from different sources "
+ f"({self._data_sources[name]}, {variable.source}). This is likely not "
+ f"intended. Please specify the intended source in the trainer config."
+ )
+
+ time_list, value_list = self.history_dict[name]
+ time_list.append(variable.timestamp)
+ value_list.append(variable.value)
+ self.logger.debug(
+ f"Updated variable {name} with {variable.value} at {variable.timestamp} s."
+ )
+
+ def _update_time_series_data(self):
+ """Clears the history of all entries that are older than current time minus
+ horizon length."""
+ df_list: list[pd.DataFrame] = []
+ for feature_name, (time_stamps, values) in self.history_dict.items():
+ df = pd.DataFrame({feature_name: values}, index=time_stamps)
+ df_list.append(df)
+ self.time_series_data = pd.concat(df_list, axis=1).sort_index()
+
+ data = self.time_series_data
+ if not data.size:
+ return
+
+ # delete rows based on how old the data is
+ cut_off_time = self.env.now - self.config.time_series_length
+ cut_off_index = data.index.get_indexer([cut_off_time], method="backfill")[0]
+ data.drop(data.index[:cut_off_index], inplace=True)
+
+ # delete rows if the memory usage is too high
+ del_rows_at_once = 20 # currently hard-coded
+ while data.memory_usage().sum() > self.config.time_series_memory_size:
+ data.drop(data.index[:del_rows_at_once], inplace=True)
+
+[docs] @abc.abstractmethod
+ def build_ml_model(self):
+ """
+        Builds and returns an ML model.
+ """
+ pass
+
+[docs] @abc.abstractmethod
+ def fit_ml_model(self, training_data: ml_model_datatypes.TrainingData):
+ """
+ Fits the ML Model with the training data.
+ """
+ pass
+
+[docs] def resample(self) -> pd.DataFrame:
+ """Samples the available time_series data to the required step size."""
+ source_grids = {
+ col: self.time_series_data[col].dropna().index
+ for col in self.time_series_data.columns
+ }
+
+ # check if data for all features is sufficient
+ features_with_insufficient_data = []
+ for feat_name in list(source_grids):
+ if len(source_grids[feat_name]) < 5:
+ del source_grids[feat_name]
+ features_with_insufficient_data.append(feat_name)
+ if (
+ not self.config.use_values_for_incomplete_data
+ and features_with_insufficient_data
+ ):
+ raise RuntimeError(
+                f"Called the ML model trainer in strict mode, but there was insufficient data."
+ f" Features with insufficient data are: "
+ f"{features_with_insufficient_data}"
+ )
+
+ # make target grid, which spans the maximum length, where data for every feature
+ # is available
+ start = max(sg[0] for sg in source_grids.values())
+ stop = min(sg[-1] for sg in source_grids.values())
+ target_grid = np.arange(start, stop, self.config.step_size)
+
+ # perform interpolation for all features with sufficient length
+ sampled = {}
+ for name, sg in source_grids.items():
+ single_sampled = sample_values_to_target_grid(
+ values=self.time_series_data[name].dropna(),
+ original_grid=sg,
+ target_grid=target_grid,
+ method=self.config.interpolations[name],
+ )
+ sampled[name] = single_sampled
+ sampled_data = pd.DataFrame(sampled, index=target_grid)
+
+ # pad data with fix values when data is incomplete
+ if self.config.use_values_for_incomplete_data:
+ length = len(target_grid)
+ for feat_name in features_with_insufficient_data:
+ sampled_data[feat_name] = [self.get(feat_name).value] * length
+
+ return sampled_data
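+
+    # Editor's sketch of the resampling above: if one feature has data on
+    # [0, 95] s and another on [10, 120] s with step_size = 10, the target
+    # grid is np.arange(10, 95, 10), i.e. the largest span covered by every
+    # feature, and each series is interpolated onto it with its configured
+    # method.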
+
+[docs] def serialize_ml_model(self) -> SerializedMLModel:
+ """
+        Serializes the ML model so that it can be saved as a json file.
+ Returns:
+ SerializedMLModel version of the passed ML Model.
+ """
+ ann_inputs, ann_outputs = self._define_features()
+
+ serialized_ann = self.model_type.serialize(
+ model=self.ml_model,
+ dt=self.config.step_size,
+ input=ann_inputs,
+ output=ann_outputs,
+ training_info=self.training_info,
+ )
+ return serialized_ann
+
+[docs] def save_ml_model(self, serialized_ml_model: SerializedMLModel, path: Path):
+ """Saves the ML Model in serialized format."""
+ serialized_ml_model.save_serialized_model(path=Path(path, "ml_model.json"))
+
+ def _define_features(
+ self,
+ ) -> tuple[
+ dict[str, ml_model_datatypes.Feature],
+ dict[str, ml_model_datatypes.OutputFeature],
+ ]:
+        """Defines dictionaries for all features of the ML model based on the
+        inputs and outputs. This also fixes the order in which the serialized
+        model is exported."""
+ ann_inputs = {}
+ for name in self.input_names:
+ ann_inputs[name] = ml_model_datatypes.Feature(
+ name=name,
+ lag=self.config.lags[name],
+ )
+ ann_outputs = {}
+ for name in self.output_names:
+ ann_outputs[name] = ml_model_datatypes.OutputFeature(
+ name=name,
+ lag=self.config.lags[name],
+ output_type=self.config.output_types[name],
+ recursive=self.config.recursive_outputs[name],
+ )
+ return ann_inputs, ann_outputs
+
+ @property
+ def agent_and_time(self) -> str:
+ """A string that specifies id and time. Used to create save paths"""
+ return f"{self.agent.id}_{self.id}_{self.env.now}"
+
+ @property
+ def input_names(self):
+ return [inp.name for inp in self.config.inputs]
+
+ @property
+ def output_names(self):
+ return [out.name for out in self.config.outputs]
+
+[docs] def create_inputs_and_outputs(
+ self, full_data_sampled: pd.DataFrame
+ ) -> tuple[pd.DataFrame, pd.DataFrame]:
+ """Creates extra columns in the data which contain the shifted time-series data
+ which is lagged accordingly. Returns a tuple (input_data, output_data)"""
+ # inputs are all inputs, plus recursive outputs with lag
+ inps = [name_with_lag(v.name, 0) for v in self.config.inputs]
+ inps.extend(
+ [
+ name_with_lag(v.name, 0)
+ for v in self.config.outputs
+ if self.config.recursive_outputs[v.name]
+ ]
+ )
+
+ outs = [v.name for v in self.config.outputs]
+ input_df = pd.DataFrame(columns=inps)
+        output_df = pd.DataFrame(columns=outs)
+
+ # inputs
+ for input_name in input_df.columns:
+ lag: int = self.config.lags[input_name]
+ for k in range(0, lag):
+ name = name_with_lag(input_name, k)
+ input_df[name] = full_data_sampled[input_name].shift(k)
+
+ # output
+ for output_name in output_df.columns:
+ output_df[output_name] = self._create_output_column(
+ name=output_name, column=full_data_sampled[output_name]
+ )
+
+ # some rows have nan now due to lags and output shift, we remove them
+        na_rows = input_df.isna().any(axis=1) | output_df.isna().any(axis=1)
+ input_df = input_df.loc[~na_rows]
+ output_df = output_df.loc[~na_rows]
+
+ # we have to make sure the columns are in consistent order, so the network is
+ # trained in the same way, that its features are defined when exported
+ columns_ordered = ml_model_datatypes.column_order(
+ inputs=self.input_features, outputs=self.output_features
+ )
+ input_df = input_df[columns_ordered]
+
+ return input_df, output_df
+
+ def _create_output_column(self, name: str, column: pd.Series):
+ """Creates an output column in the table for training data. Depending on
+ whether the feature is recursive, or represents a time delta, some changes have
+ to be made."""
+ output_type = self.config.output_types[name]
+ recursive = self.config.recursive_outputs[name]
+ if not recursive:
+ return column
+ if output_type == ml_model_datatypes.OutputType.difference:
+ return column.shift(-1) - column
+ else: # output_type == OutputType.absolute
+ return column.shift(-1)
+
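+    # Editor's illustration of _create_output_column: for a recursive output
+    # with values [20.0, 21.0, 23.0], an "absolute" output type yields the
+    # shifted targets [21.0, 23.0, NaN], while "difference" yields
+    # [1.0, 2.0, NaN]; rows containing NaN are dropped afterwards in
+    # create_inputs_and_outputs.
+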
+[docs] def divide_in_tvt(
+ self,
+ inputs: pd.DataFrame,
+ outputs: pd.DataFrame,
+ ):
+        """Splits the samples into training, validation and testing sets."""
+
+ # calculate the sample count and shares
+ num_of_samples = inputs.shape[0]
+ n_training = int(self.config.train_share * num_of_samples)
+ n_validation = n_training + int(self.config.validation_share * num_of_samples)
+
+ # shuffle the data
+ permutation = np.random.permutation(num_of_samples)
+ inputs = inputs.iloc[permutation]
+ outputs = outputs.iloc[permutation]
+
+ # split the data
+ return ml_model_datatypes.TrainingData(
+ training_inputs=inputs.iloc[0:n_training],
+ training_outputs=outputs.iloc[0:n_training],
+ validation_inputs=inputs.iloc[n_training:n_validation],
+ validation_outputs=outputs.iloc[n_training:n_validation],
+ test_inputs=inputs.iloc[n_validation:],
+ test_outputs=outputs.iloc[n_validation:],
+ )
+
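+# Editor's worked example for divide_in_tvt: with 100 samples,
+# train_share = 0.7 and validation_share = 0.15, n_training = 70 and
+# n_validation = 85, so the shuffled data is split 70 / 15 / 15 between
+# training, validation and test sets.
+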
+
+[docs]class ANNTrainerConfig(MLModelTrainerConfig):
+ """
+ Pydantic data model for ANNTrainer configuration parser
+ """
+
+ epochs: int = 100
+ batch_size: int = 100
+ layers: list[tuple[int, ml_model_datatypes.Activation]] = pydantic.Field(
+ default=[(16, "sigmoid")],
+ description="Hidden layers which should be created for the ANN. An ANN always "
+ "has a BatchNormalization Layer, and an Output Layer the size of "
+ "the output dimensions. Additional hidden layers can be specified "
+ "here as a list of tuples: "
+ "(#neurons of layer, activation function).",
+ )
+ early_stopping: ml_model_datatypes.EarlyStoppingCallback = pydantic.Field(
+ default=ml_model_datatypes.EarlyStoppingCallback(),
+ description="Specification of the EarlyStopping Callback for training",
+ )
+
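+# Editor's sketch (the values are assumptions) of the layer specification
+# above:
+#
+#     "layers": [(32, "relu"), (16, "sigmoid")]
+#
+# builds two hidden Dense layers (32 relu units, then 16 sigmoid units)
+# between the BatchNormalization layer and the linear output layer created in
+# ANNTrainer.build_ml_model.
+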
+
+[docs]class ANNTrainer(MLModelTrainer):
+ """
+ Module that generates ANNs based on received data.
+ """
+
+ config: ANNTrainerConfig
+ model_type = SerializedANN
+
+ def __init__(self, config: dict, agent: Agent):
+ super().__init__(config, agent)
+
+[docs] def build_ml_model(self) -> Sequential:
+        """Builds a sequential ANN from the configured hidden layers."""
+ from keras import layers
+
+ ann = Sequential()
+ ann.add(layers.BatchNormalization(axis=1))
+ for units, activation in self.config.layers:
+ ann.add(layers.Dense(units=units, activation=activation))
+ ann.add(layers.Dense(units=len(self.config.outputs), activation="linear"))
+ ann.compile(loss="mse", optimizer="adam")
+ return ann
+
+[docs] def fit_ml_model(self, training_data: ml_model_datatypes.TrainingData):
+ callbacks = []
+ if self.config.early_stopping.activate:
+ callbacks.append(self.config.early_stopping.callback())
+
+ self.ml_model.fit(
+ x=training_data.training_inputs,
+ y=training_data.training_outputs,
+ validation_data=(
+ training_data.validation_inputs,
+ training_data.validation_outputs,
+ ),
+ epochs=self.config.epochs,
+ batch_size=self.config.batch_size,
+ callbacks=callbacks,
+ )
+
+
+[docs]class GPRTrainerConfig(MLModelTrainerConfig):
+ """
+ Pydantic data model for GPRTrainer configuration parser
+ """
+
+ constant_value_bounds: tuple = (1e-3, 1e5)
+ length_scale_bounds: tuple = (1e-3, 1e5)
+ noise_level_bounds: tuple = (1e-3, 1e5)
+ noise_level: float = 1.5
+ normalize: bool = pydantic.Field(
+ default=False,
+        description="Defines whether the training data and the inputs for "
+        "prediction are normalized before being given to the GPR.",
+ )
+ scale: float = pydantic.Field(
+ default=1.0,
+ description="Defines by which value the output data is divided for training and "
+ "multiplied after prediction.",
+ )
+ n_restarts_optimizer: int = pydantic.Field(
+ default=0,
+        description="Defines the number of restarts of the optimizer for the "
+        "hyperparameters of the kernel.",
+ )
+
+
+[docs]class GPRTrainer(MLModelTrainer):
+ """
+    Module that generates GPRs based on received data.
+ """
+
+ config: GPRTrainerConfig
+ model_type = SerializedGPR
+
+ def __init__(self, config: dict, agent: Agent):
+ super().__init__(config, agent)
+
+[docs] def build_ml_model(self):
+ """Build a GPR with a constant Kernel in combination with a white kernel."""
+ from sklearn.gaussian_process.kernels import ConstantKernel, RBF, WhiteKernel
+
+ kernel = ConstantKernel(
+ constant_value_bounds=self.config.constant_value_bounds
+ ) * RBF(length_scale_bounds=self.config.length_scale_bounds) + WhiteKernel(
+ noise_level=self.config.noise_level,
+ noise_level_bounds=self.config.noise_level_bounds,
+ )
+
+ gpr = CustomGPR(
+ kernel=kernel,
+ copy_X_train=False,
+ n_restarts_optimizer=self.config.n_restarts_optimizer,
+ )
+ gpr.data_handling.normalize = self.config.normalize
+ gpr.data_handling.scale = self.config.scale
+ return gpr
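+
+        # Editor's note: with the defaults above this builds the kernel
+        #     ConstantKernel() * RBF() + WhiteKernel(noise_level=1.5),
+        # i.e. a scaled squared-exponential kernel plus additive white noise
+        # whose level is fitted within noise_level_bounds.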
+
+[docs] def fit_ml_model(self, training_data: ml_model_datatypes.TrainingData):
+ """Fits GPR to training data"""
+ if self.config.normalize:
+ x_train = self._normalize(training_data.training_inputs.to_numpy())
+ else:
+ x_train = training_data.training_inputs
+ y_train = training_data.training_outputs / self.config.scale
+ self.ml_model.fit(
+ X=x_train,
+ y=y_train,
+ )
+
+ def _normalize(self, x: np.ndarray):
+        # compute mean and standard deviation per input dimension
+        mean = x.mean(axis=0, dtype=float)
+        std = x.std(axis=0, dtype=float)
+        # guard against zero variance, which would cause division by zero
+        for idx, val in enumerate(std):
+            if val == 0:
+                logger.info(
+                    "Encountered zero while normalizing. Continuing with a std"
+                    " of one for this input."
+                )
+                std[idx] = 1.0
+
+ # save mean and standard deviation to data_handling
+ self.ml_model.data_handling.mean = mean.tolist()
+ self.ml_model.data_handling.std = std.tolist()
+
+ # normalize x and return
+ return (x - mean) / std
+
+
+[docs]class LinRegTrainerConfig(MLModelTrainerConfig):
+ """
+    Pydantic data model for LinRegTrainer configuration parser
+ """
+
+
+[docs]class LinRegTrainer(MLModelTrainer):
+ """
+    Module that generates linear regression models based on received data.
+ """
+
+ config: LinRegTrainerConfig
+ model_type = SerializedLinReg
+
+ def __init__(self, config: dict, agent: Agent):
+ super().__init__(config, agent)
+
+[docs] def build_ml_model(self):
+ """Build a linear model."""
+ from sklearn.linear_model import LinearRegression
+
+ linear_model = LinearRegression()
+ return linear_model
+
+[docs] def fit_ml_model(self, training_data: ml_model_datatypes.TrainingData):
+ """Fits linear model to training data"""
+ self.ml_model.fit(
+ X=training_data.training_inputs,
+ y=training_data.training_outputs,
+ )
+
+
+ml_model_trainer = {
+ MLModels.ANN: ANNTrainer,
+ MLModels.GPR: GPRTrainer,
+ MLModels.LINREG: LinRegTrainer,
+}
+
+"""Module which generates random set points within a comfort zone. Code heavily stolen
+from Max Berktold"""
+
+import datetime
+import random
+
+from agentlib.core import BaseModuleConfig, BaseModule, Agent, AgentVariable
+
+
+[docs]class SetPointGeneratorConfig(BaseModuleConfig):
+ """
+ Pydantic data model for ANNTrainer configuration parser
+ """
+
+ target_variable: AgentVariable = AgentVariable(name="target")
+ day_start: int = 8
+ day_end: int = 16
+ day_lb: float = 273.15 + 19
+ night_lb: float = 273.15 + 16
+ day_ub: float = 273.15 + 21
+ night_ub: float = 273.15 + 24
+ interval: int = 60 * 60 * 4
+ shared_variable_fields: list[str] = ["target_variable"]
+
+
+[docs]class SetPointGenerator(BaseModule):
+ """
+ Module that generates and sends random set points based on daytime and values.
+ """
+
+ config: SetPointGeneratorConfig
+
+ def __init__(self, config: dict, agent: Agent):
+ """
+        Constructor for the set point generator.
+ """
+ super().__init__(config=config, agent=agent)
+ self.last_randomization: float = self.env.time
+ lb, ub = self._bounds()
+ self.current_target = random.uniform(lb, ub)
+
+[docs] def process(self):
+ while True:
+ self.update_target()
+ self.set(self.config.target_variable.name, self.current_target)
+ yield self.env.timeout(self.config.interval)
+
+[docs] def update_target(self):
+ """Updates the control target for a given time"""
+
+ time = self.env.time
+ lb, ub = self._bounds()
+
+ # update target, if enough time has passed or the target violates boundaries
+ if (
+ time - self.last_randomization >= self.config.interval
+ or self.current_target < lb
+ or ub < self.current_target
+ ):
+ self.current_target = random.uniform(lb, ub)
+ self.last_randomization = time
+ self.logger.debug(
+ f"Set target {self.config.target_variable.name} to "
+ f"{self.current_target:.2f} {self.config.target_variable.unit}"
+ )
+
+ def _bounds(self) -> tuple[float, float]:
+ """Returns the lower and upper bound for a given time"""
+
+ if self._is_weekend():
+ return self.config.night_lb, self.config.night_ub
+
+ if self._is_daytime():
+ return self.config.day_lb, self.config.day_ub
+
+ return self.config.night_lb, self.config.night_ub
+
+ def _is_daytime(self) -> bool:
+ """Returns True if the given time is during day"""
+
+ time = datetime.datetime.fromtimestamp(self.env.time)
+
+ return self.config.day_start <= time.hour <= self.config.day_end
+
+ def _is_weekend(self) -> bool:
+ """returns True if the given time is during weekend"""
+
+ time = datetime.datetime.fromtimestamp(self.env.time)
+
+ return 5 <= time.weekday()
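+
+    # Editor's note: with the defaults above, weekdays during hours 8 through
+    # 16 draw targets from [292.15, 294.15] K (19-21 °C); nights and weekends
+    # draw from [289.15, 297.15] K (16-24 °C).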
+
+"""Holds the base class for MPCs."""
+
+import os
+from typing import Tuple, Dict, Optional
+
+import pandas as pd
+from pydantic import Field, field_validator
+
+from agentlib.core.datamodels import (
+ AgentVariable,
+)
+from agentlib.core import Model, BaseModule, BaseModuleConfig, Agent
+from agentlib.core.errors import OptionalDependencyError, ConfigurationError
+from agentlib.utils import custom_injection
+from pydantic_core.core_schema import FieldValidationInfo
+
+from agentlib_mpc.data_structures.mpc_datamodels import (
+ VariableReference,
+ InitStatus,
+ Results,
+)
+from agentlib_mpc.optimization_backends import backend_types, uninstalled_backend_types
+from agentlib_mpc.optimization_backends.backend import (
+ OptimizationBackend,
+ OptimizationBackendT,
+)
+from agentlib_mpc.data_structures import mpc_datamodels
+from agentlib_mpc.utils.analysis import load_mpc, load_mpc_stats
+
+
+[docs]class BaseMPCConfig(BaseModuleConfig):
+ """
+ Pydantic data model for MPC configuration parser
+ """
+
+ # todo use config of optimization backend in annotation and create like modules
+ optimization_backend: dict
+ time_step: float = Field(
+ default=60,
+ ge=0,
+ description="Time step of the MPC.",
+ )
+ prediction_horizon: int = Field(
+ default=5,
+ ge=0,
+ description="Prediction horizon of the MPC.",
+ )
+ sampling_time: Optional[float] = Field(
+ default=None, # seconds
+ description="Sampling interval for control steps. If None, will be the same as"
+ " time step. Does not affect the discretization of the MPC, "
+ "only the interval with which there will be optimization steps.",
+ validate_default=True,
+ )
+ parameters: mpc_datamodels.MPCVariables = Field(
+ default=[],
+ description="List of model parameters of the MPC. They are "
+ "constant over the horizon. Parameters not listed "
+ "here will have their default from the model file.",
+ )
+ inputs: mpc_datamodels.MPCVariables = Field(
+ default=[],
+ description="List of all input variables of the MPC. Includes "
+ "predictions for disturbances, set_points, dynamic "
+ "constraint boundaries etc.",
+ )
+ outputs: mpc_datamodels.MPCVariables = Field(
+ default=[], description="List of all shared outputs of the MPC. "
+ )
+ # AgentVariables for the controls to be optimized
+ controls: mpc_datamodels.MPCVariables = Field(
+ default=[], description="List of all control variables of the MPC. "
+ )
+ # AgentVariables for the initial condition of states to be optimized
+ states: mpc_datamodels.MPCVariables = Field(
+ default=[],
+ description="List of all differential states of the MPC. The "
+ "entries can define the boundaries and the source for the measurements",
+ )
+ set_outputs: bool = Field(
+ default=False,
+ description="Sets the full output time series to the data broker.",
+ )
+ shared_variable_fields: list[str] = ["outputs", "controls"]
+
+[docs] @field_validator("sampling_time")
+ @classmethod
+ def default_sampling_time(cls, samp_time, info: FieldValidationInfo):
+ if samp_time is None:
+ samp_time = info.data["time_step"]
+ return samp_time
+
+
+[docs]def create_optimization_backend(optimization_backend, agent_id):
+ """Set up the optimization_backend"""
+ optimization_backend = optimization_backend.copy()
+ if "type" not in optimization_backend:
+ raise KeyError(
+            "Given optimization backend config does not contain key 'type'."
+ )
+ _type = optimization_backend.pop("type")
+ optimization_backend["name"] = agent_id
+ if isinstance(_type, dict):
+ custom_cls = custom_injection(config=_type)
+ backend = custom_cls(**optimization_backend)
+ elif isinstance(_type, str):
+ if _type in uninstalled_backend_types:
+ raise OptionalDependencyError(
+ dependency_name=_type,
+ dependency_install=uninstalled_backend_types[_type],
+ )
+ if _type not in backend_types:
+ raise TypeError(
+ f"Given backend is not a valid internal optimization "
+ f"backend. Supported backends are "
+ f"{', '.join(list(backend_types.keys()))}"
+ )
+ backend = backend_types[_type](config=optimization_backend)
+ else:
+ raise TypeError(
+ f"Error loading optimization backend. Config "
+ f"'type' has to be either str or dict. Got "
+ f"{type(_type)} instead. "
+ )
+ assert isinstance(backend, OptimizationBackend)
+ return backend
+
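+# Editor's sketch of a typical call (the config keys shown are assumptions;
+# the model and discretization sub-configs are elided):
+#
+#     backend = create_optimization_backend(
+#         {"type": "casadi", "model": {...}, "discretization_options": {...}},
+#         agent_id="agent_1",
+#     )
+#
+# Passing a dict as "type" instead triggers custom injection of a user-defined
+# backend class; unknown string types raise a TypeError listing the supported
+# backends.
+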
+
+[docs]class BaseMPC(BaseModule):
+ """
+    A base class for model predictive controllers. It sets up the optimization
+    backend and handles the data exchange with the agent.
+ """
+
+ config: BaseMPCConfig
+
+ def __init__(self, config: dict, agent: Agent):
+ """
+ Constructor for model predictive controller (MPC).
+ Args:
+ config: name of the module
+ agent: agent the module belongs to
+ Configs:
+ outputs (object):
+ inputs (object):
+ ts: time step in s
+ n (int): prediction horizon
+ nc (int): control horizon (default prediction horizon)
+ """
+ self.init_status = mpc_datamodels.InitStatus.pre_module_init
+ super().__init__(config=config, agent=agent)
+
+ # Check that module config and model variables match
+ unassigned_model_variables = self.assert_mpc_variables_are_in_model()
+ assert unassigned_model_variables["inputs"] == set(), (
+ f"All model inputs must be declared in the MPC config. Model "
+ f"variable(s) '{unassigned_model_variables['inputs']}' is/are free."
+ )
+
+ def _setup_optimization_backend(self) -> OptimizationBackendT:
+ """Performs the setup of the optimization_backend, keeps track of status"""
+ self.init_status = mpc_datamodels.InitStatus.during_update
+ opti_back = create_optimization_backend(
+ self.config.optimization_backend, self.agent.id
+ )
+ opti_back.register_logger(self.logger)
+ disc_opts = opti_back.config.discretization_options
+ disc_opts.prediction_horizon = self.config.prediction_horizon
+ disc_opts.time_step = self.config.time_step
+ return opti_back
+
+ def _setup_var_ref(self) -> mpc_datamodels.VariableReferenceT:
+ return VariableReference.from_config(self.config)
+
+ def _after_config_update(self):
+ self.var_ref: mpc_datamodels.VariableReferenceT = self._setup_var_ref()
+ self.optimization_backend: OptimizationBackendT = (
+ self._setup_optimization_backend()
+ )
+ self._init_optimization()
+ self.init_status = mpc_datamodels.InitStatus.ready
+
+[docs] def assert_subset(self, mpc_names, model_names, message_head):
+ """
+ Helper function for assert assert_mpc_variables_are_in_model. Asserts
+ the variables of the var_ref corresponding to ref_key are a subset of
+ a list of names provided (usually obtained from the model) and prints
+ out an error if false. Returns the portion of model_names that are
+ not in the given var_ref.
+ """
+ assert set(mpc_names).issubset(model_names), (
+ f"{message_head} of MPC {self.agent.id} are not contained in "
+ f"model. Names must match. The following variables defined for the "
+ f"MPC do not appear in the model: "
+ f"'{set(mpc_names).difference(model_names)}'."
+ )
+ return set(model_names).difference(mpc_names)
+
+[docs] def assert_mpc_variables_are_in_model(self) -> dict[str, set[str]]:
+ """
+ Checks whether all variables of var_ref are contained in the model.
+ Returns names of model variables not contained in the var_ref,
+ sorted by keys: 'states', 'inputs', 'outputs', 'parameters'.
+ """
+
+ # arguments for validation function:
+ # (key in var_ref, model names, str for head error message)
+ args = [
+ (
+ "states",
+ self.model.get_state_names(),
+ "Differential variables / States",
+ ),
+ ("controls", self.model.get_input_names(), "Controls"),
+ ("inputs", self.model.get_input_names(), "Inputs"),
+ ("outputs", self.model.get_output_names(), "Outputs"),
+ ("parameters", self.model.get_parameter_names(), "Parameters"),
+ ]
+
+ # perform validations and make a dictionary of unassigned variables
+ unassigned_by_mpc_var = {
+ key: self.assert_subset(self.var_ref.__dict__[key], names, message)
+ for key, names, message in args
+ }
+
+ # fix unassigned values for inputs
+ intersection_input = set(unassigned_by_mpc_var["controls"]).intersection(
+ unassigned_by_mpc_var["inputs"]
+ )
+
+ # return dict should have model variables as keys, not mpc variables
+ unassigned_by_model_var = {
+ "states": unassigned_by_mpc_var["states"],
+ "inputs": intersection_input,
+ "outputs": unassigned_by_mpc_var["outputs"],
+ "parameters": unassigned_by_mpc_var["parameters"],
+ }
+
+ return unassigned_by_model_var
+
+[docs] def collect_variables_for_optimization(
+ self, var_ref: mpc_datamodels.VariableReference = None
+ ) -> Dict[str, AgentVariable]:
+ """Gets all variables noted in the var ref and puts them in a flat
+ dictionary."""
+ if var_ref is None:
+ var_ref = self.var_ref
+ return {v: self.get(v) for v in var_ref.all_variables()}
+
+ # class AgVarDropin:
+ # ub: float
+ # lb: float
+ # value: Union[float, list, pd.Series]
+ # interpolation_method: InterpolationMethod
+
+[docs] def process(self):
+ while True:
+ self.do_step()
+ yield self.env.timeout(self.config.time_step)
+
+[docs] def register_callbacks(self):
+ """Registers the init_optimization callback to all parameters which
+ cannot be changed without recreating the optimization problem."""
+ for key in OptimizationBackend.mpc_backend_parameters:
+ self.agent.data_broker.register_callback(
+ alias=key, source=None, callback=self.re_init_optimization
+ )
+
+ def _init_optimization(self):
+ """Performs the setup of the optimization backend."""
+ try:
+ self.optimization_backend.setup_optimization(var_ref=self.var_ref)
+ except (RuntimeError, ValueError) as err:
+ raise ConfigurationError(
+ f"The optimization backend of Agent {self.source} could not "
+ f"finish its setup!"
+ ) from err
+ self.logger.info("%s: Initialized optimization problem.", self.agent.id)
+
+[docs] def re_init_optimization(self, parameter: AgentVariable):
+ """Re-initializes the optimization backend with new parameters."""
+ self.optimization_backend.discretization_options[
+ parameter.name
+ ] = parameter.value
+ self._init_optimization()
+
+ @property
+ def model(self) -> Model:
+ """
+ Getter for current simulation model
+
+ Returns:
+ agentlib.model: Current simulation model
+ """
+ return self.optimization_backend.model
+
+[docs] def pre_computation_hook(self):
+ """
+ This method is called in every computation step before the optimization starts.
+ Overwrite this method in a derived subclass if you want to take some actions
+ each time before the optimal control problem is solved.
+ """
+ pass
+
+[docs] def do_step(self):
+ """
+ Performs an MPC step.
+ """
+ if not self.init_status == InitStatus.ready:
+ self.logger.warning("Skipping step, optimization_backend is not ready.")
+ return
+
+ self.pre_computation_hook()
+
+ # get new values from data_broker
+ updated_vars = self.collect_variables_for_optimization()
+
+ # solve optimization problem with up-to-date values from data_broker
+ result = self.optimization_backend.solve(self.env.time, updated_vars)
+
+ # Set variables in data_broker
+ self.set_actuation(result)
+ self.set_output(result)
+
+[docs] def set_actuation(self, solution: Results):
+ """Takes the solution from optimization backend and sends the first
+ step to AgentVariables."""
+ self.logger.info("Sending optimal control values to data_broker.")
+ for control in self.var_ref.controls:
+ # take the first entry of the control trajectory
+ actuation = solution[control][0]
+ self.set(control, actuation)
+
+[docs] def set_output(self, solution: Results):
+ """Takes the solution from optimization backend and sends it to AgentVariables."""
+        # Output must be defined in the config as "type"="pd.Series"
+ if not self.config.set_outputs:
+ return
+ self.logger.info("Sending optimal output values to data_broker.")
+ df = solution.df
+ for output in self.var_ref.outputs:
+ series = df.variable[output]
+ self.set(output, series)
+
+[docs] def get_results(self) -> Optional[pd.DataFrame]:
+        """Reads the results that were saved by the optimization backend and
+        returns them as a DataFrame.
+
+        Returns:
+            The results DataFrame, or None if no results were saved.
+ """
+ results_file = self.optimization_backend.config.results_file
+ if results_file is None or not self.optimization_backend.config.save_results:
+            self.logger.info("No results were saved.")
+ return None
+ try:
+ result, stat = self.read_results_file(results_file)
+ self.warn_for_missed_solves(stat)
+ return result
+ except FileNotFoundError:
+ self.logger.error("Results file %s was not found.", results_file)
+ return None
+
+[docs] def warn_for_missed_solves(self, stats: Optional[pd.DataFrame]):
+        """
+        Checks the solver stats and logs a warning if any optimizations failed.
+        """
+ if stats is None:
+ return
+ if stats["success"].all():
+ return
+ failures = ~stats["success"]
+ failure_indices = failures[failures].index.tolist()
+ self.logger.warning(
+ f"Warning: There were failed optimizations at the following times: "
+ f"{failure_indices}."
+ )
+
+[docs] @staticmethod
+ def read_results_file(results_file: str) -> Tuple[pd.DataFrame, pd.DataFrame]:
+ """
+ Read the provided csv-file as an MPC results file.
+ Args:
+ results_file: File path
+
+ Returns:
+ results, stats
+ results is the Dataframe with all inputs and outputs of the MPC
+ optimizations.
+ stats is the Dataframe with matching solver stats
+ """
+
+ results = load_mpc(results_file)
+ stats = load_mpc_stats(results_file)
+ return results, stats
+
+[docs] def cleanup_results(self):
+ results_file = self.optimization_backend.config.results_file
+ if not results_file:
+ return
+ os.remove(results_file)
+ os.remove(mpc_datamodels.stats_path(results_file))
+
+"""Holds the class for full featured MPCs."""
+
+import numpy as np
+import pandas as pd
+from agentlib.core import AgentVariable
+
+from agentlib_mpc.data_structures import mpc_datamodels
+from pydantic import Field, field_validator, FieldValidationInfo
+from rapidfuzz import process, fuzz
+
+from agentlib_mpc.modules.mpc import BaseMPCConfig, BaseMPC
+
+
+[docs]class MPCConfig(BaseMPCConfig):
+ """
+ Pydantic data model for MPC configuration parser
+ """
+
+ r_del_u: dict[str, float] = Field(
+ default={},
+ description="Weights that are applied to the change in control variables.",
+ )
+
+[docs] @field_validator("r_del_u")
+ def check_r_del_u_in_controls(
+ cls, r_del_u: dict[str, float], info: FieldValidationInfo
+ ):
+ """Ensures r_del_u is only set for control variables."""
+ controls = {ctrl.name for ctrl in info.data["controls"]}
+ for name in r_del_u:
+ if name in controls:
+ # everything is fine
+ continue
+
+ # raise error
+ matches = process.extract(query=name, choices=controls, scorer=fuzz.WRatio)
+ matches = [m[0] for m in matches]
+ raise ValueError(
+ f"Tried to specify control change weight for {name}. However, "
+ f"{name} is not in the set of control variables. Did you mean one "
+ f"of these? {', '.join(matches)}"
+ )
+ return r_del_u
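+
+    # Editor's example (names assumed): with controls ["mDot", "T_set"] and
+    # r_del_u = {"mdot": 1.0}, validation fails and rapidfuzz suggests the
+    # closest control names: "Did you mean one of these? mDot, T_set".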
+
+
+[docs]class MPC(BaseMPC):
+ """
+    A full model predictive controller. In addition to the base class, it
+    supports weights on control change (r_del_u) and keeps histories for
+    lagged variables.
+ """
+
+ config: MPCConfig
+
+ def _init_optimization(self):
+ super()._init_optimization()
+ self._lags_dict_seconds = self.optimization_backend.get_lags_per_variable()
+
+ history = {}
+ # create a dict to keep track of all values for lagged variables timestamped
+ for v in self._lags_dict_seconds:
+ var = self.get(v)
+ history[v] = {}
+ # store scalar values as initial if they exist
+ if isinstance(var.value, (float, int)):
+ timestamp = var.timestamp or self.env.time
+ value = var.value
+            elif var.value is None:
+                self.logger.info(
+                    "Initializing history for variable %s, but no value was"
+                    " available. Interpolating between bounds or setting to zero.",
+                    v,
+                )
+                timestamp = self.env.time
+                value = np.nan_to_num(
+                    (var.ub + var.lb) / 2, posinf=1000, neginf=1000
+                )
+ else:
+ # in this case it should probably be a series, which we can take as is
+ continue
+ history[v][timestamp] = value
+ self.history: dict[str, dict[float, float]] = history
+ self.register_callbacks_for_lagged_variables()
+
+ def _remove_old_values_from_history(self):
+ """Clears the history of all entries that are older than current time minus
+ horizon length."""
+ # iterate over all variables which save lag
+ for var_name, lag_in_seconds in self._lags_dict_seconds.items():
+ var_history = self.history[var_name]
+
+ # iterate over all saved values and delete them, if they are too old
+ for timestamp in list(var_history):
+ if timestamp < (self.env.time - lag_in_seconds):
+ var_history.pop(timestamp)
+
+ def _callback_hist_vars(self, variable: AgentVariable, name: str):
+ """Adds received measured inputs to the past trajectory."""
+ # if variables are intentionally sent as series, we don't need to store them
+ # ourselves
+ # only store scalar values
+ if isinstance(variable.value, (float, int)):
+ self.history[name][variable.timestamp] = variable.value
+
+[docs] def register_callbacks_for_lagged_variables(self):
+ """Registers callbacks which listen to the variables which have to be saved as
+ time series. These callbacks save the values in the history for use in the
+ optimization."""
+
+ for lagged_input in self._lags_dict_seconds:
+ var = self.get(lagged_input)
+ self.agent.data_broker.register_callback(
+ alias=var.alias,
+ source=var.source,
+ callback=self._callback_hist_vars,
+ name=var.name,
+ )
+
+ def _after_config_update(self):
+ self._internal_variables = self._create_internal_variables()
+ super()._after_config_update()
+
+ def _setup_var_ref(self) -> mpc_datamodels.VariableReferenceT:
+ return mpc_datamodels.FullVariableReference.from_config(self.config)
+
+[docs] def collect_variables_for_optimization(
+ self, var_ref: mpc_datamodels.VariableReference = None
+ ) -> dict[str, AgentVariable]:
+ """Gets all variables noted in the var ref and puts them in a flat
+ dictionary."""
+ if var_ref is None:
+ var_ref = self.var_ref
+
+ # config variables
+ variables = {v: self.get(v) for v in var_ref.all_variables()}
+
+ # history variables
+ for hist_var in self._lags_dict_seconds:
+ past_values = self.history[hist_var]
+ if not past_values:
+ # if the history of a variable is empty, fallback to the scalar value
+ continue
+
+ # create copy to not mess up scalar value of original variable in case
+ # fallback is needed
+ updated_var = variables[hist_var].copy(
+ update={"value": pd.Series(past_values)}
+ )
+ variables[hist_var] = updated_var
+
+ return {**variables, **self._internal_variables}
+
+ # class AgVarDropin:
+ # ub: float
+ # lb: float
+ # value: Union[float, list, pd.Series]
+ # interpolation_method: InterpolationMethod
+
+ def _create_internal_variables(self) -> dict[str, AgentVariable]:
+ """Creates a reference of all internal variables that are used for the MPC,
+ but not shared as AgentVariables.
+
+ Currently, this includes:
+ - Weights for control change (r_del_u)
+ """
+ r_del_u: dict[str, mpc_datamodels.MPCVariable] = {}
+ for control in self.config.controls:
+ r_del_u_name = mpc_datamodels.r_del_u_convention(control.name)
+ var = mpc_datamodels.MPCVariable(name=r_del_u_name)
+ r_del_u[r_del_u_name] = var
+ if control.name in self.config.r_del_u:
+ var.value = self.config.r_del_u[control.name]
+ else:
+ var.value = 0
+
+ return r_del_u
+
+import importlib
+
+from pydantic import BaseModel
+
+
+[docs]class BackendImport(BaseModel):
+ """
+ Data-Class to import a given python file
+ from ``import_path`` and load the given
+ ``class_name``
+ """
+
+ import_path: str
+ class_name: str
+
+ def __call__(self, *args, **kwargs):
+ """Import the Module with class_name from the import path"""
+ module = importlib.import_module(self.import_path)
+ cls = getattr(module, self.class_name)
+ return cls(*args, **kwargs)
+
+
+backend_types = {
+ "casadi_basic": BackendImport(
+ import_path="agentlib_mpc.optimization_backends.casadi_.basic",
+ class_name="CasADiBaseBackend",
+ ),
+ "casadi": BackendImport(
+ import_path="agentlib_mpc.optimization_backends.casadi_.full",
+ class_name="CasADiFullBackend",
+ ),
+ "casadi_admm": BackendImport(
+ import_path="agentlib_mpc.optimization_backends.casadi_.admm",
+ class_name="CasADiADMMBackend",
+ ),
+ "casadi_minlp": BackendImport(
+ import_path="agentlib_mpc.optimization_backends.casadi_.minlp",
+ class_name="CasADiMINLPBackend",
+ ),
+ "casadi_cia": BackendImport(
+ import_path="agentlib_mpc.optimization_backends.casadi_.minlp_cia",
+ class_name="CasADiCIABackend",
+ ),
+ "casadi_ml": BackendImport(
+ import_path="agentlib_mpc.optimization_backends.casadi_.casadi_ml",
+ class_name="CasADiBBBackend",
+ ),
+ "casadi_admm_ml": BackendImport(
+ import_path="agentlib_mpc.optimization_backends.casadi_.casadi_admm_ml",
+ class_name="CasADiADMMBackend_NN",
+ ),
+ "casadi_nn": BackendImport(
+ import_path="agentlib_mpc.optimization_backends.casadi_.casadi_ml",
+ class_name="CasADiBBBackend",
+ ),
+ "casadi_admm_nn": BackendImport(
+ import_path="agentlib_mpc.optimization_backends.casadi_.casadi_admm_ml",
+ class_name="CasADiADMMBackend_NN",
+ ),
+ "casadi_mhe": BackendImport(
+ import_path="agentlib_mpc.optimization_backends.casadi_.mhe",
+ class_name="MHEBackend",
+ ),
+}
+
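+# Editor's sketch of how the registry above is used (see also
+# create_optimization_backend in the mpc module): indexing returns a
+# BackendImport, and calling it imports the module lazily and instantiates the
+# backend class:
+#
+#     backend = backend_types["casadi"](config={...})
+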
+
+uninstalled_backend_types = {}
+
+# Placeholder: optional backends whose imports fail can be registered here so
+# that a helpful install hint is raised instead of a plain ImportError.
+try:
+    pass
+except ImportError:
+    uninstalled_backend_types.update()
+
+import abc
+import os
+from datetime import datetime
+import logging
+from pathlib import Path
+from typing import Dict, Union, Callable, TypeVar, Optional
+
+import pandas as pd
+import pydantic
+from agentlib.core.errors import ConfigurationError
+from pydantic import ConfigDict
+from pydantic_core.core_schema import FieldValidationInfo
+
+from agentlib.utils import custom_injection
+from agentlib.core import AgentVariable, Model
+from agentlib_mpc.data_structures import mpc_datamodels
+from agentlib_mpc.data_structures.mpc_datamodels import (
+ DiscretizationOptions,
+)
+from agentlib_mpc.data_structures.mpc_datamodels import Results
+
+logger = logging.getLogger(__name__)
+
+ModelT = TypeVar("ModelT", bound=Model)
+
+
+[docs]class BackendConfig(pydantic.BaseModel):
+ model: dict
+ discretization_options: DiscretizationOptions
+ name: Optional[str] = None
+ results_file: Optional[Path] = pydantic.Field(default=None)
+ save_results: Optional[bool] = pydantic.Field(validate_default=True, default=None)
+ overwrite_result_file: Optional[bool] = pydantic.Field(
+ default=False, validate_default=True
+ )
+ model_config = ConfigDict(extra="forbid")
+
+[docs] @pydantic.field_validator("results_file")
+ @classmethod
+ def check_csv(cls, file: Path):
+ if not file.suffix == ".csv":
+ raise ConfigurationError(
+ f"Results filename has to be a 'csv' file. Got {file} instead."
+ )
+ return file
+
+[docs] @pydantic.field_validator("save_results")
+ @classmethod
+ def disable_results_if_no_file(cls, save_results: bool, info: FieldValidationInfo):
+ if save_results is None:
+ # if user did not specify if results should be saved, we save them if a
+ # file is specified.
+ return bool(info.data["results_file"])
+ if save_results and info.data["results_file"] is None:
+ raise ConfigurationError(
+                "'save_results' was true, but no results file was provided."
+ )
+ return save_results
+
+[docs] @pydantic.field_validator("overwrite_result_file")
+ @classmethod
+ def check_overwrite(cls, overwrite_result_file: bool, info: FieldValidationInfo):
+        """Checks whether the settings for overwriting results are valid, and
+        deletes existing result files if applicable."""
+ res_file = info.data.get("results_file")
+ if res_file and info.data["save_results"]:
+ if overwrite_result_file:
+ try:
+ os.remove(res_file)
+ os.remove(mpc_datamodels.stats_path(res_file))
+ except FileNotFoundError:
+ pass
+ else:
+ if os.path.isfile(info.data["results_file"]):
+ raise FileExistsError(
+ f"Results file {res_file} already exists and will not be "
+ f"overwritten automatically. Set 'overwrite_result_file' to "
+                        f"True to enable automatic overwriting."
+ )
+ return overwrite_result_file
+
+
+[docs]class OptimizationBackend(abc.ABC):
+ """
+ Base class for all optimization backends. OptimizationBackends are a
+ plugin for the 'mpc' module. They provide means to setup and solve the
+ underlying optimization problem of the MPC. They also can save data of
+ the solutions.
+ """
+
+ _supported_models: dict[str, ModelT] = {}
+ mpc_backend_parameters = ("time_step", "prediction_horizon")
+ config_type = BackendConfig
+
+ def __init__(self, config: dict):
+ self.logger = logger
+ self.config = self.config_type(**config)
+ self.model: ModelT = self.model_from_config(self.config.model)
+ self.var_ref: Optional[mpc_datamodels.VariableReference] = None
+ self.cost_function: Optional[Callable] = None
+ self.stats = {}
+ self._created_file: bool = False # flag if we checked the file location
+
+[docs] def register_logger(self, logger: logging.Logger):
+ """Registers a logger, can be used to use the module logger"""
+ self.logger = logger
+
+[docs] @abc.abstractmethod
+ def setup_optimization(self, var_ref: mpc_datamodels.VariableReference):
+ """
+ Performs all necessary steps to make the ``solve`` method usable.
+
+ Args:
+ var_ref: Variable Reference that specifies the role of each model variable
+ in the mpc
+ """
+ self.var_ref = var_ref
+
+[docs] @abc.abstractmethod
+ def solve(
+ self, now: Union[float, datetime], current_vars: Dict[str, AgentVariable]
+ ) -> Results:
+ """
+ Solves the optimization problem given the current values of the
+ corresponding AgentVariables and system time. The standardization of
+ return values is a work in progress.
+
+ Args:
+ now: Current time used for interpolation of input trajectories.
+ current_vars: Dict of AgentVariables holding the values relevant to
+                the optimization problem. Keys are the variable names.
+
+ Returns:
+ A dataframe with all optimization variables over their respective
+ grids. Depending on discretization, can include many nan's, so care
+ should be taken when using this, e.g. always use dropna() after
+ accessing a column.
+
+ Example:
+ variables mDot | T_0 | slack_T
+ time
+ 0 0.1 | 298 | nan
+ 230 nan | 297 | 3
+ 470 nan | 296 | 2
+ 588 nan | 295 | 1
+ 700 0.05 | 294 | nan
+ 930 nan | 294 | 0.1
+
+
+ """
+ raise NotImplementedError(
+            "The 'OptimizationBackend' class does not implement this, as the "
+            "implementation is specific to each subclass."
+ )
+
+[docs] def update_discretization_options(self, opts: dict):
+ """Updates the discretization options with the new dict."""
+ self.config.discretization_options = (
+ self.config.discretization_options.model_copy(update=opts)
+ )
+ self.setup_optimization(var_ref=self.var_ref)
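+
+        # Editor's example: update_discretization_options({"time_step": 120})
+        # rebuilds the optimization problem with a 120 s step while keeping
+        # all other discretization options.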
+
+[docs] def model_from_config(self, model: dict):
+ """Set the model to the backend."""
+ model = model.copy()
+ _type = model.pop("type")
+ custom_cls = custom_injection(config=_type)
+ model = custom_cls(**model)
+ if not any(
+ (
+ isinstance(model, _supp_model)
+ for _supp_model in self._supported_models.values()
+ )
+ ):
+ raise TypeError(
+ f"Given model is of type {type(model)} but "
+ f"should be instance of one of:"
+ f"{', '.join(list(self._supported_models.keys()))}"
+ )
+ return model
+
+[docs] def get_lags_per_variable(self) -> dict[str, float]:
+        """Returns the names of variables which include lags, and their lag in
+        seconds. The MPC module can use this information to save relevant past
+        data of lagged variables."""
+ return {}
+
+[docs] def results_file_exists(self) -> bool:
+        """Checks whether the results file already exists; if not, creates its
+        parent directory so the caller can write the file with headers."""
+ if self._created_file:
+ return True
+
+ if self.config.results_file.is_file():
+            # todo: this case is odd, as an existing file will be appended to
+ self._created_file = True
+ return True
+
+ # we only check the file location once to save system calls
+ self.config.results_file.parent.mkdir(parents=True, exist_ok=True)
+ self._created_file = True
+ return False
+
+[docs] def update_model_variables(self, current_vars: Dict[str, AgentVariable]):
+ """
+        Internal method to write current data_broker values to the model
+        variables. Only values are updated, not other attributes.
+ """
+ for inp in current_vars.values():
+ logger.debug(f"Updating model variable {inp.name}={inp.value}")
+ self.model.set(name=inp.name, value=inp.value)
+
+
+OptimizationBackendT = TypeVar("OptimizationBackendT", bound=OptimizationBackend)
+
+
+[docs]class ADMMBackend(OptimizationBackend):
+ """Base class for implementations of optimization backends for ADMM
+ algorithms."""
+
+ @property
+ @abc.abstractmethod
+ def coupling_grid(self) -> list[float]:
+ """Returns the grid on which the coupling variables are discretized."""
+ raise NotImplementedError
+
+import casadi as ca
+import pandas as pd
+
+from agentlib_mpc.data_structures.casadi_utils import DiscretizationMethod, Integrators
+from agentlib_mpc.data_structures.mpc_datamodels import stats_path
+from agentlib_mpc.models.casadi_model import CasadiModel, CasadiInput, CasadiParameter
+from agentlib_mpc.data_structures import admm_datatypes
+from agentlib_mpc.optimization_backends.casadi_.core.VariableGroup import (
+ OptimizationVariable,
+ OptimizationParameter,
+)
+from agentlib_mpc.optimization_backends.casadi_.basic import (
+ DirectCollocation,
+ MultipleShooting,
+ CasADiBaseBackend,
+)
+from agentlib_mpc.optimization_backends.backend import ADMMBackend
+from agentlib_mpc.optimization_backends.casadi_.core.discretization import Results
+from agentlib_mpc.optimization_backends.casadi_.full import FullSystem
+
+
+[docs]class CasadiADMMSystem(FullSystem):
+ local_couplings: OptimizationVariable
+ global_couplings: OptimizationParameter
+ multipliers: OptimizationParameter
+ local_exchange: OptimizationVariable
+ exchange_diff: OptimizationParameter
+ exchange_multipliers: OptimizationParameter
+ penalty_factor: OptimizationParameter
+
+[docs] def initialize(self, model: CasadiModel, var_ref: admm_datatypes.VariableReference):
+ super().initialize(model=model, var_ref=var_ref)
+
+ coup_names = [c.name for c in var_ref.couplings]
+ exchange_names = [c.name for c in var_ref.exchange]
+ pure_outs = [
+ m for m in model.outputs if m.name not in coup_names + exchange_names
+ ]
+ self.outputs = OptimizationVariable.declare(
+ denotation="y",
+ variables=pure_outs,
+ ref_list=var_ref.outputs,
+ )
+
+ self.local_couplings = OptimizationVariable.declare(
+ denotation="local_couplings",
+ variables=[model.get(name) for name in coup_names],
+ ref_list=coup_names,
+ )
+ couplings_global = [coup.mean for coup in var_ref.couplings]
+ self.global_couplings = OptimizationParameter.declare(
+ denotation="global_couplings",
+ variables=[CasadiInput(name=coup) for coup in couplings_global],
+ ref_list=couplings_global,
+ )
+
+ multipliers = [coup.multiplier for coup in var_ref.couplings]
+ self.multipliers = OptimizationParameter.declare(
+ denotation="multipliers",
+ variables=[CasadiInput(name=coup) for coup in multipliers],
+ ref_list=multipliers,
+ )
+
+ self.local_exchange = OptimizationVariable.declare(
+ denotation="local_exchange",
+ variables=[model.get(name) for name in exchange_names],
+ ref_list=exchange_names,
+ )
+ couplings_mean_diff = [coup.mean_diff for coup in var_ref.exchange]
+ self.exchange_diff = OptimizationParameter.declare(
+ denotation="average_diff",
+ variables=[CasadiInput(name=coup) for coup in couplings_mean_diff],
+ ref_list=couplings_mean_diff,
+ )
+
+ multipliers = [coup.multiplier for coup in var_ref.exchange]
+ self.exchange_multipliers = OptimizationParameter.declare(
+ denotation="exchange_multipliers",
+ variables=[CasadiInput(name=coup) for coup in multipliers],
+ ref_list=multipliers,
+ )
+
+ self.penalty_factor = OptimizationParameter.declare(
+ denotation="rho",
+ variables=[CasadiParameter(name="penalty_factor")],
+ ref_list=["penalty_factor"],
+ )
+
+ # add admm terms to objective function
+ admm_objective = 0
+ rho = self.penalty_factor.full_symbolic[0]
+ for i in range(len(var_ref.couplings)):
+ admm_in = self.global_couplings.full_symbolic[i]
+ admm_out = self.local_couplings.full_symbolic[i]
+ admm_lam = self.multipliers.full_symbolic[i]
+ admm_objective += admm_lam * admm_out + rho / 2 * (admm_in - admm_out) ** 2
+
+ for i in range(len(var_ref.exchange)):
+ admm_in = self.exchange_diff.full_symbolic[i]
+ admm_out = self.local_exchange.full_symbolic[i]
+ admm_lam = self.exchange_multipliers.full_symbolic[i]
+ admm_objective += admm_lam * admm_out + rho / 2 * (admm_in - admm_out) ** 2
+
+ self.cost_function += admm_objective
+
+
+[docs]class ADMMCollocation(DirectCollocation):
+ def _discretize(self, sys: CasadiADMMSystem):
+ """
+ Perform a direct collocation discretization.
+ # pylint: disable=invalid-name
+ """
+
+ # setup the polynomial base
+ collocation_matrices = self._collocation_polynomial()
+
+ # shorthands
+ n = self.options.prediction_horizon
+ ts = self.options.time_step
+
+ # Initial State
+ x0 = self.add_opt_par(sys.initial_state)
+ xk = self.add_opt_var(sys.states, lb=x0, ub=x0, guess=x0)
+ uk = self.add_opt_par(sys.last_control)
+
+ # Parameters that are constant over the horizon
+ const_par = self.add_opt_par(sys.model_parameters)
+ du_weights = self.add_opt_par(sys.r_del_u)
+ rho = self.add_opt_par(sys.penalty_factor)
+
+ # Formulate the NLP
+ # loop over prediction horizon
+ while self.k < n:
+ # New NLP variable for the control
+ u_prev = uk
+ uk = self.add_opt_var(sys.controls)
+ # penalty for control change between time steps
+ self.objective_function += ts * ca.dot(du_weights, (u_prev - uk) ** 2)
+
+ # New parameter for inputs
+ dk = self.add_opt_par(sys.non_controlled_inputs)
+
+            # perform inner collocation loop
+ opt_vars_inside_inner = [
+ sys.algebraics,
+ sys.outputs,
+ sys.local_couplings,
+ sys.local_exchange,
+ ]
+ opt_pars_inside_inner = [
+ sys.global_couplings,
+ sys.multipliers,
+ sys.exchange_multipliers,
+ sys.exchange_diff,
+ ]
+ constant_over_inner = {
+ sys.controls: uk,
+ sys.non_controlled_inputs: dk,
+ sys.model_parameters: const_par,
+ sys.penalty_factor: rho,
+ }
+ xk_end, constraints = self._collocation_inner_loop(
+ collocation=collocation_matrices,
+ state_at_beginning=xk,
+ states=sys.states,
+ opt_vars=opt_vars_inside_inner,
+ opt_pars=opt_pars_inside_inner,
+ const=constant_over_inner,
+ )
+
+ # increment loop counter and time
+ self.k += 1
+ self.pred_time = ts * self.k
+
+ # New NLP variables at end of interval
+ xk = self.add_opt_var(sys.states)
+
+ # Add continuity constraint
+ self.add_constraint(xk - xk_end, gap_closing=True)
+
+ # add collocation constraints later for fatrop
+ for constraint in constraints:
+ self.add_constraint(*constraint)
+
+
+[docs]class ADMMMultipleShooting(MultipleShooting):
+ def _discretize(self, sys: CasadiADMMSystem):
+ """Performs a multiple shooting discretization for ADMM-based optimization.
+
+ This method implements the multiple shooting discretization scheme for both consensus
+ and exchange ADMM variants. It handles:
+ 1. State continuity across shooting intervals
+ 2. Local coupling variables and their consensus terms
+ 3. Exchange variables between subsystems
+ 4. Integration of system dynamics
+ 5. Objective function construction including ADMM penalty terms
+
+ Args:
+ sys (CasadiADMMSystem): The system to be discretized, containing states,
+ controls, and ADMM-specific variables
+ """
+ # Extract key parameters
+ prediction_horizon = self.options.prediction_horizon
+ timestep = self.options.time_step
+ integration_options = {"t0": 0, "tf": timestep}
+
+ # Initialize state trajectory
+ initial_state = self.add_opt_par(sys.initial_state)
+ current_state = self.add_opt_var(
+ sys.states, lb=initial_state, ub=initial_state, guess=initial_state
+ )
+
+ # Initialize control input
+ previous_control = self.add_opt_par(sys.last_control)
+
+ # Add time-invariant parameters
+ control_rate_weights = self.add_opt_par(sys.r_del_u)
+ model_parameters = self.add_opt_par(sys.model_parameters)
+ admm_penalty = self.add_opt_par(sys.penalty_factor)
+
+ # Create system integrator
+ dynamics_integrator = self._create_ode(
+ sys, integration_options, self.options.integrator
+ )
+
+ # Perform multiple shooting discretization
+ for k in range(prediction_horizon):
+ # 1. Handle control inputs and their rate penalties
+ current_control = self.add_opt_var(sys.controls)
+ control_rate_penalty = timestep * ca.dot(
+ control_rate_weights, (previous_control - current_control) ** 2
+ )
+ self.objective_function += control_rate_penalty
+ previous_control = current_control
+
+ # 2. Add optimization variables for current shooting interval
+ disturbance = self.add_opt_par(sys.non_controlled_inputs)
+ algebraic_vars = self.add_opt_var(sys.algebraics)
+ output_vars = self.add_opt_var(sys.outputs)
+
+ # 3. Add ADMM consensus variables
+ local_coupling = self.add_opt_var(sys.local_couplings)
+ global_coupling = self.add_opt_par(sys.global_couplings)
+ coupling_multipliers = self.add_opt_par(sys.multipliers)
+
+ # 4. Add ADMM exchange variables
+ exchange_difference = self.add_opt_par(sys.exchange_diff)
+ exchange_multipliers = self.add_opt_par(sys.exchange_multipliers)
+ local_exchange = self.add_opt_var(sys.local_exchange)
+
+ # 5. Construct stage-wise optimization problem
+ stage_variables = {
+ sys.states.name: current_state,
+ sys.algebraics.name: algebraic_vars,
+ sys.local_couplings.name: local_coupling,
+ sys.outputs.name: output_vars,
+ sys.local_exchange.name: local_exchange,
+ sys.global_couplings.name: global_coupling,
+ sys.multipliers.name: coupling_multipliers,
+ sys.controls.name: current_control,
+ sys.non_controlled_inputs.name: disturbance,
+ sys.model_parameters.name: model_parameters,
+ sys.penalty_factor.name: admm_penalty,
+ sys.exchange_diff.name: exchange_difference,
+ sys.exchange_multipliers.name: exchange_multipliers,
+ }
+
+ stage_result = self._stage_function(**stage_variables)
+
+ # 6. Integrate system dynamics
+ integration_result = dynamics_integrator(
+ x0=current_state,
+ p=ca.vertcat(
+ current_control,
+ local_coupling,
+ disturbance,
+ model_parameters,
+ algebraic_vars,
+ output_vars,
+ ),
+ )
+
+ # 7. Add continuity constraints
+ self.k = k + 1
+ self.pred_time = timestep * self.k
+ next_state = self.add_opt_var(sys.states)
+ self.add_constraint(next_state - integration_result["xf"], gap_closing=True)
+
+ # 8. Add model constraints and objective contributions
+ self.add_constraint(
+ stage_result["model_constraints"],
+ lb=stage_result["lb_model_constraints"],
+ ub=stage_result["ub_model_constraints"],
+ )
+ self.objective_function += stage_result["cost_function"] * timestep
+
+ # Update for next interval
+ current_state = next_state
+
+ def _create_ode(
+ self, sys: CasadiADMMSystem, opts: dict, integrator: Integrators
+ ) -> ca.Function:
+ # dummy function for empty ode, since ca.integrator would throw an error
+ if sys.states.full_symbolic.shape[0] == 0:
+ return lambda *args, **kwargs: {"xf": ca.MX.sym("xk_end", 0)}
+
+ ode = sys.ode
+ # create inputs
+ x = sys.states.full_symbolic
+ # the order of elements here is important when calling the integrator!
+ p = ca.vertcat(
+ sys.controls.full_symbolic,
+ sys.local_couplings.full_symbolic,
+ sys.non_controlled_inputs.full_symbolic,
+ sys.model_parameters.full_symbolic,
+ sys.algebraics.full_symbolic,
+ sys.outputs.full_symbolic,
+ )
+ integrator_ode = {"x": x, "p": p, "ode": ode}
+ if integrator == Integrators.euler:
+ xk_end = x + ode * opts["tf"]
+ opt_integrator = ca.Function(
+ "system", [x, p], [xk_end], ["x0", "p"], ["xf"]
+ )
+ else: # rk, cvodes
+ opt_integrator = ca.integrator("system", integrator, integrator_ode, opts)
+ return opt_integrator
+
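+ # Illustrative sketch (not part of the library): for a scalar ODE
+ # dx/dt = -x with tf = 0.1, the Euler branch above builds the map
+ # xf = x0 + (-x0) * 0.1, which evaluates to 0.9 for x0 = 1. A minimal
+ # standalone reconstruction (parameters omitted), assuming only casadi:
+ #
+ # import casadi as ca
+ # x = ca.MX.sym("x")
+ # euler = ca.Function("system", [x], [x + (-x) * 0.1], ["x0"], ["xf"])
+ # print(euler(x0=1.0)["xf"]) # -> 0.9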
+
+[docs]class CasADiADMMBackend(CasADiBaseBackend, ADMMBackend):
+ """
+ Class doing optimization of ADMM subproblems with CasADi.
+ """
+
+ system_type = CasadiADMMSystem
+ discretization_types = {
+ DiscretizationMethod.collocation: ADMMCollocation,
+ DiscretizationMethod.multiple_shooting: ADMMMultipleShooting,
+ }
+ system: CasadiADMMSystem
+
+ def __init__(self, config: dict):
+ super().__init__(config)
+ self.results: list[pd.DataFrame] = []
+ self.result_stats: list[str] = []
+ self.it: int = 0
+ self.now: float = 0
+
+ @property
+ def coupling_grid(self):
+ return self.discretization.grid(self.system.multipliers)
+
+[docs] def save_result_df(
+ self,
+ results: Results,
+ now: float = 0,
+ ):
+ """
+ Save the results of `solve` into a dataframe at each time step.
+
+ Example results dataframe:
+
+ value_type variable ... lower
+ variable T_0 T_0_slack ... T_0_slack mDot_0
+ time_step ...
+ 2 0.000000 298.160000 NaN ... NaN NaN
+ 101.431499 297.540944 -149.465942 ... -inf 0.0
+ 450.000000 295.779780 -147.704779 ... -inf 0.0
+ 798.568501 294.720770 -146.645769 ... -inf 0.0
+
+ Args:
+ results: The Results object returned by the solver for this iteration.
+ now: The current sampling time, used to index the stored iterations.
+ """
+ if not self.config.save_results:
+ return
+
+ res_file = self.config.results_file
+
+ if self.results_file_exists():
+ self.it += 1
+ if now != self.now: # means we advanced to next step
+ self.it = 0
+ self.now = now
+ else:
+ self.it = 0
+ self.now = now
+ results.write_columns(res_file)
+ results.write_stats_columns(stats_path(res_file))
+
+ df = results.df
+ df.index = list(map(lambda x: str((now, self.it, x)), df.index))
+ self.results.append(df)
+
+ # append solve stats
+ index = str((now, self.it))
+ self.result_stats.append(results.stats_line(index))
+
+ # flush the buffered results at the start of a new sampling time, or every
+ # 1000 iterations
+ if not (self.it == 0 or self.it % 1000 == 0):
+ return
+
+ with open(res_file, "a", newline="") as f:
+ for iteration_result in self.results:
+ iteration_result.to_csv(f, mode="a", header=False)
+
+ with open(stats_path(res_file), "a") as f:
+ f.writelines(self.result_stats)
+ self.results = []
+ self.result_stats = []
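+
+ # Note on the buffering above (illustrative): each stored row is indexed by
+ # the string str((now, self.it, time)), e.g. a row solved at sampling time
+ # now=450.0 in ADMM iteration it=2 for prediction time 0.0 is written under
+ # "(450.0, 2, 0.0)", so a single CSV holds every inner iteration of every
+ # sampling step.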
+
+import dataclasses
+
+import casadi as ca
+import numpy as np
+
+from agentlib_mpc.data_structures.casadi_utils import (
+ Constraint,
+ LB_PREFIX,
+ UB_PREFIX,
+ DiscretizationMethod,
+ SolverFactory,
+ Integrators,
+)
+from agentlib_mpc.data_structures.mpc_datamodels import VariableReference
+from agentlib_mpc.models.casadi_model import CasadiModel
+from agentlib_mpc.optimization_backends.casadi_.core.casadi_backend import CasADiBackend
+from agentlib_mpc.optimization_backends.casadi_.core.VariableGroup import (
+ OptimizationQuantity,
+ OptimizationVariable,
+ OptimizationParameter,
+)
+from agentlib_mpc.optimization_backends.casadi_.core.discretization import (
+ Discretization,
+)
+from agentlib_mpc.optimization_backends.casadi_.core.system import System
+
+
+[docs]class BaseSystem(System):
+ # variables
+ states: OptimizationVariable
+ controls: OptimizationVariable
+ algebraics: OptimizationVariable
+ outputs: OptimizationVariable
+
+ # parameters
+ non_controlled_inputs: OptimizationParameter
+ model_parameters: OptimizationParameter
+ initial_state: OptimizationParameter
+
+ # dynamics
+ model_constraints: Constraint
+ cost_function: ca.MX
+ ode: ca.MX
+
+[docs] def initialize(self, model: CasadiModel, var_ref: VariableReference):
+ # define variables
+ self.states = OptimizationVariable.declare(
+ denotation="state",
+ variables=model.get_states(var_ref.states),
+ ref_list=var_ref.states,
+ assert_complete=True,
+ )
+ self.controls = OptimizationVariable.declare(
+ denotation="control",
+ variables=model.get_inputs(var_ref.controls),
+ ref_list=var_ref.controls,
+ assert_complete=True,
+ )
+ self.algebraics = OptimizationVariable.declare(
+ denotation="z",
+ variables=model.auxiliaries,
+ ref_list=[],
+ )
+ self.outputs = OptimizationVariable.declare(
+ denotation="y",
+ variables=model.outputs,
+ ref_list=var_ref.outputs,
+ )
+
+ # define parameters
+ self.non_controlled_inputs = OptimizationParameter.declare(
+ denotation="d",
+ variables=model.get_inputs(var_ref.inputs),
+ ref_list=var_ref.inputs,
+ assert_complete=True,
+ )
+ self.model_parameters = OptimizationParameter.declare(
+ denotation="parameter",
+ variables=model.parameters,
+ ref_list=var_ref.parameters,
+ )
+ self.initial_state = OptimizationParameter.declare(
+ denotation="initial_state", # append the 0 as a convention to get initial guess
+ variables=model.get_states(var_ref.states),
+ ref_list=var_ref.states,
+ use_in_stage_function=False,
+ assert_complete=True,
+ )
+
+ # dynamics
+ ode = ca.vertcat(*[sta.ode for sta in model.get_states(var_ref.states)])
+ self.ode = ca.reshape(ode, -1, 1)
+ self.cost_function = model.cost_func
+ self.model_constraints = Constraint(
+ function=ca.vertcat(*[c.function for c in model.get_constraints()]),
+ lb=ca.vertcat(*[c.lb for c in model.get_constraints()]),
+ ub=ca.vertcat(*[c.ub for c in model.get_constraints()]),
+ )
+
+
+[docs]@dataclasses.dataclass
+class CollocationMatrices:
+ order: int # polynomial degree d
+ root: np.ndarray # collocation points, including tau = 0
+ B: np.ndarray # quadrature weights
+ C: np.ndarray # coefficients of the collocation (derivative) equation
+ D: np.ndarray # coefficients of the continuity equation
+
+
+[docs]class DirectCollocation(Discretization):
+ def _discretize(self, sys: BaseSystem):
+ """
+ Defines a direct collocation discretization.
+ # pylint: disable=invalid-name
+ """
+
+ # setup the polynomial base
+ collocation_matrices = self._collocation_polynomial()
+
+ # shorthands
+ n = self.options.prediction_horizon
+ ts = self.options.time_step
+
+ # Initial State
+ x0 = self.add_opt_par(sys.initial_state)
+ xk = self.add_opt_var(sys.states, lb=x0, ub=x0, guess=x0)
+
+ # Parameters that are constant over the horizon
+ const_par = self.add_opt_par(sys.model_parameters)
+
+ # Formulate the NLP
+ # loop over prediction horizon
+ k = 0
+ while k < n:
+ # New NLP variable for the control
+ uk = self.add_opt_var(sys.controls)
+
+ # New parameter for inputs
+ dk = self.add_opt_par(sys.non_controlled_inputs)
+
+ # perform inner collocation loop
+ opt_vars_inside_inner = [sys.algebraics, sys.outputs]
+ opt_pars_inside_inner = []
+
+ constant_over_inner = {
+ sys.controls: uk,
+ sys.non_controlled_inputs: dk,
+ sys.model_parameters: const_par,
+ }
+ xk_end, constraints = self._collocation_inner_loop(
+ collocation=collocation_matrices,
+ state_at_beginning=xk,
+ states=sys.states,
+ opt_vars=opt_vars_inside_inner,
+ opt_pars=opt_pars_inside_inner,
+ const=constant_over_inner,
+ )
+
+ # increment loop counter and time
+ k += 1
+ self.pred_time = ts * k
+
+ # New NLP variable for differential state at end of interval
+ xk = self.add_opt_var(sys.states)
+
+ # Add continuity constraint
+ self.add_constraint(xk - xk_end, gap_closing=True)
+
+ # add collocation constraints later for fatrop
+ for constraint in constraints:
+ self.add_constraint(*constraint)
+
+ def _construct_stage_function(self, system: BaseSystem):
+ """
+ Combine information from the model and the var_ref to create CasADi
+ functions which describe the system dynamics and constraints at each
+ stage of the optimization problem. Sets the stage function. It has
+ all mpc variables as inputs, sorted by denotation (declared in
+ self.declare_quantities) and outputs ode, cost function and 3 outputs
+ per constraint (constraint, lb_constraint, ub_constraint).
+
+ In the basic case, it has the form:
+ CasadiFunction: ['x', 'z', 'u', 'y', 'd', 'p'] ->
+ ['ode', 'cost_function', 'model_constraints',
+ 'ub_model_constraints', 'lb_model_constraints']
+
+ Args:
+ system
+ """
+ all_system_quantities: dict[str, OptimizationQuantity] = {
+ var.name: var for var in system.quantities
+ }
+ constraints = {"model_constraints": system.model_constraints}
+
+ inputs = [
+ q.full_symbolic
+ for q in all_system_quantities.values()
+ if q.use_in_stage_function
+ ]
+ input_denotations = [
+ q.name
+ for denotation, q in all_system_quantities.items()
+ if q.use_in_stage_function
+ ]
+
+ # aggregate constraints
+ constraints_func = [c.function for c in constraints.values()]
+ constraints_lb = [c.lb for c in constraints.values()]
+ constraints_ub = [c.ub for c in constraints.values()]
+ constraint_denotations = list(constraints.keys())
+ constraint_lb_denotations = [LB_PREFIX + k for k in constraints]
+ constraint_ub_denotations = [UB_PREFIX + k for k in constraints]
+
+ # aggregate outputs
+ outputs = [
+ system.ode,
+ system.cost_function,
+ *constraints_func,
+ *constraints_lb,
+ *constraints_ub,
+ ]
+ output_denotations = [
+ "ode",
+ "cost_function",
+ *constraint_denotations,
+ *constraint_lb_denotations,
+ *constraint_ub_denotations,
+ ]
+
+ # function describing system dynamics and cost function
+ self._stage_function = ca.Function(
+ "f",
+ inputs,
+ outputs,
+ # input handles to make kwarg use possible and to debug
+ input_denotations,
+ # output handles to make kwarg use possible and to debug
+ output_denotations,
+ )
+
+[docs] def initialize(self, system: BaseSystem, solver_factory: SolverFactory):
+ """Initializes the trajectory optimization problem, creating all symbolic
+ variables of the OCP, the mapping function and the numerical solver."""
+ self._construct_stage_function(system)
+ super().initialize(system=system, solver_factory=solver_factory)
+
+ def _collocation_inner_loop(
+ self,
+ state_at_beginning: ca.MX,
+ collocation: CollocationMatrices,
+ states: OptimizationVariable,
+ opt_vars: list[OptimizationVariable],
+ opt_pars: list[OptimizationParameter],
+ const: dict[OptimizationQuantity, ca.MX],
+ ) -> tuple[ca.MX, tuple]:
+ """
+ Constructs the inner loop of a collocation discretization: it declares the
+ optimization variables and parameters at each collocation point and builds
+ the corresponding collocation and path constraints.
+
+ Args:
+ collocation: The collocation matrices
+ state_at_beginning: The casadi MX instance representing the state at the
+ beginning of the collocation interval
+ states: The OptimizationVariable representing the states
+ opt_vars: The OptimizationVariables which should be defined at each
+ collocation point
+ opt_pars: The OptimizationParameters which should be defined at each
+ collocation point
+ const: Variables or parameters to feed into the system function that are
+ constant over the inner loop. Value is the current MX to be used.
+
+ Returns:
+ state_k_end (MX): state at the end of the collocation interval
+ constraints: list of collocation and model constraints, to be added to
+ the problem by the caller
+ """
+ constraints = []
+ constants = {var.name: mx for var, mx in const.items()}
+
+ # remember time at start of collocation loop
+ start_time = self.pred_time
+
+ # shorthands
+ ts = self.options.time_step
+
+ # State variables at collocation points
+ state_collocation = []
+ opt_vars_collocation = []
+ opt_pars_collocation = []
+
+ # add variables at collocation points
+ for j in range(collocation.order): # loop over the d collocation points
+ # set time
+ self.pred_time = start_time + collocation.root[j + 1] * ts
+
+ # differential state
+ state_kj = self.add_opt_var(states, post_den=f"_{j}")
+ state_collocation.append(state_kj)
+
+ opt_vars_collocation.append({})
+ for opt_var in opt_vars:
+ var_kj = self.add_opt_var(opt_var, post_den=f"_{j}")
+ opt_vars_collocation[-1].update({opt_var.name: var_kj})
+
+ opt_pars_collocation.append({})
+ for opt_par in opt_pars:
+ par_kj = self.add_opt_par(opt_par, post_den=f"_{j}")
+ opt_pars_collocation[-1].update({opt_par.name: par_kj})
+
+ # Loop over collocation points
+ state_k_end = collocation.D[0] * state_at_beginning
+ for j in range(1, collocation.order + 1):
+ # Expression for the state derivative at the collocation point
+ xp = collocation.C[0, j] * state_at_beginning
+ for r in range(collocation.order):
+ xp = xp + collocation.C[r + 1, j] * state_collocation[r]
+
+ stage = self._stage_function(
+ **{states.name: state_collocation[j - 1]},
+ **opt_pars_collocation[j - 1],
+ **opt_vars_collocation[j - 1],
+ **constants,
+ )
+
+ constraints.append((ts * stage["ode"] - xp,))
+ constraints.append(
+ (
+ stage["model_constraints"],
+ stage["lb_model_constraints"],
+ stage["ub_model_constraints"],
+ )
+ )
+
+ # Add contribution to the end state
+ state_k_end = state_k_end + collocation.D[j] * state_collocation[j - 1]
+
+ # Add contribution to quadrature function
+ self.objective_function += collocation.B[j] * stage["cost_function"] * ts
+
+ return state_k_end, constraints
+
+ def _collocation_polynomial(self) -> CollocationMatrices:
+ """Returns the matrices needed for direct collocation discretization."""
+ # Degree of interpolating polynomial
+ d = self.options.collocation_order
+ polynomial = self.options.collocation_method
+
+ # Get collocation points
+ tau_root = np.append(0, ca.collocation_points(d, polynomial))
+
+ # Coefficients of the collocation equation
+ C = np.zeros((d + 1, d + 1))
+
+ # Coefficients of the continuity equation
+ D = np.zeros(d + 1)
+
+ # Coefficients of the quadrature function
+ B = np.zeros(d + 1)
+
+ # Construct polynomial basis
+ for j in range(d + 1):
+ # Construct Lagrange polynomials to get the polynomial basis at
+ # the collocation point
+ p = np.poly1d([1])
+ for r in range(d + 1):
+ if r != j:
+ p *= np.poly1d([1, -tau_root[r]]) / (tau_root[j] - tau_root[r])
+
+ # Evaluate the polynomial at the final time to get the
+ # coefficients of the continuity equation
+ D[j] = p(1.0)
+
+ # Evaluate the time derivative of the polynomial at all collocation
+ # points to get the coefficients of the continuity equation
+ pder = np.polyder(p)
+ for r in range(d + 1):
+ C[j, r] = pder(tau_root[r])
+
+ # Evaluate the integral of the polynomial to get the coefficients
+ # of the quadrature function
+ pint = np.polyint(p)
+ B[j] = pint(1.0)
+
+ return CollocationMatrices(
+ order=d,
+ root=tau_root,
+ B=B,
+ C=C,
+ D=D,
+ )
+
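+ # Self-contained sanity check of the matrices above (illustrative only,
+ # assumes casadi and numpy): the Lagrange basis sums to one at any point,
+ # so the continuity coefficients D sum to 1, and the quadrature weights B
+ # integrate the constant polynomial exactly over [0, 1]:
+ #
+ # import casadi as ca
+ # import numpy as np
+ # d = 2
+ # tau = np.append(0, ca.collocation_points(d, "radau"))
+ # D, B = np.zeros(d + 1), np.zeros(d + 1)
+ # for j in range(d + 1):
+ # p = np.poly1d([1])
+ # for r in range(d + 1):
+ # if r != j:
+ # p *= np.poly1d([1, -tau[r]]) / (tau[j] - tau[r])
+ # D[j] = p(1.0)
+ # B[j] = np.polyint(p)(1.0)
+ # assert np.isclose(D.sum(), 1.0) and np.isclose(B.sum(), 1.0)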
+
+[docs]class MultipleShooting(Discretization):
+ def _discretize(self, sys: BaseSystem):
+ """
+ Defines a multiple shooting discretization
+ """
+ vars_dict = {sys.states.name: {}}
+ n = self.options.prediction_horizon
+ ts = self.options.time_step
+ opts = {"t0": 0, "tf": ts}
+ # Initial State
+ x0 = self.add_opt_par(sys.initial_state)
+ xk = self.add_opt_var(sys.states, lb=x0, ub=x0, guess=x0)
+ vars_dict[sys.states.name][0] = xk
+ const_par = self.add_opt_par(sys.model_parameters)
+ # ODE is used here because the algebraics can be calculated with the stage function
+ opt_integrator = self._create_ode(sys, opts, integrator=self.options.integrator)
+ # initiate states
+ while self.k < n:
+ uk = self.add_opt_var(sys.controls)
+ dk = self.add_opt_par(sys.non_controlled_inputs)
+ zk = self.add_opt_var(sys.algebraics)
+ yk = self.add_opt_var(sys.outputs)
+ # get stage
+ stage_arguments = {
+ # variables
+ sys.states.name: xk,
+ sys.algebraics.name: zk,
+ sys.outputs.name: yk,
+ # parameters
+ sys.controls.name: uk,
+ sys.non_controlled_inputs.name: dk,
+ sys.model_parameters.name: const_par,
+ }
+ # get stage
+ stage = self._stage_function(**stage_arguments)
+
+ self.add_constraint(
+ stage["model_constraints"],
+ lb=stage["lb_model_constraints"],
+ ub=stage["ub_model_constraints"],
+ )
+ fk = opt_integrator(
+ x0=xk,
+ p=ca.vertcat(uk, dk, const_par, zk, yk),
+ )
+ xk_end = fk["xf"]
+ # increment loop counter and time
+ self.k += 1
+ self.pred_time = ts * self.k
+ xk = self.add_opt_var(sys.states)
+ vars_dict[sys.states.name][self.k] = xk
+ self.add_constraint(xk_end - xk, gap_closing=True)
+ self.objective_function += stage["cost_function"] * ts
+
+ def _create_ode(self, sys: BaseSystem, opts: dict, integrator: Integrators):
+ # dummy function for empty ode, since ca.integrator would throw an error
+ if sys.states.full_symbolic.shape[0] == 0:
+ return lambda *args, **kwargs: {"xf": ca.MX.sym("xk_end", 0)}
+
+ ode = sys.ode
+ # create inputs
+ x = sys.states.full_symbolic
+ # the order of elements here is important when calling the integrator!
+ p = ca.vertcat(
+ sys.controls.full_symbolic,
+ sys.non_controlled_inputs.full_symbolic,
+ sys.model_parameters.full_symbolic,
+ sys.algebraics.full_symbolic,
+ sys.outputs.full_symbolic,
+ )
+ integrator_ode = {"x": x, "p": p, "ode": ode}
+
+ if integrator == Integrators.euler:
+ xk_end = x + ode * opts["tf"]
+ opt_integrator = ca.Function(
+ "system", [x, p], [xk_end], ["x0", "p"], ["xf"]
+ )
+ else: # rk, cvodes
+ opt_integrator = ca.integrator("system", integrator, integrator_ode, opts)
+
+ return opt_integrator
+
+ def _construct_stage_function(self, system: BaseSystem):
+ """
+ Combine information from the model and the var_ref to create CasADi
+ functions which describe the system dynamics and constraints at each
+ stage of the optimization problem. Sets the stage function. It has
+ all mpc variables as inputs, sorted by denotation (declared in
+ self.declare_quantities) and outputs ode, cost function and 3 outputs
+ per constraint (constraint, lb_constraint, ub_constraint).
+
+ In the basic case, it has the form:
+ CasadiFunction: ['x', 'z', 'u', 'y', 'd', 'p'] ->
+ ['ode', 'cost_function', 'model_constraints',
+ 'ub_model_constraints', 'lb_model_constraints']
+
+ Args:
+ system
+ """
+ all_system_quantities: dict[str, OptimizationQuantity] = {
+ var.name: var for var in system.quantities
+ }
+ constraints = {"model_constraints": system.model_constraints}
+
+ inputs = [
+ q.full_symbolic
+ for q in all_system_quantities.values()
+ if q.use_in_stage_function
+ ]
+ input_denotations = [
+ q.name
+ for denotation, q in all_system_quantities.items()
+ if q.use_in_stage_function
+ ]
+
+ # aggregate constraints
+ constraints_func = [c.function for c in constraints.values()]
+ constraints_lb = [c.lb for c in constraints.values()]
+ constraints_ub = [c.ub for c in constraints.values()]
+ constraint_denotations = list(constraints.keys())
+ constraint_lb_denotations = [LB_PREFIX + k for k in constraints]
+ constraint_ub_denotations = [UB_PREFIX + k for k in constraints]
+
+ # aggregate outputs
+ outputs = [
+ system.ode,
+ system.cost_function,
+ *constraints_func,
+ *constraints_lb,
+ *constraints_ub,
+ ]
+ output_denotations = [
+ "ode",
+ "cost_function",
+ *constraint_denotations,
+ *constraint_lb_denotations,
+ *constraint_ub_denotations,
+ ]
+
+ # function describing system dynamics and cost function
+ self._stage_function = ca.Function(
+ "f",
+ inputs,
+ outputs,
+ # input handles to make kwarg use possible and to debug
+ input_denotations,
+ # output handles to make kwarg use possible and to debug
+ output_denotations,
+ )
+
+[docs] def initialize(self, system: BaseSystem, solver_factory: SolverFactory):
+ """Initializes the trajectory optimization problem, creating all symbolic
+ variables of the OCP, the mapping function and the numerical solver."""
+ self._construct_stage_function(system)
+ super().initialize(system=system, solver_factory=solver_factory)
+
+
+[docs]class CasADiBaseBackend(CasADiBackend):
+ """
+ Class doing optimization of basic MPC problems with CasADi.
+ """
+
+ system_type = BaseSystem
+ discretization_types = {
+ DiscretizationMethod.collocation: DirectCollocation,
+ DiscretizationMethod.multiple_shooting: MultipleShooting,
+ }
+ system: BaseSystem
+
+import logging
+from typing import Union
+
+import casadi as ca
+
+from agentlib_mpc.models.casadi_model import CasadiInput, CasadiParameter
+from agentlib_mpc.data_structures.casadi_utils import (
+ LB_PREFIX,
+ UB_PREFIX,
+ DiscretizationMethod,
+ Constraint,
+)
+from agentlib_mpc.data_structures.ml_model_datatypes import name_with_lag
+from agentlib_mpc.models.casadi_ml_model import CasadiMLModel
+from agentlib_mpc.optimization_backends.casadi_.casadi_ml import (
+ CasadiMLSystem,
+ CasADiBBBackend,
+ MultipleShooting_ML,
+)
+from agentlib_mpc.optimization_backends.casadi_.core.VariableGroup import (
+ OptimizationVariable,
+ OptimizationParameter,
+)
+from agentlib_mpc.data_structures import admm_datatypes
+from agentlib_mpc.optimization_backends.casadi_.admm import (
+ ADMMMultipleShooting,
+ CasadiADMMSystem,
+ CasADiADMMBackend,
+)
+
+logger = logging.getLogger(__name__)
+
+
+[docs]class CasadiADMMNNSystem(CasadiADMMSystem, CasadiMLSystem):
+ """
+ In this class, the lags are determined by the trainer alone and saved in
+ the serialized MLModel, so they do not have to be defined in the model
+ again.
+ """
+
+ past_couplings: OptimizationParameter
+ past_exchange: OptimizationParameter
+
+[docs] def initialize(
+ self, model: CasadiMLModel, var_ref: admm_datatypes.VariableReference
+ ):
+ # define variables
+ self.states = OptimizationVariable.declare(
+ denotation="state",
+ variables=model.get_states(var_ref.states),
+ ref_list=var_ref.states,
+ assert_complete=True,
+ )
+ self.controls = OptimizationVariable.declare(
+ denotation="control",
+ variables=model.get_inputs(var_ref.controls),
+ ref_list=var_ref.controls,
+ assert_complete=True,
+ )
+ self.algebraics = OptimizationVariable.declare(
+ denotation="z",
+ variables=model.auxiliaries,
+ ref_list=[],
+ )
+ self.outputs = OptimizationVariable.declare(
+ denotation="y",
+ variables=model.outputs,
+ ref_list=var_ref.outputs,
+ )
+
+ # define parameters
+ self.non_controlled_inputs = OptimizationParameter.declare(
+ denotation="d",
+ variables=model.get_inputs(var_ref.inputs),
+ ref_list=var_ref.inputs,
+ assert_complete=True,
+ )
+ self.model_parameters = OptimizationParameter.declare(
+ denotation="parameter",
+ variables=model.parameters,
+ ref_list=var_ref.parameters,
+ )
+ self.initial_state = OptimizationParameter.declare(
+ denotation="initial_state", # append the 0 as a convention to get initial guess
+ variables=model.get_states(var_ref.states),
+ ref_list=var_ref.states,
+ use_in_stage_function=False,
+ assert_complete=True,
+ )
+ self.last_control = OptimizationParameter.declare(
+ denotation="initial_control", # append the 0 as a convention to get initial guess
+ variables=model.get_inputs(var_ref.controls),
+ ref_list=var_ref.controls,
+ use_in_stage_function=False,
+ assert_complete=True,
+ )
+ self.r_del_u = OptimizationParameter.declare(
+ denotation="r_del_u",
+ variables=[CasadiParameter(name=r_del_u) for r_del_u in var_ref.r_del_u],
+ ref_list=var_ref.r_del_u,
+ use_in_stage_function=False,
+ assert_complete=True,
+ )
+
+ self.cost_function = model.cost_func
+ self.model_constraints = Constraint(
+ function=ca.vertcat(*[c.function for c in model.get_constraints()]),
+ lb=ca.vertcat(*[c.lb for c in model.get_constraints()]),
+ ub=ca.vertcat(*[c.ub for c in model.get_constraints()]),
+ )
+ self.sim_step = model.make_predict_function_for_mpc()
+ self.model = model
+ self.lags_dict: dict[str, int] = model.lags_dict
+
+ coup_names = [c.name for c in var_ref.couplings]
+ exchange_names = [c.name for c in var_ref.exchange]
+ # re-declare the outputs without the coupling and exchange variables,
+ # which are added as separate optimization variables below
+ pure_outs = [
+ m for m in model.outputs if m.name not in coup_names + exchange_names
+ ]
+ self.outputs = OptimizationVariable.declare(
+ denotation="y",
+ variables=pure_outs,
+ ref_list=var_ref.outputs,
+ )
+
+ self.local_couplings = OptimizationVariable.declare(
+ denotation="local_couplings",
+ variables=[model.get(name) for name in coup_names],
+ ref_list=coup_names,
+ )
+ couplings_global = [coup.mean for coup in var_ref.couplings]
+ self.global_couplings = OptimizationParameter.declare(
+ denotation="global_couplings",
+ variables=[CasadiInput(name=coup) for coup in couplings_global],
+ ref_list=couplings_global,
+ )
+
+ multipliers = [coup.multiplier for coup in var_ref.couplings]
+ self.multipliers = OptimizationParameter.declare(
+ denotation="multipliers",
+ variables=[CasadiInput(name=coup) for coup in multipliers],
+ ref_list=multipliers,
+ )
+
+ self.local_exchange = OptimizationVariable.declare(
+ denotation="local_exchange",
+ variables=[model.get(name) for name in exchange_names],
+ ref_list=exchange_names,
+ )
+ couplings_mean_diff = [coup.mean_diff for coup in var_ref.exchange]
+ self.exchange_diff = OptimizationParameter.declare(
+ denotation="average_diff",
+ variables=[CasadiInput(name=coup) for coup in couplings_mean_diff],
+ ref_list=couplings_mean_diff,
+ )
+
+ multipliers = [coup.multiplier for coup in var_ref.exchange]
+ self.exchange_multipliers = OptimizationParameter.declare(
+ denotation="exchange_multipliers",
+ variables=[CasadiInput(name=coup) for coup in multipliers],
+ ref_list=multipliers,
+ )
+
+ self.penalty_factor = OptimizationParameter.declare(
+ denotation="rho",
+ variables=[CasadiParameter(name="penalty_factor")],
+ ref_list=["penalty_factor"],
+ )
+ past_coup_names = [coup.lagged for coup in var_ref.couplings]
+ self.past_couplings = OptimizationParameter.declare(
+ denotation="past_couplings",
+ variables=[CasadiInput(name=name) for name in past_coup_names],
+ ref_list=past_coup_names,
+ use_in_stage_function=False,
+ )
+ past_exchange_names = [exchange.lagged for exchange in var_ref.exchange]
+ self.past_exchange = OptimizationParameter.declare(
+ denotation="past_exchange",
+ variables=[CasadiInput(name=name) for name in past_exchange_names],
+ ref_list=past_exchange_names,
+ use_in_stage_function=False,
+ )
+
+ # add admm terms to objective function: for each coupling i with local
+ # value z_i, target value zbar_i and multiplier lam_i, this adds the local
+ # part of the augmented Lagrangian: lam_i*z_i + rho/2*(zbar_i - z_i)**2
+ admm_objective = 0
+ rho = self.penalty_factor.full_symbolic[0]
+ for i in range(len(var_ref.couplings)):
+ admm_in = self.global_couplings.full_symbolic[i]
+ admm_out = self.local_couplings.full_symbolic[i]
+ admm_lam = self.multipliers.full_symbolic[i]
+ admm_objective += admm_lam * admm_out + rho / 2 * (admm_in - admm_out) ** 2
+
+ for i in range(len(var_ref.exchange)):
+ admm_in = self.exchange_diff.full_symbolic[i]
+ admm_out = self.local_exchange.full_symbolic[i]
+ admm_lam = self.exchange_multipliers.full_symbolic[i]
+ admm_objective += admm_lam * admm_out + rho / 2 * (admm_in - admm_out) ** 2
+
+ self.cost_function += admm_objective
+
+ @property
+ def variables(self) -> list[OptimizationVariable]:
+ return [
+ var
+ for var in self.__dict__.values()
+ if isinstance(var, OptimizationVariable)
+ ]
+
+ @property
+ def parameters(self) -> list[OptimizationParameter]:
+ return [
+ var
+ for var in self.__dict__.values()
+ if isinstance(var, OptimizationParameter)
+ ]
+
+ @property
+ def quantities(self) -> list[Union[OptimizationParameter, OptimizationVariable]]:
+ return self.variables + self.parameters
+
+ @property
+ def sim_step_quantities(
+ self,
+ ) -> dict[str, Union[OptimizationParameter, OptimizationVariable]]:
+ omit_in_blackbox_function = {
+ "global_couplings",
+ "multipliers",
+ "average_diff",
+ "exchange_multipliers",
+ "rho",
+ }
+ return {
+ var.name: var
+ for var in self.quantities
+ if var.name not in omit_in_blackbox_function
+ }
+
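+ # The ADMM-specific parameters excluded above (global couplings,
+ # multipliers, exchange difference and rho) appear only in the penalty
+ # terms of the objective, not in the ML prediction, so the black-box sim
+ # step never consumes them.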
+
+[docs]class MultipleShootingADMMNN(ADMMMultipleShooting, MultipleShooting_ML):
+ max_lag: int
+
+ def _discretize(self, sys: CasadiADMMNNSystem):
+ n = self.options.prediction_horizon
+ ts = self.options.time_step
+
+ # Parameters that are constant over the horizon
+ const_par = self.add_opt_par(sys.model_parameters)
+ rho = self.add_opt_par(sys.penalty_factor)
+ du_weights = self.add_opt_par(sys.r_del_u)
+
+ pre_grid_states = [ts * i for i in range(-sys.max_lag + 1, 1)]
+ inputs_lag = min(-2, -sys.max_lag) # at least -2, to consider last control
+ pre_grid_inputs = [ts * i for i in range(inputs_lag + 1, 0)]
+ prediction_grid = [ts * i for i in range(0, n)]
+
+ # sort for debugging purposes
+ full_grid = sorted(
+ list(set(prediction_grid + pre_grid_inputs + pre_grid_states))
+ )
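+ # worked example (illustrative): with max_lag = 3, ts = 60 and n = 2 this
+ # gives pre_grid_states = [-120, -60, 0], pre_grid_inputs = [-120, -60],
+ # prediction_grid = [0, 60] and thus full_grid = [-120, -60, 0, 60]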
+
+ # dict[time, dict[denotation, ca.MX]]
+ mx_dict: dict[float, dict[str, ca.MX]] = {time: {} for time in full_grid}
+
+ # add past state variables
+ for time in pre_grid_states:
+ self.pred_time = time
+ x_past = self.add_opt_par(sys.initial_state)
+ # add past states as optimization variables with fixed values so they can
+ # be accessed by the first few steps, when there are lags
+ mx_dict[time][sys.states.name] = self.add_opt_var(
+ sys.states, lb=x_past, ub=x_past, guess=x_past
+ )
+ mx_dict[time][sys.initial_state.name] = x_past
+
+ # add past inputs
+ for time in pre_grid_inputs:
+ self.pred_time = time
+ d = sys.non_controlled_inputs
+ mx_dict[time][d.name] = self.add_opt_par(d)
+ u_past = self.add_opt_par(sys.last_control)
+ mx_dict[time][sys.controls.name] = self.add_opt_var(
+ sys.controls, lb=u_past, ub=u_past, guess=u_past
+ )
+ mx_dict[time][sys.last_control.name] = u_past
+
+ # admm quantities
+ past_coup = self.add_opt_par(sys.past_couplings)
+ past_exch = self.add_opt_par(sys.past_exchange)
+ # pin the past couplings and exchanges as optimization variables fixed
+ # to their parameter values
+ mx_dict[time][sys.local_couplings.name] = self.add_opt_var(
+ sys.local_couplings, lb=past_coup, ub=past_coup, guess=past_coup
+ )
+ mx_dict[time][sys.local_exchange.name] = self.add_opt_var(
+ sys.local_exchange, lb=past_exch, ub=past_exch, guess=past_exch
+ )
+
+ # add all variables over future grid
+ for time in prediction_grid:
+ self.pred_time = time
+ mx_dict[time][sys.controls.name] = self.add_opt_var(sys.controls)
+ mx_dict[time][sys.non_controlled_inputs.name] = self.add_opt_par(
+ sys.non_controlled_inputs
+ )
+ mx_dict[time][sys.algebraics.name] = self.add_opt_var(sys.algebraics)
+ mx_dict[time][sys.outputs.name] = self.add_opt_var(sys.outputs)
+
+ # admm related quantities
+ mx_dict[time][sys.multipliers.name] = self.add_opt_par(sys.multipliers)
+ mx_dict[time][sys.exchange_multipliers.name] = self.add_opt_par(
+ sys.exchange_multipliers
+ )
+ mx_dict[time][sys.exchange_diff.name] = self.add_opt_par(sys.exchange_diff)
+ mx_dict[time][sys.global_couplings.name] = self.add_opt_par(
+ sys.global_couplings
+ )
+ mx_dict[time][sys.local_exchange.name] = self.add_opt_var(
+ sys.local_exchange
+ )
+ mx_dict[time][sys.local_couplings.name] = self.add_opt_var(
+ sys.local_couplings
+ )
+
+ # create the state grid
+ # x0 will always be the state at time 0, since the loop defining it starts
+ # in the past and finishes at 0
+ self.pred_time = 0
+ for time in prediction_grid[1:]:
+ self.pred_time = time
+ mx_dict[time][sys.states.name] = self.add_opt_var(sys.states)
+ self.pred_time += ts
+ mx_dict[self.pred_time] = {sys.states.name: self.add_opt_var(sys.states)}
+
+ all_quantities = sys.all_system_quantities()
+ # add constraints and create the objective function for all stages
+ for time in prediction_grid:
+ stage_mx = mx_dict[time]
+
+ # add penalty on control change between intervals
+ u_prev = mx_dict[time - ts][sys.controls.name]
+ uk = stage_mx[sys.controls.name]
+ self.objective_function += ts * ca.dot(du_weights, (u_prev - uk) ** 2)
+
+ # get stage arguments from current time step
+ stage_arguments = {
+ # variables
+ sys.states.name: stage_mx[sys.states.name],
+ sys.algebraics.name: stage_mx[sys.algebraics.name],
+ sys.outputs.name: stage_mx[sys.outputs.name],
+ # parameters
+ sys.controls.name: stage_mx[sys.controls.name],
+ sys.non_controlled_inputs.name: stage_mx[
+ sys.non_controlled_inputs.name
+ ],
+ sys.model_parameters.name: const_par,
+ sys.penalty_factor.name: rho,
+ # admm related quantities
+ sys.multipliers.name: stage_mx[sys.multipliers.name],
+ sys.exchange_multipliers.name: stage_mx[sys.exchange_multipliers.name],
+ sys.exchange_diff.name: stage_mx[sys.exchange_diff.name],
+ sys.global_couplings.name: stage_mx[sys.global_couplings.name],
+ sys.local_exchange.name: stage_mx[sys.local_exchange.name],
+ sys.local_couplings.name: stage_mx[sys.local_couplings.name],
+ }
+
+ # collect stage arguments for lagged variables
+ for lag, denotation_dict in self._lagged_input_names.items():
+ for denotation, var_names in denotation_dict.items():
+ l_name = name_with_lag(denotation, lag)
+ mx_list = []
+ for v_name in var_names:
+ index = all_quantities[denotation].full_names.index(v_name)
+ mx_list.append(mx_dict[time - lag * ts][denotation][index])
+ stage_arguments[l_name] = ca.vertcat(*mx_list)
+
+ # evaluate a stage, add path constraints, multiple shooting constraints
+ # and add to the objective function
+ stage_result = self._stage_function(**stage_arguments)
+ self.add_constraint(
+ stage_result["model_constraints"],
+ lb=stage_result["lb_model_constraints"],
+ ub=stage_result["ub_model_constraints"],
+ )
+ self.add_constraint(
+ stage_result["next_states"] - mx_dict[time + ts][sys.states.name]
+ )
+ self.objective_function += stage_result["cost_function"] * ts
+
+ def _construct_stage_function(self, system: CasadiADMMNNSystem):
+ """
+ Combine information from the model and the var_ref to create CasADi
+ functions which describe the system dynamics and constraints at each
+ stage of the optimization problem. Sets the stage function. It has
+ all mpc variables as inputs, sorted by denotation (declared in
+ self.declare_quantities) and outputs ode, cost function and 3 outputs
+ per constraint (constraint, lb_constraint, ub_constraint).
+
+ In the basic case, it has the form:
+ CasadiFunction: ['x', 'z', 'u', 'y', 'd', 'p'] ->
+ ['ode', 'cost_function', 'model_constraints',
+ 'ub_model_constraints', 'lb_model_constraints']
+
+ Args:
+ system
+ """
+ all_system_quantities = system.all_system_quantities()
+ constraints = {"model_constraints": system.model_constraints}
+
+ inputs = [
+ q.full_symbolic
+ for q in all_system_quantities.values()
+ if q.use_in_stage_function
+ ]
+ input_denotations = [
+ q.name
+ for denotation, q in all_system_quantities.items()
+ if q.use_in_stage_function
+ ]
+
+ # aggregate constraints
+ constraints_func = [c.function for c in constraints.values()]
+ constraints_lb = [c.lb for c in constraints.values()]
+ constraints_ub = [c.ub for c in constraints.values()]
+ constraint_denotations = list(constraints.keys())
+ constraint_lb_denotations = [LB_PREFIX + k for k in constraints]
+ constraint_ub_denotations = [UB_PREFIX + k for k in constraints]
+
+ # create a dictionary which holds all the inputs for the sim step of the model
+ all_input_variables = {}
+ lagged_inputs: dict[int, dict[str, ca.MX]] = {}
+ # dict[lag, dict[denotation, list[var_name]]]
+ lagged_input_names: dict[int, dict[str, list[str]]] = {}
+
+ for q_name, quantity in system.sim_step_quantities.items():
+ if not quantity.use_in_stage_function:
+ continue
+
+ for v_id, v_name in enumerate(quantity.full_names):
+ all_input_variables[v_name] = quantity.full_symbolic[v_id]
+ lag = system.lags_dict.get(v_name, 1)
+
+ # if lag exists, we have to create and organize new variables
+ for j in range(1, lag):
+ # create an MX variable for this lag
+ l_name = name_with_lag(v_name, j)
+ new_lag_var = ca.MX.sym(l_name)
+ all_input_variables[l_name] = new_lag_var
+
+ # add the mx variable to its lag time and denotation
+ lagged_inputs_j = lagged_inputs.setdefault(j, {})
+ lv_mx = lagged_inputs_j.setdefault(q_name, ca.DM([]))
+ lagged_inputs[j][q_name] = ca.vertcat(lv_mx, new_lag_var)
+
+ # keep track of the variable names that were added
+ lagged_input_names_j = lagged_input_names.setdefault(j, {})
+ lv_names = lagged_input_names_j.setdefault(q_name, [])
+ lv_names.append(v_name)
+
+ self._lagged_input_names = lagged_input_names
+ flat_lagged_inputs = {
+ f"{den}_{i}": mx
+ for i, subdict in lagged_inputs.items()
+ for den, mx in subdict.items()
+ }
+
+ all_outputs = system.sim_step(**all_input_variables)
+ state_output_it = (all_outputs[s_name] for s_name in system.states.full_names)
+ state_output = ca.vertcat(*state_output_it)
+
+ # aggregate outputs
+ outputs = [
+ state_output,
+ system.cost_function,
+ *constraints_func,
+ *constraints_lb,
+ *constraints_ub,
+ ]
+ output_denotations = [
+ "next_states",
+ "cost_function",
+ *constraint_denotations,
+ *constraint_lb_denotations,
+ *constraint_ub_denotations,
+ ]
+
+ # function describing system dynamics and cost function
+ self._stage_function = ca.Function(
+ "f",
+ inputs + list(flat_lagged_inputs.values()),
+ outputs,
+ # input handles to make kwarg use possible and to debug
+ input_denotations + list(flat_lagged_inputs),
+ # output handles to make kwarg use possible and to debug
+ output_denotations,
+ )
+
+
+[docs]class CasADiADMMBackend_NN(CasADiADMMBackend, CasADiBBBackend):
+ """
+ Class doing optimization with an MLModel.
+ """
+
+ system_type = CasadiADMMNNSystem
+ discretization_types = {
+ DiscretizationMethod.multiple_shooting: MultipleShootingADMMNN
+ }
+ system: CasadiADMMNNSystem
+ # a dictionary of collections of the variable lags
+
+import casadi as ca
+from typing import Dict
+import collections
+
+from agentlib_mpc.models.casadi_model import CasadiParameter
+
+from agentlib_mpc.data_structures.casadi_utils import (
+ LB_PREFIX,
+ UB_PREFIX,
+ DiscretizationMethod,
+ SolverFactory,
+ Constraint,
+)
+from agentlib_mpc.data_structures.ml_model_datatypes import name_with_lag
+from agentlib_mpc.data_structures.mpc_datamodels import (
+ FullVariableReference,
+)
+from agentlib_mpc.models.casadi_ml_model import CasadiMLModel
+from agentlib_mpc.optimization_backends.casadi_.core.VariableGroup import (
+ OptimizationQuantity,
+ OptimizationVariable,
+ OptimizationParameter,
+)
+from agentlib_mpc.optimization_backends.casadi_.basic import (
+ MultipleShooting,
+ CasADiBaseBackend,
+)
+from agentlib_mpc.optimization_backends.casadi_.full import FullSystem
+
+
+[docs]class CasadiMLSystem(FullSystem):
+ # system whose dynamics are given by a serialized MLModel; the stage
+ # function is built from the model's predict function instead of an ODE
+ model: CasadiMLModel
+ lags_dict: dict[str, int]
+ sim_step: ca.Function
+
+[docs] def initialize(self, model: CasadiMLModel, var_ref: FullVariableReference):
+ # define variables
+ self.states = OptimizationVariable.declare(
+ denotation="state",
+ variables=model.get_states(var_ref.states),
+ ref_list=var_ref.states,
+ assert_complete=True,
+ )
+ self.controls = OptimizationVariable.declare(
+ denotation="control",
+ variables=model.get_inputs(var_ref.controls),
+ ref_list=var_ref.controls,
+ assert_complete=True,
+ )
+ self.algebraics = OptimizationVariable.declare(
+ denotation="z",
+ variables=model.auxiliaries,
+ ref_list=[],
+ )
+ self.outputs = OptimizationVariable.declare(
+ denotation="y",
+ variables=model.outputs,
+ ref_list=var_ref.outputs,
+ )
+
+ # define parameters
+ self.non_controlled_inputs = OptimizationParameter.declare(
+ denotation="d",
+ variables=model.get_inputs(var_ref.inputs),
+ ref_list=var_ref.inputs,
+ assert_complete=True,
+ )
+ self.model_parameters = OptimizationParameter.declare(
+ denotation="parameter",
+ variables=model.parameters,
+ ref_list=var_ref.parameters,
+ )
+ self.initial_state = OptimizationParameter.declare(
+ denotation="initial_state",
+ variables=model.get_states(var_ref.states),
+ ref_list=var_ref.states,
+ use_in_stage_function=False,
+ assert_complete=True,
+ )
+ self.last_control = OptimizationParameter.declare(
+ denotation="initial_control",
+ variables=model.get_inputs(var_ref.controls),
+ ref_list=var_ref.controls,
+ use_in_stage_function=False,
+ assert_complete=True,
+ )
+ self.r_del_u = OptimizationParameter.declare(
+ denotation="r_del_u",
+ variables=[CasadiParameter(name=r_del_u) for r_del_u in var_ref.r_del_u],
+ ref_list=var_ref.r_del_u,
+ use_in_stage_function=False,
+ assert_complete=True,
+ )
+ self.cost_function = model.cost_func
+ self.model_constraints = Constraint(
+ function=ca.vertcat(*[c.function for c in model.get_constraints()]),
+ lb=ca.vertcat(*[c.lb for c in model.get_constraints()]),
+ ub=ca.vertcat(*[c.ub for c in model.get_constraints()]),
+ )
+ self.sim_step = model.make_predict_function_for_mpc()
+ self.model = model
+ self.lags_dict: dict[str, int] = model.lags_dict
+
+ @property
+ def max_lag(self) -> int:
+ if self.lags_dict:
+ return max(self.lags_dict.values())
+ else:
+ # if there is no black-box variable, we have a lag of 1
+ return 1
+
+[docs] def all_system_quantities(self) -> dict[str, OptimizationQuantity]:
+ return {var.name: var for var in self.quantities}
+
+
+[docs]class MultipleShooting_ML(MultipleShooting):
+ max_lag: int
+
+ def _discretize(self, sys: CasadiMLSystem):
+ n = self.options.prediction_horizon
+ ts = self.options.time_step
+ const_par = self.add_opt_par(sys.model_parameters)
+ du_weights = self.add_opt_par(sys.r_del_u)
+
+ pre_grid_states = [ts * i for i in range(-sys.max_lag + 1, 1)]
+ inputs_lag = min(-2, -sys.max_lag) # at least -2, to consider last control
+ pre_grid_inputs = [ts * i for i in range(inputs_lag + 1, 0)]
+ prediction_grid = [ts * i for i in range(0, n)]
+
+ # sort for debugging purposes
+ full_grid = sorted(
+ list(set(prediction_grid + pre_grid_inputs + pre_grid_states))
+ )
+
+ # dict[time, dict[denotation, ca.MX]]
+ mx_dict: dict[float, dict[str, ca.MX]] = {time: {} for time in full_grid}
+
+ # add past state variables
+ for time in pre_grid_states:
+ self.pred_time = time
+ x_past = self.add_opt_par(sys.initial_state)
+ # add past states as optimization variables with fixed values so they can
+ # be accessed by the first few steps, when there are lags
+ mx_dict[time][sys.states.name] = self.add_opt_var(
+ sys.states, lb=x_past, ub=x_past, guess=x_past
+ )
+ mx_dict[time][sys.initial_state.name] = x_past
+
+ # add past inputs
+ for time in pre_grid_inputs:
+ self.pred_time = time
+ d = sys.non_controlled_inputs
+ mx_dict[time][d.name] = self.add_opt_par(d)
+ u_past = self.add_opt_par(sys.last_control)
+ mx_dict[time][sys.controls.name] = self.add_opt_var(
+ sys.controls, lb=u_past, ub=u_past, guess=u_past
+ )
+ mx_dict[time][sys.last_control.name] = u_past
+
+ # add all variables over future grid
+ for time in prediction_grid:
+ self.pred_time = time
+ mx_dict[time][sys.controls.name] = self.add_opt_var(sys.controls)
+ mx_dict[time][sys.non_controlled_inputs.name] = self.add_opt_par(
+ sys.non_controlled_inputs
+ )
+ mx_dict[time][sys.algebraics.name] = self.add_opt_var(sys.algebraics)
+ mx_dict[time][sys.outputs.name] = self.add_opt_var(sys.outputs)
+
+ # create the state grid
+ # x0 will always be the state at time 0, since the loop defining it starts
+ # in the past and finishes at 0
+ self.pred_time = 0
+ for time in prediction_grid[1:]:
+ self.pred_time = time
+ mx_dict[time][sys.states.name] = self.add_opt_var(sys.states)
+ self.pred_time += ts
+ mx_dict[self.pred_time] = {sys.states.name: self.add_opt_var(sys.states)}
+
+ all_quantities = sys.all_system_quantities()
+ # add constraints and create the objective function for all stages
+ for time in prediction_grid:
+ stage_mx = mx_dict[time]
+
+ # add penalty on control change between intervals
+ u_prev = mx_dict[time - ts][sys.controls.name]
+ uk = stage_mx[sys.controls.name]
+ self.objective_function += ts * ca.dot(du_weights, (u_prev - uk) ** 2)
+
+ # get stage arguments from current time step
+ stage_arguments = {
+ # variables
+ sys.states.name: stage_mx[sys.states.name],
+ sys.algebraics.name: stage_mx[sys.algebraics.name],
+ sys.outputs.name: stage_mx[sys.outputs.name],
+ # parameters
+ sys.controls.name: stage_mx[sys.controls.name],
+ sys.non_controlled_inputs.name: stage_mx[
+ sys.non_controlled_inputs.name
+ ],
+ sys.model_parameters.name: const_par,
+ }
+
+ # collect stage arguments for lagged variables
+ for lag, denotation_dict in self._lagged_input_names.items():
+ for denotation, var_names in denotation_dict.items():
+ l_name = name_with_lag(denotation, lag)
+ mx_list = []
+ for v_name in var_names:
+ # add only the singular variable which has a lag on this level
+ # to the stage arguments
+ index = all_quantities[denotation].full_names.index(v_name)
+ mx_list.append(mx_dict[time - lag * ts][denotation][index])
+ stage_arguments[l_name] = ca.vertcat(*mx_list)
+
+ # evaluate a stage, add path constraints, multiple shooting constraints
+ # and add to the objective function
+ stage_result = self._stage_function(**stage_arguments)
+ self.add_constraint(
+ stage_result["model_constraints"],
+ lb=stage_result["lb_model_constraints"],
+ ub=stage_result["ub_model_constraints"],
+ )
+ self.add_constraint(
+ stage_result["next_states"] - mx_dict[time + ts][sys.states.name]
+ )
+ self.objective_function += stage_result["cost_function"] * ts
+
+[docs] def initialize(self, system: CasadiMLSystem, solver_factory: SolverFactory):
+ """Initializes the trajectory optimization problem, creating all symbolic
+ variables of the OCP, the mapping function and the numerical solver."""
+ self._construct_stage_function(system)
+ super().initialize(system=system, solver_factory=solver_factory)
+
+ def _construct_stage_function(self, system: CasadiMLSystem):
+ """
+ Combine information from the model and the var_ref to create CasADi
+ functions which describe the system dynamics and constraints at each
+ stage of the optimization problem. Sets the stage function. It has
+ all mpc variables as inputs, sorted by denotation (declared in
+ self.declare_quantities) and outputs ode, cost function and 3 outputs
+ per constraint (constraint, lb_constraint, ub_constraint).
+
+ In the basic case, it has the form:
+ CasadiFunction: ['x', 'z', 'u', 'y', 'd', 'p'] ->
+ ['ode', 'cost_function', 'model_constraints',
+ 'ub_model_constraints', 'lb_model_constraints']
+
+ Args:
+ system
+ """
+ all_system_quantities = system.all_system_quantities()
+ constraints = {"model_constraints": system.model_constraints}
+
+ inputs = [
+ q.full_symbolic
+ for q in all_system_quantities.values()
+ if q.use_in_stage_function
+ ]
+ input_denotations = [
+ q.name
+ for denotation, q in all_system_quantities.items()
+ if q.use_in_stage_function
+ ]
+
+ # aggregate constraints
+ constraints_func = [c.function for c in constraints.values()]
+ constraints_lb = [c.lb for c in constraints.values()]
+ constraints_ub = [c.ub for c in constraints.values()]
+ constraint_denotations = list(constraints.keys())
+ constraint_lb_denotations = [LB_PREFIX + k for k in constraints]
+ constraint_ub_denotations = [UB_PREFIX + k for k in constraints]
+
+ # create a dictionary which holds all the inputs for the sim step of the model
+ all_input_variables = {}
+ lagged_inputs: dict[int, dict[str, ca.MX]] = {}
+ # dict[lag, dict[denotation, list[var_name]]]
+ lagged_input_names: dict[int, dict[str, list[str]]] = {}
+ for q_name, q_obj in all_system_quantities.items():
+ if not q_obj.use_in_stage_function:
+ continue
+ for v_id, v_name in enumerate(q_obj.full_names):
+ all_input_variables[v_name] = q_obj.full_symbolic[v_id]
+ lag = system.lags_dict.get(v_name, 1)
+
+ # if lag exists, we have to create and organize new variables
+ for j in range(1, lag):
+ # create an MX variable for this lag
+ l_name = name_with_lag(v_name, j)
+ new_lag_var = system.model.lags_mx_store[l_name]
+ all_input_variables[l_name] = new_lag_var
+
+ # add the mx variable to its lag time and denotation
+ lagged_inputs_j = lagged_inputs.setdefault(j, {})
+ lv_mx = lagged_inputs_j.setdefault(q_name, ca.DM([]))
+ lagged_inputs[j][q_name] = ca.vertcat(lv_mx, new_lag_var)
+
+ # keep track of the variable names that were added
+ lagged_input_names_j = lagged_input_names.setdefault(j, {})
+ lv_names = lagged_input_names_j.setdefault(q_name, [])
+ lv_names.append(v_name)
+
+ self._lagged_input_names = lagged_input_names
+ flat_lagged_inputs = {
+ f"{den}_{i}": mx
+ for i, subdict in lagged_inputs.items()
+ for den, mx in subdict.items()
+ }
+
+ all_outputs = system.sim_step(**all_input_variables)
+ state_output_it = (all_outputs[s_name] for s_name in system.states.full_names)
+ state_output = ca.vertcat(*state_output_it)
+
+ # aggregate outputs
+ outputs = [
+ state_output,
+ system.cost_function,
+ *constraints_func,
+ *constraints_lb,
+ *constraints_ub,
+ ]
+ output_denotations = [
+ "next_states",
+ "cost_function",
+ *constraint_denotations,
+ *constraint_lb_denotations,
+ *constraint_ub_denotations,
+ ]
+
+ # function describing system dynamics and cost function
+ self._stage_function = ca.Function(
+ "f",
+ inputs + list(flat_lagged_inputs.values()),
+ outputs,
+ # input handles to make kwarg use possible and to debug
+ input_denotations + list(flat_lagged_inputs),
+ # output handles to make kwarg use possible and to debug
+ output_denotations,
+ )
+
+ def _create_lag_structure_for_denotations(self, system: CasadiMLSystem):
+ all_system_quantities = system.all_system_quantities()
+ all_input_variables = {}
+ lagged_inputs: dict[int, dict[str, ca.MX]] = {}
+ # dict[lag, dict[denotation, list[var_name]]]
+ lagged_input_names: dict[int, dict[str, list[str]]] = {}
+ for q_name, q_obj in all_system_quantities.items():
+ if not q_obj.use_in_stage_function:
+ continue
+ for v_id, v_name in enumerate(q_obj.full_names):
+ all_input_variables[v_name] = q_obj.full_symbolic[v_id]
+ lag = system.lags_dict.get(v_name, 1)
+
+ # if lag exists, we have to create and organize new variables
+ for j in range(1, lag):
+ # create an MX variable for this lag
+ l_name = name_with_lag(v_name, j)
+ new_lag_var = ca.MX.sym(l_name)
+ all_input_variables[l_name] = new_lag_var
+
+ # add the mx variable to its lag time and denotation
+ lagged_inputs_j = lagged_inputs.setdefault(j, {})
+ lv_mx = lagged_inputs_j.setdefault(q_name, ca.DM([]))
+ lagged_inputs[j][q_name] = ca.vertcat(lv_mx, new_lag_var)
+
+ # keep track of the variable names that were added
+ lagged_input_names_j = lagged_input_names.setdefault(j, {})
+ lv_names = lagged_input_names_j.setdefault(q_name, [])
+ lv_names.append(v_name)
+
+ return all_input_variables, lagged_inputs, lagged_input_names
+
+
+[docs]class CasADiBBBackend(CasADiBaseBackend):
+ """
+ Class doing optimization with an MLModel.
+ """
+
+ system_type = CasadiMLSystem
+ discretization_types = {DiscretizationMethod.multiple_shooting: MultipleShooting_ML}
+ system: CasadiMLSystem
+ # a dictionary of collections of the variable lags
+ lag_collection: Dict[str, collections.deque] = {}
+ max_lag: int
+
+[docs] def get_lags_per_variable(self) -> dict[str, float]:
+ """Returns the name of variables which include lags and their lag. The MPC
+ module can use this information to save relevant past data of lagged
+ variables"""
+ ts = self.config.discretization_options.time_step
+ return {
+ name: (lag - 1) * ts
+ for name, lag in self.system.lags_dict.items()
+ if name in self.var_ref
+ }
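+
+ # worked example (illustrative names): with time_step = 300 s and
+ # lags_dict = {"T_room": 3}, the variable needs (3 - 1) * 300 = 600 s of
+ # history, so the method returns {"T_room": 600.0} (provided "T_room" is
+ # part of the var_ref)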
+
+from __future__ import annotations
+
+import dataclasses
+
+import casadi as ca
+
+from agentlib_mpc.models.casadi_model import CasadiVariable
+
+
+[docs]@dataclasses.dataclass(frozen=True)
+class OptimizationQuantity:
+ name: str
+ full_symbolic: ca.MX # used in complex cost functions, e.g. for ADMM
+ dim: int
+ ref_names: tuple[str, ...] # used in get_mpc_inputs
+ full_names: tuple[str, ...] # used in create_res_format
+ use_in_stage_function: bool
+
+ def __hash__(self):
+ return hash(self.name)
+
+
+def _check_ref_in_full(ref: list[str], full_names: list[str]):
+ diff = set(ref).difference(full_names)
+ if diff:
+ raise ValueError(
+ f"The variables from the variable ref are not a subset of the model "
+ f"variables. The following variables are wrong: {diff}"
+ )
+
+
+[docs]@dataclasses.dataclass(frozen=True)
+class OptimizationVariable(OptimizationQuantity):
+ input_map: ca.Function # get mpc inputs
+ output_map: ca.Function # get mpc outputs
+ use_in_stage_function: bool
+ binary: bool
+
+[docs] @classmethod
+ def declare(
+ cls,
+ denotation: str,
+ variables: list[CasadiVariable],
+ ref_list: list[str],
+ use_in_stage_function: bool = True,
+ assert_complete: bool = False,
+ binary: bool = False,
+ ) -> OptimizationVariable:
+ """
+ Declares a group of optimization variables that serve a purpose in
+ the optimization problem. Typical groups are states, the control
+ inputs or slack variables.
+
+ Args:
+ binary: Flag, whether these variables are binary
+ denotation: The key of the variable, e.g. 'X', 'U', etc. Use
+ this key in the discretization function to add the variable at
+ different stages of the optimization problem. The optimal value
+ of these variables will also be mapped to this key.
+ variables: A list of
+ CasadiVariables or an MX/SX vector including all variables
+ within this category.
+ ref_list: A list of names indicating which variables
+ in full_list are AgentVariables and need to be updated before
+ each optimization.
+ use_in_stage_function: If False, the variable is not
+ added to the stage function. If True, the variable needs to be
+ provided to the stage function at every point in the
+ discretization function.
+ assert_complete: If True, throws an error if the ref_list does
+ not contain all variables.
+ """
+ full_symbolic = []
+ full_names = []
+ ref_symbolic = []
+ lb_full = []
+ lb_ref = []
+ ub_full = []
+ ub_ref = []
+ ref_list_ordered = []
+
+ for var in variables:
+ name = var.name
+ if assert_complete and name not in ref_list:
+ raise ValueError(
+ f"The variable {name} which is defined in the model "
+ f" has to be defined in the ModuleConfig!"
+ )
+
+ full_symbolic.append(var.sym)
+ full_names.append(name)
+
+ if name in ref_list:
+ lb = ca.MX.sym(f"lb_{denotation}")
+ ub = ca.MX.sym(f"lb_{denotation}")
+ lb_ref.append(lb)
+ ub_ref.append(ub)
+ ref_symbolic.append(var.sym)
+ ref_list_ordered.append(name)
+ lb_full.append(lb)
+ ub_full.append(ub)
+ else:
+ lb_full.append(var.lb)
+ ub_full.append(var.ub)
+
+ full_symbolic = ca.vertcat(*full_symbolic)
+
+ # create functions that map between model variable vectors (so all variables)
+ # and variables from the var_ref (only the ones specified in the user config)
+ input_mapping = ca.Function(
+ f"par_map_{denotation}",
+ [ca.vertcat(*lb_ref), ca.vertcat(*ub_ref)],
+ [ca.vertcat(*lb_full), ca.vertcat(*ub_full)],
+ ["lb_ref", "ub_ref"],
+ [f"lb_{denotation}", f"ub_{denotation}"],
+ )
+ output_mapping = ca.Function(
+ f"par_map_{denotation}",
+ [full_symbolic],
+ [ca.vertcat(*ref_symbolic)],
+ [denotation],
+ ["ref"],
+ )
+
+ dimension = full_symbolic.shape[0]
+ _check_ref_in_full(ref_list, full_names)
+ return cls(
+ name=denotation,
+ full_symbolic=full_symbolic,
+ dim=dimension,
+ ref_names=tuple(ref_list_ordered),
+ full_names=tuple(full_names),
+ use_in_stage_function=use_in_stage_function,
+ input_map=input_mapping,
+ output_map=output_mapping,
+ binary=binary,
+ )
+
+ def __hash__(self):
+ return hash(self.name)
+
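+ # Illustrative sketch of declaring a variable group (variable names are
+ # assumed): two model inputs enter the problem, but only "mDot" is updated
+ # from an AgentVariable before each optimization.
+ #
+ # from agentlib_mpc.models.casadi_model import CasadiInput
+ # controls = OptimizationVariable.declare(
+ # denotation="control",
+ # variables=[CasadiInput(name="mDot"), CasadiInput(name="T_set")],
+ # ref_list=["mDot"],
+ # )
+ # controls.dim # -> 2: both inputs enter the problem
+ # controls.ref_names # -> ("mDot",): only this one is updated online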
+
+[docs]@dataclasses.dataclass(frozen=True)
+class OptimizationParameter(OptimizationQuantity):
+ full_with_defaults: ca.MX
+ add_default_values: ca.Function
+
+[docs] @classmethod
+ def declare(
+ cls,
+ denotation: str,
+ variables: list[CasadiVariable],
+ ref_list: list[str],
+ use_in_stage_function=True,
+ assert_complete: bool = False,
+ ):
+ """
+ Declares a group of optimization parameters that serve a purpose in
+ the optimization problem. Typical groups are uncontrollable inputs or
+ physical parameters.
+
+ Args:
+ denotation: The key of the variable, e.g. 'p', 'd', etc. Use this
+ key in the discretization function to add the parameter at
+ different stages of the optimization problem.
+ variables: A list of CasadiVariables including all parameters
+ within this category.
+ ref_list: A list of names indicating which parameters in full_list
+ are AgentVariables and need to be updated before each
+ optimization.
+ use_in_stage_function: If False, the parameter is not added to the
+ stage function. If True, the variable needs to be provided to
+ the stage function at every point in the discretization function.
+ assert_complete: If True, throws an error if the ref_list does
+ not contain all variables.
+ """
+ provided = []
+ full_with_defaults = []
+ full_symbolic = []
+ full_names = []
+ ref_list_ordered = []
+ for var in variables:
+ name = var.name
+ if assert_complete:
+ assert name in ref_list, (
+ f"The variable {name} which is defined in the model "
+ f" has to be defined in the ModuleConfig!"
+ )
+
+ full_symbolic.append(var.sym)
+ full_names.append(name)
+ if name in ref_list:
+ full_with_defaults.append(var.sym)
+ provided.append(var.sym)
+ ref_list_ordered.append(name)
+ else:
+ if var.value is None:
+ raise ValueError(
+ f"Parameter '{name}' is not declared in the module "
+ f"config. Tried using default from model "
+ f" but it was 'None'."
+ )
+ full_with_defaults.append(var.value)
+ full_with_defaults = ca.vertcat(*full_with_defaults)
+
+ add_default_values = ca.Function(
+ f"par_map_{denotation}",
+ [ca.vertcat(*provided)],
+ [full_with_defaults],
+ ["ref"],
+ [denotation],
+ )
+ _check_ref_in_full(ref_list, full_names)
+ return cls(
+ name=denotation,
+ full_with_defaults=full_with_defaults,
+ full_symbolic=ca.vertcat(*full_symbolic),
+ dim=full_with_defaults.shape[0],
+ ref_names=tuple(ref_list_ordered),
+ full_names=tuple(full_names),
+ use_in_stage_function=use_in_stage_function,
+ add_default_values=add_default_values,
+ )
+
+ def __hash__(self):
+ return hash(self.name)
+
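+# Usage sketch for OptimizationParameter.declare (illustrative values; the
+# CasadiParameter(name=...) pattern follows its use elsewhere in this package):
+#
+#     pars = OptimizationParameter.declare(
+#         denotation="p",
+#         variables=[
+#             CasadiParameter(name="cp", value=4184.0),
+#             CasadiParameter(name="rho", value=997.0),
+#         ],
+#         ref_list=["cp"],  # only "cp" is supplied at runtime
+#     )
+#     # add_default_values fills in 997.0 for "rho" automatically:
+#     full_vector = pars.add_default_values(ref=4180.0)["p"]
+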
+import logging
+import platform
+from pathlib import Path
+from typing import Type, Optional
+
+import casadi as ca
+import pydantic
+from agentlib.core.errors import ConfigurationError
+
+from agentlib_mpc.data_structures.mpc_datamodels import MPCVariable, stats_path
+from agentlib_mpc.optimization_backends.casadi_.core import system
+from agentlib_mpc.optimization_backends.casadi_.core.VariableGroup import (
+ OptimizationVariable,
+ OptimizationParameter,
+)
+from agentlib_mpc.optimization_backends.casadi_.core.discretization import (
+ DiscretizationT,
+ Results,
+)
+from agentlib_mpc.optimization_backends.backend import (
+ OptimizationBackend,
+ BackendConfig,
+)
+from agentlib_mpc.models.casadi_model import (
+ CasadiModel,
+)
+from agentlib_mpc.data_structures import mpc_datamodels
+from agentlib_mpc.data_structures.casadi_utils import (
+ CasadiDiscretizationOptions,
+ SolverFactory,
+ DiscretizationMethod,
+ SolverOptions,
+)
+from agentlib_mpc.utils import sampling
+
+logger = logging.getLogger(__name__)
+
+
+[docs]class CasadiBackendConfig(BackendConfig):
+ discretization_options: CasadiDiscretizationOptions = pydantic.Field(
+ default_factory=CasadiDiscretizationOptions
+ )
+ solver: SolverOptions = pydantic.Field(default_factory=SolverOptions)
+ build_batch_bat: Optional[Path] = pydantic.Field(
+ default=None,
+ description="Path to a batch file, which can compile C code on windows.",
+ )
+ do_jit: Optional[bool] = pydantic.Field(
+ default=None,
+ description="Boolean to turn JIT of the optimization problems on or off.",
+ validate_default=True,
+ )
+
+[docs] @pydantic.field_validator("do_jit")
+ @classmethod
+ def validate_compile(cls, do_jit, info: pydantic.FieldValidationInfo):
+ """Checks whether code compilation should be done."""
+
+ # if we're on Linux, we cannot generate the code as of now
+ if platform.system() == "Linux":
+ if do_jit is True:
+ raise NotImplementedError(
+ "C Code generation not implemented yet for linux."
+ )
+ # if not specified or False, we do not do jit
+ return False
+
+ # assume we're on Windows. If there is no batch file, we have to return False
+ bat_file = info.data["build_batch_bat"]
+ if bat_file is None:
+ if do_jit is True:
+ raise ConfigurationError(
+ "Cannot do C-Code generation on Windows without specifying a "
+ "proper batch file through the 'build_batch_bat' option."
+ )
+ return False
+
+ # at this point we are on Windows and have a (hopefully) valid batch file
+ if do_jit is None:
+ # the user provided a batch file but no clear instruction. For backwards
+ # compatibility, we will assume compilation is desired and return True
+ return True
+
+ # if both do_jit and the batch file are specified, we do not modify do_jit
+ return do_jit
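+
+ # Decision summary of the validator above:
+ #   Linux            + do_jit=True        -> NotImplementedError
+ #   Linux            + do_jit=None/False  -> False
+ #   Windows, no .bat + do_jit=True        -> ConfigurationError
+ #   Windows, no .bat + do_jit=None/False  -> False
+ #   Windows, .bat    + do_jit=None        -> True (backwards compatibility)
+ #   Windows, .bat    + do_jit given       -> returned unchanged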
+
+
+[docs]class CasADiBackend(OptimizationBackend):
+ """
+ OptimizationBackend for solving the optimization problem with CasADi.
+ Requires the model to be a CasADi model.
+ """
+
+ system_type: Type[system.SystemT]
+ system: system.SystemT
+ discretization_types: dict[DiscretizationMethod, Type[DiscretizationT]]
+ discretization: DiscretizationT
+ _supported_models = {"CasadiModel": CasadiModel}
+ config_type = CasadiBackendConfig
+
+[docs] def setup_optimization(self, var_ref: mpc_datamodels.VariableReference):
+ """
+ Performs all necessary steps to make the ``solve`` method usable.
+ To do this, it calls several auxiliary functions. These functions can
+ be overloaded to change the resulting optimization problem.
+
+ Args:
+ var_ref: class with variable name lists sorted by function in the mpc.
+ """
+ super().setup_optimization(var_ref=var_ref)
+ self.reset_setup_attributes()
+
+ # connect variable roles defined by the mpc module with the model
+ self.system.initialize(model=self.model, var_ref=self.var_ref)
+ solver_factory = SolverFactory(
+ do_jit=self.config.do_jit,
+ bat_file=self.config.build_batch_bat,
+ name=self.config.name,
+ options=self.config.solver,
+ logger=self.logger,
+ )
+ self.discretization.initialize(
+ system=self.system, solver_factory=solver_factory
+ )
+
+[docs] def solve(self, now: float, current_vars: dict[str, MPCVariable]) -> Results:
+ # collect and format inputs
+ mpc_inputs = self._get_current_mpc_inputs(agent_variables=current_vars, now=now)
+ full_results = self.discretization.solve(mpc_inputs)
+ self.save_result_df(full_results, now=now)
+
+ return full_results
+
+ def _get_current_mpc_inputs(
+ self, agent_variables: dict[str, MPCVariable], now: float
+ ) -> dict[str, ca.DM]:
+ """
+ Reads the value from all received AgentVariables and performs the
+ necessary expansion/interpolation of values onto the correct grid.
+
+ Args:
+ agent_variables: dictionary containing all AgentVariables from the
+ var_ref, with names as keys.
+ now: current time, used for interpolation of trajectory data
+
+ Returns:
+ dictionary with keys matching the required input for
+ self._mpc_inputs_to_nlp_inputs()
+ """
+
+ def get_variable_boundaries(var: OptimizationVariable) -> dict[str, ca.DM]:
+ """
+ Gets the runtime boundaries for all optimization variables in the
+ given variable group, sampled onto the discretization grid.
+
+ Args:
+ var: the variable group that is gathered
+
+ Returns:
+ dict of the form {lb_<den>: ca.DM, ub_<den>: ca.DM}
+ """
+ ref_list = var.ref_names
+ input_map = var.input_map
+ grid = self.discretization.grid(var)
+
+ lower_bounds, upper_bounds = [], []
+ for ref in ref_list:
+ agent_variable = agent_variables[ref]
+ ub = sampling.sample(
+ trajectory=agent_variable.ub,
+ grid=grid,
+ current=now,
+ method=agent_variable.interpolation_method,
+ )
+ upper_bounds.append(ub)
+ lb = sampling.sample(
+ trajectory=agent_variable.lb,
+ grid=grid,
+ current=now,
+ method=agent_variable.interpolation_method,
+ )
+ lower_bounds.append(lb)
+
+ boundaries = input_map(
+ ub_ref=ca.horzcat(*upper_bounds).T, lb_ref=ca.horzcat(*lower_bounds).T
+ )
+
+ return boundaries
+
+ def get_parameter_values(par: OptimizationParameter) -> dict[str, ca.DM]:
+ """
+ Gets the runtime values for all optimization parameters in the
+ given parameter group, sampled onto the discretization grid.
+
+ Args:
+ par: the parameter group that is gathered
+
+ Returns:
+ dict of the form {<den>: ca.DM}
+ """
+ ref_list = par.ref_names
+ input_map = par.add_default_values
+ grid = self.discretization.grid(par)
+
+ input_matrix = []
+ for ref in ref_list:
+ var = agent_variables[ref]
+ value = var.value
+ if value is None:
+ raise ValueError(
+ f"Input for variable {ref} is empty. "
+ f"Cannot solve optimization problem."
+ )
+ try:
+ interpolation_method = var.interpolation_method
+ except AttributeError as e:
+ # Catch the case where normal AgentVariables got mixed into the
+ # optimization input, possibly due to subclassing the MPC class
+ # and dynamically changing the MPC input
+ raise TypeError(
+ f"The variable {ref} does not have an interpolationmethod. All "
+ f"Variables used in MPC need to be of type MPCVariable "
+ f"(subclass of AgentVariable). This is likely caused by an "
+ f"error in a custom module."
+ ) from e
+ input_matrix.append(
+ sampling.sample(
+ trajectory=value,
+ grid=grid,
+ current=now,
+ method=interpolation_method,
+ )
+ )
+
+ return input_map(ref=ca.horzcat(*input_matrix).T)
+
+ mpc_inputs = {}
+
+ for sys_par in self.system.parameters:
+ sys_par_values = get_parameter_values(par=sys_par)
+ mpc_inputs.update(sys_par_values)
+ for sys_var in self.system.variables:
+ sys_var_boundaries = get_variable_boundaries(var=sys_var)
+ mpc_inputs.update(sys_var_boundaries)
+
+ return mpc_inputs
+
+[docs] def reset_setup_attributes(self):
+ """Cleans all attributes that are used for optimization setup."""
+ self.system = self.system_type()
+ opts = self.config.discretization_options
+ method = opts.method
+ self.discretization = self.discretization_types[method](options=opts)
+ self.discretization.logger = self.logger
+
+[docs] def save_result_df(
+ self,
+ results: Results,
+ now: float = 0,
+ ):
+ """
+ Save the results of `solve` into a dataframe at each time step.
+
+ Example results dataframe:
+
+ value_type variable ... lower
+ variable T_0 T_0_slack ... T_0_slack mDot_0
+ time_step ...
+ 2 0.000000 298.160000 NaN ... NaN NaN
+ 101.431499 297.540944 -149.465942 ... -inf 0.0
+ 450.000000 295.779780 -147.704779 ... -inf 0.0
+ 798.568501 294.720770 -146.645769 ... -inf 0.0
+
+ Args:
+ results: results of the current optimization, as returned by ``solve``
+ now: current time, used as the outer index of the saved rows
+ """
+ if not self.config.save_results:
+ return
+
+ res_file = self.config.results_file
+ if not self.results_file_exists():
+ results.write_columns(res_file)
+ results.write_stats_columns(stats_path(res_file))
+
+ df = results.df
+ df.index = list(map(lambda x: str((now, x)), df.index))
+ df.to_csv(res_file, mode="a", header=False)
+
+ with open(stats_path(res_file), "a") as f:
+ f.writelines(results.stats_line(str(now)))
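+
+ # Note: the index written above stringifies (now, time) tuples; the
+ # load_mpc helper further below reads the file back and rebuilds the
+ # MultiIndex via ast.literal_eval.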
+
+"""Holds classes that implement different transcriptions of the OCP"""
+
+import abc
+import dataclasses
+from pathlib import Path
+from typing import TypeVar, Union, Callable, Optional
+
+import casadi as ca
+import numpy as np
+import pandas as pd
+
+from agentlib_mpc.data_structures.casadi_utils import (
+ CaFuncInputs,
+ OptVarMXContainer,
+ OptParMXContainer,
+ CasadiDiscretizationOptions,
+ SolverFactory,
+ MPCInputs,
+ GUESS_PREFIX,
+)
+from agentlib_mpc.optimization_backends.casadi_.core.VariableGroup import (
+ OptimizationQuantity,
+ OptimizationParameter,
+ OptimizationVariable,
+)
+from agentlib_mpc.optimization_backends.casadi_.core.system import System
+
+
+CasadiVariableList = Union[list[ca.MX], ca.MX]
+
+
+[docs]@dataclasses.dataclass
+class Results:
+ matrix: ca.MX
+ grid: list[float]
+ columns: pd.MultiIndex
+ stats: dict
+ variable_grid_indices: dict[str, list[int]]
+ _variable_name_to_index: Optional[dict[str, int]] = None
+
+ def __post_init__(self):
+ self._variable_name_to_index = self.variable_lookup()
+ try:
+ iters = self.stats.pop("iterations")
+ self.stats["obj"] = iters["obj"][-1]
+ except KeyError:
+ pass
+ if "fatrop" in self.stats:
+ self.stats.pop("ng")
+ self.stats.pop("nu")
+ self.stats.pop("nx")
+ self.stats.pop("fatrop")
+
+ def __getitem__(self, item: str) -> np.ndarray:
+ return self.matrix[
+ self.variable_grid_indices[item], self._variable_name_to_index[item]
+ ].toarray(simplify=True)
+
+[docs] def variable_lookup(self) -> dict[str, int]:
+ """Creates a mapping from variable names to the column index in the Matrix"""
+ lookup = {}
+ for index, label in enumerate(self.columns):
+ if label[0] == "variable":
+ lookup[label[1]] = index
+ return lookup
+
+ @property
+ def df(self) -> pd.DataFrame:
+ return pd.DataFrame(self.matrix, index=self.grid, columns=self.columns)
+
+[docs] def write_columns(self, file: Path):
+ df = pd.DataFrame(columns=self.columns)
+ df.to_csv(file)
+
+[docs] def write_stats_columns(self, file: Path):
+ line = f""",{",".join(self.stats)}\n"""
+ with open(file, "w") as f:
+ f.write(line)
+
+[docs] def stats_line(self, index: str) -> str:
+ return f""""{index}",{",".join(map(str, self.stats.values()))}\n"""
+
+
+[docs]class Discretization(abc.ABC):
+ """
+ opt_vars: holds symbolic variables during problem creation
+ opt_vars_lb: holds symbolic variables during problem creation
+ opt_vars_ub: holds symbolic variables during problem creation
+ initial_guess: holds symbolic variables during problem creation
+ opt_pars: holds symbolic variables during problem creation
+ constraints: holds symbolic variables during problem creation
+ constraints_lb: holds symbolic variables during problem creation
+ constraints_ub: holds symbolic variables during problem creation
+ objective_function: cost function during problem creation
+ mpc_opt_vars (dict): holds the symbolic variables and grids during
+ problem creation sorted by type as in system_variables
+ mpc_opt_pars (dict): holds the symbolic variables and grids during
+ problem creation sorted by type as in system_parameters
+ """
+
+ _stage_function: ca.Function
+ _mpc_inputs_to_nlp_inputs: ca.Function
+ _nlp_outputs_to_mpc_outputs: ca.Function
+ _optimizer: ca.Function
+ _result_map: ca.Function
+ only_positive_times_in_results = True
+
+ def __init__(self, options: CasadiDiscretizationOptions):
+ self.options = options
+ self._finished_discretization: bool = False
+
+ # attributes used for problem creation
+ self.k: int = 0 # increment for prediction loop
+ self.pred_time: float = 0 # for creation of grids
+
+ # lists that hold all variables of the optimization problem
+ self.opt_vars: CasadiVariableList = [] # hold all optimization variables
+ self.opt_vars_lb: list[ca.MX] = []
+ self.opt_vars_ub: list[ca.MX] = []
+ self.initial_guess: list[ca.MX] = []
+ self.opt_pars: CasadiVariableList = [] # hold all optimization parameters
+ self.constraints: CasadiVariableList = []
+ self.constraints_lb: list[ca.MX] = []
+ self.constraints_ub: list[ca.MX] = []
+ self.objective_function: CaFuncInputs = ca.DM(0)
+ self.binary_opt_vars = []
+ self.equalities: list[bool] = []
+
+ # dicts of variables of the optimization problem, sorted by role
+ self.mpc_opt_vars: dict[str, OptVarMXContainer] = {}
+ self.mpc_opt_pars: dict[str, OptParMXContainer] = {}
+
+ self._create_results: Optional[Callable[[ca.DM, dict], Results]] = None
+ self.logger = None
+
+[docs] def initialize(self, system: System, solver_factory: SolverFactory):
+ """Initializes the trajectory optimization problem, creating all symbolic
+ variables of the OCP, the mapping function and the numerical solver."""
+ self._discretize(system)
+ self._finished_discretization = True
+ self.create_nlp_in_out_mapping(system)
+ self._create_solver(solver_factory)
+
+ @abc.abstractmethod
+ def _discretize(self, sys: System):
+ """Specifies the discretization of direct optimization methods like
+ collocation, multiple shooting etc. This function creates the lists of
+ variables, parameters, constraints etc. by using the self.add_opt_var functions.
+ For an example see optimization_backends.casadi_.basic
+ """
+ ...
+
+ def _create_solver(self, solver_factory: SolverFactory):
+ self._optimizer = solver_factory.create_solver(
+ nlp=self.nlp, discrete=self.binary_vars, equalities=self.equalities
+ )
+
+[docs] def solve(self, mpc_inputs: MPCInputs) -> Results:
+ """
+ Solves the discretized trajectory optimization problem.
+
+ Args:
+ mpc_inputs: Casadi Matrices specifying the input of all different types
+ of optimization parameters. Matrices consist of different variable rows
+ and have a column for each time step in the discretization.
+ There are separate matrices for each input type (as defined in the
+ System), and also for the upper and lower boundaries of variables
+ respectively.
+
+
+ Returns:
+ Results: The complete evolution of the states, inputs and boundaries of each
+ variable and parameter over the prediction horizon, as well as solve
+ statistics.
+
+ """
+ # collect and format inputs
+ guesses = self._determine_initial_guess(mpc_inputs)
+ mpc_inputs.update(guesses)
+ nlp_inputs: dict[str, ca.DM] = self._mpc_inputs_to_nlp_inputs(**mpc_inputs)
+
+ # perform optimization
+ nlp_output = self._optimizer(**nlp_inputs)
+
+ # format and return solution
+ mpc_output = self._nlp_outputs_to_mpc_outputs(vars_at_optimum=nlp_output["x"])
+ self._remember_solution(mpc_output)
+ result = self._process_solution(inputs=mpc_inputs, outputs=mpc_output)
+ return result
+
+ def _determine_initial_guess(self, mpc_inputs: MPCInputs) -> MPCInputs:
+ """
+ Collects initial guesses for all mpc variables. If possible, uses result
+ of last optimization.
+ If not available, the current measurement is used for states, and the mean of
+ the upper and lower bound is used otherwise.
+ """
+ guesses = {}
+
+ for denotation, var in self.mpc_opt_vars.items():
+ guess = var.opt
+ if guess is None:
+ # if initial value is available, assume it is constant and make guess
+ guess_denotation = f"initial_{denotation}"
+ if guess_denotation in mpc_inputs:
+ # if the initial guess contains multiple columns (lags from
+ # np.tile), use only the most recent measurement
+ if mpc_inputs[guess_denotation].shape[1] > 1:
+ state_measurements = mpc_inputs[guess_denotation][:, -1]
+ else:
+ state_measurements = mpc_inputs[guess_denotation]
+ guess = np.tile(state_measurements, len(var.grid))
+ # get guess from boundaries if last optimum is not available
+ else:
+ guess = np.array(
+ 0.5
+ * (
+ mpc_inputs[f"lb_{denotation}"]
+ + mpc_inputs[f"ub_{denotation}"]
+ )
+ )
+ guess = np.nan_to_num(
+ guess, posinf=100_000_000, neginf=-100_000_000
+ )
+ guesses.update({GUESS_PREFIX + denotation: guess})
+
+ return guesses
+
+ def _remember_solution(self, optimum: dict[str, ca.DM]):
+ """Saves the last optimal solution for all optimization variables
+ sorted by type."""
+ for den, var in self.mpc_opt_vars.items():
+ var.opt = optimum[den]
+
+ def _process_solution(self, inputs: dict, outputs: dict) -> Results:
+ """
+ Collects all inputs and outputs of the solved optimization problem
+ and arranges them into the result format defined in
+ create_nlp_in_out_mapping.
+
+ Args:
+ inputs: mpc_inputs dict returned from _get_current_mpc_inputs
+ outputs: mpc_output from self._nlp_outputs_to_mpc_outputs
+ """
+ # update the guess values at the variable positions with the outputs
+ for key, value in inputs.items():
+ key: str
+ if key.startswith(GUESS_PREFIX):
+ out_key = key[len(GUESS_PREFIX) :]
+ inputs[key] = outputs[out_key]
+
+ result_matrix = self._result_map(**inputs)["result"]
+
+ return self._create_results(result_matrix, self._optimizer.stats())
+
+[docs] def create_nlp_in_out_mapping(self, system: System):
+ """
+ Function creating mapping functions between the MPC variables ordered
+ by type (as defined in `declare_quantities`) and the raw input/output
+ vector of the CasADi NLP.
+ """
+ # Concatenate nlp variables to CasADi MX vectors
+ self.opt_vars = ca.vertcat(*self.opt_vars)
+ self.constraints = ca.vertcat(*self.constraints)
+ self.opt_pars = ca.vertcat(*self.opt_pars)
+ initial_guess = ca.vertcat(*self.initial_guess)
+ opt_vars_lb = ca.vertcat(*self.opt_vars_lb)
+ opt_vars_ub = ca.vertcat(*self.opt_vars_ub)
+ constraints_lb = ca.vertcat(*self.constraints_lb)
+ constraints_ub = ca.vertcat(*self.constraints_ub)
+
+ # nlp inputs
+ nlp_inputs = [
+ self.opt_pars,
+ initial_guess,
+ opt_vars_lb,
+ opt_vars_ub,
+ constraints_lb,
+ constraints_ub,
+ ]
+ nlp_input_denotations = [
+ "p",
+ "x0",
+ "lbx",
+ "ubx",
+ "lbg",
+ "ubg",
+ ]
+
+ # create empty lists to store all nlp inputs and outputs
+ mpc_inputs = []
+ mpc_input_denotations = []
+ mpc_outputs = []
+ mpc_output_denotations = []
+
+ # Concatenate mpc outputs and their bounds to CasADi MX matrices
+ for denotation, opt_var in self.mpc_opt_vars.items():
+ # mpc opt vars
+ var = opt_var.var
+ var = ca.horzcat(*var)
+ mpc_outputs.append(var)
+ mpc_output_denotations.append(denotation)
+
+ # their bounds and guess
+ lb = ca.horzcat(*opt_var.lb)
+ ub = ca.horzcat(*opt_var.ub)
+ guess = ca.horzcat(*opt_var.guess)
+ mpc_inputs.extend([lb, ub, guess])
+ mpc_input_denotations.extend(
+ [f"lb_{denotation}", f"ub_{denotation}", GUESS_PREFIX + denotation]
+ )
+
+ # Concatenate mpc inputs to CasADi MX matrices
+ for denotation, opt_par in self.mpc_opt_pars.items():
+ var = opt_par.var
+ var = ca.horzcat(*var)
+ mpc_inputs.append(var)
+ mpc_input_denotations.append(denotation)
+
+ # Mapping function that rearranges the variables for input into the NLP
+ self._mpc_inputs_to_nlp_inputs = ca.Function(
+ "mpc_inputs_to_nlp_inputs",
+ mpc_inputs,
+ nlp_inputs,
+ mpc_input_denotations,
+ nlp_input_denotations,
+ )
+
+ # Mapping function that rearranges the output of the nlp and sorts
+ # by denotation
+ self._nlp_outputs_to_mpc_outputs = ca.Function(
+ "nlp_outputs_to_mpc_outputs",
+ [self.opt_vars],
+ mpc_outputs,
+ ["vars_at_optimum"],
+ mpc_output_denotations,
+ )
+
+ matrix, col_index, full_grid, var_grids = self._create_result_format(system)
+ self._result_map = ca.Function(
+ "result_map", mpc_inputs, [matrix], mpc_input_denotations, ["result"]
+ )
+
+ def make_results_view(result_matrix: ca.DM, stats: dict) -> Results:
+ return Results(
+ matrix=result_matrix,
+ columns=col_index,
+ grid=full_grid,
+ variable_grid_indices=var_grids,
+ stats=stats,
+ )
+
+ self._create_results = make_results_view
+
+ @property
+ def nlp(self) -> dict[str, ca.MX]:
+ """The nlp dict that casadi solvers need for instantiation"""
+ if not self._finished_discretization:
+ raise RuntimeError("You have to initialize first")
+ return {
+ "x": self.opt_vars,
+ "f": self.objective_function,
+ "g": self.constraints,
+ "p": self.opt_pars,
+ }
+
+ @property
+ def binary_vars(self) -> list[bool]:
+ """List specifying for every optimization variable, whether it is binary."""
+ if not self._finished_discretization:
+ raise RuntimeError("You have to initialize first")
+ return self.binary_opt_vars
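+
+ # Sketch (standard CasADi API): the two properties above are what a
+ # solver factory typically consumes, e.g.
+ #     ca.nlpsol("mpc", "bonmin", self.nlp, {"discrete": self.binary_vars})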
+
+ def _create_result_format(
+ self, system: System
+ ) -> tuple[ca.MX, pd.MultiIndex, list[float], dict[str, list[int]]]:
+ """
+ Creates an MX matrix that includes all inputs and outputs of the nlp
+ in an ordered format.
+ Sets the _result_columns and _full_index private attributes.
+
+ Created format:
+ variable upper lower parameter ...
+ t_0 t_0 t_0 rho ...
+ time . . . .
+ 1 . . . .
+ 2 . . . .
+ 3
+ 4
+ 5
+ 6
+
+ Returns:
+ The Matrix as MX that defines the output format of the solver
+ A pandas column index for this matrix
+ The full grid
+ A dict specifying the row index with non-nan values for all variables
+ (not parameters)
+
+ """
+
+ def make_column(vars_in_quantity: int, grid: list) -> tuple[ca.MX, list[int]]:
+ """Creates a matrix with the width of the number of variables in a quantity
+ group, and the length of the full grid. Also returns the indexes of this
+ variable group that point to non-nan entries. The indices are used in
+ slices of the results object."""
+ col = []
+ non_nan_entries = []
+ for index_full, time in enumerate(full_grid):
+ if time in grid:
+ index = grid.index(time)
+ entry = mx_list[index].T
+ if not self.only_positive_times_in_results or time >= 0:
+ # with NARX there can be times smaller than 0; however, sometimes
+ # we don't want them in the results slice.
+ non_nan_entries.append(index_full)
+ else:
+ entry = np.full((1, vars_in_quantity), np.nan)
+ col = ca.vertcat(col, entry)
+
+ return col, non_nan_entries
+
+ full_grid = set()
+ variable_grids: dict[str, list[int]] = {}
+ for quant_type in {**self.mpc_opt_vars, **self.mpc_opt_pars}.values():
+ full_grid.update(set(quant_type.grid))
+ full_grid = sorted(full_grid)
+ columns = []
+ output_matrix = ca.MX.sym("Results", len(full_grid), 0)
+
+ for sys_pars in system.parameters:
+ names_list = sys_pars.full_names
+ if not names_list:
+ continue
+
+ columns.extend(list(map(lambda x: ("parameter", x), names_list)))
+ grid = self.grid(sys_pars)
+ mx_list = self.mpc_opt_pars[sys_pars.name].var
+ column, _ = make_column(len(names_list), grid)
+ output_matrix = ca.horzcat(output_matrix, column)
+
+ for sys_vars in system.variables:
+ names_list = sys_vars.full_names
+
+ if not names_list:
+ continue
+
+ grid = self.grid(sys_vars)
+ iterator = [("var", "variable"), ("ub", "upper"), ("lb", "lower")]
+ for key, header in iterator:
+ columns.extend(list(map(lambda x: (header, x), names_list)))
+ mx_list = self.mpc_opt_vars[sys_vars.name].__dict__[key]
+ column, grid_indices = make_column(len(names_list), grid)
+ output_matrix = ca.horzcat(output_matrix, column)
+
+ if key == "var":
+ variable_grids.update({n: grid_indices for n in names_list})
+
+ result_columns = pd.MultiIndex.from_tuples(columns)
+ return output_matrix, result_columns, full_grid, variable_grids
+
+[docs] def add_opt_var(
+ self,
+ quantity: OptimizationVariable,
+ lb: ca.MX = None,
+ ub: ca.MX = None,
+ guess: float = None,
+ post_den: str = "",
+ ):
+ """
+ Create an optimization variable and append to all the associated
+ lists. If lb or ub are given, they override the values provided at
+ runtime! The usual application of this is, to fix the initial value
+ of a state to a parameter.
+
+ Args:
+ quantity: corresponding system variable
+ lb: lower bound of the variable
+ ub: upper bound of the variable
+ guess: default for the initial guess
+ post_den: string to add to casadi MX after denotation (for debugging)
+ """
+ # get dimension
+ dimension = quantity.dim
+ denotation = quantity.name
+
+ # create symbolic variables
+ opt_var = ca.MX.sym(f"{denotation}_{self.pred_time}{post_den}", dimension)
+ lower = ca.MX.sym(f"lb_{denotation}_{self.pred_time}{post_den}", dimension)
+ upper = ca.MX.sym(f"ub_{denotation}_{self.pred_time}{post_den}", dimension)
+
+ # if lb/ub are not given (the usual case), use the default symbols for the nlp lists
+ if lb is None:
+ lb = lower
+ if ub is None:
+ ub = upper
+
+ # append to nlp specific lists
+ self.opt_vars.append(opt_var)
+ self.opt_vars_lb.append(lb)
+ self.opt_vars_ub.append(ub)
+ if guess is None:
+ guess = opt_var
+ self.initial_guess.append(guess)
+ self.binary_opt_vars.extend([quantity.binary] * dimension)
+
+ # append to variable specific lists
+ var_list = self.mpc_opt_vars.setdefault(denotation, OptVarMXContainer())
+ var_list.var.append(opt_var)
+ var_list.lb.append(lower)
+ var_list.ub.append(upper)
+ var_list.guess.append(opt_var)
+ var_list.grid.append(self.pred_time)
+
+ return opt_var
+
+[docs] def add_opt_par(self, quantity: OptimizationParameter, post_den: str = ""):
+ """
+ Create an optimization parameter and append to all the associated lists.
+
+ Args:
+ quantity: corresponding system parameter
+ post_den: string to add to casadi MX after denotation (for debugging)
+ """
+ # get dimension
+ dimension = quantity.dim
+ denotation = quantity.name
+
+ # create symbolic variables
+ opt_par = ca.MX.sym(f"{denotation}_{self.pred_time}{post_den}", dimension)
+ self.opt_pars.append(opt_par)
+
+ # append to variable specific lists
+ par_list = self.mpc_opt_pars.setdefault(denotation, OptParMXContainer())
+ par_list.var.append(opt_par)
+ par_list.grid.append(self.pred_time)
+
+ return opt_par
+
+[docs] def add_constraint(
+ self,
+ constraint_function: CaFuncInputs,
+ lb: CaFuncInputs = None,
+ ub: CaFuncInputs = None,
+ *,
+ gap_closing: bool = False,
+ ):
+ """
+ Add a constraint to the optimization problem. If no bounds are given,
+ adds an equality constraint.
+ """
+ # set equality for fatrop
+ self.equalities.extend([gap_closing] * constraint_function.shape[0])
+
+ # set bounds to default for equality constraints
+ if lb is None:
+ lb = ca.DM.zeros(constraint_function.shape[0], 1)
+ if ub is None:
+ ub = ca.DM.zeros(constraint_function.shape[0], 1)
+
+ # Append inequality constraints
+ self.constraints.append(constraint_function)
+ self.constraints_lb.append(lb)
+ self.constraints_ub.append(ub)
+
+[docs] def grid(self, var: OptimizationQuantity) -> list[float]:
+ denotation = var.name
+ if isinstance(var, OptimizationVariable):
+ return self.mpc_opt_vars[denotation].grid
+ if isinstance(var, OptimizationParameter):
+ return self.mpc_opt_pars[denotation].grid
+
+
+DiscretizationT = TypeVar("DiscretizationT", bound=Discretization)
+
+"""Holds the System class, which knows the model"""
+
+from __future__ import annotations
+
+import abc
+from typing import TypeVar, Union
+
+from agentlib_mpc.data_structures.mpc_datamodels import VariableReference
+from agentlib_mpc.models.casadi_model import CasadiModel
+from agentlib_mpc.optimization_backends.casadi_.core.VariableGroup import (
+ OptimizationVariable,
+ OptimizationParameter,
+)
+
+
+[docs]class System(abc.ABC):
+ """
+
+ Examples:
+ class MySystem(System):
+
+ # variables
+ states: OptimizationVariable
+ controls: OptimizationVariable
+ algebraics: OptimizationVariable
+ outputs: OptimizationVariable
+
+ # parameters
+ non_controlled_inputs: OptimizationParameter
+ model_parameters: OptimizationParameter
+ initial_state: OptimizationParameter
+
+ # dynamics
+ model_constraints: Constraint
+ cost_function: ca.MX
+ ode: ca.MX
+
+ def initialize(self, model: CasadiModel, var_ref: VariableReference):
+
+ self.states = OptimizationVariable.declare(
+ denotation="state",
+ variables=model.get_states(var_ref.states),
+ ref_list=var_ref.states,
+ assert_complete=True,
+ )
+
+ ...
+ """
+
+[docs] @abc.abstractmethod
+ def initialize(self, model: CasadiModel, var_ref: VariableReference): ...
+
+ @property
+ def variables(self) -> list[OptimizationVariable]:
+ return [
+ var
+ for var in self.__dict__.values()
+ if isinstance(var, OptimizationVariable)
+ ]
+
+ @property
+ def parameters(self) -> list[OptimizationParameter]:
+ return [
+ var
+ for var in self.__dict__.values()
+ if isinstance(var, OptimizationParameter)
+ ]
+
+ @property
+ def quantities(self) -> list[Union[OptimizationParameter, OptimizationVariable]]:
+ return self.variables + self.parameters
+
+
+SystemT = TypeVar("SystemT", bound=System)
+
+import casadi as ca
+
+from agentlib_mpc.optimization_backends.casadi_ import basic
+from agentlib_mpc.data_structures.casadi_utils import (
+ DiscretizationMethod,
+)
+from agentlib_mpc.data_structures.mpc_datamodels import (
+ FullVariableReference,
+)
+from agentlib_mpc.models.casadi_model import CasadiModel, CasadiParameter
+from agentlib_mpc.optimization_backends.casadi_.core.casadi_backend import CasADiBackend
+from agentlib_mpc.optimization_backends.casadi_.core.VariableGroup import (
+ OptimizationParameter,
+)
+
+
+[docs]class FullSystem(basic.BaseSystem):
+ last_control: OptimizationParameter
+ r_del_u: OptimizationParameter # penalty on change of control between time steps
+
+[docs] def initialize(self, model: CasadiModel, var_ref: FullVariableReference):
+ super().initialize(model=model, var_ref=var_ref)
+
+ self.last_control = OptimizationParameter.declare(
+ denotation="u_prev",
+ variables=model.get_inputs(var_ref.controls),
+ ref_list=var_ref.controls,
+ use_in_stage_function=False,
+ assert_complete=True,
+ )
+ self.r_del_u = OptimizationParameter.declare(
+ denotation="r_del_u",
+ variables=[CasadiParameter(name=r_del_u) for r_del_u in var_ref.r_del_u],
+ ref_list=var_ref.r_del_u,
+ use_in_stage_function=False,
+ assert_complete=True,
+ )
+
+
+[docs]class DirectCollocation(basic.DirectCollocation):
+ def _discretize(self, sys: FullSystem):
+ """
+ Defines a direct collocation discretization.
+ # pylint: disable=invalid-name
+ """
+
+ # setup the polynomial base
+ collocation_matrices = self._collocation_polynomial()
+
+ # shorthands
+ n = self.options.prediction_horizon
+ ts = self.options.time_step
+
+ # Initial State
+ x0 = self.add_opt_par(sys.initial_state)
+ xk = self.add_opt_var(sys.states, lb=x0, ub=x0, guess=x0)
+ uk = self.add_opt_par(sys.last_control)
+
+ # Parameters that are constant over the horizon
+ const_par = self.add_opt_par(sys.model_parameters)
+ du_weights = self.add_opt_par(sys.r_del_u)
+
+ # Formulate the NLP
+ # loop over prediction horizon
+ while self.k < n:
+ # New NLP variable for the control
+ u_prev = uk
+ uk = self.add_opt_var(sys.controls)
+ # penalty for control change between time steps
+ self.objective_function += ts * ca.dot(du_weights, (u_prev - uk) ** 2)
+
+ # New parameter for inputs
+ dk = self.add_opt_par(sys.non_controlled_inputs)
+
+ # perform inner collocation loop
+ opt_vars_inside_inner = [sys.algebraics, sys.outputs]
+ opt_pars_inside_inner = []
+
+ constant_over_inner = {
+ sys.controls: uk,
+ sys.non_controlled_inputs: dk,
+ sys.model_parameters: const_par,
+ }
+ xk_end, constraints = self._collocation_inner_loop(
+ collocation=collocation_matrices,
+ state_at_beginning=xk,
+ states=sys.states,
+ opt_vars=opt_vars_inside_inner,
+ opt_pars=opt_pars_inside_inner,
+ const=constant_over_inner,
+ )
+
+ # increment loop counter and time
+ self.k += 1
+ self.pred_time = ts * self.k
+
+ # New NLP variable for differential state at end of interval
+ xk = self.add_opt_var(sys.states)
+
+ # Add continuity constraint
+ self.add_constraint(xk - xk_end, gap_closing=True)
+
+ # add collocation constraints later for fatrop
+ for constraint in constraints:
+ self.add_constraint(*constraint)
+
+
+[docs]class MultipleShooting(basic.MultipleShooting):
+ def _discretize(self, sys: FullSystem):
+ """
+ Defines a multiple shooting discretization
+ """
+ vars_dict = {sys.states.name: {}}
+ n = self.options.prediction_horizon
+ ts = self.options.time_step
+ opts = {"t0": 0, "tf": ts}
+ # Initial State
+ x0 = self.add_opt_par(sys.initial_state)
+ xk = self.add_opt_var(sys.states, lb=x0, ub=x0, guess=x0)
+ vars_dict[sys.states.name][0] = xk
+ uk = self.add_opt_par(sys.last_control)
+
+ # Parameters that are constant over the horizon
+ du_weights = self.add_opt_par(sys.r_del_u)
+ const_par = self.add_opt_par(sys.model_parameters)
+
+ # ODE is used here because the algebraics can be calculated with the stage function
+ opt_integrator = self._create_ode(sys, opts, self.options.integrator)
+ # initiate states
+ while self.k < n:
+ u_prev = uk
+ uk = self.add_opt_var(sys.controls)
+ # penalty for control change between time steps
+ self.objective_function += ts * ca.dot(du_weights, (u_prev - uk) ** 2)
+ dk = self.add_opt_par(sys.non_controlled_inputs)
+ zk = self.add_opt_var(sys.algebraics)
+ yk = self.add_opt_var(sys.outputs)
+
+ # get path constraints and objective values (stage)
+ stage_arguments = {
+ # variables
+ sys.states.name: xk,
+ sys.algebraics.name: zk,
+ sys.outputs.name: yk,
+ # parameters
+ sys.controls.name: uk,
+ sys.non_controlled_inputs.name: dk,
+ sys.model_parameters.name: const_par,
+ }
+ stage = self._stage_function(**stage_arguments)
+
+ # integral and multiple shooting constraint
+ fk = opt_integrator(
+ x0=xk,
+ p=ca.vertcat(uk, dk, const_par, zk, yk),
+ )
+ xk_end = fk["xf"]
+ self.k += 1
+ self.pred_time = ts * self.k
+ xk = self.add_opt_var(sys.states)
+ vars_dict[sys.states.name][self.k] = xk
+ self.add_constraint(xk - xk_end, gap_closing=True)
+
+ # add model constraints last due to fatrop
+ self.add_constraint(
+ stage["model_constraints"],
+ lb=stage["lb_model_constraints"],
+ ub=stage["ub_model_constraints"],
+ )
+ self.objective_function += stage["cost_function"] * ts
+
+
+[docs]class CasADiFullBackend(CasADiBackend):
+ """
+ Class doing full MPC optimization with CasADi.
+ """
+
+ system_type = FullSystem
+ discretization_types = {
+ DiscretizationMethod.collocation: DirectCollocation,
+ DiscretizationMethod.multiple_shooting: MultipleShooting,
+ }
+ system: FullSystem
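+
+# Sketch of a matching backend config (field names inferred from the options
+# used in the discretizations above; treat the exact schema as an assumption,
+# it lives in CasadiDiscretizationOptions):
+#
+#     "optimization_backend": {
+#         "discretization_options": {
+#             "method": "collocation",  # or "multiple_shooting"
+#             "prediction_horizon": 10,
+#             "time_step": 900,
+#         },
+#         "do_jit": false
+#     }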
+
+import dataclasses
+import logging
+from typing import Union
+
+import casadi as ca
+import numpy as np
+import pandas as pd
+from scipy import interpolate
+
+from agentlib_mpc.data_structures.casadi_utils import (
+ Constraint,
+ LB_PREFIX,
+ UB_PREFIX,
+ DiscretizationMethod,
+ SolverFactory,
+)
+from agentlib_mpc.data_structures.mpc_datamodels import MHEVariableReference
+from agentlib_mpc.models.casadi_model import CasadiModel, CasadiInput
+from agentlib_mpc.optimization_backends.casadi_.core.casadi_backend import CasADiBackend
+from agentlib_mpc.optimization_backends.casadi_.core.VariableGroup import (
+ OptimizationQuantity,
+ OptimizationVariable,
+ OptimizationParameter,
+)
+from agentlib_mpc.optimization_backends.casadi_.core.discretization import (
+ Discretization,
+)
+from agentlib_mpc.optimization_backends.casadi_.core.system import System
+
+
+logger = logging.getLogger(__name__)
+
+
+[docs]class MHESystem(System):
+ # variables
+ states: OptimizationVariable
+ estimated_inputs: OptimizationVariable
+ estimated_parameters: OptimizationVariable
+ algebraics: OptimizationVariable
+ outputs: OptimizationVariable
+
+ # parameters
+ measured_states: OptimizationParameter
+ known_inputs: OptimizationParameter
+ known_parameters: OptimizationParameter
+ weights_states: OptimizationParameter
+
+ # dynamics
+ model_constraints: Constraint
+ cost_function: ca.MX
+ ode: ca.MX
+
+[docs] def initialize(self, model: CasadiModel, var_ref: MHEVariableReference):
+ # define variables
+ self.states = OptimizationVariable.declare(
+ denotation="states",
+ variables=model.get_states(var_ref.states),
+ ref_list=var_ref.states,
+ assert_complete=True,
+ )
+ self.estimated_inputs = OptimizationVariable.declare(
+ denotation="estimated_inputs",
+ variables=model.get_inputs(var_ref.estimated_inputs),
+ ref_list=var_ref.estimated_inputs,
+ assert_complete=True,
+ )
+ self.estimated_parameters = OptimizationVariable.declare(
+ denotation="estimated_parameters",
+ variables=model.get_parameters(var_ref.estimated_parameters),
+ ref_list=var_ref.estimated_parameters,
+ )
+ self.algebraics = OptimizationVariable.declare(
+ denotation="algebraics",
+ variables=model.auxiliaries,
+ ref_list=[],
+ )
+ self.outputs = OptimizationVariable.declare(
+ denotation="outputs",
+ variables=model.outputs,
+ ref_list=var_ref.outputs,
+ )
+
+ self.known_inputs = OptimizationParameter.declare(
+ denotation="known_inputs",
+ variables=model.get_inputs(var_ref.known_inputs),
+ ref_list=var_ref.known_inputs,
+ assert_complete=True,
+ )
+ known_parameter_names = set(model.get_parameter_names()) - set(
+ var_ref.estimated_parameters
+ )
+ self.known_parameters = OptimizationParameter.declare(
+ denotation="known_parameters",
+ variables=model.get_parameters(list(known_parameter_names)),
+ ref_list=var_ref.known_parameters,
+ assert_complete=False,
+ )
+ self.measured_states = OptimizationParameter.declare(
+ denotation="measured_states",
+ variables=[CasadiInput(name=name) for name in var_ref.measured_states],
+ ref_list=var_ref.measured_states,
+ )
+ self.weights_states = OptimizationParameter.declare(
+ denotation="weight_states",
+ variables=[CasadiInput(name=name) for name in var_ref.weights_states],
+ ref_list=var_ref.weights_states,
+ )
+
+ # add measurement deviation terms to the objective function
+ objective: ca.MX = 0
+ for i in range(len(var_ref.states)):
+ states = self.states.full_symbolic[i]
+ measured_states = self.measured_states.full_symbolic[i]
+ weights = self.weights_states.full_symbolic[i]
+ objective += weights * (states - measured_states) ** 2
+
+ # dynamics
+ self.ode = ca.vertcat(*[sta.ode for sta in model.get_states(var_ref.states)])
+ self.cost_function = objective
+ self.model_constraints = Constraint(
+ function=ca.vertcat(*[c.function for c in model.get_constraints()]),
+ lb=ca.vertcat(*[c.lb for c in model.get_constraints()]),
+ ub=ca.vertcat(*[c.ub for c in model.get_constraints()]),
+ )
+
+
+[docs]@dataclasses.dataclass
+class CollocationMatrices:
+ order: int
+ root: np.ndarray
+ B: np.ndarray
+ C: np.ndarray
+ D: np.ndarray
+
+
+[docs]class DirectCollocation(Discretization):
+ only_positive_times_in_results: bool = False
+
+ def _discretize(self, sys: MHESystem):
+ """
+ Defines a direct collocation discretization.
+ # pylint: disable=invalid-name
+ """
+
+ # setup the polynomial base
+ collocation_matrices = self._collocation_polynomial()
+
+ # shorthands
+ n = self.options.prediction_horizon
+ ts = self.options.time_step
+ start_time = -n * ts
+ self.pred_time = start_time
+
+ # Initial State
+ x_est_k = self.add_opt_var(sys.states)
+
+ # Parameters that are constant over the horizon
+ known_pars = self.add_opt_par(sys.known_parameters)
+ estimated_pars = self.add_opt_var(sys.estimated_parameters)
+ weights = self.add_opt_par(sys.weights_states)
+
+ # Formulate the NLP
+ # loop over prediction horizon
+ while self.k < n:
+ # New NLP variable for the control
+ inp_known = self.add_opt_par(sys.known_inputs)
+ inp_est = self.add_opt_var(sys.estimated_inputs)
+
+ # perform inner collocation loop
+ opt_vars_inside_inner = [sys.outputs, sys.algebraics]
+ opt_pars_inside_inner = [sys.measured_states]
+
+ constant_over_inner = {
+ sys.known_inputs: inp_known,
+ sys.estimated_inputs: inp_est,
+ sys.estimated_parameters: estimated_pars,
+ sys.known_parameters: known_pars,
+ sys.weights_states: weights,
+ }
+ xk_end = self._collocation_inner_loop(
+ collocation=collocation_matrices,
+ state_at_beginning=x_est_k,
+ states=sys.states,
+ opt_vars=opt_vars_inside_inner,
+ opt_pars=opt_pars_inside_inner,
+ const=constant_over_inner,
+ )
+
+ # increment loop counter and time
+ self.k += 1
+ self.pred_time = start_time + ts * self.k
+
+ # New NLP variable for differential state at end of interval
+ xk = self.add_opt_var(sys.states)
+
+ # Add continuity constraint
+ self.add_constraint(xk_end - xk)
+
+ def _construct_stage_function(self, system: MHESystem):
+ """
+ Combine information from the model and the var_ref to create CasADi
+ functions which describe the system dynamics and constraints at each
+ stage of the optimization problem. Sets the stage function. It has
+ all mpc variables as inputs, sorted by denotation (declared in
+ self.declare_quantities) and outputs ode, cost function and 3 outputs
+ per constraint (constraint, lb_constraint, ub_constraint).
+
+ In the basic case, it has the form:
+ CasadiFunction: ['x', 'z', 'u', 'y', 'd', 'p'] ->
+ ['ode', 'cost_function', 'model_constraints',
+ 'ub_model_constraints', 'lb_model_constraints']
+
+ Args:
+ system: the MHE system whose quantities define the stage function
+ """
+ all_system_quantities: dict[str, OptimizationQuantity] = {
+ var.name: var for var in system.quantities
+ }
+ constraints = {"model_constraints": system.model_constraints}
+
+ inputs = [
+ q.full_symbolic
+ for q in all_system_quantities.values()
+ if q.use_in_stage_function
+ ]
+ input_denotations = [
+ q.name
+ for denotation, q in all_system_quantities.items()
+ if q.use_in_stage_function
+ ]
+
+ # aggregate constraints
+ constraints_func = [c.function for c in constraints.values()]
+ constraints_lb = [c.lb for c in constraints.values()]
+ constraints_ub = [c.ub for c in constraints.values()]
+ constraint_denotations = list(constraints.keys())
+ constraint_lb_denotations = [LB_PREFIX + k for k in constraints]
+ constraint_ub_denotations = [UB_PREFIX + k for k in constraints]
+
+ # aggregate outputs
+ outputs = [
+ system.ode,
+ system.cost_function,
+ *constraints_func,
+ *constraints_lb,
+ *constraints_ub,
+ ]
+ output_denotations = [
+ "ode",
+ "cost_function",
+ *constraint_denotations,
+ *constraint_lb_denotations,
+ *constraint_ub_denotations,
+ ]
+
+ # function describing system dynamics and cost function
+ self._stage_function = ca.Function(
+ "f",
+ inputs,
+ outputs,
+ # input handles to make kwarg use possible and to debug
+ input_denotations,
+ # output handles to make kwarg use possible and to debug
+ output_denotations,
+ )
+
+[docs] def initialize(self, system: MHESystem, solver_factory: SolverFactory):
+ """Initializes the trajectory optimization problem, creating all symbolic
+ variables of the OCP, the mapping function and the numerical solver."""
+ self._construct_stage_function(system)
+ super().initialize(system=system, solver_factory=solver_factory)
+
+ def _collocation_inner_loop(
+ self,
+ state_at_beginning: ca.MX,
+ collocation: CollocationMatrices,
+ states: OptimizationVariable,
+ opt_vars: list[OptimizationVariable],
+ opt_pars: list[OptimizationParameter],
+ const: dict[OptimizationQuantity, ca.MX],
+ ) -> ca.MX:
+ """
+ Constructs the inner loop of a collocation discretization.
+
+ Args:
+ collocation: The collocation matrices
+ state_at_beginning: The casadi MX instance representing the state at the
+ beginning of the collocation interval
+ states: The OptimizationVariable representing the states
+ opt_vars: The OptimizationVariables which should be defined at each
+ collocation point
+ opt_pars: The OptimizationParameters which should be defined at each
+ collocation point
+ const: Variables or parameters to feed into the system function that are
+ constant over the inner loop. Value is the current MX to be used.
+
+ Returns:
+ state_k_end[MX]: state at the end of collocation interval
+ """
+ constants = {var.name: mx for var, mx in const.items()}
+
+ # remember time at start of collocation loop
+ start_time = self.pred_time
+
+ # shorthands
+ ts = self.options.time_step
+
+ # State variables at collocation points
+ state_collocation = []
+ opt_vars_collocation = []
+ opt_pars_collocation = []
+
+ # add variables at collocation points
+ for j in range(collocation.order): # d is collocation order
+ # set time
+ self.pred_time = start_time + collocation.root[j + 1] * ts
+
+ # differential state
+ state_kj = self.add_opt_var(states, post_den=f"_{j}")
+ state_collocation.append(state_kj)
+
+ opt_vars_collocation.append({})
+ for opt_var in opt_vars:
+ var_kj = self.add_opt_var(opt_var, post_den=f"_{j}")
+ opt_vars_collocation[-1].update({opt_var.name: var_kj})
+
+ opt_pars_collocation.append({})
+ for opt_par in opt_pars:
+ par_kj = self.add_opt_par(opt_par, post_den=f"_{j}")
+ opt_pars_collocation[-1].update({opt_par.name: par_kj})
+
+ # Loop over collocation points
+ state_k_end = collocation.D[0] * state_at_beginning
+ for j in range(1, collocation.order + 1):
+ # Expression for the state derivative at the collocation point
+ xp = collocation.C[0, j] * state_at_beginning
+ for r in range(collocation.order):
+ xp = xp + collocation.C[r + 1, j] * state_collocation[r]
+
+ stage = self._stage_function(
+ **{states.name: state_collocation[j - 1]},
+ **opt_pars_collocation[j - 1],
+ **opt_vars_collocation[j - 1],
+ **constants,
+ )
+
+ self.add_constraint(ts * stage["ode"] - xp)
+
+ # Append inequality constraints
+ self.add_constraint(
+ stage["model_constraints"],
+ lb=stage["lb_model_constraints"],
+ ub=stage["ub_model_constraints"],
+ )
+
+ # Add contribution to the end state
+ state_k_end = state_k_end + collocation.D[j] * state_collocation[j - 1]
+
+ # Add contribution to quadrature function
+ self.objective_function += collocation.B[j] * stage["cost_function"] * ts
+
+ return state_k_end
+
+ def _collocation_polynomial(self) -> CollocationMatrices:
+ """Returns the matrices needed for direct collocation discretization."""
+ # Degree of interpolating polynomial
+ d = self.options.collocation_order
+ polynomial = self.options.collocation_method
+
+ # Get collocation points
+ tau_root = np.append(0, ca.collocation_points(d, polynomial))
+
+ # Coefficients of the collocation equation
+ C = np.zeros((d + 1, d + 1))
+
+ # Coefficients of the continuity equation
+ D = np.zeros(d + 1)
+
+ # Coefficients of the quadrature function
+ B = np.zeros(d + 1)
+
+ # Construct polynomial basis
+ for j in range(d + 1):
+ # Construct Lagrange polynomials to get the polynomial basis at
+ # the collocation point
+ p = np.poly1d([1])
+ for r in range(d + 1):
+ if r != j:
+ p *= np.poly1d([1, -tau_root[r]]) / (tau_root[j] - tau_root[r])
+
+ # Evaluate the polynomial at the final time to get the
+ # coefficients of the continuity equation
+ D[j] = p(1.0)
+
+ # Evaluate the time derivative of the polynomial at all collocation
+ # points to get the coefficients of the continuity equation
+ pder = np.polyder(p)
+ for r in range(d + 1):
+ C[j, r] = pder(tau_root[r])
+
+ # Evaluate the integral of the polynomial to get the coefficients
+ # of the quadrature function
+ pint = np.polyint(p)
+ B[j] = pint(1.0)
+
+ return CollocationMatrices(
+ order=d,
+ root=tau_root,
+ B=B,
+ C=C,
+ D=D,
+ )
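+
+ # The matrices computed above follow the standard Lagrange collocation
+ # construction with basis polynomials l_j over the points tau_root:
+ #   D[j]    = l_j(1.0)             (continuity / end-state weights)
+ #   C[j, r] = (d/dt l_j)(tau_r)    (derivative weights at collocation points)
+ #   B[j]    = int_0^1 l_j(t) dt    (quadrature weights for the cost)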
+
+
+[docs]class MHEBackend(CasADiBackend):
+ """
+ Class doing moving horizon estimation with CasADi.
+ """
+
+ system_type = MHESystem
+ discretization_types = {
+ DiscretizationMethod.collocation: DirectCollocation,
+ }
+ system: MHESystem
+
+[docs] @staticmethod
+ def sample(
+ trajectory: Union[float, int, pd.Series, list[Union[float, int]]],
+ grid: Union[list, np.ndarray],
+ current: float = 0,
+ method: str = "linear",
+ ) -> list:
+ """
+ Obtain the specified portion of the trajectory.
+
+ Args:
+ trajectory: The trajectory to be sampled. Scalars will be
+ expanded onto the grid. Lists need to exactly match the provided
+ grid. pd.Series are interpolated onto the grid.
+ grid: target interpolation grid in seconds in relative terms (i.e.
+ starting from 0 usually)
+ current: start time of requested trajectory
+ method: interpolation method, currently accepted: 'linear' and
+ 'previous' ('spline' is not yet supported)
+
+ Returns:
+ Sampled list of values.
+
+ Takes a slice of the trajectory from the current time step with the
+ specified length and interpolates it to match the requested sampling.
+ If the requested horizon is longer than the available data, the last
+ available value will be used for the remainder.
+
+ Raises:
+ ValueError
+ TypeError
+ """
+ target_grid_length = len(grid)
+ if isinstance(trajectory, (float, int)):
+ # return constant trajectory for scalars
+ return [trajectory] * target_grid_length
+ if isinstance(trajectory, list):
+ # return lists of matching length without timestamps
+ if len(trajectory) == target_grid_length:
+ return trajectory
+ raise ValueError(
+ f"Passed list with length {len(trajectory)} "
+ f"does not match target ({target_grid_length})."
+ )
+ if isinstance(trajectory, pd.Series):
+ source_grid = np.array(trajectory.index)
+ values = trajectory.values
+ else:
+ raise TypeError(
+ f"Passed trajectory of type '{type(trajectory)}' " f"cannot be sampled."
+ )
+ target_grid = np.array(grid) + current
+
+ # expand scalar values
+ if len(source_grid) == 1:
+ return [trajectory[0]] * target_grid_length
+
+ # skip resampling if grids are (almost) the same
+ if (target_grid.shape == source_grid.shape) and all(target_grid == source_grid):
+ return list(values)
+ values = np.array(values)
+
+ # check requested portion of trajectory, whether the most recent value in the
+ # source grid is older than the first value in the MHE trajectory
+ if target_grid[0] >= source_grid[-1]:
+ # return the last value of the trajectory if requested sample
+ # starts out of range
+ logger.warning(
+ "Latest value of source grid (%s) is older than "
+ "current time (%s). Returning latest value anyway.",
+ source_grid[-1],
+ current,
+ )
+ return [values[-1]] * target_grid_length
+
+ # determine whether the target grid lies within the available source grid, and
+ # how many entries to extrapolate on either side
+ source_grid_oldest_time: float = source_grid[0]
+ source_grid_newest_time: float = source_grid[-1]
+ source_is_recent_enough: np.ndarray = target_grid < source_grid_newest_time
+ source_is_old_enough: np.ndarray = target_grid > source_grid_oldest_time
+ number_of_missing_old_entries: int = target_grid_length - np.count_nonzero(
+ source_is_old_enough
+ )
+ number_of_missing_new_entries: int = target_grid_length - np.count_nonzero(
+ source_is_recent_enough
+ )
+ # shorten target interpolation grid by extra points that go above or below
+ # available data range
+ target_grid = target_grid[source_is_recent_enough * source_is_old_enough]
+
+ # interpolate data to match new grid
+ if method == "linear":
+ tck = interpolate.interp1d(x=source_grid, y=values, kind="linear")
+ sequence_new = list(tck(target_grid))
+ elif method == "spline":
+ raise NotImplementedError(
+ "Spline interpolation is currently not supported"
+ )
+ elif method == "previous":
+ tck = interpolate.interp1d(source_grid, values, kind="previous")
+ sequence_new = list(tck(target_grid))
+ else:
+ raise ValueError(
+ f"Chosen 'method' {method} is not a valid method. "
+ f"Currently supported: linear, spline, previous"
+ )
+
+ # extrapolate sequence with last available value if necessary
+ interpolated_trajectory = (
+ [values[0]] * number_of_missing_old_entries
+ + sequence_new
+ + [values[-1]] * number_of_missing_new_entries
+ )
+
+ return interpolated_trajectory
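+
+ # Usage sketch for sample (values illustrative):
+ #     series = pd.Series([20.0, 21.0], index=[0, 600])
+ #     MHEBackend.sample(series, grid=[0, 300, 600])
+ #     # -> [20.0, 20.5, 21.0] (linear interpolation)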
+
+import casadi as ca
+import numpy as np
+
+from agentlib_mpc.data_structures.casadi_utils import DiscretizationMethod
+from agentlib_mpc.data_structures.mpc_datamodels import MINLPVariableReference
+from agentlib_mpc.models.casadi_model import CasadiModel
+from agentlib_mpc.optimization_backends.casadi_.core.VariableGroup import (
+ OptimizationVariable,
+)
+
+from agentlib_mpc.optimization_backends.casadi_ import basic
+from agentlib_mpc.optimization_backends.casadi_.core.casadi_backend import CasADiBackend
+
+# todo: All the names are minlp, but this is actually minlp capable
+
+
+[docs]class CasadiMINLPSystem(basic.BaseSystem):
+ binary_controls: OptimizationVariable
+
+ def __init__(self):
+ super().__init__()
+ self.is_linear = False
+
+[docs] def initialize(self, model: CasadiModel, var_ref: MINLPVariableReference):
+ self.binary_controls = OptimizationVariable.declare(
+ denotation="w",
+ variables=model.get_inputs(var_ref.binary_controls),
+ ref_list=var_ref.binary_controls,
+ assert_complete=True,
+ binary=True,
+ )
+ super().initialize(model=model, var_ref=var_ref)
+ self.is_linear = self._is_linear()
+
+ def _is_linear(self) -> bool:
+ """Checks whether ode and constraints are linear in all variables by
+ comparing the jacobian at two different points."""
+ inputs = ca.vertcat(*(v.full_symbolic for v in self.variables))
+ parameters = ca.vertcat(
+ *(v.full_symbolic for v in self.parameters if v.use_in_stage_function)
+ )
+ test_params = ca.vertcat(
+ *(
+ v.add_default_values()[v.name]
+ for v in self.parameters
+ if v.use_in_stage_function
+ )
+ )
+ ode = self.ode
+ constraints = self.model_constraints.function
+ outputs = ca.vertcat(ode, constraints)
+ jac = ca.jacobian(outputs, inputs)
+ jac_func = ca.Function(
+ "jac_func",
+ [inputs, parameters],
+ [jac],
+ ["inputs", "parameters"],
+ ["jacobian"],
+ )
+ # the system is linear iff the jacobian is constant, i.e. identical at
+ # two distinct test points
+ test_input = [0] * inputs.shape[0]
+ test_input_2 = np.array(test_input) + 0.5
+ jac_equal = jac_func(test_input, test_params) == jac_func(
+ test_input_2, test_params
+ )
+ return float(ca.mmin(jac_equal)) == 1.0
+
+
+[docs]class DirectCollocation(basic.DirectCollocation):
+ def _discretize(self, sys: CasadiMINLPSystem):
+ """
+ Defines a direct collocation discretization.
+ # pylint: disable=invalid-name
+ """
+
+ # setup the polynomial base
+ collocation_matrices = self._collocation_polynomial()
+
+ # shorthands
+ n = self.options.prediction_horizon
+ ts = self.options.time_step
+
+ # Initial State
+ x0 = self.add_opt_par(sys.initial_state)
+ xk = self.add_opt_var(sys.states, lb=x0, ub=x0, guess=x0)
+
+ # Parameters that are constant over the horizon
+ const_par = self.add_opt_par(sys.model_parameters)
+
+ # Formulate the NLP
+ # loop over prediction horizon
+ while self.k < n:
+ # New NLP variable for the control
+ uk = self.add_opt_var(sys.controls)
+ wk = self.add_opt_var(sys.binary_controls)
+
+ # New parameter for inputs
+ dk = self.add_opt_par(sys.non_controlled_inputs)
+
+ # perform inner collocation loop
+ opt_vars_inside_inner = [sys.algebraics, sys.outputs]
+ opt_pars_inside_inner = []
+
+ constant_over_inner = {
+ sys.controls: uk,
+ sys.non_controlled_inputs: dk,
+ sys.model_parameters: const_par,
+ sys.binary_controls: wk,
+ }
+ xk_end, constraints = self._collocation_inner_loop(
+ collocation=collocation_matrices,
+ state_at_beginning=xk,
+ states=sys.states,
+ opt_vars=opt_vars_inside_inner,
+ opt_pars=opt_pars_inside_inner,
+ const=constant_over_inner,
+ )
+
+ # increment loop counter and time
+ self.k += 1
+ self.pred_time = ts * self.k
+
+ # New NLP variable for differential state at end of interval
+ xk = self.add_opt_var(sys.states)
+
+ # Add continuity constraint
+ self.add_constraint(xk - xk_end, gap_closing=True)
+
+ # add collocation constraints later for fatrop
+ for constraint in constraints:
+ self.add_constraint(*constraint)
+
+
+[docs]class CasADiMINLPBackend(CasADiBackend):
+ """
+ Class doing mixed-integer optimization with CasADi.
+ """
+
+ system_type = CasadiMINLPSystem
+ discretization_types = {DiscretizationMethod.collocation: DirectCollocation}
+ system: CasadiMINLPSystem
+
+import warnings
+from ast import literal_eval
+import datetime
+from pathlib import Path
+from typing import NewType, Literal, Union, Optional, Iterable
+
+import pandas as pd
+from pandas.api.types import is_float_dtype
+import numpy as np
+
+from agentlib_mpc.data_structures import mpc_datamodels
+from agentlib_mpc.utils import TimeConversionTypes, TIME_CONVERSION
+
+SimulationTime = NewType("SimulationTime", float)
+
+
+
+
+
+[docs]def load_mpc(file: Union[Path, str]) -> pd.DataFrame:
+ """Loads an MPC results csv file, restoring its (time, grid) MultiIndex."""
+ df = pd.read_csv(file, index_col=[0], header=[0, 1])
+ new_ind = [literal_eval(i) for i in df.index]
+ df.index = pd.MultiIndex.from_tuples(new_ind)
+ return df
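+
+# The index column of such a file holds stringified (time, grid) tuples; a
+# hedged sketch of the expected layout, built in-memory for illustration
+# (pd.read_csv also accepts file-like objects):
+import io
+
+csv_text = (
+    ",variable,variable\n"
+    ",T,u\n"
+    '"(0.0, 0.0)",291.0,0.1\n'
+    '"(0.0, 900.0)",292.0,0.2\n'
+)
+example = load_mpc(io.StringIO(csv_text))
+assert example.index.nlevels == 2 and ("variable", "T") in example.columns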
+
+
+[docs]def load_mpc_stats(results_file: Union[str, Path]) -> Optional[pd.DataFrame]:
+ """Loads the solver stats belonging to an MPC results file, or None if unavailable."""
+ stats_file = mpc_datamodels.stats_path(results_file)
+ try:
+ df = pd.read_csv(stats_file, index_col=0)
+ except Exception:
+ return None
+ if is_float_dtype(df.index):
+ return df
+ new_ind = [literal_eval(i) for i in df.index]
+ df.index = pd.MultiIndex.from_tuples(new_ind)
+ return df
+
+
+[docs]def load_sim(file: Path, causality=None) -> pd.DataFrame:
+ """Loads a simulation results csv file, optionally filtered by causality."""
+ df = pd.read_csv(file, header=[0, 1, 2], index_col=0)
+ if causality:
+ df = df[causality]
+ return df.droplevel(level=1, axis=1)
+ return df.droplevel(level=2, axis=1).droplevel(level=0, axis=1)
+
+
+[docs]def convert_multi_index(
+ data: pd.DataFrame, convert_to: Union[TimeConversionTypes, Literal["datetime"]]
+):
+ """Converts an index of an MPC or ADMM results Dataframe to a different unit,
+ assuming it is passed in seconds."""
+ # last = data.index.nlevels - 1 # should be 1 for mpc, 2 for admm
+ outer = convert_index(convert_to, data.index.unique(0))
+ return data.set_index(
+ data.index.set_levels(outer, level=0)
+ ) # .set_levels(inner, level=last)
+
+
+[docs]def convert_index(
+ convert_to: Union[TimeConversionTypes, Literal["datetime"]], index: pd.Index
+):
+ """
+ Converts an index from seconds to datetime or another unit
+ Args:
+ convert_to: unit, e.g. minutes, hours, datetime
+ index: pandas index object
+
+ Returns:
+
+ """
+ if convert_to == "datetime":
+ return pd.to_datetime(index.astype(int), unit="s")
+ else:
+ return index / TIME_CONVERSION[convert_to]
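+
+# For instance, an index given in seconds maps as follows ("hours" assumes the
+# TIME_CONVERSION factor of 3600):
+_idx_seconds = pd.Index([0.0, 1800.0, 3600.0])
+assert list(convert_index("hours", _idx_seconds)) == [0.0, 0.5, 1.0]
+assert convert_index("datetime", _idx_seconds)[0] == pd.Timestamp("1970-01-01")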
+
+
+[docs]def perform_index_update(
+ data: pd.DataFrame, offset: Union[float, Literal["auto"], bool], admm: bool = False
+) -> pd.DataFrame:
+ """Updates the index of a raw mpc/admm result dataframe, to be offset by a desired
+ time value."""
+ if not offset:
+ return data
+ outer_index = data.index.get_level_values(0)
+ if offset == "auto" or offset is True:
+ _index_offset = outer_index[0]
+ else:
+ _index_offset = offset
+ outer_index = outer_index - _index_offset
+
+ if admm:
+ arrays = [
+ outer_index,
+ data.index.get_level_values(1),
+ data.index.get_level_values(2),
+ ]
+ else: # mpc
+ arrays = [outer_index, data.index.get_level_values(1)]
+
+ # set index like this, because set_index() only works for dataframes, not series
+ data_copy = data.copy()
+ data_copy.index = pd.MultiIndex.from_arrays(arrays)
+ return data_copy
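+
+# A small sketch of the offset semantics (data invented for illustration): an
+# outer index starting at 105 with offset=100 behaves like one starting at 5.
+_idx = pd.MultiIndex.from_product([[105.0, 115.0], [0.0, 900.0]])
+_df = pd.DataFrame({"T": [291.0, 292.0, 293.0, 294.0]}, index=_idx)
+assert list(perform_index_update(_df, offset=100).index.unique(0)) == [5.0, 15.0]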
+
+
+[docs]def mpc_at_time_step(
+ data: pd.DataFrame,
+ time_step: float,
+ variable=None,
+ variable_type="variable",
+ index_offset: Union[float, Literal["auto"], bool] = True,
+) -> pd.DataFrame:
+ """
+ Gets the results of an optimization at a time step.
+
+ Args:
+ data: The multi-indexed results data frame from the mpc
+ time_step: The time step from which results should be shown.
+ If no exact match, shows closest.
+ variable: If specified, only returns results
+ with regard to a certain variable.
+ variable_type: The type of the variable provided (parameter, variable, lower, ...)
+ index_offset: Determines how the index will be updated when loading the data.
+ The offset will be subtracted from the time-index. This is useful for results
+ of realtime systems, where the time value will be a unix time stamp and we want
+ to cut the number down to something understandable. For example, if the time
+ index (level 0 of the input Dataframe) is [105, 115, 125] and we give an
+ index_offset of 100, the data will be handled as if the index was [5, 15, 25].
+ If "auto" or True is provided as an argument, the index will be modified to
+ start at 0. If 0 or False is provided, no modifications will be made.
+
+ Returns:
+ pd.DataFrame: A single-indexed Dataframe of the optimization results
+ at the specified time step. If variable is not specified,
+ returns all variables with a double column index, if it
+ is specified returns only values and/or bounds with
+ single indexed columns.
+ """
+
+ # get the closest matching (outer) index matching the requested time step
+ data = perform_index_update(data, index_offset, admm=False)
+ outer_index = data.index.get_level_values(0)
+ idx = np.searchsorted(outer_index, time_step, side="left")
+ if idx > 0 and (
+ idx == len(outer_index)
+ or np.fabs(time_step - outer_index[idx - 1])
+ < np.fabs(time_step - outer_index[idx])
+ ):
+ closest = outer_index[idx - 1]
+ else:
+ closest = outer_index[idx]
+
+ # select the data at this index and increment the inner index
+ if variable:
+ data_at_ts = data[variable_type][variable].loc[closest]
+ else:
+ data_at_ts = data.loc[closest]
+ data_at_ts = data_at_ts.copy()
+ data_at_ts.index = data_at_ts.index + closest
+
+ return data_at_ts
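+
+# A hedged usage sketch on an invented two-step result frame: the requested
+# time 1000 s snaps to the closest optimization at t = 900 s, and the inner
+# prediction grid is shifted to absolute times.
+_idx = pd.MultiIndex.from_product([[0.0, 900.0], [0.0, 300.0, 600.0]])
+_res = pd.DataFrame({("variable", "T"): range(6)}, index=_idx)
+_traj = mpc_at_time_step(_res, time_step=1000, variable="T", index_offset=False)
+assert list(_traj.index) == [900.0, 1200.0, 1500.0]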
+
+
+[docs]def admm_at_time_step(
+ data: Union[pd.DataFrame, pd.Series],
+ time_step: float = None,
+ variable=None,
+ iteration: float = -1,
+ index_offset: Union[float, Literal["auto"], bool] = True,
+ convert_to: TimeConversionTypes = "seconds",
+) -> pd.DataFrame:
+ """
+ Gets the results of an optimization at a time step.
+ Args:
+ index_offset: Determines how the index will be updated when loading the data.
+ The offset will be subtracted from the time-index. This is useful for results
+ of realtime systems, where the time value will be a unix time stamp and we want
+ to cut the number down to something understandable. For example, if the time
+ index (level 0 of the input Dataframe) is [105, 115, 125] and we give an
+ index_offset of 100, the data will be handled as if the index was [5, 15, 25].
+ If "auto" or True is provided as an argument, the index will be modified to
+ start at 0. If 0 or False is provided, no modifications will be made.
+ data: The multi-indexed results data frame from the mpc
+ time_step: The time step from which results should be shown.
+ If no exact match, shows closest.
+ variable: If specified, only returns results
+ with regard to a certain variable.
+ iteration: Specifies from which inner ADMM iteration data should be
+ taken. If negative, counts from the last iteration. Default -1.
+ convert_to: Whether the data should be converted to datetime, minutes etc.
+
+
+ Returns:
+ A single-indexed Dataframe of the optimization results
+ at the specified time step. If variable is not specified,
+ returns all variables with a double column index, if it
+ is specified returns only values and/or bounds with
+ single indexed columns.
+ """
+
+ # get the closest matching (outer) index matching the requested time step
+ data = convert_multi_index(data, convert_to=convert_to)
+ if convert_to != "datetime":
+ data = perform_index_update(data, index_offset, admm=True)
+ outer_index = data.index.get_level_values(0)
+
+ if time_step is None:
+ time_step = 0 if convert_to != "datetime" else datetime.datetime.now()
+
+ idx = np.searchsorted(outer_index, time_step, side="left")
+ if idx > 0 and (
+ idx == len(outer_index)
+ or np.fabs(time_step - outer_index[idx - 1])
+ < np.fabs(time_step - outer_index[idx])
+ ):
+ closest = outer_index[idx - 1]
+ else:
+ closest = outer_index[idx]
+
+ data_at_ts = data.loc[closest]
+
+ # if iteration provided is negative we count backwards (like list indexing)
+ if iteration < 0:
+ number_of_admm_iterations = data_at_ts.index.get_level_values(0).max()
+ iteration = number_of_admm_iterations + 1 + iteration
+
+ # select the data at this index and increment the inner index
+ if variable:
+ data_at_it = data_at_ts.xs(variable, axis=1, level="variable").loc[iteration]
+ else:
+ data_at_it = data_at_ts.loc[iteration]
+ data_at_it = data_at_it.copy()
+
+ if convert_to == "datetime":
+ index = convert_index(convert_to, data_at_it.index + closest.value // 1e9)
+ else:
+ index = convert_index(convert_to, data_at_it.index) + closest
+ data_at_it.index = index
+ return data_at_it
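+
+# Analogous usage for ADMM results (path and variable name hypothetical);
+# iteration=-1 selects the final iteration of the chosen time step:
+# admm_results = load_admm("results/admm_opt.csv")
+# traj = admm_at_time_step(admm_results, time_step=3600, variable="T")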
+
+
+[docs]def get_number_of_iterations(data: pd.DataFrame) -> dict[SimulationTime, int]:
+ """Returns the number of iterations at each time instance of the ADMM simulation."""
+
+ ind_full = data.index
+ ind = ind_full.droplevel(2).drop_duplicates()
+ time_stamps = ind.droplevel(1).drop_duplicates()
+ result = {}
+ for t in time_stamps:
+ _slice = ind.get_loc(t)
+ result[SimulationTime(t)] = len(ind[_slice])
+
+ return result
+
+
+[docs]def get_time_steps(data: pd.DataFrame) -> Iterable[float]:
+ """Returns the time steps at which an MPC step was performed."""
+ return sorted(set(data.index.get_level_values(0)))
+
+
+[docs]def first_vals_at_trajectory_index(data: Union[pd.DataFrame, pd.Series]):
+ """Gets the first values at each time step of a results trajectory."""
+ time_steps = get_time_steps(data)
+ first_vals = pd.Series(
+ {time_step: data.loc[time_step].iloc[0] for time_step in time_steps}
+ )
+ # note: "np.nan in series" would check the index, not the values
+ if first_vals.isna().any():
+ warnings.warn(
+ "NaN detected in first values. You may need to select the "
+ "correct column of the DataFrame and drop NaN values first."
+ )
+ return first_vals
+
+
+[docs]def last_vals_at_trajectory_index(data: Union[pd.DataFrame, pd.Series]):
+ """Gets the last values at each time step of a results trajectory."""
+ time_steps = get_time_steps(data)
+ # -1 covers for parameters (only one entry) and states (-horizon until 0)
+ last_vals = pd.Series(
+ {time_step: data.loc[time_step].iloc[-1] for time_step in time_steps}
+ )
+
+ if last_vals.isna().any():
+ warnings.warn(
+ "NaN detected in last values. You may need to select the "
+ "correct column of the DataFrame and drop NaN values first."
+ )
+ return last_vals
+
+"""Modules that defines functions to be used for automatically creating animations of
+ADMM convergence"""
+
+import functools
+from pathlib import Path
+from typing import NewType, Iterable, Union, Callable
+
+import matplotlib.pyplot as plt
+import pandas as pd
+from matplotlib.animation import FuncAnimation
+
+from agentlib_mpc.utils.analysis import (
+ admm_at_time_step,
+ load_admm,
+ get_number_of_iterations,
+)
+from agentlib_mpc.utils.plotting.basic import make_grid, make_fig, Style, Customizer
+
+Label = str
+LinesDict = dict[Label, plt.Line2D]
+Data = dict[Label, pd.DataFrame]
+Init = Callable[[], None]
+Animate = Callable[[Union[int, Iterable]], None]
+
+
+[docs]def make_lines(labels: list[Label], ax: plt.Axes, fig: plt.Figure) -> LinesDict:
+ lines: LinesDict = {}
+
+ for label in labels:
+ lines[label] = ax.plot([], [], lw=2, label=str(label))[0]
+
+ return lines
+
+
+[docs]def init_full(lines: LinesDict, annotation: plt.Annotation, ax: plt.Axes):
+ for line in lines.values():
+ line.set_data([], [])
+
+ ax.legend(list(lines.values()), list(lines))
+ # annotation
+ return tuple(lines.values()) + (annotation,)
+
+
+[docs]def animate_full(
+ i: int, lines: LinesDict, annotation: plt.Annotation, data: Data, time_step: float
+):
+ for label, line in lines.items():
+ data_for_iter = admm_at_time_step(
+ data=data[label],
+ time_step=time_step,
+ iteration=i,
+ ).dropna()
+
+ line.set_data(data_for_iter.index, data_for_iter)
+
+ # annotation
+ annotation.set_text(f"Iteration: {i}")
+
+ print(f"Made Frame {i}")
+
+ return tuple(lines.values()) + (annotation,)
+
+
+[docs]def make_image(
+ data: Data,
+ time_step: float = 0,
+ file_name: str = "",
+ customize: Customizer = None,
+ iteration=-1,
+):
+ labels = list(data) # from data
+
+ with Style() as style:
+ fig, ax = make_fig(style)
+ if customize:
+ fig, ax = customize(fig, ax)
+ lines = make_lines(labels, ax=ax, fig=fig)
+ annotation = ax.annotate(
+ xy=(0.1, 0.1),
+ xytext=(0.5, 1.05),
+ text="Iteration: 0",
+ animated=True,
+ textcoords="axes fraction",
+ xycoords="axes fraction",
+ ha="center",
+ )
+
+ animate = functools.partial(
+ animate_full,
+ annotation=annotation,
+ lines=lines,
+ data=data,
+ time_step=time_step,
+ )
+ init = functools.partial(init_full, annotation=annotation, lines=lines, ax=ax)
+ init()
+ animate(i=iteration)
+ if file_name:
+ fig.savefig(fname=file_name)
+
+
+[docs]def make_animation(
+ data: Data,
+ time_step: float = 0,
+ file_name: str = "",
+ customize: Customizer = None,
+ iteration=-1,
+ interval: int = 300,
+):
+ labels = list(data) # from data
+
+ with Style() as style:
+ fig, ax = make_fig(style)
+ if customize:
+ fig, ax = customize(fig, ax)
+ lines = make_lines(labels, ax=ax, fig=fig)
+ annotation = ax.annotate(
+ xy=(0.1, 0.1),
+ xytext=(0.5, 1.05),
+ text="Iteration: 0",
+ animated=True,
+ textcoords="axes fraction",
+ xycoords="axes fraction",
+ ha="center",
+ )
+
+ animate = functools.partial(
+ animate_full,
+ annotation=annotation,
+ lines=lines,
+ data=data,
+ time_step=time_step,
+ )
+ init = functools.partial(init_full, annotation=annotation, lines=lines, ax=ax)
+
+ # setup_figure()
+ anim = FuncAnimation(
+ fig,
+ animate,
+ init_func=init,
+ frames=iteration,
+ interval=interval,
+ blit=True,
+ repeat_delay=1500,
+ )
+ if not file_name.endswith(".gif"):
+ raise ValueError(
+ f"Target filename needs '.gif' extension. Given filename was {file_name}"
+ )
+ anim.save(file_name, writer="imagemagick")
+
+
+if __name__ == "__main__":
+
+ def customize_fig(fig: plt.Figure, ax: plt.Axes) -> tuple[plt.Figure, plt.Axes]:
+ # grids
+ make_grid(ax)
+
+ # auxiliary
+ ax.set_ylim(0, 0.11)
+ ax.set_xlim(0, 3000)
+ ax.legend()
+ # cax.get_legend().remove()
+ ax.set_ylabel("Temperature / $°C$")
+
+ # ticks
+ # xticks = np.arange(mpc_log.index[0], mpc_log.index[-1] + 1, 24 * 3600)
+ # xtickval = [str(i) for i, _ in enumerate(xticks)]
+ # plt.xticks(xticks, xtickval)
+ ax.set_xlabel("Time / h")
+ return fig, ax
+
+ test_dir = Path(
+ r"C:\Users\ses\Dokumente\Vorträge\Freitagsvorträge\2021September\admm"
+ )
+ filename = "first_anim.gif"
+ room = load_admm(Path(test_dir, "admm_opt.csv"))
+ cooler = load_admm(Path(test_dir, "cooler_res.csv"))
+ data_ = {
+ Label("room_T"): room["variable"]["mDot_0"],
+ Label("cooler_T"): cooler["variable"]["mDot"],
+ }
+
+ iter_dict = get_number_of_iterations(room)
+ iters = pd.Series(iter_dict).iloc[0]
+
+ make_animation(
+ file_name=filename,
+ data=data_,
+ customize=customize_fig,
+ time_step=500,
+ iteration=iters,
+ )
+
+import pandas as pd
+
+import agentlib_mpc.data_structures.admm_datatypes as adt
+from agentlib_mpc.utils.analysis import admm_at_time_step
+from agentlib_mpc.utils.plotting.basic import EBCColors
+from agentlib_mpc.utils.plotting.mpc import interpolate_colors
+
+
+[docs]def plot_consensus_shades(
+ results: dict[str, dict[str, pd.DataFrame]],
+ data: dict[str, pd.DataFrame],
+ time_step: float,
+ # series: pd.Series,
+ # ax: plt.Axes,
+ # plot_actual_values: bool = False,
+ # step: bool = False,
+):
+ """
+
+ Args:
+ series:
+
+ Returns:
+
+ """
+ data = {}
+
+ def mean(df: pd.DataFrame, name: str) -> pd.Series:
+ return df["parameter"][adt.MEAN_PREFIX + name]
+
+ def local(df: pd.DataFrame, name: str) -> pd.Series:
+ return df["variable"][adt.LOCAL_PREFIX + name]
+
+ def lmbda(df: pd.DataFrame, name: str) -> pd.Series:
+ return df["parameter"][adt.MULTIPLIER_PREFIX + name]
+
+ room_2 = results["CooledRoom_nn2"]["admm_module"]
+ trajectories = {
+ label: admm_at_time_step(srs, time_step=time_step)
+ for label, srs in data.items()
+ }
+
+ # pick one trajectory; the loop below iterates over its predictions
+ series = next(iter(trajectories.values()))
+ number_of_iterations: int = room_2.index.unique(level=0).shape[0]
+
+ number_of_predictions: int = room_2.index.unique(level=0).shape[0]
+
+ # stores the first value of each prediction. In the case of a control_variable,
+ # this will give the optimal control output the mpc determined this step, or in
+ # the case of a state, this will give the measurement it worked with
+ actual_values: dict[float, float] = {}
+
+ for i, (time, prediction) in enumerate(series.groupby(level=0)):
+ prediction: pd.Series = prediction.dropna()
+ actual_values[time] = prediction.iloc[0]
+
+ progress = i / number_of_predictions
+ prediction_color = interpolate_colors(
+ progress=progress,
+ colors=[EBCColors.red, EBCColors.dark_grey, EBCColors.light_grey],
+ )
+ prediction.index = prediction.index.droplevel(0) + time
+ # if not step:
+ # prediction.plot(ax=ax, color=prediction_color)
+ # else:
+ # prediction.plot(ax=ax, color=prediction_color, drawstyle="steps-post")
+
+ # if plot_actual_values:
+ # actual_series = pd.Series(actual_values)
+ # if not step:
+ # actual_series.plot(ax=ax, color="black")
+ # else:
+ # actual_series.plot(ax=ax, color=EBCColors.dark_red, drawstyle="steps-post")
+
+ # last_index = prediction.index[-1]
+ # num_iters = last_index[1]
+
+import os
+import webbrowser
+from pathlib import Path
+from typing import List, Dict, Optional, Literal
+
+from agentlib.core.errors import OptionalDependencyError
+import pandas as pd
+
+
+from agentlib_mpc.utils.analysis import load_mpc
+from agentlib_mpc.utils.plotting.admm_residuals import load_residuals
+from agentlib_mpc.utils.plotting.interactive import get_port
+
+try:
+ import dash
+ from dash import html, dcc
+ from dash.dependencies import Input, Output, State
+ import plotly.graph_objects as go
+ import dash_daq as daq
+except ImportError as e:
+ raise OptionalDependencyError(
+ dependency_name="interactive",
+ dependency_install="plotly, dash",
+ used_object="interactive",
+ ) from e
+
+
+[docs]def load_agent_data(directory: str) -> Dict[str, pd.DataFrame]:
+ """
+ Load MPC data for multiple agents from files containing 'admm' in their name.
+
+ Args:
+ directory (str): Directory path containing the data files.
+
+ Returns:
+ Dict[str, pd.DataFrame]: Dictionary with agent names as keys and their data as values.
+ """
+ agent_data = {}
+ for filename in os.listdir(directory):
+ if (
+ "admm" in filename.casefold()
+ and filename.endswith(".csv")
+ and not "stats" in filename.casefold()
+ ):
+ file_path = os.path.join(directory, filename)
+ agent_name = f"Agent_{len(agent_data) + 1}"
+ try:
+ agent_data[agent_name] = load_mpc(file_path)
+ except Exception as e:
+ print(f"Error loading file {filename}: {str(e)}")
+ return agent_data
+
+
+[docs]def get_coupling_variables(df: pd.DataFrame) -> List[str]:
+ """
+ Identify coupling variables in the dataframe.
+
+ Args:
+ df (pd.DataFrame): The MPC data for an agent.
+
+ Returns:
+ List[str]: List of coupling variable names.
+ """
+ coupling_vars = []
+ for col in df.columns:
+ if col[0] == "parameter" and col[1].startswith("admm_coupling_mean_"):
+ var_name = col[1].replace("admm_coupling_mean_", "")
+ if ("variable", var_name) in df.columns:
+ coupling_vars.append(var_name)
+ return coupling_vars
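+
+# A minimal sketch of the column layout this function expects, following the
+# admm_coupling_mean_ prefix convention used above:
+_cols = pd.MultiIndex.from_tuples(
+    [("variable", "T"), ("parameter", "admm_coupling_mean_T")]
+)
+assert get_coupling_variables(pd.DataFrame(columns=_cols)) == ["T"]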
+
+
+[docs]def get_data_for_plot(
+ agent_data: Dict[str, pd.DataFrame],
+ time_step: float,
+ iteration: int,
+ coupling_var: str,
+) -> tuple[Dict[str, List[float]], List[float]]:
+ """
+ Extract data for the coupling variable plot.
+
+ Args:
+ agent_data (Dict[str, pd.DataFrame]): Dictionary containing data for each agent.
+ time_step (float): Selected time step.
+ iteration (int): Selected iteration number.
+ coupling_var (str): Name of the selected coupling variable.
+
+ Returns:
+ Tuple of the plot data (agent names as keys, their values as lists,
+ plus a "Mean" entry) and the prediction grid.
+ """
+ plot_data = {}
+ prediction_grid = None
+
+ for agent_name, df in agent_data.items():
+ try:
+ agent_data_at_step = df.loc[(time_step, iteration)]
+ agent_values = agent_data_at_step[("variable", coupling_var)].values
+ plot_data[agent_name] = agent_values.tolist()
+
+ if prediction_grid is None:
+ prediction_grid = agent_data_at_step.index.tolist()
+
+ # Get mean value (assuming it's the same for all agents)
+ if "Mean" not in plot_data:
+ mean_values = agent_data_at_step[
+ ("parameter", f"admm_coupling_mean_{coupling_var}")
+ ].values
+ plot_data["Mean"] = mean_values.tolist()
+ except KeyError:
+ continue # Skip this agent if data is not available for the selected time step and iteration
+
+ return plot_data, prediction_grid
+
+
+[docs]def create_coupling_var_plot(
+ plot_data: Dict[str, List[float]], prediction_grid: List[float], coupling_var: str
+) -> go.Figure:
+ """
+ Create a plotly figure for the coupling variable plot.
+
+ Args:
+ plot_data (Dict[str, List[float]]): Dictionary with agent names as keys and their values as lists.
+ prediction_grid (List[float]): List of prediction grid values.
+ coupling_var (str): Name of the coupling variable.
+
+ Returns:
+ go.Figure: Plotly figure object.
+ """
+ fig = go.Figure()
+
+ for agent_name, values in plot_data.items():
+ if agent_name == "Mean":
+ line_style = dict(color="red", dash="dash", width=2)
+ else:
+ line_style = dict(width=1)
+
+ fig.add_trace(
+ go.Scatter(
+ x=prediction_grid,
+ y=values,
+ mode="lines",
+ name=agent_name,
+ line=line_style,
+ )
+ )
+
+ fig.update_layout(
+ title=f"Coupling Variable: {coupling_var}",
+ xaxis_title="Prediction Grid",
+ yaxis_title="Value",
+ legend_title="Legend",
+ legend=dict(yanchor="top", y=0.99, xanchor="left", x=0.01),
+ )
+
+ return fig
+
+
+[docs]def get_max_iterations_per_timestep(
+ agent_data: Dict[str, pd.DataFrame],
+) -> Dict[float, int]:
+ """Returns, for each time step, the maximum ADMM iteration count across all agents."""
+ max_iterations = {}
+ for df in agent_data.values():
+ for time_step in df.index.get_level_values(0).unique():
+ iterations = df.loc[time_step].index.get_level_values(0).max()
+ if (
+ time_step not in max_iterations
+ or iterations > max_iterations[time_step]
+ ):
+ max_iterations[time_step] = iterations
+ return max_iterations
+
+
+[docs]def create_residuals_plot(residuals_df: pd.DataFrame, time_step: float) -> go.Figure:
+ """
+ Create a plotly figure for the residuals plot.
+
+ Args:
+ residuals_df (pd.DataFrame): DataFrame containing residuals data.
+ time_step (float): Selected time step.
+
+ Returns:
+ go.Figure: Plotly figure object.
+ """
+ fig = go.Figure()
+
+ residuals_data = residuals_df.loc[time_step]
+
+ if len(residuals_data) == 1: # Only one iteration (iteration = 0)
+ primal_residual = residuals_data["primal_residual"].iloc[0]
+ dual_residual = residuals_data["dual_residual"].iloc[0]
+
+ fig.add_trace(
+ go.Scatter(
+ x=[0, 1],
+ y=[primal_residual, primal_residual],
+ mode="lines",
+ name="Primal Residual",
+ )
+ )
+
+ fig.add_trace(
+ go.Scatter(
+ x=[0, 1],
+ y=[dual_residual, dual_residual],
+ mode="lines",
+ name="Dual Residual",
+ )
+ )
+
+ fig.update_layout(
+ xaxis_range=[0, 1],
+ xaxis_title="Iteration",
+ )
+ else:
+ fig.add_trace(
+ go.Scatter(
+ x=residuals_data.index,
+ y=residuals_data["primal_residual"],
+ mode="lines",
+ name="Primal Residual",
+ )
+ )
+
+ fig.add_trace(
+ go.Scatter(
+ x=residuals_data.index,
+ y=residuals_data["dual_residual"],
+ mode="lines",
+ name="Dual Residual",
+ )
+ )
+
+ fig.update_layout(
+ xaxis_title="Iteration",
+ )
+
+ fig.update_layout(
+ title="Primal and Dual Residuals",
+ yaxis_title="Residual Value",
+ yaxis_type="log",
+ yaxis=dict(
+ tickformat=".2e", # Use scientific notation with 2 decimal places
+ exponentformat="e", # Use "e" notation for exponents
+ ),
+ legend_title="Legend",
+ )
+
+ return fig
+
+
+[docs]def create_app(agent_data: Dict[str, pd.DataFrame], residuals_df: pd.DataFrame):
+ """
+ Create and configure the Dash app.
+
+ Args:
+ agent_data (Dict[str, pd.DataFrame]): Dictionary containing data for each agent.
+ residuals_df (pd.DataFrame): DataFrame containing residuals data.
+
+ Returns:
+ dash.Dash: Configured Dash app.
+ """
+ app = dash.Dash(__name__)
+
+ # Get time steps and iteration numbers
+ first_agent_data = next(iter(agent_data.values()))
+ time_steps = sorted(first_agent_data.index.get_level_values(0).unique())
+ max_iterations_per_timestep = get_max_iterations_per_timestep(agent_data)
+ overall_max_iterations = max(max_iterations_per_timestep.values())
+
+ # Get coupling variables
+ coupling_vars = get_coupling_variables(first_agent_data)
+
+ app.layout = html.Div(
+ [
+ html.H1("Distributed MPC with ADMM Dashboard"),
+ # time step slider
+ html.Div(
+ [
+ html.Label("Time Step"),
+ html.Div(
+ [
+ html.Div(
+ [
+ dcc.Slider(
+ id="time-step-slider",
+ min=0,
+ max=len(time_steps) - 1,
+ value=0,
+ marks={
+ i: f"{time_steps[i]:.2f}"
+ for i in range(
+ 0,
+ len(time_steps),
+ max(1, len(time_steps) // 10),
+ )
+ },
+ step=1,
+ )
+ ],
+ style={
+ "width": "80%",
+ "display": "inline-block",
+ "verticalAlign": "middle",
+ },
+ ),
+ html.Div(
+ [
+ html.Button(
+ "◀",
+ id="prev-time-step",
+ n_clicks=0,
+ style={"marginRight": "5px"},
+ ),
+ html.Button(
+ "▶",
+ id="next-time-step",
+ n_clicks=0,
+ style={"marginRight": "5px"},
+ ),
+ html.Div(
+ id="time-step-display",
+ style={
+ "display": "inline-block",
+ "marginRight": "10px",
+ },
+ ),
+ daq.NumericInput(
+ id="time-step-input",
+ min=0,
+ max=len(time_steps) - 1,
+ value=0,
+ size=60,
+ ),
+ ],
+ style={
+ "width": "20%",
+ "display": "inline-block",
+ "verticalAlign": "middle",
+ "textAlign": "right",
+ },
+ ),
+ ]
+ ),
+ ]
+ ),
+ # iteration slide
+ html.Div(
+ [
+ html.Label("Iteration Number"),
+ html.Div(
+ [
+ html.Div(
+ [
+ dcc.Slider(
+ id="iteration-slider",
+ min=0,
+ max=overall_max_iterations,
+ value=0,
+ marks={
+ i: str(i)
+ for i in range(
+ 0,
+ overall_max_iterations + 1,
+ max(1, overall_max_iterations // 10),
+ )
+ },
+ step=1,
+ )
+ ],
+ style={
+ "width": "80%",
+ "display": "inline-block",
+ "verticalAlign": "middle",
+ },
+ ),
+ html.Div(
+ [
+ daq.NumericInput(
+ id="iteration-input",
+ min=0,
+ max=overall_max_iterations,
+ value=0,
+ size=60,
+ )
+ ],
+ style={
+ "width": "20%",
+ "display": "inline-block",
+ "verticalAlign": "middle",
+ "textAlign": "right",
+ },
+ ),
+ ]
+ ),
+ ]
+ ),
+ html.Div(
+ [
+ html.Label("Coupling Variable"),
+ dcc.Dropdown(
+ id="coupling-var-dropdown",
+ options=[{"label": var, "value": var} for var in coupling_vars],
+ value=coupling_vars[0] if coupling_vars else None,
+ ),
+ ]
+ ),
+ dcc.Graph(id="coupling-var-plot"),
+ dcc.Graph(id="residuals-plot"),
+ dcc.Store(id="y-axis-range"),
+ ]
+ )
+
+ @app.callback(
+ [
+ Output("time-step-input", "value"),
+ Output("time-step-display", "children"),
+ Output("time-step-slider", "value"),
+ ],
+ [
+ Input("time-step-slider", "value"),
+ Input("prev-time-step", "n_clicks"),
+ Input("next-time-step", "n_clicks"),
+ Input("time-step-input", "value"),
+ ],
+ [State("time-step-input", "value")],
+ )
+ def update_time_step(
+ slider_value, prev_clicks, next_clicks, input_value, current_value
+ ):
+ ctx = dash.callback_context
+ if not ctx.triggered:
+ return (
+ current_value,
+ f"Current time step: {time_steps[current_value]:.2f}",
+ current_value,
+ )
+
+ input_id = ctx.triggered[0]["prop_id"].split(".")[0]
+
+ if input_id == "time-step-slider":
+ new_value = slider_value
+ elif input_id == "prev-time-step":
+ new_value = max(0, current_value - 1)
+ elif input_id == "next-time-step":
+ new_value = min(len(time_steps) - 1, current_value + 1)
+ elif input_id == "time-step-input":
+ new_value = input_value
+ else:
+ new_value = current_value
+
+ return new_value, f"Current time step: {time_steps[new_value]:.2f}", new_value
+
+ @app.callback(
+ [
+ Output("iteration-slider", "max"),
+ Output("iteration-slider", "marks"),
+ Output("iteration-input", "max"),
+ ],
+ [Input("time-step-input", "value")],
+ )
+ def update_iteration_range(time_step_index):
+ time_step = time_steps[time_step_index]
+ max_iter = max_iterations_per_timestep[time_step]
+ marks = {i: str(i) for i in range(0, max_iter + 1, max(1, max_iter // 10))}
+ return max_iter, marks, max_iter
+
+ @app.callback(
+ Output("coupling-var-plot", "figure"),
+ [
+ Input("time-step-input", "value"),
+ Input("iteration-input", "value"),
+ Input("coupling-var-dropdown", "value"),
+ Input("y-axis-range", "data"),
+ ],
+ )
+ def update_coupling_var_plot(time_step_index, iteration, coupling_var, y_range):
+ if coupling_var is None:
+ return go.Figure()
+
+ time_step = time_steps[time_step_index]
+ max_iter = max_iterations_per_timestep[time_step]
+ iteration = min(iteration, max_iter)
+
+ plot_data, prediction_grid = get_data_for_plot(
+ agent_data, time_step, iteration, coupling_var
+ )
+ fig = create_coupling_var_plot(plot_data, prediction_grid, coupling_var)
+
+ if y_range is not None:
+ fig.update_layout(yaxis_range=y_range)
+
+ return fig
+
+ @app.callback(
+ Output("y-axis-range", "data"),
+ [Input("time-step-input", "value"), Input("coupling-var-dropdown", "value")],
+ )
+ def compute_y_axis_range(time_step_index, coupling_var):
+ if coupling_var is None:
+ return None
+
+ time_step = time_steps[time_step_index]
+
+ max_vals = []
+ min_vals = []
+ for agent, data in agent_data.items():
+ try:
+ step_data = data[("variable", coupling_var)][time_step]
+ except KeyError:
+ continue
+ max_vals.append(step_data.max())
+ min_vals.append(step_data.min())
+
+ # no agent had data for this time step and variable
+ if not max_vals:
+ return None
+ y_min = min(min_vals)
+ y_max = max(max_vals)
+
+ y_range = [y_min - 0.1 * (y_max - y_min), y_max + 0.1 * (y_max - y_min)]
+ return y_range
+
+ @app.callback(
+ Output("residuals-plot", "figure"),
+ [Input("time-step-input", "value")],
+ )
+ def update_residuals_plot(time_step_index):
+ time_step = time_steps[time_step_index]
+
+ # Check if residuals data exists for this time step
+ if time_step not in residuals_df.index:
+ # If no data, return an empty figure with a message
+ fig = go.Figure()
+ fig.add_annotation(
+ text="No residuals data available for this time step",
+ xref="paper",
+ yref="paper",
+ x=0.5,
+ y=0.5,
+ showarrow=False,
+ )
+ return fig
+
+ return create_residuals_plot(residuals_df, time_step)
+
+ return app
+
+
+[docs]def main():
+ # Specify the directory containing the data files
+ data_directory = Path(r"D:\repos\juelich_mpc\juelich_mpc\mpc\simple_model\results")
+
+ # Load agent data
+ agent_data = load_agent_data(data_directory)
+ agent_data["heating"] = load_mpc(Path(data_directory, "heating_agent_res.csv"))
+ # "room_1": load_mpc(Path(data_directory, "room_1_admm.csv")),
+ # "room_2": load_mpc(Path(data_directory, "room_2_admm.csv")),
+ # "room_3": load_mpc(Path(data_directory, "room_3_admm.csv")),
+ # "heating": load_mpc(Path(data_directory, "heating_agent_res.csv")),
+
+ # Load residuals data
+ residuals_file = os.path.join(
+ data_directory, "residuals.csv"
+ ) # Adjust the filename as needed
+ residuals_df = load_residuals(residuals_file)
+
+ # Create and run the app
+ app = create_app(agent_data, residuals_df)
+ port = get_port()
+
+ webbrowser.open_new_tab(f"http://localhost:{port}")
+ app.run_server(debug=False, port=port)
+
+
+[docs]def show_admm_dashboard(
+ data: dict[str, pd.DataFrame],
+ residuals: Optional[pd.DataFrame] = None,
+ scale: Literal["seconds", "minutes", "hours", "days"] = "seconds",
+):
+ app = create_app(data, residuals)
+ port = get_port()
+
+ webbrowser.open_new_tab(f"http://localhost:{port}")
+ app.run_server(debug=False, port=port)
+
+
+if __name__ == "__main__":
+ main()
+
+from ast import literal_eval
+from pathlib import Path
+from typing import Union
+
+import matplotlib.pyplot as plt
+import pandas as pd
+
+from agentlib_mpc.utils.plotting.basic import Style, make_fig, make_grid, EBCColors
+
+
+[docs]def load_residuals(file: Union[str, Path]) -> pd.DataFrame:
+ """Loads a residuals csv file in the correct format."""
+ df = pd.read_csv(file, index_col=0)
+ new_ind = [literal_eval(i) for i in df.index]
+ df.index = pd.MultiIndex.from_tuples(new_ind)
+ return df
+
+
+[docs]def plot_single_time_step(
+ residuals: pd.DataFrame,
+ time_step: float = None,
+ primal_tol: float = None,
+ dual_tol: float = None,
+) -> tuple[plt.Figure, plt.Axes]:
+ """Plots the decrease of the residuals over iterations for a time step"""
+
+ if time_step is None:
+ residuals_time = residuals.index.get_level_values(0)[0]
+ first_opt = residuals.loc[residuals_time]
+ else:
+ first_opt = residuals.loc[time_step]
+
+ with Style() as style:
+ fig, ax = make_fig(style)
+ make_grid(ax)
+ ax.set_ylabel("Residuals")
+
+ first_opt["primal_residual"].plot(
+ ax=ax, label="$r^k$", color=EBCColors.blue, linewidth=0.7
+ )
+ first_opt["dual_residual"].plot(
+ ax=ax, label="$s^k$", color=EBCColors.red, linewidth=0.7
+ )
+ ax.set_yscale("log")
+
+ if primal_tol:
+ ax.axhline(
+ primal_tol,
+ label="$r_0$",
+ color=EBCColors.blue,
+ linewidth=0.7,
+ linestyle="--",
+ )
+ if dual_tol:
+ ax.axhline(
+ dual_tol,
+ label="$s_0$",
+ color=EBCColors.red,
+ linewidth=0.7,
+ linestyle="--",
+ )
+ ax.legend(
+ loc="center left", bbox_to_anchor=(1, 0.5), frameon=False, handlelength=1
+ )
+ return fig, ax
+
+
+[docs]def plot_over_time(
+ residuals: pd.DataFrame,
+ primal_tol: float = None,
+ dual_tol: float = None,
+ max_iters: int = None,
+) -> tuple[plt.Figure, tuple[plt.Axes, plt.Axes]]:
+ """Plots the final residuals over time."""
+ res_over_time = residuals_over_time(residuals)
+
+ with Style() as style:
+ fig, (ax_res, ax_iter) = plt.subplots(2, 1)
+ ax_res: plt.Axes
+ ax_iter: plt.Axes
+ ax_res.tick_params(
+ axis="both",
+ which="major",
+ labelsize=style.font_dict["fontsize"],
+ left=False,
+ )
+ ax_iter.tick_params(
+ axis="both",
+ which="major",
+ labelsize=style.font_dict["fontsize"],
+ left=False,
+ )
+ make_grid(ax_res)
+ make_grid(ax_iter)
+
+ res_over_time["primal_residual"].plot(
+ ax=ax_res, label="$r_t$", color=EBCColors.blue, linewidth=0.7
+ )
+ res_over_time["dual_residual"].plot(
+ ax=ax_res, label="$s_t$", color=EBCColors.red, linewidth=0.7
+ )
+ if primal_tol:
+ ax_res.axhline(
+ primal_tol,
+ label="$r_0$",
+ color=EBCColors.blue,
+ linewidth=0.7,
+ linestyle="--",
+ )
+ if dual_tol:
+ ax_res.axhline(
+ dual_tol,
+ label="$s_0$",
+ color=EBCColors.red,
+ linewidth=0.7,
+ linestyle="--",
+ )
+ ax_res.set_ylabel("Residuals")
+ ax_res.legend(
+ loc="center left", bbox_to_anchor=(1, 0.5), frameon=False, handlelength=1
+ )
+ # ax_res.set_yscale("log")
+
+ ax_iter.set_ylabel("Iterations")
+ res_over_time["iters"].plot(
+ ax=ax_iter, label="iterations", color="black", linewidth=0.7
+ )
+ if max_iters:
+ ax_iter.axhline(
+ max_iters,
+ label="Iteration limit",
+ color="black",
+ linewidth=0.7,
+ linestyle="--",
+ )
+ ax_iter.legend(
+ loc="center left", bbox_to_anchor=(1, 0.5), frameon=False, handlelength=1
+ )
+ return fig, (ax_res, ax_iter)
+
+
+[docs]def residuals_over_time(residuals: pd.DataFrame) -> pd.DataFrame:
+ """Evaluates the residuals over time. Takes a raw residuals DataFrame and
+ returns a DataFrame which, for each time step, holds the number of iterations
+ and the final primal and dual residuals.
+
+ Returns:
+ DataFrame with float index (time in seconds) and the columns
+ ("primal_residual", "dual_residual", "iters")
+ """
+ time_vals = set(residuals.index.get_level_values(0))
+ iters = {t: residuals.loc[t].shape[0] for t in time_vals}
+ prim_res = {t: residuals.loc[t].iloc[-1]["primal_residual"] for t in time_vals}
+ dual_res = {t: residuals.loc[t].iloc[-1]["dual_residual"] for t in time_vals}
+
+ df = pd.DataFrame(
+ {"primal_residual": prim_res, "dual_residual": dual_res, "iters": iters}
+ ).sort_index()
+ return df
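+
+# A hedged usage sketch (file path hypothetical):
+# residuals = load_residuals("results/residuals.csv")
+# summary = residuals_over_time(residuals)
+# summary[["primal_residual", "dual_residual"]].plot(logy=True)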
+
+"""Some basic plotting utilities"""
+
+import logging
+import typing
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Tuple, Callable, TypedDict, Annotated
+
+import matplotlib
+from matplotlib import pyplot as plt
+from matplotlib.ticker import AutoMinorLocator
+
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class ValueRange:
+ min: float
+ max: float
+
+Float0to1 = Annotated[float, ValueRange(0.0, 1.0)]
+ColorTuple = tuple[Float0to1, Float0to1, Float0to1]
+
+
+[docs]class EBCColors:
+ dark_red: ColorTuple = (172 / 255, 43 / 255, 28 / 255)
+ red: ColorTuple = (221 / 255, 64 / 255, 45 / 255)
+ light_red: ColorTuple = (235 / 255, 140 / 255, 129 / 255)
+ green: ColorTuple = (112 / 255, 173 / 255, 71 / 255)
+ light_grey: ColorTuple = (217 / 255, 217 / 255, 217 / 255)
+ grey: ColorTuple = (157 / 255, 158 / 255, 160 / 255)
+ dark_grey: ColorTuple = (78 / 255, 79 / 255, 80 / 255)
+ light_blue: ColorTuple = (157 / 255, 195 / 255, 230 / 255)
+ blue: ColorTuple = (0 / 255, 84 / 255, 159 / 255)
+ ebc_palette_sort_1: list[ColorTuple] = [
+ dark_red,
+ red,
+ light_red,
+ dark_grey,
+ grey,
+ light_grey,
+ blue,
+ light_blue,
+ green,
+ ]
+ ebc_palette_sort_2: list[ColorTuple] = [
+ red,
+ blue,
+ grey,
+ green,
+ dark_red,
+ dark_grey,
+ light_red,
+ light_blue,
+ light_grey,
+ ]
+
+
+
+class FontDict(TypedDict):
+ fontsize: int
+
+
+[docs]class Style:
+ def __init__(self, use_tex: bool = False):
+ self.font_dict: FontDict = {"fontsize": 11}
+ self.use_tex = use_tex
+
+ def __enter__(self):
+ try:
+ style_path = Path(Path(__file__).parent, "ebc.paper.mplstyle")
+ plt.style.use(style_path)
+ except OSError:
+ logger.warning("Style Sheet could not be loaded, using default style.")
+ if self.use_tex:
+ matplotlib.rc("text", usetex=True)
+ matplotlib.rcParams["text.latex.preamble"] = r"\usepackage{amsmath}"
+ # matplotlib.rcParams.update({
+ # "font.family": 'serif',
+ # 'font.serif': 'Times',
+ # })
+ #
+ # fontP = FontProperties().set_size('xx-small')
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ matplotlib.rcParams.update(matplotlib.rcParamsDefault)
+
+
+Customizer = Callable[[plt.Figure, plt.Axes, Style], Tuple[plt.Figure, plt.Axes]]
+MultiCustomizer = Callable[
+ [plt.Figure, tuple[plt.Axes], Style], Tuple[plt.Figure, tuple[plt.Axes]]
+]
+
+
+@typing.overload
+def make_fig(
+ style: Style, customizer: MultiCustomizer = None, rows: int = 1
+) -> tuple[plt.Figure, tuple[plt.Axes, ...]]: ...
+
+
+@typing.overload
+def make_fig(
+ style: Style, customizer: Customizer = None
+) -> tuple[plt.Figure, plt.Axes]: ...
+
+
+@typing.overload
+def make_fig(style: Style) -> tuple[plt.Figure, plt.Axes]: ...
+
+
+[docs]def make_fig(
+ style: Style, customizer: Customizer = None, rows=None
+) -> Tuple[plt.Figure, tuple[plt.Axes]]:
+ """Creates a figure and axes with an amount of rows. If rows is specified, return
+ a tuple of axes, else only an ax"""
+ if rows is None:
+ _rows = 1
+ else:
+ _rows = rows
+ fig, all_ax = plt.subplots(_rows, 1, sharex=True)
+
+ if rows is None:
+ # if rows was not specified, return a single axes object
+ ax = all_ax
+ ax.tick_params(
+ axis="both",
+ which="major",
+ labelsize=style.font_dict["fontsize"],
+ left=False,
+ )
+ if customizer:
+ customizer(fig, all_ax, style)
+ return fig, all_ax
+
+ # if rows was specified, return a tuple
+ if rows == 1:
+ all_ax = (all_ax,)
+
+ for ax in all_ax:
+ ax.tick_params(
+ axis="both",
+ which="major",
+ labelsize=style.font_dict["fontsize"],
+ left=False,
+ )
+ if customizer:
+ customizer(fig, all_ax, style)
+ return fig, all_ax
+
+
+[docs]def make_grid(ax: plt.Axes):
+ ax.xaxis.set_minor_locator(AutoMinorLocator())
+ ax.yaxis.set_minor_locator(AutoMinorLocator())
+ ax.grid(
+ which="major",
+ axis="both",
+ linestyle="--",
+ linewidth=0.5,
+ color="black",
+ zorder=0,
+ )
+ ax.grid(
+ which="minor", axis="both", linestyle="--", linewidth=0.5, color="0.7", zorder=0
+ )
+
+
+[docs]def make_side_legend(ax: plt.Axes, fig: plt.Figure = None, right_position: float = 1):
+ ax.legend(loc="center left", bbox_to_anchor=(1, 0.5), frameon=False, handlelength=1)
+ if fig and right_position > 0:
+ fig.subplots_adjust(right=right_position)
+
+from pathlib import Path
+
+import matplotlib.pyplot as plt
+import pandas as pd
+
+from agentlib_mpc.utils.analysis import load_admm, admm_at_time_step
+from agentlib_mpc.utils.plotting import basic
+from agentlib_mpc.utils.plotting.basic import Customizer, Style, EBCColors
+
+
+[docs]def spy_structure(df: pd.DataFrame, customizer: Customizer = None, file: Path = ""):
+ with basic.Style() as style:
+ style.font_dict["fontsize"] = 12
+ fig, ax = basic.make_fig(style, customizer=customizer)
+
+ columns = df.columns
+ index = df.index
+ ax.spy(
+ df.notnull(),
+ markersize=10,
+ markerfacecolor=EBCColors.red,
+ markeredgecolor=EBCColors.red,
+ )
+ # set tick positions before tick labels, otherwise matplotlib pairs the
+ # labels with the wrong (auto-generated) ticks
+ ax.set_xticks([i for i, _ in enumerate(columns)])
+ ax.set_xticklabels(columns, fontsize=style.font_dict["fontsize"])
+ ax.set_yticks([i for i, _ in enumerate(index)])
+ ax.set_yticklabels(
+ [f"{int(time)} s" for time in index],
+ fontsize=style.font_dict["fontsize"],
+ )
+
+ if file:
+ fig.savefig(file)
+ else:
+ fig.show()
+
+
+if __name__ == "__main__":
+
+ def customize(fig: plt.Figure, ax: plt.Axes, style: Style):
+ cm = 1 / 2.54
+ fig.set_size_inches(8 * cm, 9 * cm)
+ return fig, ax
+
+ base_path = Path(
+ r"C:\Users\ses\Dokumente\Konferenzen\MED2022\Figures\Simulations"
+ r"\iteration analysis\tracking_constantrho1"
+ )
+
+ cons1_path = Path(base_path, "admm_consumer1.csv")
+ cons2_path = Path(base_path, "admm_consumer2.csv")
+ prod_path = Path(base_path, "admm_prod_opt.csv")
+
+ cons1 = load_admm(cons1_path)
+ room_0_0 = admm_at_time_step(cons1, time_step=0, iteration=0)
+ room_0_0_vars = room_0_0["variable"].iloc[:8]
+ # room_0_0_vars = room_0_0_vars[['room_T', 'wall_T', 'room_T_slack', 'Heat_flow_in', 'heating_T']]
+ room_0_0_vars.drop("room_T_slack", axis=1, inplace=True)
+ column_labels = ["$T_z$", "$T_w$", r"$\dot{Q}_{in}$", "$T_h$"]
+ room_0_0_vars.columns = column_labels
+ spy_structure(
+ room_0_0_vars, customizer=customize, file=Path("cons_structure_short.png")
+ )
+
+ def customize(fig: plt.Figure, ax: plt.Axes, style: Style):
+ cm = 1 / 2.54
+ fig.set_size_inches(8 * cm, 9 * cm)
+ ax.yaxis.tick_right()
+ return fig, ax
+
+ prod = load_admm(prod_path)
+ prod_0_0 = admm_at_time_step(prod, time_step=0, iteration=0)
+ prod_0_0_vars = prod_0_0["variable"].iloc[:8]
+ prod_0_0_vars = prod_0_0_vars[["heating_1_T", "Heat_flow_out_1", "heating_1_speed"]]
+ column_labels = ["$T_h$", r"$\dot{Q}_{in}$", "$u_1$"]
+ prod_0_0_vars.columns = column_labels
+ spy_structure(
+ prod_0_0_vars, customizer=customize, file=Path("prod_structure_short.png")
+ )
+
+import pandas as pd
+from typing import Literal, Optional
+
+import socket
+import webbrowser
+
+from agentlib.core.errors import OptionalDependencyError
+
+from agentlib_mpc.utils import TIME_CONVERSION
+from agentlib_mpc.utils.analysis import load_mpc
+from agentlib_mpc.utils.plotting.basic import EBCColors
+from agentlib_mpc.utils.plotting.mpc import interpolate_colors
+
+try:
+ import dash
+ from dash import html, dcc
+ from dash.dependencies import Input, Output, State
+ import plotly.graph_objects as go
+except ImportError as e:
+ raise OptionalDependencyError(
+ dependency_name="interactive",
+ dependency_install="plotly, dash",
+ used_object="interactive",
+ ) from e
+
+
+[docs]def make_figure_plotly() -> go.Figure:
+ fig = go.Figure()
+ fig.update_layout(
+ title="Interactive Plot Example",
+ showlegend=True,
+ updatemenus=[
+ dict(
+ type="buttons",
+ direction="left",
+ buttons=list(
+ [
+ dict(args=["type", "scatter"], label="Line", method="restyle"),
+ # dict(
+ # args=["type", "bar"],
+ # label="Bar",
+ # method="restyle"
+ # ),
+ ]
+ ),
+ ),
+ ],
+ )
+
+ # This function toggles the visibility of a trace (line) in the plot.
+ # Note: plotly trace click handlers are only supported on go.FigureWidget;
+ # with a plain go.Figure this callback may not take effect.
+ def toggle_traces(trace, points, selector):
+ if len(points.trace_indexes) == 0: # No point has been clicked, do nothing
+ return
+ trace_index = points.trace_indexes[0] # Get the index of the clicked trace
+ fig.data[trace_index].visible = not fig.data[
+ trace_index
+ ].visible # Toggle visibility
+
+ # Add Click event handler to traces
+ fig.for_each_trace(lambda trace: trace.on_click(toggle_traces))
+ return fig
+
+
+[docs]def plot_mpc_plotly(
+ series: pd.Series,
+ step: bool = False,
+ convert_to: Literal["seconds", "minutes", "hours", "days"] = "seconds",
+ y_axis_label: str = "",
+) -> go.Figure:
+ """
+ Args:
+ title:
+ y_axis_label:
+ series: A column of the MPC results Dataframe
+ plot_actual_values: whether the closed loop actual values at the start of each
+ optimization should be plotted (default True)
+ plot_predictions: whether all predicted trajectories should be plotted
+ step: whether to use a step plot or a line plot
+ convert_to: Will convert the index of the returned series to the specified unit
+ (seconds, minutes, hours, days)
+
+ Returns:
+ Figure
+ """
+ fig = go.Figure()
+ number_of_predictions: int = series.index.unique(level=0).shape[0]
+
+ # stores the first value of each prediction
+ actual_values: dict[float, float] = {}
+
+ for i, (time_seconds, prediction) in enumerate(series.groupby(level=0)):
+ prediction: pd.Series = prediction.dropna().droplevel(0)
+
+ time_converted = time_seconds / TIME_CONVERSION[convert_to]
+ actual_values[time_converted] = prediction.loc[0]
+ prediction.index = (prediction.index + time_seconds) / TIME_CONVERSION[
+ convert_to
+ ]
+
+ progress = i / number_of_predictions
+ prediction_color = interpolate_colors(
+ progress=progress,
+ colors=[EBCColors.red, EBCColors.dark_grey],
+ )
+ if not step:
+ fig.add_trace(
+ go.Scatter(
+ x=prediction.index,
+ y=prediction,
+ mode="lines",
+ line=dict(color=f"rgb{prediction_color}", width=0.7),
+ name=f"{time_converted} {convert_to[0]}",
+ legendgroup=f"Prediction",
+ legendgrouptitle_text=f"Predictions",
+ visible=True,
+ legendrank=i + 2,
+ # id=f"trace-{y_axis_label}-{i}",
+ )
+ )
+ else:
+ fig.add_trace(
+ go.Scatter(
+ x=prediction.index,
+ y=prediction,
+ mode="lines",
+ line=dict(
+ color=f"rgb{prediction_color}",
+ width=0.7,
+ shape="hv",
+ ),
+ name=f"{time_converted} {convert_to[0]}",
+ legendgroup=f"Prediction",
+ legendgrouptitle_text=f"Predictions",
+ visible=True,
+ legendrank=i + 2,
+ # id=f"trace-{y_axis_label}-{i}",
+ )
+ )
+
+ actual_series = pd.Series(actual_values)
+ if not step:
+ fig.add_trace(
+ go.Scatter(
+ x=actual_series.index,
+ y=actual_series,
+ mode="lines",
+ line=dict(color="black", width=1.5),
+ name="Actual Values",
+ legendrank=1,
+ )
+ )
+ else:
+ fig.add_trace(
+ go.Scatter(
+ x=actual_series.index,
+ y=actual_series,
+ mode="lines",
+ line=dict(color="black", width=1.5, shape="hv"),
+ name="Actual Values",
+ legendrank=1,
+ )
+ )
+
+ # Update x-axis label based on convert_to argument
+ x_axis_label = f"Time in {convert_to}"
+
+ fig.update_layout(
+ showlegend=True,
+ legend=dict(
+ groupclick="toggleitem",
+ itemclick="toggle",
+ itemdoubleclick="toggleothers",
+ ),
+ xaxis_title=x_axis_label,
+ yaxis_title=y_axis_label,
+ uirevision="same", # Add this line
+ )
+
+ return fig
+
+
+[docs]def plot_admm_plotly(
+ series: pd.Series,
+ plot_actual_values: bool = True,
+ plot_predictions: bool = False,
+ step: bool = False,
+ convert_to: Literal["seconds", "minutes", "hours", "days"] = "seconds",
+) -> go.Figure:
+ """
+ Plots the final ADMM iteration of each time step in the style of an MPC result.
+
+ Args:
+ series: A column of the MPC results Dataframe
+ plot_actual_values: whether the closed loop actual values at the start of each
+ optimization should be plotted (default True)
+ plot_predictions: whether all predicted trajectories should be plotted
+ step: whether to use a step plot or a line plot
+ convert_to: Will convert the index of the returned series to the specified unit
+ (seconds, minutes, hours, days)
+
+ Returns:
+ The created Figure.
+ """
+ grid = series.index.get_level_values(2).unique()
+ tail_length = len(grid[grid >= 0])
+ series_final_predictions = series.groupby(level=0).tail(tail_length).droplevel(1)
+ return plot_mpc_plotly(
+ series=series_final_predictions,
+ step=step,
+ convert_to=convert_to,
+ )
+
+
+[docs]def show_dashboard(
+ data: pd.DataFrame,
+ stats: Optional[pd.DataFrame] = None,
+ scale: Literal["seconds", "minutes", "hours", "days"] = "seconds",
+):
+ app = dash.Dash(__name__, title="MPC Results")
+
+ # Get the list of columns from the DataFrame, and check if they can be plotted
+ columns = data["variable"].columns
+ columns_okay = []
+ for column in columns:
+ try:
+ fig = plot_mpc_plotly(
+ data["variable"][column],
+ convert_to=scale,
+ y_axis_label=column,
+ )
+ columns_okay.append(column)
+ except Exception:
+ pass
+
+ # Store initial figures
+ initial_figures = {}
+ for column in columns_okay:
+ fig = plot_mpc_plotly(
+ data["variable"][column],
+ convert_to=scale,
+ y_axis_label=column,
+ )
+ # Add uirevision to maintain legend state
+ fig.update_layout(uirevision="same")
+ initial_figures[column] = fig
+
+ # Define the layout of the webpage
+ app.layout = html.Div(
+ [
+ html.H1("MPC Results"),
+ # Store for keeping track of trace visibility
+ dcc.Store(id="trace-visibility", data={}),
+ make_components(columns_okay, data, stats=stats, convert_to=scale),
+ ]
+ )
+
+ port = get_port()
+
+ @app.callback(
+ [Output(f"plot-{column}", "figure") for column in columns_okay],
+ [Input(f"plot-{column}", "restyleData") for column in columns_okay],
+ [State(f"plot-{column}", "figure") for column in columns_okay],
+ )
+ def update_plots(*args):
+ ctx = dash.callback_context
+ if not ctx.triggered:
+ return [dash.no_update] * len(columns_okay)
+
+ n_plots = len(columns_okay)
+ restyle_data = args[:n_plots]
+ current_figures = args[n_plots:]
+
+ # Find which plot was changed
+ triggered_prop = ctx.triggered[0]["prop_id"].split(".")[0]
+ triggered_index = next(
+ i for i, col in enumerate(columns_okay) if f"plot-{col}" == triggered_prop
+ )
+ triggered_data = restyle_data[triggered_index]
+
+ if not triggered_data:
+ return [dash.no_update] * n_plots
+
+ # Get the visibility update from the triggered plot
+ visibility_update = triggered_data[0].get("visible", [None])[0]
+ trace_indices = triggered_data[1]
+
+ # Update all figures
+ updated_figures = []
+ for fig in current_figures:
+ # Ensure uirevision is set
+ fig["layout"]["uirevision"] = "same"
+ # Update visibility for the corresponding traces
+ for idx in trace_indices:
+ fig["data"][idx]["visible"] = visibility_update
+ updated_figures.append(fig)
+
+ return updated_figures
+
+ webbrowser.open_new_tab(f"http://localhost:{port}")
+ app.run(debug=False, port=port)
+
+
+[docs]def make_components(
+ columns, data, convert_to, stats: Optional[pd.DataFrame] = None
+) -> html.Div:
+ components = [
+ html.Div(
+ [
+ # html.H3(column),
+ dcc.Graph(
+ id=f"plot-{column}",
+ figure=plot_mpc_plotly(
+ data["variable"][column],
+ convert_to=convert_to,
+ y_axis_label=column,
+ ),
+ style={
+ "min-width": "600px",
+ "min-height": "400px",
+ "max-width": "900px",
+ "max-height": "450px",
+ },
+ ),
+ ],
+ className="draggable",
+ )
+ for column in columns
+ ]
+ if stats is not None:
+ components.insert(0, html.Div([solver_return(stats, convert_to)]))
+ components.insert(
+ 1, html.Div([obj_plot(stats, convert_to)])
+ ) # Add the "obj" plot
+
+ return html.Div(
+ components,
+ style={
+ "display": "grid",
+ "grid-template-columns": "repeat(auto-fit, minmax(600px, 1fr))",
+ "grid-gap": "20px",
+ "padding": "20px",
+ "min-width": "600px",
+ "min-height": "200px",
+ },
+ id="plot-container",
+ )
+
+
+[docs]def obj_plot(
+ data, convert_to: Literal["seconds", "minutes", "hours", "days"] = "seconds"
+) -> dcc.Graph:
+ df = data.copy()
+ index = df.index.values / TIME_CONVERSION[convert_to]
+
+ trace = go.Scatter(
+ x=index,
+ y=df["obj"],
+ mode="lines",
+ name="Objective Value",
+ )
+
+ layout = go.Layout(
+ title="Objective Value",
+ xaxis_title=f"Time in {convert_to}",
+ yaxis_title="Objective Value",
+ showlegend=True,
+ )
+
+ fig = go.Figure(data=[trace], layout=layout)
+
+ return dcc.Graph(
+ id="plot-obj",
+ figure=fig,
+ style={
+ "min-width": "600px",
+ "min-height": "400px",
+ "max-width": "900px",
+ "max-height": "450px",
+ },
+ )
+
+
+[docs]def solver_return(
+ data, convert_to: Literal["seconds", "minutes", "hours", "days"] = "seconds"
+) -> dcc.Graph:
+ solver_data = []
+ indices = []
+ j = 0
+ for i in reversed(data.index.values):
+ if i in indices:
+ break
+ j += 1
+ indices.append(i)
+ solver_data.append(data.iloc[len(data) - j])
+ df = pd.DataFrame(solver_data)
+ df = df.iloc[::-1]
+
+ return_status = {}
+ for idx, success in df.success.items():
+ if success:
+ solver_return = df.return_status[idx]
+ else:
+ solver_return = "Solve_Not_Succeeded"
+ return_status[idx] = solver_return
+
+ solver_returns = pd.Series(return_status)
+ index = solver_returns.index.values / TIME_CONVERSION[convert_to]
+
+ colors = {
+ "Solve_Succeeded": "green",
+ "Solved_To_Acceptable_Level": "orange",
+ "Solve_Not_Succeeded": "red",
+ }
+ legend_names = {
+ "Solved_To_Acceptable_Level": "Acceptable",
+ "Solve_Succeeded": "Optimal",
+ "Solve_Not_Succeeded": "Failure",
+ }
+
+ traces = []
+ for status in colors:
+ mask = solver_returns.values == status
+ if mask.any():
+ trace = go.Scatter(
+ x=index[mask],
+ y=df.loc[solver_returns.index[mask], "iter_count"],
+ mode="markers",
+ marker=dict(
+ color=colors[status],
+ size=10,
+ ),
+ name=legend_names[status],
+ )
+ else:
+ trace = go.Scatter(
+ x=[None],
+ y=[None],
+ mode="markers",
+ marker=dict(
+ color=colors[status],
+ size=10,
+ ),
+ name=legend_names[status],
+ )
+ traces.append(trace)
+
+ layout = go.Layout(
+ title="Solver Return Status",
+ xaxis_title=f"Time in {convert_to}",
+ yaxis_title="Iterations",
+ showlegend=True,
+ )
+
+ fig = go.Figure(data=traces, layout=layout)
+
+ return dcc.Graph(
+ id="plot-solver-return",
+ figure=fig,
+ style={
+ "min-width": "600px",
+ "min-height": "400px",
+ "max-width": "900px",
+ "max-height": "450px",
+ },
+ )
+
+
+[docs]def draggable_script():
+ return html.Script(
+ """
+ var draggableElements = document.getElementsByClassName('draggable');
+ for (var i = 0; i < draggableElements.length; i++) {
+ var element = draggableElements[i];
+ element.addEventListener('mousedown', function(e) {
+ var offset = [
+ this.offsetLeft - e.clientX,
+ this.offsetTop - e.clientY
+ ];
+ var moveHandler = function(e) {
+ element.style.left = (e.clientX + offset[0]) + 'px';
+ element.style.top = (e.clientY + offset[1]) + 'px';
+ };
+ document.addEventListener('mousemove', moveHandler);
+ document.addEventListener('mouseup', function() {
+ document.removeEventListener('mousemove', moveHandler);
+ });
+ });
+ }
+ """
+ )
+
+
+[docs]def get_port():
+ port = 8050
+ while True:
+ with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+ is_free = s.connect_ex(("localhost", port)) != 0
+ if is_free:
+ return port
+ else:
+ port += 1
+
+
+if __name__ == "__main__":
+ data_ = load_mpc(
+ r"D:\repos\agentlib_mpc\examples\one_room_mpc\physical\results\mpc.csv"
+ )
+ show_dashboard(data_)
+ # fig = plot_mpc_plotly(
+ # data["variable"]["T"] - 273.15,
+ # y_axis_label="Room temperature",
+ # convert_to="minutes",
+ # step=False,
+ # )
+ # fig.show()
+
+from pathlib import Path
+from typing import Callable, Union, Optional
+
+import numpy as np
+import pandas as pd
+from matplotlib import pyplot as plt
+
+from agentlib_mpc.models.casadi_predictor import CasadiPredictor, casadi_predictors
+from agentlib_mpc.models.serialized_ml_model import SerializedMLModel
+from agentlib_mpc.utils.plotting import basic
+from agentlib_mpc.data_structures import ml_model_datatypes
+
+
+[docs]def calc_scores(errors: np.ndarray, metric: Callable) -> float:
+ if all(np.isnan(errors)):
+ return 0
+
+ return float(np.mean(metric(errors)))
+
+
+[docs]def predict_array(
+ df: pd.DataFrame, ml_model: CasadiPredictor, outputs: pd.Index
+) -> pd.DataFrame:
+ arr = (
+ ml_model.predict(df.values.reshape(1, -1))
+ .toarray()
+ .reshape((df.shape[0], len(outputs)))
+ )
+ return pd.DataFrame(arr, columns=outputs, index=df.index)
+
+
+[docs]def pairwise_sort(*arrays: tuple[np.ndarray, np.ndarray]):
+ """Aligns several (true, predicted) array pairs against the globally sorted
+ true values; positions not covered by a pair are filled with NaN."""
+ true_sorted = np.concatenate([true.flatten() for true, pred in arrays])
+ empty = np.empty(shape=true_sorted.shape)
+ empty[:] = np.nan
+
+ idx = np.argsort(true_sorted)
+ true_sorted = true_sorted[idx]
+
+ i = 0
+ out = list()
+
+ for _, pred in arrays:
+ copy_empty = empty.copy()
+ copy_empty[i : i + len(pred)] = pred
+ i += len(pred)
+
+ copy_empty = copy_empty[idx]
+
+ out.append(copy_empty)
+
+ return out, true_sorted
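+
+# A small sketch of the alignment behaviour (arrays invented for illustration):
+_preds, _truth = pairwise_sort(
+    (np.array([3.0, 1.0]), np.array([2.9, 1.2])),
+    (np.array([2.0]), np.array([2.1])),
+)
+# _truth is the sorted union [1.0, 2.0, 3.0]; _preds[0] == [1.2, nan, 2.9]
+# and _preds[1] == [nan, 2.1, nan] align each prediction with that order
+assert list(_truth) == [1.0, 2.0, 3.0]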
+
+
+[docs]def evaluate_model(
+ training_data: ml_model_datatypes.TrainingData,
+ model: Union[CasadiPredictor, SerializedMLModel],
+ metric: Callable = None,
+ show_plot: bool = True,
+ save_path: Optional[Path] = None,
+):
+ """Tests the Model on test data"""
+
+ if metric is None:
+ # default metric: squared error
+ metric = lambda x: x * x
+
+ # make model executable
+ if isinstance(model, SerializedMLModel):
+ model_ = casadi_predictors[model.model_type](model)
+ else:
+ model_ = model
+
+ # make the predictions
+ outputs = training_data.training_outputs.columns
+
+ train_pred = predict_array(
+ df=training_data.training_inputs, ml_model=model_, outputs=outputs
+ )
+ valid_pred = predict_array(
+ df=training_data.validation_inputs, ml_model=model_, outputs=outputs
+ )
+ test_pred = predict_array(
+ df=training_data.test_inputs, ml_model=model_, outputs=outputs
+ )
+ train_error = training_data.training_outputs - train_pred
+ valid_error = training_data.validation_outputs - valid_pred
+ test_error = training_data.test_outputs - test_pred
+
+ for name in outputs:
+ train_score = calc_scores(train_error[name], metric=metric)
+ valid_score = calc_scores(valid_error[name], metric=metric)
+ test_score = calc_scores(test_error[name], metric=metric)
+ total_score = sum([train_score, valid_score, test_score]) / 3
+
+ # plot
+ y_pred_sorted, y_true_sorted = pairwise_sort(
+ (training_data.training_outputs[name].values, train_pred[name]),
+ (training_data.validation_outputs[name].values, valid_pred[name]),
+ (training_data.test_outputs[name].values, test_pred[name]),
+ )
+
+ scale = range(len(y_true_sorted))
+
+ with basic.Style() as style:
+ fig, ax = basic.make_fig(style=style)
+ ax: plt.Axes
+ for y, c, label in zip(
+ y_pred_sorted,
+ [basic.EBCColors.red, basic.EBCColors.green, basic.EBCColors.blue],
+ ["Train", "Valid", "Test"],
+ ):
+ if not all(np.isnan(y)):
+ ax.scatter(scale, y, s=0.6, color=c, label=label)
+
+ ax.scatter(
+ scale,
+ y_true_sorted,
+ s=0.6,
+ color=basic.EBCColors.dark_grey,
+ label="True",
+ )
+ ax.set_xlabel("Samples")
+ ax.legend(loc="upper left")
+ ax.yaxis.grid(linestyle="dotted")
+ ax.set_title(
+ f"{name}\ntest_score={test_score.__round__(4)}\ntotal_score={total_score.__round__(4)}"
+ )
+ if show_plot:
+ fig.show()
+ if save_path is not None:
+ fig.savefig(fname=Path(save_path, f"evaluation_{name}.png"))
+
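+# Example usage (hypothetical paths; 'training_data' is assumed to be an
+# existing TrainingData instance): evaluate a serialized model and store the
+# per-output scatter plots.
+# serialized = SerializedMLModel.load_serialized_model_from_file("ann.json")
+# evaluate_model(training_data, serialized, show_plot=False, save_path=Path("results"))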
+# from agentlib_mpc.utils.plotting.basic import ColorTuple
+from pathlib import Path
+from typing import Literal
+
+import matplotlib.pyplot as plt
+import pandas as pd
+
+from agentlib_mpc.utils import TIME_CONVERSION
+from agentlib_mpc.utils.analysis import load_mpc
+from agentlib_mpc.utils.plotting.basic import (
+ ColorTuple,
+ EBCColors,
+ Float0to1,
+ make_fig,
+ Style,
+)
+
+
+[docs]def interpolate_colors(progress: Float0to1, colors: list[ColorTuple]) -> ColorTuple:
+ """
+ Interpolates colors based on a sample number. To be used when plotting many mpc
+ predictions in one plot, so a fade from old to new predictions can be seen.
+
+ Original credit to Max Berktold.
+
+ Args:
+ progress: Position of the current sample between 0 and 1.
+ colors: List of color tuples to fade between.
+
+ Returns:
+ The interpolated color tuple.
+
+ """
+ if progress <= 0:
+ return colors[0]
+ elif progress >= 1:
+ return colors[-1]
+
+ num_colors = len(colors)
+ interval = 1 / (num_colors - 1)
+ color_index = int(progress / interval)
+ t = (progress - interval * color_index) / interval
+ color1 = colors[color_index]
+ color2 = colors[color_index + 1]
+ return (
+ (1 - t) * color1[0] + t * color2[0],
+ (1 - t) * color1[1] + t * color2[1],
+ (1 - t) * color1[2] + t * color2[2],
+ )
+
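+# Example usage: fade from red over dark grey to light grey across ten
+# predictions, where progress runs from 0 (oldest) to 1 (newest).
+# for i in range(10):
+#     color = interpolate_colors(
+#         progress=i / 9,
+#         colors=[EBCColors.red, EBCColors.dark_grey, EBCColors.light_grey],
+#     )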
+
+[docs]def plot_mpc(
+ series: pd.Series,
+ ax: plt.Axes,
+ plot_actual_values: bool = True,
+ plot_predictions: bool = False,
+ step: bool = False,
+ convert_to: Literal["seconds", "minutes", "hours", "days"] = "seconds",
+):
+ """
+ Plots a single column of an MPC results Dataframe on the given Axes.
+
+ Args:
+ series: A column of the MPC results Dataframe
+ ax: which Axes to plot on
+ plot_actual_values: whether the closed loop actual values at the start of each
+ optimization should be plotted (default True)
+ plot_predictions: whether all predicted trajectories should be plotted
+ step: whether to plot the series with a step style ("steps-post") instead of linear interpolation
+ convert_to: Will convert the index of the returned series to the specified unit
+ (seconds, minutes, hours, days)
+
+ Returns:
+
+ """
+ number_of_predictions: int = series.index.unique(level=0).shape[0]
+
+ # stores the first value of each prediction. In the case of a control_variable,
+ # this will give the optimal control output the mpc determined this step, or in
+ # the case of a state, this will give the measurement it worked with
+ actual_values: dict[float, float] = {}
+
+ for i, (time_seconds, prediction) in enumerate(series.groupby(level=0)):
+ prediction: pd.Series = prediction.dropna().droplevel(0)
+
+ time_converted = time_seconds / TIME_CONVERSION[convert_to]
+ if plot_actual_values:
+ actual_values[time_converted] = prediction.at[0]
+
+ prediction.index = (prediction.index + time_seconds) / TIME_CONVERSION[
+ convert_to
+ ]
+
+ if plot_predictions:
+ progress = i / number_of_predictions
+ prediction_color = interpolate_colors(
+ progress=progress,
+ colors=[EBCColors.red, EBCColors.dark_grey, EBCColors.light_grey],
+ )
+ if not step:
+ prediction.plot(
+ ax=ax, color=prediction_color, linewidth=0.7, label="_nolegend_"
+ )
+ else:
+ prediction.plot(
+ ax=ax,
+ color=prediction_color,
+ drawstyle="steps-post",
+ linewidth=0.7,
+ label="_nolegend_",
+ )
+
+ if plot_actual_values:
+ actual_series = pd.Series(actual_values)
+ if not step:
+ actual_series.plot(ax=ax, color="black", linewidth=1.5)
+ else:
+ actual_series.plot(
+ ax=ax, color="black", linewidth=1.5, drawstyle="steps-post"
+ )
+
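+# Example usage (hypothetical file name): plot a temperature column with all
+# predictions faded from old to new.
+# data = load_mpc("results/mpc.csv")
+# fig, ax = plt.subplots()
+# plot_mpc(data["variable"]["T"], ax=ax, plot_predictions=True, convert_to="hours")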
+
+[docs]def plot_admm(
+ series: pd.Series,
+ ax: plt.Axes,
+ plot_actual_values: bool = True,
+ plot_predictions: bool = False,
+ step: bool = False,
+ convert_to: Literal["seconds", "minutes", "hours", "days"] = "seconds",
+):
+ """
+ Like plot_mpc, but only plots the final ADMM iteration of each time step.
+
+ Args:
+ series: A column of the MPC results Dataframe
+ ax: which Axes to plot on
+ plot_actual_values: whether the closed loop actual values at the start of each
+ optimization should be plotted (default True)
+ plot_predictions: whether all predicted trajectories should be plotted
+ step: whether to plot the series with a step style ("steps-post") instead of linear interpolation
+ convert_to: Will convert the index of the returned series to the specified unit
+ (seconds, minutes, hours, days)
+
+ Returns:
+
+ """
+ grid = series.index.get_level_values(2).unique()
+ tail_length = len(grid[grid >= 0])
+ series_final_predictions = series.groupby(level=0).tail(tail_length).droplevel(1)
+ return plot_mpc(
+ series=series_final_predictions,
+ ax=ax,
+ plot_actual_values=plot_actual_values,
+ plot_predictions=plot_predictions,
+ step=step,
+ convert_to=convert_to,
+ )
+
+import itertools
+import logging
+from typing import Union, Iterable, Sequence, List
+from numbers import Real
+
+import numpy as np
+import pandas as pd
+
+from agentlib_mpc.data_structures.interpolation import InterpolationMethods
+
+logger = logging.getLogger(__name__)
+
+
+[docs]def sample_values_to_target_grid(
+ values: Iterable[float],
+ original_grid: Iterable[float],
+ target_grid: Iterable[float],
+ method: Union[str, InterpolationMethods],
+) -> list[float]:
+ """Interpolates the given values from the original grid onto the target
+ grid, using the specified interpolation method."""
+ if method == InterpolationMethods.linear:
+ return np.interp(target_grid, original_grid, values).tolist()
+ elif method == InterpolationMethods.spline3:
+ raise NotImplementedError("Spline interpolation is currently not supported")
+ elif method == InterpolationMethods.previous:
+ return interpolate_to_previous(target_grid, original_grid, values)
+ elif method == InterpolationMethods.mean_over_interval:
+ values = np.array(values)
+ original_grid = np.array(original_grid)
+ result = []
+ for i, j in pairwise(target_grid):
+ slicer = np.logical_and(original_grid >= i, original_grid < j)
+ result.append(values[slicer].mean())
+ # take last value twice, so the length is consistent with the other resampling
+ # methods
+ result.append(result[-1])
+ return result
+ else:
+ raise ValueError(
+ f"Chosen 'method' {method} is not a valid method. "
+ f"Currently supported: linear, spline, previous"
+ )
+
+
+[docs]def sample(
+ trajectory: Union[Real, pd.Series, list[Real], dict[Real, Real]],
+ grid: Union[list, np.ndarray],
+ current: float = 0,
+ method: str = "linear",
+) -> list:
+ """
+ Obtain the specified portion of the trajectory.
+
+ Args:
+ trajectory: The trajectory to be sampled. Scalars will be
+ expanded onto the grid. Lists need to exactly match the provided
+ grid. Otherwise, a pandas Series is accepted with the timestamp as index. A
+ dict with the keys as time stamps is also accepted.
+ current: start time of requested trajectory
+ grid: target interpolation grid in seconds in relative terms (i.e.
+ starting from 0 usually)
+ method: interpolation method, currently accepted: 'linear',
+ 'spline', 'previous', 'mean_over_interval'
+
+ Returns:
+ Sampled list of values.
+
+ Takes a slice of the trajectory from the current time step with the
+ specified length and interpolates it to match the requested sampling.
+ If the requested horizon is longer than the available data, the last
+ available value will be used for the remainder.
+
+ Raises:
+ ValueError
+ TypeError
+ """
+ target_grid_length = len(grid)
+ if isinstance(trajectory, (float, int)):
+ # return constant trajectory for scalars
+ return [trajectory] * target_grid_length
+ if isinstance(trajectory, list):
+ # return lists of matching length without timestamps
+ if len(trajectory) == target_grid_length:
+ return trajectory
+ raise ValueError(
+ f"Passed list with length {len(trajectory)} "
+ f"does not match target ({target_grid_length})."
+ )
+ if isinstance(trajectory, pd.Series):
+ trajectory = trajectory.dropna()
+ source_grid = np.array(trajectory.index)
+ values = trajectory.values
+ elif isinstance(trajectory, dict):
+ source_grid = np.array(list(trajectory))
+ values = np.array(list(trajectory.values()))
+ else:
+ raise TypeError(
+ f"Passed trajectory of type '{type(trajectory)}' " f"cannot be sampled."
+ )
+ target_grid = np.array(grid) + current
+
+ # expand scalar values
+ if len(source_grid) == 1:
+ if isinstance(trajectory, list):
+ return [trajectory[0]] * target_grid_length
+ # if not list, assume it is a series
+ else:
+ return [trajectory.iloc[0]] * target_grid_length
+
+ # skip resampling if grids are (almost) the same
+ if (target_grid.shape == source_grid.shape) and all(target_grid == source_grid):
+ return list(values)
+ values = np.array(values)
+
+ # check requested portion of trajectory, whether the most recent value in the
+ # source grid is older than the first value in the MHE trajectory
+ if target_grid[0] >= source_grid[-1]:
+ # return the last value of the trajectory if requested sample
+ # starts out of range
+ logger.warning(
+ f"Latest value of source grid %s is older than "
+ f"current time (%s. Returning latest value anyway.",
+ source_grid[-1],
+ current,
+ )
+ return [values[-1]] * target_grid_length
+
+ # determine whether the target grid lies within the available source grid, and
+ # how many entries to extrapolate on either side
+ source_grid_oldest_time: float = source_grid[0]
+ source_grid_newest_time: float = source_grid[-1]
+ source_is_recent_enough: np.ndarray = target_grid < source_grid_newest_time
+ source_is_old_enough: np.ndarray = target_grid > source_grid_oldest_time
+ number_of_missing_old_entries: int = target_grid_length - np.count_nonzero(
+ source_is_old_enough
+ )
+ number_of_missing_new_entries: int = target_grid_length - np.count_nonzero(
+ source_is_recent_enough
+ )
+ # shorten target interpolation grid by extra points that go above or below
+ # available data range
+ target_grid = target_grid[source_is_recent_enough * source_is_old_enough]
+
+ # interpolate data to match new grid
+ sequence_new = sample_values_to_target_grid(
+ values=values, original_grid=source_grid, target_grid=target_grid, method=method
+ )
+
+ # extrapolate sequence with last available value if necessary
+ interpolated_trajectory = (
+ [values[0]] * number_of_missing_old_entries
+ + sequence_new
+ + [values[-1]] * number_of_missing_new_entries
+ )
+
+ return interpolated_trajectory
+
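+# Example: sample a measured trajectory onto a four-point grid starting at
+# t=100. Points beyond the available data reuse the last value:
+# >>> sample({0: 1.0, 50: 2.0, 200: 3.0}, grid=[0, 50, 100, 150], current=100)
+# [2.3333333333333335, 2.6666666666666665, 3.0, 3.0]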
+
+[docs]def pairwise(iterable: Iterable):
+ "s -> (s0,s1), (s1,s2), (s2, s3), ..."
+ a, b = itertools.tee(iterable)
+ next(b, None)
+ return zip(a, b)
+
+
+[docs]def earliest_index(time, arr, stop, start=0):
+ """Helper function for interpolate_to_previous.
+ Finds the current index to which we should forwardfill."""
+ for i in range(start, stop):
+ if arr[i] > time:
+ return i - 1
+ # no grid point is newer than time: forward-fill from the last index
+ return stop - 1
+
+
+[docs]def interpolate_to_previous(
+ target_grid: Iterable[float],
+ original_grid: Iterable[float],
+ values: Sequence[float],
+) -> List[float]:
+ """Interpolates to previous value of original grid, i.e. a forward fill.
+
+ Stand-in for the following scipy code:
+ tck = interpolate.interp1d(list(original_grid), values, kind="previous")
+ result = list(tck(target_grid))
+ """
+ result = []
+ _grid_index = 0
+ stop = len(original_grid)
+ for target_point in target_grid:
+ _grid_index = earliest_index(
+ target_point, original_grid, stop, start=_grid_index
+ )
+ result.append(values[_grid_index])
+ return result
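+
+# Example: forward-fill onto a finer grid.
+# >>> interpolate_to_previous([0, 1, 2, 3], original_grid=[0, 2], values=[10, 20])
+# [10, 10, 20, 20]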
+
The models follow the FMU standard, where variables are divided between inputs, outputs, locals/states and parameters. In this case, our cooler model takes a mass flow as an input ("mDot") and produces the same mass flow as an output to other systems ("mDot_out"). In a more complex setting, the cooler might have an internal PID controller to set the mass flow to its correct value. In that case, "mDot" would be the setpoint of the mass flow and "mDot_out" would be the actual mass flow.
Algebraic equations are explicit assignments to a CasadiOutput. They are considered when simulating the model or when doing MPC with it. Constraints specified as tuples can be of implicit nature; however, they are ignored for simulation. The only limitation on constraints is that variables that make up the upper or lower bound cannot be used as optimization variables in the MPC.
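To make this concrete, here is a minimal sketch of such a model (hypothetical names and values; it follows the CasadiModel pattern documented below, and the exact field syntax may differ between versions):

from agentlib_mpc.models.casadi_model import (
    CasadiInput,
    CasadiModel,
    CasadiModelConfig,
    CasadiOutput,
)

class CoolerConfig(CasadiModelConfig):
    inputs: list[CasadiInput] = [
        # mass flow setpoint coming from the agent
        CasadiInput(name="mDot", value=0.02, unit="kg/s"),
    ]
    outputs: list[CasadiOutput] = [
        # actual mass flow passed on to other systems
        CasadiOutput(name="mDot_out", unit="kg/s"),
    ]

class Cooler(CasadiModel):
    config: CoolerConfig

    def setup_system(self):
        # Algebraic equation: explicit assignment to a CasadiOutput.
        # Used both when simulating and when doing MPC.
        self.mDot_out.alg = self.mDot
        # Implicit constraint as (lower, function, upper); ignored in simulation.
        self.constraints = [(0, self.mDot, 0.05)]
        # Cost function for the MPC, returned by setup_system.
        return self.mDot**2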
Before every step, the simulator gets the current input values from the agent and sets them to the model. After performing the step, the outputs from the model are written to the agent. Since states are by definition internal to the model, they are not set by the agent, and their initial values have to be changed in the model itself. The same goes for parameters.
diff --git a/docs/0.6.4/docs/agentlib_mpc.data_structures.html b/docs/0.6.4/docs/agentlib_mpc.data_structures.html
Collection of parameters which have to be shared across all agents in ADMM.
+Bases: object
Helper class to organize ADMM participants.
+Bases: AgentDictEntry
Holds participating coupling variables (consensus and exchange) of a single +agent in ADMM. Used in the coordinator.
+Bases: StructuredValue
Bases: CouplingVariable
Bases: StructuredValue
Bases: object
Holds naming conventions for different optimization variables / parameters associated with a coupling variable in consensus ADMM.
+ + +Bases: object
Holds information about a phy
+Returns the flattened array of all local variables and their multipliers.
+sources – list of sources that should be included in the update. +By default, all are included.
+flat lists of local variables and multipliers (locals, multipliers)
Returns the primal and dual residual of the last iteration as a tuple of flattened arrays. :param rho: penalty parameter of the ADMM algorithm
+(primal residual, dual residual)
+Returns all agent sources that are registered to this coupling.
+Bases: object
Holds naming conventions for different optimization variables / parameters associated with a coupling variable in exchange ADMM.
+ + +Bases: CouplingVariable
Bases: object
Base Class to specify the structure of an AgentVariable Value. It will +be efficiently sent and deserialized.
+ + + + +Bases: FullVariableReference
Holds info about all variables of an MPC and their role in the optimization +problem.
+ + +Stores all sorts of Dataclasses, Enums or Factories to help with the +CasadiBackend.
+Bases: DiscretizationOptions
extra: str = forbid
ge = 1
le = 9
Bases: str
, Enum
Bases: object
Bases: str
, Enum
Bases: str
, Enum
Bases: NamedTuple
Alias for field number 1
+Alias for field number 0
+Alias for field number 2
+Bases: object
Stores the necessary MX variables created during discretization for +OptimizationParameters.
+Bases: OptParMXContainer
Stores the necessary MX variables created during discretization for +OptimizationVariables.
+Bases: object
Creates a solver given an NLP and an options construct.
+Bases: BaseModel
extra: str = forbid
Bases: str
, Enum
Code-generates an ipopt solver and compiles it. Currently only works on Windows! Requires a batch file that knows how to set up the Visual Studio command line and compile the source code.
+The Path to the .dll file for the compiled solver.
+TypeError –
FileNotFoundError –
RuntimeError –
Bases: object
Dataclass holding the status of a participating agent in DMPC.
+Bases: str
, Enum
Enum used within a DMPC-Coordinator to keep track of the statuses of its +participating agents.
+Bases: str
, Enum
Enum used to keep track of the status of a DMPC-Coordinator.
+Bases: object
Contains specific variables (or time series) of the agent
+ + + + +Bases: object
Dataclass structuring the communication during registration between a +participating agent and the coordinator in DMPC.
+Bases: object
Bases: BaseModel
Class defining the options to discretize an MPC. Can be extended for different +optimization implementations.
+extra: str = allow
Prediction horizon of the MPC.
+ge = 0
Time step of the MPC.
+ge = 0
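For illustration, these two options typically appear in an MPC module config as a small dict (hypothetical values): {"prediction_horizon": 10, "time_step": 900} discretizes the horizon into ten intervals of 900 s each, i.e. 2.5 hours.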
Bases: VariableReference
Bases: str
, Enum
Keep track of the readiness status of the MPC.
+Bases: BaseVariableReference
Returns a list of all variables registered in the var_ref which the MHE can +get from the config with get()
+Bases: VariableReference
Bases: AgentVariable
AgentVariable used to define input variables of MPC.
+Bases: Protocol
Bases: BaseVariableReference
AgentDictEntry
+AgentStatus
+CoordinatorStatus
+OptimizationData
+RegistrationMessage
+BaseVariableReference
+DiscretizationOptions
+FullVariableReference
+InitStatus
+MHEVariableReference
MHEVariableReference.all_variables()
MHEVariableReference.estimated_inputs
MHEVariableReference.estimated_parameters
MHEVariableReference.known_inputs
MHEVariableReference.known_parameters
MHEVariableReference.measured_states
MHEVariableReference.outputs
MHEVariableReference.states
MHEVariableReference.weights_states
MINLPVariableReference
+MPCVariable
+Results
Results.df
VariableReference
+cia_relaxed_results_path()
r_del_u_convention()
stats_path()
CasadiInput
+CasadiModel
CasadiModel.auxiliaries
CasadiModel.differentials
CasadiModel.do_step()
CasadiModel.get()
CasadiModel.get_constraints()
CasadiModel.get_differential_values()
CasadiModel.get_input_values()
CasadiModel.initialize()
CasadiModel.inputs
CasadiModel.output_equations
CasadiModel.outputs
CasadiModel.parameters
CasadiModel.set_differential_values()
CasadiModel.set_output_values()
CasadiModel.setup_system()
CasadiModel.states
CasadiModelConfig
+CasadiOutput
+CasadiParameter
CasadiState
+CasadiVariable
+get_symbolic()
ADMMBackend
+BackendConfig
+OptimizationBackend
OptimizationBackend.config_type
OptimizationBackend.get_lags_per_variable()
OptimizationBackend.model_from_config()
OptimizationBackend.mpc_backend_parameters
OptimizationBackend.register_logger()
OptimizationBackend.results_file_exists()
OptimizationBackend.setup_optimization()
OptimizationBackend.solve()
OptimizationBackend.update_discretization_options()
OptimizationBackend.update_model_variables()
Plugin for the AgentLib which contains modules for MPC, distributed MPC with ADMM +and data-driven modeling.
+Holds the classes for CasADi variables and the CasADi model.
+Bases: CasadiVariable
Class that stores various attributes of control variables.
+Bases: Model
Base Class for CasADi models. To implement your own model, inherit from this class, specify the variables (inputs, outputs, states, parameters) and override the setup_system() method.
+List of all CasadiStates without an associated equation. Common +uses for this are slack variables that appear in cost functions and +constraints of optimization models.
+List of all CasadiStates with an associated differential equation.
+Performing one simulation step +:param t_start: start time for integration +:param t_sample: increment of solver integration
+Returns:
+Get any variable from using name:
+name (str) – The item to get from config by name of Variable. +Hence, item=ModelVariable.name
+The matching variable
+var (ModelVariable)
+AttributeError – If the item was not found in the variables of the + module.
+List of constraints of the form (lower, function, upper).
+Initializes Casadi model. Creates the integrator to be used in +do_step(). The integrator takes the current state and input values as +input and returns the state values at the end of the interval and the +value of the cost function integrated over the interval.
+Get all model inputs as a list
+List of algebraic equations RHS in the form +0 = z - g(x, z, p, … )
+Get all model outputs as a list
+Get all model parameters as a list
+Sets the values for all differential variables. Provided values list MUST +match the order in which differentials are saved, there is no check.
+Sets the values for all outputs. Provided values list MUST match the order +in which outputs are saved, there is no check.
+Get all model states as a list
+Bases: ModelConfig
validate_assignment: bool = True
arbitrary_types_allowed: bool = True
extra: str = forbid
cost_function (casadi.casadi.MX | casadi.casadi.SX | casadi.casadi.DM | casadi.casadi.Sparsity)
outputs (List[agentlib_mpc.models.casadi_model.CasadiOutput])
parameters (List[agentlib_mpc.models.casadi_model.CasadiParameter])
system (casadi.casadi.MX | casadi.casadi.SX | casadi.casadi.DM | casadi.casadi.Sparsity)
include_default_model_variables
» inputs
include_default_model_variables
» outputs
include_default_model_variables
» parameters
include_default_model_variables
» states
include_default_model_variables
include_default_model_variables
include_default_model_variables
include_default_model_variables
We need to both initialize private attributes and call the user-defined model_post_init +method.
+Bases: CasadiVariable
Class that stores various attributes of control variables.
+Bases: CasadiVariable
Class that stores various attributes of parameters.
+Bases: CasadiVariable
Class that stores various attributes of CasADi differential variables.
+Serializes the Variable in json format and returns a string
+Bases: ModelVariable
Base Class for variables used in Casadi Models for simulation and optimization. Implements the standard arithmetic operations, so CasadiVariables can be used in equations.

sym: The symbolic CasADi variable used to define ODEs and optimization problems.
+
Ensures a symbolic MX variable is created with each CasadiVariable +instance, and that its dimensions are consistent.
+Package containing models for agentlib_mpc.
+Holds functionality for ADMM modules.
+Bases: DistributedMPC
This class represents a module participating in a fully decentralized +Consensus-ADMM optimization for distributed MPC. +Agents autonomously send the values of their coupling variables, register +other participants and perform update steps.
Gives an iterator of all ADMMParticipation that are registered.
+Checks whether all variables of var_ref are contained in the model. +Returns names of model variables not contained in the var_ref, +sorted by keys: ‘states’, ‘inputs’, ‘outputs’, ‘parameters’.
+Collects updated AgentVariables only of the coupling variables.
+Gets all variables noted in the var ref and puts them in a flat +dictionary.
+Sets the participating status of all participating agents to +False.
+Get the values of all agents for a coupling variable.
+Read the results that were saved from the optimization backend and +returns them as Dataframe.
+(results, stats) tuple of Dataframes.
+Puts received variables in the correct queue, depending on +registration status of this agent.
+This abstract method must be implemented in order to sync the module +with the other processes of the agent and the whole MAS.
+Set the participation to true for the given coupling input.
+Dictionary containing all other agents this agent shares variables with. +Ordered in a two-layer form, with variables at the first layer and +agents at the second layer. Contains ADMMParticipation objects at +the base layer.
+Examples
+self.registered_participants = +{‘coupling_var_1’: {‘src_of_agent1’: status_1,
+++++‘src_of_agent2’: status_2, +‘src_of_agent3’: status_3}
++
+- ‘coupling_var_1’: {‘src_of_agent3’: status_a,
- +
‘src_of_agent2’: status_b, +‘src_of_agent4’: status_c}
+
} +here, <status> refers to an ADMMParticipation object.
+Sets the ready status of all participating agents to False.
+Sets the coupling outputs to the data_broker, which automatically sends them.
+solution – Output dictionary from optimization_backend.solve().
+Sends an admm coupling variable through the data_broker and sets its +value locally
Performs the update of the Lagrange multipliers: lambda^(k+1) := lambda^k - rho * (z - x_i)
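As a plain-numpy illustration of this dual update (a standalone sketch with hypothetical names, not the module's implementation):

import numpy as np

def dual_update(lam: np.ndarray, z: np.ndarray, x_i: np.ndarray, rho: float) -> np.ndarray:
    # lambda^(k+1) = lambda^k - rho * (z - x_i), applied elementwise to the
    # mean coupling trajectory z and this agent's local trajectory x_i
    return lam - rho * (z - x_i)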
+Bases: DistributedMPCConfig
arbitrary_types_allowed: bool = True
validate_assignment: bool = True
extra: str = forbid
frozen: bool = True
Maximum computation + waiting time for one iteration.
+ge = 0
Maximum number of ADMM iterations before termination of control step.
+ge = 0
Penalty factor of the ADMM algorithm. Should be equal for all agents.
+ge = 0
Time spent on registration before each optimization
+ge = 0
Ensures no user provided variable is named with the reserved ADMM prefix.
+We need to both initialize private attributes and call the user-defined model_post_init +method.
+Bases: object
Holds data for the status of a shared variable of another system.
+ + + + +Bases: ADMM
This abstract method must be implemented in order to sync the module +with the other processes of the agent and the whole MAS.
+Timeout value used to wait one on registration. Waits in real time +(time.sleep)
Timeout value used to wait on registration. Waits in real time (time.sleep).
+Bases: ADMMConfig
arbitrary_types_allowed: bool = True
validate_assignment: bool = True
extra: str = forbid
frozen: bool = True
We need to both initialize private attributes and call the user-defined model_post_init +method.
+Bases: Enum
Module implementing the coordinated ADMM module, which works together +with a coordinator.
+Bases: MiniEmployee
, ADMM
Module to implement an ADMM agent, which is guided by a coordinator. +Only optimizes based on callbacks.
+ + +Callback that answers the coordinators init_iteration flag.
+Performs the optimization given the mean trajectories and multipliers from the +coordinator. +Replies with the local optimal trajectories. +Returns:
+Bases: MiniEmployeeConfig
, ADMMConfig
arbitrary_types_allowed: bool = True
validate_assignment: bool = True
extra: str = forbid
frozen: bool = True
check_valid_fields
» shared_variable_fields
check_valid_fields
Asserts that couplings and exchange have values, as they are needed for +initial guess.
+We need to both initialize private attributes and call the user-defined model_post_init +method.
+alias of coup_input
Defines classes that coordinate an ADMM process.
+Bases: Coordinator
Deletes all files this module created.
+Override this method, if your module creates e.g. results files etc.
+Saves the results of a local optimization. +:param variable:
+Returns:
+Bases: CoordinatorConfig
Hold the config for ADMMCoordinator
+arbitrary_types_allowed: bool = True
validate_assignment: bool = True
extra: str = forbid
frozen: bool = True
Absolute stopping criterion.
+Maximum number of ADMM iterations before termination of control step.
+Absolute dual stopping criterion.
+Factor to vary the penalty parameter with.
+When the primal residual is x times higher, vary the penalty parameter and vice versa.
+Penalty factor of the ADMM algorithm. Should be equal for all agents.
+Prediction horizon of participating agents.
+Absolute primal stopping criterion.
+Time spent on registration before each optimization
+Relative stopping criterion.
+Sampling interval for control steps. If None, will be the same as time step. Does not affect the discretization of the MPC, only the interval with which there will be optimization steps.
+When True, saves the solve stats to a file.
+File name for the solve stats.
Sampling interval between two control steps. Will be used in the discretization for MPC.
If True, use abs_tol and rel_tol; if False, use prim_tol and dual_tol.
+wait_on_start_iterations
+We need to both initialize private attributes and call the user-defined model_post_init +method.
+ADMM
ADMM.admm_step()
ADMM.all_coupling_statuses()
ADMM.assert_mpc_variables_are_in_model()
ADMM.collect_couplings_for_optimization()
ADMM.collect_variables_for_optimization()
ADMM.cons_and_exchange
ADMM.deregister_all_participants()
ADMM.get_participants_values()
ADMM.get_results()
ADMM.participant_callback()
ADMM.penalty_factor_var
ADMM.process()
ADMM.receive_participant()
ADMM.registered_participants
ADMM.reset_participants_ready()
ADMM.send_coupling_values()
ADMM.send_coupling_variable()
ADMM.update_lambda()
ADMM.var_ref
ADMMConfig
+ADMMParticipation
+LocalADMM
+LocalADMMConfig
+ModuleStatus
+ParticipantStatus
+ADMMCoordinator
+ADMMCoordinatorConfig
ADMMCoordinatorConfig.abs_tol
ADMMCoordinatorConfig.admm_iter_max
ADMMCoordinatorConfig.dual_tol
ADMMCoordinatorConfig.penalty_change_factor
ADMMCoordinatorConfig.penalty_change_threshold
ADMMCoordinatorConfig.penalty_factor
ADMMCoordinatorConfig.prediction_horizon
ADMMCoordinatorConfig.primal_tol
ADMMCoordinatorConfig.registration_period
ADMMCoordinatorConfig.rel_tol
ADMMCoordinatorConfig.sampling_time
ADMMCoordinatorConfig.save_iter_interval
ADMMCoordinatorConfig.save_solve_stats
ADMMCoordinatorConfig.solve_stats_file
ADMMCoordinatorConfig.time_step
ADMMCoordinatorConfig.use_relative_tolerances
ADMMCoordinatorConfig.wait_time_on_start_iters
ADMMCoordinatorConfig.default_sampling_time
ADMMCoordinatorConfig.model_post_init()
ADMMCoordinatorConfig.solve_stats_file_is_csv
Bases: BaseModule
Class implementing the base coordination for distributed MPC
+Returns: +True, if there are no busy agents, else False
Processes an agent's InitIteration confirmation. :param variable:
+Returns:
+Saves the results of a local optimization. +:param variable:
+Returns:
+Bases: BaseModuleConfig
arbitrary_types_allowed: bool = True
validate_assignment: bool = True
extra: str = forbid
frozen: bool = True
check_valid_fields
» shared_variable_fields
Maximum number of iterations
+check_valid_fields
Maximum wait time for subsystems in seconds
+We need to both initialize private attributes and call the user-defined model_post_init +method.
+Bases: BaseModule
Callback that processes the coordinator's 'startIteration' flag. :param variable:
+Performs the optimization given the information from the coordinator. +Replies with local information. +Returns:
+This method is called in every computation step before the optimization starts. +Overwrite this method in a derived subclass if you want to take some actions each time before the optimal control problem is solved.
+Bases: BaseModuleConfig
arbitrary_types_allowed: bool = True
validate_assignment: bool = True
extra: str = forbid
frozen: bool = True
check_valid_fields
» shared_variable_fields
Defines the agent's coordinator.
+Interval in seconds after which a registration attempt is made.
+ge = 0
Wait time between signup_requests
+check_valid_fields
We need to both initialize private attributes and call the user-defined model_post_init +method.
+Bases: MPC
Base class which defines common interfaces among all +distributed mpc approaches (either optimization based, +game theory based or some other).
+Bases: MPCConfig
Base config class with common configurations
+arbitrary_types_allowed: bool = True
validate_assignment: bool = True
extra: str = forbid
frozen: bool = True
We need to both initialize private attributes and call the user-defined model_post_init +method.
+Holds the base class for MPCs.
+Bases: BaseModule
A model predictive controller. +More info to follow.
+Checks whether all variables of var_ref are contained in the model. +Returns names of model variables not contained in the var_ref, +sorted by keys: ‘states’, ‘inputs’, ‘outputs’, ‘parameters’.
+Helper function for assert assert_mpc_variables_are_in_model. Asserts +the variables of the var_ref corresponding to ref_key are a subset of +a list of names provided (usually obtained from the model) and prints +out an error if false. Returns the portion of model_names that are +not in the given var_ref.
+Deletes all files this module created.
+Override this method, if your module creates e.g. results files etc.
+Gets all variables noted in the var ref and puts them in a flat +dictionary.
+Read the results that were saved from the optimization backend and +returns them as Dataframe.
+(results, stats) tuple of Dataframes.
+Getter for current simulation model
+Current simulation model
+agentlib.model
+This method is called in every computation step before the optimization starts. +Overwrite this method in a derived subclass if you want to take some actions +each time before the optimal control problem is solved.
+This abstract method must be implemented in order to sync the module +with the other processes of the agent and the whole MAS.
+Re-initializes the optimization backend with new parameters.
+Read the provided csv-file as an MPC results file. +:param results_file: File path
+results, stats +results is the Dataframe with all inputs and outputs of the MPC +optimizations. +stats is the Dataframe with matching solver stats
+Registers the init_optimization callback to all parameters which +cannot be changed without recreating the optimization problem.
+Takes the solution from optimization backend and sends the first +step to AgentVariables.
+Bases: BaseModuleConfig
Pydantic data model for MPC configuration parser
+arbitrary_types_allowed: bool = True
validate_assignment: bool = True
extra: str = forbid
frozen: bool = True
controls (List[agentlib_mpc.data_structures.mpc_datamodels.MPCVariable])
inputs (List[agentlib_mpc.data_structures.mpc_datamodels.MPCVariable])
outputs (List[agentlib_mpc.data_structures.mpc_datamodels.MPCVariable])
parameters (List[agentlib_mpc.data_structures.mpc_datamodels.MPCVariable])
states (List[agentlib_mpc.data_structures.mpc_datamodels.MPCVariable])
check_valid_fields
» shared_variable_fields
List of all control variables of the MPC.
+List of all input variables of the MPC. Includes predictions for disturbances, set_points, dynamic constraint boundaries etc.
+List of all shared outputs of the MPC.
+List of model parameters of the MPC. They are constant over the horizon. Parameters not listed here will have their default from the model file.
+Prediction horizon of the MPC.
+ge = 0
Sampling interval for control steps. If None, will be the same as time step. Does not affect the discretization of the MPC, only the interval with which there will be optimization steps.
+Sets the full output time series to the data broker.
+check_valid_fields
List of all differential states of the MPC. The entries can define the boundaries and the source for the measurements
+Time step of the MPC.
+ge = 0
We need to both initialize private attributes and call the user-defined model_post_init +method.
+This package contains all modules for the +distributed model predictive control using multi agent systems.
+It contains classes for local optimization and global coordination.
+ + +Bases: OptimizationQuantity
Declares a group of optimization parameters that serve a purpose in +the optimization problem. Typical groups are uncontrollable inputs or +physical parameters.
+denotation – The key of the variable, e.g. ‘p’, ‘d’, etc. Use this +key in the discretization function to add the parameter at +different stages of the optimization problem.
variables – A list of CasadiVariables including all parameters +within this category.
ref_list – A list of names indicating which parameters in full_list +are AgentVariables and need to be updated before each +optimization.
use_in_stage_function – If False, the parameter is not added to the +stage function. If True, the variable needs to be provided to +the stage function at every point in the discretization function.
assert_complete – If True, throws an error if the ref_list does +not contain all variables.
Bases: object
Bases: OptimizationQuantity
Declares a group of optimization variables that serve a purpose in +the optimization problem. Typical groups are states, the control +inputs or slack variables.
+binary – Flag, whether these variables are binary
denotation – The key of the variable, e.g. ‘X’, ‘U’, etc. Use +this key in the discretization function to add the variable at +different stages of the optimization problem. The optimal value +of these variables will also be mapped to this key.
variables – A list of +CasadiVariables or an MX/SX vector including all variables +within this category.
ref_list – A list of names indicating which variables +in full_list are AgentVariables and need to be updated before +each optimization.
use_in_stage_function – If False, the variable is not +added to the stage function. If True, the variable needs to be +provided to the stage function at every point in the +discretization function.
assert_complete – If True, throws an error if the ref_list does +not contain all variables.
Bases: OptimizationBackend
OptimizationBackend for solving the optimization problem with CasADi. +Requires the model to be a CasADi model.
+alias of CasadiBackendConfig
Save the results of solve into a dataframe at each time step.
+Example results dataframe:
+value_type variable … lower +variable T_0 T_0_slack … T_0_slack mDot_0 +time_step … +2 0.000000 298.160000 NaN … NaN NaN
+++101.431499 297.540944 -149.465942 … -inf 0.0 +450.000000 295.779780 -147.704779 … -inf 0.0 +798.568501 294.720770 -146.645769 … -inf 0.0
+
results –
now –
Returns:
+Performs all necessary steps to make the solve
method usable.
+To do this, it calls several auxiliary functions. These functions can
+be overloaded to change the resulting optimization problem.
var_ref – class with variable name lists sorted by function in the mpc.
+Solves the optimization problem given the current values of the +corresponding AgentVariables and system time. The standardization of +return values is a work in progress.
+now – Current time used for interpolation of input trajectories.
current_vars – Dict of AgentVariables holding the values relevant to +the optimization problem. Keys are the names
A dataframe with all optimization variables over their respective +grids. Depending on discretization, can include many nan’s, so care +should be taken when using this, e.g. always use dropna() after +accessing a column.
Example:

variables   mDot  |  T_0  |  slack_T
time
0           0.1   |  298  |  nan
230         nan   |  297  |  3
470         nan   |  296  |  2
588         nan   |  295  |  1
700         0.05  |  294  |  nan
930         nan   |  294  |  0.1
Bases: BackendConfig
extra: str = forbid
Path to a batch file, which can compile C code on Windows.
+Boolean to turn JIT of the optimization problems on or off.
+Holds classes that implement different transcriptions of the OCP
+Bases: ABC
opt_vars: holds symbolic variables during problem creation
opt_vars_lb: holds symbolic variables during problem creation
opt_vars_ub: holds symbolic variables during problem creation
initial_guess: holds symbolic variables during problem creation
opt_pars: holds symbolic variables during problem creation
constraints: holds symbolic variables during problem creation
constraints_lb: holds symbolic variables during problem creation
constraints_ub: holds symbolic variables during problem creation
objective_function: cost function during problem creation
mpc_opt_vars (dict): holds the symbolic variables and grids during problem creation, sorted by type as in system_variables
mpc_opt_pars (dict): holds the symbolic variables and grids during problem creation, sorted by type as in system_parameters
+Add a constraint to the optimization problem. If no bounds are given, +adds an equality constraint.
+Create an optimization parameter and append to all the associated lists.
+denotation[str]: the key of the parameter, e.g. ‘P’, ‘Q’, … +dimension[int]: the dimension of the parameter +post_den[str]: string to add to casadi MX after denotation (for debugging)
+Create an optimization variable and append to all the associated +lists. If lb or ub are given, they override the values provided at +runtime! The usual application of this is, to fix the initial value +of a state to a parameter.
+quantity – corresponding system variable
lb – lower bound of the variable
ub – upper bound of the variable
guess – default for the initial guess
post_den – string to add to casadi MX after denotation (for debugging)
List specifying for every optimization variable, whether it is binary.
+Function creating mapping functions between the MPC variables ordered +by type (as defined in declare_quantities and the raw input/output +vector of the CasADi NLP.
+Initializes the trajectory optimization problem, creating all symbolic +variables of the OCP, the mapping function and the numerical solver.
+The nlp dict that casadi solvers need for instantiation
+Solves the discretized trajectory optimization problem.
+mpc_inputs – Casadi Matrices specifying the input of all different types +of optimization parameters. Matrices consist of different variable rows +and have a column for each time step in the discretization. +There are separate matrices for each input type (as defined in the +System), and also for the upper and lower boundaries of variables +respectively.
+variable and parameter over the prediction horizon, as well as solve +statistics.
+Bases: object
Holds the System class, which knows the model
+Bases: ABC
Examples
+class MySystem(System):
+++# variables +states: OptimizationVariable +controls: OptimizationVariable +algebraics: OptimizationVariable +outputs: OptimizationVariable
+# parameters +non_controlled_inputs: OptimizationParameter +model_parameters: OptimizationParameter +initial_state: OptimizationParameter
+# dynamics +model_constraints: Constraint +cost_function: ca.MX +ode: ca.MX
+def initialize(self, model: CasadiModel, var_ref: VariableReference):
++++
+- self.states = OptimizationVariable.declare(
- +
denotation=”state”, +variables=model.get_states(var_ref.states), +ref_list=var_ref.states, +assert_complete=True,
+)
+.
+
)
+CasADiBackend
+CasadiBackendConfig
+Discretization
Discretization.add_constraint()
Discretization.add_opt_par()
Discretization.add_opt_var()
Discretization.binary_vars
Discretization.create_nlp_in_out_mapping()
Discretization.grid()
Discretization.initialize()
Discretization.nlp
Discretization.only_positive_times_in_results
Discretization.solve()
Results
+Bases: DirectCollocation
Bases: MultipleShooting
Bases: CasADiBaseBackend
, ADMMBackend
Class doing optimization of ADMM subproblems with CasADi.
+Returns the grid on which the coupling variables are discretized.
+Save the results of solve into a dataframe at each time step.
+Example results dataframe:
value_type      variable                 ...      lower
variable        T_0         T_0_slack    ...  T_0_slack  mDot_0
time_step                                ...
2  0.000000     298.160000  NaN          ...  NaN        NaN
   101.431499   297.540944  -149.465942  ...  -inf       0.0
   450.000000   295.779780  -147.704779  ...  -inf       0.0
   798.568501   294.720770  -146.645769  ...  -inf       0.0
+
results –
now –
Returns:
+alias of CasadiADMMSystem
Bases: FullSystem
Bases: System
Bases: CasADiBackend
Class doing optimization of ADMM subproblems with CasADi.
+alias of BaseSystem
Bases: object
Bases: Discretization
Initializes the trajectory optimization problem, creating all symbolic +variables of the OCP, the mapping function and the numerical solver.
+Bases: Discretization
Initializes the trajectory optimization problem, creating all symbolic +variables of the OCP, the mapping function and the numerical solver.
+Bases: CasADiBackend
Class doing optimization of ADMM subproblems with CasADi.
+alias of FullSystem
Bases: DirectCollocation
Bases: BaseSystem
Bases: MultipleShooting
Bases: CasADiBackend
Class doing optimization of ADMM subproblems with CasADi.
+alias of CasadiMINLPSystem
Bases: BaseSystem
Bases: DirectCollocation
BaseSystem
+CasADiBaseBackend
+CollocationMatrices
+DirectCollocation
+MultipleShooting
+Bases: OptimizationBackend
Base class for implementations of optimization backends for ADMM +algorithms.
+Returns the grid on which the coupling variables are discretized.
+Bases: BaseModel
extra: str = forbid
Checks whether the overwrite-results settings are valid, and deletes existing result files if applicable.
+Bases: ABC
Base class for all optimization backends. OptimizationBackends are a +plugin for the ‘mpc’ module. They provide means to setup and solve the +underlying optimization problem of the MPC. They also can save data of +the solutions.
+alias of BackendConfig
Returns the name of variables which include lags and their lag in seconds. +The MPC module can use this information to save relevant past data of lagged +variables
+Registers a logger, can be used to use the module logger
+Checks if the results file already exists, and if not, creates it with +headers.
+Performs all necessary steps to make the solve
method usable.
var_ref – Variable Reference that specifies the role of each model variable +in the mpc
+Solves the optimization problem given the current values of the +corresponding AgentVariables and system time. The standardization of +return values is a work in progress.
+now – Current time used for interpolation of input trajectories.
current_vars – Dict of AgentVariables holding the values relevant to +the optimization problem. Keys are the names
A dataframe with all optimization variables over their respective +grids. Depending on discretization, can include many nan’s, so care +should be taken when using this, e.g. always use dropna() after +accessing a column.
Example:

variables   mDot  |  T_0  |  slack_T
time
0           0.1   |  298  |  nan
230         nan   |  297  |  3
470         nan   |  296  |  2
588         nan   |  295  |  1
700         0.05  |  294  |  nan
930         nan   |  294  |  0.1
Gets the results of an optimization at a time step.

index_offset – Determines how the index will be updated when loading the data.
The offset will be subtracted from the time-index. This is useful for results
of realtime systems, where the time value will be a unix time stamp and we
want to cut the number down to something understandable. For example, if the
time index (level 0 of the input Dataframe) is [105, 115, 125] and we give an
index_offset of 100, the data will be handled as if the index was [5, 15, 25].
If "auto" or True is provided as an argument, the index will be modified to
start at 0. If 0 or False are provided, no modifications will be made.
data – The multi-indexed results data frame from the mpc
time_step – The time step from which results should be shown. +If no exact match, shows closest.
variable – If specified, only returns results +with regard to a certain variable.
iteration – Specifies, from which inner ADMM iteration data should be +from. If negative, counts from last iteration. Default -1.
convert_to – Whether the data should be converted to datetime, minutes etc.
A single-indexed Dataframe of the optimization results +at the specified time step. If variable is not specified, +returns all variables with a double column index, if it +is specified returns only values and/or bounds with +single indexed columns.
+Converts an index from seconds to datetime or another unit +:param convert_to: unit, e.g. minutes, hours, datetime +:param index: pandas index object
+Returns:
+Converts an index of an MPC or ADMM results Dataframe to a different unit, +assuming it is passed in seconds.
+Gets the first values at each time step of a results trajectory.
+Returns the number of iterations at each time instance of the ADMM simulation.
+Returns the time steps at which an MPC step was performed.
+Gets the last values at each time step of a results trajectory.
+Gets the results of an optimization at a time step.
+data – The multi-indexed results data frame from the mpc
time_step – The time step from which results should be shown. +If no exact match, shows closest.
variable – If specified, only returns results +with regard to a certain variable.
variable_type – The type of the variable provided (parameter, variable, lower, …)
index_offset – Determines how the index will be updated when loading the data.
The offset will be subtracted from the time-index. This is useful for results
of realtime systems, where the time value will be a unix time stamp and we
want to cut the number down to something understandable. For example, if the
time index (level 0 of the input Dataframe) is [105, 115, 125] and we give an
index_offset of 100, the data will be handled as if the index was [5, 15, 25].
If "auto" or True is provided as an argument, the index will be modified to
start at 0. If 0 or False are provided, no modifications will be made.

Returns:
A single-indexed Dataframe of the optimization results at the specified time
step. If variable is not specified, returns all variables with a double
column index; if it is specified, returns only values and/or bounds with
single-indexed columns.
+pd.DataFrame
+Package containing utils for agentlib_mpc.
+Modules that defines functions to be used for automatically creating animations of +ADMM convergence
+Loads a residuals csv file in the correct format.
+Plots the final residuals over time.
+Plots the decrease of the residuals over iterations for a time step
+Evaluates the residuals over time. Takes a raw residuals DataFrame and returns a +Dataframe, which has for each time step the number of iterations and the final primal and dual residuals.
+DataFrame with float index (time in seconds) and the columns +(“primal_residual”, “dual_residual”, “iters”)
+Some basic plotting utilities
+Bases: object
Bases: TypedDict
Bases: object
Creates a figure and axes with a given number of rows. If rows is specified, returns a tuple of axes, otherwise a single ax.
+Bases: object
Collection of parameters which have to be shared across all agents in ADMM.
+Bases: object
Helper class to organize ADMM participants.
+Bases: AgentDictEntry
Holds participating coupling variables (consensus and exchange) of a single +agent in ADMM. Used in the coordinator.
+Bases: StructuredValue
Bases: CouplingVariable
Bases: StructuredValue
Bases: object
Holds naming conventions for different optimization variables / parameters associated with a coupling variable in consensus ADMM.
+ + +Bases: object
Holds information about a phy
+Returns the flattened array of all local variables and their multipliers.
+sources – list of sources that should be included in the update. +By default, all are included.
+flat lists of local variables and multipliers (locals, multipliers)
Returns the primal and dual residual of the last iteration as a tuple of flattened arrays. :param rho: penalty parameter of the ADMM algorithm
+(primal residual, dual residual)
+Returns all agent sources that are registered to this coupling.
+Bases: object
Holds naming conventions for different optimization variables / parameters associated with a coupling variable in exchange ADMM.
+ + +Bases: CouplingVariable
Bases: object
Base Class to specify the structure of an AgentVariable Value. It will +be efficiently sent and deserialized.
+ + + + +Bases: FullVariableReference
Holds info about all variables of an MPC and their role in the optimization +problem.
+ + +Creates an instance from a pydantic values dict which includes lists of +AgentVariables with the keys corresponding to ‘states’, ‘inputs’, etc..
+Stores all sorts of Dataclasses, Enums or Factories to help with the +CasadiBackend.
+Bases: DiscretizationOptions
extra: str = forbid
ge = 1
le = 9
Prediction horizon of the MPC.
+ge = 0
Time step of the MPC.
+ge = 0
Bases: str
, Enum
Bases: object
Bases: str
, Enum
Bases: str
, Enum
Bases: NamedTuple
Alias for field number 1
+Alias for field number 0
+Alias for field number 2
+Bases: object
Stores the necessary MX variables created during discretization for +OptimizationParameters.
+Bases: OptParMXContainer
Stores the necessary MX variables created during discretization for +OptimizationVariables.
+Bases: object
Creates a solver given an NLP and an options construct.
+Bases: BaseModel
extra: str = forbid
Bases: str
, Enum
Code-generates an ipopt solver and compiles it. Currently only works on Windows! Requires a batch file that knows how to set up the Visual Studio command line and compile the source code.
+The Path to the .dll file for the compiled solver.
+TypeError –
FileNotFoundError –
RuntimeError –
Bases: object
Dataclass holding the status of a participating agent in DMPC.
+Bases: str
, Enum
Enum used within a DMPC-Coordinator to keep track of the statuses of its +participating agents.
+Bases: str
, Enum
Enum used to keep track of the status of a DMPC-Coordinator.
+Bases: object
Contains specific variables (or time series) of the agent
+ + + + +Bases: object
Dataclass structuring the communication during registration between a +participating agent and the coordinator in DMPC.
+Bases: str
, Enum
Bases: BaseModel
Bases: BaseModel
Bases: Feature
What kind of output this is. If 'absolute', a forward pass of the MLModel will yield the absolute value of the feature at the next time step. If it is 'difference', the difference to the last time step will be generated, so it has to be added again.
+If the output feature is recursive, it will also be used as an input for the model. This is useful for mpc, where we want to model the evolution of states based on their previous value. If false, can be used to model algebraic relationships. Default is true.
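A short sketch of how these two output types are meant to be consumed (hypothetical names; the actual handling lives inside the ML model integration):

def next_value(prediction: float, previous_value: float, output_type: str) -> float:
    if output_type == "absolute":
        # the model already predicts the value at the next time step
        return prediction
    # "difference": the model predicts the change, so add it to the last value
    return previous_value + prediction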
+Bases: str
, Enum
Bases: object
Stores the data which is used to train a model.
+ + +Saves three csv files in the path location. The csv files contain the test, +training and validation data
Defines the order of the columns in which training data should be passed to keras, and in which it is saved for serialization.
+Bases: object
Bases: BaseModel
Class defining the options to discretize an MPC. Can be extended for different +optimization implementations.
+extra: str = allow
Prediction horizon of the MPC.
+ge = 0
Time step of the MPC.
+ge = 0
Bases: VariableReference
Bases: str
, Enum
Keep track of the readyness status of the MPC.
+Bases: BaseVariableReference
Returns a list of all variables registered in the var_ref which the MHE can +get from the config with get()
+Bases: VariableReference
Bases: AgentVariable
AgentVariable used to define input variables of MPC.
+Bases: Protocol
Bases: BaseVariableReference
Plugin for the AgentLib which contains modules for MPC, distributed MPC with ADMM +and data-driven modeling.
+ADMMParameters
+ADMMParticipation
+AgentDictEntry
+AgentToCoordinator
+ConsensusVariable
ConsensusVariable.delta_mean
ConsensusVariable.flat_multipliers()
ConsensusVariable.local_trajectories
ConsensusVariable.mean_trajectory
ConsensusVariable.multipliers
ConsensusVariable.primal_residual
ConsensusVariable.shift_values_by_one()
ConsensusVariable.update_mean_trajectory()
ConsensusVariable.update_multipliers()
CoordinatorToAgent
+CouplingEntry
+CouplingVariable
+ExchangeEntry
+ExchangeVariable
ExchangeVariable.delta_mean
ExchangeVariable.diff_trajectories
ExchangeVariable.local_trajectories
ExchangeVariable.mean_trajectory
ExchangeVariable.multiplier
ExchangeVariable.primal_residual
ExchangeVariable.shift_values_by_one()
ExchangeVariable.update_diff_trajectories()
ExchangeVariable.update_multiplier()
StructuredValue
+VariableReference
+coupling_alias()
exchange_alias()
AgentDictEntry
+AgentStatus
+CoordinatorStatus
+OptimizationData
+RegistrationMessage
+EarlyStoppingCallback
+Feature
+OutputFeature
+OutputType
+TrainingData
+column_order()
name_with_lag()
BaseVariableReference
+DiscretizationOptions
+FullVariableReference
+InitStatus
+MHEVariableReference
MHEVariableReference.all_variables()
MHEVariableReference.estimated_inputs
MHEVariableReference.estimated_parameters
MHEVariableReference.known_inputs
MHEVariableReference.known_parameters
MHEVariableReference.measured_states
MHEVariableReference.outputs
MHEVariableReference.states
MHEVariableReference.weights_states
MINLPVariableReference
+MPCVariable
+Results
Results.df
VariableReference
+cia_relaxed_results_path()
r_del_u_convention()
stats_path()
CasadiMLModel
CasadiMLModel.auxiliaries
CasadiMLModel.bb_outputs
CasadiMLModel.bb_states
CasadiMLModel.config_type
CasadiMLModel.do_step()
CasadiMLModel.get_ml_model_values()
CasadiMLModel.initialize()
CasadiMLModel.make_predict_function_for_mpc()
CasadiMLModel.register_ml_models()
CasadiMLModel.set_with_timestamp()
CasadiMLModel.setup_system()
CasadiMLModel.update_ml_models()
CasadiMLModelConfig
+assert_recursive_outputs_are_states()
compute_dupes()
CasadiInput
+CasadiModel
CasadiModel.auxiliaries
CasadiModel.differentials
CasadiModel.do_step()
CasadiModel.get()
CasadiModel.get_constraints()
CasadiModel.get_differential_values()
CasadiModel.get_input_values()
CasadiModel.initialize()
CasadiModel.inputs
CasadiModel.output_equations
CasadiModel.outputs
CasadiModel.parameters
CasadiModel.set_differential_values()
CasadiModel.set_output_values()
CasadiModel.setup_system()
CasadiModel.states
CasadiModelConfig
CasadiModelConfig.cost_function
CasadiModelConfig.description
CasadiModelConfig.dt
CasadiModelConfig.inputs
CasadiModelConfig.name
CasadiModelConfig.outputs
CasadiModelConfig.parameters
CasadiModelConfig.sim_time
CasadiModelConfig.states
CasadiModelConfig.system
CasadiModelConfig.user_config
CasadiModelConfig.validate_variables
CasadiModelConfig.model_post_init()
CasadiOutput
+CasadiParameter
CasadiState
+CasadiVariable
+get_symbolic()
ANNLayerTypes
+BatchNormalization
+CasadiANN
+CasadiGPR
+CasadiLinReg
+CasadiPredictor
+Dense
+Flatten
+LSTM
+Layer
+CustomGPR
+GPRDataHandlingParameters
+GPRKernelParameters
+GPRParameters
+LinRegParameters
+MLModels
+SerializedANN
+SerializedGPR
+SerializedLinReg
+SerializedMLModel
SerializedMLModel.agentlib_mpc_hash
SerializedMLModel.dt
SerializedMLModel.input
SerializedMLModel.model_type
SerializedMLModel.output
SerializedMLModel.training_info
SerializedMLModel.deserialize()
SerializedMLModel.load_serialized_model()
SerializedMLModel.load_serialized_model_from_dict()
SerializedMLModel.load_serialized_model_from_file()
SerializedMLModel.load_serialized_model_from_string()
SerializedMLModel.save_serialized_model()
SerializedMLModel.serialize()
get_git_revision_short_hash()
ModuleImport
+DataSource
+DataSourceConfig
+BaseMPC
BaseMPC.assert_mpc_variables_are_in_model()
BaseMPC.assert_subset()
BaseMPC.cleanup_results()
BaseMPC.collect_variables_for_optimization()
BaseMPC.do_step()
BaseMPC.get_results()
BaseMPC.model
BaseMPC.pre_computation_hook()
BaseMPC.process()
BaseMPC.re_init_optimization()
BaseMPC.read_results_file()
BaseMPC.register_callbacks()
BaseMPC.set_actuation()
BaseMPC.set_output()
BaseMPC.warn_for_missed_solves()
BaseMPCConfig
BaseMPCConfig.controls
BaseMPCConfig.inputs
BaseMPCConfig.log_level
BaseMPCConfig.module_id
BaseMPCConfig.optimization_backend
BaseMPCConfig.outputs
BaseMPCConfig.parameters
BaseMPCConfig.prediction_horizon
BaseMPCConfig.sampling_time
BaseMPCConfig.set_outputs
BaseMPCConfig.shared_variable_fields
BaseMPCConfig.states
BaseMPCConfig.time_step
BaseMPCConfig.type
BaseMPCConfig.validate_incoming_values
BaseMPCConfig.default_sampling_time
BaseMPCConfig.model_post_init()
create_optimization_backend()
BackendImport
+ADMMBackend
+BackendConfig
+OptimizationBackend
OptimizationBackend.config_type
OptimizationBackend.cost_function
OptimizationBackend.get_lags_per_variable()
OptimizationBackend.model
OptimizationBackend.model_from_config()
OptimizationBackend.mpc_backend_parameters
OptimizationBackend.register_logger()
OptimizationBackend.results_file_exists()
OptimizationBackend.setup_optimization()
OptimizationBackend.solve()
OptimizationBackend.update_discretization_options()
OptimizationBackend.update_model_variables()
OptimizationBackend.var_ref
Package containing models for agentlib_mpc.
+Holds the classes for CasADi variables and the CasADi model.
+Bases: CasadiModel
This class is created to handle one or multiple ML models used to predict the states. Compared to previous versions, it depends only on the trained models, which provide information about the lags via the serialized_ML_Models. This way, there is no need to define the lags again in the model class.
+List of all CasadiStates without an associated equation. Common +uses for this are slack variables that appear in cost functions and +constraints of optimization models.
+List of all CasadiStates with an associated black box equation.
+List of all CasadiStates with an associated black box equation.
+Simulates a time step of the simulation model. In the CasADi MLModel, both black- and white-box models can be used in the simulation and combined into a grey-box model.
+Gets the input values with the correct lags for all MLModels.
+Prepare the black- and white-box models for CasADi backend optimization and simulation.
+Creates a prediction step function which is suitable for MPC with multiple shooting.
+Loads a serialized MLModel and finds the output states of the MLModel. Divides the differential states of the model into states determined by the white-box model (self._differentials) and by the black-box model (self._differentials_network).
+Updates the internal MLModels with the passed MLModels.
+Warning: This function does not fully check if the result makes sense! Consider the following case: you have two ml_models with outputs out1 in ml_model1, and out2 and out3 in ml_model2. You call this function with an ml_model3 that defines out2. This function would replace ml_model2 with ml_model3, leaving out3 undefined and causing an error in subsequent functions. Make sure you specify all outputs when supplying ml_models that make parts of other ml_models obsolete.
+Bases: CasadiModelConfig
validate_assignment: bool = True
arbitrary_types_allowed: bool = True
extra: str = forbid
check_dt
» all fields
We need to both initialize private attributes and call the user-defined model_post_init +method.
+Raises a ConfigurationError if there are recursive ML-models for outputs.
+Holds the classes for CasADi variables and the CasADi model.
+Bases: CasadiVariable
Class that stores various attributes of control variables.
+Bases: Model
Base Class for CasADi models. To implement your own model, inherit from this class, specify the variables (inputs, outputs, states, parameters) and override the setup_system() method.
+List of all CasadiStates without an associated equation. Common +uses for this are slack variables that appear in cost functions and +constraints of optimization models.
+List of all CasadiStates with an associated differential equation.
+Performs one simulation step.
:param t_start: start time for integration
:param t_sample: increment of solver integration
+Get any variable by using its name:
+name (str) – The item to get from config by name of Variable. +Hence, item=ModelVariable.name
+The matching variable
+var (ModelVariable)
+AttributeError – If the item was not found in the variables of the + module.
+List of constraints of the form (lower, function, upper).
+Initializes the Casadi model. Creates the integrator to be used in do_step(). The integrator takes the current state and input values as input and returns the state values at the end of the interval and the value of the cost function integrated over the interval.
+Get all model inputs as a list
+List of algebraic equations RHS in the form +0 = z - g(x, z, p, … )
+Get all model outputs as a list
+Get all model parameters as a list
+Sets the values for all differential variables. The provided values list MUST match the order in which differentials are saved; there is no check.
+Sets the values for all outputs. The provided values list MUST match the order in which outputs are saved; there is no check.
+Get all model states as a list
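To make the inheritance pattern described above concrete, here is a minimal sketch of a custom model (names, units and values are illustrative; it assumes attribute-style access to variables and that setup_system() returns the cost expression):

    from typing import List

    from agentlib_mpc.models.casadi_model import (
        CasadiInput, CasadiModel, CasadiModelConfig, CasadiParameter, CasadiState,
    )

    class RoomConfig(CasadiModelConfig):
        inputs: List[CasadiInput] = [
            CasadiInput(name="Q_heat", value=0, unit="W", description="heating power"),
            CasadiInput(name="T_amb", value=283.15, unit="K", description="ambient temperature"),
        ]
        states: List[CasadiState] = [
            CasadiState(name="T_room", value=293.15, unit="K", description="room temperature"),
        ]
        parameters: List[CasadiParameter] = [
            CasadiParameter(name="C", value=1e5, unit="J/K", description="thermal capacity"),
            CasadiParameter(name="UA", value=100, unit="W/K", description="loss coefficient"),
        ]

    class Room(CasadiModel):
        config: RoomConfig

        def setup_system(self):
            # Differential equation of the single state
            self.T_room.ode = (self.Q_heat - self.UA * (self.T_room - self.T_amb)) / self.C
            # Cost function integrand, returned to the base class
            return (self.T_room - 293.15) ** 2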
+Bases: ModelConfig
validate_assignment: bool = True
arbitrary_types_allowed: bool = True
extra: str = forbid
cost_function (casadi.casadi.MX | casadi.casadi.SX | casadi.casadi.DM | casadi.casadi.Sparsity)
outputs (List[agentlib_mpc.models.casadi_model.CasadiOutput])
parameters (List[agentlib_mpc.models.casadi_model.CasadiParameter])
system (casadi.casadi.MX | casadi.casadi.SX | casadi.casadi.DM | casadi.casadi.Sparsity)
check_name
» name
include_default_model_variables
» inputs
include_default_model_variables
» outputs
include_default_model_variables
» parameters
include_default_model_variables
» states
include_default_model_variables
check_name
include_default_model_variables
include_default_model_variables
include_default_model_variables
The config given by the user to instantiate this class. Will be stored to enable a valid overwriting of the default config and to better restart modules. Is also useful to debug validators and the general BaseModuleConfig.
+If true, the validator of a variable's value is called whenever a new value is set. Disabled by default for performance reasons.
+We need to both initialize private attributes and call the user-defined model_post_init +method.
+Bases: CasadiVariable
Class that stores various attributes of output variables.
+Bases: CasadiVariable
Class that stores various attributes of parameters.
+Bases: CasadiVariable
Class that stores various attributes of CasADi differential variables.
+Serializes the Variable in json format and returns a string
+Bases: ModelVariable
Base Class for variables used in Casadi Models for simulation and optimization. Implements the standard arithmetic operations, so CasadiVariables can be used in equations.

sym: The symbolic CasADi variable used to define ODEs and optimization problems.
Ensures a symbolic MX variable is created with each CasadiVariable +instance, and that its dimensions are consistent.
+Bases: str, Enum
Bases: Layer
Batch Normalizing layer. Make sure the axis setting is set to two.
+Bases: CasadiPredictor
Generic implementations of sequential Keras models in CasADi.
+Input shape of Predictor.
+Bases: CasadiPredictor
Generic implementation of scikit-learn Gaussian Process Regressor.
+Input shape of Predictor.
+Bases: CasadiPredictor
Generic Casadi implementation of scikit-learn LinearRegression.
+Input shape of Predictor.
+Bases: ABC
Protocol for generic Casadi implementation of various ML-Model-based predictors.
+Serialized model which will be translated to a casadi model.
+Predictor model from other libraries, which are translated to CasADi syntax.
+Symbolic input of the predictor. Has the necessary shape of the input.
Symbolic casadi prediction function of the given model.
+Initialize sub predictor class.
+Input shape of Predictor.
+Output shape of Predictor.
+Bases: Layer
Fully connected layer.
+Bases: Layer
Long Short Term Memory cell.
+Bases: GaussianProcessRegressor
Extends scikit-learn's GaussianProcessRegressor with a normalizing and scaling option by adding the attribute data_handling, customizing the predict function accordingly and adding a normalize function.
+Overwrite predict method of GaussianProcessRegressor to include normalization.
Request metadata passed to the predict method.
Note that this method is only relevant if enable_metadata_routing=True (see sklearn.set_config()). Please see the User Guide on how the routing mechanism works.
The options for each parameter are:
True: metadata is requested, and passed to predict if provided. The request is ignored if metadata is not provided.
False: metadata is not requested and the meta-estimator will not pass it to predict.
None: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
str: metadata should be passed to the meta-estimator with this given alias instead of the original name.
The default (sklearn.utils.metadata_routing.UNCHANGED) retains the existing request. This allows you to change the request for some parameters and not others.
New in version 1.3.
Note: This method is only relevant if this estimator is used as a sub-estimator of a meta-estimator, e.g. used inside a Pipeline. Otherwise it has no effect.
return_cov (str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED) – Metadata routing for the return_cov parameter in predict.
return_std (str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED) – Metadata routing for the return_std parameter in predict.
Returns: self – The updated object.
+Request metadata passed to the score method.
Note that this method is only relevant if enable_metadata_routing=True (see sklearn.set_config()). Please see the User Guide on how the routing mechanism works.
The options for each parameter are:
True: metadata is requested, and passed to score if provided. The request is ignored if metadata is not provided.
False: metadata is not requested and the meta-estimator will not pass it to score.
None: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
str: metadata should be passed to the meta-estimator with this given alias instead of the original name.
The default (sklearn.utils.metadata_routing.UNCHANGED) retains the existing request. This allows you to change the request for some parameters and not others.
New in version 1.3.
Note: This method is only relevant if this estimator is used as a sub-estimator of a meta-estimator, e.g. used inside a Pipeline. Otherwise it has no effect.
sample_weight (str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED) – Metadata routing for the sample_weight parameter in score.
Returns: self – The updated object.
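These two request methods come from scikit-learn's metadata-routing machinery rather than from agentlib_mpc itself; a brief generic usage sketch (requires scikit-learn >= 1.3):

    import sklearn
    from sklearn.gaussian_process import GaussianProcessRegressor

    # Routing must be enabled globally before requests can be set.
    sklearn.set_config(enable_metadata_routing=True)

    # Ask meta-estimators (e.g. a Pipeline) to route return_std to predict().
    gpr = GaussianProcessRegressor().set_predict_request(return_std=True)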
+Bases: BaseModel
Mean values of input data for normalization. None if normalize is False.
+Boolean which defines whether the input data will be normalized or not.
+Number by which the y vector is divided before training and multiplied after evaluation.
+Standard deviation of input data for normalization. None if normalize is False.
+Bases: BaseModel
arbitrary_types_allowed: bool = True
The constant value which defines the covariance: k(x_1, x_2) = constant_value.
+The lower and upper bound on constant_value. If set to “fixed”, constant_value cannot be changed during hyperparameter tuning.
+The length scale of the kernel. If a float, an isotropic kernel is used. If an array, an anisotropic kernel is used where each dimension of l defines the length-scale of the respective feature dimension.
+The lower and upper bound on ‘length_scale’. If set to “fixed”, ‘length_scale’ cannot be changed during hyperparameter tuning.
+Parameter controlling the noise level (variance).
+The lower and upper bound on ‘noise_level’. If set to “fixed”, ‘noise_level’ cannot be changed during hyperparameter tuning.
+Returns the (flattened, log-transformed) non-fixed gpr_parameters.
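The fields above mirror the parameters of a ConstantKernel * RBF + WhiteKernel composition in scikit-learn. A sketch of rebuilding such a kernel from these values (the composition and the numbers are assumptions for illustration):

    from sklearn.gaussian_process.kernels import RBF, ConstantKernel, WhiteKernel

    kernel = (
        ConstantKernel(constant_value=1.0, constant_value_bounds=(1e-3, 1e3))
        * RBF(length_scale=1.0, length_scale_bounds=(1e-2, 1e2))
        + WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-10, 1e1))
    )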
+Bases: BaseModel
arbitrary_types_allowed: bool = True
Lower-triangular Cholesky decomposition of the kernel in X_train.
+Feature vectors or other representations of training data (also required for prediction).
+Value added to the diagonal of the kernel matrix during fitting. This can prevent a potential numerical issue during fitting, by ensuring that the calculated values form a positive definite matrix. It can also be interpreted as the variance of additional Gaussian measurement noise on the training observations. Note that this is different from using a WhiteKernel. If an array is passed, it must have the same number of entries as the data used for fitting and is used as datapoint-dependent noise level. Allowing to specify the noise level directly as a parameter is mainly for convenience and for consistency with Ridge.
+The log-marginal-likelihood of self.kernel_.theta.
+Number of features seen during fit.
+Target values in training data (also required for prediction).
+Bases: BaseModel
Estimated coefficients for the linear regression problem. If multiple targets are passed during the fit (y 2D), this is a 2D array of shape (n_targets, n_features), while if only one target is passed, this is a 1D array of length n_features.
+Independent term in the linear model. Set to 0.0 if fit_intercept = False.
+Number of features seen during fit.
+Rank of matrix X. Only available when X is dense.
+Singular values of X. Only available when X is dense.
+Bases: str, Enum
Bases: SerializedMLModel
Contains Keras ANN in serialized form and offers functions to transform +Keras Sequential ANNs to SerializedANN objects (from_ANN) and vice versa (deserialize).
+architecture/structure of ANN saved as json string.
+weights and biases of all layers saved as lists of np.ndarrays.
+protected_namespaces: tuple = ()
arbitrary_types_allowed: bool = True
The structure of the ANN as json string.
+The weights of the ANN.
+Deserializes SerializedANN object and returns a Keras Sequential ANN.
+Serializes Keras Sequential ANN and returns SerializedANN object
+Bases: SerializedMLModel
Contains scikit-learn GaussianProcessRegressor and its Kernel and provides functions to transform +these to SerializedGPR objects and vice versa.
+Attributes:
+protected_namespaces: tuple = ()
arbitrary_types_allowed: bool = True
data_handling (agentlib_mpc.models.serialized_ml_model.GPRDataHandlingParameters)
gpr_parameters (agentlib_mpc.models.serialized_ml_model.GPRParameters)
kernel_parameters (agentlib_mpc.models.serialized_ml_model.GPRKernelParameters)
model_type (agentlib_mpc.models.serialized_ml_model.MLModels)
Information about data handling for GPR.
+GPR parameters of GPR and its Kernel and Data of fitted GPR.
+Parameters of kernel of the fitted GPR.
+Deserializes SerializedGPR object and returns a scikit-learn GaussianProcessRegressor. :returns: GPR version of the SerializedGPR :rtype: gpr_fitted
+model – GaussianProcessRegressor from ScikitLearn.
dt – The length of time step of one prediction of GPR in seconds.
input – GPR input variables with their lag order.
output – GPR output variables (which are automatically also inputs, as +we need them recursively in MPC.) with their lag order.
training_info – Config of Trainer Class, which trained the Model.
SerializedGPR version of the passed GPR.
+Bases: SerializedMLModel
Contains scikit-learn LinearRegression and provides functions to transform +these to SerializedLinReg objects and vice versa.
+Attributes:
+protected_namespaces: tuple = ()
arbitrary_types_allowed: bool = True
Parameters of kernel of the fitted linear model.
+Deserializes SerializedLinReg object and returns a LinearRegression object of scikit-learn. +:returns: LinearRegression version of the SerializedLinReg +:rtype: linear_model_fitted
+model – LinearRegression from ScikitLearn.
dt – The length of time step of one prediction of LinReg in seconds.
input – LinReg input variables with their lag order.
output – LinReg output variables (which are automatically also inputs, as we need them recursively in MPC.) with their lag order.
training_info – Config of Trainer Class, which trained the Model.
SerializedLinReg version of the passed linear model.
+Bases: BaseModel, ABC
protected_namespaces: tuple = ()
The commit hash of the agentlib_mpc version this was created with.
+The length of time step of one prediction of Model in seconds.
+Model input variables with their lag order.
+Model output variables (which are automatically also inputs, as we need them recursively in MPC.) with their lag order.
+Config of Trainer class with all the meta data used for training of the Model.
+Deserializes SerializedMLModel object and returns a specific Machine Learning Model object. +:returns: Machine Learning Model. +:rtype: MLModel
+Loads the ML model from a source
+Loads SerializedMLModel object from a dict and creates a new specific Machine Learning Model object +which is returned.
+json_string – json string which will be loaded.
+SerializedMLModel object with data from json file.
+Loads SerializedMLModel object from a json file and creates a new specific Machine Learning Model object +which is returned.
+path – relative/absolute path which determines which json file will be loaded.
+SerializedMLModel object with data from json file.
+Loads SerializedMLModel object from a json string and creates a new specific Machine Learning Model object +which is returned.
+json_string – json string which will be loaded.
+SerializedMLModel object with data from json file.
+Saves MLModel object as json string. +:param path: relative/absolute path which determines where the json will be saved.
+model – Machine Learning Model.
dt – The length of time step of one prediction of Model in seconds.
input – Model input variables with their lag order.
output – Model output variables (which are automatically also inputs, as +we need them recursively in MPC.) with their lag order.
training_info – Config of Trainer Class, which trained the Model.
SerializedMLModel version of the passed ML Model.
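A schematic round-trip through the methods listed above (keyword names follow the parameter descriptions; trained_ann, inputs, outputs and info are placeholders, so this is a sketch rather than a runnable script):

    from agentlib_mpc.models.serialized_ml_model import SerializedANN, SerializedMLModel

    serialized = SerializedANN.serialize(
        model=trained_ann,   # fitted Keras Sequential model (placeholder)
        dt=900,              # prediction time step in seconds
        input=inputs,        # input variables with their lag order (placeholder)
        output=outputs,      # output variables with their lag order (placeholder)
        training_info=info,  # trainer config metadata (placeholder)
    )
    serialized.save_serialized_model(path="ann.json")

    loaded = SerializedMLModel.load_serialized_model_from_file("ann.json")
    keras_model = loaded.deserialize()  # back to a Keras Sequential ANN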
+Bases: TRYSensor
Bases: TRYSensorConfig
arbitrary_types_allowed: bool = True
validate_assignment: bool = True
extra: str = forbid
frozen: bool = True
prediction length in hours
+We need to both initialize private attributes and call the user-defined model_post_init +method.
+Holds functionality for ADMM modules.
+Bases: DistributedMPC
This class represents a module participating in a fully decentralized Consensus-ADMM optimization for distributed MPC. Agents autonomously send the values of their coupling variables, register other participants and perform update steps.
+Gives an iterator over all registered ADMMParticipation objects.
Checks whether all variables of var_ref are contained in the model. Returns names of model variables not contained in the var_ref, sorted by keys: ‘states’, ‘inputs’, ‘outputs’, ‘parameters’.
Collects updated AgentVariables of the coupling variables only.
+Gets all variables noted in the var ref and puts them in a flat +dictionary.
+Sets the participating status of all participating agents to +False.
+Get the values of all agents for a coupling variable.
+Read the results that were saved from the optimization backend and return them as a DataFrame.
+(results, stats) tuple of DataFrames.
+Puts received variables in the correct queue, depending on +registration status of this agent.
+This abstract method must be implemented in order to sync the module +with the other processes of the agent and the whole MAS.
+Set the participation to true for the given coupling input.
+Dictionary containing all other agents this agent shares variables with. +Ordered in a two-layer form, with variables at the first layer and +agents at the second layer. Contains ADMMParticipation objects at +the base layer.
+Examples
self.registered_participants = {
    'coupling_var_1': {'src_of_agent1': status_1,
                       'src_of_agent2': status_2,
                       'src_of_agent3': status_3},
    'coupling_var_2': {'src_of_agent3': status_a,
                       'src_of_agent2': status_b,
                       'src_of_agent4': status_c},
}

Here, each status refers to an ADMMParticipation object.
+Sets the ready status of all participating agents to False.
+Sets the coupling outputs to the data_broker, which automatically sends them.
+solution – Output dictionary from optimization_backend.solve().
+Sends an ADMM coupling variable through the data_broker and sets its value locally.
+Performs the update of the Lagrange multipliers: lambda^(k+1) := lambda^k - rho * (z - x_i).
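A plain NumPy sketch of this multiplier step (trajectories as arrays; rho is the penalty factor):

    import numpy as np

    def update_lambda(lam: np.ndarray, z: np.ndarray, x_i: np.ndarray, rho: float) -> np.ndarray:
        """One dual update: lambda^(k+1) = lambda^k - rho * (z - x_i)."""
        return lam - rho * (z - x_i)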
+Bases: DistributedMPCConfig
arbitrary_types_allowed: bool = True
validate_assignment: bool = True
extra: str = forbid
frozen: bool = True
couplings (List[agentlib_mpc.data_structures.mpc_datamodels.MPCVariable])
exchange (List[agentlib_mpc.data_structures.mpc_datamodels.MPCVariable])
check_valid_fields
» shared_variable_fields
check_valid_level
» log_level
List of all control variables of the MPC.
+List of all input variables of the MPC. Includes predictions for disturbances, set_points, dynamic constraint boundaries etc.
+Maximum computation + waiting time for one iteration.
+ge = 0
The log level for this Module. Default uses the root logger's level. Options: DEBUG; INFO; WARNING; ERROR; CRITICAL
+check_valid_level
Maximum number of ADMM iterations before termination of control step.
+ge = 0
+The unique id of the module within an agent, used only to communicate within the agent.
+List of all shared outputs of the MPC.
+List of model parameters of the MPC. They are constant over the horizon. Parameters not listed here will have their default from the model file.
+Penalty factor of the ADMM algorithm. Should be equal for all agents.
+ge = 0
Prediction horizon of the MPC.
+ge = 0
Weights that are applied to the change in control variables.
+Time spent on registration before each optimization
+ge = 0
Sampling interval for control steps. If None, will be the same as time step. Does not affect the discretization of the MPC, only the interval with which there will be optimization steps.
+Sets the full output time series to the data broker.
+check_valid_fields
List of all differential states of the MPC. The entries can define the boundaries and the source for the measurements
+Time step of the MPC.
+ge = 0
The type of the Module. Used to find the Python-Object from all agentlib-core and plugin Module options. If a dict is given, it must contain the keys ‘file’ and ‘class_name’. ‘file’ is the filepath of a python file containing the Module. ‘class_name’ is the name of the Module class within this file.
+If true, the validator of the AgentVariable value is called when receiving a new value from the DataBroker.
+Ensures no user provided variable is named with the reserved ADMM prefix.
+We need to both initialize private attributes and call the user-defined model_post_init +method.
+Bases: object
Holds data for the status of a shared variable of another system.
+Bases: ADMM
This abstract method must be implemented in order to sync the module +with the other processes of the agent and the whole MAS.
+Timeout value used to wait on registration. Waits in real time (time.sleep).
+Timeout value used to sync local admm processes. Should be very +small.
+Bases: ADMMConfig
arbitrary_types_allowed: bool = True
validate_assignment: bool = True
extra: str = forbid
frozen: bool = True
check_valid_fields
» shared_variable_fields
check_valid_level
» log_level
List of all control variables of the MPC.
+List of all input variables of the MPC. Includes predictions for disturbances, set_points, dynamic constraint boundaries etc.
+Maximum computation + waiting time for one iteration.
+ge = 0
The log level for this Module. Default uses the root logger's level. Options: DEBUG; INFO; WARNING; ERROR; CRITICAL
+check_valid_level
Maximum number of ADMM iterations before termination of control step.
+ge = 0
The unique id of the module within an agent, used only to communicate within the agent.
+List of all shared outputs of the MPC.
+List of model parameters of the MPC. They are constant over the horizon. Parameters not listed here will have their default from the model file.
+Penalty factor of the ADMM algorithm. Should be equal for all agents.
+ge = 0
Prediction horizon of the MPC.
+ge = 0
Weights that are applied to the change in control variables.
+Time spent on registration before each optimization
+ge = 0
Sampling interval for control steps. If None, will be the same as time step. Does not affect the discretization of the MPC, only the interval with which there will be optimization steps.
+Sets the full output time series to the data broker.
+check_valid_fields
List of all differential states of the MPC. The entries can define the boundaries and the source for the measurements
+Time step of the MPC.
+ge = 0
The type of the Module. Used to find the Python-Object from all agentlib-core and plugin Module options. If a dict is given, it must contain the keys ‘file’ and ‘class_name’. ‘file’ is the filepath of a python file containing the Module. ‘class_name’ is the name of the Module class within this file.
+If true, the validator of the AgentVariable value is called when receiving a new value from the DataBroker.
+We need to both initialize private attributes and call the user-defined model_post_init +method.
+Bases: Enum
Module implementing the coordinated ADMM module, which works together +with a coordinator.
+Bases: MiniEmployee, ADMM
Module to implement an ADMM agent, which is guided by a coordinator. +Only optimizes based on callbacks.
+Callback that answers the coordinator's init_iteration flag.
Performs the optimization given the mean trajectories and multipliers from the coordinator. Replies with the local optimal trajectories.
Bases: MiniEmployeeConfig, ADMMConfig
arbitrary_types_allowed: bool = True
validate_assignment: bool = True
extra: str = forbid
frozen: bool = True
check_valid_fields
» shared_variable_fields
check_valid_level
» log_level
List of all control variables of the MPC.
Define the agent's coordinator.
+List of all input variables of the MPC. Includes predictions for disturbances, set_points, dynamic constraint boundaries etc.
+Maximum computation + waiting time for one iteration.
+ge = 0
The log level for this Module. Default uses the root logger's level. Options: DEBUG; INFO; WARNING; ERROR; CRITICAL
+check_valid_level
Maximum number of ADMM iterations before termination of control step.
+ge = 0
The unique id of the module within an agent, used only to communicate within the agent.
+List of all shared outputs of the MPC.
+List of model parameters of the MPC. They are constant over the horizon. Parameters not listed here will have their default from the model file.
+Penalty factor of the ADMM algorithm. Should be equal for all agents.
+ge = 0
Prediction horizon of the MPC.
+ge = 0
Weights that are applied to the change in control variables.
+Interval in seconds after which a registration attempt is made.
+ge = 0
Time spent on registration before each optimization
+ge = 0
Wait time between signup_requests
+Sampling interval for control steps. If None, will be the same as time step. Does not affect the discretization of the MPC, only the interval with which there will be optimization steps.
+Sets the full output time series to the data broker.
+check_valid_fields
List of all differential states of the MPC. The entries can define the boundaries and the source for the measurements
+Time step of the MPC.
+ge = 0
The type of the Module. Used to find the Python-Object from all agentlib-core and plugin Module options. If a dict is given, it must contain the keys ‘file’ and ‘class_name’. ‘file’ is the filepath of a python file containing the Module. ‘class_name’ is the name of the Module class within this file.
+If true, the validator of the AgentVariable value is called when receiving a new value from the DataBroker.
+Asserts that couplings and exchange have values, as they are needed for the initial guess.
+We need to both initialize private attributes and call the user-defined model_post_init +method.
+alias of coup_input
Defines classes that coordinate an ADMM process.
+Bases: Coordinator
Deletes all files this module created.
+Override this method if your module creates files, e.g. results files.
+Saves the results of a local optimization. +:param variable:
+Returns:
+Bases: CoordinatorConfig
Holds the config for the ADMMCoordinator.
+arbitrary_types_allowed: bool = True
validate_assignment: bool = True
extra: str = forbid
frozen: bool = True
check_valid_fields
» shared_variable_fields
check_valid_level
» log_level
Absolute stopping criterion.
+Maximum number of ADMM iterations before termination of control step.
+Absolute dual stopping criterion.
+The log level for this Module. Default uses the root logger's level. Options: DEBUG; INFO; WARNING; ERROR; CRITICAL
+check_valid_level
Maximum number of iterations
+The unique id of the module within an agent, used only to communicate within the agent.
+Factor to vary the penalty parameter with.
+When the primal residual is x times higher, vary the penalty parameter and vice versa.
+Penalty factor of the ADMM algorithm. Should be equal for all agents.
+Prediction horizon of participating agents.
+Absolute primal stopping criterion.
+Time spent on registration before each optimization
+Relative stopping criterion.
+Sampling interval for control steps. If None, will be the same as time step. Does not affect the discretization of the MPC, only the interval with which there will be optimization steps.
+When True, saves the solve stats to a file.
+check_valid_fields
File name for the solve stats.
+Maximum wait time for subsystems in seconds
+Sampling interval between two control steps. Will be used in the discretization for MPC.
+The type of the Module. Used to find the Python-Object from all agentlib-core and plugin Module options. If a dict is given, it must contain the keys ‘file’ and ‘class_name’. ‘file’ is the filepath of a python file containing the Module. ‘class_name’ is the name of the Module class within this file.
+If True, use abs_tol and rel_tol; if False, use primal_tol and dual_tol.
+If true, the validator of the AgentVariable value is called when receiving a new value from the DataBroker.
+wait_on_start_iterations
+We need to both initialize private attributes and call the user-defined model_post_init +method.
+Bases: MPC
Base class which defines common interfaces among all distributed MPC approaches (optimization based, game theory based, or other).
+Bases: MPCConfig
Base config class with common configurations
+arbitrary_types_allowed: bool = True
validate_assignment: bool = True
extra: str = forbid
frozen: bool = True
We need to both initialize private attributes and call the user-defined model_post_init +method.
+ADMM
ADMM.admm_step()
ADMM.all_coupling_statuses()
ADMM.assert_mpc_variables_are_in_model()
ADMM.collect_couplings_for_optimization()
ADMM.collect_variables_for_optimization()
ADMM.cons_and_exchange
ADMM.deregister_all_participants()
ADMM.get_participants_values()
ADMM.get_results()
ADMM.participant_callback()
ADMM.penalty_factor_var
ADMM.process()
ADMM.receive_participant()
ADMM.registered_participants
ADMM.reset_participants_ready()
ADMM.send_coupling_values()
ADMM.send_coupling_variable()
ADMM.update_lambda()
ADMM.var_ref
ADMMConfig
ADMMConfig.controls
ADMMConfig.couplings
ADMMConfig.exchange
ADMMConfig.inputs
ADMMConfig.iteration_timeout
ADMMConfig.log_level
ADMMConfig.max_iterations
ADMMConfig.module_id
ADMMConfig.optimization_backend
ADMMConfig.outputs
ADMMConfig.parameters
ADMMConfig.penalty_factor
ADMMConfig.prediction_horizon
ADMMConfig.r_del_u
ADMMConfig.registration_period
ADMMConfig.sampling_time
ADMMConfig.set_outputs
ADMMConfig.shared_variable_fields
ADMMConfig.states
ADMMConfig.time_step
ADMMConfig.type
ADMMConfig.validate_incoming_values
ADMMConfig.check_prefixes_of_variables
ADMMConfig.model_post_init()
ADMMParticipation
+LocalADMM
+LocalADMMConfig
LocalADMMConfig.controls
LocalADMMConfig.couplings
LocalADMMConfig.exchange
LocalADMMConfig.inputs
LocalADMMConfig.iteration_timeout
LocalADMMConfig.log_level
LocalADMMConfig.max_iterations
LocalADMMConfig.module_id
LocalADMMConfig.optimization_backend
LocalADMMConfig.outputs
LocalADMMConfig.parameters
LocalADMMConfig.penalty_factor
LocalADMMConfig.prediction_horizon
LocalADMMConfig.r_del_u
LocalADMMConfig.registration_delay
LocalADMMConfig.registration_period
LocalADMMConfig.sampling_time
LocalADMMConfig.set_outputs
LocalADMMConfig.shared_variable_fields
LocalADMMConfig.states
LocalADMMConfig.sync_delay
LocalADMMConfig.time_step
LocalADMMConfig.type
LocalADMMConfig.validate_incoming_values
LocalADMMConfig.model_post_init()
ModuleStatus
+ParticipantStatus
+CoordinatedADMM
+CoordinatedADMMConfig
CoordinatedADMMConfig.controls
CoordinatedADMMConfig.coordinator
CoordinatedADMMConfig.couplings
CoordinatedADMMConfig.exchange
CoordinatedADMMConfig.inputs
CoordinatedADMMConfig.iteration_timeout
CoordinatedADMMConfig.log_level
CoordinatedADMMConfig.max_iterations
CoordinatedADMMConfig.messages_in
CoordinatedADMMConfig.messages_out
CoordinatedADMMConfig.module_id
CoordinatedADMMConfig.optimization_backend
CoordinatedADMMConfig.outputs
CoordinatedADMMConfig.parameters
CoordinatedADMMConfig.penalty_factor
CoordinatedADMMConfig.prediction_horizon
CoordinatedADMMConfig.r_del_u
CoordinatedADMMConfig.registration_interval
CoordinatedADMMConfig.registration_period
CoordinatedADMMConfig.request_frequency
CoordinatedADMMConfig.sampling_time
CoordinatedADMMConfig.set_outputs
CoordinatedADMMConfig.shared_variable_fields
CoordinatedADMMConfig.states
CoordinatedADMMConfig.time_step
CoordinatedADMMConfig.type
CoordinatedADMMConfig.validate_incoming_values
CoordinatedADMMConfig.couplings_should_have_values
CoordinatedADMMConfig.model_post_init()
coupInput
ADMMCoordinator
+ADMMCoordinatorConfig
ADMMCoordinatorConfig.abs_tol
ADMMCoordinatorConfig.admm_iter_max
ADMMCoordinatorConfig.dual_tol
ADMMCoordinatorConfig.log_level
ADMMCoordinatorConfig.maxIter
ADMMCoordinatorConfig.messages_in
ADMMCoordinatorConfig.messages_out
ADMMCoordinatorConfig.module_id
ADMMCoordinatorConfig.penalty_change_factor
ADMMCoordinatorConfig.penalty_change_threshold
ADMMCoordinatorConfig.penalty_factor
ADMMCoordinatorConfig.prediction_horizon
ADMMCoordinatorConfig.primal_tol
ADMMCoordinatorConfig.registration_period
ADMMCoordinatorConfig.rel_tol
ADMMCoordinatorConfig.sampling_time
ADMMCoordinatorConfig.save_iter_interval
ADMMCoordinatorConfig.save_solve_stats
ADMMCoordinatorConfig.shared_variable_fields
ADMMCoordinatorConfig.solve_stats_file
ADMMCoordinatorConfig.time_out_non_responders
ADMMCoordinatorConfig.time_step
ADMMCoordinatorConfig.type
ADMMCoordinatorConfig.use_relative_tolerances
ADMMCoordinatorConfig.validate_incoming_values
ADMMCoordinatorConfig.wait_time_on_start_iters
ADMMCoordinatorConfig.default_sampling_time
ADMMCoordinatorConfig.model_post_init()
ADMMCoordinatorConfig.solve_stats_file_is_csv
Bases: BaseModule
Class implementing the base coordination for distributed MPC
Returns: True if there are no busy agents, else False.
Processes an Agent's InitIteration confirmation. :param variable:
+Returns:
+Saves the results of a local optimization. +:param variable:
+Returns:
+Bases: BaseModuleConfig
arbitrary_types_allowed: bool = True
validate_assignment: bool = True
extra: str = forbid
frozen: bool = True
check_valid_fields
» shared_variable_fields
check_valid_level
» log_level
+The log level for this Module. Default uses the root logger's level. Options: DEBUG; INFO; WARNING; ERROR; CRITICAL
+check_valid_level
Maximum number of iterations
+The unique id of the module within an agent, used only to communicate within the agent.
+check_valid_fields
Maximum wait time for subsystems in seconds
+The type of the Module. Used to find the Python-Object from all agentlib-core and plugin Module options. If a dict is given, it must contain the keys ‘file’ and ‘class_name’. ‘file’ is the filepath of a python file containing the Module. ‘class_name’ is the name of the Module class within this file.
+If true, the validator of the AgentVariable value is called when receiving a new value from the DataBroker.
+We need to both initialize private attributes and call the user-defined model_post_init +method.
+Bases: BaseModule
Callback that processes the coordinator's ‘startIteration’ flag. :param variable:
Performs the optimization given the information from the coordinator. Replies with local information.
+This method is called in every computation step before the optimization starts. +Overwrite this method in a derived subclass if you want to take some actions each time before the optimal control problem is solved.
+Bases: BaseModuleConfig
arbitrary_types_allowed: bool = True
validate_assignment: bool = True
extra: str = forbid
frozen: bool = True
check_valid_fields
» shared_variable_fields
check_valid_level
» log_level
Define the agent's coordinator.
+The log level for this Module. Default uses the root logger's level. Options: DEBUG; INFO; WARNING; ERROR; CRITICAL
+check_valid_level
The unique id of the module within an agent, used only to communicate within the agent.
+Interval in seconds after which a registration attempt is made.
+ge = 0
Wait time between signup_requests
+check_valid_fields
+The type of the Module. Used to find the Python-Object from all agentlib-core and plugin Module options. If a dict is given, it must contain the keys ‘file’ and ‘class_name’. ‘file’ is the filepath of a python file containing the Module. ‘class_name’ is the name of the Module class within this file.
+If true, the validator of the AgentVariable value is called when receiving a new value from the DataBroker.
+We need to both initialize private attributes and call the user-defined model_post_init +method.
+Bases: BaseModule
A moving horizon estimator.
+Deletes all files this module created.
+Override this method if your module creates files, e.g. results files.
+Gets all variables noted in the var ref and puts them in a flat dictionary. The MHE version of this function has to perform some extra checks and lookups, since variables come from different sources, and some need to incorporate trajectories of past values.
+Read the results that were saved from the optimization backend and return them as a DataFrame.
+(results, stats) tuple of DataFrames.
+This abstract method must be implemented in order to sync the module +with the other processes of the agent and the whole MAS.
+Read the provided csv-file as an MPC results file. +:param results_file: File path
+results, stats +results is the Dataframe with all inputs and outputs of the MPC +optimizations. +stats is the Dataframe with matching solver stats
+Registers callbacks which listen to the variables which have to be saved as +time series. These callbacks save the values in the history for use in the +optimization.
+Bases: BaseModuleConfig
Pydantic data model for MPC configuration parser
+arbitrary_types_allowed: bool = True
validate_assignment: bool = True
extra: str = forbid
frozen: bool = True
estimated_inputs (List[agentlib_mpc.data_structures.mpc_datamodels.MPCVariable])
estimated_parameters (List[agentlib_mpc.data_structures.mpc_datamodels.MPCVariable])
known_inputs (List[agentlib_mpc.data_structures.mpc_datamodels.MPCVariable])
known_parameters (List[agentlib_mpc.data_structures.mpc_datamodels.MPCVariable])
states (List[agentlib_mpc.data_structures.mpc_datamodels.MPCVariable])
check_valid_fields
» shared_variable_fields
List of unknown input variables of the MHE. Includes mainly disturbances.
+List of unknown parameters of the MHE. They are constant over the horizon and will be estimated.
+Estimation horizon of the MHE.
+ge = 0
List of known input variables of the MHE. Includes controls, disturbances, setpoints, dynamic constraint boundaries etc.
+List of known parameters of the MHE. They are constant over the horizon. Parameters not listed here will have their default from the model file.
+check_valid_fields
Mapping of state names to their weight in the MHE problem. If you are confident in your measurement, choose a high value. If you don't have a measurement / do not trust it, choose 0. Default is 0.
+List of all differential states of the MHE.
+Time step of the MHE.
+ge = 0
We need to both initialize private attributes and call the user-defined model_post_init +method.
+Wrap a classmethod, staticmethod, property or unbound function +and act as a descriptor that allows us to detect decorated items +from the class’ attributes.
+This class’ __get__ returns the wrapped item’s __get__ result, +which makes it transparent for classmethods and staticmethods.
+The decorator that has to be wrapped.
+The decorator info.
+A wrapper function to wrap V1 style function.
+This package contains all modules for distributed model predictive control using multi-agent systems.
+It contains classes for local optimization and global coordination.
+Bases: object
DistributedMPC
DistributedMPCConfig
+Coordinator
+CoordinatorConfig
CoordinatorConfig.log_level
CoordinatorConfig.maxIter
CoordinatorConfig.messages_in
CoordinatorConfig.messages_out
CoordinatorConfig.module_id
CoordinatorConfig.shared_variable_fields
CoordinatorConfig.time_out_non_responders
CoordinatorConfig.type
CoordinatorConfig.validate_incoming_values
CoordinatorConfig.model_post_init()
MiniEmployee
+MiniEmployeeConfig
MiniEmployeeConfig.coordinator
MiniEmployeeConfig.log_level
MiniEmployeeConfig.messages_in
MiniEmployeeConfig.messages_out
MiniEmployeeConfig.module_id
MiniEmployeeConfig.registration_interval
MiniEmployeeConfig.request_frequency
MiniEmployeeConfig.shared_variable_fields
MiniEmployeeConfig.type
MiniEmployeeConfig.validate_incoming_values
MiniEmployeeConfig.model_post_init()
MHE
+MHEConfig
MHEConfig.estimated_inputs
MHEConfig.estimated_parameters
MHEConfig.horizon
MHEConfig.known_inputs
MHEConfig.known_parameters
MHEConfig.optimization_backend
MHEConfig.shared_variable_fields
MHEConfig.state_weights
MHEConfig.states
MHEConfig.time_step
MHEConfig.model_post_init()
MHEConfig.state_weights_are_in_states()
MHEConfig.wrapped
MHEConfig.decorator_info
MHEConfig.shim
ANNTrainer
+ANNTrainerConfig
+GPRTrainer
+GPRTrainerConfig
+LinRegTrainer
+LinRegTrainerConfig
+MLModelTrainer
MLModelTrainer.agent_and_time
MLModelTrainer.build_ml_model()
MLModelTrainer.create_inputs_and_outputs()
MLModelTrainer.divide_in_tvt()
MLModelTrainer.fit_ml_model()
MLModelTrainer.input_names
MLModelTrainer.model_type
MLModelTrainer.output_names
MLModelTrainer.process()
MLModelTrainer.register_callbacks()
MLModelTrainer.resample()
MLModelTrainer.retrain_model()
MLModelTrainer.save_all()
MLModelTrainer.save_ml_model()
MLModelTrainer.serialize_ml_model()
MLModelTrainer.training_info
MLModelTrainerConfig
MLModelTrainerConfig.MLModel
MLModelTrainerConfig.data_sources
MLModelTrainerConfig.inputs
MLModelTrainerConfig.interpolations
MLModelTrainerConfig.lags
MLModelTrainerConfig.output_types
MLModelTrainerConfig.outputs
MLModelTrainerConfig.recursive_outputs
MLModelTrainerConfig.retrain_delay
MLModelTrainerConfig.save_data
MLModelTrainerConfig.save_directory
MLModelTrainerConfig.save_ml_model
MLModelTrainerConfig.save_plots
MLModelTrainerConfig.shared_variable_fields
MLModelTrainerConfig.step_size
MLModelTrainerConfig.test_share
MLModelTrainerConfig.time_series_length
MLModelTrainerConfig.time_series_memory_size
MLModelTrainerConfig.train_share
MLModelTrainerConfig.use_values_for_incomplete_data
MLModelTrainerConfig.validation_share
MLModelTrainerConfig.check_data_sources_exist
MLModelTrainerConfig.check_if_save_path_is_there
MLModelTrainerConfig.check_shares_amount_to_one
MLModelTrainerConfig.fill_interpolations
MLModelTrainerConfig.fill_lags
MLModelTrainerConfig.fill_output_types
MLModelTrainerConfig.fill_recursive_outputs
MLModelTrainerConfig.model_post_init()
SetPointGenerator
+SetPointGeneratorConfig
SetPointGeneratorConfig.day_end
SetPointGeneratorConfig.day_lb
SetPointGeneratorConfig.day_start
SetPointGeneratorConfig.day_ub
SetPointGeneratorConfig.interval
SetPointGeneratorConfig.night_lb
SetPointGeneratorConfig.night_ub
SetPointGeneratorConfig.shared_variable_fields
SetPointGeneratorConfig.target_variable
SetPointGeneratorConfig.model_post_init()
Bases: BaseModule
Bases: BaseModuleConfig
arbitrary_types_allowed: bool = True
validate_assignment: bool = True
extra: str = forbid
frozen: bool = True
Optional list of columns of the data frame that should be sent. If omitted, all datapoints in the frame are sent.
+Data that should be communicated during execution. Index should be either numeric or Datetime; numeric values are interpreted as seconds.
+Offset will be subtracted from index.
+Interpolation method used for resampling of data. Only ‘linear’ and ‘previous’ are allowed.
+Sample time of data source. Default is 1 s.
+Makes sure data is a data frame, and loads it if required.
+We need to both initialize private attributes and call the user-defined model_post_init +method.
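The resampling semantics described above can be illustrated with plain pandas (a sketch of the assumed behaviour, not the module's actual implementation):

    import numpy as np
    import pandas as pd

    # A numeric index is interpreted as seconds; the configured offset is
    # subtracted from it before resampling.
    df = pd.DataFrame({"T_amb": [273.15, 275.15, 280.15]}, index=[0.0, 600.0, 1200.0])
    df.index = df.index - 0.0  # offset

    t_sample = 300.0  # sample time of the data source
    grid = np.arange(df.index[0], df.index[-1] + t_sample, t_sample)

    # 'linear' interpolation; for 'previous', ffill() would be used instead.
    resampled = (
        df.reindex(df.index.union(grid))
        .interpolate(method="index")
        .reindex(grid)
    )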
+Bases: BaseMPC
Bases: BaseMPCConfig
Pydantic data model for MPC configuration parser
+arbitrary_types_allowed: bool = True
validate_assignment: bool = True
extra: str = forbid
frozen: bool = True
List of all binary control variables of the MPC.
+We need to both initialize private attributes and call the user-defined model_post_init +method.
+Assures all binary variables have 0 and 1 as boundaries.
+Holds the base class for MPCs.
+Bases: BaseModule
A model predictive controller. +More info to follow.
+Checks whether all variables of var_ref are contained in the model. Returns names of model variables not contained in the var_ref, sorted by keys: ‘states’, ‘inputs’, ‘outputs’, ‘parameters’.
+Helper function for assert_mpc_variables_are_in_model. Asserts that the variables of the var_ref corresponding to ref_key are a subset of a list of names provided (usually obtained from the model) and prints an error if not. Returns the portion of model_names that is not in the given var_ref.
+Deletes all files this module created.
+Override this method if your module creates files, e.g. results files.
+Gets all variables noted in the var ref and puts them in a flat +dictionary.
+Read the results that were saved from the optimization backend and return them as a DataFrame.
+(results, stats) tuple of DataFrames.
+Getter for current simulation model
+Current simulation model
+agentlib.model
+This method is called in every computation step before the optimization starts. Overwrite this method in a derived subclass if you want to take some actions each time before the optimal control problem is solved.
+This abstract method must be implemented in order to sync the module +with the other processes of the agent and the whole MAS.
+Re-initializes the optimization backend with new parameters.
+Read the provided csv-file as an MPC results file. +:param results_file: File path
+results, stats +results is the Dataframe with all inputs and outputs of the MPC +optimizations. +stats is the Dataframe with matching solver stats
+Registers the init_optimization callback to all parameters which +cannot be changed without recreating the optimization problem.
+Takes the solution from optimization backend and sends the first +step to AgentVariables.
+Bases: BaseModuleConfig
Pydantic data model for MPC configuration parser
+arbitrary_types_allowed: bool = True
validate_assignment: bool = True
extra: str = forbid
frozen: bool = True
controls (List[agentlib_mpc.data_structures.mpc_datamodels.MPCVariable])
inputs (List[agentlib_mpc.data_structures.mpc_datamodels.MPCVariable])
outputs (List[agentlib_mpc.data_structures.mpc_datamodels.MPCVariable])
parameters (List[agentlib_mpc.data_structures.mpc_datamodels.MPCVariable])
states (List[agentlib_mpc.data_structures.mpc_datamodels.MPCVariable])
check_valid_fields
» shared_variable_fields
check_valid_level
» log_level
List of all control variables of the MPC.
+List of all input variables of the MPC. Includes predictions for disturbances, set_points, dynamic constraint boundaries etc.
+The log level for this Module. Default uses the root logger's level. Options: DEBUG; INFO; WARNING; ERROR; CRITICAL
+check_valid_level
The unique id of the module within an agent, used only to communicate within the agent.
+List of all shared outputs of the MPC.
+List of model parameters of the MPC. They are constant over the horizon. Parameters not listed here will have their default from the model file.
+Prediction horizon of the MPC.
+ge = 0
Sampling interval for control steps. If None, will be the same as time step. Does not affect the discretization of the MPC, only the interval with which there will be optimization steps.
+Sets the full output time series to the data broker.
+check_valid_fields
List of all differential states of the MPC. The entries can define the boundaries and the source for the measurements
+Time step of the MPC.
+ge = 0
The type of the Module. Used to find the Python-Object from all agentlib-core and plugin Module options. If a dict is given, it must contain the keys ‘file’ and ‘class_name’. ‘file’ is the filepath of a python file containing the Module. ‘class_name’ is the name of the Module class within this file.
+If true, the validator of the AgentVariable value is called when receiving a new value from the DataBroker.
+We need to both initialize private attributes and call the user-defined model_post_init +method.
+Holds the class for full featured MPCs.
+Bases: BaseMPC
A model predictive controller. +More info to follow.
+Gets all variables noted in the var ref and puts them in a flat +dictionary.
+Bases: BaseMPCConfig
Pydantic data model for MPC configuration parser
+arbitrary_types_allowed: bool = True
validate_assignment: bool = True
extra: str = forbid
frozen: bool = True
Weights that are applied to the change in control variables.
+Ensures r_del_u is only set for control variables.
+We need to both initialize private attributes and call the user-defined model_post_init +method.
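The r_del_u weighting above can be read as a move-suppression term; a hedged sketch of the assumed quadratic form (names and numbers are placeholders, not taken from the package):

    # Assumed penalty on control moves implied by r_del_u (illustrative only):
    r_del_u = {"mDot": 10.0}                # weight per control variable
    u = {"mDot": [0.10, 0.30, 0.20, 0.20]}  # control trajectory over the horizon

    penalty = sum(
        weight * (u[name][k] - u[name][k - 1]) ** 2
        for name, weight in r_del_u.items()
        for k in range(1, len(u[name]))
    )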
+Code stolen from Max Berktold
+Bases: ABC
The idea is to reduce the effective number of input data points x to the GP +from n to m, with m<n, where the set of m points are called inducing points.
Since this makes the effective covariance matrix K smaller, many inducing point approaches reduce the computational complexity from O(n^3) to O(n*m^2). The smaller m is, the bigger the speed-up.

Source: https://bwengals.github.io/inducing-point-methods-to-speed-up-gps.html
Bases: InducingPoints
Bases: MLModelTrainer
Module that generates ANNs based on received data.
Build an ANN with a one-layer structure; can only create one ANN.
+Fits the ML Model with the training data.
+alias of SerializedANN
Bases: MLModelTrainerConfig
Pydantic data model for ANNTrainer configuration parser
+arbitrary_types_allowed: bool = True
validate_assignment: bool = True
extra: str = forbid
frozen: bool = True
Specification of the EarlyStopping Callback for training
+Hidden layers which should be created for the ANN. An ANN always has a BatchNormalization Layer, and an Output Layer the size of the output dimensions. Additional hidden layers can be specified here as a list of tuples: (#neurons of layer, activation function).
+We need to both initialize private attributes and call the user-defined model_post_init +method.
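A hypothetical layers entry following the tuple convention described above, i.e. (#neurons, activation) per hidden layer:

    # Two hidden layers with 32 neurons each and ReLU activation (illustrative):
    layers = [
        (32, "relu"),
        (32, "relu"),
    ]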
+Bases: MLModelTrainer
Module that generates GPRs based on received data.
+Build a GPR with a constant Kernel in combination with a white kernel.
+Fits GPR to training data
+alias of SerializedGPR
Bases: MLModelTrainerConfig
Pydantic data model for GPRTrainer configuration parser
+arbitrary_types_allowed: bool = True
validate_assignment: bool = True
extra: str = forbid
frozen: bool = True
Defines the number of restarts of the Optimizer for the gpr_parameters of the kernel.
Defines whether the training data and the inputs for prediction are normalized before being given to the GPR.
+Defines by which value the output data is divided for training and multiplied after prediction.
+We need to both initialize private attributes and call the user-defined model_post_init +method.
+Bases: MLModelTrainer
Module that generates linear regression models based on received data.
+Fits linear model to training data.
+alias of SerializedLinReg
Bases: MLModelTrainerConfig
Pydantic data model for LinRegTrainer configuration parser
+arbitrary_types_allowed: bool = True
validate_assignment: bool = True
extra: str = forbid
frozen: bool = True
We need to both initialize private attributes and call the user-defined model_post_init +method.
Bases: BaseModule, ABC
Abstract Base Class for all Trainer classes.
+A string that specifies id and time. Used to create save paths
+Creates extra columns in the data which contain the shifted time-series data +which is lagged accordingly. Returns a tuple (input_data, output_data)
Splits the samples into training, validation, and test sets.
+Fits the ML Model with the training data.
+This abstract method must be implemented in order to sync the module +with the other processes of the agent and the whole MAS.
+Samples the available time_series data to the required step size.
+Saves all relevant data and results of the training process if desired.
+Saves the ML Model in serialized format.
+Serializes the ML Model, so that it can be saved as a json file. :returns: SerializedMLModel version of the passed ML Model.
+Returns a dict with relevant config parameters regarding the training.
+Bases: BaseModuleConfig, ABC
Abstract Base Class for all Trainer Configs.
+arbitrary_types_allowed: bool = True
validate_assignment: bool = True
extra: str = forbid
frozen: bool = True
check_valid_fields
» shared_variable_fields
Serialized ML Model which can be sent to other Agents.
+List of paths to time series data, which can be loaded on initialization of the agent.
+Variables which are inputs of the ML Model that should be trained.
+Dictionary specifying the interpolation types of output variables. If not specified, will be set to ‘linear’.
+Dictionary specifying the lags of each input and output variable. If not specified, will be set to one.
+Dictionary specifying the output types of output variables. If not specified, will be set to ‘difference’.
+Variables which are outputs of the ML Model that should be trained.
+Dictionary specifying whether output variables are recursive, i.e. automatically appear as an input as well. If not specified, will be set to ‘recursive’.
+Time in seconds, after which retraining is triggered in regular intervals
+Whether the training data should be saved.
+Path, where created ML Models should be saved.
+Whether the created ML Models should be saved.
+Whether a plot of the created ML Models performance should be saved.
+check_valid_fields
Maximum time window of data which is kept for the ML Model training. If saved data is older than current time minus time_series_length, it will be deleted.
+Maximum size of the data which is kept in memory for the ML Model training. If saved data exceeds this value, the oldest data is deleted.
+Default False. If True, the values of inputs and outputs which are defined in the config will be used for training, in case historic data has not reached the trainer. If False, an Error will be raised when the data is not sufficient.
+Checks if all given data sources exist
Makes sure the shares amount to one.
+Adds interpolation method to all unspecified methods.
+Adds output type one to all unspecified output types.
+Adds recursive flag to all unspecified outputs.
+We need to both initialize private attributes and call the user-defined model_post_init +method.
+Module which generates random set points within a comfort zone. Code heavily stolen +from Max Berktold
+Bases: BaseModule
Module that generates and sends random set points based on daytime and values.
Bases: BaseModuleConfig
Pydantic data model for SetPointGenerator configuration parser
+arbitrary_types_allowed: bool = True
validate_assignment: bool = True
extra: str = forbid
frozen: bool = True
check_valid_fields
» shared_variable_fields
check_valid_fields
We need to both initialize private attributes and call the user-defined model_post_init +method.
+Bases: OptimizationQuantity
Declares a group of optimization parameters that serve a purpose in +the optimization problem. Typical groups are uncontrollable inputs or +physical parameters.
+denotation – The key of the variable, e.g. ‘p’, ‘d’, etc. Use this +key in the discretization function to add the parameter at +different stages of the optimization problem.
variables – A list of CasadiVariables including all parameters +within this category.
ref_list – A list of names indicating which parameters in full_list +are AgentVariables and need to be updated before each +optimization.
use_in_stage_function – If False, the parameter is not added to the +stage function. If True, the variable needs to be provided to +the stage function at every point in the discretization function.
assert_complete – If True, throws an error if the ref_list does +not contain all variables.
Bases: object
Bases: OptimizationQuantity
Declares a group of optimization variables that serve a purpose in +the optimization problem. Typical groups are states, the control +inputs or slack variables.
+binary – Flag, whether these variables are binary
denotation – The key of the variable, e.g. ‘X’, ‘U’, etc. Use +this key in the discretization function to add the variable at +different stages of the optimization problem. The optimal value +of these variables will also be mapped to this key.
variables – A list of +CasadiVariables or an MX/SX vector including all variables +within this category.
ref_list – A list of names indicating which variables +in full_list are AgentVariables and need to be updated before +each optimization.
use_in_stage_function – If False, the variable is not +added to the stage function. If True, the variable needs to be +provided to the stage function at every point in the +discretization function.
assert_complete – If True, throws an error if the ref_list does +not contain all variables.
Bases: OptimizationBackend
OptimizationBackend for solving the optimization problem with CasADi. +Requires the model to be a CasADi model.
+alias of CasadiBackendConfig
Save the results of solve into a dataframe at each time step.
+Example results dataframe:
value_type    variable                  ...  lower
variable      T_0         T_0_slack     ...  T_0_slack  mDot_0
time_step
2  0.000000    298.160000  NaN          ...  NaN        NaN
   101.431499  297.540944  -149.465942  ...  -inf       0.0
   450.000000  295.779780  -147.704779  ...  -inf       0.0
   798.568501  294.720770  -146.645769  ...  -inf       0.0
results –
now –
Returns:
Performs all necessary steps to make the solve method usable. To do this, it calls several auxiliary functions. These functions can be overloaded to change the resulting optimization problem.
var_ref – class with variable name lists sorted by function in the mpc.
+Solves the optimization problem given the current values of the +corresponding AgentVariables and system time. The standardization of +return values is a work in progress.
+now – Current time used for interpolation of input trajectories.
current_vars – Dict of AgentVariables holding the values relevant to +the optimization problem. Keys are the names
A dataframe with all optimization variables over their respective +grids. Depending on discretization, can include many nan’s, so care +should be taken when using this, e.g. always use dropna() after +accessing a column.
Example:

variables  mDot | T_0 | slack_T
time
0          0.1  | 298 | nan
230        nan  | 297 | 3
470        nan  | 296 | 2
588        nan  | 295 | 1
700        0.05 | 294 | nan
930        nan  | 294 | 0.1
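Since the grids of different variables rarely coincide, a sketch of the recommended access pattern (df stands for a results dataframe like the one above):

t_0 = df["T_0"].dropna()  # drop the nan's introduced by the discretization grid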
Bases: BackendConfig
extra: str = forbid
Path to a batch file, which can compile C code on windows.
+Boolean to turn JIT of the optimization problems on or off.
+Holds classes that implement different transcriptions of the OCP
+Bases: ABC
opt_vars: holds symbolic variables during problem creation
opt_vars_lb: holds symbolic variables during problem creation
opt_vars_ub: holds symbolic variables during problem creation
initial_guess: holds symbolic variables during problem creation
opt_pars: holds symbolic variables during problem creation
constraints: holds symbolic variables during problem creation
constraints_lb: holds symbolic variables during problem creation
constraints_ub: holds symbolic variables during problem creation
objective_function: cost function during problem creation
mpc_opt_vars (dict): holds the symbolic variables and grids during problem creation, sorted by type as in system_variables
mpc_opt_pars (dict): holds the symbolic parameters and grids during problem creation, sorted by type as in system_parameters
+Add a constraint to the optimization problem. If no bounds are given, +adds an equality constraint.
+Create an optimization parameter and append to all the associated lists.
denotation[str]: the key of the parameter, e.g. ‘P’, ‘Q’, …
dimension[int]: the dimension of the parameter
post_den[str]: string to add to casadi MX after denotation (for debugging)
Create an optimization variable and append to all the associated lists. If lb or ub are given, they override the values provided at runtime! The usual application of this is to fix the initial value of a state to a parameter.
+quantity – corresponding system variable
lb – lower bound of the variable
ub – upper bound of the variable
guess – default for the initial guess
post_den – string to add to casadi MX after denotation (for debugging)
List specifying for every optimization variable whether it is binary.
Function creating mapping functions between the MPC variables ordered by type (as defined in declare_quantities) and the raw input/output vector of the CasADi NLP.
+Initializes the trajectory optimization problem, creating all symbolic +variables of the OCP, the mapping function and the numerical solver.
+The nlp dict that casadi solvers need for instantiation
+Solves the discretized trajectory optimization problem.
+mpc_inputs – Casadi Matrices specifying the input of all different types +of optimization parameters. Matrices consist of different variable rows +and have a column for each time step in the discretization. +There are separate matrices for each input type (as defined in the +System), and also for the upper and lower boundaries of variables +respectively.
Returns: The optimal values of each variable and parameter over the prediction horizon, as well as solve statistics.
+Bases: object
Holds the System class, which knows the model
+Bases: ABC
Examples
class MySystem(System):
    # variables
    states: OptimizationVariable
    controls: OptimizationVariable
    algebraics: OptimizationVariable
    outputs: OptimizationVariable

    # parameters
    non_controlled_inputs: OptimizationParameter
    model_parameters: OptimizationParameter
    initial_state: OptimizationParameter

    # dynamics
    model_constraints: Constraint
    cost_function: ca.MX
    ode: ca.MX

    def initialize(self, model: CasadiModel, var_ref: VariableReference):
        self.states = OptimizationVariable.declare(
            denotation="state",
            variables=model.get_states(var_ref.states),
            ref_list=var_ref.states,
            assert_complete=True,
        )
        ...
+OptimizationParameter
OptimizationParameter.add_default_values
OptimizationParameter.declare()
OptimizationParameter.dim
OptimizationParameter.full_names
OptimizationParameter.full_symbolic
OptimizationParameter.full_with_defaults
OptimizationParameter.name
OptimizationParameter.ref_names
OptimizationParameter.use_in_stage_function
OptimizationQuantity
+OptimizationVariable
OptimizationVariable.binary
OptimizationVariable.declare()
OptimizationVariable.dim
OptimizationVariable.full_names
OptimizationVariable.full_symbolic
OptimizationVariable.input_map
OptimizationVariable.name
OptimizationVariable.output_map
OptimizationVariable.ref_names
OptimizationVariable.use_in_stage_function
CasADiBackend
CasADiBackend.config_type
CasADiBackend.cost_function
CasADiBackend.discretization
CasADiBackend.discretization_types
CasADiBackend.model
CasADiBackend.reset_setup_attributes()
CasADiBackend.save_result_df()
CasADiBackend.setup_optimization()
CasADiBackend.solve()
CasADiBackend.system
CasADiBackend.system_type
CasADiBackend.var_ref
CasadiBackendConfig
CasadiBackendConfig.build_batch_bat
CasadiBackendConfig.discretization_options
CasadiBackendConfig.do_jit
CasadiBackendConfig.model
CasadiBackendConfig.name
CasadiBackendConfig.overwrite_result_file
CasadiBackendConfig.results_file
CasadiBackendConfig.save_results
CasadiBackendConfig.solver
CasadiBackendConfig.validate_compile
Discretization
Discretization.add_constraint()
Discretization.add_opt_par()
Discretization.add_opt_var()
Discretization.binary_vars
Discretization.constraints
Discretization.constraints_lb
Discretization.constraints_ub
Discretization.create_nlp_in_out_mapping()
Discretization.equalities
Discretization.grid()
Discretization.initial_guess
Discretization.initialize()
Discretization.k
Discretization.mpc_opt_pars
Discretization.mpc_opt_vars
Discretization.nlp
Discretization.objective_function
Discretization.only_positive_times_in_results
Discretization.opt_pars
Discretization.opt_vars
Discretization.opt_vars_lb
Discretization.opt_vars_ub
Discretization.pred_time
Discretization.solve()
Results
+Bases: DirectCollocation
Bases: MultipleShooting
Bases: CasADiBaseBackend
, ADMMBackend
Class doing optimization of ADMM subproblems with CasADi.
+Returns the grid on which the coupling variables are discretized.
+Save the results of solve into a dataframe at each time step.
+Example results dataframe:
value_type    variable                  ...  lower
variable      T_0         T_0_slack     ...  T_0_slack  mDot_0
time_step
2  0.000000    298.160000  NaN          ...  NaN        NaN
   101.431499  297.540944  -149.465942  ...  -inf       0.0
   450.000000  295.779780  -147.704779  ...  -inf       0.0
   798.568501  294.720770  -146.645769  ...  -inf       0.0
results –
now –
Returns:
+alias of CasadiADMMSystem
Bases: FullSystem
Bases: System
Bases: CasADiBackend
Class doing optimization of ADMM subproblems with CasADi.
+alias of BaseSystem
Bases: object
Bases: Discretization
Initializes the trajectory optimization problem, creating all symbolic +variables of the OCP, the mapping function and the numerical solver.
+Bases: Discretization
Initializes the trajectory optimization problem, creating all symbolic +variables of the OCP, the mapping function and the numerical solver.
+Bases: CasADiADMMBackend
, CasADiBBBackend
Class doing optimization with an MLModel.
+alias of CasadiADMMNNSystem
Bases: CasadiADMMSystem
, CasadiMLSystem
In this class, the lags are determined by the trainer alone and the lags are +saved in the serialized MLModel so that it doesn’t have to be defined in the +model again
+Bases: ADMMMultipleShooting
, MultipleShooting_ML
Bases: CasADiBaseBackend
Class doing optimization with an MLModel.
Returns the names of variables which include lags and their lag. The MPC module can use this information to save relevant past data of lagged variables.
+alias of CasadiMLSystem
Bases: FullSystem
Bases: MultipleShooting
Initializes the trajectory optimization problem, creating all symbolic +variables of the OCP, the mapping function and the numerical solver.
+Bases: CasADiBackend
Class doing optimization of ADMM subproblems with CasADi.
+alias of FullSystem
Bases: DirectCollocation
Bases: BaseSystem
Bases: MultipleShooting
Bases: object
Bases: Discretization
Initializes the trajectory optimization problem, creating all symbolic +variables of the OCP, the mapping function and the numerical solver.
+Bases: CasADiBackend
Class doing optimization of ADMM subproblems with CasADi.
+Obtain the specified portion of the trajectory.
trajectory – The trajectory to be sampled. Scalars will be expanded onto the grid. Lists need to exactly match the provided grid. Otherwise, a list of tuples is accepted with the form (timestamp, value). A dict with the keys ‘grid’ and ‘value’ is also accepted.
current – start time of requested trajectory
grid – target interpolation grid in seconds in relative terms (i.e. +starting from 0 usually)
method – interpolation method, currently accepted: ‘linear’, +‘spline’, ‘previous’
Sampled list of values.
+Takes a slice of the trajectory from the current time step with the +specified length and interpolates it to match the requested sampling. +If the requested horizon is longer than the available data, the last +available value will be used for the remainder.
+ValueError –
TypeError –
Bases: System
Bases: CasADiBackend
Class doing optimization of ADMM subproblems with CasADi.
+alias of CasadiMINLPSystem
Bases: BaseSystem
Bases: DirectCollocation
Bases: BaseModel
Data-Class to import a given python file from import_path and load the given class_name.
ADMMCollocation
ADMMMultipleShooting
CasADiADMMBackend
CasADiADMMBackend.cost_function
CasADiADMMBackend.coupling_grid
CasADiADMMBackend.discretization
CasADiADMMBackend.discretization_types
CasADiADMMBackend.it
CasADiADMMBackend.model
CasADiADMMBackend.now
CasADiADMMBackend.result_stats
CasADiADMMBackend.results
CasADiADMMBackend.save_result_df()
CasADiADMMBackend.system
CasADiADMMBackend.system_type
CasADiADMMBackend.var_ref
CasadiADMMSystem
CasadiADMMSystem.algebraics
CasadiADMMSystem.controls
CasadiADMMSystem.cost_function
CasadiADMMSystem.exchange_diff
CasadiADMMSystem.exchange_multipliers
CasadiADMMSystem.global_couplings
CasadiADMMSystem.initial_state
CasadiADMMSystem.initialize()
CasadiADMMSystem.last_control
CasadiADMMSystem.local_couplings
CasadiADMMSystem.local_exchange
CasadiADMMSystem.model_constraints
CasadiADMMSystem.model_parameters
CasadiADMMSystem.multipliers
CasadiADMMSystem.non_controlled_inputs
CasadiADMMSystem.ode
CasadiADMMSystem.outputs
CasadiADMMSystem.penalty_factor
CasadiADMMSystem.r_del_u
CasadiADMMSystem.states
BaseSystem
+CasADiBaseBackend
+CollocationMatrices
+DirectCollocation
+MultipleShooting
+CasADiBBBackend
+CasadiMLSystem
+MultipleShooting_ML
+CasADiFullBackend
+DirectCollocation
FullSystem
FullSystem.algebraics
FullSystem.controls
FullSystem.cost_function
FullSystem.initial_state
FullSystem.initialize()
FullSystem.last_control
FullSystem.model_constraints
FullSystem.model_parameters
FullSystem.non_controlled_inputs
FullSystem.ode
FullSystem.outputs
FullSystem.r_del_u
FullSystem.states
MultipleShooting
CollocationMatrices
+DirectCollocation
+MHEBackend
+MHESystem
MHESystem.algebraics
MHESystem.cost_function
MHESystem.estimated_inputs
MHESystem.estimated_parameters
MHESystem.estimated_states
MHESystem.initialize()
MHESystem.known_inputs
MHESystem.known_parameters
MHESystem.measured_states
MHESystem.model_constraints
MHESystem.ode
MHESystem.outputs
CasADiMINLPBackend
+CasadiMINLPSystem
CasadiMINLPSystem.algebraics
CasadiMINLPSystem.binary_controls
CasadiMINLPSystem.controls
CasadiMINLPSystem.cost_function
CasadiMINLPSystem.initial_state
CasadiMINLPSystem.initialize()
CasadiMINLPSystem.model_constraints
CasadiMINLPSystem.model_parameters
CasadiMINLPSystem.non_controlled_inputs
CasadiMINLPSystem.ode
CasadiMINLPSystem.outputs
CasadiMINLPSystem.states
DirectCollocation
Bases: OptimizationBackend
Base class for implementations of optimization backends for ADMM +algorithms.
+Returns the grid on which the coupling variables are discretized.
+Bases: BaseModel
extra: str = forbid
Checks whether the overwrite-results settings are valid, and deletes existing result files if applicable.
+Bases: ABC
Base class for all optimization backends. OptimizationBackends are a plugin for the ‘mpc’ module. They provide means to set up and solve the underlying optimization problem of the MPC. They can also save data of the solutions.
+alias of BackendConfig
Returns the names of variables which include lags and their lag in seconds. The MPC module can use this information to save relevant past data of lagged variables.
Registers a logger; this can be used to reuse the module’s logger.
+Checks if the results file already exists, and if not, creates it with +headers.
Performs all necessary steps to make the solve method usable.
var_ref – Variable Reference that specifies the role of each model variable +in the mpc
+Solves the optimization problem given the current values of the +corresponding AgentVariables and system time. The standardization of +return values is a work in progress.
+now – Current time used for interpolation of input trajectories.
current_vars – Dict of AgentVariables holding the values relevant to +the optimization problem. Keys are the names
A dataframe with all optimization variables over their respective +grids. Depending on discretization, can include many nan’s, so care +should be taken when using this, e.g. always use dropna() after +accessing a column.
Example:

variables  mDot | T_0 | slack_T
time
0          0.1  | 298 | nan
230        nan  | 297 | 3
470        nan  | 296 | 2
588        nan  | 295 | 1
700        0.05 | 294 | nan
930        nan  | 294 | 0.1
Updates the discretization options with the new dict.
+Internal method to write current data_broker to model variables. +Only update values, not other module_types.
+Package containing utils for agentlib_mpc.
+EBCColors
+FontDict
+Style
ValueRange
+make_fig()
make_grid()
make_side_legend()
Gets the results of an optimization at a time step.

index_offset – Determines how the index will be updated when loading the data. The offset will be subtracted from the time-index. This is useful for results of realtime systems, where the time value will be a unix time stamp and we want to cut the number down to something understandable. For example, if the time index (level 0 of the input Dataframe) is [105, 115, 125] and we give an index_offset of 100, the data will be handled as if the index was [5, 15, 25]. If “auto” or True is provided as an argument, the index will be modified to start at 0. If 0 or False are provided, no modifications will be made.
data – The multi-indexed results data frame from the mpc
time_step – The time step from which results should be shown. +If no exact match, shows closest.
variable – If specified, only returns results +with regard to a certain variable.
iteration – Specifies, from which inner ADMM iteration data should be +from. If negative, counts from last iteration. Default -1.
convert_to – Whether the data should be converted to datetime, minutes etc.
A single-indexed Dataframe of the optimization results +at the specified time step. If variable is not specified, +returns all variables with a double column index, if it +is specified returns only values and/or bounds with +single indexed columns.
Converts an index from seconds to datetime or another unit.
convert_to – unit, e.g. minutes, hours, datetime
index – pandas index object
+Returns:
+Converts an index of an MPC or ADMM results Dataframe to a different unit, +assuming it is passed in seconds.
+Gets the first values at each time step of a results trajectory.
+Returns the number of iterations at each time instance of the ADMM simulation.
+Returns the time steps at which an MPC step was performed.
+Gets the last values at each time step of a results trajectory.
+Gets the results of an optimization at a time step.
+data – The multi-indexed results data frame from the mpc
time_step – The time step from which results should be shown. +If no exact match, shows closest.
variable – If specified, only returns results +with regard to a certain variable.
variable_type – The type of the variable provided (parameter, variable, lower, …)
index_offset – Determines how the index will be updated when loading the data.
The offset will be subtracted from the time-index. This is useful for results of realtime systems, where the time value will be a unix time stamp and we want to cut the number down to something understandable. For example, if the time index (level 0 of the input Dataframe) is [105, 115, 125] and we give an index_offset of 100, the data will be handled as if the index was [5, 15, 25]. If “auto” or True is provided as an argument, the index will be modified to start at 0. If 0 or False are provided, no modifications will be made.

Returns: A single-indexed Dataframe of the optimization results at the specified time step. If variable is not specified, returns all variables with a double column index; if it is specified, returns only values and/or bounds with single-indexed columns.
+pd.DataFrame
Helper function for interpolate_to_previous. Finds the current index to which we should forward-fill.
+Interpolates to previous value of original grid, i.e. a forward fill.
Stand-in for the following scipy code:
tck = interpolate.interp1d(list(original_grid), values, kind="previous")
result = list(tck(target_grid))
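For illustration, a runnable sketch of that stand-in (made-up grids; scipy is only needed for this demonstration):

from scipy import interpolate

original_grid = [0, 10, 20]
values = [1.0, 2.0, 3.0]
target_grid = [0, 5, 10, 15, 20]

# kind="previous" forward-fills with the last known value
tck = interpolate.interp1d(list(original_grid), values, kind="previous")
result = list(tck(target_grid))  # [1.0, 1.0, 2.0, 2.0, 3.0]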
+s -> (s0,s1), (s1,s2), (s2, s3), …
+Obtain the specified portion of the trajectory.
+trajectory –
The trajectory to be sampled. Scalars will be expanded onto the grid. Lists need to exactly match the provided grid. Otherwise, a pandas Series is accepted with the timestamp as index. A dict with the keys as time stamps is also accepted.
current – start time of requested trajectory
grid – target interpolation grid in seconds in relative terms (i.e. +starting from 0 usually)
method – interpolation method, currently accepted: ‘linear’, +‘spline’, ‘previous’
Sampled list of values.
+Takes a slice of the trajectory from the current time step with the +specified length and interpolates it to match the requested sampling. +If the requested horizon is longer than the available data, the last +available value will be used for the remainder.
+ValueError –
TypeError –
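A usage sketch for this function (keyword names are taken from the parameter list above; the import path is assumed, so adjust it to where sample lives in your installation):

from agentlib_mpc.utils.sampling import sample  # import path assumed

trajectory = {0: 290.0, 600: 292.0, 1200: 294.0}  # dict with time stamps as keys
values = sample(
    trajectory=trajectory,
    current=0,                # start time of the requested trajectory
    grid=[0, 300, 600, 900],  # relative grid in seconds
    method="linear",
)
# values -> [290.0, 291.0, 292.0, 293.0]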
Module that defines functions to be used for automatically creating animations of ADMM convergence.
+Create and configure the Dash app.
+Args: +agent_data (Dict[str, pd.DataFrame]): Dictionary containing data for each agent. +residuals_df (pd.DataFrame): DataFrame containing residuals data.
+Returns: +dash.Dash: Configured Dash app.
+Create a plotly figure for the coupling variable plot.
+Args: +plot_data (Dict[str, List[float]]): Dictionary with agent names as keys and their values as lists. +prediction_grid (List[float]): List of prediction grid values. +coupling_var (str): Name of the coupling variable.
+Returns: +go.Figure: Plotly figure object.
+Create a plotly figure for the residuals plot.
+Args: +residuals_df (pd.DataFrame): DataFrame containing residuals data. +time_step (float): Selected time step.
+Returns: +go.Figure: Plotly figure object.
+Identify coupling variables in the dataframe.
+Args: +df (pd.DataFrame): The MPC data for an agent.
+Returns: +List[str]: List of coupling variable names.
+Extract data for the coupling variable plot.
+Args: +agent_data (Dict[str, pd.DataFrame]): Dictionary containing data for each agent. +time_step (float): Selected time step. +iteration (int): Selected iteration number. +coupling_var (str): Name of the selected coupling variable.
+Returns: +Dict[str, List[float]]: Dictionary with agent names as keys and their values as lists.
+Load MPC data for multiple agents from files containing ‘admm’ in their name.
+Args: +directory (str): Directory path containing the data files.
+Returns: +Dict[str, pd.DataFrame]: Dictionary with agent names as keys and their data as values.
+Loads a residuals csv file in the correct format.
+Plots the final residuals over time.
+Plots the decrease of the residuals over iterations for a time step
+Evaluates the residuals over time. Takes a raw residuals DataFrame and returns a +Dataframe, which has for each time step the number of iterations and the final primal and dual residuals.
+DataFrame with float index (time in seconds) and the columns +(“primal_residual”, “dual_residual”, “iters”)
+Some basic plotting utilities
+Bases: object
Bases: TypedDict
Bases: object
Creates a figure and axes with a given number of rows. If rows is specified, returns a tuple of axes, else only a single ax.
+series – A column of the MPC results Dataframe
fig – Plotly figure to plot on
plot_actual_values – whether the closed loop actual values at the start of each +optimization should be plotted (default True)
plot_predictions – whether all predicted trajectories should be plotted
step – whether to use a step plot or a line plot
convert_to – Will convert the index of the returned series to the specified unit +(seconds, minutes, hours, days)
None
+title –
y_axis_label –
series – A column of the MPC results Dataframe
plot_actual_values – whether the closed loop actual values at the start of each +optimization should be plotted (default True)
plot_predictions – whether all predicted trajectories should be plotted
step – whether to use a step plot or a line plot
convert_to – Will convert the index of the returned series to the specified unit +(seconds, minutes, hours, days)
Figure
+Tests the Model on test data
+Interpolates colors based on a sample number. To be used when plotting many mpc +predictions in one plot, so a fade from old to new predictions can be seen.
+Original credit to Max Berktold.
+progress –
colors –
Returns:
+series – A column of the MPC results Dataframe
ax – which Axes to plot on
plot_actual_values – whether the closed loop actual values at the start of each +optimization should be plotted (default True)
plot_predictions – whether all predicted trajectories should be plotted
step –
convert_to – Will convert the index of the returned series to the specified unit +(seconds, minutes, hours, days)
Returns:
+series – A column of the MPC results Dataframe
ax – which Axes to plot on
plot_actual_values – whether the closed loop actual values at the start of each +optimization should be plotted (default True)
plot_predictions – whether all predicted trajectories should be plotted
step –
convert_to – Will convert the index of the returned series to the specified unit +(seconds, minutes, hours, days)
Returns:
This is a plugin for AgentLib. +Includes functions for modeling with CasADi, and using those models in nonlinear MPC, central and distributed (based on ADMM).
See examples and the tutorial in the docs. The best example to start with is an MPC for a single air-conditioned room.
+Install with:
+pip install agentlib_mpc
+
To install with full dependencies (recommended), run:
+pip install agentlib_mpc[full]
+
AgentLib_MPC has a number of optional dependencies:
+fmu: Support simulation of FMU models (https://fmi-standard.org/).
ml: Use machine learning based NARX models for MPC. Currently supports neural networks, gaussian process regression and linear regression. Installs tensorflow, keras and scikit-learn.
interactive: Utility functions for displaying mpc results in an interactive dashboard. Installs plotly and dash.
Install these like
+pip install agentlib_mpc[ml]
+
For now, please cite the base framework under https://github.com/RWTH-EBC/AgentLib.
+A preprint is available under http://dx.doi.org/10.2139/ssrn.4884846 and can be cited as:
+++Eser, Steffen and Storek, Thomas and Wüllhorst, Fabian and Dähling, Stefan and Gall, Jan and Stoffel, Phillip and Müller, Dirk, A Modular Python Framework for Rapid Development of Advanced Control Algorithms for Energy Systems. Available at SSRN: https://ssrn.com/abstract=4884846 or http://dx.doi.org/10.2139/ssrn.4884846
+
When using AgentLib-MPC, please remember to cite other tools that you are using, for example CasADi or IPOPT.
+We gratefully acknowledge the financial support by Federal Ministry for Economic Affairs and Climate Action (BMWK), promotional reference 03ET1495A.
++
Clone agentlib_mpc into a local directory. +Then, run:
+pip install .\
+
If you want to contribute code and help develop this repository, first clone the repo and then install it as an editable package:
+pip install -e .
+
In this section, we will learn how to use agentlib for distributed MPC using the alternating direction method of multipliers (ADMM). The required example files are located in ‘examples/admm/’. They include three main scripts and two directories with models and config files respectively. We simulate the same system as before, however this time the AHU determines its mass flow without knowing the system behaviour of the room, creating the need for coordination.
+There are three main scripts. One runs a local version of the ADMM algorithm, which +operates within a single thread and is suited for simulation and testing. The other +one runs the agents in separate python processes and communicates through MQTT. +The last one implements a coordinated ADMM, which can be useful, since it helps +unify parameter setting and provides better convergence criteria. +Here, we will look at the Realtime implementation using multiprocessing.
+from agentlib.utils.multi_agent_system import MultiProcessingMAS
+import logging
+import matplotlib.pyplot as plt
+
The only new import this time is the
+MultiProcessingMAS
utility. Unlike the LocalMASAgency we used before, the MultiProcessingMAS spawns a separate python process for each agent, allowing for the true parallelism that would take place in a real-world MAS. However, this also requires that simulations are performed in Realtime, since time is now the common variable that keeps the systems in sync.
+Now onto the main script.
env_config = {"rt": True,
+ "strict": True,
+ "factor": 0.1,
+ "t_sample": 60}
+
+mas = MultiProcessingMAS(agent_configs=['configs\\cooler.json',
+ 'configs\\cooled_room.json',
+ 'configs\\simulator.json'],
+ env=env_config,
+ variable_logging=True)
+until = 1000  # simulation duration in seconds (example value; set as needed)
+mas.run(until=until)
+results = mas.get_results()
+
As explained, we choose a Realtime environment, set it to strict
+(RuntimeError will be raised if simulation is too slow), and give it a
+factor
of 0.1 to speed it up. Finally, we set t_sample
to 60, so we
will save our results in an interval of 60 seconds. Then, we provide our MAS with three configs: one for the room controller, one for the AHU controller, and one to simulate the full system.
There are three models. The simulation model and the room model are similar to the models we used in the MPC examples before, with the main difference being in the constraints and cost function. The simulation model omits the MPC-related parts of the model, while the room model is the same as before, with only the air mass flow term missing from the cost function. The cooler model on the other hand is a simple input-equals-output model of the mass flow, including the cost function term that was removed from the room model. Therefore, we created a situation where the room is no longer explicitly penalized for usage of the mass flow, but instead a separate system is.
+For this example, we are not providing the configs in the python script
itself, but store them separately as json files. Both agent configs and configs of single modules can be stored in separate json files. Let’s look at the config file
+configs/communicators/cooled_room_mqtt.json
.
+Since the agents are now using separate processes, we cannot use the
+local_broadcast
communicator anymore. Instead, we are using the MQTT
communicator from the agentlib. The config for an MQTT communicator is a bit more complicated than the local_broadcast. After providing an id and specifying the type to “mqtt”, there are some parameters to provide: “url” and “subscriptions”. For small test scripts, the url from the snippet below will do.
{
+ "module_id": "Ag1Com",
+ "type": "mqtt",
+ "url": "mqtt://test.mosquitto.org",
+ "subscriptions": ["Cooler", "Simulation"]
+}
+
The subscriptions are a list of agent ids the agent is subscribed to. For more info
+on MQTT topics visit e.g.
+here.
In agentlib, the mqtt communicator sends messages under a topic consisting of “/agentlib/” and the sending agent’s id. In MQTT, “#” is a wildcard, so by specifying the topics
+in the way above, the agent will receive all messages from the Cooler agent
+and the Simulation agent. The resulting communication structure can be seen
+in the image below:
tbd +Let’s look at the beginning of the config for the room agent. First of all, +we see a file path in the list of modules, which points to our communicator +config. The root of relative filepaths is the directory, where the main +script is run.
+{
+ "id": "CooledRoom",
+ "modules": [
+ "configs/communicators/cooled_room_mqtt.json",
+ {
+ "module_id": "admm_module",
+ "type": "admm",
+ "optimization_backend": {
+ "type": "casadi_admm",
+ "model": {
+ "type": {
+ "file": "models/ca_room_model.py",
+ "class_name": "CaCooledRoom"
+ }
+ },
+ "solver": {
+ "name": "ipopt",
+ "options": {
+ "print_level": 0
+ }
+ },
+ "results_file": "admm_opt.csv"
+ },
+
We can see that the module type for the controller now reads “admm”, and the optimization backend type is “casadi_admm”. We can also see that there are some new options set for the optimization_backend, namely the solver option.
+The numerical solver name can be chosen from a list of supported solvers
+(currently supported are ipopt
, sqpmethod
, qpoases
). For most purposes,
+IPOPT will be the solver of choice. However, we can change the default
+options for the chosen solver. To see applicable options, please refer to
+the documentation of the solver. For IPOPT, an overview of all the options
+can be found on the official site.
+In our
+case, we set the print_level to 0 to avoid clutter in the console output.
+We also specify a results_file
, so we save detailed information about each
+NLP solution in csv format, readable e.g. as a multi-indexed pandas
+Dataframe.
After providing parameters and inputs in the usual way, let’s +have a look at what changed between the central MPC and the ADMM.
+Note
+The prediction_horizon
, time_step
and penalty_factor
parameters of
the ADMM module affect the structure of the optimization problem and
+need to be identical for all modules taking part in the ADMM algorithm.
+Currently, this is not validated automatically, so care should be taken when
+writing the config. The timeout
, registration_period
and
+admm_iter_max
parameters should also be the same or similar.
"controls": [
+],
+"states": [
+ {
+ "name": "T_0",
+ "value": 298.16,
+ "ub": 303.15,
+ "lb": 288.15
+ }
+],
+"couplings": [
+ {
+ "name": "mDot_0",
+ "alias": "mDotCoolAir",
+ "value": 0.05,
+ "ub": 0.1,
+ "lb": 0}
+
The controls
list is now empty, as the air mass flow is not determined by
+the room anymore. Instead, it is now listed under the new type couplings
.
+The couplings are optimization variables, so they should also have upper and
lower boundaries. ADMM with agentlib is based on consensus, meaning we optimize partial systems that have to agree on shared variables. The shared
+variables are identified through their alias. In this
+example, all agents that define a coupling with alias “mDotCoolAir” share
+this variable. The value for the state “T_0” is obtained from the Simulation
+agent, so care should be taken to make sure the alias matches. In this case,
+the default alias of “T_0” will match, since the name exists in the
+simulation model.
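For background, this coupling scheme corresponds to the standard consensus ADMM formulation (shown here in scaled form following Boyd et al.; agentlib's exact update rules may differ in details):

\min_{x_1,\dots,x_N} \sum_{i=1}^{N} f_i(x_i) \quad \text{s.t.} \quad x_i = z \quad \forall i

x_i^{k+1} = \operatorname*{arg\,min}_{x_i} \left( f_i(x_i) + \tfrac{\rho}{2}\,\lVert x_i - z^k + u_i^k \rVert_2^2 \right)
z^{k+1} = \tfrac{1}{N} \sum_{i=1}^{N} \left( x_i^{k+1} + u_i^k \right)
u_i^{k+1} = u_i^k + x_i^{k+1} - z^{k+1}

Each agent i solves its own subproblem f_i (here: room and cooler), z is the consensus value of the shared variable (here the mass flow with alias “mDotCoolAir”), and ρ corresponds to the penalty_factor mentioned in the note above.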
Now let’s see the config on the side of the cooler:
+ "controls": [
+ {
+ "name": "mDot",
+ "value": 0.02,
+ "ub": 0.1,
+ "lb": 0
+ }
+ ],
+ "states": [
+ ],
+ "couplings": [
+ {
+ "name": "mDot_out",
+ "alias": "mDotCoolAir",
+ "value": 0.05
+ }
+ ]
+},
+
We can see that there are two variables of interest, one in controls
and
+one in couplings
. The control “mDot” is the actuation that is sent to the
+simulator after optimization. Therefore, the alias of the mass flow in the
+Simulation agent must match “mDot”. The coupling “mDot_out” is assigned with
+the alias “mDotCoolAir”, which matches the coupling in the room agent.
This is because the models follow the FMU standard, where variables are divided between inputs, outputs, locals/states and parameters. In this case, our cooler model takes a mass flow as an input ("mDot" in this case) and produces the same mass flow as an output to other systems ("mDot_out" in this case). In a more complex setting, the cooler might have an internal PID controller to set the mass flow to its correct value. In that case, "mDot" would be the setpoint of the mass flow, and "mDot_out" would be the actual mass flow.
To run a model predictive controller, a system model for use in optimization +is required. What model types are available depends on the chosen +optimization backend. In this section, creating an MPC with a CasADi backend +is explained. Open the ‘examples/one_room_mpc/physical/simple_mpc.py’ example.
+As usual, let’s look at the imports first.
+import logging
+from typing import List
+import matplotlib.pyplot as plt
+from agentlib.models.casadi_model import CasadiModel, CasadiInput, CasadiState, \
+ CasadiParameter, CasadiOutput
+from agentlib.utils.multi_agent_system import LocalMASAgency
+
We import logging as usual. typing is used to annotate the optimization model we will be creating, and matplotlib is used to plot the results. Next, we import the CasadiModel and
+some CasadiVariables. We will use these to specify an agentlib-style
+CasadiModel. Finally, we import the LocalMASAgency utility. This can be used
+to conveniently create and run your local MAS, without creating the agents
+and their environment by hand.
Now let’s see how we can create an optimization model. The model contains +the physical system dynamics, as well as the cost function and additional +constraints on the system.
+In this example, we will create a model of a room, which is under a constant +heat load and can be controlled by changing the mass flow of cool air from +an air handling unit.
Creating a custom CasadiModel is similar to creating a module:

1. Create a class that inherits from CasadiModelConfig
2. Declare the model variables in the config class: inputs, outputs, states, parameters
3. Create a class that inherits from CasadiModel
4. Assign the config with config: <<ConfigClass>>
5. Define the model equations by overwriting the setup_system method
Let’s see how we declare the variables required for our simple room model. Since modeling in agentlib is based on the FMU-standard, we divide our variables into inputs, outputs, parameters and locals (called states to avoid clash with the python builtin locals). First, we need to create a custom config for our CasadiModel.
+class MyCasadiModelConfig(CasadiModelConfig):
+ inputs: List[CasadiInput] = [
+ # controls
+        CasadiInput(name="mDot", value=0.0225, unit="kg/s", description="Air mass flow into zone"),
+
+ # disturbances
+ CasadiInput(name="load", value=150, unit="W", description="Heat "
+ "load into zone"),
+ CasadiInput(name="T_in", value=290.15, unit="K", description="Inflow air temperature"),
+
+ # settings
+ CasadiInput(name="T_upper", value=294.15, unit="K", description="Upper boundary (soft) for T."),
+ ]
+
+ states: List[CasadiState] = [
+ # differential
+ CasadiState(name="T", value=293.15, unit="K", description="Temperature of zone"),
+
+ # algebraic
+
+ # slack variables
+ CasadiState(name="T_slack", value=0, unit="K", description="Slack variable of temperature of zone")
+ ]
+
+ parameters: List[CasadiParameter] = [
+ CasadiParameter(name="cp", value=1000, unit="J/kg*K", description="thermal capacity of the air"),
+ CasadiParameter(name="C", value=100000, unit="J/K",
+ description="thermal capacity of zone"),
+ CasadiParameter(name="s_T", value=1, unit="-", description="Weight for T in constraint function"),
+ CasadiParameter(name="r_mDot", value=1, unit="-",
+ description="Weight for mDot in objective function")
+ ]
+ outputs: List[CasadiOutput] = [
+ CasadiOutput(name='T_out', unit="K", description="Temperature of zone")
+ ]
+
Our room model has four inputs. These include the inputs of the physical system: the air mass flow from the AHU, the temperature of this mass flow, and the load on the system. We also count the upper room temperature limit as an input, since it should be settable by the occupants of the room. To declare an input, we put a CasadiInput object into the inputs list. A variable always needs a name. You can also give it a value, which will be used if no other value is provided at runtime. The unit and description parameters currently serve no purpose, but can be helpful to readers of the model. Next, we define the states. One of them is the temperature of the room. Since we use soft constraints to enforce an adequate room temperature, we also have to include a slack variable.
+Note
+States in the context of an AgentLib model refers to all variables that +are local to a model. All differential variables have to be declared as +states, but not all states need to be associated with a +differential equation.
Next, we have the parameters. These include the specific thermal capacity of air, the thermal capacity of the room and two weights for the cost function. Finally, we specify an output of the model. It is not required for the MPC in this example, but can be useful for situations where one might want to use the same model for optimization and simulation. Outputs always need to be associated with an algebraic equation.
After specifying a config, we can write the model class itself, which contains the dynamics. First, it is important to specify the config attribute of the class and set it to the config class we defined. The model equations and constraints are specified in the setup_system method. We can access the variables defined above by referencing self.<name>. Differential equations are associated with a variable by setting the ode attribute of that variable. In the same way, algebraic equations can be defined by setting the alg attribute.
class MyCasadiModel(CasadiModel):
+
+ config: MyCasadiModelConfig
+
+ def setup_system(self):
+ # Define ode
+ self.T.ode = self.cp * self.mDot / self.C * \
+ (self.T_in - self.T) + \
+ self.load / self.C
+
+ # Define ae
+ self.T_out.alg = self.T
+
+ # Constraints: List[(lower bound, function, upper bound)]
+ self.constraints = [
+
+ # soft constraints
+ (0, self.T + self.T_slack, self.T_upper),
+
+ ]
+
+ # Objective function
+ objective = sum([
+ self.r_mDot * self.mDot,
+ self.s_T * self.T_slack ** 2,
+ ])
+
+ return objective
+
Constraints can be added to the model through the constraints
attribute.
+It should be defined as a list of tuples, with the lower bound coming first,
+the constraint function coming second and the upper bound coming last.
+Equality constraints can be added by setting upper and lower bound to the
+same value. Note that algebraic equations will also be converted to equality
+constraints internally. Here, we set one constraint to implement the soft
+constraint on the room temperature.
Algebraic equations are explicit assignments to a CasadiOutput. They are considered when simulating the model or when doing MPC with it. Constraints specified as tuples can be of implicit nature, however they are ignored for simulation. The only limitation on constraints is that variables that make up the upper or lower bound cannot be used as optimization variables in the MPC.
Note
Python intuition tells us self.<name> should not work, as we did not set the attribute. In the model base class of agentlib, the __getattr__ method is written in a way that allows access to all variables that are defined in the Config class of the model.
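To illustrate the mechanism, a minimal sketch (not the actual agentlib implementation):

class ModelSketch:
    def __init__(self, variables: dict):
        self._variables = variables

    def __getattr__(self, name):
        # __getattr__ is only called when normal attribute lookup fails,
        # so regular attributes are unaffected and config variables
        # resolve by their name.
        try:
            return self._variables[name]
        except KeyError:
            raise AttributeError(name)

model = ModelSketch({"T": 293.15})
print(model.T)  # 293.15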
Finally, we can specify and return the objective function in the same way as
+the other equations. We use the sum()
function from python to
+improve readability.
Let’s look at the environment config first.
+ENV_CONFIG = {"rt": False,
+ "t_sample": 60}
+
This time, we specify ‘rt’ (=Realtime) as False, meaning we want the simulation to run as fast as possible. The ‘t_sample’ option specifies the time step in which the internal clock of the environment ticks. This is relevant e.g. for classical controllers like PID. It will also affect the sampling with which results are saved.
Below is the config for the MPC agent. As before, we specify an “id” and a list of modules, with the first one being a local_broadcast communicator. Then, we add the MPC module. We specify “mpc” as the type, and then add the other options. A central part of the MPC is its optimization_backend. The optimization backend is specified by another dictionary, always consisting of “type” and “model”. The model will usually be user-specified and is provided with the same syntax of “file” and “class_name” as the custom module in the PingPong example. The optimization backend also takes an option “discretization_options”, however we will look at that later.
+AGENT_MPC = {"id": "myMPCAgent",
+ "modules": [
+ {"module_id": "Ag1Com",
+ "type": "local_broadcast"},
+ {"module_id": "myMPC",
+ "type": "mpc",
+ "optimization_backend":
+ {"type": "casadi",
+ "model": {
+ "type": {"file": __file__,
+ "class_name": "MyCasadiModel"}},
+ ...
+ },
+ "time_step": 900,
+ "prediction_horizon": 5,
+ "parameters": [
+ {"name": "s_T", "value": 3},
+ {"name": "r_mDot", "value": 1},
+ ],
+ "inputs": [
+ {"name": "load", "value": 150},
+ {"name": "T_upper", "value": ub},
+ {"name": "T_in", "value": 290.15},
+ ],
+ "controls": [{"name": "mDot", "value": 0.02, "ub": 1, "lb": 0}],
+ "states": [{"name": "T", "value": 298.16, "ub": 303.15, "lb": 288.15}],
+ },
+ ]}
+
Aside from that, “time_step” and “prediction_horizon” need to be specified.
+The other options the MPC module takes are parameters
, inputs
, controls
and
+states
. The time step should be provided
+in seconds. The states in the MPC config refer to differential variables,
+not to be confused with states in the model, which refer to any internal
+variables. Quantities declared in the module config are variables of the multi-agent-system and
+can be shared with other modules of the same agent, and communicated with
+other agents. All agent variables declared here must match - in name - their
+counterpart in the provided model. Controls, states and inputs must be
+provided fully matching the model. Outputs can be ignored if they are not
+required. Finally, parameters can be omitted, if a default value is provided
+in the model definition. Here, the weight parameters in the cost function
+are provided, as it might be required to change them. However, physical
+parameters such as the thermal capacity of air are taken from the model, as
+they are not expected to change. A variable is given a name and a value. For states, the value will determine
+the initial value of the differential variable, if it is not provided
+externally, for example by a simulation agent. Since controls and
+states are the variables of the optimization
+problem, boundaries should be provided via the keys “ub” and “lb”. These
+values are for constant hard boundaries. If time-variant boundaries are
+required, one should declare an additional variable and constraint in the model.
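A sketch of that pattern, reusing the tuple constraint syntax from the model section above (the variable name T_upper_var is hypothetical):

# in the model config: an extra input carrying the time-variant bound
CasadiInput(name="T_upper_var", value=294.15, unit="K", description="Time-variant upper bound for T")

# in setup_system: enforce it as a constraint instead of a hard box bound
# (ca is the casadi module; 0 <= T_upper_var - T means T <= T_upper_var)
self.constraints.append((0, self.T_upper_var - self.T, ca.inf))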
Now that we have our control agent setup, we need to simulate our system.
The easiest way to do this in agentlib is to set up an agent with a
+simulator
module. Usually in agentlib, we would use an FMU to simulate a
+system. In this example, we will use the CasadiModel we created for the
+optimization. The resulting agent config is shown below.
AGENT_SIM = {"id": "SimAgent",
+ "modules": [
+ {"module_id": "Ag1Com",
+ "type": "local_broadcast"},
+ {"module_id": "room",
+ "type": "simulator",
+ "model": {"type": {"file": __file__,
+ "class_name": "MyCasadiModel"},
+ "states": [
+ {"name": "T", "value": 298.16}
+ ]},
+ "t_sample": 60,
+ "outputs": [
+ {"name": "T_out", "alias": "T"},
+ ],
+ "inputs": [
+ {"name": "mDot", "value": 0.02, "alias": "mDot"},
+ ]},
+ ]}
+
The model type for the simulator is provided in the same manner as before. +However, here we can see, that we have the option to provide additional +variable options to the model. For example, here we change the starting +value of the temperature to a value above the upper (soft) boundary, so our +controller has to work.
Next, we specify the inputs and outputs of the simulator. Every simulator needs to be provided with a sampling rate “t_sample” in seconds. Additionally, we declare the output “T_out”. This is the first time we use the
+alias keyword. The alias is part of the duo of alias
and source
that uniquely
+define a variable within the MAS. The source is the combination of the agent_id
+and the module_id
where the variable was defined. When expecting variables from
+another agent, only the agent_id
has to be specified, and when the variable is from
a module within the same agent, the module_id should be specified. The
+alias
is a name independent of the variable name in models (think of long Modelica names)
+and is consistent across agents for the same variable. By default, the name
+of a variable is also its alias. In the case of T_out however, we have to
+specify that this is the variable we want to send to the MPC for its initial
+state. Since the state in the MPC agent is named “T” (and by default has
+alias “T”), we have to set “T” as the alias for our model output. The model in turn takes the computed mass flow setpoint as input, hence we
+also have to declare the input. Since the names mDot already match between
+simulator and MPC, the explicit alias declaration is redundant in this case.
+Additionally, we set a default value for “mDot”, which is used before the
+first value is received from the MPC.
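To make the matching fully explicit, a variable can also pin down both alias and source in the config (a sketch based on the ids in this example; the exact source syntax is assumed):

{"name": "T", "alias": "T", "source": {"agent_id": "SimAgent"}}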
Before every step, the simulator gets the current input values from the agent and sets them to the model. After performing the step, the outputs from the model are written to the agent. Since states by definition are internal to the model, they are not set by the agent and their initial values have to be changed in the model itself. The same goes for parameters.
With all of the setup done, we can now see our MAS run.
+Last time, we manually created the agents and the environment. This time, we
+use the LocalMASAgency
utility to set up the system and save results. By
+setting the variable_logging
option to True, time series of all agent
+variables present in the system will be saved. After running the MAS, we can
+retrieve and plot the results of our simulation.
def run_example(with_plots=True):
+ mas = LocalMASAgency(agent_configs=[AGENT_MPC, AGENT_SIM],
+ env=ENV_CONFIG,
+ variable_logging=True)
+ mas.run(until=10000)
+ results = mas.get_results()
+
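A possible continuation of run_example for plotting (a sketch; the structure of the results dict and the module key "AgentLogger" are assumed, so adjust them to the keys you actually get):

    if with_plots:
        # agent id "SimAgent" from the config above; logger module key assumed
        results["SimAgent"]["AgentLogger"]["T_out"].plot()
        plt.xlabel("time in s")
        plt.ylabel("room temperature in K")
        plt.show()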
Explanation of ‘examples/multi-agent-systems/casadi/mpc_trajectories.py’
+tbd
+This section collects tutorials on how to get started with agentlib.
+This tutorial is periodically updated, but for the most current and accurate information, please refer to the actual example files in the repository. The example files should be considered the authoritative source if there are any discrepancies.
ci.test_examples (ci/test_examples.py)

Line | Col. | Type | Symbol | ID | Obj | Message
---|---|---|---|---|---|---
5 | 0 | warning | unused-import | W0611 | | Unused import subprocess
9 | 0 | error | import-error | E0401 | | Unable to import 'pytest'
65 | 8 | warning | pointless-string-statement | W0105 | TestExamples.test_mpc | String statement has no effect
73 | 4 | convention | missing-function-docstring | C0116 | TestExamples.test_admm_local | Missing function or method docstring
83 | 4 | convention | missing-function-docstring | C0116 | TestExamples.test_admm_coordinated | Missing function or method docstring
93 | 4 | convention | missing-function-docstring | C0116 | TestExamples.test_exchange_admm | Missing function or method docstring
109 | 4 | convention | missing-function-docstring | C0116 | TestExamples.test_admm_mp_broadcast | Missing function or method docstring
/builds/EBC/EBC_all/github_ci/AgentLib-MPC/pylintrc (pylintrc)

Line | Col. | Type | Symbol | ID | Obj | Message
---|---|---|---|---|---|---
1 | 0 | refactor | useless-option-value | R0022 | | Useless option value for '--disable', '<option>' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942.

All entries share the same location, type, symbol, ID and message template; only the removed option value differs. The affected option values are: print-statement, parameter-unpacking, unpacking-in-except, old-raise-syntax, backtick, import-star-module-level, apply-builtin, basestring-builtin, buffer-builtin, cmp-builtin, coerce-builtin, execfile-builtin, file-builtin, long-builtin, raw_input-builtin, reduce-builtin, standarderror-builtin, unicode-builtin, xrange-builtin, coerce-method, delslice-method, getslice-method, setslice-method, no-absolute-import, old-division, dict-iter-method, dict-view-method, next-method-called, metaclass-assignment, and indexing-exception.
+ R0022 | ++ | Useless option value for '--disable', 'indexing-exception' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'raising-string' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'reload-builtin' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'oct-method' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'hex-method' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'nonzero-method' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'cmp-method' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'input-builtin' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'round-builtin' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'intern-builtin' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'unichr-builtin' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'map-builtin-not-iterating' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'zip-builtin-not-iterating' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'range-builtin-not-iterating' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'filter-builtin-not-iterating' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'using-cmp-argument' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'div-method' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'idiv-method' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'rdiv-method' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'exception-message-attribute' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'invalid-str-codec' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'sys-max-int' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'bad-python3-import' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'deprecated-string-function' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'deprecated-str-translate-call' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'deprecated-itertools-function' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'deprecated-types-field' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'next-method-defined' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'dict-items-not-iterating' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'dict-keys-not-iterating' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'dict-values-not-iterating' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'deprecated-operator-function' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'deprecated-urllib-function' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'xreadlines-attribute' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'deprecated-sys-function' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'exception-escape' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'comprehension-escape' was removed from pylint, see https://github.com/pylint-dev/pylint/pull/4942. |
+
1 | +0 | +refactor | +useless-option-value |
+ R0022 | ++ | Useless option value for '--disable', 'no-self-use' was moved to an optional extension, see https://pylint.readthedocs.io/en/latest/whatsnew/2/2.14/summary.html#removed-checkers. |
+
1 | +0 | +warning | +unknown-option-value |
+ W0012 | ++ | Unknown option value for '--disable', expected a valid pylint message and got 'long-suffix' |
+
1 | +0 | +warning | +unknown-option-value |
+ W0012 | ++ | Unknown option value for '--disable', expected a valid pylint message and got 'old-ne-operator' |
+
1 | +0 | +warning | +unknown-option-value |
+ W0012 | ++ | Unknown option value for '--disable', expected a valid pylint message and got 'old-octal-literal' |
+
1 | +0 | +warning | +unknown-option-value |
+ W0012 | ++ | Unknown option value for '--disable', expected a valid pylint message and got 'non-ascii-bytes-literal' |
+
1 | +0 | +warning | +unknown-option-value |
+ W0012 | ++ | Unknown option value for '--disable', expected a valid pylint message and got 'eq-without-hash' |
+
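All of these messages point at stale entries in the `disable=` list, left over from the Python 2/3 porting checkers that pylint has since removed. The sketch below prunes them programmatically; it assumes the pylintrc uses the standard INI layout with a `[MESSAGES CONTROL]` section, and the `OBSOLETE` set is copied (abbreviated here) from the names listed above:

```python
"""Prune obsolete message names from a pylintrc 'disable' list (sketch)."""
import configparser

# Names flagged above as R0022/W0012 (abbreviated; the full set is listed above).
OBSOLETE = {
    "print-statement",
    "parameter-unpacking",
    "no-self-use",
    "long-suffix",
    "eq-without-hash",
    # ... remaining names from the report
}

parser = configparser.ConfigParser(interpolation=None)  # pylintrc is INI
parser.read("pylintrc")
names = parser["MESSAGES CONTROL"]["disable"].split(",")
kept = [n.strip() for n in names if n.strip() and n.strip() not in OBSOLETE]
parser["MESSAGES CONTROL"]["disable"] = ",\n".join(kept)

with open("pylintrc", "w", encoding="utf-8") as handle:
    parser.write(handle)
```

Note that `configparser.write` rewrites the whole file and drops comments, so a plain text edit of the `disable=` block may be preferable in practice.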
**conftest** (tests/conftest.py)

| Line | Col. | Type | Symbol | ID | Obj | Message |
|---|---|---|---|---|---|---|
| 1 | 0 | convention | missing-module-docstring | C0114 | | Missing module docstring |
| 2 | 0 | error | import-error | E0401 | | Unable to import 'pytest' |
| 7 | 0 | convention | missing-function-docstring | C0116 | model_type | Missing function or method docstring |
| 13 | 0 | convention | missing-function-docstring | C0116 | example_casadi_model | Missing function or method docstring |
| 13 | 25 | warning | redefined-outer-name | W0621 | example_casadi_model | Redefining name 'model_type' from outer scope (line 7) |
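W0621 here is the classic pytest false positive: a fixture's name is deliberately reused as a parameter name in dependent fixtures and tests. A common resolution is to keep the pattern and silence the checker locally; a sketch using the fixture names from the report (bodies are placeholders):

```python
"""Shared fixtures for the test suite."""
import pytest


@pytest.fixture
def model_type():
    """Return the model type identifier used across tests."""
    return "casadi"


@pytest.fixture
def example_casadi_model(model_type):  # pylint: disable=redefined-outer-name
    """Build an example model for the given model type."""
    return {"type": model_type}  # placeholder body
```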
**test_admm** (tests/test_admm.py)

| Line | Col. | Type | Symbol | ID | Obj | Message |
|---|---|---|---|---|---|---|
| 1 | 0 | convention | missing-module-docstring | C0114 | | Missing module docstring |
| 22 | 0 | convention | invalid-name | C0103 | | Constant name "a" doesn't conform to UPPER_CASE naming style |
| 63 | 0 | convention | missing-class-docstring | C0115 | TestRTADMM | Missing class docstring |
| 74 | 4 | convention | missing-function-docstring | C0116 | TestRTADMM.test_admm_init | Missing function or method docstring |
| 76 | 8 | warning | unused-variable | W0612 | TestRTADMM.test_admm_init | Unused variable 'admm_module' |
| 80 | 4 | convention | missing-function-docstring | C0116 | TestRTADMM.test_comm | Missing function or method docstring |
| 85 | 13 | warning | protected-access | W0212 | TestRTADMM.test_comm | Access to a protected member _start_executing_callbacks of a client class |
| 86 | 13 | warning | protected-access | W0212 | TestRTADMM.test_comm | Access to a protected member _start_executing_callbacks of a client class |
| 88 | 8 | warning | attribute-defined-outside-init | W0201 | TestRTADMM.test_comm | Attribute 'admm_module1' defined outside __init__ |
| 89 | 8 | warning | attribute-defined-outside-init | W0201 | TestRTADMM.test_comm | Attribute 'admm_module2' defined outside __init__ |
| 92 | 8 | warning | attribute-defined-outside-init | W0201 | TestRTADMM.test_comm | Attribute 'counter' defined outside __init__ |
| 93 | 8 | warning | attribute-defined-outside-init | W0201 | TestRTADMM.test_comm | Attribute 'counter2' defined outside __init__ |
| 109 | 8 | warning | protected-access | W0212 | TestRTADMM.test_comm | Access to a protected member _solve_local_optimization of a client class |
| 110 | 12 | warning | protected-access | W0212 | TestRTADMM.test_comm | Access to a protected member _solve_local_optimization_debug of a client class |
| 112 | 8 | warning | protected-access | W0212 | TestRTADMM.test_comm | Access to a protected member _solve_local_optimization of a client class |
| 113 | 12 | warning | protected-access | W0212 | TestRTADMM.test_comm | Access to a protected member _solve_local_optimization_debug of a client class |
| 118 | 19 | warning | protected-access | W0212 | TestRTADMM.test_comm | Access to a protected member _admm_loop of a client class |
| 123 | 19 | warning | protected-access | W0212 | TestRTADMM.test_comm | Access to a protected member _admm_loop of a client class |
| 127 | 13 | warning | protected-access | W0212 | TestRTADMM.test_comm | Access to a protected member _process_realtime of a client class |
| 128 | 13 | warning | protected-access | W0212 | TestRTADMM.test_comm | Access to a protected member _process_realtime of a client class |
| 149 | 24 | warning | protected-access | W0212 | TestRTADMM.test_comm | Access to a protected member _admm_variables of a client class |
| 150 | 24 | warning | protected-access | W0212 | TestRTADMM.test_comm | Access to a protected member _admm_variables of a client class |
| 163 | 4 | convention | missing-function-docstring | C0116 | TestRTADMM.new_receive | Missing function or method docstring |
| 167 | 4 | convention | missing-function-docstring | C0116 | TestRTADMM.new_receive_2 | Missing function or method docstring |
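The W0201 rows arise because test state is assigned inside a test method rather than declared up front. A sketch of the usual unittest remedy (attribute names from the report; everything else is placeholder):

```python
import unittest


class TestRTADMM(unittest.TestCase):
    """Tests for the real-time ADMM module."""

    def setUp(self):
        """Declare all instance state up front, which clears W0201."""
        self.admm_module1 = None
        self.admm_module2 = None
        self.counter = 0
        self.counter2 = 0
```

The protected-access warnings (W0212) are usually tolerated in tests, either via a targeted `# pylint: disable=protected-access` on the affected lines or a per-directory configuration.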
**test_casadi_backend** (tests/test_casadi_backend.py)

| Line | Col. | Type | Symbol | ID | Obj | Message |
|---|---|---|---|---|---|---|
| 1 | 0 | convention | missing-module-docstring | C0114 | | Missing module docstring |
| 4 | 0 | error | import-error | E0401 | | Unable to import 'pytest' |
| 26 | 0 | convention | missing-function-docstring | C0116 | var_ref | Missing function or method docstring |
| 37 | 0 | convention | missing-function-docstring | C0116 | example_casadi_system | Missing function or method docstring |
| 37 | 48 | warning | redefined-outer-name | W0621 | example_casadi_system | Redefining name 'var_ref' from outer scope (line 26) |
| 44 | 0 | convention | missing-function-docstring | C0116 | collocation_discretization | Missing function or method docstring |
| 44 | 31 | warning | redefined-outer-name | W0621 | collocation_discretization | Redefining name 'example_casadi_system' from outer scope (line 37) |
| 55 | 0 | convention | missing-function-docstring | C0116 | example_backend | Missing function or method docstring |
| 55 | 32 | warning | redefined-outer-name | W0621 | example_backend | Redefining name 'var_ref' from outer scope (line 26) |
| 56 | 4 | convention | invalid-name | C0103 | example_backend | Variable name "be" doesn't conform to snake_case naming style |
| 61 | 0 | convention | missing-function-docstring | C0116 | test_optimization_variable | Missing function or method docstring |
| 68 | 8 | warning | unused-variable | W0612 | test_optimization_variable | Unused variable 'optimization_variable' |
| 75 | 4 | warning | unused-variable | W0612 | test_optimization_variable | Unused variable 'optimization_variable_1' |
| 90 | 8 | warning | unused-variable | W0612 | test_optimization_variable | Unused variable 'optimization_variable_3' |
| 97 | 0 | convention | missing-function-docstring | C0116 | test_system | Missing function or method docstring |
| 97 | 16 | warning | redefined-outer-name | W0621 | test_system | Redefining name 'example_casadi_system' from outer scope (line 37) |
| 113 | 0 | convention | missing-function-docstring | C0116 | test_discretization | Missing function or method docstring |
| 114 | 4 | warning | redefined-outer-name | W0621 | test_discretization | Redefining name 'collocation_discretization' from outer scope (line 44) |
| 114 | 51 | warning | redefined-outer-name | W0621 | test_discretization | Redefining name 'example_casadi_system' from outer scope (line 37) |
| 135 | 0 | convention | missing-function-docstring | C0116 | test_add_opt | Missing function or method docstring |
| 135 | 17 | warning | redefined-outer-name | W0621 | test_add_opt | Redefining name 'example_casadi_system' from outer scope (line 37) |
| 154 | 0 | convention | missing-function-docstring | C0116 | test_create_backend | Missing function or method docstring |
| 154 | 24 | warning | redefined-outer-name | W0621 | test_create_backend | Redefining name 'example_backend' from outer scope (line 55) |
| 155 | 4 | convention | invalid-name | C0103 | test_create_backend | Variable name "be" doesn't conform to snake_case naming style |
| 159 | 13 | warning | protected-access | W0212 | test_create_backend | Access to a protected member _mpc_inputs_to_nlp_inputs of a client class |
**test_casadi_ml_model** (tests/test_casadi_ml_model.py)

| Line | Col. | Type | Symbol | ID | Obj | Message |
|---|---|---|---|---|---|---|
| 3 | 0 | error | import-error | E0401 | | Unable to import 'pytest' |
| 9 | 0 | convention | missing-class-docstring | C0115 | CasadiMLTestConfig | Missing class docstring |
| 14 | 0 | convention | missing-class-docstring | C0115 | CasadiMLTestModel | Missing class docstring |
| 23 | 0 | convention | missing-function-docstring | C0116 | test_casadi_ml_model | Missing function or method docstring |
**test_mpc** (tests/test_mpc.py)

| Line | Col. | Type | Symbol | ID | Obj | Message |
|---|---|---|---|---|---|---|
| 1 | 0 | convention | missing-module-docstring | C0114 | | Missing module docstring |
| 6 | 0 | convention | wrong-import-order | C0411 | | standard import "import pathlib" should be placed before "import numpy as np" |
| 13 | 0 | warning | unused-import | W0611 | | Unused OptimizationBackend imported from agentlib_mpc.optimization_backends.backend |
| 18 | 0 | convention | invalid-name | C0103 | | Constant name "a" doesn't conform to UPPER_CASE naming style |
| 24 | 4 | convention | missing-function-docstring | C0116 | TestSampling.test_sample_datetime | Missing function or method docstring |
| 75 | 4 | convention | missing-function-docstring | C0116 | TestSampling.test_get_scalar | Missing function or method docstring |
| 81 | 4 | convention | missing-function-docstring | C0116 | TestSampling.test_series | Missing function or method docstring |
| 82 | 8 | convention | invalid-name | C0103 | TestSampling.test_series | Variable name "sr" doesn't conform to snake_case naming style |
| 94 | 4 | convention | missing-function-docstring | C0116 | TestSampling.test_list | Missing function or method docstring |
| 105 | 4 | convention | missing-function-docstring | C0116 | TestSampling.test_previous_interpolation_method | Missing function or method docstring |
| 152 | 4 | convention | invalid-name | C0103 | TestCasadiMPC.test_CasadiMPC | Method name "test_CasadiMPC" doesn't conform to snake_case naming style |
| 158 | 14 | warning | protected-access | W0212 | TestCasadiMPC.test_CasadiMPC | Access to a protected member _modules of a client class |
| 166 | 4 | convention | missing-function-docstring | C0116 | TestCasadiMPC.test_incomplete_config | Missing function or method docstring |
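C0103 covers three distinct naming rules in this file: module-level names are treated as constants (UPPER_CASE), locals use snake_case, and method names use snake_case. A sketch of conforming renames (the original names `a`, `sr`, and `test_CasadiMPC` are from the report; bodies are placeholders):

```python
A = 42  # module-level names are treated as constants: UPPER_CASE (was `a`)


class TestCasadiMPC:
    """Sketch of C0103-conforming names only."""

    def test_casadi_mpc(self):  # method names use snake_case (was test_CasadiMPC)
        """Check that the renamed method behaves as before."""
        sample_rate = 1.0  # a descriptive snake_case local instead of `sr`
        assert sample_rate > 0
```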
**test_serialized_ann** (tests/test_serialized_ann.py)

| Line | Col. | Type | Symbol | ID | Obj | Message |
|---|---|---|---|---|---|---|
| 5 | 0 | convention | wrong-import-order | C0411 | | standard import "import unittest" should be placed before "import numpy as np" |
| 8 | 0 | convention | wrong-import-order | C0411 | | standard import "from pathlib import Path" should be placed before "import numpy as np" |
| 13 | 0 | convention | wrong-import-order | C0411 | | third party import "from fixtures.ann import ANNTrainer" should be placed before "from agentlib_mpc.data_structures import ml_model_datatypes" |
| 14 | 0 | convention | wrong-import-order | C0411 | | third party import "from fixtures.data_generator import DataGenerator" should be placed before "from agentlib_mpc.data_structures import ml_model_datatypes" |
**test_serialized_gpr** (tests/test_serialized_gpr.py)

| Line | Col. | Type | Symbol | ID | Obj | Message |
|---|---|---|---|---|---|---|
| 1 | 0 | convention | missing-module-docstring | C0114 | | Missing module docstring |
| 2 | 0 | convention | wrong-import-order | C0411 | | standard import "import os" should be placed before "import numpy as np" |
| 3 | 0 | convention | wrong-import-order | C0411 | | standard import "import unittest" should be placed before "import numpy as np" |
| 5 | 0 | convention | wrong-import-order | C0411 | | standard import "from pathlib import Path" should be placed before "import numpy as np" |
| 10 | 0 | warning | unused-import | W0611 | | Unused MLModels imported from agentlib_mpc.models.serialized_ml_model |
| 11 | 0 | convention | wrong-import-order | C0411 | | third party import "from fixtures.gpr import GPRTrainer" should be placed before "from agentlib_mpc.data_structures import ml_model_datatypes" |
| 12 | 0 | convention | wrong-import-order | C0411 | | third party import "from fixtures.data_generator import DataGenerator" should be placed before "from agentlib_mpc.data_structures import ml_model_datatypes" |
**test_serialized_linreg** (tests/test_serialized_linreg.py)

| Line | Col. | Type | Symbol | ID | Obj | Message |
|---|---|---|---|---|---|---|
| 1 | 0 | convention | missing-module-docstring | C0114 | | Missing module docstring |
| 2 | 0 | convention | wrong-import-order | C0411 | | standard import "import os" should be placed before "import numpy as np" |
| 3 | 0 | convention | wrong-import-order | C0411 | | standard import "import unittest" should be placed before "import numpy as np" |
| 5 | 0 | convention | wrong-import-order | C0411 | | standard import "from pathlib import Path" should be placed before "import numpy as np" |
| 11 | 0 | convention | wrong-import-order | C0411 | | third party import "from fixtures.linreg import LinRegTrainer" should be placed before "from agentlib_mpc.data_structures import ml_model_datatypes" |
| 12 | 0 | convention | wrong-import-order | C0411 | | third party import "from fixtures.data_generator import DataGenerator" should be placed before "from agentlib_mpc.data_structures import ml_model_datatypes" |
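All C0411 findings across the three serialized-model test files follow the same pattern: standard-library imports appear after `numpy`. PEP 8 grouping (standard library, then third party, then first party) clears them. A sketch using the imports named in the report; note that pylint classified the local `fixtures` helpers as third party here, so they sort before the `agentlib_mpc` package:

```python
"""Tests for serialized ML models (import order per PEP 8 / C0411 sketch)."""
# Standard library first ...
import os
import unittest
from pathlib import Path

# ... then what pylint classifies as third party (numpy, and here
# also the local `fixtures` helpers) ...
import numpy as np
from fixtures.data_generator import DataGenerator

# ... then the package under test, which pylint treats as first party.
from agentlib_mpc.data_structures import ml_model_datatypes
```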
Messages by category:

| Name | Count |
|---|---|
| refactor | 67 |
| warning | 40 |
| error | 4 |
| convention | 56 |

Messages by symbol:

| Name | Count |
|---|---|
| useless-option-value | 67 |
| unknown-option-value | 5 |
| import-error | 4 |
| pointless-string-statement | 1 |
| missing-function-docstring | 26 |
| unused-import | 3 |
| missing-module-docstring | 6 |
| redefined-outer-name | 9 |
| invalid-name | 6 |
| missing-class-docstring | 3 |
| unused-variable | 4 |
| protected-access | 14 |
| attribute-defined-outside-init | 4 |
| wrong-import-order | 15 |

Messages by module:

| Name | Count |
|---|---|
| /builds/EBC/EBC_all/github_ci/AgentLib-MPC/pylintrc | 72 |
| ci.test_examples | 7 |
| conftest | 5 |
| test_admm | 24 |
| test_casadi_backend | 25 |
| test_casadi_ml_model | 4 |
| test_mpc | 13 |
| test_serialized_ann | 4 |
| test_serialized_gpr | 7 |
| test_serialized_linreg | 6 |

Messages by file:

| Name | Count |
|---|---|
| pylintrc | 72 |
| ci/test_examples.py | 7 |
| tests/conftest.py | 5 |
| tests/test_admm.py | 24 |
| tests/test_casadi_backend.py | 25 |
| tests/test_casadi_ml_model.py | 4 |
| tests/test_mpc.py | 13 |
| tests/test_serialized_ann.py | 4 |
| tests/test_serialized_gpr.py | 7 |
| tests/test_serialized_linreg.py | 6 |
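The summary counts can be recomputed from a fresh pylint run. A minimal sketch: it invokes pylint's JSON reporter (the `--output-format=json` and `--rcfile` flags are standard pylint options) and tallies messages by type and by symbol, mirroring the first two Name/Count tables; the target directories `ci` and `tests` are assumed from the paths in this report.

```python
"""Recompute the summary counts above from a fresh pylint run (sketch)."""
import json
import subprocess
from collections import Counter

result = subprocess.run(
    ["pylint", "--rcfile=pylintrc", "--output-format=json", "ci", "tests"],
    capture_output=True,
    text=True,
    check=False,  # pylint exits non-zero whenever it finds messages
)
messages = json.loads(result.stdout)  # list of message dicts

by_type = Counter(m["type"] for m in messages)      # refactor/warning/...
by_symbol = Counter(m["symbol"] for m in messages)  # useless-option-value/...
for table in (by_type, by_symbol):
    for name, count in table.most_common():
        print(f"{name}: {count}")
    print()
```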