diff --git a/Cargo.toml b/Cargo.toml index a7eebef2..23513cb5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,7 +33,7 @@ opt-level = 3 # fast and small wasm [workspace.dependencies] -serde = { version = "1", features = ["derive"] } +serde = { version = "1", features = ["derive", ] } serde_json = "1.0" thiserror = "1.0.25" num = "0.4.0" diff --git a/pywr-core/src/aggregated_node.rs b/pywr-core/src/aggregated_node.rs index aa57ad0c..a9917339 100644 --- a/pywr-core/src/aggregated_node.rs +++ b/pywr-core/src/aggregated_node.rs @@ -48,11 +48,11 @@ impl AggregatedNodeVec { &mut self, name: &str, sub_name: Option<&str>, - nodes: &[NodeIndex], - factors: Option<Factors>, + nodes: &[Vec<NodeIndex>], + relationship: Option<Relationship>, ) -> AggregatedNodeIndex { let node_index = AggregatedNodeIndex(self.nodes.len()); - let node = AggregatedNode::new(&node_index, name, sub_name, nodes, factors); + let node = AggregatedNode::new(&node_index, name, sub_name, nodes, relationship); self.nodes.push(node); node_index } @@ -74,35 +74,74 @@ impl Factors { } } +#[derive(Debug, PartialEq)] +pub struct Exclusivity { + // The minimum number of nodes that must be active + min_active: usize, + // The maximum number of nodes that can be active + max_active: usize, +} + +impl Exclusivity { + pub fn min_active(&self) -> usize { + self.min_active + } + pub fn max_active(&self) -> usize { + self.max_active + } +} + +/// Additional relationship between nodes in an aggregated node. +#[derive(Debug, PartialEq)] +pub enum Relationship { + /// Node flows are related to one another by a set of factors. + Factored(Factors), + /// Node flows are mutually exclusive. + Exclusive(Exclusivity), +} + +impl Relationship { + pub fn new_ratio_factors(factors: &[MetricF64]) -> Self { + Relationship::Factored(Factors::Ratio(factors.to_vec())) + } + pub fn new_proportion_factors(factors: &[MetricF64]) -> Self { + Relationship::Factored(Factors::Proportion(factors.to_vec())) + } + + pub fn new_exclusive(min_active: usize, max_active: usize) -> Self { + Relationship::Exclusive(Exclusivity { min_active, max_active }) + } +} + #[derive(Debug, PartialEq)] pub struct AggregatedNode { meta: NodeMeta<AggregatedNodeIndex>, flow_constraints: FlowConstraints, - nodes: Vec<NodeIndex>, - factors: Option<Factors>, + nodes: Vec<Vec<NodeIndex>>, + relationship: Option<Relationship>, } #[derive(Debug, PartialEq, Copy, Clone)] -pub struct NodeFactor { - pub index: NodeIndex, +pub struct NodeFactor<'a> { + pub indices: &'a [NodeIndex], pub factor: f64, } -impl NodeFactor { - fn new(node: NodeIndex, factor: f64) -> Self { - Self { index: node, factor } +impl<'a> NodeFactor<'a> { + fn new(indices: &'a [NodeIndex], factor: f64) -> Self { + Self { indices, factor } } } /// A pair of nodes and their factors #[derive(Debug, PartialEq, Copy, Clone)] -pub struct NodeFactorPair { - pub node0: NodeFactor, - pub node1: NodeFactor, +pub struct NodeFactorPair<'a> { + pub node0: NodeFactor<'a>, + pub node1: NodeFactor<'a>, } -impl NodeFactorPair { - fn new(node0: NodeFactor, node1: NodeFactor) -> Self { +impl<'a> NodeFactorPair<'a> { + fn new(node0: NodeFactor<'a>, node1: NodeFactor<'a>) -> Self { Self { node0, node1 } } @@ -114,26 +153,26 @@ impl NodeFactorPair { /// A constant node factor. If the factor is non-constant, the factor value here is `None`.
#[derive(Debug, PartialEq, Copy, Clone)] -pub struct NodeConstFactor { - pub index: NodeIndex, +pub struct NodeConstFactor<'a> { + pub indices: &'a [NodeIndex], pub factor: Option<f64>, } -impl NodeConstFactor { - fn new(node: NodeIndex, factor: Option<f64>) -> Self { - Self { index: node, factor } +impl<'a> NodeConstFactor<'a> { + fn new(indices: &'a [NodeIndex], factor: Option<f64>) -> Self { + Self { indices, factor } } } /// A pair of nodes and their factors #[derive(Debug, PartialEq, Copy, Clone)] -pub struct NodeConstFactorPair { - pub node0: NodeConstFactor, - pub node1: NodeConstFactor, +pub struct NodeConstFactorPair<'a> { + pub node0: NodeConstFactor<'a>, + pub node1: NodeConstFactor<'a>, } -impl NodeConstFactorPair { - fn new(node0: NodeConstFactor, node1: NodeConstFactor) -> Self { +impl<'a> NodeConstFactorPair<'a> { + fn new(node0: NodeConstFactor<'a>, node1: NodeConstFactor<'a>) -> Self { Self { node0, node1 } } @@ -152,14 +191,14 @@ impl AggregatedNode { index: &AggregatedNodeIndex, name: &str, sub_name: Option<&str>, - nodes: &[NodeIndex], - factors: Option<Factors>, + nodes: &[Vec<NodeIndex>], + relationship: Option<Relationship>, ) -> Self { Self { meta: NodeMeta::new(index, name, sub_name), flow_constraints: FlowConstraints::default(), nodes: nodes.to_vec(), - factors, + relationship, } } @@ -181,33 +220,66 @@ impl AggregatedNode { *self.meta.index() } - pub fn get_nodes(&self) -> Vec<NodeIndex> { - self.nodes.to_vec() + pub fn iter_nodes(&self) -> impl Iterator<Item = &[NodeIndex]> { + self.nodes.iter().map(|n| n.as_slice()) + } + + /// Does the aggregated node have a mutual exclusivity relationship? + pub fn has_exclusivity(&self) -> bool { + self.relationship + .as_ref() + .map(|r| matches!(r, Relationship::Exclusive(_))) + .unwrap_or(false) + } /// Does the aggregated node have factors? pub fn has_factors(&self) -> bool { - self.factors.is_some() + self.relationship + .as_ref() + .map(|r| matches!(r, Relationship::Factored(_))) + .unwrap_or(false) } /// Does the aggregated node have constant factors?
pub fn has_const_factors(&self) -> bool { - self.factors.as_ref().map(|f| f.is_constant()).unwrap_or(false) + self.relationship + .as_ref() + .map(|r| match r { + Relationship::Factored(f) => f.is_constant(), + _ => false, + }) + .unwrap_or(false) } - pub fn set_factors(&mut self, factors: Option) { - self.factors = factors; + pub fn set_relationship(&mut self, relationship: Option) { + self.relationship = relationship; + } + + pub fn get_exclusivity(&self) -> Option<&Exclusivity> { + self.relationship.as_ref().and_then(|r| match r { + Relationship::Factored(_) => None, + Relationship::Exclusive(e) => Some(e), + }) } pub fn get_factors(&self) -> Option<&Factors> { - self.factors.as_ref() + self.relationship.as_ref().and_then(|r| match r { + Relationship::Factored(f) => Some(f), + Relationship::Exclusive(_) => None, + }) } /// Return normalised factor pairs - pub fn get_factor_node_pairs(&self) -> Option> { - if self.factors.is_some() { - let n0 = self.nodes[0]; - - Some(self.nodes.iter().skip(1).map(|&n1| (n0, n1)).collect::>()) + pub fn get_factor_node_pairs(&self) -> Option> { + if self.has_factors() { + let n0 = self.nodes[0].as_slice(); + + Some( + self.nodes + .iter() + .skip(1) + .map(|n1| (n0, n1.as_slice())) + .collect::>(), + ) } else { None } @@ -215,7 +287,7 @@ impl AggregatedNode { /// Return constant normalised factor pairs pub fn get_const_norm_factor_pairs(&self, values: &ConstParameterValues) -> Option> { - if let Some(factors) = &self.factors { + if let Some(factors) = self.get_factors() { let pairs = match factors { Factors::Proportion(prop_factors) => { get_const_norm_proportional_factor_pairs(prop_factors, &self.nodes, values) @@ -231,7 +303,7 @@ impl AggregatedNode { /// Return normalised factor pairs /// pub fn get_norm_factor_pairs(&self, model: &Network, state: &State) -> Option> { - if let Some(factors) = &self.factors { + if let Some(factors) = self.get_factors() { let pairs = match factors { Factors::Proportion(prop_factors) => { get_norm_proportional_factor_pairs(prop_factors, &self.nodes, model, state) @@ -300,12 +372,12 @@ impl AggregatedNode { /// There should be one less factor than node indices. The factors correspond to each of the node /// indices after the first. Factor pairs relating the first index to each of the other indices are /// calculated. This requires the sum of the factors to be greater than 0.0 and less than 1.0. -fn get_norm_proportional_factor_pairs( +fn get_norm_proportional_factor_pairs<'a>( factors: &[MetricF64], - nodes: &[NodeIndex], + nodes: &'a [Vec], model: &Network, state: &State, -) -> Vec { +) -> Vec> { if factors.len() != nodes.len() - 1 { panic!("Found {} proportional factors and {} nodes in aggregated node. The number of proportional factors should equal one less than the number of nodes.", factors.len(), nodes.len()); } @@ -333,13 +405,13 @@ fn get_norm_proportional_factor_pairs( } let f0 = 1.0 - total; - let n0 = nodes[0]; + let n0 = nodes[0].as_slice(); nodes .iter() .skip(1) .zip(values) - .map(move |(&n1, f1)| NodeFactorPair::new(NodeFactor::new(n0, f0), NodeFactor::new(n1, f1))) + .map(move |(n1, f1)| NodeFactorPair::new(NodeFactor::new(n0, f0), NodeFactor::new(n1.as_slice(), f1))) .collect::>() } @@ -349,11 +421,11 @@ fn get_norm_proportional_factor_pairs( /// indices after the first. Factor pairs relating the first index to each of the other indices are /// calculated. This requires the sum of the factors to be greater than 0.0 and less than 1.0. 
If /// any of the factors are not constant, the factor pairs will contain `None` values. -fn get_const_norm_proportional_factor_pairs( +fn get_const_norm_proportional_factor_pairs<'a>( factors: &[MetricF64], - nodes: &[NodeIndex], + nodes: &'a [Vec], values: &ConstParameterValues, -) -> Vec { +) -> Vec> { if factors.len() != nodes.len() - 1 { panic!("Found {} proportional factors and {} nodes in aggregated node. The number of proportional factors should equal one less than the number of nodes.", factors.len(), nodes.len()); } @@ -376,7 +448,7 @@ fn get_const_norm_proportional_factor_pairs( .collect::, PywrError>>() .expect("Failed to get current factor values. Ensure that all factors are not negative."); - let n0 = nodes[0]; + let n0 = nodes[0].as_slice(); // To calculate the factors we require that every factor is available. if values.iter().any(|v| v.is_none()) { @@ -385,8 +457,8 @@ fn get_const_norm_proportional_factor_pairs( .iter() .skip(1) .zip(values) - .map(move |(&n1, f1)| { - NodeConstFactorPair::new(NodeConstFactor::new(n0, None), NodeConstFactor::new(n1, f1)) + .map(move |(n1, f1)| { + NodeConstFactorPair::new(NodeConstFactor::new(n0, None), NodeConstFactor::new(n1.as_slice(), f1)) }) .collect::>() } else { @@ -408,7 +480,9 @@ fn get_const_norm_proportional_factor_pairs( .iter() .skip(1) .zip(values) - .map(move |(&n1, f1)| NodeConstFactorPair::new(NodeConstFactor::new(n0, f0), NodeConstFactor::new(n1, f1))) + .map(move |(n1, f1)| { + NodeConstFactorPair::new(NodeConstFactor::new(n0, f0), NodeConstFactor::new(n1.as_slice(), f1)) + }) .collect::>() } } @@ -418,17 +492,17 @@ fn get_const_norm_proportional_factor_pairs( /// The number of node indices and factors should be equal. The factors correspond to each of the /// node indices. Factor pairs relating the first index to each of the other indices are calculated. /// This requires that the factors are all non-zero. -fn get_norm_ratio_factor_pairs( +fn get_norm_ratio_factor_pairs<'a>( factors: &[MetricF64], - nodes: &[NodeIndex], + nodes: &'a [Vec], model: &Network, state: &State, -) -> Vec { +) -> Vec> { if factors.len() != nodes.len() { panic!("Found {} ratio factors and {} nodes in aggregated node. The number of ratio factors should equal the number of nodes.", factors.len(), nodes.len()); } - let n0 = nodes[0]; + let n0 = nodes[0].as_slice(); let f0 = factors[0].get_value(model, state).unwrap(); if f0 < 0.0 { panic!("Negative factor is not allowed"); @@ -438,12 +512,15 @@ fn get_norm_ratio_factor_pairs( .iter() .zip(factors) .skip(1) - .map(move |(&n1, f1)| { + .map(move |(n1, f1)| { let v1 = f1.get_value(model, state)?; if v1 < 0.0 { Err(PywrError::NegativeFactor) } else { - Ok(NodeFactorPair::new(NodeFactor::new(n0, f0), NodeFactor::new(n1, v1))) + Ok(NodeFactorPair::new( + NodeFactor::new(n0, f0), + NodeFactor::new(n1.as_slice(), v1), + )) } }) .collect::, PywrError>>() @@ -452,16 +529,16 @@ fn get_norm_ratio_factor_pairs( /// Constant ratio factors using constant values if they are available. If they are not available, /// the factors are `None`. -fn get_const_norm_ratio_factor_pairs( +fn get_const_norm_ratio_factor_pairs<'a>( factors: &[MetricF64], - nodes: &[NodeIndex], + nodes: &'a [Vec], values: &ConstParameterValues, -) -> Vec { +) -> Vec> { if factors.len() != nodes.len() { panic!("Found {} ratio factors and {} nodes in aggregated node. 
The number of ratio factors should equal the number of nodes.", factors.len(), nodes.len()); } - let n0 = nodes[0]; + let n0 = nodes[0].as_slice(); // Try to convert the factor into a constant let f0 = factors[0] @@ -478,7 +555,7 @@ fn get_const_norm_ratio_factor_pairs( .iter() .zip(factors) .skip(1) - .map(move |(&n1, f1)| { + .map(move |(n1, f1)| { let v1 = f1 .try_get_constant_value(values) .unwrap_or_else(|e| panic!("Failed to get constant value for factor: {}", e)); @@ -491,7 +568,7 @@ fn get_const_norm_ratio_factor_pairs( Ok(NodeConstFactorPair::new( NodeConstFactor::new(n0, f0), - NodeConstFactor::new(n1, v1), + NodeConstFactor::new(n1.as_slice(), v1), )) }) .collect::, PywrError>>() @@ -500,7 +577,7 @@ fn get_const_norm_ratio_factor_pairs( #[cfg(test)] mod tests { - use crate::aggregated_node::Factors; + use crate::aggregated_node::Relationship; use crate::metric::MetricF64; use crate::models::Model; use crate::network::Network; @@ -529,9 +606,10 @@ mod tests { network.connect_nodes(input_node, link_node1).unwrap(); network.connect_nodes(link_node1, output_node1).unwrap(); - let factors = Some(Factors::Ratio(vec![2.0.into(), 1.0.into()])); + let relationship = Some(Relationship::new_ratio_factors(&[2.0.into(), 1.0.into()])); - let _agg_node = network.add_aggregated_node("agg-node", None, &[link_node0, link_node1], factors); + let _agg_node = + network.add_aggregated_node("agg-node", None, &[vec![link_node0], vec![link_node1]], relationship); // Setup a demand on output-0 let output_node = network.get_mut_node_by_name("output", Some("0")).unwrap(); @@ -579,9 +657,13 @@ mod tests { let factor_profile = MonthlyProfileParameter::new("factor-profile".into(), [2.0; 12], None); let factor_profile_idx = network.add_simple_parameter(Box::new(factor_profile)).unwrap(); - let factors = Some(Factors::Ratio(vec![factor_profile_idx.into(), 1.0.into()])); + let relationship = Some(Relationship::new_ratio_factors(&[ + factor_profile_idx.into(), + 1.0.into(), + ])); - let _agg_node = network.add_aggregated_node("agg-node", None, &[link_node0, link_node1], factors); + let _agg_node = + network.add_aggregated_node("agg-node", None, &[vec![link_node0], vec![link_node1]], relationship); // Setup a demand on output-0 let output_node = network.get_mut_node_by_name("output", Some("0")).unwrap(); @@ -605,4 +687,141 @@ mod tests { run_all_solvers(&model, &["cbc"], &[]); } + + /// Test mutual exclusive flows + /// + /// The model has a single input that diverges to two links, only one of which can be active at a time. + #[test] + fn test_simple_mutual_exclusivity() { + let mut network = Network::default(); + + let input_node = network.add_input_node("input", None).unwrap(); + let link_node0 = network.add_link_node("link", Some("0")).unwrap(); + let output_node0 = network.add_output_node("output", Some("0")).unwrap(); + + network.connect_nodes(input_node, link_node0).unwrap(); + network.connect_nodes(link_node0, output_node0).unwrap(); + + let link_node1 = network.add_link_node("link", Some("1")).unwrap(); + let output_node1 = network.add_output_node("output", Some("1")).unwrap(); + + network.connect_nodes(input_node, link_node1).unwrap(); + network.connect_nodes(link_node1, output_node1).unwrap(); + + let _me_node = network.add_aggregated_node( + "mutual-exclusivity", + None, + &[vec![link_node0], vec![link_node1]], + Some(Relationship::new_exclusive(0, 1)), + ); + + // Setup a demand on output-0 and output-1. + // output-0 has a lower penalty cost than output-1, so the flow should be directed to output-0. 
+ let output_node = network.get_mut_node_by_name("output", Some("0")).unwrap(); + output_node.set_max_flow_constraint(Some(100.0.into())).unwrap(); + + output_node.set_cost(Some((-10.0).into())); + + let output_node = network.get_mut_node_by_name("output", Some("1")).unwrap(); + output_node.set_max_flow_constraint(Some(100.0.into())).unwrap(); + + output_node.set_cost(Some((-5.0).into())); + + // Set-up assertion for "output-0" node + let idx = network.get_node_by_name("link", Some("0")).unwrap().index(); + let expected = Array2::from_elem((366, 10), 100.0); + let recorder = AssertionRecorder::new("link-0-flow", MetricF64::NodeOutFlow(idx), expected, None, None); + network.add_recorder(Box::new(recorder)).unwrap(); + + // Set-up assertion for "output-1" node + let idx = network.get_node_by_name("link", Some("1")).unwrap().index(); + let expected = Array2::from_elem((366, 10), 0.0); + let recorder = AssertionRecorder::new("link-1-flow", MetricF64::NodeOutFlow(idx), expected, None, None); + network.add_recorder(Box::new(recorder)).unwrap(); + + let model = Model::new(default_time_domain().into(), network); + + run_all_solvers(&model, &["clp"], &[]); + } + + /// Test double mutual exclusive flows + /// + /// The model has a single input that diverges to three links. Two sets of mutual exclusivity + /// constraints are defined, one for the first two links and one for the last two links. This + /// tests that a node can appear in two different mutual exclusivity constraints. + #[test] + fn test_double_mutual_exclusivity() { + let mut network = Network::default(); + + let input_node = network.add_input_node("input", None).unwrap(); + let link_node0 = network.add_link_node("link", Some("0")).unwrap(); + let output_node0 = network.add_output_node("output", Some("0")).unwrap(); + + network.connect_nodes(input_node, link_node0).unwrap(); + network.connect_nodes(link_node0, output_node0).unwrap(); + + let link_node1 = network.add_link_node("link", Some("1")).unwrap(); + let output_node1 = network.add_output_node("output", Some("1")).unwrap(); + + network.connect_nodes(input_node, link_node1).unwrap(); + network.connect_nodes(link_node1, output_node1).unwrap(); + + let link_node2 = network.add_link_node("link", Some("2")).unwrap(); + let output_node2 = network.add_output_node("output", Some("2")).unwrap(); + + network.connect_nodes(input_node, link_node2).unwrap(); + network.connect_nodes(link_node2, output_node2).unwrap(); + + let _me_node = network.add_aggregated_node( + "mutual-exclusivity-01", + None, + &[vec![link_node0], vec![link_node1]], + Some(Relationship::new_exclusive(0, 1)), + ); + let _me_node = network.add_aggregated_node( + "mutual-exclusivity-12", + None, + &[vec![link_node1], vec![link_node2]], + Some(Relationship::new_exclusive(0, 1)), + ); + + // Setup a demand on the outputs + // output-1 has a lower penalty cost than output-0 and output-2, so the flow should be directed to output-1. 
+ let output_node = network.get_mut_node_by_name("output", Some("0")).unwrap(); + output_node.set_max_flow_constraint(Some(100.0.into())).unwrap(); + + output_node.set_cost(Some((-5.0).into())); + + let output_node = network.get_mut_node_by_name("output", Some("1")).unwrap(); + output_node.set_max_flow_constraint(Some(100.0.into())).unwrap(); + + output_node.set_cost(Some((-15.0).into())); + + let output_node = network.get_mut_node_by_name("output", Some("2")).unwrap(); + output_node.set_max_flow_constraint(Some(100.0.into())).unwrap(); + + output_node.set_cost(Some((-5.0).into())); + + // Set-up assertion for "output-0" node + let idx = network.get_node_by_name("link", Some("0")).unwrap().index(); + let expected = Array2::from_elem((366, 10), 0.0); + let recorder = AssertionRecorder::new("link-0-flow", MetricF64::NodeOutFlow(idx), expected, None, None); + network.add_recorder(Box::new(recorder)).unwrap(); + + // Set-up assertion for "output-0" node + let idx = network.get_node_by_name("link", Some("1")).unwrap().index(); + let expected = Array2::from_elem((366, 10), 100.0); + let recorder = AssertionRecorder::new("link-1-flow", MetricF64::NodeOutFlow(idx), expected, None, None); + network.add_recorder(Box::new(recorder)).unwrap(); + + // Set-up assertion for "output-2" node + let idx = network.get_node_by_name("link", Some("2")).unwrap().index(); + let expected = Array2::from_elem((366, 10), 0.0); + let recorder = AssertionRecorder::new("link-2-flow", MetricF64::NodeOutFlow(idx), expected, None, None); + network.add_recorder(Box::new(recorder)).unwrap(); + + let model = Model::new(default_time_domain().into(), network); + + run_all_solvers(&model, &["clp"], &[]); + } } diff --git a/pywr-core/src/derived_metric.rs b/pywr-core/src/derived_metric.rs index 61f90980..016ba23d 100644 --- a/pywr-core/src/derived_metric.rs +++ b/pywr-core/src/derived_metric.rs @@ -108,7 +108,7 @@ impl DerivedMetric { Self::NodeInFlowDeficit(idx) => { let node = network.get_node(idx)?; let flow = state.get_network_state().get_node_in_flow(idx)?; - let max_flow = node.get_current_max_flow(network, state)?; + let max_flow = node.get_max_flow(network, state)?; Ok(max_flow - flow) } Self::PowerFromNodeFlow(idx, turbine_data) => { diff --git a/pywr-core/src/metric.rs b/pywr-core/src/metric.rs index 39497412..d7b7017c 100644 --- a/pywr-core/src/metric.rs +++ b/pywr-core/src/metric.rs @@ -105,15 +105,15 @@ impl MetricF64 { MetricF64::NodeVolume(idx) => Ok(state.get_network_state().get_node_volume(idx)?), MetricF64::AggregatedNodeInFlow(idx) => { let node = model.get_aggregated_node(idx)?; - node.get_nodes() - .iter() + node.iter_nodes() + .flat_map(|indices| indices.iter()) .map(|idx| state.get_network_state().get_node_in_flow(idx)) .sum::>() } MetricF64::AggregatedNodeOutFlow(idx) => { let node = model.get_aggregated_node(idx)?; - node.get_nodes() - .iter() + node.iter_nodes() + .flat_map(|indices| indices.iter()) .map(|idx| state.get_network_state().get_node_out_flow(idx)) .sum::>() } diff --git a/pywr-core/src/network.rs b/pywr-core/src/network.rs index 4dd97e90..f13a457e 100644 --- a/pywr-core/src/network.rs +++ b/pywr-core/src/network.rs @@ -1,4 +1,4 @@ -use crate::aggregated_node::{AggregatedNode, AggregatedNodeIndex, AggregatedNodeVec, Factors}; +use crate::aggregated_node::{AggregatedNode, AggregatedNodeIndex, AggregatedNodeVec, Relationship}; use crate::aggregated_storage_node::{AggregatedStorageNode, AggregatedStorageNodeIndex, AggregatedStorageNodeVec}; use crate::derived_metric::{DerivedMetric, 
DerivedMetricIndex}; use crate::edge::{Edge, EdgeIndex, EdgeVec}; @@ -574,6 +574,11 @@ impl Network { features.insert(SolverFeatures::AggregatedNodeDynamicFactors); } + // Aggregated nodes with exclusivities require the MutualExclusivity feature. + if self.aggregated_nodes.iter().any(|n| n.has_exclusivity()) { + features.insert(SolverFeatures::MutualExclusivity); + } + // The presence of any virtual storage node requires the VirtualStorage feature. if self.virtual_storage_nodes.len() > 0 { features.insert(SolverFeatures::VirtualStorage); @@ -948,14 +953,14 @@ impl Network { Ok(()) } - pub fn set_aggregated_node_factors( + pub fn set_aggregated_node_relationship( &mut self, name: &str, sub_name: Option<&str>, - factors: Option, + relationship: Option, ) -> Result<(), PywrError> { let node = self.get_mut_aggregated_node_by_name(name, sub_name)?; - node.set_factors(factors); + node.set_relationship(relationship); Ok(()) } @@ -1277,14 +1282,14 @@ impl Network { &mut self, name: &str, sub_name: Option<&str>, - nodes: &[NodeIndex], - factors: Option, + nodes: &[Vec], + relationship: Option, ) -> Result { if let Ok(_agg_node) = self.get_aggregated_node_by_name(name, sub_name) { return Err(PywrError::NodeNameAlreadyExists(name.to_string())); } - let node_index = self.aggregated_nodes.push_new(name, sub_name, nodes, factors); + let node_index = self.aggregated_nodes.push_new(name, sub_name, nodes, relationship); Ok(node_index) } diff --git a/pywr-core/src/node.rs b/pywr-core/src/node.rs index 21cc5664..b1d73adf 100644 --- a/pywr-core/src/node.rs +++ b/pywr-core/src/node.rs @@ -1,13 +1,13 @@ use crate::edge::EdgeIndex; use crate::metric::{MetricF64, SimpleMetricF64}; use crate::network::Network; -use crate::state::{NodeState, SimpleParameterValues, State}; +use crate::state::{ConstParameterValues, NodeState, SimpleParameterValues, State}; use crate::timestep::Timestep; use crate::virtual_storage::VirtualStorageIndex; use crate::PywrError; use std::ops::{Deref, DerefMut}; -#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Debug)] +#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Debug, Hash)] pub struct NodeIndex(usize); impl Deref for NodeIndex { @@ -96,6 +96,28 @@ impl NodeVec { } } +/// Bounds for the flow of a node +#[derive(Debug, Clone, Copy)] +pub struct FlowBounds { + /// Minimum flow + pub min_flow: f64, + /// Maximum flow + pub max_flow: f64, +} + +#[derive(Debug, Clone, Copy)] +pub struct VolumeBounds { + /// Available volume (i.e. max that can be removed) + pub available: f64, + /// Missing volume (i.e. max that can be added) + pub missing: f64, +} + +pub enum NodeBounds { + Flow(FlowBounds), + Volume(VolumeBounds), +} + #[derive(Debug, Clone)] pub enum Constraint { MinFlow, @@ -293,46 +315,6 @@ impl Node { } } - // /// Return a reference to a node's flow constraints if they exist. - // fn flow_constraints(&self) -> Option<&FlowConstraints> { - // match self { - // Node::Input(n) => Some(&n.flow_constraints), - // Node::Link(n) => Some(&n.flow_constraints), - // Node::Output(n) => Some(&n.flow_constraints), - // Node::Storage(n) => None, - // } - // } - - // /// Return a mutable reference to a node's flow constraints if they exist. 
- // fn flow_constraints_mut(&mut self) -> Result<&mut FlowConstraints, PywrError> { - // match self { - // Node::Input(n) => Ok(&mut n.flow_constraints), - // Node::Link(n) => Ok(&mut n.flow_constraints), - // Node::Output(n) => Ok(&mut n.flow_constraints), - // Node::Storage(_) => Err(PywrError::FlowConstraintsUndefined), - // } - // } - - // /// Return a reference to a node's storage constraints if they exist. - // fn storage_constraints(&self) -> Result<&StorageConstraints, PywrError> { - // match self { - // Node::Input(_) => Err(PywrError::StorageConstraintsUndefined), - // Node::Link(_) => Err(PywrError::StorageConstraintsUndefined), - // Node::Output(_) => Err(PywrError::StorageConstraintsUndefined), - // Node::Storage(n) => Ok(&n.storage_constraints), - // } - // } - - // /// Return a mutable reference to a node's storage constraints if they exist. - // fn storage_constraints_mut(&self) -> Result<&mut StorageConstraints, PywrError> { - // match self.0.borrow_mut().deref_mut() { - // _Node::Input(_) => Err(PywrError::StorageConstraintsUndefined), - // _Node::Link(_) => Err(PywrError::StorageConstraintsUndefined), - // _Node::Output(_) => Err(PywrError::StorageConstraintsUndefined), - // _Node::Storage(n) => Ok(&mut n.storage_constraints), - // } - // } - pub fn before(&self, timestep: &Timestep, state: &mut State) -> Result<(), PywrError> { // Currently only storage nodes do something during before match self { @@ -361,7 +343,7 @@ impl Node { } } - pub fn get_current_min_flow(&self, network: &Network, state: &State) -> Result { + pub fn get_min_flow(&self, network: &Network, state: &State) -> Result { match self { Self::Input(n) => n.get_min_flow(network, state), Self::Link(n) => n.get_min_flow(network, state), @@ -370,6 +352,15 @@ impl Node { } } + pub fn get_const_min_flow(&self, values: &ConstParameterValues) -> Result, PywrError> { + match self { + Self::Input(n) => n.get_const_min_flow(values), + Self::Link(n) => n.get_const_min_flow(values), + Self::Output(n) => n.get_const_min_flow(values), + Self::Storage(_) => Err(PywrError::FlowConstraintsUndefined), + } + } + pub fn set_max_flow_constraint(&mut self, value: Option) -> Result<(), PywrError> { match self { Self::Input(n) => { @@ -388,7 +379,7 @@ impl Node { } } - pub fn get_current_max_flow(&self, network: &Network, state: &State) -> Result { + pub fn get_max_flow(&self, network: &Network, state: &State) -> Result { match self { Self::Input(n) => n.get_max_flow(network, state), Self::Link(n) => n.get_max_flow(network, state), @@ -397,6 +388,15 @@ impl Node { } } + pub fn get_const_max_flow(&self, values: &ConstParameterValues) -> Result, PywrError> { + match self { + Self::Input(n) => n.get_const_max_flow(values), + Self::Link(n) => n.get_const_max_flow(values), + Self::Output(n) => n.get_const_max_flow(values), + Self::Storage(_) => Err(PywrError::FlowConstraintsUndefined), + } + } + pub fn is_max_flow_unconstrained(&self) -> Result { match self { Self::Input(n) => Ok(n.is_max_flow_unconstrained()), @@ -406,16 +406,6 @@ impl Node { } } - pub fn get_current_flow_bounds(&self, network: &Network, state: &State) -> Result<(f64, f64), PywrError> { - match ( - self.get_current_min_flow(network, state), - self.get_current_max_flow(network, state), - ) { - (Ok(min_flow), Ok(max_flow)) => Ok((min_flow, max_flow)), - _ => Err(PywrError::FlowConstraintsUndefined), - } - } - pub fn set_min_volume_constraint(&mut self, value: Option) -> Result<(), PywrError> { match self { Self::Input(_) => Err(PywrError::StorageConstraintsUndefined), 
@@ -465,17 +455,65 @@ impl Node { } } - pub fn get_current_available_volume_bounds(&self, state: &State) -> Result<(f64, f64), PywrError> { - match (self.get_current_min_volume(state), self.get_current_max_volume(state)) { - (Ok(min_vol), Ok(max_vol)) => { - let current_volume = state.get_network_state().get_node_volume(&self.index())?; + /// Get constant bounds for the node, if they exist, depending on its type. + /// + /// Note that [`Node::Storage`] nodes can never have constant bounds. + pub fn get_const_bounds(&self, values: &ConstParameterValues) -> Result, PywrError> { + match self { + Self::Input(n) => { + let min_flow = n.get_const_min_flow(values)?; + let max_flow = n.get_const_max_flow(values)?; + + match (min_flow, max_flow) { + (Some(min_flow), Some(max_flow)) => Ok(Some(NodeBounds::Flow(FlowBounds { min_flow, max_flow }))), + _ => Ok(None), + } + } + Self::Output(n) => { + let min_flow = n.get_const_min_flow(values)?; + let max_flow = n.get_const_max_flow(values)?; - let available = current_volume - min_vol; - let missing = max_vol - current_volume; + match (min_flow, max_flow) { + (Some(min_flow), Some(max_flow)) => Ok(Some(NodeBounds::Flow(FlowBounds { min_flow, max_flow }))), + _ => Ok(None), + } + } + Self::Link(n) => { + let min_flow = n.get_const_min_flow(values)?; + let max_flow = n.get_const_max_flow(values)?; - Ok((available, missing)) + match (min_flow, max_flow) { + (Some(min_flow), Some(max_flow)) => Ok(Some(NodeBounds::Flow(FlowBounds { min_flow, max_flow }))), + _ => Ok(None), + } + } + Self::Storage(_) => Ok(None), + } + } + + /// Get bounds for the node depending on its type. + pub fn get_bounds(&self, network: &Network, state: &State) -> Result { + match self { + Self::Input(n) => Ok(NodeBounds::Flow(FlowBounds { + min_flow: n.flow_constraints.get_min_flow(network, state)?, + max_flow: n.flow_constraints.get_max_flow(network, state)?, + })), + Self::Output(n) => Ok(NodeBounds::Flow(FlowBounds { + min_flow: n.flow_constraints.get_min_flow(network, state)?, + max_flow: n.flow_constraints.get_max_flow(network, state)?, + })), + Self::Link(n) => Ok(NodeBounds::Flow(FlowBounds { + min_flow: n.flow_constraints.get_min_flow(network, state)?, + max_flow: n.flow_constraints.get_max_flow(network, state)?, + })), + Self::Storage(n) => { + let current_volume = state.get_network_state().get_node_volume(&n.meta.index)?; + + let available = current_volume - n.get_min_volume(state)?; + let missing = n.get_max_volume(state)? - current_volume; + + Ok(NodeBounds::Volume(VolumeBounds { available, missing })) } - _ => Err(PywrError::FlowConstraintsUndefined), } } @@ -570,9 +608,19 @@ impl FlowConstraints { Some(m) => m.get_value(network, state), } } + + /// Return the constant minimum flow if it exists. + /// + /// Defaults to zero if no parameter is defined. + pub fn get_const_min_flow(&self, values: &ConstParameterValues) -> Result, PywrError> { + match &self.min_flow { + None => Ok(Some(0.0)), + Some(m) => m.try_get_constant_value(values), + } + } /// Return the current maximum flow from the parameter state /// - /// Defaults to f64::MAX if no parameter is defined. + /// Defaults to [`f64::MAX`] if no parameter is defined. pub fn get_max_flow(&self, network: &Network, state: &State) -> Result { match &self.max_flow { None => Ok(f64::MAX), @@ -580,6 +628,16 @@ impl FlowConstraints { } } + /// Return the constant maximum flow if it exists. + /// + /// Defaults to [`f64::MAX`] if no parameter is defined. 
+ pub fn get_const_max_flow(&self, values: &ConstParameterValues) -> Result, PywrError> { + match &self.max_flow { + None => Ok(Some(f64::MAX)), + Some(m) => m.try_get_constant_value(values), + } + } + pub fn is_max_flow_unconstrained(&self) -> bool { self.max_flow.is_none() } @@ -701,12 +759,18 @@ impl InputNode { fn get_min_flow(&self, network: &Network, state: &State) -> Result { self.flow_constraints.get_min_flow(network, state) } + fn get_const_min_flow(&self, values: &ConstParameterValues) -> Result, PywrError> { + self.flow_constraints.get_const_min_flow(values) + } fn set_max_flow(&mut self, value: Option) { self.flow_constraints.max_flow = value; } fn get_max_flow(&self, network: &Network, state: &State) -> Result { self.flow_constraints.get_max_flow(network, state) } + fn get_const_max_flow(&self, values: &ConstParameterValues) -> Result, PywrError> { + self.flow_constraints.get_const_max_flow(values) + } fn is_max_flow_unconstrained(&self) -> bool { self.flow_constraints.is_max_flow_unconstrained() } @@ -747,12 +811,18 @@ impl OutputNode { fn get_min_flow(&self, network: &Network, state: &State) -> Result { self.flow_constraints.get_min_flow(network, state) } + fn get_const_min_flow(&self, values: &ConstParameterValues) -> Result, PywrError> { + self.flow_constraints.get_const_min_flow(values) + } fn set_max_flow(&mut self, value: Option) { self.flow_constraints.max_flow = value; } fn get_max_flow(&self, network: &Network, state: &State) -> Result { self.flow_constraints.get_max_flow(network, state) } + fn get_const_max_flow(&self, values: &ConstParameterValues) -> Result, PywrError> { + self.flow_constraints.get_const_max_flow(values) + } fn is_max_flow_unconstrained(&self) -> bool { self.flow_constraints.is_max_flow_unconstrained() } @@ -795,12 +865,18 @@ impl LinkNode { fn get_min_flow(&self, network: &Network, state: &State) -> Result { self.flow_constraints.get_min_flow(network, state) } + fn get_const_min_flow(&self, values: &ConstParameterValues) -> Result, PywrError> { + self.flow_constraints.get_const_min_flow(values) + } fn set_max_flow(&mut self, value: Option) { self.flow_constraints.max_flow = value; } fn get_max_flow(&self, network: &Network, state: &State) -> Result { self.flow_constraints.get_max_flow(network, state) } + fn get_const_max_flow(&self, values: &ConstParameterValues) -> Result, PywrError> { + self.flow_constraints.get_const_max_flow(values) + } fn is_max_flow_unconstrained(&self) -> bool { self.flow_constraints.is_max_flow_unconstrained() } diff --git a/pywr-core/src/solvers/builder.rs b/pywr-core/src/solvers/builder.rs index 0390f2d6..a9879b59 100644 --- a/pywr-core/src/solvers/builder.rs +++ b/pywr-core/src/solvers/builder.rs @@ -1,13 +1,14 @@ use crate::aggregated_node::AggregatedNodeIndex; use crate::edge::EdgeIndex; use crate::network::Network; -use crate::node::{Node, NodeType}; +use crate::node::{Node, NodeBounds, NodeIndex, NodeType}; use crate::solvers::col_edge_map::{ColumnEdgeMap, ColumnEdgeMapBuilder}; use crate::solvers::SolverTimings; use crate::state::{ConstParameterValues, State}; use crate::timestep::Timestep; use crate::PywrError; -use std::collections::BTreeMap; +use num::Zero; +use std::collections::{BTreeMap, HashMap, HashSet}; use std::fmt::Debug; use std::ops::Deref; use std::time::Instant; @@ -19,10 +20,17 @@ enum Bounds { // Free, Lower(f64), // Upper(f64), - // Double(f64, f64), + Double(f64, f64), // Fixed(f64), } +/// Column type +#[derive(Copy, Clone, Debug)] +pub enum ColType { + Continuous, + Integer, +} + /// Sparse 
form of a linear program. /// /// This struct is intended to facilitate passing the LP data to a external library. Most @@ -31,6 +39,7 @@ struct Lp { col_lower: Vec, col_upper: Vec, col_obj_coef: Vec, + col_type: Vec, row_lower: Vec, row_upper: Vec, row_mask: Vec, @@ -125,6 +134,7 @@ struct LpBuilder { col_lower: Vec, col_upper: Vec, col_obj_coef: Vec, + col_type: Vec, rows: Vec>, fixed_rows: Vec>, } @@ -138,6 +148,7 @@ where col_lower: Vec::new(), col_upper: Vec::new(), col_obj_coef: Vec::new(), + col_type: Vec::new(), rows: Vec::new(), fixed_rows: Vec::new(), } @@ -148,9 +159,9 @@ impl LpBuilder where I: num::PrimInt, { - fn add_column(&mut self, obj_coef: f64, bounds: Bounds) { + fn add_column(&mut self, obj_coef: f64, bounds: Bounds, col_type: ColType) -> I { let (lb, ub): (f64, f64) = match bounds { - // Bounds::Double(lb, ub) => (lb, ub), + Bounds::Double(lb, ub) => (lb, ub), Bounds::Lower(lb) => (lb, FMAX), // Bounds::Fixed(b) => (b, b), // Bounds::Free => (f64::MIN, FMAX), @@ -160,6 +171,8 @@ where self.col_lower.push(lb); self.col_upper.push(ub); self.col_obj_coef.push(obj_coef); + self.col_type.push(col_type); + I::from(self.col_lower.len() - 1).unwrap() } /// Add a fixed row to the LP. @@ -221,6 +234,7 @@ where col_lower: self.col_lower, col_upper: self.col_upper, col_obj_coef: self.col_obj_coef, + col_type: self.col_type, row_lower, row_upper, row_mask, @@ -276,6 +290,24 @@ where } } +/// The row id types associated with a node's constraints. +#[derive(Copy, Clone)] +struct NodeRowId { + /// A regular node constraint bounded by lower and upper bounds. + row_id: I, + node_idx: NodeIndex, + row_type: NodeRowType, +} + +/// The row id types associated with a node's constraints. +#[derive(Copy, Clone)] +enum NodeRowType { + /// A regular node constraint bounded by lower and upper bounds. + Continuous, + /// A binary node constraint where the upper bound is controlled by a binary variable. + Binary { bin_col_id: I }, +} + struct AggNodeFactorRow { agg_node_idx: AggregatedNodeIndex, // Row index for each node-pair. If `None` the row is fixed and does not need updating. @@ -285,7 +317,7 @@ struct AggNodeFactorRow { pub struct BuiltSolver { builder: Lp, col_edge_map: ColumnEdgeMap, - node_constraints_row_ids: Vec, + node_constraints_row_ids: Vec>, agg_node_constraint_row_ids: Vec, agg_node_factor_constraint_row_ids: Vec>, virtual_storage_constraint_row_ids: Vec, @@ -319,6 +351,10 @@ where &self.builder.col_obj_coef } + pub fn col_type(&self) -> &[ColType] { + &self.builder.col_type + } + pub fn row_lower(&self) -> &[f64] { &self.builder.row_lower } @@ -397,22 +433,27 @@ where ) -> Result<(), PywrError> { let dt = timestep.days(); - for (row_id, node) in self.node_constraints_row_ids.iter().zip(network.nodes().deref()) { - let (lb, ub): (f64, f64) = match node.get_current_flow_bounds(network, state) { - Ok(bnds) => bnds, - Err(PywrError::FlowConstraintsUndefined) => { - // Must be a storage node - let (avail, missing) = match node.get_current_available_volume_bounds(state) { - Ok(bnds) => bnds, - Err(e) => return Err(e), - }; - - (-avail / dt, missing / dt) - } - Err(e) => return Err(e), + for row in self.node_constraints_row_ids.iter() { + let node = network.get_node(&row.node_idx)?; + let (lb, ub): (f64, f64) = match node.get_bounds(network, state)? 
{ + NodeBounds::Flow(bounds) => (bounds.min_flow, bounds.max_flow), + NodeBounds::Volume(bounds) => (-bounds.available / dt, bounds.missing / dt), }; - self.builder.apply_row_bounds(*row_id, lb, ub); + match row.row_type { + NodeRowType::Continuous => { + // Regular node constraint + self.builder.apply_row_bounds(row.row_id.to_usize().unwrap(), lb, ub); + } + NodeRowType::Binary { bin_col_id } => { + if !lb.is_zero() { + panic!("Binary node constraint with non-zero lower bounds not implemented!"); + } + // Update the coefficients for the binary column to be the upper bound + self.builder.coefficients_to_update.push((row.row_id, bin_col_id, -ub)); + // This row is already upper bounded to zero in `create_node_constraints`. + } + } } Ok(()) @@ -436,13 +477,21 @@ where // Modify the constraint matrix coefficients for the nodes // TODO error handling? let nodes = network.nodes(); - let node0 = nodes.get(&node_pair.node0.index).expect("Node index not found!"); - let node1 = nodes.get(&node_pair.node1.index).expect("Node index not found!"); - - self.builder - .update_row_coefficients(*row_idx, node0, 1.0, &self.col_edge_map); - self.builder - .update_row_coefficients(*row_idx, node1, -node_pair.ratio(), &self.col_edge_map); + for node0_idx in node_pair.node0.indices { + let node0 = nodes.get(node0_idx).expect("Node index not found!"); + self.builder + .update_row_coefficients(*row_idx, node0, 1.0, &self.col_edge_map); + } + + for node1_idx in node_pair.node1.indices { + let node1 = nodes.get(node1_idx).expect("Node index not found!"); + self.builder.update_row_coefficients( + *row_idx, + node1, + -node_pair.ratio(), + &self.col_edge_map, + ); + } self.builder.apply_row_bounds(row_idx.to_usize().unwrap(), 0.0, 0.0); } @@ -498,6 +547,8 @@ where pub struct SolverBuilder { builder: LpBuilder, col_edge_map: ColumnEdgeMapBuilder, + node_bin_col_map: HashMap>, + node_set_bin_col_map: HashMap, I>, } impl Default for SolverBuilder @@ -508,6 +559,8 @@ where Self { builder: LpBuilder::default(), col_edge_map: ColumnEdgeMapBuilder::default(), + node_bin_col_map: HashMap::new(), + node_set_bin_col_map: HashMap::new(), } } } @@ -527,13 +580,15 @@ where // Create edge mass balance constraints self.create_mass_balance_constraints(network); // Create the nodal constraints - let node_constraints_row_ids = self.create_node_constraints(network); + let node_constraints_row_ids = self.create_node_constraints(network, values)?; // Create the aggregated node constraints let agg_node_constraint_row_ids = self.create_aggregated_node_constraints(network); // Create the aggregated node factor constraints let agg_node_factor_constraint_row_ids = self.create_aggregated_node_factor_constraints(network, values); // Create virtual storage constraints let virtual_storage_constraint_row_ids = self.create_virtual_storage_constraints(network); + // Create mutual exclusivity constraints + self.create_mutual_exclusivity_constraints(network); Ok(BuiltSolver { builder: self.builder.build(), @@ -584,7 +639,30 @@ where // Add columns set the columns as x >= 0.0 (i.e. no upper bounds) for _ in 0..self.col_edge_map.ncols() { - self.builder.add_column(0.0, Bounds::Lower(0.0)); + self.builder.add_column(0.0, Bounds::Lower(0.0), ColType::Continuous); + } + + // Determine the set of nodes that are in one or more mutual exclusivity constraints + // We only want to create one binary variable for each unique set of nodes. For the + // majority of cases the nodes will only be in one set. 
However, if a node is in different + // sets then we need to create separate binary variables and associate them with that node. + let mut node_sets_in_a_mutual_exclusivity = HashSet::new(); + for agg_node in network.aggregated_nodes().deref() { + if agg_node.has_exclusivity() { + for node_set in agg_node.iter_nodes() { + node_sets_in_a_mutual_exclusivity.insert(node_set); + } + } + } + + // Add any binary columns associated with each set of nodes + for node_set in node_sets_in_a_mutual_exclusivity.into_iter() { + let col_id = self.builder.add_column(0.0, Bounds::Double(0.0, 1.0), ColType::Integer); + for node_idx in node_set.iter() { + self.node_bin_col_map.entry(*node_idx).or_default().push(col_id); + } + + self.node_set_bin_col_map.insert(node_set.to_vec(), col_id); } Ok(()) } @@ -632,7 +710,7 @@ where } } - fn add_node(&mut self, node: &Node, factor: f64, row: &mut RowBuilder<I>) { + fn add_node(&self, node: &Node, factor: f64, row: &mut RowBuilder<I>) { match node.node_type() { NodeType::Link => { for edge in node.get_outgoing_edges().unwrap() { @@ -669,19 +747,86 @@ where /// /// One constraint is created per node to enforce any constraints (flow or storage) /// that it may define. Returns the row_ids associated with each constraint. - fn create_node_constraints(&mut self, network: &Network) -> Vec<usize> { + fn create_node_constraints( + &mut self, + network: &Network, + values: &ConstParameterValues, + ) -> Result<Vec<NodeRowId<I>>, PywrError> { let mut row_ids = Vec::with_capacity(network.nodes().len()); for node in network.nodes().deref() { - // Create empty arrays to store the matrix data - let mut row: RowBuilder<I> = RowBuilder::default(); + // Get the node's flow bounds if they are constants + // Storage nodes cannot have constant bounds + let bounds = match node.get_const_bounds(values)?
{ + Some(NodeBounds::Flow(bounds)) => Some(bounds), + _ => None, + }; - self.add_node(node, 1.0, &mut row); + // If there are binary variables associated with this node, then we need to add a row + // that enforces each binary variable's constraints + if let Some(cols) = self.node_bin_col_map.get(&node.index()) { + for col in cols { + let mut row: RowBuilder = RowBuilder::default(); + self.add_node(node, 1.0, &mut row); + let mut is_fixed = false; + + match bounds { + Some(bounds) => { + if bounds.min_flow != 0.0 { + panic!("Binary node constraint with non-zero lower bounds not implemented!"); + } + // If the bounds are constant then the binary variable is used to control the upper bound + row.add_element(*col, -bounds.max_flow.min(1e8)); + row.set_lower(FMIN); + row.set_upper(0.0); + is_fixed = true; + } + None => { + // If the bounds are not constant then the binary variable coefficient is updated later + // Use a placeholder of -1.0 for now + row.add_element(*col, -1.0); + } + } - let row_id = self.builder.add_variable_row(row); - row_ids.push(row_id.to_usize().unwrap()) + if is_fixed { + self.builder.add_fixed_row(row); + } else { + let row_id = self.builder.add_variable_row(row); + let row_type = NodeRowType::Binary { bin_col_id: *col }; + + row_ids.push(NodeRowId { + row_id, + node_idx: node.index(), + row_type, + }); + } + } + } else { + let mut row: RowBuilder = RowBuilder::default(); + self.add_node(node, 1.0, &mut row); + let mut is_fixed = false; + + // Apply the bounds if they are constant; otherwise the bounds are updated later + if let Some(bounds) = bounds { + row.set_lower(bounds.min_flow); + row.set_upper(bounds.max_flow); + is_fixed = true; + } + + if is_fixed { + self.builder.add_fixed_row(row); + } else { + let row_id = self.builder.add_variable_row(row); + + row_ids.push(NodeRowId { + row_id, + node_idx: node.index(), + row_type: NodeRowType::Continuous, + }); + } + } } - row_ids + Ok(row_ids) } /// Create aggregated node factor constraints @@ -706,13 +851,17 @@ where // TODO error handling? let nodes = network.nodes(); - let node0 = nodes.get(&node_pair.node0.index).expect("Node index not found!"); - let node1 = nodes.get(&node_pair.node1.index).expect("Node index not found!"); + for node0_idx in node_pair.node0.indices { + let node0 = nodes.get(node0_idx).expect("Node index not found!"); + self.add_node(node0, 1.0, &mut row); + } let ratio = node_pair.ratio(); - self.add_node(node0, 1.0, &mut row); - self.add_node(node1, -ratio.unwrap_or(1.0), &mut row); + for node1_idx in node_pair.node1.indices { + let node1 = nodes.get(node1_idx).expect("Node index not found!"); + self.add_node(node1, -ratio.unwrap_or(1.0), &mut row); + } // Make the row fixed at zero RHS row.set_lower(0.0); row.set_upper(0.0); @@ -750,10 +899,12 @@ where // Create empty arrays to store the matrix data let mut row: RowBuilder = RowBuilder::default(); - for node_index in agg_node.get_nodes() { + for node_indices in agg_node.iter_nodes() { // TODO error handling? 
- let node = network.nodes().get(&node_index).expect("Node index not found!"); - self.add_node(node, 1.0, &mut row); + for node_idx in node_indices { + let node = network.nodes().get(node_idx).expect("Node index not found!"); + self.add_node(node, 1.0, &mut row); + } } let row_id = self.builder.add_variable_row(row); @@ -788,6 +939,27 @@ where } row_ids } + + /// Create mutual exclusivity constraints + fn create_mutual_exclusivity_constraints(&mut self, network: &Network) { + for agg_node in network.aggregated_nodes().iter() { + if let Some(exclusivity) = agg_node.get_exclusivity() { + let mut row = RowBuilder::default(); + for node_index in agg_node.iter_nodes() { + let bin_col = self + .node_set_bin_col_map + .get(node_index) + .expect("Binary column not found for Node in mutual exclusivity constraint!"); + + row.add_element(*bin_col, 1.0); + } + row.set_upper(exclusivity.max_active() as f64); + row.set_lower(exclusivity.min_active() as f64); + + self.builder.add_fixed_row(row); + } + } + } } #[cfg(test)] @@ -814,9 +986,9 @@ mod tests { fn builder_solve2() { let mut builder = LpBuilder::default(); - builder.add_column(-2.0, Bounds::Lower(0.0)); - builder.add_column(-3.0, Bounds::Lower(0.0)); - builder.add_column(-4.0, Bounds::Lower(0.0)); + builder.add_column(-2.0, Bounds::Lower(0.0), ColType::Continuous); + builder.add_column(-3.0, Bounds::Lower(0.0), ColType::Continuous); + builder.add_column(-4.0, Bounds::Lower(0.0), ColType::Continuous); // Row1 let mut row = RowBuilder::default(); diff --git a/pywr-core/src/solvers/cbc/mod.rs b/pywr-core/src/solvers/cbc/mod.rs index 1d2b920f..13fa11de 100644 --- a/pywr-core/src/solvers/cbc/mod.rs +++ b/pywr-core/src/solvers/cbc/mod.rs @@ -1,6 +1,6 @@ mod settings; -use super::builder::SolverBuilder; +use super::builder::{ColType, SolverBuilder}; use crate::network::Network; use crate::solvers::builder::BuiltSolver; use crate::solvers::{Solver, SolverFeatures, SolverTimings}; @@ -76,12 +76,22 @@ impl Cbc { } } - pub fn add_cols(&mut self, col_lower: &[c_double], col_upper: &[c_double], obj_coefs: &[c_double]) { + pub fn add_cols( + &mut self, + col_lower: &[c_double], + col_upper: &[c_double], + col_type: &[ColType], + obj_coefs: &[c_double], + ) { let number: c_int = col_lower.len() as c_int; for col_idx in 0..number { let lower = col_lower[col_idx as usize]; let upper = col_upper[col_idx as usize]; + let is_integer = match col_type[col_idx as usize] { + ColType::Continuous => 0, + ColType::Integer => 1, + }; let obj_coef = obj_coefs[col_idx as usize]; unsafe { @@ -92,7 +102,7 @@ impl Cbc { lower, upper, obj_coef, - 0 as c_char, + is_integer as c_char, 0, ptr::null_mut(), ptr::null_mut(), @@ -187,7 +197,12 @@ impl CbcSolver { fn from_builder(builder: BuiltSolver) -> Self { let mut cbc = Cbc::default(); - cbc.add_cols(builder.col_lower(), builder.col_upper(), builder.col_obj_coef()); + cbc.add_cols( + builder.col_lower(), + builder.col_upper(), + builder.col_type(), + builder.col_obj_coef(), + ); cbc.add_rows( builder.row_lower(), @@ -221,6 +236,7 @@ impl Solver for CbcSolver { SolverFeatures::AggregatedNode, SolverFeatures::VirtualStorage, SolverFeatures::AggregatedNodeFactors, + SolverFeatures::MutualExclusivity, ] } @@ -248,10 +264,11 @@ impl Solver for CbcSolver { self.cbc.change_row_lower(self.builder.row_lower()); self.cbc.change_row_upper(self.builder.row_upper()); - // TODO raise an error for missing feature - // for (row, column, coefficient) in self.builder.coefficients_to_update() { - // self.cbc.modify_coefficient(*row, *column, 
*coefficient) - // } + if !self.builder.coefficients_to_update().is_empty() { + return Err(PywrError::MissingSolverFeatures); + // TODO waiting for support in CBC's C API: https://github.com/coin-or/Cbc/pull/656 + // self.cbc.modify_coefficient(*row, *column, *coefficient) + } timings.update_constraints += now.elapsed(); @@ -291,7 +308,12 @@ mod tests { #[test] fn cbc_add_rows() { let mut model = Cbc::default(); - model.add_cols(&[0.0, 0.0], &[10.0, 10.0], &[0.0, 0.0]); + model.add_cols( + &[0.0, 0.0], + &[10.0, 10.0], + &[ColType::Continuous, ColType::Continuous], + &[0.0, 0.0], + ); let row_lower: Vec = vec![0.0]; let row_upper: Vec = vec![2.0]; @@ -308,6 +330,7 @@ mod tests { let row_lower = vec![0.0, 0.0]; let col_lower = vec![0.0, 0.0, 0.0]; let col_upper = vec![f64::MAX, f64::MAX, f64::MAX]; + let col_type = vec![ColType::Continuous, ColType::Continuous, ColType::Continuous]; let col_obj_coef = vec![-2.0, -3.0, -4.0]; let row_starts = vec![0, 3, 6]; let columns = vec![0, 1, 2, 0, 1, 2]; @@ -315,7 +338,7 @@ mod tests { let mut lp = Cbc::default(); - lp.add_cols(&col_lower, &col_upper, &col_obj_coef); + lp.add_cols(&col_lower, &col_upper, &col_type, &col_obj_coef); lp.add_rows(&row_lower, &row_upper, &row_starts, &columns, &elements); lp.solve(); @@ -329,6 +352,7 @@ mod tests { let row_lower = vec![0.0, 0.0]; let col_lower = vec![0.0, 0.0, 0.0]; let col_upper = vec![f64::MAX, f64::MAX, f64::MAX]; + let col_type = vec![ColType::Continuous, ColType::Continuous, ColType::Continuous]; let col_obj_coef = vec![-2.0, -3.0, -4.0]; let row_starts = vec![0, 3, 6]; let columns = vec![0, 1, 2, 0, 1, 2]; @@ -336,7 +360,7 @@ mod tests { let mut lp = Cbc::default(); - lp.add_cols(&col_lower, &col_upper, &col_obj_coef); + lp.add_cols(&col_lower, &col_upper, &col_type, &col_obj_coef); lp.add_rows(&row_lower, &row_upper, &row_starts, &columns, &elements); lp.solve(); diff --git a/pywr-core/src/solvers/highs/mod.rs b/pywr-core/src/solvers/highs/mod.rs index 921a35f8..7eb74b40 100644 --- a/pywr-core/src/solvers/highs/mod.rs +++ b/pywr-core/src/solvers/highs/mod.rs @@ -1,15 +1,16 @@ mod settings; use crate::network::Network; -use crate::solvers::builder::{BuiltSolver, SolverBuilder}; +use crate::solvers::builder::{BuiltSolver, ColType, SolverBuilder}; use crate::solvers::{Solver, SolverFeatures, SolverTimings}; use crate::state::{ConstParameterValues, State}; use crate::timestep::Timestep; use crate::PywrError; use highs_sys::{ - HighsInt, Highs_addCols, Highs_addRows, Highs_changeCoeff, Highs_changeColsCostByRange, Highs_changeObjectiveSense, - Highs_changeRowsBoundsByMask, Highs_create, Highs_getDoubleInfoValue, Highs_getSolution, Highs_run, - Highs_setBoolOptionValue, Highs_setStringOptionValue, OBJECTIVE_SENSE_MINIMIZE, STATUS_OK, + kHighsVarTypeContinuous, kHighsVarTypeInteger, HighsInt, Highs_addCols, Highs_addRows, Highs_changeCoeff, + Highs_changeColIntegrality, Highs_changeColsCostByRange, Highs_changeObjectiveSense, Highs_changeRowsBoundsByMask, + Highs_create, Highs_getDoubleInfoValue, Highs_getSolution, Highs_run, Highs_setBoolOptionValue, + Highs_setStringOptionValue, OBJECTIVE_SENSE_MINIMIZE, STATUS_OK, }; use libc::c_void; pub use settings::{HighsSolverSettings, HighsSolverSettingsBuilder}; @@ -34,6 +35,11 @@ impl Default for Highs { let option_name = CString::new("output_flag").unwrap(); Highs_setBoolOptionValue(ptr, option_name.as_ptr(), 0); + // TODO - can these be put into the logging system? 
+ // let option_name = CString::new("log_to_console").unwrap(); + // Highs_setBoolOptionValue(ptr, option_name.as_ptr(), 1); + // let option_name = CString::new("log_dev_level").unwrap(); + // Highs_setIntOptionValue(ptr, option_name.as_ptr(), 2); // model.presolve("off"); Highs_changeObjectiveSense(ptr, OBJECTIVE_SENSE_MINIMIZE); @@ -56,7 +62,15 @@ impl Highs { } } - pub fn add_cols(&mut self, col_lower: &[f64], col_upper: &[f64], col_obj_coef: &[f64], ncols: HighsInt) { + pub fn add_cols( + &mut self, + col_lower: &[f64], + col_upper: &[f64], + col_obj_coef: &[f64], + col_type: &[ColType], + ncols: HighsInt, + ) { + // Add all of the columns unsafe { let ret = Highs_addCols( self.ptr, @@ -71,6 +85,19 @@ impl Highs { ); assert_eq!(ret, STATUS_OK); } + + // Now change the column types + for (i, &ctype) in col_type.iter().enumerate() { + let ctype_int: HighsInt = match ctype { + ColType::Continuous => kHighsVarTypeContinuous, + ColType::Integer => kHighsVarTypeInteger, + }; + + unsafe { + let ret = Highs_changeColIntegrality(self.ptr, i as HighsInt, ctype_int); + assert_eq!(ret, STATUS_OK); + } + } } pub fn add_rows( @@ -111,7 +138,7 @@ impl Highs { } } - pub fn change_coeff(&mut self, row: HighsInt, col: HighsInt, value: f64) { + pub fn change_coefficient(&mut self, row: HighsInt, col: HighsInt, value: f64) { unsafe { let ret = Highs_changeCoeff(self.ptr, row, col, value); assert_eq!(ret, STATUS_OK); @@ -174,6 +201,8 @@ impl Solver for HighsSolver { fn features() -> &'static [SolverFeatures] { &[ + SolverFeatures::VirtualStorage, + SolverFeatures::MutualExclusivity, SolverFeatures::AggregatedNode, SolverFeatures::AggregatedNodeFactors, SolverFeatures::AggregatedNodeDynamicFactors, @@ -194,7 +223,13 @@ impl Solver for HighsSolver { let mut highs_lp = Highs::default(); - highs_lp.add_cols(built.col_lower(), built.col_upper(), built.col_obj_coef(), num_cols); + highs_lp.add_cols( + built.col_lower(), + built.col_upper(), + built.col_obj_coef(), + built.col_type(), + num_cols, + ); highs_lp.add_rows( built.row_lower(), @@ -223,6 +258,7 @@ impl Solver for HighsSolver { timings.update_objective += now.elapsed(); let now = Instant::now(); + self.highs.change_row_bounds( self.builder.row_mask(), self.builder.row_lower(), @@ -230,7 +266,9 @@ impl Solver for HighsSolver { ); for (row, column, coefficient) in self.builder.coefficients_to_update() { - self.highs.change_coeff(*row, *column, *coefficient); + // Highs only accepts coefficients in the range -1e10 to 1e10 + self.highs + .change_coefficient(*row, *column, coefficient.clamp(-1e10, 1e10)); } timings.update_constraints += now.elapsed(); @@ -274,8 +312,9 @@ mod tests { let col_lower: Vec = vec![0.0, 0.0]; let col_upper: Vec = vec![f64::MAX, f64::MAX]; let col_obj_coef: Vec = vec![1.0, 1.0]; + let col_type = vec![ColType::Continuous, ColType::Continuous]; - lp.add_cols(&col_lower, &col_upper, &col_obj_coef, 2); + lp.add_cols(&col_lower, &col_upper, &col_obj_coef, &col_type, 2); let row_lower: Vec = vec![0.0]; let row_upper: Vec = vec![2.0]; @@ -293,6 +332,7 @@ mod tests { let col_lower = vec![0.0, 0.0, 0.0]; let col_upper = vec![f64::MAX, f64::MAX, f64::MAX]; let col_obj_coef = vec![-2.0, -3.0, -4.0]; + let col_type = vec![ColType::Continuous, ColType::Continuous, ColType::Continuous]; let row_starts = vec![0, 3, 6]; let columns = vec![0, 1, 2, 0, 1, 2]; let elements = vec![3.0, 2.0, 1.0, 2.0, 5.0, 3.0]; @@ -302,7 +342,7 @@ mod tests { let nrows = row_upper.len() as HighsInt; let nnz = elements.len() as HighsInt; - lp.add_cols(&col_lower, 
&col_upper, &col_obj_coef, ncols); + lp.add_cols(&col_lower, &col_upper, &col_obj_coef, &col_type, ncols); lp.add_rows(&row_lower, &row_upper, nnz, &row_starts, &columns, &elements); lp.run(); @@ -321,6 +361,7 @@ mod tests { let col_lower = vec![0.0, 0.0, 0.0]; let col_upper = vec![f64::MAX, f64::MAX, f64::MAX]; let col_obj_coef = vec![-2.0, -3.0, -4.0]; + let col_type = vec![ColType::Continuous, ColType::Continuous, ColType::Continuous]; let row_starts = vec![0, 3, 6]; let columns = vec![0, 1, 2, 0, 1, 2]; let elements = vec![3.0, 2.0, 1.0, 2.0, 5.0, 3.0]; @@ -330,7 +371,7 @@ mod tests { let nrows = row_upper.len() as HighsInt; let nnz = elements.len() as HighsInt; - lp.add_cols(&col_lower, &col_upper, &col_obj_coef, ncols); + lp.add_cols(&col_lower, &col_upper, &col_obj_coef, &col_type, ncols); lp.add_rows(&row_lower, &row_upper, nnz, &row_starts, &columns, &elements); lp.run(); diff --git a/pywr-core/src/solvers/ipm_ocl/mod.rs b/pywr-core/src/solvers/ipm_ocl/mod.rs index 231ecc7f..ee28dcec 100644 --- a/pywr-core/src/solvers/ipm_ocl/mod.rs +++ b/pywr-core/src/solvers/ipm_ocl/mod.rs @@ -338,7 +338,7 @@ impl BuiltSolver { .iter() .map(|state| { // TODO check for non-zero lower bounds and error? - node.get_current_flow_bounds(network, state) + node.get_flow_bounds(network, state) .expect("Flow bounds expected for Input, Output and Link nodes.") .1 .min(B_MAX) diff --git a/pywr-core/src/solvers/ipm_simd/mod.rs b/pywr-core/src/solvers/ipm_simd/mod.rs index 6718c4c1..0e5027e4 100644 --- a/pywr-core/src/solvers/ipm_simd/mod.rs +++ b/pywr-core/src/solvers/ipm_simd/mod.rs @@ -366,7 +366,7 @@ where .iter() .map(|state| { // TODO check for non-zero lower bounds and error? - node.get_current_flow_bounds(network, state) + node.get_flow_bounds(network, state) .expect("Flow bounds expected for Input, Output and Link nodes.") .1 .min(B_MAX) diff --git a/pywr-core/src/solvers/mod.rs b/pywr-core/src/solvers/mod.rs index c5974674..3c5ac322 100644 --- a/pywr-core/src/solvers/mod.rs +++ b/pywr-core/src/solvers/mod.rs @@ -74,6 +74,7 @@ pub enum SolverFeatures { AggregatedNodeFactors, AggregatedNodeDynamicFactors, VirtualStorage, + MutualExclusivity, } /// Solver settings that are common to all solvers. diff --git a/pywr-schema/src/nodes/annual_virtual_storage.rs b/pywr-schema/src/nodes/annual_virtual_storage.rs index d15345f5..a429ebc0 100644 --- a/pywr-schema/src/nodes/annual_virtual_storage.rs +++ b/pywr-schema/src/nodes/annual_virtual_storage.rs @@ -64,8 +64,29 @@ impl AnnualVirtualStorageNode { } } +#[cfg(feature = "core")] impl AnnualVirtualStorageNode { - #[cfg(feature = "core")] + pub fn node_indices_for_constraints( + &self, + network: &pywr_core::network::Network, + args: &LoadArgs, + ) -> Result, SchemaError> { + let indices = self + .nodes + .iter() + .map(|name| { + args.schema + .get_node_by_name(name) + .ok_or_else(|| SchemaError::NodeNotFound(name.to_string()))? + .node_indices_for_constraints(network, args) + }) + .collect::, _>>()? 
+ .into_iter() + .flatten() + .collect(); + Ok(indices) + } + pub fn add_to_model(&self, network: &mut pywr_core::network::Network, args: &LoadArgs) -> Result<(), SchemaError> { let cost = match &self.cost { Some(v) => v.load(network, args)?.into(), @@ -82,11 +103,7 @@ impl AnnualVirtualStorageNode { None => None, }; - let node_idxs = self - .nodes - .iter() - .map(|name| network.get_node_index_by_name(name.as_str(), None)) - .collect::, _>>()?; + let node_idxs = self.node_indices_for_constraints(network, args)?; let reset_month = self.reset.month.try_into()?; let reset = VirtualStorageReset::DayOfYear { @@ -109,7 +126,6 @@ impl AnnualVirtualStorageNode { Ok(()) } - #[cfg(feature = "core")] pub fn create_metric( &self, network: &mut pywr_core::network::Network, diff --git a/pywr-schema/src/nodes/core.rs b/pywr-schema/src/nodes/core.rs index 8325d569..0a904dda 100644 --- a/pywr-schema/src/nodes/core.rs +++ b/pywr-schema/src/nodes/core.rs @@ -45,6 +45,13 @@ impl InputNode { #[cfg(feature = "core")] impl InputNode { + pub fn node_indices_for_constraints( + &self, + network: &pywr_core::network::Network, + ) -> Result, SchemaError> { + let idx = network.get_node_index_by_name(self.meta.name.as_str(), None)?; + Ok(vec![idx]) + } pub fn add_to_model(&self, network: &mut pywr_core::network::Network) -> Result<(), SchemaError> { network.add_input_node(self.meta.name.as_str(), None)?; Ok(()) @@ -155,6 +162,13 @@ impl LinkNode { #[cfg(feature = "core")] impl LinkNode { + pub fn node_indices_for_constraints( + &self, + network: &pywr_core::network::Network, + ) -> Result, SchemaError> { + let idx = network.get_node_index_by_name(self.meta.name.as_str(), None)?; + Ok(vec![idx]) + } pub fn add_to_model(&self, network: &mut pywr_core::network::Network) -> Result<(), SchemaError> { network.add_link_node(self.meta.name.as_str(), None)?; Ok(()) @@ -266,6 +280,13 @@ impl OutputNode { #[cfg(feature = "core")] impl OutputNode { + pub fn node_indices_for_constraints( + &self, + network: &pywr_core::network::Network, + ) -> Result, SchemaError> { + let idx = network.get_node_index_by_name(self.meta.name.as_str(), None)?; + Ok(vec![idx]) + } pub fn create_metric( &self, network: &mut pywr_core::network::Network, @@ -404,6 +425,13 @@ impl StorageNode { #[cfg(feature = "core")] impl StorageNode { + pub fn node_indices_for_constraints( + &self, + network: &pywr_core::network::Network, + ) -> Result, SchemaError> { + let idx = network.get_node_index_by_name(self.meta.name.as_str(), None)?; + Ok(vec![idx]) + } pub fn add_to_model(&self, network: &mut pywr_core::network::Network) -> Result<(), SchemaError> { // Add the node with no constraints network.add_storage_node(self.meta.name.as_str(), None, self.initial_volume.into(), None, None)?; @@ -601,6 +629,13 @@ impl CatchmentNode { #[cfg(feature = "core")] impl CatchmentNode { + pub fn node_indices_for_constraints( + &self, + network: &pywr_core::network::Network, + ) -> Result, SchemaError> { + let idx = network.get_node_index_by_name(self.meta.name.as_str(), None)?; + Ok(vec![idx]) + } pub fn add_to_model(&self, network: &mut pywr_core::network::Network) -> Result<(), SchemaError> { network.add_input_node(self.meta.name.as_str(), None)?; Ok(()) @@ -673,9 +708,17 @@ impl TryFrom for CatchmentNode { #[derive(serde::Deserialize, serde::Serialize, Clone, Debug, JsonSchema, PywrVisitAll)] #[serde(tag = "type", deny_unknown_fields)] -pub enum Factors { - Proportion { factors: Vec }, - Ratio { factors: Vec }, +pub enum Relationship { + Proportion { + factors: Vec, + }, + 
Ratio { + factors: Vec, + }, + Exclusive { + min_active: Option, + max_active: Option, + }, } #[derive(serde::Deserialize, serde::Serialize, Clone, Default, Debug, JsonSchema, PywrVisitAll)] @@ -685,7 +728,7 @@ pub struct AggregatedNode { pub nodes: Vec, pub max_flow: Option, pub min_flow: Option, - pub factors: Option, + pub factors: Option, } impl AggregatedNode { @@ -709,11 +752,37 @@ impl AggregatedNode { #[cfg(feature = "core")] impl AggregatedNode { - pub fn add_to_model(&self, network: &mut pywr_core::network::Network) -> Result<(), SchemaError> { - let nodes = self + pub fn node_indices_for_constraints( + &self, + network: &pywr_core::network::Network, + args: &LoadArgs, + ) -> Result, SchemaError> { + let indices = self .nodes .iter() - .map(|name| network.get_node_index_by_name(name, None)) + .map(|name| { + args.schema + .get_node_by_name(name) + .ok_or_else(|| SchemaError::NodeNotFound(name.to_string()))? + .node_indices_for_constraints(network, args) + }) + .collect::, _>>()? + .into_iter() + .flatten() + .collect(); + Ok(indices) + } + pub fn add_to_model(&self, network: &mut pywr_core::network::Network, args: &LoadArgs) -> Result<(), SchemaError> { + let nodes: Vec> = self + .nodes + .iter() + .map(|name| { + let node = args + .schema + .get_node_by_name(name) + .ok_or_else(|| SchemaError::NodeNotFound(name.to_string()))?; + node.node_indices_for_constraints(network, args) + }) .collect::, _>>()?; // We initialise with no factors, but will update them in the `set_constraints` method @@ -738,22 +807,30 @@ impl AggregatedNode { } if let Some(factors) = &self.factors { - let f = match factors { - Factors::Proportion { factors } => pywr_core::aggregated_node::Factors::Proportion( - factors - .iter() - .map(|f| f.load(network, args)) - .collect::, _>>()?, - ), - Factors::Ratio { factors } => pywr_core::aggregated_node::Factors::Ratio( - factors + let r = match factors { + Relationship::Proportion { factors } => { + pywr_core::aggregated_node::Relationship::new_proportion_factors( + &factors + .iter() + .map(|f| f.load(network, args)) + .collect::, _>>()?, + ) + } + Relationship::Ratio { factors } => pywr_core::aggregated_node::Relationship::new_ratio_factors( + &factors .iter() .map(|f| f.load(network, args)) .collect::, _>>()?, ), + Relationship::Exclusive { min_active, max_active } => { + pywr_core::aggregated_node::Relationship::new_exclusive( + min_active.unwrap_or(0), + max_active.unwrap_or(1), + ) + } }; - network.set_aggregated_node_factors(self.meta.name.as_str(), None, Some(f))?; + network.set_aggregated_node_relationship(self.meta.name.as_str(), None, Some(r))?; } Ok(()) @@ -793,7 +870,7 @@ impl TryFrom for AggregatedNode { let mut unnamed_count = 0; let factors = match v1.factors { - Some(f) => Some(Factors::Ratio { + Some(f) => Some(Relationship::Ratio { factors: f .into_iter() .map(|v| v.try_into_v2_parameter(Some(&meta.name), &mut unnamed_count)) @@ -851,6 +928,26 @@ impl AggregatedStorageNode { #[cfg(feature = "core")] impl AggregatedStorageNode { + pub fn node_indices_for_constraints( + &self, + network: &pywr_core::network::Network, + args: &LoadArgs, + ) -> Result, SchemaError> { + let indices = self + .storage_nodes + .iter() + .map(|name| { + args.schema + .get_node_by_name(name) + .ok_or_else(|| SchemaError::NodeNotFound(name.to_string()))? + .node_indices_for_constraints(network, args) + }) + .collect::, _>>()? 
+ .into_iter() + .flatten() + .collect(); + Ok(indices) + } pub fn add_to_model(&self, network: &mut pywr_core::network::Network) -> Result<(), SchemaError> { let nodes = self .storage_nodes @@ -909,12 +1006,13 @@ mod tests { use crate::nodes::core::StorageInitialVolume; use crate::nodes::InputNode; use crate::nodes::StorageNode; - #[cfg(feature = "core")] use crate::PywrModel; #[cfg(feature = "core")] - use pywr_core::test_utils::run_all_solvers; + use pywr_core::test_utils::{run_all_solvers, ExpectedOutputs}; #[cfg(feature = "core")] use std::str::FromStr; + #[cfg(feature = "core")] + use tempfile::TempDir; #[test] fn test_input() { @@ -993,4 +1091,111 @@ mod tests { // Test all solvers run_all_solvers(&model, &[], &[]); } + + fn me1_str() -> &'static str { + include_str!("../test_models/mutual-exclusivity1.json") + } + fn me2_str() -> &'static str { + include_str!("../test_models/mutual-exclusivity2.json") + } + + #[cfg(feature = "core")] + fn me3_str() -> &'static str { + include_str!("../test_models/mutual-exclusivity3.json") + } + #[cfg(feature = "core")] + fn me1_outputs_str() -> &'static str { + include_str!("../test_models/mutual-exclusivity1.csv") + } + #[cfg(feature = "core")] + fn me2_outputs_str() -> &'static str { + include_str!("../test_models/mutual-exclusivity2.csv") + } + #[cfg(feature = "core")] + fn me3_outputs_str() -> &'static str { + include_str!("../test_models/mutual-exclusivity3.csv") + } + #[test] + fn test_me1_model_schema() { + let data = me1_str(); + let schema: PywrModel = serde_json::from_str(data).unwrap(); + + assert_eq!(schema.network.nodes.len(), 6); + assert_eq!(schema.network.edges.len(), 4); + } + #[test] + fn test_me2_model_schema() { + let data = me2_str(); + let schema: PywrModel = serde_json::from_str(data).unwrap(); + + assert_eq!(schema.network.nodes.len(), 6); + assert_eq!(schema.network.edges.len(), 4); + } + #[test] + #[cfg(feature = "core")] + fn test_me1_model_run() { + let data = me1_str(); + let schema: PywrModel = serde_json::from_str(data).unwrap(); + let temp_dir = TempDir::new().unwrap(); + + let mut model = schema.build_model(None, Some(temp_dir.path())).unwrap(); + + let network = model.network_mut(); + assert_eq!(network.nodes().len(), 5); + assert_eq!(network.edges().len(), 4); + + // After model run there should be an output file. + let expected_outputs = [ExpectedOutputs::new( + temp_dir.path().join("output.csv"), + me1_outputs_str(), + )]; + + // Test all solvers + run_all_solvers(&model, &["clp"], &expected_outputs); + } + #[test] + #[cfg(feature = "core")] + fn test_me2_model_run() { + let data = me2_str(); + let schema: PywrModel = serde_json::from_str(data).unwrap(); + let temp_dir = TempDir::new().unwrap(); + + let mut model = schema.build_model(None, Some(temp_dir.path())).unwrap(); + + let network = model.network_mut(); + assert_eq!(network.nodes().len(), 10); + assert_eq!(network.edges().len(), 11); + + // After model run there should be an output file. 
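+ // Its contents are compared against the expected CSV results below.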
+ let expected_outputs = [ExpectedOutputs::new( + temp_dir.path().join("output.csv"), + me2_outputs_str(), + )]; + + // Test all solvers + run_all_solvers(&model, &["clp"], &expected_outputs); + } + + #[test] + #[cfg(feature = "core")] + fn test_me3_model_run() { + let data = me3_str(); + let schema: PywrModel = serde_json::from_str(data).unwrap(); + let temp_dir = TempDir::new().unwrap(); + + let mut model = schema.build_model(None, Some(temp_dir.path())).unwrap(); + + let network = model.network_mut(); + assert_eq!(network.nodes().len(), 7); + assert_eq!(network.edges().len(), 8); + + // After model run there should be an output file. + let expected_outputs = [ExpectedOutputs::new( + temp_dir.path().join("output.csv"), + me3_outputs_str(), + )]; + + // Test all solvers + run_all_solvers(&model, &["clp"], &expected_outputs); + } } diff --git a/pywr-schema/src/nodes/delay.rs b/pywr-schema/src/nodes/delay.rs index 0f21e615..6be0e61d 100644 --- a/pywr-schema/src/nodes/delay.rs +++ b/pywr-schema/src/nodes/delay.rs @@ -66,6 +66,13 @@ impl DelayNode { #[cfg(feature = "core")] impl DelayNode { + pub fn node_indices_for_constraints( + &self, + network: &pywr_core::network::Network, + ) -> Result, SchemaError> { + let indices = vec![network.get_node_index_by_name(self.meta.name.as_str(), Self::input_sub_now())?]; + Ok(indices) + } pub fn add_to_model(&self, network: &mut pywr_core::network::Network) -> Result<(), SchemaError> { network.add_output_node(self.meta.name.as_str(), Self::output_sub_name())?; network.add_input_node(self.meta.name.as_str(), Self::input_sub_now())?; diff --git a/pywr-schema/src/nodes/loss_link.rs b/pywr-schema/src/nodes/loss_link.rs index ec7babb0..d8dde48c 100644 --- a/pywr-schema/src/nodes/loss_link.rs +++ b/pywr-schema/src/nodes/loss_link.rs @@ -7,7 +7,7 @@ use crate::model::LoadArgs; use crate::nodes::{NodeAttribute, NodeMeta}; use crate::parameters::TryIntoV2Parameter; #[cfg(feature = "core")] -use pywr_core::{aggregated_node::Factors, metric::MetricF64}; +use pywr_core::{aggregated_node::Relationship, metric::MetricF64}; use pywr_schema_macros::PywrVisitAll; use pywr_v1_schema::nodes::LossLinkNode as LossLinkNodeV1; use schemars::JsonSchema; @@ -30,7 +30,7 @@ impl LossFactor { &self, network: &mut pywr_core::network::Network, args: &LoadArgs, - ) -> Result, SchemaError> { + ) -> Result, SchemaError> { match self { LossFactor::Gross { factor } => { let lf = factor.load(network, args)?; @@ -40,17 +40,17 @@ impl LossFactor { return Ok(None); } // Gross losses are configured as a proportion of the net flow - Ok(Some(Factors::Proportion(vec![lf]))) + Ok(Some(Relationship::new_proportion_factors(&[lf]))) } LossFactor::Net { factor } => { let lf = factor.load(network, args)?; - // Handle the case where we a given a zero loss factor + // Handle the case where we are given a zero loss factor // The aggregated node does not support zero loss factors so filter them here. 
if lf.is_constant_zero() { return Ok(None); } // Net losses are configured as a ratio of the net flow - Ok(Some(Factors::Ratio(vec![1.0.into(), lf]))) + Ok(Some(Relationship::new_ratio_factors(&[1.0.into(), lf]))) } } } @@ -125,6 +125,14 @@ impl LossLinkNode { fn agg_sub_name() -> Option<&'static str> { Some("agg") } + + pub fn node_indices_for_constraints( + &self, + network: &pywr_core::network::Network, + ) -> Result, SchemaError> { + let indices = vec![network.get_node_index_by_name(self.meta.name.as_str(), Self::net_sub_name())?]; + Ok(indices) + } pub fn add_to_model(&self, network: &mut pywr_core::network::Network) -> Result<(), SchemaError> { let idx_net = network.add_link_node(self.meta.name.as_str(), Self::net_sub_name())?; // TODO make the loss node configurable (i.e. it could be a link if a network wanted to use the loss) @@ -136,7 +144,7 @@ impl LossLinkNode { network.add_aggregated_node( self.meta.name.as_str(), Self::agg_sub_name(), - &[idx_net, idx_loss], + &[vec![idx_net], vec![idx_loss]], None, )?; } @@ -165,7 +173,7 @@ impl LossLinkNode { if let Some(loss_factor) = &self.loss_factor { let factors = loss_factor.load(network, args)?; - network.set_aggregated_node_factors(self.meta.name.as_str(), Self::agg_sub_name(), factors)?; + network.set_aggregated_node_relationship(self.meta.name.as_str(), Self::agg_sub_name(), factors)?; } Ok(()) diff --git a/pywr-schema/src/nodes/mod.rs b/pywr-schema/src/nodes/mod.rs index 25ec16bf..d02a9577 100644 --- a/pywr-schema/src/nodes/mod.rs +++ b/pywr-schema/src/nodes/mod.rs @@ -395,7 +395,7 @@ impl Node { Node::River(n) => n.add_to_model(network), Node::RiverSplitWithGauge(n) => n.add_to_model(network), Node::WaterTreatmentWorks(n) => n.add_to_model(network), - Node::Aggregated(n) => n.add_to_model(network), + Node::Aggregated(n) => n.add_to_model(network, args), Node::AggregatedStorage(n) => n.add_to_model(network), Node::VirtualStorage(n) => n.add_to_model(network, args), Node::AnnualVirtualStorage(n) => n.add_to_model(network, args), @@ -408,6 +408,36 @@ impl Node { } } + /// Get the node indices for the constraints that this node has added to the network. 
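+ ///
+ /// Compound nodes (e.g. aggregated and virtual storage nodes) use these indices to apply
+ /// constraints, such as a mutual exclusivity, across the nodes they reference.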
+ pub fn node_indices_for_constraints( + &self, + network: &pywr_core::network::Network, + args: &LoadArgs, + ) -> Result, SchemaError> { + match self { + Node::Input(n) => n.node_indices_for_constraints(network), + Node::Link(n) => n.node_indices_for_constraints(network), + Node::Output(n) => n.node_indices_for_constraints(network), + Node::Storage(n) => n.node_indices_for_constraints(network), + Node::Catchment(n) => n.node_indices_for_constraints(network), + Node::RiverGauge(n) => n.node_indices_for_constraints(network), + Node::LossLink(n) => n.node_indices_for_constraints(network), + Node::River(n) => n.node_indices_for_constraints(network), + Node::RiverSplitWithGauge(n) => n.node_indices_for_constraints(network), + Node::WaterTreatmentWorks(n) => n.node_indices_for_constraints(network), + Node::Aggregated(n) => n.node_indices_for_constraints(network, args), + Node::AggregatedStorage(n) => n.node_indices_for_constraints(network, args), + Node::VirtualStorage(n) => n.node_indices_for_constraints(network, args), + Node::AnnualVirtualStorage(n) => n.node_indices_for_constraints(network, args), + Node::PiecewiseLink(n) => n.node_indices_for_constraints(network), + Node::PiecewiseStorage(n) => n.node_indices_for_constraints(network), + Node::Delay(n) => n.node_indices_for_constraints(network), + Node::Turbine(n) => n.node_indices_for_constraints(network), + Node::MonthlyVirtualStorage(n) => n.node_indices_for_constraints(network, args), + Node::RollingVirtualStorage(n) => n.node_indices_for_constraints(network, args), + } + } + pub fn set_constraints( &self, network: &mut pywr_core::network::Network, diff --git a/pywr-schema/src/nodes/monthly_virtual_storage.rs b/pywr-schema/src/nodes/monthly_virtual_storage.rs index 0c699f65..e8d74b4f 100644 --- a/pywr-schema/src/nodes/monthly_virtual_storage.rs +++ b/pywr-schema/src/nodes/monthly_virtual_storage.rs @@ -60,6 +60,26 @@ impl MonthlyVirtualStorageNode { #[cfg(feature = "core")] impl MonthlyVirtualStorageNode { + pub fn node_indices_for_constraints( + &self, + network: &pywr_core::network::Network, + args: &LoadArgs, + ) -> Result, SchemaError> { + let indices = self + .nodes + .iter() + .map(|name| { + args.schema + .get_node_by_name(name) + .ok_or_else(|| SchemaError::NodeNotFound(name.to_string()))? + .node_indices_for_constraints(network, args) + }) + .collect::, _>>()? 
+ .into_iter() + .flatten() + .collect(); + Ok(indices) + } pub fn add_to_model(&self, network: &mut pywr_core::network::Network, args: &LoadArgs) -> Result<(), SchemaError> { let cost = match &self.cost { Some(v) => v.load(network, args)?.into(), @@ -76,11 +96,7 @@ impl MonthlyVirtualStorageNode { None => None, }; - let node_idxs = self - .nodes - .iter() - .map(|name| network.get_node_index_by_name(name.as_str(), None)) - .collect::, _>>()?; + let node_idxs = self.node_indices_for_constraints(network, args)?; let reset = VirtualStorageReset::NumberOfMonths { months: self.reset.months as i32, diff --git a/pywr-schema/src/nodes/piecewise_link.rs b/pywr-schema/src/nodes/piecewise_link.rs index 6a976f1a..faee1d1d 100644 --- a/pywr-schema/src/nodes/piecewise_link.rs +++ b/pywr-schema/src/nodes/piecewise_link.rs @@ -78,6 +78,18 @@ impl PiecewiseLinkNode { #[cfg(feature = "core")] impl PiecewiseLinkNode { + pub fn node_indices_for_constraints( + &self, + network: &pywr_core::network::Network, + ) -> Result, SchemaError> { + let indices = self + .steps + .iter() + .enumerate() + .map(|(i, _)| network.get_node_index_by_name(self.meta.name.as_str(), Self::step_sub_name(i).as_deref())) + .collect::, _>>()?; + Ok(indices) + } pub fn add_to_model(&self, network: &mut pywr_core::network::Network) -> Result<(), SchemaError> { // create a link node for each step for (i, _) in self.steps.iter().enumerate() { diff --git a/pywr-schema/src/nodes/piecewise_storage.rs b/pywr-schema/src/nodes/piecewise_storage.rs index 62b5829c..93a6c784 100644 --- a/pywr-schema/src/nodes/piecewise_storage.rs +++ b/pywr-schema/src/nodes/piecewise_storage.rs @@ -85,6 +85,19 @@ impl PiecewiseStorageNode { Some("agg-store") } + pub fn node_indices_for_constraints( + &self, + network: &pywr_core::network::Network, + ) -> Result, SchemaError> { + let indices = self + .steps + .iter() + .enumerate() + .map(|(i, _)| network.get_node_index_by_name(self.meta.name.as_str(), Self::step_sub_name(i).as_deref())) + .collect::, _>>()?; + Ok(indices) + } + pub fn add_to_model(&self, network: &mut pywr_core::network::Network, args: &LoadArgs) -> Result<(), SchemaError> { // These are the min and max volume of the overall node let max_volume: SimpleMetricF64 = self.max_volume.load(network, args)?.try_into()?; diff --git a/pywr-schema/src/nodes/river.rs b/pywr-schema/src/nodes/river.rs index bbd4d2d4..d7fc33d1 100644 --- a/pywr-schema/src/nodes/river.rs +++ b/pywr-schema/src/nodes/river.rs @@ -31,6 +31,13 @@ impl RiverNode { #[cfg(feature = "core")] impl RiverNode { + pub fn node_indices_for_constraints( + &self, + network: &pywr_core::network::Network, + ) -> Result, SchemaError> { + let idx = network.get_node_index_by_name(self.meta.name.as_str(), None)?; + Ok(vec![idx]) + } pub fn add_to_model(&self, network: &mut pywr_core::network::Network) -> Result<(), SchemaError> { network.add_link_node(self.meta.name.as_str(), None)?; Ok(()) diff --git a/pywr-schema/src/nodes/river_gauge.rs b/pywr-schema/src/nodes/river_gauge.rs index 4397a6d6..e2875f46 100644 --- a/pywr-schema/src/nodes/river_gauge.rs +++ b/pywr-schema/src/nodes/river_gauge.rs @@ -68,6 +68,16 @@ impl RiverGaugeNode { #[cfg(feature = "core")] impl RiverGaugeNode { + pub fn node_indices_for_constraints( + &self, + network: &pywr_core::network::Network, + ) -> Result, SchemaError> { + let indices = vec![ + network.get_node_index_by_name(self.meta.name.as_str(), Self::mrf_sub_name())?, + network.get_node_index_by_name(self.meta.name.as_str(), Self::bypass_sub_name())?, + ]; + Ok(indices) 
+ } pub fn add_to_model(&self, network: &mut pywr_core::network::Network) -> Result<(), SchemaError> { network.add_link_node(self.meta.name.as_str(), Self::mrf_sub_name())?; network.add_link_node(self.meta.name.as_str(), Self::bypass_sub_name())?; diff --git a/pywr-schema/src/nodes/river_split_with_gauge.rs b/pywr-schema/src/nodes/river_split_with_gauge.rs index f11d1350..d5115bd3 100644 --- a/pywr-schema/src/nodes/river_split_with_gauge.rs +++ b/pywr-schema/src/nodes/river_split_with_gauge.rs @@ -7,7 +7,7 @@ use crate::model::LoadArgs; use crate::nodes::{NodeAttribute, NodeMeta}; use crate::parameters::TryIntoV2Parameter; #[cfg(feature = "core")] -use pywr_core::{aggregated_node::Factors, metric::MetricF64, node::NodeIndex}; +use pywr_core::{aggregated_node::Relationship, metric::MetricF64, node::NodeIndex}; use pywr_schema_macros::PywrVisitAll; use pywr_v1_schema::nodes::RiverSplitWithGaugeNode as RiverSplitWithGaugeNodeV1; use schemars::JsonSchema; @@ -104,6 +104,29 @@ impl RiverSplitWithGaugeNode { fn split_agg_sub_name(i: usize) -> Option { Some(format!("split-agg-{i}")) } + + pub fn node_indices_for_constraints( + &self, + network: &pywr_core::network::Network, + ) -> Result, SchemaError> { + // This gets the indices of all the link nodes + // There's currently no way to isolate the flows to the individual splits + // Therefore, the only metrics are gross inflow and outflow + let mut indices = vec![ + network.get_node_index_by_name(self.meta.name.as_str(), Self::mrf_sub_name())?, + network.get_node_index_by_name(self.meta.name.as_str(), Self::bypass_sub_name())?, + ]; + + let split_idx: Vec = self + .splits + .iter() + .enumerate() + .map(|(i, _)| network.get_node_index_by_name(self.meta.name.as_str(), Self::split_sub_name(i).as_deref())) + .collect::>()?; + + indices.extend(split_idx); + Ok(indices) + } pub fn add_to_model(&self, network: &mut pywr_core::network::Network) -> Result<(), SchemaError> { // TODO do this properly network.add_link_node(self.meta.name.as_str(), Self::mrf_sub_name())?; @@ -117,7 +140,7 @@ impl RiverSplitWithGaugeNode { network.add_aggregated_node( self.meta.name.as_str(), Self::split_agg_sub_name(i).as_deref(), - &[bypass_idx, split_idx], + &[vec![bypass_idx], vec![split_idx]], None, )?; } @@ -143,11 +166,11 @@ impl RiverSplitWithGaugeNode { for (i, (factor, _)) in self.splits.iter().enumerate() { // Set the factors for each split - let factors = Factors::Proportion(vec![factor.load(network, args)?]); - network.set_aggregated_node_factors( + let r = Relationship::new_proportion_factors(&[factor.load(network, args)?]); + network.set_aggregated_node_relationship( self.meta.name.as_str(), Self::split_agg_sub_name(i).as_deref(), - Some(factors), + Some(r), )?; } diff --git a/pywr-schema/src/nodes/rolling_virtual_storage.rs b/pywr-schema/src/nodes/rolling_virtual_storage.rs index 362e9356..462701e5 100644 --- a/pywr-schema/src/nodes/rolling_virtual_storage.rs +++ b/pywr-schema/src/nodes/rolling_virtual_storage.rs @@ -100,6 +100,26 @@ impl RollingVirtualStorageNode { #[cfg(feature = "core")] impl RollingVirtualStorageNode { + pub fn node_indices_for_constraints( + &self, + network: &pywr_core::network::Network, + args: &LoadArgs, + ) -> Result, SchemaError> { + let indices = self + .nodes + .iter() + .map(|name| { + args.schema + .get_node_by_name(name) + .ok_or_else(|| SchemaError::NodeNotFound(name.to_string()))? + .node_indices_for_constraints(network, args) + }) + .collect::, _>>()? 
+ .into_iter() + .flatten() + .collect(); + Ok(indices) + } pub fn add_to_model(&self, network: &mut pywr_core::network::Network, args: &LoadArgs) -> Result<(), SchemaError> { let initial_volume = if let Some(iv) = self.initial_volume { StorageInitialVolume::Absolute(iv) @@ -124,12 +144,7 @@ impl RollingVirtualStorageNode { None => None, }; - let node_idxs = self - .nodes - .iter() - .map(|name| network.get_node_index_by_name(name.as_str(), None)) - .collect::, _>>()?; - + let node_idxs = self.node_indices_for_constraints(network, args)?; // The rolling licence never resets let reset = VirtualStorageReset::Never; let timesteps = diff --git a/pywr-schema/src/nodes/turbine.rs b/pywr-schema/src/nodes/turbine.rs index 98fb76d1..f1321761 100644 --- a/pywr-schema/src/nodes/turbine.rs +++ b/pywr-schema/src/nodes/turbine.rs @@ -98,6 +98,14 @@ impl TurbineNode { fn sub_name() -> Option<&'static str> { Some("turbine") } + + pub fn node_indices_for_constraints( + &self, + network: &pywr_core::network::Network, + ) -> Result, SchemaError> { + let idx = network.get_node_index_by_name(self.meta.name.as_str(), None)?; + Ok(vec![idx]) + } pub fn add_to_model(&self, network: &mut pywr_core::network::Network, _args: &LoadArgs) -> Result<(), SchemaError> { network.add_link_node(self.meta.name.as_str(), None)?; Ok(()) diff --git a/pywr-schema/src/nodes/virtual_storage.rs b/pywr-schema/src/nodes/virtual_storage.rs index b8b64cdf..ca219d1f 100644 --- a/pywr-schema/src/nodes/virtual_storage.rs +++ b/pywr-schema/src/nodes/virtual_storage.rs @@ -48,6 +48,26 @@ impl VirtualStorageNode { #[cfg(feature = "core")] impl VirtualStorageNode { + pub fn node_indices_for_constraints( + &self, + network: &pywr_core::network::Network, + args: &LoadArgs, + ) -> Result, SchemaError> { + let indices = self + .nodes + .iter() + .map(|name_ref| { + args.schema + .get_node_by_name(&name_ref.name) + .ok_or_else(|| SchemaError::NodeNotFound(name_ref.name.to_string()))? + .node_indices_for_constraints(network, args) + }) + .collect::, _>>()? + .into_iter() + .flatten() + .collect(); + Ok(indices) + } pub fn add_to_model(&self, network: &mut pywr_core::network::Network, args: &LoadArgs) -> Result<(), SchemaError> { let cost = match &self.cost { Some(v) => v.load(network, args)?.into(), @@ -64,11 +84,7 @@ impl VirtualStorageNode { None => None, }; - let node_idxs = self - .nodes - .iter() - .map(|name| network.get_node_index_by_name(name.name.as_str(), None)) - .collect::, _>>()?; + let node_idxs = self.node_indices_for_constraints(network, args)?; // Standard virtual storage node never resets. 
let reset = VirtualStorageReset::Never; diff --git a/pywr-schema/src/nodes/water_treatment_works.rs b/pywr-schema/src/nodes/water_treatment_works.rs index 19c1d542..846f4ade 100644 --- a/pywr-schema/src/nodes/water_treatment_works.rs +++ b/pywr-schema/src/nodes/water_treatment_works.rs @@ -108,6 +108,14 @@ impl WaterTreatmentWorks { fn agg_sub_name() -> Option<&'static str> { Some("agg") } + + pub fn node_indices_for_constraints( + &self, + network: &pywr_core::network::Network, + ) -> Result, SchemaError> { + let idx = network.get_node_index_by_name(self.meta.name.as_str(), Self::net_sub_name())?; + Ok(vec![idx]) + } pub fn add_to_model(&self, network: &mut pywr_core::network::Network) -> Result<(), SchemaError> { let idx_net = network.add_link_node(self.meta.name.as_str(), Self::net_sub_name())?; let idx_soft_min_flow = network.add_link_node(self.meta.name.as_str(), Self::net_soft_min_flow_sub_name())?; @@ -124,7 +132,7 @@ impl WaterTreatmentWorks { network.add_aggregated_node( self.meta.name.as_str(), Self::agg_sub_name(), - &[idx_net, idx_loss], + &[vec![idx_net], vec![idx_loss]], None, )?; } @@ -173,7 +181,7 @@ impl WaterTreatmentWorks { if let Some(loss_factor) = &self.loss_factor { let factors = loss_factor.load(network, args)?; - network.set_aggregated_node_factors(self.meta.name.as_str(), Self::agg_sub_name(), factors)?; + network.set_aggregated_node_relationship(self.meta.name.as_str(), Self::agg_sub_name(), factors)?; } Ok(()) diff --git a/pywr-schema/src/test_models/mutual-exclusivity1.csv b/pywr-schema/src/test_models/mutual-exclusivity1.csv new file mode 100644 index 00000000..a721413d --- /dev/null +++ b/pywr-schema/src/test_models/mutual-exclusivity1.csv @@ -0,0 +1,13 @@ +time_start,time_end,scenario_index,metric_set,name,attribute,value +2015-01-01T00:00:00,2015-01-02T00:00:00,0,nodes,link1,Inflow,10.0 +2015-01-01T00:00:00,2015-01-02T00:00:00,0,nodes,link1,Outflow,10.0 +2015-01-01T00:00:00,2015-01-02T00:00:00,0,nodes,link2,Inflow,0.0 +2015-01-01T00:00:00,2015-01-02T00:00:00,0,nodes,link2,Outflow,0.0 +2015-01-02T00:00:00,2015-01-03T00:00:00,0,nodes,link1,Inflow,10.0 +2015-01-02T00:00:00,2015-01-03T00:00:00,0,nodes,link1,Outflow,10.0 +2015-01-02T00:00:00,2015-01-03T00:00:00,0,nodes,link2,Inflow,0.0 +2015-01-02T00:00:00,2015-01-03T00:00:00,0,nodes,link2,Outflow,0.0 +2015-01-03T00:00:00,2015-01-04T00:00:00,0,nodes,link1,Inflow,10.0 +2015-01-03T00:00:00,2015-01-04T00:00:00,0,nodes,link1,Outflow,10.0 +2015-01-03T00:00:00,2015-01-04T00:00:00,0,nodes,link2,Inflow,0.0 +2015-01-03T00:00:00,2015-01-04T00:00:00,0,nodes,link2,Outflow,0.0 diff --git a/pywr-schema/src/test_models/mutual-exclusivity1.json b/pywr-schema/src/test_models/mutual-exclusivity1.json new file mode 100644 index 00000000..a437c5d5 --- /dev/null +++ b/pywr-schema/src/test_models/mutual-exclusivity1.json @@ -0,0 +1,140 @@ +{ + "metadata": { + "title": "Mutual exclusivity test 1", + "description": "Test mutual exclusivities work with simple link node types", + "minimum_version": "0.1" + }, + "timestepper": { + "start": "2015-01-01", + "end": "2015-01-03", + "timestep": 1 + }, + "network": { + "nodes": [ + { + "meta": { + "name": "input1" + }, + "type": "Input" + }, + { + "meta": { + "name": "link1" + }, + "type": "Link", + "max_flow": { + "type": "Constant", + "value": 10.0 + } + }, + { + "meta": { + "name": "demand1" + }, + "type": "Output", + "max_flow": { + "type": "Constant", + "value": 15.0 + }, + "cost": { + "type": "Constant", + "value": -15 + } + }, + { + "meta": { + "name": "link2" + }, + "type": "Link", + 
"max_flow": { + "type": "Constant", + "value": 10.0 + } + }, + { + "meta": { + "name": "demand2" + }, + "type": "Output", + "max_flow": { + "type": "Constant", + "value": 15.0 + }, + "cost": { + "type": "Constant", + "value": -10 + } + }, + { + "meta": { + "name": "me" + }, + "type": "Aggregated", + "nodes": [ + "link1", + "link2" + ], + "factors": { + "type": "Exclusive" + } + } + ], + "edges": [ + { + "from_node": "input1", + "to_node": "link1" + }, + { + "from_node": "link1", + "to_node": "demand1" + }, + { + "from_node": "input1", + "to_node": "link2" + }, + { + "from_node": "link2", + "to_node": "demand2" + } + ], + "metric_sets": [ + { + "name": "nodes", + "metrics": [ + { + "type": "Node", + "name": "link1", + "attribute": "Inflow" + }, + { + "type": "Node", + "name": "link1", + "attribute": "Outflow" + }, + { + "type": "Node", + "name": "link2", + "attribute": "Inflow" + }, + { + "type": "Node", + "name": "link2", + "attribute": "Outflow" + } + ] + } + ], + "outputs": [ + { + "name": "node-outputs", + "type": "CSV", + "format": "long", + "filename": "output.csv", + "metric_set": [ + "nodes" + ], + "decimal_places": 1 + } + ] + } +} diff --git a/pywr-schema/src/test_models/mutual-exclusivity2.csv b/pywr-schema/src/test_models/mutual-exclusivity2.csv new file mode 100644 index 00000000..1f556f3a --- /dev/null +++ b/pywr-schema/src/test_models/mutual-exclusivity2.csv @@ -0,0 +1,19 @@ +time_start,time_end,scenario_index,metric_set,name,attribute,value +2015-01-01T00:00:00,2015-01-02T00:00:00,0,nodes,wtw1,Inflow,11.0 +2015-01-01T00:00:00,2015-01-02T00:00:00,0,nodes,wtw1,Outflow,10.0 +2015-01-01T00:00:00,2015-01-02T00:00:00,0,nodes,wtw1,Loss,1.0 +2015-01-01T00:00:00,2015-01-02T00:00:00,0,nodes,wtw2,Inflow,0.0 +2015-01-01T00:00:00,2015-01-02T00:00:00,0,nodes,wtw2,Outflow,0.0 +2015-01-01T00:00:00,2015-01-02T00:00:00,0,nodes,wtw2,Loss,0.0 +2015-01-02T00:00:00,2015-01-03T00:00:00,0,nodes,wtw1,Inflow,11.0 +2015-01-02T00:00:00,2015-01-03T00:00:00,0,nodes,wtw1,Outflow,10.0 +2015-01-02T00:00:00,2015-01-03T00:00:00,0,nodes,wtw1,Loss,1.0 +2015-01-02T00:00:00,2015-01-03T00:00:00,0,nodes,wtw2,Inflow,0.0 +2015-01-02T00:00:00,2015-01-03T00:00:00,0,nodes,wtw2,Outflow,0.0 +2015-01-02T00:00:00,2015-01-03T00:00:00,0,nodes,wtw2,Loss,0.0 +2015-01-03T00:00:00,2015-01-04T00:00:00,0,nodes,wtw1,Inflow,11.0 +2015-01-03T00:00:00,2015-01-04T00:00:00,0,nodes,wtw1,Outflow,10.0 +2015-01-03T00:00:00,2015-01-04T00:00:00,0,nodes,wtw1,Loss,1.0 +2015-01-03T00:00:00,2015-01-04T00:00:00,0,nodes,wtw2,Inflow,0.0 +2015-01-03T00:00:00,2015-01-04T00:00:00,0,nodes,wtw2,Outflow,0.0 +2015-01-03T00:00:00,2015-01-04T00:00:00,0,nodes,wtw2,Loss,0.0 diff --git a/pywr-schema/src/test_models/mutual-exclusivity2.json b/pywr-schema/src/test_models/mutual-exclusivity2.json new file mode 100644 index 00000000..5112d2a3 --- /dev/null +++ b/pywr-schema/src/test_models/mutual-exclusivity2.json @@ -0,0 +1,157 @@ +{ + "metadata": { + "title": "Mutual exclusivity test 2", + "description": "Test mutual exclusivities work with compound node types", + "minimum_version": "0.1" + }, + "timestepper": { + "start": "2015-01-01", + "end": "2015-01-03", + "timestep": 1 + }, + "network": { + "nodes": [ + { + "meta": { + "name": "input1" + }, + "type": "Input" + }, + { + "meta": { + "name": "wtw1" + }, + "type": "WaterTreatmentWorks", + "max_flow": { + "type": "Constant", + "value": 10.0 + }, + "loss_factor": { + "type": "Net", + "factor": { + "type": "Constant", + "value": 0.1 + } + } + }, + { + "meta": { + "name": "demand1" + }, + "type": "Output", + 
"max_flow": { + "type": "Constant", + "value": 15.0 + }, + "cost": { + "type": "Constant", + "value": -15 + } + }, + { + "meta": { + "name": "wtw2" + }, + "type": "WaterTreatmentWorks", + "max_flow": { + "type": "Constant", + "value": 10.0 + } + }, + { + "meta": { + "name": "demand2" + }, + "type": "Output", + "max_flow": { + "type": "Constant", + "value": 15.0 + }, + "cost": { + "type": "Constant", + "value": -10 + } + }, + { + "meta": { + "name": "me" + }, + "type": "Aggregated", + "nodes": [ + "wtw1", + "wtw2" + ], + "factors": { + "type": "Exclusive" + } + } + ], + "edges": [ + { + "from_node": "input1", + "to_node": "wtw1" + }, + { + "from_node": "wtw1", + "to_node": "demand1" + }, + { + "from_node": "input1", + "to_node": "wtw2" + }, + { + "from_node": "wtw2", + "to_node": "demand2" + } + ], + "metric_sets": [ + { + "name": "nodes", + "metrics": [ + { + "type": "Node", + "name": "wtw1", + "attribute": "Inflow" + }, + { + "type": "Node", + "name": "wtw1", + "attribute": "Outflow" + }, + { + "type": "Node", + "name": "wtw1", + "attribute": "Loss" + }, + { + "type": "Node", + "name": "wtw2", + "attribute": "Inflow" + }, + { + "type": "Node", + "name": "wtw2", + "attribute": "Outflow" + }, + { + "type": "Node", + "name": "wtw2", + "attribute": "Loss" + } + ] + } + ], + "outputs": [ + { + "name": "node-outputs", + "type": "CSV", + "format": "long", + "filename": "output.csv", + "metric_set": [ + "nodes" + ], + "decimal_places": 1 + } + ] + } +} diff --git a/pywr-schema/src/test_models/mutual-exclusivity3.csv b/pywr-schema/src/test_models/mutual-exclusivity3.csv new file mode 100644 index 00000000..a721413d --- /dev/null +++ b/pywr-schema/src/test_models/mutual-exclusivity3.csv @@ -0,0 +1,13 @@ +time_start,time_end,scenario_index,metric_set,name,attribute,value +2015-01-01T00:00:00,2015-01-02T00:00:00,0,nodes,link1,Inflow,10.0 +2015-01-01T00:00:00,2015-01-02T00:00:00,0,nodes,link1,Outflow,10.0 +2015-01-01T00:00:00,2015-01-02T00:00:00,0,nodes,link2,Inflow,0.0 +2015-01-01T00:00:00,2015-01-02T00:00:00,0,nodes,link2,Outflow,0.0 +2015-01-02T00:00:00,2015-01-03T00:00:00,0,nodes,link1,Inflow,10.0 +2015-01-02T00:00:00,2015-01-03T00:00:00,0,nodes,link1,Outflow,10.0 +2015-01-02T00:00:00,2015-01-03T00:00:00,0,nodes,link2,Inflow,0.0 +2015-01-02T00:00:00,2015-01-03T00:00:00,0,nodes,link2,Outflow,0.0 +2015-01-03T00:00:00,2015-01-04T00:00:00,0,nodes,link1,Inflow,10.0 +2015-01-03T00:00:00,2015-01-04T00:00:00,0,nodes,link1,Outflow,10.0 +2015-01-03T00:00:00,2015-01-04T00:00:00,0,nodes,link2,Inflow,0.0 +2015-01-03T00:00:00,2015-01-04T00:00:00,0,nodes,link2,Outflow,0.0 diff --git a/pywr-schema/src/test_models/mutual-exclusivity3.json b/pywr-schema/src/test_models/mutual-exclusivity3.json new file mode 100644 index 00000000..7d817383 --- /dev/null +++ b/pywr-schema/src/test_models/mutual-exclusivity3.json @@ -0,0 +1,160 @@ +{ + "metadata": { + "title": "Mutual exclusivity test 1", + "description": "Test mutual exclusivities work with simple link node types", + "minimum_version": "0.1" + }, + "timestepper": { + "start": "2015-01-01", + "end": "2015-01-03", + "timestep": 1 + }, + "network": { + "nodes": [ + { + "meta": { + "name": "input1" + }, + "type": "Input" + }, + { + "meta": { + "name": "link1" + }, + "type": "PiecewiseLink", + "steps": [ + { + "max_flow": { + "type": "Constant", + "value": 5.0 + } + }, + { + "max_flow": { + "type": "Constant", + "value": 5.0 + } + } + ] + }, + { + "meta": { + "name": "demand1" + }, + "type": "Output", + "max_flow": { + "type": "Constant", + "value": 15.0 + }, + "cost": { 
+ "type": "Constant", + "value": -15 + } + }, + { + "meta": { + "name": "link2" + }, + "type": "PiecewiseLink", + "steps": [ + { + "max_flow": { + "type": "Constant", + "value": 5.0 + } + }, + { + "max_flow": { + "type": "Constant", + "value": 5.0 + } + } + ] + }, + { + "meta": { + "name": "demand2" + }, + "type": "Output", + "max_flow": { + "type": "Constant", + "value": 15.0 + }, + "cost": { + "type": "Constant", + "value": -10 + } + }, + { + "meta": { + "name": "me" + }, + "type": "Aggregated", + "nodes": [ + "link1", + "link2" + ], + "factors": { + "type": "Exclusive" + } + } + ], + "edges": [ + { + "from_node": "input1", + "to_node": "link1" + }, + { + "from_node": "link1", + "to_node": "demand1" + }, + { + "from_node": "input1", + "to_node": "link2" + }, + { + "from_node": "link2", + "to_node": "demand2" + } + ], + "metric_sets": [ + { + "name": "nodes", + "metrics": [ + { + "type": "Node", + "name": "link1", + "attribute": "Inflow" + }, + { + "type": "Node", + "name": "link1", + "attribute": "Outflow" + }, + { + "type": "Node", + "name": "link2", + "attribute": "Inflow" + }, + { + "type": "Node", + "name": "link2", + "attribute": "Outflow" + } + ] + } + ], + "outputs": [ + { + "name": "node-outputs", + "type": "CSV", + "format": "long", + "filename": "output.csv", + "metric_set": [ + "nodes" + ], + "decimal_places": 1 + } + ] + } +}