diff --git a/axiom-profiler-GUI/src/results/filters.rs b/axiom-profiler-GUI/src/results/filters.rs
index ff2e96f8..47a18db3 100644
--- a/axiom-profiler-GUI/src/results/filters.rs
+++ b/axiom-profiler-GUI/src/results/filters.rs
@@ -141,7 +141,15 @@ impl Filter {
         let nodes_of_nth_matching_loop = graph
             .raw
             .node_indices()
-            .filter(|nx| graph.raw[*nx].part_of_ml.contains(&n))
+            .flat_map(|nx| {
+                let node = &graph.raw[nx];
+                match *node.kind() {
+                    NodeKind::Instantiation(iidx) => {
+                        node.part_of_ml.contains(&n).then_some(iidx)
+                    }
+                    _ => None,
+                }
+            })
             .collect::>();
         let relevant_non_qi_nodes: Vec<_> = Dfs::new(&*graph.raw.graph, nth_ml_endnode.0)
             .iter(graph.raw.rev())
diff --git a/axiom-profiler-GUI/src/utils/lookup.rs b/axiom-profiler-GUI/src/utils/lookup.rs
index 412c45f5..2f919840 100644
--- a/axiom-profiler-GUI/src/utils/lookup.rs
+++ b/axiom-profiler-GUI/src/utils/lookup.rs
@@ -76,7 +76,7 @@ pub type StringLookupZ3 = StringLookup>;
 impl StringLookupZ3 {
     pub fn init(parser: &Z3Parser) -> Self {
         let mut lookup = Self::new();
-        for (idx, instantiation) in parser.instantiations() {
+        for (idx, instantiation) in parser.instantiations().iter_enumerated() {
             let match_ = &parser[instantiation.match_];
             if let Some(quant) = match_.kind.quant_idx() {
                 let name = match &parser[quant].kind {
diff --git a/smt-log-parser/src/analysis/dependencies.rs b/smt-log-parser/src/analysis/dependencies.rs
new file mode 100644
index 00000000..7d143a32
--- /dev/null
+++ b/smt-log-parser/src/analysis/dependencies.rs
@@ -0,0 +1,103 @@
+use std::{cmp::Ordering, ops::Deref};
+
+use fxhash::FxHashSet;
+
+use crate::{items::QuantIdx, FxHashMap, TiVec, Z3Parser};
+
+use super::InstGraph;
+
+#[derive(Clone)]
+pub struct QuantifierAnalysis(TiVec<QuantIdx, QuantifierInfo>);
+
+#[derive(Default, Clone)]
+pub struct QuantifierInfo {
+    /// How much total cost did this quantifier accrue from individual
+    /// instantiations.
+    pub costs: f64,
+    /// How many times does an instantiation of this quantifier depend on an
+    /// instantiation of the other quantifier.
+    pub direct_deps: FxHashMap,
+}
+
+type TransQuantAnalaysis = TiVec<QuantIdx, FxHashSet<QuantIdx>>;
+
+impl Deref for QuantifierAnalysis {
+    type Target = TiVec<QuantIdx, QuantifierInfo>;
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+impl QuantifierAnalysis {
+    /// Calculate the analysis. Make sure that you have run
+    /// `initialise_inst_succs_and_preds` on the `inst_graph`!
+    pub fn new(parser: &Z3Parser, inst_graph: &InstGraph) -> Self {
+        let mut self_ = Self(
+            parser
+                .quantifiers
+                .iter()
+                .map(|_| QuantifierInfo::default())
+                .collect(),
+        );
+        for (iidx, inst) in parser.insts.insts.iter_enumerated() {
+            let match_ = &parser.insts[inst.match_];
+            let Some(qidx) = match_.kind.quant_idx() else {
+                continue;
+            };
+            let qinfo = &mut self_.0[qidx];
+            let ginst = &inst_graph.raw[iidx];
+            qinfo.costs += ginst.cost;
+            for &parent_iidx in &ginst.inst_parents.nodes {
+                let parent_inst = &parser.insts[parent_iidx];
+                let parent_match_ = &parser.insts[parent_inst.match_];
+                let Some(parent_qidx) = parent_match_.kind.quant_idx() else {
+                    continue;
+                };
+                *qinfo.direct_deps.entry(parent_qidx).or_default() += 1;
+            }
+        }
+        self_
+    }
+
+    pub fn total_costs(&self) -> f64 {
+        self.iter().map(|info| info.costs).sum()
+    }
+
+    pub fn calculate_transitive(&self, mut steps: Option) -> TransQuantAnalaysis {
+        let mut initial = self
+            .iter()
+            .map(|info| info.direct_deps.keys().copied().collect())
+            .collect();
+        while !steps.is_some_and(|steps| steps == 0) {
+            if !self.calculate_transitive_one(&mut initial) {
+                break;
+            }
+            if let Some(steps) = &mut steps {
+                *steps -= 1;
+            }
+        }
+        initial
+    }
+    fn calculate_transitive_one(&self, analysis: &mut TransQuantAnalaysis) -> bool {
+        let mut changed = false;
+        for (idx, info) in self.iter_enumerated() {
+            for &ddep in info.direct_deps.keys() {
+                let (curr, ddep) = match idx.cmp(&ddep) {
+                    Ordering::Less => {
+                        let (left, right) = analysis.split_at_mut(ddep);
+                        (&mut left[idx], right.first().unwrap())
+                    }
+                    Ordering::Greater => {
+                        let (left, right) = analysis.split_at_mut(idx);
+                        (right.first_mut().unwrap(), &left[ddep])
+                    }
+                    Ordering::Equal => continue,
+                };
+                let old_len = curr.len();
+                curr.extend(ddep);
+                changed |= old_len != curr.len();
+            }
+        }
+        changed
+    }
+}
diff --git a/smt-log-parser/src/analysis/graph/analysis/next_insts.rs b/smt-log-parser/src/analysis/graph/analysis/next_insts.rs
index 4b55528c..ad7bcc40 100644
--- a/smt-log-parser/src/analysis/graph/analysis/next_insts.rs
+++ b/smt-log-parser/src/analysis/graph/analysis/next_insts.rs
@@ -1,5 +1,3 @@
-use std::collections::HashSet;
-
 use petgraph::Direction;
 
 use crate::{
@@ -37,9 +35,7 @@ impl, const FORWARD: bool> Initialiser Self::Value {
-        NextInsts {
-            nodes: HashSet::default(),
-        }
+        NextInsts::default()
     }
     fn assign(&mut self, node: &mut Node, value: Self::Value) {
         if FORWARD {
@@ -68,25 +64,9 @@ impl, const FORWARD: bool> TransferInitialiser<
     }
     fn add(&mut self, node: &mut Node, value: Self::Value) {
         if FORWARD {
-            node.inst_parents = NextInsts {
-                nodes: node
-                    .inst_parents
-                    .nodes
-                    .iter()
-                    .cloned()
-                    .chain(value.nodes.iter().cloned())
-                    .collect(),
-            };
+            node.inst_parents.nodes.extend(value.nodes);
         } else {
-            node.inst_children = NextInsts {
-                nodes: node
-                    .inst_children
-                    .nodes
-                    .iter()
-                    .cloned()
-                    .chain(value.nodes.iter().cloned())
-                    .collect(),
-            };
+            node.inst_children.nodes.extend(value.nodes);
         }
     }
 }
@@ -94,28 +74,20 @@ impl, const FORWARD: bool> TransferInitialiser<
 pub struct DefaultNextInsts;
 impl NextInstsInitialiser for DefaultNextInsts {
     fn base(&mut self, _node: &Node, _parser: &Z3Parser) -> NextInsts {
-        NextInsts {
-            nodes: HashSet::default(),
-        }
-    }
-    type Observed = NextInsts;
-    fn observe(&mut self, node: &Node, _parser: &Z3Parser) -> Self::Observed {
-        if FORWARD {
-            node.inst_parents.clone()
-        } else {
-            node.inst_children.clone()
-        }
+        NextInsts::default()
     }
+    type Observed = ();
+    fn observe(&mut self, _node: &Node, _parser: &Z3Parser) -> Self::Observed {}
     fn transfer(
         &mut self,
         node: &Node,
-        from_idx: RawNodeIndex,
+        _from_idx: RawNodeIndex,
         _idx: usize,
         _incoming: &[Self::Observed],
     ) -> NextInsts {
-        let value = match node.kind() {
-            NodeKind::Instantiation(_) => NextInsts {
-                nodes: std::iter::once(from_idx).collect(),
+        match *node.kind() {
+            NodeKind::Instantiation(iidx) => NextInsts {
+                nodes: std::iter::once(iidx).collect(),
             },
             _ => {
                 if FORWARD {
@@ -124,7 +96,6 @@ impl NextInstsInitialiser for DefaultNextInsts,
+    pub nodes: FxHashSet,
 }
 impl Node {
@@ -325,12 +325,8 @@ impl Node {
             bwd_depth: Depth::default(),
             subgraph: None,
             kind,
-            inst_parents: NextInsts {
-                nodes: FxHashSet::default(),
-            },
-            inst_children: NextInsts {
-                nodes: FxHashSet::default(),
-            },
+            inst_parents: NextInsts::default(),
+            inst_children: NextInsts::default(),
             part_of_ml: FxHashSet::default(),
         }
     }
diff --git a/smt-log-parser/src/analysis/graph/visible.rs b/smt-log-parser/src/analysis/graph/visible.rs
index 156ea9f0..e46b1ad5 100644
--- a/smt-log-parser/src/analysis/graph/visible.rs
+++ b/smt-log-parser/src/analysis/graph/visible.rs
@@ -15,7 +15,7 @@ use crate::{
 use super::{
     analysis::matching_loop::MIN_MATCHING_LOOP_LENGTH,
-    raw::{EdgeKind, Node, NodeKind},
+    raw::{EdgeKind, IndexesInstGraph, Node, NodeKind},
     InstGraph, RawEdgeIndex, RawNodeIndex,
 };
@@ -126,7 +126,8 @@ impl InstGraph {
         for (i, node) in self.raw.graph.node_weights().enumerate() {
             let from = node_index_map[i];
             if from != NodeIndex::end() {
-                for next_inst in node.inst_children.nodes.clone() {
+                for &next_inst in &node.inst_children.nodes {
+                    let next_inst = next_inst.index(&self.raw);
                     let to = node_index_map[next_inst.0.index()];
                     if to != NodeIndex::end() {
                         graph.add_edge(
diff --git a/smt-log-parser/src/analysis/misc.rs b/smt-log-parser/src/analysis/misc.rs
new file mode 100644
index 00000000..3b244319
--- /dev/null
+++ b/smt-log-parser/src/analysis/misc.rs
@@ -0,0 +1,68 @@
+use crate::{items::QuantIdx, TiVec, Z3Parser};
+
+pub struct LogInfo {
+    pub match_: MatchesInfo,
+    pub inst: InstsInfo,
+    pub quants: QuantsInfo,
+}
+
+#[derive(Default)]
+/// Counts of different match-line kinds. Essentially how many instantiations
+/// were from each of the different categories.
+pub struct MatchesInfo {
+    pub mbqi: u64,
+    pub theory_solving: u64,
+    pub axioms: u64,
+    pub quantifiers: u64,
+}
+
+#[derive(Default)]
+/// Counts of different instantiation stats.
+pub struct InstsInfo {
+    pub enodes: u64,
+    pub geqs: u64,
+    pub treqs: u64,
+    pub insts: u64,
+}
+
+impl InstsInfo {
+    pub fn total(&self) -> u64 {
+        self.enodes + self.geqs + self.treqs + self.insts
+    }
+}
+
+/// How many times each quantifier was instantiated
+pub struct QuantsInfo(pub TiVec);
+
+impl LogInfo {
+    pub fn new(parser: &Z3Parser) -> Self {
+        let mut quants = QuantsInfo(parser.quantifiers.iter().map(|_| 0).collect());
+        let mut match_ = MatchesInfo::default();
+        for inst in parser.instantiations().iter() {
+            let match_i = &parser[inst.match_];
+            if let Some(qidx) = match_i.kind.quant_idx() {
+                quants.0[qidx] += 1;
+            }
+            use crate::items::MatchKind::*;
+            match &match_i.kind {
+                MBQI { .. } => match_.mbqi += 1,
+                TheorySolving { .. } => match_.theory_solving += 1,
+                Axiom { .. } => match_.axioms += 1,
+                Quantifier { .. } => match_.quantifiers += 1,
+            }
+        }
+
+        let inst = InstsInfo {
+            insts: parser.insts.insts.len() as u64,
+            enodes: parser.egraph.enodes.len() as u64,
+            geqs: parser.egraph.equalities.given.len() as u64,
+            treqs: parser.egraph.equalities.transitive.len() as u64,
+        };
+
+        Self {
+            match_,
+            inst,
+            quants,
+        }
+    }
+}
diff --git a/smt-log-parser/src/analysis/mod.rs b/smt-log-parser/src/analysis/mod.rs
index 30391906..edd6f563 100644
--- a/smt-log-parser/src/analysis/mod.rs
+++ b/smt-log-parser/src/analysis/mod.rs
@@ -1,3 +1,7 @@
+mod dependencies;
 mod graph;
+mod misc;
 
+pub use dependencies::*;
 pub use graph::*;
+pub use misc::*;
diff --git a/smt-log-parser/src/cmd/args.rs b/smt-log-parser/src/cmd/args.rs
index bc8b3fe3..1a8a46d8 100644
--- a/smt-log-parser/src/cmd/args.rs
+++ b/smt-log-parser/src/cmd/args.rs
@@ -16,13 +16,22 @@ pub enum Commands {
         logfile: std::path::PathBuf,
 
         /// Depth of dependencies to lookup
-        #[arg(short, long, default_value_t = 1)]
-        depth: u32,
+        #[arg(short, long, default_value = "0")]
+        depth: Option,
         /// Whether to pretty print the output results
         #[arg(short, long, default_value_t = false)]
         pretty_print: bool,
     },
+    #[cfg(feature = "analysis")]
+    /// Print out statistics for the SMT solver
+    Stats {
+        /// The path to the smt log file
+        logfile: std::path::PathBuf,
+        /// how many of the most instantiated axioms to print
+        #[arg(short)]
+        k: Option,
+    },
 
     /// Tests the parser and analysis, printing out timing information
     Test {
         /// The paths to the smt log files
diff --git a/smt-log-parser/src/cmd/dependencies.rs b/smt-log-parser/src/cmd/dependencies.rs
index 58e5b3f2..bb1ac18a 100644
--- a/smt-log-parser/src/cmd/dependencies.rs
+++ b/smt-log-parser/src/cmd/dependencies.rs
@@ -1,151 +1,93 @@
-use fxhash::{FxHashMap, FxHashSet};
-use petgraph::visit::{Dfs, EdgeFiltered, EdgeRef, Reversed, Visitable, Walker};
 use std::path::PathBuf;
 
 use smt_log_parser::{
-    analysis::{raw::IndexesInstGraph, InstGraph, RawNodeIndex},
-    items::InstIdx,
-    LogParser, Z3Parser,
+    analysis::{InstGraph, QuantifierAnalysis},
+    items::QuantIdx,
+    Z3Parser,
 };
 
-pub fn run(logfile: PathBuf, depth: u32, pretty_print: bool) -> Result<(), String> {
-    let path = std::path::Path::new(&logfile);
-    let filename = path
-        .file_name()
-        .map(|f| f.to_string_lossy())
-        .unwrap_or_default();
+pub fn run(logfile: PathBuf, depth: Option, pretty_print: bool) -> Result<(), String> {
+    let parser = super::run_on_logfile(logfile)?;
+    let mut inst_graph = InstGraph::new(&parser).map_err(|e| format!("{e:?}"))?;
+    inst_graph.initialise_inst_succs_and_preds(&parser);
 
-    if !path.is_file() {
-        return Err(format!("path {filename} did not point to a file"));
+    let qanalysis = QuantifierAnalysis::new(&parser, &inst_graph);
+    let total_costs = qanalysis.total_costs();
+    fn get_quant_name(parser: &Z3Parser, qidx: QuantIdx) -> Option<&str> {
+        parser[qidx].kind.user_name().map(|name| &parser[name])
     }
-    let (_metadata, parser) = Z3Parser::from_file(path).unwrap();
-    let parser = parser.process_all().map_err(|e| e.to_string())?;
-    let inst_graph = InstGraph::new(&parser).map_err(|e| format!("{e:?}"))?;
-    let (total, axiom_deps) = build_axiom_dependency_graph(&parser, &inst_graph);
-
-    if depth == 1 {
+    if depth.is_some_and(|depth| depth == 0) {
         // TODO: deduplicate
-        for (axiom, (count, deps)) in axiom_deps {
-            let percentage = 100.0 * count as f64 / total as f64;
-            let total = deps.values().sum::() as f64;
+        for (qidx, info) in qanalysis.iter_enumerated() {
+            let Some(name) = get_quant_name(&parser, qidx) else {
+                continue;
+            };
+            let percentage = (100.0 * info.costs) / total_costs as f64;
+            let total = info.direct_deps.values().sum::() as f64;
+            let named = || {
+                info.direct_deps.iter().flat_map(|(ddep, count)| {
+                    get_quant_name(&parser, *ddep).map(|name| (name, *count))
+                })
+            };
             if pretty_print {
-                println!(
-                    "axiom {axiom} ({percentage:.1}%) depends on {} axioms:",
-                    deps.len()
-                );
-                for (dep, count) in deps {
+                let named_count = named().count();
+                if info.direct_deps.len() == named_count {
+                    println!("axiom {name} ({percentage:.1}%) depends on {named_count} axioms:");
+                } else {
+                    println!(
+                        "axiom {name} ({percentage:.1}%) depends on {} axioms, of those {named_count} are named:",
+                        info.direct_deps.len(),
+                    );
+                }
+                for (dep, count) in named() {
                     let percentage = 100.0 * count as f64 / total;
                     println!(" - {dep} ({percentage:.1}%)");
                 }
             } else {
-                let deps: Vec = deps
-                    .into_iter()
+                let deps: Vec = named()
                     .map(|(dep, count)| {
                         let percentage = 100.0 * count as f64 / total;
                         format!("{dep} ({percentage:.1}%)")
                     })
                     .collect();
-                println!("{axiom} ({percentage:.1}%) = {}", deps.join(", "));
+                let named_count = deps.len();
+                if info.direct_deps.len() == named_count {
+                    println!("{name} ({percentage:.1}%) -> {}", deps.join(", "));
+                } else {
+                    println!(
+                        "{name} ({percentage:.1}%), {named_count}/{} named -> {}",
+                        info.direct_deps.len(),
+                        deps.join(", ")
+                    );
+                }
             }
         }
         return Ok(());
     }
 
-    let mut axiom_deps = axiom_deps
-        .into_iter()
-        .map(|(k, (count, v))| (k, (count, v.into_keys().collect::>())))
-        .collect::>();
-    for _ in 1..depth {
-        extend_by_transitive_deps(&mut axiom_deps);
-    }
+    let trans = qanalysis.calculate_transitive(depth);
 
-    for (axiom, (count, deps)) in axiom_deps {
-        let percentage = count as f64 / total as f64 * 100.0;
+    for (qidx, deps) in trans.iter_enumerated() {
+        let info = &qanalysis[qidx];
+        let Some(name) = get_quant_name(&parser, qidx) else {
+            continue;
+        };
+        let percentage = (100.0 * info.costs as f64) / total_costs as f64;
+        let named = || deps.iter().flat_map(|ddep| get_quant_name(&parser, *ddep));
         if pretty_print {
             println!(
-                "axiom {axiom} ({percentage:.1}%) depends on {} axioms:",
+                "axiom {name} ({percentage:.1}%) depends on {} axioms:",
                 deps.len()
             );
-            for dep in deps {
+            for dep in named() {
                 println!(" - {dep}");
             }
         } else {
-            let deps: Vec<&str> = deps.into_iter().collect();
-            println!("{axiom} ({percentage:.1}%) = {}", deps.join(", "));
+            let deps: Vec<_> = named().collect();
+            println!("{name} ({percentage:.1}%) = {}", deps.join(", "));
        }
     }
 
     Ok(())
 }
-
-/// Returns an iterator over all instantiations of a quantifier that
-/// have a user name.
-fn named_instantiations(parser: &Z3Parser) -> impl Iterator + '_ {
-    parser
-        .instantiations()
-        .filter_map(|(idx, inst)| parser[inst.match_].kind.quant_idx().map(|v| (idx, v)))
-        .filter_map(|(idx, quant_id)| parser[quant_id].kind.user_name().map(|v| (idx, &parser[v])))
-}
-
-pub type DependencyMap<'a> = FxHashMap<&'a str, (usize, FxHashMap<&'a str, usize>)>;
-
-/// Constructs a mapping from axioms to the immediately preceding axiom that produced a term that triggered them.
-fn build_axiom_dependency_graph<'a>(
-    parser: &'a Z3Parser,
-    inst_graph: &InstGraph,
-) -> (usize, DependencyMap<'a>) {
-    let node_name_map: FxHashMap = named_instantiations(parser).collect();
-    let total = node_name_map.len();
-    let mut node_dep_map: FxHashMap<&str, (usize, FxHashMap<&str, usize>)> = FxHashMap::default();
-
-    for (idx, name) in &node_name_map {
-        let named_node = idx.index(&inst_graph.raw);
-        // We will be removing these edges in the `filtered` graph so need to
-        // start the DFS from the parents.
-        let parents = inst_graph
-            .raw
-            .graph
-            .neighbors_directed(named_node.0, petgraph::Direction::Incoming)
-            .collect();
-        // Start a DFS from all the parents of the named node.
-        let dfs = Dfs::from_parts(parents, inst_graph.raw.graph.visit_map());
-
-        // A graph without the edges leading to named nodes. This will prevent
-        // the DFS from walking past such nodes.
-        let filtered = EdgeFiltered::from_fn(&*inst_graph.raw.graph, |edge| {
-            !inst_graph.raw[RawNodeIndex(edge.target())]
-                .kind()
-                .inst()
-                .is_some_and(|inst| node_name_map.contains_key(&inst))
-        });
-        // Walk the graph in reverse (i.e. using Incoming edges) and filter only
-        // the leaf nodes.
-        let dependent_on = dfs
-            .iter(Reversed(&filtered))
-            .map(RawNodeIndex)
-            .filter_map(|node| inst_graph.raw[node].kind().inst())
-            .filter_map(|inst| node_name_map.get(&inst).copied());
-
-        let entry = node_dep_map.entry(name);
-        let entry = entry.or_default();
-        entry.0 += 1;
-        for dependent_on in dependent_on {
-            *entry.1.entry(dependent_on).or_default() += 1;
-        }
-    }
-
-    (total, node_dep_map)
-}
-
-/// Extends the dependency graph by 1 transitive step
-fn extend_by_transitive_deps(axiom_deps: &mut FxHashMap<&str, (usize, FxHashSet<&str>)>) {
-    let old_deps = axiom_deps.clone();
-    for (axiom, (_, deps)) in &old_deps {
-        for dep in deps {
-            if let Some((_, extended_deps)) = old_deps.get(dep) {
-                axiom_deps.get_mut(axiom).unwrap().1.extend(extended_deps);
-            }
-        }
-    }
-}
diff --git a/smt-log-parser/src/cmd/mod.rs b/smt-log-parser/src/cmd/mod.rs
index 04cb4422..a8b0970a 100644
--- a/smt-log-parser/src/cmd/mod.rs
+++ b/smt-log-parser/src/cmd/mod.rs
@@ -1,9 +1,11 @@
 mod args;
 #[cfg(feature = "analysis")]
 mod dependencies;
+mod stats;
 mod test;
 
 use clap::Parser;
+use smt_log_parser::{LogParser, Z3Parser};
 
 pub fn run() -> Result<(), String> {
     match args::Cli::parse().command {
@@ -13,8 +15,16 @@ pub fn run() -> Result<(), String> {
             depth,
             pretty_print,
         } => dependencies::run(logfile, depth, pretty_print)?,
+        #[cfg(feature = "analysis")]
+        args::Commands::Stats { logfile, k } => stats::run(logfile, k)?,
         args::Commands::Test { logfiles } => test::run(logfiles)?,
     }
 
     Ok(())
 }
+
+fn run_on_logfile(logfile: std::path::PathBuf) -> Result {
+    let path = std::path::Path::new(&logfile);
+    let (_metadata, parser) = Z3Parser::from_file(path).map_err(|e| e.to_string())?;
+    parser.process_all().map_err(|e| e.to_string())
+}
diff --git a/smt-log-parser/src/cmd/stats.rs b/smt-log-parser/src/cmd/stats.rs
new file mode 100644
index 00000000..513ba45e
--- /dev/null
+++ b/smt-log-parser/src/cmd/stats.rs
@@ -0,0 +1,44 @@
+use std::path::PathBuf;
+
+use smt_log_parser::analysis::LogInfo;
+
+pub fn run(logfile: PathBuf, top_k: Option) -> Result<(), String> {
+    let parser = super::run_on_logfile(logfile)?;
+    let info = LogInfo::new(&parser);
+
+    let mut instantiations_occurrances: Vec<_> = info
+        .quants
+        .0
+        .iter_enumerated()
+        .flat_map(|(qidx, icount)| {
+            parser[qidx]
+                .kind
+                .user_name()
+                .map(|name| (&parser[name], icount))
+        })
+        .collect();
+    instantiations_occurrances.sort_by(|l, r| Ord::cmp(&r.1, &l.1));
+
+    println!("no-enodes: {}", info.inst.enodes);
+    println!("no-given-equalities: {}", info.inst.geqs);
+    println!("no-trans-equalities: {}", info.inst.treqs);
+    println!("no-instantiations: {}", info.inst.insts);
+    println!("no-mbqi-instantiations: {}", info.match_.mbqi);
+    println!(
+        "no-theory-solving-instantiations: {}",
+        info.match_.theory_solving
+    );
+    println!("no-axioms-instantiations: {}", info.match_.axioms);
+    println!("no-quantifiers-instantiations: {}", info.match_.quantifiers);
+    println!("nodes-count: {}", info.inst.total());
+
+    println!("top-instantiations=");
+    let iter = instantiations_occurrances
+        .iter()
+        .take(top_k.unwrap_or(usize::MAX));
+    for (count, inst) in iter {
+        println!("{} = {}", inst, count);
+    }
+
+    Ok(())
+}
diff --git a/smt-log-parser/src/mem_dbg/mod.rs b/smt-log-parser/src/mem_dbg/mod.rs
index 6524c7be..1cfe1dfa 100644
--- a/smt-log-parser/src/mem_dbg/mod.rs
+++ b/smt-log-parser/src/mem_dbg/mod.rs
@@ -115,6 +115,11 @@ impl Default for TiVec {
         Self(typed_index_collections::TiVec::default())
     }
 }
+impl FromIterator for TiVec {
+    fn from_iter>(iter: T) -> Self {
+        Self(typed_index_collections::TiVec::from_iter(iter))
+    }
+}
 
 // FxHashMap
 
@@ -124,6 +129,11 @@ impl Default for FxHashMap {
         Self(fxhash::FxHashMap::default())
     }
 }
+impl FromIterator<(K, V)> for FxHashMap {
+    fn from_iter>(iter: T) -> Self {
+        Self(fxhash::FxHashMap::from_iter(iter))
+    }
+}
 
 // StringTable
 
diff --git a/smt-log-parser/src/parsers/z3/z3parser.rs b/smt-log-parser/src/parsers/z3/z3parser.rs
index 8d486a1e..d2e08a65 100644
--- a/smt-log-parser/src/parsers/z3/z3parser.rs
+++ b/smt-log-parser/src/parsers/z3/z3parser.rs
@@ -1,5 +1,6 @@
 #[cfg(feature = "mem_dbg")]
 use mem_dbg::{MemDbg, MemSize};
+use typed_index_collections::TiSlice;
 
 use crate::{
     items::*,
@@ -636,11 +637,11 @@ impl Z3Parser {
         (self.quantifiers.len(), self.insts.has_theory_solving_inst())
     }
 
-    pub fn quantifiers(&self) -> impl Iterator {
-        self.quantifiers.iter_enumerated()
+    pub fn quantifiers(&self) -> &TiSlice {
+        &self.quantifiers
     }
 
-    pub fn instantiations(&self) -> impl Iterator {
-        self.insts.insts.iter_enumerated()
+    pub fn instantiations(&self) -> &TiSlice {
+        &self.insts.insts
     }
 }
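
Usage sketch (not part of the patch): a minimal example of how the analysis entry points introduced above compose, mirroring what `cmd/stats.rs` and `cmd/dependencies.rs` do. The `analyse` function name and the exact output are illustrative only, and the `analysis` feature of `smt-log-parser` is assumed to be enabled.

use smt_log_parser::{
    analysis::{InstGraph, LogInfo, QuantifierAnalysis},
    LogParser, Z3Parser,
};

fn analyse(logfile: &std::path::Path) -> Result<(), String> {
    // Parse and process the whole log, as `run_on_logfile` does in `cmd/mod.rs`.
    let (_metadata, parser) = Z3Parser::from_file(logfile).map_err(|e| e.to_string())?;
    let parser = parser.process_all().map_err(|e| e.to_string())?;

    // Plain counts, as used by the new `Stats` subcommand.
    let info = LogInfo::new(&parser);
    println!("instantiations: {}", info.inst.insts);

    // Quantifier costs and direct dependencies, as used by `Dependencies`.
    // `initialise_inst_succs_and_preds` must run before `QuantifierAnalysis::new`.
    let mut inst_graph = InstGraph::new(&parser).map_err(|e| format!("{e:?}"))?;
    inst_graph.initialise_inst_succs_and_preds(&parser);
    let qanalysis = QuantifierAnalysis::new(&parser, &inst_graph);
    let total = qanalysis.total_costs();
    for (qidx, qinfo) in qanalysis.iter_enumerated() {
        if let Some(name) = parser[qidx].kind.user_name() {
            println!("{}: {:.1}% of cost", &parser[name], 100.0 * qinfo.costs / total);
        }
    }

    // Transitive closure of the dependencies; `None` iterates to a fixed point.
    let _transitive = qanalysis.calculate_transitive(None);
    Ok(())
}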