diff --git a/Cargo.lock b/Cargo.lock index fc5b6388..e05a9e33 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. -version = 3 +version = 4 [[package]] name = "addr2line" @@ -28,9 +28,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7" +checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" [[package]] name = "anymap2" @@ -335,7 +335,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13b588ba4ac1a99f7f2964d24b3d896ddc6bf847ee3855dbd4366f058cfcd331" dependencies = [ "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -359,7 +359,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -515,7 +515,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -922,9 +922,9 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" [[package]] name = "libc" -version = "0.2.168" +version = "0.2.169" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aaeb2981e0606ca11d79718f8bb01164f1d6ed75080182d3abf017e6d244b6d" +checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" [[package]] name = "linux-raw-sys" @@ -994,9 +994,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.0" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" +checksum = "4ffbe83022cedc1d264172192511ae958937694cd57ce297164951b8b3568394" dependencies = [ 
"adler2", ] @@ -1044,9 +1044,9 @@ dependencies = [ [[package]] name = "object" -version = "0.36.5" +version = "0.36.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" dependencies = [ "memchr", ] @@ -1240,7 +1240,7 @@ checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -1439,9 +1439,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" +checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" dependencies = [ "bitflags 2.6.0", ] @@ -1574,15 +1574,15 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" +checksum = "3cb6eb87a131f756572d7fb904f6e7b68633f09cca868c5df1c4b8d1a694bbba" [[package]] name = "serde" -version = "1.0.215" +version = "1.0.216" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" +checksum = "0b9781016e935a97e8beecf0c933758c97a5520d32930e460142b4cd80c6338e" dependencies = [ "serde_derive", ] @@ -1600,20 +1600,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.215" +version = "1.0.216" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" +checksum = "46f859dbbf73865c6627ed570e78961cd3ac92407a2d117204c49232485da55e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 
2.0.91", ] [[package]] name = "serde_json" -version = "1.0.133" +version = "1.0.134" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" +checksum = "d00f4175c42ee48b15416f6193a959ba3a0d67fc699a0db9ad12df9f83991c7d" dependencies = [ "itoa 1.0.14", "memchr", @@ -1745,9 +1745,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.90" +version = "2.0.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31" +checksum = "d53cbcb5a243bd33b7858b1d7f4aca2153490815872d86d955d6ea29f743c035" dependencies = [ "proc-macro2", "quote", @@ -1828,7 +1828,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -1880,7 +1880,7 @@ checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -2075,7 +2075,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", "wasm-bindgen-shared", ] @@ -2110,7 +2110,7 @@ checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -2312,5 +2312,5 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] diff --git a/aris/Cargo.toml b/aris/Cargo.toml index 779f4a14..8016a30c 100644 --- a/aris/Cargo.toml +++ b/aris/Cargo.toml @@ -1,8 +1,11 @@ [package] name = "aris" version = "0.1.0" -authors = ["Avi Weinstock "] +authors = ["Avi Weinstock ", "Zach Bonagura "] edition = "2021" +description = "Aris is a formal logic proof-building tool designed to help users learn and practice 
logic." +repository = "https://github.com/Bram-Hub/aris" +license = "GPL-3.0" [dependencies] base64 = "0.21.0" diff --git a/aris/src/equivs.rs b/aris/src/equivs.rs index 6ea73fcb..20e2b333 100644 --- a/aris/src/equivs.rs +++ b/aris/src/equivs.rs @@ -35,13 +35,6 @@ define_rewrite_rule! { ("(P | Q) & (P | R)", "P | (Q & R)"), ] } -define_rewrite_rule! { - COMPLEMENT, - &[ - ("phi & ~phi", "_|_"), - ("phi | ~phi", "^|^"), - ] -} define_rewrite_rule! { IDENTITY, &[ @@ -63,29 +56,6 @@ define_rewrite_rule! { ("~_|_", "^|^"), ] } -define_rewrite_rule! { - ABSORPTION, - &[ - ("phi & (phi | psi)", "phi"), - ("phi | (phi & psi)", "phi"), - ] -} -define_rewrite_rule! { - REDUCTION, - &[ - ("phi & (~phi | psi)", "phi & psi"), - ("~phi & (phi | psi)", "~phi & psi"), - ("phi | (~phi & psi)", "phi | psi"), - ("~phi | (phi & psi)", "~phi | psi"), - ] -} -define_rewrite_rule! { - ADJACENCY, - &[ - ("(phi | psi) & (phi | ~psi)", "phi"), - ("(phi & psi) | (phi & ~psi)", "phi"), - ] -} // Conditional Equivalences define_rewrite_rule! { @@ -175,12 +145,6 @@ define_rewrite_rule! { ("(phi & psi) | (~phi & ~psi)", "phi <-> psi"), ] } -define_rewrite_rule! { - BICONDITIONAL_CONTRAPOSITION, - &[ - ("phi <-> psi", "psi <-> phi"), - ] -} define_rewrite_rule! { BICONDITIONAL_COMMUTATION, &[ @@ -247,10 +211,11 @@ mod tests { } } + /// Test function to verify the logical equivalence of rewrite rules using brute-force truth tables. 
#[test] fn bruteforce_equivalence_truthtables() { use std::collections::HashMap; - let rules: Vec<&RewriteRule> = vec![&*DOUBLE_NEGATION, &*DISTRIBUTION, &*COMPLEMENT, &*IDENTITY, &*ANNIHILATION, &*INVERSE, &*ABSORPTION, &*REDUCTION, &*ADJACENCY, &*CONDITIONAL_ABSORPTION, &*CONDITIONAL_ANNIHILATION, &*CONDITIONAL_IMPLICATION, &*CONDITIONAL_CONTRAPOSITION, &*CONDITIONAL_EXPORTATION, &*CONDITIONAL_COMPLEMENT, &*CONDITIONAL_IDENTITY, &*CONDITIONAL_DISTRIBUTION, &*CONDITIONAL_REDUCTION, &*KNIGHTS_AND_KNAVES, &*CONDITIONAL_IDEMPOTENCE, &*BICONDITIONAL_CONTRAPOSITION, &*BICONDITIONAL_ASSOCIATION, &*BICONDITIONAL_COMMUTATION, &*BICONDITIONAL_REDUCTION, &*BICONDITIONAL_COMPLEMENT, &*BICONDITIONAL_IDENTITY, &*BICONDITIONAL_EQUIVALENCE, &*BICONDITIONAL_NEGATION, &*BICONDITIONAL_SUBSTITUTION]; + let rules: Vec<&RewriteRule> = vec![&*DOUBLE_NEGATION, &*DISTRIBUTION, &*IDENTITY, &*ANNIHILATION, &*INVERSE, &*CONDITIONAL_ABSORPTION, &*CONDITIONAL_ANNIHILATION, &*CONDITIONAL_IMPLICATION, &*CONDITIONAL_CONTRAPOSITION, &*CONDITIONAL_EXPORTATION, &*CONDITIONAL_COMPLEMENT, &*CONDITIONAL_IDENTITY, &*CONDITIONAL_DISTRIBUTION, &*CONDITIONAL_REDUCTION, &*KNIGHTS_AND_KNAVES, &*CONDITIONAL_IDEMPOTENCE, &*BICONDITIONAL_ASSOCIATION, &*BICONDITIONAL_COMMUTATION, &*BICONDITIONAL_REDUCTION, &*BICONDITIONAL_COMPLEMENT, &*BICONDITIONAL_IDENTITY, &*BICONDITIONAL_EQUIVALENCE, &*BICONDITIONAL_NEGATION, &*BICONDITIONAL_SUBSTITUTION]; for rule in rules { for (lhs, rhs) in rule.reductions.iter() { println!("Testing {lhs} -> {rhs}"); diff --git a/aris/src/expr.rs b/aris/src/expr.rs index 0e6a9775..eb0b398e 100644 --- a/aris/src/expr.rs +++ b/aris/src/expr.rs @@ -861,6 +861,9 @@ impl Expr { normalize_expr(self) } + /// Normalize biconditional contrapositions in the expression. + /// Converts expressions of the form '(A <-> B)' into their negated counterparts '(~A <-> ~B)'. + /// Ensures that this transformation does not occur repeatedly to avoid infinite loops. 
pub fn normalize_biconditional_contraposition(self) -> Expr { self.transform(&|expr| { match expr { @@ -924,6 +927,7 @@ impl Expr { } } + /// Helper function for collecting unique expressions under absorption laws fn collect_unique_exprs_absorption(expr: &Expr, op: Op, unique_exprs: &mut BTreeSet) { match expr { Expr::Assoc { op: expr_op, exprs } if *expr_op == op => { @@ -947,6 +951,9 @@ impl Expr { } } + /// Normalize an expression using absorption laws: + /// Simplifies redundant subexpressions like 'A | (A & B)' => 'A'. + /// Handles associative operators to simplify expressions with nested terms. pub fn normalize_absorption(self) -> Expr { if let Expr::Assoc { op, exprs } = self { let mut unique_exprs = BTreeSet::new(); @@ -965,6 +972,8 @@ impl Expr { } } + /// Helper function for collecting expressions to reduce over logical reduction rules + /// Applies rules like 'A & ~A' -> '⊥' or 'A | ~A' -> '⊤'. fn collect_reduction_exprs(expr: &Expr, op: Op, reduced_exprs: &mut BTreeSet) { match expr { Expr::Assoc { op: expr_op, exprs } if *expr_op == op => { @@ -1004,6 +1013,9 @@ impl Expr { } } + /// Normalize an expression using logical reduction rules: + /// Simplifies contradictions ('A & ~A') to '⊥' and tautologies ('A | ~A') to '⊤'. + /// Reduces nested expressions based on their logical structure. pub fn normalize_reduction(self) -> Expr { match self { Expr::Assoc { op, exprs } => { @@ -1047,6 +1059,8 @@ impl Expr { } } + /// Check if an expression is a subset of another associative expression. + /// Used to validate containment relationships between subexpressions. fn is_subset_of_assoc(expr: &Expr, set: &BTreeSet, op: Op) -> bool { match expr { Expr::Assoc { op: expr_op, exprs } if *expr_op == op => exprs.iter().all(|sub_expr| set.contains(sub_expr)), @@ -1054,6 +1068,8 @@ impl Expr { } } + /// Normalize adjacency in expressions: + /// Simplifies and flattens expressions with adjacent associative operators. 
pub fn normalize_adjacency(self) -> Expr { match self { Expr::Assoc { op, exprs } => { @@ -1076,6 +1092,7 @@ impl Expr { } } + /// Extract expressions from associative structures while preserving their operator type. fn extract_associative(self, op: Op) -> Vec { match self { Expr::Assoc { op: expr_op, exprs } if expr_op == op => exprs, @@ -1083,6 +1100,8 @@ impl Expr { } } + /// General simplification for associative expressions: + /// Attempts to merge or simplify pairs of subexpressions under a given operator. fn simplify_general(mut exprs: Vec, op: Op) -> Option { for i in 0..exprs.len() { for j in (i + 1..exprs.len()).rev() { @@ -1102,6 +1121,8 @@ impl Expr { } } + /// Try to simplify a pair of expressions based on logical relationships. + /// Merges or simplifies expressions under logical operators (e.g., 'A | B'). fn try_simplify_pair(e1: &Expr, e2: &Expr, op: Op) -> Option { match op { Op::Or | Op::And => Self::simplify_logic(e1, e2), @@ -1109,6 +1130,7 @@ impl Expr { } } + /// Attempt to simplify two associative expressions with the same operator. fn simplify_logic(e1: &Expr, e2: &Expr) -> Option { match (e1, e2) { (Expr::Assoc { op: sub_op1, exprs: exprs1 }, Expr::Assoc { op: sub_op2, exprs: exprs2 }) if *sub_op1 == *sub_op2 => { @@ -1127,6 +1149,7 @@ impl Expr { None } + /// Create an associative expression from a list of subexpressions. fn assoc_or_single(mut exprs: Vec, op: Op) -> Expr { exprs.sort(); exprs.dedup(); @@ -1136,6 +1159,7 @@ impl Expr { } } + /// Determine if two lists of subexpressions are complementary. fn is_complementary(others1: &[&Expr], others2: &[&Expr]) -> bool { matches!( (others1, others2), @@ -1513,6 +1537,8 @@ impl Expr { }) } + /// Infer and manipulate quantifiers: + /// Applies rules like '(∃x (P & Q))' => '(∃x P) & (∃x Q)' and merges compatible quantifiers. 
pub fn quantifier_inference(self) -> Expr { let distribute_quantifiers = |expr: &Expr| -> Option { match expr { @@ -1547,6 +1573,8 @@ impl Expr { self.transform(&|expr| distribute_quantifiers(&expr).map_or((expr.clone(), false), |transformed| (transformed, true))) } + /// Distribute quantifiers across associative operators: + /// Pushes quantifiers inside conjunctions or disjunctions when applicable. pub fn quantifier_distribution(self) -> Expr { let push_quantifier_inside = |kind: QuantKind, qname: String, exprs: &mut Vec| { exprs.iter_mut().for_each(|iter| { diff --git a/aris/src/parser.rs b/aris/src/parser.rs index 4e32f92b..66ebffcc 100644 --- a/aris/src/parser.rs +++ b/aris/src/parser.rs @@ -1,4 +1,18 @@ //! Parse infix logical expressions into an AST +//! +//! This file defines a parser for converting infix logical expressions into an Abstract Syntax Tree (AST). +//! It utilizes the 'nom' parser combinator library to handle parsing logic. The resulting AST can represent +//! various logical constructs, such as predicates, quantifiers, tautologies, contradictions, and operators +//! (e.g., AND, OR, IMPLIES). +//! +//! ## Main Functions +//! - 'parse': Converts a logical expression string into an AST ('Expr') or returns 'None' if parsing fails. +//! - 'parse_unwrap': Like 'parse', but panics on failure. Primarily used for testing. +//! +//! ## Grammar and Parsing Notes +//! - The parser handles infix logical expressions with support for parentheses, quantifiers, and operators. +//! - Functions are modular and correspond to specific grammar productions in Extended Backus-Naur Form (EBNF). +//! - The parser includes support for Unicode symbols (e.g., '∀', '∃', '∧', '∨'). 
use nom::branch::alt; use nom::bytes::complete::tag; @@ -36,10 +50,12 @@ pub fn parse_unwrap(input: &str) -> Expr { parse(input).unwrap_or_else(|| panic!("failed parsing: {input}")) } +/// Custom error helper function for parser failure fn custom_error(a: A) -> nom::IResult { Err(nom::Err::Error(nom::error::Error { input: a, code: nom::error::ErrorKind::Fail })) } +/// Parses a variable, ensuring it is not a reserved keyword fn variable(input: &str) -> nom::IResult<&str, String> { verify(variable_, |v| keyword(v).is_err())(input) } @@ -48,50 +64,62 @@ fn variable(input: &str) -> nom::IResult<&str, String> { // `alt` corresponds to alternation/choice in an EBNF grammar // `tag` is used for literal string values, and supports unicode +/// Matches whitespace characters (spaces or tabs) fn space(input: &str) -> IResult<&str, ()> { value((), many0(one_of(" \t")))(input) } +/// Matches variable-like identifiers (alphanumeric or underscores) fn variable_(input: &str) -> IResult<&str, String> { map(recognize(many1(one_of("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_"))), |v: &str| v.to_owned())(input) } +/// Matches logical keywords ('forall' or 'exists') fn keyword(input: &str) -> IResult<&str, &str> { alt((tag("forall"), tag("exists")))(input) } +/// Parses a logical contradiction (e.g., '_|_' or '⊥') fn contradiction(input: &str) -> IResult<&str, Expr> { value(Expr::Contra, alt((tag("_|_"), tag("⊥"))))(input) } +/// Parses a logical tautology (e.g., '^|^' or '⊤') fn tautology(input: &str) -> IResult<&str, Expr> { value(Expr::Taut, alt((tag("^|^"), tag("⊤"))))(input) } +/// Parses a negation term (e.g., '¬A') fn notterm(input: &str) -> IResult<&str, Expr> { map(preceded(alt((tag("~"), tag("¬"))), paren_expr), |e| Expr::Not { operand: Box::new(e) })(input) } +/// Parses a predicate or variable term fn predicate(input: &str) -> IResult<&str, Expr> { alt((map(pair(delimited(space, variable, space), delimited(tag("("), separated_list0(tuple((space, tag(","), space)), 
expr), tag(")"))), |(name, args)| Expr::Apply { func: Box::new(Expr::Var { name }), args }), map(delimited(space, variable, space), |name| Expr::Var { name })))(input) } +/// Parses a universal quantifier ('∀') and associates it with an expression fn forall_quantifier(input: &str) -> IResult<&str, QuantKind> { value(QuantKind::Forall, alt((tag("forall "), tag("∀"))))(input) } +/// Parses an existential quantifier ('∃') and associates it with an expression fn exists_quantifier(input: &str) -> IResult<&str, QuantKind> { value(QuantKind::Exists, alt((tag("exists "), tag("∃"))))(input) } +/// Parses any quantifier ('∀' or '∃') fn quantifier(input: &str) -> IResult<&str, QuantKind> { alt((forall_quantifier, exists_quantifier))(input) } +/// Matches whitespace characters after quantifier fn space_after_quantifier(input: &str) -> IResult<&str, ()> { value((), many1(one_of(" \t")))(input) } +/// Matches whitespace characters depending on if there exists a quantifier or not fn conditional_space(input: &str) -> IResult<&str, ()> { let is_next_quantifier = peek(quantifier)(input); @@ -101,6 +129,7 @@ fn conditional_space(input: &str) -> IResult<&str, ()> { } } +/// Parses a logical binder (quantifier + variable + body) fn binder(input: &str) -> IResult<&str, Expr> { map( tuple(( @@ -120,34 +149,42 @@ fn binder(input: &str) -> IResult<&str, Expr> { )(input) } +/// Parses an implication term (e.g., 'A -> B' or 'A → B') fn impl_term(input: &str) -> IResult<&str, Expr> { map(separated_pair(paren_expr, tuple((space, alt((tag("->"), tag("→"))), space)), paren_expr), |(left, right)| Expr::Impl { left: Box::new(left), right: Box::new(right) })(input) } +/// Parses an AND operator (e.g., '&', '∧', or '/\') fn andrepr(input: &str) -> IResult<&str, Op> { value(Op::And, alt((tag("&"), tag("∧"), tag("/\\"))))(input) } +/// Parses an OR operator (e.g., '|', '∨', or '\/') fn orrepr(input: &str) -> IResult<&str, Op> { value(Op::Or, alt((tag("|"), tag("∨"), tag("\\/"))))(input) } +/// Parses 
a biconditional operator (e.g., '<->' or '↔') fn biconrepr(input: &str) -> IResult<&str, Op> { value(Op::Bicon, alt((tag("<->"), tag("↔"))))(input) } +/// Parses an equivalence operator (e.g., '===' or '≡') fn equivrepr(input: &str) -> IResult<&str, Op> { value(Op::Equiv, alt((tag("==="), tag("≡"))))(input) } +/// Parses an addition operator ('+') fn plusrepr(input: &str) -> IResult<&str, Op> { value(Op::Add, tag("+"))(input) } +/// Parses a multiplication operator ('*') fn multrepr(input: &str) -> IResult<&str, Op> { value(Op::Mult, tag("*"))(input) } +/// Parses a sequence of associative terms and their operators fn assoc_term_aux(input: &str) -> IResult<&str, (Vec, Vec)> { alt(( map(tuple((paren_expr, delimited(space, alt((andrepr, orrepr, biconrepr, equivrepr, plusrepr, multrepr)), space), assoc_term_aux)), |(e, sym, (mut es, mut syms))| { diff --git a/aris/src/proofs.rs b/aris/src/proofs.rs index fbcecbf4..7522771b 100644 --- a/aris/src/proofs.rs +++ b/aris/src/proofs.rs @@ -165,7 +165,7 @@ pub mod pooledproof; /// - Trivial to construct from Claim objects /// ## Cons /// - Doesn't support most operations -/// - Doesn't handle binding structure, so can't be used for first order logic, only prepositional logic +/// - Doesn't handle binding structure, so can't be used for first order logic, only propositional logic pub mod java_shallow_proof; /// A LinedProof is a wrapper around another proof type that adds lines and strings, for interfacing with the GUI @@ -202,57 +202,130 @@ pub trait Proof: Sized { type JustificationReference: Clone + Eq + Ord + Hash; type SubproofReference: Clone + Eq + Ord + Hash; type Subproof: Proof; + + /// Creates a new proof object. + /// This initializes an empty proof structure. fn new() -> Self; + + /// Retrieves the top-level proof of the current proof structure. + /// The top-level proof is the outermost subproof that encapsulates all lines and subproofs. 
fn top_level_proof(&self) -> &Self::Subproof; + + /// Looks up the expression associated with a premise reference. fn lookup_premise(&self, r: &Self::PremiseReference) -> Option; + + /// Looks up the justification associated with a justification reference. fn lookup_step(&self, r: &Self::JustificationReference) -> Option, Self::SubproofReference>>; + + /// Looks up a subproof using its reference. fn lookup_subproof(&self, r: &Self::SubproofReference) -> Option; + + /// Mutates the expression of a premise in place. + /// The provided closure is executed on the mutable reference to the expression. fn with_mut_premise A>(&mut self, r: &Self::PremiseReference, f: F) -> Option; + + /// Mutates the justification of a step in place. + /// The provided closure is executed on the mutable reference to the justification. fn with_mut_step, Self::SubproofReference>) -> A>(&mut self, r: &Self::JustificationReference, f: F) -> Option; + + /// Mutates a subproof in place. + /// The provided closure is executed on the mutable reference to the subproof. fn with_mut_subproof A>(&mut self, r: &Self::SubproofReference, f: F) -> Option; + + /// Adds a new premise to the proof. + /// Returns a reference to the newly added premise. fn add_premise(&mut self, e: Expr) -> Self::PremiseReference; + + /// Adds a new subproof to the proof. + /// Returns a reference to the newly added subproof. fn add_subproof(&mut self) -> Self::SubproofReference; + + /// Adds a new step (justification) to the proof. + /// Returns a reference to the newly added step. fn add_step(&mut self, just: Justification, Self::SubproofReference>) -> Self::JustificationReference; + + /// Prepends a step to the proof, adding it at the beginning. + /// If no steps exist, adds it as the first step. 
fn prepend_step(&mut self, just: Justification, Self::SubproofReference>) -> Self::JustificationReference { match self.lines().first() { Some(first_step) => self.add_step_relative(just, first_step, false), None => self.add_step(just), } } + + /// Adds a premise relative to another premise reference. + /// The `after` parameter determines if the new premise is added before or after the reference. fn add_premise_relative(&mut self, e: Expr, r: &Self::PremiseReference, after: bool) -> Self::PremiseReference; + + /// Adds a subproof relative to another justification reference. + /// The `after` parameter determines if the new subproof is added before or after the reference. fn add_subproof_relative(&mut self, r: &JsRef, after: bool) -> Self::SubproofReference; + + /// Adds a step relative to another justification reference. + /// The `after` parameter determines if the new step is added before or after the reference. fn add_step_relative(&mut self, just: Justification, Self::SubproofReference>, r: &JsRef, after: bool) -> Self::JustificationReference; + + /// Removes a line from the proof using its reference. fn remove_line(&mut self, r: &PjRef); + + /// Removes a subproof from the proof using its reference. fn remove_subproof(&mut self, r: &Self::SubproofReference); + + /// Retrieves all premise references within the proof. + /// Returns a vector of premise references. fn premises(&self) -> Vec; + + /// Retrieves all justification and subproof references within the proof. + /// Returns a vector of mixed references. fn lines(&self) -> Vec>; + + /// Retrieves the parent subproof of a given line reference, if it exists. fn parent_of_line(&self, r: &PjsRef) -> Option; + + /// Verifies a specific line in the proof to check its validity. fn verify_line(&self, r: &PjRef) -> Result<(), ProofCheckError, Self::SubproofReference>>; + /// Retrieves the expression for a given proof reference, or `None` if not found. 
fn lookup_expr(&self, r: &PjRef) -> Option { r.clone().fold(hlist![|pr| self.lookup_premise(&pr), |jr| self.lookup_step(&jr).map(|x| x.0)]) } + + /// Retrieves the expression for a proof reference, returning an error if not found. fn lookup_expr_or_die(&self, r: &PjRef) -> Result, Self::SubproofReference>> { self.lookup_expr(r).ok_or_else(|| ProofCheckError::LineDoesNotExist(r.clone())) } + + /// Retrieves a premise expression or returns an error if the reference is invalid. fn lookup_premise_or_die(&self, r: &Self::PremiseReference) -> Result, Self::SubproofReference>> { self.lookup_premise(r).ok_or_else(|| ProofCheckError::LineDoesNotExist(Coproduct::inject(r.clone()))) } + + /// Retrieves a justification or returns an error if the reference is invalid. fn lookup_justification_or_die(&self, r: &Self::JustificationReference) -> Result, ProofCheckError, Self::SubproofReference>> { self.lookup_step(r).ok_or_else(|| ProofCheckError::LineDoesNotExist(Coproduct::inject(r.clone()))) } + + /// Looks up a premise or justification, returning either type wrapped in a coproduct. fn lookup_pj(&self, r: &PjRef) -> Option)> { r.clone().fold(hlist![|pr| self.lookup_premise(&pr).map(Coproduct::inject), |jr| self.lookup_step(&jr).map(Coproduct::inject)]) } + + /// Retrieves a subproof or returns an error if the reference is invalid. fn lookup_subproof_or_die(&self, r: &Self::SubproofReference) -> Result, Self::SubproofReference>> { self.lookup_subproof(r).ok_or_else(|| ProofCheckError::SubproofDoesNotExist(r.clone())) } + + /// Returns all direct justification references in the proof. fn direct_lines(&self) -> Vec { self.lines().iter().filter_map(|x| Coproduct::uninject::(x.clone()).ok()).collect() } + + /// Returns all proof references, including premises and direct justification lines. 
fn exprs(&self) -> Vec> { self.premises().into_iter().map(Coproduct::inject).chain(self.direct_lines().into_iter().map(Coproduct::inject)).collect() } + + /// Retrieves all justifications within the proof, optionally including premises. fn contained_justifications(&self, include_premises: bool) -> HashSet> { let mut ret = self.lines().into_iter().filter_map(|x| x.fold(hlist![|r: Self::JustificationReference| Some(vec![r].into_iter().map(Coproduct::inject).collect()), |r: Self::SubproofReference| self.lookup_subproof(&r).map(|sub| sub.contained_justifications(include_premises)),])).fold(HashSet::new(), |mut x, y| { x.extend(y); @@ -263,6 +336,9 @@ pub trait Proof: Sized { } ret } + + /// Computes the transitive dependencies of a specific line in the proof. + /// Returns a set of all lines that the given line depends on. fn transitive_dependencies(&self, line: PjRef) -> HashSet> { use frunk_core::coproduct::Coproduct::{Inl, Inr}; // TODO: cycle detection @@ -290,6 +366,9 @@ pub trait Proof: Sized { } result } + + /// Determines the depth of a specific line in the proof hierarchy. + /// Returns the number of subproof levels enclosing the line. fn depth_of_line(&self, r: &PjsRef) -> usize { let mut result = 0; let mut current = r.clone(); @@ -300,6 +379,8 @@ pub trait Proof: Sized { result } + /// Computes all possible dependencies that a given line can reference. + /// Updates the provided sets of valid dependencies and subproof references. fn possible_deps_for_line(&self, r: &PjRef, deps: &mut HashSet>, sdeps: &mut HashSet) { // r1 can reference r2 if all of the following hold: // 1) r2 occurs before r1 diff --git a/aris/src/proofs/proof_tests.rs b/aris/src/proofs/proof_tests.rs index a40df43f..2136a985 100644 --- a/aris/src/proofs/proof_tests.rs +++ b/aris/src/proofs/proof_tests.rs @@ -67,7 +67,7 @@ macro_rules! 
enumerate_subproofless_tests { test_commutation_bicon, test_association_bool, test_association_bicon, test_demorgan, test_idempotence, test_doublenegation, test_distribution, test_complement, test_identity, test_annihilation, test_inverse, test_absorption, - test_reduction, test_adjacency, test_resolution, test_tautcon, test_empty_rule, + test_reduction, test_adjacency, test_resolution, test_truth_func_conseq, test_empty_rule, test_modus_tollens, test_hypothetical_syllogism, test_disjunctive_syllogism, test_constructive_dilemma, test_destructive_dilemma, test_halfdemorgan, test_strengthen_antecedent, test_weaken_consequent, test_con_intro_negation, @@ -826,22 +826,20 @@ pub fn test_complement() -> (P, Vec>, Vec>) { let r16 = prf.add_step(Justification(p("^|^"), RuleM::Complement, vec![i(r5)], vec![])); let r17 = prf.add_step(Justification(p("_|_"), RuleM::Complement, vec![i(r6.clone())], vec![])); let r18 = prf.add_step(Justification(p("^|^"), RuleM::Complement, vec![i(r6)], vec![])); - let r33 = prf.add_step(Justification(p("_|_ & P"), RuleM::Complement, vec![i(r32.clone())], vec![])); - let r34 = prf.add_step(Justification(p("_|_ & P & X & F"), RuleM::Complement, vec![i(r32)], vec![])); - let r20 = prf.add_step(Justification(p("^|^ & ^|^ & _|_ & _|_"), RuleM::ConditionalComplement, vec![i(r19.clone())], vec![])); let r21 = prf.add_step(Justification(p("^|^ & (B <-> B) & (C <-> ~C) & (~D <-> D)"), RuleM::ConditionalComplement, vec![i(r19.clone())], vec![])); let r22 = prf.add_step(Justification(p("(A -> A) & ^|^ & (C <-> ~C) & (~D <-> D)"), RuleM::ConditionalComplement, vec![i(r19.clone())], vec![])); let r23 = prf.add_step(Justification(p("(A -> A) & (B <-> B) & _|_ & (~D <-> D)"), RuleM::ConditionalComplement, vec![i(r19.clone())], vec![])); let r24 = prf.add_step(Justification(p("(A -> A) & (B <-> B) & (C <-> ~C) & _|_"), RuleM::ConditionalComplement, vec![i(r19.clone())], vec![])); let r25 = prf.add_step(Justification(p("_|_ & _|_ & ^|^ & ^|^"), 
RuleM::ConditionalComplement, vec![i(r19.clone())], vec![])); - let r26 = prf.add_step(Justification(p("^|^ & ^|^ & _|_ & _|_"), RuleM::BiconditionalComplement, vec![i(r19.clone())], vec![])); let r27 = prf.add_step(Justification(p("^|^ & (B <-> B) & (C <-> ~C) & (~D <-> D)"), RuleM::BiconditionalComplement, vec![i(r19.clone())], vec![])); let r28 = prf.add_step(Justification(p("(A -> A) & ^|^ & (C <-> ~C) & (~D <-> D)"), RuleM::BiconditionalComplement, vec![i(r19.clone())], vec![])); let r29 = prf.add_step(Justification(p("(A -> A) & (B <-> B) & _|_ & (~D <-> D)"), RuleM::BiconditionalComplement, vec![i(r19.clone())], vec![])); let r30 = prf.add_step(Justification(p("(A -> A) & (B <-> B) & (C <-> ~C) & _|_"), RuleM::BiconditionalComplement, vec![i(r19.clone())], vec![])); let r31 = prf.add_step(Justification(p("_|_ & _|_ & ^|^ & ^|^"), RuleM::BiconditionalComplement, vec![i(r19)], vec![])); + let r33 = prf.add_step(Justification(p("_|_ & P"), RuleM::Complement, vec![i(r32.clone())], vec![])); + let r34 = prf.add_step(Justification(p("_|_ & P & X & F"), RuleM::Complement, vec![i(r32)], vec![])); (prf, vec![i(r7), i(r9), i(r12), i(r14), i(r16), i(r17), i(r21), i(r28), i(r29), i(r30), i(r33)], vec![i(r8), i(r10), i(r11), i(r13), i(r15), i(r18), i(r20), i(r22), i(r23), i(r24), i(r25), i(r26), i(r27), i(r31), i(r34)]) } @@ -866,7 +864,6 @@ pub fn test_identity() -> (P, Vec>, Vec>) { let r10 = prf.add_step(Justification(p("_|_"), RuleM::Identity, vec![i(r3)], vec![])); let r11 = prf.add_step(Justification(p("A"), RuleM::Identity, vec![i(r4.clone())], vec![])); let r12 = prf.add_step(Justification(p("_|_"), RuleM::Identity, vec![i(r4)], vec![])); - let r14 = prf.add_step(Justification(p("~A & B"), RuleM::ConditionalIdentity, vec![i(r13.clone())], vec![])); let r15 = prf.add_step(Justification(p("A & B"), RuleM::ConditionalIdentity, vec![i(r13.clone())], vec![])); let r16 = prf.add_step(Justification(p("~A & ~B"), RuleM::ConditionalIdentity, vec![i(r13)], vec![])); @@ 
-895,7 +892,6 @@ pub fn test_annihilation() -> (P, Vec>, Vec>) { let r10 = prf.add_step(Justification(p("A"), RuleM::Annihilation, vec![i(r3)], vec![])); let r11 = prf.add_step(Justification(p("^|^"), RuleM::Annihilation, vec![i(r4.clone())], vec![])); let r12 = prf.add_step(Justification(p("A"), RuleM::Annihilation, vec![i(r4)], vec![])); - let r14 = prf.add_step(Justification(p("^|^ & ^|^"), RuleM::ConditionalAnnihilation, vec![i(r13)], vec![])); let r16 = prf.add_step(Justification(p("^|^"), RuleM::ConditionalAnnihilation, vec![i(r15)], vec![])); @@ -930,16 +926,12 @@ pub fn test_absorption() -> (P, Vec>, Vec>) { let r1 = prf.add_premise(p("A & (A | B)")); let r2 = prf.add_premise(p("A & (B | A)")); - let r3 = prf.add_premise(p("A | (A & B)")); let r4 = prf.add_premise(p("A | (B & A)")); - let r5 = prf.add_premise(p("(A & B) | A")); let r6 = prf.add_premise(p("(B & A) | A")); - let r7 = prf.add_premise(p("(A | B) & A")); let r8 = prf.add_premise(p("(B | A) & A")); - let r9 = prf.add_premise(p("((A | B) & A) & (((A | B) & A) | C)")); let r10 = prf.add_step(Justification(p("A"), RuleM::Absorption, vec![i(r1)], vec![])); @@ -969,38 +961,32 @@ pub fn test_reduction() -> (P, Vec>, Vec>) { let p8 = prf.add_premise(p("D | (~A & (~~A | B))")); let p9 = prf.add_premise(p("P & M & (~P | Q)")); let p10 = prf.add_premise(p("P | M | (~P & Q)")); - let p11 = prf.add_premise(p("~P & (P | Q)")); let p12 = prf.add_premise(p("P & M & (~(P & M) | Q)")); let p13 = prf.add_premise(p("(forall A (A & B)) | (~(forall A (A & B)) & C)")); - let p14 = prf.add_premise(p("¬A | (A & B) | ¬C | (C & D)")); let r1 = prf.add_step(Justification(p("A & B"), RuleM::Reduction, vec![i(p1.clone())], vec![])); let r2 = prf.add_step(Justification(p("~A & B"), RuleM::Reduction, vec![i(p2.clone())], vec![])); let r3 = prf.add_step(Justification(p("A | B"), RuleM::Reduction, vec![i(p3.clone())], vec![])); let r4 = prf.add_step(Justification(p("~B | A"), RuleM::Reduction, vec![i(p4.clone())], vec![])); - 
let r6 = prf.add_step(Justification(p("A"), RuleM::Reduction, vec![i(p1)], vec![])); let r7 = prf.add_step(Justification(p("A | B"), RuleM::Reduction, vec![i(p2)], vec![])); let r8 = prf.add_step(Justification(p("B"), RuleM::Reduction, vec![i(p3)], vec![])); let r9 = prf.add_step(Justification(p("B & A"), RuleM::Reduction, vec![i(p4)], vec![])); - let r10 = prf.add_step(Justification(p("B & (C | ~A)"), RuleM::Reduction, vec![i(p6.clone())], vec![])); let r11 = prf.add_step(Justification(p("B & (C & ~A)"), RuleM::Reduction, vec![i(p6)], vec![])); let r12 = prf.add_step(Justification(p("A | (~A & B)"), RuleM::Reduction, vec![i(p7)], vec![])); let r13 = prf.add_step(Justification(p("D | (~A & B)"), RuleM::Reduction, vec![i(p8)], vec![])); - let r14 = prf.add_step(Justification(p("P & M & Q"), RuleM::Reduction, vec![i(p9.clone())], vec![])); let r15 = prf.add_step(Justification(p("(P & M) | Q"), RuleM::Reduction, vec![i(p9)], vec![])); let r16 = prf.add_step(Justification(p("P | M | Q"), RuleM::Reduction, vec![i(p10.clone())], vec![])); let r17 = prf.add_step(Justification(p("(P | M) & Q"), RuleM::Reduction, vec![i(p10)], vec![])); - let r18 = prf.add_step(Justification(p("~P & Q"), RuleM::Reduction, vec![i(p11.clone())], vec![])); let r19 = prf.add_step(Justification(p("P & M & Q"), RuleM::Reduction, vec![i(p12.clone())], vec![])); let r20 = prf.add_step(Justification(p("(forall A (A & B)) | C"), RuleM::Reduction, vec![i(p13)], vec![])); - let r21 = prf.add_step(Justification(p("¬A ∨ B ∨ ¬C ∨ D"), RuleM::Reduction, vec![i(p14.clone())], vec![])); + (prf, vec![i(r1), i(r2), i(r3), i(r4), i(r19), i(r18), i(r10), i(r12), i(r13), i(r14), i(r16), i(r20), i(r21)], vec![i(r6), i(r7), i(r8), i(r9), i(r11), i(r15), i(r17)]) } @@ -1020,10 +1006,8 @@ pub fn test_adjacency() -> (P, Vec>, Vec>) { let r2 = prf.add_step(Justification(p("A"), RuleM::Adjacency, vec![i(p2.clone())], vec![])); let r3 = prf.add_step(Justification(p("(B | A) & (A | ~B)"), RuleM::Adjacency, 
vec![i(p1.clone())], vec![])); let r4 = prf.add_step(Justification(p("(~B & A) | (A & B)"), RuleM::Adjacency, vec![i(p2.clone())], vec![])); - let r5 = prf.add_step(Justification(p("B"), RuleM::Adjacency, vec![i(p1)], vec![])); let r6 = prf.add_step(Justification(p("B"), RuleM::Adjacency, vec![i(p2)], vec![])); - let r7 = prf.add_step(Justification(p("Q"), RuleM::Adjacency, vec![i(p3)], vec![])); let r8 = prf.add_step(Justification(p("Q"), RuleM::Adjacency, vec![i(p4)], vec![])); let r9 = prf.add_step(Justification(p("A & M"), RuleM::Adjacency, vec![i(p5.clone())], vec![])); @@ -1047,7 +1031,6 @@ pub fn test_resolution() -> (P, Vec>, Vec>) { let r2 = prf.add_step(Justification(p("a1 | a2"), RuleM::Resolution, vec![i(p1.clone()), i(p3.clone())], vec![])); let r3 = prf.add_step(Justification(p("_|_"), RuleM::Resolution, vec![i(p3.clone()), i(p4)], vec![])); let r4 = prf.add_step(Justification(p("a1 & a2"), RuleM::Resolution, vec![i(p3.clone()), i(p6.clone())], vec![])); - let r5 = prf.add_step(Justification(p("a1 | a2 | c | b1 | b2"), RuleM::Resolution, vec![i(p1.clone()), i(p2.clone())], vec![])); let r6 = prf.add_step(Justification(p("a1 | a2 | b1"), RuleM::Resolution, vec![i(p1.clone()), i(p2.clone())], vec![])); let r7 = prf.add_step(Justification(p("a1 | a2"), RuleM::Resolution, vec![i(p5.clone()), i(p3.clone())], vec![])); @@ -1058,7 +1041,7 @@ pub fn test_resolution() -> (P, Vec>, Vec>) { (prf, vec![i(r1), i(r2), i(r3), i(r4)], vec![i(r5), i(r6), i(r7), i(r8), i(r9), i(r10)]) } -pub fn test_tautcon() -> (P, Vec>, Vec>) { +pub fn test_truth_func_conseq() -> (P, Vec>, Vec>) { use self::coproduct_inject as i; use crate::parser::parse_unwrap as p; let mut prf = P::new(); @@ -1071,22 +1054,18 @@ pub fn test_tautcon() -> (P, Vec>, Vec>) { let r2 = prf.add_step(Justification(p("^|^"), RuleM::TruthFunctionalConsequence, vec![i(p1.clone())], vec![])); let r3 = prf.add_step(Justification(p("A"), RuleM::TruthFunctionalConsequence, vec![i(p1.clone())], vec![])); let r4 = 
prf.add_step(Justification(p("~~~((A & ~B) | ~C)"), RuleM::TruthFunctionalConsequence, vec![i(p1.clone())], vec![])); - let r5 = prf.add_step(Justification(p("_|_"), RuleM::TruthFunctionalConsequence, vec![i(p2.clone())], vec![])); let r6 = prf.add_step(Justification(p("^|^"), RuleM::TruthFunctionalConsequence, vec![i(p2.clone())], vec![])); let r7 = prf.add_step(Justification(p("A"), RuleM::TruthFunctionalConsequence, vec![i(p2.clone())], vec![])); let r8 = prf.add_step(Justification(p("~~~((A & ~B) | ~C)"), RuleM::TruthFunctionalConsequence, vec![i(p2)], vec![])); - let r9 = prf.add_step(Justification(p("A"), RuleM::TruthFunctionalConsequence, vec![i(p3.clone())], vec![])); let r10 = prf.add_step(Justification(p("B"), RuleM::TruthFunctionalConsequence, vec![i(p3.clone())], vec![])); let r11 = prf.add_step(Justification(p("A | B"), RuleM::TruthFunctionalConsequence, vec![i(p3.clone())], vec![])); let r12 = prf.add_step(Justification(p("A & B"), RuleM::TruthFunctionalConsequence, vec![i(p3)], vec![])); - let r13 = prf.add_step(Justification(p("A"), RuleM::TruthFunctionalConsequence, vec![i(p4.clone())], vec![])); let r14 = prf.add_step(Justification(p("B"), RuleM::TruthFunctionalConsequence, vec![i(p4.clone())], vec![])); let r15 = prf.add_step(Justification(p("A | B"), RuleM::TruthFunctionalConsequence, vec![i(p4.clone())], vec![])); let r16 = prf.add_step(Justification(p("A & B"), RuleM::TruthFunctionalConsequence, vec![i(p4.clone())], vec![])); - let r17 = prf.add_step(Justification(p("B"), RuleM::TruthFunctionalConsequence, vec![i(p1), i(p4)], vec![])); (prf, vec![i(r1), i(r2), i(r3), i(r4), i(r6), i(r9), i(r11), i(r13), i(r15), i(r17)], vec![i(r5), i(r7), i(r8), i(r10), i(r12), i(r14), i(r16)]) diff --git a/aris/src/proofs/xml_interop.rs b/aris/src/proofs/xml_interop.rs index 692a970d..f9790e57 100644 --- a/aris/src/proofs/xml_interop.rs +++ b/aris/src/proofs/xml_interop.rs @@ -31,7 +31,7 @@ pub fn proof_from_xml(r: R) -> Result<(P, ProofMetaData), Str let s: 
&str = $x; match crate::parser::parse(&s) { Some(e) => e, - None if s == "" => Expr::Var { name: "__xml_interop_blank_line".into() }, + None if s == "" => Expr::Var { name: "".to_string() }, None => return Err(format!("Failed to parse {:?}, element stack {:?}", s, element_stack)), } }}; diff --git a/aris/src/rules.rs b/aris/src/rules.rs index 44c6734b..f5d60652 100644 --- a/aris/src/rules.rs +++ b/aris/src/rules.rs @@ -46,12 +46,12 @@ Each `check` implementation usually starts off with bringing the rules of the re Adding the tests and implementing the rule can be interleaved; it's convenient to debug the implementation by iterating on `cargo test -- test_your_rule_name`, possibly with `--nocapture` if you're println-debugging. ## Checklist for adding a new rule type -(e.g. for adding `PrepositionalInference` if it wasn't already there) +(e.g. for adding `PropositionalInference` if it wasn't already there) - Create the new enum, preferably right after all the existing ones - Add the new enum to the `Rule` type alias, inside the `SharedChecks` and `Coprod!` wrappers - Add a `RuleT` impl block for the new enum - if default metadata applies to all rules of the type, add those (e.g. `BooleanEquivalence`) - - if default metadata doesn't apply to all rules of the type, add an empty match block (e.g. `PrepositionalInference`) + - if default metadata doesn't apply to all rules of the type, add an empty match block (e.g. `PropositionalInference`) */ use crate::equivs; @@ -80,7 +80,7 @@ use strum_macros::*; #[allow(missing_docs)] #[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub enum PrepositionalInference { +pub enum PropositionalInference { AndIntro, AndElim, OrIntro, @@ -235,7 +235,7 @@ pub struct EmptyRule; #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct SharedChecks(T); -pub type Rule = SharedChecks; +pub type Rule = SharedChecks; /// Conveniences for constructing rules of the appropriate type, primarily for testing. 
/// The non-standard naming conventions here are because a module is being used to pretend to be an enum. @@ -283,20 +283,20 @@ pub mod RuleM { // If the parens are omitted, $value:tt only captures SharedChecks, without the (...) // I haven't yet found a way to use macro_rules! to convert between expr and pat. declare_rules! { - [AndIntro, "CONJUNCTION", (SharedChecks(Inl(PrepositionalInference::AndIntro)))], - [AndElim, "SIMPLIFICATION", (SharedChecks(Inl(PrepositionalInference::AndElim)))], - [OrIntro, "ADDITION", (SharedChecks(Inl(PrepositionalInference::OrIntro)))], - [OrElim, "DISJUNCTIVE_ELIMINATION", (SharedChecks(Inl(PrepositionalInference::OrElim)))], - [ImpIntro, "CONDITIONAL_PROOF", (SharedChecks(Inl(PrepositionalInference::ImpIntro)))], - [ImpElim, "MODUS_PONENS", (SharedChecks(Inl(PrepositionalInference::ImpElim)))], - [NotIntro, "PROOF_BY_CONTRADICTION", (SharedChecks(Inl(PrepositionalInference::NotIntro)))], - [NotElim, "DOUBLENEGATION", (SharedChecks(Inl(PrepositionalInference::NotElim)))], - [ContradictionIntro, "CONTRADICTION", (SharedChecks(Inl(PrepositionalInference::ContradictionIntro)))], - [ContradictionElim, "PRINCIPLE_OF_EXPLOSION", (SharedChecks(Inl(PrepositionalInference::ContradictionElim)))], - [BiconditionalIntro, "BICONDITIONAL_INTRO", (SharedChecks(Inl(PrepositionalInference::BiconditionalIntro)))], - [BiconditionalElim, "BICONDITIONAL_ELIM", (SharedChecks(Inl(PrepositionalInference::BiconditionalElim)))], - [EquivalenceIntro, "EQUIVALENCE_INTRO", (SharedChecks(Inl(PrepositionalInference::EquivalenceIntro)))], - [EquivalenceElim, "EQUIVALENCE_ELIM", (SharedChecks(Inl(PrepositionalInference::EquivalenceElim)))], + [AndIntro, "CONJUNCTION", (SharedChecks(Inl(PropositionalInference::AndIntro)))], + [AndElim, "SIMPLIFICATION", (SharedChecks(Inl(PropositionalInference::AndElim)))], + [OrIntro, "ADDITION", (SharedChecks(Inl(PropositionalInference::OrIntro)))], + [OrElim, "DISJUNCTIVE_ELIMINATION", 
(SharedChecks(Inl(PropositionalInference::OrElim)))], + [ImpIntro, "CONDITIONAL_PROOF", (SharedChecks(Inl(PropositionalInference::ImpIntro)))], + [ImpElim, "MODUS_PONENS", (SharedChecks(Inl(PropositionalInference::ImpElim)))], + [NotIntro, "PROOF_BY_CONTRADICTION", (SharedChecks(Inl(PropositionalInference::NotIntro)))], + [NotElim, "DOUBLENEGATION", (SharedChecks(Inl(PropositionalInference::NotElim)))], + [ContradictionIntro, "CONTRADICTION", (SharedChecks(Inl(PropositionalInference::ContradictionIntro)))], + [ContradictionElim, "PRINCIPLE_OF_EXPLOSION", (SharedChecks(Inl(PropositionalInference::ContradictionElim)))], + [BiconditionalIntro, "BICONDITIONAL_INTRO", (SharedChecks(Inl(PropositionalInference::BiconditionalIntro)))], + [BiconditionalElim, "BICONDITIONAL_ELIM", (SharedChecks(Inl(PropositionalInference::BiconditionalElim)))], + [EquivalenceIntro, "EQUIVALENCE_INTRO", (SharedChecks(Inl(PropositionalInference::EquivalenceIntro)))], + [EquivalenceElim, "EQUIVALENCE_ELIM", (SharedChecks(Inl(PropositionalInference::EquivalenceElim)))], [ForallIntro, "UNIVERSAL_GENERALIZATION", (SharedChecks(Inr(Inl(PredicateInference::ForallIntro))))], [ForallElim, "UNIVERSAL_INSTANTIATION", (SharedChecks(Inr(Inl(PredicateInference::ForallElim))))], @@ -522,9 +522,9 @@ pub fn do_expressions_contradict(prem1: &Expr, prem2: &Expr) -> Result ) } -impl RuleT for PrepositionalInference { +impl RuleT for PropositionalInference { fn get_name(&self) -> String { - use PrepositionalInference::*; + use PropositionalInference::*; match self { AndIntro => "∧ Introduction", AndElim => "∧ Elimination", @@ -544,7 +544,7 @@ impl RuleT for PrepositionalInference { .into() } fn get_classifications(&self) -> HashSet { - use PrepositionalInference::*; + use PropositionalInference::*; use RuleClassification::*; let mut ret = HashSet::new(); match self { @@ -558,7 +558,7 @@ impl RuleT for PrepositionalInference { ret } fn num_deps(&self) -> Option { - use PrepositionalInference::*; + use 
PropositionalInference::*; match self { AndElim | OrIntro | OrElim | NotElim | ContradictionElim => Some(1), ContradictionIntro | ImpElim | BiconditionalElim | EquivalenceElim => Some(2), @@ -567,7 +567,7 @@ impl RuleT for PrepositionalInference { } } fn num_subdeps(&self) -> Option { - use PrepositionalInference::*; + use PropositionalInference::*; match self { NotIntro | ImpIntro => Some(1), AndElim | OrIntro | NotElim | ContradictionElim | ContradictionIntro | ImpElim | AndIntro | BiconditionalElim | EquivalenceElim => Some(0), @@ -577,8 +577,8 @@ impl RuleT for PrepositionalInference { #[allow(clippy::redundant_closure)] fn check(self, p: &P, conclusion: Expr, deps: Vec>, sdeps: Vec) -> Result<(), ProofCheckError, P::SubproofReference>> { - use PrepositionalInference::*; use ProofCheckError::*; + use PropositionalInference::*; match self { AndIntro => { if deps.len() == 1 { @@ -1696,7 +1696,7 @@ where premise = premise.sort_commutative_ops(restriction); conclusion_mut = conclusion_mut.sort_commutative_ops(restriction); } - let mut p = normalize_fn(premise.clone()); + let mut p = normalize_fn(premise); let mut q = normalize_fn(conclusion_mut); if commutative { p = p.sort_commutative_ops(restriction); @@ -1705,7 +1705,7 @@ where if p == q { Ok(()) } else { - Err(ProofCheckError::Other(format!("{p} and {q} are not equal. {:?}", premise))) + Err(ProofCheckError::Other(format!("{p} and {q} are not equal."))) } } diff --git a/auto-grader/Cargo.toml b/auto-grader/Cargo.toml index 95ef6edd..cdf7ad3d 100644 --- a/auto-grader/Cargo.toml +++ b/auto-grader/Cargo.toml @@ -3,6 +3,9 @@ name = "aris-auto-grader" version = "0.1.0" authors = ["Avi Weinstock "] edition = "2021" +description = "Aris is a formal logic proof-building tool designed to help users learn and practice logic." 
+repository = "https://github.com/Bram-Hub/aris" +license = "GPL-3.0" [dependencies] aris = { path = "../aris" } diff --git a/web-app/src/components/proof_widget/mod.rs b/web-app/src/components/proof_widget/mod.rs index dfb719c8..ff31e4b2 100644 --- a/web-app/src/components/proof_widget/mod.rs +++ b/web-app/src/components/proof_widget/mod.rs @@ -1,3 +1,8 @@ +/*!This file implements a Yew-based frontend component for editing, displaying, and managing natural deduction + * proofs in a structured UI. It provides mechanisms for creating, editing, and validating proofs interactively, + * with support for keyboard shortcuts, line dependency management, and UI-specific data handling. The file includes + * utility functions, enums, and a main component (ProofWidget) that manages the state and rendering of the proof editor. */ + mod actions; use crate::box_chars; @@ -37,6 +42,8 @@ use wasm_bindgen::JsCast; use js_sys::Math::random; +/// Retrieves the document object of the current web page. +/// This is used for DOM manipulation and event listeners. fn document() -> web_sys::Document { web_sys::window().expect_throw("window is undefined").document().expect_throw("document is undefined") } @@ -124,6 +131,8 @@ pub struct ProofWidgetProps { } impl ProofWidget { + /// Renders a checkbox for managing line dependencies. + /// This checkbox is interactive if the dependency is valid, otherwise it is disabled. fn render_line_num_dep_checkbox(&self, ctx: &Context, line: Option, proofref: Coprod!(PjRef

,

::SubproofReference)) -> Html { let line = match line { Some(line) => line.to_string(), @@ -231,6 +240,7 @@ impl ProofWidget { } } + /// Renders a UI widget for a justification line, including its dependencies and rule selector. fn render_justification_widget(&self, ctx: &Context, jref:

::JustificationReference) -> Html { let just = self.prf.lookup_justification_or_die(&jref).expect("proofref should exist in self.prf"); @@ -278,6 +288,9 @@ impl ProofWidget { } } + + /// Renders feedback for a specific proof line, such as correctness or errors. + /// Feedback includes messages for parse errors, valid premises, and rule violations. fn render_line_feedback(&self, proofref: PjRef

, is_subproof: bool) -> Html { use aris::parser::parse; let raw_line = match self.pud.ref_to_input.get(&proofref).and_then(|x| if !x.is_empty() { Some(x) } else { None }) { @@ -309,6 +322,8 @@ impl ProofWidget { } } } + + /// Renders a single proof line with all associated UI elements, including indentation, feedback, and actions. fn render_proof_line(&self, ctx: &Context, line: usize, depth: usize, proofref: PjRef

, edge_decoration: &str) -> Html { use Coproduct::{Inl, Inr}; let line_num_dep_checkbox = self.render_line_num_dep_checkbox(ctx, Some(line), Coproduct::inject(proofref)); @@ -437,6 +452,8 @@ impl ProofWidget { } } + /// Renders the entire proof structure as a hierarchical table. + /// Subproofs are displayed indented, with dependency management and line actions integrated. fn render_proof(&self, ctx: &Context, prf: &

::Subproof, sref: Option<

::SubproofReference>, line: &mut usize, depth: &mut usize) -> Html { // output has a bool tag to prune subproof spacers with, because VNode's PartialEq doesn't do the right thing let mut output: Vec<(Html, bool)> = Vec::new(); @@ -576,7 +593,9 @@ impl ProofWidget { } } -/// Is the user allowed to remove the line at `line_ref`? +/// Determines if the user is allowed to remove a line at `line_ref`. +/// Premises at the top level can only be removed if there are multiple top-level premises. +/// Steps can always be removed. fn may_remove_line(prf: &P, line_ref: &PjRef

) -> bool { use Coproduct::Inl; @@ -609,16 +628,14 @@ fn render_open_error(error: &str) -> Html { } } -/// Create a new empty premise, the default premise when creating a new one in -/// the UI. The `ProofUiData` is supposed to be modified so this appears blank. +/// Create a new empty premise, the default premise when creating a new one in the UI. fn new_empty_premise() -> Expr { - Expr::var("__js_ui_blank_premise") + Expr::var("") } /// Create a new empty step, the default step when creating a new one in the UI. -/// The `ProofUiData` is supposed to be modified so this appears blank. fn new_empty_step() -> Justification,

::SubproofReference> { - Justification(Expr::var("__js_ui_blank_step"), RuleM::EmptyRule, vec![], vec![]) + Justification(Expr::var(""), RuleM::EmptyRule, vec![], vec![]) } /// Create a new empty proof, the default proof shown in the UI @@ -637,6 +654,9 @@ fn new_empty_proof() -> (P, ProofUiData

) { impl Component for ProofWidget { type Message = ProofWidgetMsg; type Properties = ProofWidgetProps; + + /// Creates a new `ProofWidget` component. + /// Initializes the proof, UI data, and error handling based on the input properties. fn create(ctx: &Context) -> Self { ctx.props().oncreate.emit(ctx.link().clone()); let (prf, pud, error) = match &ctx.props().data { @@ -665,6 +685,9 @@ impl Component for ProofWidget { Component::update(&mut tmp, ctx, ProofWidgetMsg::Nop); tmp } + + /// Updates the `ProofWidget` state based on messages, such as line edits or rule changes. + /// This handles line actions, updates proof data, and re-renders the UI as needed. fn update(&mut self, ctx: &Context, msg: Self::Message) -> bool { let mut ret = false; if ctx.props().verbose { @@ -843,9 +866,15 @@ impl Component for ProofWidget { } ret } + + /// Handles property changes for the `ProofWidget` component. + /// Always triggers a re-render when properties change. fn changed(&mut self, _: &Context, _: &Self::Properties) -> bool { true } + + /// Renders the `ProofWidget` component. + /// Displays either the proof editor or an error message if the proof could not be loaded. fn view(&self, ctx: &Context) -> Html { let widget = match &self.open_error { Some(err) => render_open_error(err), @@ -863,6 +892,8 @@ impl Component for ProofWidget { } } + + /// Executes post-render logic, such as initializing Bootstrap submenus and popovers. 
fn rendered(&mut self, _: &Context, _: bool) { js_sys::eval("$('[data-submenu]').submenupicker(); $('[data-toggle=popover]').popover()").unwrap_throw(); } diff --git a/web-app/static/index.html b/web-app/static/index.html index a9761657..31a2f09c 100644 --- a/web-app/static/index.html +++ b/web-app/static/index.html @@ -1,57 +1,76 @@ - + - - + Aris - - - - - - - + + + + + + + - + - + - + - + - - - + diff --git a/web-app/static/proofImages_light/Strong Induction.png b/web-app/static/proofImages_light/Strong Induction.png index e1febab4..c21a355b 100644 Binary files a/web-app/static/proofImages_light/Strong Induction.png and b/web-app/static/proofImages_light/Strong Induction.png differ diff --git a/web-app/static/proofImages_light/Weak Induction.png b/web-app/static/proofImages_light/Weak Induction.png index 6bd99b09..ee0b0ff2 100644 Binary files a/web-app/static/proofImages_light/Weak Induction.png and b/web-app/static/proofImages_light/Weak Induction.png differ