diff --git a/Tests/test_NAL/test_NAL1.py b/Tests/test_NAL/test_NAL1.py index d3f3be4..c40a142 100644 --- a/Tests/test_NAL/test_NAL1.py +++ b/Tests/test_NAL/test_NAL1.py @@ -15,6 +15,8 @@ import Tests.utils_for_test as utils_for_test from Tests.utils_for_test import * +import logging + # utils_for_test.engine = RuleMap() class TEST_NAL1(unittest.TestCase): @@ -164,7 +166,13 @@ def test_exemplification(self): self.assertTrue( output_contains(tasks_derived, ' robin>. %1.00;0.45%') ) - + + # Set log level + logging.basicConfig() + log = logging.getLogger("LOG") + results = kanren.inference(task.sentence, belief.sentence) + log.warning(results) + pass diff --git a/Tests/utils_for_test.py b/Tests/utils_for_test.py index 76cc006..3dd9aca 100644 --- a/Tests/utils_for_test.py +++ b/Tests/utils_for_test.py @@ -17,7 +17,6 @@ nars = Reasoner(100, 100) engine: GeneralEngine = nars.inference - def process_two_premises(premise1: str, premise2: str, n_cycle: int) -> List[Task]: '''''' tasks_all_cycles = [] diff --git a/pynars/NARS/Control/Reasoner.py b/pynars/NARS/Control/Reasoner.py index 644bf93..9093719 100644 --- a/pynars/NARS/Control/Reasoner.py +++ b/pynars/NARS/Control/Reasoner.py @@ -9,7 +9,7 @@ from pynars.Narsese._py.Statement import Statement from pynars.Narsese._py.Task import Belief from ..DataStructures import Bag, Memory, NarseseChannel, Buffer, Task, Concept -from ..InferenceEngine import GeneralEngine, TemporalEngine, VariableEngine +from ..InferenceEngine import GeneralEngine, TemporalEngine, VariableEngine, KanrenEngine from pynars import Config from pynars.Config import Enable from typing import Callable, List, Tuple, Union @@ -24,7 +24,8 @@ def __init__(self, n_memory, capacity, config = './config.json', nal_rules={1,2, # print('''Init...''') Config.load(config) - self.inference = GeneralEngine(add_rules=nal_rules) + self.inference = KanrenEngine() + # self.inference = GeneralEngine(add_rules=nal_rules) self.variable_inference = 
VariableEngine(add_rules=nal_rules) self.temporal_inference = TemporalEngine(add_rules=nal_rules) # for temporal causal reasoning diff --git a/pynars/NARS/InferenceEngine/GeneralEngine/GeneralEngine.py b/pynars/NARS/InferenceEngine/GeneralEngine/GeneralEngine.py index 56eba53..d9cba62 100644 --- a/pynars/NARS/InferenceEngine/GeneralEngine/GeneralEngine.py +++ b/pynars/NARS/InferenceEngine/GeneralEngine/GeneralEngine.py @@ -375,6 +375,9 @@ def step(self, concept: Concept): term_belief = concept_target.term # if belief is None: continue + # if belief is not None: + # print(">>", task.term, belief.term) + # before matching and applying the rules, do variable-related processes (i.e. unification, and substitution/introduction/elimination) subst, elimn, intro = GeneralEngine.unify(task.term, belief.term if belief is not None else None, concept.term, task_link_valid, term_link) task_subst, task_elimn, task_intro = GeneralEngine.substitute(subst, elimn, intro, task) diff --git a/pynars/NARS/InferenceEngine/KanrenEngine/KanrenEngine.py b/pynars/NARS/InferenceEngine/KanrenEngine/KanrenEngine.py new file mode 100644 index 0000000..de61dba --- /dev/null +++ b/pynars/NARS/InferenceEngine/KanrenEngine/KanrenEngine.py @@ -0,0 +1,895 @@ +from kanren import run, eq, var +from kanren.constraints import neq, ConstrainedVar +from unification import unify, reify +from cons import cons, car, cdr + +from itertools import combinations + +from pynars import Narsese, Global +from pynars.Narsese import Term, Copula, Connector, Statement, Compound, Variable, VarPrefix, Sentence, Punctuation, Stamp + +from pynars.NAL.Functions import * +from pynars.NARS.DataStructures import Concept, Task, TaskLink, TermLink, Judgement, Question +from pynars.NAL.Functions.Tools import project_truth, revisible +from collections import defaultdict +from typing import List + +nal1 = ''' +{ P>. M>} |- P> .ded +{

M>. S>} |-

S> .ded' +{ P>. S>} |- P> .ind +{ P>. S>} |-

S> .ind' +{

M>. M>} |- P> .abd +{

M>. M>} |-

S> .abd' +{

M>. S>} |- P> .exe +{ P>. M>} |-

S> .exe' +''' + +nal2 = ''' +{ P>. M>} |- P> .res +{ P>. S>} |- P> .res +{

M>. M>} |- P> .res +{

M>. S>} |- P> .res +{ P>. S>} |- P> .com +{

M>. M>} |- P> .com' +{ P>. M>} |- P> .ana +{ P>. S>} |- P> .ana +{

M>. M>} |-

S> .ana +{

M>. S>} |-

S> .ana +{ P>. M>} |- P> .ana' +{

M>. M>} |- P> .ana' +{ P>. S>} |-

S> .ana' +{

M>. S>} |-

S> .ana' +''' + +nal3 = ''' +'composition +{ T1>. T2>} |- (&, T1, T2)> .int +{ M>. M>} |- <(|, T1, T2) --> M> .int +{ T1>. T2>} |- (|, T1, T2)> .uni +{ M>. M>} |- <(&, T1, T2) --> M> .uni +{ T1>. T2>} |- (-, T1, T2)> .dif +{ T1>. T2>} |- (-, T2, T1)> .dif' +{ M>. M>} |- <(~, T1, T2) --> M> .dif +{ M>. M>} |- <(~, T2, T1) --> M> .dif' +'decomposition +{(--, (&, T1, T2)>). T1>} |- (--, T2>) .ded +'TODO: need alternative syntax for decomposition +'i.e. {(--, (&, T1, T2)>). _T1>} |- (--, ((&, T1, T2) - _T1)>) .ded +{(--, (&, T2, T1)>). T1>} |- (--, T2>) .ded +{ (|, T1, T2)>. (--, T1>)} |- T2> .ded +{ (|, T2, T1)>. (--, T1>)} |- T2> .ded +{(--, (-, T1, T2)>). T1>} |- T2> .ded +{(--, (-, T2, T1)>). (--, T1>)} |- (--, T2>) .ded +{(--, <(|, T2, T1) --> M>). M>} |- (--, M>) .ded +{(--, <(|, T1, T2) --> M>). M>} |- (--, M>) .ded +{<(&, T2, T1) --> M>. (--, M>)} |- M> .ded +{<(&, T1, T2) --> M>. (--, M>)} |- M> .ded +{(--, <(~, T1, T2) --> M>). M>} |- M> .ded +{(--, <(~, T2, T1) --> M>). (--, M>)} |- (--, M>) .ded +{(--, (&&, T1, T2)). T1} |- (--, T2) .ded +{(--, (&&, T2, T1)). T1} |- (--, T2) .ded +{(||, T1, T2). (--, T1)} |- T2 .ded +{(||, T2, T1). (--, T1)} |- T2 .ded +''' + +nal5 = ''' +'conditional syllogistic +{ P>. S} |- P .ded +{

S>. S} |- P .abd +{S. P>} |- P .ana +{S.

S>} |- P .ana +{ P>. S} |- P .ana' +{

S>. S} |- P .ana' + +'conditional conjunctive +'(C ^ S) => P, S |- C => P (alternative syntax below) +{<(&&, C, S) ==> P>. _S} |- <((&&, C, S) - _S) ==> P> .ded +{<(&&, S, C) ==> P>. _S} |- <((&&, S, C) - _S) ==> P> .ded + +'(C ^ S) => P, M => S |- (C ^ M) => P (alternative syntax below) +{<(&&, C, S) ==> P>. _S>} |- <(&&, ((&&, C, S) - _S), M) ==> P> .ded +{<(&&, S, C) ==> P>. _S>} |- <(&&, ((&&, S, C) - _S), M) ==> P> .ded + +'(C ^ S) => P, C => P |- S (alternative syntax below) +{<(&&, C, S) ==> P>. <_C ==> P>} |- ((&&, C, S) - _C) .abd +{<(&&, S, C) ==> P>. <_C ==> P>} |- ((&&, S, C) - _C) .abd + +'(C ^ S) => P, (C ^ M) => P |- M => S (alternative syntax below) +{<(&&, C, S) ==> P>. <(&&, _C, M) ==> P>} |- <((&&, _C, M) - C) ==> ((&&, C, S) - _C)> .abd +{<(&&, S, C) ==> P>. <(&&, _C, M) ==> P>} |- <((&&, _C, M) - C) ==> ((&&, S, C) - _C)> .abd +{<(&&, C, S) ==> P>. <(&&, M, _C) ==> P>} |- <((&&, M, _C) - C) ==> ((&&, C, S) - _C)> .abd +{<(&&, S, C) ==> P>. <(&&, M, _C) ==> P>} |- <((&&, M, _C) - C) ==> ((&&, S, C) - _C)> .abd + +'{ P>. S} |- <(&&, C, S) ==> P> .ind (alternative syntax below) +{<(&&, C, M) ==> P>. (&&, S, M)} |- <(&&, C, S, M) ==> P> .ind +{<(&&, M, C) ==> P>. (&&, S, M)} |- <(&&, C, S, M) ==> P> .ind +{<(&&, C, M) ==> P>. (&&, M, S)} |- <(&&, C, S, M) ==> P> .ind +{<(&&, M, C) ==> P>. (&&, M, S)} |- <(&&, C, S, M) ==> P> .ind + +'(C ^ M) => P, M => S |- (C ^ S) => P (alternative syntax below) +{<(&&, C, M) ==> P>. <_M ==> S>} |- <(&&, ((&&, C, M) - _M), S) ==> P> .ind +{<(&&, M, C) ==> P>. <_M ==> S>} |- <(&&, ((&&, M, C) - _M), S) ==> P> .ind +''' + +immediate = ''' +S |- (--, S) .neg + P> |-

S> .cnv + P> |-

S> .cnv + P> |- <(--, P) ==> (--, S)> .cnt +''' + +conditional_compositional = ''' +{P. S} |- P> .ind +{P. S} |-

S> .abd +{P. S} |- P> .com +{T1. T2} |- (&&, T1, T2) .int +{T1. T2} |- (||, T1, T2) .uni +{ P>. S} |- <(&&, C, S) ==> P> .ind +''' + +theorems = ''' +'inheritance +<(T1 & T2) --> T1> + (T1 | T2)> +<(T1 - T2) --> T1> +<((/, R, _, T) * T) --> R> + ((\, R, _, T) * T)> + +'similarity +<(--, (--, T)) <-> T> +<(/, (T1 * T2), _, T2) <-> T1> +<(\, (T1 * T2), _, T2) <-> T1> + +'implication +< P> ==> P>> +< P> ==> P>> +<(&&, S1, S2) ==> S1> +<(&&, S1, S2) ==> S2> + (||, S1, S2)> + (||, S1, S2)> + +< P> ==> <(S | M) --> (P | M)>> +< P> ==> <(S & M) --> (P & M)>> +< P> ==> <(S | M) <-> (P | M)>> +< P> ==> <(S & M) <-> (P & M)>> +<

S> ==> <(S | M) <-> (P | M)>> +<

S> ==> <(S & M) <-> (P & M)>> + +< P> ==> <(S || M) ==> (P || M)>> +< P> ==> <(S && M) ==> (P && M)>> +< P> ==> <(S || M) <=> (P || M)>> +< P> ==> <(S && M) <=> (P && M)>> +<

S> ==> <(S || M) <=> (P || M)>> +<

S> ==> <(S && M) <=> (P && M)>> + +< P> ==> <(S - M) --> (P - M)>> +< P> ==> <(M - P) --> (M - S)>> +< P> ==> <(S ~ M) --> (P ~ M)>> +< P> ==> <(M ~ P) --> (M ~ S)>> + +< P> ==> <(S - M) <-> (P - M)>> +< P> ==> <(M - P) <-> (M - S)>> +< P> ==> <(S ~ M) <-> (P ~ M)>> +< P> ==> <(M ~ P) <-> (M ~ S)>> +<

S> ==> <(S - M) <-> (P - M)>> +<

S> ==> <(M - P) <-> (M - S)>> +<

S> ==> <(S ~ M) <-> (P ~ M)>> +<

S> ==> <(M ~ P) <-> (M ~ S)>> + +< (T1 - T2)> ==> (--, T2>)> +<<(T1 ~ T2) --> M> ==> (--, M>)> + +< P> ==> <(/, S, _, M) --> (/, P, _, M)>> +< P> ==> <(\, S, _, M) --> (\, P, _, M)>> +< P> ==> <(/, M, _, P) --> (/, M, _, S)>> +< P> ==> <(\, M, _, P) --> (\, M, _, S)>> + +'equivalence +< P> <=> (&&, P>,

S>)> +<

S> <=> (&&, P>,

S>)> +< P> <=> (&&, P>,

S>)> +<

S> <=> (&&, P>,

S>)> + +< P> <=> <{S} <-> {P}>> +<

S> <=> <{S} <-> {P}>> +< P> <=> <[S] <-> [P]>> +<

S> <=> <[S] <-> [P]>> + +< {P}> <=> {P}>> +<<[S] --> P> <=> <[S] <-> P>> + +<<(S1 * S2) --> (P1 * P2)> <=> (&&, P1>, P2>)> +<<(S1 * S2) <-> (P1 * P2)> <=> (&&, P1>, P2>)> +<<(P1 * P2) <-> (S1 * S2)> <=> (&&, P1>, P2>)> + +< P> <=> <(M * S) --> (M * P)>> +< P> <=> <(S * M) --> (P * M)>> +< P> <=> <(M * S) <-> (M * P)>> +< P> <=> <(S * M) <-> (P * M)>> +<

S> <=> <(M * S) <-> (M * P)>> +<

S> <=> <(S * M) <-> (P * M)>> + + +<<(T1 * T2) --> R> <=> (/, R, _, T2)>> +<<(T1 * T2) --> R> <=> (/, R, T1, _)>> +< (T1 * T2)> <=> <(\, R, _, T2) --> T1>> +< (T1 * T2)> <=> <(\, R, T1, _) --> T2>> + +< S3>> <=> <(S1 && S2) ==> S3>> + +<(--, (S1 && S2)) <=> (||, (--, S1), (--, S2))> +<(--, (S1 && S2)) <=> (&&, (--, S1), (--, S2))> +<(--, (S2 && S1)) <=> (||, (--, S1), (--, S2))> +<(--, (S2 && S1)) <=> (&&, (--, S1), (--, S2))> + +< S2> <=> <(--, S1) <=> (--, S2)>> +< S1> <=> <(--, S1) <=> (--, S2)>> + +'not in the NAL book but a nice to have +< (/, R, _, T2)> <=> (/, R, T1, _)>> +''' + +def split_rules(rules: str) -> List[str]: + lines = [] + for line in rules.splitlines(): + if len(line) and not (line.startswith("'") or line.startswith("#")): + lines.append(line) + return lines + +def parse(narsese: str, rule=False): + task = Narsese.parser.parse(narsese) + return task.term if rule else task.sentence + +# used in converting from logic to Narsese +_vars_all = defaultdict(lambda: len(_vars_all)) + +class KanrenEngine: + + _truth_functions = { + 'ded': Truth_deduction, + 'ana': Truth_analogy, + 'res': Truth_resemblance, + 'abd': Truth_abduction, + 'ind': Truth_induction, + 'exe': Truth_exemplification, + 'com': Truth_comparison, + 'int': Truth_intersection, + 'uni': Truth_union, + 'dif': Truth_difference, + + 'neg': Truth_negation, + 'cnv': Truth_conversion, + 'cnt': Truth_contraposition + } + + def __init__(self): + + nal1_rules = split_rules(nal1) + nal2_rules = split_rules(nal2) + nal3_rules = split_rules(nal3) + + nal5_rules = split_rules(nal5) + + # NAL5 includes higher order variants of NAL1-3 rules + for rule in (nal1_rules + nal2_rules): + # replace --> with ==> in NAL1 & NAL2 + rule = rule.replace('-->', '==>') + # replace <-> with <=> in NAL2 + rule = rule.replace('<->', '<=>') + + nal5_rules.append(rule) + + for rule in nal3_rules: + # replace --> with ==> in NAL3 (except difference) + if '(-,' not in rule and '(~,' not in rule: + rule = 
rule.replace('-->', '==>') + + # replace | with || in NAL3 (except difference) + if '||' not in rule: + parts = rule.split(' |- ') + parts = (part.replace('|', '||') for part in parts) + rule = ' |- '.join(parts) + + # replace & with && in NAL3 (except difference) + if '&&' not in rule: + rule = rule.replace('&', '&&') + + nal5_rules.append(rule) + + rules = nal1_rules + nal2_rules + nal3_rules + nal5_rules + + self._variables = set() # used as scratchpad while converting + + self.rules_strong = [] # populated by the line below for use in structural inference + + self.rules_syllogistic = [self._convert(r) for r in rules] + + self.rules_immediate = [self._convert_immediate(r) for r in split_rules(immediate)] + + self.rules_conditional_compositional = [self._convert(r, True) for r in split_rules(conditional_compositional)] + + self.theorems = [self._convert_theorems(t) for t in split_rules(theorems)] + + + ################################################# + ### Conversion between Narsese and miniKanren ### + ################################################# + + def _convert(self, rule, conditional_compositional=False): + # convert to logical form + premises, conclusion = rule.split(" |- ") + + p1, p2 = premises.strip("{}").split(". ") + conclusion = conclusion.split(" .") + c = conclusion[0] + r = conclusion[1] + + # TODO: can we parse statements instead? 
+ p1 = parse(p1+'.', True) + p2 = parse(p2+'.', True) + c = parse(c+'.', True) + + self._variables.clear() # clear scratchpad + + p1 = self.logic(p1, True) + p2 = self.logic(p2, True) + c = self.logic(c, True) + + var_combinations = list(combinations(self._variables, 2)) + # filter out combinations like (_C, C) allowing them to be the same + cond = lambda x, y: x.token.replace('_', '') != y.token.replace('_', '') + constraints = [neq(c[0], c[1]) for c in var_combinations if cond(c[0], c[1])] + + if not conditional_compositional: # conditional compositional rules require special treatment + if r.replace("'", '') in ['ded', 'ana', 'res', 'int', 'uni', 'dif']: + self.rules_strong.append(((p1, p2, c), (r, constraints))) + + return ((p1, p2, c), (r, constraints)) + + def _convert_immediate(self, rule): + # convert to logical form + premise, conclusion = rule.split(" |- ") + conclusion = conclusion.split(" .") + c = conclusion[0] + r = conclusion[1] + + # TODO: can we parse statements instead? + p = parse(premise+'.', True) + c = parse(c+'.', True) + + self._variables.clear() # clear scratchpad + + p = self.logic(p, True) + c = self.logic(c, True) + + var_combinations = list(combinations(self._variables, 2)) + # filter out combinations like (_C, C) allowing them to be the same + cond = lambda x, y: x.token.replace('_', '') != y.token.replace('_', '') + constraints = [neq(c[0], c[1]) for c in var_combinations if cond(c[0], c[1])] + + return ((p, c), (r, constraints)) + + def _convert_theorems(self, theorem): + # TODO: can we parse statements instead? 
+ t = parse(theorem+'.', True) + l = self.logic(t, True, True, prefix='_theorem_') + sub_terms = set(t.sub_terms) + return (l, sub_terms) + + def logic(self, term: Term, rule=False, substitution=False, var_intro=False, structural=False, prefix='_rule_'): + if term.is_atom: + name = prefix+term.word if rule else term.word + if type(term) is Variable: + vname = term.word + term.name + name = prefix+vname if rule else vname + if rule and not substitution: # collect rule variable names + self._variables.add(var(name)) + return var(name) if not structural else term + if rule and not substitution: # collect rule variable names + self._variables.add(var(name)) + return var(name) if rule else term + if term.is_statement: + return cons(term.copula, *[self.logic(t, rule, substitution, var_intro, structural, prefix) for t in term.terms]) + if term.is_compound: + # when used in variable introduction, treat single component compounds as atoms + if rule and var_intro and len(term.terms) == 1 \ + and term.connector is Connector.ExtensionalSet \ + or term.connector is Connector.IntensionalSet: + name = prefix+term.word + return var(name) + + # extensional and intensional images are not composable + if term.connector is Connector.ExtensionalImage \ + or term.connector is Connector.IntensionalImage: + return cons(term.connector, *[self.logic(t, rule, substitution, var_intro, structural, prefix) for t in term.terms]) + + terms = list(term.terms) + multi = [] + while len(terms) > 2: + t = terms.pop(0) + multi.append(self.logic(t, rule, substitution, var_intro, structural, prefix)) + multi.append(term.connector) + multi.extend(self.logic(t, rule, substitution, var_intro, structural, prefix) for t in terms) + + return cons(term.connector, *multi) + + def term(self, logic, root=True): + # additional variable handling + if root: _vars_all.clear() + def create_var(name, prefix: VarPrefix): + _vars_all[name] + var = Variable(prefix, name) + idx = _vars_all[name] + if prefix is 
VarPrefix.Independent: + var._vars_independent.add(idx, []) + if prefix is VarPrefix.Dependent: + var._vars_dependent.add(idx, []) + if prefix is VarPrefix.Query: + var._vars_query.add(idx, []) + return var + + if type(logic) is Term: + return logic + if type(logic) is Variable: + return logic + if type(logic) is var or type(logic) is ConstrainedVar: + name = logic.token.replace('_rule_', '').replace('_theorem_', '') + if name[0] == '$': + return create_var(name[1:], VarPrefix.Independent) + if name[0] == '#': + return create_var(name[1:], VarPrefix.Dependent) + if name[0] == '?': + return create_var(name[1:], VarPrefix.Query) + else: + return Term(name) + if type(logic) is cons: + if type(car(logic)) is Copula: + sub = car(cdr(logic)) + cop = car(logic) + pre = cdr(cdr(logic)) + return Statement(self.term(sub, False), cop, self.term(pre, False)) + if type(car(logic)) is Connector: + con = car(logic) + t = cdr(logic) + is_list = type(t) is cons and not (type(car(t)) is Copula or type(car(t)) is Connector) + terms = self.to_list(cdr(logic)) if is_list else [self.term(t, False)] + return Compound(con, *terms) + return logic # cons + + def to_list(self, pair) -> list: + l = [self.term(car(pair), False)] + if type(cdr(pair)) is list and cdr(pair) == []: + () # empty TODO: there's gotta be a better way to check + elif type(cdr(pair)) is cons: + t = self.term(cdr(pair), False) + if type(t) is cons: + l.extend(self.to_list(t)) # recurse + else: + l.append(t) + else: + l.append(self.term(cdr(pair), False)) # atom + return l + + ################################################# + ### quick and dirty example of applying diff #### + ################################################# + + def _diff(self, c): + # TODO: room for improvement + difference = -1 # result of applying diff + + def calculate_difference(l: Term, r: Term): + return (l - r) if l.contains(r) and not l.equal(r) else None + + def do_diff(t: Term): + nonlocal difference + if len(t.terms.terms) == 2: + 
components = t.terms.terms + difference = calculate_difference(*components) + + + # COMPOUND + if type(c) is Compound and c.connector is Connector.ExtensionalDifference: + if len(c.terms.terms) == 2: + return calculate_difference(*c.terms.terms) + + # STATEMENT + elif type(c) is Statement and c.copula is Copula.Implication: + # check subject + subject = c.subject + if subject.is_compound: + if subject.connector is Connector.ExtensionalDifference: + do_diff(c.subject) + if difference is not None and difference != -1: + subject = difference + + # check for nested difference + elif subject.connector is Connector.Conjunction: + if len(subject.terms.terms) == 2: + components = subject.terms.terms + if components[0].is_compound: + if components[0].connector is Connector.ExtensionalDifference: + do_diff(components[0]) + # if components[0].terms.terms[0] == components[1]: + # difference = None + if difference is not None: + subject = Compound(subject.connector, difference, components[1]) + + # check predicate + predicate = c.predicate + if predicate.is_compound and difference is not None and difference != -1: # already failed one check + if predicate.connector is Connector.ExtensionalDifference: + do_diff(predicate) + if difference is not None: + predicate = difference + + # check for success + if difference == None or difference == -1: + return difference + else: + return Statement(subject, c.copula, predicate) + + return -1 # no difference was applied + + # UNIFICATION + + def _variable_elimination(self, t1: Term, t2: Term) -> list: + unified = filter(None, (unify(self.logic(t), self.logic(t2, True, True)) for t in t1.terms)) + substitution = [] + for u in unified: + d = {k: v for k, v in u.items() if type(self.term(k)) is Variable} + if len(d): + substitution.append(d) + result = [] + for s in substitution: + reified = reify(self.logic(t1), s) + result.append(self.term(reified)) + + return result + + ################################################# + + # INFERENCE + + 
def step(self, concept: Concept): + '''One step inference.''' + tasks_derived = [] + + Global.States.record_concept(concept) + + # Based on the selected concept, take out a task and a belief for further inference. + task_link: TaskLink = concept.task_links.take(remove=True) + + if task_link is None: + return tasks_derived + + concept.task_links.put_back(task_link) + + task: Task = task_link.target + + # inference for single-premise rules + if task.is_judgement and not task.immediate_structural_rules_applied: # TODO: handle other cases + Global.States.record_premises(task) + + results = [] + + results.extend(self.inference_immediate(task.sentence)) + results.extend(self.inference_structural(task.sentence)) + + for term, truth in results: + # TODO: how to properly handle stamp for immediate rules? + stamp_task: Stamp = task.stamp + + if task.is_judgement: # TODO: hadle other cases + # TODO: calculate budget + budget = Budget_forward(truth, task_link.budget, None) + sentence_derived = Judgement(term[0], stamp_task, truth) + task_derived = Task(sentence_derived, budget) + # set flag to prevent repeated processing + task_derived.immediate_structural_rules_applied = True + # normalize the variable indices + task_derived.term._normalize_variables() + tasks_derived.append(task_derived) + + # record immediate rule application for task + task.immediate_structural_rules_applied = True + + # inference for two-premises rules + term_links = [] + term_link_valid = None + is_valid = False + + for _ in range(len(concept.term_links)): # TODO: should limit max number of links to process + # To find a belief, which is valid to interact with the task, by iterating over the term-links. + term_link: TermLink = concept.term_links.take(remove=True) + term_links.append(term_link) + + if not task_link.novel(term_link, Global.time): + continue + + concept_target: Concept = term_link.target + belief = concept_target.get_belief() # TODO: consider all beliefs. 
+ + if belief is None: + continue + + if task == belief: + # if task.sentence.punct == belief.sentence.punct: + # is_revision = revisible(task, belief) + continue + # TODO: currently causes infinite recursion with variables + # elif task.term.equal(belief.term): + # # TODO: here + # continue + elif not belief.evidential_base.is_overlaped(task.evidential_base): + term_link_valid = term_link + is_valid = True + break + + if is_valid \ + and task.is_judgement: # TODO: handle other cases + + Global.States.record_premises(task, belief) + + # Temporal Projection and Eternalization + if belief is not None: + # TODO: Handle the backward inference. + if not belief.is_eternal and (belief.is_judgement or belief.is_goal): + truth_belief = project_truth(task.sentence, belief.sentence) + belief = belief.eternalize(truth_belief) + # beleif_eternalized = belief # TODO: should it be added into the `tasks_derived`? + + results = self.inference(task.sentence, belief.sentence) + + results.extend(self.inference_compositional(task.sentence, belief.sentence)) + + # print(">>>", results) + + for term, truth in results: + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + stamp = Stamp_merge(stamp_task, stamp_belief) + + # TODO: calculate budget + budget = Budget_forward(truth, task_link.budget, term_link_valid.budget) + sentence_derived = Judgement(term[0], stamp, truth) + + task_derived = Task(sentence_derived, budget) + # normalize the variable indices + task_derived.term._normalize_variables() + tasks_derived.append(task_derived) + + if term_link is not None: # TODO: Check here whether the budget updating is the same as OpenNARS 3.0.4. 
+ for task in tasks_derived: + TermLink.update_budget(term_link.budget, task.budget.quality, belief.budget.priority if belief is not None else concept_target.budget.priority) + + for term_link in term_links: + concept.term_links.put_back(term_link) + + return list(filter(lambda t: t.truth.c > 0, tasks_derived)) + + def inference(self, t1: Sentence, t2: Sentence) -> list: + results = [] + + t1e = self._variable_elimination(t1.term, t2.term) + t2e = self._variable_elimination(t2.term, t1.term) + + # TODO: what about other possibilities? + t1t = t1e[0] if len(t1e) else t1.term + t2t = t2e[0] if len(t2e) else t2.term + + l1 = self.logic(t1t) + l2 = self.logic(t2t) + for rule in self.rules_syllogistic: + res = self.apply(rule, l1, l2) + if res is not None: + r, _ = rule[1] + inverse = True if r[-1] == "'" else False + r = r.replace("'", '') # remove trailing ' + tr1, tr2 = (t1.truth, t2.truth) if not inverse else (t2.truth, t1.truth) + truth = self._truth_functions[r](tr1, tr2) + results.append((res, truth)) + + return results + + def apply(self, rule, l1, l2): + # print("\nRULE:", rule) + (p1, p2, c), (r, constraints) = rule[0], rule[1] + + result = run(1, c, eq((p1, p2), (l1, l2)), *constraints) + # print(result) + + if result: + conclusion = self.term(result[0]) + # print(conclusion) + # apply diff connector + difference = self._diff(conclusion) + if difference == None: + # print("Rule application failed.") + return None + elif difference == -1: + # print(conclusion) # no diff application + return (conclusion, r) + else: + # print(difference) # diff applied successfully + return (difference, r) + else: + # print("Rule application failed.") + return None + + def inference_immediate(self, t: Sentence): + results = [] + + l = self.logic(t.term) + for rule in self.rules_immediate: + (p, c), (r, constraints) = rule[0], rule[1] + + result = run(1, c, eq(p, l), *constraints) + # print(result) + + if result: + conclusion = self.term(result[0]) + # print(conclusion) + truth = 
self._truth_functions[r](t.truth) + results.append(((conclusion, r), truth)) + + return results + + def inference_structural(self, t: Sentence): + results = [] + + l1 = self.logic(t.term, structural=True) + for (l2, sub_terms) in self.theorems: + for rule in self.rules_strong: + res = self.apply(rule, l1, l2) + if res is not None: + # ensure no theorem terms in conclusion + if sub_terms.isdisjoint(res[0].sub_terms): + r, _ = rule[1] + inverse = True if r[-1] == "'" else False + r = r.replace("'", '') # remove trailing ' + tr1, tr2 = (t.truth, truth_analytic) if not inverse else (truth_analytic, t.truth) + truth = self._truth_functions[r](tr1, tr2) + results.append((res, truth)) + + return results + + '''variable introduction''' + def inference_compositional(self, t1: Sentence, t2: Sentence): + results = [] + + common = set(t1.term.sub_terms).intersection(t2.term.sub_terms) + + if len(common) == 0: + return results + + l1 = self.logic(t1.term) + l2 = self.logic(t2.term) + for rule in self.rules_conditional_compositional: + res = self.apply(rule, l1, l2) + if res is not None: + prefix = '$' if res[0].is_statement else '#' + substitution = {self.logic(c, True, var_intro=True): var(prefix=prefix) for c in common} + reified = reify(self.logic(res[0], True, var_intro=True), substitution) + + conclusion = self.term(reified) + + r, _ = rule[1] + tr1, tr2 = (t1.truth, t2.truth) + truth = self._truth_functions[r](tr1, tr2) + + results.append(((conclusion, r), truth)) + + return results + + +################################################# +### EXAMPLES ### + +# engine = KanrenEngine() + +# from time import time + +# j1 = parse(' (&, animal, [flying])>.') + +# t = time() +# print( +# engine.inference_structural(j1) +# ) +# print(time() - t) + +# print("\n\n") + +# t1 = parse('robin>. %1.000;0.474%') +# t2 = parse('animal>. 
%1.000;0.900%') +# print(engine.inference_compositional(t1, t2)) + +# print("\n") + +# exit() +''' +# CONDITIONAL + +t1 = parse('<(&&, A, B, C, D) ==> Z>.') + +t2 = parse('B.') # positive example +print(engine.inference(t1, t2)) + +t2 = parse('U.') # negative example +print(engine.inference(t1, t2)) + +t2 = parse('(&&, B, C).') # complex example +print(engine.inference(t1, t2)) + +print('\n--NAL 5--') + +t2 = parse(' B>.') +print(engine.inference(t1, t2)) + +t2 = parse(' Z>.') +# print(engine.inference(t1, t2)) +for r in engine.inference(t1, t2): + print(r) + +t2 = parse(' B>.') +print(engine.inference(t1, t2)) + +print('\n----DEDUCTION') + +import time +def timeit(): + t = time.time() + engine.inference(t1, t2) + t = time.time() - t + print(len(engine.rules), 'rules processed in', t, 'seconds') + +# DEDUCTION + +t1 = parse(' animal>.') +t2 = parse(' bird>.') +print(engine.inference(t1, t2)) +timeit() + +print("\n\n----VARIABLE SUBSTITUTION") + +# CONDITIONAL SYLLOGISTIC + +print('\n--nal6.7') +t1 = parse('<<$x --> bird> ==> <$x --> animal>>.') +t2 = parse(' bird>.') +print(engine.inference(t1, t2)) +timeit() + +print('\n--nal6.8') +t1 = parse('<<$x --> bird> ==> <$x --> animal>>.') +t2 = parse(' animal>.') +print(engine.inference(t1, t2)) +timeit() + +print('\n--nal6.12') + +t1 = parse('<(&&,<$x --> flyer>,<$x --> [chirping]>, <(*, $x, worms) --> food>) ==> <$x --> bird>>.') +t2 = parse('<{Tweety} --> flyer>.') +print(engine.inference(t1, t2)) +timeit() + + +# THEOREMS + +print('\n\n----THEOREMS') + +theorem = parse('<<$S <-> $P> ==> <$S --> $P>>.', False) + +t1 = parse(' pet>.', False) + +# t2 = engine._variable_elimination(theorem, t1)[0] + +# from pynars.Narsese import Base +# from pynars import Global + +# t1 = Sentence(t1, Punctuation.Judgement, Stamp(Global.time, Global.time, None, Base((Global.get_input_id(),)), is_external=False)) +# t2 = Sentence(t2, Punctuation.Judgement, Stamp(Global.time, Global.time, None, Base((Global.get_input_id(),)), 
is_external=False)) +# print(t1, t2) +print(engine.inference(theorem, t1)) +''' \ No newline at end of file diff --git a/pynars/NARS/InferenceEngine/Logic.py b/pynars/NARS/InferenceEngine/KanrenEngine/Logic.py similarity index 83% rename from pynars/NARS/InferenceEngine/Logic.py rename to pynars/NARS/InferenceEngine/KanrenEngine/Logic.py index 6765ca4..9c7fbba 100644 --- a/pynars/NARS/InferenceEngine/Logic.py +++ b/pynars/NARS/InferenceEngine/KanrenEngine/Logic.py @@ -12,13 +12,9 @@ # print(p1, p2, c) # ['M', '-->', 'P'] ['S', '-->', 'M'] ['S', '-->', 'P'] -_vars = {} # mappings of terms to vars - def replace_terms(t: str): if t.isalpha(): - if not t in _vars: - _vars[t] = var() - return _vars[t] + return var(t) else: return t @@ -26,7 +22,7 @@ def replace_terms(t: str): p2 = list(map(replace_terms, p2)) c = list(map(replace_terms, c)) -# print(p1, p2, c) # [~_1, '-->', ~_2] [~_3, '-->', ~_1] [~_3, '-->', ~_2] +# print(p1, p2, c) # [~M, '-->', ~P] [~S, '-->', ~M] [~S, '-->', ~P] t1, t2 = "bird --> animal", "robin --> bird" t1, t2 = t1.split(), t2.split() # ["bird", "-->", "animal"], ["robin", "-->", "bird"] diff --git a/pynars/NARS/InferenceEngine/KanrenEngine/NAL_rules.txt b/pynars/NARS/InferenceEngine/KanrenEngine/NAL_rules.txt new file mode 100644 index 0000000..e13686a --- /dev/null +++ b/pynars/NARS/InferenceEngine/KanrenEngine/NAL_rules.txt @@ -0,0 +1,130 @@ +NAL-1 +{ P>. M>} |- P> .ded +{

M>. S>} |-

S> .ded' +{ P>. S>} |- P> .ind +{ P>. S>} |-

S> .ind' +{

M>. M>} |- P> .abd +{

M>. M>} |-

S> .abd' +{

M>. S>} |- P> .exe +{ P>. M>} |-

S> .exe' + +NAL2 +{ P>. M>} |- P> .res +{ P>. S>} |- P> .res +{

M>. M>} |- P> .res +{

M>. S>} |- P> .res +{ P>. S>} |- P> .com +{

M>. M>} |- P> .com' +{ P>. M>} |- P> .ana +{ P>. S>} |- P> .ana +{

M>. M>} |-

S> .ana +{

M>. S>} |-

S> .ana +{ P>. M>} |- P> .ana' +{

M>. M>} |- P> .ana' +{ P>. S>} |-

S> .ana' +{

M>. S>} |-

S> .ana' + +NAL3 +'composition +{ T1>. T2>} |- (&, T1, T2)> .int +{ M>. M>} |- <(|, T1, T2) --> M> .int +{ T1>. T2>} |- (|, T1, T2)> .uni +{ M>. M>} |- <(&, T1, T2) --> M> .uni +{ T1>. T2>} |- (-, T1, T2)> .dif +{ T1>. T2>} |- (-, T2, T1)> .dif' +{ M>. M>} |- <(~, T1, T2) --> M> .dif +{ M>. M>} |- <(~, T2, T1) --> M> .dif' +'decomposition +{(--, (&, T1, T2)>). T1>} |- (--, T2>) .ded +{ (|, T1, T2)>. (--, T1>)} |- T2> .ded +{(--, (-, T1, T2)>). T1>} |- T2> .ded +{(--, (-, T2, T1)>). (--, T1>)} |- (--, T2>) .ded +{(--, <(|, T2, T1) --> M>). M>} |- (--, M>) .ded +{<(&, T2, T1) --> M>. (--, M>)} |- M> .ded +{(--, <(~, T1, T2) --> M>). M>} |- M> .ded +{(--, <(~, T2, T1) --> M>). (--, M>)} |- (--, M>) .ded +{(--, (&&, T1, T2)). T1} |- (--, T2) .ded +{(||, T1, T2). (--, T1)} |- T2 .ded + +NAL4 +no new rules + +NAL5 +replace --> with ==> in NAL1 & NAL2 +replace <-> with <=> in NAL2 +replace --> with ==> in NAL3 (except difference) +replace | with || in NAL3 (except difference) +replace & with && in NAL3 (except difference) + +'conditional syllogistic +{ P>. S} |- P .ded +{
<P ==> S>. S} |- P .abd
+{S. <S <=> P>} |- P .ana
+{S. <P <=> S>} |- P .ana'
+{<S <=> P>. S} |- P .ana'
+{<P <=>
S>. S} |- P .ana’ + +'conditional conjunctive +'(C ^ S) => P, S |- C => P (alternative syntax below) +{<(&&, C, S) ==> P>. _S} |- <((&&, C, S) - _S) ==> P> .ded + +'(C ^ S) => P, M => S |- (C ^ M) => P (alternative syntax below) +{<(&&, C, S) ==> P>. _S>} |- <(&&, ((&&, C, S) - _S), M) ==> P> .ded + +'(C ^ S) => P, C => P |- S (alternative syntax below) +{<(&&, C, S) ==> P>. <_C ==> P>} |- ((&&, C, S) - _C) .abd + +'(C ^ S) => P, (C ^ M) => P |- M => S (alternative syntax below) +{<(&&, C, S) ==> P>. <(&&, _C, M) ==> P>} |- <((&&, _C, M) - C) ==> ((&&, C, S) - _C)> .abd + +'{ P>. S} |- <(&&, C, S) ==> P> .ind (alternative syntax below) +{<(&&, C, M) ==> P>. (&&, S, M)} |- <(&&, C, S, M) ==> P> .ind + +'(C ^ M) => P, M => S |- (C ^ S) => P (alternative syntax below) +{<(&&, C, M) ==> P>. <_M ==> S>} |- <(&&, ((&&, C, M) - _M), S) ==> P> .ind + + +NAL6 +variable elimination already works with miniKanren, are explicit rules needed? +? - are there additional formal rules for variable introduction besides (Table 10.6)? +? - can these be handled by a combination of conditional compositional rules and substitution? + +NAL7 +no new rules +. - atemporal versions of copulas and connectors can be used +. - if the temporal order of events in the conclusion can be decided +. - a temporal conclusion is formed from the atemporal one + +NAL8 +no new rules + +NAL9 +no new rules + + + +######### + +QUESTIONS + +######### + +- When should we apply immediate rules? +(conversion, negation, contraposition) + +- When to apply decomposition rules? +(always or only in certain conditions like with temporal compositional) + +- Are there higher order composition rules for .dif? + +- Is any arbitrary S valid for this rule? +{ P>. S} |- <(&&, C, S) ==> P> .ind + +- Should these be handled outside of normal flow? +- Can these also be the variable introduction rules (substituting {M/#x})? +'conditional compositional i.e. 
{P, S} |- S => P, {P, S} |- (P ^ S) + +- Theorems can be handled explicitly using miniKanren +should they be applied at the same time as immediate rules? + + diff --git a/pynars/NARS/InferenceEngine/KanrenEngine/__init__.py b/pynars/NARS/InferenceEngine/KanrenEngine/__init__.py new file mode 100644 index 0000000..158bae2 --- /dev/null +++ b/pynars/NARS/InferenceEngine/KanrenEngine/__init__.py @@ -0,0 +1 @@ +from .KanrenEngine import KanrenEngine \ No newline at end of file diff --git a/pynars/NARS/InferenceEngine/__init__.py b/pynars/NARS/InferenceEngine/__init__.py index e31806b..939ef9a 100644 --- a/pynars/NARS/InferenceEngine/__init__.py +++ b/pynars/NARS/InferenceEngine/__init__.py @@ -1,3 +1,4 @@ from .GeneralEngine import GeneralEngine from .TemporalEngine import TemporalEngine -from .VariableEngine import VariableEngine \ No newline at end of file +from .VariableEngine import VariableEngine +from .KanrenEngine import KanrenEngine \ No newline at end of file diff --git a/pynars/Narsese/_py/Task.py b/pynars/Narsese/_py/Task.py index 7e48e64..0fe75ca 100644 --- a/pynars/Narsese/_py/Task.py +++ b/pynars/Narsese/_py/Task.py @@ -9,6 +9,7 @@ class Task(Item): input_id = -1 best_solution: 'Task' = None + immediate_structural_rules_applied = False processed = False def __init__(self, sentence: Sentence, budget: Budget=None, input_id: int=None) -> None: