diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..28488d3 --- /dev/null +++ b/.gitignore @@ -0,0 +1,118 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class +Tests/*.png +*.pkl +./utils/SparseLUT + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ diff --git a/.vscode/launch.json b/.vscode/launch.json new file mode 100644 index 0000000..95b4ea8 --- /dev/null +++ b/.vscode/launch.json @@ -0,0 +1,224 @@ +{ + // 使用 IntelliSense 了解相关属性。 + // 悬停以查看现有属性的描述。 + // 欲了解更多信息,请访问: https://go.microsoft.com/fwlink/?linkid=830387 + "version": "0.2.0", + "configurations": [ + { + "name": "Python: Pong", + "type": "python", + "request": "launch", + "module": "RL.Pong.main" + }, + { + "name": "Python: test_nal9", + "type": "python", + "request": "launch", + "module": "Tests.test_NAL.test_NAL9" + }, + { + "name": "Python: test_nal8", + "type": "python", + "request": "launch", + "module": "Tests.test_NAL.test_NAL8" + }, + { + "name": "Python: test_nal7", + "type": "python", + "request": "launch", + "module": "Tests.test_NAL.test_NAL7" + }, + { + "name": "Python: test_nal6", + "type": "python", + "request": "launch", + "module": "Tests.test_NAL.test_NAL6" + }, + { + "name": "Python: test_nal5", + "type": "python", + "request": "launch", + "module": "Tests.test_NAL.test_NAL5" + }, + { + "name": "Python: test_nal4", + "type": "python", + "request": "launch", + "module": "Tests.test_NAL.test_NAL4" + }, + { + "name": "Python: test_nal3", + "type": "python", + "request": "launch", + "module": "Tests.test_NAL.test_NAL3" + }, + { + "name": "Python: test_nal2", + "type": "python", + "request": "launch", + "module": "Tests.test_NAL.test_NAL2" + }, + { + "name": "Python: test_nal1", + "type": "python", + "request": "launch", + "module": "Tests.test_NAL.test_NAL1" + }, + { + "name": "Python: test_rule_lut", + "type": "python", + "request": "launch", + "module": "Tests.test_RuleMap.test_RuleLUT" + }, + { + "name": "Python: test_term_equal", + "type": "python", + "request": "launch", + "module": "Tests.test_Term_Equal" + }, + { + "name": "Python: test_terms", + "type": "python", + "request": "launch", + "module": "Tests.test_variable.test_Terms" + }, + { + "name": "Python: 
test_substitution", + "type": "python", + "request": "launch", + "module": "Tests.test_variable.test_Substitution" + }, + { + "name": "Python: test_variable", + "type": "python", + "request": "launch", + "module": "Tests.test_Variable" + }, + { + "name": "Python: test_compound", + "type": "python", + "request": "launch", + "module": "Tests.test_Compound" + }, + { + "name": "Python: test_inference_engine", + "type": "python", + "request": "launch", + "module": "Tests.test_InferenceEngine" + }, + { + "name": "Python: test_link", + "type": "python", + "request": "launch", + "module": "Tests.test_Link" + }, + { + "name": "Python: test_table", + "type": "python", + "request": "launch", + "module": "Tests.test_Table" + }, + { + "name": "Python: test_memory", + "type": "python", + "request": "launch", + "module": "Tests.test_Memory" + }, + { + "name": "Python: test_evidential_base", + "type": "python", + "request": "launch", + "module": "Tests.test_EvidentialBase" + }, + { + "name": "Python: test_rulemap", + "type": "python", + "request": "launch", + "module": "Tests.test_RuleMap" + }, + { + "name": "Python: test_rulemap_v2", + "type": "python", + "request": "launch", + "module": "Tests.test_RuleMap.test_RuleMap_v2" + }, + { + "name": "Python: test_examples", + "type": "python", + "request": "launch", + "module": "Tests.test_Examples" + }, + { + "name": "Python: test_copula", + "type": "python", + "request": "launch", + "module": "Tests.test_Copula" + }, + // { + // "name": "Pypy: test_bag", + // "type": "pypy", + // "request": "launch", + // "module": "Tests.test_Bag" + // }, + { + "name": "Python: test_bag", + "type": "python", + "request": "launch", + "module": "Tests.test_Bag" + }, + { + "name": "Python: test_sparse_lut", + "type": "python", + "request": "launch", + "module": "Tests.test_RuleMap.test_sparse_lut" + }, + { + "name": "Python: test_parser", + "type": "python", + "request": "launch", + "module": "Tests.test_Parser" + }, + { + "name": "Python: test_color", + "type": "python", + "request": "launch", + "module": "Tests.test_color" + }, + { + "name": "Python: test_parse_examples", + "type": "python", + "request": "launch", + "module": "Tests.test_ParseExamples" + }, + { + "name": "Python: console", + "type": "python", + "request": "launch", + "program": "Console.py", + "console": "integratedTerminal", + "args": [ + // "Tests/examples/single_step/nal7/nal7.7.nal" + // "Tests/examples/single_step/nal5.query.nal" + ] + }, + { + "name": "Python: _generate_init_file", + "type": "python", + "request": "launch", + "module": "NAL._generate_init_file" + }, + { + "name": "Python: Parser test", + "type": "python", + "request": "launch", + "module": "Narsese.Parser._test" + }, + { + "name": "Python: 当前文件", + "type": "python", + "request": "launch", + "program": "${file}", + "console": "integratedTerminal" + } + ] +} \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..21281d2 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,12 @@ +{ + "restructuredtext.languageServer.disabled": true, + "python.testing.unittestArgs": [ + "-v", + "-s", + "${workspaceFolder}/Tests", + "-p", + "test_*.py" + ], + "python.testing.pytestEnabled": false, + "python.testing.unittestEnabled": true +} \ No newline at end of file diff --git a/Config.py b/Config.py new file mode 100644 index 0000000..370fb0c --- /dev/null +++ b/Config.py @@ -0,0 +1,176 @@ +''' +Configure some hyper-parameters and some other settings via the file `config.json`. 
+'''
+from pathlib import Path
+
+try:
+    import jstyleson as json  # json with C/C++ style comments
+except ImportError:
+    # import json
+    raise ImportError("please install the module by `pip install jstyleson`")
+
+class Enable:
+    temporal_rasoning = False
+    variable = True
+    anticipation = False
+    operation = False
+    debug = True
+
+class Config:
+    priority: float=0.8
+    durability: float=0.8
+    quality: float=0.5
+    num_buckets: int = 100
+    max_duration: int = 10000
+    f: float=1.0
+    c: float=0.9
+    c_judgement: float=0.9
+    c_goal: float=0.9
+    k: int=1
+    p_judgement: float=0.8
+    d_judgement: float=0.5
+    p_question: float=0.9
+    d_question: float=0.9
+    p_quest: float=0.9
+    d_quest: float=0.9
+    p_goal: float=0.9
+    d_goal: float=0.9
+
+    p_feedback: float = 0.9
+    d_feedback: float = 0.5
+
+    budget_thresh: float=0.01
+
+    nlevels_task_link: int=10
+    capacity_task_link: int=100
+    nlevels_term_link: int=10
+    capacity_term_link: int=100
+    capacity_table: int=100
+
+    complexity_unit: float=1.0 # 1.0 - oo
+
+    quality_min: float=0.3
+    cycles_per_duration: int=5
+    n_forget_durations: int=2
+    cycles_forget = cycles_per_duration*n_forget_durations
+
+    revision_max_occurence_distance: int=10
+
+    truth_epsilon = 0.01
+    budget_epsilon = 0.0001
+
+    variable_repr_normalized = False
+
+    rate_discount_c = 0.5
+
+    rate_discount_p_internal_exp = 0.1
+    rate_discount_d_internal_exp = 0.1
+
+    temporal_duration = 5
+    n_sequence_attempts = 10
+    n_op_condition_attempts = 10
+
+
+    @classmethod
+    def check(cls):
+        '''Check if each parameter is valid'''
+        pass
+
+    @classmethod
+    def apply(cls):
+        '''Apply setting values of hyper-parameters'''
+        # from NARS import DataStructures
+        # # Budget
+        # DataStructures.Budget.priority = cls.priority
+        # DataStructures.Budget.durability = cls.durability
+        # DataStructures.Budget.quality = cls.quality
+
+
+def load(file_path: str):
+
+    file_path: Path = Path(file_path)
+
+    if not file_path.exists():
+        raise FileNotFoundError(f"The file `{file_path}` does not exist.")
+    if file_path.suffix != '.json':
+        raise ValueError(f"The file `{file_path}` should be `*.json`.")
+    try:
+        with open(file_path, 'r') as f:
+            content = json.load(f)
+    except Exception:
+        raise IOError(f"Error when opening the file `{file_path}`.")
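+    # A sketch of the shape this loader expects `config.json` to take, inferred
+    # from the keys read below (the actual file may define more entries):
+    # {
+    #     "HYPER-PARAMS": {
+    #         "TRUTH_EPSILON": 0.01,
+    #         "TEMPORAL_DURATION": 5,
+    #         "DEFAULT": {
+    #             "BUDGET": {"PRIORITY_JUDGEMENT": 0.8, "DURABILITY_JUDGEMENT": 0.5, "THRESHOLD": 0.01},
+    #             "TRUTH": {"FREQUENCY": 1.0, "CONFIDENCE": 0.9, "CONFIDENCE_JUDGEMENT": 0.9, "CONFIDENCE_GOAL": 0.9, "K": 1},
+    #             "NUM_BUCKETS": 100,
+    #             "MAX_DURATION": 10000,
+    #             "CONCEPT": {"NUM_LEVELS_TASKLINK_BAG": 10, "CAPACITY_TASKLINK_BAG": 100}
+    #         }
+    #     }
+    # }
+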
+    # set driver mode (py/pyx/cypy/cpp)
+    try:
+        pass # TODO
+    except:
+        pass # TODO
+
+    # set hyper-parameters
+    try:
+        defaults: dict = content['HYPER-PARAMS']['DEFAULT']
+        budget = defaults.get('BUDGET', None)
+        if budget is not None:
+            budget: dict
+            Config.p_judgement = budget.get('PRIORITY_JUDGEMENT', Config.p_judgement)
+            Config.d_judgement = budget.get('DURABILITY_JUDGEMENT', Config.d_judgement)
+            Config.p_question = budget.get('PRIORITY_QUESTION', Config.p_question)
+            Config.d_question = budget.get('DURABILITY_QUESTION', Config.d_question)
+            Config.p_quest = budget.get('PRIORITY_QUEST', Config.p_quest)
+            Config.d_quest = budget.get('DURABILITY_QUEST', Config.d_quest)
+            Config.p_goal = budget.get('PRIORITY_GOAL', Config.p_goal)
+            Config.d_goal = budget.get('DURABILITY_GOAL', Config.d_goal)
+            Config.p_feedback = budget.get('PRIORITY_FEEDBACK', Config.p_feedback)
+            Config.d_feedback = budget.get('DURABILITY_FEEDBACK', Config.d_feedback)
+            Config.budget_thresh = budget.get('THRESHOLD', Config.budget_thresh)
+
+        num_buckets = defaults.get('NUM_BUCKETS', None)
+        if num_buckets is not None:
+            Config.num_buckets = num_buckets
+        truth = defaults.get('TRUTH', None)
+        if truth is not None:
+            Config.f = truth['FREQUENCY']
+            Config.c = truth['CONFIDENCE']
+            Config.c_judgement = truth['CONFIDENCE_JUDGEMENT']
+            Config.c_goal = truth['CONFIDENCE_GOAL']
+            Config.k = truth['K']
+
+        max_duration = defaults.get('MAX_DURATION', None)
+        if max_duration is not None:
+            Config.max_duration = max_duration
+
+        concept: dict = defaults.get('CONCEPT', None)
+        if concept is not None:
+            Config.nlevels_task_link = concept.get('NUM_LEVELS_TASKLINK_BAG', Config.nlevels_task_link)
+            Config.capacity_task_link = concept.get('CAPACITY_TASKLINK_BAG', Config.capacity_task_link)
+            Config.nlevels_term_link = concept.get('NUM_LEVELS_TERMLINK_BAG', Config.nlevels_term_link)
+            Config.capacity_term_link = concept.get('CAPACITY_TERMLINK_BAG', Config.capacity_term_link)
+            Config.capacity_table = concept.get('CAPACITY_TABLE', Config.capacity_table)
+        Config.complexity_unit = defaults.get('COMPLEXITY_UNIT', Config.complexity_unit)
+        Config.quality_min = defaults.get('QUALITY_MIN', Config.quality_min)
+        Config.cycles_per_duration = defaults.get('CYCLES_PER_DURATION', Config.cycles_per_duration)
+        Config.n_forget_durations = defaults.get('NUM_FORGET_DURATIONS', Config.n_forget_durations)
+        Config.cycles_forget = Config.cycles_per_duration * Config.n_forget_durations
+        Config.revision_max_occurence_distance = defaults.get('REVISION_MAX_OCCURRENCE_DISTANCE', Config.revision_max_occurence_distance)
+
+        Config.rate_discount_c = defaults.get('RATE_DISCOUNT_CONFIDENCE', Config.rate_discount_c)
+
+        Config.rate_discount_p_internal_exp = defaults.get('RATE_DISCOUNT_PRIORITY_INTERNAL_EXPERIENCE', Config.rate_discount_p_internal_exp)
+        Config.rate_discount_d_internal_exp = defaults.get('RATE_DISCOUNT_DURABILITY_INTERNAL_EXPERIENCE', Config.rate_discount_d_internal_exp)
+
+        hyperparams: dict = content['HYPER-PARAMS']
+        Config.truth_epsilon = hyperparams.get('TRUTH_EPSILON', Config.truth_epsilon)
+        Config.budget_epsilon = hyperparams.get('BUDGET_EPSILON', Config.budget_epsilon)
+        Config.complexity_unit = hyperparams.get('COMPLEXITY_UNIT', Config.complexity_unit)
+        Config.temporal_duration = hyperparams.get('TEMPORAL_DURATION', Config.temporal_duration)
+        Config.n_sequence_attempts = hyperparams.get('NUM_SEQUENCE_ATTEMPTS', Config.n_sequence_attempts)
+        Config.n_op_condition_attempts = hyperparams.get('NUM_OP_CONDITION_ATTEMPTS', 
Config.n_op_condition_attempts) + + pass # TODO + except: + pass # TODO + + Config.check() + Config.apply() \ No newline at end of file diff --git a/Console.py b/Console.py new file mode 100644 index 0000000..da50183 --- /dev/null +++ b/Console.py @@ -0,0 +1,146 @@ +from copy import deepcopy +from typing import Tuple, Union +from pathlib import Path +import Narsese, NAL, NARS +from time import sleep +from multiprocessing import Process +import os +from Narsese.Parser.parser import TreeToNarsese +from Narsese import Sentence +import random +from NARS import Reasoner_3_0_4 as Reasoner +from utils.Print import out_print, PrintType +from Narsese import Task +from typing import List +from utils.tools import rand_seed +import argparse + +def info(title): + print(f''' +============= {title} ============= +module name: {__name__} +parent process: {os.getppid()} +process id: {os.getpid()} +============={'='*(len(title)+2)}============= + ''') + +def run_line(nars: Reasoner, line: str): + '''''' + line = line.strip(' \n') + if line.startswith("//"): + return None + elif line.startswith("''"): + if line.startswith("''outputMustContain('"): + line = line[len("''outputMustContain('"):].rstrip("')\n") + if len(line) == 0: return + try: + content_check = Narsese.parser.parse(line) + out_print(PrintType.INFO, f'OutputContains({content_check.sentence.repr()})') + except: + out_print(PrintType.ERROR, f'Invalid input! Failed to parse: {line}') + # out_print(PrintType.ERROR, f'{file}, line {i}, {line}') + return + elif line.startswith("'"): + return None + elif line.isdigit(): + n_cycle = int(line) + out_print(PrintType.INFO, f'Run {n_cycle} cycles.') + tasks_all_cycles = [] + for _ in range(n_cycle): + tasks_all = nars.cycle() + tasks_all_cycles.append(deepcopy(tasks_all)) + return tasks_all_cycles + else: + line = line.rstrip(' \n') + if len(line) == 0: + return None + try: + success, task, _ = nars.input_narsese(line, go_cycle=False) + if success: out_print(PrintType.IN, task.sentence.repr(), *task.budget) + else: out_print(PrintType.ERROR, f'Invalid input! 
Failed to parse: {line}')
+
+            tasks_all = nars.cycle()
+            return [deepcopy(tasks_all)]
+        except:
+            out_print(PrintType.ERROR, f'Unknown error: {line}')
+
+def run_nars(filepath: str=None):
+    # info('Console')
+    seed = 137
+    rand_seed(seed)
+    out_print(PrintType.COMMENT, f'rand_seed={seed}', comment_title='Setup')
+
+
+    out_print(PrintType.COMMENT, 'Init...', comment_title='NARS')
+    nars = Reasoner(100, 100)
+    out_print(PrintType.COMMENT, 'Run...', comment_title='NARS')
+
+    def handle_lines(lines: str):
+        tasks_lines = []
+        for line in lines.split('\n'):
+            if len(line) == 0: continue
+
+            tasks_line = run_line(nars, line)
+            if tasks_line is not None:
+                tasks_lines.extend(tasks_line)
+
+        tasks_lines: List[Tuple[List[Task], Task, Task, List[Task], Task, Tuple[Task, Task]]]
+        for tasks_line in tasks_lines:
+            tasks_derived, judgement_revised, goal_revised, answers_question, answers_quest, (task_operation_return, task_executed) = tasks_line
+            for task in tasks_derived: out_print(PrintType.OUT, task.sentence.repr(), *task.budget)
+
+            if judgement_revised is not None: out_print(PrintType.OUT, judgement_revised.sentence.repr(), *judgement_revised.budget)
+            if goal_revised is not None: out_print(PrintType.OUT, goal_revised.sentence.repr(), *goal_revised.budget)
+            if answers_question is not None:
+                for answer in answers_question: out_print(PrintType.ANSWER, answer.sentence.repr(), *answer.budget)
+            if answers_quest is not None:
+                for answer in answers_quest: out_print(PrintType.ANSWER, answer.sentence.repr(), *answer.budget)
+            if task_executed is not None:
+                out_print(PrintType.EXE, f'{task_executed.term.repr()} = {str(task_operation_return) if task_operation_return is not None else None}')
+
+
+    # handle the file
+    if filepath is not None:
+        filepath: Path = Path(filepath)
+        filename = filepath.name
+        out_print(PrintType.COMMENT, f'Run file <{filename}>.', comment_title='NARS')
+        with open(filepath, 'r') as f:
+            lines = f.read()
+            handle_lines(lines)
+    out_print(PrintType.COMMENT, 'Console.', comment_title='NARS')
+
+    # console
+    while True:
+        out_print(PrintType.COMMENT, '', comment_title='Input', end='')
+        lines = input()
+        handle_lines(lines)
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='Parse NAL files.')
+    parser.add_argument('filepath', metavar='Path', type=str, nargs='*',
+                        help='file path of an *.nal file.')
+    args = parser.parse_args()
+    filepath: Union[list, None] = args.filepath
+    filepath = filepath[0] if (filepath is not None and len(filepath) > 0) else None
+
+    try:
+        run_nars(filepath)
+    except KeyboardInterrupt:
+        out_print(PrintType.COMMENT, 'Stop...', comment_title='\n\nNARS')
+
+    print('Done.')
+
+# if __name__ == '__main__':
+#     # Process
+#     info('main')
+#     try:
+#         p_console = Process(target=run_console, args=())
+#         p_nars = Process(target=run_nars, args=())
+#         p_console.start()
+#         p_nars.start()
+#         p_nars.join()
+#         p_console.close()
+#     except KeyboardInterrupt:
+#         print('\n\nStop NARS...')
+
+#     print('Done.')
\ No newline at end of file
diff --git a/Global.py b/Global.py
new file mode 100644
index 0000000..9def3ee
--- /dev/null
+++ b/Global.py
@@ -0,0 +1,7 @@
+time = 0
+_input_id = 0
+def get_input_id():
+    global _input_id
+    input_id = _input_id
+    _input_id += 1
+    return input_id
\ No newline at end of file
diff --git a/NAL/Functions/BudgetFunctions.py b/NAL/Functions/BudgetFunctions.py
new file mode 100644
index 0000000..ac89953
--- /dev/null
+++ b/NAL/Functions/BudgetFunctions.py
@@ -0,0 +1,148 @@
+# from NARS.DataStructures._py.Link import TermLink
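+# A small usage sketch of `Budget_merge` defined below, using the
+# Budget(priority, durability, quality) constructor as it is used in this file:
+#   Budget_merge(Budget(0.8, 0.8, 0.5), Budget(0.6, 0.4, 0.5))
+#   # -> priority = Or(0.8, 0.6) = 0.92, durability = Average(0.8, 0.4) = 0.6,
+#   #    quality kept at 0.5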
+from Narsese import Budget, Truth
+from Config import Config
+from Narsese._py.Task import Belief
+from Narsese._py.Term import Term
+from Narsese._py.Truth import Truth
+from .ExtendedBooleanFunctions import *
+from copy import deepcopy
+from .UncertaintyMappingFunctions import w_to_c
+
+from .Tools import truth_to_quality
+
+def Budget_revision(budget_task: Budget, truth_task: Truth, truth_belief: Truth, truth_derived: Truth, budget_tasklink: Budget=None, budget_termlink: Budget=None, replace=True, replace_tasklink=True, replace_termlink=True):
+    '''
+    budget_task (Budget):
+        budget, of a task, to be revised.
+    truth_task (Truth):
+        truth of the task.
+    truth_belief (Truth):
+        truth of the belief.
+    truth_derived (Truth):
+        truth of a task derived from the task and the belief.
+    budget_tasklink (Budget): default: None
+        budget of the tasklink whose post-link task is the one with the `budget_task`, to be revised.
+    budget_termlink (Budget): default: None
+        budget of the termlink whose post-link task is the one with the `budget_task`, to be revised.
+    replace (bool): default: True
+        whether to revise the `budget_task` in place, without a copy.
+    replace_tasklink (bool): default: True
+        whether to revise the `budget_tasklink` in place, without a copy.
+    replace_termlink (bool): default: True
+        whether to revise the `budget_termlink` in place, without a copy.
+
+    Ref: OpenNARS 3.1.0 BudgetFunctions.java line 72~118
+        Evaluate the quality of a revision, then de-prioritize the premises.
+    '''
+    if not replace: budget_task = deepcopy(budget_task)
+    diff_task = abs(truth_task.e - truth_derived.e)
+    budget_task.priority = And(budget_task.priority, 1-diff_task)
+    budget_task.durability = And(budget_task.durability, 1-diff_task)
+
+    if budget_tasklink is not None:
+        if not replace_tasklink: budget_tasklink = deepcopy(budget_tasklink)
+        budget_tasklink.priority = And(budget_task.priority, diff_task)
+        budget_tasklink.durability = And(budget_task.durability, diff_task)
+    if budget_termlink is not None:
+        if not replace_termlink: budget_termlink = deepcopy(budget_termlink)
+        diff_belief = abs(truth_belief.e - truth_derived.e)
+        budget_termlink.priority = And(budget_termlink.priority, 1-diff_belief)
+        budget_termlink.durability = And(budget_termlink.durability, 1-diff_belief)
+    diff = truth_derived.c - max(truth_task.c, truth_belief.c)
+    priority = Or(diff, budget_task.priority)
+    durability = Average(diff, budget_task.durability)
+    quality = truth_to_quality(truth_derived)
+    return Budget(priority, durability, quality), budget_task, budget_tasklink, budget_termlink
+
+def Budget_inference(quality: float, budget_tasklink: Budget, budget_termlink: Budget=None, complexity: float=1.0):
+    '''
+    Ref. OpenNARS 3.1.0 BudgetFunctions.java line 292~317.
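+
+    A sketch of the computation, matching the body below: the derived priority
+    is the `or` of the task-link priority with the term-link priority (when a
+    term link is given); durability is the task-link durability divided by the
+    complexity of the derived term (`and`-ed with the term-link durability);
+    quality is the given quality divided by that complexity.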
+ ''' + p = budget_tasklink.priority + d = budget_tasklink.durability/complexity + q = quality/complexity + if budget_termlink is not None: + p = Or(p, budget_termlink.priority) + d = And(d, budget_termlink.durability) + # budget_termlink.priority = min(1.0, Or(budget_termlink.priority, Or(q, budget_belief.priority))) + # budget_termlink.durability = min(1.0-Config.budget_epsilon, Or(budget_termlink.durability, q)) + + return Budget(p, d, q) # , budget_termlink + + +def Budget_forward(truth_new_task: Truth, budget_tasklink: Budget, budget_termlink: Budget=None): + return Budget_inference(truth_to_quality(truth_new_task), budget_tasklink, budget_termlink, 1.0) + +def Budget_backward(truth_new_task: Truth, budget_tasklink: Budget, budget_termlink: Budget=None): + return Budget_inference(truth_to_quality(truth_new_task), budget_tasklink, budget_termlink, 1.0) + +def Budget_backward_weak(truth_new_task: Truth, budget_tasklink: Budget, budget_termlink: Budget=None): + return Budget_inference(w_to_c(1, Config.k)*truth_to_quality(truth_new_task), budget_tasklink, budget_termlink, 1.0) + +def Budget_forward_compound(content: Term, truth_new_task: Truth, budget_tasklink: Budget, budget_termlink: Budget=None): + '''Ref. OpenNARS 3.1.0 BudgetFunctions.java line 254~257.''' + return Budget_inference(truth_to_quality(truth_new_task), budget_tasklink, budget_termlink, Config.complexity_unit if content is None else Config.complexity_unit*content.complexity) + +def Budget_backward_compound(content: Term, budget_tasklink: Budget, budget_termlink: Budget=None): + return Budget_inference(1.0, budget_tasklink, budget_termlink, Config.complexity_unit*content.complexity) + +def Budget_backward_weak_compound(content: Term, budget_tasklink: Budget, budget_termlink: Budget=None): + return Budget_inference(w_to_c(1, Config.k), budget_tasklink, budget_termlink, Config.complexity_unit*content.complexity) + + +'''Bag''' +def Budget_decay(budget: Budget, replace=True): + ''' + Ref: The Conceptual Design of OpenNARS 3.1.0 + Ref: OpenNARS 3.1.0 BudgetFunctions.java line 176~196 + Decrease Priority after an item is used, called in Bag. After a constant time, p should become d*p. Since in this period, the item is accessed c*p times, each time p-q should multiple d^(1/(c*p)). The intuitive meaning of the parameter "forgetRate" is: after this number of times of access, priority 1 will become d, it is a system parameter + adjustable in run time. + ''' + if not replace: budget = deepcopy(budget) + Q = Config.quality_min + C = Config.cycles_forget + p = budget.priority + q = budget.quality * Q + d = budget.durability + budget.priority = q + (p-q)*pow(d, 1.0/(p*C)) + # budget.priority = q + (p-q)*pow(d, 1.0/((p-q)*C)) # the implementation in OpenNARS 3.0.4 + return budget + +def Budget_merge(budget_base: Budget, budget_merged: Budget, replace=True): + ''' + Ref: The Conceptual Design of OpenNARS 3.1.0 + When an item is added into a bag where there is another one with the same key, the two will be merged, with their budget accumulated. In this process, the two quality values should be the same, and if not, the max operator is used. The two priority values are combined using or, so that the result will be no smaller than either of the two, while still remains in the [0, 1] range. For the same reason, or is used to combine the two durability values. Consequently, repeatedly appeared items will get more resources, both at the moment and in the near future. 
+    Ref: OpenNARS 3.1.0 BudgetFunctions.java line 161~176, 206~209
+        There are two options:
+        1) apply the `max` function to all three values;
+        2) apply the `or` function to `priority`, the arithmetic `average` function to `durability`, and keep the original value of `quality` of the concept unchanged.
+        The implementation below follows the second, OpenNARS-style option; the variant described in the Conceptual Design is kept commented out.
+    '''
+    if not replace: budget_base = deepcopy(budget_base)
+    # # implementation in the Conceptual Design:
+    # budget_base.priority = max(budget_base.priority, budget_merged.priority)
+    # budget_base.durability = Or(budget_base.durability, budget_merged.durability)
+    # budget_base.quality = Or(budget_base.quality, budget_merged.quality)
+
+    # implementation in OpenNARS 3.1.0 or 3.0.4:
+    budget_base.priority = Or(budget_base.priority, budget_merged.priority)
+    budget_base.durability = Average(budget_base.durability, budget_merged.durability)
+    budget_base.quality = budget_base.quality
+
+    # TODO: which implementation is better?
+    # Note (2021.1.25):
+    #   If the Conceptual-Design version is adopted, the budget of a term grows very large, because this function is called several times while building term links. Hence, choosing that version would also require modifying `Concept._build_term_links(...)` and `Concept._build_task_links(...)`.
+
+    return budget_base
+
+
+'''Task'''
+'''Concept'''
+'''Task-Link'''
+'''Term-Link'''
+'''Belief and Desire'''
+'''Processing Units'''
+'''Goal Evaluations'''
diff --git a/NAL/Functions/DesireValueFunctions.py b/NAL/Functions/DesireValueFunctions.py
new file mode 100644
index 0000000..42407f3
--- /dev/null
+++ b/NAL/Functions/DesireValueFunctions.py
@@ -0,0 +1,15 @@
+from typing import Callable
+from Narsese import Truth
+from .ExtendedBooleanFunctions import *
+from .UncertaintyMappingFunctions import w_to_c
+
+DesireFunction = Callable[[Truth, Truth], Truth]
+
+
+Desire_strong: DesireFunction = lambda desire1, desire2: Truth(And(desire1.f, desire2.f), And(desire1.c, desire2.c, desire2.f), desire1.k)
+
+Desire_weak: DesireFunction = lambda desire1, desire2: Truth(And(desire1.f, desire2.f), And(desire1.c, desire2.c, desire2.f, w_to_c(1.0, desire1.k)), desire1.k)
+
+Desire_deduction: DesireFunction = lambda desire1, desire2: Truth(And(desire1.f, desire2.f), And(desire1.c, desire2.c), desire1.k)
+
+Desire_induction: DesireFunction = lambda desire1, desire2: Truth(desire1.f, w_to_c(And(desire2.f, desire1.c, desire2.c), desire1.k), desire1.k)
diff --git a/NAL/Functions/ExtendedBooleanFunctions.py b/NAL/Functions/ExtendedBooleanFunctions.py
new file mode 100644
index 0000000..5eada78
--- /dev/null
+++ b/NAL/Functions/ExtendedBooleanFunctions.py
@@ -0,0 +1,6 @@
+import numpy as np
+
+Not = lambda x: (1-x)
+And = lambda *x: np.prod(x)
+Or = lambda *x: 1 - np.prod(1-np.array(x))
+Average = lambda *x: np.mean(x)
\ No newline at end of file
diff --git a/NAL/Functions/StampFunctions.py b/NAL/Functions/StampFunctions.py
new file mode 100644
index 0000000..941d06e
--- /dev/null
+++ b/NAL/Functions/StampFunctions.py
@@ -0,0 +1,26 @@
+from typing import Union
+from Config import Config
+from Narsese import Stamp
+from copy import deepcopy
+from Narsese import Connector, Copula
+
+
+_temporal_interval = {
+    Connector.SequentialEvents: Config.temporal_duration,
+    Copula.PredictiveImplication: Config.temporal_duration,
+    Copula.PredictiveEquivalence: Config.temporal_duration,
+    Copula.RetrospectiveImplication: -Config.temporal_duration,
+}
+
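+# For example, merging the stamps of two premises across a PredictiveImplication
+# shifts the derived occurrence time forward by `Config.temporal_duration`
+# cycles, and across a RetrospectiveImplication shifts it backward;
+# `reverse_order` in `Stamp_merge` below flips the sign of this interval.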
+def Stamp_merge(stamp1: Stamp, stamp2: Stamp, order_mark: Union[Copula, Connector]=None, reverse_order=False, t_bias=0):
+    stamp: Stamp = deepcopy(stamp1)
+    if stamp is not None:
+        stamp.extend_evidenital_base(stamp2.evidential_base)
+        if not stamp1.is_eternal and not stamp2.is_eternal:
+            stamp.t_occurrence = max(stamp1.t_occurrence, stamp2.t_occurrence)
+        if not stamp1.is_eternal:
+            # occurrence time interval
+            interval = _temporal_interval.get(order_mark, 0)
+            if reverse_order: interval = -interval
+            stamp.t_occurrence += interval + t_bias
+    return stamp
\ No newline at end of file
diff --git a/NAL/Functions/TemporalFunctions.py b/NAL/Functions/TemporalFunctions.py
new file mode 100644
index 0000000..d5badc1
--- /dev/null
+++ b/NAL/Functions/TemporalFunctions.py
@@ -0,0 +1,48 @@
+
+from Narsese import Truth
+from .UncertaintyMappingFunctions import w_to_c
+
+
+def project(truth: Truth, t_source: int, t_current: int, t_target: int):
+    '''
+    Reference:
+    [1] OpenNARS 3.1.0 TruthFunctions.java line 492~495:
+        ```
+        public static final float temporalProjection(final long sourceTime, final long targetTime, final long currentTime, Parameters param) {
+            final double a = 100000.0 * param.PROJECTION_DECAY; //projection less strict as we changed in v2.0.0 10000.0 slower decay than 100000.0
+            return 1.0f - abs(sourceTime - targetTime) / (float) (abs(sourceTime - currentTime) + abs(targetTime - currentTime) + a);
+        }
+        ```
+    [2] Hammer, Patrick, Tony Lofthouse, and Pei Wang. "The OpenNARS implementation of the non-axiomatic reasoning system." International conference on artificial general intelligence. Springer, Cham, 2016.
+
+        Section 5. Projection and Eternalization
+
+        $$k_c = \frac{|t_B - t_T|}{|t_B - t_C| + |t_T - t_C|}$$
+
+        $$c_{new} = (1 - k_c) * c_{old}$$
+    '''
+    k_c = abs(t_source - t_target) / (abs(t_source - t_current) + abs(t_target - t_current))
+    c_new = (1 - k_c) * truth.c
+    return Truth(truth.f, c_new, truth.k)
+
+
+def eternalize(truth: Truth):
+    '''
+    Reference:
+    [1] OpenNARS 3.1.0 TruthFunctions.java line 485~490:
+        ```
+        public static final EternalizedTruthValue eternalize(final TruthValue v1, Parameters narParameters) {
+            final float f1 = v1.getFrequency();
+            final double c1 = v1.getConfidence();
+            final double c = w2c(c1, narParameters);
+            return new EternalizedTruthValue(f1, c, narParameters);
+        }
+        ```
+    [2] Hammer, Patrick, Tony Lofthouse, and Pei Wang. "The OpenNARS implementation of the non-axiomatic reasoning system." International conference on artificial general intelligence. Springer, Cham, 2016.
+
+        Section 5. Projection and Eternalization
+
+        $$c_{eternal} = \frac{1}{k + c_{temporal}}$$
+
+    TODO: the two treatments of eternalization differ. Which is right? The first one is used for the moment, because it seems more reasonable.
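+
+    With the adopted definition [1], e.g. k = 1 and a temporal confidence
+    c = 0.9 give c_eternal = w_to_c(0.9, 1) = 0.9/(0.9 + 1) ≈ 0.47, while the
+    frequency is kept unchanged.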
+ ''' + return Truth(truth.f, w_to_c(truth.c, truth.k), truth.k) \ No newline at end of file diff --git a/NAL/Functions/Tools.py b/NAL/Functions/Tools.py new file mode 100644 index 0000000..4036a08 --- /dev/null +++ b/NAL/Functions/Tools.py @@ -0,0 +1,193 @@ +# from ...Narsese import Compound + +# def compound_remove_components + +from typing import Union +import Global +from Config import Config, Enable +from NAL.Functions.TemporalFunctions import eternalize, project +from Narsese import Budget +from Narsese import Sentence, Judgement, Truth, Task +from copy import deepcopy +# import Config, Global +from math import sqrt +from Narsese import Sentence, Stamp +from Narsese import TRUE, FALSE, UNSURE +from Narsese._py.Sentence import Goal, Quest, Question +from Narsese._py.Task import Belief +from Narsese._py.Term import Term + +def truth_to_quality(truth: Truth) -> float: + return max(truth.e, (1 - truth.e)*0.75); + +def distribute_budget_among_links(budget: Budget, n_links: int) -> Budget: + return Budget(budget.priority/sqrt(n_links), budget.durability, budget.quality) + +def project_truth(premise1: Union[Judgement, Goal, Question, Quest], premise2: Union[Judgement, Goal]): + ''' + project the truth of the belief to that of the task. + Ref: + [1] OpenNARS 3.0.4 Sentence.java line 362~380 + ``` + public TruthValue projectionTruth(final long targetTime, final long currentTime, Memory mem) { + TruthValue newTruth = null; + if (!stamp.isEternal()) { + newTruth = TruthFunctions.eternalize(truth, mem.narParameters); + if (targetTime != Stamp.ETERNAL) { + final long occurrenceTime = stamp.getOccurrenceTime(); + final float factor = TruthFunctions.temporalProjection(occurrenceTime, targetTime, currentTime, mem.narParameters); + final double projectedConfidence = factor * truth.getConfidence(); + if (projectedConfidence > newTruth.getConfidence()) { + newTruth = new TruthValue(truth.getFrequency(), projectedConfidence, mem.narParameters); + } + } + } + if (newTruth == null) newTruth = truth.clone(); + return newTruth; + } + ``` + [2] Hammer, Patrick, Tony Lofthouse, and Pei Wang. "The OpenNARS implementation of the non-axiomatic reasoning system." International conference on artificial general intelligence. Springer, Cham, 2016. + + The two methods seem different. I adopt the second one. + ''' + truth = premise2.truth + if not premise2.is_eternal: + if not premise1.is_eternal: + t_target = premise1.stamp.t_occurrence + t_source = premise2.stamp.t_occurrence + truth = project(truth, t_source, Global.time, t_target) + truth = eternalize(truth) + return truth + + + +def calculate_solution_quality(s_in: Sentence, s_solution: Sentence, rate_by_confidence: bool=True): + ''' + Evaluate the quality of the judgment as a solution to a problem + + Ref: OpenNARS 3.1.0 line 262~286; Source Code: + ``` + public static float solutionQuality(final boolean rateByConfidence, final Task probT, final Sentence solution, final Memory memory, final Timable time) { + final Sentence problem = probT.sentence; + + if ((probT.sentence.punctuation != solution.punctuation && solution.term.hasVarQuery()) || !matchingOrder(problem.getTemporalOrder(), solution.getTemporalOrder())) { + return 0.0F; + } + + TruthValue truth = solution.truth; + if (problem.getOccurenceTime()!=solution.getOccurenceTime()) { + truth = solution.projectionTruth(problem.getOccurenceTime(), time.time(), memory); + } + + //when the solutions are comparable, we have to use confidence!! else truth expectation. 
+ //this way negative evidence can update the solution instead of getting ignored due to lower truth expectation. + //so the previous handling to let whether the problem has query vars decide was wrong. + if (!rateByConfidence) { + /* + * just some function that decreases quality of solution if it is complex, and increases if it has a high truth expecation + */ + + return (float) (truth.getExpectation() / Math.sqrt(Math.sqrt(Math.sqrt(solution.term.getComplexity()*memory.narParameters.COMPLEXITY_UNIT)))); + } else { + return (float)truth.getConfidence(); + } + } + ``` + Args: + s_in (Sentence): the sentence in an input task. + s_solution (Sentence): the sentence in the memory for solving the task. + Returns: + quality (float): the quality of the solution. + ''' + + if ( + (s_in.punct != s_solution.punct and s_solution.term.has_qvar) #or not temporal_matching_order(s_in, s_solution) + ): + return 0.0 + + truth = s_solution.truth + + t_occur_in = s_in.stamp.t_occurrence + t_occur_solution = s_solution.stamp.t_occurrence + if t_occur_solution != t_occur_in: + truth = project_truth(s_in, s_solution) + + # When the solutions are comparable, we have to use confidence!! else truth expectation. This way negative evidence can update the solution instead of getting ignored due to lower truth expectation. So the previous handling to let whether the problem has query vars decide was wrong. + if not rate_by_confidence: + # Just some function that decreases quality of solution if it is complex, and increases if it has a high truth expecation + # raise "what does `Config.complexity_unit` mean?" + return truth.e / sqrt(sqrt(sqrt(s_solution.term.complexity*Config.complexity_unit))) + else: + return truth.c + + +# def temporal_matching_order(s1: Sentence, s2: Sentence): +# if Enable.temporal_rasoning: +# # raise 'Eliminate this line.' 
+#         order1 = s1.temporal_order
+#         order2 = s2.temporal_order
+#         return (order1 == order2) or (order1 == TemporalOrder.NONE) or (order2 == TemporalOrder.NONE)
+#     else:
+#         return True
+
+'''
+The rules out of the book *Non-Axiomatic-Logic*
+'''
+def revisible(task: Task, belief: Task):
+    ''' Check whether two sentences can be used in revision
+    Ref: OpenNARS 3.1.0 LocalRules.java line 91~106; Source Code:
+    ```
+    public static boolean revisible(final Sentence s1, final Sentence s2, Parameters narParameters) {
+        if(!s1.isEternal() && !s2.isEternal() && Math.abs(s1.getOccurenceTime() - s2.getOccurenceTime()) > narParameters.REVISION_MAX_OCCURRENCE_DISTANCE) {
+            return false;
+        }
+        if(s1.term.term_indices != null && s2.term.term_indices != null) {
+            for(int i=0;i<s1.term.term_indices.length;i++) {
+                if(s1.term.term_indices[i] != s2.term.term_indices[i]) {
+                    return false;
+                }
+            }
+        }
+        ...
+    }
+    ```
+    '''
+    if not task.sentence.is_eternal and not belief.sentence.is_eternal and abs(task.stamp.t_occurrence - belief.stamp.t_occurrence) > Config.revision_max_occurence_distance:
+        return False
+
+    # if(s1.term.term_indices != null and s2.term.term_indices != null):
+    #     for(int i=0;i<s1.term.term_indices.length;i++): ...
+    return True
+
+
+def truth_to_term(truth: Truth) -> Term:
+    '''Map a frequency to a truth-valued term (TRUE/FALSE/UNSURE).'''
+    f = truth.f
+    if f > 0.66: return TRUE
+    elif f < 0.33: return FALSE
+    else: return UNSURE
diff --git a/NAL/Functions/TruthValueFunctions.py b/NAL/Functions/TruthValueFunctions.py
new file mode 100644
index 0000000..faa39f6
--- /dev/null
+++ b/NAL/Functions/TruthValueFunctions.py
@@ -0,0 +1,100 @@
+
+from typing import Callable
+from Narsese import Truth, truth_analytic
+from .ExtendedBooleanFunctions import *
+# from .Tools import *
+from .UncertaintyMappingFunctions import *
+TruthFunction = Callable[[Truth, Truth], Truth]
+TruthImmedFunction = Callable[[Truth], Truth]
+
+'''local inference'''
+# F_rev
+F_rev = F_revision = lambda w_p_1, w_p_2, w_m_1, w_m_2: (w_p_1+w_p_2, w_m_1+w_m_2) # return: w+, w-
+
+def Truth_revision(truth1: Truth, truth2: Truth):
+    w_p_1 = fc_to_w_plus(truth1.f, truth1.c, truth1.k)
+    w_p_2 = fc_to_w_plus(truth2.f, truth2.c, truth2.k)
+    w_m_1 = fc_to_w_minus(truth1.f, truth1.c, truth1.k)
+    w_m_2 = fc_to_w_minus(truth2.f, truth2.c, truth2.k)
+    w_p, w_m = F_revision(w_p_1, w_p_2, w_m_1, w_m_2)
+    truth = truth_from_w(w_p, w_m+w_p, truth1.k)
+    return truth
+
+# F_exp
+F_exp = F_expectation = lambda f, c: (c*(f - 0.5) + 0.5) # return: e
+
+# F_dec
+F_dec = F_decision = lambda p, d: p*(d - 0.5) # return: g
+
+'''immediate inference'''
+# F_neg
+F_neg = F_negation = lambda w_plus, w_minus: (w_minus, w_plus) # return: w+, w-
+def Truth_negation(truth: Truth) -> Truth:
+    k = truth.k
+    w_plus, w_minus = F_negation(*w_from_truth(truth))
+    w = w_plus + w_minus
+    return Truth(w_to_f(w_plus, w), w_to_c(w, k), k)
+
+# F_cnv
+F_cnv = F_conversion = lambda f, c: (And(f, c), 0) # return: w+, w-
+def Truth_conversion(truth: Truth) -> Truth:
+    w_plus, w_minus = F_conversion(truth.f, truth.c)
+    return truth_from_w(w_plus, w_plus + w_minus, truth.k)
+
+
+# F_cnt
+F_cnt = F_contraposition = lambda f, c: (0, And(Not(f), c)) # return: w+, w-
+def Truth_contraposition(truth: Truth) -> Truth:
+    w_plus, w_minus = F_contraposition(truth.f, truth.c)
+    return truth_from_w(w_plus, w_plus + w_minus, truth.k)
+
+'''strong syllogism'''
+# F_ded
+F_ded = F_deduction = lambda f1, c1, f2, c2: (And(f1, f2), And(f1, f2, c1, c2)) # return: f, c
+Truth_deduction: TruthFunction = lambda truth1, truth2: Truth(*F_deduction(truth1.f, truth1.c, truth2.f, truth2.c), truth1.k)
+
+# F_ana
+F_ana = F_analogy = lambda f1, c1, f2, c2: (And(f1, f2), And(f2, c1, c2)) # return: f, c
+Truth_analogy: TruthFunction = lambda truth1, truth2: Truth(*F_analogy(truth1.f, truth1.c, truth2.f, truth2.c), truth1.k)
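+
+# For example, under F_ded two premises with %1.0;0.9% each give
+# f = 1.0*1.0 = 1.0 and c = 1.0*1.0*0.9*0.9 = 0.81, i.e. a conclusion %1.0;0.81%.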
+
+# F_res
+F_res = F_resemblance = lambda f1, c1, f2, c2: (And(f1, f2), And(Or(f1, f2), c1, c2)) # return: f, c
+Truth_resemblance: TruthFunction = lambda truth1, truth2: Truth(*F_resemblance(truth1.f, truth1.c, truth2.f, truth2.c), truth1.k)
+
+'''weak syllogism'''
+# F_abd
+F_abd = F_abduction = lambda f1, c1, f2, c2: (And(f1, f2, c1, c2), And(f1, c1, c2)) # return: w+, w
+Truth_abduction: TruthFunction = lambda truth1, truth2: truth_from_w(*F_abduction(truth1.f, truth1.c, truth2.f, truth2.c), truth1.k)
+
+# F_ind
+F_ind = F_induction = lambda f1, c1, f2, c2: (And(f1, f2, c1, c2), And(f2, c1, c2)) # return: w+, w
+Truth_induction: TruthFunction = lambda truth1, truth2: truth_from_w(*F_induction(truth1.f, truth1.c, truth2.f, truth2.c), truth1.k)
+
+# F_exe
+F_exe = F_exemplification = lambda f1, c1, f2, c2: (And(f1, f2, c1, c2), And(f1, f2, c1, c2)) # return: w+, w
+Truth_exemplification: TruthFunction = lambda truth1, truth2: truth_from_w(*F_exemplification(truth1.f, truth1.c, truth2.f, truth2.c), truth1.k)
+# def Truth_exemplification(truth1: Truth, truth2: Truth) -> Truth:
+#     return truth_from_w(*F_exemplification(truth1.f, truth1.c, truth2.f, truth2.c), truth1.k)
+
+# F_com
+F_com = F_comparison = lambda f1, c1, f2, c2: (And(f1, f2, c1, c2), And(Or(f1, f2), c1, c2)) # return: w+, w
+Truth_comparison: TruthFunction = lambda truth1, truth2: truth_from_w(*F_comparison(truth1.f, truth1.c, truth2.f, truth2.c), truth1.k)
+
+'''term composition'''
+# F_int
+F_int = F_intersection = lambda f1, c1, f2, c2: (And(f1, f2), And(c1, c2)) # return: f, c
+Truth_intersection: TruthFunction = lambda truth1, truth2: Truth(*F_intersection(truth1.f, truth1.c, truth2.f, truth2.c), truth1.k)
+
+# F_uni
+F_uni = F_union = lambda f1, c1, f2, c2: (Or(f1, f2), And(c1, c2)) # return: f, c
+Truth_union: TruthFunction = lambda truth1, truth2: Truth(*F_union(truth1.f, truth1.c, truth2.f, truth2.c), truth1.k)
+
+# F_dif
+F_dif = F_difference = lambda f1, c1, f2, c2: (And(f1, Not(f2)), And(c1, c2)) # return: f, c
+Truth_difference: TruthFunction = lambda truth1, truth2: Truth(*F_difference(truth1.f, truth1.c, truth2.f, truth2.c), truth1.k)
+
+# F_dcj {(&&, A, B).; B.} |- A.
+Truth_deconjuntion: TruthFunction = lambda truth1, truth2: Truth_negation(Truth_deduction(Truth_intersection(Truth_negation(truth1), truth2), truth_analytic))
+
+# F_ddj {(||, A, B).; B.} |- A.
+Truth_dedisjunction: TruthFunction = lambda truth1, truth2: Truth_deduction(Truth_intersection(truth1, Truth_negation(truth2)), truth_analytic)
\ No newline at end of file
diff --git a/NAL/Functions/UncertaintyMappingFunctions.py b/NAL/Functions/UncertaintyMappingFunctions.py
new file mode 100644
index 0000000..4f2501d
--- /dev/null
+++ b/NAL/Functions/UncertaintyMappingFunctions.py
@@ -0,0 +1,18 @@
+from Narsese import Truth
+
+fc_to_w_plus = lambda f, c, k: k*f*c/(1-c)
+fc_to_w = lambda f, c, k: k*c/(1-c)
+fc_to_w_minus = lambda f, c, k: k*(1-f)*c/(1-c)
+
+w_to_f = lambda w_plus, w: w_plus/w
+w_to_c = lambda w, k: w/(w+k)
+
+# lu_to_w_plus = lambda
+
+def truth_from_w(w_plus, w, k):
+    f, c = (w_to_f(w_plus, w), w_to_c(w, k)) if w != 0 else (0.5, 0.0)
+    return Truth(f, c, k)
+
+def w_from_truth(truth: Truth):
+    f, c, k = truth.f, truth.c, truth.k
+    return fc_to_w_plus(f, c, k), fc_to_w_minus(f, c, k)
\ No newline at end of file
diff --git a/NAL/Functions/__init__.py b/NAL/Functions/__init__.py
new file mode 100644
index 0000000..227c5ca
--- /dev/null
+++ b/NAL/Functions/__init__.py
@@ -0,0 +1,7 @@
+from .ExtendedBooleanFunctions import *
+from .TruthValueFunctions import *
+from .UncertaintyMappingFunctions import *
+from .BudgetFunctions import *
+from .TemporalFunctions import *
+from .StampFunctions import *
+from .DesireValueFunctions import *
\ No newline at end of file
diff --git a/NAL/Inference/CompositionalRules.py b/NAL/Inference/CompositionalRules.py
new file mode 100644
index 0000000..7299216
--- /dev/null
+++ b/NAL/Inference/CompositionalRules.py
@@ -0,0 +1,356 @@
+'''
+Composition rules
+'''
+from Narsese import Copula, Statement, Compound, Connector, Task, Belief, Budget, Stamp, Truth
+
+from ..Functions import *
+from Narsese import Judgement, Goal, Quest, Question
+
+'''First-order with common subject'''
+def intersection_extension(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    j1: <M --> T1>. %f1;c1%
+    j2: <M --> T2>. %f2;c2%
+    |-
+    j3: <M --> (&, T1, T2)>. %F_int%
+    '''
+    premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence)
+
+    stamp_task: Stamp = task.stamp
+    stamp_belief: Stamp = belief.stamp
+
+    stat1: Statement = premise1.term
+    stat2: Statement = premise2.term
+    stamp = Stamp_merge(stamp_task, stamp_belief)
+
+    compound: Compound = Compound.ExtensionalIntersection(stat1.predicate, stat2.predicate)
+    statement = Statement(stat1.subject, Copula.Inheritance, compound)
+
+    if task.is_judgement:
+        truth = Truth_intersection(premise1.truth, premise2.truth)
+        budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink)
+        sentence_derived = Judgement(statement, stamp, truth)
+    else: raise ValueError("Invalid case.")
+
+    return Task(sentence_derived, budget)
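+
+# For example, from <swan --> swimmer>. %0.9;0.9% and <swan --> bird>. %0.9;0.9%,
+# the rule above derives <swan --> (&, swimmer, bird)>. with F_int giving
+# f = 0.9*0.9 = 0.81 and c = 0.9*0.9 = 0.81 (terms here are illustrative).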
+
+def union_extension(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    j1: <M --> T1>. %f1;c1%
+    j2: <M --> T2>. %f2;c2%
+    |-
+    j3: <M --> (|, T1, T2)>. %F_uni%
+    '''
+    premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence)
+
+    stamp_task: Stamp = task.stamp
+    stamp_belief: Stamp = belief.stamp
+
+    stat1: Statement = premise1.term
+    stat2: Statement = premise2.term
+    stamp = Stamp_merge(stamp_task, stamp_belief)
+
+    compound: Compound = Compound.IntensionalIntersection(stat1.predicate, stat2.predicate)
+    statement = Statement(stat1.subject, Copula.Inheritance, compound)
+
+    if task.is_judgement:
+        truth = Truth_union(premise1.truth, premise2.truth)
+        budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink)
+        sentence_derived = Judgement(statement, stamp, truth)
+    else: raise ValueError("Invalid case.")
+
+    return Task(sentence_derived, budget)
+
+def difference_extension(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    j1: <M --> T1>. %f1;c1%
+    j2: <M --> T2>. %f2;c2%
+    |-
+    j3: <M --> (-, T1, T2)>. %F_dif%
+    '''
+    premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence)
+
+    stamp_task: Stamp = task.stamp
+    stamp_belief: Stamp = belief.stamp
+
+    stat1: Statement = premise1.term
+    stat2: Statement = premise2.term
+    stamp = Stamp_merge(stamp_task, stamp_belief)
+
+    compound: Compound = Compound.ExtensionalDifference(stat1.predicate, stat2.predicate)
+    statement = Statement(stat1.subject, Copula.Inheritance, compound)
+
+    if task.is_judgement:
+        truth = Truth_difference(premise1.truth, premise2.truth)
+        budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink)
+        sentence_derived = Judgement(statement, stamp, truth)
+    else: raise ValueError("Invalid case.")
+
+    return Task(sentence_derived, budget)
+
+def difference_extension2(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    j1: <M --> T1>. %f1;c1%
+    j2: <M --> T2>. %f2;c2%
+    |-
+    j3: <M --> (-, T2, T1)>. %F_dif'%
+    '''
+    return difference_extension(task, belief, budget_tasklink, budget_termlink, inverse_premise=True)
+
+
+'''First-order with common predicate'''
+def intersection_intension(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    j1: <T1 --> M>. %f1;c1%
+    j2: <T2 --> M>. %f2;c2%
+    |-
+    j3: <(|, T1, T2) --> M>. %F_int%
+    '''
+    premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence)
+
+    stamp_task: Stamp = task.stamp
+    stamp_belief: Stamp = belief.stamp
+
+    stat1: Statement = premise1.term
+    stat2: Statement = premise2.term
+    stamp = Stamp_merge(stamp_task, stamp_belief)
+
+    compound: Compound = Compound.IntensionalIntersection(stat1.subject, stat2.subject)
+    statement = Statement(compound, Copula.Inheritance, stat1.predicate)
+
+    if task.is_judgement:
+        truth = Truth_intersection(premise1.truth, premise2.truth)
+        budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink)
+        sentence_derived = Judgement(statement, stamp, truth)
+    else: raise ValueError("Invalid case.")
+
+    return Task(sentence_derived, budget)
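+
+# Note the duality with the common-subject rules above: with a common predicate
+# the connector flips, so F_int here composes an intensional intersection, e.g.
+# <bird --> M>. and <robin --> M>. compose into <(|, bird, robin) --> M>.
+# (illustrative terms).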
+
+def union_intension(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    j1: <T1 --> M>. %f1;c1%
+    j2: <T2 --> M>. %f2;c2%
+    |-
+    j3: <(&, T1, T2) --> M>. %F_uni%
+    '''
+    premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence)
+
+    stamp_task: Stamp = task.stamp
+    stamp_belief: Stamp = belief.stamp
+
+    stat1: Statement = premise1.term
+    stat2: Statement = premise2.term
+    stamp = Stamp_merge(stamp_task, stamp_belief)
+
+    compound: Compound = Compound.ExtensionalIntersection(stat1.subject, stat2.subject)
+    statement = Statement(compound, Copula.Inheritance, stat1.predicate)
+
+    if task.is_judgement:
+        truth = Truth_union(premise1.truth, premise2.truth)
+        budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink)
+        sentence_derived = Judgement(statement, stamp, truth)
+    else: raise ValueError("Invalid case.")
+
+    return Task(sentence_derived, budget)
+
+def difference_intension(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    j1: <T1 --> M>. %f1;c1%
+    j2: <T2 --> M>. %f2;c2%
+    |-
+    j3: <(~, T1, T2) --> M>. %F_dif%
+    '''
+    premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence)
+
+    stamp_task: Stamp = task.stamp
+    stamp_belief: Stamp = belief.stamp
+
+    stat1: Statement = premise1.term
+    stat2: Statement = premise2.term
+    stamp = Stamp_merge(stamp_task, stamp_belief)
+
+    compound: Compound = Compound.IntensionalDifference(stat1.subject, stat2.subject)
+    statement = Statement(compound, Copula.Inheritance, stat1.predicate)
+
+    if task.is_judgement:
+        truth = Truth_difference(premise1.truth, premise2.truth)
+        budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink)
+        sentence_derived = Judgement(statement, stamp, truth)
+    else: raise ValueError("Invalid case.")
+
+    return Task(sentence_derived, budget)
+
+def difference_intension2(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    j1: <T1 --> M>. %f1;c1%
+    j2: <T2 --> M>. %f2;c2%
+    |-
+    j3: <(~, T2, T1) --> M>. %F_dif'%
+    '''
+    return difference_intension(task, belief, budget_tasklink, budget_termlink, inverse_premise=True)
+
+
+'''Higher-order with common subject'''
+def conjunction_extension(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    j1: <M ==> T1>. %f1;c1%
+    j2: <M ==> T2>. %f2;c2%
+    |-
+    j3: <M ==> (&&, T1, T2)>. %F_int%
+    '''
+    premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence)
+
+    stamp_task: Stamp = task.stamp
+    stamp_belief: Stamp = belief.stamp
+
+    stat1: Statement = premise1.term
+    stat2: Statement = premise2.term
+    stamp = Stamp_merge(stamp_task, stamp_belief)
+
+    compound: Compound = Compound.Conjunction(stat1.predicate, stat2.predicate)
+    statement = Statement(stat1.subject, Copula.Implication, compound)
+
+    if task.is_judgement:
+        truth = Truth_intersection(premise1.truth, premise2.truth)
+        budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink)
+        sentence_derived = Judgement(statement, stamp, truth)
+    else: raise ValueError("Invalid case.")
+
+    return Task(sentence_derived, budget)
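+
+# For example, from <M ==> T1>. %1.0;0.9% and <M ==> T2>. %1.0;0.9% the rule
+# above derives <M ==> (&&, T1, T2)>. with F_int: f = 1.0*1.0 = 1.0 and
+# c = 0.9*0.9 = 0.81.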
+
+def disjunction_extension(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    j1: <M ==> T1>. %f1;c1%
+    j2: <M ==> T2>. %f2;c2%
+    |-
+    j3: <M ==> (||, T1, T2)>. %F_uni%
+    '''
+    premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence)
+
+    stamp_task: Stamp = task.stamp
+    stamp_belief: Stamp = belief.stamp
+
+    stat1: Statement = premise1.term
+    stat2: Statement = premise2.term
+    stamp = Stamp_merge(stamp_task, stamp_belief)
+
+    compound: Compound = Compound.Disjunction(stat1.predicate, stat2.predicate)
+    statement = Statement(stat1.subject, Copula.Implication, compound)
+
+    if task.is_judgement:
+        truth = Truth_union(premise1.truth, premise2.truth)
+        budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink)
+        sentence_derived = Judgement(statement, stamp, truth)
+    else: raise ValueError("Invalid case.")
+
+    return Task(sentence_derived, budget)
+
+'''Higher-order with common predicate'''
+def disjunction_intension(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    j1: <T1 ==> M>. %f1;c1%
+    j2: <T2 ==> M>. %f2;c2%
+    |-
+    j3: <(||, T1, T2) ==> M>. %F_int%
+    '''
+    premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence)
+
+    stamp_task: Stamp = task.stamp
+    stamp_belief: Stamp = belief.stamp
+
+    stat1: Statement = premise1.term
+    stat2: Statement = premise2.term
+    stamp = Stamp_merge(stamp_task, stamp_belief)
+
+    compound: Compound = Compound.Disjunction(stat1.subject, stat2.subject)
+    statement = Statement(compound, Copula.Implication, stat1.predicate)
+
+    if task.is_judgement:
+        truth = Truth_intersection(premise1.truth, premise2.truth)
+        budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink)
+        sentence_derived = Judgement(statement, stamp, truth)
+    else: raise ValueError("Invalid case.")
+
+    return Task(sentence_derived, budget)
+
+def conjunction_intension(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    j1: <T1 ==> M>. %f1;c1%
+    j2: <T2 ==> M>. %f2;c2%
+    |-
+    j3: <(&&, T1, T2) ==> M>. %F_uni%
+    '''
+    premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence)
+
+    stamp_task: Stamp = task.stamp
+    stamp_belief: Stamp = belief.stamp
+
+    stat1: Statement = premise1.term
+    stat2: Statement = premise2.term
+    stamp = Stamp_merge(stamp_task, stamp_belief)
+
+    compound: Compound = Compound.Conjunction(stat1.subject, stat2.subject)
+    statement = Statement(compound, Copula.Implication, stat1.predicate)
+
+    if task.is_judgement:
+        truth = Truth_union(premise1.truth, premise2.truth)
+        budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink)
+        sentence_derived = Judgement(statement, stamp, truth)
+    else: raise ValueError("Invalid case.")
+
+    return Task(sentence_derived, budget)
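+
+# Dual to disjunction_intension above: (&&, T1, T2) implies M whenever either
+# conjunct alone implies it, hence F_uni here: f = Or(f1, f2), c = And(c1, c2).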
+
+'''Higher-order composition'''
+def conjunstion_composition(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    j1: T1. %f1;c1%
+    j2: T2. %f2;c2%
+    |-
+    j3: (&&, T1, T2). %F_int%
+    '''
+    premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence)
+
+    stamp_task: Stamp = task.stamp
+    stamp_belief: Stamp = belief.stamp
+
+    stat1: Statement = premise1.term
+    stat2: Statement = premise2.term
+    stamp = Stamp_merge(stamp_task, stamp_belief)
+
+    statement = Compound.Conjunction(stat1, stat2)
+
+    if task.is_judgement:
+        truth = Truth_intersection(premise1.truth, premise2.truth)
+        budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink)
+        sentence_derived = Judgement(statement, stamp, truth)
+    else: raise ValueError("Invalid case.")
+
+    return Task(sentence_derived, budget)
+
+def disjunction_composition(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    j1: T1. %f1;c1%
+    j2: T2. %f2;c2%
+    |-
+    j3: (||, T1, T2). %F_uni%
+    '''
+    premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence)
+
+    stamp_task: Stamp = task.stamp
+    stamp_belief: Stamp = belief.stamp
+
+    stat1: Statement = premise1.term
+    stat2: Statement = premise2.term
+    stamp = Stamp_merge(stamp_task, stamp_belief)
+
+    statement = Compound.Disjunction(stat1, stat2)
+
+    if task.is_judgement:
+        truth = Truth_union(premise1.truth, premise2.truth)
+        budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink)
+        sentence_derived = Judgement(statement, stamp, truth)
+    else: raise ValueError("Invalid case.")
+
+    return Task(sentence_derived, budget)
+
diff --git a/NAL/Inference/ConditionalSyllogisticRules.py b/NAL/Inference/ConditionalSyllogisticRules.py
new file mode 100644
index 0000000..c7c7e6a
--- /dev/null
+++ b/NAL/Inference/ConditionalSyllogisticRules.py
@@ -0,0 +1,529 @@
+'''
+Conditional syllogism
+
+@ Author: Bowen XU
+@ Contact: bowen.xu@pku.edu.cn
+@ Update: 2021.11.7
+@ Comment:
+    The general form:
+        def syllogistic_rule(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): ...
+    The param `inverse_premise` means whether to swap the term in the task and the term in the belief as the two premises; for example, if the terms in the task and the belief are {<S --> M>, <M --> P>} and `inverse_premise` equals `True`, then the premises are {<M --> P>, <S --> M>}.
+    The param `inverse_copula` means whether to invert the order of the subject and the predicate in the task; for example, if the term in the task is <S --> M> and `inverse_copula` equals `True`, then premise1 is <M --> S>.
+ +''' +from Narsese import Copula, Statement, Compound, Connector, Task, Belief, Budget, Stamp, Truth + +from ..Functions import F_deduction, F_analogy, F_comparison, F_abduction, F_induction, \ + fc_to_w_minus, fc_to_w_plus +from ..Functions import * +from Narsese import Judgement, Goal, Quest, Question + +''' +The Conditional Syllogistic Rules (Table B.2) + + J1 J2 J F function-name +------------------------------------------------------------------------------ +1 S S <=> P P F_ana analogy +2 S P S <=> P F_com comparison +3 S ==> P S P F_ded deduction +4 P ==> S S P F_abd abduction +5 P S S ==> P F_ind induction +6 (&&, C, S) ==> P S C ==> P F_ded deduction_compound_eliminate +7 (&&, C, S) ==> P C ==> P S F_abd abduction_compound_eliminate +8 C ==> P S (&&, C, S) ==> P F_ind induction_compound_compose +9 (&&, C, S) ==> P M ==> S (&&, C, M) ==> P F_ded deduction_compound_replace +10 (&&, C, S) ==> P (&&, C, M) ==> P M ==> S F_abd abduction_compound_eliminate2 +11 (&&, C, M) ==> P M ==> S (&&, C, S) ==> P F_ind induction_compound_replace + +Additional Rules, Ref: OpenNARS 3.0.4 SyllogysticRules.java + + J1 J2 J F +------------------------------------------------------------------------------ +12 (&&, C, S) <=> P S C <=> P F_ana analogy_compound_replace + +''' + +'''Compound irrelevant rules''' + +def deduction(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + j1: P>. %f1;c1% + j2: S. %f2;c2% + |- + j3: P. %F_ded% + ''' + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) + + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + + stat1: Statement = premise1.term + stat2: Statement = premise2.term + stamp = Stamp_merge(stamp_task, stamp_belief, stat1.copula) + + statement = stat1.predicate + + if task.is_judgement: + truth = Truth_deduction(premise1.truth, premise2.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + elif task.is_goal: + truth = Desire_induction(task.truth, belief.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Goal(statement, stamp, truth) + elif task.is_question: + curiosity = None # TODO + budget = Budget_backward_weak(belief.truth, budget_tasklink, budget_termlink) + sentence_derived = Question(statement, stamp, curiosity) + elif task.is_quest: + curiosity = None # TODO + budget = Budget_backward_weak(belief.truth, budget_tasklink, budget_termlink) + sentence_derived = Quest(statement, stamp, curiosity) + else: raise "Invalid case." + + return Task(sentence_derived, budget) + + +def abduction(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + j1:
<P ==>
S>. %f1;c1% + j2: S. %f2;c2% + |- + j3: P. %F_abd% + ''' + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) + + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + + stat1: Statement = premise1.term + stat2: Statement = premise2.term + stamp = Stamp_merge(stamp_task, stamp_belief, stat1.copula, reverse_order=True) + + statement = stat1.subject + + if task.is_judgement: + truth = Truth_abduction(premise1.truth, premise2.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + elif task.is_goal: + truth = Desire_deduction(task.truth, belief.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Goal(statement, stamp, truth) + elif task.is_question: + curiosity = None # TODO + budget = Budget_backward_weak(belief.truth, budget_tasklink, budget_termlink) + sentence_derived = Question(statement, stamp, curiosity) + elif task.is_quest: + curiosity = None # TODO + budget = Budget_backward_weak(belief.truth, budget_tasklink, budget_termlink) + sentence_derived = Quest(statement, stamp, curiosity) + else: raise "Invalid case." + + return Task(sentence_derived, budget) + + +def induction(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + j1: P. %f1;c1% + j2: S. %f2;c2% + |- + j3: P>. %F_ind% + ''' + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) + + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + + stat1: Statement = premise1.term + stat2: Statement = premise2.term + stamp = Stamp_merge(stamp_task, stamp_belief) + + statement = Statement(stat2, Copula.Implication, stat1) + + if task.is_judgement: + truth = Truth_induction(premise1.truth, premise2.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + elif task.is_goal: + truth = Desire_deduction(task.truth, belief.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Goal(statement, stamp, truth) + elif task.is_question: + curiosity = None # TODO + budget = Budget_backward_weak(belief.truth, budget_tasklink, budget_termlink) + sentence_derived = Question(statement, stamp, curiosity) + elif task.is_quest: + curiosity = None # TODO + budget = Budget_backward_weak(belief.truth, budget_tasklink, budget_termlink) + sentence_derived = Quest(statement, stamp, curiosity) + else: raise "Invalid case." + + return Task(sentence_derived, budget) + + +def analogy(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + j1: S. %f1;c1% + j2: P>. %f2;c2% + |- + j3: P. 
%F_ana% + ''' + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) + + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + + stat2: Statement = premise2.term + stamp = Stamp_merge(stamp_task, stamp_belief) + + statement = stat2.predicate if not inverse_copula else stat2.subject + + if task.is_judgement: + truth = Truth_analogy(premise1.truth, premise2.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + elif task.is_goal: + truth = Desire_deduction(task.truth, belief.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Goal(statement, stamp, truth) + elif task.is_question: + curiosity = None # TODO + budget = Budget_backward_weak(belief.truth, budget_tasklink, budget_termlink) + sentence_derived = Question(statement, stamp, curiosity) + elif task.is_quest: + curiosity = None # TODO + budget = Budget_backward_weak(belief.truth, budget_tasklink, budget_termlink) + sentence_derived = Quest(statement, stamp, curiosity) + else: raise "Invalid case." + + return Task(sentence_derived, budget) + + +def comparison(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + j1: S. %f1;c1% + j2: P. %f2;c2% + |- + j3: P>. %F_com% + ''' + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) + + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + + stat1: Statement = premise1.term + stat2: Statement = premise2.term + stamp = Stamp_merge(stamp_task, stamp_belief) + + statement = Statement(stat1, Copula.Equivalence, stat2) + + if task.is_judgement: + truth = Truth_comparison(premise1.truth, premise2.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + elif task.is_goal: + truth = Desire_weak(task.truth, belief.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Goal(statement, stamp, truth) + elif task.is_question: + curiosity = None # TODO + budget = Budget_backward_weak(belief.truth, budget_tasklink, budget_termlink) + sentence_derived = Question(statement, stamp, curiosity) + elif task.is_quest: + curiosity = None # TODO + budget = Budget_backward_weak(belief.truth, budget_tasklink, budget_termlink) + sentence_derived = Quest(statement, stamp, curiosity) + else: raise "Invalid case." + + return Task(sentence_derived, budget) + + +'''Compound relevant rules''' + +def deduction_compound_eliminate(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + j1: <(&&, C, S, ...) ==> P>. %f1;c1% + j2: S. %f2;c2% + |- + j3: P>. 
%F_ded% + ''' + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) + + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + + stat1: Statement = premise1.term + stat2: Statement = premise2.term + stamp = Stamp_merge(stamp_task, stamp_belief) + + compound: Compound = stat1.subject + compound = compound - stat2 + statement = Statement(compound, Copula.Implication, stat1.predicate) + + if task.is_judgement: + truth = Truth_deduction(premise1.truth, premise2.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + elif task.is_goal: + truth = Desire_induction(task.truth, belief.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Goal(statement, stamp, truth) + elif task.is_question: + curiosity = None # TODO + budget = Budget_backward_weak(belief.truth, budget_tasklink, budget_termlink) + sentence_derived = Question(statement, stamp, curiosity) + elif task.is_quest: + curiosity = None # TODO + budget = Budget_backward_weak(belief.truth, budget_tasklink, budget_termlink) + sentence_derived = Quest(statement, stamp, curiosity) + else: raise "Invalid case." + + return Task(sentence_derived, budget) + + +def deduction_compound_replace(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + j1: <(&&, C, S, ...) ==> P>. %f1;c1% + j2: S>. %f2;c2% + |- + j3: <(&&, C, M, ...) ==> P>. %F_ded% + ''' + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) + + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + + stat1: Statement = premise1.term + stat2: Statement = premise2.term + stamp = Stamp_merge(stamp_task, stamp_belief) + + compound: Compound = stat1.subject + compound = compound.replace(stat2.predicate, stat2.subject) + statement = Statement(compound, Copula.Implication, stat1.predicate) + + if task.is_judgement: + truth = Truth_deduction(premise1.truth, premise2.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + elif task.is_goal: + truth = Desire_induction(task.truth, belief.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Goal(statement, stamp, truth) + elif task.is_question: + curiosity = None # TODO + budget = Budget_backward_weak(belief.truth, budget_tasklink, budget_termlink) + sentence_derived = Question(statement, stamp, curiosity) + elif task.is_quest: + curiosity = None # TODO + budget = Budget_backward_weak(belief.truth, budget_tasklink, budget_termlink) + sentence_derived = Quest(statement, stamp, curiosity) + else: raise "Invalid case." + + return Task(sentence_derived, budget) + + +def abduction_compound_eliminate(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + j1: <(&&, C, S, ...) ==> P>. %f1;c1% + j2: P>. %f2;c2% + |- + j3: (&&, S, ...). 
%F_abd% + ''' + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) + + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + + stat1: Statement = premise1.term + stat2: Statement = premise2.term + stamp = Stamp_merge(stamp_task, stamp_belief) + + compound: Compound = stat1.subject + statement = compound - stat2.subject + + if task.is_judgement: + truth = Truth_abduction(premise1.truth, premise2.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + elif task.is_goal: + truth = Desire_induction(task.truth, belief.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Goal(statement, stamp, truth) + elif task.is_question: + curiosity = None # TODO + budget = Budget_backward_weak(belief.truth, budget_tasklink, budget_termlink) + sentence_derived = Question(statement, stamp, curiosity) + elif task.is_quest: + curiosity = None # TODO + budget = Budget_backward_weak(belief.truth, budget_tasklink, budget_termlink) + sentence_derived = Quest(statement, stamp, curiosity) + else: raise "Invalid case." + + return Task(sentence_derived, budget) + + +def abduction_compound_eliminate2(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + j1: <(&&, C, S, ...) ==> P>. %f1;c1% + j2: <(&&, C, M, ...) ==> P>. %f2;c2% + |- + j3: S>. %F_abd% + ''' + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) + + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + + stat1: Statement = premise1.term + stat2: Statement = premise2.term + stamp = Stamp_merge(stamp_task, stamp_belief) + + compound1: Compound = stat1.subject + compound2: Compound = stat2.subject + statement = Statement(compound2-compound1, Copula.Implication, compound1-compound2) + + if task.is_judgement: + truth = Truth_abduction(premise1.truth, premise2.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + elif task.is_goal: + truth = Desire_induction(task.truth, belief.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Goal(statement, stamp, truth) + elif task.is_question: + curiosity = None # TODO + budget = Budget_backward_weak(belief.truth, budget_tasklink, budget_termlink) + sentence_derived = Question(statement, stamp, curiosity) + elif task.is_quest: + curiosity = None # TODO + budget = Budget_backward_weak(belief.truth, budget_tasklink, budget_termlink) + sentence_derived = Quest(statement, stamp, curiosity) + else: raise "Invalid case." + + return Task(sentence_derived, budget) + + +def induction_compound_compose(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + j1: P>. %f1;c1% + j2: S. %f2;c2% + |- + j3: <(&&, C, S) ==> P>. 
%F_ind% + ''' + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) + + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + + stat1: Statement = premise1.term + stat2: Statement = premise2.term + stamp = Stamp_merge(stamp_task, stamp_belief) + + compound: Compound = Compound.Conjunction(stat1.subject, stat2) + statement = Statement(compound, Copula.Implication, stat1.predicate) + + if task.is_judgement: + truth = Truth_induction(premise1.truth, premise2.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + elif task.is_goal: + truth = Desire_deduction(task.truth, belief.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Goal(statement, stamp, truth) + elif task.is_question: + curiosity = None # TODO + budget = Budget_backward_weak(belief.truth, budget_tasklink, budget_termlink) + sentence_derived = Question(statement, stamp, curiosity) + elif task.is_quest: + curiosity = None # TODO + budget = Budget_backward_weak(belief.truth, budget_tasklink, budget_termlink) + sentence_derived = Quest(statement, stamp, curiosity) + else: raise "Invalid case." + + return Task(sentence_derived, budget) + + +def induction_compound_replace(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + j1: <(&&, C, M, ...) ==> P>. %f1;c1% + j2: S>. %f2;c2% + |- + j3: <(&&, C, S, ...) ==> P>. %F_ind% + ''' + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) + + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + + stat1: Statement = premise1.term + stat2: Statement = premise2.term + stamp = Stamp_merge(stamp_task, stamp_belief) + + compound: Compound = stat1.subject + compound = compound.replace(stat2.subject, stat2.predicate) + statement = Statement(compound, Copula.Implication, stat1.predicate) + + + if task.is_judgement: + truth = Truth_induction(premise1.truth, premise2.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + elif task.is_goal: + truth = Desire_deduction(task.truth, belief.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Goal(statement, stamp, truth) + elif task.is_question: + curiosity = None # TODO + budget = Budget_backward_weak(belief.truth, budget_tasklink, budget_termlink) + sentence_derived = Question(statement, stamp, curiosity) + elif task.is_quest: + curiosity = None # TODO + budget = Budget_backward_weak(belief.truth, budget_tasklink, budget_termlink) + sentence_derived = Quest(statement, stamp, curiosity) + else: raise "Invalid case." 
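+ # Illustrative trace (hypothetical concrete terms, a sketch of rule 11 in the + # module table above): given j1: <(&&, C, M) ==> P>. and j2: <M ==> S>., the + # occurrence of M in the conjunction is replaced by S, deriving + # j3: <(&&, C, S) ==> P>. with truth computed by F_ind.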
+ + return Task(sentence_derived, budget) + + +def analogy_compound_replace(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + S + (&&, C, S) <=> P + |- + C <=> P + ''' + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) + + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + + stat1: Statement = premise1.term + stat2: Statement = premise2.term + stamp = Stamp_merge(stamp_task, stamp_belief) + + compound: Compound = stat2.subject + compound = compound - stat1 + statement = Statement(compound, stat2.copula, stat2.predicate) + + if task.is_judgement: + truth = Truth_analogy(premise1.truth, premise2.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + elif task.is_goal: + raise # TODO: if inverse the premises, the Desire function may diverse. + truth = Desire_deduction(task.truth, belief.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Goal(statement, stamp, truth) + elif task.is_question: + curiosity = None # TODO + budget = Budget_backward_weak(belief.truth, budget_tasklink, budget_termlink) + sentence_derived = Question(statement, stamp, curiosity) + elif task.is_quest: + curiosity = None # TODO + budget = Budget_backward_weak(belief.truth, budget_tasklink, budget_termlink) + sentence_derived = Quest(statement, stamp, curiosity) + else: raise "Invalid case." + + return Task(sentence_derived, budget) \ No newline at end of file diff --git a/NAL/Inference/ImmediateRules.py b/NAL/Inference/ImmediateRules.py new file mode 100644 index 0000000..0ec5d8b --- /dev/null +++ b/NAL/Inference/ImmediateRules.py @@ -0,0 +1,101 @@ +from Narsese._py.Sentence import Sentence +from ..Functions.TruthValueFunctions import * +from Narsese import Copula, Statement, Compound, Connector, Term, Task, Budget, Stamp +from ..Functions.BudgetFunctions import * + +from ..Functions import F_negation, F_conversion, F_contraposition, \ + fc_to_w_minus, fc_to_w_plus, w_to_f, w_to_c +from Narsese import Judgement, Truth, Goal, Quest, Question + +# TODO: P> |- P> OpenNARS 3.0.4 LocalRules.java line 424~441 + +def negation(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + S |- (--, S). %F_neg% + ''' + stamp_task: Stamp = task.stamp + premise: Sentence = task.sentence + + term_task = task.term + term_neg = Compound.Negation(term_task) + + stamp = stamp_task + if premise.is_judgement: + truth = Truth_negation(premise.truth) + sentence_derived = Judgement(term_neg, stamp, truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + elif premise.is_goal: + truth = Truth_negation(premise.truth) + sentence_derived = Goal(term_neg, stamp, truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + elif premise.is_question: + sentence_derived = Question(term_neg, stamp) + budget = Budget_backward_compound(premise.term, budget_tasklink, budget_termlink) + elif premise.is_quest: + sentence_derived = Quest(term_neg, stamp) + budget = Budget_backward_compound(premise.term, budget_tasklink, budget_termlink) + else: raise 'Invalid case.' 
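+ # Worked example (assumed truth values, for illustration): F_neg flips the + # frequency and keeps the confidence, so S. %0.90;0.90% derives + # (--, S). %0.10;0.90%.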
+ + return Task(sentence_derived, budget) + + +def conversion(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + P> |-
<P -->
S> + <S ==> P> |-
<P ==>
S> + ''' + stamp_task: Stamp = task.stamp + premise: Sentence = task.sentence + stat: Statement = premise.term + + subject = stat.predicate + predicate = stat.subject + statement = Statement(subject, stat.copula.reverse, predicate) + + stamp = stamp_task + if premise.is_judgement: + truth = Truth_conversion(premise.truth) + sentence_derived = Judgement(statement, stamp, truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + # elif premise.is_goal: + # truth = Truth_negation(premise.truth) + # sentence_derived = Goal(term_concept, stamp, truth) + # budget = Budget_forward(truth, budget_tasklink, budget_termlink) + # elif premise.is_question: + # sentence_derived = Question(term_concept, stamp) + # budget = Budget_backward_compound(premise.term, budget_tasklink, budget_termlink) + # elif premise.is_quest: + # sentence_derived = Quest(term_concept, stamp) + # budget = Budget_backward_compound(premise.term, budget_tasklink, budget_termlink) + else: raise 'Invalid case.' + + return Task(sentence_derived, budget) + + +def contraposition(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + < P> |- <(--, P) ==> (--, S)>>. %F_cnt% + ''' + stamp_task: Stamp = task.stamp + premise: Sentence = task.sentence + stat: Statement = premise.term + + subject = Compound.Negation(stat.predicate) + predicate = Compound.Negation(stat.subject) + statement = Statement(subject, stat.copula, predicate) + + stamp = stamp_task + if premise.is_judgement: + truth = Truth_contraposition(premise.truth) + sentence_derived = Judgement(statement, stamp, truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + elif premise.is_goal: + truth = Truth_negation(premise.truth) + sentence_derived = Goal(term_concept, stamp, truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + elif premise.is_question or premise.is_quest: + sentence_derived = Question(term_concept, stamp) + budget = Budget_backward_weak_compound(statement, budget_tasklink, budget_termlink) + else: raise 'Invalid case.' 
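+ # Hedged sketch of Truth_contraposition's behaviour: only negative evidence + # transfers (w+ = 0, w- built from and(not(f), c)), so the derived + # <(--, P) ==> (--, S)> judgement has frequency 0 and a weak confidence.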
+ + return Task(sentence_derived, budget) diff --git a/NAL/Inference/LocalRules.py b/NAL/Inference/LocalRules.py new file mode 100644 index 0000000..e9df3e9 --- /dev/null +++ b/NAL/Inference/LocalRules.py @@ -0,0 +1,64 @@ +from typing import List, Union +from NAL.Functions.BudgetFunctions import Budget_revision +from NAL.Functions.ExtendedBooleanFunctions import Or +from Narsese import Stamp, Task +from Narsese._py.Budget import Budget +from Narsese._py.Sentence import Goal, Quest, Question +from Narsese._py.Task import Belief +from ..Functions import Truth_revision +from Narsese import Sentence, Judgement, Truth +# from .TemporalRules import matching_order +from copy import deepcopy +import Global +from Config import Enable + +from NAL.Functions.Tools import calculate_solution_quality, truth_to_quality + +def revision(task: Task, belief: Task, budget_tasklink: Budget=None, budget_termlink: Budget=None): + premise1: Judgement = task.sentence + premise2: Judgement = belief.sentence + truth1 = premise1.truth + truth2 = premise2.truth + if Enable.temporal_rasoning: + # boolean useNewBeliefTerm = intervalProjection(nal, newBelief.getTerm(), oldBelief.getTerm(), beliefConcept.recent_intervals, newTruth); + raise + truth = Truth_revision(truth1, truth2) + budget, *_ = Budget_revision(task.budget, truth1, truth2, truth, budget_tasklink=budget_tasklink, budget_termlink=budget_termlink) + term = premise1.term + stamp: Stamp = deepcopy(task.sentence.stamp) # Stamp(Global.time, task.sentence.stamp.t_occurrence, None, (j1.stamp.evidential_base | j2.stamp.evidential_base)) + stamp.evidential_base.extend(premise2.evidential_base) + return Task(Judgement(term, stamp, truth), budget) + +def solution_question(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None): + question: Union[Question, Quest] = task.sentence + answer: Union[Judgement, Goal] = belief.sentence + answer_best = question.answer_best + if answer_best is None: question.answer_best = answer + else: + quality_new = calculate_solution_quality(question, answer) + quality_old = calculate_solution_quality(question, answer_best) + if quality_new <= quality_old: answer = None + else: question.answer_best = answer + + if answer is not None and question.answer_best is answer: + quality = calculate_solution_quality(question, answer, question.term.has_qvar) + # reward the belief + budget_answer = Budget(Or(task.budget.priority, quality), task.budget.durability, truth_to_quality(answer.truth)) + belief = Belief(Judgement(answer.term, answer.stamp, answer.truth), budget_answer) + + # de-prioritize the question + task.budget.priority = min(1-quality, task.budget.priority) # BUG: here, after setting the priority, the level of the task should change within a Bag. + + return belief if answer is not None else None + +def solution_query(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None): + ''' + sulution for query + ''' + # if task.is_query and task.term.equal(belief.term): # BUG: here, variable unification should be executed. 
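+ # Commentary (assumed semantics): a query reuses the question machinery, so + # this delegates to solution_question, which keeps the best answer so far, + # rewards the answering belief, and de-prioritizes the solved task; e.g. + # best_belief = solution_query(task, belief) # rewarded Belief, or None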
+ return solution_question(task, belief, budget_tasklink, budget_termlink) + + + +def solve_query(task: Task, belief: Task, budget_tasklink: Budget=None, budget_termlink: Budget=None): + raise "TODO" \ No newline at end of file diff --git a/NAL/Inference/SyllogisticRules.py b/NAL/Inference/SyllogisticRules.py new file mode 100644 index 0000000..3c2b3cf --- /dev/null +++ b/NAL/Inference/SyllogisticRules.py @@ -0,0 +1,555 @@ +''' +First-order syllogism & higher-order syllogism + +@ Author: Bowen XU +@ Contact: bowen.xu@pku.edu.cn +@ Update: 2021.11.6 +@ Comment: + The general form: + def syllogistic_rule(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): ... + The param `inverse_premise` means whether to inverse the order of term in the task and term in the belief as the two premises, for example, if the terms in the task and the belief are {M>, P>}, and the `inverse_premise` equals `True`, then the premises are {P>, M>}. + The param `inverse_copula` means whether to inverse the order of the subject and predicate in the task, for example, if the term in the task is M>, and the `inverse_copula` equals `True`, then the premise1 is S>. + The param `inverse_copula` means whether to inverse the order of the subject and predicate in the task, for example, if the term in the task is M>, and the `inverse_copula` equals `True`, then the premise1 is S>. + +''' +import math +from NAL.Functions.DesireValueFunctions import Desire_strong, Desire_weak, Desire_deduction, Desire_induction +from NAL.Functions.TruthValueFunctions import * +from NAL.Functions.BudgetFunctions import Budget_backward_weak, Budget_forward, Budget_inference, Budget_backward +from Narsese import Term, Copula, Statement, Truth, Task, Belief, Budget, Stamp +from Narsese import Punctuation, Sentence, Judgement, Goal, Question, Quest +from ..Functions import F_deduction, fc_to_w_minus, fc_to_w_plus +from copy import deepcopy +from ..Functions.StampFunctions import * + + + + +''' +strong syllogism +''' + +def deduction(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + First-order: + premise1: P> + premise2: M> + |- + conclusion: P> + Higher-order: + premise1: P> + premise2: M> + |- + conclusion: P> + ''' + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) + + punct_task: Punctuation = task.sentence.punct + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + truth_belief: Truth = belief.truth + + stat1: Statement = premise1.term + stat2: Statement = premise2.term + stamp = Stamp_merge(stamp_task, stamp_belief) + + statement = Statement(stat2.subject, stat1.copula, stat1.predicate) + + if punct_task.is_judgement: + truth = Truth_deduction(premise1.truth, premise2.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + elif punct_task.is_goal: + truth = Desire_weak(premise1.truth, premise2.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Goal(statement, stamp, truth) + elif punct_task.is_question: + curiosity = None # TODO + budget = Budget_backward_weak(truth_belief, budget_tasklink, budget_termlink) + sentence_derived = Question(statement, stamp, curiosity) + elif punct_task.is_quest: + curiosity = None # TODO + budget = 
Budget_backward(truth_belief, budget_tasklink, budget_termlink) + sentence_derived = Quest(statement, stamp, curiosity) + else: raise "Invalid case." + + return Task(sentence_derived, budget) + + +def analogy(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + First-order: + premise1: P> (inverse:
<P --> M>) + premise2: <S <-> M> (inverse: <S <-> M>) + |- + conclusion: <S --> P> (inverse: <P --> S>) + + premise1: <M --> P> (inverse: <P --> M>) + premise2: <M <-> S> (inverse: <M <-> S>) + |- + conclusion: <S --> P> (inverse: <P --> S>) + + Higher-order: + premise1: <M ==> P> (inverse: <P ==> M>) + premise2: <M <=> S> (inverse: <M <=> S>) + |- + conclusion: <S ==> P> (inverse: <P ==> S>) + + premise1: <M ==> P> (inverse: <P ==> M>) + premise2: <S <=> M> (inverse: <S <=> M>) + |- + conclusion: <S ==> P> (inverse: <P ==> S>) + + ------------- + + premise1: <M --> P> (inverse: <P --> M>) + premise2: <S <=> M> (inverse: <S <=> M>) + |- + conclusion: <S --> P> (inverse: <P --> S>) + + premise1: <M --> P> (inverse: <P --> M>) + premise2: <M <=> S> (inverse: <M <=> S>) + |- + conclusion: <S --> P> (inverse: <P -->
S>) + ''' + # premise1, premise2 = (task.sentence, belief.sentence) if belief.term.is_commutative else (belief.sentence, task.sentence) + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) # to ensure that the copula of premise1 is inheritence, and that the copula of premise2 is similarity. + + punct_task: Punctuation = task.sentence.punct + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + truth_belief: Truth = belief.truth + + stat1: Statement = premise1.term + stat2: Statement = premise2.term + stamp = Stamp_merge(stamp_task, stamp_belief) + + # TODO + if not inverse_copula: + if stat2.predicate == stat1.subject: + statement = Statement(stat2.subject, stat1.copula, stat1.predicate) + elif stat2.subject == stat1.subject: + statement = Statement(stat2.predicate, stat1.copula, stat1.predicate) + else: raise "Invalid case." + else: + if stat2.predicate == stat1.predicate: + statement = Statement(stat1.subject, stat1.copula, stat2.subject) + elif stat2.subject == stat1.predicate: + statement = Statement(stat1.subject, stat1.copula, stat2.predicate) + else: raise "Invalid case." + + + if punct_task.is_judgement: + truth = Truth_analogy(premise1.truth, premise2.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + elif punct_task.is_goal: + Desire_function = Desire_weak if task.term.is_commutative else Desire_strong + truth = Desire_function(premise1.truth, premise2.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Goal(statement, stamp, truth) + elif punct_task.is_question: + curiosity = None # TODO + budget = Budget_backward_weak(truth_belief, budget_tasklink, budget_termlink) + sentence_derived = Question(statement, stamp, curiosity) + elif punct_task.is_quest: + curiosity = None # TODO + budget = Budget_backward(truth_belief, budget_tasklink, budget_termlink) + sentence_derived = Quest(statement, stamp, curiosity) + else: raise "Invalid case." + + return Task(sentence_derived, budget) + + +def resemblance(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + First-order: + premise1: P> (inverse:
<P <-> M>) + premise2: <S <-> M> (inverse: <S <-> M>) + |- + conclusion: <S <-> P> (inverse: <P <-> S>) + + premise1: <M <-> P> (inverse: <P <-> M>) + premise2: <M <-> S> (inverse: <M <-> S>) + |- + conclusion: <S <-> P> (inverse: <P <-> S>) + + Higher-order: + premise1: <M <=> P> (inverse: <P <=> M>) + premise2: <M <=> S> (inverse: <M <=> S>) + |- + conclusion: <S <=> P> (inverse: <P <=> S>) + + premise1: <M <=> P> (inverse: <P <=> M>) + premise2: <S <=> M> (inverse: <S <=> M>) + |- + conclusion: <S <=> P> (inverse: <P <=> S>) + + ------------- + + premise1: <M <-> P> (inverse: <P <-> M>) + premise2: <M <=> S> (inverse: <M <=> S>) + |- + conclusion: <S <=> P> (inverse: <P <=> S>) + + premise1: <M <-> P> (inverse: <P <-> M>) + premise2: <S <=> M> (inverse: <S <=> M>) + |- + conclusion: <S <=> P> (inverse: <P <=> S>) + + ------------- + + premise1: <M <=> P> (inverse: <P <=> M>) + premise2: <S <-> M> (inverse: <S <-> M>) + |- + conclusion: <S <-> P> (inverse: <P <-> S>) + + premise1: <M <=> P> (inverse: <P <=> M>) + premise2: <M <-> S> (inverse: <M <-> S>) + |- + conclusion: <S <-> P> (inverse: <P <->
S>) + ''' + + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) # to ensure that the premise2 is a higher-order statement. + + punct_task: Punctuation = task.sentence.punct + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + truth_belief: Truth = belief.truth + + stat1: Statement = premise1.term + stat2: Statement = premise2.term + stamp = Stamp_merge(stamp_task, stamp_belief) + + if not inverse_copula: + if stat2.predicate == stat1.subject: + statement = Statement(stat2.subject, stat2.copula, stat1.predicate) + elif stat2.subject == stat1.subject: + statement = Statement(stat2.predicate, stat2.copula, stat1.predicate) + else: raise "Invalid case." + else: + if stat2.predicate == stat1.predicate: + statement = Statement(stat1.subject, stat2.copula, stat2.subject) + elif stat2.subject == stat1.predicate: + statement = Statement(stat1.subject, stat2.copula, stat2.predicate) + else: raise "Invalid case." + + if punct_task.is_judgement: + truth = Truth_resemblance(premise1.truth, premise2.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + elif punct_task.is_goal: + truth = Desire_strong(premise1.truth, premise2.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Goal(statement, stamp, truth) + elif punct_task.is_question: + curiosity = None # TODO + budget = Budget_backward(truth_belief, budget_tasklink, budget_termlink) + sentence_derived = Question(statement, stamp, curiosity) + elif punct_task.is_quest: + curiosity = None # TODO + budget = Budget_backward(truth_belief, budget_tasklink, budget_termlink) + sentence_derived = Quest(statement, stamp, curiosity) + else: raise "Invalid case." + + return Task(sentence_derived, budget) + + +''' +weak syllogism +''' + +def abduction(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + First-order: + premise1:
<P --> M> + premise2: <S --> M> + |- + conclusion: <S --> P> + Higher-order: + premise1: <P ==> M> + premise2: <S ==> M> + |- + conclusion: <S ==> P> + + premise1: <P ==> M> + premise2: <S --> M> + |- + conclusion: <S --> P> + + premise1: <P -->
M> + premise2: M> + |- + conclusion: P> + ''' + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) + + punct_task: Punctuation = task.sentence.punct + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + truth_belief: Truth = belief.truth + + stat1: Statement = premise1.term + stat2: Statement = premise2.term + stamp = Stamp_merge(stamp_task, stamp_belief) + + statement = Statement(stat2.subject, stat2.copula, stat1.subject) + + if punct_task.is_judgement: + truth = Truth_abduction(premise1.truth, premise2.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + elif punct_task.is_goal: + Desire_function = Desire_strong if not inverse_premise else Desire_weak + truth = Desire_function(premise1.truth, premise2.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Goal(statement, stamp, truth) + elif punct_task.is_question: + curiosity = None # TODO + Budget_function = Budget_backward if not inverse_premise else Budget_backward_weak + budget = Budget_function(truth_belief, budget_tasklink, budget_termlink) + sentence_derived = Question(statement, stamp, curiosity) + elif punct_task.is_quest: + curiosity = None # TODO + Budget_function = Budget_backward_weak if not inverse_premise else Budget_backward + budget = Budget_function(truth_belief, budget_tasklink, budget_termlink) + sentence_derived = Quest(statement, stamp, curiosity) + else: raise "Invalid case." + + return Task(sentence_derived, budget) + + +def induction(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + First-order: + premise1: P> + premise2: S> + |- + conclusion: P> + Higher-order: + premise1: P> + premise2: S> + |- + conclusion: P> + + premise1: P> + premise2: S> + |- + conclusion: P> + + premise1: P> + premise2: S> + |- + conclusion: P> + ''' + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) + + punct_task: Punctuation = task.sentence.punct + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + truth_belief: Truth = belief.truth + + stat1: Statement = premise1.term + stat2: Statement = premise2.term + stamp = Stamp_merge(stamp_task, stamp_belief) + + statement = Statement(stat2.predicate, stat1.copula, stat1.predicate) + + if punct_task.is_judgement: + truth = Truth_induction(premise1.truth, premise2.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + elif punct_task.is_goal: + Desire_function = Desire_strong if not inverse_premise else Desire_weak + truth = Desire_function(premise1.truth, premise2.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Goal(statement, stamp, truth) + elif punct_task.is_question: + curiosity = None # TODO + Budget_function = Budget_backward if not inverse_premise else Budget_backward_weak + budget = Budget_function(truth_belief, budget_tasklink, budget_termlink) + sentence_derived = Question(statement, stamp, curiosity) + elif punct_task.is_quest: + curiosity = None # TODO + Budget_function = Budget_backward_weak if not inverse_premise else Budget_backward + budget = Budget_function(truth_belief, budget_tasklink, budget_termlink) + sentence_derived = Quest(statement, stamp, curiosity) + else: 
raise "Invalid case." + + return Task(sentence_derived, budget) + + +def exemplification(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + First-order: + premise2:
<P --> M> + premise2: <M --> S> + |- + conclusion: <S --> P> + Higher-order: + premise1: <P ==> M> + premise2: <M ==> S> + |- + conclusion: <S ==> P> + + premise1: <P -->
M> + premise2: S> + |- + conclusion: P> + ''' + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) + + punct_task: Punctuation = task.sentence.punct + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + truth_belief: Truth = belief.truth + + stat1: Statement = premise1.term + stat2: Statement = premise2.term + stamp = Stamp_merge(stamp_task, stamp_belief) + + statement = Statement(stat2.predicate, stat1.copula.reverse, stat1.subject) + + if punct_task.is_judgement: + truth = Truth_exemplification(premise1.truth, premise2.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + elif punct_task.is_goal: + truth = Desire_weak(premise1.truth, premise2.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Goal(statement, stamp, truth) + elif punct_task.is_question: + curiosity = None # TODO + budget = Budget_backward_weak(truth_belief, budget_tasklink, budget_termlink) + sentence_derived = Question(statement, stamp, curiosity) + elif punct_task.is_quest: + curiosity = None # TODO + budget = Budget_backward(truth_belief, budget_tasklink, budget_termlink) + sentence_derived = Quest(statement, stamp, curiosity) + else: raise "Invalid case." + + return Task(sentence_derived, budget) + + +def comparison(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + '''''' + ''' + First-order: + premise1: P> (inverse_copula:
<P --> M>) + premise2: <M --> S> (inverse_copula: <S --> M>) + |- + conclusion: <S <-> P> + Higher-order: + premise1: <M ==> P> (inverse_copula: <P ==> M>) + premise2: <M ==> S> (inverse_copula: <S ==> M>) + |- + conclusion: <S <=> P> + + premise1: <M --> P> (inverse_copula: <P -->
M>) + premise2: S> (inverse_copula: M>) + |- + conclusion: P> + ''' + premise1, premise2 = (task.sentence, belief.sentence) if not (inverse_copula ^ inverse_premise) else (belief.sentence, task.sentence) + + punct_task: Punctuation = task.sentence.punct + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + truth_belief: Truth = belief.truth + + stat1: Statement = premise1.term + stat2: Statement = premise2.term + stamp = Stamp_merge(stamp_task, stamp_belief) + + copula = stat1.copula.symmetrize if not inverse_copula else stat2.copula.symmetrize + statement = Statement(stat2.predicate, copula, stat1.predicate) if not inverse_copula else Statement(stat2.subject, copula, stat1.subject) + + if punct_task.is_judgement: + truth = Truth_comparison(premise1.truth, premise2.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + elif punct_task.is_goal: + truth = Desire_strong(premise1.truth, premise2.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Goal(statement, stamp, truth) + elif punct_task.is_question: + curiosity = None # TODO + budget = Budget_backward(truth_belief, budget_tasklink, budget_termlink) + sentence_derived = Question(statement, stamp, curiosity) + elif punct_task.is_quest: + curiosity = None # TODO + budget = Budget_backward_weak(truth_belief, budget_tasklink, budget_termlink) + sentence_derived = Quest(statement, stamp, curiosity) + else: raise "Invalid case." + + return Task(sentence_derived, budget) + + +'''Other rules out of the book''' + +# This is a special rule, which is different from those of the syllogystic rules above. The form of the derived task differs between those derived from different types of sentence. +def reversion(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + '''''' + ''' + First-order: + For Judgement: + premise1: P>. + premise2:
<P --> S>. + |- + conclusion: <S <-> P>. + + For Goal, Question, and Quest + premise1: <S --> P>? + premise2: <P --> S>. + |- + conclusion: <S --> P>. + Higher-order: + For Judgement: + premise1: <S ==> P>. + premise2: <P ==> S>. + |- + conclusion: <S <=> P>. + + For Goal, Question, and Quest + premise1: <S ==> P>? + premise2: <P ==> S>. + |- + conclusion: <S ==> P>. + Proof: + For a judgement, + Given `<S --> P>.` and `<P --> S>.`, + according to the rule `intersection_composition` in `CompositionalRules`, that is `{T1. T2.} |- (&&, T1, T2). %F_int%`, + it is derived that `(&&, <S --> P>, <P --> S>)`. + According to the theorem `equivalence_theorem1` in `StructuralRules`, that is `<S <-> P> <=> (&&, <S --> P>, <P --> S>)`, + it is derived that <S <-> P>. + Hence, {<S --> P>. <P -->
S>.} |- P>. + This is essentially a 2-step inference. + + For other cases, the rule is obviously right. + ''' + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) + + # punct_task: Punctuation = task.sentence.punct + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + truth_belief: Truth = belief.truth + + stat1: Statement = premise1.term + stat2: Statement = premise2.term + stamp = Stamp_merge(stamp_task, stamp_belief) + + if task.is_judgement: + statement = Statement(stat2.subject, stat1.copula.symmetrize, stat1.subject) + truth = Truth_intersection(premise1.truth, premise2.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + elif task.is_goal or task.is_question or task.is_quest: + statement = task.term + truth = Truth_conversion(belief.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + else: raise "Invalid case." + + return Task(sentence_derived, budget) + + diff --git a/NAL/Inference/TemporalRules.py b/NAL/Inference/TemporalRules.py new file mode 100644 index 0000000..27fc1b8 --- /dev/null +++ b/NAL/Inference/TemporalRules.py @@ -0,0 +1,322 @@ +from Narsese import Task, Belief, Sentence, Judgement, Goal, Question, Quest +from Narsese import Statement, Term, Compound +from Narsese import Budget, Stamp +from Narsese import truth_analytic +from Config import Enable +from Narsese._py.Interval import Interval +from ..Functions import * +from copy import copy, deepcopy + +# TODO: Implement temporal rules here. +# Ref: OpenNARS 3.1.0 TemporalRules.java; OpenNARS 3.0.4 TemporalRules.java. + +def deduction_sequence_eliminate(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + Testcase: nal7.18.nal + judgements of both of task and belief should be events. + + + premise1: <(&/, C, +100, S, ...) =/> P>. + premise2: C. :|: + |- + conclusion: P>. 
:!105: + ''' + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) + + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + + stat1: Statement = premise1.term + stat2: Statement = premise2.term + + compound: Compound = stat1.subject + compound = compound - stat2 + + + interval: Interval = compound.terms[0] + t_bias = 0 + if (compound.is_compound and interval.is_interval): + t_bias = int(interval) + if len(compound) == 1: compound = None + else: compound = Compound(compound.connector, compound.terms[1:]) + elif compound.is_interval: + t_bias = int(interval) + compound = None + + if compound is not None: statement = Statement(compound, stat1.copula, stat1.predicate) + else: statement = stat1.predicate + + stamp = Stamp_merge(stamp_task, stamp_belief, stat1.copula, t_bias=t_bias) + + if task.is_judgement: + truth = Truth_deduction(premise1.truth, premise2.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + elif task.is_goal: + truth = Desire_induction(task.truth, belief.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Goal(statement, stamp, truth) + elif task.is_question: + curiosity = None # TODO + budget = Budget_backward_weak(belief.truth, budget_tasklink, budget_termlink) + sentence_derived = Question(statement, stamp, curiosity) + elif task.is_quest: + curiosity = None # TODO + budget = Budget_backward_weak(belief.truth, budget_tasklink, budget_termlink) + sentence_derived = Quest(statement, stamp, curiosity) + else: raise "Invalid case." + + return Task(sentence_derived, budget) + + +def abduction(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + Testcase: nal7.18.nal + judgements of both of task and belief should be events. + + + premise1: <(&/, C, S, ... +100) =/> P>. + premise2: P. :|: + |- + conclusion: (&/, C, S, ...). 
:!-105: + ''' + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) + + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + + stat1: Statement = premise1.term + stat2: Statement = premise2.term + + compound: Compound = stat1.subject + t_bias = 0 + if compound.is_compound and compound.connector is Connector.SequentialEvents: + interval: Interval = compound.terms[-1] + if interval.is_interval: + t_bias = -int(interval) + compound_terms = compound.terms[:-1] + if compound.is_multiple_only and len(compound_terms)==1: + compound = compound_terms[0] + else: + compound = Compound(compound.connector, *compound_terms) + + statement = compound + + stamp = Stamp_merge(stamp_task, stamp_belief, stat1.copula, reverse_order=True, t_bias=t_bias) + + if task.is_judgement: + truth = Truth_abduction(premise1.truth, premise2.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + elif task.is_goal: + truth = Desire_deduction(task.truth, belief.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Goal(statement, stamp, truth) + elif task.is_question: + curiosity = None # TODO + budget = Budget_backward_weak(belief.truth, budget_tasklink, budget_termlink) + sentence_derived = Question(statement, stamp, curiosity) + elif task.is_quest: + curiosity = None # TODO + budget = Budget_backward_weak(belief.truth, budget_tasklink, budget_termlink) + sentence_derived = Quest(statement, stamp, curiosity) + else: raise "Invalid case." + + return Task(sentence_derived, budget) + + +def sequence(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + (&/, A, B, C)! + A. + |- + (&/, B, C)! + + ''' + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) + + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + + stat1: Statement = premise1.term + stat2: Statement = premise2.term + + compound: Compound = stat1 + if compound.connector is Connector.SequentialEvents: + compound_terms = compound.terms[1:] + if compound.is_multiple_only and len(compound_terms)==1: + compound = compound_terms[0] + else: + compound = Compound(compound.connector, *compound_terms) + + statement = compound + + stamp = Stamp_merge(stamp_task, stamp_belief) + + if task.is_judgement: + truth = Truth_deduction(premise1.truth, premise2.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + elif task.is_goal: + truth = Desire_deduction(task.truth, belief.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Goal(statement, stamp, truth) + else: raise "Invalid case." + + return Task(sentence_derived, budget) + + +def sequence_predictive_implication(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + (&/, A, B, C) =/> D! + A. + |- + (&/, B, C) =/> D! 
+ + ''' + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) + + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + + stat1: Statement = premise1.term + stat2: Statement = premise2.term + + compound: Compound = stat1.subject + if compound.connector is Connector.SequentialEvents: + compound_terms = compound.terms[1:] + if compound.is_multiple_only and len(compound_terms)==1: + compound = compound_terms[0] + else: + compound = Compound(compound.connector, *compound_terms) + + statement = Statement(compound, stat1.copula, stat1.predicate) + + stamp = Stamp_merge(stamp_task, stamp_belief) + + if task.is_judgement: + truth = Truth_deduction(premise1.truth, premise2.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + elif task.is_goal: + truth = Desire_deduction(task.truth, belief.truth) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Goal(statement, stamp, truth) + else: raise "Invalid case." + + return Task(sentence_derived, budget) + + +'''Immediate Rules''' + +def immediate_goal_deriviation(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + (&/, A, B, C)! + ''' + # belief is None. + premise = task.sentence + stat: Statement = premise.term + + statement = stat[0] + stamp = copy(premise.stamp) + + if task.is_goal: + truth = Truth_deduction(premise.truth, truth_analytic) + budget = Budget_forward(truth, budget_tasklink, budget_termlink) + sentence_derived = Goal(statement, stamp, truth) + else: raise "Invalid case." + + return Task(sentence_derived, budget) + + +'''Temporal Induction Rules''' +def induction_implication(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + judgements of both of task and belief should be events. + + premise1: A. :\: + premise2: B. :|: + conclusion: B>. :|: + + + Reference: + [1] OpenNARS 3.0.4 TemporalRules.java line 147~263 temporalInduction(...) + + Testcase: nal7.6.nal + ''' + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) + + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + + stat1: Statement = premise1.term + stat2: Statement = premise2.term + + time_diff = premise2.stamp.t_occurrence - premise1.stamp.t_occurrence + interval = Interval(abs(time_diff)) + if abs(time_diff) < Config.temporal_duration: + # concurrent + statement = Statement(stat1, Copula.ConcurrentImplication, stat2) + stamp = Stamp_merge(stamp_task, stamp_belief) + elif time_diff > 0: + # predictive + statement = Statement(Compound.SequentialEvents(stat1, interval), Copula.PredictiveImplication, stat2) + stamp = Stamp_merge(premise2.stamp, premise1.stamp) + else: # time_diff < 0 + # retrospective + statement = Statement(stat1, Copula.RetrospectiveImplication, Compound.SequentialEvents(stat2, interval)) + stamp = Stamp_merge(premise1.stamp, premise2.stamp) + + if task.is_judgement: + truth = Truth_induction(premise1.truth, premise2.truth) + budget = Budget_forward(truth, budget_tasklink if budget_tasklink is not None else task.budget, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + else: raise "Invalid case." 
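+ # Illustrative trace (hypothetical occurrence times): A. at t=100 and B. at + # t=105 give time_diff = 5; assuming Config.temporal_duration <= 5, the + # predictive branch fires and derives <(&/, A, +5) =/> B>. %F_ind%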
+ + return Task(sentence_derived, budget) + + +def induction_composition(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + judgements of both of task and belief should be events. + + premise1: A. :\: + premise2: C>. :|: + conclusion: <(&/, A, +5, B)=/>C>. :|: + + + Reference: + [1] OpenNARS 3.0.4 TemporalRules.java line 147~263 temporalInduction(...) + + Testcase: nal7.7.nal + ''' + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) + + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + + stat1: Statement = premise1.term + stat2: Statement = premise2.term + + time_diff = premise2.stamp.t_occurrence - premise1.stamp.t_occurrence + interval = Interval(abs(time_diff)) + if abs(time_diff) < Config.temporal_duration: + # concurrent + statement = Statement(Compound.ParallelEvents(stat1, stat2.subject), Copula.ConcurrentImplication, stat2.predicate) + stamp = Stamp_merge(stamp_task, stamp_belief) + elif time_diff > 0: + # predictive + statement = Statement(Compound.SequentialEvents(stat1, interval, stat2.subject), Copula.PredictiveImplication, stat2.predicate) + stamp = Stamp_merge(premise2.stamp, premise1.stamp) + else: # time_diff < 0 + # retrospective + statement = Statement(stat2.subject, Copula.RetrospectiveImplication, Compound.SequentialEvents(stat2.predicate, interval, stat1)) + stamp = Stamp_merge(premise1.stamp, premise2.stamp) + + if task.is_judgement: + truth = Truth_induction(premise1.truth, premise2.truth) + budget = Budget_forward(truth, budget_tasklink if budget_tasklink is not None else task.budget, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + else: raise "Invalid case." 
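+ # Illustrative trace (hypothetical occurrence times): A. at t=100 and + # <B =/> C>. at t=105 take the predictive branch, composing the subject via + # Compound.SequentialEvents(stat1, interval, stat2.subject) into + # <(&/, A, +5, B) =/> C>. %F_ind%, the shape tested by nal7.7.nal.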
+ + return Task(sentence_derived, budget) diff --git a/NAL/Inference/__init__.py b/NAL/Inference/__init__.py new file mode 100644 index 0000000..921feb1 --- /dev/null +++ b/NAL/Inference/__init__.py @@ -0,0 +1,63 @@ +'''This file is generated by `_generate_init_file.py`''' + + +from .CompositionalRules import \ + conjunction_extension as compositional__conjunction_extension, \ + conjunction_intension as compositional__conjunction_intension, \ + conjunstion_composition as compositional__conjunstion_composition, \ + difference_extension as compositional__difference_extension, \ + difference_extension2 as compositional__difference_extension2, \ + difference_intension as compositional__difference_intension, \ + difference_intension2 as compositional__difference_intension2, \ + disjunction_composition as compositional__disjunction_composition, \ + disjunction_extension as compositional__disjunction_extension, \ + disjunction_intension as compositional__disjunction_intension, \ + intersection_extension as compositional__intersection_extension, \ + intersection_intension as compositional__intersection_intension, \ + union_extension as compositional__union_extension, \ + union_intension as compositional__union_intension + +from .ConditionalSyllogisticRules import \ + abduction as conditional__abduction, \ + abduction_compound_eliminate as conditional__abduction_compound_eliminate, \ + abduction_compound_eliminate2 as conditional__abduction_compound_eliminate2, \ + analogy as conditional__analogy, \ + analogy_compound_replace as conditional__analogy_compound_replace, \ + comparison as conditional__comparison, \ + deduction as conditional__deduction, \ + deduction_compound_eliminate as conditional__deduction_compound_eliminate, \ + deduction_compound_replace as conditional__deduction_compound_replace, \ + induction as conditional__induction, \ + induction_compound_compose as conditional__induction_compound_compose, \ + induction_compound_replace as conditional__induction_compound_replace + +from .ImmediateRules import \ + contraposition as immediate__contraposition, \ + conversion as immediate__conversion, \ + negation as immediate__negation + +from .LocalRules import \ + revision as local__revision, \ + solution_query as local__solution_query, \ + solution_question as local__solution_question, \ + solve_query as local__solve_query + +from .SyllogisticRules import \ + abduction as syllogistic__abduction, \ + analogy as syllogistic__analogy, \ + comparison as syllogistic__comparison, \ + deduction as syllogistic__deduction, \ + exemplification as syllogistic__exemplification, \ + induction as syllogistic__induction, \ + resemblance as syllogistic__resemblance, \ + reversion as syllogistic__reversion + +from .TemporalRules import \ + abduction as temporal__abduction, \ + deduction_sequence_eliminate as temporal__deduction_sequence_eliminate, \ + immediate_goal_deriviation as temporal__immediate_goal_deriviation, \ + induction_composition as temporal__induction_composition, \ + induction_implication as temporal__induction_implication, \ + sequence as temporal__sequence, \ + sequence_predictive_implication as temporal__sequence_predictive_implication + diff --git a/NAL/MentalOperation/__init__.py b/NAL/MentalOperation/__init__.py new file mode 100644 index 0000000..871ad41 --- /dev/null +++ b/NAL/MentalOperation/__init__.py @@ -0,0 +1,13 @@ +from ._register import * +from ._execute import * + +register(Anticipate, anticipate) +register(Believe, believe) +register(Doubt, doubt) +register(Evaluate, 
evaluate)
+register(Hesitate, hesitate)
+register(Want, want)
+register(Wonder, wonder)
+
+
+
diff --git a/NAL/MentalOperation/_aware.py b/NAL/MentalOperation/_aware.py
new file mode 100644
index 0000000..7480f23
--- /dev/null
+++ b/NAL/MentalOperation/_aware.py
@@ -0,0 +1,57 @@
+from copy import copy
+from typing import List
+from Config import Config
+from Narsese._py import SELF
+from Narsese._py.Budget import Budget
+from Narsese._py.Compound import Compound
+from Narsese._py.Connector import Connector
+from Narsese._py.Operation import *
+from Narsese._py.Sentence import Goal, Judgement, Quest, Question, Sentence, Stamp
+from Narsese._py.Statement import Statement
+from Narsese._py.Task import Belief, Desire, Task
+from Narsese._py.Truth import Truth
+from ._register import registered_operations
+from Narsese import Term
+from ..Functions.Tools import truth_from_term, truth_to_quality, truth_to_term
+from Narsese import Base
+import Global
+
+
+def _aware(statement: Statement, stamp: Stamp, budget_task: Budget=None):
+    '''Build an introspective judgement about an internal experience, occurring now, with a discounted budget.'''
+    stamp = copy(stamp)
+    stamp.t_occurrence = Global.time
+    truth_aware = Truth(1.0, Config.c_judgement, Config.k)
+    if budget_task is None:
+        budget = Budget(Config.p_judgement*Config.rate_discount_p_internal_exp, Config.d_judgement*Config.rate_discount_d_internal_exp, truth_to_quality(truth_aware))
+    else:
+        budget = Budget(budget_task.priority*Config.rate_discount_p_internal_exp, budget_task.durability*Config.rate_discount_d_internal_exp, truth_to_quality(truth_aware))
+
+    sentence = Judgement(statement, stamp, truth_aware)
+    return Task(sentence, budget)
+
+
+def believe(judgement: Judgement, truth: Truth, budget_task: Budget=None):
+    '''awareness of a judgement: <(*, SELF, S, T) --> ^believe>.'''
+    stat_aware = Statement.Inheritance(Compound(Connector.Product, SELF, judgement.term, truth_to_term(truth)), Believe)
+    return _aware(stat_aware, judgement.stamp, budget_task)
+
+
+def want(goal: Goal, truth: Truth, budget_task: Budget=None):
+    '''awareness of a goal: <(*, SELF, G, T) --> ^want>.'''
+    stat_aware = Statement.Inheritance(Compound(Connector.Product, SELF, goal.term, truth_to_term(truth)), Want)
+    return _aware(stat_aware, goal.stamp, budget_task)
+
+
+def evaluate(quest: Quest, budget_task: Budget=None):
+    '''awareness of a quest: <(*, SELF, Q) --> ^evaluate>.'''
+    stat_aware = Statement.Inheritance(Compound(Connector.Product, SELF, quest.term), Evaluate)
+    return _aware(stat_aware, quest.stamp, budget_task)
+
+
+
+def wonder(question: Question, budget_task: Budget=None):
+    '''awareness of a question: <(*, SELF, Q) --> ^wonder>.'''
+    stat_aware = Statement.Inheritance(Compound(Connector.Product, SELF, question.term), Wonder)
+    return _aware(stat_aware, question.stamp, budget_task)
+
diff --git a/NAL/MentalOperation/_execute.py b/NAL/MentalOperation/_execute.py
new file mode 100644
index 0000000..6e7993f
--- /dev/null
+++ b/NAL/MentalOperation/_execute.py
@@ -0,0 +1,76 @@
+from typing import List
+from Config import Config
+from Narsese._py.Budget import Budget
+from Narsese._py.Operation import *
+from Narsese._py.Sentence import Goal, Judgement, Quest, Question, Sentence, Stamp
+from Narsese._py.Statement import Statement
+from Narsese._py.Task import Belief, Desire, Task
+from Narsese._py.Truth import Truth
+from ._register import registered_operations
+from Narsese import Term
+from ..Functions.Tools import truth_from_term, truth_to_quality
+from Narsese import Base
+import Global
+
+def execute(task: Task):
+    '''Dispatch an executable statement to its registered mental operation.'''
+    stat: Statement = task.term
+    if stat.is_executable:
+        operation: Operation = stat.predicate
+        args = stat.terms
+        return registered_operations[operation](task, *args)
+    else:
+        return None
+
+def anticipate(task: Task, *args: Term):
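+    # NOTE (assumption, based on OpenNARS rather than on this commit): `anticipate`
+    # would register an expected event so that later confirmation or disappointment
+    # can revise the corresponding belief; the body below is still a stub.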
'''''' + +def believe(statement: Term, term_truth: Term): + '''''' + truth = truth_from_term(term_truth) + budget = Budget(Config.p_judgement, Config.d_judgement, truth_to_quality(truth)) + stamp = Stamp(Global.time, Global.time, None, Base((Global.get_input_id(),))) + sentence = Judgement(statement, stamp=stamp, truth=truth) + return Task(sentence, budget) + + +def doubt(beliefs: List[Belief]): + '''''' + for belief in beliefs: + # discount the confidence of the beleif + belief.truth.c = belief.truth.c * Config.rate_discount_c + return None + + +def evaluate(statement: Term): + '''''' + budget = Budget(Config.p_quest, Config.d_quest, 1.0) + stamp = Stamp(Global.time, Global.time, None, Base((Global.get_input_id(),))) + sentence = Quest(statement, stamp=stamp) + return Task(sentence, budget) + + +def hesitate(desires: List[Desire]): + '''''' + for desire in desires: + # discount the confidence of the desire + desire.truth.c = desire.truth.c * Config.rate_discount_c + return None + + +def want(statement: Term): + '''''' + truth = Truth(1.0, Config.c_judgement, Config.k) + budget = Budget(Config.p_judgement, Config.d_judgement, truth_to_quality(truth)) + stamp = Stamp(Global.time, Global.time, None, Base((Global.get_input_id(),))) + sentence = Goal(statement, stamp, truth) + return Task(sentence, budget) + + +def wonder(statement: Term): + '''''' + budget = Budget(Config.p_question, Config.d_question, 1) + stamp = Stamp(Global.time, Global.time, None, Base((Global.get_input_id(),))) + sentence = Question(statement, stamp=stamp) + return Task(sentence, budget) + diff --git a/NAL/MentalOperation/_register.py b/NAL/MentalOperation/_register.py new file mode 100644 index 0000000..74ec0cd --- /dev/null +++ b/NAL/MentalOperation/_register.py @@ -0,0 +1,10 @@ + +from typing import Callable, Dict +from Narsese._py.Operation import * + +registered_operations: Dict[Operation, Callable] = {} + +def register(operation: Operation, callable: Callable): + '''''' + global registered_operations + registered_operations[operation] = callable \ No newline at end of file diff --git a/NAL/MetaLevelInference/GoalDerivation.py b/NAL/MetaLevelInference/GoalDerivation.py new file mode 100644 index 0000000..c5e527f --- /dev/null +++ b/NAL/MetaLevelInference/GoalDerivation.py @@ -0,0 +1,3 @@ +''' +**Goal derivation.** A goal G and a judgment J produce a derived goal G', if and only if the solution to G', call it J', can be used with J to derive a solution to G by a two-premise inference rule; a goal G by itself produces a derived goal G', if and only if the solution to G', call it J', can be used to derive a solution to G by a one-premise inference rule. In both cases, the desire-value of G' is derived as the truth-value of G' ⇒ D from the desire-value of G, as the truth-value of G ⇒ D, as well as the truth-value of J (if it is involved). +''' \ No newline at end of file diff --git a/NAL/MetaLevelInference/QuestionDerivation.py b/NAL/MetaLevelInference/QuestionDerivation.py new file mode 100644 index 0000000..102ec72 --- /dev/null +++ b/NAL/MetaLevelInference/QuestionDerivation.py @@ -0,0 +1,4 @@ +''' +**Question derivation.** A question Q and a judgment J produce a derived question Q', if and only if the answer to Q', call it J', can be used with J to derive an answer to Q by a two-premise inference rule; a question Q by itself produces a derived question Q', if and only if the answer to Q', call it J', can be used to derive an answer to Q by a one-premise inference rule. 
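+
+For example (an illustrative instance, not from the original text): the question
+<A --> C>? together with the judgment <B --> C>. produces the derived question
+<A --> B>?, since an answer <A --> B>. would combine with <B --> C>. to answer
+the original question by deduction.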
+''' + diff --git a/NAL/MetaLevelInference/TemporalInference.py b/NAL/MetaLevelInference/TemporalInference.py new file mode 100644 index 0000000..919a7f4 --- /dev/null +++ b/NAL/MetaLevelInference/TemporalInference.py @@ -0,0 +1,3 @@ +''' +**Temporal inference.** Temporal inference is carried out by processing the logical factor and the temporal factor in the premises in parallel. First, temporal variants of IL rules are obtained by turning some statements in the premises into events by adding temporal order among them, and the conclusion must keep the same temporal information. Then these rules are extended into strong NAL rules by using the same truth-value function. The rules of weak inference are formed as the inverse of the strong rules. +''' \ No newline at end of file diff --git a/NAL/MetaLevelInference/VariableSubstitution/Elimination.py b/NAL/MetaLevelInference/VariableSubstitution/Elimination.py new file mode 100644 index 0000000..0aaf645 --- /dev/null +++ b/NAL/MetaLevelInference/VariableSubstitution/Elimination.py @@ -0,0 +1,83 @@ +from typing import Dict, List, Tuple + +from bidict import bidict +from Narsese import Term +from utils.IndexVar import IntVar + +from .Substitution import Substitution + + +class Elimination(Substitution): + ''' + the substitution of var-to-const + ''' + def __init__(self, term_src: Term, term_tgt: Term, ivar_src: List[IntVar]=None, iconst_tgt: List[Term]=None, dvar_src: List[IntVar]=None, dconst_tgt: List[Term]=None, qvar_src: List[IntVar]=None, qconst_tgt: List[Term]=None) -> None: + super().__init__(term_src, term_tgt) #, ivar_src, iconst_tgt, dvar_src, dconst_tgt, qvar_src, qconst_tgt) + + # is_conflict_ivar = is_conflict_dvar = is_conflict_qvar = False + if (ivar_src is not None and iconst_tgt is not None): + self.is_conflict_ivar, self.mapping_ivar = self.check_conflict(ivar_src, iconst_tgt) + if (dvar_src is not None and dconst_tgt is not None): + self.is_conflict_dvar, self.mapping_dvar = self.check_conflict(dvar_src, dconst_tgt) + if (qvar_src is not None and qconst_tgt is not None): + self.is_conflict_qvar, self.mapping_qvar = self.check_conflict(qvar_src, qconst_tgt) + + # self._is_conflict = is_conflict_ivar or is_conflict_dvar or is_conflict_qvar + + @property + def is_valid(self): + return self.is_qvar_valid or self.is_dvar_valid or self.is_ivar_valid + + @property + def is_qvar_valid(self): + return not self.is_conflict_qvar and len(self.mapping_qvar) > 0 + + @property + def is_dvar_valid(self): + return not self.is_conflict_dvar and len(self.mapping_dvar) > 0 + + @property + def is_ivar_valid(self): + return not self.is_conflict_ivar and len(self.mapping_ivar) > 0 + + @staticmethod + def check_conflict(vars: List[IntVar], consts: List[Term]) -> Tuple[bool, Dict[IntVar, Term]]: + ''' + no conflict: + (&&, <$x-->A>, <$y-->A>) + (&&, A>, A>) + [0, 1], [B, C] + [0, 1], [C, B] + conflict: + (&&, <$x-->A>, <$x-->B>) + (&&, A>, B>) + [0, 0], [C, D] + ''' + mapping_ret = bidict() + if len(vars) != len(consts): return True, mapping_ret + mapping = {key: set() for key in set(vars)} + is_conflict = False + for var, const in zip(vars, consts): + var_list = mapping[var] + var_list.add(const) + if len(var_list) > 1: + is_conflict = True + break + + if not is_conflict: + mapping_ret = bidict({key: list(value)[0] for key, value in mapping.items()}) + return is_conflict, mapping_ret + + + def apply(self, term_src: Term=None, term_tgt: Term=None): + '''''' + term_src = term_src if term_src is not None else self.term_src + term_tgt = term_tgt if 
term_tgt is not None else self.term_tgt + mapping_ivar = self.mapping_ivar + mapping_dvar = self.mapping_dvar + mapping_qvar = self.mapping_qvar + mapping_const = self.mapping_const + + # TODO: replace var with const + + pass \ No newline at end of file diff --git a/NAL/MetaLevelInference/VariableSubstitution/Introduction.py b/NAL/MetaLevelInference/VariableSubstitution/Introduction.py new file mode 100644 index 0000000..4a1739e --- /dev/null +++ b/NAL/MetaLevelInference/VariableSubstitution/Introduction.py @@ -0,0 +1,26 @@ +from typing import List +from Narsese import Term +from utils.IndexVar import IntVar + +from .Substitution import Substitution + +class Introduction(Substitution): + ''' + the substitution of const-to-var + ''' + def __init__(self, term_src: Term, term_tgt: Term, iconst_src: List[Term]=None, ivar_tgt: List[IntVar]=None, dconst_src: List[Term]=None, dvar_tgt: List[IntVar]=None, qconst_src: List[Term]=None, qvar_tgt: List[IntVar]=None) -> None: + super().__init__(term_src, term_tgt, iconst_src, ivar_tgt, dconst_src, dvar_tgt, qconst_src, qvar_tgt) + + + def apply(self, term_src: Term=None, term_tgt: Term=None): + '''''' + term_src = term_src if term_src is not None else self.term_src + term_tgt = term_tgt if term_tgt is not None else self.term_tgt + mapping_ivar = self.mapping_ivar + mapping_dvar = self.mapping_dvar + mapping_qvar = self.mapping_qvar + mapping_const = self.mapping_const + + # TODO: replace const with var + + pass diff --git a/NAL/MetaLevelInference/VariableSubstitution/Substitution.py b/NAL/MetaLevelInference/VariableSubstitution/Substitution.py new file mode 100644 index 0000000..71d314a --- /dev/null +++ b/NAL/MetaLevelInference/VariableSubstitution/Substitution.py @@ -0,0 +1,91 @@ +from copy import deepcopy +from typing import List + +from bidict import bidict +from Narsese import Term +from utils.IndexVar import IntVar + + +class Substitution: + ''' + the substitutions between the terms of the same type, that is, ivar-to-ivar, dvar-to-dvar, qvar-to-qvar, const-to-const + ''' + def __init__(self, term_src: Term, term_tgt: Term, ivar_src: List[IntVar]=None, ivar_tgt: List[IntVar]=None, dvar_src: List[IntVar]=None, dvar_tgt: List[IntVar]=None, qvar_src: List[IntVar]=None, qvar_tgt: List[IntVar]=None, const_src: List[Term]=None, const_tgt: List[Term]=None) -> None: + ''' + len(src) == len(tgt) + ''' + self.term_src = term_src + self.term_tgt = term_tgt + self.mapping_ivar = None + self.mapping_dvar = None + self.mapping_qvar = None + self.mapping_const = None + + if (ivar_src is not None and ivar_tgt is not None): + self.mapping_ivar = self._build_mapping(term_src._index_var.var_independent, term_tgt._index_var.var_independent, ivar_src, ivar_tgt) + if (dvar_src is not None and dvar_tgt is not None): + self.mapping_dvar = self._build_mapping(term_src._index_var.var_dependent, term_tgt._index_var.var_dependent, dvar_src, dvar_tgt) + if (qvar_src is not None and qvar_tgt is not None): + self.mapping_qvar = self._build_mapping(term_src._index_var.var_query, term_tgt._index_var.var_query, qvar_src, qvar_tgt) + if (const_src is not None and const_tgt is not None): + self.mapping_const = bidict(zip(const_src, const_tgt)) + + @property + def is_valid(self): + return len(self.mapping_dvar) > 0 or len(self.mapping_ivar) > 0 or len(self.mapping_qvar) > 0 + + @property + def is_qvar_valid(self): + return len(self.mapping_qvar) > 0 + + @property + def is_dvar_valid(self): + return len(self.mapping_dvar) > 0 + + @property + def is_ivar_valid(self): + return 
len(self.mapping_ivar) > 0 + + + def apply(self, term_src: Term=None, term_tgt: Term=None, inverse=False): + '''''' + term_src = term_src if term_src is not None else self.term_src + term_tgt = term_tgt if term_tgt is not None else self.term_tgt + mapping_ivar = self.mapping_ivar + mapping_dvar = self.mapping_dvar + mapping_qvar = self.mapping_qvar + mapping_const = self.mapping_const + if inverse: + term_src, term_tgt = term_tgt, term_src + mapping_ivar = mapping_ivar.inverse if mapping_ivar is not None else None + mapping_dvar = mapping_dvar.inverse if mapping_dvar is not None else None + mapping_qvar = mapping_qvar.inverse if mapping_qvar is not None else None + mapping_const = self.mapping_const.inverse if mapping_const is not None else None + + + ivar = [int(var) for var in term_src.index_var.var_independent] + dvar = [int(var) for var in term_src.index_var.var_dependent] + qvar = [int(var) for var in term_src.index_var.var_query] + + # TODO: replace var with var + term = deepcopy(term_src) + term.index_var.var_independent = [var(mapping_ivar.get(var_int, None)) for var, var_int in zip(term._index_var.var_independent, ivar)] + term.index_var.var_dependent = [var(mapping_dvar.get(var_int, None)) for var, var_int in zip(term._index_var.var_dependent, dvar)] + term.index_var.var_query = [var(mapping_qvar.get(var_int, None)) for var, var_int in zip(term._index_var.var_query, qvar)] + + return term + + @staticmethod + def _build_mapping(variables1, variables2, var_common1, var_common2): + if len(variables1) > 0 and len(variables2) > 0: + var_diff1 = sorted(list(set(variables1)-set(var_common1))) + var_diff2 = sorted(list(set(variables2)-set(var_common2))) + var_bias1 = max(variables1) + 1 + var_bias2 = max(variables2) + 1 + var_diff_new1 = [ivar+var_bias2 for ivar in var_diff1] + var_diff_new2 = [ivar+var_bias1 for ivar in var_diff2] + # mapping the second to the first + mapping = bidict({int(key): int(value) for key, value in (*zip(var_common1, var_common2), *zip( var_diff_new2, var_diff2), *zip(var_diff1, var_diff_new1))}) + else: + mapping = bidict() + return mapping \ No newline at end of file diff --git a/NAL/MetaLevelInference/VariableSubstitution/Unification.py b/NAL/MetaLevelInference/VariableSubstitution/Unification.py new file mode 100644 index 0000000..0d8140d --- /dev/null +++ b/NAL/MetaLevelInference/VariableSubstitution/Unification.py @@ -0,0 +1,75 @@ +''' +**Variable substitution.** All occurrences of an independent variable term in a statement can be substituted by another term (constant or variable); all occurrences of a term (constant or variable) in a statement can be substituted by a dependent variable term. The reverse cases of these substitution are limited to the cases discussed in NAL-6. A query variable in a question can be substituted by a constant term in a judgment. +''' + +from copy import deepcopy +from typing import Callable, Dict, List, Tuple, Union + +from bidict import bidict +from Narsese import Term +from Narsese import Statement, Compound +from utils.IndexVar import IndexVar, IntVar +from utils.tools import find_pos_with_pos, find_var_with_pos + +from .Substitution import Substitution +from .Elimination import Elimination +from .Introduction import Introduction + +# find_var_with_pos: Callable = lambda pos_search, variables, positions: [var for var, pos in zip(variables, positions) if pos[:len(pos_search)] == pos_search] # find those variables with a common head of position. e.g. 
pos_search=[0], variables=[1, 1, 2, 2], and positions=[[0, 2, 0, 0], [0, 2, 1, 0], [0, 3, 0], [1, 0]], then return [1, 1, 2] +# find_pos_with_pos: Callable = lambda pos_search, positions: [pos for pos in positions if pos[:len(pos_search)] == pos_search] +def unification__var_var(term1: Term, term2: Term, pos_common1: List[IntVar], pos_common2: List[IntVar]) -> Substitution: + ''' + It should be ensured that `term1[pos_common1].equal(term2[pos_common2]) == True`. + ''' + # 1. find the variables in the first common position + ivar1 = find_var_with_pos(pos_common1, term1._index_var.var_independent, term1._index_var.positions_ivar) + dvar1 = find_var_with_pos(pos_common1, term1._index_var.var_dependent, term1._index_var.positions_dvar) + qvar1 = find_var_with_pos(pos_common1, term1._index_var.var_query, term1._index_var.positions_qvar) + + # 2. find the variables in the second common position + ivar2 = find_var_with_pos(pos_common2, term2._index_var.var_independent, term2._index_var.positions_ivar) + dvar2 = find_var_with_pos(pos_common2, term2._index_var.var_dependent, term2._index_var.positions_dvar) + qvar2 = find_var_with_pos(pos_common2, term2._index_var.var_query, term2._index_var.positions_qvar) + + return Substitution(term1, term2, ivar1, ivar2, dvar1, dvar2, qvar1, qvar2) + + +def unification__const_var(term1: Term, term2: Term, pos_common1: List[IntVar], pos_common2: List[IntVar]) -> Introduction: + '''''' + + return Introduction(...) + + +def unification__var_const(term1: Term, term2: Term, pos_common1: List[IntVar], pos_common2: List[IntVar]) -> Elimination: + ''' + It should be ensured that `term1[pos_common1].equal(term2[pos_common2]) == True`. + e.g. + term1: <<$0-->A>==><$0-->B>>> + term2: <B>==>D>>> + pos_common1: [1] + pos_common1: [0] + ''' + ivar = find_var_with_pos(pos_common1, term1.index_var.var_independent, term1.index_var.positions_ivar) + dvar = find_var_with_pos(pos_common1, term1.index_var.var_dependent, term1.index_var.positions_dvar) + qvar = find_var_with_pos(pos_common1, term1.index_var.var_query, term1.index_var.positions_qvar) + + iconst = [term2[pos_common2][pos[len(pos_common2):]] for pos in find_pos_with_pos(pos_common1, term1.index_var.positions_ivar)] + dconst = [term2[pos_common2][pos[len(pos_common2):]] for pos in find_pos_with_pos(pos_common1, term1.index_var.positions_dvar)] + qconst = [term2[pos_common2][pos[len(pos_common2):]] for pos in find_pos_with_pos(pos_common1, term1.index_var.positions_qvar)] + + # 1. To find an option: there might be multiple options, and choose one of them randomly, e.g., [$x, $y] might be [A, B] or [B, A]. + + # 2. Check conflicts: there should be no conflicts, e.g., $x cannot be A and B simultaneously. + + # BUG: + # when the compound is commutative, the positions of const-terms and variables are not in correspondance. + # testcase: + # (&&, b>, d>). + # (&&, b>, d>)? 
+ # 1 + # ''outputMustContain('(&&, b>, d>).') + return Elimination(term1, term2, ivar, iconst, dvar, dconst, qvar, qconst) + + +def unification() -> Substitution: + '''''' \ No newline at end of file diff --git a/NAL/MetaLevelInference/VariableSubstitution/__init__.py b/NAL/MetaLevelInference/VariableSubstitution/__init__.py new file mode 100644 index 0000000..a304dad --- /dev/null +++ b/NAL/MetaLevelInference/VariableSubstitution/__init__.py @@ -0,0 +1,4 @@ +from .Substitution import Substitution +from .Elimination import Elimination +from .Introduction import Introduction +from .Unification import * \ No newline at end of file diff --git a/NAL/MetaLevelInference/_trash/VariableSubstitution_v1.py b/NAL/MetaLevelInference/_trash/VariableSubstitution_v1.py new file mode 100644 index 0000000..e26febb --- /dev/null +++ b/NAL/MetaLevelInference/_trash/VariableSubstitution_v1.py @@ -0,0 +1,156 @@ +''' +**Variable substitution.** All occurrences of an independent variable term in a statement can be substituted by another term (constant or variable); all occurrences of a term (constant or variable) in a statement can be substituted by a dependent variable term. The reverse cases of these substitution are limited to the cases discussed in NAL-6. A query variable in a question can be substituted by a constant term in a judgment. +''' + +from typing import Callable, List, Union + +from bidict import bidict +from Narsese import Term +from Narsese import Statement, Compound +from utils.IndexVar import IndexVar + + +class SubstituteVar: + '''''' + def __init__(self, mapping_ivar: bidict, mapping_dvar: bidict, mapping_qvar: bidict) -> None: + self.mapping_ivar = mapping_ivar + self.mapping_dvar = mapping_dvar + self.mapping_qvar = mapping_qvar + + @property + def is_valid(self): + return len(self.mapping_dvar) > 0 or len(self.mapping_ivar) > 0 or len(self.mapping_qvar) > 0 + + @property + def is_qvar_valid(self): + return len(self.mapping_qvar) > 0 + + @property + def is_dvar_valid(self): + return len(self.mapping_dvar) > 0 + + @property + def is_ivar_valid(self): + return len(self.mapping_ivar) > 0 + + def apply(self, term1: Term, term2: Term, inverse=False): + mapping_ivar = self.mapping_ivar + mapping_dvar = self.mapping_dvar + mapping_qvar = self.mapping_qvar + if inverse: + term1, term2 = term2, term1 + mapping_ivar = mapping_ivar.inverse + mapping_dvar = mapping_dvar.inverse + mapping_qvar = mapping_qvar.inverse + ivar = [int(var) for var in term2._index_var.var_independent] + dvar = [int(var) for var in term2._index_var.var_dependent] + qvar = [int(var) for var in term2._index_var.var_query] + + term2._index_var.var_independent = [var(mapping_ivar[var_int]) for var, var_int in zip(term2._index_var.var_independent, ivar)] + term2._index_var.var_dependent = [var(mapping_dvar[var_int]) for var, var_int in zip(term2._index_var.var_dependent, dvar)] + term2._index_var.var_query = [var(mapping_qvar[var_int]) for var, var_int in zip(term2._index_var.var_query, qvar)] + # TODO: to recursively apply the variable-mapping to the terms. + + +def substitution(R: Union[Term, Statement, Compound], S: Term, T: Term) -> Term: + ''' + Refer to the definition in NAL: + Definition 10.6. For given terms R, S, T, a substitution R{S/T} produces a new term by replacing all occurrences of S by T in R, under the condition that S does not occur in T. + Args: + R (Term): the original term. + S (Term): the term to be substituted in R. + T (Term): the term to substitute S. 
+ Returns: + R_new (Term): the new term after substitution. + ''' + if R.is_atom: + R_new = T if R == S else R + elif R.is_statement: + subject = substitution(R.subject, S, T) + predicate = substitution(R.predicate, S, T) + R_new = Statement(subject, R.copula, predicate) + elif R.is_compound: + components = (substitution(component, S, T) for component in R) + R_new = Compound(R.connector, *components) + else: raise "Invalid case." + return R_new + + +_find_var_with_pos: Callable = lambda pos_search, variables, positions: [var for var, pos in zip(variables, positions) if pos[:len(pos_search)] == pos_search] + +def _build_mapping(variables1, variables2, var_common1, var_common2): + if len(variables1) == 0 and len(variables2) == 0: + mapping = bidict() + elif len(variables1) > 0 and len(variables2) > 0: + var_diff1 = sorted(list(set(variables1)-set(var_common1))) + var_diff2 = sorted(list(set(variables2)-set(var_common2))) + var_bias1 = max(variables1) + 1 + var_bias2 = max(variables2) + 1 + var_diff_new1 = [ivar+var_bias2 for ivar in var_diff1] + var_diff_new2 = [ivar+var_bias1 for ivar in var_diff2] + # mapping the second to the first + mapping = bidict({int(key): int(value) for key, value in (*zip(var_common2, var_common1), *zip(var_diff2, var_diff_new2), *zip(var_diff_new1, var_diff1))}) + else: # (len(variables1) > 0) ^ (len(variables2) > 0) + + mapping = bidict() + pass + return mapping + +def unification_variable(term1: Term, term2: Term, pos_common1: List[int], pos_common2: List[int]): + '''''' + # 1. find the variables in the first common position + ivar1 = _find_var_with_pos(pos_common1, term1._index_var.var_independent, term1._index_var.positions_ivar) + dvar1 = _find_var_with_pos(pos_common1, term1._index_var.var_dependent, term1._index_var.positions_dvar) + qvar1 = _find_var_with_pos(pos_common1, term1._index_var.var_query, term1._index_var.positions_qvar) + + # 2. find the variables in the second common position + ivar2 = _find_var_with_pos(pos_common2, term2._index_var.var_independent, term2._index_var.positions_ivar) + dvar2 = _find_var_with_pos(pos_common2, term2._index_var.var_dependent, term2._index_var.positions_dvar) + qvar2 = _find_var_with_pos(pos_common2, term2._index_var.var_query, term2._index_var.positions_qvar) + + # 3. build the mapping + mapping_ivar = _build_mapping(term1._index_var.var_independent, term2._index_var.var_independent, ivar1, ivar2) + mapping_dvar = _build_mapping(term1._index_var.var_dependent, term2._index_var.var_dependent, dvar1, dvar2) + mapping_qvar = _build_mapping(term1._index_var.var_query, term2._index_var.var_query, qvar1, qvar2) + + return SubstituteVar(mapping_ivar, mapping_dvar, mapping_qvar) + + +def unification_var_to_const(term_var: Term, term_const: Term, pos_common_var: List[int], pos_common_const: List[int]): + '''''' + # 1. find the variables in the first common position + ivar = _find_var_with_pos(pos_common_var, term_var._index_var.var_independent, term_var._index_var.positions_ivar) + dvar = _find_var_with_pos(pos_common_var, term_var._index_var.var_dependent, term_var._index_var.positions_dvar) + qvar = _find_var_with_pos(pos_common_var, term_var._index_var.var_query, term_var._index_var.positions_qvar) + + # 2. 
find the constants in the second common position
+    iconst = _find_var_with_pos(pos_common_const, term_const._index_var.var_independent, term_const._index_var.positions_ivar)
+    dconst = _find_var_with_pos(pos_common_const, term_const._index_var.var_dependent, term_const._index_var.positions_dvar)
+    qconst = _find_var_with_pos(pos_common_const, term_const._index_var.var_query, term_const._index_var.positions_qvar)
+
+    # 3. build the mapping
+    mapping_ivar = _build_mapping(term_var._index_var.var_independent, term_const._index_var.var_independent, ivar, iconst)
+    mapping_dvar = _build_mapping(term_var._index_var.var_dependent, term_const._index_var.var_dependent, dvar, dconst)
+    mapping_qvar = _build_mapping(term_var._index_var.var_query, term_const._index_var.var_query, qvar, qconst)
+
+    return SubstituteVar(mapping_ivar, mapping_dvar, mapping_qvar)
+
+def unification(term1: Term, term2: Term, term_common1: Term, term_common2: Term):
+    '''
+    According to the variable-indexes of `term_common1` and `term_common2`, get a mapping and apply it to `term2`. The variable-index of `term2` will be unified to that of `term1`.
+    '''
+    # 1. variable substitution
+
+    # 2. variable introduction
+
+    # 3. variable elimination
+    term_unified: Term = None
+    return term_unified
+
+
+def introduction(term: Term):
+    ''''''
+
+
+def elimination(term: Term):
+    ''''''
\ No newline at end of file
diff --git a/NAL/Theorems/DecompositionalRules.py b/NAL/Theorems/DecompositionalRules.py
new file mode 100644
index 0000000..e992e95
--- /dev/null
+++ b/NAL/Theorems/DecompositionalRules.py
@@ -0,0 +1,302 @@
+
+from Narsese import Copula, Statement, Compound, Connector, Term, Truth, Task, Belief, Budget
+from Narsese import Judgement, Goal, Quest, Question
+from Narsese import truth_analytic
+from ..Functions import *
+
+'''
+Decompositional rules
+
+Including,
+        S1                             S2                      S
+    -----------------------------------------------------------------------
+1  ok   (--, <M --> (&, T1, T2)>).     <M --> T1>.        |-   (--, <M --> T2>).
+2  ok   <M --> (|, T1, T2)>.           (--, <M --> T1>).  |-   <M --> T2>.
+3       (--, <M --> (-, T1, T2)>).     <M --> T1>.        |-   <M --> T2>.
+4       (--, <M --> (-, T2, T1)>).     (--, <M --> T1>).  |-   (--, <M --> T2>).
+5       (--, <(|, T2, T1) --> M>).     <T1 --> M>.        |-   (--, <T2 --> M>).
+6       <(&, T1, T2) --> M>.           (--, <T1 --> M>).  |-   (--, <T2 --> M>).
+7       (--, <(~, T1, T2) --> M>).     <T1 --> M>.        |-   <T2 --> M>.
+8       (--, <(~, T2, T1) --> M>).     (--, <T1 --> M>).  |-   (--, <T2 --> M>).
+
+9       (--, (&&, T1, T2)).            T1.                |-   (--, T2).
+10      (||, T1, T2).                  (--, T1).          |-   T2.
+
+Each rule corresponds to a piece of knowledge (with analytic truth) of the form
+    <(&&, S1, S2) ==> S>. %1.0;1.0%
+
+TODO Doubt:
+When any task comes into the reasoner, any theorem can be used to derive an analytically true piece of knowledge.
+Then, which rule should be taken?
+
+TODO:
+    Make a general check function to get the valid knowledge according to the two premises.
+    Then, in each rule function, do no checking and derive the knowledge directly.
+'''
+
+def decomposition_theorem1(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    Original:  (--, <M --> (&, T1, T2)>).  <M --> T1>.  |-  (--, <M --> T2>).
+    Practical: <M --> (&, T1, T2)>.  <M --> T1>.  |-  <M --> T2>.
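+
+    Illustrative instance (hypothetical truth values): from
+    <M --> (&, T1, T2)>. %0.9;0.9% and <M --> T1>. %0.9;0.9% the rule derives
+    <M --> T2>., with the truth computed below as
+    Truth_negation(Truth_deduction(Truth_intersection(Truth_negation(premise1.truth), premise2.truth), truth_analytic)).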
+ ''' + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) + + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + + stamp = Stamp_merge(stamp_task, stamp_belief) + + stat1: Statement = premise1.term[0] + stat2: Statement = premise2.term + compound: Compound = stat1.predicate + + statement = Statement(stat2.subject, Copula.Inheritance, compound - stat2.predicate) + + if task.is_judgement: + truth = Truth_negation(Truth_deduction(Truth_intersection(Truth_negation(premise1.truth), premise2.truth), truth_analytic)) + budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + else: raise "Invalid case." + + return Task(sentence_derived, budget) + + +def decomposition_theorem2(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + Original: (|, T1, T2)>. (--, T1>). |- T2>. + Practical: (|, T1, T2)>. T1>. |- T2>. + + Proof (of Theorem 7.8 in the NAL book): + According to propositional logic, implication of the definition of extensional intersection ((M → T1)∧(M → T2)) =⇒ (M → (T1∩T2)) can be rewritten equivalently into ((M → T1) ∧ ¬(M → (T1 ∩ T2))) =⇒ ¬(M → T2), and ((T1 ∩ T2) → M) =⇒ (((T1 → M) ∨ (T2 → M)) into (¬(T1 → M) ∧ (T1 ∩ T2) → M) =⇒ (T2 → M). The conclusions on intensional intersection can be proved in parallel. + According to the proof, similarly, { (|, T1, T2)>. (--, ( (|, T1, T3)>).} |- T2>. + + ((M → T1)∧(M → T2)∧(M → T3)∧(M → T3)) =⇒ (M → (T1∩T2∩T3∩T4)) + ((M → (T1∩T2))∧¬(M → (T1∩T2∩T3∩T4)) =⇒ ¬(M → (T3∩T4)) + + + ''' + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) + + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + + stamp = Stamp_merge(stamp_task, stamp_belief) + + stat1: Statement = premise1.term + stat2: Statement = premise2.term + compound: Compound = stat1.predicate + + statement = Statement(stat2.subject, stat1.copula, compound - stat2.predicate) + + if task.is_judgement: + # # As a theorem to apply, the truth should be calculated with the analytic truth using the deduction rule, isn't it? + # truth = Truth_deduction(Truth_intersection(premise1.truth, Truth_negation(premise2.truth)), truth_analytic) + truth = Truth_intersection(premise1.truth, Truth_negation(premise2.truth)) + budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + else: raise "Invalid case." + + return Task(sentence_derived, budget) + + +def decomposition_theorem3(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + Original: (--, (-, T1, T2)>). T1>. |- T2>. + Practical: (-, T1, T2)>). T1>. |- T2>. 
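+
+    That is, rule 3 of the table at the top of this file:
+    (--, <M --> (-, T1, T2)>).  <M --> T1>.  |-  <M --> T2>.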
+ ''' + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) + + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + + stamp = Stamp_merge(stamp_task, stamp_belief) + + stat1: Statement = premise1.term + stat2: Statement = premise2.term + compound: Compound = stat1.predicate + + statement = Statement(stat2.subject, stat1.copula, compound - stat2.predicate) + + if task.is_judgement: + truth = Truth_intersection(Truth_negation(premise1.truth), premise2.truth) + budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + else: raise "Invalid case." + + return Task(sentence_derived, budget) + + +def decomposition_theorem4(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + Original: (--, (-, T2, T1)>). (--, T1>). |- (--, T2>). + Practical: (-, T1, T2)>). T1>. |- T2>. + ''' + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) + + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + + stamp = Stamp_merge(stamp_task, stamp_belief) + + stat1: Statement = premise1.term + stat2: Statement = premise2.term + compound: Compound = stat1.predicate + + statement = Statement(stat2.subject, Copula.Inheritance, compound - stat2.predicate) + + if task.is_judgement: + truth = Truth_negation(Truth_intersection(Truth_negation(premise1.truth), Truth_negation(premise2.truth))) + budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + else: raise "Invalid case." + + return Task(sentence_derived, budget) + +def decomposition_theorem5(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + '''(--, <(|, T1, T2) --> M>). M>. |- (--, M>).''' + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) + + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + + stamp = Stamp_merge(stamp_task, stamp_belief) + + stat1: Statement = premise1.term[0] + stat2: Statement = premise2.term + compound: Compound = stat1.subject + + statement = Compound.Negation(Statement(compound - stat2.subject, Copula.Inheritance, stat2.subject)) + + if task.is_judgement: + truth = Truth_deduction(Truth_intersection(premise1.truth, premise2.truth), truth_analytic) + budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + else: raise "Invalid case." + + return Task(sentence_derived, budget) + + +def decomposition_theorem6(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + '''<(&, T1, T2) --> M>. 
(--, M>) |- (--, M>).''' + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) + + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + + stamp = Stamp_merge(stamp_task, stamp_belief) + + stat1: Statement = premise1.term + stat2: Statement = premise2.term[0] + compound: Compound = stat1.subject + + statement = Compound.Negation(Statement(compound - stat2.subject, Copula.Inheritance, stat2.subject)) + + if task.is_judgement: + truth = Truth_deduction(Truth_intersection(premise1.truth, premise2.truth), truth_analytic) + budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + else: raise "Invalid case." + + return Task(sentence_derived, budget) + + +def decomposition_theorem7(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + '''(--, <(~, T1, T2) --> M>). M>. |- M>.''' + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) + + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + + stamp = Stamp_merge(stamp_task, stamp_belief) + + stat1: Statement = premise1.term[0] + stat2: Statement = premise2.term + compound: Compound = stat1.subject + + statement = Statement(compound - stat2.subject, Copula.Inheritance, stat2.subject) + + if task.is_judgement: + truth = Truth_deduction(Truth_intersection(premise1.truth, premise2.truth), truth_analytic) + budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + else: raise "Invalid case." + + return Task(sentence_derived, budget) + + +def decomposition_theorem8(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + '''(--, <(~, T2, T1) --> M>). (--, M>). |- (--, M>).''' + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) + + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + + stamp = Stamp_merge(stamp_task, stamp_belief) + + stat1: Statement = premise1.term[0] + stat2: Statement = premise2.term[0] + compound: Compound = stat1.subject + + statement = Compound.Negation(Statement(compound - stat2.subject, Copula.Inheritance, stat2.subject)) + + if task.is_judgement: + truth = Truth_deduction(Truth_intersection(premise1.truth, premise2.truth), truth_analytic) + budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + else: raise "Invalid case." + + return Task(sentence_derived, budget) + + +def decomposition_theorem9(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + Original: (--, (&&, T2, T1)). T1. |- (--, T2). + Practical: (&&, T2, T1). T1. |- T2. 
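+
+    e.g. (an illustrative instance) (&&, <a --> b>, <c --> d>). together with
+    <a --> b>. yields <c --> d>.: the conjunction minus the matched component is what remains.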
+ ''' + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) + + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + + stamp = Stamp_merge(stamp_task, stamp_belief) + + stat1: Statement = premise1.term + stat2: Statement = premise2.term + compound: Compound = stat1 + + statement = compound - stat2 + + if task.is_judgement: + truth = Truth_negation(Truth_intersection(Truth_negation(premise1.truth), premise2.truth)) + budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + else: raise "Invalid case." + + return Task(sentence_derived, budget) + + +def decomposition_theorem10(task: Task, belief: Belief, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + Original: (||, T1, T2). (--, T1). |- T2. + Practical: (||, T1, T2). T1. |- T2. + ''' + premise1, premise2 = (task.sentence, belief.sentence) if not inverse_premise else (belief.sentence, task.sentence) + + stamp_task: Stamp = task.stamp + stamp_belief: Stamp = belief.stamp + + stamp = Stamp_merge(stamp_task, stamp_belief) + + stat1: Statement = premise1.term + stat2: Statement = premise2.term + compound: Compound = stat1 + + statement = compound - stat2 + + if task.is_judgement: + truth = Truth_intersection(premise1.truth, Truth_negation(premise2.truth)) + budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink) + sentence_derived = Judgement(statement, stamp, truth) + else: raise "Invalid case." + + return Task(sentence_derived, budget) diff --git a/NAL/Theorems/StructuralRules.py b/NAL/Theorems/StructuralRules.py new file mode 100644 index 0000000..d3fcaf0 --- /dev/null +++ b/NAL/Theorems/StructuralRules.py @@ -0,0 +1,1223 @@ +from typing import List +from Narsese import Copula, Statement, Compound, Connector, Term, Judgement, Truth, Task, Belief, Budget, Stamp, Goal, Quest, Question +from Narsese import place_holder, truth_analytic + +from ..Functions.TruthValueFunctions import * +from ..Functions.DesireValueFunctions import * +from ..Functions.StampFunctions import * +from ..Functions.BudgetFunctions import * + + +'''uni-composition (unilateral composition)''' +def uni_composition(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + e.g. +

S>. (inverse_copula: P>.) + (|, P, Q) (inverse_copula: (&, P, Q)) + |- + <(&, P, Q) --> S>. (inverse_copula: (|, P, Q)>.) + ''' + stamp_task: Stamp = task.stamp + premise: Judgement = task.sentence + stat: Statement = premise.term + + subject = term_concept if not inverse_copula else stat.subject + predicate = stat.predicate if not inverse_copula else term_concept + + truth = Truth_deduction(premise.truth, truth_analytic) + statement = Statement(subject, stat.copula, predicate) + budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink) + stamp = stamp_task + sentence_derived = Judgement(statement, stamp, truth) + + return Task(sentence_derived, budget) + +def uni_composition_prime(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + e.g. +

S>. (inverse_copula: P>.) + (~, Q, P) (inverse_copula: (-, Q, P)) + |- + <(~, Q, P) --> S>. (inverse_copula: (-, Q, P)>.) + ''' + stamp_task: Stamp = task.stamp + premise: Judgement = task.sentence + stat: Statement = premise.term + + subject = term_concept if not inverse_copula else stat.subject + predicate = stat.predicate if not inverse_copula else term_concept + + truth = Truth_negation(Truth_deduction(premise.truth, truth_analytic)) + statement = Statement(subject, stat.copula, predicate) + budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink) + stamp = stamp_task + sentence_derived = Judgement(statement, stamp, truth) + + return Task(sentence_derived, budget) + +'''uni-decomposition (unilateral composition)''' +# def uni_decomposition(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): +# ''' +# e.g. +# <(S|T) --> P>. (inverse_copula:

(S&T)>.) +# S (inverse_copula: S) +# |- +# P>. (inverse_copula:

S>.) +# ''' +# stamp_task: Stamp = task.stamp +# premise: Judgement = task.sentence +# stat: Statement = premise.term + +# subject: Compound | Term = stat.subject - set(term_concept) if not inverse_copula else stat.subject +# predicate: Compound | Term = stat.predicate if not inverse_copula else stat.predicate - set(term_concept) + +# truth = Truth_deduction(premise.truth, truth_analytic) +# statement = Statement(subject, stat.copula, predicate) +# budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink) +# stamp = stamp_task +# sentence_derived = Judgement(statement, stamp, truth) + +# return Task(sentence_derived, budget) + + +def uni_decomposition(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + e.g. + <(S|T) --> P>. (inverse_copula:

(S&T)>.) + S (inverse_copula: S) + |- + P>. (inverse_copula:

S>.) + ''' + stamp_task: Stamp = task.stamp + premise: Judgement = task.sentence + stat: Statement = premise.term + + subject: Compound | Term = stat.subject if not inverse_copula else term_concept + predicate: Compound | Term = term_concept if not inverse_copula else stat.predicate + + truth = Truth_deduction(premise.truth, truth_analytic) + statement = Statement(subject, stat.copula, predicate) + budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink) + stamp = stamp_task + sentence_derived = Judgement(statement, stamp, truth) + + return Task(sentence_derived, budget) + +'''bi-composition (bilateral compose)''' +def bi_composition(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + e.g. + P>. (inverse_copula:

S>.) + (|, S, M) (inverse_copula: (|, S, M)) + |- + <(|, S, M) --> (|, P, M)>. (inverse_copula: <(|, P, M) --> (|, S, M)>.) + ''' + stamp_task: Stamp = task.stamp + premise: Judgement = task.sentence + stat: Statement = premise.term + compound: Compound = term_concept + + subject = compound if not inverse_copula else compound.replace(stat.predicate, stat.subject) + predicate = compound.replace(stat.subject, stat.predicate) if not inverse_copula else compound + + statement = Statement(subject, stat.copula, predicate) + truth = Truth_deduction(premise.truth, truth_analytic) + budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink) + stamp = stamp_task + sentence_derived = Judgement(statement, stamp, truth) + + return Task(sentence_derived, budget) + +def bi_composition_prime(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + e.g. + P>. (inverse_copula:

S>.) + (-, M, S) (inverse_copula: (-, M, S)) + |- + <(-, M, P) --> (-, M, S)>. (inverse_copula: <(-, M, S) --> (-, M, P)>.) + + P>. (inverse_copula:

S>.) + (/, M1, M2, _ S) (inverse_copula: (/, M1, M2, _ S)) + |- + <(/, M1, M2, _ P) --> (/, M1, M2, _ S)>. (inverse_copula: <(/, M1, M2, _ S) --> (/, M1, M2, _ P)>.) + ''' + stamp_task: Stamp = task.stamp + premise: Judgement = task.sentence + stat: Statement = premise.term + compound: Compound = term_concept + + subject = compound if not inverse_copula else compound.replace(stat.predicate, stat.subject) + predicate = compound.replace(stat.subject, stat.predicate) if not inverse_copula else compound + + statement = Statement(predicate, stat.copula, subject) + truth = Truth_deduction(premise.truth, truth_analytic) + budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink) + stamp = stamp_task + sentence_derived = Judgement(statement, stamp, truth) + + return Task(sentence_derived, budget) + + +'''bi-decomposition (bilateral decompose)''' +def bi_decomposition(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + e.g. + <(S*T) --> (P*T)>. (inverse_copula: <(P*T) --> (S*T)>.) + S (inverse_copula: S) + |- + P>. (inverse_copula:

S>.) + ''' + stamp_task: Stamp = task.stamp + premise: Judgement = task.sentence + stat: Statement = premise.term + compound_subject: Compound = stat.subject + compound_predicate: Compound = stat.predicate + + subject = compound_subject - compound_predicate + predicate = compound_predicate - compound_subject + statement = Statement(subject, stat.copula, predicate) + + truth = Truth_deduction(premise.truth, truth_analytic) + if premise.is_judgement or premise.is_goal: + budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink) + elif premise.is_question or premise.is_quest: + budget = Budget_backward_compound(truth, budget_tasklink, budget_termlink) + + stamp = stamp_task + sentence_derived = Judgement(statement, stamp, truth) + + return Task(sentence_derived, budget) + + +'''transform product to image''' +def transform_product_to_image(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + inverse_copula=False: + <(*, T1, T2) --> R> |- (/, R, _, T2)> + <(*, T1, T2) --> R> |- (/, R, T1, _)> + inverse_copula=True: + (*, T1, T2)> |- <(\, R, _, T2) --> T1> + (*, T1, T2)> |- <(\, R, T1, _) --> T2> + ''' + stamp_task: Stamp = task.stamp + premise: Judgement = task.sentence + stat: Statement = premise.term + + subject = term_concept if not inverse_copula else Compound.IntensionalImage(term_concept, stat.subject, compound_product=stat.predicate) + predicate = Compound.ExtensionalImage(term_concept, stat.predicate, compound_product=stat.subject) if not inverse_copula else term_concept + statement = Statement(subject, stat.copula, predicate) + + truth = premise.truth + if premise.is_judgement or premise.is_goal: + budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink) + elif premise.is_question or premise.is_quest: + budget = Budget_backward_compound(truth, budget_tasklink, budget_termlink) + + stamp = stamp_task + sentence_derived = Judgement(statement, stamp, truth) + + return Task(sentence_derived, budget) + +# TODO: The higher-order case of product-image transformation. +# Ref: OpenNARS 3.0.4 StructuralRules.java line 375~474 +def transform_product_to_image_higher_order(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False): + ''' + e.g. + <(&&,<(*,a,b) --> R>,...) ==> C>. |- <(&&, (/,R,_,b)>,...) ==> C> + <(&&,<(*,a,b) --> R>,...) <=> C>. |- <(&&, (/,R,_,b)>,...) 
+# TODO: The higher-order case of product-image transformation.
+# Ref: OpenNARS 3.0.4 StructuralRules.java line 375~474
+def transform_product_to_image_higher_order(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    e.g.
+    <(&&,<(*,a,b) --> R>,...) ==> C>. |- <(&&,<a --> (/,R,_,b)>,...) ==> C>
+    <(&&,<(*,a,b) --> R>,...) <=> C>. |- <(&&,<a --> (/,R,_,b)>,...) <=> C>
+    '''
+
+
+'''transform image to product'''
+def transform_image_to_product(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    inverse_copula=False:
+        <T1 --> (/, R, _, T2)> |- <(*, T1, T2) --> R>
+        <T2 --> (/, R, T1, _)> |- <(*, T1, T2) --> R>
+    inverse_copula=True:
+        <(\, R, _, T2) --> T1> |- <R --> (*, T1, T2)>
+        <(\, R, T1, _) --> T2> |- <R --> (*, T1, T2)>
+    '''
+    stamp_task: Stamp = task.stamp
+    premise: Judgement = task.sentence
+    stat: Statement = premise.term
+
+    compound_subject: Compound = stat.subject
+    compound_predicate: Compound = stat.predicate
+    subject = Compound.Product(stat.subject, compound_product=compound_predicate) if not inverse_copula else compound_predicate[0]
+    predicate = compound_subject[0] if not inverse_copula else Compound.Product(stat.predicate, compound_product=compound_subject)
+
+    statement = Statement(subject, stat.copula, predicate)
+    truth = premise.truth
+    if premise.is_judgement or premise.is_goal:
+        budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink)
+    elif premise.is_question or premise.is_quest:
+        budget = Budget_backward_compound(truth, budget_tasklink, budget_termlink)
+
+    stamp = stamp_task
+    sentence_derived = Judgement(statement, stamp, truth)
+
+    return Task(sentence_derived, budget)
+
+
+'''
+Inheritance Theorems
+
+term1 --> term2
+----------------------------------------
+ok 1    (&, T1, T2)             T1
+ok 2    T1                      (|, T1, T2)
+ok 3    (-, T1, T2)             T1
+ok 4    T1                      (~, T1, T2)
+   5    (*, (/, R, _, T), T)    R
+   6    R                       (*, (\, R, _, T), T)
+
+'''
+
+# uni-composition, uni-decomposition--------------------
+
+def inheritance_theorem1(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=None):
+    '''
+    <(&, T1, T2) --> T1>.
+    uni-composition (inverse_premise=False):
+        <P --> S>.
+        <(&, P, Q) --> P>. (analytic truth)
+        |-
+        <(&, P, Q) --> S>.
+    uni-decomposition (inverse_premise=True):
+        <(&, P, Q) --> P>. (analytic truth)
+        <S --> (&, P, Q)>.
+        |-
+        <S --> P>.
+    '''
+    return uni_composition(task, term_concept, budget_tasklink, budget_termlink, inverse_copula=True) if not inverse_premise else uni_decomposition(task, term_concept, budget_tasklink, budget_termlink, inverse_copula=True)
+
+
+def inheritance_theorem2(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=None):
+    '''
+    <T1 --> (|, T1, T2)>.
+    uni-composition (inverse_premise=False):
+        <P --> (|, P, Q)>. (analytic truth)
+        <S --> P>.
+        |-
+        <S --> (|, P, Q)>.
+    uni-decomposition (inverse_premise=True):
+        <(|, P, Q) --> S>.
+        <P --> (|, P, Q)>. (analytic truth)
+        |-
+        <P --> S>.
+    '''
+    return uni_composition(task, term_concept, budget_tasklink, budget_termlink, inverse_copula=False) if not inverse_premise else uni_decomposition(task, term_concept, budget_tasklink, budget_termlink, inverse_copula=False)
+
+
+def inheritance_theorem3(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=None):
+    '''
+    <(-, T1, T2) --> T1>.
+    uni-composition (inverse_premise=False):
+        <P --> S>.
+        <(-, P, Q) --> P>. (analytic truth)
+        |-
+        <(-, P, Q) --> S>.
+    uni-decomposition (inverse_premise=True):
+        <(-, P, Q) --> P>. (analytic truth)
+        <S --> (-, P, Q)>.
+        |-
+        <S --> P>.
+    '''
+    return uni_composition(task, term_concept, budget_tasklink, budget_termlink, inverse_copula=True) if not inverse_premise else uni_decomposition(task, term_concept, budget_tasklink, budget_termlink, inverse_copula=True)
+
+
+def inheritance_theorem4(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=None):
+    '''
+    <T1 --> (~, T1, T2)>.
+    uni-composition (inverse_premise=False):
+        <P --> (~, P, Q)>. (analytic truth)
+        <S --> P>.
+        |-
+        <S --> (~, P, Q)>.
+    uni-decomposition (inverse_premise=True):
+        <(~, P, Q) --> S>.
+        <P --> (~, P, Q)>. (analytic truth)
+        |-
+        <P --> S>.
+    '''
+    return uni_composition(task, term_concept, budget_tasklink, budget_termlink, inverse_copula=False) if not inverse_premise else uni_decomposition(task, term_concept, budget_tasklink, budget_termlink, inverse_copula=False)
+
+# ------------------------------------------------------
+
+
+def inheritance_theorem5(R: Term, T: Term):
+    '''
+    <(*, (/, R, _, T), T) --> R>.
+    '''
+    return Judgement(
+        Statement(
+            Compound(
+                Connector.Product,
+                Compound(
+                    Connector.ExtensionalImage, R, place_holder, T
+                ),
+                T
+            ),
+            Copula.Inheritance,
+            R
+        ),
+        Truth(1, 1, 0)
+    )
+
+
+def inheritance_theorem6(R: Term, T: Term):
+    '''
+    <R --> (*, (\, R, _, T), T)>.
+    '''
+    return Judgement(
+        Statement(
+            R,
+            Copula.Inheritance,
+            Compound(
+                Connector.Product,
+                Compound(
+                    Connector.IntensionalImage, R, place_holder, T
+                ),
+                T
+            )
+        ),
+        Truth(1, 1, 0)
+    )
+
+
+'''
+Similarity Theorems
+
+term1 <-> term2
+---------------------------------------
+1    (--, (--, T))                      T
+2    (|, ({, T1), ..., ({, Tn))         ({, T1, ..., Tn)
+3    (&, ([, T1), ..., ([, Tn))         ([, T1, ..., Tn)
+4    (-, ({, T1, ..., Tn), ({, Tn))     ({, T1, ..., Tn-1)
+5    (~, ([, T1, ..., Tn), ([, Tn))     ([, T1, ..., Tn-1)
+6    (/, (*, T1, T2), _, T2)            T1
+7    (\, (*, T1, T2), _, T2)            T1
+'''
+
+def similarity_theorem1(R: Term, T: Term):
+    '''
+    <(--, (--, T)) <-> T>.
+    '''
+    return Judgement(
+        Statement(
+            Compound(Connector.Negation, Compound(Connector.Negation, T)),
+            Copula.Similarity,
+            T
+        ),
+        Truth(1, 1, 0)
+    )
+
+def similarity_theorem2_1(T1: Term, T2: Term, *Ts: Term):
+    '''
+    <(|, ({, T1), ..., ({, Tn)) <-> ({, T1, ..., Tn)>.
+    '''
+    Ts: List[Compound] = (T1, T2, *Ts)
+    for T in Ts:
+        T: Compound
+        assert isinstance(T, Compound) and T.connector == Connector.ExtensionalSet
+
+    term1 = Compound(Connector.IntensionalIntersection, *Ts)
+    term2 = Compound(Connector.ExtensionalSet, *(T[0] for T in Ts))
+
+    return Judgement(
+        Statement(
+            term1,
+            Copula.Similarity,
+            term2
+        ),
+        Truth(1, 1, 0)
+    )
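Editor's note: `uni_composition` and `uni_decomposition` both reduce to deduction against an analytic premise with truth (f=1, c=1). A hedged sketch of the standard NAL deduction truth function (formula from the NAL literature; the repo's `Truth_deduction` may differ in detail):

```python
def truth_deduction(f1, c1, f2, c2):
    # F_ded: f = f1*f2, c = f1*f2*c1*c2
    f = f1 * f2
    c = f1 * f2 * c1 * c2
    return f, c

# With the analytic premise (f2, c2) = (1, 1), frequency is preserved and
# confidence is scaled by the other premise's own frequency:
print(truth_deduction(0.9, 0.9, 1.0, 1.0))  # (0.9, 0.81)
```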
+def similarity_theorem2_2(T1: Term, T2: Term, *Ts: Term):
+    '''
+    <({, T1, ..., Tn) <-> (|, ({, T1), ..., ({, Tn))>.
+    '''
+    Ts: List[Compound] = (T1, T2, *Ts)
+
+    term1 = Compound(Connector.IntensionalIntersection, *(Compound(Connector.ExtensionalSet, T) for T in Ts))
+    term2 = Compound(Connector.ExtensionalSet, *Ts)
+
+    return Judgement(
+        Statement(
+            term1,
+            Copula.Similarity,
+            term2
+        ),
+        Truth(1, 1, 0)
+    )
+
+def similarity_theorem3_1(T1: Term, T2: Term, *Ts: Term):
+    '''
+    <(&, ([, T1), ..., ([, Tn)) <-> ([, T1, ..., Tn)>.
+    '''
+    Ts: List[Compound] = (T1, T2, *Ts)
+    for T in Ts:
+        T: Compound
+        assert isinstance(T, Compound) and T.connector == Connector.IntensionalSet
+
+    term1 = Compound(Connector.ExtensionalIntersection, *Ts)
+    term2 = Compound(Connector.IntensionalSet, *(T[0] for T in Ts))
+
+    return Judgement(
+        Statement(
+            term1,
+            Copula.Similarity,
+            term2
+        ),
+        Truth(1, 1, 0)
+    )
+
+def similarity_theorem3_2(T1: Term, T2: Term, *Ts: Term):
+    '''
+    <([, T1, ..., Tn) <-> (&, ([, T1), ..., ([, Tn))>.
+    '''
+    Ts: List[Compound] = (T1, T2, *Ts)
+
+    term1 = Compound(Connector.ExtensionalIntersection, *(Compound(Connector.IntensionalSet, T) for T in Ts))
+    term2 = Compound(Connector.IntensionalSet, *Ts)
+
+    return Judgement(
+        Statement(
+            term1,
+            Copula.Similarity,
+            term2
+        ),
+        Truth(1, 1, 0)
+    )
+
+def similarity_theorem4(T1: Term, T2: Term, *Ts: Term):
+    '''
+    <(-, ({, T1, ..., Tn), ({, Tn)) <-> ({, T1, ..., Tn-1)>.
+    '''
+    Ts: List[Compound] = (T1, T2, *Ts)
+    Tn = Ts[-1]
+    term1 = Compound(Connector.ExtensionalDifference, Compound(Connector.ExtensionalSet, *Ts), Compound(Connector.ExtensionalSet, Tn))
+    term2 = Compound(Connector.ExtensionalSet, *Ts[:-1])
+
+    return Judgement(
+        Statement(
+            term1,
+            Copula.Similarity,
+            term2
+        ),
+        Truth(1, 1, 0)
+    )
+
+def similarity_theorem5(T1: Term, T2: Term, *Ts: Term):
+    '''
+    <(~, ([, T1, ..., Tn), ([, Tn)) <-> ([, T1, ..., Tn-1)>.
+    '''
+    Ts: List[Compound] = (T1, T2, *Ts)
+    Tn = Ts[-1]
+    term1 = Compound(Connector.IntensionalDifference, Compound(Connector.IntensionalSet, *Ts), Compound(Connector.IntensionalSet, Tn))
+    term2 = Compound(Connector.IntensionalSet, *Ts[:-1])
+
+    return Judgement(
+        Statement(
+            term1,
+            Copula.Similarity,
+            term2
+        ),
+        Truth(1, 1, 0)
+    )
+
+def similarity_theorem6(T1: Term, T2: Term):
+    '''
+    <(/, (*, T1, T2), _, T2) <-> T1>.
+    '''
+    term1 = Compound(Connector.ExtensionalImage, Compound(Connector.Product, T1, T2), place_holder, T2)
+    term2 = T1
+
+    return Judgement(
+        Statement(
+            term1,
+            Copula.Similarity,
+            term2
+        ),
+        Truth(1, 1, 0)
+    )
+
+def similarity_theorem7(T1: Term, T2: Term):
+    '''
+    <(\, (*, T1, T2), _, T2) <-> T1>.
+    '''
+    term1 = Compound(Connector.IntensionalImage, Compound(Connector.Product, T1, T2), place_holder, T2)
+    term2 = T1
+
+    return Judgement(
+        Statement(
+            term1,
+            Copula.Similarity,
+            term2
+        ),
+        Truth(1, 1, 0)
+    )
+
+'''
+The Implication Theorems
+
+statement1 ==> statement2
+---------------------------------------
+ok 1    <S <-> P>       <S --> P>
+ok 2    <S <=> P>       <S ==> P>
+ok 3    (&&, S1, S2)    S1
+   4    S1              (||, S1, S2)
+ok 5    <S --> P>       <(|, S, M) --> (|, P, M)>
+ok 6    <S --> P>       <(&, S, M) --> (&, P, M)>
+ok 7    <S <-> P>       <(|, S, M) <-> (|, P, M)>
+ok 8    <S <-> P>       <(&, S, M) <-> (&, P, M)>
+ok 9    <S ==> P>       <(||, S, M) ==> (||, P, M)>
+ok 10   <S ==> P>       <(&&, S, M) ==> (&&, P, M)>
+ok 11   <S <=> P>       <(||, S, M) <=> (||, P, M)>
+ok 12   <S <=> P>       <(&&, S, M) <=> (&&, P, M)>
+ok 13   <S --> P>       <(-, S, M) --> (-, P, M)>
+ok 14   <S --> P>       <(-, M, P) --> (-, M, S)>
+ok 15   <S --> P>       <(~, S, M) --> (~, P, M)>
+ok 16   <S --> P>       <(~, M, P) --> (~, M, S)>
+ok 17   <S <-> P>       <(-, S, M) <-> (-, P, M)>
+ok 18   <S <-> P>       <(-, M, P) <-> (-, M, S)>
+ok 19   <S <-> P>       <(~, S, M) <-> (~, P, M)>
+ok 20   <S <-> P>       <(~, M, P) <-> (~, M, S)>
+   21   <M --> (-, T1, T2)>     (--, <M --> T2>)
+   22   <(~, T1, T2) --> M>     (--, <T2 --> M>)
+   23   <S --> P>       <(/, S, M) --> (/, P, M)>
+   24   <S --> P>       <(\, S, M) --> (\, P, M)>
+   25   <S --> P>       <(/, M, P) --> (/, M, S)>
+   26   <S --> P>       <(\, M, P) --> (\, M, S)>
+'''
+
+def implication_theorem1(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    <<S <-> P> ==> <S --> P>>.
+    '''
+    stamp_task: Stamp = task.stamp
+    premise: Judgement = task.sentence
+    stat: Statement = premise.term
+
+    subject = stat.subject if not inverse_copula else stat.predicate
+    predicate = stat.predicate if not inverse_copula else stat.subject
+    statement = Statement(subject, stat.copula, predicate)
+    stamp = stamp_task
+    if premise.is_judgement:
+        truth = Truth_deduction(premise.truth, truth_analytic)
+        sentence_derived = Judgement(statement, stamp, truth)
+        budget = Budget_forward(truth, budget_tasklink, budget_termlink)
+    # elif premise.is_goal:
+    #     truth = Truth_deduction(premise.truth, truth_analytic)
+    #     sentence_derived = Goal(term_concept, stamp, truth)
+    #     budget = Budget_forward(truth, budget_tasklink, budget_termlink)
+    # elif premise.is_question:
+    #     sentence_derived = Question(term_concept, stamp)
+    #     budget = Budget_backward_compound(premise.term, budget_tasklink, budget_termlink)
+    # elif premise.is_quest:
+    #     sentence_derived = Quest(term_concept, stamp)
+    #     budget = Budget_backward_compound(premise.term, budget_tasklink, budget_termlink)
+    else: raise ValueError('Invalid case.')
+
+    return Task(sentence_derived, budget)
+
+
+def implication_theorem2(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    <<S <=> P> ==> <S ==> P>>.
+    '''
+    return implication_theorem1(task, term_concept, budget_tasklink, budget_termlink, inverse_premise, inverse_copula)
+
+
+def implication_theorem3(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    <(&&, S1, S2) ==> S1>.
+    '''
+    stamp_task: Stamp = task.stamp
+    premise: Judgement = task.sentence
+
+    stamp = stamp_task
+    if premise.is_judgement:
+        truth = Truth_deduction(premise.truth, truth_analytic)
+        sentence_derived = Judgement(term_concept, stamp, truth)
+        budget = Budget_forward(truth, budget_tasklink, budget_termlink)
+    elif premise.is_goal:
+        truth = Truth_deduction(premise.truth, truth_analytic)
+        sentence_derived = Goal(term_concept, stamp, truth)
+        budget = Budget_forward(truth, budget_tasklink, budget_termlink)
+    elif premise.is_question:
+        sentence_derived = Question(term_concept, stamp)
+        budget = Budget_backward_compound(premise.term, budget_tasklink, budget_termlink)
+    elif premise.is_quest:
+        sentence_derived = Quest(term_concept, stamp)
+        budget = Budget_backward_compound(premise.term, budget_tasklink, budget_termlink)
+    else: raise ValueError('Invalid case.')
+
+    return Task(sentence_derived, budget)
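Editor's note: `implication_theorem3` dispatches conjunction elimination per sentence kind: judgements and goals get a forward (analytic-deduction) truth, while questions and quests only get a backward budget. A rough sketch of that dispatch, with assumed stand-in types rather than the repo's `Sentence` hierarchy:

```python
from dataclasses import dataclass

@dataclass
class Toy:
    term: object
    punctuation: str      # '.' judgement, '!' goal, '?' question, '@' quest
    truth: tuple = None   # (f, c); absent for questions/quests

def conjunction_elimination(premise: Toy, component) -> Toy:
    if premise.punctuation in ('.', '!'):   # forward: deduce with analytic truth (1, 1)
        f, c = premise.truth
        return Toy(component, premise.punctuation, (f, f * c))
    if premise.punctuation in ('?', '@'):   # backward: structure only, no truth
        return Toy(component, premise.punctuation)
    raise ValueError('Invalid case.')

print(conjunction_elimination(Toy(('&&', 'S1', 'S2'), '.', (0.9, 0.9)), 'S1'))
```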
+def implication_theorem4(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    <S1 ==> (||, S1, S2)>.
+    '''
+    stamp_task: Stamp = task.stamp
+    premise: Judgement = task.sentence
+
+    stamp = stamp_task
+    if premise.is_judgement:
+        truth = Truth_deduction(premise.truth, truth_analytic)
+        sentence_derived = Judgement(term_concept, stamp, truth)
+        budget = Budget_forward(truth, budget_tasklink, budget_termlink)
+    elif premise.is_goal:
+        truth = Truth_deduction(premise.truth, truth_analytic)
+        sentence_derived = Goal(term_concept, stamp, truth)
+        budget = Budget_forward(truth, budget_tasklink, budget_termlink)
+    elif premise.is_question:
+        sentence_derived = Question(term_concept, stamp)
+        budget = Budget_backward_compound(term_concept, budget_tasklink, budget_termlink)
+    elif premise.is_quest:
+        sentence_derived = Quest(term_concept, stamp)
+        budget = Budget_backward_compound(term_concept, budget_tasklink, budget_termlink)
+    else: raise ValueError('Invalid case.')
+
+    return Task(sentence_derived, budget)
+
+
+# bi-composition----------------------------------------
+
+def implication_theorem5(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    <<S --> P> ==> <(|, S, M) --> (|, P, M)>>.
+    '''
+    return bi_composition(task, term_concept, budget_tasklink, budget_termlink)
+
+
+def implication_theorem6(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    <<S --> P> ==> <(&, S, M) --> (&, P, M)>>.
+    '''
+    return bi_composition(task, term_concept, budget_tasklink, budget_termlink)
+
+
+def implication_theorem7(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    <<S <-> P> ==> <(|, S, M) <-> (|, P, M)>>.
+    '''
+    return bi_composition(task, term_concept, budget_tasklink, budget_termlink)
+
+
+def implication_theorem8(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    <<S <-> P> ==> <(&, S, M) <-> (&, P, M)>>.
+    '''
+    return bi_composition(task, term_concept, budget_tasklink, budget_termlink)
+
+
+def implication_theorem9(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    <<S ==> P> ==> <(||, S, M) ==> (||, P, M)>>.
+    '''
+    return bi_composition(task, term_concept, budget_tasklink, budget_termlink)
+
+
+def implication_theorem10(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    <<S ==> P> ==> <(&&, S, M) ==> (&&, P, M)>>.
+    '''
+    return bi_composition(task, term_concept, budget_tasklink, budget_termlink)
+
+
+def implication_theorem11(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    <<S <=> P> ==> <(||, S, M) <=> (||, P, M)>>.
+    '''
+    return bi_composition(task, term_concept, budget_tasklink, budget_termlink)
+
+
+def implication_theorem12(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    <<S <=> P> ==> <(&&, S, M) <=> (&&, P, M)>>.
+    '''
+    return bi_composition(task, term_concept, budget_tasklink, budget_termlink)
+
+
+def implication_theorem13(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    <<S --> P> ==> <(-, S, M) --> (-, P, M)>>.
+    '''
+    return bi_composition(task, term_concept, budget_tasklink, budget_termlink)
+
+
+def implication_theorem14(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    <<S --> P> ==> <(-, M, P) --> (-, M, S)>>.
+    '''
+    return bi_composition(task, term_concept, budget_tasklink, budget_termlink)
+
+
+def implication_theorem15(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    <<S --> P> ==> <(~, S, M) --> (~, P, M)>>.
+    '''
+    return bi_composition(task, term_concept, budget_tasklink, budget_termlink)
+
+
+def implication_theorem16(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    <<S --> P> ==> <(~, M, P) --> (~, M, S)>>.
+    '''
+    return bi_composition(task, term_concept, budget_tasklink, budget_termlink)
+
+
+def implication_theorem17(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    <<S <-> P> ==> <(-, S, M) <-> (-, P, M)>>.
+    '''
+    return bi_composition(task, term_concept, budget_tasklink, budget_termlink)
+
+
+def implication_theorem18(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    <<S <-> P> ==> <(-, M, P) <-> (-, M, S)>>.
+    '''
+    return bi_composition(task, term_concept, budget_tasklink, budget_termlink)
+
+
+def implication_theorem19(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    <<S <-> P> ==> <(~, S, M) <-> (~, P, M)>>.
+    '''
+    return bi_composition(task, term_concept, budget_tasklink, budget_termlink)
+
+
+def implication_theorem20(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    <<S <-> P> ==> <(~, M, P) <-> (~, M, S)>>.
+    '''
+    return bi_composition(task, term_concept, budget_tasklink, budget_termlink)
+
+# ------------------------------------------------------
+
+
+def implication_theorem21(M: Term, T1: Term, T2: Term):
+    '''
+    <<M --> (-, T1, T2)> ==> (--, <M --> T2>)>.
+    '''
+    return Judgement(
+        Statement(
+            Statement(M, Copula.Inheritance, Compound(Connector.ExtensionalDifference, T1, T2)),
+            Copula.Implication,
+            Compound(Connector.Negation, Statement(M, Copula.Inheritance, T2))
+        ),
+        Truth(1, 1, 0)
+    )
+
+def implication_theorem22(M: Term, T1: Term, T2: Term):
+    '''
+    <<(~, T1, T2) --> M> ==> (--, <T2 --> M>)>.
+    '''
+    return Judgement(
+        Statement(
+            Statement(Compound(Connector.IntensionalDifference, T1, T2), Copula.Inheritance, M),
+            Copula.Implication,
+            Compound(Connector.Negation, Statement(T2, Copula.Inheritance, M))
+        ),
+        Truth(1, 1, 0)
+    )
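Editor's note: theorems 21 and 22 introduce negations from differences. Under a naive set reading of extensional difference, theorem 21 is easy to spot-check (booleans here, whereas NAL truth is graded, so this is only a sanity sketch):

```python
T1 = {'a', 'b', 'c'}
T2 = {'c', 'd'}

# <M --> (-, T1, T2)> reads: m is in T1 but not in T2 ...
for m in T1 - T2:
    assert m not in T2        # ... so (--, <M --> T2>) must hold
print(sorted(T1 - T2))        # ['a', 'b']
```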
+def implication_theorem23(S: Term, P: Term, M: Term):
+    '''
+    <<S --> P> ==> <(/, S, M) --> (/, P, M)>>.
+    '''
+    return Judgement(
+        Statement(
+            Statement(S, Copula.Inheritance, P),
+            Copula.Implication,
+            Statement(Compound(Connector.ExtensionalImage, S, M), Copula.Inheritance, Compound(Connector.ExtensionalImage, P, M))
+        ),
+        Truth(1, 1, 0)
+    )
+
+
+def implication_theorem24(S: Term, P: Term, M: Term):
+    '''
+    <<S --> P> ==> <(\, S, M) --> (\, P, M)>>.
+    '''
+    return Judgement(
+        Statement(
+            Statement(S, Copula.Inheritance, P),
+            Copula.Implication,
+            Statement(Compound(Connector.IntensionalImage, S, M), Copula.Inheritance, Compound(Connector.IntensionalImage, P, M))
+        ),
+        Truth(1, 1, 0)
+    )
+
+
+def implication_theorem25(S: Term, P: Term, M: Term):
+    '''
+    <<S --> P> ==> <(/, M, P) --> (/, M, S)>>.
+    '''
+    return Judgement(
+        Statement(
+            Statement(S, Copula.Inheritance, P),
+            Copula.Implication,
+            Statement(Compound(Connector.ExtensionalImage, M, P), Copula.Inheritance, Compound(Connector.ExtensionalImage, M, S))
+        ),
+        Truth(1, 1, 0)
+    )
+
+
+def implication_theorem26(S: Term, P: Term, M: Term):
+    '''
+    <<S --> P> ==> <(\, M, P) --> (\, M, S)>>.
+    '''
+    return Judgement(
+        Statement(
+            Statement(S, Copula.Inheritance, P),
+            Copula.Implication,
+            Statement(Compound(Connector.IntensionalImage, M, P), Copula.Inheritance, Compound(Connector.IntensionalImage, M, S))
+        ),
+        Truth(1, 1, 0)
+    )
+
+
+'''
+The Equivalence Theorems (Table B.9)
+
+statement1 <=> statement2
+------------------------------------------------------------------------------
+   1    <S <-> P>       (&&, <S --> P>, <P --> S>)
+   2    <S <=> P>       (&&, <S ==> P>, <P ==> S>)
+ok 3    <S <-> P>       <({, S) <-> ({, P)>
+ok 4    <S <-> P>       <([, S) <-> ([, P)>
+ok 5    <S --> ({, P)>  <S <-> ({, P)>
+ok 6    <([, S) --> P>  <([, S) <-> P>
+   7    <(*, S1, S2) --> (*, P1, P2)>   (&&, <S1 --> P1>, <S2 --> P2>)
+   8    <(*, S1, S2) <-> (*, P1, P2)>   (&&, <S1 <-> P1>, <S2 <-> P2>)
+ok 9    <S --> P>       <(*, M, S) --> (*, M, P)>
+ok 10   <S --> P>       <(*, S, M) --> (*, P, M)>
+ok 11   <S <-> P>       <(*, M, S) <-> (*, M, P)>
+ok 12   <S <-> P>       <(*, S, M) <-> (*, P, M)>
+ok 13   <(*, T1, T2) --> R>     <T1 --> (/, R, _, T2)>
+ok 14   <(*, T1, T2) --> R>     <T2 --> (/, R, T1, _)>
+ok 15   <R --> (*, T1, T2)>     <(\, R, _, T2) --> T1>
+ok 16   <R --> (*, T1, T2)>     <(\, R, T1, _) --> T2>
+   17   <S1 ==> <S2 ==> S3>>    <(&&, S1, S2) ==> S3>
+   18   (--, (&&, S1, S2))      (||, (--, S1), (--, S2))
+   19   (--, (||, S1, S2))      (&&, (--, S1), (--, S2))
+   20   <S1 <=> S2>             <(--, S1) <=> (--, S2)>
+
+'''
+
+def equivalence_theorem1(S: Term, P: Term):
+    '''
+    <<S <-> P> <=> (&&, <S --> P>, <P --> S>)>.
+    '''
+    return Judgement(
+        Statement(
+            Statement(S, Copula.Similarity, P),
+            Copula.Equivalence,
+            Compound(Connector.Conjunction, Statement(S, Copula.Inheritance, P), Statement(P, Copula.Inheritance, S))
+        ),
+        Truth(1, 1, 0)
+    )
+
+def equivalence_theorem2(S: Term, P: Term):
+    '''
+    <<S <=> P> <=> (&&, <S ==> P>, <P ==> S>)>.
+    '''
+    return Judgement(
+        Statement(
+            Statement(S, Copula.Equivalence, P),
+            Copula.Equivalence,
+            Compound(Connector.Conjunction, Statement(S, Copula.Implication, P), Statement(P, Copula.Implication, S))
+        ),
+        Truth(1, 1, 0)
+    )
+
+
+# ok ---------------------------------------------------
+
+def equivalence_theorem3(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    <<S <-> P> <=> <({, S) <-> ({, P)>>.
+    '''
+    stamp_task: Stamp = task.stamp
+    premise: Judgement = task.sentence
+    stat: Statement = premise.term
+    truth = premise.truth
+    copula = stat.copula
+
+    if not inverse_copula:
+        subject = Compound.Instance(stat.subject)
+        predicate = Compound.Instance(stat.predicate)
+    else:
+        compound_subject: Compound = stat.subject
+        compound_predicate: Compound = stat.predicate
+        subject = compound_subject[0]
+        predicate = compound_predicate[0]
+
+    statement = Statement(subject, copula, predicate)
+
+    if premise.is_judgement:
+        budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink)
+    elif premise.is_goal or premise.is_question or premise.is_quest:
+        budget = Budget_backward_compound(truth, budget_tasklink, budget_termlink)
+
+    stamp = stamp_task
+    sentence_derived = Judgement(statement, stamp, truth)
+
+    return Task(sentence_derived, budget)
+
+
+def equivalence_theorem4(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    <<S <-> P> <=> <([, S) <-> ([, P)>>.
+    '''
+    stamp_task: Stamp = task.stamp
+    premise: Judgement = task.sentence
+    stat: Statement = premise.term
+    truth = premise.truth
+    copula = stat.copula
+
+    if not inverse_copula:
+        subject = Compound.Property(stat.subject)
+        predicate = Compound.Property(stat.predicate)
+    else:
+        compound_subject: Compound = stat.subject
+        compound_predicate: Compound = stat.predicate
+        subject = compound_subject[0]
+        predicate = compound_predicate[0]
+
+    statement = Statement(subject, copula, predicate)
+    if premise.is_judgement:
+        budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink)
+    elif premise.is_goal or premise.is_question or premise.is_quest:
+        budget = Budget_backward_compound(truth, budget_tasklink, budget_termlink)
+
+    stamp = stamp_task
+    sentence_derived = Judgement(statement, stamp, truth)
+
+    return Task(sentence_derived, budget)
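Editor's note: equivalence theorems 3-6 move between a bare statement and its singleton-set form. A tiny stand-in (tuples, not the repo's `Compound.Instance`/`Compound.Property`) showing the wrap and unwrap directions that `inverse_copula` selects:

```python
def wrap(stmt, set_connector):    # <S <-> P>  |-  <({, S) <-> ({, P)>
    s, copula, p = stmt
    return ((set_connector, s), copula, (set_connector, p))

def unwrap(stmt):                 # <({, S) <-> ({, P)>  |-  <S <-> P>
    (cs, s), copula, (cp, p) = stmt
    assert cs == cp
    return (s, copula, p)

stmt = ('bird', '<->', 'flier')
wrapped = wrap(stmt, '{')
print(wrapped)                    # (('{', 'bird'), '<->', ('{', 'flier'))
print(unwrap(wrapped) == stmt)    # True
```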
+def equivalence_theorem5(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    <<S --> {P}> <=> <S <-> {P}>>.
+    '''
+    stamp_task: Stamp = task.stamp
+    premise: Judgement = task.sentence
+    stat: Statement = premise.term
+    truth = premise.truth
+    if not inverse_copula:
+        copula = Copula.Similarity
+        subject = stat.subject
+        predicate = stat.predicate
+    else:
+        copula = Copula.Inheritance
+        compound_subject: Compound = stat.subject
+        compound_predicate: Compound = stat.predicate
+        if compound_subject.is_compound and compound_subject.connector == Connector.ExtensionalSet:
+            subject = stat.subject
+            predicate = stat.predicate
+        elif compound_predicate.is_compound and compound_predicate.connector == Connector.ExtensionalSet:
+            subject = stat.predicate
+            predicate = stat.subject
+
+    statement = Statement(subject, copula, predicate)
+
+    if premise.is_judgement:
+        budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink)
+    elif premise.is_goal or premise.is_question or premise.is_quest:
+        budget = Budget_backward_compound(truth, budget_tasklink, budget_termlink)
+
+    stamp = stamp_task
+    sentence_derived = Judgement(statement, stamp, truth)
+
+    return Task(sentence_derived, budget)
+
+
+def equivalence_theorem6(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    <<[S] --> P> <=> <[S] <-> P>>.
+    '''
+    stamp_task: Stamp = task.stamp
+    premise: Judgement = task.sentence
+    stat: Statement = premise.term
+    truth = premise.truth
+    if not inverse_copula:
+        copula = Copula.Similarity
+        subject = stat.subject
+        predicate = stat.predicate
+    else:
+        copula = Copula.Inheritance
+        compound_subject: Compound = stat.subject
+        compound_predicate: Compound = stat.predicate
+        if compound_subject.is_compound and compound_subject.connector == Connector.IntensionalSet:
+            subject = stat.subject
+            predicate = stat.predicate
+        elif compound_predicate.is_compound and compound_predicate.connector == Connector.IntensionalSet:
+            subject = stat.predicate
+            predicate = stat.subject
+
+    statement = Statement(subject, copula, predicate)
+
+    if premise.is_judgement:
+        budget = Budget_forward_compound(statement, truth, budget_tasklink, budget_termlink)
+    elif premise.is_goal or premise.is_question or premise.is_quest:
+        budget = Budget_backward_compound(truth, budget_tasklink, budget_termlink)
+
+    stamp = stamp_task
+    sentence_derived = Judgement(statement, stamp, truth)
+
+    return Task(sentence_derived, budget)
+
+# ------------------------------------------------------
+
+
+def equivalence_theorem7(S1: Term, S2: Term, P1: Term, P2: Term):
+    '''
+    <<(*, S1, S2) --> (*, P1, P2)> <=> (&&, <S1 --> P1>, <S2 --> P2>)>.
+    '''
+    return Judgement(
+        Statement(
+            Statement(Compound(Connector.Product, S1, S2), Copula.Inheritance, Compound(Connector.Product, P1, P2)),
+            Copula.Equivalence,
+            Compound(Connector.Conjunction, Statement(S1, Copula.Inheritance, P1), Statement(S2, Copula.Inheritance, P2))
+        ),
+        Truth(1, 1, 0)
+    )
+
+def equivalence_theorem8(S1: Term, S2: Term, P1: Term, P2: Term):
+    '''
+    <<(*, S1, S2) <-> (*, P1, P2)> <=> (&&, <S1 <-> P1>, <S2 <-> P2>)>.
+    '''
+    return Judgement(
+        Statement(
+            Statement(Compound(Connector.Product, S1, S2), Copula.Similarity, Compound(Connector.Product, P1, P2)),
+            Copula.Equivalence,
+            Compound(Connector.Conjunction, Statement(S1, Copula.Similarity, P1), Statement(S2, Copula.Similarity, P2))
+        ),
+        Truth(1, 1, 0)
+    )
+
+
+# bi-composition, bi-decomposition----------------------
+
+def equivalence_theorem9(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    <<S --> P> <=> <(*, M, S) --> (*, M, P)>>.
+    '''
+    return bi_composition(task, term_concept, budget_tasklink, budget_termlink) if not inverse_copula else bi_decomposition(task, term_concept, budget_tasklink, budget_termlink)
+
+
+def equivalence_theorem10(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    <<S --> P> <=> <(*, S, M) --> (*, P, M)>>.
+    '''
+    return equivalence_theorem9(task, term_concept, budget_tasklink, budget_termlink, inverse_premise, inverse_copula)
+
+
+def equivalence_theorem11(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    <<S <-> P> <=> <(*, M, S) <-> (*, M, P)>>.
+    '''
+    return bi_composition(task, term_concept, budget_tasklink, budget_termlink) if not inverse_copula else bi_decomposition(task, term_concept, budget_tasklink, budget_termlink)
+
+
+def equivalence_theorem12(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    <<S <-> P> <=> <(*, S, M) <-> (*, P, M)>>.
+    '''
+    return equivalence_theorem11(task, term_concept, budget_tasklink, budget_termlink, inverse_premise, inverse_copula)
+
+# ------------------------------------------------------
+
+
+def equivalence_theorem13(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    <<(*, T1, T2) --> R> <=> <T1 --> (/, R, _, T2)>>.
+    '''
+    return transform_product_to_image(task, term_concept, budget_tasklink, budget_termlink, inverse_copula=False) if not inverse_copula else transform_image_to_product(task, term_concept, budget_tasklink, budget_termlink, inverse_copula=False)
+
+
+def equivalence_theorem14(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    <<(*, T1, T2) --> R> <=> <T2 --> (/, R, T1, _)>>.
+    '''
+    return equivalence_theorem13(task, term_concept, budget_tasklink, budget_termlink, inverse_premise, inverse_copula)
+
+
+def equivalence_theorem15(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    <<R --> (*, T1, T2)> <=> <(\, R, _, T2) --> T1>>.
+    '''
+    return transform_product_to_image(task, term_concept, budget_tasklink, budget_termlink, inverse_copula=True) if not inverse_copula else transform_image_to_product(task, term_concept, budget_tasklink, budget_termlink, inverse_copula=True)
+def equivalence_theorem16(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False):
+    '''
+    <<R --> (*, T1, T2)> <=> <(\, R, T1, _) --> T2>>.
+    '''
+    return equivalence_theorem15(task, term_concept, budget_tasklink, budget_termlink, inverse_premise, inverse_copula)
+
+
+def equivalence_theorem17(S1: Term, S2: Term, S3: Term):
+    '''
+    <<S1 ==> <S2 ==> S3>> <=> <(&&, S1, S2) ==> S3>>.
+    '''
+    assert isinstance(S1, Statement) and isinstance(S2, Statement) and isinstance(S3, Statement)
+    return Judgement(
+        Statement(
+            Statement(S1, Copula.Implication, Statement(S2, Copula.Implication, S3)),
+            Copula.Equivalence,
+            Statement(Compound(Connector.Conjunction, S1, S2), Copula.Implication, S3)
+        ),
+        Truth(1, 1, 0)
+    )
+
+def equivalence_theorem18(S1: Term, S2: Term):
+    '''
+    <(--, (&&, S1, S2)) <=> (||, (--, S1), (--, S2))>.
+    '''
+    assert isinstance(S1, Statement) and isinstance(S2, Statement)
+    return Judgement(
+        Statement(
+            Compound(Connector.Negation, Compound(Connector.Conjunction, S1, S2)),
+            Copula.Equivalence,
+            Compound(Connector.Disjunction, Compound(Connector.Negation, S1), Compound(Connector.Negation, S2))
+        ),
+        Truth(1, 1, 0)
+    )
+
+def equivalence_theorem19(S1: Term, S2: Term):
+    '''
+    <(--, (||, S1, S2)) <=> (&&, (--, S1), (--, S2))>.
+    '''
+    assert isinstance(S1, Statement) and isinstance(S2, Statement)
+    return Judgement(
+        Statement(
+            Compound(Connector.Negation, Compound(Connector.Disjunction, S1, S2)),
+            Copula.Equivalence,
+            Compound(Connector.Conjunction, Compound(Connector.Negation, S1), Compound(Connector.Negation, S2))
+        ),
+        Truth(1, 1, 0)
+    )
+
+def equivalence_theorem20(S1: Term, S2: Term):
+    '''
+    <<S1 <=> S2> <=> <(--, S1) <=> (--, S2)>>.
+    '''
+    assert isinstance(S1, Statement) and isinstance(S2, Statement)
+    return Judgement(
+        Statement(
+            Statement(S1, Copula.Equivalence, S2),
+            Copula.Equivalence,
+            Statement(Compound(Connector.Negation, S1), Copula.Equivalence, Compound(Connector.Negation, S2))
+        ),
+        Truth(1, 1, 0)
+    )
diff --git a/NAL/Theorems/TransformRules.py b/NAL/Theorems/TransformRules.py
new file mode 100644
index 0000000..ebe397a
--- /dev/null
+++ b/NAL/Theorems/TransformRules.py
@@ -0,0 +1,125 @@
+'''Although there are theorems about the transformation between product and image, they are highly specialized and can only handle some special forms or cases, e.g. `equivalence_theorem13()` in `StructuralRules.py`.
+This file implements some more generalized transform functions, though with slightly different parameters.
+'''
+from typing import List
+from Narsese import Copula, Statement, Compound, Connector, Term, Judgement, Truth, Task, Belief, Budget, Stamp, Goal, Quest, Question
+from Narsese import place_holder
+from Narsese._py.Sentence import Sentence
+
+from ..Functions.TruthValueFunctions import *
+from ..Functions.DesireValueFunctions import *
+from ..Functions.StampFunctions import *
+from ..Functions.BudgetFunctions import *
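Editor's note: the transform functions below take an `index` tuple that addresses a subterm by a path of component positions, mirroring how `term_task[index[:-2]]` and `stat_product[index[-2]]` are used. A sketch of that addressing over plain nested tuples (assumed semantics; the repo's `Term.__getitem__` may differ, e.g. a copula occupies a slot in this toy encoding):

```python
def subterm(term, path):
    for i in path:
        term = term[i]
    return term

# <(&&, <(*,a,b) --> R>, ...) ==> C> as nested tuples:
conditional = ((('*', 'a', 'b'), '-->', 'R'), '==>', 'C')
print(subterm(conditional, (0,)))        # the antecedent <(*,a,b) --> R>
print(subterm(conditional, (0, 0)))      # the product (*, a, b)
print(subterm(conditional, (0, 0, 2)))   # 'b'
```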
+
+
+def product_to_image(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False, index: tuple=None):
+    '''
+    It should be ensured that `len(index) >= 2`.
+    e.g. <(&&,<(*,a,b) --> R>,...) ==> C>. |- <(&&,<a --> (/,R,_,b)>,...) ==> C>
+    '''
+    term_task = task.term
+    stat_product: Statement = term_task[index[:-2]]        # <(*,a,b) --> R>
+    compound_product: Compound = stat_product[index[-2]]   # (*,a,b)
+    idx_relation = 1-index[-2]
+    idx_product = index[-1]
+    term_relation = stat_product[idx_relation]             # R
+    if idx_relation == 0:    # intensional image
+        predicate = compound_product[idx_product]
+        subject = Compound.IntensionalImage(term_relation, compound_product=compound_product, idx=idx_product)
+    elif idx_relation == 1:  # extensional image
+        subject = compound_product[idx_product]
+        predicate = Compound.ExtensionalImage(term_relation, compound_product=compound_product, idx=idx_product)
+    else: raise ValueError('Invalid case.')
+    stat_image = Statement(subject, stat_product.copula, predicate)
+    budget = task.budget
+    stamp = task.stamp
+
+    if task.is_judgement:
+        truth = task.truth
+        sentence_derived = Judgement(stat_image, stamp, truth)
+    elif task.is_goal:
+        truth = task.truth
+        sentence_derived = Goal(stat_image, stamp, truth)
+    elif task.is_question:
+        sentence_derived = Question(stat_image, stamp)
+    elif task.is_quest:
+        sentence_derived = Quest(stat_image, stamp)
+    else: raise ValueError('Invalid case.')
+
+    return Task(sentence_derived, budget)
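Editor's note: `product_to_image` and `image_to_product` are intended inverses; a quick round-trip check in the same toy tuple encoding used above (illustrative only, not the repo API):

```python
def ext_image_to_product(statement):
    subject, copula, image = statement
    assert image[0] == '/' and copula == '-->'
    relation, args = image[1], list(image[2:])
    args[args.index('_')] = subject       # put the subject back into the blank
    return (('*', *args), '-->', relation)

image_stmt = ('acid', '-->', ('/', 'reaction', '_', 'base'))
print(ext_image_to_product(image_stmt))  # (('*', 'acid', 'base'), '-->', 'reaction')
```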
+
+
+def image_to_product(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False, index=None):
+    '''The inverse of `product_to_image`: recover a product statement from an image statement.'''
+    term_task = task.term
+    stat_image: Statement = term_task[index[:-2]]      # <a --> (/,R,_,b)>
+    compound_image: Compound = stat_image[index[-2]]   # (/,R,_,b)
+    idx_term = 1-index[-2]
+    idx_image = index[-1]
+    term_relation = compound_image[0]                  # R
+    term = stat_image[1-index[-2]]
+
+    compound_product = Compound.Product(term, compound_image=compound_image)
+
+    if idx_term == 0:
+        subject = compound_product
+        predicate = term_relation
+    elif idx_term == 1:
+        subject = term_relation
+        predicate = compound_product
+    else: raise ValueError('Invalid case.')
+
+    stat_product = Statement(subject, stat_image.copula, predicate)
+    budget = task.budget
+    stamp = task.stamp
+
+    if task.is_judgement:
+        truth = task.truth
+        sentence_derived = Judgement(stat_product, stamp, truth)
+    elif task.is_goal:
+        truth = task.truth
+        sentence_derived = Goal(stat_product, stamp, truth)
+    elif task.is_question:
+        sentence_derived = Question(stat_product, stamp)
+    elif task.is_quest:
+        sentence_derived = Quest(stat_product, stamp)
+    else: raise ValueError('Invalid case.')
+
+    return Task(sentence_derived, budget)
+
+
+def image_to_image(task: Task, term_concept: Term, budget_tasklink: Budget=None, budget_termlink: Budget=None, inverse_premise: bool=False, inverse_copula: bool=False, index=None):
+    '''Swap the term outside an image statement with the term at `index` inside the image.'''
+    term_task = task.term
+    stat_image: Statement = term_task[index[:-2]]      # <a --> (/,R,_,b)>
+    compound_image: Compound = stat_image[index[-2]]   # (/,R,_,b)
+    idx_term = 1-index[-2]
+    idx_image = index[-1]
+    term = stat_image[1-index[-2]]
+    term_replaced = compound_image[idx_image]
+    compound_image = Compound.Image(term, compound_image, idx_image)
+    if idx_term == 0:
+        subject = term_replaced
+        predicate = compound_image
+    elif idx_term == 1:
+        subject = compound_image
+        predicate = term_replaced
+    else: raise ValueError('Invalid case.')
+
+    stat_image = Statement(subject, stat_image.copula, predicate)
+    budget = task.budget
+    stamp = task.stamp
+
+    if task.is_judgement:
+        truth = task.truth
+        sentence_derived = Judgement(stat_image, stamp, truth)
+    elif task.is_goal:
+        truth = task.truth
+        sentence_derived = Goal(stat_image, stamp, truth)
+    elif task.is_question:
+        sentence_derived = Question(stat_image, stamp)
+    elif task.is_quest:
+        sentence_derived = Quest(stat_image, stamp)
+    else: raise ValueError('Invalid case.')
+
+    return Task(sentence_derived, budget)
\ No newline at end of file
diff --git a/NAL/Theorems/__init__.py b/NAL/Theorems/__init__.py
new file mode 100644
index 0000000..7f6aac9
--- /dev/null
+++ b/NAL/Theorems/__init__.py
@@ -0,0 +1,92 @@
+'''This file is generated by `_generate_init_file.py`'''
+
+
+from .DecompositionalRules import \
+    decomposition_theorem1 as decompositional__decomposition_theorem1, \
+    decomposition_theorem10 as decompositional__decomposition_theorem10, \
+    decomposition_theorem2 as decompositional__decomposition_theorem2, \
+    decomposition_theorem3 as decompositional__decomposition_theorem3, \
+    decomposition_theorem4 as decompositional__decomposition_theorem4, \
+    decomposition_theorem5 as decompositional__decomposition_theorem5, \
+    decomposition_theorem6 as decompositional__decomposition_theorem6, \
+    decomposition_theorem7 as decompositional__decomposition_theorem7, \
+    decomposition_theorem8 as decompositional__decomposition_theorem8, \
+    decomposition_theorem9 as decompositional__decomposition_theorem9
+
+from .StructuralRules import \
+    bi_composition as structural__bi_composition, \
+    bi_composition_prime as structural__bi_composition_prime, \
+    bi_decomposition as structural__bi_decomposition, \
+    equivalence_theorem1 as structural__equivalence_theorem1, \
+    equivalence_theorem10 as structural__equivalence_theorem10, \
+    equivalence_theorem11 as structural__equivalence_theorem11, \
+    equivalence_theorem12 as structural__equivalence_theorem12, \
+    equivalence_theorem13 as structural__equivalence_theorem13, \
+    equivalence_theorem14 as structural__equivalence_theorem14, \
+    equivalence_theorem15 as structural__equivalence_theorem15, \
+    equivalence_theorem16 as structural__equivalence_theorem16, \
+    equivalence_theorem17 as structural__equivalence_theorem17, \
+    equivalence_theorem18 as structural__equivalence_theorem18, \
+    equivalence_theorem19 as structural__equivalence_theorem19, \
+    equivalence_theorem2 as structural__equivalence_theorem2, \
+    equivalence_theorem20 as structural__equivalence_theorem20, \
+    equivalence_theorem3 as structural__equivalence_theorem3, \
+    equivalence_theorem4 as structural__equivalence_theorem4, \
+    equivalence_theorem5 as structural__equivalence_theorem5, \
+    equivalence_theorem6 as structural__equivalence_theorem6, \
equivalence_theorem7 as structural__equivalence_theorem7, \ + equivalence_theorem8 as structural__equivalence_theorem8, \ + equivalence_theorem9 as structural__equivalence_theorem9, \ + implication_theorem1 as structural__implication_theorem1, \ + implication_theorem10 as structural__implication_theorem10, \ + implication_theorem11 as structural__implication_theorem11, \ + implication_theorem12 as structural__implication_theorem12, \ + implication_theorem13 as structural__implication_theorem13, \ + implication_theorem14 as structural__implication_theorem14, \ + implication_theorem15 as structural__implication_theorem15, \ + implication_theorem16 as structural__implication_theorem16, \ + implication_theorem17 as structural__implication_theorem17, \ + implication_theorem18 as structural__implication_theorem18, \ + implication_theorem19 as structural__implication_theorem19, \ + implication_theorem2 as structural__implication_theorem2, \ + implication_theorem20 as structural__implication_theorem20, \ + implication_theorem21 as structural__implication_theorem21, \ + implication_theorem22 as structural__implication_theorem22, \ + implication_theorem23 as structural__implication_theorem23, \ + implication_theorem24 as structural__implication_theorem24, \ + implication_theorem25 as structural__implication_theorem25, \ + implication_theorem26 as structural__implication_theorem26, \ + implication_theorem3 as structural__implication_theorem3, \ + implication_theorem4 as structural__implication_theorem4, \ + implication_theorem5 as structural__implication_theorem5, \ + implication_theorem6 as structural__implication_theorem6, \ + implication_theorem7 as structural__implication_theorem7, \ + implication_theorem8 as structural__implication_theorem8, \ + implication_theorem9 as structural__implication_theorem9, \ + inheritance_theorem1 as structural__inheritance_theorem1, \ + inheritance_theorem2 as structural__inheritance_theorem2, \ + inheritance_theorem3 as structural__inheritance_theorem3, \ + inheritance_theorem4 as structural__inheritance_theorem4, \ + inheritance_theorem5 as structural__inheritance_theorem5, \ + inheritance_theorem6 as structural__inheritance_theorem6, \ + similarity_theorem1 as structural__similarity_theorem1, \ + similarity_theorem2_1 as structural__similarity_theorem2_1, \ + similarity_theorem2_2 as structural__similarity_theorem2_2, \ + similarity_theorem3_1 as structural__similarity_theorem3_1, \ + similarity_theorem3_2 as structural__similarity_theorem3_2, \ + similarity_theorem4 as structural__similarity_theorem4, \ + similarity_theorem5 as structural__similarity_theorem5, \ + similarity_theorem6 as structural__similarity_theorem6, \ + similarity_theorem7 as structural__similarity_theorem7, \ + transform_image_to_product as structural__transform_image_to_product, \ + transform_product_to_image as structural__transform_product_to_image, \ + transform_product_to_image_higher_order as structural__transform_product_to_image_higher_order, \ + uni_composition as structural__uni_composition, \ + uni_composition_prime as structural__uni_composition_prime, \ + uni_decomposition as structural__uni_decomposition + +from .TransformRules import \ + image_to_image as transform__image_to_image, \ + image_to_product as transform__image_to_product, \ + product_to_image as transform__product_to_image + diff --git a/NAL/_generate_init_file.py b/NAL/_generate_init_file.py new file mode 100644 index 0000000..794a659 --- /dev/null +++ b/NAL/_generate_init_file.py @@ -0,0 +1,39 @@ +import os 
+from pathlib import Path +from inspect import getmembers, isfunction +import importlib +import re + + +name_modules = [ + 'Inference', 'Theorems' +] +for name_module in name_modules: + root_path = Path(__file__).parent/name_module + this_name = Path(__file__).name + module_names = [path for path in root_path.glob('*') if not ('__init__.py' in str(path) or this_name in str(path) or '__' in str(path))] + + try: + os.remove(root_path/'__init__.py') + except: + pass + content = "'''This file is generated by `_generate_init_file.py`'''\n\n\n" + for path in module_names: + module = importlib.import_module(f'NAL.{name_module}.{path.stem}') + funcs = [mem[0] for mem in getmembers(module, isfunction) + if mem[1].__module__ == module.__name__] + if len(funcs) == 0: continue + temp = "" + temp += f"from .{path.stem} import \\\n" + for i, func in enumerate(funcs): + temp += f"\t{func} as {re.findall('[A-Z][^A-Z]*', path.name)[0].lower()}__{func}" + if i < len(funcs)-1: + temp += ", \\\n" + else: + temp += " \n\n" + content += temp + + with open(root_path/'__init__.py', 'w') as f: + f.write(content) + +print('done.') diff --git a/NARS/Control/Reasoner_OpenNARS_3_0_4.py b/NARS/Control/Reasoner_OpenNARS_3_0_4.py new file mode 100644 index 0000000..0b9cbc3 --- /dev/null +++ b/NARS/Control/Reasoner_OpenNARS_3_0_4.py @@ -0,0 +1,156 @@ +from os import remove +from NAL.Functions.Tools import truth_to_quality + +from NARS.DataStructures._py.Link import TaskLink +from NARS.InferenceEngine import TemporalEngine +from Narsese._py.Budget import Budget +from ..DataStructures import Bag, Memory, NarseseChannel, Buffer, Task, Concept +from ..InferenceEngine import GeneralEngine +import Config +from Config import Enable +from typing import List, Tuple, Union +import NARS.MentalOperation as MentalOperation +import Global + +class Reasoner: + + def __init__(self, n_memory, capacity, config='./config.json') -> None: + # print('''Init...''') + Config.load(config) + + self.inference = GeneralEngine() + self.temporal_inference = TemporalEngine() # for temporal causal reasoning + + self.memory = Memory(n_memory) + self.overall_experience = Buffer(capacity) + self.internal_experience = Buffer(capacity) + self.narsese_channel = NarseseChannel(capacity) + self.channels = [ + self.narsese_channel + ] # TODO: other channels + + self.sequence_buffer = Buffer(capacity) + self.operations_buffer = Buffer(capacity) + + + def reset(self): + '''''' + # TODO + + def cycles(self, n_cycle: int): + for _ in range(n_cycle): + self.cycle() + + def input_narsese(self, text, go_cycle: bool=True) -> Tuple[bool, Union[Task, None], Union[Task, None]]: + success, task, task_overflow = self.narsese_channel.put(text) + if go_cycle: self.cycle() + return success, task, task_overflow + + def cycle(self): + '''Everything to do by NARS in a single working cycle''' + + # step 1. Take out an Item from `Channels`, and then put it into the `Overall Experience` + task_in: Task = self.narsese_channel.take() + if task_in is not None: + self.overall_experience.put(task_in) + + # step 2. Take out an Item from the `Internal Experience`, with putting it back afterwards, and then put it into the `Overall Experience` + task: Task = self.internal_experience.take(remove=True) + if task is not None: + self.overall_experience.put(task) + self.internal_experience.put_back(task) + + # step 3. 
Process a task of global experience buffer + task: Task = self.overall_experience.take() + if task is not None: + judgement_revised, goal_revised, answers_question, answers_quest = self.memory.accept(task) + # self.sequence_buffer.put_back(task) # globalBuffer.putBack(task, narParameters.GLOBAL_BUFFER_FORGET_DURATIONS, this) + + if Enable.temporal_rasoning: + # TODO: Temporal Inference + # Ref: OpenNARS 3.1.0 line 409~411 + # if (!task.sentence.isEternal() && !(task.sentence.term instanceof Operation)) { + # globalBuffer.eventInference(task, cont, false); //can be triggered by Buffer itself in the future + # } + raise + + if judgement_revised is not None: + self.internal_experience.put(judgement_revised) + if goal_revised is not None: + self.internal_experience.put(goal_revised) + if answers_question is not None: + for answer in answers_question: + self.internal_experience.put(answer) + if answers_quest is not None: + for answer in answers_quest: + self.internal_experience.put(answer) + else: + judgement_revised, goal_revised, answers_question, answers_quest = None, None, None, None + + # step 4. Apply general inference step + concept: Concept = self.memory.take(remove=True) + tasks_derived: List[Task] = [] + if concept is not None: + tasks_inference_derived = self.inference.step(concept) + tasks_derived.extend(tasks_inference_derived) + + # TODO: relevant process + is_concept_valid = True + if is_concept_valid: + self.memory.put_back(concept) + + # temporal induction in NAL-7 + if task is not None and task.is_judgement and task.is_event: + concept_task: Concept = self.memory.take_by_key(task.term, remove=False) + tasks_derived.extend( + self.temporal_inference.step( + task, concept_task, + self.sequence_buffer, + self.operations_buffer + ) + ) + else: + pass # TODO: select a task from `self.sequence_buffer`? + + # mental operation of NAL-9 + task_operation_return, task_executed, belief_awared = self.mental_operation(task, concept, answers_question, answers_quest) + if task_operation_return is not None: tasks_derived.append(task_operation_return) + if task_executed is not None: tasks_derived.append(task_executed) + if belief_awared is not None: tasks_derived.append(belief_awared) + + # put the tasks-derived into the internal-experience. 
+ for task_derived in tasks_derived: + self.internal_experience.put(task_derived) + + + # handle the sense of time + Global.time += 1 + + return tasks_derived, judgement_revised, goal_revised, answers_question, answers_quest, (task_operation_return, task_executed) + + def mental_operation(self, task: Task, concept: Concept, answers_question: Task, answers_quest: Task): + # handle the mental operations in NAL-9 + task_operation_return, task_executed, belief_awared = None, None, None + + # belief-awareness + for answers in (answers_question, answers_quest): + if answers is None: continue + for answer in answers: + belief_awared = MentalOperation.aware__believe(answer) + + if task is not None: + # question-awareness + if task.is_question: + belief_awared = MentalOperation.aware__wonder(task) + # quest-awareness + elif task.is_quest: + belief_awared = MentalOperation.aware__evaluate(task) + + # execute mental operation + if task is not None and task.is_executable: + task_operation_return, task_executed = MentalOperation.execute(task, concept, self.memory) + + + return task_operation_return, task_executed, belief_awared + + diff --git a/NARS/Control/__init__.py b/NARS/Control/__init__.py new file mode 100644 index 0000000..5beba0a --- /dev/null +++ b/NARS/Control/__init__.py @@ -0,0 +1 @@ +from .Reasoner_OpenNARS_3_0_4 import Reasoner as Reasoner_3_0_4 \ No newline at end of file diff --git a/NARS/DataStructures/__init__.py b/NARS/DataStructures/__init__.py new file mode 100644 index 0000000..82435da --- /dev/null +++ b/NARS/DataStructures/__init__.py @@ -0,0 +1,17 @@ +''' + +''' +if True: + # import from _py + # from ._py import * + from ._py.Bag import * + from ._py.Buffer import * + from ._py.Channel import * + from ._py.Concept import * + from ._py.Memory import * + from ._py.Link import * + from ._py.Table import * + from ._py.Link import * +else: + # import from _pyx + pass \ No newline at end of file diff --git a/NARS/DataStructures/_py/Bag.py b/NARS/DataStructures/_py/Bag.py new file mode 100644 index 0000000..c22de7c --- /dev/null +++ b/NARS/DataStructures/_py/Bag.py @@ -0,0 +1,217 @@ +from collections import OrderedDict +import random +import math +from depq import DEPQ +from Config import Config +from Narsese import Item, Task +from NAL.Functions.BudgetFunctions import * +from typing import Union + +class Bag: + # TODO: Re-implement this DataStructure, in order to optimize the complexity. + class LUT: + def __init__(self, *args, **kwargs): + self.lut = OrderedDict(*args, **kwargs) + + def get(self, key, default=None): + return self.lut.get(hash(key), default) + + def pop(self, key, default=None): + return self.lut.pop(hash(key), default) + + + def __getitem__(self, k): + return self.lut.__getitem__(hash(k)) + + def __setitem__(self, k, v): + return self.lut.__setitem__(hash(k), v) + + def __contains__(self, o: object) -> bool: + return self.lut.__contains__(hash(o)) + + def __len__(self): + return len(self.lut) + + def __init__(self, capacity: int, n_buckets: int=None, take_in_order: bool=True) -> None: + ''' + Args: + capacity (int): the maximum number of items. + n_buckets (int): the number of buckets. + take_in_order (bool): if True, an item is taken out in order within a bucket, otherwise a random item is taken out. 
+        '''
+        self.capacity = capacity
+        self.pointer = 0  # Pointing to the Bag's current bucket number
+        self.take_in_order = take_in_order
+        self.item_lut = self.LUT()  # look-up table
+        self.n_levels = n_buckets if n_buckets is not None else Config.num_buckets
+        self.levels = tuple(list() for i in range(self.n_levels))  # initialize buckets between 0 and capacity
+        # self.buckets = self.Depq(maxlen=self.n_buckets)
+        n_digits = int(math.log10(self.n_levels))+3
+        def map_priority(priority: float):
+            idx = int(round(priority*self.n_levels, n_digits))
+            return idx if idx < self.n_levels else self.n_levels-1
+
+        self.map_priority = map_priority
+
+    def take(self, remove=True) -> Item:
+        if len(self) == 0: return None
+
+        if self._is_current_level_empty():
+            self._move_to_next_nonempty_level()
+
+        if self.take_in_order:
+            # take the first item from the current bucket
+            idx = 0
+        else:
+            # take an item randomly from the current bucket
+            rnd = random.random()
+            cnt = len(self.levels[self.pointer])
+            idx = int(rnd * cnt)
+
+        if remove:
+            bucket: list = self.levels[self.pointer]
+            item = bucket.pop(idx)
+            self.item_lut.pop(item)
+        else:
+            item = self.levels[self.pointer][idx]
+
+        bucket_probability = self.pointer/self.n_levels
+        rnd = random.random()  # [0.0, 1.0)
+        if rnd > bucket_probability:
+            self._move_to_next_nonempty_level()
+
+        return item
+
+    def take_by_key(self, key, remove=True) -> Union[Item, None]:
+        if remove:
+            item: Item = self.item_lut.pop(key)
+            if item is not None:
+                bucket = self.levels[self.map_priority(item.budget.priority)]
+                if item in bucket:
+                    bucket.remove(item)
+        else:
+            item = self.item_lut.get(key, None)
+        return item
+
+    def take_min(self, remove=True) -> Item:
+        '''Take the item with the lowest priority'''
+        if len(self) == 0:
+            return None
+        pointer = self._get_min_nonempty_level()
+        if not remove:
+            item = self.levels[pointer][0]
+        else:
+            item = self.levels[pointer].pop(0)
+            self.item_lut.pop(item)
+        return item
+
+    def take_max(self, remove=True) -> Item:
+        '''Take the item with the highest priority'''
+        if len(self) == 0:
+            return None
+        pointer = self._get_max_nonempty_level()
+        if not remove:
+            item = self.levels[pointer][-1]
+        else:
+            item = self.levels[pointer].pop()
+            self.item_lut.pop(item)
+        return item
+
+    def put(self, item: Item):
+        item_popped = None
+        old_item: Item = self.take_by_key(item, remove=False)
+        if old_item is not None:
+            Budget_merge(old_item.budget, item.budget)
+            return item_popped
+        pointer_new = self.map_priority(item.budget.priority)
+        if len(self.item_lut) >= self.capacity:
+            pointer = self._get_min_nonempty_level()
+            if pointer_new >= pointer:
+                item_lowest = self.levels[pointer].pop(0)  # evict from the lowest non-empty level
+                self.item_lut.pop(item_lowest)
+                item_popped = item_lowest
+            else:
+                item_popped = item
+                return item_popped
+
+        self.item_lut[item] = item
+        level: list = self.levels[pointer_new]
+        level.append(item)
+
+        return item_popped
+
+    def put_back(self, item: Item):
+        '''Decay an item's budget, then put it back into the bag.'''
+        # return putIn(oldItem);
+        Bag.decay(item)
+        self.put(item)
+
+    @classmethod
+    def decay(cls, item: Item):
+        '''Decay the item's budget.'''
+        # item.budget.decay()
+        Budget_decay(item.budget)
+
+    @classmethod
+    def merge(cls, item_base: Item, item_merged: Item):
+        Budget_merge(item_base.budget, item_merged.budget)
+
+    def count(self):
+        return sum((len(level) for level in self.levels))
+
+    def __contains__(self, item):
+        return item in self.item_lut
+
+    def __iter__(self):
+        return iter(self.item_lut.lut.values())
+
+    def __len__(self):
+        return len(self.item_lut)
+
+    def _is_current_level_empty(self):
+        return len(self.levels[self.pointer]) == 0
+
+    def _move_to_next_nonempty_level(self):
+        if len(self) == 0: return
+        self._move_upward_to_next_level()
+        while len(self.levels[self.pointer]) == 0:
+            self._move_upward_to_next_level()
+
+    def _move_to_max_nonempty_level(self):
+        if len(self) == 0: return
+        self.pointer = self.n_levels - 1
+        while len(self.levels[self.pointer]) == 0:
+            self._move_down_to_next_level()
+
+    def _get_min_nonempty_level(self):
+        pointer_cache = self.pointer
+        self._move_to_min_nonempty_level()
+        pointer = self.pointer
+        self.pointer = pointer_cache
+        return pointer
+
+    def _get_max_nonempty_level(self):
+        pointer_cache = self.pointer
+        self._move_to_max_nonempty_level()
+        pointer = self.pointer
+        self.pointer = pointer_cache
+        return pointer
+
+    def _move_to_min_nonempty_level(self):
+        self.pointer = 0
+        self._move_to_next_nonempty_level()
+
+    def _move_down_to_next_level(self):
+        self.pointer = (self.pointer - 1) % self.n_levels
+
+    def _move_upward_to_next_level(self):
+        self.pointer = (self.pointer + 1) % self.n_levels
+
+    def __repr__(self) -> str:
+        return f"<{self.__class__.__name__}: #items={len(self)}, #levels={len(self.levels)}, capacity={self.capacity}>"
\ No newline at end of file
diff --git a/NARS/DataStructures/_py/Buffer.py b/NARS/DataStructures/_py/Buffer.py
new file mode 100644
index 0000000..60fca76
--- /dev/null
+++ b/NARS/DataStructures/_py/Buffer.py
@@ -0,0 +1,29 @@
+from .Bag import Bag
+from Config import Config
+from Narsese import Item
+
+class Buffer(Bag):
+    '''
+    According to *the Conceptual Design of OpenNARS 3.1.0*:
+    A buffer is a time-restricted bag containing new (input or derived) tasks.
+    A buffer has the following major routines:
+    **put**: As defined in bag.
+    **take**: As defined in bag, except that if the selected task has already expired,
+        the selection is repeated up to a predetermined number of times. Also, in a buffer this
+        operation is not invoked directly from the outside, but from inside, as
+        part of observe.
+    **observe**: If the buffer does not carry out temporal composition, this routine
+        just calls take to get a task, and returns it. Otherwise it also uses the selected
+        task and every other task to form tasks containing compound events.
+        The new tasks are put into the buffers. Given their high complexity,
+        most of them will be removed. The remaining ones usually correspond to
+        existing concepts in the memory or tasks in the buffer.
diff --git a/NARS/DataStructures/_py/Buffer.py b/NARS/DataStructures/_py/Buffer.py
new file mode 100644
index 0000000..60fca76
--- /dev/null
+++ b/NARS/DataStructures/_py/Buffer.py
@@ -0,0 +1,29 @@
+from .Bag import Bag
+from Config import Config
+from Narsese import Item
+
+class Buffer(Bag):
+    '''
+    According to *the Conceptual Design of OpenNARS 3.1.0*:
+    A buffer is a time-restricted bag containing new (input or derived) tasks.
+    A buffer has the following major routines:
+    **put**: As defined in bag.
+    **take**: As defined in bag, except that if the selected task has already expired,
+        the selection is repeated up to a predetermined number of times. Also, in buffer this
+        operation is not directly invoked from the outside, but from inside, as
+        part of observe.
+    **observe**: If the buffer does not carry out temporal composition, this routine
+        just calls take to get a task, and returns it. Otherwise it also uses the selected
+        task and every other task to form new tasks containing compound events.
+        The new tasks are put into the buffers. Given their high complexity,
+        most of them will be removed. The remaining ones usually correspond to
+        existing concepts in the memory or tasks in the buffer.
+    '''
+
+    def __init__(self, capacity: int, n_buckets: int=None, take_in_order: bool=False, max_duration: int=None) -> None:
+        Bag.__init__(self, capacity, n_buckets=n_buckets, take_in_order=take_in_order)
+        self.max_duration = max_duration if max_duration is not None else Config.max_duration
+
+    def is_expired(self, put_time, current_time):
+        return (current_time - put_time) > self.max_duration
diff --git a/NARS/DataStructures/_py/Channel.py b/NARS/DataStructures/_py/Channel.py
new file mode 100644
index 0000000..e899521
--- /dev/null
+++ b/NARS/DataStructures/_py/Channel.py
@@ -0,0 +1,27 @@
+from Narsese import Sentence
+from .Buffer import Buffer
+from queue import Queue
+from Narsese import Task
+from Narsese import parser
+from utils.Print import out_print, PrintType
+
+class Channel(Buffer):
+    '''A buffer used as an input/output channel.'''
+
+class NarseseChannel(Channel):
+    '''A channel whose input is Narsese text.'''
+    def put(self, text: str):
+        try:
+            task: Task = parser.parse(text)
+        except Exception:
+            # the text is not valid Narsese; report failure without buffering anything
+            return False, None, None
+
+        task_overflow = Buffer.put(self, task)
+        return True, task, task_overflow
+
+    def take(self) -> Task:
+        return Buffer.take_max(self, remove=True)
\ No newline at end of file
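A hypothetical round trip through `NarseseChannel`, matching the put/take contract above; the constructor values are arbitrary and the example assumes this repository is on `PYTHONPATH`:

```python
# Hypothetical usage of NarseseChannel (values arbitrary).
from NARS.DataStructures._py.Channel import NarseseChannel

channel = NarseseChannel(capacity=100, n_buckets=100)
success, task, overflow = channel.put('<bird --> animal>.')  # parse the text into a Task and buffer it
if success:
    task_out = channel.take()  # the highest-priority task leaves the buffer first
```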
diff --git a/NARS/DataStructures/_py/Concept.py b/NARS/DataStructures/_py/Concept.py
new file mode 100644
index 0000000..fcc7ada
--- /dev/null
+++ b/NARS/DataStructures/_py/Concept.py
@@ -0,0 +1,233 @@
+from typing import Tuple, Type, List, Union
+from NAL.Functions.Tools import calculate_solution_quality, distribute_budget_among_links
+from NAL.Functions.BudgetFunctions import Budget_merge
+from Narsese import Belief, Task, Item, Budget, Sentence, Term, Judgement, Goal
+from Narsese._py.Sentence import Quest, Question
+# from .Link import Link, TermLink, TaskLink, LinkType
+from .Link import *
+from .Table import Table
+from .Bag import Bag
+from Config import Config, Enable
+from Narsese import place_holder
+
+
+class Concept(Item):
+    '''Ref: OpenNARS 3.0.4 Concept.java'''
+
+    # seq_before: Bag  # Recent events that happened before the operation the concept represents was executed.
+    task_links: Bag
+    term_links: Bag
+
+    # *Note*: since this is iterated frequently, an array should be used. To avoid iterator allocation, use .get(n) in a for-loop.
+    question_table: Table  # Pending Questions directly asked about the term
+    quest_table: Table  # Pending Quests directly asked about the term
+    executable_preconditions: Table
+    belief_table: Table  # Judgements directly made about the term. A List is used because of access and insertion in the middle.
+    general_executable_preconditions: Table
+
+    desire_table: Table  # Desire values on the term, similar to the above one
+
+    termLinkTemplates: List[TermLink]  # Link templates of TermLink, only in concepts with CompoundTerm. Templates are used to improve the efficiency of TermLink building.
+
+    _subterms: List[Term]
+
+    def __init__(self, term: Term, budget: Budget, capacity_table: int=None) -> None:
+        super().__init__(hash(term), budget)
+        self._term = term
+
+        capacity_table = Config.capacity_table if capacity_table is None else capacity_table
+        self.belief_table = Table(capacity_table)
+        self.desire_table = Table(capacity_table)
+        self.question_table = Table(capacity_table)
+        self.quest_table = Table(capacity_table)
+
+        self.executable_preconditions = Table(capacity_table)
+        self.general_executable_preconditions = Table(capacity_table)
+
+        self.term_links = Bag(Config.capacity_term_link, Config.nlevels_term_link)
+        self.task_links = Bag(Config.capacity_task_link, Config.nlevels_task_link)
+
+        # self._cache_subterms()
+        # self.accept(task)
+
+    @property
+    def term(self) -> Term:
+        return self._term
+
+    def get_belief(self) -> Belief:
+        '''Return the first belief in the belief table, if any.'''
+        if Enable.temporal_rasoning:
+            # final Sentence belief = beliefT.sentence;
+            # nal.emit(BeliefSelect.class, belief);
+            # nal.setTheNewStamp(taskStamp, belief.stamp, currentTime);
+
+            # final Sentence projectedBelief = belief.projection(taskStamp.getOccurrenceTime(), nal.time.time(), nal.memory);
+            # /*if (projectedBelief.getOccurenceTime() != belief.getOccurenceTime()) {
+            #     nal.singlePremiseTask(projectedBelief, task.budget);
+            # }*/
+
+            # return projectedBelief; // return the first satisfying belief
+            raise
+        return self.belief_table.first()
+
+    # def match_candidate(self, sentence: Sentence) -> Task | Belief:
+    #     if sentence.is_judgement:
+    #         return self.match_belief(sentence)
+    #     elif sentence.is_goal:
+    #         return self.match_desire(sentence)
+    #     else:
+    #         raise "Invalid type."  # TODO: What about question and quest?
+
+    def match_belief(self, sentence: Union[Judgement, Question]) -> Belief:
+        '''
+        Select the belief with the highest `solution_quality` from the belief_table, according to the sentence.
+        '''
+        belief_table: List[Task] = self.belief_table
+        if len(belief_table) == 0: return None
+        qualities = [(calculate_solution_quality(sentence, task.sentence), task) for task in belief_table]
+        _, item_max = max(qualities, key=lambda quality: quality[0])
+        return item_max
+
+    def match_desire(self, goal: Goal) -> Task:
+        '''
+        Select the desire with the highest `solution_quality` from the desire_table, according to the goal.
+        '''
+        desire_table: List[Tuple[Task, float]] = self.desire_table
+        if len(desire_table) == 0: return None
+        qualities = [(calculate_solution_quality(goal, task.sentence), task) for task in desire_table]
+        _, item_max = max(qualities, key=lambda quality: quality[0])
+        return item_max
+
+    def add_belief(self, task: Task) -> None:
+        '''Add a judgement-task into the belief table, keyed by its confidence.'''
+        self.belief_table.add(task, task.truth.c)
+
+    def add_desire(self, task: Task) -> None:
+        '''Add a goal-task into the desire table, keyed by its confidence.'''
+        # goal: Goal = task.sentence
+        self.desire_table.add(task, task.truth.c)
+
+    def accept(self, task: Task, concepts: Bag=None, conceptualize: bool=True):
+        '''
+        Ref: The Conceptual Design of OpenNARS 3.1.0
+        **accept task-link:** Pre-process the task using the information local to the concept,
+        then add the link into the task-link bag so as to process it repeatedly in the future.
+        '''
+        # if task.is_judgement:
+        #     self.belief_table.add(task, task.sentence.truth.c)
+        if concepts is None: return
+
+        budget = task.budget
+        if budget.is_above_thresh:
+            if conceptualize:
+                concept = Concept._conceptualize(concepts, self.term, budget)
+                if concept is None: return  # The memory is full, and the concept fails to get into the memory.
+            self._build_task_links(concepts, task)
+            self._build_term_links(concepts, task, budget)
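The selection in `match_belief`/`match_desire` is a plain argmax over `calculate_solution_quality`. An illustrative, self-contained model of that pattern; `quality` here merely stands in for the real scoring function in `NAL.Functions.Tools`:

```python
# Argmax-by-quality selection, as used in match_belief/match_desire.
from dataclasses import dataclass

@dataclass
class FakeTask:
    name: str
    confidence: float

def quality(query: str, candidate: FakeTask) -> float:
    # stand-in scoring: prefer high confidence
    return candidate.confidence

def match(query: str, table: list):
    if len(table) == 0:
        return None
    qualities = [(quality(query, t), t) for t in table]
    _, best = max(qualities, key=lambda q: q[0])
    return best

print(match("?", [FakeTask("a", 0.5), FakeTask("b", 0.9)]).name)  # b
```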
+
+    def _build_task_links(self, concepts: Bag, task: Task):
+        '''Build task-links from this concept (and its sub-concepts) to the task.'''
+        budget = task.budget
+        task_link = TaskLink(self, task, budget, True, index=[])
+        self._insert_task_link(task_link)
+        if self.term.is_atom: return
+        sub_budget = budget.distribute(self.term.count()-1)  # TODO: It seems that the budget is not the same as that in OpenNARS 3.0.4/3.1.0. Check here.
+        for term in self.term.components:
+            if term == place_holder: continue  # should it skip the `place_holder`?
+            concept = Concept._conceptualize(concepts, term, sub_budget)
+            if concept is None: continue
+
+            indices = Link.get_index(self.term, term)
+            for index in indices:
+                task_link = TaskLink(concept, task, sub_budget, index=index)
+                concept._insert_task_link(task_link)
+
+    def _build_term_links(self, concepts: Bag, task: Task, budget: Budget):
+        '''
+        Get the component-terms to be conceptualized, and build links by DFS (Depth-First Search).
+        '''
+        if self.term.count() == 1: return  # atomic term
+
+        sub_budget = budget.distribute(self.term.count()-1)  # TODO: in case some terms are not used to build term-links, the count here is not valid and should be modified.
+        if sub_budget.is_above_thresh:
+            if self.term.is_atom: return
+
+            for term in self.term.components:
+                if term == place_holder: continue  # should it skip the `place_holder`?
+
+                # Option 1
+                # # in _build_task_links(...), all the terms have been conceptualized;
+                # # therefore, if a concept is not in the memory here, it should not be used for term-link construction.
+                # sub_concept: Concept = concepts.take_by_key(term, False)
+
+                # Option 2
+                # conceptualize again
+                sub_concept: Concept = Concept._conceptualize(concepts, term, task.budget)
+                if sub_concept is None: continue
+
+                indices = Link.get_index(self.term, term)
+                for index in indices:
+                    self._insert_term_link(TermLink(self, sub_concept, sub_budget, False, index=index))
+                    sub_concept._insert_term_link(TermLink(sub_concept, self, sub_budget, True, index=index))
+
+                sub_concept._build_term_links(concepts, task, sub_budget)
+
+    def _insert_task_link(self, task_link: TaskLink):
+        self.task_links.put(task_link)
+        # TODO: more handling. See OpenNARS 3.1.0 Concept.java line 318~366.
+
+    def _insert_term_link(self, term_link: TermLink):
+        self.term_links.put(term_link)
+        # TODO: more handling. See OpenNARS 3.1.0 Concept.java line 318~366.
+
+    @classmethod
+    def _conceptualize(cls, concepts: Bag, term: Term, budget: Budget):
+        '''
+        Conceptualize a term.
+        If the concept of the term is already in the memory, merge the budget into the existing concept.
+        Otherwise, make up a new concept and add it into the memory.
+        '''
+        if Enable.temporal_rasoning:
+            # if(term instanceof Interval) {
+            #     return null;
+            # }
+            # term = CompoundTerm.replaceIntervals(term);
+            raise  # TODO
+
+        if term.is_var: return None
+
+        concept = concepts.take_by_key(term, True)  # take the concept from the bag
+
+        if concept is not None:
+            Budget_merge(concept.budget, budget)  # merge the budget into the concept if the concept already exists
+            # Note: The budget handling here differs somewhat from OpenNARS 3.1.0; see `Memory.java line 207` and `BudgetFunction.java line 167~170` in OpenNARS 3.1.0.
+        else:
+            concept = Concept(term, budget)  # build the concept if it has not been in the bag
+
+        concept_popped = concepts.put_back(concept)  # TODO: Check here. `put` or `put_back`?
+        if concept_popped is not None and concept == concept_popped:
+            concept = None
+        return concept
+
+    def __eq__(self, concept: Type['Concept']):
+        return concept.term == self.term
+
+    def __hash__(self):
+        return hash(self.term)
+
+    def __str__(self):
+        return f'{self.budget} {self.term}'
+
+    def __repr__(self):
+        return f'<Concept: {self.term}>'
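`Budget.distribute` is used above to split a task's budget among a compound's components. Its exact formula is defined elsewhere in this codebase; for intuition, OpenNARS's `distributeAmongLinks` divides priority by the square root of the number of links, and a stand-in of that shape looks like this (hypothetical helper, not this repo's API):

```python
# Hypothetical sketch of budget distribution among n component links;
# modeled on OpenNARS BudgetFunctions.distributeAmongLinks.
import math

def distribute(priority: float, durability: float, quality: float, n: int):
    # one sub-budget, given to each of the n component links
    return (priority / math.sqrt(n), durability, quality)

print(distribute(0.9, 0.8, 0.5, 4))  # (0.45, 0.8, 0.5)
```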
diff --git a/NARS/DataStructures/_py/Link.py b/NARS/DataStructures/_py/Link.py
new file mode 100644
index 0000000..e7388b6
--- /dev/null
+++ b/NARS/DataStructures/_py/Link.py
@@ -0,0 +1,248 @@
+from enum import Enum
+import enum
+from Narsese import Item, Budget, Task, Term
+from typing import List, Type, Union
+from Narsese._py.Compound import Compound
+from Narsese._py.Connector import Connector
+from Narsese._py.Copula import Copula
+from Narsese._py.Statement import Statement
+from Narsese._py.Truth import Truth
+# from .Concept import *
+from .Concept import *
+# from . import Concept
+from copy import copy, deepcopy
+from NAL.Functions.ExtendedBooleanFunctions import *
+from Config import Config
+
+class LinkType(Enum):
+    SELF = 0                 # At C, point to C; TaskLink only
+    COMPONENT = 1            # At (&&, A, C), point to C
+    COMPOUND = 2             # At C, point to (&&, A, C)
+    COMPONENT_STATEMENT = 3  # At <C --> A>, point to C
+    COMPOUND_STATEMENT = 4   # At C, point to <C --> A>
+    COMPONENT_CONDITION = 5  # At <(&&, C, B) ==> A>, point to C
+    COMPOUND_CONDITION = 6   # At C, point to <(&&, C, B) ==> A>
+    TRANSFORM = 7            # At C, point to <(*, C, B) --> A>; TaskLink only
+    TEMPORAL = 8             # At C, point to B, potentially without a common subterm
+
+    def __int__(self):
+        return self.value
+
+
+class Link(Item):
+    link_id = 0
+    type: LinkType = None
+    component_index: List[List[int]]  # TODO: refer to OpenNARS 3.0.4, TermLink.java line 75 and TaskLink.java line 85. But why use it?
+    def __init__(self, source: Type['Concept'], target: Task, budget: Budget, source_is_component: bool=None, copy_budget=True, index: list=None) -> None:
+        self.link_id = Link.link_id
+        self.component_index = tuple(index)
+
+        hash_value = hash((source, target, self.component_index))
+        super().__init__(hash_value, budget=budget, copy_budget=copy_budget)
+        Link.link_id += 1
+
+        self.source: Type['Concept'] = source
+        self.target: Task = target
+
+        self.source_is_component = source_is_component
+        self.set_type(source_is_component)
+
+    def set_type(self, source_is_component=None, type: LinkType=None, enable_transform=False):
+        if type is not None:
+            self.type = type
+            return
+
+        term_source: Term = self.source.term
+        term_target: Term = self.target.term
+        if source_is_component is None:
+            source_is_component = term_source in term_target
+            self.source_is_component = source_is_component
+
+        # Identify the link-type according to the term-types of the source-term and the target-term.
+        if source_is_component:
+            if term_target == term_source: self.type = LinkType.SELF
+            elif term_target.is_statement:
+                statement: Statement = term_target
+                is_product_or_image = False
+                if enable_transform:  # only for task-links
+                    index = self.component_index
+                    if len(index) >= 2:
+                        statement_product: Statement = self.target.term[index[:-2]]
+                        if statement_product.is_statement:
+                            compound_product: Compound = statement_product[index[-2]]
+                            if compound_product.is_compound:
+                                if compound_product.connector in (Connector.Product, Connector.ExtensionalImage, Connector.IntensionalImage):
+                                    is_product_or_image = True
+                if is_product_or_image: self.type = LinkType.TRANSFORM
+                else:
+                    if term_target.is_statement and term_target.copula.is_higher_order:
+                        # in (Copula.Implication, Copula.Equivalence, Copula.PredictiveImplication, Copula.ConcurrentImplication, Copula.RetrospectiveImplication, Copula.PredictiveEquivalence, Copula.ConcurrentEquivalence):
+                        if term_source == statement.subject or term_source == statement.predicate: self.type = LinkType.COMPOUND_CONDITION
+                        else: self.type = LinkType.COMPOUND_STATEMENT
+                    elif term_target.copula in (Copula.Inheritance, Copula.Similarity): self.type = LinkType.COMPOUND_STATEMENT
+                    else: self.type = None
+            elif term_target.is_compound: self.type = LinkType.COMPOUND
+            else: self.type = None
+            # self.component_index = Link.get_index(term_source, term_target)
+        else:
+            if term_source.is_statement:
+                if term_source.copula in (Copula.Implication, Copula.Equivalence, Copula.PredictiveImplication, Copula.ConcurrentImplication, Copula.RetrospectiveImplication, Copula.PredictiveEquivalence, Copula.ConcurrentEquivalence):
+                    statement: Statement = term_source
+                    if term_target == statement.subject: self.type = LinkType.COMPONENT_STATEMENT
+                    else: self.type = LinkType.COMPONENT_CONDITION
+                elif term_source.copula in (Copula.Inheritance, Copula.Similarity): self.type = LinkType.COMPONENT_STATEMENT
+                else: self.type = None
+            elif term_source.is_compound: self.type = LinkType.COMPONENT
+            else: self.type = None
+            # self.component_index = Link.get_index(term_target, term_source)
+
+    # @classmethod
+    # def get_index(cls, term_component: Term, term_compound: Term):
+    #     '''
+    #     Get the index of term_component in term_compound,
+    #     e.g. if term_component = A, term_compound = <(&,B,A)-->C>, then the index = [[0,1]]; if term_component = A, term_compound = <B --> A>, then the index = [[1]]; if term_component = A, term_compound = <(&,B,A)-->(&,A,C)>, then the index = [[0,1], [1,0]].
+    #     '''
+    #     def _get_index(term_component: Term, term_compound: Term, index: List[int]):
+    #         index_new = []
+    #         if term_compound.is_atom or term_component == term_compound:
+    #             index_new.append([])
+    #         elif term_compound.is_statement:
+    #             statement: Statement = term_compound
+    #             is_in_subject = False
+    #             is_in_predicate = False
+    #             if term_component in statement.subject:
+    #                 is_in_subject = True
+    #                 index_new.extend(
+    #                     _get_index(term_component, statement.subject, [0])
+    #                 )
+    #             if term_component in statement.predicate:
+    #                 is_in_predicate = True
+    #                 index_new.extend(
+    #                     _get_index(term_component, statement.predicate, [1])
+    #                 )
+    #             if not (is_in_subject or is_in_predicate):
+    #                 raise "Invalid case."
+    #         elif term_compound.is_compound:
+    #             compound: Compound = term_compound
+    #             valid = False
+    #             for i, component in enumerate(compound):
+    #                 if term_component in component:
+    #                     valid = True
+    #                     index_new.extend(
+    #                         _get_index(term_component, component, [i])
+    #                     )
+    #             if not valid:
+    #                 raise "Invalid case."
+    #         else:
+    #             raise "Invalid case."
+    #         indexes = []
+    #         for idx in index_new:
+    #             indexes.append(index+idx)
+    #         return indexes
+    #     return _get_index(term_component, term_compound, [])
+
+    # This is another implementation of `get_index` (it may be a better one).
+    @classmethod
+    def get_index(cls, main_term: Union[Term, Statement, Compound], sub_term: Union[Term, Statement, Compound], index=None, indices=None):
+        '''This function is temporary. The index of a term within another term should be obtained when constructing the latter one.'''
+
+        if sub_term not in main_term: return None
+
+        indices = [] if indices is None else indices
+        index = [] if index is None else index
+
+        if main_term == sub_term:
+            indices.append(index)
+            return indices  # note: `return indices.append(index)` would return None
+
+        # if main_term.is_statement:
+        #     if sub_term in main_term[0]: idx = 0
+        #     elif sub_term in main_term[1]: idx = 1
+        #     else: raise "Invalid case."
+        #     index.append(idx)
+        #     cls.get_index(main_term.terms[idx], sub_term, index, indices)
+        if main_term.is_compound or main_term.is_statement:
+            if sub_term in main_term:
+                for idx, term in enumerate(main_term.terms):
+                    # idx = main_term.terms.index(sub_term)
+                    if sub_term in term:
+                        index_copy = copy(index)
+                        index_copy.append(idx)
+                        cls.get_index(term, sub_term, index_copy, indices)
+        # elif main_term.is_atom:
+        #     pass
+        else: raise TypeError("Invalid case.")
+
+        return indices
+
+    @classmethod
+    def update_budget(cls, budget: Budget, q: float, p_belief: float):
+        budget.priority = min(1.0, Or(budget.priority, Or(q, p_belief)))
+        budget.durability = min(1.0-Config.budget_epsilon, Or(budget.durability, q))
+
+    @property
+    def is_valid(self):
+        return self.type is not None
+
+    def __str__(self) -> str:
+        return f'{self.budget} {(self.source)} --- {self.target}, {"+" if self.source_is_component else "-"}{self.component_index}'
+
+
+class TermLink(Link):
+    def __init__(self, source: Type['Concept'], target: Task, budget: Budget, source_is_component: bool=None, copy_budget=True, index: list=None) -> None:
+        super().__init__(source, target, budget, source_is_component, copy_budget=copy_budget, index=index)
+
+    def set_type(self, source_is_component=True, type: LinkType=None):
+        Link.set_type(self, source_is_component, type)
+        if not self.is_valid: self.type = None
+
+    @property
+    def is_valid(self):
+        return self.type in (
+            LinkType.COMPONENT,
+            LinkType.COMPOUND,
+            LinkType.COMPONENT_STATEMENT,
+            LinkType.COMPOUND_STATEMENT,
+            LinkType.COMPONENT_CONDITION,
+            LinkType.COMPOUND_CONDITION,
+            LinkType.TEMPORAL
+        )
+
+    def __str__(self) -> str:
+        return f'{self.budget} {self.source.term} --- {self.target.term}, {"+" if self.source_is_component else "-"}{self.component_index}'
+
+class TaskLink(Link):
+    def __init__(self, source: Type['Concept'], target: Task, budget: Budget, copy_budget=True, index: list=None) -> None:
+        super().__init__(source, target, budget, True, copy_budget=copy_budget, index=index)
+
+    def set_type(self, source_is_component=True, type: LinkType=None):
+        Link.set_type(self, source_is_component, type, enable_transform=True)
+        if not self.is_valid: self.type = None
+
+    @property
+    def is_valid(self) -> bool:
+        return self.type in (
+            LinkType.SELF,
+            LinkType.COMPOUND,
+            LinkType.COMPOUND_STATEMENT,
+            LinkType.COMPOUND_CONDITION,
+            LinkType.TRANSFORM,
+            LinkType.TEMPORAL
+        )
+
+    def __str__(self) -> str:
+        return f'{self.budget} {self.source.term} --- {self.target.term}{self.target.sentence.punct.value}, {"+" if self.source_is_component else "-"}{self.component_index}'
\ No newline at end of file
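The `component_index` stored on a link is a list of child-position paths, as described in the docstring of the commented-out `get_index` variant. A self-contained model of the same idea over nested tuples:

```python
# Self-contained sketch of component index paths: each path is the list of
# child positions leading from the compound down to the component.
def find_paths(tree, target, path=None, paths=None):
    path = [] if path is None else path
    paths = [] if paths is None else paths
    if tree == target:
        paths.append(path)
    elif isinstance(tree, tuple):
        for i, child in enumerate(tree):
            find_paths(child, target, path + [i], paths)
    return paths

# <(&, B, A) --> (&, A, C)> modeled as a nested tuple; 'A' occurs at [0, 1] and [1, 0]
print(find_paths((('B', 'A'), ('A', 'C')), 'A'))  # [[0, 1], [1, 0]]
```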
diff --git a/NARS/DataStructures/_py/Memory.py b/NARS/DataStructures/_py/Memory.py
new file mode 100644
index 0000000..b89dcbe
--- /dev/null
+++ b/NARS/DataStructures/_py/Memory.py
@@ -0,0 +1,276 @@
+from Config import Enable
+from NAL.Inference.LocalRules import solve_query, solution_query, solution_question
+from NAL.MetaLevelInference.VariableSubstitution import unification__var_const
+from NARS.DataStructures._py.Link import TaskLink
+from Narsese._py.Sentence import Goal, Judgement, Question
+from Narsese import Statement, Term, Sentence, Budget, Task
+from Narsese._py.Task import Belief, Desire
+from .Concept import Concept
+from .Bag import Bag
+from NAL.Functions.Tools import revisible
+from NAL.Inference import local__revision
+
+class Memory:
+    def __init__(self, capacity: int, n_buckets: int = None, take_in_order: bool = False) -> None:
+        self.concepts = Bag(capacity, n_buckets=n_buckets, take_in_order=take_in_order)
+
+    def accept(self, task: Task):
+        '''
+        **Accept task**: Accept a task from the `Overall Experience`, and link it from all directly related concepts. Ref: *The Conceptual Design of OpenNARS 3.1.0*.
+        '''
+        # merge the new task, as a concept, into the memory
+        concept: Concept = Concept._conceptualize(self.concepts, task.term, task.budget)
+        if concept is None: return None  # The memory is full; the concept fails to get into the memory.
+
+        # then process the task according to its type
+        task_revised, goal_derived, answers_question, answer_quest = None, None, None, None
+        if task.is_judgement:
+            # revise against an existing belief, and try to answer a corresponding question if there is one
+            task_revised, answers_question = self._accept_judgement(task, concept)
+        elif task.is_goal:
+            task_revised, belief_selected = self._accept_goal(task, concept)
+        elif task.is_question:
+            # add the question into the question-table of the concept, and try to find a solution
+            answers_question = self._accept_question(task, concept)
+        elif task.is_quest:
+            answer_quest = self._accept_quest(task, concept)
+        else:
+            raise TypeError(f"Invalid type {task.sentence.punct}")
+
+        # Build the concepts corresponding to the terms of the components within the task.
+        concept.accept(task, self.concepts, conceptualize=False)
+
+        if Enable.temporal_rasoning or Enable.operation:
+            # if (!task.sentence.isEternal() && !(task.sentence.term instanceof Operation)) {
+            #     globalBuffer.eventInference(task, cont, false); //can be triggered by Buffer itself in the future
+            # }
+            raise  # TODO
+
+        return task_revised, goal_derived, answers_question, answer_quest
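A hypothetical driver showing how `Memory.accept` and the bag-backed selection fit together (capacity arbitrary; assumes this repository is on `PYTHONPATH`):

```python
# Hypothetical usage of Memory.
from NARS.DataStructures._py.Memory import Memory
from Narsese import parser

memory = Memory(capacity=1000)
memory.accept(parser.parse('<bird --> animal>.'))  # conceptualize and link the task
concept = memory.take(remove=True)                 # select a concept by priority
if concept is not None:
    memory.put_back(concept)                       # decay its budget and return it to the bag
```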
+
+    def _accept_judgement(self, task: Task, concept: Concept):
+        '''Revise against an existing belief, add the task into the belief table, and try to answer pending questions.'''
+        belief_revised = None
+        answers = None
+        if Enable.operation: raise  # InternalExperienceBuffer.handleOperationFeedback(task, nal);
+        if Enable.anticipation: raise  # ProcessAnticipation.confirmAnticipation(task, concept, nal);
+
+        # j1: Judgement = task.sentence
+        belief: Belief = concept.match_belief(task.sentence)
+        if belief is not None:
+            # j2: Judgement = belief.sentence
+            if revisible(task, belief):
+                if Enable.temporal_rasoning:
+                    '''
+                    nal.setTheNewStamp(newStamp, oldStamp, nal.time.time());
+                    final Sentence projectedBelief = oldBelief.projection(nal.time.time(), newStamp.getOccurrenceTime(), concept.memory);
+                    if (projectedBelief!=null) {
+                        nal.setCurrentBelief(projectedBelief);
+                        revision(judg, projectedBelief, concept, false, nal);
+                        task.setAchievement(calcTaskAchievement(task.sentence.truth, projectedBelief.truth));
+                    }
+                    '''
+                    raise
+                belief_revised = local__revision(task, belief)  # TODO: handle the stamps
+                # reduce the priority by the achieving level
+                task.reduce_budget_by_achieving_level(belief)
+
+        if task.budget.is_above_thresh:
+            '''final int nnq = concept.questions.size();
+            for (int i = 0; i < nnq; i++) {
+                trySolution(judg, concept.questions.get(i), nal, true);
+            }
+            final int nng = concept.desires.size();
+            for (int i = 0; i < nng; i++) {
+                trySolution(judg, concept.desires.get(i), nal, true);
+            }'''
+            concept.add_belief(task)
+
+            # try to answer questions
+            answers = self._solve_judgement(task, concept)
+
+        return belief_revised, answers
+
+    def _accept_question(self, task: Task, concept: Concept):
+        '''Add the question into the question table, and try to find a solution for it.'''
+        concept.question_table.add(task, 0.5)
+
+        if task.is_query:
+            answers = self._solve_query(task, concept)
+        else:
+            answers = self._solve_question(task, concept)
+
+        return answers
+
+    def _accept_goal(self, task: Task, concept: Concept):
+        '''Revise against an existing desire, and add the task into the desire table.'''
+        desire_revised = None
+        belief_selected = None
+        if Enable.operation: raise  # InternalExperienceBuffer.handleOperationFeedback(task, nal);
+        if Enable.anticipation: raise  # ProcessAnticipation.confirmAnticipation(task, concept, nal);
+
+        g1: Goal = task.sentence
+        desire: Desire = concept.match_desire(g1)
+        if desire is not None:
+            g2: Goal = desire.sentence
+            if revisible(task, desire):
+                # TODO: temporal projection
+                desire_revised = local__revision(task, desire)  # TODO: handle the stamps
+                # reduce the priority by the achieving level
+                task.reduce_budget_by_achieving_level(desire)
+
+        if task.budget.is_above_thresh:
+            '''
+            for (final Task iQuest : concept.quests ) {
+                trySolution(task.sentence, iQuest, nal, true);
+            }
+            if (beliefT != null) {
+                // check if the Goal is already satisfied (manipulate budget)
+                trySolution(beliefT.sentence, task, nal, true);
+            }
+            '''
+            # 1. try to solve questions
+            # 2. try to solve quests
+
+            concept.add_desire(task)
+
+        return desire_revised, belief_selected
+
+    def _accept_quest(self, task: Task, concept: Concept):
+        '''Add the quest into the quest table, and try to find a solution for it.'''
+        concept.quest_table.add(task, 0.5)
+
+        if task.is_query:
+            answers = self._solve_query(task, concept)
+        else:
+            answers = self._solve_quest(task, concept)
+
+        return answers
+
+    def _solve_judgement(self, belief: Task, concept: Concept):
+        '''
+        Use a new belief to answer pending questions. It should be ensured that the task has no query-variables.
+
+        Args:
+            belief (Task): Its sentence should be a judgement.
+            concept (Concept): The concept corresponding to the task.
+        '''
+        answers = []
+        # 1. try to answer yn-questions
+        for question in concept.question_table:
+            answer = solution_question(question, belief)
+            if answer is not None: answers.append(answer)
+        # 2. try to answer wh-questions
+        sub_terms = belief.term.sub_terms
+        for sub_term in sub_terms:
+            concept_term: Concept = self.concepts.take_by_key(sub_term, remove=False)
+            if concept_term is None: continue
+            task_link: TaskLink
+            for task_link in concept_term.task_links:
+                query = task_link.target
+                if query is None: continue
+                if not query.is_query: continue
+                if not query.term.equal(belief.term): continue
+                answer = solution_query(query, belief)
+                if answer is not None: answers.append(answer)
+
+        return answers
+
+    def _solve_question(self, question: Task, concept: Concept):
+        '''
+        Try to answer a yn-question with the concept's best matching belief.
+
+        Args:
+            question (Task): Its sentence should be a question.
+            concept (Concept): The concept corresponding to the task.
+        '''
+        answers = []
+        # 1. try to answer yn-questions
+        belief_answer: Belief = concept.match_belief(question.sentence)
+        if belief_answer is not None:
+            answer = solution_question(question, belief_answer)
+            if answer is not None: answers.append(answer)
+        return answers
+
+    def _solve_query(self, query: Task, concept: Concept):
+        '''
+        Try to answer a wh-question (or quest) by unifying its query-variables against related concepts' beliefs.
+
+        Args:
+            query (Task): Its sentence should be a question or a quest, and contains query-variable(s).
+            concept (Concept): The concept corresponding to the task.
+        '''
+        answers = []
+        # 1. try to answer wh-questions
+        if query.is_question:
+            sub_terms = query.term.sub_terms
+            for sub_term in sub_terms:
+                if sub_term.is_qvar: continue
+                concept_term: Concept = self.concepts.take_by_key(sub_term, remove=False)
+                if concept_term is None: continue
+                task_link: TaskLink
+                for task_link in concept_term.task_links:
+                    concept_target: Concept = self.concepts.take_by_key(task_link.target.term, False)
+                    if concept_target is None: continue
+                    if not query.term.equal(concept_target.term): continue
+                    subst = unification__var_const(query.term, concept_target.term, [], [])
+                    if not subst.is_qvar_valid: continue
+                    # if not (concept_target.term.equal(query.term) and subst is not None): continue
+                    for belief in concept_target.belief_table:
+                        answer = solution_query(query, belief)
+                        if answer is not None: answers.append(answer)
+        elif query.is_quest:
+            pass  # TODO: answer quests with desires
+        else: raise TypeError("Invalid case.")
+        return answers
+
+    def _solve_goal(self, task: Task, concept: Concept):
+        '''
+        Args:
+            task (Task): Its sentence should be a goal.
+            concept (Concept): The concept corresponding to the task.
+        '''
+
+    def _solve_quest(self, task: Task, concept: Concept):
+        '''
+        Args:
+            task (Task): Its sentence should be a quest.
+            concept (Concept): The concept corresponding to the task.
+        '''
+        answers = []
+        return answers
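For wh-questions, `_solve_query` unifies the query term (containing `?x`-style query variables) against the terms of related concepts and collects answers from their belief tables. A hypothetical end-to-end flow (Narsese strings illustrative; assumes this repository is on `PYTHONPATH`):

```python
# Hypothetical wh-question flow through Memory.
from NARS.DataStructures._py.Memory import Memory
from Narsese import parser

memory = Memory(capacity=1000)
memory.accept(parser.parse('<bird --> animal>.'))                   # store a belief
_, _, answers, _ = memory.accept(parser.parse('<?x --> animal>?'))  # query with a query-variable
# _accept_question routes the query through _solve_query, which unifies ?x
# against related concepts' terms and collects answers via solution_query.
```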
+
+    def take(self, remove=True) -> Concept:
+        '''
+        Take out a concept according to priority.
+        '''
+        return self.concepts.take(remove)
+
+    def take_by_key(self, key: Task, remove=True) -> Concept:
+        return self.concepts.take_by_key(key, remove)
+
+    def put(self, concept: Concept):
+        return self.concepts.put(concept)
+
+    def put_back(self, concept: Concept):
+        return self.concepts.put_back(concept)
+
+    def __repr__(self) -> str:
+        return f"<{self.__class__.__name__}: #items={len(self.concepts)}, #buckets={len(self.concepts.levels)}>"
+
+    def __len__(self) -> int:
+        return len(self.concepts)
\ No newline at end of file
diff --git a/NARS/DataStructures/_py/Table.py b/NARS/DataStructures/_py/Table.py
new file mode 100644
index 0000000..37737c7
--- /dev/null
+++ b/NARS/DataStructures/_py/Table.py
@@ -0,0 +1,50 @@
+from typing import Union
+from depq import DEPQ
+from Narsese import Task, Belief
+
+class Table:
+    '''
+    Used for the belief table, desire table, etc. in the `Concept`.
+    '''
+    def __init__(self, capacity):
+        self._table = DEPQ(maxlen=capacity)
+
+    def add(self, task: Task, p: float):
+        self._table.insert(task, p)
+
+    # def remove(self, task: Task):
+    #     self._table.elim(task)
+
+    @property
+    def empty(self):
+        return self._table.is_empty()
+
+    def first(self):
+        return self._table.first() if len(self._table) > 0 else None
+
+    def last(self):
+        return self._table.last() if len(self._table) > 0 else None
+
+    def __iter__(self):
+        return (value for value, _ in self._table)
+
+    def values(self):
+        return tuple(iter(self))
+
+    def items(self):
+        return tuple(iter(self._table))
+
+    def keys(self):
+        return tuple(key for _, key in self._table)
+
+    def __getitem__(self, idx: int) -> Union[Task, Belief]:
+        return self._table[idx][0]
+
+    def __len__(self):
+        return len(self._table)
+
+    def __str__(self):
+        return f'<Table: #items={len(self._table)}>'
+
+    def __repr__(self):
+        return str(self)
\ No newline at end of file
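`Table` is a thin wrapper over `depq.DEPQ`, a double-ended priority queue, so `first()` yields the highest-keyed entry and `last()` the lowest. A minimal usage sketch (plain strings stand in for the Task objects used in the real code):

```python
# Minimal usage of Table; entries are ordered by the key passed to add().
from NARS.DataStructures._py.Table import Table

table = Table(capacity=5)
table.add('weak belief', 0.3)    # in the real code these are Tasks keyed by confidence
table.add('strong belief', 0.9)
print(table.first())  # 'strong belief'
print(table.last())   # 'weak belief'
```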
diff --git a/NARS/InferenceEngine/Engine.py b/NARS/InferenceEngine/Engine.py
new file mode 100644
index 0000000..abb8c2e
--- /dev/null
+++ b/NARS/InferenceEngine/Engine.py
@@ -0,0 +1,26 @@
+from copy import copy
+from NAL.Functions.Tools import project_truth, revisible
+from Narsese._py.Budget import Budget
+from Narsese._py.Term import Term
+from ..DataStructures import Task, Belief, Concept, TaskLink, TermLink
+from typing import Callable, List, Tuple
+from ..RuleMap import RuleCallable, RuleMap_v2
+from NAL.Inference import local__revision
+import Global
+
+
+class Engine:
+
+    rule_map = RuleMap_v2()
+
+    def __init__(self):
+        pass
+
+    @classmethod
+    def match(cls, *args, **kwargs):
+        pass
+
+    def step(self, *args, **kwargs):
+        pass
\ No newline at end of file
diff --git a/NARS/InferenceEngine/GeneralEngine.py b/NARS/InferenceEngine/GeneralEngine.py
new file mode 100644
index 0000000..dadb70a
--- /dev/null
+++ b/NARS/InferenceEngine/GeneralEngine.py
@@ -0,0 +1,125 @@
+from copy import copy
+from NAL.Functions.Tools import project_truth, revisible
+from Narsese._py.Budget import Budget
+from Narsese._py.Term import Term
+from ..DataStructures import Task, Belief, Concept, TaskLink, TermLink
+from typing import Callable, List, Tuple
+from ..RuleMap import RuleCallable, RuleMap_v2
+from NAL.Inference import local__revision
+import Global
+from .Engine import Engine
+
+
+class GeneralEngine(Engine):
+
+    rule_map = RuleMap_v2()
+
+    def __init__(self):
+        ''''''
+        super().__init__()
+
+    @classmethod
+    def match(cls, task: Task, belief: Belief, belief_term: Term, task_link, term_link):
+        '''Verify whether the task and the belief can interact with each other.'''
+        is_valid = False
+        is_revision = False
+        rules = []
+        if belief is not None:
+            if task == belief:
+                if task.sentence.punct == belief.sentence.punct:
+                    is_revision = revisible(task, belief)
+            elif task.term.equal(belief.term):
+                # TODO: here
+                pass
+            elif not belief.evidential_base.is_overlaped(task.evidential_base):
+                # Engine.rule_map.verify(task_link, term_link)
+                rules = GeneralEngine.rule_map.match(task, belief, belief_term, task_link, term_link)
+                if rules is not None and len(rules) > 0:
+                    is_valid = True
+        elif belief_term is not None:  # belief is None
+            if task.term == belief_term: pass
+            elif task.term.equal(belief_term): pass
+            else:
+                rules = GeneralEngine.rule_map.match(task, belief, belief_term, task_link, term_link)
+                if rules is not None and len(rules) > 0:
+                    is_valid = True
+        else:  # belief is None and belief_term is None
+            rules = GeneralEngine.rule_map.match(task, belief, belief_term, task_link, term_link)
+            if rules is not None and len(rules) > 0:
+                is_valid = True
+
+        return is_valid, is_revision, rules
+
+    def step(self, concept: Concept):
+        '''One step of inference.'''
+        tasks_derived = []
+
+        # Based on the selected concept, take out a task and a belief for further inference.
+        task_link_valid: TaskLink = concept.task_links.take(remove=True)
+        if task_link_valid is None: return tasks_derived
+        concept.task_links.put_back(task_link_valid)
+
+        task: Task = task_link_valid.target
+
+        # inference with single-premise rules
+        is_valid, _, rules_immediate = GeneralEngine.match(task, None, None, task_link_valid, None)
+        if is_valid:
+            tasks = self.inference(task, None, None, task_link_valid, None, rules_immediate)
+            tasks_derived.extend(tasks)
+
+        # inference with two-premise rules
+        term_links = []
+        term_link_valid = None
+        is_valid = False
+        for _ in range(len(concept.term_links)):
+            # To find a belief which is valid to interact with the task, iterate over the term-links.
+            term_link: TermLink = concept.term_links.take(remove=True)
+            term_links.append(term_link)
+
+            concept_target: Concept = term_link.target
+            belief = concept_target.get_belief()  # TODO: consider all beliefs.
+            term_belief = concept_target.term
+            # if belief is None: continue
+            # Verify the validity of the interaction, and find a pair which is valid for inference.
+            is_valid, is_revision, rules = GeneralEngine.match(task, belief, term_belief, task_link_valid, term_link)
+            if is_revision: tasks_derived.append(local__revision(task, belief, task_link_valid.budget, term_link.budget))
+            if is_valid:
+                term_link_valid = term_link
+                break
+
+        if is_valid:
+            tasks = self.inference(task, belief, term_belief, task_link_valid, term_link_valid, rules)
+            if term_link_valid is not None:  # TODO: Check whether the budget updating is the same as in OpenNARS 3.0.4.
+                for task_derived in tasks:
+                    TermLink.update_budget(term_link_valid.budget, task_derived.budget.quality, belief.budget.priority if belief is not None else concept_target.budget.priority)
+
+            tasks_derived.extend(tasks)
+
+        for term_link in term_links: concept.term_links.put_back(term_link)
+
+        return tasks_derived
+
+    @staticmethod
+    def inference(task: Task, belief: Belief, term_belief: Term, task_link: TaskLink, term_link: TermLink, rules: List[RuleCallable]) -> List[Task]:  # Tuple[List[Task], List[Tuple[Budget, float, float]]]:
+        '''
+        It should be ensured that
+        1. the task and the belief can interact with each other;
+        2. the task is the target node of the task-link, and the concept corresponding to the belief is the target node of the term-link;
+        3. there is a function, indexed by the task_link and the term_link, in the RuleMap.
+        '''
+        # Temporal Projection and Eternalization
+        if belief is not None:
+            # TODO: Handle the backward inference.
+            if not belief.is_eternal and (belief.is_judgement or belief.is_goal):
+                truth_belief = project_truth(task.sentence, belief.sentence)
+                belief = belief.eternalize(truth_belief)
+                # belief_eternalized = belief  # TODO: should it be added into the `tasks_derived`?
+
+        belief = belief if belief is not None else term_belief
+        tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules]
+
+        return tasks_derived
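A minimal stand-in for the contract `GeneralEngine.inference` relies on: every matched rule is a callable `(task, belief, task_link, term_link) -> Task`, and derivation is just the list of rule applications.

```python
# Sketch of the rule-application loop at the end of GeneralEngine.inference.
def apply_rules(task, belief, task_link, term_link, rules):
    return [rule(task, belief, task_link, term_link) for rule in rules]

# with a dummy rule standing in for a real RuleCallable:
print(apply_rules('T', 'B', None, None, [lambda t, b, tl, ml: (t, b)]))  # [('T', 'B')]
```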
diff --git a/NARS/InferenceEngine/TemporalEngine.py b/NARS/InferenceEngine/TemporalEngine.py
new file mode 100644
index 0000000..604975c
--- /dev/null
+++ b/NARS/InferenceEngine/TemporalEngine.py
@@ -0,0 +1,107 @@
+from copy import copy
+from typing import Union
+from NAL.Functions.Tools import project_truth, revisible, truth_to_quality
+from NARS.DataStructures._py.Buffer import Buffer
+from Narsese._py.Budget import Budget
+from Narsese._py.Copula import Copula
+from Narsese._py.Evidence import Base
+from Narsese._py.Statement import Statement
+from Narsese._py.Term import Term
+from ..DataStructures import Task, Belief, Concept, TaskLink, TermLink
+from typing import Callable, List, Tuple
+from ..RuleMap import RuleCallable, RuleMap_v2
+from NAL.Inference import local__revision
+from Config import Config
+import Global
+from NAL.Inference import *
+from .Engine import Engine
+
+class TemporalEngine(Engine):
+    '''Inference engine for temporal induction over event tasks.'''
+
+    rule_map = RuleMap_v2()
+
+    def __init__(self) -> None:
+        ''''''
+        super().__init__()
+
+    def step(self, task: Task, concept: Union[Concept, None], sequence_buffer: Buffer, operations_buffer: Buffer):
+        '''One step of temporal inference, applied to an event task.'''
+        tasks_derived = []
+
+        if not (task.is_judgement and task.is_event):
+            return tasks_derived
+
+        tasks_derived.extend(self.inference_with_sequence(task, sequence_buffer))
+        self.add_event(task, concept, sequence_buffer)
+
+        tasks_derived.extend(self.inference_with_operations(task, operations_buffer))
+        self.add_operation_feedback(task, operations_buffer)
+
+        return tasks_derived
+
+    def inference_with_sequence(self, task_event: Task, sequence_buffer: Buffer):
+        '''
+        Inference with `self.sequence_buffer`.
+        Ref: OpenNARS 3.0.4 TemporalInferenceControl.java line 85~104
+        '''
+        tasks_derived = []
+        tasks_attempted = []
+        for _ in range(Config.n_sequence_attempts):
+            task: Task = sequence_buffer.take(True)
+            if task is None: break
+            tasks_attempted.append(task)
+            if task.evidential_base.is_overlaped(task_event.evidential_base):
+                continue
+
+            # temporal induction
+            # TODO: use SparseLUT to find the rules.
+            task1, task2 = (task_event, task) if task.stamp.t_occurrence > task_event.stamp.t_occurrence else (task, task_event)
+
+            task = temporal__induction_implication(task1, task2, None, None)
+            tasks_derived.append(task)
+            if task2.term.is_statement and task2.term.copula in (Copula.ConcurrentImplication, Copula.PredictiveImplication, Copula.RetrospectiveImplication):
+                task = temporal__induction_composition(task1, task2, None, None)
+            else:
+                task = temporal__induction_composition(task2, task1, None, None)
+            tasks_derived.append(task)
+            # TODO: The current implication above is problematic.
+
+        for task in tasks_attempted:
+            sequence_buffer.put_back(task)
+
+        return tasks_derived
+
+    def inference_with_operations(self, task_event: Task, operations_buffer: Buffer):
+        '''
+        Inference with `self.operations_buffer`.
+        Ref: OpenNARS 3.0.4 TemporalInferenceControl.java line 107~162
+        '''
+        tasks_derived = []
+
+        return tasks_derived
+
+    def add_event(self, task_event: Task, concept: Concept, sequence_buffer: Buffer):
+        '''
+        Add the event task into `self.sequence_buffer` and `self.operations_buffer`.
+        Ref: OpenNARS 3.0.4 TemporalInferenceControl.java line 167~213
+        '''
+        q = truth_to_quality(task_event.truth)
+        p = max(q, concept.budget.priority) if concept is not None else q
+        d = 1.0/task_event.term.complexity
+        budget = Budget(p, d, q)
+        task = Task(task_event.sentence, budget)
+        sequence_buffer.put(task)
+
+    def add_operation_feedback(self, task_event: Task, operations_buffer: Buffer):
+        '''Put an executed (non-mental) operation into `self.operations_buffer`.'''
+        # Ref: OpenNARS 3.0.4 ProcessJudgement.java line 116~126, TemporalInferenceControl.java line 215~245
+        if task_event.is_operation and not task_event.is_mental_operation:
+            operations_buffer.put(task_event)
\ No newline at end of file
diff --git a/NARS/InferenceEngine/__init__.py b/NARS/InferenceEngine/__init__.py
new file mode 100644
index 0000000..fb213f1
--- /dev/null
+++ b/NARS/InferenceEngine/__init__.py
@@ -0,0 +1,2 @@
+from .GeneralEngine import GeneralEngine
+from .TemporalEngine import TemporalEngine
\ No newline at end of file
diff --git a/NARS/MentalOperation/Execution.py b/NARS/MentalOperation/Execution.py
new file mode 100644
index 0000000..a911a63
--- /dev/null
+++ b/NARS/MentalOperation/Execution.py
@@ -0,0 +1,46 @@
+from typing import Callable, List
+from Config import Config
+from NARS.DataStructures._py.Concept import Concept
+from NARS.DataStructures._py.Memory import Memory
+from Narsese._py.Budget import Budget
+from Narsese._py.Operation import *
+from Narsese._py.Sentence import Goal, Judgement, Quest, Question, Sentence, Stamp
+from Narsese._py.Statement import Statement
+from Narsese._py.Task import Belief, Desire, Task
+from Narsese._py.Truth import Truth
+from .Register import registered_operations
+from Narsese import Term
+from NAL.Functions.Tools import truth_from_term, truth_to_quality
+from Narsese import Base
+import Global
+
+def executed_task(task: Task):
+    '''Build the feedback judgement-task for an executed operation.'''
+    input_id = Global.get_input_id()
+    truth = Truth(1.0, Config.c_judgement, Config.k)
+    stamp = Stamp(Global.time, Global.time, None, Base((input_id,)))
+    budget = Budget(Config.p_feedback, Config.d_feedback, truth_to_quality(task.truth))
+
+    return Task(Judgement(task.term, stamp, truth), budget, input_id)
+
+def execute(task: Task, concept: Concept, memory: Memory):
+    '''
+    It should be ensured that the task is executable, i.e., `task.is_executable == True`.
+    '''
+    if task.term != concept.term:
+        concept = memory.take_by_key(task.term, remove=False)
+    stat: Statement = task.term
+    operation: Operation = stat.predicate
+    args = stat.subject.terms
+    function_op: Callable = registered_operations.get(operation, None)
+
+    if function_op is not None:
+        belief = executed_task(task)
+        if concept is not None:
+            concept.add_belief(belief)
+        return function_op(args, task, memory), belief
+    else:
+        return None, None
\ No newline at end of file
diff --git a/NARS/MentalOperation/Interface_Awareness.py b/NARS/MentalOperation/Interface_Awareness.py
new file mode 100644
index 0000000..7d44c52
--- /dev/null
+++ b/NARS/MentalOperation/Interface_Awareness.py
@@ -0,0 +1,40 @@
+from typing import List
+import NAL.MentalOperation._aware as aware
+from NARS.DataStructures._py.Concept import Concept
+from NARS.DataStructures._py.Memory import Memory
+from Narsese import Task, Term
+from ..DataStructures import Bag
+from copy import copy, deepcopy
+
+def aware__believe(task: Task, memory: Memory=None):
+    ''''''
+    return aware.believe(task.sentence, task.truth, task.budget)
+
+def aware__wonder(task: Task, memory: Memory=None):
+    ''''''
+    return aware.wonder(task.sentence, task.budget)
+
+# def _aware__doubt(arguments: List[Term], task: Task=None, memory: Memory=None):
+#     ''''''
+#     term = arguments[1]
+#     concept = Concept._conceptualize(memory.concepts, term, task.budget)
+#     return execute.doubt(list(concept.belief_table))
+
+def aware__evaluate(task: Task, memory: Memory=None):
+    ''''''
+    return aware.evaluate(task.sentence, task.budget)
+
+# def _aware__hesitate(arguments: List[Term], task: Task=None, memory: Memory=None):
+#     ''''''
+#     term = arguments[1]
+#     concept = Concept._conceptualize(memory.concepts, term, task.budget)
+#     return execute.hesitate(list(concept.desire_table))
+
+# def _aware__want(arguments: List[Term], task: Task=None, memory: Memory=None):
+#     ''''''
+#     statement = arguments[1]
+#     return execute.want(statement)
\ No newline at end of file
diff --git a/NARS/MentalOperation/Interface_Execution.py b/NARS/MentalOperation/Interface_Execution.py
new file mode 100644
index 0000000..260858c
--- /dev/null
+++ b/NARS/MentalOperation/Interface_Execution.py
@@ -0,0 +1,46 @@
+from typing import Iterable, List
+import NAL.MentalOperation._execute as _execute
+from NARS.DataStructures._py.Concept import Concept
+from NARS.DataStructures._py.Memory import Memory
+from Narsese import Task, Term
+from ..DataStructures import Bag
+from copy import copy, deepcopy
+
+def execute__believe(arguments: Iterable[Term], task: Task=None, memory: Memory=None):
+    ''''''
+    statement, truth_term = arguments[1], arguments[2]
+    return _execute.believe(statement, truth_term)
+
+def execute__doubt(arguments: Iterable[Term], task: Task=None, memory: Memory=None):
+    ''''''
+    term = arguments[1]
+    concept = Concept._conceptualize(memory.concepts, term, task.budget)
+    return _execute.doubt(list(concept.belief_table))
+
+def execute__evaluate(arguments: Iterable[Term], task: Task=None, memory: Memory=None):
+    ''''''
+    statement = arguments[1]
+    return _execute.evaluate(statement)
+
+def execute__wonder(arguments: Iterable[Term], task: Task=None, memory: Memory=None):
+    ''''''
+    statement = arguments[1]
+    return _execute.wonder(statement)
+
+def execute__hesitate(arguments: Iterable[Term], task: Task=None, memory: Memory=None):
+    ''''''
+    term = arguments[1]
+    concept = Concept._conceptualize(memory.concepts, term, task.budget)
+    return _execute.hesitate(list(concept.desire_table))
+
+def execute__want(arguments: Iterable[Term], task: Task=None, memory: Memory=None):
+    ''''''
+    statement = arguments[1]
+    return _execute.want(statement)
diff --git a/NARS/MentalOperation/Register.py b/NARS/MentalOperation/Register.py
new file mode 100644
index 0000000..1a3389f
--- /dev/null
+++ b/NARS/MentalOperation/Register.py
@@ -0,0 +1,9 @@
+from typing import Callable, Dict
+from Narsese._py.Operation import *
+
+registered_operations: Dict[Operation, Callable] = {}
+
+def register(operation: Operation, callable: Callable):
+    '''Register the handler `callable` for `operation`.'''
+    global registered_operations
+    registered_operations[operation] = callable
\ No newline at end of file
diff --git a/NARS/MentalOperation/__init__.py b/NARS/MentalOperation/__init__.py
new file mode 100644
index 0000000..b57e2ce
--- /dev/null
+++ b/NARS/MentalOperation/__init__.py
@@ -0,0 +1,4 @@
+from .Interface_Execution import *
+from .Interface_Awareness import *
+from .Register import *
+from .Execution import *
\ No newline at end of file
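Operations are looked up in `registered_operations` by `execute()` above, so a handler must be registered first. A hypothetical registration; the shape of the `Operation` constructor is an assumption, hence the commented call:

```python
# Hypothetical registration of a mental-operation handler.
from NARS.MentalOperation.Register import register, registered_operations

def on_believe(arguments, task=None, memory=None):
    ...  # handle the operation's arguments

# register(Operation('believe'), on_believe)  # assuming an Operation constructor of this shape
```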
diff --git a/NARS/RuleMap/Interface/Interface_CompositionalRules.py b/NARS/RuleMap/Interface/Interface_CompositionalRules.py
new file mode 100644
index 0000000..957c068
--- /dev/null
+++ b/NARS/RuleMap/Interface/Interface_CompositionalRules.py
@@ -0,0 +1,84 @@
+from NARS.DataStructures import Link, TaskLink, TermLink, LinkType, Task
+from Narsese import Belief
+from NAL.Inference import *
+from NAL.Theorems import *
+import Global
+
+
+'''First-order, with common subject'''
+
+def _compositional__intersection_extension__0_0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return compositional__intersection_extension(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False)
+
+def _compositional__union_extension__0_0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return compositional__union_extension(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False)
+
+
+'''First-order, with common predicate'''
+
+def _compositional__intersection_intension__1_1(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return compositional__intersection_intension(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False)
+
+def _compositional__union_intension__1_1(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return compositional__union_intension(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False)
+
+
+'''Higher-order, with common subject'''
+
+def _compositional__conjunction_extension__0_0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return compositional__conjunction_extension(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False)

+def _compositional__disjunction_extension__0_0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return compositional__disjunction_extension(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False)
+
+
+'''Higher-order, with common predicate'''
+
+def _compositional__conjunction_intension__1_1(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return compositional__conjunction_intension(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False)
+
+def _compositional__disjunction_intension__1_1(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return compositional__disjunction_intension(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False)
+
+
+'''Theorems'''
+
+'''structural rules'''
+def _structural__bi_composition__0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return structural__bi_composition(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_copula=False)
+
+def _structural__bi_composition__1(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return structural__bi_composition(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_copula=True)
+
+def _structural__bi_composition__0_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return structural__bi_composition_prime(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_copula=False)
+
+def _structural__bi_composition__1_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return structural__bi_composition_prime(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_copula=True)
+
+def _structural__uni_composition__0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return structural__uni_composition(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_copula=False)
+
+def _structural__uni_composition__1(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return structural__uni_composition(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_copula=True)
+
+def _structural__uni_composition__0_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return structural__uni_composition_prime(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_copula=False)
+
+def _structural__uni_composition__1_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return structural__uni_composition_prime(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_copula=True)
+
+def _structural__uni_decomposition__0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return structural__uni_decomposition(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_copula=False)
+
+def _structural__uni_decomposition__1(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return structural__uni_decomposition(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_copula=True)
+
+
+'''implication theorems'''
+def _structural__implication_theorem3(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return structural__implication_theorem3(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_copula=False)
+
+
+def _structural__implication_theorem4(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return structural__implication_theorem4(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_copula=False)
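Every interface function in this file repeats the same link-budget extraction before delegating to the NAL rule. A sketch of a factoring that would remove that boilerplate (hypothetical helper, not part of this diff):

```python
# Hypothetical factoring of the repeated link-budget extraction.
def link_budgets(tasklink, termlink):
    return (tasklink.budget if tasklink is not None else None,
            termlink.budget if termlink is not None else None)

def make_interface(rule, **flags):
    def _interface(task, belief, tasklink=None, termlink=None):
        budget_tasklink, budget_termlink = link_budgets(tasklink, termlink)
        return rule(task, belief, budget_tasklink, budget_termlink, **flags)
    return _interface

# e.g.: _compositional__intersection_extension__0_0 = make_interface(
#     compositional__intersection_extension, inverse_premise=False)
```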
diff --git a/NARS/RuleMap/Interface/Interface_ConditionalRules.py b/NARS/RuleMap/Interface/Interface_ConditionalRules.py
new file mode 100644
index 0000000..8bc1fbe
--- /dev/null
+++ b/NARS/RuleMap/Interface/Interface_ConditionalRules.py
@@ -0,0 +1,145 @@
+from NARS.DataStructures import Link, TaskLink, TermLink, LinkType, Task
+from Narsese import Belief
+from NAL.Inference import *
+from NAL.Theorems import *
+import Global
+
+'''deduction'''
+def _conditional__deduction__0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    '''{<S ==> P>. S.} |- P.'''
+    return conditional__deduction(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False)
+
+
+def _conditional__deduction__0_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    '''{S. <S ==> P>.} |- P.'''
+    return conditional__deduction(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=True)
+
+
+def _conditional__deduction_compound_eliminate__0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    '''{<(&&, C, S, ...) ==> P>. <C ==> P>.} |- P.'''
+    return conditional__deduction_compound_eliminate(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False)
+
+def _conditional__deduction_compound_eliminate__0_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    '''{<C ==> P>. <(&&, C, S, ...) ==> P>.} |- P.'''
+    return conditional__deduction_compound_eliminate(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=True)
+
+
+def _conditional__deduction_compound_replace__0_1(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    '''{<(&&, C, S, ...) ==> P>. <C ==> P>.} |- P.'''
+    return conditional__deduction_compound_replace(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False)
+
+def _conditional__deduction_compound_replace__1_0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    '''{<C ==> P>. <(&&, C, S, ...) ==> P>.} |- P.'''
+    return conditional__deduction_compound_replace(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=True)
+
+
+'''abduction'''
+def _conditional__abduction__1(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return conditional__abduction(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False)
+
+
+def _conditional__abduction__1_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return conditional__abduction(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=True)
+
+def _conditional__abduction_compound_eliminate__1_1(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    '''{<(&&, C, S, ...) ==> P>. <C ==> P>.} |- (&&, S, ...).'''
+    return conditional__abduction_compound_eliminate(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False)
+
+def _conditional__abduction_compound_eliminate__1_1_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    '''{<C ==> P>. <(&&, C, S, ...) ==> P>.} |- (&&, S, ...).'''
+    return conditional__abduction_compound_eliminate(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=True)
+
+
+def _conditional__abduction_compound_eliminate2__1_1(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    '''{<(&&, C, S, ...) ==> P>. <C ==> P>.} |- (&&, S, ...).'''
+    return conditional__abduction_compound_eliminate2(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False)
+
+def _conditional__abduction_compound_eliminate2__1_1_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    '''{<C ==> P>. <(&&, C, S, ...) ==> P>.} |- (&&, S, ...).'''
+    return conditional__abduction_compound_eliminate2(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=True)
+
+
+'''induction'''
+def _conditional__induction_compound_replace__0_0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    '''{<(&&, C, M, ...) ==> P>. <M ==> S>.} |- <(&&, C, S, ...) ==> P>.'''
+    return conditional__induction_compound_replace(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False)
+
+def _conditional__induction_compound_replace__0_0_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    '''{<M ==> S>. <(&&, C, M, ...) ==> P>.} |- <(&&, C, S, ...) ==> P>.'''
+    return conditional__induction_compound_replace(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=True)
+
P>.} |- P.''' + return conditional__analogy(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False, inverse_copula=False) + + +def _conditional__analogy__0_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None): + '''{P>. S.} |- P.''' + return conditional__analogy(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=True, inverse_copula=False) + +def _conditional__analogy__1(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None): + '''{S. S>.} |- P.''' + return conditional__analogy(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False, inverse_copula=True) + + +def _conditional__analogy__1_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None): + '''{S>. S.} |- P.''' + return conditional__analogy(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=True, inverse_copula=True) + +# def _syllogistic__analogy__1_0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None): +# return syllogistic__analogy(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False if belief.term.is_commutative else True, inverse_copula=True if belief.term.is_commutative else False) + + +# def _syllogistic__analogy__0_0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None): +# return syllogistic__analogy(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False if belief.term.is_commutative else True, inverse_copula=False) + + +# def _syllogistic__analogy__1_1(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None): +# return syllogistic__analogy(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False if belief.term.is_commutative else True, inverse_copula=True) + + +# '''resemblance''' +# def _syllogistic__resemblance__0_1(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None): +# inverse_premise = True if (task.term.is_higher_order and (not belief.term.is_higher_order)) else False +# return syllogistic__resemblance(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=inverse_premise, inverse_copula=inverse_premise) + + +# def _syllogistic__resemblance__1_0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None): +# inverse_premise = True if (task.term.is_higher_order and (not belief.term.is_higher_order)) else False +# return syllogistic__resemblance(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=inverse_premise, inverse_copula=(not inverse_premise)) + + +# def _syllogistic__resemblance__0_0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None): +# inverse_premise = True if (task.term.is_higher_order and (not belief.term.is_higher_order)) else False +# return syllogistic__resemblance(task, belief, (tasklink.budget if tasklink is not None else None), 
(termlink.budget if termlink is not None else None), inverse_premise=inverse_premise, inverse_copula=False) + + +# def _syllogistic__resemblance__1_1(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None): +# inverse_premise = True if (task.term.is_higher_order and (not belief.term.is_higher_order)) else False +# return syllogistic__resemblance(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=inverse_premise, inverse_copula=True) + + +# def _syllogistic__resemblance__0_1_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None): +# inverse_premise = True if ((not task.term.is_higher_order) and belief.term.is_higher_order) else False +# return syllogistic__resemblance(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=inverse_premise, inverse_copula=inverse_premise) + + +# def _syllogistic__resemblance__1_0_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None): +# inverse_premise = True if ((not task.term.is_higher_order) and belief.term.is_higher_order) else False +# return syllogistic__resemblance(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=inverse_premise, inverse_copula=(not inverse_premise)) + + +# def _syllogistic__resemblance__0_0_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None): +# inverse_premise = True if ((not task.term.is_higher_order) and belief.term.is_higher_order) else False +# return syllogistic__resemblance(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=inverse_premise, inverse_copula=False) + + +# def _syllogistic__resemblance__1_1_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None): +# inverse_premise = True if ((not task.term.is_higher_order) and belief.term.is_higher_order) else False +# return syllogistic__resemblance(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=inverse_premise, inverse_copula=True) + +# '''reversion''' +# def _syllogistic__reversion(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None): +# return syllogistic__reversion(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None)) \ No newline at end of file diff --git a/NARS/RuleMap/Interface/Interface_DecompositionalRules.py b/NARS/RuleMap/Interface/Interface_DecompositionalRules.py new file mode 100644 index 0000000..63215f2 --- /dev/null +++ b/NARS/RuleMap/Interface/Interface_DecompositionalRules.py @@ -0,0 +1,37 @@ +from NARS.DataStructures import Link, TaskLink, TermLink, LinkType, Task +from Narsese import Belief +from NAL.Inference import * +from NAL.Theorems import * +import Global + +def _decompositional__decomposition_theorem2__0_0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None): + return decompositional__decomposition_theorem2(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False) + +def _decompositional__decomposition_theorem2__0_0_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: 
TermLink=None):
+    return decompositional__decomposition_theorem2(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=True)
+
+def _decompositional__decomposition_theorem3__0_0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return decompositional__decomposition_theorem3(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False)
+
+def _decompositional__decomposition_theorem3__0_0_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return decompositional__decomposition_theorem3(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=True)
+
+# def _decompositional__decomposition_theorem4__0_0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+#     return decomposition_theorem4(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False)
+
+# def _decompositional__decomposition_theorem4__0_0_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+#     return decomposition_theorem4(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=True)
+
+
+def _decompositional__decomposition_theorem9(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return decompositional__decomposition_theorem9(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False)
+
+def _decompositional__decomposition_theorem9_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return decompositional__decomposition_theorem9(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=True)
+
+
+def _decompositional__decomposition_theorem10(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return decompositional__decomposition_theorem10(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False)
+
+def _decompositional__decomposition_theorem10_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return decompositional__decomposition_theorem10(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=True)
diff --git a/NARS/RuleMap/Interface/Interface_SyllogisticRules.py b/NARS/RuleMap/Interface/Interface_SyllogisticRules.py
new file mode 100644
index 0000000..3ba8534
--- /dev/null
+++ b/NARS/RuleMap/Interface/Interface_SyllogisticRules.py
@@ -0,0 +1,119 @@
+from NARS.DataStructures import Link, TaskLink, TermLink, LinkType, Task
+from Narsese import Belief
+from NAL.Inference import *
+from NAL.Theorems import *
+import Global
+
+'''deduction'''
+def _syllogistic__deduction__0_1(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    '''{<M --> P>, <S --> M>} |- <S --> P>'''
+    return syllogistic__deduction(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False)
+
+
+def _syllogistic__deduction__1_0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    '''{<S --> M>, <M --> P>} |- <S --> P>'''
+    return syllogistic__deduction(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=True)
+
+
+'''exemplification'''
+def _syllogistic__exemplification__0_1(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    '''{<M --> S>, <P --> M>} |- <S --> P>'''
+    return syllogistic__exemplification(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=True)
+
+
+def _syllogistic__exemplification__1_0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    '''{<P --> M>, <M --> S>} |- <S --> P>'''
+    return syllogistic__exemplification(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False)
+
+
+'''induction'''
+def _syllogistic__induction__0_0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return syllogistic__induction(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False)
+
+
+def _syllogistic__induction__0_0_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return syllogistic__induction(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=True)
+
+
+'''abduction'''
+def _syllogistic__abduction__1_1(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return syllogistic__abduction(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False)
+
+
+def _syllogistic__abduction__1_1_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return syllogistic__abduction(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=True)
+
+
+def _syllogistic__comparison__0_0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return syllogistic__comparison(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False, inverse_copula=False)
+
+def _syllogistic__comparison__0_0_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return syllogistic__comparison(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=True, inverse_copula=False)
+
+def _syllogistic__comparison__1_1(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return syllogistic__comparison(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False, inverse_copula=True)
+
+def _syllogistic__comparison__1_1_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return syllogistic__comparison(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=True, inverse_copula=True)
+
+
+'''analogy'''
+def _syllogistic__analogy__0_1(task: Task, belief: Belief, tasklink:
TaskLink=None, termlink: TermLink=None): + return syllogistic__analogy(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False if belief.term.is_commutative else True, inverse_copula=False if belief.term.is_commutative else True) + + +def _syllogistic__analogy__1_0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None): + return syllogistic__analogy(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False if belief.term.is_commutative else True, inverse_copula=True if belief.term.is_commutative else False) + + +def _syllogistic__analogy__0_0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None): + return syllogistic__analogy(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False if belief.term.is_commutative else True, inverse_copula=False) + + +def _syllogistic__analogy__1_1(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None): + return syllogistic__analogy(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False if belief.term.is_commutative else True, inverse_copula=True) + + +'''resemblance''' +def _syllogistic__resemblance__0_1(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None): + inverse_premise = True if (task.term.is_higher_order and (not belief.term.is_higher_order)) else False + return syllogistic__resemblance(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=inverse_premise, inverse_copula=inverse_premise) + + +def _syllogistic__resemblance__1_0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None): + inverse_premise = True if (task.term.is_higher_order and (not belief.term.is_higher_order)) else False + return syllogistic__resemblance(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=inverse_premise, inverse_copula=(not inverse_premise)) + + +def _syllogistic__resemblance__0_0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None): + inverse_premise = True if (task.term.is_higher_order and (not belief.term.is_higher_order)) else False + return syllogistic__resemblance(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=inverse_premise, inverse_copula=False) + + +def _syllogistic__resemblance__1_1(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None): + inverse_premise = True if (task.term.is_higher_order and (not belief.term.is_higher_order)) else False + return syllogistic__resemblance(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=inverse_premise, inverse_copula=True) + + +def _syllogistic__resemblance__0_1_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None): + inverse_premise = True if ((not task.term.is_higher_order) and belief.term.is_higher_order) else False + return syllogistic__resemblance(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None 
else None), inverse_premise=inverse_premise, inverse_copula=inverse_premise)
+
+
+def _syllogistic__resemblance__1_0_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    inverse_premise = True if ((not task.term.is_higher_order) and belief.term.is_higher_order) else False
+    return syllogistic__resemblance(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=inverse_premise, inverse_copula=(not inverse_premise))
+
+
+def _syllogistic__resemblance__0_0_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    inverse_premise = True if ((not task.term.is_higher_order) and belief.term.is_higher_order) else False
+    return syllogistic__resemblance(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=inverse_premise, inverse_copula=False)
+
+
+def _syllogistic__resemblance__1_1_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    inverse_premise = True if ((not task.term.is_higher_order) and belief.term.is_higher_order) else False
+    return syllogistic__resemblance(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=inverse_premise, inverse_copula=True)
+
+'''reversion'''
+def _syllogistic__reversion(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    return syllogistic__reversion(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None))
\ No newline at end of file
diff --git a/NARS/RuleMap/Interface/Interface_TemporalRules.py b/NARS/RuleMap/Interface/Interface_TemporalRules.py
new file mode 100644
index 0000000..33608ff
--- /dev/null
+++ b/NARS/RuleMap/Interface/Interface_TemporalRules.py
@@ -0,0 +1,46 @@
+from NARS.DataStructures import Link, TaskLink, TermLink, LinkType, Task
+from Narsese import Belief
+from NAL.Inference import *
+from NAL.Theorems import *
+import Global
+
+
+def _temporal__deduction_sequence_eliminate__0(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    '''{<(&/, C, +100, S, ...) =/> P>. C. :|:} |- <S =/> P>. :!105:'''
+    return temporal__deduction_sequence_eliminate(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False)
+
+
+def _temporal__deduction_sequence_eliminate__0_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    '''{C. :|: <(&/, C, +100, S, ...) =/> P>.} |- <S =/> P>. :!105:'''
+    return temporal__deduction_sequence_eliminate(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=True)
+
+
+def _temporal__abduction__1(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    '''{<(&/, C, +100, S, ...) =/> P>. C. :|:} |- <S =/> P>. :!105:'''
+    return temporal__abduction(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False)
+
+
+def _temporal__abduction__1_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None):
+    '''{C. :|: <(&/, C, +100, S, ...) =/> P>.} |- <S =/> P>.
:!105:''' + return temporal__abduction(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=True) + + +def _temporal__implication_induction(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None): + '''''' + return temporal__implication_induction(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False) + + +def _temporal__implication_induction_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None): + '''''' + return temporal__implication_induction(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=True) + + +def _temporal__composition_induction(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None): + '''''' + return temporal__composition_induction(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=False) + + +def _temporal__composition_induction_prime(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None): + '''''' + return temporal__composition_induction(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), inverse_premise=True) + diff --git a/NARS/RuleMap/Interface/Interface_TransformRules.py b/NARS/RuleMap/Interface/Interface_TransformRules.py new file mode 100644 index 0000000..1b681c5 --- /dev/null +++ b/NARS/RuleMap/Interface/Interface_TransformRules.py @@ -0,0 +1,26 @@ +from NARS.DataStructures import Link, TaskLink, TermLink, LinkType, Task +from Narsese import Belief +from NAL.Inference import * +from NAL.Theorems import * +import Global + +'''negation''' +def _transform__negation(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None): + return immediate__negation(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None)) + + +'''contraposition''' +def _transform__contraposition(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None): + return immediate__contraposition(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None)) + +'''product and image''' +def _transform__product_to_image(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None): + return transform__product_to_image(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), index=tasklink.component_index) + + +def _transform__image_to_product(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None): + return transform__image_to_product(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), index=tasklink.component_index) + + +def _transform__image_to_image(task: Task, belief: Belief, tasklink: TaskLink=None, termlink: TermLink=None): + return transform__image_to_image(task, belief, (tasklink.budget if tasklink is not None else None), (termlink.budget if termlink is not None else None), index=tasklink.component_index) \ No newline at end of file diff --git a/NARS/RuleMap/Interface/__init__.py b/NARS/RuleMap/Interface/__init__.py new file mode 100644 index 0000000..e69de29 diff --git 
a/NARS/RuleMap/RuleMap_v2.py b/NARS/RuleMap/RuleMap_v2.py
new file mode 100644
index 0000000..573492a
--- /dev/null
+++ b/NARS/RuleMap/RuleMap_v2.py
@@ -0,0 +1,383 @@
+from operator import imod
+import os
+from pathlib import Path
+from inspect import getmembers, isfunction
+import importlib
+import re
+from typing import Any, List, Tuple, Union
+from typing_extensions import Protocol
+from collections import OrderedDict
+
+from numpy import product
+
+from Config import Enable
+from Narsese import Copula, Task
+from Narsese import Connector, Statement, Belief, Term, Truth, Compound, Budget
+from ..DataStructures import LinkType, TaskLink, TermLink
+from NAL.Inference import *
+from utils.SparseLUT import SparseLUT
+from utils.tools import get_size
+
+from utils.Print import out_print, PrintType
+
+import time
+from datetime import datetime
+import pickle
+import sty
+from ._extract_feature import extract_feature, _compound_has_common, _compound_at
+import Global
+
+from .Rules import *
+
+class RuleMap:
+    def __init__(self, build=True, add_rules={1,2,3,4,5,6,7,8,9}) -> None:
+        n_link_types = max([t.value for t in LinkType.__members__.values()])
+        n_copula = len(Copula)
+        n_has_common_id = 2
+        n_match_reverse = 2
+        n_common_id = 4
+        n_compound_common_id = 4
+        n_connector = len(Connector)
+        n_sentence_type = 4
+
+        n_has_compound_common_id = 2
+        n_has_at = 2
+        n_has_compound_at = 2
+        n_the_other_compound_has_common = 2
+        n_the_other_compound_p1_at_p2 = 2
+        n_the_other_compound_p2_at_p1 = 2
+        n_is_belief_valid = 2
+        n_at_compound_pos = 2
+        n_p1_at_p2 = 2
+        n_p2_at_p1 = 2
+
+        self._init_type_map(
+            ("is_belief_valid", bool, n_is_belief_valid),
+
+            ("sentence_type", int, n_sentence_type),
+
+            ("match_reverse", bool, n_match_reverse),
+
+            ("LinkType1", LinkType, n_link_types),
+            ("LinkType2", LinkType, n_link_types),
+
+            ("Copula1", Copula, n_copula),
+            ("Copula2", Copula, max(n_copula, n_connector)),
+
+            ("Connector1", Connector, n_connector),
+            ("Connector2", Connector, n_connector),
+
+            ("has_compound_at", bool, n_has_compound_at),
+            ("at_compound_pos", int, n_at_compound_pos),
+            ("the_other_compound_has_common", bool, n_the_other_compound_has_common),
+            ("the_other_compound_p1_at_p2", bool, n_the_other_compound_p1_at_p2),
+            ("the_other_compound_p2_at_p1", bool, n_the_other_compound_p2_at_p1),
+
+
+            ("compound_common_id", CommonId, n_compound_common_id),
+
+            ("has_common_id", bool, n_has_common_id),
+            ("has_compound_common_id", bool, n_has_compound_common_id),
+            ("has_at", bool, n_has_at),
+            ("p1_at_p2", bool, n_p1_at_p2),
+            ("p2_at_p1", bool, n_p2_at_p1),
+            ("common_id", CommonId, n_common_id),
+
+        )
+
+        if 1 in add_rules: add_rules__NAL1(self.map, self.structure_map)
+        if 2 in add_rules: add_rules__NAL2(self.map, self.structure_map)
+        if 3 in add_rules: add_rules__NAL3(self.map, self.structure_map)
+        if 4 in add_rules: add_rules__NAL4(self.map, self.structure_map)
+        if 5 in add_rules: add_rules__NAL5(self.map, self.structure_map)
+        if 6 in add_rules: add_rules__NAL6(self.map, self.structure_map)
+        if 7 in add_rules: add_rules__NAL7(self.map, self.structure_map)
+        if 8 in add_rules: add_rules__NAL8(self.map, self.structure_map)
+        if 9 in add_rules: add_rules__NAL9(self.map, self.structure_map)
+
+        if build: self.build()
+
+
+    def _init_type_map(self, *slots: Tuple[str, type, int]):
+        '''
+        slots (List[Tuple[str, type, int]]): each slot is described by a tuple (name, value-type, size), where size is the number of integer type-ids the slot can take in the look-up table.
+        '''
+        self.structure_map = OrderedDict([(slot[0], tuple(slot[1:])) for slot in slots])
+        # self.map = np.empty([n_type for *_, n_type in slots], dtype=object) # Shape: [LinkType, LinkType, Copula, Copula, match_reverse, common_id, Connector, Connector]. A dense array costs about 1.2 GB of memory, which is too expensive, so a sparse look-up table is adopted instead.
+        shape = tuple([n_type for *_, n_type in slots])
+        self.map = SparseLUT(shape)
+
+
+    def build(self, clear=True):
+        root_path = Path(__file__).parent
+        def check_update():
+            cache_path = root_path/'LUT.pkl'
+            try:
+                if not cache_path.exists(): return True
+                this_filepath = Path(__file__)
+                filepaths = (this_filepath.parent/"Rules").glob("NAL*.py")
+                mtime_cache = datetime.fromtimestamp(cache_path.stat().st_mtime)
+                for filepath in filepaths:
+                    mtime_this = datetime.fromtimestamp(filepath.stat().st_mtime)
+                    if mtime_this > mtime_cache:
+                        return True
+                return False
+            except Exception:
+                if not cache_path.exists(): return True
+                else: return False
+
+        if check_update():
+            # t_start = time.time()
+            # self.map.build(clear)
+            # t_end = time.time()
+            # self.map.dump(str(root_path))
+            # if Enable.debug: out_print(PrintType.INFO, f'Building time cost: {t_end-t_start}s.')
+            self.rebuild(root_path, clear)
+        else:
+            self.map.load(str(root_path))
+
+        # if Enable.debug: out_print(PrintType.INFO, f'The size of map: {get_size(self.map.lut)/1024/1024:.6f}MB')
+
+
+    def rebuild(self, root_path: Union[str, Path], clear=True):
+        '''Rebuild the look-up table from the rule files and dump it to disk.'''
+        t_start = time.time()
+        self.map.build(clear)
+        t_end = time.time()
+        self.map.dump(str(root_path))
+        if Enable.debug: out_print(PrintType.INFO, f'Building time cost: {t_end-t_start}s.')
+
+
+    def draw(self, show_labels=True):
+        self.map.draw(show_labels)
+
+
+    def match(self, task: Task, belief: Union[Belief, None], belief_term: Union[Term, Compound, Statement, None], task_link: TaskLink, term_link: TermLink):
+        '''
+        Given a task and a belief, find the matched rules for one-step inference.
+        '''
+        link1 = task_link.type
+        link2 = term_link.type if term_link is not None else None # `term_link` may be `None` in the case of single-premise inference.
+
+        the_other_compound_has_common = the_other_compound_p1_at_p2 = the_other_compound_p2_at_p1 = False
+        connector1 = connector2 = None
+        at_compound_pos = None
+
+        common_id = None
+        compound_common_id = None
+
+        feature = extract_feature(task.term, (belief.term if belief is not None else belief_term))
+        if belief_term is None:
+            if link1 is LinkType.TRANSFORM:
+                compound_transform: Compound = task.term[task_link.component_index[:-1]]
+                if compound_transform.is_compound:
+                    connector1 = compound_transform.connector
+                    if connector1 in (Connector.ExtensionalImage, Connector.IntensionalImage) and task_link.component_index[-1] == 0:
+                        connector1 = None
+
+        else:
+            if feature.match_reverse is True:
+                pass
+            elif feature.has_common_id:
+                if feature.has_at:
+                    if feature.common_id_task is not None:
+                        common_id = feature.common_id_task
+                    elif feature.common_id_belief is not None:
+                        common_id = feature.common_id_belief
+                    else: raise ValueError("Invalid case.")
+                elif feature.has_compound_at:
+                    if feature.compound_common_id_task is not None:
+                        common_id = feature.compound_common_id_task
+                        compound: Compound = task.term[common_id]
+                        connector1 = compound.connector
+                    elif feature.compound_common_id_belief is not None:
+                        common_id = feature.compound_common_id_belief
+                        compound: Compound = belief_term[common_id]
+                        connector2 = compound.connector
+                    else: raise ValueError("Invalid case.")
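+                    # Editorial note: the check below records where the common term sits
+                    # inside a two-component ("double only") compound, such as an extensional
+                    # or intensional difference: `at_compound_pos` is 0 if `belief_term` is the
+                    # first component and 1 if it is the second. The rule tables (e.g. NAL3 and
+                    # NAL4 below) branch on this position via their `at_compound_pos` slots.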
+
+                    if compound.is_double_only:
+                        if compound[0] == belief_term:
+                            at_compound_pos = 0
+                        elif compound[1] == belief_term:
+                            at_compound_pos = 1
+                        else: raise ValueError("Invalid case.")
+                elif feature.has_compound_common_id:
+
+                    if feature.compound_common_id_belief is not None and feature.compound_common_id_task is not None:
+                        # Both compound common-ids are present.
+                        compound_task_term: Compound = task.term[feature.compound_common_id_task]
+                        compound_belief_term: Compound = belief_term[feature.compound_common_id_belief]
+                        compound_p1_at_p2 = _compound_at(compound_task_term, compound_belief_term, False)
+                        compound_p2_at_p1 = _compound_at(compound_belief_term, compound_task_term, False)
+                        if compound_p1_at_p2 and compound_belief_term.is_compound:
+                            connector2 = compound_belief_term.connector
+                        if compound_p2_at_p1 and compound_task_term.is_compound:
+                            connector1 = compound_task_term.connector
+
+                        compound_common_id = feature.compound_common_id_task*2 + feature.compound_common_id_belief
+                    elif feature.compound_common_id_belief is None and belief_term.is_compound:
+                        # Only the task-side compound common-id is present.
+                        compound_common_id = feature.compound_common_id_task
+                        connector2 = belief_term.connector
+
+                        common_term = task.term[compound_common_id]
+                        if belief_term.is_double_only:
+                            if common_term == belief_term[0]:
+                                at_compound_pos = 0
+                            elif common_term == belief_term[1]:
+                                at_compound_pos = 1
+                            else: raise ValueError("Invalid case.")
+                        elif belief_term.is_multiple_only:
+                            if common_term == belief_term[0]:
+                                at_compound_pos = 0
+                            else:
+                                at_compound_pos = 1
+
+                    elif feature.compound_common_id_task is None:
+                        # Only the belief-side compound common-id is present.
+                        compound_common_id = feature.compound_common_id_belief
+                        task_term: Compound = task.term
+                        if task_term.is_compound:
+                            connector1 = task_term.connector
+                        # # raise "Is this case valid?"
+
+
+                        # compound_common_id = feature.compound_common_id_belief
+                        # connector1 = task_term.connector
+
+                        # common_term = belief.term[compound_common_id]
+                        # if task_term.is_double_only:
+                        #     if common_term == task_term[0]:
+                        #         at_compound_pos = 0
+                        #     elif common_term == task_term[1]:
+                        #         at_compound_pos = 1
+                        #     else: raise "Invalid case."
+                        #     elif task_term.is_multiple_only:
+                        #         if common_term == task_term[0]:
+                        #             at_compound_pos = 0
+                        #         else:
+                        #             at_compound_pos = 1
+                        #         pass
+
+                elif feature.common_id_task is not None and feature.common_id_belief is not None:
+                    common_id = feature.common_id_task*2 + feature.common_id_belief
+                else:
+                    if feature.p1_at_p2 and belief_term.is_compound:
+                        connector2 = belief_term.connector
+                    if feature.p2_at_p1 and task.term.is_compound:
+                        connector1 = task.term.connector
+            else:
+                if task.term.is_compound:
+                    connector1 = task.term.connector
+                if belief_term.is_compound:
+                    connector2 = belief_term.connector
+
+                term1, term2 = feature.the_other1, feature.the_other2
+                if term1 is not None and term2 is not None:
+                    the_other_compound_has_common = _compound_has_common(term1, term2)
+                    # _the_other_compound_has_common1 = _the_other_compound_has_common2 = False
+
+
+                    if the_other_compound_has_common:
+                        the_other_compound_p1_at_p2 = _compound_at(term1, term2, the_other_compound_has_common)
+                        the_other_compound_p2_at_p1 = _compound_at(term2, term1, the_other_compound_has_common)
+
+                        if the_other_compound_p1_at_p2 and the_other_compound_p2_at_p1:
+                            term1: Compound
+                            term2: Compound
+                            connector1 = term1.connector
+                            connector2 = term2.connector
+                        elif the_other_compound_p1_at_p2:
+                            term2: Compound
+                            connector1 = None
+                            connector2 = term2.connector
+                            # if term2.is_double_only:
+                            #     if term1 == term2[0]: at_compound_pos = 0
+                            #     elif term1 == term2[1]: at_compound_pos = 1
+                            #     else: raise "Invalid case."
+                        elif the_other_compound_p2_at_p1:
+                            term1: Compound
+                            connector1 = term1.connector
+                            connector2 = None
+                            # if term1.is_double_only:
+                            #     if term2 == term1[0]: at_compound_pos = 0
+                            #     elif term2 == term1[1]: at_compound_pos = 1
+                            #     else: raise "Invalid case."
+
+
+
+        indices = (
+            int(belief is not None),
+            task_type_id(task),
+            int(feature.match_reverse),
+
+            link1.value,
+            link2.value if link2 is not None else None,
+
+            int(task.term.copula) if not task.term.is_atom else None,
+            int(belief.term.copula) if belief is not None else (int(belief_term.connector) if ((belief_term is not None) and (not belief_term.is_atom) and belief_term.is_compound) else None),
+
+            int(connector1) if connector1 is not None else None,
+            int(connector2) if connector2 is not None else None,
+
+            int(feature.has_compound_at),
+            at_compound_pos,
+            int(the_other_compound_has_common),
+            int(the_other_compound_p1_at_p2),
+            int(the_other_compound_p2_at_p1),
+
+            compound_common_id,
+
+            int(feature.has_common_id),
+            int(feature.has_compound_common_id),
+            int(feature.has_at),
+            int(feature.p1_at_p2) if feature.p1_at_p2 is not None else None,
+            int(feature.p2_at_p1) if feature.p2_at_p1 is not None else None,
+            common_id,
+
+
+        )
+        rules: RuleCallable = self.map[indices]
+        return rules
+
+
+    def verify(self, task_link: TaskLink, term_link: TermLink, *args):
+        raise NotImplementedError("Invalid function.")
+
+
+    def diagnose(self, indices):
+        '''
+        Given `indices`, check whether a valid rule can be retrieved.
+        If not, return the index of the position where the look-up fails;
+        otherwise, return None.
+        In either case, a diagnostic message is printed.
+        '''
+        for i in range(1, len(indices)):
+            if self.map[indices[:i]] is None:
+                name_str, (name_type, _) = list(self.structure_map.items())[i]
+                print(f"{sty.fg.blue}Diagnose: {sty.fg.red}ERROR.\n    {sty.fg.blue}{i}: {sty.ef.bold}{name_str}, {name_type}{sty.rs.all}")
+
+                return i
+        print(f"{sty.fg.blue}Diagnose: {sty.fg.green}PASS.{sty.rs.all}")
+        return None
+
+
+    def __repr__(self) -> str:
+        '''Print self.structure_map.'''
+        r = f"\n"
+        for key, item in self.structure_map.items():
+            r += f"    {key}, {item}\n"
+        return r
+
+
+
+if __name__ == '__main__':
+    root_path = Path(__file__).parent
+    rulemap = RuleMap(build=False)
+    rulemap.rebuild(root_path)
diff --git a/NARS/RuleMap/Rules/NAL1.py b/NARS/RuleMap/Rules/NAL1.py
new file mode 100644
index 0000000..2fe4465
--- /dev/null
+++ b/NARS/RuleMap/Rules/NAL1.py
@@ -0,0 +1,118 @@
+from collections import OrderedDict
+from NARS.DataStructures import LinkType, TaskLink, TermLink
+from utils.SparseLUT import SparseLUT
+import Global
+from .add_rule import *
+
+def add_rules__NAL1(sparse_lut: SparseLUT, structure: OrderedDict):
+
+    '''deduction'''
+    add_rule(sparse_lut, structure,
+        Interface_SyllogisticRules._syllogistic__deduction__0_1,
+        LinkType1 = LinkType.COMPOUND_STATEMENT,
+        LinkType2 = LinkType.COMPOUND_STATEMENT,
+        has_common_id = True,
+        Copula1 = Copula.Inheritance,
+        Copula2 = Copula.Inheritance,
+        match_reverse = False,
+        common_id = CommonId(0, 1),
+        has_compound_at = False
+    )
+
+    add_rule(sparse_lut, structure,
+        Interface_SyllogisticRules._syllogistic__deduction__1_0,
+        LinkType1 = LinkType.COMPOUND_STATEMENT,
+        LinkType2 = LinkType.COMPOUND_STATEMENT,
+        has_common_id = True,
+        Copula1 = Copula.Inheritance,
+        Copula2 = Copula.Inheritance,
+        match_reverse = False,
+        common_id = CommonId(1, 0),
+        has_compound_at = False
+    )
+
+    '''exemplification'''
+    add_rule(sparse_lut, structure,
+        Interface_SyllogisticRules._syllogistic__exemplification__0_1,
+        LinkType1 = LinkType.COMPOUND_STATEMENT,
+        LinkType2 = LinkType.COMPOUND_STATEMENT,
+        has_common_id = True,
+        Copula1 = Copula.Inheritance,
+        Copula2 = Copula.Inheritance,
+        match_reverse = False,
+        common_id = CommonId(0, 1),
+        has_compound_at = False
+    )
+
+    add_rule(sparse_lut, structure,
+        Interface_SyllogisticRules._syllogistic__exemplification__1_0,
+        LinkType1 = LinkType.COMPOUND_STATEMENT,
+        LinkType2 = LinkType.COMPOUND_STATEMENT,
+        has_common_id = True,
+        Copula1 = Copula.Inheritance,
+        Copula2 = Copula.Inheritance,
+        match_reverse = False,
+        common_id = CommonId(1, 0),
+        has_compound_at = False
+    )
+
+    '''induction'''
+    add_rule(sparse_lut, structure,
+        Interface_SyllogisticRules._syllogistic__induction__0_0,
+        LinkType1 = LinkType.COMPOUND_STATEMENT,
+        LinkType2 = LinkType.COMPOUND_STATEMENT,
+        has_common_id = True,
+        Copula1 = Copula.Inheritance,
+        Copula2 = Copula.Inheritance,
+        match_reverse = False,
+        common_id = CommonId(0, 0),
+        has_compound_at = False
+    )
+
+    add_rule(sparse_lut, structure,
+        Interface_SyllogisticRules._syllogistic__induction__0_0_prime,
+        LinkType1 = LinkType.COMPOUND_STATEMENT,
+        LinkType2 = LinkType.COMPOUND_STATEMENT,
+        has_common_id = True,
+        Copula1 = Copula.Inheritance,
+        Copula2 = Copula.Inheritance,
+        match_reverse = False,
+        common_id = CommonId(0, 0),
+        has_compound_at = False
+    )
+
+    '''abduction'''
+    add_rule(sparse_lut, structure,
+        Interface_SyllogisticRules._syllogistic__abduction__1_1,
+        LinkType1 = LinkType.COMPOUND_STATEMENT,
+        LinkType2 = LinkType.COMPOUND_STATEMENT,
+        has_common_id = True,
+        Copula1 = Copula.Inheritance,
+        Copula2 =
Copula.Inheritance, + match_reverse = False, + common_id = CommonId(1, 1), + has_compound_at = False + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__abduction__1_1_prime, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND_STATEMENT, + has_common_id = True, + Copula1 = Copula.Inheritance, + Copula2 = Copula.Inheritance, + match_reverse = False, + common_id = CommonId(1, 1), + has_compound_at = False + ) + + '''reversion''' + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__reversion, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND_STATEMENT, + has_common_id = True, + Copula1 = Copula.Inheritance, + Copula2 = Copula.Inheritance, + match_reverse = True + ) \ No newline at end of file diff --git a/NARS/RuleMap/Rules/NAL2.py b/NARS/RuleMap/Rules/NAL2.py new file mode 100644 index 0000000..ecba284 --- /dev/null +++ b/NARS/RuleMap/Rules/NAL2.py @@ -0,0 +1,201 @@ +from collections import OrderedDict +from NARS.DataStructures import LinkType, TaskLink, TermLink +from utils.SparseLUT import SparseLUT +import Global +from .add_rule import * + + +def add_rules__NAL2(sparse_lut: SparseLUT, structure: OrderedDict): + '''''' + '''comparison''' + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__comparison__0_0, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND_STATEMENT, + has_common_id = True, + Copula1 = Copula.Inheritance, # --> + Copula2 = Copula.Inheritance, # --> + match_reverse = False, + common_id = CommonId(0, 0), + has_compound_at = False + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__comparison__0_0_prime, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND_STATEMENT, + has_common_id = True, + Copula1 = Copula.Inheritance, # --> + Copula2 = Copula.Inheritance, # --> + match_reverse = False, + common_id = CommonId(0, 0), + has_compound_at = False + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__comparison__1_1, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND_STATEMENT, + has_common_id = True, + Copula1 = Copula.Inheritance, # --> + Copula2 = Copula.Inheritance, # --> + match_reverse = False, + common_id = CommonId(1, 1), + has_compound_at = False + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__comparison__1_1_prime, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND_STATEMENT, + has_common_id = True, + Copula1 = Copula.Inheritance, # --> + Copula2 = Copula.Inheritance, # --> + match_reverse = False, + common_id = CommonId(1, 1), + has_compound_at = False + ) + '''analogy''' + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__analogy__0_0, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND_STATEMENT, + has_common_id = True, + Copula1 = Copula.Inheritance, # --> + Copula2 = Copula.Similarity, # <-> + match_reverse = False, + common_id = CommonId(0, 0), + has_compound_at = False + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__analogy__0_1, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND_STATEMENT, + has_common_id = True, + Copula1 = Copula.Inheritance, # --> + Copula2 = Copula.Similarity, # <-> + match_reverse = False, + common_id = CommonId(0, 1), + has_compound_at = False + ) + + add_rule(sparse_lut, structure, + 
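+        # Editorial illustration (S/M/P are placeholder terms): this entry matches a
+        # task <P --> M> with a belief <M <-> S> and yields <P --> S> by analogy;
+        # common_id = CommonId(1, 0) encodes that the task's predicate is the belief's subject.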
Interface_SyllogisticRules._syllogistic__analogy__1_0, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND_STATEMENT, + has_common_id = True, + Copula1 = Copula.Inheritance, # --> + Copula2 = Copula.Similarity, # <-> + match_reverse = False, + common_id = CommonId(1, 0), + has_compound_at = False + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__analogy__1_1, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND_STATEMENT, + has_common_id = True, + Copula1 = Copula.Inheritance, # --> + Copula2 = Copula.Similarity, # <-> + match_reverse = False, + common_id = CommonId(1, 1), + has_compound_at = False + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__analogy__0_0, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND_STATEMENT, + has_common_id = True, + Copula1 = Copula.Similarity, # <-> + Copula2 = Copula.Inheritance, # --> + match_reverse = False, + common_id = CommonId(0, 0), + has_compound_at = False + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__analogy__0_1, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND_STATEMENT, + has_common_id = True, + Copula1 = Copula.Similarity, # <-> + Copula2 = Copula.Inheritance, # --> + match_reverse = False, + common_id = CommonId(0, 1), + has_compound_at = False + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__analogy__1_0, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND_STATEMENT, + has_common_id = True, + Copula1 = Copula.Similarity, # <-> + Copula2 = Copula.Inheritance, # --> + match_reverse = False, + common_id = CommonId(1, 0), + has_compound_at = False + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__analogy__1_1, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND_STATEMENT, + has_common_id = True, + Copula1 = Copula.Similarity, # <-> + Copula2 = Copula.Inheritance, # --> + match_reverse = False, + common_id = CommonId(1, 1), + has_compound_at = False + ) + '''resemblance''' + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__resemblance__0_0, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND_STATEMENT, + has_common_id = True, + Copula1 = Copula.Similarity, # <-> + Copula2 = Copula.Similarity, # <-> + match_reverse = False, + common_id = CommonId(0, 0), + has_compound_at = False + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__resemblance__0_1, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND_STATEMENT, + has_common_id = True, + Copula1 = Copula.Similarity, # <-> + Copula2 = Copula.Similarity, # <-> + match_reverse = False, + common_id = CommonId(0, 1), + has_compound_at = False + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__resemblance__1_0, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND_STATEMENT, + has_common_id = True, + Copula1 = Copula.Similarity, # <-> + Copula2 = Copula.Similarity, # <-> + match_reverse = False, + common_id = CommonId(1, 0), + has_compound_at = False + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__resemblance__1_1, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND_STATEMENT, + has_common_id = True, + Copula1 = Copula.Similarity, # <-> + Copula2 = Copula.Similarity, # <-> + match_reverse = 
False,
+        common_id = CommonId(1, 1),
+        has_compound_at = False
+    )
\ No newline at end of file
diff --git a/NARS/RuleMap/Rules/NAL3.py b/NARS/RuleMap/Rules/NAL3.py
new file mode 100644
index 0000000..310399f
--- /dev/null
+++ b/NARS/RuleMap/Rules/NAL3.py
@@ -0,0 +1,391 @@
+from collections import OrderedDict
+from NARS.DataStructures import LinkType, TaskLink, TermLink
+from utils.SparseLUT import SparseLUT
+import Global
+from .add_rule import *
+
+def add_rules__NAL3(sparse_lut: SparseLUT, structure: OrderedDict):
+    ''''''
+    '''
+    Compositional Rules
+    '''
+    '''First-order with common subject'''
+
+    '''intersection_extension'''
+    add_rule(sparse_lut, structure,
+        Interface_CompositionalRules._compositional__intersection_extension__0_0,
+        LinkType1 = LinkType.COMPOUND_STATEMENT,
+        LinkType2 = LinkType.COMPOUND_STATEMENT,
+        has_common_id = True,
+        Copula1 = Copula.Inheritance,
+        Copula2 = Copula.Inheritance,
+        match_reverse = False,
+        common_id = CommonId(0, 0),
+        sentence_type = class_sentence_to_list(Judgement)
+    )
+
+    '''union_extension'''
+    add_rule(sparse_lut, structure,
+        Interface_CompositionalRules._compositional__union_extension__0_0,
+        LinkType1 = LinkType.COMPOUND_STATEMENT,
+        LinkType2 = LinkType.COMPOUND_STATEMENT,
+        has_common_id = True,
+        Copula1 = Copula.Inheritance,
+        Copula2 = Copula.Inheritance,
+        match_reverse = False,
+        common_id = CommonId(0, 0),
+        sentence_type = class_sentence_to_list(Judgement)
+    )
+
+
+    '''First-order with common predicate'''
+
+    '''intersection_intension'''
+    add_rule(sparse_lut, structure,
+        Interface_CompositionalRules._compositional__intersection_intension__1_1,
+        LinkType1 = LinkType.COMPOUND_STATEMENT,
+        LinkType2 = LinkType.COMPOUND_STATEMENT,
+        has_common_id = True,
+        Copula1 = Copula.Inheritance,
+        Copula2 = Copula.Inheritance,
+        match_reverse = False,
+        common_id = CommonId(1, 1),
+        sentence_type = class_sentence_to_list(Judgement)
+    )
+
+    '''union_intension'''
+    add_rule(sparse_lut, structure,
+        Interface_CompositionalRules._compositional__union_intension__1_1,
+        LinkType1 = LinkType.COMPOUND_STATEMENT,
+        LinkType2 = LinkType.COMPOUND_STATEMENT,
+        has_common_id = True,
+        Copula1 = Copula.Inheritance,
+        Copula2 = Copula.Inheritance,
+        match_reverse = False,
+        common_id = CommonId(1, 1),
+        sentence_type = class_sentence_to_list(Judgement)
+    )
+
+
+    '''
+    Decompositional Rules
+    '''
+
+    '''intensional intersection'''
+    add_rule(sparse_lut, structure,
+        Interface_DecompositionalRules._decompositional__decomposition_theorem2__0_0,
+        LinkType1 = LinkType.COMPOUND_STATEMENT,
+        LinkType2 = LinkType.COMPOUND_STATEMENT,
+        has_common_id = True,
+        Copula1 = Copula.Inheritance,
+        Copula2 = Copula.Inheritance,
+        match_reverse = False,
+        common_id = CommonId(0, 0),
+        sentence_type = class_sentence_to_list(Judgement),
+        the_other_compound_has_common = True,
+        the_other_compound_p1_at_p2 = False,
+        the_other_compound_p2_at_p1 = True,
+        Connector1 = Connector.IntensionalIntersection,
+        Connector2 = Any
+    )
+
+    add_rule(sparse_lut, structure,
+        Interface_DecompositionalRules._decompositional__decomposition_theorem2__0_0_prime,
+        LinkType1 = LinkType.COMPOUND_STATEMENT,
+        LinkType2 = LinkType.COMPOUND_STATEMENT,
+        has_common_id = True,
+        Copula1 = Copula.Inheritance,
+        Copula2 = Copula.Inheritance,
+        match_reverse = False,
+        common_id = CommonId(0, 0),
+        sentence_type = class_sentence_to_list(Judgement),
+        the_other_compound_has_common = True,
+        the_other_compound_p1_at_p2 = True,
+        the_other_compound_p2_at_p1 = False,
+        Connector1 = Any,
+        Connector2 =
Connector.IntensionalIntersection + ) + + add_rule(sparse_lut, structure, + Interface_DecompositionalRules._decompositional__decomposition_theorem2__0_0, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND_STATEMENT, + has_common_id = True, + Copula1 = Copula.Inheritance, + Copula2 = Copula.Inheritance, + match_reverse = False, + common_id = CommonId(0, 0), + sentence_type = class_sentence_to_list(Judgement), + the_other_compound_has_common = True, + the_other_compound_p1_at_p2 = True, + the_other_compound_p2_at_p1 = True, + Connector1 = Connector.ExtensionalSet, + Connector2 = Connector.ExtensionalSet + ) + + add_rule(sparse_lut, structure, + Interface_DecompositionalRules._decompositional__decomposition_theorem2__0_0_prime, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND_STATEMENT, + has_common_id = True, + Copula1 = Copula.Inheritance, + Copula2 = Copula.Inheritance, + match_reverse = False, + common_id = CommonId(0, 0), + sentence_type = class_sentence_to_list(Judgement), + the_other_compound_has_common = True, + the_other_compound_p1_at_p2 = True, + the_other_compound_p2_at_p1 = True, + Connector1 = Connector.ExtensionalSet, + Connector2 = Connector.ExtensionalSet + ) + + '''extensional difference''' + add_rule(sparse_lut, structure, + #[ + Interface_DecompositionalRules._decompositional__decomposition_theorem3__0_0, + # Interface_DecompositionalRules._decompositional__decomposition_theorem4__0_0, + # ], + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND_STATEMENT, + has_common_id = True, + Copula1 = Copula.Inheritance, + Copula2 = Copula.Inheritance, + match_reverse = False, + common_id = CommonId(0, 0), + sentence_type = class_sentence_to_list(Judgement), + the_other_compound_has_common = True, + the_other_compound_p1_at_p2 = False, + the_other_compound_p2_at_p1 = True, + Connector1 = Connector.ExtensionalDifference, + Connector2 = Any + ) + + add_rule(sparse_lut, structure, + # [ + Interface_DecompositionalRules._decompositional__decomposition_theorem3__0_0_prime, + # Interface_DecompositionalRules._decompositional__decomposition_theorem4__0_0_prime, + # ], + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND_STATEMENT, + has_common_id = True, + Copula1 = Copula.Inheritance, + Copula2 = Copula.Inheritance, + match_reverse = False, + common_id = CommonId(0, 0), + sentence_type = class_sentence_to_list(Judgement), + the_other_compound_has_common = True, + the_other_compound_p1_at_p2 = True, + the_other_compound_p2_at_p1 = False, + Connector1 = Any, + Connector2 = Connector.ExtensionalDifference + ) + + '''Theorems''' + + '''bi-composition''' + add_rule(sparse_lut, structure, + Interface_CompositionalRules._structural__bi_composition__0, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND, + has_common_id = True, + Copula1 = Copula.Inheritance, + Copula2 = None, + match_reverse = False, + sentence_type = class_sentence_to_list(Judgement), + Connector1 = None, + Connector2 = [ + Connector.ExtensionalIntersection, + Connector.IntensionalIntersection, + Connector.ExtensionalDifference, + Connector.IntensionalDifference + ], + has_compound_common_id = True, + compound_common_id = CommonId(0), + is_belief_valid = False, + at_compound_pos = [0, None] + ) + + add_rule(sparse_lut, structure, + Interface_CompositionalRules._structural__bi_composition__1, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND, + has_common_id = True, + Copula1 = 
Copula.Inheritance, + Copula2 = None, + match_reverse = False, + sentence_type = class_sentence_to_list(Judgement), + Connector1 = None, + Connector2 = [ + Connector.ExtensionalIntersection, + Connector.IntensionalIntersection, + Connector.ExtensionalDifference, + Connector.IntensionalDifference + ], + has_compound_common_id = True, + compound_common_id = CommonId(1), + is_belief_valid = False, + at_compound_pos = [0, None] + ) + + add_rule(sparse_lut, structure, + Interface_CompositionalRules._structural__bi_composition__0_prime, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND, + has_common_id = True, + Copula1 = Copula.Inheritance, + Copula2 = None, + match_reverse = False, + sentence_type = class_sentence_to_list(Judgement), + Connector1 = None, + Connector2 = [ + Connector.ExtensionalDifference, + Connector.IntensionalDifference + ], + has_compound_common_id = True, + compound_common_id = CommonId(0), + is_belief_valid = False, + at_compound_pos = 1 + ) + + add_rule(sparse_lut, structure, + Interface_CompositionalRules._structural__bi_composition__1_prime, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND, + has_common_id = True, + Copula1 = Copula.Inheritance, + Copula2 = None, + match_reverse = False, + sentence_type = class_sentence_to_list(Judgement), + Connector1 = None, + Connector2 = [ + Connector.ExtensionalDifference, + Connector.IntensionalDifference + ], + has_compound_common_id = True, + compound_common_id = CommonId(1), + is_belief_valid = False, + at_compound_pos = 1 + ) + + + '''uni-composition''' + add_rule(sparse_lut, structure, + + Interface_CompositionalRules._structural__uni_composition__0, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND, + has_common_id = True, + Copula1 = Copula.Inheritance, + Copula2 = None, + match_reverse = False, + sentence_type = class_sentence_to_list(Judgement), + Connector1 = None, + Connector2 = [ + Connector.ExtensionalIntersection, + Connector.ExtensionalDifference, + ], + has_compound_common_id = True, + compound_common_id = CommonId(0), + is_belief_valid = False, + at_compound_pos = [0, None] + ) + + add_rule(sparse_lut, structure, + + Interface_CompositionalRules._structural__uni_composition__1, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND, + has_common_id = True, + Copula1 = Copula.Inheritance, + Copula2 = None, + match_reverse = False, + sentence_type = class_sentence_to_list(Judgement), + Connector1 = None, + Connector2 = [ + Connector.IntensionalIntersection, + Connector.IntensionalDifference + ], + has_compound_common_id = True, + compound_common_id = CommonId(1), + is_belief_valid = False, + at_compound_pos = [0, None] + ) + + + add_rule(sparse_lut, structure, + + Interface_CompositionalRules._structural__uni_composition__0_prime, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND, + has_common_id = True, + Copula1 = Copula.Inheritance, + Copula2 = None, + match_reverse = False, + sentence_type = class_sentence_to_list(Judgement), + Connector1 = None, + Connector2 = Connector.IntensionalDifference, + has_compound_common_id = True, + compound_common_id = CommonId(0), + is_belief_valid = False, + at_compound_pos = 1 + ) + + add_rule(sparse_lut, structure, + + Interface_CompositionalRules._structural__uni_composition__1_prime, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND, + has_common_id = True, + Copula1 = Copula.Inheritance, + Copula2 = None, + match_reverse = False, + 
sentence_type = class_sentence_to_list(Judgement), + Connector1 = None, + Connector2 = Connector.ExtensionalDifference, + has_compound_common_id = True, + compound_common_id = CommonId(1), + is_belief_valid = False, + at_compound_pos = 1 + ) + + '''uni-decomposition''' + add_rule(sparse_lut, structure, + + Interface_CompositionalRules._structural__uni_decomposition__0, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPONENT, + has_common_id = True, + has_compound_at = True, + Copula1 = Copula.Inheritance, + Copula2 = Any, + match_reverse = False, + sentence_type = class_sentence_to_list(Judgement), + Connector1 = [ + Connector.ExtensionalIntersection, + Connector.ExtensionalDifference + ], + Connector2 = None, + is_belief_valid = False, + at_compound_pos = [0, None] + ) + + add_rule(sparse_lut, structure, + + Interface_CompositionalRules._structural__uni_decomposition__1, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPONENT, + has_common_id = True, + has_compound_at = True, + Copula1 = Copula.Inheritance, + Copula2 = Any, + match_reverse = False, + sentence_type = class_sentence_to_list(Judgement), + Connector1 = [ + Connector.IntensionalIntersection, + Connector.IntensionalDifference + ], + Connector2 = None, + is_belief_valid = False, + at_compound_pos = [0, None] + ) \ No newline at end of file diff --git a/NARS/RuleMap/Rules/NAL4.py b/NARS/RuleMap/Rules/NAL4.py new file mode 100644 index 0000000..d566008 --- /dev/null +++ b/NARS/RuleMap/Rules/NAL4.py @@ -0,0 +1,158 @@ +from collections import OrderedDict +from NARS.DataStructures import LinkType, TaskLink, TermLink +from utils.SparseLUT import SparseLUT +import Global +from .add_rule import * + + +def add_rules__NAL4(sparse_lut: SparseLUT, structure: OrderedDict): + '''''' + '''transform''' + add_rule(sparse_lut, structure, + Interface_TransformRules._transform__product_to_image, + LinkType1 = LinkType.TRANSFORM, + LinkType2 = None, + has_common_id = True, + Connector1 = Connector.Product + ) + + add_rule(sparse_lut, structure, + Interface_TransformRules._transform__image_to_product, + LinkType1 = LinkType.TRANSFORM, + LinkType2 = None, + has_common_id = True, + Connector1 = [ + Connector.IntensionalImage, + Connector.ExtensionalImage + ] + ) + + add_rule(sparse_lut, structure, + Interface_TransformRules._transform__image_to_image, + LinkType1 = LinkType.TRANSFORM, + LinkType2 = [None], + has_common_id = True, + Connector1 = [ + Connector.IntensionalImage, + Connector.ExtensionalImage + ] + ) + '''Theorems''' + + '''bi-composition''' + + add_rule(sparse_lut, structure, + Interface_CompositionalRules._structural__bi_composition__0, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND, + has_common_id = True, + Copula1 = Copula.Inheritance, + Copula2 = None, + match_reverse = False, + sentence_type = class_sentence_to_list(Judgement), + Connector1 = None, + Connector2 = Connector.Product, + has_compound_common_id = True, + compound_common_id = CommonId(0), + is_belief_valid = False, + # at_compound_pos = 0 + ) + + add_rule(sparse_lut, structure, + Interface_CompositionalRules._structural__bi_composition__1, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND, + has_common_id = True, + Copula1 = Copula.Inheritance, + Copula2 = None, + match_reverse = False, + sentence_type = class_sentence_to_list(Judgement), + Connector1 = None, + Connector2 = Connector.Product, + has_compound_common_id = True, + compound_common_id = CommonId(1), + is_belief_valid = 
False, + # at_compound_pos = 0 + ) + + add_rule(sparse_lut, structure, + Interface_CompositionalRules._structural__bi_composition__0, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND, + has_common_id = True, + Copula1 = Copula.Inheritance, + Copula2 = None, + match_reverse = False, + sentence_type = class_sentence_to_list(Judgement), + Connector1 = None, + Connector2 = [ + # Connector.Product, + Connector.ExtensionalImage, + Connector.IntensionalImage + ], + has_compound_common_id = True, + compound_common_id = CommonId(0), + is_belief_valid = False, + at_compound_pos = 0 + ) + + add_rule(sparse_lut, structure, + Interface_CompositionalRules._structural__bi_composition__1, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND, + has_common_id = True, + Copula1 = Copula.Inheritance, + Copula2 = None, + match_reverse = False, + sentence_type = class_sentence_to_list(Judgement), + Connector1 = None, + Connector2 = [ + # Connector.Product, + Connector.ExtensionalImage, + Connector.IntensionalImage + ], + has_compound_common_id = True, + compound_common_id = CommonId(1), + is_belief_valid = False, + at_compound_pos = 0 + ) + + add_rule(sparse_lut, structure, + Interface_CompositionalRules._structural__bi_composition__0_prime, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND, + has_common_id = True, + Copula1 = Copula.Inheritance, + Copula2 = None, + match_reverse = False, + sentence_type = class_sentence_to_list(Judgement), + Connector1 = None, + Connector2 = [ + Connector.ExtensionalImage, + Connector.IntensionalImage + ], + has_compound_common_id = True, + compound_common_id = CommonId(0), + is_belief_valid = False, + at_compound_pos = 1 + ) + + add_rule(sparse_lut, structure, + Interface_CompositionalRules._structural__bi_composition__1_prime, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND, + has_common_id = True, + Copula1 = Copula.Inheritance, + Copula2 = None, + match_reverse = False, + sentence_type = class_sentence_to_list(Judgement), + Connector1 = None, + Connector2 = [ + Connector.ExtensionalImage, + Connector.IntensionalImage + ], + has_compound_common_id = True, + compound_common_id = CommonId(1), + is_belief_valid = False, + at_compound_pos = 1 + ) \ No newline at end of file diff --git a/NARS/RuleMap/Rules/NAL5.py b/NARS/RuleMap/Rules/NAL5.py new file mode 100644 index 0000000..e9466db --- /dev/null +++ b/NARS/RuleMap/Rules/NAL5.py @@ -0,0 +1,855 @@ +from collections import OrderedDict +from NARS.DataStructures import LinkType, TaskLink, TermLink +from utils.SparseLUT import SparseLUT +import Global +from .add_rule import * + + +def add_rules__NAL5(sparse_lut: SparseLUT, structure: OrderedDict): + '''''' + '''syllogystic rules''' + + '''---------NAL 1---------''' + + '''deduction''' + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__deduction__0_1, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.Implication, + Copula2 = Copula.Implication, + match_reverse = False, + common_id = CommonId(0, 1) + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__deduction__1_0, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.Implication, + Copula2 = Copula.Implication, + match_reverse = False, + common_id = CommonId(1, 0) + ) + + '''exemplification''' + add_rule(sparse_lut, structure, 
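+        # exemplification is the weak converse of deduction; reading
+        # `CommonId(0, 1)` as "subject of task = predicate of belief", an
+        # illustrative case is: task `<M ==> P>`, belief `<S ==> M>`,
+        # conclusion `<P ==> S>`.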
+ Interface_SyllogisticRules._syllogistic__exemplification__0_1, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.Implication, + Copula2 = Copula.Implication, + match_reverse = False, + common_id = CommonId(0, 1) + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__exemplification__1_0, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.Implication, + Copula2 = Copula.Implication, + match_reverse = False, + common_id = CommonId(1, 0) + ) + + '''induction''' + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__induction__0_0, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.Implication, + Copula2 = Copula.Implication, + match_reverse = False, + common_id = CommonId(0, 0) + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__induction__0_0_prime, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.Implication, + Copula2 = Copula.Implication, + match_reverse = False, + common_id = CommonId(0, 0) + ) + + '''abduction''' + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__abduction__1_1, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.Implication, + Copula2 = Copula.Implication, + match_reverse = False, + common_id = CommonId(1, 1) + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__abduction__1_1_prime, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.Implication, + Copula2 = Copula.Implication, + match_reverse = False, + common_id = CommonId(1, 1) + ) + + '''reversion''' + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__reversion, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.Implication, + Copula2 = Copula.Implication, + match_reverse = True + ) + + + '''---------NAL 2---------''' + '''comparison''' + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__comparison__0_0, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.Implication, # ==> + Copula2 = Copula.Implication, # ==> + match_reverse = False, + common_id = CommonId(0, 0) + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__comparison__0_0_prime, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.Implication, # ==> + Copula2 = Copula.Implication, # ==> + match_reverse = False, + common_id = CommonId(0, 0) + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__comparison__1_1, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.Implication, # ==> + Copula2 = Copula.Implication, # ==> + match_reverse = False, + common_id = CommonId(1, 1) + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__comparison__1_1_prime, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = 
LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.Implication, # ==> + Copula2 = Copula.Implication, # ==> + match_reverse = False, + common_id = CommonId(1, 1) + ) + '''analogy''' + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__analogy__0_0, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.Implication, # ==> + Copula2 = Copula.Equivalence, # <=> + match_reverse = False, + common_id = CommonId(0, 0) + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__analogy__0_1, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.Implication, # ==> + Copula2 = Copula.Equivalence, # <=> + match_reverse = False, + common_id = CommonId(0, 1) + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__analogy__1_0, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.Implication, # ==> + Copula2 = Copula.Equivalence, # <=> + match_reverse = False, + common_id = CommonId(1, 0) + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__analogy__1_1, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.Implication, # ==> + Copula2 = Copula.Equivalence, # <=> + match_reverse = False, + common_id = CommonId(1, 1) + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__analogy__0_0, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.Equivalence, # <=> + Copula2 = Copula.Implication, # ==> + match_reverse = False, + common_id = CommonId(0, 0) + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__analogy__0_1, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.Equivalence, # <=> + Copula2 = Copula.Implication, # ==> + match_reverse = False, + common_id = CommonId(0, 1) + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__analogy__1_0, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.Equivalence, # <=> + Copula2 = Copula.Implication, # ==> + match_reverse = False, + common_id = CommonId(1, 0) + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__analogy__1_1, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.Equivalence, # <=> + Copula2 = Copula.Implication, # ==> + match_reverse = False, + common_id = CommonId(1, 1) + ) + '''resemblance''' + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__resemblance__0_0, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.Equivalence, # <=> + Copula2 = Copula.Equivalence, # <=> + match_reverse = False, + common_id = CommonId(0, 0) + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__resemblance__0_1, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.Equivalence, # <=> + Copula2 = Copula.Equivalence, # <=> + match_reverse 
= False, + common_id = CommonId(0, 1) + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__resemblance__1_0, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.Equivalence, # <=> + Copula2 = Copula.Equivalence, # <=> + match_reverse = False, + common_id = CommonId(1, 0) + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__resemblance__1_1, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.Equivalence, # <=> + Copula2 = Copula.Equivalence, # <=> + match_reverse = False, + common_id = CommonId(1, 1) + ) + + '''---------NAL 3---------''' + + ''' + Compositional Rules + ''' + + add_rule(sparse_lut, structure, + Interface_CompositionalRules._compositional__conjunction_extension__0_0, + LinkType1 = [ + LinkType.COMPOUND_CONDITION, + LinkType.COMPOUND_STATEMENT + ], + LinkType2 = [ + LinkType.COMPOUND_CONDITION, + LinkType.COMPOUND_STATEMENT + ], + has_common_id = True, + Copula1 = Copula.Implication, + Copula2 = Copula.Implication, + match_reverse = False, + common_id = CommonId(0, 0), + sentence_type = class_sentence_to_list(Judgement) + ) + + add_rule(sparse_lut, structure, + Interface_CompositionalRules._compositional__conjunction_intension__1_1, + LinkType1 = [ + LinkType.COMPOUND_CONDITION, + LinkType.COMPOUND_STATEMENT + ], + LinkType2 = [ + LinkType.COMPOUND_CONDITION, + LinkType.COMPOUND_STATEMENT + ], + has_common_id = True, + Copula1 = Copula.Implication, + Copula2 = Copula.Implication, + match_reverse = False, + common_id = CommonId(1, 1), + sentence_type = class_sentence_to_list(Judgement) + ) + + add_rule(sparse_lut, structure, + Interface_CompositionalRules._compositional__disjunction_extension__0_0, + LinkType1 = [ + LinkType.COMPOUND_CONDITION, + LinkType.COMPOUND_STATEMENT + ], + LinkType2 = [ + LinkType.COMPOUND_CONDITION, + LinkType.COMPOUND_STATEMENT + ], + has_common_id = True, + Copula1 = Copula.Implication, + Copula2 = Copula.Implication, + match_reverse = False, + common_id = CommonId(0, 0), + sentence_type = class_sentence_to_list(Judgement) + ) + + add_rule(sparse_lut, structure, + Interface_CompositionalRules._compositional__disjunction_intension__1_1, + LinkType1 = [ + LinkType.COMPOUND_CONDITION, + LinkType.COMPOUND_STATEMENT + ], + LinkType2 = [ + LinkType.COMPOUND_CONDITION, + LinkType.COMPOUND_STATEMENT + ], + has_common_id = True, + Copula1 = Copula.Implication, + Copula2 = Copula.Implication, + match_reverse = False, + common_id = CommonId(1, 1), + sentence_type = class_sentence_to_list(Judgement) + ) + + ''' + Decompositional Rules + ''' + '''conjunction''' + add_rule(sparse_lut, structure, + Interface_DecompositionalRules._decompositional__decomposition_theorem2__0_0, + LinkType1 = [LinkType.COMPOUND_STATEMENT, LinkType.COMPOUND_CONDITION], + LinkType2 = [LinkType.COMPOUND_STATEMENT, LinkType.COMPOUND_CONDITION], + has_common_id = True, + Copula1 = Copula.Implication, + Copula2 = Copula.Implication, + match_reverse = False, + common_id = CommonId(0, 0), + sentence_type = class_sentence_to_list(Judgement), + the_other_compound_has_common = True, + the_other_compound_p1_at_p2 = False, + the_other_compound_p2_at_p1 = True, + Connector1 = Connector.Conjunction, + Connector2 = Any + ) + + add_rule(sparse_lut, structure, + Interface_DecompositionalRules._decompositional__decomposition_theorem2__0_0_prime, + LinkType1 = [LinkType.COMPOUND_STATEMENT, 
LinkType.COMPOUND_CONDITION], + LinkType2 = [LinkType.COMPOUND_STATEMENT, LinkType.COMPOUND_CONDITION], + has_common_id = True, + Copula1 = Copula.Implication, + Copula2 = Copula.Implication, + match_reverse = False, + common_id = CommonId(0, 0), + sentence_type = class_sentence_to_list(Judgement), + the_other_compound_has_common = True, + the_other_compound_p1_at_p2 = True, + the_other_compound_p2_at_p1 = False, + Connector1 = Any, + Connector2 = Connector.Conjunction + ) + + + + + '''---------NAL 5---------''' + + '''conditianal rules''' + + '''deduction''' + add_rule(sparse_lut, structure, + Interface_ConditionalRules._conditional__deduction__0, + LinkType1 = LinkType.COMPOUND_CONDITION, + # LinkType2 = LinkType.COMPOUND_STATEMENT, + has_common_id = True, + Copula1 = Copula.Implication, + # Copula2 = Copula.Implication, + match_reverse = False, + common_id = CommonId(0), + has_at = True, + p2_at_p1=True + ) + + add_rule(sparse_lut, structure, + Interface_ConditionalRules._conditional__deduction__0_prime, + # LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + # Copula1 = Copula.Implication, + Copula2 = Copula.Implication, + match_reverse = False, + common_id = CommonId(0), + has_at = True, + p1_at_p2=True + ) + '''deduction (compound eliminate)''' + add_rule(sparse_lut, structure, + Interface_ConditionalRules._conditional__deduction_compound_eliminate__0, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND_STATEMENT, + has_common_id = True, + has_compound_at = True, + Copula1 = Copula.Implication, + # Copula2 = Copula.Implication, + match_reverse = False, + common_id = CommonId(0), + Connector1 = Connector.Conjunction, + is_belief_valid = True + ) + + add_rule(sparse_lut, structure, + Interface_ConditionalRules._conditional__deduction_compound_eliminate__0_prime, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND_STATEMENT, + has_common_id = True, + has_compound_at = True, + # Copula1 = Copula.Implication, + Copula2 = Copula.Implication, + match_reverse = False, + common_id = CommonId(0), + Connector2 = Connector.Conjunction, + is_belief_valid = True + ) + + '''deduction (compound replace)''' + add_rule(sparse_lut, structure, + Interface_ConditionalRules._conditional__deduction_compound_replace__0_1, + LinkType1 = [ + LinkType.COMPOUND_STATEMENT, + LinkType.COMPOUND_CONDITION + ], + LinkType2 = [ + LinkType.COMPOUND_STATEMENT, + LinkType.COMPOUND_CONDITION + ], + has_common_id = True, + has_compound_common_id = True, + Copula1 = Copula.Implication, + Copula2 = Copula.Implication, + match_reverse = False, + compound_common_id = CommonId(0, 1), + Connector1 = Connector.Conjunction + ) + + + add_rule(sparse_lut, structure, + Interface_ConditionalRules._conditional__deduction_compound_replace__1_0, + LinkType1 = [ + LinkType.COMPOUND_STATEMENT, + LinkType.COMPOUND_CONDITION + ], + LinkType2 = [ + LinkType.COMPOUND_STATEMENT, + LinkType.COMPOUND_CONDITION + ], + has_common_id = True, + has_compound_common_id = True, + Copula1 = Copula.Implication, + Copula2 = Copula.Implication, + match_reverse = False, + compound_common_id = CommonId(1, 0), + Connector2 = Connector.Conjunction + ) + + '''abduction''' + add_rule(sparse_lut, structure, + Interface_ConditionalRules._conditional__abduction__1, + LinkType1 = LinkType.COMPOUND_CONDITION, + # LinkType2 = LinkType.COMPOUND_STATEMENT, + has_common_id = True, + Copula1 = Copula.Implication, + # Copula2 = Copula.Implication, + match_reverse = 
False, + common_id = CommonId(1), + has_at = True, + p2_at_p1=True + ) + + add_rule(sparse_lut, structure, + Interface_ConditionalRules._conditional__abduction__1_prime, + # LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + # Copula1 = Copula.Implication, + Copula2 = Copula.Implication, + match_reverse = False, + common_id = CommonId(1), + has_at = True, + p1_at_p2=True + ) + + '''abudction (compound eliminate)''' + add_rule(sparse_lut, structure, + Interface_ConditionalRules._conditional__abduction_compound_eliminate__1_1, + LinkType1 = [ + LinkType.COMPOUND_CONDITION, + LinkType.COMPOUND_STATEMENT + ], + LinkType2 = [ + LinkType.COMPOUND_CONDITION, + LinkType.COMPOUND_STATEMENT + ], + has_common_id = True, + Copula1 = Copula.Implication, + Copula2 = Copula.Implication, + match_reverse = False, + common_id = CommonId(1, 1), + the_other_compound_has_common = True, + the_other_compound_p1_at_p2 = False, + the_other_compound_p2_at_p1 = True, + Connector1 = Connector.Conjunction, + Connector2 = None + ) + + add_rule(sparse_lut, structure, + Interface_ConditionalRules._conditional__abduction_compound_eliminate__1_1_prime, + LinkType1 = [ + LinkType.COMPOUND_CONDITION, + LinkType.COMPOUND_STATEMENT + ], + LinkType2 = [ + LinkType.COMPOUND_CONDITION, + LinkType.COMPOUND_STATEMENT + ], + has_common_id = True, + Copula1 = Copula.Implication, + Copula2 = Copula.Implication, + match_reverse = False, + common_id = CommonId(1, 1), + the_other_compound_has_common = True, + the_other_compound_p1_at_p2 = True, + the_other_compound_p2_at_p1 = False, + Connector1 = None, + Connector2 = Connector.Conjunction + ) + + add_rule(sparse_lut, structure, + [ + Interface_ConditionalRules._conditional__abduction_compound_eliminate2__1_1, + Interface_ConditionalRules._conditional__abduction_compound_eliminate2__1_1_prime + ], + LinkType1 = [ + LinkType.COMPOUND_CONDITION, + LinkType.COMPOUND_STATEMENT + ], + LinkType2 = [ + LinkType.COMPOUND_CONDITION, + LinkType.COMPOUND_STATEMENT + ], + has_common_id = True, + Copula1 = Copula.Implication, + Copula2 = Copula.Implication, + match_reverse = False, + common_id = CommonId(1, 1), + the_other_compound_has_common = True, + the_other_compound_p1_at_p2 = True, + the_other_compound_p2_at_p1 = True, + Connector1 = Connector.Conjunction, + Connector2 = Connector.Conjunction + ) + + '''induction (compound replace)''' + add_rule(sparse_lut, structure, + Interface_ConditionalRules._conditional__induction_compound_replace__0_0, + LinkType1 = [ + LinkType.COMPOUND_STATEMENT, + LinkType.COMPOUND_CONDITION + ], + LinkType2 = [ + LinkType.COMPOUND_STATEMENT, + LinkType.COMPOUND_CONDITION + ], + has_common_id = True, + has_compound_common_id = True, + Copula1 = Copula.Implication, + Copula2 = Copula.Implication, + match_reverse = False, + compound_common_id = CommonId(0, 0), + Connector1 = Connector.Conjunction + ) + + + add_rule(sparse_lut, structure, + Interface_ConditionalRules._conditional__induction_compound_replace__0_0_prime, + LinkType1 = [ + LinkType.COMPOUND_STATEMENT, + LinkType.COMPOUND_CONDITION + ], + LinkType2 = [ + LinkType.COMPOUND_STATEMENT, + LinkType.COMPOUND_CONDITION + ], + has_common_id = True, + has_compound_common_id = True, + Copula1 = Copula.Implication, + Copula2 = Copula.Implication, + match_reverse = False, + compound_common_id = CommonId(0, 0), + Connector2 = Connector.Conjunction + ) + + '''analogy''' + add_rule(sparse_lut, structure, + Interface_ConditionalRules._conditional__analogy__0, + # 
LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + # Copula1 = Copula.Implication, + Copula2 = Copula.Equivalence, + match_reverse = False, + common_id = CommonId(0), + has_at = True, + p1_at_p2=True + ) + + add_rule(sparse_lut, structure, + Interface_ConditionalRules._conditional__analogy__0_prime, + LinkType1 = LinkType.COMPOUND_CONDITION, + # LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.Equivalence, + # Copula2 = Copula.Equivalence, + match_reverse = False, + common_id = CommonId(0), + has_at = True, + p2_at_p1=True + ) + + add_rule(sparse_lut, structure, + Interface_ConditionalRules._conditional__analogy__1, + # LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + # Copula1 = Copula.Implication, + Copula2 = Copula.Equivalence, + match_reverse = False, + common_id = CommonId(1), + has_at = True, + p1_at_p2=True + ) + + add_rule(sparse_lut, structure, + Interface_ConditionalRules._conditional__analogy__1_prime, + LinkType1 = LinkType.COMPOUND_CONDITION, + # LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.Equivalence, + # Copula2 = Copula.Equivalence, + match_reverse = False, + common_id = CommonId(1), + has_at = True, + p2_at_p1=True + ) + + ''' + Decompositional Theorems + ''' + + '''decompositional theorem 9''' + add_rule(sparse_lut, structure, + Interface_DecompositionalRules._decompositional__decomposition_theorem9, + LinkType1 = LinkType.COMPOUND, + LinkType2 = LinkType.COMPOUND_STATEMENT, + Connector1 = Connector.Conjunction, + p2_at_p1 = True, + is_belief_valid = True + ) + + add_rule(sparse_lut, structure, + Interface_DecompositionalRules._decompositional__decomposition_theorem9, + LinkType1 = LinkType.SELF, + LinkType2 = LinkType.COMPONENT, + Connector1 = Connector.Conjunction, + p2_at_p1 = True, + is_belief_valid = True + ) + + add_rule(sparse_lut, structure, + Interface_DecompositionalRules._decompositional__decomposition_theorem9_prime, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND, + Connector2 = Connector.Conjunction, + p1_at_p2 = True, + is_belief_valid = True + ) + + add_rule(sparse_lut, structure, + Interface_DecompositionalRules._decompositional__decomposition_theorem9_prime, + LinkType1 = LinkType.SELF, + LinkType2 = LinkType.COMPOUND, + Connector2 = Connector.Conjunction, + p1_at_p2 = True, + is_belief_valid = True + ) + + '''decompositional theorem 10''' + add_rule(sparse_lut, structure, + Interface_DecompositionalRules._decompositional__decomposition_theorem10, + LinkType1 = LinkType.COMPOUND, + LinkType2 = LinkType.COMPOUND_STATEMENT, + Connector1 = Connector.Disjunction, + p2_at_p1 = True, + is_belief_valid = True, + sentence_type = class_sentence_to_list(Judgement) + ) + + add_rule(sparse_lut, structure, + Interface_DecompositionalRules._decompositional__decomposition_theorem10, + LinkType1 = LinkType.SELF, + LinkType2 = LinkType.COMPONENT, + Connector1 = Connector.Disjunction, + p2_at_p1 = True, + is_belief_valid = True, + sentence_type = class_sentence_to_list(Judgement) + ) + + add_rule(sparse_lut, structure, + Interface_DecompositionalRules._decompositional__decomposition_theorem10_prime, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND, + Connector2 = Connector.Disjunction, + p1_at_p2 = True, + is_belief_valid = True, + sentence_type = class_sentence_to_list(Judgement) + ) + + add_rule(sparse_lut, structure, + 
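+        # the `_prime` variants seemingly mirror their base rules with the task
+        # and belief roles swapped; note how `Connector1`/`Connector2` and
+        # `p1_at_p2`/`p2_at_p1` flip between each pair of registrations.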
Interface_DecompositionalRules._decompositional__decomposition_theorem10_prime, + LinkType1 = LinkType.SELF, + LinkType2 = LinkType.COMPOUND, + Connector2 = Connector.Disjunction, + p1_at_p2 = True, + is_belief_valid = True, + sentence_type = class_sentence_to_list(Judgement) + ) + + + ''' + Implication Theorems + ''' + add_rule(sparse_lut, structure, + Interface_CompositionalRules._structural__implication_theorem3, + LinkType1 = [LinkType.COMPOUND, LinkType.SELF], + # LinkType2 = LinkType.COMPOUND, + Connector1 = Connector.Conjunction, + p2_at_p1 = True, + is_belief_valid = False + ) + + add_rule(sparse_lut, structure, + Interface_CompositionalRules._structural__implication_theorem4, + # LinkType1 = LinkType.COMPOUND, + LinkType2 = LinkType.COMPOUND, + Connector2 = Connector.Disjunction, + p1_at_p2 = True, + is_belief_valid = False + ) + + '''transform negation''' + add_rule(sparse_lut, structure, + Interface_TransformRules._transform__negation, + LinkType1 = LinkType.SELF, + LinkType2 = LinkType.COMPONENT, + Connector1 = Connector.Negation, + # p2_at_p1 = True, + is_belief_valid = False + ) + + add_rule(sparse_lut, structure, + Interface_TransformRules._transform__negation, + LinkType1 = LinkType.COMPOUND, + LinkType2 = LinkType.COMPOUND_STATEMENT, + Connector1 = Connector.Negation, + # p2_at_p1 = True, + is_belief_valid = False + ) + + add_rule(sparse_lut, structure, + Interface_TransformRules._transform__negation, + LinkType1 = [LinkType.SELF, LinkType.COMPOUND_STATEMENT], + LinkType2 = LinkType.COMPOUND, + Connector2 = Connector.Negation, + # p2_at_p1 = True, + is_belief_valid = False + ) + + '''contraposition''' + add_rule(sparse_lut, structure, + Interface_TransformRules._transform__contraposition, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPONENT, + Connector1 = Connector.Negation, + has_compound_at = True, + c2_at_c1 = True, + is_belief_valid = False + ) + # TODO: other cases should be considered. 
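+    # for reference, contraposition in NAL relates `<S ==> P>` to
+    # `<(--, P) ==> (--, S)>`; the registration above only covers a task
+    # premise that already carries a negated component (`Connector1`).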
+ # add_rule(sparse_lut, structure, + # Interface_TransformRules._transform__contraposition, + # LinkType1 = LinkType.COMPOUND_CONDITION, + # LinkType2 = LinkType.COMPONENT, + # Connector1 = Connector.Negation, + # has_compound_common_id = True, + # c2_at_c1 = True, + # is_belief_valid = False + # ) \ No newline at end of file diff --git a/NARS/RuleMap/Rules/NAL6.py b/NARS/RuleMap/Rules/NAL6.py new file mode 100644 index 0000000..c275c8a --- /dev/null +++ b/NARS/RuleMap/Rules/NAL6.py @@ -0,0 +1,10 @@ +from collections import OrderedDict +from NARS.DataStructures import LinkType, TaskLink, TermLink +from utils.SparseLUT import SparseLUT +import Global +from .add_rule import * + + +def add_rules__NAL6(sparse_lut: SparseLUT, structure: OrderedDict): + '''''' + diff --git a/NARS/RuleMap/Rules/NAL7.py b/NARS/RuleMap/Rules/NAL7.py new file mode 100644 index 0000000..3a392de --- /dev/null +++ b/NARS/RuleMap/Rules/NAL7.py @@ -0,0 +1,675 @@ +from collections import OrderedDict +from NARS.DataStructures import LinkType, TaskLink, TermLink +from utils.SparseLUT import SparseLUT +import Global +from .add_rule import * + + +def add_rules__NAL7(sparse_lut: SparseLUT, structure: OrderedDict): + '''''' + '''deduction''' + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__deduction__0_1, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.RetrospectiveImplication, + Copula2 = Copula.RetrospectiveImplication, + match_reverse = False, + common_id = CommonId(0, 1) + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__deduction__1_0, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.RetrospectiveImplication, + Copula2 = Copula.RetrospectiveImplication, + match_reverse = False, + common_id = CommonId(1, 0) + ) + + '''exemplification''' + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__exemplification__0_1, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.RetrospectiveImplication, + Copula2 = Copula.RetrospectiveImplication, + match_reverse = False, + common_id = CommonId(0, 1) + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__exemplification__1_0, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.RetrospectiveImplication, + Copula2 = Copula.RetrospectiveImplication, + match_reverse = False, + common_id = CommonId(1, 0) + ) + + '''induction''' + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__induction__0_0, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.PredictiveImplication, + Copula2 = Copula.RetrospectiveImplication, + match_reverse = False, + common_id = CommonId(0, 0) + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__induction__0_0, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.RetrospectiveImplication, + Copula2 = Copula.PredictiveImplication, + match_reverse = False, + common_id = CommonId(0, 0) + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__induction__0_0_prime, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = 
LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.PredictiveImplication, + Copula2 = Copula.RetrospectiveImplication, + match_reverse = False, + common_id = CommonId(0, 0) + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__induction__0_0_prime, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.RetrospectiveImplication, + Copula2 = Copula.PredictiveImplication, + match_reverse = False, + common_id = CommonId(0, 0) + ) + + '''abduction''' + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__abduction__1_1, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.PredictiveImplication, # =/> + Copula2 = Copula.RetrospectiveImplication, # =\> + match_reverse = False, + common_id = CommonId(1, 1) + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__abduction__1_1, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.RetrospectiveImplication, # =\> + Copula2 = Copula.PredictiveImplication, # =/> + match_reverse = False, + common_id = CommonId(1, 1) + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__abduction__1_1_prime, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.PredictiveImplication, # =/> + Copula2 = Copula.RetrospectiveImplication, # =\> + match_reverse = False, + common_id = CommonId(1, 1) + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__abduction__1_1_prime, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.RetrospectiveImplication, # =\> + Copula2 = Copula.PredictiveImplication, # =/> + match_reverse = False, + common_id = CommonId(1, 1) + ) + + '''comparison''' + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__comparison__0_0, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.PredictiveImplication, # =/> + Copula2 = Copula.RetrospectiveImplication, # =\> + match_reverse = False, + common_id = CommonId(0, 0) + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__comparison__0_0_prime, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.RetrospectiveImplication, # =\> + Copula2 = Copula.PredictiveImplication, # =/> + match_reverse = False, + common_id = CommonId(0, 0) + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__comparison__1_1, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.PredictiveImplication, # =/> + Copula2 = Copula.RetrospectiveImplication, # =\> + match_reverse = False, + common_id = CommonId(1, 1) + ) + + add_rule(sparse_lut, structure, + Interface_SyllogisticRules._syllogistic__comparison__1_1_prime, + LinkType1 = LinkType.COMPOUND_CONDITION, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + Copula1 = Copula.RetrospectiveImplication, # =\> + Copula2 = Copula.PredictiveImplication, # =/> + match_reverse = False, + common_id = CommonId(1, 1) + ) + + '''reversion?''' + + 
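+    # temporal copulas used in this file, per the inline comments: `=/>` is
+    # predictive implication, `=|>` concurrent implication, and `=\>`
+    # retrospective implication; induction, abduction, and comparison are each
+    # registered for both premise orders because the two premises carry
+    # opposite temporal directions.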
'''---------------''' + + + '''---------NAL 5---------''' + + '''conditianal rules''' + + '''deduction''' + add_rule(sparse_lut, structure, + Interface_ConditionalRules._conditional__deduction__0, + LinkType1 = LinkType.COMPOUND_CONDITION, + # LinkType2 = LinkType.COMPOUND_STATEMENT, + has_common_id = True, + Copula1 = Copula.PredictiveImplication, + # Copula2 = Copula.PredictiveImplication, + match_reverse = False, + common_id = CommonId(0), + has_at = True, + p2_at_p1=True + ) + + add_rule(sparse_lut, structure, + Interface_ConditionalRules._conditional__deduction__0_prime, + # LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND_CONDITION, + has_common_id = True, + # Copula1 = Copula.PredictiveImplication, + Copula2 = Copula.PredictiveImplication, + match_reverse = False, + common_id = CommonId(0), + has_at = True, + p1_at_p2=True + ) + # '''deduction (compound eliminate)''' + # add_rule(sparse_lut, structure, + # Interface_ConditionalRules._conditional__deduction_compound_eliminate__0, + # LinkType1 = LinkType.COMPOUND_STATEMENT, + # LinkType2 = LinkType.COMPOUND_STATEMENT, + # has_common_id = True, + # has_compound_at = True, + # Copula1 = Copula.PredictiveImplication, + # # Copula2 = Copula.PredictiveImplication, + # match_reverse = False, + # common_id = CommonId(0), + # Connector1 = Connector.Conjunction + # ) + + # add_rule(sparse_lut, structure, + # Interface_ConditionalRules._conditional__deduction_compound_eliminate__0_prime, + # LinkType1 = LinkType.COMPOUND_STATEMENT, + # LinkType2 = LinkType.COMPOUND_STATEMENT, + # has_common_id = True, + # has_compound_at = True, + # # Copula1 = Copula.PredictiveImplication, + # Copula2 = Copula.PredictiveImplication, + # match_reverse = False, + # common_id = CommonId(0), + # Connector2 = Connector.Conjunction + # ) + + # '''deduction (compound replace)''' + # add_rule(sparse_lut, structure, + # Interface_ConditionalRules._conditional__deduction_compound_replace__0_1, + # LinkType1 = [ + # LinkType.COMPOUND_STATEMENT, + # LinkType.COMPOUND_CONDITION + # ], + # LinkType2 = [ + # LinkType.COMPOUND_STATEMENT, + # LinkType.COMPOUND_CONDITION + # ], + # has_common_id = True, + # has_compound_common_id = True, + # Copula1 = Copula.Implication, + # Copula2 = Copula.Implication, + # match_reverse = False, + # compound_common_id = CommonId(0, 1), + # Connector1 = Connector.Conjunction + # ) + + + # add_rule(sparse_lut, structure, + # Interface_ConditionalRules._conditional__deduction_compound_replace__1_0, + # LinkType1 = [ + # LinkType.COMPOUND_STATEMENT, + # LinkType.COMPOUND_CONDITION + # ], + # LinkType2 = [ + # LinkType.COMPOUND_STATEMENT, + # LinkType.COMPOUND_CONDITION + # ], + # has_common_id = True, + # has_compound_common_id = True, + # Copula1 = Copula.Implication, + # Copula2 = Copula.Implication, + # match_reverse = False, + # compound_common_id = CommonId(1, 0), + # Connector2 = Connector.Conjunction + # ) + + '''abduction''' + add_rule(sparse_lut, structure, + Interface_TemporalRules._temporal__abduction__1, + LinkType1 = [ + LinkType.COMPOUND_CONDITION, + LinkType.COMPOUND_STATEMENT + ], + # LinkType2 = LinkType.COMPOUND_STATEMENT, + has_common_id = True, + Copula1 = [ + Copula.PredictiveImplication, + Copula.ConcurrentImplication, + Copula.RetrospectiveImplication + ], + # Copula2 = Copula.PredictiveImplication, + match_reverse = False, + common_id = CommonId(1), + has_at = True, + p2_at_p1=True + ) + + add_rule(sparse_lut, structure, + Interface_TemporalRules._temporal__abduction__1_prime, + # LinkType1 = 
LinkType.COMPOUND_STATEMENT, + LinkType2 = [ + LinkType.COMPOUND_CONDITION, + LinkType.COMPOUND_STATEMENT + ], + has_common_id = True, + # Copula1 = Copula.PredictiveImplication, + Copula2 = [ + Copula.PredictiveImplication, + Copula.ConcurrentImplication, + Copula.RetrospectiveImplication + ], + match_reverse = False, + common_id = CommonId(1), + has_at = True, + p1_at_p2=True + ) + + # '''abudction (compound eliminate)''' + # add_rule(sparse_lut, structure, + # Interface_ConditionalRules._conditional__abduction_compound_eliminate__1_1, + # LinkType1 = [ + # LinkType.COMPOUND_CONDITION, + # LinkType.COMPOUND_STATEMENT + # ], + # LinkType2 = [ + # LinkType.COMPOUND_CONDITION, + # LinkType.COMPOUND_STATEMENT + # ], + # has_common_id = True, + # Copula1 = Copula.Implication, + # Copula2 = Copula.Implication, + # match_reverse = False, + # common_id = CommonId(1, 1), + # the_other_compound_has_common = True, + # the_other_compound_p1_at_p2 = False, + # the_other_compound_p2_at_p1 = True, + # Connector1 = Connector.Conjunction, + # Connector2 = None + # ) + + # add_rule(sparse_lut, structure, + # Interface_ConditionalRules._conditional__abduction_compound_eliminate__1_1_prime, + # LinkType1 = [ + # LinkType.COMPOUND_CONDITION, + # LinkType.COMPOUND_STATEMENT + # ], + # LinkType2 = [ + # LinkType.COMPOUND_CONDITION, + # LinkType.COMPOUND_STATEMENT + # ], + # has_common_id = True, + # Copula1 = Copula.Implication, + # Copula2 = Copula.Implication, + # match_reverse = False, + # common_id = CommonId(1, 1), + # the_other_compound_has_common = True, + # the_other_compound_p1_at_p2 = True, + # the_other_compound_p2_at_p1 = False, + # Connector1 = None, + # Connector2 = Connector.Conjunction + # ) + + # add_rule(sparse_lut, structure, + # [ + # Interface_ConditionalRules._conditional__abduction_compound_eliminate2__1_1, + # Interface_ConditionalRules._conditional__abduction_compound_eliminate2__1_1_prime + # ], + # LinkType1 = [ + # LinkType.COMPOUND_CONDITION, + # LinkType.COMPOUND_STATEMENT + # ], + # LinkType2 = [ + # LinkType.COMPOUND_CONDITION, + # LinkType.COMPOUND_STATEMENT + # ], + # has_common_id = True, + # Copula1 = Copula.Implication, + # Copula2 = Copula.Implication, + # match_reverse = False, + # common_id = CommonId(1, 1), + # the_other_compound_has_common = True, + # the_other_compound_p1_at_p2 = True, + # the_other_compound_p2_at_p1 = True, + # Connector1 = Connector.Conjunction, + # Connector2 = Connector.Conjunction + # ) + + # '''induction (compound replace)''' + # add_rule(sparse_lut, structure, + # Interface_ConditionalRules._conditional__induction_compound_replace__0_0, + # LinkType1 = [ + # LinkType.COMPOUND_STATEMENT, + # LinkType.COMPOUND_CONDITION + # ], + # LinkType2 = [ + # LinkType.COMPOUND_STATEMENT, + # LinkType.COMPOUND_CONDITION + # ], + # has_common_id = True, + # has_compound_common_id = True, + # Copula1 = Copula.Implication, + # Copula2 = Copula.Implication, + # match_reverse = False, + # compound_common_id = CommonId(0, 0), + # Connector1 = Connector.Conjunction + # ) + + + # add_rule(sparse_lut, structure, + # Interface_ConditionalRules._conditional__induction_compound_replace__0_0_prime, + # LinkType1 = [ + # LinkType.COMPOUND_STATEMENT, + # LinkType.COMPOUND_CONDITION + # ], + # LinkType2 = [ + # LinkType.COMPOUND_STATEMENT, + # LinkType.COMPOUND_CONDITION + # ], + # has_common_id = True, + # has_compound_common_id = True, + # Copula1 = Copula.Implication, + # Copula2 = Copula.Implication, + # match_reverse = False, + # compound_common_id = CommonId(0, 0), + 
# Connector2 = Connector.Conjunction + # ) + + # '''analogy''' + # add_rule(sparse_lut, structure, + # Interface_ConditionalRules._conditional__analogy__0, + # # LinkType1 = LinkType.COMPOUND_CONDITION, + # LinkType2 = LinkType.COMPOUND_CONDITION, + # has_common_id = True, + # # Copula1 = Copula.Implication, + # Copula2 = Copula.Equivalence, + # match_reverse = False, + # common_id = CommonId(0), + # has_at = True, + # p1_at_p2=True + # ) + + # add_rule(sparse_lut, structure, + # Interface_ConditionalRules._conditional__analogy__0_prime, + # LinkType1 = LinkType.COMPOUND_CONDITION, + # # LinkType2 = LinkType.COMPOUND_CONDITION, + # has_common_id = True, + # Copula1 = Copula.Equivalence, + # # Copula2 = Copula.Equivalence, + # match_reverse = False, + # common_id = CommonId(0), + # has_at = True, + # p2_at_p1=True + # ) + + # add_rule(sparse_lut, structure, + # Interface_ConditionalRules._conditional__analogy__1, + # # LinkType1 = LinkType.COMPOUND_CONDITION, + # LinkType2 = LinkType.COMPOUND_CONDITION, + # has_common_id = True, + # # Copula1 = Copula.Implication, + # Copula2 = Copula.Equivalence, + # match_reverse = False, + # common_id = CommonId(1), + # has_at = True, + # p1_at_p2=True + # ) + + # add_rule(sparse_lut, structure, + # Interface_ConditionalRules._conditional__analogy__1_prime, + # LinkType1 = LinkType.COMPOUND_CONDITION, + # # LinkType2 = LinkType.COMPOUND_CONDITION, + # has_common_id = True, + # Copula1 = Copula.Equivalence, + # # Copula2 = Copula.Equivalence, + # match_reverse = False, + # common_id = CommonId(1), + # has_at = True, + # p2_at_p1=True + # ) + + # ''' + # Decompositional Theorems + # ''' + + # '''decompositional theorem 9''' + # add_rule(sparse_lut, structure, + # Interface_DecompositionalRules._decompositional__decomposition_theorem9, + # LinkType1 = LinkType.COMPOUND, + # LinkType2 = LinkType.COMPOUND_STATEMENT, + # Connector1 = Connector.Conjunction, + # p2_at_p1 = True, + # is_belief_valid = True + # ) + + # add_rule(sparse_lut, structure, + # Interface_DecompositionalRules._decompositional__decomposition_theorem9, + # LinkType1 = LinkType.SELF, + # LinkType2 = LinkType.COMPONENT, + # Connector1 = Connector.Conjunction, + # p2_at_p1 = True, + # is_belief_valid = True + # ) + + # add_rule(sparse_lut, structure, + # Interface_DecompositionalRules._decompositional__decomposition_theorem9_prime, + # LinkType1 = LinkType.COMPOUND_STATEMENT, + # LinkType2 = LinkType.COMPOUND, + # Connector2 = Connector.Conjunction, + # p1_at_p2 = True, + # is_belief_valid = True + # ) + + # add_rule(sparse_lut, structure, + # Interface_DecompositionalRules._decompositional__decomposition_theorem9_prime, + # LinkType1 = LinkType.SELF, + # LinkType2 = LinkType.COMPOUND, + # Connector2 = Connector.Conjunction, + # p1_at_p2 = True, + # is_belief_valid = True + # ) + + # '''decompositional theorem 10''' + # add_rule(sparse_lut, structure, + # Interface_DecompositionalRules._decompositional__decomposition_theorem10, + # LinkType1 = LinkType.COMPOUND, + # LinkType2 = LinkType.COMPOUND_STATEMENT, + # Connector1 = Connector.Disjunction, + # p2_at_p1 = True, + # is_belief_valid = True + # ) + + # add_rule(sparse_lut, structure, + # Interface_DecompositionalRules._decompositional__decomposition_theorem10, + # LinkType1 = LinkType.SELF, + # LinkType2 = LinkType.COMPONENT, + # Connector1 = Connector.Disjunction, + # p2_at_p1 = True, + # is_belief_valid = True + # ) + + # add_rule(sparse_lut, structure, + # Interface_DecompositionalRules._decompositional__decomposition_theorem10_prime, + # 
LinkType1 = LinkType.COMPOUND_STATEMENT, + # LinkType2 = LinkType.COMPOUND, + # Connector2 = Connector.Disjunction, + # p1_at_p2 = True, + # is_belief_valid = True + # ) + + # add_rule(sparse_lut, structure, + # Interface_DecompositionalRules._decompositional__decomposition_theorem10_prime, + # LinkType1 = LinkType.SELF, + # LinkType2 = LinkType.COMPOUND, + # Connector2 = Connector.Disjunction, + # p1_at_p2 = True, + # is_belief_valid = True + # ) + + + # ''' + # Implication Theorems + # ''' + # add_rule(sparse_lut, structure, + # Interface_CompositionalRules._structural__implication_theorem3, + # LinkType1 = [LinkType.COMPOUND, LinkType.SELF], + # # LinkType2 = LinkType.COMPOUND, + # Connector1 = Connector.Conjunction, + # p2_at_p1 = True, + # is_belief_valid = False + # ) + + # add_rule(sparse_lut, structure, + # Interface_CompositionalRules._structural__implication_theorem4, + # # LinkType1 = LinkType.COMPOUND, + # LinkType2 = LinkType.COMPOUND, + # Connector2 = Connector.Disjunction, + # p1_at_p2 = True, + # is_belief_valid = False + # ) + + # '''transform negation''' + # add_rule(sparse_lut, structure, + # Interface_TransformRules._transform__negation, + # LinkType1 = LinkType.SELF, + # LinkType2 = LinkType.COMPONENT, + # Connector1 = Connector.Negation, + # # p2_at_p1 = True, + # is_belief_valid = False + # ) + + # add_rule(sparse_lut, structure, + # Interface_TransformRules._transform__negation, + # LinkType1 = LinkType.COMPOUND, + # LinkType2 = LinkType.COMPOUND_STATEMENT, + # Connector1 = Connector.Negation, + # # p2_at_p1 = True, + # is_belief_valid = False + # ) + + # add_rule(sparse_lut, structure, + # Interface_TransformRules._transform__negation, + # LinkType1 = [LinkType.SELF, LinkType.COMPOUND_STATEMENT], + # LinkType2 = LinkType.COMPOUND, + # Connector2 = Connector.Negation, + # # p2_at_p1 = True, + # is_belief_valid = False + # ) + + # '''contraposition''' + # add_rule(sparse_lut, structure, + # Interface_TransformRules._transform__contraposition, + # LinkType1 = LinkType.COMPOUND_CONDITION, + # LinkType2 = LinkType.COMPONENT, + # Connector1 = Connector.Negation, + # has_compound_at = True, + # c2_at_c1 = True, + # is_belief_valid = False + # ) + # # TODO: other cases should be considered. 
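+    # (the commented-out block above mirrors the NAL-5 conditional and
+    # structural rules; it seemingly remains disabled pending temporal-aware
+    # counterparts.)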
+ # # add_rule(sparse_lut, structure, + # # Interface_TransformRules._transform__contraposition, + # # LinkType1 = LinkType.COMPOUND_CONDITION, + # # LinkType2 = LinkType.COMPONENT, + # # Connector1 = Connector.Negation, + # # has_compound_common_id = True, + # # c2_at_c1 = True, + # # is_belief_valid = False + # # ) + + '''---------NAL 7---------''' + '''sequential conditional deduction (compound eliminate)''' + add_rule(sparse_lut, structure, + Interface_TemporalRules._temporal__deduction_sequence_eliminate__0, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND_STATEMENT, + has_common_id = True, + has_compound_at = True, + Copula1 = Copula.PredictiveImplication, + # Copula2 = Copula.PredictiveImplication, + match_reverse = False, + common_id = CommonId(0), + Connector1 = Connector.SequentialEvents, + is_belief_valid=True + ) + + add_rule(sparse_lut, structure, + Interface_TemporalRules._temporal__deduction_sequence_eliminate__0_prime, + LinkType1 = LinkType.COMPOUND_STATEMENT, + LinkType2 = LinkType.COMPOUND_STATEMENT, + has_common_id = True, + has_compound_at = True, + # Copula1 = Copula.PredictiveImplication, + Copula2 = Copula.PredictiveImplication, + match_reverse = False, + common_id = CommonId(0), + Connector2 = Connector.SequentialEvents, + is_belief_valid=True + ) + diff --git a/NARS/RuleMap/Rules/NAL8.py b/NARS/RuleMap/Rules/NAL8.py new file mode 100644 index 0000000..23281ff --- /dev/null +++ b/NARS/RuleMap/Rules/NAL8.py @@ -0,0 +1,10 @@ +from collections import OrderedDict +from NARS.DataStructures import LinkType, TaskLink, TermLink +from utils.SparseLUT import SparseLUT +import Global +from .add_rule import * + + +def add_rules__NAL8(sparse_lut: SparseLUT, structure: OrderedDict): + '''''' + diff --git a/NARS/RuleMap/Rules/NAL9.py b/NARS/RuleMap/Rules/NAL9.py new file mode 100644 index 0000000..a077bfb --- /dev/null +++ b/NARS/RuleMap/Rules/NAL9.py @@ -0,0 +1,16 @@ +from collections import OrderedDict +from NARS.DataStructures import LinkType, TaskLink, TermLink +from utils.SparseLUT import SparseLUT +import Global +from .add_rule import * +from NARS.MentalOperation import * + +def add_rules__NAL9(sparse_lut: SparseLUT=None, structure: OrderedDict=None): + '''''' + register(Believe, execute__believe) + register(Doubt, execute__doubt) + register(Evaluate, execute__evaluate) + register(Hesitate, execute__hesitate) + register(Want, execute__want) + register(Wonder, execute__wonder) + # register(Anticipate, execute__anticipate) diff --git a/NARS/RuleMap/Rules/__init__.py b/NARS/RuleMap/Rules/__init__.py new file mode 100644 index 0000000..5946543 --- /dev/null +++ b/NARS/RuleMap/Rules/__init__.py @@ -0,0 +1,10 @@ +from .add_rule import * +from .NAL1 import * +from .NAL2 import * +from .NAL3 import * +from .NAL4 import * +from .NAL5 import * +from .NAL6 import * +from .NAL7 import * +from .NAL8 import * +from .NAL9 import * \ No newline at end of file diff --git a/NARS/RuleMap/Rules/add_rule.py b/NARS/RuleMap/Rules/add_rule.py new file mode 100644 index 0000000..3e02d59 --- /dev/null +++ b/NARS/RuleMap/Rules/add_rule.py @@ -0,0 +1,124 @@ +from operator import imod +import os +from pathlib import Path +from inspect import getmembers, isfunction +import importlib +import re +from typing import Any, List, Tuple, Union +from typing_extensions import Protocol +from collections import OrderedDict + +from numpy import product + +from Config import Enable +from NARS.RuleMap.Interface import Interface_CompositionalRules, Interface_SyllogisticRules, 
Interface_DecompositionalRules, Interface_TransformRules, Interface_ConditionalRules, Interface_TemporalRules
+from Narsese import Copula, Task
+from Narsese._py.Connector import Connector
+from Narsese._py.Sentence import Goal, Judgement, Quest, Question
+from Narsese._py.Statement import Statement
+from Narsese._py.Term import Term
+from Narsese import Belief, Term, Truth, Compound, Budget
+# from .RuleMap_v1 import RuleMap as RuleMap_v1, RuleCallable
+from ...DataStructures import LinkType, TaskLink, TermLink
+from NAL.Inference import *
+from utils.SparseLUT import SparseLUT
+from utils.tools import get_size
+
+from utils.Print import out_print, PrintType
+
+import time
+from datetime import datetime
+import pickle
+import sty
+from .._extract_feature import extract_feature, _compound_has_common, _compound_at
+import Global
+
+class RuleCallable(Protocol):
+    def __call__(self,
+        task: Task,
+        belief: Belief,
+        budget_tasklink: Budget=None,
+        budget_termlink: Budget=None
+    ) -> Tuple[Task, Tuple[Budget, float, float]]: ...
+
+class RuleMapCallable(Protocol):
+    def __call__(self,
+        task: Task,
+        term_belief: Union[Statement, Term],
+        truth_belief: Union[Truth, None],
+        task_link: TaskLink,
+        term_link: TermLink
+    ) -> List[RuleCallable]: ...
+
+
+def task_type_id(task: Task):
+    if task.is_judgement: return 0
+    elif task.is_goal: return 1
+    elif task.is_question: return 2
+    elif task.is_quest: return 3
+    else: raise TypeError("Invalid case.")
+
+_class_convert = {
+    Judgement: 0,
+    Goal: 1,
+    Question: 2,
+    Quest: 3
+}
+def class_sentence_to_list(*types):
+    # accept either several sentence classes or a single list/tuple of them.
+    if len(types) == 1 and isinstance(types[0], (list, tuple)): types = types[0]
+    return [_class_convert[t] for t in types]
+
+
+class CommonId:
+    def __init__(self, first, second=None) -> None:
+        self.first = first
+        self.second = second
+
+    def __int__(self):
+        return self.first*2 + self.second if self.second is not None else self.first
+
+
+def add_rule(sparse_lut: SparseLUT, structure: OrderedDict, rules: List[RuleCallable], **kwargs):
+    ''''''
+    indices = [kwargs.get(key, None) for key in structure.keys()]
+
+    # convert the indices into a normalized form.
+    indices_norm = []
+    values = iter(structure.values())
+    for index in indices:
+        _type, _cnt_type = next(values)
+        # if index is Ellipsis or index is None: index = None
+        if index is Any: pass
+        elif index is None: pass
+        elif isinstance(index, tuple):
+            assert 0 < len(index) <= 3, "It shouldn't be bigger than 3, and shouldn't be 0."
+            _index = index
+            index = []
+            for i, idx in enumerate(_index):
+                if i < 2: assert isinstance(idx, _type), "It should be the type identified in `self.structure_map`"
+                idx = int(idx)
+                if i < 2: assert idx < _cnt_type, "It shouldn't be bigger than the maximum index of the type."
+                index.append(idx)
+            index = slice(*index)
+        elif isinstance(index, slice): pass
+        elif isinstance(index, list):
+            _index = index
+            index = []
+            for idx in _index:
+                assert idx is None or isinstance(idx, _type), "It should be the type identified in `self.structure_map`"
+                idx = int(idx) if idx is not None else idx
+                assert (idx if idx is not None else 0) < _cnt_type, "It shouldn't be bigger than the maximum index of the type."
+                index.append(idx)
+        else:
+            assert isinstance(index, _type), f"The `{index}` should be the type identified in `self.structure_map`"
+            index = int(index)
+            assert index < _cnt_type, "It shouldn't be bigger than the maximum index of the type."
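+        # `index` is now one of: `Any` (wildcard), `None` (unconstrained), an
+        # `int`, a list of `int`/`None`, or a `slice`, i.e. presumably the
+        # shapes that `SparseLUT.add` accepts.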
+        indices_norm.append(index)
+    indices = tuple(indices_norm)
+
+    # add the rule to the map
+    if not isinstance(rules, (tuple, list)):
+        rules = (rules,)
+    for rule in rules:
+        sparse_lut.add(list(indices), rule)
+    return indices
\ No newline at end of file
diff --git a/NARS/RuleMap/__init__.py b/NARS/RuleMap/__init__.py
new file mode 100644
index 0000000..dc76446
--- /dev/null
+++ b/NARS/RuleMap/__init__.py
@@ -0,0 +1,2 @@
+# from .RuleMap_v1 import RuleMap as RuleMap_v1, RuleCallable
+from .RuleMap_v2 import RuleMap as RuleMap_v2, RuleCallable
\ No newline at end of file
diff --git a/NARS/RuleMap/_extract_feature.py b/NARS/RuleMap/_extract_feature.py
new file mode 100644
index 0000000..28c308e
--- /dev/null
+++ b/NARS/RuleMap/_extract_feature.py
@@ -0,0 +1,531 @@
+from typing import Union
+from collections import namedtuple
+
+from Narsese._py.Connector import Connector
+from NAL.Inference import *
+from Narsese import Statement, Term, Compound
+
+Feature = namedtuple(
+    'Feature',
+    [
+        'match_reverse',
+        'has_common_id',
+        'common_id_task',
+        'common_id_belief',
+        'has_at',
+        'p1_at_p2',
+        'p2_at_p1',
+        'has_compound_at',
+        'c1_at_c2',
+        'c2_at_c1',
+        'has_compound_common_id',
+        'compound_common_id_task',
+        'compound_common_id_belief',
+        'the_other1',
+        'the_other2',
+        # 'the_other_compound_has_common',
+        # 'the_other_compound_p1_at_p2',
+        # 'the_other_compound_p2_at_p1',
+        # 'the_other_connector1',
+        # 'the_other_connector2'
+    ],
+    defaults=[False, True, None, None, False, None, None, False, None, None, False, None, None, None, None]  # False, False, False, None, None]
+)
+def _mirorr_feature(premise1: Union[Term, Compound, Statement], premise2: Union[Term, Compound, Statement]):
+    '''Extract the feature with the premises swapped, then swap all task/belief-indexed fields back.'''
+    feature = extract_feature(premise2, premise1)
+    return Feature(
+        feature.match_reverse,
+        feature.has_common_id,
+        feature.common_id_belief,
+        feature.common_id_task,
+        feature.has_at,
+        feature.p2_at_p1,
+        feature.p1_at_p2,
+        feature.has_compound_at,
+        feature.c2_at_c1,
+        feature.c1_at_c2,  # bug fix: `feature.c2_at_c1` was passed twice here, dropping `feature.c1_at_c2`
+        feature.has_compound_common_id,
+        feature.compound_common_id_belief,
+        feature.compound_common_id_task,
+        feature.the_other2,
+        feature.the_other1
+    )
+
+def _compound_has_common(term1: Union[Term, Compound, Statement], term2: Union[Term, Compound, Statement]):
+    if term1.is_compound:
+        return (term2 in term1.terms) or term1.has_common(term2)
+    elif term2.is_compound:
+        return (term1 in term2.terms) or term1.has_common(term2)
+    else: return False
+
+def _compound_at(term1: Union[Term, Compound, Statement], term2: Compound, compound_has_common: bool=None):
+    if term2.is_compound:
+        if not term1.is_compound:
+            if term2.connector is Connector.SequentialEvents:
+                return term2.terms[0] == term1
+            else:
+                return term2.contains(term1)
+        else:
+            empty = len(term2.terms - term1.terms) == 0
+            if term2.connector is Connector.SequentialEvents:
+                return (not empty) and term2.terms[:len(term1.terms)] == term1.terms
+            else:
+                return (not empty) and (compound_has_common if compound_has_common is not None else _compound_has_common(term1, term2))
+    else: return False
+
+def extract_feature(premise1: Union[Term, Compound, Statement], premise2: Union[Term, Compound, Statement]) -> Feature:
+    '''
+    It should be ensured that premise1 and premise2 aren't identical.
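+
+    For example (illustrative docstring note; the angle-bracket forms are Narsese
+    shorthand, not Python): extract_feature(<M-->S>, <M-->P>) yields
+    Feature(has_common_id=True, common_id_task=0, common_id_belief=0,
+    the_other1=S, the_other2=P, ...), because the two premises share their subject M.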
+ ''' + if premise2 is None: return Feature() + if premise1.is_statement: + ''' + P> + P> + + <T>-->P> + <Q>> + <P>-->Q>> + + <T>==>P> + <Q>> + <P>==>Q>> + + <(&,S,T)-->P> + (|,P,Q)> + <(&,S,T)-->(|,P,Q)> + + <(&,S,T)==>P> + (|,P,Q)> + <(&,S,T)==>(|,P,Q)> + <(&&,T>, R>)==>(|,P,Q)> + ''' + if premise2.is_statement: + # M>, P> + # M>, P> + # <T>==>M>, P> + if premise1.subject == premise2.predicate and premise1.predicate == premise2.subject: + # P>, S> + return Feature( + match_reverse=True + ) + elif premise1.subject == premise2.subject: + # S>, P> + return Feature( + has_common_id=True, + common_id_task=0, + common_id_belief=0, + the_other1=premise1.predicate, + the_other2=premise2.predicate + ) + elif premise1.subject == premise2.predicate: + # P>, M> + return Feature( + has_common_id=True, + common_id_task=0, + common_id_belief=1, + the_other1=premise1.predicate, + the_other2=premise2.subject + ) + elif premise1.predicate == premise2.subject: + # M>, P> + return Feature( + has_common_id=True, + common_id_task=1, + common_id_belief=0, + the_other1=premise1.subject, + the_other2=premise2.predicate + ) + elif premise1.predicate == premise2.predicate: + # M>, M> + return Feature( + has_common_id=True, + common_id_task=1, + common_id_belief=1, + the_other1=premise1.subject, + the_other2=premise2.subject + ) + # <T>==>M>, T>; TaskLink is COMPOUND_CONDITION + elif premise1.subject == premise2: + # <T>==>M>, T> + return Feature( + has_at=True, + p2_at_p1=True, + common_id_task=0 + ) + elif premise1.predicate == premise2: + # T>>, T> + return Feature( + has_at=True, + p2_at_p1=True, + common_id_task=1 + ) + elif premise2.subject == premise1: + # T>, <T>==>M> + return Feature( + has_at=True, + p1_at_p2=True, + common_id_belief=0 + ) + elif premise2.predicate == premise1: + # T>, T>> + return Feature( + has_at=True, + p1_at_p2=True, + common_id_task=1 + ) + # <(&&, M>, Q>)==>T>, M> + elif premise1.subject.is_compound and premise2 in premise1.subject.terms: + # <(&&, M>, Q>)==>T>, M> + return Feature( + has_compound_at=True, + c2_at_c1=True, + compound_common_id_task=0, + ) + elif premise1.predicate.is_compound and premise2 in premise1.predicate.terms: + # (&&, M>, Q>)>, M> + return Feature( + has_compound_at=True, + c2_at_c1=True, + compound_common_id_task=1, + ) + elif premise2.subject.is_compound and premise1 in premise2.subject.terms: + # M>, <(&&, M>, Q>)==>T> + return Feature( + has_compound_at=True, + c1_at_c2=True, + compound_common_id_belief=0, + ) + elif premise2.predicate.is_compound and premise1 in premise2.predicate.terms: + # M>, (&&, M>, Q>)> + return Feature( + has_compound_at=True, + c1_at_c2=True, + compound_common_id_belief=1, + ) + # <(&&,S,P)==>T>, M> + # <(&&,S,P,Q)==>T>, <(&&,S,P)==>M> + elif premise1.subject.is_compound and premise2.subject in premise1.subject.terms: + # <(&&,S,P)==>T>, M> + return Feature( + has_compound_common_id=True, + compound_common_id_task=0, + compound_common_id_belief=0, + the_other1=premise1.predicate, + the_other2=premise2.predicate + ) + elif premise1.predicate.is_compound and premise2.subject in premise1.predicate.terms: + # (&&,S,P)>, M> + return Feature( + has_compound_common_id=True, + compound_common_id_task=1, + compound_common_id_belief=0, + the_other1=premise1.subject, + the_other2=premise2.predicate + ) + elif premise1.subject.is_compound and premise2.predicate in premise1.subject.terms: + # <(&&,S,P)==>T>, S> + return Feature( + has_compound_common_id=True, + compound_common_id_task=0, + compound_common_id_belief=1, + 
the_other1=premise1.predicate, + the_other2=premise2.subject + ) + elif premise1.predicate.is_compound and premise2.predicate in premise1.predicate.terms: + # (&&,S,P)>, S> + return Feature( + has_compound_common_id=True, + compound_common_id_task=1, + compound_common_id_belief=1, + the_other1=premise1.subject, + the_other2=premise2.subject + ) + + elif premise2.subject.is_compound and premise1.subject in premise2.subject.terms: + # M>, <(&&,S,P)==>T> + return Feature( + has_compound_common_id=True, + compound_common_id_task=0, + compound_common_id_belief=0, + the_other1=premise1.predicate, + the_other2=premise2.predicate + ) + elif premise2.predicate.is_compound and premise1.subject in premise2.predicate.terms: + # M>, (&&,S,P)> + return Feature( + has_compound_common_id=True, + compound_common_id_task=0, + compound_common_id_belief=1, + the_other1=premise1.predicate, + the_other2=premise2.subject + ) + elif premise2.subject.is_compound and premise1.predicate in premise2.subject.terms: + # S>, <(&&,S,P)==>T> + return Feature( + has_compound_common_id=True, + compound_common_id_task=1, + compound_common_id_belief=0, + the_other1=premise1.subject, + the_other2=premise2.predicate + ) + elif premise2.predicate.is_compound and premise1.predicate in premise2.predicate.terms: + # S>, (&&,S,P)> + return Feature( + has_compound_common_id=True, + compound_common_id_task=1, + compound_common_id_belief=1, + the_other1=premise1.subject, + the_other2=premise2.subject + ) + + # <(&&,S,P,Q)==>T>, <(&&,S,P)==>M> + elif premise1.subject.is_compound and premise2.subject.is_compound and premise2.subject.has_common(premise1.subject): + # <(&&,S,P,Q)==>T>, <(&&,S,P)==>M> + return Feature( + has_compound_common_id=True, + compound_common_id_task=0, + compound_common_id_belief=0, + the_other1=premise1.predicate, + the_other2=premise2.predicate + ) + elif premise1.predicate.is_compound and premise2.subject.is_compound and premise2.subject.has_common(premise1.predicate): + # (&&,S,P,Q)>, <(&&,S,P)==>M> + return Feature( + has_compound_common_id=True, + compound_common_id_task=1, + compound_common_id_belief=0, + the_other1=premise1.subject, + the_other2=premise2.predicate + ) + elif premise1.subject.is_compound and premise2.predicate.is_compound and premise2.predicate.has_common(premise1.subject): + # <(&&,S,P,Q)==>T>, (&&,S,P)> + return Feature( + has_compound_common_id=True, + compound_common_id_task=0, + compound_common_id_belief=1, + the_other1=premise1.predicate, + the_other2=premise2.subject + ) + elif premise1.predicate.is_compound and premise2.predicate.is_compound and premise2.predicate.has_common(premise1.predicate): + # (&&,S,P,Q)>, (&&,S,P)> + return Feature( + has_compound_common_id=True, + compound_common_id_task=1, + compound_common_id_belief=1, + the_other1=premise1.subject, + the_other2=premise2.subject + ) + + else: + return Feature( + has_common_id=False + ) + elif premise2.is_compound: + # P>, (|, P, T) + # P>, (&&, S, T) + # (|, P, Q)>, (|, P, Q, T) + # P>, (&&, P>, T) + if premise1.subject in premise2.terms: + # P>, (|, S, T) + return Feature( + match_reverse=False, + has_compound_common_id=True, + compound_common_id_task=0 + ) + elif premise1.predicate in premise2.terms: + # P>, (|, P, T) + return Feature( + match_reverse=False, + has_compound_common_id=True, + compound_common_id_task=1 + ) + elif premise1 in premise2.terms: + # P>, (&&, P>, T) + return Feature( + match_reverse=False, + p1_at_p2=True + ) + # <(&,S,P)==>Q>, (&,S,M) + # <(&,S,P,M)==>Q>, (&,S,M) + # <(&,S,M)==>Q>, (&,S,P,M) + 
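+            # Note (added): the branches below handle a statement task against a compound
+            # belief whose term-set overlaps one side of the statement; `has_common` (from
+            # Compound) is assumed to test term-set intersection, and only the side of the
+            # overlap (subject=0, predicate=1) is recorded in `compound_common_id_task`.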
+            elif premise1.subject.is_compound and premise1.subject.has_common(premise2):
+                # <(&&,S,M)==>P>, (&&,M,Q,T)
+                return Feature(
+                    match_reverse=False,
+                    has_compound_common_id=True,
+                    compound_common_id_task=0
+                )
+            elif premise1.predicate.is_compound and premise1.predicate.has_common(premise2):
+                # <S==>(||,M,P)>, (||,M,Q,T)
+                return Feature(
+                    match_reverse=False,
+                    has_compound_common_id=True,
+                    compound_common_id_task=1
+                )
+            else:
+                return Feature(
+                    match_reverse=False,
+                    has_common_id=False
+                )
+        elif premise2.is_atom:
+            # <S-->P>, S
+            if premise2 == premise1.subject:
+                # <S-->P>, S
+                return Feature(
+                    has_at=True,
+                    p2_at_p1=True,
+                    common_id_task=0
+                )
+            elif premise2 == premise1.predicate:
+                # <S-->P>, P
+                return Feature(
+                    has_at=True,
+                    p2_at_p1=True,
+                    common_id_task=1
+                )
+            # <(&&,S,T)==>P>, S
+            elif premise1.subject.is_compound and premise2 in premise1.subject.terms:
+                # <(&&,S,T)==>P>, S
+                return Feature(
+                    has_compound_at=True,
+                    p2_at_p1=True,
+                    compound_common_id_task=0
+                )
+            elif premise1.predicate.is_compound and premise2 in premise1.predicate.terms:
+                # <P==>(&&,S,T)>, S
+                return Feature(
+                    has_compound_at=True,
+                    p2_at_p1=True,
+                    compound_common_id_task=1
+                )
+            else:
+                return Feature(
+                    match_reverse=False,
+                    has_common_id=False
+                )
+
+        else: raise ValueError("Invalid case.")  # bug fix: raising a bare string is invalid in Python 3
+    elif premise1.is_compound:
+        '''
+        (&, S, T)
+        (&, <S-->P>, T)
+        (&&, <S-->P>, <T-->Q>)
+        (&, <S==>P>, T)
+        '''
+        if premise2.is_statement:
+            return _mirorr_feature(premise1, premise2)
+        elif premise2.is_compound:
+            if premise1.has_common(premise2):
+                # (&&, A, B, C), (&&, A, B)
+                # (&&, A, B, C), (&&, A, B, D)
+                return Feature(
+                    has_compound_common_id=True
+                )
+            else:
+                return Feature(
+                    has_common_id=False
+                )
+        elif premise2.is_atom:
+            if premise2 in premise1.terms:
+                # (&&, A, B, C), A.
+                return Feature(
+                    has_compound_common_id=True
+                )
+            else:
+                return Feature(
+                    match_reverse=False,
+                    has_common_id=False
+                )
+        else: raise ValueError("Invalid case.")
+    elif premise1.is_atom:
+        '''
+        S.
+        '''
+        if premise2.is_statement:
+            return _mirorr_feature(premise1, premise2)
+        elif premise2.is_compound:
+            return _mirorr_feature(premise1, premise2)
+        elif premise2.is_atom:
+            return Feature(
+                match_reverse=False,
+                has_common_id=False
+            )
+        else: raise ValueError("Invalid case.")
+
+
+
+def _at(compound: Union[Compound, Statement], term: Term):
+    '''
+    To judge whether the `term` is a component of the `compound`.
+
+    e.g. A@(&&,A,B), then return (True, 0);
+         B@(&&,A,B), then return (True, 1);
+         C@(&&,A,B), then return (False, None)
+    '''
+    if compound.is_atom:
+        return (False, None)
+    else:
+        if compound.is_compound:
+            terms = compound
+        elif compound.is_statement:
+            terms = (compound.subject, compound.predicate)
+        else: raise ValueError("Invalid case.")
+
+        for i, component in enumerate(terms):
+            if component == term:
+                return (True, i)
+        # bug fix: the `else: return (False, None)` inside the loop meant only the first component was ever checked
+        return (False, None)
+
+
+def _common(premise1: Statement, premise2: Statement):
+    '''
+    To judge whether the `premise1` and the `premise2` have common term.
+
+    e.g.
+         <S-->M>, <M-->P>, then return (True, 1, 0, False);
+         <M-->P>, <S-->M>, then return (True, 0, 1, False);
+         <M-->P>, <M-->S>, then return (True, 0, 0, False);
+         <S-->M>, <P-->M>, then return (True, 1, 1, False);
+         <A-->B>, A, then return (True, 0, 0, False)
+         <A-->B>, B, then return (True, 1, 0, False)
+
+    Further compound cases (for reference):
+    <A==><B-->C>>, <B-->C>
+    <A-->(&, B, C)>, (&, B, C)
+    <A-->(&, B, C, D)>, (&, B, C)
+    <A-->(|, B, C)>, <A-->C> |- <A-->B>
+    <(&, A, B)-->(|, C, D)>, <(&, A, B)-->D> |- <(&, A, B)-->C>
+
+    Return:
+        has_common_id (bool), common_id_task (int), common_id_belief (int), match_reverse (bool)
+    '''
+    if premise1.is_statement and premise2.is_statement:
+        if premise1.subject == premise2.predicate and premise1.predicate == premise2.subject:
+            return True, None, None, True
+        if premise1.subject == premise2.subject:
+            return True, 0, 0, False
+        elif premise1.subject == premise2.predicate:
+            return True, 0, 1, False
+        elif premise1.predicate == premise2.subject:
+            return True, 1, 0, False
+        elif premise1.predicate == premise2.predicate:
+            return True, 1, 1, False
+        else:
+            return False, None, None, False
+    elif premise1.is_statement and premise2.is_atom:
+        if premise1.subject == premise2:
+            return True, 0, 0, False
+        elif premise1.predicate == premise2:
+            return True, 1, 0, False
+        else:
+            return False, None, None, False
+    elif premise2.is_statement and premise1.is_atom:
+        if premise2.subject == premise1:
+            return True, 0, 0, False
+        elif premise2.predicate == premise1:
+            return True, 0, 1, False
+        else:
+            return False, None, None, False
+    else:
+        return False, None, None, False
+
diff --git a/NARS/RuleMap/_trash/RuleMap_v1.py b/NARS/RuleMap/_trash/RuleMap_v1.py
new file mode 100644
index 0000000..6d3e725
--- /dev/null
+++ b/NARS/RuleMap/_trash/RuleMap_v1.py
@@ -0,0 +1,432 @@
+from typing import Callable, Tuple, Type, Protocol, List
+from NARS.DataStructures import Link, TaskLink, TermLink
+from Narsese.Parser.narsese_lark import Rule
+from Narsese import Belief, Term, Truth, Compound, Budget
+from Narsese._py.Copula import Copula
+from Narsese._py.Statement import Statement
+from Narsese._py.Term import TermType
+from ..DataStructures import LinkType, Task
+import numpy as np
+from NAL.Inference import *
+
+from . import Interface_SyllogisticRules
+
+def _at(compound: Compound, component: Term):
+    '''
+    To judge whether the `component` is in the `compound`.
+
+    e.g. A@(&&,A,B), then return (True, 0);
+         B@(&&,A,B), then return (True, 1);
+         C@(&&,A,B), then return (False, None)
+    '''
+
+def _common(premise1: Statement, premise2: Statement):
+    '''
+    To judge whether the `premise1` and the `premise2` have common term.
+
+    e.g.
M>, P>, then return (True, 1, 0); + P>, M>, then return (True, 0, 1); + P>, S>, then return (True, 0, 0); + M>, M>, then return (True, 1, 1); + B>, A, then return (True, 0, 0) + B>, B, then return (True, 1, 0) + + Return: + has_common_id (bool), common_id_task (int), common_id_belief (int), match_reverse (bool) + ''' + if premise1.is_statement and premise2.is_statement: + if premise1.subject == premise2.predicate and premise1.predicate == premise2.subject: + return True, None, None, True + if premise1.subject == premise2.subject: + return True, 0, 0, False + elif premise1.subject == premise2.predicate: + return True, 0, 1, False + elif premise1.predicate == premise2.subject: + return True, 1, 0, False + elif premise1.predicate == premise2.predicate: + return True, 1, 1, False + else: + return False, None, None, False + elif premise1.is_statement and premise2.is_atom: + if premise1.subject == premise2: + return True, 0, 0, False + elif premise1.predicate == premise2: + return True, 1, 0, False + else: + return False, None, None, False + elif premise2.is_statement and premise1.is_atom: + if premise2.subject == premise1: + return True, 0, 0, False + elif premise2.predicate == premise1: + return True, 0, 1, False + else: + return False, None, None, False + else: + return False, None, None, False + + +class RuleCallable(Protocol): + def __call__(self, + task: Task, + belief: Belief, + budget_tasklink: Budget=None, + budget_termlink: Budget=None + ) -> Tuple[Task, Tuple[Budget, float, float]]: ... + +class RuleMapCallable(Protocol): + def __call__(self, + task: Task, + term_belief: Statement | Term, + truth_belief: Truth | None, + task_link: TaskLink, + term_link: TermLink + ) -> List[RuleCallable]: ... + + + +class RuleMap: + # type_map: np.ndarray + def __init__(self) -> None: + ''' + given to premises, including the type of links and the relation (`_at` or `_common`), matched rules are obtained. + ''' + n_types = max([t.value for t in LinkType.__members__.values()]) + 1 + self.type_map = np.empty((n_types, n_types), dtype=object) # There are 10 types of task-link and 8 types of term-link. 
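+        # Dispatch design (note added for reference): `type_map` is a 2-D table indexed
+        # by (task_link.type.value, term_link.type.value); each non-empty cell holds a
+        # RuleMapCallable, `verify` below checks for a non-empty cell, and `match`
+        # invokes the stored callable to collect the applicable rules.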
+ # self.type_map[:, :] = None + # self.type_map[LinkType.SELF.value, LinkType.COMPONENT.value] = self._self__component + # self.type_map[LinkType.SELF.value, LinkType.COMPOUND.value] = self._self__compound + # self.type_map[LinkType.SELF.value, LinkType.COMPONENT_STATEMENT.value] = self._self__component_statement + # self.type_map[LinkType.SELF.value, LinkType.COMPOUND_STATEMENT.value] = self._self__compound_statement + # self.type_map[LinkType.SELF.value, LinkType.COMPONENT_CONDITION.value] = self._self__component_condition + # self.type_map[LinkType.SELF.value, LinkType.COMPOUND_CONDITION.value] = self._self__compound_condition + # self.type_map[LinkType.COMPOUND.value, LinkType.COMPOUND.value] = self._compound__compound + # self.type_map[LinkType.COMPOUND.value, LinkType.COMPOUND_STATEMENT.value] = self._compound__compound_statement + # self.type_map[LinkType.COMPOUND.value, LinkType.COMPOUND_CONDITION.value] = self._compound__compound_condition + # self.type_map[LinkType.COMPOUND_STATEMENT.value, LinkType.COMPONENT.value] = self._compound_statement__component + # self.type_map[LinkType.COMPOUND_STATEMENT.value, LinkType.COMPOUND.value] = self._compound_statement__compound + self.type_map[LinkType.COMPOUND_STATEMENT.value, LinkType.COMPOUND_STATEMENT.value] = self._compound_statement__compound_statement + # self.type_map[LinkType.COMPOUND_STATEMENT.value, LinkType.COMPOUND_CONDITION.value] = self._compound_statement__compound_condition + # self.type_map[LinkType.COMPOUND_CONDITION.value, LinkType.COMPOUND.value] = self._compound_condition__compound + # self.type_map[LinkType.COMPOUND_CONDITION.value, LinkType.COMPOUND_STATEMENT.value] = self._compound_condition__compound_statement + + pass + + + + + # def __call__(self, task: Task, belief: Belief) -> Type['RuleMap']: + # pass + + def _self__compound(self, task: Task, belief: Belief, task_link: TaskLink, term_link: TermLink): + ''' + task: C + belief: (&&, A, C) + ''' + return [] + + def _self__component(self, task: Task, belief: Belief, task_link: TaskLink, term_link: TermLink): + '''''' + return [] + + + def _self__component_statement(self, task: Task, belief: Belief, task_link: TaskLink, term_link: TermLink): + '''''' + return [] + + + def _self__compound_statement(self, + task: Task, + term_belief: Statement | Term, + truth_belief: Truth | None, + task_link: TaskLink, + term_link: TermLink + ): + '''''' + rules = [] + + term_task: Statement = task.term + truth_task = task.sentence.truth + copula_task = term_task.copula + copula_belief = term_belief.copula + connector_task_subject = term_task.subject.copula if term_task.type == TermType.STATEMENT and term_task.subject.type == TermType.COMPOUND else None + connector_task_predicate = term_task.predicate.copula if term_task.type == TermType.STATEMENT and term_task.predicate.type == TermType.COMPOUND else None + connector_beleif_subject = term_belief.subject.copula if term_belief.type == TermType.STATEMENT and term_belief.subject.type == TermType.COMPOUND else None + connector_beleif_predicate = term_belief.predicate.copula if term_belief.type == TermType.STATEMENT and term_belief.predicate.type == TermType.COMPOUND else None + + has_common_id, common_id_task, common_id_belief, match_reverse = _common(term_task, term_belief) + if not has_common_id: return rules + match (copula_task, copula_belief): + case (Copula.Implication, None): + # {B>. A.} |- B. 
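+                # Note (added): `common_id_task`/`common_id_belief` encode where the
+                # shared term sits in each premise: 0 = subject position, 1 = predicate
+                # position (see `_common` above). The cases below are placeholders; this
+                # v1 dispatcher was shelved before they were filled in.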
+ match (common_id_task, common_id_belief): + case (0, 1): + pass + case (1, 0): + pass + case (0, 0): + pass + case (1, 1): + pass + case _: + raise "Error: No matched case!" + return rules + + + def _self__component_condition(self, task: Task, belief: Belief, task_link: TaskLink, term_link: TermLink): + '''''' + return [] + + + def _self__compound_condition(self, task: Task, belief: Belief, task_link: TaskLink, term_link: TermLink): + '''''' + return [] + + + def _compound__compound(self, task: Task, belief: Belief, task_link: TaskLink, term_link: TermLink): + '''''' + return [] + + + def _compound__compound_statement(self, task: Task, belief: Belief, task_link: TaskLink, term_link: TermLink): + '''''' + return [] + + + def _compound__compound_condition(self, task: Task, belief: Belief, task_link: TaskLink, term_link: TermLink): + '''''' + return [] + + def _compound_statement__component(self, task: Task, belief: Belief, task_link: TaskLink, term_link: TermLink): + '''''' + return [] + + def _compound_statement__compound(self, task: Task, belief: Belief, task_link: TaskLink, term_link: TermLink): + '''''' + return [] + + def _compound_statement__compound_statement(self, + task: Task, + term_belief: Statement | Term, + truth_belief: Truth | None, + task_link: TaskLink, + term_link: TermLink + ): + rules = [] + + term_task: Statement = task.term + truth_task = task.sentence.truth + copula_task = term_task.copula + copula_belief = term_belief.copula + + if term_task == term_belief: return rules + has_common_id, common_id_task, common_id_belief, match_reverse = _common(term_task, term_belief) + if not has_common_id: return rules + + match (copula_task, copula_belief): + case (Copula.Inheritance, Copula.Inheritance): # OK + if match_reverse: + rules.append(Interface_SyllogisticRules._syllogistic__reversion) + else: + match (common_id_task, common_id_belief): + case (0, 1): + rules.append(Interface_SyllogisticRules._syllogistic__deduction__0_1) + rules.append(Interface_SyllogisticRules._syllogistic__exemplification__0_1) + case (1, 0): + rules.append(Interface_SyllogisticRules._syllogistic__deduction__1_0) + rules.append(Interface_SyllogisticRules._syllogistic__exemplification__1_0) + case (0, 0): + rules.append(Interface_SyllogisticRules._syllogistic__induction__0_0) + rules.append(Interface_SyllogisticRules._syllogistic__induction__0_0_prime) + rules.append(Interface_SyllogisticRules._syllogistic__comparison__0_0) + case (1, 1): + rules.append(Interface_SyllogisticRules._syllogistic__abduction__1_1) + rules.append(Interface_SyllogisticRules._syllogistic__abduction__1_1_prime) + rules.append(Interface_SyllogisticRules._syllogistic__comparison__1_1) + case _: + raise "Error: No matched case!" + case (Copula.Inheritance, Copula.Similarity): + if not match_reverse: + match (common_id_task, common_id_belief): + case (0, 1): + rules.append(Interface_SyllogisticRules._syllogistic__analogy__0_1) + case (1, 0): + rules.append(Interface_SyllogisticRules._syllogistic__analogy__1_0) + case (0, 0): + rules.append(Interface_SyllogisticRules._syllogistic__analogy__0_0) + case (1, 1): + rules.append(Interface_SyllogisticRules._syllogistic__analogy__1_1) + case _: + raise "Error: No matched case!" 
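+            # Position logic used above and mirrored in the cases below (note added):
+            # with a shared middle term, ids (0,1)/(1,0) give deduction/exemplification,
+            # a shared subject (0,0) gives induction/comparison, a shared predicate
+            # (1,1) gives abduction/comparison, and a fully reversed match triggers reversion.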
+ case (Copula.Similarity, Copula.Inheritance): + if not match_reverse: + match (common_id_task, common_id_belief): + case (0, 1): + rules.append(Interface_SyllogisticRules._syllogistic__analogy__0_1) + case (1, 0): + rules.append(Interface_SyllogisticRules._syllogistic__analogy__1_0) + case (0, 0): + rules.append(Interface_SyllogisticRules._syllogistic__analogy__0_0) + case (1, 1): + rules.append(Interface_SyllogisticRules._syllogistic__analogy__1_1) + case _: + raise "Error: No matched case!" + case (Copula.Similarity, Copula.Similarity): + if not match_reverse: + match (common_id_task, common_id_belief): + case (0, 1): + rules.append(Interface_SyllogisticRules._syllogistic__resemblance__0_1) + case (1, 0): + rules.append(Interface_SyllogisticRules._syllogistic__resemblance__1_0) + case (0, 0): + rules.append(Interface_SyllogisticRules._syllogistic__resemblance__0_0) + case (1, 1): + rules.append(Interface_SyllogisticRules._syllogistic__resemblance__1_1) + case _: + raise "Error: No matched case!" + case (Copula.Implication, Copula.Similarity): + if not match_reverse: + match (common_id_task, common_id_belief): + case (0, 1): + pass + case (1, 0): + pass + case (0, 0): + pass + case (1, 1): + pass + case _: + raise "Error: No matched case!" + case (Copula.Similarity, Copula.Implication): + if not match_reverse: + match (common_id_task, common_id_belief): + case (0, 1): + pass + case (1, 0): + pass + case (0, 0): + pass + case (1, 1): + pass + case _: + raise "Error: No matched case!" + case (Copula.Equivalence, Copula.Similarity): + if not match_reverse: + match (common_id_task, common_id_belief): + case (0, 1): + pass + case (1, 0): + pass + case (0, 0): + pass + case (1, 1): + pass + case _: + raise "Error: No matched case!" + case (Copula.Similarity, Copula.Equivalence): + if not match_reverse: + match (common_id_task, common_id_belief): + case (0, 1): + pass + case (1, 0): + pass + case (0, 0): + pass + case (1, 1): + pass + case _: + raise "Error: No matched case!" + case (Copula.Implication, Copula.Implication): # OK + if match_reverse: + rules.append(Interface_SyllogisticRules._syllogistic__reversion) + else: + match (common_id_task, common_id_belief): + case (0, 1): + rules.append(Interface_SyllogisticRules._syllogistic__deduction__0_1) + rules.append(Interface_SyllogisticRules._syllogistic__exemplification__0_1) + case (1, 0): + rules.append(Interface_SyllogisticRules._syllogistic__deduction__1_0) + rules.append(Interface_SyllogisticRules._syllogistic__exemplification__1_0) + case (0, 0): + rules.append(Interface_SyllogisticRules._syllogistic__induction__0_0) + rules.append(Interface_SyllogisticRules._syllogistic__induction__0_0_prime) + rules.append(Interface_SyllogisticRules._syllogistic__comparison__0_0) + case (1, 1): + rules.append(Interface_SyllogisticRules._syllogistic__abduction__1_1) + rules.append(Interface_SyllogisticRules._syllogistic__abduction__1_1_prime) + rules.append(Interface_SyllogisticRules._syllogistic__comparison__1_1) + case _: + raise "Error: No matched case!" + case (Copula.Implication, Copula.Equivalence): + if not match_reverse: + match (common_id_task, common_id_belief): + case (0, 1): + pass + case (1, 0): + pass + case (0, 0): + pass + case (1, 1): + pass + case _: + raise "Error: No matched case!" + case (Copula.Equivalence, Copula.Implication): + if not match_reverse: + match (common_id_task, common_id_belief): + case (0, 1): + pass + case (1, 0): + pass + case (0, 0): + pass + case (1, 1): + pass + case _: + raise "Error: No matched case!" 
+ case (Copula.Equivalence, Copula.Equivalence): + if not match_reverse: + match (common_id_task, common_id_belief): + case (0, 1): + pass + case (1, 0): + pass + case (0, 0): + pass + case (1, 1): + pass + case _: + raise "Error: No matched case!" + case _: + raise "Error: No matched case!" + + + return rules + + def _compound_statement__compound_condition(self, task: Task, belief: Belief, task_link: TaskLink, term_link: TermLink): + '''''' + return [] + + def _compound_condition__compound(self, task: Task, belief: Belief, task_link: TaskLink, term_link: TermLink): + '''''' + return [] + + def _compound_condition__compound_statement(self, task: Task, belief: Belief, task_link: TaskLink, term_link: TermLink): + '''''' + return [] + + def verify(self, task_link: TaskLink, term_link: TermLink, *args): + return self.type_map[task_link.type.value, term_link.type.value] is not None + + def match(self, task: Task, belief: Belief, task_link: TaskLink, term_link: TermLink): + ''' + Given a task and a belief, find the matched rules for one step inference. + ''' + rule_map: RuleMapCallable = self.type_map[task_link.type.value, term_link.type.value] + rules = rule_map(task, belief.term, belief.truth, task_link, term_link) + return rules + + + def __repr__(self) -> str: + '''print self.type_map''' + pass + + \ No newline at end of file diff --git a/NARS/__init__.py b/NARS/__init__.py new file mode 100644 index 0000000..91e36af --- /dev/null +++ b/NARS/__init__.py @@ -0,0 +1 @@ +from .Control import Reasoner_3_0_4 \ No newline at end of file diff --git a/Narsese/Parser/__init__.py b/Narsese/Parser/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/Narsese/Parser/_test.py b/Narsese/Parser/_test.py new file mode 100644 index 0000000..49fad9d --- /dev/null +++ b/Narsese/Parser/_test.py @@ -0,0 +1,47 @@ +from .parser import parser +import re +re_parser = re.compile(r'[^<^>^=^"^&^|^!^.^?^@^~^%^;^\,^:^\/^\\^*^#^$^\[^\]^\{^\}^\(^\)^\ ]+') +re_parser.findall('lock>.') +# result = parser.parse('(&&, <#x --> lock>, <"a"-->"b">).') +result = parser.parse('lock>.') +print(result.pretty()) + +result = parser.parse('<"鸟" --> "动-物">. %0.9; 0.9%') +print(result.pretty()) +result = parser.parse(' swan>. %0.9; 0.9%') +print(result.pretty()) +result = parser.parse(' (&,bird,swimmer)>. %0.9; 0.9%') +print(result.pretty()) +result = parser.parse(' {Mars,Pluto,Venus}>. %0.9; 0.9%') +print(result.pretty()) +result = parser.parse('<(&,bird,swimmer) --> (&,animal,swimmer)>?') +print(result.pretty()) +result = parser.parse('<(~, boy, girl) --> [strong]>.') +print(result.pretty()) +result = parser.parse(' (/,reaction,_,base)>.') +print(result.pretty()) +result = parser.parse('<(*,acid,base) --> reaction>.') +print(result.pretty()) +result = parser.parse('<<$y --> bird> ==> <$y --> flyer>>.') +print(result.pretty()) +result = parser.parse('<(&&,<$1 --> [chirping]>,<$1 --> [with_wings]>) ==> <$1 --> bird>>.') +print(result.pretty()) +result = parser.parse('<<#x --> lock> ==> <<$y --> key> ==> <#x --> (/,open,$y,_)>>>.') +result = parser.parse('(&&,<#x --> lock>,<<$y --> key> ==> <#x --> (/,open,$y,_)>>).') +result = parser.parse('(&&, lock>,<<$y --> key> ==> <#x --> (/,open,$y,_)>>)?') +print(result.pretty()) +result = parser.parse('<<(*, $y, door_101) --> open> =\> <(*, $y, key_101) --> hold>>.') +print(result.pretty()) +result = parser.parse('<(*,John,room_101) --> enter>. 
:\: %1.00;0.90%') +print(result.pretty()) +result = parser.parse('<<(*,John,door_101) --> open><(*,John,room_101) --> enter>>.') +print(result.pretty()) +result = parser.parse('(&/,<(*,SELF,{t002}) --> hold>,<(*,SELF,{t001}) --> at>,(^open,{t001}))!') +print(result.pretty()) +result = parser.parse(' (/,at,_,{t003})>. :\:') +print(result.pretty()) +result = parser.parse('(&/,<(*,SELF,{t002}) --> reachable>,(^pick,{t002}))!') +print(result.pretty()) +result = parser.parse('(^doubt,{SELF}, b>)!') +print(result.pretty()) +print('done.') \ No newline at end of file diff --git a/Narsese/Parser/_test_earley.py b/Narsese/Parser/_test_earley.py new file mode 100644 index 0000000..a72eb94 --- /dev/null +++ b/Narsese/Parser/_test_earley.py @@ -0,0 +1,15 @@ +from lark import Lark +from Narsese.Parser.parser import TreeToNarsese +from pathlib import Path + +filepath = Path(r'Narsese\Parser\narsese.lark') +with open(filepath, 'r') as f: + gramma = ''.join(f.readlines()) +lark = Lark(grammar=gramma, parser='lalr') +content = lark.parse(r'$0.90;0.90;0.9$ bird>.') + +lark = Lark(grammar=gramma, parser='earley') +content = lark.parse(r'$0.90;0.90;0.9$ bird>.') + + +print('done.') \ No newline at end of file diff --git a/Narsese/Parser/narsese.lark b/Narsese/Parser/narsese.lark new file mode 100644 index 0000000..7232e88 --- /dev/null +++ b/Narsese/Parser/narsese.lark @@ -0,0 +1,139 @@ +?start: task + +task : [budget] sentence // (* task to be processed *) +?sentence.0 : (term_nonvar|statement) "." [tense] [truth] -> judgement // (* judgement to be absorbed into beliefs *) + | (term_nonvar|statement) "?" [tense] -> question // (* question on truth-value to be answered *) + | (term_nonvar|statement) "!" [tense] [desire] -> goal // (* goal to be realized by operations *) + | (term_nonvar|statement) "@" [tense] -> quest // (* question on desire-value to be answered *) + +?statement.0 : "<" term copula term ">" // (* two terms related to each other *) + | "(" term copula term ")" // (* two terms related to each other, new notation *) + // | term // (* a term can name a statement *) + | "(" op ("," term)* ")" -> statement_operation1 // (* an operation to be executed *) + | word "(" term ("," term)* ")" -> statement_operation2 // (* an operation to be executed, new notation *) +?copula : "-->" -> inheritance // (* inheritance *) + | "<->" -> similarity // (* similarity *) + | "{--" -> instance // (* instance *) + | "--]" -> property // (* property *) + | "{-]" -> instance_property // (* instance-property *) + | "==>" -> implication // (* implication *) + | "=/>" -> predictive_implication // (* predictive implication *) + | "=|>" -> concurrent_implication // (* concurrent implication *) + | "=\>" -> retrospective_implication // (* retrospective implication *) + | "<=>" -> equivalence // (* equivalence *) + | "" -> predictive_equivalence // (* predictive equivalence *) + | "<|>" -> concurrent_equivalence // (* concurrent equivalence *) + +?term : variable -> variable_term // (* an atomic variable term *) + | term_nonvar + +?term_nonvar: interval + | word -> atom_term // (* an atomic constant term *) + | compound_term -> compound_term // (* a term with internal structure *) + | statement -> statement_term // (* a statement can serve as a term *) + | op + + + +op : "^" word +interval: "+" NUMBER + +?compound_term : set + | multi // (* with prefix or with infix operator *) + | single // (* with prefix or with infix operator *) + | ext_image // (* special case, extensional image *) + | int_image // (* special case, \ 
intensional image *) + | negation // (* negation *) + +?set : int_set + | ext_set + // | list_set +?int_set : con_int_set term ("," term)* "]" -> set // (* intensional set *) +?ext_set : con_ext_set term ("," term)* "}" -> set // (* extensional set *) +// list_set: "(" "#" "," term ("," term)+ ")" + +negation : con_negation term // (* negation *) + | "(" con_negation "," term ")" // (* negation, new notation *) +int_image : "(" con_int_image "," term ("," term)* ")" // (* intensional image *) +ext_image : "(" con_ext_image "," term ("," term)* ")" // (* extensional image *) +?multi : "(" con_multi "," term ("," term)+ ")" -> multi_prefix // (* with prefix operator *) + | "(" multi_infix_expr ")" // (* with infix operator *) + | "(" term ("," term)+ ")" -> multi_prefix_product// (* product, new notation *) + | "(" con_product "," term ("," term)* ")" -> multi_prefix // (* with prefix operator *) + +?single : "(" con_single "," (term|multi_infix_expr) "," (term|multi_infix_expr) ")" -> single_prefix // (* with prefix operator *) + | "(" (term|multi_infix_expr) con_single (term|multi_infix_expr) ")" -> single_infix // (* with infix operator *) + +?multi_infix_expr : multi_extint_expr + | multi_intint_expr + | multi_parallel_expr + | multi_sequential_expr + | multi_conj_expr + | multi_disj_expr + | multi_prod_expr + +// precedence: +// "&" > "|" > "&|" > "&/" > "&&" > "||" > "*" +?multi_prod_expr : term6 ("*" term6)+ +?term6 : (term5|multi_disj_expr) +?multi_disj_expr: term5 ("||" term5)+ +?term5 : (term4|multi_conj_expr) +?multi_conj_expr: term4 ("&&" term4)+ +?term4 : (term3|multi_sequential_expr) +?multi_sequential_expr: term3 ("&/" term3)+ +?term3 : (term2|multi_parallel_expr) +?multi_parallel_expr: term2 ("&|" term2)+ +?term2 : (term1|multi_intint_expr) +?multi_intint_expr : term1 ("|" term1)+ +?term1 : (term|multi_extint_expr) +?multi_extint_expr : term ("&" term)+ + + + +?con_multi : "&&" -> con_conjunction // (* conjunction *) + | "||" -> con_disjunction // (* disjunction *) + | "&|" -> con_parallel_events // (* parallel events *) + | "&/" -> con_sequential_events // (* sequential events *) + | "|" -> con_intensional_intersection // (* intensional intersection *) + | "&" -> con_extensional_intersection // (* extensional intersection *) +con_product: "*" // (* product *) + + +?con_single : "-" -> con_extensional_difference // (* extensional difference *) + | "~" -> con_intensional_difference // (* intensional difference *) +?con_int_set: "[" // (* intensional set *) +?con_ext_set: "{" // (* extensional set *) + +?con_negation : "--" // (* negation *) + +?con_int_image : "\\" // (* intensional image *) +?con_ext_image : "/" // (* extensional image *) + +?variable.0 : "$" word -> independent_var // (* independent variable *) + | "#" word -> dependent_var // (* dependent variable *) + | "?" word -> query_var // (* query variable in question *) + +?tense : ":!" 
NUMBER ":" -> tense_time + | ":/:" -> tense_future // (* future event *) + | ":|:" -> tense_present // (* present event *) + | ":\:" -> tense_past // (* past event *) + +?desire : truth // (* same format, different interpretations *) +truth : "%" frequency [";" confidence [";" k_evidence]] "%" // (* two numbers in [0,1]x(0,1) *) +budget.2: "$" priority [";" durability [";" quality]] "$" // (* three numbers in [0,1]x(0,1)x[0,1] *) + +?word : string_raw | string // /[^\ ]+/ //(* unicode string *) +?priority : /([0]?\.[0-9]+|1\.[0]*|1|0)/ //(* 0 <= x <= 1 *) +?durability : /[0]?\.[0]*[1-9]{1}[0-9]*/ // (* 0 < x < 1 *) +?quality : /([0]?\.[0-9]+|1\.[0]*|1|0)/ // (* 0 <= x <= 1 *) +?frequency : /([0]?\.[0-9]+|1\.[0]*|1|0)/ // (* 0 <= x <= 1 *) +?confidence : /[0]?\.[0]*[1-9]{1}[0-9]*/ // (* 0 < x < 1 *) +?k_evidence: /[1-9]{1}[0-9]*/ // (* x > 0 *) + +?string: /"[^"]+"/ +?string_raw: /[^\-^\+^<^>^=^"^&^|^!^.^?^@^~^%^;^\,^:^\/^\\^*^#^$^\[^\]^\{^\}^\(^\)^\ ]+/ + +%import common.WS +%import common.SIGNED_INT -> NUMBER +// %import common.INT -> NATURAL_NUMBER +%ignore WS \ No newline at end of file diff --git a/Narsese/Parser/narsese_lark.py b/Narsese/Parser/narsese_lark.py new file mode 100644 index 0000000..e592f09 --- /dev/null +++ b/Narsese/Parser/narsese_lark.py @@ -0,0 +1,2905 @@ +# The file was automatically generated by Lark v0.12.0 +__version__ = "0.12.0" + +# +# +# Lark Stand-alone Generator Tool +# ---------------------------------- +# Generates a stand-alone LALR(1) parser with a standard lexer +# +# Git: https://github.com/erezsh/lark +# Author: Erez Shinan (erezshin@gmail.com) +# +# +# >>> LICENSE +# +# This tool and its generated code use a separate license from Lark, +# and are subject to the terms of the Mozilla Public License, v. 2.0. +# If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. +# +# If you wish to purchase a commercial license for this tool and its +# generated code, you may contact me via email or otherwise. +# +# If MPL2 is incompatible with your free or open-source project, +# contact me and we'll work it out. 
+# +# + +from io import open + + + +class LarkError(Exception): + pass + + +class ConfigurationError(LarkError, ValueError): + pass + + +def assert_config(value, options, msg='Got %r, expected one of %s'): + if value not in options: + raise ConfigurationError(msg % (value, options)) + + +class GrammarError(LarkError): + pass + + +class ParseError(LarkError): + pass + + +class LexError(LarkError): + pass + + +class UnexpectedInput(LarkError): + #-- + pos_in_stream = None + _terminals_by_name = None + + def get_context(self, text, span=40): + #-- + assert self.pos_in_stream is not None, self + pos = self.pos_in_stream + start = max(pos - span, 0) + end = pos + span + if not isinstance(text, bytes): + before = text[start:pos].rsplit('\n', 1)[-1] + after = text[pos:end].split('\n', 1)[0] + return before + after + '\n' + ' ' * len(before.expandtabs()) + '^\n' + else: + before = text[start:pos].rsplit(b'\n', 1)[-1] + after = text[pos:end].split(b'\n', 1)[0] + return (before + after + b'\n' + b' ' * len(before.expandtabs()) + b'^\n').decode("ascii", "backslashreplace") + + def match_examples(self, parse_fn, examples, token_type_match_fallback=False, use_accepts=False): + #-- + assert self.state is not None, "Not supported for this exception" + + if isinstance(examples, dict): + examples = examples.items() + + candidate = (None, False) + for i, (label, example) in enumerate(examples): + assert not isinstance(example, STRING_TYPE) + + for j, malformed in enumerate(example): + try: + parse_fn(malformed) + except UnexpectedInput as ut: + if ut.state == self.state: + if use_accepts and hasattr(self, 'accepts') and ut.accepts != self.accepts: + logger.debug("Different accepts with same state[%d]: %s != %s at example [%s][%s]" % + (self.state, self.accepts, ut.accepts, i, j)) + continue + try: + if ut.token == self.token: ## + + logger.debug("Exact Match at example [%s][%s]" % (i, j)) + return label + + if token_type_match_fallback: + ## + + if (ut.token.type == self.token.type) and not candidate[-1]: + logger.debug("Token Type Fallback at example [%s][%s]" % (i, j)) + candidate = label, True + + except AttributeError: + pass + if candidate[0] is None: + logger.debug("Same State match at example [%s][%s]" % (i, j)) + candidate = label, False + + return candidate[0] + + def _format_expected(self, expected): + if self._terminals_by_name: + d = self._terminals_by_name + expected = [d[t_name].user_repr() if t_name in d else t_name for t_name in expected] + return "Expected one of: \n\t* %s\n" % '\n\t* '.join(expected) + + +class UnexpectedEOF(ParseError, UnexpectedInput): + #-- + + def __init__(self, expected, state=None, terminals_by_name=None): + super(UnexpectedEOF, self).__init__() + + self.expected = expected + self.state = state + from .lexer import Token + self.token = Token("", "") ## + + self.pos_in_stream = -1 + self.line = -1 + self.column = -1 + self._terminals_by_name = terminals_by_name + + + def __str__(self): + message = "Unexpected end-of-input. 
" + message += self._format_expected(self.expected) + return message + + +class UnexpectedCharacters(LexError, UnexpectedInput): + #-- + + def __init__(self, seq, lex_pos, line, column, allowed=None, considered_tokens=None, state=None, token_history=None, + terminals_by_name=None, considered_rules=None): + super(UnexpectedCharacters, self).__init__() + + ## + + self.line = line + self.column = column + self.pos_in_stream = lex_pos + self.state = state + self._terminals_by_name = terminals_by_name + + self.allowed = allowed + self.considered_tokens = considered_tokens + self.considered_rules = considered_rules + self.token_history = token_history + + if isinstance(seq, bytes): + self.char = seq[lex_pos:lex_pos + 1].decode("ascii", "backslashreplace") + else: + self.char = seq[lex_pos] + self._context = self.get_context(seq) + + + def __str__(self): + message = "No terminal matches '%s' in the current parser context, at line %d col %d" % (self.char, self.line, self.column) + message += '\n\n' + self._context + if self.allowed: + message += self._format_expected(self.allowed) + if self.token_history: + message += '\nPrevious tokens: %s\n' % ', '.join(repr(t) for t in self.token_history) + return message + + +class UnexpectedToken(ParseError, UnexpectedInput): + #-- + + def __init__(self, token, expected, considered_rules=None, state=None, interactive_parser=None, terminals_by_name=None, token_history=None): + super(UnexpectedToken, self).__init__() + + ## + + self.line = getattr(token, 'line', '?') + self.column = getattr(token, 'column', '?') + self.pos_in_stream = getattr(token, 'start_pos', None) + self.state = state + + self.token = token + self.expected = expected ## + + self._accepts = NO_VALUE + self.considered_rules = considered_rules + self.interactive_parser = interactive_parser + self._terminals_by_name = terminals_by_name + self.token_history = token_history + + + @property + def accepts(self): + if self._accepts is NO_VALUE: + self._accepts = self.interactive_parser and self.interactive_parser.accepts() + return self._accepts + + def __str__(self): + message = ("Unexpected token %r at line %s, column %s.\n%s" + % (self.token, self.line, self.column, self._format_expected(self.accepts or self.expected))) + if self.token_history: + message += "Previous tokens: %r\n" % self.token_history + + return message + + @property + def puppet(self): + warn("UnexpectedToken.puppet attribute has been renamed to interactive_parser", DeprecationWarning) + return self.interactive_parser + + + +class VisitError(LarkError): + #-- + + def __init__(self, rule, obj, orig_exc): + message = 'Error trying to process rule "%s":\n\n%s' % (rule, orig_exc) + super(VisitError, self).__init__(message) + + self.rule = rule + self.obj = obj + self.orig_exc = orig_exc + + +import sys, re +import logging +from io import open +logger = logging.getLogger("lark") +logger.addHandler(logging.StreamHandler()) +## + +## + +logger.setLevel(logging.CRITICAL) + +if sys.version_info[0]>2: + from abc import ABC, abstractmethod +else: + from abc import ABCMeta, abstractmethod + class ABC(object): ## + + __slots__ = () + __metclass__ = ABCMeta + + +Py36 = (sys.version_info[:2] >= (3, 6)) + +NO_VALUE = object() + + +def classify(seq, key=None, value=None): + d = {} + for item in seq: + k = key(item) if (key is not None) else item + v = value(item) if (value is not None) else item + if k in d: + d[k].append(v) + else: + d[k] = [v] + return d + + +def _deserialize(data, namespace, memo): + if isinstance(data, dict): + if '__type__' 
in data: ## + + class_ = namespace[data['__type__']] + return class_.deserialize(data, memo) + elif '@' in data: + return memo[data['@']] + return {key:_deserialize(value, namespace, memo) for key, value in data.items()} + elif isinstance(data, list): + return [_deserialize(value, namespace, memo) for value in data] + return data + + +class Serialize(object): + #-- + + def memo_serialize(self, types_to_memoize): + memo = SerializeMemoizer(types_to_memoize) + return self.serialize(memo), memo.serialize() + + def serialize(self, memo=None): + if memo and memo.in_types(self): + return {'@': memo.memoized.get(self)} + + fields = getattr(self, '__serialize_fields__') + res = {f: _serialize(getattr(self, f), memo) for f in fields} + res['__type__'] = type(self).__name__ + if hasattr(self, '_serialize'): + self._serialize(res, memo) + return res + + @classmethod + def deserialize(cls, data, memo): + namespace = getattr(cls, '__serialize_namespace__', []) + namespace = {c.__name__:c for c in namespace} + + fields = getattr(cls, '__serialize_fields__') + + if '@' in data: + return memo[data['@']] + + inst = cls.__new__(cls) + for f in fields: + try: + setattr(inst, f, _deserialize(data[f], namespace, memo)) + except KeyError as e: + raise KeyError("Cannot find key for class", cls, e) + + if hasattr(inst, '_deserialize'): + inst._deserialize() + + return inst + + +class SerializeMemoizer(Serialize): + #-- + + __serialize_fields__ = 'memoized', + + def __init__(self, types_to_memoize): + self.types_to_memoize = tuple(types_to_memoize) + self.memoized = Enumerator() + + def in_types(self, value): + return isinstance(value, self.types_to_memoize) + + def serialize(self): + return _serialize(self.memoized.reversed(), None) + + @classmethod + def deserialize(cls, data, namespace, memo): + return _deserialize(data, namespace, memo) + + +try: + STRING_TYPE = basestring +except NameError: ## + + STRING_TYPE = str + + +import types +from functools import wraps, partial +from contextlib import contextmanager + +Str = type(u'') +try: + classtype = types.ClassType ## + +except AttributeError: + classtype = type ## + + + +def smart_decorator(f, create_decorator): + if isinstance(f, types.FunctionType): + return wraps(f)(create_decorator(f, True)) + + elif isinstance(f, (classtype, type, types.BuiltinFunctionType)): + return wraps(f)(create_decorator(f, False)) + + elif isinstance(f, types.MethodType): + return wraps(f)(create_decorator(f.__func__, True)) + + elif isinstance(f, partial): + ## + + return wraps(f.func)(create_decorator(lambda *args, **kw: f(*args[1:], **kw), True)) + + else: + return create_decorator(f.__func__.__call__, True) + + +try: + import regex +except ImportError: + regex = None + +import sre_parse +import sre_constants +categ_pattern = re.compile(r'\\p{[A-Za-z_]+}') + +def get_regexp_width(expr): + if regex: + ## + + ## + + ## + + regexp_final = re.sub(categ_pattern, 'A', expr) + else: + if re.search(categ_pattern, expr): + raise ImportError('`regex` module must be installed in order to use Unicode categories.', expr) + regexp_final = expr + try: + return [int(x) for x in sre_parse.parse(regexp_final).getwidth()] + except sre_constants.error: + if not regex: + raise ValueError(expr) + else: + ## + + ## + + c = regex.compile(regexp_final) + if c.match('') is None: + return 1, sre_constants.MAXREPEAT + else: + return 0, sre_constants.MAXREPEAT + + +from collections import OrderedDict + + +class Meta: + def __init__(self): + self.empty = True + + +class Tree(object): + #-- + def 
__init__(self, data, children, meta=None): + self.data = data + self.children = children + self._meta = meta + + @property + def meta(self): + if self._meta is None: + self._meta = Meta() + return self._meta + + def __repr__(self): + return 'Tree(%r, %r)' % (self.data, self.children) + + def _pretty_label(self): + return self.data + + def _pretty(self, level, indent_str): + if len(self.children) == 1 and not isinstance(self.children[0], Tree): + return [indent_str*level, self._pretty_label(), '\t', '%s' % (self.children[0],), '\n'] + + l = [indent_str*level, self._pretty_label(), '\n'] + for n in self.children: + if isinstance(n, Tree): + l += n._pretty(level+1, indent_str) + else: + l += [indent_str*(level+1), '%s' % (n,), '\n'] + + return l + + def pretty(self, indent_str=' '): + #-- + return ''.join(self._pretty(0, indent_str)) + + def __eq__(self, other): + try: + return self.data == other.data and self.children == other.children + except AttributeError: + return False + + def __ne__(self, other): + return not (self == other) + + def __hash__(self): + return hash((self.data, tuple(self.children))) + + def iter_subtrees(self): + #-- + queue = [self] + subtrees = OrderedDict() + for subtree in queue: + subtrees[id(subtree)] = subtree + queue += [c for c in reversed(subtree.children) + if isinstance(c, Tree) and id(c) not in subtrees] + + del queue + return reversed(list(subtrees.values())) + + def find_pred(self, pred): + #-- + return filter(pred, self.iter_subtrees()) + + def find_data(self, data): + #-- + return self.find_pred(lambda t: t.data == data) + + +from inspect import getmembers, getmro + + +class Discard(Exception): + #-- + pass + +## + + + +class _Decoratable: + #-- + + @classmethod + def _apply_decorator(cls, decorator, **kwargs): + mro = getmro(cls) + assert mro[0] is cls + libmembers = {name for _cls in mro[1:] for name, _ in getmembers(_cls)} + for name, value in getmembers(cls): + + ## + + if name.startswith('_') or (name in libmembers and name not in cls.__dict__): + continue + if not callable(value): + continue + + ## + + if hasattr(cls.__dict__[name], 'vargs_applied') or hasattr(value, 'vargs_applied'): + continue + + static = isinstance(cls.__dict__[name], (staticmethod, classmethod)) + setattr(cls, name, decorator(value, static=static, **kwargs)) + return cls + + def __class_getitem__(cls, _): + return cls + + +class Transformer(_Decoratable): + #-- + __visit_tokens__ = True ## + + + def __init__(self, visit_tokens=True): + self.__visit_tokens__ = visit_tokens + + def _call_userfunc(self, tree, new_children=None): + ## + + children = new_children if new_children is not None else tree.children + try: + f = getattr(self, tree.data) + except AttributeError: + return self.__default__(tree.data, children, tree.meta) + else: + try: + wrapper = getattr(f, 'visit_wrapper', None) + if wrapper is not None: + return f.visit_wrapper(f, tree.data, children, tree.meta) + else: + return f(children) + except (GrammarError, Discard): + raise + except Exception as e: + raise VisitError(tree.data, tree, e) + + def _call_userfunc_token(self, token): + try: + f = getattr(self, token.type) + except AttributeError: + return self.__default_token__(token) + else: + try: + return f(token) + except (GrammarError, Discard): + raise + except Exception as e: + raise VisitError(token.type, token, e) + + def _transform_children(self, children): + for c in children: + try: + if isinstance(c, Tree): + yield self._transform_tree(c) + elif self.__visit_tokens__ and isinstance(c, Token): + yield 
self._call_userfunc_token(c) + else: + yield c + except Discard: + pass + + def _transform_tree(self, tree): + children = list(self._transform_children(tree.children)) + return self._call_userfunc(tree, children) + + def transform(self, tree): + #-- + return self._transform_tree(tree) + + def __mul__(self, other): + #-- + return TransformerChain(self, other) + + def __default__(self, data, children, meta): + #-- + return Tree(data, children, meta) + + def __default_token__(self, token): + #-- + return token + + +def merge_transformers(base_transformer=None, **transformers_to_merge): + #-- + if base_transformer is None: + base_transformer = Transformer() + for prefix, transformer in transformers_to_merge.items(): + for method_name in dir(transformer): + method = getattr(transformer, method_name) + if not callable(method): + continue + if method_name.startswith("_") or method_name == "transform": + continue + prefixed_method = prefix + "__" + method_name + if hasattr(base_transformer, prefixed_method): + raise AttributeError("Cannot merge: method '%s' appears more than once" % prefixed_method) + + setattr(base_transformer, prefixed_method, method) + + return base_transformer + + +class InlineTransformer(Transformer): ## + + def _call_userfunc(self, tree, new_children=None): + ## + + children = new_children if new_children is not None else tree.children + try: + f = getattr(self, tree.data) + except AttributeError: + return self.__default__(tree.data, children, tree.meta) + else: + return f(*children) + + +class TransformerChain(object): + def __init__(self, *transformers): + self.transformers = transformers + + def transform(self, tree): + for t in self.transformers: + tree = t.transform(tree) + return tree + + def __mul__(self, other): + return TransformerChain(*self.transformers + (other,)) + + +class Transformer_InPlace(Transformer): + #-- + def _transform_tree(self, tree): ## + + return self._call_userfunc(tree) + + def transform(self, tree): + for subtree in tree.iter_subtrees(): + subtree.children = list(self._transform_children(subtree.children)) + + return self._transform_tree(tree) + + +class Transformer_NonRecursive(Transformer): + #-- + + def transform(self, tree): + ## + + rev_postfix = [] + q = [tree] + while q: + t = q.pop() + rev_postfix.append(t) + if isinstance(t, Tree): + q += t.children + + ## + + stack = [] + for x in reversed(rev_postfix): + if isinstance(x, Tree): + size = len(x.children) + if size: + args = stack[-size:] + del stack[-size:] + else: + args = [] + stack.append(self._call_userfunc(x, args)) + elif self.__visit_tokens__ and isinstance(x, Token): + stack.append(self._call_userfunc_token(x)) + else: + stack.append(x) + + t ,= stack ## + + return t + + +class Transformer_InPlaceRecursive(Transformer): + #-- + def _transform_tree(self, tree): + tree.children = list(self._transform_children(tree.children)) + return self._call_userfunc(tree) + + +## + + +class VisitorBase: + def _call_userfunc(self, tree): + return getattr(self, tree.data, self.__default__)(tree) + + def __default__(self, tree): + #-- + return tree + + def __class_getitem__(cls, _): + return cls + + +class Visitor(VisitorBase): + #-- + + def visit(self, tree): + #-- + for subtree in tree.iter_subtrees(): + self._call_userfunc(subtree) + return tree + + def visit_topdown(self,tree): + #-- + for subtree in tree.iter_subtrees_topdown(): + self._call_userfunc(subtree) + return tree + + +class Visitor_Recursive(VisitorBase): + #-- + + def visit(self, tree): + #-- + for child in tree.children: + if 
isinstance(child, Tree): + self.visit(child) + + self._call_userfunc(tree) + return tree + + def visit_topdown(self,tree): + #-- + self._call_userfunc(tree) + + for child in tree.children: + if isinstance(child, Tree): + self.visit_topdown(child) + + return tree + + +def visit_children_decor(func): + #-- + @wraps(func) + def inner(cls, tree): + values = cls.visit_children(tree) + return func(cls, values) + return inner + + +class Interpreter(_Decoratable): + #-- + + def visit(self, tree): + f = getattr(self, tree.data) + wrapper = getattr(f, 'visit_wrapper', None) + if wrapper is not None: + return f.visit_wrapper(f, tree.data, tree.children, tree.meta) + else: + return f(tree) + + def visit_children(self, tree): + return [self.visit(child) if isinstance(child, Tree) else child + for child in tree.children] + + def __getattr__(self, name): + return self.__default__ + + def __default__(self, tree): + return self.visit_children(tree) + + +## + + +def _apply_decorator(obj, decorator, **kwargs): + try: + _apply = obj._apply_decorator + except AttributeError: + return decorator(obj, **kwargs) + else: + return _apply(decorator, **kwargs) + + +def _inline_args__func(func): + @wraps(func) + def create_decorator(_f, with_self): + if with_self: + def f(self, children): + return _f(self, *children) + else: + def f(self, children): + return _f(*children) + return f + + return smart_decorator(func, create_decorator) + + +def inline_args(obj): ## + + return _apply_decorator(obj, _inline_args__func) + + +def _visitor_args_func_dec(func, visit_wrapper=None, static=False): + def create_decorator(_f, with_self): + if with_self: + def f(self, *args, **kwargs): + return _f(self, *args, **kwargs) + else: + def f(self, *args, **kwargs): + return _f(*args, **kwargs) + return f + + if static: + f = wraps(func)(create_decorator(func, False)) + else: + f = smart_decorator(func, create_decorator) + f.vargs_applied = True + f.visit_wrapper = visit_wrapper + return f + + +def _vargs_inline(f, _data, children, _meta): + return f(*children) +def _vargs_meta_inline(f, _data, children, meta): + return f(meta, *children) +def _vargs_meta(f, _data, children, meta): + return f(children, meta) ## + +def _vargs_tree(f, data, children, meta): + return f(Tree(data, children, meta)) + + +def v_args(inline=False, meta=False, tree=False, wrapper=None): + #-- + if tree and (meta or inline): + raise ValueError("Visitor functions cannot combine 'tree' with 'meta' or 'inline'.") + + func = None + if meta: + if inline: + func = _vargs_meta_inline + else: + func = _vargs_meta + elif inline: + func = _vargs_inline + elif tree: + func = _vargs_tree + + if wrapper is not None: + if func is not None: + raise ValueError("Cannot use 'wrapper' along with 'tree', 'meta' or 'inline'.") + func = wrapper + + def _visitor_args_dec(obj): + return _apply_decorator(obj, _visitor_args_func_dec, visit_wrapper=func) + return _visitor_args_dec + + + + +class Symbol(Serialize): + __slots__ = ('name',) + + is_term = NotImplemented + + def __init__(self, name): + self.name = name + + def __eq__(self, other): + assert isinstance(other, Symbol), other + return self.is_term == other.is_term and self.name == other.name + + def __ne__(self, other): + return not (self == other) + + def __hash__(self): + return hash(self.name) + + def __repr__(self): + return '%s(%r)' % (type(self).__name__, self.name) + + fullrepr = property(__repr__) + + +class Terminal(Symbol): + __serialize_fields__ = 'name', 'filter_out' + + is_term = True + + def __init__(self, name, 
filter_out=False): + self.name = name + self.filter_out = filter_out + + @property + def fullrepr(self): + return '%s(%r, %r)' % (type(self).__name__, self.name, self.filter_out) + + +class NonTerminal(Symbol): + __serialize_fields__ = 'name', + + is_term = False + + +class RuleOptions(Serialize): + __serialize_fields__ = 'keep_all_tokens', 'expand1', 'priority', 'template_source', 'empty_indices' + + def __init__(self, keep_all_tokens=False, expand1=False, priority=None, template_source=None, empty_indices=()): + self.keep_all_tokens = keep_all_tokens + self.expand1 = expand1 + self.priority = priority + self.template_source = template_source + self.empty_indices = empty_indices + + def __repr__(self): + return 'RuleOptions(%r, %r, %r, %r)' % ( + self.keep_all_tokens, + self.expand1, + self.priority, + self.template_source + ) + + +class Rule(Serialize): + #-- + __slots__ = ('origin', 'expansion', 'alias', 'options', 'order', '_hash') + + __serialize_fields__ = 'origin', 'expansion', 'order', 'alias', 'options' + __serialize_namespace__ = Terminal, NonTerminal, RuleOptions + + def __init__(self, origin, expansion, order=0, alias=None, options=None): + self.origin = origin + self.expansion = expansion + self.alias = alias + self.order = order + self.options = options or RuleOptions() + self._hash = hash((self.origin, tuple(self.expansion))) + + def _deserialize(self): + self._hash = hash((self.origin, tuple(self.expansion))) + + def __str__(self): + return '<%s : %s>' % (self.origin.name, ' '.join(x.name for x in self.expansion)) + + def __repr__(self): + return 'Rule(%r, %r, %r, %r)' % (self.origin, self.expansion, self.alias, self.options) + + def __hash__(self): + return self._hash + + def __eq__(self, other): + if not isinstance(other, Rule): + return False + return self.origin == other.origin and self.expansion == other.expansion + + + +from warnings import warn +from copy import copy + + +class Pattern(Serialize): + raw = None + type = None + + def __init__(self, value, flags=(), raw=None): + self.value = value + self.flags = frozenset(flags) + self.raw = raw + + def __repr__(self): + return repr(self.to_regexp()) + + ## + + def __hash__(self): + return hash((type(self), self.value, self.flags)) + + def __eq__(self, other): + return type(self) == type(other) and self.value == other.value and self.flags == other.flags + + def to_regexp(self): + raise NotImplementedError() + + def min_width(self): + raise NotImplementedError() + + def max_width(self): + raise NotImplementedError() + + if Py36: + ## + + def _get_flags(self, value): + for f in self.flags: + value = ('(?%s:%s)' % (f, value)) + return value + + else: + def _get_flags(self, value): + for f in self.flags: + value = ('(?%s)' % f) + value + return value + + + +class PatternStr(Pattern): + __serialize_fields__ = 'value', 'flags' + + type = "str" + + def to_regexp(self): + return self._get_flags(re.escape(self.value)) + + @property + def min_width(self): + return len(self.value) + max_width = min_width + + +class PatternRE(Pattern): + __serialize_fields__ = 'value', 'flags', '_width' + + type = "re" + + def to_regexp(self): + return self._get_flags(self.value) + + _width = None + def _get_width(self): + if self._width is None: + self._width = get_regexp_width(self.to_regexp()) + return self._width + + @property + def min_width(self): + return self._get_width()[0] + + @property + def max_width(self): + return self._get_width()[1] + + +class TerminalDef(Serialize): + __serialize_fields__ = 'name', 'pattern', 'priority' + 
__serialize_namespace__ = PatternStr, PatternRE + + def __init__(self, name, pattern, priority=1): + assert isinstance(pattern, Pattern), pattern + self.name = name + self.pattern = pattern + self.priority = priority + + def __repr__(self): + return '%s(%r, %r)' % (type(self).__name__, self.name, self.pattern) + + def user_repr(self): + if self.name.startswith('__'): ## + + return self.pattern.raw or self.name + else: + return self.name + + +class Token(Str): + #-- + __slots__ = ('type', 'start_pos', 'value', 'line', 'column', 'end_line', 'end_column', 'end_pos') + + def __new__(cls, type_, value, start_pos=None, line=None, column=None, end_line=None, end_column=None, end_pos=None, pos_in_stream=None): + try: + inst = super(Token, cls).__new__(cls, value) + except UnicodeDecodeError: + value = value.decode('latin1') + inst = super(Token, cls).__new__(cls, value) + + inst.type = type_ + inst.start_pos = start_pos if start_pos is not None else pos_in_stream + inst.value = value + inst.line = line + inst.column = column + inst.end_line = end_line + inst.end_column = end_column + inst.end_pos = end_pos + return inst + + @property + def pos_in_stream(self): + warn("Attribute Token.pos_in_stream was renamed to Token.start_pos", DeprecationWarning, 2) + return self.start_pos + + def update(self, type_=None, value=None): + return Token.new_borrow_pos( + type_ if type_ is not None else self.type, + value if value is not None else self.value, + self + ) + + @classmethod + def new_borrow_pos(cls, type_, value, borrow_t): + return cls(type_, value, borrow_t.start_pos, borrow_t.line, borrow_t.column, borrow_t.end_line, borrow_t.end_column, borrow_t.end_pos) + + def __reduce__(self): + return (self.__class__, (self.type, self.value, self.start_pos, self.line, self.column)) + + def __repr__(self): + return 'Token(%r, %r)' % (self.type, self.value) + + def __deepcopy__(self, memo): + return Token(self.type, self.value, self.start_pos, self.line, self.column) + + def __eq__(self, other): + if isinstance(other, Token) and self.type != other.type: + return False + + return Str.__eq__(self, other) + + __hash__ = Str.__hash__ + + +class LineCounter: + __slots__ = 'char_pos', 'line', 'column', 'line_start_pos', 'newline_char' + + def __init__(self, newline_char): + self.newline_char = newline_char + self.char_pos = 0 + self.line = 1 + self.column = 1 + self.line_start_pos = 0 + + def __eq__(self, other): + if not isinstance(other, LineCounter): + return NotImplemented + + return self.char_pos == other.char_pos and self.newline_char == other.newline_char + + def feed(self, token, test_newline=True): + #-- + if test_newline: + newlines = token.count(self.newline_char) + if newlines: + self.line += newlines + self.line_start_pos = self.char_pos + token.rindex(self.newline_char) + 1 + + self.char_pos += len(token) + self.column = self.char_pos - self.line_start_pos + 1 + + +class UnlessCallback: + def __init__(self, scanner): + self.scanner = scanner + + def __call__(self, t): + res = self.scanner.match(t.value, 0) + if res: + _value, t.type = res + return t + + +class CallChain: + def __init__(self, callback1, callback2, cond): + self.callback1 = callback1 + self.callback2 = callback2 + self.cond = cond + + def __call__(self, t): + t2 = self.callback1(t) + return self.callback2(t) if self.cond(t2) else t2 + + +def _get_match(re_, regexp, s, flags): + m = re_.match(regexp, s, flags) + if m: + return m.group(0) + +def _create_unless(terminals, g_regex_flags, re_, use_bytes): + tokens_by_type = classify(terminals, 
lambda t: type(t.pattern)) + assert len(tokens_by_type) <= 2, tokens_by_type.keys() + embedded_strs = set() + callback = {} + for retok in tokens_by_type.get(PatternRE, []): + unless = [] + for strtok in tokens_by_type.get(PatternStr, []): + if strtok.priority > retok.priority: + continue + s = strtok.pattern.value + if s == _get_match(re_, retok.pattern.to_regexp(), s, g_regex_flags): + unless.append(strtok) + if strtok.pattern.flags <= retok.pattern.flags: + embedded_strs.add(strtok) + if unless: + callback[retok.name] = UnlessCallback(Scanner(unless, g_regex_flags, re_, match_whole=True, use_bytes=use_bytes)) + + new_terminals = [t for t in terminals if t not in embedded_strs] + return new_terminals, callback + + + +class Scanner: + def __init__(self, terminals, g_regex_flags, re_, use_bytes, match_whole=False): + self.terminals = terminals + self.g_regex_flags = g_regex_flags + self.re_ = re_ + self.use_bytes = use_bytes + self.match_whole = match_whole + + self.allowed_types = {t.name for t in self.terminals} + + self._mres = self._build_mres(terminals, len(terminals)) + + def _build_mres(self, terminals, max_size): + ## + + ## + + ## + + postfix = '$' if self.match_whole else '' + mres = [] + while terminals: + pattern = u'|'.join(u'(?P<%s>%s)' % (t.name, t.pattern.to_regexp() + postfix) for t in terminals[:max_size]) + if self.use_bytes: + pattern = pattern.encode('latin-1') + try: + mre = self.re_.compile(pattern, self.g_regex_flags) + except AssertionError: ## + + return self._build_mres(terminals, max_size//2) + + mres.append((mre, {i: n for n, i in mre.groupindex.items()})) + terminals = terminals[max_size:] + return mres + + def match(self, text, pos): + for mre, type_from_index in self._mres: + m = mre.match(text, pos) + if m: + return m.group(0), type_from_index[m.lastindex] + + +def _regexp_has_newline(r): + #-- + return '\n' in r or '\\n' in r or '\\s' in r or '[^' in r or ('(?s' in r and '.' in r) + + +class Lexer(object): + #-- + lex = NotImplemented + + def make_lexer_state(self, text): + line_ctr = LineCounter(b'\n' if isinstance(text, bytes) else '\n') + return LexerState(text, line_ctr) + + +class TraditionalLexer(Lexer): + + def __init__(self, conf): + terminals = list(conf.terminals) + assert all(isinstance(t, TerminalDef) for t in terminals), terminals + + self.re = conf.re_module + + if not conf.skip_validation: + ## + + for t in terminals: + try: + self.re.compile(t.pattern.to_regexp(), conf.g_regex_flags) + except self.re.error: + raise LexError("Cannot compile token %s: %s" % (t.name, t.pattern)) + + if t.pattern.min_width == 0: + raise LexError("Lexer does not allow zero-width terminals. 
(%s: %s)" % (t.name, t.pattern)) + + if not (set(conf.ignore) <= {t.name for t in terminals}): + raise LexError("Ignore terminals are not defined: %s" % (set(conf.ignore) - {t.name for t in terminals})) + + ## + + self.newline_types = frozenset(t.name for t in terminals if _regexp_has_newline(t.pattern.to_regexp())) + self.ignore_types = frozenset(conf.ignore) + + terminals.sort(key=lambda x: (-x.priority, -x.pattern.max_width, -len(x.pattern.value), x.name)) + self.terminals = terminals + self.user_callbacks = conf.callbacks + self.g_regex_flags = conf.g_regex_flags + self.use_bytes = conf.use_bytes + self.terminals_by_name = conf.terminals_by_name + + self._scanner = None + + def _build_scanner(self): + terminals, self.callback = _create_unless(self.terminals, self.g_regex_flags, self.re, self.use_bytes) + assert all(self.callback.values()) + + for type_, f in self.user_callbacks.items(): + if type_ in self.callback: + ## + + self.callback[type_] = CallChain(self.callback[type_], f, lambda t: t.type == type_) + else: + self.callback[type_] = f + + self._scanner = Scanner(terminals, self.g_regex_flags, self.re, self.use_bytes) + + @property + def scanner(self): + if self._scanner is None: + self._build_scanner() + return self._scanner + + def match(self, text, pos): + return self.scanner.match(text, pos) + + def lex(self, state, parser_state): + with suppress(EOFError): + while True: + yield self.next_token(state, parser_state) + + def next_token(self, lex_state, parser_state=None): + line_ctr = lex_state.line_ctr + while line_ctr.char_pos < len(lex_state.text): + res = self.match(lex_state.text, line_ctr.char_pos) + if not res: + allowed = self.scanner.allowed_types - self.ignore_types + if not allowed: + allowed = {""} + raise UnexpectedCharacters(lex_state.text, line_ctr.char_pos, line_ctr.line, line_ctr.column, + allowed=allowed, token_history=lex_state.last_token and [lex_state.last_token], + state=parser_state, terminals_by_name=self.terminals_by_name) + + value, type_ = res + + if type_ not in self.ignore_types: + t = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column) + line_ctr.feed(value, type_ in self.newline_types) + t.end_line = line_ctr.line + t.end_column = line_ctr.column + t.end_pos = line_ctr.char_pos + if t.type in self.callback: + t = self.callback[t.type](t) + if not isinstance(t, Token): + raise LexError("Callbacks must return a token (returned %r)" % t) + lex_state.last_token = t + return t + else: + if type_ in self.callback: + t2 = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column) + self.callback[type_](t2) + line_ctr.feed(value, type_ in self.newline_types) + + ## + + raise EOFError(self) + + +class LexerState(object): + __slots__ = 'text', 'line_ctr', 'last_token' + + def __init__(self, text, line_ctr, last_token=None): + self.text = text + self.line_ctr = line_ctr + self.last_token = last_token + + def __eq__(self, other): + if not isinstance(other, LexerState): + return NotImplemented + + return self.text is other.text and self.line_ctr == other.line_ctr and self.last_token == other.last_token + + def __copy__(self): + return type(self)(self.text, copy(self.line_ctr), self.last_token) + + +class ContextualLexer(Lexer): + + def __init__(self, conf, states, always_accept=()): + terminals = list(conf.terminals) + terminals_by_name = conf.terminals_by_name + + trad_conf = copy(conf) + trad_conf.terminals = terminals + + lexer_by_tokens = {} + self.lexers = {} + for state, accepts in states.items(): + key = frozenset(accepts) 
+ try: + lexer = lexer_by_tokens[key] + except KeyError: + accepts = set(accepts) | set(conf.ignore) | set(always_accept) + lexer_conf = copy(trad_conf) + lexer_conf.terminals = [terminals_by_name[n] for n in accepts if n in terminals_by_name] + lexer = TraditionalLexer(lexer_conf) + lexer_by_tokens[key] = lexer + + self.lexers[state] = lexer + + assert trad_conf.terminals is terminals + self.root_lexer = TraditionalLexer(trad_conf) + + def make_lexer_state(self, text): + return self.root_lexer.make_lexer_state(text) + + def lex(self, lexer_state, parser_state): + try: + while True: + lexer = self.lexers[parser_state.position] + yield lexer.next_token(lexer_state, parser_state) + except EOFError: + pass + except UnexpectedCharacters as e: + ## + + ## + + try: + last_token = lexer_state.last_token ## + + token = self.root_lexer.next_token(lexer_state, parser_state) + raise UnexpectedToken(token, e.allowed, state=parser_state, token_history=[last_token], terminals_by_name=self.root_lexer.terminals_by_name) + except UnexpectedCharacters: + raise e ## + + +class LexerThread(object): + #-- + + def __init__(self, lexer, text): + self.lexer = lexer + self.state = lexer.make_lexer_state(text) + + def lex(self, parser_state): + return self.lexer.lex(self.state, parser_state) + + def __copy__(self): + copied = object.__new__(LexerThread) + copied.lexer = self.lexer + copied.state = copy(self.state) + return copied + + + +class LexerConf(Serialize): + __serialize_fields__ = 'terminals', 'ignore', 'g_regex_flags', 'use_bytes', 'lexer_type' + __serialize_namespace__ = TerminalDef, + + def __init__(self, terminals, re_module, ignore=(), postlex=None, callbacks=None, g_regex_flags=0, skip_validation=False, use_bytes=False): + self.terminals = terminals + self.terminals_by_name = {t.name: t for t in self.terminals} + assert len(self.terminals) == len(self.terminals_by_name) + self.ignore = ignore + self.postlex = postlex + self.callbacks = callbacks or {} + self.g_regex_flags = g_regex_flags + self.re_module = re_module + self.skip_validation = skip_validation + self.use_bytes = use_bytes + self.lexer_type = None + + @property + def tokens(self): + warn("LexerConf.tokens is deprecated. 
Use LexerConf.terminals instead", DeprecationWarning) + return self.terminals + + def _deserialize(self): + self.terminals_by_name = {t.name: t for t in self.terminals} + + def __deepcopy__(self, memo=None): + return type(self)( + deepcopy(self.terminals, memo), + self.re_module, + deepcopy(self.ignore, memo), + deepcopy(self.postlex, memo), + deepcopy(self.callbacks, memo), + deepcopy(self.g_regex_flags, memo), + deepcopy(self.skip_validation, memo), + deepcopy(self.use_bytes, memo), + ) + + +class ParserConf(Serialize): + __serialize_fields__ = 'rules', 'start', 'parser_type' + + def __init__(self, rules, callbacks, start): + assert isinstance(start, list) + self.rules = rules + self.callbacks = callbacks + self.start = start + + self.parser_type = None + + +from functools import partial, wraps +from itertools import repeat, product + + +class ExpandSingleChild: + def __init__(self, node_builder): + self.node_builder = node_builder + + def __call__(self, children): + if len(children) == 1: + return children[0] + else: + return self.node_builder(children) + + + +class PropagatePositions: + def __init__(self, node_builder, node_filter=None): + self.node_builder = node_builder + self.node_filter = node_filter + + def __call__(self, children): + res = self.node_builder(children) + + if isinstance(res, Tree): + ## + + ## + + ## + + ## + + + res_meta = res.meta + + first_meta = self._pp_get_meta(children) + if first_meta is not None: + if not hasattr(res_meta, 'line'): + ## + + res_meta.line = getattr(first_meta, 'container_line', first_meta.line) + res_meta.column = getattr(first_meta, 'container_column', first_meta.column) + res_meta.start_pos = getattr(first_meta, 'container_start_pos', first_meta.start_pos) + res_meta.empty = False + + res_meta.container_line = getattr(first_meta, 'container_line', first_meta.line) + res_meta.container_column = getattr(first_meta, 'container_column', first_meta.column) + + last_meta = self._pp_get_meta(reversed(children)) + if last_meta is not None: + if not hasattr(res_meta, 'end_line'): + res_meta.end_line = getattr(last_meta, 'container_end_line', last_meta.end_line) + res_meta.end_column = getattr(last_meta, 'container_end_column', last_meta.end_column) + res_meta.end_pos = getattr(last_meta, 'container_end_pos', last_meta.end_pos) + res_meta.empty = False + + res_meta.container_end_line = getattr(last_meta, 'container_end_line', last_meta.end_line) + res_meta.container_end_column = getattr(last_meta, 'container_end_column', last_meta.end_column) + + return res + + def _pp_get_meta(self, children): + for c in children: + if self.node_filter is not None and not self.node_filter(c): + continue + if isinstance(c, Tree): + if not c.meta.empty: + return c.meta + elif isinstance(c, Token): + return c + +def make_propagate_positions(option): + if callable(option): + return partial(PropagatePositions, node_filter=option) + elif option is True: + return PropagatePositions + elif option is False: + return None + + raise ConfigurationError('Invalid option for propagate_positions: %r' % option) + + +class ChildFilter: + def __init__(self, to_include, append_none, node_builder): + self.node_builder = node_builder + self.to_include = to_include + self.append_none = append_none + + def __call__(self, children): + filtered = [] + + for i, to_expand, add_none in self.to_include: + if add_none: + filtered += [None] * add_none + if to_expand: + filtered += children[i].children + else: + filtered.append(children[i]) + + if self.append_none: + filtered += [None] * 
self.append_none + + return self.node_builder(filtered) + + +class ChildFilterLALR(ChildFilter): + #-- + + def __call__(self, children): + filtered = [] + for i, to_expand, add_none in self.to_include: + if add_none: + filtered += [None] * add_none + if to_expand: + if filtered: + filtered += children[i].children + else: ## + + filtered = children[i].children + else: + filtered.append(children[i]) + + if self.append_none: + filtered += [None] * self.append_none + + return self.node_builder(filtered) + + +class ChildFilterLALR_NoPlaceholders(ChildFilter): + #-- + def __init__(self, to_include, node_builder): + self.node_builder = node_builder + self.to_include = to_include + + def __call__(self, children): + filtered = [] + for i, to_expand in self.to_include: + if to_expand: + if filtered: + filtered += children[i].children + else: ## + + filtered = children[i].children + else: + filtered.append(children[i]) + return self.node_builder(filtered) + + +def _should_expand(sym): + return not sym.is_term and sym.name.startswith('_') + + +def maybe_create_child_filter(expansion, keep_all_tokens, ambiguous, _empty_indices): + ## + + if _empty_indices: + assert _empty_indices.count(False) == len(expansion) + s = ''.join(str(int(b)) for b in _empty_indices) + empty_indices = [len(ones) for ones in s.split('0')] + assert len(empty_indices) == len(expansion)+1, (empty_indices, len(expansion)) + else: + empty_indices = [0] * (len(expansion)+1) + + to_include = [] + nones_to_add = 0 + for i, sym in enumerate(expansion): + nones_to_add += empty_indices[i] + if keep_all_tokens or not (sym.is_term and sym.filter_out): + to_include.append((i, _should_expand(sym), nones_to_add)) + nones_to_add = 0 + + nones_to_add += empty_indices[len(expansion)] + + if _empty_indices or len(to_include) < len(expansion) or any(to_expand for i, to_expand,_ in to_include): + if _empty_indices or ambiguous: + return partial(ChildFilter if ambiguous else ChildFilterLALR, to_include, nones_to_add) + else: + ## + + return partial(ChildFilterLALR_NoPlaceholders, [(i, x) for i,x,_ in to_include]) + + +class AmbiguousExpander: + #-- + def __init__(self, to_expand, tree_class, node_builder): + self.node_builder = node_builder + self.tree_class = tree_class + self.to_expand = to_expand + + def __call__(self, children): + def _is_ambig_tree(t): + return hasattr(t, 'data') and t.data == '_ambig' + + ## + + ## + + ## + + ## + + ambiguous = [] + for i, child in enumerate(children): + if _is_ambig_tree(child): + if i in self.to_expand: + ambiguous.append(i) + + child.expand_kids_by_data('_ambig') + + if not ambiguous: + return self.node_builder(children) + + expand = [iter(child.children) if i in ambiguous else repeat(child) for i, child in enumerate(children)] + return self.tree_class('_ambig', [self.node_builder(list(f[0])) for f in product(zip(*expand))]) + + +def maybe_create_ambiguous_expander(tree_class, expansion, keep_all_tokens): + to_expand = [i for i, sym in enumerate(expansion) + if keep_all_tokens or ((not (sym.is_term and sym.filter_out)) and _should_expand(sym))] + if to_expand: + return partial(AmbiguousExpander, to_expand, tree_class) + + +class AmbiguousIntermediateExpander: + #-- + + def __init__(self, tree_class, node_builder): + self.node_builder = node_builder + self.tree_class = tree_class + + def __call__(self, children): + def _is_iambig_tree(child): + return hasattr(child, 'data') and child.data == '_iambig' + + def _collapse_iambig(children): + #-- + + ## + + ## + + if children and _is_iambig_tree(children[0]): + 
iambig_node = children[0] + result = [] + for grandchild in iambig_node.children: + collapsed = _collapse_iambig(grandchild.children) + if collapsed: + for child in collapsed: + child.children += children[1:] + result += collapsed + else: + new_tree = self.tree_class('_inter', grandchild.children + children[1:]) + result.append(new_tree) + return result + + collapsed = _collapse_iambig(children) + if collapsed: + processed_nodes = [self.node_builder(c.children) for c in collapsed] + return self.tree_class('_ambig', processed_nodes) + + return self.node_builder(children) + + +def ptb_inline_args(func): + @wraps(func) + def f(children): + return func(*children) + return f + + +def inplace_transformer(func): + @wraps(func) + def f(children): + ## + + tree = Tree(func.__name__, children) + return func(tree) + return f + + +def apply_visit_wrapper(func, name, wrapper): + if wrapper is _vargs_meta or wrapper is _vargs_meta_inline: + raise NotImplementedError("Meta args not supported for internal transformer") + + @wraps(func) + def f(children): + return wrapper(func, name, children, None) + return f + + +class ParseTreeBuilder: + def __init__(self, rules, tree_class, propagate_positions=False, ambiguous=False, maybe_placeholders=False): + self.tree_class = tree_class + self.propagate_positions = propagate_positions + self.ambiguous = ambiguous + self.maybe_placeholders = maybe_placeholders + + self.rule_builders = list(self._init_builders(rules)) + + def _init_builders(self, rules): + propagate_positions = make_propagate_positions(self.propagate_positions) + + for rule in rules: + options = rule.options + keep_all_tokens = options.keep_all_tokens + expand_single_child = options.expand1 + + wrapper_chain = list(filter(None, [ + (expand_single_child and not rule.alias) and ExpandSingleChild, + maybe_create_child_filter(rule.expansion, keep_all_tokens, self.ambiguous, options.empty_indices if self.maybe_placeholders else None), + propagate_positions, + self.ambiguous and maybe_create_ambiguous_expander(self.tree_class, rule.expansion, keep_all_tokens), + self.ambiguous and partial(AmbiguousIntermediateExpander, self.tree_class) + ])) + + yield rule, wrapper_chain + + def create_callback(self, transformer=None): + callbacks = {} + + for rule, wrapper_chain in self.rule_builders: + + user_callback_name = rule.alias or rule.options.template_source or rule.origin.name + try: + f = getattr(transformer, user_callback_name) + ## + + wrapper = getattr(f, 'visit_wrapper', None) + if wrapper is not None: + f = apply_visit_wrapper(f, user_callback_name, wrapper) + else: + if isinstance(transformer, InlineTransformer): + f = ptb_inline_args(f) + elif isinstance(transformer, Transformer_InPlace): + f = inplace_transformer(f) + except AttributeError: + f = partial(self.tree_class, user_callback_name) + + for w in wrapper_chain: + f = w(f) + + if rule in callbacks: + raise GrammarError("Rule '%s' already exists" % (rule,)) + + callbacks[rule] = f + + return callbacks + + + +class LALR_Parser(Serialize): + def __init__(self, parser_conf, debug=False): + analysis = LALR_Analyzer(parser_conf, debug=debug) + analysis.compute_lalr() + callbacks = parser_conf.callbacks + + self._parse_table = analysis.parse_table + self.parser_conf = parser_conf + self.parser = _Parser(analysis.parse_table, callbacks, debug) + + @classmethod + def deserialize(cls, data, memo, callbacks, debug=False): + inst = cls.__new__(cls) + inst._parse_table = IntParseTable.deserialize(data, memo) + inst.parser = _Parser(inst._parse_table, 
callbacks, debug) + return inst + + def serialize(self, memo): + return self._parse_table.serialize(memo) + + def parse_interactive(self, lexer, start): + return self.parser.parse(lexer, start, start_interactive=True) + + def parse(self, lexer, start, on_error=None): + try: + return self.parser.parse(lexer, start) + except UnexpectedInput as e: + if on_error is None: + raise + + while True: + if isinstance(e, UnexpectedCharacters): + s = e.interactive_parser.lexer_state.state + p = s.line_ctr.char_pos + + if not on_error(e): + raise e + + if isinstance(e, UnexpectedCharacters): + ## + + if p == s.line_ctr.char_pos: + s.line_ctr.feed(s.text[p:p+1]) + + try: + return e.interactive_parser.resume_parse() + except UnexpectedToken as e2: + if (isinstance(e, UnexpectedToken) + and e.token.type == e2.token.type == '$END' + and e.interactive_parser == e2.interactive_parser): + ## + + raise e2 + e = e2 + except UnexpectedCharacters as e2: + e = e2 + + +class ParseConf(object): + __slots__ = 'parse_table', 'callbacks', 'start', 'start_state', 'end_state', 'states' + + def __init__(self, parse_table, callbacks, start): + self.parse_table = parse_table + + self.start_state = self.parse_table.start_states[start] + self.end_state = self.parse_table.end_states[start] + self.states = self.parse_table.states + + self.callbacks = callbacks + self.start = start + + +class ParserState(object): + __slots__ = 'parse_conf', 'lexer', 'state_stack', 'value_stack' + + def __init__(self, parse_conf, lexer, state_stack=None, value_stack=None): + self.parse_conf = parse_conf + self.lexer = lexer + self.state_stack = state_stack or [self.parse_conf.start_state] + self.value_stack = value_stack or [] + + @property + def position(self): + return self.state_stack[-1] + + ## + + def __eq__(self, other): + if not isinstance(other, ParserState): + return NotImplemented + return len(self.state_stack) == len(other.state_stack) and self.position == other.position + + def __copy__(self): + return type(self)( + self.parse_conf, + self.lexer, ## + + copy(self.state_stack), + deepcopy(self.value_stack), + ) + + def copy(self): + return copy(self) + + def feed_token(self, token, is_end=False): + state_stack = self.state_stack + value_stack = self.value_stack + states = self.parse_conf.states + end_state = self.parse_conf.end_state + callbacks = self.parse_conf.callbacks + + while True: + state = state_stack[-1] + try: + action, arg = states[state][token.type] + except KeyError: + expected = {s for s in states[state].keys() if s.isupper()} + raise UnexpectedToken(token, expected, state=self, interactive_parser=None) + + assert arg != end_state + + if action is Shift: + ## + + assert not is_end + state_stack.append(arg) + value_stack.append(token if token.type not in callbacks else callbacks[token.type](token)) + return + else: + ## + + rule = arg + size = len(rule.expansion) + if size: + s = value_stack[-size:] + del state_stack[-size:] + del value_stack[-size:] + else: + s = [] + + value = callbacks[rule](s) + + _action, new_state = states[state_stack[-1]][rule.origin.name] + assert _action is Shift + state_stack.append(new_state) + value_stack.append(value) + + if is_end and state_stack[-1] == end_state: + return value_stack[-1] + +class _Parser(object): + def __init__(self, parse_table, callbacks, debug=False): + self.parse_table = parse_table + self.callbacks = callbacks + self.debug = debug + + def parse(self, lexer, start, value_stack=None, state_stack=None, start_interactive=False): + parse_conf = ParseConf(self.parse_table, 
self.callbacks, start) + parser_state = ParserState(parse_conf, lexer, state_stack, value_stack) + if start_interactive: + return InteractiveParser(self, parser_state, parser_state.lexer) + return self.parse_from_state(parser_state) + + + def parse_from_state(self, state): + ## + + try: + token = None + for token in state.lexer.lex(state): + state.feed_token(token) + + end_token = Token.new_borrow_pos('$END', '', token) if token else Token('$END', '', 0, 1, 1) + return state.feed_token(end_token, True) + except UnexpectedInput as e: + try: + e.interactive_parser = InteractiveParser(self, state, state.lexer) + except NameError: + pass + raise e + except Exception as e: + if self.debug: + print("") + print("STATE STACK DUMP") + print("----------------") + for i, s in enumerate(state.state_stack): + print('%d)' % i , s) + print("") + + raise + + +class Action: + def __init__(self, name): + self.name = name + def __str__(self): + return self.name + def __repr__(self): + return str(self) + +Shift = Action('Shift') +Reduce = Action('Reduce') + + +class ParseTable: + def __init__(self, states, start_states, end_states): + self.states = states + self.start_states = start_states + self.end_states = end_states + + def serialize(self, memo): + tokens = Enumerator() + rules = Enumerator() + + states = { + state: {tokens.get(token): ((1, arg.serialize(memo)) if action is Reduce else (0, arg)) + for token, (action, arg) in actions.items()} + for state, actions in self.states.items() + } + + return { + 'tokens': tokens.reversed(), + 'states': states, + 'start_states': self.start_states, + 'end_states': self.end_states, + } + + @classmethod + def deserialize(cls, data, memo): + tokens = data['tokens'] + states = { + state: {tokens[token]: ((Reduce, Rule.deserialize(arg, memo)) if action==1 else (Shift, arg)) + for token, (action, arg) in actions.items()} + for state, actions in data['states'].items() + } + return cls(states, data['start_states'], data['end_states']) + + +class IntParseTable(ParseTable): + + @classmethod + def from_ParseTable(cls, parse_table): + enum = list(parse_table.states) + state_to_idx = {s:i for i,s in enumerate(enum)} + int_states = {} + + for s, la in parse_table.states.items(): + la = {k:(v[0], state_to_idx[v[1]]) if v[0] is Shift else v + for k,v in la.items()} + int_states[ state_to_idx[s] ] = la + + + start_states = {start:state_to_idx[s] for start, s in parse_table.start_states.items()} + end_states = {start:state_to_idx[s] for start, s in parse_table.end_states.items()} + return cls(int_states, start_states, end_states) + + + +def _wrap_lexer(lexer_class): + future_interface = getattr(lexer_class, '__future_interface__', False) + if future_interface: + return lexer_class + else: + class CustomLexerWrapper(Lexer): + def __init__(self, lexer_conf): + self.lexer = lexer_class(lexer_conf) + def lex(self, lexer_state, parser_state): + return self.lexer.lex(lexer_state.text) + return CustomLexerWrapper + + +class MakeParsingFrontend: + def __init__(self, parser_type, lexer_type): + self.parser_type = parser_type + self.lexer_type = lexer_type + + def __call__(self, lexer_conf, parser_conf, options): + assert isinstance(lexer_conf, LexerConf) + assert isinstance(parser_conf, ParserConf) + parser_conf.parser_type = self.parser_type + lexer_conf.lexer_type = self.lexer_type + return ParsingFrontend(lexer_conf, parser_conf, options) + + def deserialize(self, data, memo, lexer_conf, callbacks, options): + parser_conf = ParserConf.deserialize(data['parser_conf'], memo) + parser = 
LALR_Parser.deserialize(data['parser'], memo, callbacks, options.debug) + parser_conf.callbacks = callbacks + return ParsingFrontend(lexer_conf, parser_conf, options, parser=parser) + + + + +class ParsingFrontend(Serialize): + __serialize_fields__ = 'lexer_conf', 'parser_conf', 'parser', 'options' + + def __init__(self, lexer_conf, parser_conf, options, parser=None): + self.parser_conf = parser_conf + self.lexer_conf = lexer_conf + self.options = options + + ## + + if parser: ## + + self.parser = parser + else: + create_parser = { + 'lalr': create_lalr_parser, + 'earley': create_earley_parser, + 'cyk': CYK_FrontEnd, + }[parser_conf.parser_type] + self.parser = create_parser(lexer_conf, parser_conf, options) + + ## + + lexer_type = lexer_conf.lexer_type + self.skip_lexer = False + if lexer_type in ('dynamic', 'dynamic_complete'): + assert lexer_conf.postlex is None + self.skip_lexer = True + return + + try: + create_lexer = { + 'standard': create_traditional_lexer, + 'contextual': create_contextual_lexer, + }[lexer_type] + except KeyError: + assert issubclass(lexer_type, Lexer), lexer_type + self.lexer = _wrap_lexer(lexer_type)(lexer_conf) + else: + self.lexer = create_lexer(lexer_conf, self.parser, lexer_conf.postlex) + + if lexer_conf.postlex: + self.lexer = PostLexConnector(self.lexer, lexer_conf.postlex) + + def _verify_start(self, start=None): + if start is None: + start_decls = self.parser_conf.start + if len(start_decls) > 1: + raise ConfigurationError("Lark initialized with more than 1 possible start rule. Must specify which start rule to parse", start_decls) + start ,= start_decls + elif start not in self.parser_conf.start: + raise ConfigurationError("Unknown start rule %s. Must be one of %r" % (start, self.parser_conf.start)) + return start + + def parse(self, text, start=None, on_error=None): + chosen_start = self._verify_start(start) + stream = text if self.skip_lexer else LexerThread(self.lexer, text) + kw = {} if on_error is None else {'on_error': on_error} + return self.parser.parse(stream, chosen_start, **kw) + + def parse_interactive(self, text=None, start=None): + chosen_start = self._verify_start(start) + if self.parser_conf.parser_type != 'lalr': + raise ConfigurationError("parse_interactive() currently only works with parser='lalr' ") + stream = text if self.skip_lexer else LexerThread(self.lexer, text) + return self.parser.parse_interactive(stream, chosen_start) + + +def get_frontend(parser, lexer): + assert_config(parser, ('lalr', 'earley', 'cyk')) + if not isinstance(lexer, type): ## + + expected = { + 'lalr': ('standard', 'contextual'), + 'earley': ('standard', 'dynamic', 'dynamic_complete'), + 'cyk': ('standard', ), + }[parser] + assert_config(lexer, expected, 'Parser %r does not support lexer %%r, expected one of %%s' % parser) + + return MakeParsingFrontend(parser, lexer) + + +def _get_lexer_callbacks(transformer, terminals): + result = {} + for terminal in terminals: + callback = getattr(transformer, terminal.name, None) + if callback is not None: + result[terminal.name] = callback + return result + +class PostLexConnector: + def __init__(self, lexer, postlexer): + self.lexer = lexer + self.postlexer = postlexer + + def make_lexer_state(self, text): + return self.lexer.make_lexer_state(text) + + def lex(self, lexer_state, parser_state): + i = self.lexer.lex(lexer_state, parser_state) + return self.postlexer.process(i) + + + +def create_traditional_lexer(lexer_conf, parser, postlex): + return TraditionalLexer(lexer_conf) + +def create_contextual_lexer(lexer_conf, 
parser, postlex): + states = {idx:list(t.keys()) for idx, t in parser._parse_table.states.items()} + always_accept = postlex.always_accept if postlex else () + return ContextualLexer(lexer_conf, states, always_accept=always_accept) + +def create_lalr_parser(lexer_conf, parser_conf, options=None): + debug = options.debug if options else False + return LALR_Parser(parser_conf, debug=debug) + + +create_earley_parser = NotImplemented +CYK_FrontEnd = NotImplemented + + + +class LarkOptions(Serialize): + #-- + OPTIONS_DOC = """ + **=== General Options ===** + + start + The start symbol. Either a string, or a list of strings for multiple possible starts (Default: "start") + debug + Display debug information and extra warnings. Use only when debugging (default: False) + When used with Earley, it generates a forest graph as "sppf.png", if 'dot' is installed. + transformer + Applies the transformer to every parse tree (equivalent to applying it after the parse, but faster) + propagate_positions + Propagates (line, column, end_line, end_column) attributes into all tree branches. + Accepts ``False``, ``True``, or a callable, which will filter which nodes to ignore when propagating. + maybe_placeholders + When ``True``, the ``[]`` operator returns ``None`` when not matched. + + When ``False``, ``[]`` behaves like the ``?`` operator, and returns no value at all. + (default= ``False``. Recommended to set to ``True``) + cache + Cache the results of the Lark grammar analysis, for x2 to x3 faster loading. LALR only for now. + + - When ``False``, does nothing (default) + - When ``True``, caches to a temporary file in the local directory + - When given a string, caches to the path pointed by the string + regex + When True, uses the ``regex`` module instead of the stdlib ``re``. + g_regex_flags + Flags that are applied to all terminals (both regex and strings) + keep_all_tokens + Prevent the tree builder from automagically removing "punctuation" tokens (default: False) + tree_class + Lark will produce trees comprised of instances of this class instead of the default ``lark.Tree``. + + **=== Algorithm Options ===** + + parser + Decides which parser engine to use. Accepts "earley" or "lalr". (Default: "earley"). + (there is also a "cyk" option for legacy) + lexer + Decides whether or not to use a lexer stage + + - "auto" (default): Choose for me based on the parser + - "standard": Use a standard lexer + - "contextual": Stronger lexer (only works with parser="lalr") + - "dynamic": Flexible and powerful (only with parser="earley") + - "dynamic_complete": Same as dynamic, but tries *every* variation of tokenizing possible. + ambiguity + Decides how to handle ambiguity in the parse. Only relevant if parser="earley" + + - "resolve": The parser will automatically choose the simplest derivation + (it chooses consistently: greedy for tokens, non-greedy for rules) + - "explicit": The parser will return all derivations wrapped in "_ambig" tree nodes (i.e. a forest). + - "forest": The parser will return the root of the shared packed parse forest. + + **=== Misc. / Domain Specific Options ===** + + postlex + Lexer post-processing (Default: None) Only works with the standard and contextual lexers. + priority + How priorities should be evaluated - auto, none, normal, invert (Default: auto) + lexer_callbacks + Dictionary of callbacks for the lexer. May alter tokens during lexing. Use with caution. + use_bytes + Accept an input of type ``bytes`` instead of ``str`` (Python 3 only). 
+ edit_terminals + A callback for editing the terminals before parse. + import_paths + A List of either paths or loader functions to specify from where grammars are imported + source_path + Override the source of from where the grammar was loaded. Useful for relative imports and unconventional grammar loading + **=== End of Options ===** + """ + if __doc__: + __doc__ += OPTIONS_DOC + + + ## + + ## + + ## + + ## + + ## + + ## + + ## + + ## + + _defaults = { + 'debug': False, + 'keep_all_tokens': False, + 'tree_class': None, + 'cache': False, + 'postlex': None, + 'parser': 'earley', + 'lexer': 'auto', + 'transformer': None, + 'start': 'start', + 'priority': 'auto', + 'ambiguity': 'auto', + 'regex': False, + 'propagate_positions': False, + 'lexer_callbacks': {}, + 'maybe_placeholders': False, + 'edit_terminals': None, + 'g_regex_flags': 0, + 'use_bytes': False, + 'import_paths': [], + 'source_path': None, + } + + def __init__(self, options_dict): + o = dict(options_dict) + + options = {} + for name, default in self._defaults.items(): + if name in o: + value = o.pop(name) + if isinstance(default, bool) and name not in ('cache', 'use_bytes', 'propagate_positions'): + value = bool(value) + else: + value = default + + options[name] = value + + if isinstance(options['start'], STRING_TYPE): + options['start'] = [options['start']] + + self.__dict__['options'] = options + + + assert_config(self.parser, ('earley', 'lalr', 'cyk', None)) + + if self.parser == 'earley' and self.transformer: + raise ConfigurationError('Cannot specify an embedded transformer when using the Earley algorithm. ' + 'Please use your transformer on the resulting parse tree, or use a different algorithm (i.e. LALR)') + + if o: + raise ConfigurationError("Unknown options: %s" % o.keys()) + + def __getattr__(self, name): + try: + return self.__dict__['options'][name] + except KeyError as e: + raise AttributeError(e) + + def __setattr__(self, name, value): + assert_config(name, self.options.keys(), "%r isn't a valid option. 
Expected one of: %s") + self.options[name] = value + + def serialize(self, memo): + return self.options + + @classmethod + def deserialize(cls, data, memo): + return cls(data) + + +## + +## + +_LOAD_ALLOWED_OPTIONS = {'postlex', 'transformer', 'lexer_callbacks', 'use_bytes', 'debug', 'g_regex_flags', 'regex', 'propagate_positions', 'tree_class'} + +_VALID_PRIORITY_OPTIONS = ('auto', 'normal', 'invert', None) +_VALID_AMBIGUITY_OPTIONS = ('auto', 'resolve', 'explicit', 'forest') + + +class PostLex(ABC): + @abstractmethod + def process(self, stream): + return stream + + always_accept = () + + +class Lark(Serialize): + #-- + def __init__(self, grammar, **options): + self.options = LarkOptions(options) + + ## + + use_regex = self.options.regex + if use_regex: + if regex: + re_module = regex + else: + raise ImportError('`regex` module must be installed if calling `Lark(regex=True)`.') + else: + re_module = re + + ## + + if self.options.source_path is None: + try: + self.source_path = grammar.name + except AttributeError: + self.source_path = '' + else: + self.source_path = self.options.source_path + + ## + + try: + read = grammar.read + except AttributeError: + pass + else: + grammar = read() + + cache_fn = None + cache_md5 = None + if isinstance(grammar, STRING_TYPE): + self.source_grammar = grammar + if self.options.use_bytes: + if not isascii(grammar): + raise ConfigurationError("Grammar must be ascii only, when use_bytes=True") + if sys.version_info[0] == 2 and self.options.use_bytes != 'force': + raise ConfigurationError("`use_bytes=True` may have issues on python2." + "Use `use_bytes='force'` to use it at your own risk.") + + if self.options.cache: + if self.options.parser != 'lalr': + raise ConfigurationError("cache only works with parser='lalr' for now") + + unhashable = ('transformer', 'postlex', 'lexer_callbacks', 'edit_terminals') + options_str = ''.join(k+str(v) for k, v in options.items() if k not in unhashable) + from . import __version__ + s = grammar + options_str + __version__ + str(sys.version_info[:2]) + cache_md5 = hashlib.md5(s.encode('utf8')).hexdigest() + + if isinstance(self.options.cache, STRING_TYPE): + cache_fn = self.options.cache + else: + if self.options.cache is not True: + raise ConfigurationError("cache argument must be bool or str") + ## + + cache_fn = tempfile.gettempdir() + '/.lark_cache_%s_%s_%s.tmp' % ((cache_md5,) + sys.version_info[:2]) + + if FS.exists(cache_fn): + logger.debug('Loading grammar from cache: %s', cache_fn) + ## + + for name in (set(options) - _LOAD_ALLOWED_OPTIONS): + del options[name] + with FS.open(cache_fn, 'rb') as f: + old_options = self.options + try: + file_md5 = f.readline().rstrip(b'\n') + cached_used_files = pickle.load(f) + if file_md5 == cache_md5.encode('utf8') and verify_used_files(cached_used_files): + cached_parser_data = pickle.load(f) + self._load(cached_parser_data, **options) + return + except Exception: ## + + logger.exception("Failed to load Lark from cache: %r. We will try to carry on." 
% cache_fn) + + ## + + ## + + self.options = old_options + + + ## + + self.grammar, used_files = load_grammar(grammar, self.source_path, self.options.import_paths, self.options.keep_all_tokens) + else: + assert isinstance(grammar, Grammar) + self.grammar = grammar + + + if self.options.lexer == 'auto': + if self.options.parser == 'lalr': + self.options.lexer = 'contextual' + elif self.options.parser == 'earley': + if self.options.postlex is not None: + logger.info("postlex can't be used with the dynamic lexer, so we use standard instead. " + "Consider using lalr with contextual instead of earley") + self.options.lexer = 'standard' + else: + self.options.lexer = 'dynamic' + elif self.options.parser == 'cyk': + self.options.lexer = 'standard' + else: + assert False, self.options.parser + lexer = self.options.lexer + if isinstance(lexer, type): + assert issubclass(lexer, Lexer) ## + + else: + assert_config(lexer, ('standard', 'contextual', 'dynamic', 'dynamic_complete')) + if self.options.postlex is not None and 'dynamic' in lexer: + raise ConfigurationError("Can't use postlex with a dynamic lexer. Use standard or contextual instead") + + if self.options.ambiguity == 'auto': + if self.options.parser == 'earley': + self.options.ambiguity = 'resolve' + else: + assert_config(self.options.parser, ('earley', 'cyk'), "%r doesn't support disambiguation. Use one of these parsers instead: %s") + + if self.options.priority == 'auto': + self.options.priority = 'normal' + + if self.options.priority not in _VALID_PRIORITY_OPTIONS: + raise ConfigurationError("invalid priority option: %r. Must be one of %r" % (self.options.priority, _VALID_PRIORITY_OPTIONS)) + assert self.options.ambiguity not in ('resolve__antiscore_sum', ), 'resolve__antiscore_sum has been replaced with the option priority="invert"' + if self.options.ambiguity not in _VALID_AMBIGUITY_OPTIONS: + raise ConfigurationError("invalid ambiguity option: %r. 
Must be one of %r" % (self.options.ambiguity, _VALID_AMBIGUITY_OPTIONS))
+
+        if self.options.parser is None:
+            terminals_to_keep = '*'
+        elif self.options.postlex is not None:
+            terminals_to_keep = set(self.options.postlex.always_accept)
+        else:
+            terminals_to_keep = set()
+
+        ##
+
+        self.terminals, self.rules, self.ignore_tokens = self.grammar.compile(self.options.start, terminals_to_keep)
+
+        if self.options.edit_terminals:
+            for t in self.terminals:
+                self.options.edit_terminals(t)
+
+        self._terminals_dict = {t.name: t for t in self.terminals}
+
+        ##
+
+        ##
+
+        if self.options.priority == 'invert':
+            for rule in self.rules:
+                if rule.options.priority is not None:
+                    rule.options.priority = -rule.options.priority
+        ##
+
+        ##
+
+        ##
+
+        elif self.options.priority is None:
+            for rule in self.rules:
+                if rule.options.priority is not None:
+                    rule.options.priority = None
+
+        ##
+
+        self.lexer_conf = LexerConf(
+                self.terminals, re_module, self.ignore_tokens, self.options.postlex,
+                self.options.lexer_callbacks, self.options.g_regex_flags, use_bytes=self.options.use_bytes
+            )
+
+        if self.options.parser:
+            self.parser = self._build_parser()
+        elif lexer:
+            self.lexer = self._build_lexer()
+
+        if cache_fn:
+            logger.debug('Saving grammar to cache: %s', cache_fn)
+            with FS.open(cache_fn, 'wb') as f:
+                f.write(cache_md5.encode('utf8') + b'\n')
+                pickle.dump(used_files, f)
+                self.save(f)
+
+    if __doc__:
+        __doc__ += "\n\n" + LarkOptions.OPTIONS_DOC
+
+    __serialize_fields__ = 'parser', 'rules', 'options'
+
+    def _build_lexer(self, dont_ignore=False):
+        lexer_conf = self.lexer_conf
+        if dont_ignore:
+            from copy import copy
+            lexer_conf = copy(lexer_conf)
+            lexer_conf.ignore = ()
+        return TraditionalLexer(lexer_conf)
+
+    def _prepare_callbacks(self):
+        self._callbacks = {}
+        ##
+
+        if self.options.ambiguity != 'forest':
+            self._parse_tree_builder = ParseTreeBuilder(
+                    self.rules,
+                    self.options.tree_class or Tree,
+                    self.options.propagate_positions,
+                    self.options.parser != 'lalr' and self.options.ambiguity == 'explicit',
+                    self.options.maybe_placeholders
+                )
+            self._callbacks = self._parse_tree_builder.create_callback(self.options.transformer)
+        self._callbacks.update(_get_lexer_callbacks(self.options.transformer, self.terminals))
+
+    def _build_parser(self):
+        self._prepare_callbacks()
+        parser_class = get_frontend(self.options.parser, self.options.lexer)
+        parser_conf = ParserConf(self.rules, self._callbacks, self.options.start)
+        return parser_class(self.lexer_conf, parser_conf, options=self.options)
+
+    def save(self, f):
+        #--
+        data, m = self.memo_serialize([TerminalDef, Rule])
+        pickle.dump({'data': data, 'memo': m}, f, protocol=pickle.HIGHEST_PROTOCOL)
+
+    @classmethod
+    def load(cls, f):
+        #--
+        inst = cls.__new__(cls)
+        return inst._load(f)
+
+    def _deserialize_lexer_conf(self, data, memo, options):
+        lexer_conf = LexerConf.deserialize(data['lexer_conf'], memo)
+        lexer_conf.callbacks = options.lexer_callbacks or {}
+        lexer_conf.re_module = regex if options.regex else re
+        lexer_conf.use_bytes = options.use_bytes
+        lexer_conf.g_regex_flags = options.g_regex_flags
+        lexer_conf.skip_validation = True
+        lexer_conf.postlex = options.postlex
+        return lexer_conf
+
+    def _load(self, f, **kwargs):
+        if isinstance(f, dict):
+            d = f
+        else:
+            d = pickle.load(f)
+        memo_json = d['memo']
+        data = d['data']
+
+        assert memo_json
+        memo = SerializeMemoizer.deserialize(memo_json, {'Rule': Rule, 'TerminalDef': TerminalDef}, {})
+        options = dict(data['options'])
+        if (set(kwargs) - _LOAD_ALLOWED_OPTIONS) & set(LarkOptions._defaults):
+            raise ConfigurationError("Some options are not allowed when loading a Parser: {}"
+                             .format(set(kwargs) - _LOAD_ALLOWED_OPTIONS))
+        options.update(kwargs)
+        self.options = LarkOptions.deserialize(options, memo)
+        self.rules = [Rule.deserialize(r, memo) for r in data['rules']]
+        self.source_path = ''
+        parser_class = get_frontend(self.options.parser, self.options.lexer)
+        self.lexer_conf = self._deserialize_lexer_conf(data['parser'], memo, self.options)
+        self.terminals = self.lexer_conf.terminals
+        self._prepare_callbacks()
+        self._terminals_dict = {t.name: t for t in self.terminals}
+        self.parser = parser_class.deserialize(
+            data['parser'],
+            memo,
+            self.lexer_conf,
+            self._callbacks,
+            self.options, ##
+
+        )
+        return self
+
+    @classmethod
+    def _load_from_dict(cls, data, memo, **kwargs):
+        inst = cls.__new__(cls)
+        return inst._load({'data': data, 'memo': memo}, **kwargs)
+
+    @classmethod
+    def open(cls, grammar_filename, rel_to=None, **options):
+        #--
+        if rel_to:
+            basepath = os.path.dirname(rel_to)
+            grammar_filename = os.path.join(basepath, grammar_filename)
+        with open(grammar_filename, encoding='utf8') as f:
+            return cls(f, **options)
+
+    @classmethod
+    def open_from_package(cls, package, grammar_path, search_paths=("",), **options):
+        #--
+        package_loader = FromPackageLoader(package, search_paths)
+        full_path, text = package_loader(None, grammar_path)
+        options.setdefault('source_path', full_path)
+        options.setdefault('import_paths', [])
+        options['import_paths'].append(package_loader)
+        return cls(text, **options)
+
+    def __repr__(self):
+        return 'Lark(open(%r), parser=%r, lexer=%r, ...)' % (self.source_path, self.options.parser, self.options.lexer)
+
+
+    def lex(self, text, dont_ignore=False):
+        #--
+        if not hasattr(self, 'lexer') or dont_ignore:
+            lexer = self._build_lexer(dont_ignore)
+        else:
+            lexer = self.lexer
+        lexer_thread = LexerThread(lexer, text)
+        stream = lexer_thread.lex(None)
+        if self.options.postlex:
+            return self.options.postlex.process(stream)
+        return stream
+
+    def get_terminal(self, name):
+        #--
+        return self._terminals_dict[name]
+
+    def parse_interactive(self, text=None, start=None):
+        #--
+        return self.parser.parse_interactive(text, start=start)
+
+    def parse(self, text, start=None, on_error=None):
+        #--
+        return self.parser.parse(text, start=start, on_error=on_error)
+
+    @property
+    def source(self):
+        warn("Attribute Lark.source was renamed to Lark.source_path", DeprecationWarning)
+        return self.source_path
+
+    @source.setter
+    def source(self, value):
+        self.source_path = value
+
+    @property
+    def grammar_source(self):
+        warn("Attribute Lark.grammar_source was renamed to Lark.source_grammar", DeprecationWarning)
+        return self.source_grammar
+
+    @grammar_source.setter
+    def grammar_source(self, value):
+        self.source_grammar = value
+
+
+
+class DedentError(LarkError):
+    pass
+
+class Indenter(PostLex):
+    def __init__(self):
+        self.paren_level = None
+        self.indent_level = None
+        assert self.tab_len > 0
+
+    def handle_NL(self, token):
+        if self.paren_level > 0:
+            return
+
+        yield token
+
+        indent_str = token.rsplit('\n', 1)[1] ##
+
+        indent = indent_str.count(' ') + indent_str.count('\t') * self.tab_len
+
+        if indent > self.indent_level[-1]:
+            self.indent_level.append(indent)
+            yield Token.new_borrow_pos(self.INDENT_type, indent_str, token)
+        else:
+            while indent < self.indent_level[-1]:
+                self.indent_level.pop()
+                yield Token.new_borrow_pos(self.DEDENT_type, indent_str, token)
+
+            if indent != self.indent_level[-1]:
+                raise DedentError('Unexpected dedent to column %s. Expected dedent to %s' % (indent, self.indent_level[-1]))
+
+    def _process(self, stream):
+        for token in stream:
+            if token.type == self.NL_type:
+                for t in self.handle_NL(token):
+                    yield t
+            else:
+                yield token
+
+            if token.type in self.OPEN_PAREN_types:
+                self.paren_level += 1
+            elif token.type in self.CLOSE_PAREN_types:
+                self.paren_level -= 1
+                assert self.paren_level >= 0
+
+        while len(self.indent_level) > 1:
+            self.indent_level.pop()
+            yield Token(self.DEDENT_type, '')
+
+        assert self.indent_level == [0], self.indent_level
+
+    def process(self, stream):
+        self.paren_level = 0
+        self.indent_level = [0]
+        return self._process(stream)
+
+    ##
+
+    @property
+    def always_accept(self):
+        return (self.NL_type,)
+
+
+import pickle, zlib, base64
+DATA = (
+{'parser': {'lexer_conf': {'terminals': [{'@': 0}, {'@': 1}, {'@': 2}, {'@': 3}, {'@': 4}, {'@': 5}, {'@': 6}, {'@': 7}, {'@': 8}, {'@': 9}, {'@': 10}, {'@': 11}, {'@': 12}, {'@': 13}, {'@': 14}, {'@': 15}, {'@': 16}, {'@': 17}, {'@': 18}, {'@': 19}, {'@': 20}, {'@': 21}, {'@': 22}, {'@': 23}, {'@': 24}, {'@': 25}, {'@': 26}, {'@': 27}, {'@': 28}, {'@': 29}, {'@': 30}, {'@': 31}, {'@': 32}, {'@': 33}, {'@': 34}, {'@': 35}, {'@': 36}, {'@': 37}, {'@': 38}, {'@': 39}, {'@': 40}, {'@': 41}, {'@': 42}, {'@': 43}, {'@': 44}, {'@': 45}, {'@': 46}, {'@': 47}, {'@': 48}, {'@': 49}, {'@': 50}, {'@': 51}, {'@': 52}, {'@': 53}, {'@': 54}], 'ignore': ['WS'], 'g_regex_flags': 0, 'use_bytes': False, 'lexer_type': 'contextual', '__type__': 'LexerConf'}, 'parser_conf': {'rules': [{'@': 55}, {'@': 56}, {'@': 57}, {'@': 58}, {'@': 59}, {'@': 60}, {'@': 61}, {'@': 62}, {'@': 63}, {'@': 64}, {'@': 65}, {'@': 66}, {'@': 67}, {'@': 68}, {'@': 69}, {'@': 70}, {'@': 71}, {'@': 72}, {'@': 73}, {'@': 74}, {'@': 75}, {'@': 76}, {'@': 77}, {'@': 78}, {'@': 79}, {'@': 80}, {'@': 81}, {'@': 82}, {'@': 83}, {'@': 84}, {'@': 85}, {'@': 86}, {'@': 87}, {'@': 88}, {'@': 89}, {'@': 90}, {'@': 91}, {'@': 92}, {'@': 93}, {'@': 94}, {'@': 95}, {'@': 96}, {'@': 97}, {'@': 98}, {'@': 99}, {'@': 100}, {'@': 101}, {'@': 102}, {'@': 103}, {'@': 104}, {'@': 105}, {'@': 106}, {'@': 107}, {'@': 108}, {'@': 109}, {'@': 110}, {'@': 111}, {'@': 112}, {'@': 113}, {'@': 114}, {'@': 115}, {'@': 116}, {'@': 117}, {'@': 118}, {'@': 119}, {'@': 120}, {'@': 121}, {'@': 122}, {'@': 123}, {'@': 124}, {'@': 125}, {'@': 126}, {'@': 127}, {'@': 128}, {'@': 129}, {'@': 130}, {'@': 131}, {'@': 132}, {'@': 133}, {'@': 134}, {'@': 135}, {'@': 136}, {'@': 137}, {'@': 138}, {'@': 139}, {'@': 140}, {'@': 141}, {'@': 142}, {'@': 143}, {'@': 144}, {'@': 145}, {'@': 146}, {'@': 147}, {'@': 148}, {'@': 149}, {'@': 150}, {'@': 151}, {'@': 152}, {'@': 153}, {'@': 154}, {'@': 155}, {'@': 156}, {'@': 157}, {'@': 158}, {'@': 159}, {'@': 160}, {'@': 161}, {'@': 162}, {'@': 163}, {'@': 164}, {'@': 165}, {'@': 166}, {'@': 167}, {'@': 168}, {'@': 169}, {'@': 170}, {'@': 171}, {'@': 172}, {'@': 173}, {'@': 174}, {'@': 175}, {'@': 176}, {'@': 177}, {'@': 178}, {'@': 179}, {'@': 180}, {'@': 181}, {'@': 182}, {'@': 183}, {'@': 184}, {'@': 185}, {'@': 186}, {'@': 187}, {'@': 188}, {'@': 189}, {'@': 190}, {'@': 191}, {'@': 192}, {'@': 193}, {'@': 194}, {'@': 195}, {'@': 196}, {'@': 197}, {'@': 198}, {'@': 199}, {'@': 200}, {'@': 201}, {'@': 202}, {'@': 203}, {'@': 204}, {'@': 205}, {'@': 206}, {'@': 207}, {'@': 208}, {'@': 209}, {'@': 210}, {'@': 211}, {'@': 212}, {'@': 213}, {'@': 214}, {'@': 215}, {'@': 216}, {'@': 217}, {'@': 218}, {'@': 219}], 'start': ['start'],
'parser_type': 'lalr', '__type__': 'ParserConf'}, 'parser': {'tokens': {0: 'STAR', 1: 'DOT', 2: '__ANON_9', 3: '__ANON_11', 4: '__ANON_14', 5: 'BANG', 6: 'TILDE', 7: '__ANON_0', 8: '__ANON_13', 9: '__ANON_1', 10: '__ANON_6', 11: '__ANON_7', 12: 'COMMA', 13: 'AMPERSAND', 14: '__ANON_5', 15: 'QMARK', 16: '__ANON_3', 17: 'RBRACE', 18: '__ANON_15', 19: '__ANON_12', 20: '__ANON_2', 21: '__ANON_4', 22: '__ANON_8', 23: 'MORETHAN', 24: 'RPAR', 25: '__ANON_10', 26: 'MINUS', 27: 'RSQB', 28: 'AT', 29: 'VBAR', 30: '$END', 31: 'term_nonvar', 32: 'interval', 33: 'LPAR', 34: 'variable', 35: 'set', 36: 'term', 37: 'multi', 38: 'negation', 39: 'word', 40: 'compound_term', 41: 'HASH', 42: 'con_negation', 43: 'string', 44: 'con_int_set', 45: 'LESSTHAN', 46: 'statement', 47: 'PLUS', 48: 'string_raw', 49: '__ANON_16', 50: 'ext_image', 51: 'con_ext_set', 52: '__ANON_25', 53: 'single', 54: 'DOLLAR', 55: 'int_image', 56: 'CIRCUMFLEX', 57: '__ANON_24', 58: 'LSQB', 59: 'ext_set', 60: 'int_set', 61: 'LBRACE', 62: 'op', 63: '__statement_star_0', 64: 'term5', 65: 'multi_disj_expr', 66: 'multi_conj_expr', 67: 'term6', 68: 'term4', 69: 'multi_extint_expr', 70: 'multi_sequential_expr', 71: 'multi_infix_expr', 72: 'term3', 73: 'multi_prod_expr', 74: 'term2', 75: 'term1', 76: 'multi_parallel_expr', 77: 'multi_intint_expr', 78: 'SEMICOLON', 79: 'PERCENT', 80: '__ANON_18', 81: 'tense', 82: '__ANON_20', 83: 'truth', 84: '__ANON_19', 85: '__ANON_17', 86: 'k_evidence', 87: '__ANON_23', 88: 'desire', 89: '__multi_intint_expr_plus_6', 90: 'sentence', 91: 'task', 92: 'budget', 93: 'start', 94: 'priority', 95: '__ANON_21', 96: '__multi_sequential_expr_plus_4', 97: 'NUMBER', 98: 'confidence', 99: '__ANON_22', 100: 'con_single', 101: 'con_int_image', 102: 'con_ext_image', 103: 'con_multi', 104: 'SLASH', 105: 'con_product', 106: 'BACKSLASH', 107: 'copula', 108: '__multi_parallel_expr_plus_5', 109: 'quality', 110: 'durability', 111: '__multi_extint_expr_plus_7', 112: '__multi_disj_expr_plus_2', 113: '__multi_prod_expr_plus_1', 114: '__multi_conj_expr_plus_3', 115: 'frequency', 116: 'COLON'}, 'states': {0: {0: (1, {'@': 116}), 1: (1, {'@': 116}), 2: (1, {'@': 116}), 3: (1, {'@': 116}), 4: (1, {'@': 116}), 5: (1, {'@': 116}), 6: (1, {'@': 116}), 7: (1, {'@': 116}), 8: (1, {'@': 116}), 9: (1, {'@': 116}), 10: (1, {'@': 116}), 11: (1, {'@': 116}), 12: (1, {'@': 116}), 13: (1, {'@': 116}), 14: (1, {'@': 116}), 15: (1, {'@': 116}), 16: (1, {'@': 116}), 17: (1, {'@': 116}), 18: (1, {'@': 116}), 19: (1, {'@': 116}), 20: (1, {'@': 116}), 21: (1, {'@': 116}), 22: (1, {'@': 116}), 23: (1, {'@': 116}), 24: (1, {'@': 116}), 25: (1, {'@': 116}), 26: (1, {'@': 116}), 27: (1, {'@': 116}), 28: (1, {'@': 116}), 29: (1, {'@': 116})}, 1: {12: (1, {'@': 172})}, 2: {30: (1, {'@': 56})}, 3: {31: (0, 4), 32: (0, 125), 33: (0, 116), 34: (0, 221), 35: (0, 35), 36: (0, 87), 37: (0, 32), 38: (0, 76), 39: (0, 19), 40: (0, 17), 41: (0, 178), 42: (0, 3), 43: (0, 119), 44: (0, 96), 45: (0, 52), 46: (0, 248), 47: (0, 83), 48: (0, 48), 49: (0, 23), 50: (0, 37), 51: (0, 89), 52: (0, 131), 53: (0, 151), 54: (0, 170), 55: (0, 69), 56: (0, 232), 57: (0, 236), 58: (0, 182), 59: (0, 0), 60: (0, 149), 61: (0, 27), 15: (0, 9), 62: (0, 11)}, 4: {0: (1, {'@': 101}), 1: (1, {'@': 101}), 2: (1, {'@': 101}), 3: (1, {'@': 101}), 4: (1, {'@': 101}), 5: (1, {'@': 101}), 6: (1, {'@': 101}), 7: (1, {'@': 101}), 8: (1, {'@': 101}), 9: (1, {'@': 101}), 10: (1, {'@': 101}), 11: (1, {'@': 101}), 12: (1, {'@': 101}), 13: (1, {'@': 101}), 14: (1, {'@': 101}), 15: (1, {'@': 101}), 16: (1, 
{'@': 101}), 17: (1, {'@': 101}), 18: (1, {'@': 101}), 19: (1, {'@': 101}), 20: (1, {'@': 101}), 21: (1, {'@': 101}), 22: (1, {'@': 101}), 23: (1, {'@': 101}), 24: (1, {'@': 101}), 25: (1, {'@': 101}), 26: (1, {'@': 101}), 27: (1, {'@': 101}), 28: (1, {'@': 101}), 29: (1, {'@': 101})}, 5: {13: (0, 160), 0: (1, {'@': 165}), 19: (1, {'@': 165}), 24: (1, {'@': 165}), 8: (1, {'@': 165}), 4: (1, {'@': 165}), 18: (1, {'@': 165}), 29: (1, {'@': 165}), 12: (1, {'@': 165}), 6: (1, {'@': 165}), 26: (1, {'@': 165})}, 6: {30: (1, {'@': 58})}, 7: {0: (1, {'@': 134}), 1: (1, {'@': 134}), 2: (1, {'@': 134}), 3: (1, {'@': 134}), 4: (1, {'@': 134}), 5: (1, {'@': 134}), 6: (1, {'@': 134}), 7: (1, {'@': 134}), 8: (1, {'@': 134}), 9: (1, {'@': 134}), 10: (1, {'@': 134}), 11: (1, {'@': 134}), 12: (1, {'@': 134}), 13: (1, {'@': 134}), 14: (1, {'@': 134}), 15: (1, {'@': 134}), 16: (1, {'@': 134}), 17: (1, {'@': 134}), 18: (1, {'@': 134}), 19: (1, {'@': 134}), 20: (1, {'@': 134}), 21: (1, {'@': 134}), 22: (1, {'@': 134}), 23: (1, {'@': 134}), 24: (1, {'@': 134}), 25: (1, {'@': 134}), 26: (1, {'@': 134}), 27: (1, {'@': 134}), 28: (1, {'@': 134}), 29: (1, {'@': 134})}, 8: {0: (1, {'@': 139}), 1: (1, {'@': 139}), 2: (1, {'@': 139}), 3: (1, {'@': 139}), 4: (1, {'@': 139}), 5: (1, {'@': 139}), 6: (1, {'@': 139}), 7: (1, {'@': 139}), 8: (1, {'@': 139}), 9: (1, {'@': 139}), 10: (1, {'@': 139}), 11: (1, {'@': 139}), 12: (1, {'@': 139}), 13: (1, {'@': 139}), 14: (1, {'@': 139}), 15: (1, {'@': 139}), 16: (1, {'@': 139}), 17: (1, {'@': 139}), 18: (1, {'@': 139}), 19: (1, {'@': 139}), 20: (1, {'@': 139}), 21: (1, {'@': 139}), 22: (1, {'@': 139}), 23: (1, {'@': 139}), 24: (1, {'@': 139}), 25: (1, {'@': 139}), 26: (1, {'@': 139}), 27: (1, {'@': 139}), 28: (1, {'@': 139}), 29: (1, {'@': 139})}, 9: {52: (0, 131), 48: (0, 48), 39: (0, 226), 43: (0, 119), 57: (0, 236)}, 10: {63: (0, 157), 12: (0, 108), 17: (0, 166)}, 11: {0: (1, {'@': 106}), 1: (1, {'@': 106}), 2: (1, {'@': 106}), 3: (1, {'@': 106}), 4: (1, {'@': 106}), 5: (1, {'@': 106}), 6: (1, {'@': 106}), 7: (1, {'@': 106}), 8: (1, {'@': 106}), 9: (1, {'@': 106}), 10: (1, {'@': 106}), 11: (1, {'@': 106}), 12: (1, {'@': 106}), 13: (1, {'@': 106}), 14: (1, {'@': 106}), 15: (1, {'@': 106}), 16: (1, {'@': 106}), 17: (1, {'@': 106}), 18: (1, {'@': 106}), 19: (1, {'@': 106}), 20: (1, {'@': 106}), 21: (1, {'@': 106}), 22: (1, {'@': 106}), 23: (1, {'@': 106}), 24: (1, {'@': 106}), 25: (1, {'@': 106}), 26: (1, {'@': 106}), 27: (1, {'@': 106}), 28: (1, {'@': 106}), 29: (1, {'@': 106})}, 12: {32: (0, 125), 33: (0, 116), 34: (0, 221), 64: (0, 138), 36: (0, 168), 38: (0, 76), 39: (0, 19), 40: (0, 17), 65: (0, 203), 66: (0, 222), 67: (0, 169), 42: (0, 3), 43: (0, 119), 44: (0, 96), 46: (0, 248), 47: (0, 83), 48: (0, 48), 68: (0, 217), 50: (0, 37), 69: (0, 161), 70: (0, 158), 71: (0, 177), 59: (0, 0), 72: (0, 80), 73: (0, 97), 61: (0, 27), 55: (0, 69), 31: (0, 4), 62: (0, 11), 74: (0, 124), 75: (0, 41), 35: (0, 35), 37: (0, 32), 76: (0, 112), 41: (0, 178), 77: (0, 191), 45: (0, 52), 49: (0, 23), 51: (0, 89), 52: (0, 131), 53: (0, 151), 54: (0, 170), 56: (0, 232), 57: (0, 236), 58: (0, 182), 60: (0, 149), 15: (0, 9)}, 13: {0: (1, {'@': 117}), 1: (1, {'@': 117}), 2: (1, {'@': 117}), 3: (1, {'@': 117}), 4: (1, {'@': 117}), 5: (1, {'@': 117}), 6: (1, {'@': 117}), 7: (1, {'@': 117}), 8: (1, {'@': 117}), 9: (1, {'@': 117}), 10: (1, {'@': 117}), 11: (1, {'@': 117}), 12: (1, {'@': 117}), 13: (1, {'@': 117}), 14: (1, {'@': 117}), 15: (1, {'@': 117}), 16: (1, {'@': 117}), 17: (1, {'@': 117}), 18: (1, 
{'@': 117}), 19: (1, {'@': 117}), 20: (1, {'@': 117}), 21: (1, {'@': 117}), 22: (1, {'@': 117}), 23: (1, {'@': 117}), 24: (1, {'@': 117}), 25: (1, {'@': 117}), 26: (1, {'@': 117}), 27: (1, {'@': 117}), 28: (1, {'@': 117}), 29: (1, {'@': 117})}, 14: {0: (1, {'@': 135}), 1: (1, {'@': 135}), 2: (1, {'@': 135}), 3: (1, {'@': 135}), 4: (1, {'@': 135}), 5: (1, {'@': 135}), 6: (1, {'@': 135}), 7: (1, {'@': 135}), 8: (1, {'@': 135}), 9: (1, {'@': 135}), 10: (1, {'@': 135}), 11: (1, {'@': 135}), 12: (1, {'@': 135}), 13: (1, {'@': 135}), 14: (1, {'@': 135}), 15: (1, {'@': 135}), 16: (1, {'@': 135}), 17: (1, {'@': 135}), 18: (1, {'@': 135}), 19: (1, {'@': 135}), 20: (1, {'@': 135}), 21: (1, {'@': 135}), 22: (1, {'@': 135}), 23: (1, {'@': 135}), 24: (1, {'@': 135}), 25: (1, {'@': 135}), 26: (1, {'@': 135}), 27: (1, {'@': 135}), 28: (1, {'@': 135}), 29: (1, {'@': 135})}, 15: {5: (0, 57), 1: (0, 66), 28: (0, 73), 15: (0, 81)}, 16: {0: (1, {'@': 211}), 12: (1, {'@': 211}), 6: (1, {'@': 211}), 19: (1, {'@': 211}), 24: (1, {'@': 211}), 8: (1, {'@': 211}), 26: (1, {'@': 211})}, 17: {0: (1, {'@': 104}), 1: (1, {'@': 104}), 2: (1, {'@': 104}), 3: (1, {'@': 104}), 4: (1, {'@': 104}), 5: (1, {'@': 104}), 6: (1, {'@': 104}), 7: (1, {'@': 104}), 8: (1, {'@': 104}), 9: (1, {'@': 104}), 10: (1, {'@': 104}), 11: (1, {'@': 104}), 12: (1, {'@': 104}), 13: (1, {'@': 104}), 14: (1, {'@': 104}), 15: (1, {'@': 104}), 16: (1, {'@': 104}), 17: (1, {'@': 104}), 18: (1, {'@': 104}), 19: (1, {'@': 104}), 20: (1, {'@': 104}), 21: (1, {'@': 104}), 22: (1, {'@': 104}), 23: (1, {'@': 104}), 24: (1, {'@': 104}), 25: (1, {'@': 104}), 26: (1, {'@': 104}), 27: (1, {'@': 104}), 28: (1, {'@': 104}), 29: (1, {'@': 104})}, 18: {0: (1, {'@': 107}), 1: (1, {'@': 107}), 2: (1, {'@': 107}), 3: (1, {'@': 107}), 4: (1, {'@': 107}), 5: (1, {'@': 107}), 6: (1, {'@': 107}), 7: (1, {'@': 107}), 8: (1, {'@': 107}), 9: (1, {'@': 107}), 10: (1, {'@': 107}), 11: (1, {'@': 107}), 12: (1, {'@': 107}), 13: (1, {'@': 107}), 14: (1, {'@': 107}), 15: (1, {'@': 107}), 16: (1, {'@': 107}), 17: (1, {'@': 107}), 18: (1, {'@': 107}), 19: (1, {'@': 107}), 20: (1, {'@': 107}), 21: (1, {'@': 107}), 22: (1, {'@': 107}), 23: (1, {'@': 107}), 24: (1, {'@': 107}), 25: (1, {'@': 107}), 26: (1, {'@': 107}), 27: (1, {'@': 107}), 28: (1, {'@': 107}), 29: (1, {'@': 107})}, 19: {33: (0, 218), 0: (1, {'@': 103}), 1: (1, {'@': 103}), 2: (1, {'@': 103}), 3: (1, {'@': 103}), 4: (1, {'@': 103}), 5: (1, {'@': 103}), 6: (1, {'@': 103}), 7: (1, {'@': 103}), 8: (1, {'@': 103}), 9: (1, {'@': 103}), 10: (1, {'@': 103}), 11: (1, {'@': 103}), 12: (1, {'@': 103}), 13: (1, {'@': 103}), 14: (1, {'@': 103}), 15: (1, {'@': 103}), 16: (1, {'@': 103}), 17: (1, {'@': 103}), 18: (1, {'@': 103}), 19: (1, {'@': 103}), 20: (1, {'@': 103}), 21: (1, {'@': 103}), 22: (1, {'@': 103}), 23: (1, {'@': 103}), 24: (1, {'@': 103}), 25: (1, {'@': 103}), 26: (1, {'@': 103}), 27: (1, {'@': 103}), 28: (1, {'@': 103}), 29: (1, {'@': 103})}, 20: {15: (1, {'@': 97}), 56: (1, {'@': 97}), 61: (1, {'@': 97}), 52: (1, {'@': 97}), 54: (1, {'@': 97}), 47: (1, {'@': 97}), 33: (1, {'@': 97}), 57: (1, {'@': 97}), 58: (1, {'@': 97}), 41: (1, {'@': 97}), 49: (1, {'@': 97}), 45: (1, {'@': 97})}, 21: {78: (0, 130), 54: (0, 141)}, 22: {30: (1, {'@': 189})}, 23: {15: (1, {'@': 177}), 56: (1, {'@': 177}), 61: (1, {'@': 177}), 52: (1, {'@': 177}), 54: (1, {'@': 177}), 47: (1, {'@': 177}), 33: (1, {'@': 177}), 57: (1, {'@': 177}), 58: (1, {'@': 177}), 41: (1, {'@': 177}), 49: (1, {'@': 177}), 45: (1, {'@': 177}), 12: (1, {'@': 177})}, 
24: {12: (1, {'@': 174}), 15: (1, {'@': 174}), 56: (1, {'@': 174}), 61: (1, {'@': 174}), 52: (1, {'@': 174}), 54: (1, {'@': 174}), 47: (1, {'@': 174}), 33: (1, {'@': 174}), 57: (1, {'@': 174}), 58: (1, {'@': 174}), 41: (1, {'@': 174}), 49: (1, {'@': 174}), 45: (1, {'@': 174})}, 25: {79: (0, 241), 80: (0, 250), 81: (0, 257), 82: (0, 128), 83: (0, 139), 84: (0, 146), 85: (0, 154), 30: (1, {'@': 65})}, 26: {24: (0, 189), 12: (0, 137)}, 27: {15: (1, {'@': 176}), 56: (1, {'@': 176}), 61: (1, {'@': 176}), 52: (1, {'@': 176}), 54: (1, {'@': 176}), 47: (1, {'@': 176}), 33: (1, {'@': 176}), 57: (1, {'@': 176}), 58: (1, {'@': 176}), 41: (1, {'@': 176}), 49: (1, {'@': 176}), 45: (1, {'@': 176})}, 28: {32: (0, 125), 33: (0, 116), 34: (0, 221), 64: (0, 138), 36: (0, 175), 38: (0, 76), 39: (0, 19), 40: (0, 17), 65: (0, 203), 66: (0, 222), 67: (0, 169), 42: (0, 3), 43: (0, 119), 44: (0, 96), 46: (0, 248), 47: (0, 83), 48: (0, 48), 68: (0, 217), 71: (0, 184), 50: (0, 37), 69: (0, 161), 70: (0, 158), 59: (0, 0), 72: (0, 80), 73: (0, 97), 61: (0, 27), 55: (0, 69), 31: (0, 4), 62: (0, 11), 74: (0, 124), 75: (0, 41), 35: (0, 35), 37: (0, 32), 76: (0, 112), 41: (0, 178), 77: (0, 191), 45: (0, 52), 49: (0, 23), 51: (0, 89), 52: (0, 131), 53: (0, 151), 54: (0, 170), 56: (0, 232), 57: (0, 236), 58: (0, 182), 60: (0, 149), 15: (0, 9)}, 29: {15: (1, {'@': 89}), 56: (1, {'@': 89}), 61: (1, {'@': 89}), 52: (1, {'@': 89}), 54: (1, {'@': 89}), 47: (1, {'@': 89}), 33: (1, {'@': 89}), 57: (1, {'@': 89}), 58: (1, {'@': 89}), 41: (1, {'@': 89}), 49: (1, {'@': 89}), 45: (1, {'@': 89})}, 30: {54: (1, {'@': 197}), 78: (1, {'@': 197})}, 31: {86: (0, 47), 87: (0, 56)}, 32: {0: (1, {'@': 110}), 1: (1, {'@': 110}), 2: (1, {'@': 110}), 3: (1, {'@': 110}), 4: (1, {'@': 110}), 5: (1, {'@': 110}), 6: (1, {'@': 110}), 7: (1, {'@': 110}), 8: (1, {'@': 110}), 9: (1, {'@': 110}), 10: (1, {'@': 110}), 11: (1, {'@': 110}), 12: (1, {'@': 110}), 13: (1, {'@': 110}), 14: (1, {'@': 110}), 15: (1, {'@': 110}), 16: (1, {'@': 110}), 17: (1, {'@': 110}), 18: (1, {'@': 110}), 19: (1, {'@': 110}), 20: (1, {'@': 110}), 21: (1, {'@': 110}), 22: (1, {'@': 110}), 23: (1, {'@': 110}), 24: (1, {'@': 110}), 25: (1, {'@': 110}), 26: (1, {'@': 110}), 27: (1, {'@': 110}), 28: (1, {'@': 110}), 29: (1, {'@': 110})}, 33: {79: (0, 241), 83: (0, 162), 88: (0, 171), 82: (0, 128), 84: (0, 146), 81: (0, 179), 85: (0, 154), 80: (0, 250), 30: (1, {'@': 77})}, 34: {0: (1, {'@': 124}), 1: (1, {'@': 124}), 2: (1, {'@': 124}), 3: (1, {'@': 124}), 4: (1, {'@': 124}), 5: (1, {'@': 124}), 6: (1, {'@': 124}), 7: (1, {'@': 124}), 8: (1, {'@': 124}), 9: (1, {'@': 124}), 10: (1, {'@': 124}), 11: (1, {'@': 124}), 12: (1, {'@': 124}), 13: (1, {'@': 124}), 14: (1, {'@': 124}), 15: (1, {'@': 124}), 16: (1, {'@': 124}), 17: (1, {'@': 124}), 18: (1, {'@': 124}), 19: (1, {'@': 124}), 20: (1, {'@': 124}), 21: (1, {'@': 124}), 22: (1, {'@': 124}), 23: (1, {'@': 124}), 24: (1, {'@': 124}), 25: (1, {'@': 124}), 26: (1, {'@': 124}), 27: (1, {'@': 124}), 28: (1, {'@': 124}), 29: (1, {'@': 124})}, 35: {0: (1, {'@': 109}), 1: (1, {'@': 109}), 2: (1, {'@': 109}), 3: (1, {'@': 109}), 4: (1, {'@': 109}), 5: (1, {'@': 109}), 6: (1, {'@': 109}), 7: (1, {'@': 109}), 8: (1, {'@': 109}), 9: (1, {'@': 109}), 10: (1, {'@': 109}), 11: (1, {'@': 109}), 12: (1, {'@': 109}), 13: (1, {'@': 109}), 14: (1, {'@': 109}), 15: (1, {'@': 109}), 16: (1, {'@': 109}), 17: (1, {'@': 109}), 18: (1, {'@': 109}), 19: (1, {'@': 109}), 20: (1, {'@': 109}), 21: (1, {'@': 109}), 22: (1, {'@': 109}), 23: (1, {'@': 109}), 24: 
(1, {'@': 109}), 25: (1, {'@': 109}), 26: (1, {'@': 109}), 27: (1, {'@': 109}), 28: (1, {'@': 109}), 29: (1, {'@': 109})}, 36: {19: (0, 190), 0: (1, {'@': 150}), 24: (1, {'@': 150}), 12: (1, {'@': 150}), 26: (1, {'@': 150}), 6: (1, {'@': 150})}, 37: {0: (1, {'@': 112}), 1: (1, {'@': 112}), 2: (1, {'@': 112}), 3: (1, {'@': 112}), 4: (1, {'@': 112}), 5: (1, {'@': 112}), 6: (1, {'@': 112}), 7: (1, {'@': 112}), 8: (1, {'@': 112}), 9: (1, {'@': 112}), 10: (1, {'@': 112}), 11: (1, {'@': 112}), 12: (1, {'@': 112}), 13: (1, {'@': 112}), 14: (1, {'@': 112}), 15: (1, {'@': 112}), 16: (1, {'@': 112}), 17: (1, {'@': 112}), 18: (1, {'@': 112}), 19: (1, {'@': 112}), 20: (1, {'@': 112}), 21: (1, {'@': 112}), 22: (1, {'@': 112}), 23: (1, {'@': 112}), 24: (1, {'@': 112}), 25: (1, {'@': 112}), 26: (1, {'@': 112}), 27: (1, {'@': 112}), 28: (1, {'@': 112}), 29: (1, {'@': 112})}, 38: {15: (1, {'@': 93}), 56: (1, {'@': 93}), 61: (1, {'@': 93}), 52: (1, {'@': 93}), 54: (1, {'@': 93}), 47: (1, {'@': 93}), 33: (1, {'@': 93}), 57: (1, {'@': 93}), 58: (1, {'@': 93}), 41: (1, {'@': 93}), 49: (1, {'@': 93}), 45: (1, {'@': 93})}, 39: {23: (0, 148)}, 40: {56: (1, {'@': 191}), 61: (1, {'@': 191}), 52: (1, {'@': 191}), 47: (1, {'@': 191}), 33: (1, {'@': 191}), 57: (1, {'@': 191}), 58: (1, {'@': 191}), 49: (1, {'@': 191}), 45: (1, {'@': 191})}, 41: {89: (0, 185), 29: (0, 192), 0: (1, {'@': 160}), 8: (1, {'@': 160}), 4: (1, {'@': 160}), 18: (1, {'@': 160}), 19: (1, {'@': 160}), 12: (1, {'@': 160}), 6: (1, {'@': 160}), 24: (1, {'@': 160}), 26: (1, {'@': 160})}, 42: {81: (0, 187), 85: (0, 154), 80: (0, 250), 82: (0, 128), 84: (0, 146), 30: (1, {'@': 81})}, 43: {12: (0, 137), 24: (0, 196)}, 44: {32: (0, 125), 33: (0, 116), 34: (0, 221), 36: (0, 197), 38: (0, 76), 39: (0, 19), 40: (0, 17), 42: (0, 3), 43: (0, 119), 44: (0, 96), 46: (0, 248), 47: (0, 83), 64: (0, 206), 48: (0, 48), 68: (0, 217), 50: (0, 37), 59: (0, 0), 72: (0, 80), 66: (0, 213), 61: (0, 27), 55: (0, 69), 74: (0, 124), 31: (0, 4), 62: (0, 11), 69: (0, 223), 75: (0, 41), 35: (0, 35), 37: (0, 32), 41: (0, 178), 77: (0, 231), 45: (0, 52), 49: (0, 23), 51: (0, 89), 52: (0, 131), 53: (0, 151), 54: (0, 170), 70: (0, 240), 76: (0, 247), 56: (0, 232), 57: (0, 236), 58: (0, 182), 60: (0, 149), 15: (0, 9)}, 45: {15: (1, {'@': 88}), 56: (1, {'@': 88}), 61: (1, {'@': 88}), 52: (1, {'@': 88}), 54: (1, {'@': 88}), 47: (1, {'@': 88}), 33: (1, {'@': 88}), 57: (1, {'@': 88}), 58: (1, {'@': 88}), 41: (1, {'@': 88}), 49: (1, {'@': 88}), 45: (1, {'@': 88})}, 46: {0: (1, {'@': 119}), 1: (1, {'@': 119}), 2: (1, {'@': 119}), 3: (1, {'@': 119}), 4: (1, {'@': 119}), 5: (1, {'@': 119}), 6: (1, {'@': 119}), 7: (1, {'@': 119}), 8: (1, {'@': 119}), 9: (1, {'@': 119}), 10: (1, {'@': 119}), 11: (1, {'@': 119}), 12: (1, {'@': 119}), 13: (1, {'@': 119}), 14: (1, {'@': 119}), 15: (1, {'@': 119}), 16: (1, {'@': 119}), 17: (1, {'@': 119}), 18: (1, {'@': 119}), 19: (1, {'@': 119}), 20: (1, {'@': 119}), 21: (1, {'@': 119}), 22: (1, {'@': 119}), 23: (1, {'@': 119}), 24: (1, {'@': 119}), 25: (1, {'@': 119}), 26: (1, {'@': 119}), 27: (1, {'@': 119}), 28: (1, {'@': 119}), 29: (1, {'@': 119})}, 47: {79: (0, 65)}, 48: {0: (1, {'@': 194}), 1: (1, {'@': 194}), 2: (1, {'@': 194}), 3: (1, {'@': 194}), 4: (1, {'@': 194}), 5: (1, {'@': 194}), 6: (1, {'@': 194}), 7: (1, {'@': 194}), 8: (1, {'@': 194}), 9: (1, {'@': 194}), 10: (1, {'@': 194}), 11: (1, {'@': 194}), 12: (1, {'@': 194}), 13: (1, {'@': 194}), 14: (1, {'@': 194}), 15: (1, {'@': 194}), 16: (1, {'@': 194}), 17: (1, {'@': 194}), 18: (1, {'@': 194}), 19: 
(1, {'@': 194}), 20: (1, {'@': 194}), 21: (1, {'@': 194}), 22: (1, {'@': 194}), 23: (1, {'@': 194}), 24: (1, {'@': 194}), 33: (1, {'@': 194}), 25: (1, {'@': 194}), 27: (1, {'@': 194}), 28: (1, {'@': 194}), 26: (1, {'@': 194}), 29: (1, {'@': 194})}, 49: {32: (0, 125), 33: (0, 116), 90: (0, 61), 38: (0, 76), 39: (0, 19), 46: (0, 122), 40: (0, 17), 31: (0, 15), 42: (0, 3), 43: (0, 119), 44: (0, 96), 47: (0, 83), 48: (0, 48), 50: (0, 37), 54: (0, 59), 59: (0, 0), 61: (0, 27), 55: (0, 69), 62: (0, 11), 91: (0, 72), 35: (0, 35), 37: (0, 32), 45: (0, 52), 92: (0, 109), 49: (0, 23), 51: (0, 89), 52: (0, 131), 93: (0, 133), 53: (0, 151), 56: (0, 232), 57: (0, 236), 58: (0, 182), 60: (0, 149)}, 50: {81: (0, 194), 85: (0, 154), 80: (0, 250), 82: (0, 128), 84: (0, 146), 30: (1, {'@': 69})}, 51: {0: (1, {'@': 126}), 1: (1, {'@': 126}), 2: (1, {'@': 126}), 3: (1, {'@': 126}), 4: (1, {'@': 126}), 5: (1, {'@': 126}), 6: (1, {'@': 126}), 7: (1, {'@': 126}), 8: (1, {'@': 126}), 9: (1, {'@': 126}), 10: (1, {'@': 126}), 11: (1, {'@': 126}), 12: (1, {'@': 126}), 13: (1, {'@': 126}), 14: (1, {'@': 126}), 15: (1, {'@': 126}), 16: (1, {'@': 126}), 17: (1, {'@': 126}), 18: (1, {'@': 126}), 19: (1, {'@': 126}), 20: (1, {'@': 126}), 21: (1, {'@': 126}), 22: (1, {'@': 126}), 23: (1, {'@': 126}), 24: (1, {'@': 126}), 25: (1, {'@': 126}), 26: (1, {'@': 126}), 27: (1, {'@': 126}), 28: (1, {'@': 126}), 29: (1, {'@': 126})}, 52: {31: (0, 4), 32: (0, 125), 33: (0, 116), 34: (0, 221), 35: (0, 35), 37: (0, 32), 38: (0, 76), 39: (0, 19), 40: (0, 17), 41: (0, 178), 42: (0, 3), 43: (0, 119), 44: (0, 96), 45: (0, 52), 46: (0, 248), 47: (0, 83), 48: (0, 48), 49: (0, 23), 50: (0, 37), 36: (0, 120), 51: (0, 89), 52: (0, 131), 53: (0, 151), 54: (0, 170), 55: (0, 69), 56: (0, 232), 57: (0, 236), 58: (0, 182), 59: (0, 0), 60: (0, 149), 61: (0, 27), 15: (0, 9), 62: (0, 11)}, 53: {31: (0, 4), 32: (0, 125), 33: (0, 116), 34: (0, 221), 35: (0, 35), 37: (0, 32), 38: (0, 76), 39: (0, 19), 40: (0, 17), 41: (0, 178), 36: (0, 255), 42: (0, 3), 43: (0, 119), 44: (0, 96), 45: (0, 52), 46: (0, 248), 47: (0, 83), 48: (0, 48), 49: (0, 23), 50: (0, 37), 51: (0, 89), 52: (0, 131), 53: (0, 151), 54: (0, 170), 55: (0, 69), 56: (0, 232), 57: (0, 236), 58: (0, 182), 59: (0, 0), 60: (0, 149), 61: (0, 27), 15: (0, 9), 62: (0, 11)}, 54: {15: (1, {'@': 95}), 56: (1, {'@': 95}), 61: (1, {'@': 95}), 52: (1, {'@': 95}), 54: (1, {'@': 95}), 47: (1, {'@': 95}), 33: (1, {'@': 95}), 57: (1, {'@': 95}), 58: (1, {'@': 95}), 41: (1, {'@': 95}), 49: (1, {'@': 95}), 45: (1, {'@': 95})}, 55: {32: (0, 125), 33: (0, 116), 34: (0, 221), 64: (0, 138), 36: (0, 156), 38: (0, 76), 39: (0, 19), 65: (0, 203), 40: (0, 17), 66: (0, 222), 67: (0, 169), 42: (0, 3), 43: (0, 119), 44: (0, 96), 46: (0, 248), 47: (0, 83), 48: (0, 48), 68: (0, 217), 50: (0, 37), 69: (0, 161), 70: (0, 158), 59: (0, 0), 72: (0, 80), 73: (0, 97), 61: (0, 27), 55: (0, 69), 31: (0, 4), 62: (0, 11), 74: (0, 124), 75: (0, 41), 35: (0, 35), 37: (0, 32), 76: (0, 112), 41: (0, 178), 77: (0, 191), 45: (0, 52), 49: (0, 23), 51: (0, 89), 52: (0, 131), 53: (0, 151), 54: (0, 170), 56: (0, 232), 57: (0, 236), 58: (0, 182), 60: (0, 149), 71: (0, 164), 15: (0, 9)}, 56: {79: (1, {'@': 201})}, 57: {79: (0, 241), 81: (0, 200), 83: (0, 162), 82: (0, 128), 88: (0, 209), 84: (0, 146), 85: (0, 154), 80: (0, 250), 30: (1, {'@': 73})}, 58: {24: (0, 202), 12: (0, 137)}, 59: {94: (0, 106), 95: (0, 113)}, 60: {24: (0, 262), 12: (0, 137)}, 61: {30: (1, {'@': 57})}, 62: {12: (1, {'@': 169})}, 63: {24: (0, 186), 12: (0, 137)}, 64: {32: 
(0, 125), 33: (0, 116), 34: (0, 221), 64: (0, 138), 36: (0, 173), 38: (0, 76), 39: (0, 19), 65: (0, 203), 40: (0, 17), 66: (0, 222), 67: (0, 169), 42: (0, 3), 43: (0, 119), 44: (0, 96), 71: (0, 181), 46: (0, 248), 47: (0, 83), 48: (0, 48), 68: (0, 217), 50: (0, 37), 69: (0, 161), 70: (0, 158), 59: (0, 0), 72: (0, 80), 73: (0, 97), 61: (0, 27), 55: (0, 69), 31: (0, 4), 62: (0, 11), 74: (0, 124), 75: (0, 41), 35: (0, 35), 37: (0, 32), 76: (0, 112), 41: (0, 178), 77: (0, 191), 45: (0, 52), 49: (0, 23), 51: (0, 89), 52: (0, 131), 53: (0, 151), 54: (0, 170), 56: (0, 232), 57: (0, 236), 58: (0, 182), 60: (0, 149), 15: (0, 9)}, 65: {30: (1, {'@': 188})}, 66: {79: (0, 241), 81: (0, 219), 83: (0, 228), 82: (0, 128), 84: (0, 146), 85: (0, 154), 80: (0, 250), 30: (1, {'@': 61})}, 67: {0: (1, {'@': 213}), 12: (1, {'@': 213}), 4: (1, {'@': 213}), 19: (1, {'@': 213}), 6: (1, {'@': 213}), 24: (1, {'@': 213}), 8: (1, {'@': 213}), 26: (1, {'@': 213})}, 68: {15: (1, {'@': 94}), 56: (1, {'@': 94}), 61: (1, {'@': 94}), 52: (1, {'@': 94}), 54: (1, {'@': 94}), 47: (1, {'@': 94}), 33: (1, {'@': 94}), 57: (1, {'@': 94}), 58: (1, {'@': 94}), 41: (1, {'@': 94}), 49: (1, {'@': 94}), 45: (1, {'@': 94})}, 69: {0: (1, {'@': 113}), 1: (1, {'@': 113}), 2: (1, {'@': 113}), 3: (1, {'@': 113}), 4: (1, {'@': 113}), 5: (1, {'@': 113}), 6: (1, {'@': 113}), 7: (1, {'@': 113}), 8: (1, {'@': 113}), 9: (1, {'@': 113}), 10: (1, {'@': 113}), 11: (1, {'@': 113}), 12: (1, {'@': 113}), 13: (1, {'@': 113}), 14: (1, {'@': 113}), 15: (1, {'@': 113}), 16: (1, {'@': 113}), 17: (1, {'@': 113}), 18: (1, {'@': 113}), 19: (1, {'@': 113}), 20: (1, {'@': 113}), 21: (1, {'@': 113}), 22: (1, {'@': 113}), 23: (1, {'@': 113}), 24: (1, {'@': 113}), 25: (1, {'@': 113}), 26: (1, {'@': 113}), 27: (1, {'@': 113}), 28: (1, {'@': 113}), 29: (1, {'@': 113})}, 70: {0: (1, {'@': 85}), 1: (1, {'@': 85}), 2: (1, {'@': 85}), 3: (1, {'@': 85}), 4: (1, {'@': 85}), 5: (1, {'@': 85}), 6: (1, {'@': 85}), 7: (1, {'@': 85}), 8: (1, {'@': 85}), 9: (1, {'@': 85}), 10: (1, {'@': 85}), 11: (1, {'@': 85}), 12: (1, {'@': 85}), 13: (1, {'@': 85}), 14: (1, {'@': 85}), 15: (1, {'@': 85}), 16: (1, {'@': 85}), 17: (1, {'@': 85}), 18: (1, {'@': 85}), 19: (1, {'@': 85}), 20: (1, {'@': 85}), 21: (1, {'@': 85}), 22: (1, {'@': 85}), 23: (1, {'@': 85}), 24: (1, {'@': 85}), 25: (1, {'@': 85}), 26: (1, {'@': 85}), 27: (1, {'@': 85}), 28: (1, {'@': 85}), 29: (1, {'@': 85})}, 71: {0: (1, {'@': 209}), 12: (1, {'@': 209}), 6: (1, {'@': 209}), 19: (1, {'@': 209}), 24: (1, {'@': 209}), 26: (1, {'@': 209})}, 72: {30: (1, {'@': 55})}, 73: {82: (0, 128), 81: (0, 237), 85: (0, 154), 80: (0, 250), 84: (0, 146), 30: (1, {'@': 79})}, 74: {0: (1, {'@': 215}), 12: (1, {'@': 215}), 4: (1, {'@': 215}), 18: (1, {'@': 215}), 19: (1, {'@': 215}), 6: (1, {'@': 215}), 24: (1, {'@': 215}), 8: (1, {'@': 215}), 26: (1, {'@': 215})}, 75: {31: (0, 4), 32: (0, 125), 33: (0, 116), 34: (0, 221), 35: (0, 35), 37: (0, 32), 38: (0, 76), 39: (0, 19), 40: (0, 17), 41: (0, 178), 42: (0, 3), 43: (0, 119), 44: (0, 96), 45: (0, 52), 46: (0, 248), 47: (0, 83), 48: (0, 48), 49: (0, 23), 50: (0, 37), 51: (0, 89), 52: (0, 131), 53: (0, 151), 54: (0, 170), 55: (0, 69), 56: (0, 232), 57: (0, 236), 58: (0, 182), 59: (0, 0), 60: (0, 149), 36: (0, 145), 61: (0, 27), 15: (0, 9), 62: (0, 11)}, 76: {0: (1, {'@': 114}), 1: (1, {'@': 114}), 2: (1, {'@': 114}), 3: (1, {'@': 114}), 4: (1, {'@': 114}), 5: (1, {'@': 114}), 6: (1, {'@': 114}), 7: (1, {'@': 114}), 8: (1, {'@': 114}), 9: (1, {'@': 114}), 10: (1, {'@': 114}), 11: (1, {'@': 114}), 
12: (1, {'@': 114}), 13: (1, {'@': 114}), 14: (1, {'@': 114}), 15: (1, {'@': 114}), 16: (1, {'@': 114}), 17: (1, {'@': 114}), 18: (1, {'@': 114}), 19: (1, {'@': 114}), 20: (1, {'@': 114}), 21: (1, {'@': 114}), 22: (1, {'@': 114}), 23: (1, {'@': 114}), 24: (1, {'@': 114}), 25: (1, {'@': 114}), 26: (1, {'@': 114}), 27: (1, {'@': 114}), 28: (1, {'@': 114}), 29: (1, {'@': 114})}, 77: {12: (1, {'@': 167})}, 78: {0: (0, 193), 24: (1, {'@': 147}), 12: (1, {'@': 147}), 26: (1, {'@': 147}), 6: (1, {'@': 147})}, 79: {12: (1, {'@': 205}), 17: (1, {'@': 205}), 27: (1, {'@': 205}), 24: (1, {'@': 205})}, 80: {96: (0, 152), 4: (0, 159), 0: (1, {'@': 154}), 19: (1, {'@': 154}), 8: (1, {'@': 154}), 12: (1, {'@': 154}), 24: (1, {'@': 154}), 6: (1, {'@': 154}), 26: (1, {'@': 154})}, 81: {81: (0, 244), 85: (0, 154), 80: (0, 250), 82: (0, 128), 84: (0, 146), 30: (1, {'@': 67})}, 82: {0: (1, {'@': 217}), 4: (1, {'@': 217}), 6: (1, {'@': 217}), 8: (1, {'@': 217}), 12: (1, {'@': 217}), 18: (1, {'@': 217}), 19: (1, {'@': 217}), 24: (1, {'@': 217}), 26: (1, {'@': 217}), 29: (1, {'@': 217})}, 83: {97: (0, 101)}, 84: {15: (1, {'@': 98}), 56: (1, {'@': 98}), 61: (1, {'@': 98}), 52: (1, {'@': 98}), 54: (1, {'@': 98}), 47: (1, {'@': 98}), 33: (1, {'@': 98}), 57: (1, {'@': 98}), 58: (1, {'@': 98}), 41: (1, {'@': 98}), 49: (1, {'@': 98}), 45: (1, {'@': 98})}, 85: {32: (0, 125), 33: (0, 116), 34: (0, 221), 64: (0, 138), 36: (0, 197), 67: (0, 199), 38: (0, 76), 39: (0, 19), 40: (0, 17), 42: (0, 3), 43: (0, 119), 44: (0, 96), 46: (0, 248), 47: (0, 83), 48: (0, 48), 68: (0, 217), 50: (0, 37), 59: (0, 0), 72: (0, 80), 66: (0, 213), 61: (0, 27), 55: (0, 69), 65: (0, 208), 31: (0, 4), 62: (0, 11), 74: (0, 124), 69: (0, 223), 75: (0, 41), 35: (0, 35), 37: (0, 32), 41: (0, 178), 77: (0, 231), 45: (0, 52), 49: (0, 23), 51: (0, 89), 52: (0, 131), 53: (0, 151), 54: (0, 170), 70: (0, 240), 76: (0, 247), 56: (0, 232), 57: (0, 236), 58: (0, 182), 60: (0, 149), 15: (0, 9)}, 86: {0: (1, {'@': 83}), 1: (1, {'@': 83}), 2: (1, {'@': 83}), 3: (1, {'@': 83}), 4: (1, {'@': 83}), 5: (1, {'@': 83}), 6: (1, {'@': 83}), 7: (1, {'@': 83}), 8: (1, {'@': 83}), 9: (1, {'@': 83}), 10: (1, {'@': 83}), 11: (1, {'@': 83}), 12: (1, {'@': 83}), 13: (1, {'@': 83}), 14: (1, {'@': 83}), 15: (1, {'@': 83}), 16: (1, {'@': 83}), 17: (1, {'@': 83}), 18: (1, {'@': 83}), 19: (1, {'@': 83}), 20: (1, {'@': 83}), 21: (1, {'@': 83}), 22: (1, {'@': 83}), 23: (1, {'@': 83}), 24: (1, {'@': 83}), 25: (1, {'@': 83}), 26: (1, {'@': 83}), 27: (1, {'@': 83}), 28: (1, {'@': 83}), 29: (1, {'@': 83})}, 87: {0: (1, {'@': 121}), 1: (1, {'@': 121}), 2: (1, {'@': 121}), 3: (1, {'@': 121}), 4: (1, {'@': 121}), 5: (1, {'@': 121}), 6: (1, {'@': 121}), 7: (1, {'@': 121}), 8: (1, {'@': 121}), 9: (1, {'@': 121}), 10: (1, {'@': 121}), 11: (1, {'@': 121}), 12: (1, {'@': 121}), 13: (1, {'@': 121}), 14: (1, {'@': 121}), 15: (1, {'@': 121}), 16: (1, {'@': 121}), 17: (1, {'@': 121}), 18: (1, {'@': 121}), 19: (1, {'@': 121}), 20: (1, {'@': 121}), 21: (1, {'@': 121}), 22: (1, {'@': 121}), 23: (1, {'@': 121}), 24: (1, {'@': 121}), 25: (1, {'@': 121}), 26: (1, {'@': 121}), 27: (1, {'@': 121}), 28: (1, {'@': 121}), 29: (1, {'@': 121})}, 88: {0: (1, {'@': 131}), 1: (1, {'@': 131}), 2: (1, {'@': 131}), 3: (1, {'@': 131}), 4: (1, {'@': 131}), 5: (1, {'@': 131}), 6: (1, {'@': 131}), 7: (1, {'@': 131}), 8: (1, {'@': 131}), 9: (1, {'@': 131}), 10: (1, {'@': 131}), 11: (1, {'@': 131}), 12: (1, {'@': 131}), 13: (1, {'@': 131}), 14: (1, {'@': 131}), 15: (1, {'@': 131}), 16: (1, {'@': 131}), 17: (1, {'@': 131}), 
18: (1, {'@': 131}), 19: (1, {'@': 131}), 20: (1, {'@': 131}), 21: (1, {'@': 131}), 22: (1, {'@': 131}), 23: (1, {'@': 131}), 24: (1, {'@': 131}), 25: (1, {'@': 131}), 26: (1, {'@': 131}), 27: (1, {'@': 131}), 28: (1, {'@': 131}), 29: (1, {'@': 131})}, 89: {32: (0, 125), 33: (0, 116), 34: (0, 221), 35: (0, 35), 37: (0, 32), 36: (0, 10), 38: (0, 76), 39: (0, 19), 15: (0, 9), 40: (0, 17), 41: (0, 178), 42: (0, 3), 43: (0, 119), 44: (0, 96), 45: (0, 52), 46: (0, 248), 47: (0, 83), 48: (0, 48), 49: (0, 23), 50: (0, 37), 51: (0, 89), 52: (0, 131), 53: (0, 151), 54: (0, 170), 56: (0, 232), 57: (0, 236), 58: (0, 182), 59: (0, 0), 60: (0, 149), 61: (0, 27), 55: (0, 69), 31: (0, 4), 62: (0, 11)}, 90: {15: (1, {'@': 90}), 56: (1, {'@': 90}), 61: (1, {'@': 90}), 52: (1, {'@': 90}), 54: (1, {'@': 90}), 47: (1, {'@': 90}), 33: (1, {'@': 90}), 57: (1, {'@': 90}), 58: (1, {'@': 90}), 41: (1, {'@': 90}), 49: (1, {'@': 90}), 45: (1, {'@': 90})}, 91: {12: (1, {'@': 173}), 15: (1, {'@': 173}), 56: (1, {'@': 173}), 61: (1, {'@': 173}), 52: (1, {'@': 173}), 54: (1, {'@': 173}), 47: (1, {'@': 173}), 33: (1, {'@': 173}), 57: (1, {'@': 173}), 58: (1, {'@': 173}), 41: (1, {'@': 173}), 49: (1, {'@': 173}), 45: (1, {'@': 173})}, 92: {31: (0, 4), 32: (0, 125), 33: (0, 116), 34: (0, 221), 35: (0, 35), 37: (0, 32), 38: (0, 76), 39: (0, 19), 40: (0, 17), 41: (0, 178), 42: (0, 3), 43: (0, 119), 44: (0, 96), 45: (0, 52), 46: (0, 248), 47: (0, 83), 48: (0, 48), 36: (0, 215), 49: (0, 23), 50: (0, 37), 51: (0, 89), 52: (0, 131), 53: (0, 151), 54: (0, 170), 55: (0, 69), 56: (0, 232), 57: (0, 236), 58: (0, 182), 59: (0, 0), 60: (0, 149), 61: (0, 27), 15: (0, 9), 62: (0, 11)}, 93: {0: (1, {'@': 219}), 13: (1, {'@': 219}), 4: (1, {'@': 219}), 18: (1, {'@': 219}), 19: (1, {'@': 219}), 6: (1, {'@': 219}), 24: (1, {'@': 219}), 8: (1, {'@': 219}), 26: (1, {'@': 219}), 29: (1, {'@': 219}), 12: (1, {'@': 219})}, 94: {63: (0, 252), 12: (0, 108), 27: (0, 259)}, 95: {24: (0, 211), 12: (0, 137)}, 96: {32: (0, 125), 33: (0, 116), 34: (0, 221), 35: (0, 35), 36: (0, 94), 37: (0, 32), 38: (0, 76), 39: (0, 19), 15: (0, 9), 40: (0, 17), 41: (0, 178), 42: (0, 3), 43: (0, 119), 44: (0, 96), 45: (0, 52), 46: (0, 248), 47: (0, 83), 48: (0, 48), 49: (0, 23), 50: (0, 37), 51: (0, 89), 52: (0, 131), 53: (0, 151), 54: (0, 170), 56: (0, 232), 57: (0, 236), 58: (0, 182), 59: (0, 0), 60: (0, 149), 61: (0, 27), 55: (0, 69), 31: (0, 4), 62: (0, 11)}, 97: {24: (1, {'@': 146}), 12: (1, {'@': 146}), 26: (1, {'@': 146}), 6: (1, {'@': 146})}, 98: {15: (1, {'@': 92}), 56: (1, {'@': 92}), 61: (1, {'@': 92}), 52: (1, {'@': 92}), 54: (1, {'@': 92}), 47: (1, {'@': 92}), 33: (1, {'@': 92}), 57: (1, {'@': 92}), 58: (1, {'@': 92}), 41: (1, {'@': 92}), 49: (1, {'@': 92}), 45: (1, {'@': 92})}, 99: {0: (1, {'@': 128}), 1: (1, {'@': 128}), 2: (1, {'@': 128}), 3: (1, {'@': 128}), 4: (1, {'@': 128}), 5: (1, {'@': 128}), 6: (1, {'@': 128}), 7: (1, {'@': 128}), 8: (1, {'@': 128}), 9: (1, {'@': 128}), 10: (1, {'@': 128}), 11: (1, {'@': 128}), 12: (1, {'@': 128}), 13: (1, {'@': 128}), 14: (1, {'@': 128}), 15: (1, {'@': 128}), 16: (1, {'@': 128}), 17: (1, {'@': 128}), 18: (1, {'@': 128}), 19: (1, {'@': 128}), 20: (1, {'@': 128}), 21: (1, {'@': 128}), 22: (1, {'@': 128}), 23: (1, {'@': 128}), 24: (1, {'@': 128}), 25: (1, {'@': 128}), 26: (1, {'@': 128}), 27: (1, {'@': 128}), 28: (1, {'@': 128}), 29: (1, {'@': 128})}, 100: {0: (1, {'@': 136}), 1: (1, {'@': 136}), 2: (1, {'@': 136}), 3: (1, {'@': 136}), 4: (1, {'@': 136}), 5: (1, {'@': 136}), 6: (1, {'@': 136}), 7: (1, {'@': 136}), 
8: (1, {'@': 136}), 9: (1, {'@': 136}), 10: (1, {'@': 136}), 11: (1, {'@': 136}), 12: (1, {'@': 136}), 13: (1, {'@': 136}), 14: (1, {'@': 136}), 15: (1, {'@': 136}), 16: (1, {'@': 136}), 17: (1, {'@': 136}), 18: (1, {'@': 136}), 19: (1, {'@': 136}), 20: (1, {'@': 136}), 21: (1, {'@': 136}), 22: (1, {'@': 136}), 23: (1, {'@': 136}), 24: (1, {'@': 136}), 25: (1, {'@': 136}), 26: (1, {'@': 136}), 27: (1, {'@': 136}), 28: (1, {'@': 136}), 29: (1, {'@': 136})}, 101: {0: (1, {'@': 108}), 1: (1, {'@': 108}), 2: (1, {'@': 108}), 3: (1, {'@': 108}), 4: (1, {'@': 108}), 5: (1, {'@': 108}), 6: (1, {'@': 108}), 7: (1, {'@': 108}), 8: (1, {'@': 108}), 9: (1, {'@': 108}), 10: (1, {'@': 108}), 11: (1, {'@': 108}), 12: (1, {'@': 108}), 13: (1, {'@': 108}), 14: (1, {'@': 108}), 15: (1, {'@': 108}), 16: (1, {'@': 108}), 17: (1, {'@': 108}), 18: (1, {'@': 108}), 19: (1, {'@': 108}), 20: (1, {'@': 108}), 21: (1, {'@': 108}), 22: (1, {'@': 108}), 23: (1, {'@': 108}), 24: (1, {'@': 108}), 25: (1, {'@': 108}), 26: (1, {'@': 108}), 27: (1, {'@': 108}), 28: (1, {'@': 108}), 29: (1, {'@': 108})}, 102: {0: (1, {'@': 86}), 1: (1, {'@': 86}), 2: (1, {'@': 86}), 3: (1, {'@': 86}), 4: (1, {'@': 86}), 5: (1, {'@': 86}), 6: (1, {'@': 86}), 7: (1, {'@': 86}), 8: (1, {'@': 86}), 9: (1, {'@': 86}), 10: (1, {'@': 86}), 11: (1, {'@': 86}), 12: (1, {'@': 86}), 13: (1, {'@': 86}), 14: (1, {'@': 86}), 15: (1, {'@': 86}), 16: (1, {'@': 86}), 17: (1, {'@': 86}), 18: (1, {'@': 86}), 19: (1, {'@': 86}), 20: (1, {'@': 86}), 21: (1, {'@': 86}), 22: (1, {'@': 86}), 23: (1, {'@': 86}), 24: (1, {'@': 86}), 25: (1, {'@': 86}), 26: (1, {'@': 86}), 27: (1, {'@': 86}), 28: (1, {'@': 86}), 29: (1, {'@': 86})}, 103: {15: (1, {'@': 96}), 56: (1, {'@': 96}), 61: (1, {'@': 96}), 52: (1, {'@': 96}), 54: (1, {'@': 96}), 47: (1, {'@': 96}), 33: (1, {'@': 96}), 57: (1, {'@': 96}), 58: (1, {'@': 96}), 41: (1, {'@': 96}), 49: (1, {'@': 96}), 45: (1, {'@': 96})}, 104: {32: (0, 125), 33: (0, 116), 34: (0, 221), 64: (0, 138), 36: (0, 227), 38: (0, 76), 39: (0, 19), 65: (0, 203), 40: (0, 17), 66: (0, 222), 67: (0, 169), 42: (0, 3), 43: (0, 119), 44: (0, 96), 46: (0, 248), 47: (0, 83), 48: (0, 48), 68: (0, 217), 50: (0, 37), 69: (0, 161), 70: (0, 158), 59: (0, 0), 72: (0, 80), 71: (0, 235), 73: (0, 97), 61: (0, 27), 55: (0, 69), 31: (0, 4), 62: (0, 11), 74: (0, 124), 75: (0, 41), 35: (0, 35), 37: (0, 32), 76: (0, 112), 41: (0, 178), 77: (0, 191), 45: (0, 52), 49: (0, 23), 51: (0, 89), 52: (0, 131), 53: (0, 151), 54: (0, 170), 56: (0, 232), 57: (0, 236), 58: (0, 182), 60: (0, 149), 15: (0, 9)}, 105: {0: (1, {'@': 137}), 1: (1, {'@': 137}), 2: (1, {'@': 137}), 3: (1, {'@': 137}), 4: (1, {'@': 137}), 5: (1, {'@': 137}), 6: (1, {'@': 137}), 7: (1, {'@': 137}), 8: (1, {'@': 137}), 9: (1, {'@': 137}), 10: (1, {'@': 137}), 11: (1, {'@': 137}), 12: (1, {'@': 137}), 13: (1, {'@': 137}), 14: (1, {'@': 137}), 15: (1, {'@': 137}), 16: (1, {'@': 137}), 17: (1, {'@': 137}), 18: (1, {'@': 137}), 19: (1, {'@': 137}), 20: (1, {'@': 137}), 21: (1, {'@': 137}), 22: (1, {'@': 137}), 23: (1, {'@': 137}), 24: (1, {'@': 137}), 25: (1, {'@': 137}), 26: (1, {'@': 137}), 27: (1, {'@': 137}), 28: (1, {'@': 137}), 29: (1, {'@': 137})}, 106: {78: (0, 132), 54: (0, 142)}, 107: {98: (0, 224), 99: (0, 230)}, 108: {31: (0, 4), 32: (0, 125), 33: (0, 116), 34: (0, 221), 35: (0, 35), 37: (0, 32), 38: (0, 76), 39: (0, 19), 40: (0, 17), 41: (0, 178), 42: (0, 3), 43: (0, 119), 44: (0, 96), 45: (0, 52), 46: (0, 248), 47: (0, 83), 48: (0, 48), 49: (0, 23), 36: (0, 153), 50: (0, 37), 51: (0, 89), 
52: (0, 131), 53: (0, 151), 54: (0, 170), 55: (0, 69), 56: (0, 232), 57: (0, 236), 58: (0, 182), 59: (0, 0), 60: (0, 149), 61: (0, 27), 15: (0, 9), 62: (0, 11)}, 109: {32: (0, 125), 33: (0, 116), 38: (0, 76), 39: (0, 19), 46: (0, 122), 40: (0, 17), 31: (0, 15), 42: (0, 3), 43: (0, 119), 44: (0, 96), 47: (0, 83), 48: (0, 48), 50: (0, 37), 90: (0, 2), 59: (0, 0), 61: (0, 27), 55: (0, 69), 62: (0, 11), 35: (0, 35), 37: (0, 32), 45: (0, 52), 49: (0, 23), 51: (0, 89), 52: (0, 131), 53: (0, 151), 56: (0, 232), 57: (0, 236), 58: (0, 182), 60: (0, 149)}, 110: {32: (0, 125), 33: (0, 116), 34: (0, 221), 36: (0, 197), 38: (0, 76), 39: (0, 19), 40: (0, 17), 42: (0, 3), 43: (0, 119), 44: (0, 96), 46: (0, 248), 47: (0, 83), 48: (0, 48), 50: (0, 37), 59: (0, 0), 72: (0, 80), 61: (0, 27), 55: (0, 69), 74: (0, 124), 31: (0, 4), 62: (0, 11), 69: (0, 223), 75: (0, 41), 35: (0, 35), 37: (0, 32), 68: (0, 242), 41: (0, 178), 77: (0, 231), 45: (0, 52), 49: (0, 23), 51: (0, 89), 52: (0, 131), 53: (0, 151), 54: (0, 170), 70: (0, 240), 76: (0, 247), 56: (0, 232), 57: (0, 236), 58: (0, 182), 60: (0, 149), 15: (0, 9)}, 111: {0: (1, {'@': 207}), 12: (1, {'@': 207}), 24: (1, {'@': 207}), 26: (1, {'@': 207}), 6: (1, {'@': 207})}, 112: {24: (1, {'@': 142}), 0: (1, {'@': 158}), 4: (1, {'@': 158}), 19: (1, {'@': 158}), 8: (1, {'@': 158}), 12: (1, {'@': 142}), 26: (1, {'@': 142}), 6: (1, {'@': 142})}, 113: {54: (1, {'@': 196}), 78: (1, {'@': 196})}, 114: {30: (1, {'@': 190})}, 115: {15: (1, {'@': 99}), 56: (1, {'@': 99}), 61: (1, {'@': 99}), 52: (1, {'@': 99}), 54: (1, {'@': 99}), 47: (1, {'@': 99}), 33: (1, {'@': 99}), 57: (1, {'@': 99}), 58: (1, {'@': 99}), 41: (1, {'@': 99}), 49: (1, {'@': 99}), 45: (1, {'@': 99})}, 116: {32: (0, 125), 100: (0, 225), 33: (0, 116), 34: (0, 221), 64: (0, 138), 36: (0, 134), 62: (0, 174), 38: (0, 76), 39: (0, 19), 65: (0, 203), 40: (0, 17), 66: (0, 222), 67: (0, 169), 42: (0, 216), 43: (0, 119), 44: (0, 96), 46: (0, 248), 71: (0, 183), 47: (0, 83), 48: (0, 48), 68: (0, 217), 101: (0, 233), 50: (0, 37), 102: (0, 165), 69: (0, 161), 70: (0, 158), 103: (0, 204), 8: (0, 143), 104: (0, 135), 59: (0, 0), 72: (0, 80), 6: (0, 24), 73: (0, 97), 4: (0, 62), 61: (0, 27), 55: (0, 69), 31: (0, 4), 74: (0, 124), 75: (0, 41), 35: (0, 35), 26: (0, 91), 37: (0, 32), 19: (0, 77), 76: (0, 112), 41: (0, 178), 77: (0, 191), 45: (0, 52), 49: (0, 23), 29: (0, 205), 18: (0, 212), 105: (0, 243), 51: (0, 89), 52: (0, 131), 53: (0, 151), 54: (0, 170), 106: (0, 249), 56: (0, 232), 57: (0, 236), 58: (0, 182), 60: (0, 149), 13: (0, 256), 0: (0, 1), 15: (0, 9)}, 117: {8: (0, 251), 0: (1, {'@': 153}), 24: (1, {'@': 153}), 19: (1, {'@': 153}), 12: (1, {'@': 153}), 26: (1, {'@': 153}), 6: (1, {'@': 153})}, 118: {0: (1, {'@': 122}), 1: (1, {'@': 122}), 2: (1, {'@': 122}), 3: (1, {'@': 122}), 4: (1, {'@': 122}), 5: (1, {'@': 122}), 6: (1, {'@': 122}), 7: (1, {'@': 122}), 8: (1, {'@': 122}), 9: (1, {'@': 122}), 10: (1, {'@': 122}), 11: (1, {'@': 122}), 12: (1, {'@': 122}), 13: (1, {'@': 122}), 14: (1, {'@': 122}), 15: (1, {'@': 122}), 16: (1, {'@': 122}), 17: (1, {'@': 122}), 18: (1, {'@': 122}), 19: (1, {'@': 122}), 20: (1, {'@': 122}), 21: (1, {'@': 122}), 22: (1, {'@': 122}), 23: (1, {'@': 122}), 24: (1, {'@': 122}), 25: (1, {'@': 122}), 26: (1, {'@': 122}), 27: (1, {'@': 122}), 28: (1, {'@': 122}), 29: (1, {'@': 122})}, 119: {0: (1, {'@': 195}), 1: (1, {'@': 195}), 2: (1, {'@': 195}), 3: (1, {'@': 195}), 4: (1, {'@': 195}), 5: (1, {'@': 195}), 6: (1, {'@': 195}), 7: (1, {'@': 195}), 8: (1, {'@': 195}), 9: (1, {'@': 195}), 
10: (1, {'@': 195}), 11: (1, {'@': 195}), 12: (1, {'@': 195}), 13: (1, {'@': 195}), 14: (1, {'@': 195}), 15: (1, {'@': 195}), 16: (1, {'@': 195}), 17: (1, {'@': 195}), 18: (1, {'@': 195}), 19: (1, {'@': 195}), 20: (1, {'@': 195}), 21: (1, {'@': 195}), 22: (1, {'@': 195}), 23: (1, {'@': 195}), 24: (1, {'@': 195}), 33: (1, {'@': 195}), 25: (1, {'@': 195}), 27: (1, {'@': 195}), 28: (1, {'@': 195}), 26: (1, {'@': 195}), 29: (1, {'@': 195})}, 120: {16: (0, 123), 2: (0, 20), 9: (0, 29), 14: (0, 38), 7: (0, 45), 10: (0, 68), 107: (0, 150), 25: (0, 84), 20: (0, 90), 21: (0, 98), 11: (0, 54), 22: (0, 103), 3: (0, 115)}, 121: {30: (1, {'@': 183}), 79: (1, {'@': 183})}, 122: {1: (0, 25), 5: (0, 33), 28: (0, 42), 15: (0, 50)}, 123: {15: (1, {'@': 91}), 56: (1, {'@': 91}), 61: (1, {'@': 91}), 52: (1, {'@': 91}), 54: (1, {'@': 91}), 47: (1, {'@': 91}), 33: (1, {'@': 91}), 57: (1, {'@': 91}), 58: (1, {'@': 91}), 41: (1, {'@': 91}), 49: (1, {'@': 91}), 45: (1, {'@': 91})}, 124: {108: (0, 167), 18: (0, 176), 0: (1, {'@': 157}), 4: (1, {'@': 157}), 19: (1, {'@': 157}), 8: (1, {'@': 157}), 12: (1, {'@': 157}), 6: (1, {'@': 157}), 24: (1, {'@': 157}), 26: (1, {'@': 157})}, 125: {0: (1, {'@': 102}), 1: (1, {'@': 102}), 2: (1, {'@': 102}), 3: (1, {'@': 102}), 4: (1, {'@': 102}), 5: (1, {'@': 102}), 6: (1, {'@': 102}), 7: (1, {'@': 102}), 8: (1, {'@': 102}), 9: (1, {'@': 102}), 10: (1, {'@': 102}), 11: (1, {'@': 102}), 12: (1, {'@': 102}), 13: (1, {'@': 102}), 14: (1, {'@': 102}), 15: (1, {'@': 102}), 16: (1, {'@': 102}), 17: (1, {'@': 102}), 18: (1, {'@': 102}), 19: (1, {'@': 102}), 20: (1, {'@': 102}), 21: (1, {'@': 102}), 22: (1, {'@': 102}), 23: (1, {'@': 102}), 24: (1, {'@': 102}), 25: (1, {'@': 102}), 26: (1, {'@': 102}), 27: (1, {'@': 102}), 28: (1, {'@': 102}), 29: (1, {'@': 102})}, 126: {32: (0, 125), 33: (0, 116), 34: (0, 221), 35: (0, 35), 37: (0, 32), 38: (0, 76), 39: (0, 19), 15: (0, 9), 40: (0, 17), 41: (0, 178), 42: (0, 3), 43: (0, 119), 44: (0, 96), 45: (0, 52), 46: (0, 248), 47: (0, 83), 36: (0, 258), 48: (0, 48), 49: (0, 23), 50: (0, 37), 51: (0, 89), 52: (0, 131), 53: (0, 151), 54: (0, 170), 56: (0, 232), 57: (0, 236), 58: (0, 182), 59: (0, 0), 60: (0, 149), 61: (0, 27), 55: (0, 69), 31: (0, 4), 62: (0, 11)}, 127: {0: (1, {'@': 138}), 1: (1, {'@': 138}), 2: (1, {'@': 138}), 3: (1, {'@': 138}), 4: (1, {'@': 138}), 5: (1, {'@': 138}), 6: (1, {'@': 138}), 7: (1, {'@': 138}), 8: (1, {'@': 138}), 9: (1, {'@': 138}), 10: (1, {'@': 138}), 11: (1, {'@': 138}), 12: (1, {'@': 138}), 13: (1, {'@': 138}), 14: (1, {'@': 138}), 15: (1, {'@': 138}), 16: (1, {'@': 138}), 17: (1, {'@': 138}), 18: (1, {'@': 138}), 19: (1, {'@': 138}), 20: (1, {'@': 138}), 21: (1, {'@': 138}), 22: (1, {'@': 138}), 23: (1, {'@': 138}), 24: (1, {'@': 138}), 25: (1, {'@': 138}), 26: (1, {'@': 138}), 27: (1, {'@': 138}), 28: (1, {'@': 138}), 29: (1, {'@': 138})}, 128: {30: (1, {'@': 186}), 79: (1, {'@': 186})}, 129: {12: (0, 108), 63: (0, 43), 24: (0, 51)}, 130: {95: (0, 239), 109: (0, 246)}, 131: {0: (1, {'@': 203}), 1: (1, {'@': 203}), 2: (1, {'@': 203}), 3: (1, {'@': 203}), 4: (1, {'@': 203}), 5: (1, {'@': 203}), 6: (1, {'@': 203}), 7: (1, {'@': 203}), 8: (1, {'@': 203}), 9: (1, {'@': 203}), 10: (1, {'@': 203}), 11: (1, {'@': 203}), 12: (1, {'@': 203}), 13: (1, {'@': 203}), 14: (1, {'@': 203}), 15: (1, {'@': 203}), 16: (1, {'@': 203}), 17: (1, {'@': 203}), 18: (1, {'@': 203}), 19: (1, {'@': 203}), 20: (1, {'@': 203}), 21: (1, {'@': 203}), 22: (1, {'@': 203}), 23: (1, {'@': 203}), 24: (1, {'@': 203}), 33: (1, {'@': 203}), 25: (1, 
{'@': 203}), 27: (1, {'@': 203}), 28: (1, {'@': 203}), 26: (1, {'@': 203}), 29: (1, {'@': 203})}, 132: {110: (0, 21), 99: (0, 30)}, 133: {}, 134: {13: (0, 53), 26: (0, 91), 63: (0, 60), 10: (0, 68), 107: (0, 75), 25: (0, 84), 20: (0, 90), 21: (0, 98), 22: (0, 103), 12: (0, 108), 3: (0, 115), 16: (0, 123), 111: (0, 5), 6: (0, 24), 100: (0, 12), 2: (0, 20), 9: (0, 29), 14: (0, 38), 7: (0, 45), 11: (0, 54), 0: (1, {'@': 163}), 19: (1, {'@': 163}), 8: (1, {'@': 163}), 4: (1, {'@': 163}), 18: (1, {'@': 163}), 29: (1, {'@': 163})}, 135: {12: (1, {'@': 179})}, 136: {32: (0, 125), 33: (0, 116), 34: (0, 221), 35: (0, 35), 36: (0, 129), 37: (0, 32), 38: (0, 76), 39: (0, 19), 15: (0, 9), 40: (0, 17), 41: (0, 178), 42: (0, 3), 43: (0, 119), 44: (0, 96), 45: (0, 52), 46: (0, 248), 47: (0, 83), 48: (0, 48), 49: (0, 23), 50: (0, 37), 51: (0, 89), 52: (0, 131), 53: (0, 151), 54: (0, 170), 56: (0, 232), 57: (0, 236), 58: (0, 182), 59: (0, 0), 60: (0, 149), 61: (0, 27), 55: (0, 69), 31: (0, 4), 62: (0, 11)}, 137: {31: (0, 4), 32: (0, 125), 33: (0, 116), 34: (0, 221), 35: (0, 35), 37: (0, 32), 38: (0, 76), 39: (0, 19), 40: (0, 17), 41: (0, 178), 42: (0, 3), 43: (0, 119), 44: (0, 96), 45: (0, 52), 46: (0, 248), 47: (0, 83), 48: (0, 48), 49: (0, 23), 50: (0, 37), 36: (0, 79), 51: (0, 89), 52: (0, 131), 53: (0, 151), 54: (0, 170), 55: (0, 69), 56: (0, 232), 57: (0, 236), 58: (0, 182), 59: (0, 0), 60: (0, 149), 61: (0, 27), 15: (0, 9), 62: (0, 11)}, 138: {112: (0, 36), 19: (0, 44), 0: (1, {'@': 148}), 12: (1, {'@': 148}), 24: (1, {'@': 148}), 26: (1, {'@': 148}), 6: (1, {'@': 148})}, 139: {30: (1, {'@': 64})}, 140: {12: (0, 108), 63: (0, 58)}, 141: {56: (1, {'@': 192}), 61: (1, {'@': 192}), 52: (1, {'@': 192}), 47: (1, {'@': 192}), 33: (1, {'@': 192}), 57: (1, {'@': 192}), 58: (1, {'@': 192}), 49: (1, {'@': 192}), 45: (1, {'@': 192})}, 142: {56: (1, {'@': 193}), 61: (1, {'@': 193}), 52: (1, {'@': 193}), 47: (1, {'@': 193}), 33: (1, {'@': 193}), 57: (1, {'@': 193}), 58: (1, {'@': 193}), 49: (1, {'@': 193}), 45: (1, {'@': 193})}, 143: {12: (1, {'@': 166})}, 144: {31: (0, 4), 32: (0, 125), 33: (0, 116), 34: (0, 221), 35: (0, 35), 37: (0, 32), 36: (0, 140), 38: (0, 76), 39: (0, 19), 40: (0, 17), 41: (0, 178), 42: (0, 3), 43: (0, 119), 44: (0, 96), 45: (0, 52), 46: (0, 248), 47: (0, 83), 48: (0, 48), 49: (0, 23), 50: (0, 37), 51: (0, 89), 52: (0, 131), 53: (0, 151), 54: (0, 170), 55: (0, 69), 56: (0, 232), 57: (0, 236), 58: (0, 182), 59: (0, 0), 60: (0, 149), 61: (0, 27), 15: (0, 9), 62: (0, 11)}, 145: {24: (0, 86)}, 146: {30: (1, {'@': 185}), 79: (1, {'@': 185})}, 147: {32: (0, 125), 33: (0, 116), 34: (0, 221), 36: (0, 197), 38: (0, 76), 39: (0, 19), 40: (0, 17), 42: (0, 3), 43: (0, 119), 44: (0, 96), 46: (0, 248), 47: (0, 83), 48: (0, 48), 50: (0, 37), 59: (0, 0), 61: (0, 27), 55: (0, 69), 74: (0, 124), 31: (0, 4), 62: (0, 11), 69: (0, 223), 75: (0, 41), 35: (0, 35), 37: (0, 32), 72: (0, 67), 41: (0, 178), 77: (0, 231), 45: (0, 52), 49: (0, 23), 51: (0, 89), 52: (0, 131), 53: (0, 151), 54: (0, 170), 76: (0, 247), 56: (0, 232), 57: (0, 236), 58: (0, 182), 60: (0, 149), 15: (0, 9)}, 148: {0: (1, {'@': 82}), 1: (1, {'@': 82}), 2: (1, {'@': 82}), 3: (1, {'@': 82}), 4: (1, {'@': 82}), 5: (1, {'@': 82}), 6: (1, {'@': 82}), 7: (1, {'@': 82}), 8: (1, {'@': 82}), 9: (1, {'@': 82}), 10: (1, {'@': 82}), 11: (1, {'@': 82}), 12: (1, {'@': 82}), 13: (1, {'@': 82}), 14: (1, {'@': 82}), 15: (1, {'@': 82}), 16: (1, {'@': 82}), 17: (1, {'@': 82}), 18: (1, {'@': 82}), 19: (1, {'@': 82}), 20: (1, {'@': 82}), 21: (1, {'@': 82}), 22: 
(1, {'@': 82}), 23: (1, {'@': 82}), 24: (1, {'@': 82}), 25: (1, {'@': 82}), 26: (1, {'@': 82}), 27: (1, {'@': 82}), 28: (1, {'@': 82}), 29: (1, {'@': 82})}, 149: {0: (1, {'@': 115}), 1: (1, {'@': 115}), 2: (1, {'@': 115}), 3: (1, {'@': 115}), 4: (1, {'@': 115}), 5: (1, {'@': 115}), 6: (1, {'@': 115}), 7: (1, {'@': 115}), 8: (1, {'@': 115}), 9: (1, {'@': 115}), 10: (1, {'@': 115}), 11: (1, {'@': 115}), 12: (1, {'@': 115}), 13: (1, {'@': 115}), 14: (1, {'@': 115}), 15: (1, {'@': 115}), 16: (1, {'@': 115}), 17: (1, {'@': 115}), 18: (1, {'@': 115}), 19: (1, {'@': 115}), 20: (1, {'@': 115}), 21: (1, {'@': 115}), 22: (1, {'@': 115}), 23: (1, {'@': 115}), 24: (1, {'@': 115}), 25: (1, {'@': 115}), 26: (1, {'@': 115}), 27: (1, {'@': 115}), 28: (1, {'@': 115}), 29: (1, {'@': 115})}, 150: {31: (0, 4), 32: (0, 125), 33: (0, 116), 34: (0, 221), 35: (0, 35), 37: (0, 32), 36: (0, 39), 38: (0, 76), 39: (0, 19), 40: (0, 17), 41: (0, 178), 42: (0, 3), 43: (0, 119), 44: (0, 96), 45: (0, 52), 46: (0, 248), 47: (0, 83), 48: (0, 48), 49: (0, 23), 50: (0, 37), 51: (0, 89), 52: (0, 131), 53: (0, 151), 54: (0, 170), 55: (0, 69), 56: (0, 232), 57: (0, 236), 58: (0, 182), 59: (0, 0), 60: (0, 149), 61: (0, 27), 15: (0, 9), 62: (0, 11)}, 151: {0: (1, {'@': 111}), 1: (1, {'@': 111}), 2: (1, {'@': 111}), 3: (1, {'@': 111}), 4: (1, {'@': 111}), 5: (1, {'@': 111}), 6: (1, {'@': 111}), 7: (1, {'@': 111}), 8: (1, {'@': 111}), 9: (1, {'@': 111}), 10: (1, {'@': 111}), 11: (1, {'@': 111}), 12: (1, {'@': 111}), 13: (1, {'@': 111}), 14: (1, {'@': 111}), 15: (1, {'@': 111}), 16: (1, {'@': 111}), 17: (1, {'@': 111}), 18: (1, {'@': 111}), 19: (1, {'@': 111}), 20: (1, {'@': 111}), 21: (1, {'@': 111}), 22: (1, {'@': 111}), 23: (1, {'@': 111}), 24: (1, {'@': 111}), 25: (1, {'@': 111}), 26: (1, {'@': 111}), 27: (1, {'@': 111}), 28: (1, {'@': 111}), 29: (1, {'@': 111})}, 152: {4: (0, 147), 0: (1, {'@': 156}), 24: (1, {'@': 156}), 19: (1, {'@': 156}), 8: (1, {'@': 156}), 12: (1, {'@': 156}), 6: (1, {'@': 156}), 26: (1, {'@': 156})}, 153: {12: (1, {'@': 204}), 17: (1, {'@': 204}), 27: (1, {'@': 204}), 24: (1, {'@': 204})}, 154: {97: (0, 245)}, 155: {0: (1, {'@': 212}), 12: (1, {'@': 212}), 4: (1, {'@': 212}), 19: (1, {'@': 212}), 6: (1, {'@': 212}), 24: (1, {'@': 212}), 8: (1, {'@': 212}), 26: (1, {'@': 212})}, 156: {24: (0, 254), 13: (0, 53), 111: (0, 5), 0: (1, {'@': 163}), 19: (1, {'@': 163}), 8: (1, {'@': 163}), 4: (1, {'@': 163}), 18: (1, {'@': 163}), 29: (1, {'@': 163})}, 157: {17: (0, 46), 12: (0, 137)}, 158: {0: (1, {'@': 155}), 19: (1, {'@': 155}), 8: (1, {'@': 155}), 24: (1, {'@': 143}), 12: (1, {'@': 143}), 26: (1, {'@': 143}), 6: (1, {'@': 143})}, 159: {32: (0, 125), 33: (0, 116), 34: (0, 221), 36: (0, 197), 38: (0, 76), 39: (0, 19), 40: (0, 17), 42: (0, 3), 43: (0, 119), 44: (0, 96), 46: (0, 248), 47: (0, 83), 48: (0, 48), 50: (0, 37), 59: (0, 0), 61: (0, 27), 55: (0, 69), 74: (0, 124), 31: (0, 4), 62: (0, 11), 69: (0, 223), 75: (0, 41), 35: (0, 35), 37: (0, 32), 41: (0, 178), 77: (0, 231), 45: (0, 52), 49: (0, 23), 72: (0, 155), 51: (0, 89), 52: (0, 131), 53: (0, 151), 54: (0, 170), 76: (0, 247), 56: (0, 232), 57: (0, 236), 58: (0, 182), 60: (0, 149), 15: (0, 9)}, 160: {31: (0, 4), 32: (0, 125), 33: (0, 116), 34: (0, 221), 35: (0, 35), 37: (0, 32), 38: (0, 76), 39: (0, 19), 40: (0, 17), 41: (0, 178), 36: (0, 93), 42: (0, 3), 43: (0, 119), 44: (0, 96), 45: (0, 52), 46: (0, 248), 47: (0, 83), 48: (0, 48), 49: (0, 23), 50: (0, 37), 51: (0, 89), 52: (0, 131), 53: (0, 151), 54: (0, 170), 55: (0, 69), 56: (0, 232), 57: (0, 236), 
58: (0, 182), 59: (0, 0), 60: (0, 149), 61: (0, 27), 15: (0, 9), 62: (0, 11)}, 161: {24: (1, {'@': 140}), 0: (1, {'@': 164}), 19: (1, {'@': 164}), 8: (1, {'@': 164}), 4: (1, {'@': 164}), 18: (1, {'@': 164}), 29: (1, {'@': 164}), 12: (1, {'@': 140}), 26: (1, {'@': 140}), 6: (1, {'@': 140})}, 162: {30: (1, {'@': 187})}, 163: {31: (0, 4), 69: (0, 223), 75: (0, 41), 32: (0, 125), 34: (0, 221), 35: (0, 35), 33: (0, 116), 36: (0, 197), 37: (0, 32), 38: (0, 76), 39: (0, 19), 15: (0, 9), 40: (0, 17), 41: (0, 178), 42: (0, 3), 77: (0, 231), 43: (0, 119), 44: (0, 96), 45: (0, 52), 46: (0, 248), 47: (0, 83), 48: (0, 48), 49: (0, 23), 74: (0, 74), 50: (0, 37), 51: (0, 89), 52: (0, 131), 53: (0, 151), 54: (0, 170), 56: (0, 232), 57: (0, 236), 59: (0, 0), 58: (0, 182), 60: (0, 149), 61: (0, 27), 55: (0, 69), 62: (0, 11)}, 164: {24: (0, 261)}, 165: {12: (0, 136)}, 166: {0: (1, {'@': 120}), 1: (1, {'@': 120}), 2: (1, {'@': 120}), 3: (1, {'@': 120}), 4: (1, {'@': 120}), 5: (1, {'@': 120}), 6: (1, {'@': 120}), 7: (1, {'@': 120}), 8: (1, {'@': 120}), 9: (1, {'@': 120}), 10: (1, {'@': 120}), 11: (1, {'@': 120}), 12: (1, {'@': 120}), 13: (1, {'@': 120}), 14: (1, {'@': 120}), 15: (1, {'@': 120}), 16: (1, {'@': 120}), 17: (1, {'@': 120}), 18: (1, {'@': 120}), 19: (1, {'@': 120}), 20: (1, {'@': 120}), 21: (1, {'@': 120}), 22: (1, {'@': 120}), 23: (1, {'@': 120}), 24: (1, {'@': 120}), 25: (1, {'@': 120}), 26: (1, {'@': 120}), 27: (1, {'@': 120}), 28: (1, {'@': 120}), 29: (1, {'@': 120})}, 167: {18: (0, 163), 0: (1, {'@': 159}), 24: (1, {'@': 159}), 8: (1, {'@': 159}), 4: (1, {'@': 159}), 19: (1, {'@': 159}), 12: (1, {'@': 159}), 6: (1, {'@': 159}), 26: (1, {'@': 159})}, 168: {13: (0, 53), 111: (0, 5), 24: (0, 100), 0: (1, {'@': 163}), 19: (1, {'@': 163}), 8: (1, {'@': 163}), 4: (1, {'@': 163}), 18: (1, {'@': 163}), 29: (1, {'@': 163})}, 169: {113: (0, 78), 0: (0, 85)}, 170: {52: (0, 131), 48: (0, 48), 43: (0, 119), 39: (0, 214), 57: (0, 236)}, 171: {30: (1, {'@': 76})}, 172: {0: (1, {'@': 214}), 12: (1, {'@': 214}), 4: (1, {'@': 214}), 18: (1, {'@': 214}), 19: (1, {'@': 214}), 6: (1, {'@': 214}), 24: (1, {'@': 214}), 8: (1, {'@': 214}), 26: (1, {'@': 214})}, 173: {13: (0, 53), 111: (0, 5), 24: (0, 7), 0: (1, {'@': 163}), 19: (1, {'@': 163}), 8: (1, {'@': 163}), 4: (1, {'@': 163}), 18: (1, {'@': 163}), 29: (1, {'@': 163})}, 174: {63: (0, 63), 24: (0, 70), 12: (0, 108), 0: (1, {'@': 106}), 2: (1, {'@': 106}), 3: (1, {'@': 106}), 4: (1, {'@': 106}), 6: (1, {'@': 106}), 7: (1, {'@': 106}), 9: (1, {'@': 106}), 8: (1, {'@': 106}), 10: (1, {'@': 106}), 11: (1, {'@': 106}), 13: (1, {'@': 106}), 14: (1, {'@': 106}), 16: (1, {'@': 106}), 18: (1, {'@': 106}), 19: (1, {'@': 106}), 20: (1, {'@': 106}), 21: (1, {'@': 106}), 25: (1, {'@': 106}), 22: (1, {'@': 106}), 26: (1, {'@': 106}), 29: (1, {'@': 106})}, 175: {12: (0, 55), 13: (0, 53), 111: (0, 5), 0: (1, {'@': 163}), 19: (1, {'@': 163}), 8: (1, {'@': 163}), 4: (1, {'@': 163}), 18: (1, {'@': 163}), 29: (1, {'@': 163})}, 176: {31: (0, 4), 69: (0, 223), 75: (0, 41), 32: (0, 125), 34: (0, 221), 35: (0, 35), 33: (0, 116), 36: (0, 197), 37: (0, 32), 38: (0, 76), 39: (0, 19), 15: (0, 9), 40: (0, 17), 41: (0, 178), 74: (0, 172), 42: (0, 3), 77: (0, 231), 43: (0, 119), 44: (0, 96), 45: (0, 52), 46: (0, 248), 47: (0, 83), 48: (0, 48), 49: (0, 23), 50: (0, 37), 51: (0, 89), 52: (0, 131), 53: (0, 151), 54: (0, 170), 56: (0, 232), 57: (0, 236), 59: (0, 0), 58: (0, 182), 60: (0, 149), 61: (0, 27), 55: (0, 69), 62: (0, 11)}, 177: {24: (0, 105)}, 178: {52: (0, 131), 48: (0, 48), 43: (0, 
119), 39: (0, 198), 57: (0, 236)}, 179: {79: (0, 241), 88: (0, 253), 83: (0, 162), 30: (1, {'@': 75})}, 180: {31: (0, 4), 69: (0, 223), 32: (0, 125), 33: (0, 116), 34: (0, 221), 35: (0, 35), 36: (0, 197), 37: (0, 32), 38: (0, 76), 39: (0, 19), 15: (0, 9), 40: (0, 17), 41: (0, 178), 42: (0, 3), 43: (0, 119), 44: (0, 96), 45: (0, 52), 46: (0, 248), 47: (0, 83), 48: (0, 48), 49: (0, 23), 50: (0, 37), 51: (0, 89), 75: (0, 82), 53: (0, 151), 54: (0, 170), 52: (0, 131), 56: (0, 232), 57: (0, 236), 59: (0, 0), 58: (0, 182), 60: (0, 149), 61: (0, 27), 55: (0, 69), 62: (0, 11)}, 181: {24: (0, 14)}, 182: {15: (1, {'@': 175}), 56: (1, {'@': 175}), 61: (1, {'@': 175}), 52: (1, {'@': 175}), 54: (1, {'@': 175}), 47: (1, {'@': 175}), 33: (1, {'@': 175}), 57: (1, {'@': 175}), 58: (1, {'@': 175}), 41: (1, {'@': 175}), 49: (1, {'@': 175}), 45: (1, {'@': 175})}, 183: {24: (0, 99), 100: (0, 104), 6: (0, 24), 26: (0, 91)}, 184: {12: (0, 64)}, 185: {29: (0, 180), 0: (1, {'@': 162}), 24: (1, {'@': 162}), 8: (1, {'@': 162}), 4: (1, {'@': 162}), 18: (1, {'@': 162}), 19: (1, {'@': 162}), 12: (1, {'@': 162}), 6: (1, {'@': 162}), 26: (1, {'@': 162})}, 186: {0: (1, {'@': 84}), 1: (1, {'@': 84}), 2: (1, {'@': 84}), 3: (1, {'@': 84}), 4: (1, {'@': 84}), 5: (1, {'@': 84}), 6: (1, {'@': 84}), 7: (1, {'@': 84}), 8: (1, {'@': 84}), 9: (1, {'@': 84}), 10: (1, {'@': 84}), 11: (1, {'@': 84}), 12: (1, {'@': 84}), 13: (1, {'@': 84}), 14: (1, {'@': 84}), 15: (1, {'@': 84}), 16: (1, {'@': 84}), 17: (1, {'@': 84}), 18: (1, {'@': 84}), 19: (1, {'@': 84}), 20: (1, {'@': 84}), 21: (1, {'@': 84}), 22: (1, {'@': 84}), 23: (1, {'@': 84}), 24: (1, {'@': 84}), 25: (1, {'@': 84}), 26: (1, {'@': 84}), 27: (1, {'@': 84}), 28: (1, {'@': 84}), 29: (1, {'@': 84})}, 187: {30: (1, {'@': 80})}, 188: {0: (1, {'@': 216}), 4: (1, {'@': 216}), 6: (1, {'@': 216}), 8: (1, {'@': 216}), 12: (1, {'@': 216}), 18: (1, {'@': 216}), 19: (1, {'@': 216}), 24: (1, {'@': 216}), 26: (1, {'@': 216}), 29: (1, {'@': 216})}, 189: {0: (1, {'@': 123}), 1: (1, {'@': 123}), 2: (1, {'@': 123}), 3: (1, {'@': 123}), 4: (1, {'@': 123}), 5: (1, {'@': 123}), 6: (1, {'@': 123}), 7: (1, {'@': 123}), 8: (1, {'@': 123}), 9: (1, {'@': 123}), 10: (1, {'@': 123}), 11: (1, {'@': 123}), 12: (1, {'@': 123}), 13: (1, {'@': 123}), 14: (1, {'@': 123}), 15: (1, {'@': 123}), 16: (1, {'@': 123}), 17: (1, {'@': 123}), 18: (1, {'@': 123}), 19: (1, {'@': 123}), 20: (1, {'@': 123}), 21: (1, {'@': 123}), 22: (1, {'@': 123}), 23: (1, {'@': 123}), 24: (1, {'@': 123}), 25: (1, {'@': 123}), 26: (1, {'@': 123}), 27: (1, {'@': 123}), 28: (1, {'@': 123}), 29: (1, {'@': 123})}, 190: {32: (0, 125), 33: (0, 116), 34: (0, 221), 36: (0, 197), 38: (0, 76), 39: (0, 19), 40: (0, 17), 42: (0, 3), 43: (0, 119), 44: (0, 96), 46: (0, 248), 47: (0, 83), 48: (0, 48), 64: (0, 71), 68: (0, 217), 50: (0, 37), 59: (0, 0), 72: (0, 80), 66: (0, 213), 61: (0, 27), 55: (0, 69), 74: (0, 124), 31: (0, 4), 62: (0, 11), 69: (0, 223), 75: (0, 41), 35: (0, 35), 37: (0, 32), 41: (0, 178), 77: (0, 231), 45: (0, 52), 49: (0, 23), 51: (0, 89), 52: (0, 131), 53: (0, 151), 54: (0, 170), 70: (0, 240), 76: (0, 247), 56: (0, 232), 57: (0, 236), 58: (0, 182), 60: (0, 149), 15: (0, 9)}, 191: {24: (1, {'@': 141}), 0: (1, {'@': 161}), 8: (1, {'@': 161}), 4: (1, {'@': 161}), 18: (1, {'@': 161}), 19: (1, {'@': 161}), 12: (1, {'@': 141}), 26: (1, {'@': 141}), 6: (1, {'@': 141})}, 192: {31: (0, 4), 69: (0, 223), 32: (0, 125), 33: (0, 116), 34: (0, 221), 35: (0, 35), 36: (0, 197), 37: (0, 32), 38: (0, 76), 39: (0, 19), 15: (0, 9), 40: (0, 17), 41: (0, 
178), 42: (0, 3), 43: (0, 119), 44: (0, 96), 45: (0, 52), 46: (0, 248), 47: (0, 83), 48: (0, 48), 49: (0, 23), 50: (0, 37), 51: (0, 89), 52: (0, 131), 53: (0, 151), 54: (0, 170), 75: (0, 188), 56: (0, 232), 57: (0, 236), 59: (0, 0), 58: (0, 182), 60: (0, 149), 61: (0, 27), 55: (0, 69), 62: (0, 11)}, 193: {32: (0, 125), 33: (0, 116), 34: (0, 221), 64: (0, 138), 36: (0, 197), 38: (0, 76), 39: (0, 19), 40: (0, 17), 42: (0, 3), 43: (0, 119), 44: (0, 96), 46: (0, 248), 47: (0, 83), 48: (0, 48), 68: (0, 217), 50: (0, 37), 59: (0, 0), 72: (0, 80), 66: (0, 213), 61: (0, 27), 55: (0, 69), 65: (0, 208), 31: (0, 4), 67: (0, 111), 62: (0, 11), 74: (0, 124), 69: (0, 223), 75: (0, 41), 35: (0, 35), 37: (0, 32), 41: (0, 178), 77: (0, 231), 45: (0, 52), 49: (0, 23), 51: (0, 89), 52: (0, 131), 53: (0, 151), 54: (0, 170), 70: (0, 240), 76: (0, 247), 56: (0, 232), 57: (0, 236), 58: (0, 182), 60: (0, 149), 15: (0, 9)}, 194: {30: (1, {'@': 68})}, 195: {24: (0, 88), 12: (0, 108), 63: (0, 95)}, 196: {0: (1, {'@': 125}), 1: (1, {'@': 125}), 2: (1, {'@': 125}), 3: (1, {'@': 125}), 4: (1, {'@': 125}), 5: (1, {'@': 125}), 6: (1, {'@': 125}), 7: (1, {'@': 125}), 8: (1, {'@': 125}), 9: (1, {'@': 125}), 10: (1, {'@': 125}), 11: (1, {'@': 125}), 12: (1, {'@': 125}), 13: (1, {'@': 125}), 14: (1, {'@': 125}), 15: (1, {'@': 125}), 16: (1, {'@': 125}), 17: (1, {'@': 125}), 18: (1, {'@': 125}), 19: (1, {'@': 125}), 20: (1, {'@': 125}), 21: (1, {'@': 125}), 22: (1, {'@': 125}), 23: (1, {'@': 125}), 24: (1, {'@': 125}), 25: (1, {'@': 125}), 26: (1, {'@': 125}), 27: (1, {'@': 125}), 28: (1, {'@': 125}), 29: (1, {'@': 125})}, 197: {13: (0, 53), 111: (0, 5), 0: (1, {'@': 163}), 12: (1, {'@': 163}), 4: (1, {'@': 163}), 18: (1, {'@': 163}), 6: (1, {'@': 163}), 19: (1, {'@': 163}), 24: (1, {'@': 163}), 8: (1, {'@': 163}), 26: (1, {'@': 163}), 29: (1, {'@': 163})}, 198: {0: (1, {'@': 181}), 1: (1, {'@': 181}), 2: (1, {'@': 181}), 3: (1, {'@': 181}), 4: (1, {'@': 181}), 5: (1, {'@': 181}), 6: (1, {'@': 181}), 7: (1, {'@': 181}), 8: (1, {'@': 181}), 9: (1, {'@': 181}), 10: (1, {'@': 181}), 11: (1, {'@': 181}), 12: (1, {'@': 181}), 13: (1, {'@': 181}), 14: (1, {'@': 181}), 15: (1, {'@': 181}), 16: (1, {'@': 181}), 17: (1, {'@': 181}), 18: (1, {'@': 181}), 19: (1, {'@': 181}), 20: (1, {'@': 181}), 21: (1, {'@': 181}), 22: (1, {'@': 181}), 23: (1, {'@': 181}), 24: (1, {'@': 181}), 25: (1, {'@': 181}), 26: (1, {'@': 181}), 27: (1, {'@': 181}), 28: (1, {'@': 181}), 29: (1, {'@': 181})}, 199: {0: (1, {'@': 206}), 12: (1, {'@': 206}), 24: (1, {'@': 206}), 26: (1, {'@': 206}), 6: (1, {'@': 206})}, 200: {79: (0, 241), 88: (0, 260), 83: (0, 162), 30: (1, {'@': 71})}, 201: {24: (0, 102), 12: (0, 137)}, 202: {0: (1, {'@': 127}), 1: (1, {'@': 127}), 2: (1, {'@': 127}), 3: (1, {'@': 127}), 4: (1, {'@': 127}), 5: (1, {'@': 127}), 6: (1, {'@': 127}), 7: (1, {'@': 127}), 8: (1, {'@': 127}), 9: (1, {'@': 127}), 10: (1, {'@': 127}), 11: (1, {'@': 127}), 12: (1, {'@': 127}), 13: (1, {'@': 127}), 14: (1, {'@': 127}), 15: (1, {'@': 127}), 16: (1, {'@': 127}), 17: (1, {'@': 127}), 18: (1, {'@': 127}), 19: (1, {'@': 127}), 20: (1, {'@': 127}), 21: (1, {'@': 127}), 22: (1, {'@': 127}), 23: (1, {'@': 127}), 24: (1, {'@': 127}), 25: (1, {'@': 127}), 26: (1, {'@': 127}), 27: (1, {'@': 127}), 28: (1, {'@': 127}), 29: (1, {'@': 127})}, 203: {0: (1, {'@': 149}), 24: (1, {'@': 145}), 12: (1, {'@': 145}), 26: (1, {'@': 145}), 6: (1, {'@': 145})}, 204: {12: (0, 144)}, 205: {12: (1, {'@': 170})}, 206: {0: (1, {'@': 208}), 12: (1, {'@': 208}), 6: (1, {'@': 208}), 19: (1, 
{'@': 208}), 24: (1, {'@': 208}), 26: (1, {'@': 208})}, 207: {36: (0, 195), 32: (0, 125), 33: (0, 116), 34: (0, 221), 35: (0, 35), 37: (0, 32), 38: (0, 76), 39: (0, 19), 15: (0, 9), 40: (0, 17), 41: (0, 178), 42: (0, 3), 43: (0, 119), 44: (0, 96), 45: (0, 52), 46: (0, 248), 47: (0, 83), 48: (0, 48), 49: (0, 23), 50: (0, 37), 51: (0, 89), 52: (0, 131), 53: (0, 151), 54: (0, 170), 56: (0, 232), 57: (0, 236), 58: (0, 182), 59: (0, 0), 60: (0, 149), 61: (0, 27), 55: (0, 69), 31: (0, 4), 62: (0, 11)}, 208: {0: (1, {'@': 149}), 12: (1, {'@': 149}), 24: (1, {'@': 149}), 26: (1, {'@': 149}), 6: (1, {'@': 149})}, 209: {30: (1, {'@': 72})}, 210: {0: (1, {'@': 87}), 1: (1, {'@': 87}), 2: (1, {'@': 87}), 3: (1, {'@': 87}), 4: (1, {'@': 87}), 5: (1, {'@': 87}), 6: (1, {'@': 87}), 7: (1, {'@': 87}), 8: (1, {'@': 87}), 9: (1, {'@': 87}), 10: (1, {'@': 87}), 11: (1, {'@': 87}), 12: (1, {'@': 87}), 13: (1, {'@': 87}), 14: (1, {'@': 87}), 15: (1, {'@': 87}), 16: (1, {'@': 87}), 17: (1, {'@': 87}), 18: (1, {'@': 87}), 19: (1, {'@': 87}), 20: (1, {'@': 87}), 21: (1, {'@': 87}), 22: (1, {'@': 87}), 23: (1, {'@': 87}), 24: (1, {'@': 87}), 25: (1, {'@': 87}), 26: (1, {'@': 87}), 27: (1, {'@': 87}), 28: (1, {'@': 87}), 29: (1, {'@': 87})}, 211: {0: (1, {'@': 130}), 1: (1, {'@': 130}), 2: (1, {'@': 130}), 3: (1, {'@': 130}), 4: (1, {'@': 130}), 5: (1, {'@': 130}), 6: (1, {'@': 130}), 7: (1, {'@': 130}), 8: (1, {'@': 130}), 9: (1, {'@': 130}), 10: (1, {'@': 130}), 11: (1, {'@': 130}), 12: (1, {'@': 130}), 13: (1, {'@': 130}), 14: (1, {'@': 130}), 15: (1, {'@': 130}), 16: (1, {'@': 130}), 17: (1, {'@': 130}), 18: (1, {'@': 130}), 19: (1, {'@': 130}), 20: (1, {'@': 130}), 21: (1, {'@': 130}), 22: (1, {'@': 130}), 23: (1, {'@': 130}), 24: (1, {'@': 130}), 25: (1, {'@': 130}), 26: (1, {'@': 130}), 27: (1, {'@': 130}), 28: (1, {'@': 130}), 29: (1, {'@': 130})}, 212: {12: (1, {'@': 168})}, 213: {0: (1, {'@': 152}), 12: (1, {'@': 152}), 19: (1, {'@': 152}), 24: (1, {'@': 152}), 26: (1, {'@': 152}), 6: (1, {'@': 152})}, 214: {0: (1, {'@': 180}), 1: (1, {'@': 180}), 2: (1, {'@': 180}), 3: (1, {'@': 180}), 4: (1, {'@': 180}), 5: (1, {'@': 180}), 6: (1, {'@': 180}), 7: (1, {'@': 180}), 8: (1, {'@': 180}), 9: (1, {'@': 180}), 10: (1, {'@': 180}), 11: (1, {'@': 180}), 12: (1, {'@': 180}), 13: (1, {'@': 180}), 14: (1, {'@': 180}), 15: (1, {'@': 180}), 16: (1, {'@': 180}), 17: (1, {'@': 180}), 18: (1, {'@': 180}), 19: (1, {'@': 180}), 20: (1, {'@': 180}), 21: (1, {'@': 180}), 22: (1, {'@': 180}), 23: (1, {'@': 180}), 24: (1, {'@': 180}), 25: (1, {'@': 180}), 26: (1, {'@': 180}), 27: (1, {'@': 180}), 28: (1, {'@': 180}), 29: (1, {'@': 180})}, 215: {24: (0, 118)}, 216: {31: (0, 4), 32: (0, 125), 33: (0, 116), 34: (0, 221), 35: (0, 35), 36: (0, 87), 37: (0, 32), 38: (0, 76), 39: (0, 19), 12: (0, 92), 40: (0, 17), 41: (0, 178), 42: (0, 3), 43: (0, 119), 44: (0, 96), 45: (0, 52), 46: (0, 248), 47: (0, 83), 48: (0, 48), 49: (0, 23), 50: (0, 37), 51: (0, 89), 52: (0, 131), 53: (0, 151), 54: (0, 170), 55: (0, 69), 56: (0, 232), 57: (0, 236), 58: (0, 182), 59: (0, 0), 60: (0, 149), 61: (0, 27), 15: (0, 9), 62: (0, 11)}, 217: {8: (0, 110), 114: (0, 117), 0: (1, {'@': 151}), 19: (1, {'@': 151}), 12: (1, {'@': 151}), 24: (1, {'@': 151}), 26: (1, {'@': 151}), 6: (1, {'@': 151})}, 218: {32: (0, 125), 33: (0, 116), 34: (0, 221), 35: (0, 35), 37: (0, 32), 38: (0, 76), 39: (0, 19), 15: (0, 9), 40: (0, 17), 41: (0, 178), 42: (0, 3), 43: (0, 119), 44: (0, 96), 45: (0, 52), 46: (0, 248), 47: (0, 83), 48: (0, 48), 49: (0, 23), 36: (0, 234), 50: (0, 
37), 51: (0, 89), 52: (0, 131), 53: (0, 151), 54: (0, 170), 56: (0, 232), 57: (0, 236), 58: (0, 182), 59: (0, 0), 60: (0, 149), 61: (0, 27), 55: (0, 69), 31: (0, 4), 62: (0, 11)}, 219: {79: (0, 241), 83: (0, 6), 30: (1, {'@': 59})}, 220: {79: (1, {'@': 199}), 78: (1, {'@': 199})}, 221: {0: (1, {'@': 100}), 1: (1, {'@': 100}), 2: (1, {'@': 100}), 3: (1, {'@': 100}), 4: (1, {'@': 100}), 5: (1, {'@': 100}), 6: (1, {'@': 100}), 7: (1, {'@': 100}), 8: (1, {'@': 100}), 9: (1, {'@': 100}), 10: (1, {'@': 100}), 11: (1, {'@': 100}), 12: (1, {'@': 100}), 13: (1, {'@': 100}), 14: (1, {'@': 100}), 15: (1, {'@': 100}), 16: (1, {'@': 100}), 17: (1, {'@': 100}), 18: (1, {'@': 100}), 19: (1, {'@': 100}), 20: (1, {'@': 100}), 21: (1, {'@': 100}), 22: (1, {'@': 100}), 23: (1, {'@': 100}), 24: (1, {'@': 100}), 25: (1, {'@': 100}), 26: (1, {'@': 100}), 27: (1, {'@': 100}), 28: (1, {'@': 100}), 29: (1, {'@': 100})}, 222: {0: (1, {'@': 152}), 19: (1, {'@': 152}), 24: (1, {'@': 144}), 12: (1, {'@': 144}), 26: (1, {'@': 144}), 6: (1, {'@': 144})}, 223: {0: (1, {'@': 164}), 12: (1, {'@': 164}), 4: (1, {'@': 164}), 18: (1, {'@': 164}), 6: (1, {'@': 164}), 19: (1, {'@': 164}), 24: (1, {'@': 164}), 8: (1, {'@': 164}), 26: (1, {'@': 164}), 29: (1, {'@': 164})}, 224: {79: (0, 22), 78: (0, 31)}, 225: {12: (0, 28)}, 226: {0: (1, {'@': 182}), 1: (1, {'@': 182}), 2: (1, {'@': 182}), 3: (1, {'@': 182}), 4: (1, {'@': 182}), 5: (1, {'@': 182}), 6: (1, {'@': 182}), 7: (1, {'@': 182}), 8: (1, {'@': 182}), 9: (1, {'@': 182}), 10: (1, {'@': 182}), 11: (1, {'@': 182}), 12: (1, {'@': 182}), 13: (1, {'@': 182}), 14: (1, {'@': 182}), 15: (1, {'@': 182}), 16: (1, {'@': 182}), 17: (1, {'@': 182}), 18: (1, {'@': 182}), 19: (1, {'@': 182}), 20: (1, {'@': 182}), 21: (1, {'@': 182}), 22: (1, {'@': 182}), 23: (1, {'@': 182}), 24: (1, {'@': 182}), 25: (1, {'@': 182}), 26: (1, {'@': 182}), 27: (1, {'@': 182}), 28: (1, {'@': 182}), 29: (1, {'@': 182})}, 227: {13: (0, 53), 111: (0, 5), 24: (0, 127), 0: (1, {'@': 163}), 19: (1, {'@': 163}), 8: (1, {'@': 163}), 4: (1, {'@': 163}), 18: (1, {'@': 163}), 29: (1, {'@': 163})}, 228: {30: (1, {'@': 60})}, 229: {78: (0, 107), 79: (0, 114)}, 230: {78: (1, {'@': 200}), 79: (1, {'@': 200})}, 231: {0: (1, {'@': 161}), 12: (1, {'@': 161}), 4: (1, {'@': 161}), 18: (1, {'@': 161}), 6: (1, {'@': 161}), 19: (1, {'@': 161}), 24: (1, {'@': 161}), 8: (1, {'@': 161}), 26: (1, {'@': 161})}, 232: {52: (0, 131), 48: (0, 48), 43: (0, 119), 39: (0, 18), 57: (0, 236)}, 233: {12: (0, 126)}, 234: {63: (0, 201), 12: (0, 108), 24: (0, 210)}, 235: {24: (0, 8)}, 236: {0: (1, {'@': 202}), 1: (1, {'@': 202}), 2: (1, {'@': 202}), 3: (1, {'@': 202}), 4: (1, {'@': 202}), 5: (1, {'@': 202}), 6: (1, {'@': 202}), 7: (1, {'@': 202}), 8: (1, {'@': 202}), 9: (1, {'@': 202}), 10: (1, {'@': 202}), 11: (1, {'@': 202}), 12: (1, {'@': 202}), 13: (1, {'@': 202}), 14: (1, {'@': 202}), 15: (1, {'@': 202}), 16: (1, {'@': 202}), 17: (1, {'@': 202}), 18: (1, {'@': 202}), 19: (1, {'@': 202}), 20: (1, {'@': 202}), 21: (1, {'@': 202}), 22: (1, {'@': 202}), 23: (1, {'@': 202}), 24: (1, {'@': 202}), 33: (1, {'@': 202}), 25: (1, {'@': 202}), 27: (1, {'@': 202}), 28: (1, {'@': 202}), 26: (1, {'@': 202}), 29: (1, {'@': 202})}, 237: {30: (1, {'@': 78})}, 238: {30: (1, {'@': 62})}, 239: {54: (1, {'@': 198})}, 240: {0: (1, {'@': 155}), 12: (1, {'@': 155}), 24: (1, {'@': 155}), 8: (1, {'@': 155}), 6: (1, {'@': 155}), 26: (1, {'@': 155}), 19: (1, {'@': 155})}, 241: {95: (0, 220), 115: (0, 229)}, 242: {0: (1, {'@': 210}), 12: (1, {'@': 210}), 6: (1, {'@': 210}), 
19: (1, {'@': 210}), 24: (1, {'@': 210}), 8: (1, {'@': 210}), 26: (1, {'@': 210})}, 243: {12: (0, 207)}, 244: {30: (1, {'@': 66})}, 245: {116: (0, 121)}, 246: {54: (0, 40)}, 247: {0: (1, {'@': 158}), 12: (1, {'@': 158}), 4: (1, {'@': 158}), 6: (1, {'@': 158}), 19: (1, {'@': 158}), 24: (1, {'@': 158}), 8: (1, {'@': 158}), 26: (1, {'@': 158})}, 248: {0: (1, {'@': 105}), 1: (1, {'@': 105}), 2: (1, {'@': 105}), 3: (1, {'@': 105}), 4: (1, {'@': 105}), 5: (1, {'@': 105}), 6: (1, {'@': 105}), 7: (1, {'@': 105}), 8: (1, {'@': 105}), 9: (1, {'@': 105}), 10: (1, {'@': 105}), 11: (1, {'@': 105}), 12: (1, {'@': 105}), 13: (1, {'@': 105}), 14: (1, {'@': 105}), 15: (1, {'@': 105}), 16: (1, {'@': 105}), 17: (1, {'@': 105}), 18: (1, {'@': 105}), 19: (1, {'@': 105}), 20: (1, {'@': 105}), 21: (1, {'@': 105}), 22: (1, {'@': 105}), 23: (1, {'@': 105}), 24: (1, {'@': 105}), 25: (1, {'@': 105}), 26: (1, {'@': 105}), 27: (1, {'@': 105}), 28: (1, {'@': 105}), 29: (1, {'@': 105})}, 249: {12: (1, {'@': 178})}, 250: {30: (1, {'@': 184}), 79: (1, {'@': 184})}, 251: {32: (0, 125), 33: (0, 116), 34: (0, 221), 36: (0, 197), 38: (0, 76), 39: (0, 19), 40: (0, 17), 42: (0, 3), 43: (0, 119), 44: (0, 96), 46: (0, 248), 47: (0, 83), 48: (0, 48), 50: (0, 37), 59: (0, 0), 72: (0, 80), 61: (0, 27), 55: (0, 69), 74: (0, 124), 31: (0, 4), 62: (0, 11), 69: (0, 223), 75: (0, 41), 35: (0, 35), 37: (0, 32), 41: (0, 178), 77: (0, 231), 45: (0, 52), 49: (0, 23), 51: (0, 89), 52: (0, 131), 53: (0, 151), 54: (0, 170), 70: (0, 240), 76: (0, 247), 56: (0, 232), 68: (0, 16), 57: (0, 236), 58: (0, 182), 60: (0, 149), 15: (0, 9)}, 252: {12: (0, 137), 27: (0, 13)}, 253: {30: (1, {'@': 74})}, 254: {0: (1, {'@': 132}), 1: (1, {'@': 132}), 2: (1, {'@': 132}), 3: (1, {'@': 132}), 4: (1, {'@': 132}), 5: (1, {'@': 132}), 6: (1, {'@': 132}), 7: (1, {'@': 132}), 8: (1, {'@': 132}), 9: (1, {'@': 132}), 10: (1, {'@': 132}), 11: (1, {'@': 132}), 12: (1, {'@': 132}), 13: (1, {'@': 132}), 14: (1, {'@': 132}), 15: (1, {'@': 132}), 16: (1, {'@': 132}), 17: (1, {'@': 132}), 18: (1, {'@': 132}), 19: (1, {'@': 132}), 20: (1, {'@': 132}), 21: (1, {'@': 132}), 22: (1, {'@': 132}), 23: (1, {'@': 132}), 24: (1, {'@': 132}), 25: (1, {'@': 132}), 26: (1, {'@': 132}), 27: (1, {'@': 132}), 28: (1, {'@': 132}), 29: (1, {'@': 132})}, 255: {0: (1, {'@': 218}), 13: (1, {'@': 218}), 4: (1, {'@': 218}), 18: (1, {'@': 218}), 19: (1, {'@': 218}), 6: (1, {'@': 218}), 24: (1, {'@': 218}), 8: (1, {'@': 218}), 26: (1, {'@': 218}), 29: (1, {'@': 218}), 12: (1, {'@': 218})}, 256: {12: (1, {'@': 171})}, 257: {79: (0, 241), 83: (0, 238), 30: (1, {'@': 63})}, 258: {63: (0, 26), 12: (0, 108), 24: (0, 34)}, 259: {0: (1, {'@': 118}), 1: (1, {'@': 118}), 2: (1, {'@': 118}), 3: (1, {'@': 118}), 4: (1, {'@': 118}), 5: (1, {'@': 118}), 6: (1, {'@': 118}), 7: (1, {'@': 118}), 8: (1, {'@': 118}), 9: (1, {'@': 118}), 10: (1, {'@': 118}), 11: (1, {'@': 118}), 12: (1, {'@': 118}), 13: (1, {'@': 118}), 14: (1, {'@': 118}), 15: (1, {'@': 118}), 16: (1, {'@': 118}), 17: (1, {'@': 118}), 18: (1, {'@': 118}), 19: (1, {'@': 118}), 20: (1, {'@': 118}), 21: (1, {'@': 118}), 22: (1, {'@': 118}), 23: (1, {'@': 118}), 24: (1, {'@': 118}), 25: (1, {'@': 118}), 26: (1, {'@': 118}), 27: (1, {'@': 118}), 28: (1, {'@': 118}), 29: (1, {'@': 118})}, 260: {30: (1, {'@': 70})}, 261: {0: (1, {'@': 133}), 1: (1, {'@': 133}), 2: (1, {'@': 133}), 3: (1, {'@': 133}), 4: (1, {'@': 133}), 5: (1, {'@': 133}), 6: (1, {'@': 133}), 7: (1, {'@': 133}), 8: (1, {'@': 133}), 9: (1, {'@': 133}), 10: (1, {'@': 133}), 11: (1, {'@': 
133}), 12: (1, {'@': 133}), 13: (1, {'@': 133}), 14: (1, {'@': 133}), 15: (1, {'@': 133}), 16: (1, {'@': 133}), 17: (1, {'@': 133}), 18: (1, {'@': 133}), 19: (1, {'@': 133}), 20: (1, {'@': 133}), 21: (1, {'@': 133}), 22: (1, {'@': 133}), 23: (1, {'@': 133}), 24: (1, {'@': 133}), 25: (1, {'@': 133}), 26: (1, {'@': 133}), 27: (1, {'@': 133}), 28: (1, {'@': 133}), 29: (1, {'@': 133})}, 262: {0: (1, {'@': 129}), 1: (1, {'@': 129}), 2: (1, {'@': 129}), 3: (1, {'@': 129}), 4: (1, {'@': 129}), 5: (1, {'@': 129}), 6: (1, {'@': 129}), 7: (1, {'@': 129}), 8: (1, {'@': 129}), 9: (1, {'@': 129}), 10: (1, {'@': 129}), 11: (1, {'@': 129}), 12: (1, {'@': 129}), 13: (1, {'@': 129}), 14: (1, {'@': 129}), 15: (1, {'@': 129}), 16: (1, {'@': 129}), 17: (1, {'@': 129}), 18: (1, {'@': 129}), 19: (1, {'@': 129}), 20: (1, {'@': 129}), 21: (1, {'@': 129}), 22: (1, {'@': 129}), 23: (1, {'@': 129}), 24: (1, {'@': 129}), 25: (1, {'@': 129}), 26: (1, {'@': 129}), 27: (1, {'@': 129}), 28: (1, {'@': 129}), 29: (1, {'@': 129})}}, 'start_states': {'start': 49}, 'end_states': {'start': 133}}, 'options': {'debug': False, 'keep_all_tokens': False, 'tree_class': None, 'cache': False, 'postlex': None, 'parser': 'lalr', 'lexer': 'contextual', 'transformer': None, 'start': ['start'], 'priority': 'normal', 'ambiguity': 'auto', 'regex': False, 'propagate_positions': False, 'lexer_callbacks': {}, 'maybe_placeholders': False, 'edit_terminals': None, 'g_regex_flags': 0, 'use_bytes': False, 'import_paths': [], 'source_path': None}, '__type__': 'ParsingFrontend'}, 'rules': [{'@': 55}, {'@': 56}, {'@': 57}, {'@': 58}, {'@': 59}, {'@': 60}, {'@': 61}, {'@': 62}, {'@': 63}, {'@': 64}, {'@': 65}, {'@': 66}, {'@': 67}, {'@': 68}, {'@': 69}, {'@': 70}, {'@': 71}, {'@': 72}, {'@': 73}, {'@': 74}, {'@': 75}, {'@': 76}, {'@': 77}, {'@': 78}, {'@': 79}, {'@': 80}, {'@': 81}, {'@': 82}, {'@': 83}, {'@': 84}, {'@': 85}, {'@': 86}, {'@': 87}, {'@': 88}, {'@': 89}, {'@': 90}, {'@': 91}, {'@': 92}, {'@': 93}, {'@': 94}, {'@': 95}, {'@': 96}, {'@': 97}, {'@': 98}, {'@': 99}, {'@': 100}, {'@': 101}, {'@': 102}, {'@': 103}, {'@': 104}, {'@': 105}, {'@': 106}, {'@': 107}, {'@': 108}, {'@': 109}, {'@': 110}, {'@': 111}, {'@': 112}, {'@': 113}, {'@': 114}, {'@': 115}, {'@': 116}, {'@': 117}, {'@': 118}, {'@': 119}, {'@': 120}, {'@': 121}, {'@': 122}, {'@': 123}, {'@': 124}, {'@': 125}, {'@': 126}, {'@': 127}, {'@': 128}, {'@': 129}, {'@': 130}, {'@': 131}, {'@': 132}, {'@': 133}, {'@': 134}, {'@': 135}, {'@': 136}, {'@': 137}, {'@': 138}, {'@': 139}, {'@': 140}, {'@': 141}, {'@': 142}, {'@': 143}, {'@': 144}, {'@': 145}, {'@': 146}, {'@': 147}, {'@': 148}, {'@': 149}, {'@': 150}, {'@': 151}, {'@': 152}, {'@': 153}, {'@': 154}, {'@': 155}, {'@': 156}, {'@': 157}, {'@': 158}, {'@': 159}, {'@': 160}, {'@': 161}, {'@': 162}, {'@': 163}, {'@': 164}, {'@': 165}, {'@': 166}, {'@': 167}, {'@': 168}, {'@': 169}, {'@': 170}, {'@': 171}, {'@': 172}, {'@': 173}, {'@': 174}, {'@': 175}, {'@': 176}, {'@': 177}, {'@': 178}, {'@': 179}, {'@': 180}, {'@': 181}, {'@': 182}, {'@': 183}, {'@': 184}, {'@': 185}, {'@': 186}, {'@': 187}, {'@': 188}, {'@': 189}, {'@': 190}, {'@': 191}, {'@': 192}, {'@': 193}, {'@': 194}, {'@': 195}, {'@': 196}, {'@': 197}, {'@': 198}, {'@': 199}, {'@': 200}, {'@': 201}, {'@': 202}, {'@': 203}, {'@': 204}, {'@': 205}, {'@': 206}, {'@': 207}, {'@': 208}, {'@': 209}, {'@': 210}, {'@': 211}, {'@': 212}, {'@': 213}, {'@': 214}, {'@': 215}, {'@': 216}, {'@': 217}, {'@': 218}, {'@': 219}], 'options': {'debug': False, 'keep_all_tokens': False, 
'tree_class': None, 'cache': False, 'postlex': None, 'parser': 'lalr', 'lexer': 'contextual', 'transformer': None, 'start': ['start'], 'priority': 'normal', 'ambiguity': 'auto', 'regex': False, 'propagate_positions': False, 'lexer_callbacks': {}, 'maybe_placeholders': False, 'edit_terminals': None, 'g_regex_flags': 0, 'use_bytes': False, 'import_paths': [], 'source_path': None}, '__type__': 'Lark'}
+)
+MEMO = (
+{0: {'name': 'NUMBER', 'pattern': {'value': '(?:(?:\\+|\\-))?(?:[0-9])+', 'flags': [], '_width': [1, 4294967295], '__type__': 'PatternRE'}, 'priority': 1, '__type__': 'TerminalDef'}, 1: {'name': 'WS', 'pattern': {'value': '(?:[ \t\x0c\r\n])+', 'flags': [], '_width': [1, 4294967295], '__type__': 'PatternRE'}, 'priority': 1, '__type__': 'TerminalDef'}, 2: {'name': 'DOT', 'pattern': {'value': '.', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 3: {'name': 'QMARK', 'pattern': {'value': '?', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 4: {'name': 'BANG', 'pattern': {'value': '!', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 5: {'name': 'AT', 'pattern': {'value': '@', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 6: {'name': 'COMMA', 'pattern': {'value': ',', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 7: {'name': 'LPAR', 'pattern': {'value': '(', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 8: {'name': 'RPAR', 'pattern': {'value': ')', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 9: {'name': 'LESSTHAN', 'pattern': {'value': '<', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 10: {'name': 'MORETHAN', 'pattern': {'value': '>', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 11: {'name': '__ANON_0', 'pattern': {'value': '-->', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 12: {'name': '__ANON_1', 'pattern': {'value': '<->', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 13: {'name': '__ANON_2', 'pattern': {'value': '{--', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 14: {'name': '__ANON_3', 'pattern': {'value': '--]', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 15: {'name': '__ANON_4', 'pattern': {'value': '{-]', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 16: {'name': '__ANON_5', 'pattern': {'value': '==>', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 17: {'name': '__ANON_6', 'pattern': {'value': '=/>', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 18: {'name': '__ANON_7', 'pattern': {'value': '=|>', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 19: {'name': '__ANON_8', 'pattern': {'value': '=\\>', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 20: {'name': '__ANON_9', 'pattern': {'value': '<=>', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 21: {'name': '__ANON_10', 'pattern': {'value': '</>', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 22: {'name': '__ANON_11', 'pattern': {'value': '<|>', 'flags': [], '__type__': 
'TerminalDef'}, 23: {'name': 'CIRCUMFLEX', 'pattern': {'value': '^', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 24: {'name': 'PLUS', 'pattern': {'value': '+', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 25: {'name': 'RSQB', 'pattern': {'value': ']', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 26: {'name': 'RBRACE', 'pattern': {'value': '}', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 27: {'name': 'STAR', 'pattern': {'value': '*', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 28: {'name': '__ANON_12', 'pattern': {'value': '||', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 29: {'name': '__ANON_13', 'pattern': {'value': '&&', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 30: {'name': '__ANON_14', 'pattern': {'value': '&/', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 31: {'name': '__ANON_15', 'pattern': {'value': '&|', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 32: {'name': 'VBAR', 'pattern': {'value': '|', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 33: {'name': 'AMPERSAND', 'pattern': {'value': '&', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 34: {'name': 'MINUS', 'pattern': {'value': '-', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 35: {'name': 'TILDE', 'pattern': {'value': '~', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 36: {'name': 'LSQB', 'pattern': {'value': '[', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 37: {'name': 'LBRACE', 'pattern': {'value': '{', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 38: {'name': '__ANON_16', 'pattern': {'value': '--', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 39: {'name': 'BACKSLASH', 'pattern': {'value': '\\', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 40: {'name': 'SLASH', 'pattern': {'value': '/', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 41: {'name': 'DOLLAR', 'pattern': {'value': '$', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 42: {'name': 'HASH', 'pattern': {'value': '#', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 43: {'name': '__ANON_17', 'pattern': {'value': ':!', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 44: {'name': 'COLON', 'pattern': {'value': ':', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 45: {'name': '__ANON_18', 'pattern': {'value': ':/:', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 46: {'name': '__ANON_19', 'pattern': {'value': ':|:', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 47: {'name': '__ANON_20', 'pattern': {'value': ':\\:', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 48: {'name': 'SEMICOLON', 'pattern': {'value': ';', 'flags': [], '__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 49: {'name': 'PERCENT', 'pattern': {'value': '%', 'flags': [], 
'__type__': 'PatternStr'}, 'priority': 1, '__type__': 'TerminalDef'}, 50: {'name': '__ANON_21', 'pattern': {'value': '([0]?\\.[0-9]+|1\\.[0]*|1|0)', 'flags': [], '_width': [1, 4294967295], '__type__': 'PatternRE'}, 'priority': 1, '__type__': 'TerminalDef'}, 51: {'name': '__ANON_22', 'pattern': {'value': '[0]?\\.[0]*[1-9]{1}[0-9]*', 'flags': [], '_width': [2, 4294967295], '__type__': 'PatternRE'}, 'priority': 1, '__type__': 'TerminalDef'}, 52: {'name': '__ANON_23', 'pattern': {'value': '[1-9]{1}[0-9]*', 'flags': [], '_width': [1, 4294967295], '__type__': 'PatternRE'}, 'priority': 1, '__type__': 'TerminalDef'}, 53: {'name': '__ANON_24', 'pattern': {'value': '"[^"]+"', 'flags': [], '_width': [3, 4294967295], '__type__': 'PatternRE'}, 'priority': 1, '__type__': 'TerminalDef'}, 54: {'name': '__ANON_25', 'pattern': {'value': '[^\\-^\\+^<^>^=^"^&^|^!^.^?^@^~^%^;^\\,^:^\\/^\\\\^*^#^$^\\[^\\]^\\{^\\}^\\(^\\)^\\ ]+', 'flags': [], '_width': [1, 4294967295], '__type__': 'PatternRE'}, 'priority': 1, '__type__': 'TerminalDef'}, 55: {'origin': {'name': 'start', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'task', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 56: {'origin': {'name': 'task', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'budget', '__type__': 'NonTerminal'}, {'name': 'sentence', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 57: {'origin': {'name': 'task', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'sentence', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': [True, False], '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 58: {'origin': {'name': 'sentence', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'term_nonvar', '__type__': 'NonTerminal'}, {'name': 'DOT', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'tense', '__type__': 'NonTerminal'}, {'name': 'truth', '__type__': 'NonTerminal'}], 'order': 0, 'alias': 'judgement', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 0, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 59: {'origin': {'name': 'sentence', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'term_nonvar', '__type__': 'NonTerminal'}, {'name': 'DOT', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'tense', '__type__': 'NonTerminal'}], 'order': 1, 'alias': 'judgement', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 0, 'template_source': None, 'empty_indices': [False, False, False, True], '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 60: {'origin': {'name': 'sentence', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'term_nonvar', '__type__': 'NonTerminal'}, {'name': 'DOT', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'truth', '__type__': 'NonTerminal'}], 'order': 2, 'alias': 'judgement', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 0, 'template_source': None, 'empty_indices': [False, False, True, False], '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 61: {'origin': {'name': 'sentence', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'term_nonvar', 
'__type__': 'NonTerminal'}, {'name': 'DOT', 'filter_out': True, '__type__': 'Terminal'}], 'order': 3, 'alias': 'judgement', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 0, 'template_source': None, 'empty_indices': [False, False, True, True], '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 62: {'origin': {'name': 'sentence', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'statement', '__type__': 'NonTerminal'}, {'name': 'DOT', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'tense', '__type__': 'NonTerminal'}, {'name': 'truth', '__type__': 'NonTerminal'}], 'order': 4, 'alias': 'judgement', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 0, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 63: {'origin': {'name': 'sentence', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'statement', '__type__': 'NonTerminal'}, {'name': 'DOT', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'tense', '__type__': 'NonTerminal'}], 'order': 5, 'alias': 'judgement', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 0, 'template_source': None, 'empty_indices': [False, False, False, True], '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 64: {'origin': {'name': 'sentence', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'statement', '__type__': 'NonTerminal'}, {'name': 'DOT', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'truth', '__type__': 'NonTerminal'}], 'order': 6, 'alias': 'judgement', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 0, 'template_source': None, 'empty_indices': [False, False, True, False], '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 65: {'origin': {'name': 'sentence', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'statement', '__type__': 'NonTerminal'}, {'name': 'DOT', 'filter_out': True, '__type__': 'Terminal'}], 'order': 7, 'alias': 'judgement', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 0, 'template_source': None, 'empty_indices': [False, False, True, True], '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 66: {'origin': {'name': 'sentence', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'term_nonvar', '__type__': 'NonTerminal'}, {'name': 'QMARK', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'tense', '__type__': 'NonTerminal'}], 'order': 8, 'alias': 'question', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 0, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 67: {'origin': {'name': 'sentence', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'term_nonvar', '__type__': 'NonTerminal'}, {'name': 'QMARK', 'filter_out': True, '__type__': 'Terminal'}], 'order': 9, 'alias': 'question', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 0, 'template_source': None, 'empty_indices': [False, False, True], '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 68: {'origin': {'name': 'sentence', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'statement', '__type__': 'NonTerminal'}, {'name': 'QMARK', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'tense', '__type__': 'NonTerminal'}], 'order': 10, 'alias': 'question', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 0, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 69: {'origin': {'name': 'sentence', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'statement', '__type__': 'NonTerminal'}, {'name': 
'QMARK', 'filter_out': True, '__type__': 'Terminal'}], 'order': 11, 'alias': 'question', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 0, 'template_source': None, 'empty_indices': [False, False, True], '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 70: {'origin': {'name': 'sentence', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'term_nonvar', '__type__': 'NonTerminal'}, {'name': 'BANG', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'tense', '__type__': 'NonTerminal'}, {'name': 'desire', '__type__': 'NonTerminal'}], 'order': 12, 'alias': 'goal', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 0, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 71: {'origin': {'name': 'sentence', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'term_nonvar', '__type__': 'NonTerminal'}, {'name': 'BANG', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'tense', '__type__': 'NonTerminal'}], 'order': 13, 'alias': 'goal', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 0, 'template_source': None, 'empty_indices': [False, False, False, True], '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 72: {'origin': {'name': 'sentence', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'term_nonvar', '__type__': 'NonTerminal'}, {'name': 'BANG', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'desire', '__type__': 'NonTerminal'}], 'order': 14, 'alias': 'goal', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 0, 'template_source': None, 'empty_indices': [False, False, True, False], '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 73: {'origin': {'name': 'sentence', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'term_nonvar', '__type__': 'NonTerminal'}, {'name': 'BANG', 'filter_out': True, '__type__': 'Terminal'}], 'order': 15, 'alias': 'goal', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 0, 'template_source': None, 'empty_indices': [False, False, True, True], '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 74: {'origin': {'name': 'sentence', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'statement', '__type__': 'NonTerminal'}, {'name': 'BANG', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'tense', '__type__': 'NonTerminal'}, {'name': 'desire', '__type__': 'NonTerminal'}], 'order': 16, 'alias': 'goal', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 0, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 75: {'origin': {'name': 'sentence', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'statement', '__type__': 'NonTerminal'}, {'name': 'BANG', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'tense', '__type__': 'NonTerminal'}], 'order': 17, 'alias': 'goal', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 0, 'template_source': None, 'empty_indices': [False, False, False, True], '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 76: {'origin': {'name': 'sentence', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'statement', '__type__': 'NonTerminal'}, {'name': 'BANG', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'desire', '__type__': 'NonTerminal'}], 'order': 18, 'alias': 'goal', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 0, 'template_source': None, 'empty_indices': [False, False, True, False], '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 77: {'origin': {'name': 'sentence', '__type__': 'NonTerminal'}, 'expansion': 
[{'name': 'statement', '__type__': 'NonTerminal'}, {'name': 'BANG', 'filter_out': True, '__type__': 'Terminal'}], 'order': 19, 'alias': 'goal', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 0, 'template_source': None, 'empty_indices': [False, False, True, True], '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 78: {'origin': {'name': 'sentence', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'term_nonvar', '__type__': 'NonTerminal'}, {'name': 'AT', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'tense', '__type__': 'NonTerminal'}], 'order': 20, 'alias': 'quest', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 0, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 79: {'origin': {'name': 'sentence', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'term_nonvar', '__type__': 'NonTerminal'}, {'name': 'AT', 'filter_out': True, '__type__': 'Terminal'}], 'order': 21, 'alias': 'quest', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 0, 'template_source': None, 'empty_indices': [False, False, True], '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 80: {'origin': {'name': 'sentence', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'statement', '__type__': 'NonTerminal'}, {'name': 'AT', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'tense', '__type__': 'NonTerminal'}], 'order': 22, 'alias': 'quest', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 0, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 81: {'origin': {'name': 'sentence', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'statement', '__type__': 'NonTerminal'}, {'name': 'AT', 'filter_out': True, '__type__': 'Terminal'}], 'order': 23, 'alias': 'quest', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 0, 'template_source': None, 'empty_indices': [False, False, True], '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 82: {'origin': {'name': 'statement', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LESSTHAN', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term', '__type__': 'NonTerminal'}, {'name': 'copula', '__type__': 'NonTerminal'}, {'name': 'term', '__type__': 'NonTerminal'}, {'name': 'MORETHAN', 'filter_out': True, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 0, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 83: {'origin': {'name': 'statement', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LPAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term', '__type__': 'NonTerminal'}, {'name': 'copula', '__type__': 'NonTerminal'}, {'name': 'term', '__type__': 'NonTerminal'}, {'name': 'RPAR', 'filter_out': True, '__type__': 'Terminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 0, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 84: {'origin': {'name': 'statement', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LPAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'op', '__type__': 'NonTerminal'}, {'name': '__statement_star_0', '__type__': 'NonTerminal'}, {'name': 'RPAR', 'filter_out': True, '__type__': 'Terminal'}], 'order': 2, 'alias': 'statement_operation1', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 0, 'template_source': None, 'empty_indices': (), 
'__type__': 'RuleOptions'}, '__type__': 'Rule'}, 85: {'origin': {'name': 'statement', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LPAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'op', '__type__': 'NonTerminal'}, {'name': 'RPAR', 'filter_out': True, '__type__': 'Terminal'}], 'order': 3, 'alias': 'statement_operation1', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 0, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 86: {'origin': {'name': 'statement', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'word', '__type__': 'NonTerminal'}, {'name': 'LPAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term', '__type__': 'NonTerminal'}, {'name': '__statement_star_0', '__type__': 'NonTerminal'}, {'name': 'RPAR', 'filter_out': True, '__type__': 'Terminal'}], 'order': 4, 'alias': 'statement_operation2', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 0, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 87: {'origin': {'name': 'statement', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'word', '__type__': 'NonTerminal'}, {'name': 'LPAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term', '__type__': 'NonTerminal'}, {'name': 'RPAR', 'filter_out': True, '__type__': 'Terminal'}], 'order': 5, 'alias': 'statement_operation2', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 0, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 88: {'origin': {'name': 'copula', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_0', 'filter_out': True, '__type__': 'Terminal'}], 'order': 0, 'alias': 'inheritance', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 89: {'origin': {'name': 'copula', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_1', 'filter_out': True, '__type__': 'Terminal'}], 'order': 1, 'alias': 'similarity', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 90: {'origin': {'name': 'copula', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_2', 'filter_out': True, '__type__': 'Terminal'}], 'order': 2, 'alias': 'instance', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 91: {'origin': {'name': 'copula', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_3', 'filter_out': True, '__type__': 'Terminal'}], 'order': 3, 'alias': 'property', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 92: {'origin': {'name': 'copula', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_4', 'filter_out': True, '__type__': 'Terminal'}], 'order': 4, 'alias': 'instance_property', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 93: {'origin': {'name': 'copula', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_5', 'filter_out': True, '__type__': 'Terminal'}], 'order': 5, 'alias': 'implication', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 
None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 94: {'origin': {'name': 'copula', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_6', 'filter_out': True, '__type__': 'Terminal'}], 'order': 6, 'alias': 'predictive_implication', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 95: {'origin': {'name': 'copula', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_7', 'filter_out': True, '__type__': 'Terminal'}], 'order': 7, 'alias': 'concurrent_implication', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 96: {'origin': {'name': 'copula', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_8', 'filter_out': True, '__type__': 'Terminal'}], 'order': 8, 'alias': 'retrospective_implication', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 97: {'origin': {'name': 'copula', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_9', 'filter_out': True, '__type__': 'Terminal'}], 'order': 9, 'alias': 'equivalence', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 98: {'origin': {'name': 'copula', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_10', 'filter_out': True, '__type__': 'Terminal'}], 'order': 10, 'alias': 'predictive_equivalence', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 99: {'origin': {'name': 'copula', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_11', 'filter_out': True, '__type__': 'Terminal'}], 'order': 11, 'alias': 'concurrent_equivalence', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 100: {'origin': {'name': 'term', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'variable', '__type__': 'NonTerminal'}], 'order': 0, 'alias': 'variable_term', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 101: {'origin': {'name': 'term', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'term_nonvar', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 102: {'origin': {'name': 'term_nonvar', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'interval', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 103: {'origin': {'name': 'term_nonvar', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'word', '__type__': 'NonTerminal'}], 'order': 1, 'alias': 'atom_term', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 
'Rule'}, 104: {'origin': {'name': 'term_nonvar', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'compound_term', '__type__': 'NonTerminal'}], 'order': 2, 'alias': 'compound_term', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 105: {'origin': {'name': 'term_nonvar', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'statement', '__type__': 'NonTerminal'}], 'order': 3, 'alias': 'statement_term', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 106: {'origin': {'name': 'term_nonvar', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'op', '__type__': 'NonTerminal'}], 'order': 4, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 107: {'origin': {'name': 'op', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'CIRCUMFLEX', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'word', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 108: {'origin': {'name': 'interval', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'PLUS', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'NUMBER', 'filter_out': False, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 109: {'origin': {'name': 'compound_term', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'set', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 110: {'origin': {'name': 'compound_term', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'multi', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 111: {'origin': {'name': 'compound_term', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'single', '__type__': 'NonTerminal'}], 'order': 2, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 112: {'origin': {'name': 'compound_term', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'ext_image', '__type__': 'NonTerminal'}], 'order': 3, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 113: {'origin': {'name': 'compound_term', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'int_image', '__type__': 'NonTerminal'}], 'order': 4, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 114: {'origin': {'name': 'compound_term', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'negation', '__type__': 
'NonTerminal'}], 'order': 5, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 115: {'origin': {'name': 'set', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'int_set', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 116: {'origin': {'name': 'set', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'ext_set', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 117: {'origin': {'name': 'int_set', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'con_int_set', '__type__': 'NonTerminal'}, {'name': 'term', '__type__': 'NonTerminal'}, {'name': '__statement_star_0', '__type__': 'NonTerminal'}, {'name': 'RSQB', 'filter_out': True, '__type__': 'Terminal'}], 'order': 0, 'alias': 'set', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 118: {'origin': {'name': 'int_set', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'con_int_set', '__type__': 'NonTerminal'}, {'name': 'term', '__type__': 'NonTerminal'}, {'name': 'RSQB', 'filter_out': True, '__type__': 'Terminal'}], 'order': 1, 'alias': 'set', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 119: {'origin': {'name': 'ext_set', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'con_ext_set', '__type__': 'NonTerminal'}, {'name': 'term', '__type__': 'NonTerminal'}, {'name': '__statement_star_0', '__type__': 'NonTerminal'}, {'name': 'RBRACE', 'filter_out': True, '__type__': 'Terminal'}], 'order': 0, 'alias': 'set', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 120: {'origin': {'name': 'ext_set', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'con_ext_set', '__type__': 'NonTerminal'}, {'name': 'term', '__type__': 'NonTerminal'}, {'name': 'RBRACE', 'filter_out': True, '__type__': 'Terminal'}], 'order': 1, 'alias': 'set', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 121: {'origin': {'name': 'negation', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'con_negation', '__type__': 'NonTerminal'}, {'name': 'term', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 122: {'origin': {'name': 'negation', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LPAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'con_negation', '__type__': 'NonTerminal'}, {'name': 'COMMA', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term', '__type__': 'NonTerminal'}, {'name': 'RPAR', 'filter_out': True, '__type__': 'Terminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 
'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 123: {'origin': {'name': 'int_image', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LPAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'con_int_image', '__type__': 'NonTerminal'}, {'name': 'COMMA', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term', '__type__': 'NonTerminal'}, {'name': '__statement_star_0', '__type__': 'NonTerminal'}, {'name': 'RPAR', 'filter_out': True, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 124: {'origin': {'name': 'int_image', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LPAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'con_int_image', '__type__': 'NonTerminal'}, {'name': 'COMMA', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term', '__type__': 'NonTerminal'}, {'name': 'RPAR', 'filter_out': True, '__type__': 'Terminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 125: {'origin': {'name': 'ext_image', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LPAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'con_ext_image', '__type__': 'NonTerminal'}, {'name': 'COMMA', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term', '__type__': 'NonTerminal'}, {'name': '__statement_star_0', '__type__': 'NonTerminal'}, {'name': 'RPAR', 'filter_out': True, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 126: {'origin': {'name': 'ext_image', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LPAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'con_ext_image', '__type__': 'NonTerminal'}, {'name': 'COMMA', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term', '__type__': 'NonTerminal'}, {'name': 'RPAR', 'filter_out': True, '__type__': 'Terminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 127: {'origin': {'name': 'multi', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LPAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'con_multi', '__type__': 'NonTerminal'}, {'name': 'COMMA', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term', '__type__': 'NonTerminal'}, {'name': '__statement_star_0', '__type__': 'NonTerminal'}, {'name': 'RPAR', 'filter_out': True, '__type__': 'Terminal'}], 'order': 0, 'alias': 'multi_prefix', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 128: {'origin': {'name': 'multi', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LPAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'multi_infix_expr', '__type__': 'NonTerminal'}, {'name': 'RPAR', 'filter_out': True, '__type__': 'Terminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 129: 
{'origin': {'name': 'multi', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LPAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term', '__type__': 'NonTerminal'}, {'name': '__statement_star_0', '__type__': 'NonTerminal'}, {'name': 'RPAR', 'filter_out': True, '__type__': 'Terminal'}], 'order': 2, 'alias': 'multi_prefix_product', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 130: {'origin': {'name': 'multi', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LPAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'con_product', '__type__': 'NonTerminal'}, {'name': 'COMMA', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term', '__type__': 'NonTerminal'}, {'name': '__statement_star_0', '__type__': 'NonTerminal'}, {'name': 'RPAR', 'filter_out': True, '__type__': 'Terminal'}], 'order': 3, 'alias': 'multi_prefix', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 131: {'origin': {'name': 'multi', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LPAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'con_product', '__type__': 'NonTerminal'}, {'name': 'COMMA', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term', '__type__': 'NonTerminal'}, {'name': 'RPAR', 'filter_out': True, '__type__': 'Terminal'}], 'order': 4, 'alias': 'multi_prefix', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 132: {'origin': {'name': 'single', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LPAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'con_single', '__type__': 'NonTerminal'}, {'name': 'COMMA', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term', '__type__': 'NonTerminal'}, {'name': 'COMMA', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term', '__type__': 'NonTerminal'}, {'name': 'RPAR', 'filter_out': True, '__type__': 'Terminal'}], 'order': 0, 'alias': 'single_prefix', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 133: {'origin': {'name': 'single', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LPAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'con_single', '__type__': 'NonTerminal'}, {'name': 'COMMA', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term', '__type__': 'NonTerminal'}, {'name': 'COMMA', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'multi_infix_expr', '__type__': 'NonTerminal'}, {'name': 'RPAR', 'filter_out': True, '__type__': 'Terminal'}], 'order': 1, 'alias': 'single_prefix', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 134: {'origin': {'name': 'single', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LPAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'con_single', '__type__': 'NonTerminal'}, {'name': 'COMMA', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'multi_infix_expr', '__type__': 'NonTerminal'}, {'name': 'COMMA', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term', '__type__': 'NonTerminal'}, {'name': 'RPAR', 'filter_out': True, '__type__': 'Terminal'}], 
'order': 2, 'alias': 'single_prefix', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 135: {'origin': {'name': 'single', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LPAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'con_single', '__type__': 'NonTerminal'}, {'name': 'COMMA', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'multi_infix_expr', '__type__': 'NonTerminal'}, {'name': 'COMMA', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'multi_infix_expr', '__type__': 'NonTerminal'}, {'name': 'RPAR', 'filter_out': True, '__type__': 'Terminal'}], 'order': 3, 'alias': 'single_prefix', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 136: {'origin': {'name': 'single', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LPAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term', '__type__': 'NonTerminal'}, {'name': 'con_single', '__type__': 'NonTerminal'}, {'name': 'term', '__type__': 'NonTerminal'}, {'name': 'RPAR', 'filter_out': True, '__type__': 'Terminal'}], 'order': 4, 'alias': 'single_infix', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 137: {'origin': {'name': 'single', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LPAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term', '__type__': 'NonTerminal'}, {'name': 'con_single', '__type__': 'NonTerminal'}, {'name': 'multi_infix_expr', '__type__': 'NonTerminal'}, {'name': 'RPAR', 'filter_out': True, '__type__': 'Terminal'}], 'order': 5, 'alias': 'single_infix', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 138: {'origin': {'name': 'single', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LPAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'multi_infix_expr', '__type__': 'NonTerminal'}, {'name': 'con_single', '__type__': 'NonTerminal'}, {'name': 'term', '__type__': 'NonTerminal'}, {'name': 'RPAR', 'filter_out': True, '__type__': 'Terminal'}], 'order': 6, 'alias': 'single_infix', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 139: {'origin': {'name': 'single', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LPAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'multi_infix_expr', '__type__': 'NonTerminal'}, {'name': 'con_single', '__type__': 'NonTerminal'}, {'name': 'multi_infix_expr', '__type__': 'NonTerminal'}, {'name': 'RPAR', 'filter_out': True, '__type__': 'Terminal'}], 'order': 7, 'alias': 'single_infix', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 140: {'origin': {'name': 'multi_infix_expr', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'multi_extint_expr', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 141: {'origin': {'name': 'multi_infix_expr', '__type__': 
'NonTerminal'}, 'expansion': [{'name': 'multi_intint_expr', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 142: {'origin': {'name': 'multi_infix_expr', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'multi_parallel_expr', '__type__': 'NonTerminal'}], 'order': 2, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 143: {'origin': {'name': 'multi_infix_expr', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'multi_sequential_expr', '__type__': 'NonTerminal'}], 'order': 3, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 144: {'origin': {'name': 'multi_infix_expr', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'multi_conj_expr', '__type__': 'NonTerminal'}], 'order': 4, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 145: {'origin': {'name': 'multi_infix_expr', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'multi_disj_expr', '__type__': 'NonTerminal'}], 'order': 5, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 146: {'origin': {'name': 'multi_infix_expr', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'multi_prod_expr', '__type__': 'NonTerminal'}], 'order': 6, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 147: {'origin': {'name': 'multi_prod_expr', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'term6', '__type__': 'NonTerminal'}, {'name': '__multi_prod_expr_plus_1', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 148: {'origin': {'name': 'term6', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'term5', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 149: {'origin': {'name': 'term6', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'multi_disj_expr', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 150: {'origin': {'name': 'multi_disj_expr', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'term5', '__type__': 'NonTerminal'}, {'name': '__multi_disj_expr_plus_2', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 151: {'origin': {'name': 'term5', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'term4', '__type__': 'NonTerminal'}], 'order': 
0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 152: {'origin': {'name': 'term5', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'multi_conj_expr', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 153: {'origin': {'name': 'multi_conj_expr', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'term4', '__type__': 'NonTerminal'}, {'name': '__multi_conj_expr_plus_3', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 154: {'origin': {'name': 'term4', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'term3', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 155: {'origin': {'name': 'term4', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'multi_sequential_expr', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 156: {'origin': {'name': 'multi_sequential_expr', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'term3', '__type__': 'NonTerminal'}, {'name': '__multi_sequential_expr_plus_4', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 157: {'origin': {'name': 'term3', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'term2', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 158: {'origin': {'name': 'term3', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'multi_parallel_expr', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 159: {'origin': {'name': 'multi_parallel_expr', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'term2', '__type__': 'NonTerminal'}, {'name': '__multi_parallel_expr_plus_5', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 160: {'origin': {'name': 'term2', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'term1', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 161: {'origin': {'name': 'term2', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'multi_intint_expr', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 
'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 162: {'origin': {'name': 'multi_intint_expr', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'term1', '__type__': 'NonTerminal'}, {'name': '__multi_intint_expr_plus_6', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 163: {'origin': {'name': 'term1', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'term', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 164: {'origin': {'name': 'term1', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'multi_extint_expr', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 165: {'origin': {'name': 'multi_extint_expr', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'term', '__type__': 'NonTerminal'}, {'name': '__multi_extint_expr_plus_7', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 166: {'origin': {'name': 'con_multi', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_13', 'filter_out': True, '__type__': 'Terminal'}], 'order': 0, 'alias': 'con_conjunction', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 167: {'origin': {'name': 'con_multi', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_12', 'filter_out': True, '__type__': 'Terminal'}], 'order': 1, 'alias': 'con_disjunction', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 168: {'origin': {'name': 'con_multi', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_15', 'filter_out': True, '__type__': 'Terminal'}], 'order': 2, 'alias': 'con_parallel_events', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 169: {'origin': {'name': 'con_multi', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_14', 'filter_out': True, '__type__': 'Terminal'}], 'order': 3, 'alias': 'con_sequential_events', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 170: {'origin': {'name': 'con_multi', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'VBAR', 'filter_out': True, '__type__': 'Terminal'}], 'order': 4, 'alias': 'con_intensional_intersection', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 171: {'origin': {'name': 'con_multi', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'AMPERSAND', 'filter_out': True, '__type__': 'Terminal'}], 'order': 5, 'alias': 'con_extensional_intersection', 
'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 172: {'origin': {'name': 'con_product', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'STAR', 'filter_out': True, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 173: {'origin': {'name': 'con_single', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'MINUS', 'filter_out': True, '__type__': 'Terminal'}], 'order': 0, 'alias': 'con_extensional_difference', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 174: {'origin': {'name': 'con_single', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'TILDE', 'filter_out': True, '__type__': 'Terminal'}], 'order': 1, 'alias': 'con_intensional_difference', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 175: {'origin': {'name': 'con_int_set', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LSQB', 'filter_out': True, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 176: {'origin': {'name': 'con_ext_set', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LBRACE', 'filter_out': True, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 177: {'origin': {'name': 'con_negation', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_16', 'filter_out': True, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 178: {'origin': {'name': 'con_int_image', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'BACKSLASH', 'filter_out': True, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 179: {'origin': {'name': 'con_ext_image', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'SLASH', 'filter_out': True, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 180: {'origin': {'name': 'variable', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'DOLLAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'word', '__type__': 'NonTerminal'}], 'order': 0, 'alias': 'independent_var', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 0, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 181: {'origin': {'name': 'variable', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'HASH', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'word', '__type__': 'NonTerminal'}], 'order': 1, 
'alias': 'dependent_var', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 0, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 182: {'origin': {'name': 'variable', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'QMARK', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'word', '__type__': 'NonTerminal'}], 'order': 2, 'alias': 'query_var', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': 0, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 183: {'origin': {'name': 'tense', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_17', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'NUMBER', 'filter_out': False, '__type__': 'Terminal'}, {'name': 'COLON', 'filter_out': True, '__type__': 'Terminal'}], 'order': 0, 'alias': 'tense_time', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 184: {'origin': {'name': 'tense', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_18', 'filter_out': True, '__type__': 'Terminal'}], 'order': 1, 'alias': 'tense_future', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 185: {'origin': {'name': 'tense', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_19', 'filter_out': True, '__type__': 'Terminal'}], 'order': 2, 'alias': 'tense_present', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 186: {'origin': {'name': 'tense', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_20', 'filter_out': True, '__type__': 'Terminal'}], 'order': 3, 'alias': 'tense_past', 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 187: {'origin': {'name': 'desire', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'truth', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 188: {'origin': {'name': 'truth', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'PERCENT', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'frequency', '__type__': 'NonTerminal'}, {'name': 'SEMICOLON', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'confidence', '__type__': 'NonTerminal'}, {'name': 'SEMICOLON', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'k_evidence', '__type__': 'NonTerminal'}, {'name': 'PERCENT', 'filter_out': True, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 189: {'origin': {'name': 'truth', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'PERCENT', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'frequency', '__type__': 'NonTerminal'}, {'name': 'SEMICOLON', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'confidence', '__type__': 'NonTerminal'}, {'name': 'PERCENT', 'filter_out': True, '__type__': 'Terminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 
'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': [False, False, False, False, True, False], '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 190: {'origin': {'name': 'truth', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'PERCENT', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'frequency', '__type__': 'NonTerminal'}, {'name': 'PERCENT', 'filter_out': True, '__type__': 'Terminal'}], 'order': 2, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': [False, False, True, False], '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 191: {'origin': {'name': 'budget', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'DOLLAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'priority', '__type__': 'NonTerminal'}, {'name': 'SEMICOLON', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'durability', '__type__': 'NonTerminal'}, {'name': 'SEMICOLON', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'quality', '__type__': 'NonTerminal'}, {'name': 'DOLLAR', 'filter_out': True, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': 2, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 192: {'origin': {'name': 'budget', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'DOLLAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'priority', '__type__': 'NonTerminal'}, {'name': 'SEMICOLON', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'durability', '__type__': 'NonTerminal'}, {'name': 'DOLLAR', 'filter_out': True, '__type__': 'Terminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': 2, 'template_source': None, 'empty_indices': [False, False, False, False, True, False], '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 193: {'origin': {'name': 'budget', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'DOLLAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'priority', '__type__': 'NonTerminal'}, {'name': 'DOLLAR', 'filter_out': True, '__type__': 'Terminal'}], 'order': 2, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': 2, 'template_source': None, 'empty_indices': [False, False, True, False], '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 194: {'origin': {'name': 'word', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'string_raw', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 195: {'origin': {'name': 'word', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'string', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 196: {'origin': {'name': 'priority', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_21', 'filter_out': False, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 197: {'origin': {'name': 'durability', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_22', 'filter_out': False, '__type__': 'Terminal'}], 'order': 0, 
'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 198: {'origin': {'name': 'quality', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_21', 'filter_out': False, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 199: {'origin': {'name': 'frequency', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_21', 'filter_out': False, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 200: {'origin': {'name': 'confidence', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_22', 'filter_out': False, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 201: {'origin': {'name': 'k_evidence', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_23', 'filter_out': False, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 202: {'origin': {'name': 'string', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_24', 'filter_out': False, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 203: {'origin': {'name': 'string_raw', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_25', 'filter_out': False, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 204: {'origin': {'name': '__statement_star_0', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'COMMA', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 205: {'origin': {'name': '__statement_star_0', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__statement_star_0', '__type__': 'NonTerminal'}, {'name': 'COMMA', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 206: {'origin': {'name': '__multi_prod_expr_plus_1', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'STAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term6', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 207: {'origin': {'name': '__multi_prod_expr_plus_1', 
'__type__': 'NonTerminal'}, 'expansion': [{'name': '__multi_prod_expr_plus_1', '__type__': 'NonTerminal'}, {'name': 'STAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term6', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 208: {'origin': {'name': '__multi_disj_expr_plus_2', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_12', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term5', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 209: {'origin': {'name': '__multi_disj_expr_plus_2', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__multi_disj_expr_plus_2', '__type__': 'NonTerminal'}, {'name': '__ANON_12', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term5', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 210: {'origin': {'name': '__multi_conj_expr_plus_3', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_13', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term4', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 211: {'origin': {'name': '__multi_conj_expr_plus_3', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__multi_conj_expr_plus_3', '__type__': 'NonTerminal'}, {'name': '__ANON_13', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term4', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 212: {'origin': {'name': '__multi_sequential_expr_plus_4', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_14', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term3', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 213: {'origin': {'name': '__multi_sequential_expr_plus_4', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__multi_sequential_expr_plus_4', '__type__': 'NonTerminal'}, {'name': '__ANON_14', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term3', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 214: {'origin': {'name': '__multi_parallel_expr_plus_5', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_15', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term2', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 215: {'origin': {'name': '__multi_parallel_expr_plus_5', '__type__': 'NonTerminal'}, 
'expansion': [{'name': '__multi_parallel_expr_plus_5', '__type__': 'NonTerminal'}, {'name': '__ANON_15', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term2', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 216: {'origin': {'name': '__multi_intint_expr_plus_6', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'VBAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term1', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 217: {'origin': {'name': '__multi_intint_expr_plus_6', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__multi_intint_expr_plus_6', '__type__': 'NonTerminal'}, {'name': 'VBAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term1', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 218: {'origin': {'name': '__multi_extint_expr_plus_7', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'AMPERSAND', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 219: {'origin': {'name': '__multi_extint_expr_plus_7', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__multi_extint_expr_plus_7', '__type__': 'NonTerminal'}, {'name': 'AMPERSAND', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}}
+)
+Shift = 0
+Reduce = 1
+def Lark_StandAlone(**kwargs):
+  return Lark._load_from_dict(DATA, MEMO, **kwargs)
diff --git a/Narsese/Parser/parser.py b/Narsese/Parser/parser.py
new file mode 100644
index 0000000..d02b72c
--- /dev/null
+++ b/Narsese/Parser/parser.py
@@ -0,0 +1,481 @@
+from Narsese._py.Evidence import Base
+from Narsese._py.Operation import Operation
+from Narsese._py.Budget import Budget
+from Narsese._py.Task import Task
+from Narsese._py.Sentence import Goal, Punctuation, Question, Quest, Sentence
+from Narsese._py.Variable import VarPrefix, Variable
+from Narsese._py.Connector import Connector
+from Narsese._py.Compound import Compound
+from Narsese._py import SELF
+from Narsese import Term, Judgement, Tense, Statement, Copula, Truth, Stamp, Interval
+from pathlib import Path
+from datetime import datetime
+import Config, Global
+
+root_path = Path(__file__).parent
+narsese_path = root_path/Path('./narsese.lark')
+
+
+narsese_py_path = root_path/Path('./narsese_lark.py')
+# get the last-modified time of the grammar file and of the generated parser
+mtime_lark = datetime.fromtimestamp(narsese_path.stat().st_mtime)
+mtime_py = datetime.fromtimestamp(narsese_py_path.stat().st_mtime)
+if mtime_lark > mtime_py:
+    # re-generate the `narsese_lark.py` file
+    import os
+    print(f'generating [{narsese_py_path}] ...')
+    os.system(f'python -m lark.tools.standalone {narsese_path} > {narsese_py_path}')
+
+import sys
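+# A minimal alternative sketch (an editorial assumption, not the module's own
+# logic): the regeneration step above could also be written with `subprocess`
+# instead of `os.system`, which avoids shell-quoting issues on paths with
+# spaces:
+#     import subprocess
+#     with open(narsese_py_path, 'w') as f:
+#         subprocess.run([sys.executable, '-m', 'lark.tools.standalone',
+#                         str(narsese_path)], stdout=f, check=True)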
+try:
+    from .narsese_lark import Lark_StandAlone, Transformer, v_args, Token
+except ImportError:
+    print('Failed to import the generated parser `narsese_lark.py`; the generation probably went wrong.')
+    exit()
+inline_args = v_args(inline=True)
+
+
+class TreeToNarsese(Transformer):
+
+    k: int
+    p_judgement: float
+    d_judgement: float
+    p_question: float
+    d_question: float
+    p_quest: float
+    d_quest: float
+    p_goal: float
+    d_goal: float
+
+    f: float
+    c_judgement: float
+    c_goal: float
+
+    temporal_window: int
+
+    @inline_args
+    def task(self, *args):
+        kwargs = dict(args)
+        sentence: Sentence = kwargs['sentence']
+        budget = kwargs.get('budget', None)
+        # budget = (p, d, q)
+        priority, durability, quality = budget or (None, None, None)
+        if budget is None or durability is None or quality is None:
+            if sentence.punct == Punctuation.Judgement: # judgement
+                judgement: Judgement = sentence
+                p = priority or self.p_judgement
+                d = durability or self.d_judgement
+                q = quality or Budget.quality_from_truth(judgement.truth)
+            elif sentence.punct == Punctuation.Question: # question
+                p = priority or self.p_question
+                d = durability or self.d_question
+                q = quality or 1.0
+            elif sentence.punct == Punctuation.Quest: # quest
+                p = priority or self.p_quest
+                d = durability or self.d_quest
+                q = quality or 1.0
+            elif sentence.punct == Punctuation.Goal: # goal
+                goal: Goal = sentence
+                p = priority or self.p_goal
+                d = durability or self.d_goal
+                q = quality or Budget.quality_from_truth(goal.truth)
+        else:
+            p, d, q = priority, durability, quality
+
+        budget = Budget(p, d, q)
+
+        kwargs['sentence'] = sentence
+        kwargs['budget'] = budget
+        return Task(**kwargs)
+
+    @inline_args
+    def judgement(self, statement, *args):
+        kwargs = dict(args)
+        truth = kwargs.pop('truth', None)
+        tense = kwargs.pop('tense', None)
+        if truth is not None:
+            f, c, k = truth
+            if c is None:
+                c = self.c_judgement
+        else:
+            f, c, k = self.f, self.c_judgement, self.k
+
+        tense = Global.time + tense if tense is not None else tense
+        base = Base((Global.get_input_id(),))
+        kwargs['truth'] = Truth(f, c, k)
+        kwargs['stamp'] = Stamp(Global.time, tense, None, base)
+        return ('sentence', Judgement(statement, **kwargs))
+
+    @inline_args
+    def question(self, statement, *args):
+        kwargs = dict(args)
+        tense = kwargs.pop('tense', None)
+        tense = Global.time + tense if tense is not None else tense
+        base = Base((Global.get_input_id(),))
+        kwargs['stamp'] = Stamp(Global.time, tense, None, base)
+        return ('sentence', Question(statement, **kwargs))
+
+    @inline_args
+    def quest(self, statement, *args):
+        kwargs = dict(args)
+        tense = kwargs.pop('tense', None)
+        tense = Global.time + tense if tense is not None else tense
+        base = Base((Global.get_input_id(),))
+        kwargs['stamp'] = Stamp(Global.time, tense, None, base)
+        return ('sentence', Quest(statement, **kwargs))
+
+    @inline_args
+    def goal(self, statement, *args):
+        kwargs = dict(args)
+        desire = kwargs.pop('truth', None)
+        tense = kwargs.pop('tense', None)
+        if desire is not None:
+            f, c, k = desire
+            if c is None:
+                c = self.c_goal
+        else:
+            f, c, k = self.f, self.c_goal, self.k
+        tense = Global.time + tense if tense is not None else tense
+        base = Base((Global.get_input_id(),))
+        kwargs['desire'] = Truth(f, c, k)
+        kwargs['stamp'] = Stamp(Global.time, tense, None, base)
+        return ('sentence', Goal(statement, **kwargs))
+
+
+    @inline_args
+    def statement(self, term1, copula, term2):
+        if copula == Copula.Instance:
+            term1 = Compound(Connector.ExtensionalSet, term1, is_input=True)
+            copula = Copula.Inheritance
+        elif copula == Copula.Property:
+            term2 = Compound(Connector.IntensionalSet, term2, is_input=True)
+            copula = Copula.Inheritance
+        elif copula == Copula.InstanceProperty:
+            term1 = Compound(Connector.ExtensionalSet, term1, is_input=True)
+            term2 = Compound(Connector.IntensionalSet, term2, is_input=True)
+            copula = Copula.Inheritance
+        return Statement(term1, copula, term2, is_input=True)
+
+    @inline_args
+    def truth(self, f: Token, c: Token=None, k: Token=None):
+        # truth : "%" frequency [";" confidence [";" k_evidence]] "%"
+        f = float(f.value)
+        c = float(c.value) if c is not None else None
+        k = float(k.value) if k is not None else self.k
+        return ('truth', (f, c, k))
+
+    # @inline_args
+    # def desire(self, truth: tuple):
+    #     # desire : truth
+    #     return ('desire', truth[1])
+
+    @inline_args
+    def budget(self, p: Token, d: Token=None, q: Token=None):
+        # budget : "$" priority [";" durability [";" quality]] "$"
+        p = float(p.value)
+        d = float(d.value) if d is not None else None
+        q = float(q.value) if q is not None else None
+        return ('budget', (p, d, q))
+
+    @inline_args
+    def atom_term(self, word: Token):
+        word = word.value
+        return Term(word, is_input=True)
+
+    @inline_args
+    def op(self, word: Token):
+        word = word.value
+        return Operation(word)
+
+    @inline_args
+    def interval(self, word: Token):
+        num = int(word.value)
+        return Interval(num)
+
+    @inline_args
+    def variable_term(self, var):
+        return var
+
+    @inline_args
+    def compound_term(self, compound):
+        return compound
+
+    @inline_args
+    def statement_term(self, statement):
+        return statement
+
+    @inline_args
+    def statement_operation1(self, op: Operation, *args: str):
+        return Statement(Compound(Connector.Product, *args, is_input=True), Copula.Inheritance, op, is_input=True)
+
+    @inline_args
+    def statement_operation2(self, word: Token, *args: str):
+        op = word.value
+        return Statement(Compound(Connector.Product, *args, is_input=True), Copula.Inheritance, Operation(op), is_input=True)
+
+    @inline_args
+    def inheritance(self):
+        return Copula.Inheritance
+
+    @inline_args
+    def similarity(self):
+        return Copula.Similarity
+
+    @inline_args
+    def instance(self):
+        return Copula.Instance
+
+    @inline_args
+    def property(self):
+        return Copula.Property
+
+    @inline_args
+    def instance_property(self):
+        return Copula.InstanceProperty
+
+    @inline_args
+    def implication(self):
+        return Copula.Implication
+
+    @inline_args
+    def predictive_implication(self):
+        return Copula.PredictiveImplication
+
+
+    @inline_args
+    def concurrent_implication(self):
+        return Copula.ConcurrentImplication
+
+    @inline_args
+    def retrospective_implication(self):
+        return Copula.RetrospectiveImplication
+
+    @inline_args
+    def equivalence(self):
+        return Copula.Equivalence
+
+    @inline_args
+    def predictive_equivalence(self):
+        return Copula.PredictiveEquivalence
+
+    @inline_args
+    def concurrent_equivalence(self):
+        return Copula.ConcurrentEquivalence
+
+
+
+
+
+    '''tense'''
+    # @inline_args
+    def tense_present(self, *args):
+        return ('tense', 0)
+
+    @inline_args
+    def tense_future(self, *args):
+        return ('tense', self.temporal_window)
+
+    @inline_args
+    def tense_past(self, *args):
+        return ('tense', -self.temporal_window)
+
+    @inline_args
+    def tense_time(self, number: Token):
+        return ('tense', int(number.value))
+
+
+    '''multi'''
+    @inline_args
+    def multi_prefix(self, connector, *args):
+        return Compound(connector, *args, is_input=True)
+
+    @inline_args
+    def multi_prefix_product(self, *args):
+        return self.multi_prefix(Connector.Product, *args)
+
+    @inline_args
+    def multi_infix(self, expr):
+        # connector = args[1] # TODO: Parse expression according to priorities of the connectors.
+        # terms = [term for i, term in enumerate(args) if i%2==0]
+        # return Compound(connector, *terms, is_input=True)
+        return expr
+
+    @inline_args
+    def multi_prod_expr(self, *args):
+        return Compound(Connector.Product, *args, is_input=True)
+
+    @inline_args
+    def multi_extint_expr(self, *args):
+        return Compound(Connector.ExtensionalIntersection, *args, is_input=True)
+
+    @inline_args
+    def multi_intint_expr(self, *args):
+        return Compound(Connector.IntensionalIntersection, *args, is_input=True)
+
+    @inline_args
+    def multi_parallel_expr(self, *args):
+        return Compound(Connector.ParallelEvents, *args, is_input=True)
+
+    @inline_args
+    def multi_sequential_expr(self, *args):
+        return Compound(Connector.SequentialEvents, *args, is_input=True)
+
+    @inline_args
+    def multi_conj_expr(self, *args):
+        return Compound(Connector.Conjunction, *args, is_input=True)
+
+    @inline_args
+    def multi_disj_expr(self, *args):
+        return Compound(Connector.Disjunction, *args, is_input=True)
+
+    '''single'''
+    @inline_args
+    def single_prefix(self, connector, term1, term2):
+        return Compound(connector, term1, term2, is_input=True)
+
+    @inline_args
+    def single_infix(self, term1, connector, term2):
+        return Compound(connector, term1, term2, is_input=True)
+
+    @inline_args
+    def negation(self, connector, term):
+        return Compound(connector, term, is_input=True)
+
+    @inline_args
+    def ext_image(self, connector, *args):
+        return Compound(connector, *args, is_input=True)
+
+    @inline_args
+    def int_image(self, connector, *args):
+        return Compound(connector, *args, is_input=True)
+
+    '''connectors'''
+
+    @inline_args
+    def con_conjunction(self):
+        return Connector.Conjunction
+
+    @inline_args
+    def con_product(self):
+        return Connector.Product
+
+    @inline_args
+    def con_disjunction(self):
+        return Connector.Disjunction
+
+    @inline_args
+    def con_parallel_events(self):
+        return Connector.ParallelEvents
+
+    @inline_args
+    def con_sequential_events(self):
+        return Connector.SequentialEvents
+
+    @inline_args
+    def con_intensional_intersection(self):
+        return Connector.IntensionalIntersection
+
+    @inline_args
+    def con_extensional_intersection(self):
+        return Connector.ExtensionalIntersection
+
+    @inline_args
+    def con_extensional_difference(self):
+        return Connector.ExtensionalDifference
+
+    @inline_args
+    def con_intensional_difference(self):
+        return Connector.IntensionalDifference
+
+    @inline_args
+    def con_int_set(self):
+        return Connector.IntensionalSet
+
+    @inline_args
+    def con_ext_set(self):
+        return Connector.ExtensionalSet
+
+    @inline_args
+    def con_negation(self):
+        return Connector.Negation
+
+    @inline_args
+    def con_int_image(self):
+        return Connector.IntensionalImage
+
+    @inline_args
+    def con_ext_image(self):
+        return Connector.ExtensionalImage
+
+    @inline_args
+    def independent_var(self, term: Token):
+        return Variable(VarPrefix.Independent, term.value)
+
+    @inline_args
+    def dependent_var(self, term: Token):
+        return Variable(VarPrefix.Dependent, term.value)
+
+    @inline_args
+    def query_var(self, term: Token):
+        return Variable(VarPrefix.Query, term.value)
+
+    '''set'''
+    @inline_args
+    def set(self, connector, *terms):
+        return Compound(connector, *terms, is_input=True)
+
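+    # Illustrative trace (an editorial sketch; the exact call order is an
+    # assumption inferred from the grammar aliases above): for the input
+    # "(&&, <a-->b>, <c-->d>)." the transformer roughly evaluates
+    #     con_conjunction()                     -> Connector.Conjunction
+    #     statement(a, Copula.Inheritance, b)   (once per `<...-->...>`)
+    #     multi_prefix(Connector.Conjunction, s1, s2)
+    #                                           -> Compound(Connector.Conjunction, s1, s2)
+    # and judgement(...) then wraps the compound in a Judgement sentence.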
+
+    # '''list_set'''
+    # def list_set(self, terms):
+    #     return Compound(Connector.List, *terms, is_input=True)
+
+
+class LarkParser:
+    def __init__(self) -> None:
+        self.config()
+        self._parser = Lark_StandAlone(transformer=TreeToNarsese())
+
+    def config(self, config_path='./config.json'):
+        Config.load(config_path)
+
+        # budget
+        TreeToNarsese.p_judgement = Config.Config.p_judgement
+        TreeToNarsese.d_judgement = Config.Config.d_judgement
+        TreeToNarsese.p_question = Config.Config.p_question
+        TreeToNarsese.d_question = Config.Config.d_question
+        TreeToNarsese.p_quest = Config.Config.p_quest
+        TreeToNarsese.d_quest = Config.Config.d_quest
+        TreeToNarsese.p_goal = Config.Config.p_goal
+        TreeToNarsese.d_goal = Config.Config.d_goal
+
+        # truth
+        TreeToNarsese.f = Config.Config.f
+        TreeToNarsese.c_judgement = Config.Config.c_judgement
+        TreeToNarsese.c_goal = Config.Config.c_goal
+        TreeToNarsese.k = Config.Config.k
+
+        # temporal-reasoning related
+        TreeToNarsese.temporal_window = Config.Config.temporal_duration
+
+
+    def parse(self, text: str) -> Task:
+        return self._parser.parse(text)
+
+parser = LarkParser()
+def parse(text: str): return parser.parse(text)
+
+# from lark import Lark, Transformer
+# parser = Lark.open(narsese_path, parser='lalr')
+
+# if __name__ == '__main__':
+#     with open(sys.argv[1]) as f:
+#         print(parser.parse(f.read()))
\ No newline at end of file
diff --git a/Narsese/__init__.py b/Narsese/__init__.py
new file mode 100644
index 0000000..df904b3
--- /dev/null
+++ b/Narsese/__init__.py
@@ -0,0 +1,11 @@
+'''
+
+'''
+if True:
+    # import from _py
+    from ._py import *
+else:
+    # import from _pyx
+    pass
+
+from .Parser.parser import parser, parse
diff --git a/Narsese/_py/Budget.py b/Narsese/_py/Budget.py
new file mode 100644
index 0000000..0cc0a5b
--- /dev/null
+++ b/Narsese/_py/Budget.py
@@ -0,0 +1,53 @@
+from math import sqrt
+from Config import Config
+from typing import Type
+from .Truth import Truth
+
+class Budget:
+    priority: float = 0.9
+    durability: float = 0.9
+    quality: float = 0.5
+
+    def __init__(self, priority: float, durability: float, quality: float):
+        self.priority = priority if priority is not None else Budget.priority
+        self.durability = durability if durability is not None else Budget.durability
+        self.quality = quality if quality is not None else Budget.quality
+
+    @property
+    def summary(self) -> float:
+        # the overall usefulness of the item: durability times the mean of priority and quality
+        return self.durability*(self.priority+self.quality)/2.0
+
+    @property
+    def is_above_thresh(self) -> bool:
+        return self.summary > Config.budget_thresh
+
+
+    def __str__(self) -> str:
+        return f'${float(self.priority):.3f};{float(self.durability):.3f};{float(self.quality):.3f}$'
+
+    def __repr__(self) -> str:
+        return str(self)
+
+    def __iter__(self):
+        '''return (p, d, q)'''
+        return iter((self.priority, self.durability, self.quality))
+
+
+    @classmethod
+    def quality_from_truth(cls, t: Truth):
+        exp: float = t.e
+        return max(exp, (1 - exp)*0.75)
+
+    def reduce_by_achieving_level(self, h: float):
+        self.priority = self.priority * (1 - h)
+
+    def distribute(self, n: int):
+        '''
+        distribute the budget into n parts.
+        Ref. OpenNARS 3.1.0 BudgetFunctions.java line 144~146:
+        ```
+        final float priority = (float) (b.getPriority() / sqrt(n));
+        return new BudgetValue(priority, b.getDurability(), b.getQuality(), narParameters);
+        ```
+        '''
+        return Budget(self.priority/sqrt((n if n > 0 else 1)), self.durability, self.quality)
\ No newline at end of file
diff --git a/Narsese/_py/Compound.py b/Narsese/_py/Compound.py
new file mode 100644
index 0000000..224ee36
--- /dev/null
+++ b/Narsese/_py/Compound.py
@@ -0,0 +1,444 @@
+# from NAL.Functions.ExtendedBooleanFunctions import Or
+from copy import copy
+import enum
+
+from Config import Enable
+from Narsese._py.Interval import Interval
+from Narsese._py.Sentence import Stamp
+# from Narsese._py.Statement import *
+from utils.IndexVar import IndexVar
+from .Term import Term, TermType
+from .Terms import Terms
+from .Connector import Connector, place_holder
+from typing import Iterable, List, Type, Union
+from ordered_set import OrderedSet
+from typing import Set
+from numpy import prod
+from utils.tools import list_contains
+import numpy as np
+
+class Compound(Term): #, OrderedSet):
+    type = TermType.COMPOUND
+
+    _terms: Terms
+    def __init__(self, connector: Connector, *terms: Term, is_input=False) -> None:
+        '''Build a compound term from a connector and its component terms.'''
+        self._is_commutative = connector.is_commutative
+        terms = Terms(terms, self._is_commutative, is_input)
+        self.connector, self._terms = self.prepocess_terms(connector, terms, is_input) # the connector may be changed, for example, (|, {A}, {B}) is changed into {A, B}.
+
+
+        terms = self._terms
+        word = self._terms_to_word(*terms)
+        if self.is_commutative:
+            terms_sorted = sorted(terms, key=hash)
+            word_sorted = self._terms_to_word(*terms_sorted)
+        else: word_sorted = None
+        Term.__init__(self, word, word_sorted=word_sorted)
+
+        compound: Set[Term] = self
+        self._components = OrderedSet(term for component in compound for term in component.sub_terms)
+
+        self._complexity += sum(term.complexity for term in terms)
+        self._is_higher_order = False # connector.is_higher_order
+
+        self._is_single_only = self.connector.is_single_only
+        self._is_double_only = self.connector.is_double_only
+        self._is_multiple_only = self.connector.is_multiple_only
+
+        if Enable.variable:
+            self.handle_variables(compound)
+        # self.handle_index_var(compound, is_input)
+
+    @property
+    def copula(self):
+        return self.connector
+
+    @property
+    def is_commutative(self):
+        return self._is_commutative
+
+    @property
+    def is_single_only(self):
+        return self._is_single_only
+
+    @property
+    def is_double_only(self):
+        return self._is_double_only
+
+    @property
+    def is_multiple_only(self):
+        return self._is_multiple_only
+
+    @property
+    def is_higher_order(self):
+        return super().is_higher_order
+
+    @property
+    def terms(self) -> Terms: # Union[Set[Term], List[Term]]:
+        return self._terms
+
+    @property
+    def index_var(self):
+        return self._terms._index_var
+
+    def _merge_compounds(self, connector_parent: Connector, connector: Connector, compounds: List[Type['Compound']], is_input: bool):
+        '''
+        The `compounds` should all have the same `connector`.
+        (&, {A, B}, {B, C}) ====> {B}
+        (|, {A, B}, {B, C}) ====> {A, B, C}
+        (&, [A, B], [B, C]) ====> [A, B, C]
+        (|, [A, B], [B, C]) ====> [B]
+        (-, {A, B}, {B, C}) ====> {A}
+        (~, [A, B], [B, C]) ====> [A]
+
+        It is ensured that the components concerned contain no further nested compounds that would need unfolding: had there been any, they would already have been unfolded when their parent compounds were built.
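+        e.g. in (&, {A, B}, {B, C}) both components share the ExtensionalSet
+        connector, so their term sets are intersected into {B}, and the parent
+        connector is afterwards replaced by `{` in `prepocess_terms`.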
+        '''
+        if connector_parent is Connector.ExtensionalIntersection:
+            if connector is Connector.ExtensionalSet:
+                return Terms.intersection(compounds[0].terms, *(compound.terms for compound in compounds[1:]), is_input=is_input)
+                # return Terms(compounds[0].terms, compounds[0].is_commutative, is_input).intersection(*(Terms(compound, compound.is_commutative, is_input) for compound in compounds[1:]))
+            elif connector is Connector.IntensionalSet:
+                # return Terms(compounds[0].terms, compounds[0].is_commutative, is_input).union(*(Terms(compound.terms, compound.is_commutative, is_input) for compound in compounds[1:]))
+                return Terms.union(compounds[0].terms, *(compound.terms for compound in compounds[1:]), is_input=is_input)
+            else:
+                return None
+        elif connector_parent is Connector.IntensionalIntersection:
+            if connector is Connector.ExtensionalSet:
+                # return Terms(compounds[0].terms, compounds[0].is_commutative, is_input).union(*(Terms(compound.terms, compound.is_commutative, is_input) for compound in compounds[1:]))
+                return Terms.union(compounds[0].terms, *(compound.terms for compound in compounds[1:]), is_input=is_input)
+            elif connector is Connector.IntensionalSet:
+                # return Terms(compounds[0].terms, compounds[0].is_commutative, is_input).intersection(*(Terms(compound.terms, compound.is_commutative, is_input) for compound in compounds[1:]))
+                return Terms.intersection(compounds[0].terms, *(compound.terms for compound in compounds[1:]), is_input=is_input)
+            else:
+                return None
+
+        elif connector_parent is Connector.ExtensionalDifference and connector is Connector.ExtensionalSet:
+            # return Terms(compounds[0].terms, compounds[0].is_commutative, is_input).intersection(*(Terms(compound.terms, compound.is_commutative, is_input) for compound in compounds[1:]), is_input=is_input)
+            return Terms.intersection(compounds[0].terms, *(compound.terms for compound in compounds[1:]), is_input=is_input)
+        elif connector_parent is Connector.IntensionalDifference and connector is Connector.IntensionalSet:
+            # return Terms(compounds[0].terms, compounds[0].is_commutative, is_input).intersection(*(Terms(compound.terms, compound.is_commutative, is_input) for compound in compounds[1:]), is_input=is_input)
+            return Terms.intersection(compounds[0].terms, *(compound.terms for compound in compounds[1:]), is_input=is_input)
+
+        elif connector_parent is connector:
+            return Terms((t for ts in compounds for t in ts), connector.is_commutative, is_input)
+        else:
+            return None
+
+
+    def prepocess_terms(self, connector_parent: Connector, terms: Iterable[Union[Type['Compound'], Term]], is_input=False):
+        '''
+        Pre-process the terms; return the connector of this compound and the set/list of components.
+        For `{{A, B}, {B, C}}`, return `{, A, B, C`;
+            proof: {{A, B}, {B, C}}={(|, {A}, {B}), (|, {B}, {C})}=(|, (|, {A}, {B}), (|, {B}, {C}))=(|, {A}, {B}, {C})={A, B, C}
+        For `[[A, B], [B, C]]`, return `[, A, B, C`;
+        For `(con, (con, A, B), (con, B, C))`, return `con, A, B, C` if con is commutative, else return `con, A, B, B, C`;
+        For `(|, {A, B}, {B, C})`, return `{, A, B, C`;
+        For `(&, [A, B], [B, C])`, return `[, A, B, C`;
+        For `(con, A, B, C)`, return `con, A, B, C`;
+        For `{{A, B}, {B, C}, D}`, return `{, {A, B, C}, D`;
+
+        Returns:
+            connector, terms
+        '''
+        # TODO: The `ExtensionalDifference` and `IntensionalDifference` cases.
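+        # Worked example (editorial comment): for (|, {A, B}, {B, C}) the
+        # commutative branch below groups both components under the `{`
+        # connector, `_merge_compounds` unions their term sets into `A, B, C`,
+        # and the method returns the `{` connector with Terms(A, B, C), i.e.
+        # the compound is normalized to {A, B, C}.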
+        if self.is_commutative:
+            # terms with commutative connectors are also deduplicated:
+            # if there are terms with the same commutative connector, they should be combined together
+            categories = {}
+            for term in terms:
+                connector = term.connector if term.is_compound and term.is_commutative else None # `None` means the connector of the term is not cared about, because it just needs to be added into the parent compound-term as a whole.
+                category = categories.get(connector, None)
+                if category is None: categories[connector] = category = []
+                category.append(term)
+
+            terms_norm: List[Compound] = []
+            for connector, compounds in categories.items():
+                if connector is None:
+                    terms_norm.extend([(connector, compound) for compound in compounds])
+                    continue
+                # Now, `connector` is not `None`.
+
+                terms_merged = self._merge_compounds(connector_parent, connector, compounds, is_input=is_input)
+                if terms_merged is None: # no merge rule applies; they don't need to be merged into a whole
+                    terms_norm.append((connector, compounds))
+                    continue
+
+                # Now, `terms_merged` is not `None`.
+                # However, the compounds (in `terms_merged`), as components, shouldn't immediately be constructed into a compound term here, because in some cases there is only one compound and the parent connector should be set to that single compound's connector. For example, when (|, {A, B}, {C, D}) is handled, `terms_merged` is `A, B, C, D`; if this function returned a compound `{A, B, C, D}` at this point, the parent compound would become (|, {A, B, C, D}), which is obviously incorrect.
+                # Hence, the final construction of the compound is left out of this function.
+
+                terms_norm.append((connector, terms_merged))
+            if len(terms_norm) > 1:
+                terms = []
+                connector: Connector
+                for connector, term in terms_norm:
+                    if connector is None: terms.append(term)
+                    elif connector is connector_parent: terms.extend(term)
+                    else:
+                        if connector.check_valid(len(term)):
+                            term = Compound(connector, *term, is_input=False)
+                        terms.append(term)
+                if len(terms) > 1:
+                    return connector_parent, Terms(terms, is_commutative=True, is_input=False)
+                else: # len(terms) == 1
+                    term = terms[0]
+                    terms_norm = [[term.connector, term.terms] if term.is_compound else [None, term]]
+
+            # Now, `terms_norm` holds exactly one (connector, terms) pair: the original components were merged into a single compound.
+
+            # The connector returned depends on the types of `connector` and `connector_parent`. For example,
+            #     if `connector_parent` is `&` and `connector` is `{`, then return `connector`;
+            #     if `connector_parent` is `|` and `connector` is `{`, then return `connector`;
+            #     if `connector_parent` is `&` and `connector` is `&`, it makes no difference whether `connector` or `connector_parent` is returned;
+
+            connector, terms = terms_norm[0]
+            if connector is None: terms = (terms, )
+
+            if (connector in (Connector.ExtensionalSet, Connector.IntensionalSet)) and (connector_parent in (Connector.IntensionalIntersection, Connector.ExtensionalIntersection)):
+                return connector, Terms(terms, is_commutative=True, is_input=is_input)
+
+            # otherwise, return `connector_parent` as the connector.
+            return connector_parent, Terms(terms, is_commutative=True, is_input=is_input)
+        else:
+            return connector_parent, Terms(terms, is_commutative=False, is_input=is_input)
+
+
+    def count_components(self):
+        return len(self.terms) # OrderedSet.__len__(self)
+
+    def contains(self, compound: Type['Compound']) -> bool:
+        if compound.is_compound and compound.connector is self.connector:
+            return self.terms.issuperset(compound.terms)
+        else: return compound in self.terms
+
+    def __iter__(self):
+        return iter(self.terms) # OrderedSet.__iter__(self)
+
+    def __getitem__(self, index: List[int]) -> Term:
+        if isinstance(index, int): index = (index,)
+        if len(index) == 0: return self
+
+        idx = index[0]
+        if idx >= len(self.terms): raise IndexError("Out of bounds.")
+
+        index = index[1:]
+        term: Term = self.terms[idx] # OrderedSet.__getitem__(self, idx)
+        return term.__getitem__(index)
+
+    def __sub__(self, s: Type['Compound']) -> Union[Type['Compound'], Term]:
+        # if self.is_double_only or self.is_single_only: raise 'Invalid case.'
+        # if OrderedSet.__contains__(self, s): s = (s,)
+
+        if self.is_commutative:
+            if s.is_compound and s.connector: s = s.terms # `s` is now a `Terms` collection
+            else: s = Terms((s,), False, False)
+            terms = self.terms - s
+        else:
+            if s.is_compound and s.connector:
+                s = s.terms # `s` is now a `Terms` collection
+                terms = [term for term in self.terms if term not in s]
+            else:
+                terms = [term for term in self.terms if term != s]
+
+        if self.is_multiple_only and len(terms) == 1:
+            result = terms[0]
+        else:
+            result = Compound(self.connector, *terms)
+        return result
+
+    def __rsub__(self, s: Term) -> Union[Type['Compound'], Term]:
+        return self - s
+
+
+    def has_common(self, compound: Type['Compound'], same_connector: bool=True) -> bool:
+        if not compound.is_compound: return False
+
+        return ((self.connector is compound.connector) if same_connector else True) and ((not self.terms.isdisjoint(compound.terms)) if self.is_commutative else list_contains(self.terms, list(compound.terms)))
+
+    @classmethod
+    def copy(cls, compound: Type['Compound']):
+        '''
+        Create a new compound whose components are, one by one, the very objects in the input (old) compound;
+        that is, return a shallow copy of the input compound.
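+        e.g. `Compound.copy(c)` rebuilds `Compound(c.connector, *c)`, so the new
+        compound shares its component `Term` objects with `c`.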
+        '''
+        return cls(compound.connector, *compound)
+
+    def replace(self, term_old: Term, term_new: Term, connector: Connector=None, idx: int=None) -> Type['Compound']:
+        # if term_old.is_atom: term_old = (term_old,)
+        # elif term_old.is_compo
+        terms: Union[OrderedSet, list] = self.terms
+        idx = terms.index(term_old) if idx is None else idx
+        return Compound(self.connector if connector is None else connector, *(term if i != idx else term_new for i, term in enumerate(self)))
+
+    def equal(self, o: Type['Compound']) -> bool:
+        '''
+        Return:
+            is_equal (bool)
+        '''
+        if o.is_compound:
+            if self.connector is not o.connector: return False
+            if self.is_commutative:
+                set1: Iterable[Term] = self.terms - o.terms
+                set2: Iterable[Term] = o.terms - self.terms
+                if len(set1) == len(set2) == 0:
+                    return True
+                eq_array = np.array([[term1.equal(term2) for term2 in set2] for term1 in set1])
+                if np.prod(eq_array.sum(axis=0)) > 0 and np.prod(eq_array.sum(axis=1)) > 0:
+                    return True
+                else: return False
+            else:
+                if len(self) != len(o): return False
+                term1: Term
+                term2: Term
+                for term1, term2 in zip(self.terms, o.terms):
+                    if not term1.equal(term2): return False
+                return True
+        elif o.is_atom and o.is_var:
+            return True
+        else: return False
+
+
+    def __repr__(self) -> str:
+        return f'<Compound: {self.repr()}>'
+
+
+    def _terms_to_word(self, *terms: Term):
+        connector = self.connector
+        if connector == Connector.ExtensionalSet: word = f"{{{', '.join([str(term) for term in terms])}}}"
+        elif connector == Connector.IntensionalSet: word = f"[{', '.join([str(term) for term in terms])}]"
+        else: word = f"({connector.value}, {', '.join([str(term) for term in terms])})"
+        return word
+
+
+    def repr_with_var(self, index_var: IndexVar, pos: list):
+        compound: Set[Term] = self
+        word_terms = (str(component) if not component.has_var else component.repr_with_var(index_var, pos+[i]) for i, component in enumerate(compound))
+
+        return self._terms_to_word(*word_terms)
+
+
+    @staticmethod
+    def _convert(compound: Type['Compound']):
+        '''
+        Convert the form of the compound.
+        For example, if the compound is multiple-only and it has exactly one component, return that component instead of the compound.
+        '''
+        if compound.is_compound:
+            if compound.is_multiple_only and len(compound.terms) == 1: # it cannot be 0.
+                return compound[0]
+            elif compound.is_single_only and len(compound.terms) > 1:
+                raise ValueError("Invalid case!")
+            elif compound.is_double_only and len(compound.terms) != 2:
+                raise ValueError("Invalid case!")
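+        # e.g. `(&&, A)` collapses above to the bare term `A` (conjunction is
+        # multiple-only), while a well-formed compound falls through unchanged.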
+ return compound + + @classmethod + def ExtensionalSet(cls, *terms: Term) -> Type['Compound']: + return cls._convert(Compound(Connector.ExtensionalSet, *terms)) + + @classmethod + def IntensionalSet(cls, *terms: Term) -> Type['Compound']: + return cls._convert(Compound(Connector.IntensionalSet, *terms)) + + @classmethod + def Instance(cls, term: Term) -> Type['Compound']: + return cls._convert(Compound.ExtensionalSet(term)) + + @classmethod + def Property(cls, term: Term) -> Type['Compound']: + return cls._convert(Compound.IntensionalSet(term)) + + @classmethod + def ExtensionalImage(cls, term_relation: Term, *terms: Term, idx: int=None, compound_product: Type['Compound']=None) -> Type['Compound']: + if compound_product is not None: + # if idx is None: + terms = compound_product.terms + idx = terms.index(term_relation) if idx is None else idx + compound = Compound.ExtensionalImage(term_relation, *(tm if i != idx else place_holder for i, tm in enumerate(compound_product))) + elif terms is not None: + if idx is not None: + terms: list = [*terms[:idx], place_holder, *terms[idx:]] + compound = Compound(Connector.ExtensionalImage, term_relation, *terms) + return cls._convert(compound) + + @classmethod + def IntensionalImage(cls, term_relation: Term, *terms: Term, idx: int=None, compound_product: Type['Compound']=None, compound_image: Type['Compound']=None) -> Type['Compound']: + if compound_product is not None: + # if idx is None: + idx = compound_product.terms.index(term_relation) if idx is None else idx + compound = Compound.IntensionalImage(term_relation, *(tm if i != idx else place_holder for i, tm in enumerate(compound_product))) + elif terms is not None: + if idx is not None: + terms: list = [*terms[:idx], place_holder, *terms[idx:]] + compound = Compound(Connector.IntensionalImage, term_relation, *terms) + return cls._convert(compound) + + @classmethod + def Image(cls, replaced_term: Term, compound_image: Type['Compound'], idx_replaced: int=None) -> Type['Compound']: + '''Convert Image to Image''' + idx_replaced = compound_image.terms.index(replaced_term) if idx_replaced is None else idx_replaced + + compound = Compound(compound_image.connector, *((place_holder if i == idx_replaced else tm if tm != place_holder else replaced_term) for i, tm in enumerate(compound_image))) + + return cls._convert(compound) + + @classmethod + def Product(cls, term: Term, *terms: Term, idx: int=None, compound_image: Type['Compound']=None): + if compound_image is not None: + idx = compound_image.terms.index(place_holder)-1 if idx is None else idx + compound = Compound.Product(*((tm if i != idx else term) for i, tm in enumerate(compound_image.terms[1:]))) + else: compound = Compound(Connector.Product, term, *terms) + return cls._convert(compound) + + @classmethod + def Negation(cls, term: Type['Compound']) -> Union[Type['Compound'], Term]: + if term.is_compound and term.connector == Connector.Negation: compound = term[0] + else: compound = Compound(Connector.Negation, term) + return cls._convert(compound) + + @classmethod + def Conjunction(cls, *terms: Union[Term, Type['Compound']]) -> Type['Compound']: + terms = (term for compound in terms for term in (compound if compound.is_compound and compound.connector==Connector.Conjunction else (compound,))) + return cls._convert(Compound(Connector.Conjunction, *terms)) + + @classmethod + def Disjunction(cls, *terms: Union[Term, Type['Compound']]) -> Type['Compound']: + terms = (term for compound in terms for term in (compound if compound.is_compound and 
compound.connector==Connector.Disjunction else (compound,))) + return cls._convert(Compound(Connector.Disjunction, *terms)) + + @classmethod + def IntensionalIntersection(cls, *terms: Union[Term, Type['Compound']]) -> Type['Compound']: + terms = (term for compound in terms for term in (compound if compound.is_compound and compound.connector==Connector.IntensionalIntersection else (compound,))) + return cls._convert(Compound(Connector.IntensionalIntersection, *terms)) + + @classmethod + def ExtensionalIntersection(cls, *terms: Union[Term, Type['Compound']]) -> Type['Compound']: + terms = (term for compound in terms for term in (compound if compound.is_compound and compound.connector==Connector.ExtensionalIntersection else (compound,))) + return cls._convert(Compound(Connector.ExtensionalIntersection, *terms)) + + @classmethod + def ExtensionalDifference(cls, term1: Term, term2: Term) -> Type['Compound']: + return cls._convert(Compound(Connector.ExtensionalDifference, term1, term2)) + + @classmethod + def IntensionalDifference(cls, term1: Term, term2: Term) -> Type['Compound']: + return cls._convert(Compound(Connector.IntensionalDifference, term1, term2)) + + @classmethod + def SequentialEvents(cls, *terms: Union[Term, Interval]) -> Type['Compound']: + return cls._convert(Compound(Connector.SequentialEvents, *terms)) + + @classmethod + def ParallelEvents(cls, *terms: Term) -> Type['Compound']: + return cls._convert(Compound(Connector.ParallelEvents, *terms)) + + + def clone(self): + if not self.has_var: return self + # now, not self.has_var + clone = copy(self) + clone._terms = self._terms.clone() + return clone diff --git a/Narsese/_py/Connector.py b/Narsese/_py/Connector.py new file mode 100644 index 0000000..9775cc4 --- /dev/null +++ b/Narsese/_py/Connector.py @@ -0,0 +1,80 @@ +# from enum import Enum +from utils.IdEnum import IdEnum +from .Term import Term + +class Connector(IdEnum): + Conjunction = "&&" + Disjunction = "||" + Product = "*" + ParallelEvents = "&|" + SequentialEvents = "&/" + IntensionalIntersection = "|" + ExtensionalIntersection = "&" + ExtensionalDifference = "-" + IntensionalDifference = "~" + Negation = "--" + IntensionalSet = "[" + ExtensionalSet = "{" + IntensionalImage = "\\" + ExtensionalImage = "/" + List = "#" + + @property + def is_commutative(self): + return self in ( + Connector.Conjunction, + Connector.Disjunction, + Connector.ParallelEvents, + Connector.IntensionalIntersection, + Connector.ExtensionalIntersection, + Connector.IntensionalSet, + Connector.ExtensionalSet + ) + @property + def is_single_only(self): + return self in ( + Connector.Negation, + ) + + @property + def is_double_only(self): + return self in ( + Connector.ExtensionalDifference, + Connector.IntensionalDifference + ) + + @property + def is_multiple_only(self): + return self in ( + Connector.Conjunction, + Connector.Disjunction, + Connector.Product, + Connector.ParallelEvents, + Connector.SequentialEvents, + Connector.IntensionalIntersection, + Connector.ExtensionalIntersection, + Connector.ExtensionalDifference, + Connector.IntensionalDifference, + Connector.IntensionalImage, + Connector.ExtensionalImage + ) + + def check_valid(self, len_terms: int): + if self.is_single_only: return len_terms == 1 + elif self.is_double_only: return len_terms == 2 + elif self.is_multiple_only: return len_terms > 1 + else: return len_terms > 0 + + # @property + # def is_higher_order(self): + # return self in ( + # Connector.Conjunction, + # Connector.Disjunction, + # Connector.ParallelEvents, + # 
Connector.IntensionalIntersection,
+    #         Connector.ExtensionalIntersection,
+    #         Connector.IntensionalSet,
+    #         Connector.ExtensionalSet
+    #     )
+
+place_holder = Term('_', True)
\ No newline at end of file
diff --git a/Narsese/_py/Copula.py b/Narsese/_py/Copula.py
new file mode 100644
index 0000000..56f7ee3
--- /dev/null
+++ b/Narsese/_py/Copula.py
@@ -0,0 +1,55 @@
+# from enum import Enum
+from utils.IdEnum import IdEnum
+
+class Copula(IdEnum):
+    Inheritance = "-->"
+    Similarity = "<->"
+    Instance = "{--"
+    Property = "--]"
+    InstanceProperty = "{-]"
+    Implication = "==>"
+    PredictiveImplication = "=/>"
+    ConcurrentImplication = "=|>"
+    RetrospectiveImplication = "=\>"
+    Equivalence = "<=>"
+    PredictiveEquivalence = "</>"
+    ConcurrentEquivalence = "<|>"
+
+    @property
+    def is_commutative(self):
+        return self in (Copula.Similarity, Copula.Equivalence, Copula.ConcurrentEquivalence)
+
+    @property
+    def is_higher_order(self):
+        return self in (
+            Copula.Implication,
+            Copula.PredictiveImplication,
+            Copula.ConcurrentImplication,
+            Copula.RetrospectiveImplication,
+            Copula.Equivalence,
+            Copula.PredictiveEquivalence,
+            Copula.ConcurrentEquivalence
+        )
+
+    @property
+    def symmetrize(self):
+        if self is Copula.Inheritance:
+            return Copula.Similarity
+        elif self is Copula.Implication:
+            return Copula.Equivalence
+        elif self is Copula.ConcurrentImplication:
+            return Copula.ConcurrentEquivalence
+        elif self is Copula.PredictiveImplication:
+            return Copula.PredictiveEquivalence
+        else:
+            raise ValueError("Invalid case.")
+
+
+    @property
+    def reverse(self):
+        if self is Copula.PredictiveImplication:
+            return Copula.RetrospectiveImplication
+        elif self is Copula.RetrospectiveImplication:
+            return Copula.PredictiveImplication
+        else:
+            return self
\ No newline at end of file
diff --git a/Narsese/_py/Evidence.py b/Narsese/_py/Evidence.py
new file mode 100644
index 0000000..814dbb8
--- /dev/null
+++ b/Narsese/_py/Evidence.py
@@ -0,0 +1,99 @@
+# from Narsese._py.Task import Task
+from .Truth import Truth
+from .Term import Term
+from .Statement import Statement
+from enum import Enum
+from .Tense import Tense
+from typing import Tuple, Type, Set, List, Union
+
+from ordered_set import OrderedSet
+# from .Evidence import Base
+# from .Task import *
+
+from Config import Config, Enable
+import Global
+
+# class Evidence:
+#     def __init__(self, task) -> None:
+#         self._hash_task = hash(task)
+#         self._input_id = task.input_id
+
+#     def __eq__(self, evidence: Type['Evidence']):
+#         return (self._hash_task==evidence._hash_task) and (self._input_id==evidence._input_id)
+
+class Base:
+    '''Evidential Base'''
+    def __init__(self, terms: Tuple[int]=tuple()) -> None:
+        # TODO: DOUBT --
+        #   IF `<A-->B>.`, `<B-->C>.`, `<C-->D>.`, THEN it can be derived in a single step that `<A-->C>.` and `<B-->D>.`.
+        #   In the second step, it can be derived that `{<A-->B>. <B-->D>.} |- (1) <A-->D>.`, and `{<A-->C>. <C-->D>.} |- (2) <A-->D>.`
+        #   Is it reasonable theoretically to apply revision rules between (1) and (2)?
+
+        self._set: Set[int] = OrderedSet(terms)
+        self._hash = None
+
+    @classmethod
+    def interleave(cls, base1, base2) -> Type['Base']:
+        '''interleave two bases'''
+        # TODO: DOUBT --
+        #   What if some evidence is lost (because of forgetting)?
+
+        # TODO: DOUBT --
+        #   Is the base ordered? What kind of evidence should overflow?
+
+
+        # TODO: Ref: OpenNARS 3.1.0 Stamp.java line 178~187.
+        # TODO: Optimize this loop with cython (with python-style).
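+        # A rough Python rendering of the Java loop quoted below (only a sketch;
+        # `capacity` is an assumed bound, not an existing config entry):
+        #     first, second = list(base1._set), list(base2._set)
+        #     i1 = i2 = 0
+        #     base = Base()
+        #     while len(base) < capacity and (i1 < len(first) or i2 < len(second)):
+        #         if i2 < len(second): base.add(second[i2]); i2 += 1
+        #         if i1 < len(first): base.add(first[i1]); i1 += 1
+        #     return base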
+ # while (j < baseLength) { + # if(i2 < secondLength) { + # evidentialBase[j++] = secondBase[i2++]; + # } + # if(i1 < firstLength) { + # evidentialBase[j++] = firstBase[i1++]; + # } + # } + + def add(self, id_evidence: int): + self._hash = None + self._set.add(id_evidence) + return self + + def extend(self, base: Union[Type['Base'] , None]): + self._hash = None + self._set = self._set.union(base._set) + return self + + def is_overlaped(self, base: Union[Type['Base'], None]) -> bool: + ''' Check whether another `Base` object is overlapped with `self`. + Complexity: O(N) N=min(len(self), len(o)) + ''' + return not self._set.isdisjoint(base._set) if base is not None else False + + def do_hashing(self): + self._hash = hash(frozenset(self._set)) + return self._hash + + def __eq__(self, o: Type['Base']) -> bool: + # TODO: Ref: OpenNARS 3.1.0 Stamp.Java line 334~335, 346~349, 461~516 + if id(o) == id(self): + return True + elif hash(self) != hash(o): + return False + return self._set == o._set + + def __or__(self, base: Type['Base']) -> Type['Base']: + return Base(self._set | base._set) + + def __ior__(self, base: Type['Base']) -> Type['Base']: + self._hash = None + self._set |= base._set + return self + + def __hash__(self) -> int: + return self._hash if self._hash is not None else self.do_hashing() + + def __len__(self) -> int: + return len(self._set) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({len(self._set)})" diff --git a/Narsese/_py/Interval.py b/Narsese/_py/Interval.py new file mode 100644 index 0000000..93f5d69 --- /dev/null +++ b/Narsese/_py/Interval.py @@ -0,0 +1,14 @@ +from .Term import Term + + +class Interval(Term): + is_interval: bool = True + def __init__(self, interval, do_hashing=False, word_sorted=None, is_input=False) -> None: + super().__init__("+"+str(interval), do_hashing=do_hashing, word_sorted=word_sorted, is_input=is_input) + self.interval = int(interval) + + def __repr__(self) -> str: + return f'' + + def __int__(self) -> int: + return self.interval \ No newline at end of file diff --git a/Narsese/_py/Item.py b/Narsese/_py/Item.py new file mode 100644 index 0000000..3b1598f --- /dev/null +++ b/Narsese/_py/Item.py @@ -0,0 +1,25 @@ +from .Budget import Budget +from typing import Type +from Config import Config +from copy import deepcopy +class Item: + def __init__(self, hash_value, budget: Budget=None, copy_budget=True) -> None: + budget = (deepcopy(budget) if copy_budget else budget) if budget is not None else Budget(Config.priority, Config.durability, Config.quality) + self._hash_value = hash_value + self.set_budget(budget) + + def set_budget(self, budget: Budget): + self.budget: Budget = budget + + def __hash__(self) -> int: + return self._hash_value + + def __eq__(self, o: object) -> bool: + return hash(o) == hash(self) + + def __str__(self) -> str: + return f'{self.budget} (Item)' + + def __repr__(self) -> str: + return str(self) + \ No newline at end of file diff --git a/Narsese/_py/Link.py b/Narsese/_py/Link.py new file mode 100644 index 0000000..66474ab --- /dev/null +++ b/Narsese/_py/Link.py @@ -0,0 +1,45 @@ +from enum import Enum +from .Item import Item +from .Budget import Budget +from .Task import Task +from typing import List, Type +# from .Concept import * +from .import Concept + +class LinkType(Enum): + SELF = 0 # At C, point to C; TaskLink only + COMPONENT = 1 # At (&&, A, C), point to C + COMPOUND = 2 # At C, point to (&&, A, C) + COMPONENT_STATEMENT = 3 # At A>, point to C + COMPOUND_STATEMENT = 4 # At C, point to A> + 
COMPONENT_CONDITION = 5 # At <(&&, C, B) ==> A>, point to C + COMPOUND_CONDITION = 6 # At C, point to <(&&, C, B) ==> A> + TRANSFORM = 7 # At C, point to <(*, C, B) --> A>; TaskLink only + TEMPORAL = 8 # At C, point to B, potentially without common subterm term + + + +class Link(Item): + link_id = 0 + type: LinkType + component_index: List[int] # TODO: refer to OpenNARS 3.0.4, TermLink.java line 75 and TaskLink.java line 85. But why use it? + def __init__(self, source: Item, target: Task, budget: Budget) -> None: + self.link_id = Link.link_id + hash_value = hash((source, target, self.link_id)) + super().__init__(hash_value, budget=budget) + Link.link_id += 1 + + self.source = source + self.target = target + + def __str__(self) -> str: + return f'{self.budget} {self.source.term} --- {self.target.term}' + + +class TermLink(Link): + def __init__(self, source: Type['Concept'], target: Task, budget: Budget) -> None: + super().__init__(source, target, budget) + +class TaskLink(Link): + def __init__(self, source: Type['Concept'], target: Type['Concept'], budget: Budget) -> None: + super().__init__(source, target, budget) \ No newline at end of file diff --git a/Narsese/_py/Operation.py b/Narsese/_py/Operation.py new file mode 100644 index 0000000..f214f40 --- /dev/null +++ b/Narsese/_py/Operation.py @@ -0,0 +1,32 @@ +from .Term import Term + +class Operation(Term): + + is_operation = True + + def __init__(self, word, do_hashing=False, is_mental_operation=False) -> None: + super().__init__(word, do_hashing=do_hashing) + self._is_mental_operation = is_mental_operation + + @property + def is_mental_operation(self): + return self._is_mental_operation + + def __str__(self) -> str: + return "^" + str(self.word) + + def __repr__(self) -> str: + return f'' + + def do_hashing(self): + self._hash_value = hash(str(self)) + return self._hash_value + + +Anticipate = Operation('Anticipate', True, is_mental_operation=True) +Believe = Operation('believe', True, is_mental_operation=True) +Doubt = Operation('doubt', True, is_mental_operation=True) +Evaluate = Operation('evaluate', True, is_mental_operation=True) +Hesitate = Operation('hesitate', True, is_mental_operation=True) +Want = Operation('want', True, is_mental_operation=True) +Wonder = Operation('wonder', True, is_mental_operation=True) \ No newline at end of file diff --git a/Narsese/_py/Sentence.py b/Narsese/_py/Sentence.py new file mode 100644 index 0000000..9b4a189 --- /dev/null +++ b/Narsese/_py/Sentence.py @@ -0,0 +1,204 @@ +from copy import copy +from .Truth import Truth +from .Term import Term +from .Statement import Statement +from enum import Enum +from .Tense import Tense +from typing import Type, Set + +from ordered_set import OrderedSet +from .Evidence import * + +from Config import Config, Enable +import Global + +class Punctuation(Enum): + Judgement = r"." + Question = r"?" + Goal = r"!" 
+ Quest = r"@" + @property + def is_judgement(self): + return self == Punctuation.Judgement + + @property + def is_question(self): + return self == Punctuation.Question + + @property + def is_goal(self): + return self == Punctuation.Goal + + @property + def is_quest(self): + return self == Punctuation.Quest + +class Stamp: + + def __init__(self, t_creation: int, t_occurrence: int, t_put: int, evidential_base: Type['Base']) -> None: + ''' + Args: + t_creation(int): creation time of the stamp + t_occurrence(int): estimated occurrence time of the event + t_put(int): the time when it was put into buffer + ''' + self.t_creation = t_creation + self.t_occurrence = t_occurrence + self.t_put = t_put + self.evidential_base: Type['Base'] = evidential_base + + + @property + def tense(self): + return Tense.Eternal if self.t_occurrence is None else Tense.Future if self.t_occurrence > Global.time+Config.temporal_duration else Tense.Past if self.t_occurrence < Global.time-Config.temporal_duration else Tense.Present + + @property + def is_eternal(self): + return self.t_occurrence is None + + def eternalize(self): + self.t_occurrence = None + + def extend_evidenital_base(self, base: Type['Base']): + if self.evidential_base is None: + if base is None: return + elif self.evidential_base is None: self.evidential_base = Base(()) + self.evidential_base.extend(base) + + def __str__(self): + return f'{self.evidential_base}, {self.tense}' + + def __repr__(self): + return f'' +''' +Doubt that are Question and Quest have got a tense? +''' + +class Sentence: + truth: Truth = None + def __init__(self, term: Term, punct: Punctuation, stamp: Stamp, do_hashing: bool=False) -> None: + '''''' + self.term = term + self.word = term.word + str(punct.value) + self.punct = punct + self.stamp: Stamp = stamp + + @property + def evidential_base(self): + return self.stamp.evidential_base + + @property + def tense(self): + return self.stamp.tense + + # @property + # def temporal_order(self): + # return self.term.temporal_order + + def eternalize(self, truth: Truth=None): + sentence = copy(self) + if truth is not None: + sentence.truth = truth + stamp = copy(sentence.stamp) + stamp.eternalize() + sentence.stamp = stamp + return sentence + + def __hash__(self) -> int: + return hash(self.term) + + def __str__(self) -> str: + return self.word + + def __repr__(self) -> str: + return f'<{"Sentence" if self.is_eternal else "Event"}: {self.term.repr()}{self.punct.value}>' + + # @property + def repr(self, is_input=True): + return self.term.repr(is_input) + + @property + def is_judgement(self) -> bool: + return self.punct == Punctuation.Judgement + + @property + def is_goal(self) -> bool: + return self.punct == Punctuation.Goal + + @property + def is_question(self) -> bool: + return self.punct == Punctuation.Question + + @property + def is_quest(self) -> bool: + return self.punct == Punctuation.Quest + + @property + def is_eternal(self) -> bool: + return self.tense == Tense.Eternal + + @property + def is_event(self) -> bool: + return self.tense != Tense.Eternal + + +class Judgement(Sentence): + def __init__(self, term: Term, stamp: Stamp=None, truth: Truth=None) -> None: + '''''' + stamp = stamp if stamp is not None else Stamp(Global.time, None, None, None) + Sentence.__init__(self, term, Punctuation.Judgement, stamp) + self.truth = truth if truth is not None else Truth(Config.f, Config.c, Config.k) + + def __str__(self) -> str: + return f'{self.word}{(" " + str(self.tense.value)) if self.tense != Tense.Eternal else ""} {self.truth}' + + + def 
repr(self,is_input=False): + return f'{self.term.repr(is_input)}{self.punct.value}{(" " + str(self.tense.value)) if self.tense != Tense.Eternal else ""} {self.truth}' + +class Goal(Sentence): + def __init__(self, term: Term, stamp: Stamp=None, desire: Truth=None) -> None: + '''''' + stamp = stamp if stamp is not None else Stamp(Global.time, None, None, None, None) + Sentence.__init__(self, term, Punctuation.Goal, stamp) + self.truth = desire if desire is not None else Truth(Config.f, Config.c, Config.k) + + def __str__(self) -> str: + return f'{self.word}{(" " + str(self.tense.value)) if self.tense != Tense.Eternal else ""} {str(self.truth)}' + + + def repr(self, is_input=False): + return f'{self.term.repr(is_input)+self.punct.value}{(" " + str(self.tense.value)) if self.tense != Tense.Eternal else ""} {str(self.truth)}' + +class Question(Sentence): + answer_best: Sentence = None + def __init__(self, term: Term, stamp: Stamp=None, curiosiry: Truth=None) -> None: + '''''' + stamp = stamp if stamp is not None else Stamp(Global.time, None, None, None, None) + # stamp.set_eternal() + Sentence.__init__(self, term, Punctuation.Question, stamp) + self.is_query = False # TODO: if there is a query variable in the sentence, then `self.is_query=True` + + def __str__(self) -> str: + return f'{self.word}{(" " + str(self.tense.value)) if self.tense != Tense.Eternal else ""}' + # return self.word + (str(self.tense.value) if self.tense != Tense.Eternal else "") + + + def repr(self, is_input=False): + return f'{self.term.repr(is_input)+self.punct.value}{(" " + str(self.tense.value)) if self.tense != Tense.Eternal else ""}' + + +class Quest(Sentence): + def __init__(self, term: Term, stamp: Stamp=None, curiosiry: Truth=None) -> None: + '''''' + stamp = stamp if stamp is not None else Stamp(Global.time, None, None, None, None) + # stamp.set_eternal() + Sentence.__init__(self, term, Punctuation.Quest, stamp) + self.is_query = False # TODO: if there is a query variable in the sentence, then `self.is_query=True` + + def __str__(self) -> str: + return f'{self.word}{(" " + str(self.tense.value)) if self.tense != Tense.Eternal else ""}' + # return self.word + (str(self.tense.value) if self.tense != Tense.Eternal else "") + + def repr(self, is_input=False): + return f'{self.term.repr(is_input)+self.punct.value}{(" " + str(self.tense.value)) if self.tense != Tense.Eternal else ""}' diff --git a/Narsese/_py/Statement.py b/Narsese/_py/Statement.py new file mode 100644 index 0000000..4ece41d --- /dev/null +++ b/Narsese/_py/Statement.py @@ -0,0 +1,150 @@ +from copy import copy +import enum +from lib2to3.pgen2.tokenize import StopTokenizing +from Config import Enable +from utils.IndexVar import IndexVar +from .Term import Term, TermType +from .Copula import Copula +from typing import List, Type +# from .Compound import *f +from ordered_set import OrderedSet + +class Statement(Term): + type = TermType.STATEMENT + + def __init__(self, subject: Term, copula: Copula, predicate: Term, is_input: bool=False) -> None: + self._is_commutative = copula.is_commutative + word = "<"+str(subject)+str(copula.value)+str(predicate)+">" + if self.is_commutative: + subject_word, predicate_word = sorted((subject, predicate), key=hash) + word_sorted = "<"+subject_word.word_sorted+str(copula.value)+predicate_word.word_sorted+">" + else: word_sorted = "<"+subject.word_sorted+str(copula.value)+predicate.word_sorted+">" + super().__init__(word, word_sorted=word_sorted) + + self.subject = subject + self.copula = copula + self.predicate = predicate 
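+        # the `sub_terms` of both sides are flattened into one ordered set, so
+        # e.g. <<A --> B> ==> C> registers <A --> B>, A, B, and C as components.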
+ + self._components = OrderedSet((*self.subject.sub_terms, *self.predicate.sub_terms)) + self._complexity += (subject.complexity + predicate.complexity) + self._is_higher_order = copula.is_higher_order + + self.is_operation = self.predicate.is_operation + + if Enable.variable: + self.handle_index_var((self.subject, self.predicate), is_input) + + pass + + def __getitem__(self, index: List[int]) -> Term: + if isinstance(index, int): index = (index,) + if len(index) == 0: return self + + idx = index[0] + if idx > 1: raise "Out of bounds." + + index = index[1:] + term = self.subject if idx == 0 else self.predicate + return term.__getitem__(index) + + @property + def is_commutative(self): + return self._is_commutative + + @property + def is_higher_order(self): + return self._is_higher_order + + @property + def terms(self): + return (self.subject, self.predicate) + + def equal(self, o: Type['Statement']) -> bool: + ''' + Return: + is_equal (bool), is_replacable(bool) + ''' + + if o.is_statement: + if not self.copula is o.copula: return False + + if self.subject.equal(o.subject) and self.predicate.equal(o.predicate): return True + elif not o.is_commutative: return False + else: return self.subject.equal(o.predicate) and self.predicate.equal(o.subject) + + elif o.is_atom and o.is_var: + return True + else: return False + + + def has_common(self, statement: Type['Statement'], same_copula: bool=True) -> bool: + if not statement.is_statement: return False + return ((statement.copula is self.copula) if same_copula else True) and (not {self.subject, self.predicate}.isdisjoint({statement.subject, statement.predicate})) + + + def __repr__(self) -> str: + return f'' + + def repr_with_var(self, index_var: IndexVar, pos: list): + ''' + index_var (IndexVar): the `index_var` of the root/topmost term. + pos (list): the position of the current term within the root/topmost term. 
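+        e.g. the subject of this statement is visited with `pos+[0]` and the
+        predicate with `pos+[1]`.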
+ ''' + word_subject = str(self.subject) if not self.subject.has_var else self.subject.repr_with_var(index_var, pos+[0]) + word_predicate = str(self.predicate) if not self.predicate.has_var else self.predicate.repr_with_var(index_var, pos+[1]) + + return f'<{word_subject+str(self.copula.value)+word_predicate}>' + + @classmethod + def Inheritance(cls, subject: Term, predicate: Term, is_input: bool=False): + return cls(subject, Copula.Inheritance, predicate, is_input) + + + @classmethod + def Implication(cls, subject: Term, predicate: Term, is_input: bool=False): + return cls(subject, Copula.Implication, predicate, is_input) + + + @classmethod + def Similarity(cls, subject: Term, predicate: Term, is_input: bool=False): + return cls(subject, Copula.Similarity, predicate, is_input) + + + @classmethod + def Equivalence(cls, subject: Term, predicate: Term, is_input: bool=False): + return cls(subject, Copula.Equivalence, predicate, is_input) + + + @classmethod + def PredictiveImplication(cls, subject: Term, predicate: Term, is_input: bool=False): + return cls(subject, Copula.PredictiveImplication, predicate, is_input) + + + @classmethod + def ConcurrentImplication(cls, subject: Term, predicate: Term, is_input: bool=False): + return cls(subject, Copula.ConcurrentImplication, predicate, is_input) + + + @classmethod + def RetrospectiveImplication(cls, subject: Term, predicate: Term, is_input: bool=False): + return cls(subject, Copula.RetrospectiveImplication, predicate, is_input) + + + @classmethod + def PredictiveEquivalence(cls, subject: Term, predicate: Term, is_input: bool=False): + return cls(subject, Copula.PredictiveEquivalence, predicate, is_input) + + + @classmethod + def ConcurrentEquivalence(cls, subject: Term, predicate: Term, is_input: bool=False): + return cls(subject, Copula.ConcurrentEquivalence, predicate, is_input) + + def clone(self): + if not self.has_var: return self + # now, not self.has_var + clone = copy(self) + clone.subject = self.subject.clone() + clone.predicate = self.predicate.clone() + clone._index_var = self._index_var.clone() + + return clone diff --git a/Narsese/_py/Task.py b/Narsese/_py/Task.py new file mode 100644 index 0000000..24a6a84 --- /dev/null +++ b/Narsese/_py/Task.py @@ -0,0 +1,109 @@ +from copy import copy +from typing import Type, Union +from .Sentence import Sentence, Judgement, Goal, Quest, Question, Stamp +from .Item import Item +from .Budget import Budget +from .Term import Term +from .Truth import Truth + +class Task(Item): + input_id = -1 + + def __init__(self, sentence: Sentence, budget: Budget=None, input_id: int=None) -> None: + super().__init__(hash(sentence), budget) + self.sentence: Sentence = sentence + self.input_id = self.input_id if input_id is None else input_id + + def achieving_level(self, truth_belief: Truth=None): + if self.is_judgement: + e_belief = truth_belief.e if truth_belief is not None else 0.5 + judgement: Judgement=self.sentence + return 1-abs(judgement.truth.e-e_belief) + elif self.is_goal: + e_belief = truth_belief.e if truth_belief is not None else 0.5 + goal: Goal=self.sentence + return 1-abs(goal.truth.e-e_belief) + elif self.is_question: + question: Question = self.sentence + return truth_belief.e if question.is_query else truth_belief.c + elif self.is_quest: + quest: Quest = self.sentence + return truth_belief.e if quest.is_query else truth_belief.c + else: + raise f'Invalid type! 
{type(self.sentence)}' + + def reduce_budget_by_achieving_level(self, belief_selected: Union[Type['Belief'], None]): + truth = belief_selected.truth if belief_selected is not None else None + self.budget.reduce_by_achieving_level(self.achieving_level(truth)) + + @property + def stamp(self) -> Stamp: + return self.sentence.stamp + + @property + def evidential_base(self): + return self.sentence.evidential_base + + @property + def term(self) -> Term: + return self.sentence.term + + @property + def truth(self) -> Truth: + return self.sentence.truth + + @property + def is_judgement(self) -> bool: + return self.sentence.is_judgement + + @property + def is_goal(self) -> bool: + return self.sentence.is_goal + + @property + def is_question(self) -> bool: + return self.sentence.is_question + + @property + def is_quest(self) -> bool: + return self.sentence.is_quest + + + @property + def is_query(self) -> bool: + return self.term.has_qvar and (self.is_question or self.is_quest) + + @property + def is_eternal(self) -> bool: + return self.sentence.is_eternal + + @property + def is_event(self) -> bool: + return self.sentence.is_event + + @property + def is_operation(self) -> bool: + return self.term.is_operation + + @property + def is_mental_operation(self) -> bool: + return self.term.is_mental_operation + + @property + def is_executable(self): + return self.is_goal and self.term.is_executable + + def eternalize(self, truth: Truth=None): + task = copy(self) + task.sentence = task.sentence.eternalize(truth) + return task + + def __str__(self) -> str: + '''$p;d;q$ sentence %f;c%''' + return f'{(str(self.budget) if self.budget is not None else "$-;-;-$") + " "}{self.sentence.repr(False)}' + + def __repr__(self) -> str: + return str(self) + +Belief = Task +Desire = Task diff --git a/Narsese/_py/Tense.py b/Narsese/_py/Tense.py new file mode 100644 index 0000000..23de840 --- /dev/null +++ b/Narsese/_py/Tense.py @@ -0,0 +1,15 @@ +from enum import Enum + +class Tense(Enum): + Past = r":\:" + Present = r":|:" + Future = r":/:" + Eternal = r":-:" + +# class TemporalOrder(Enum): +# '''for temporal reasoning''' +# NONE = 2 +# FORWARD = 1 +# CONCURRENT = 0 +# BACKWARD = -1 +# INVALID = -2 \ No newline at end of file diff --git a/Narsese/_py/Term.py b/Narsese/_py/Term.py new file mode 100644 index 0000000..4e45b5c --- /dev/null +++ b/Narsese/_py/Term.py @@ -0,0 +1,204 @@ +from .Copula import Copula +from Config import Enable +from typing import Iterable, List, Set, Type +from enum import Enum +from utils.IndexVar import IndexVar +from numpy import prod +from ordered_set import OrderedSet +from utils.tools import find_pos_with_pos, find_var_with_pos +from copy import copy, deepcopy + +class TermType(Enum): + ATOM = 0 + STATEMENT = 1 + COMPOUND = 2 + +class Term: + + type = TermType.ATOM + copula: Copula = None + _complexity: float = 1.0 # The complexity of the term. Read only. + has_var: bool = False # Whether the term contains variable(s). + has_ivar: bool = False # Whether the term contains independent variable(s). + has_dvar: bool = False # Whether the term contains dependent variable(s). + has_qvar: bool = False # Whether the term contains query variable(s). + is_var: bool = False + is_ivar: bool = False + is_dvar: bool = False + is_qvar: bool = False + is_closed: bool = True # Whether the term is closed or open in terms of variable. 
+ + is_interval: bool = False + + is_operation = False + _index_var: IndexVar = None + + def __init__(self, word, do_hashing=False, word_sorted=None, is_input=False) -> None: + self.word = word + self.word_sorted = word_sorted if word_sorted is not None else word + self._components: Set[Term] = None + + if Enable.variable: + if self._index_var is None: self._index_var = IndexVar() + + if do_hashing: + self.do_hashing() + else: + self._hash_value = None + + + @property + def sub_terms(self) -> Set[Type['Term']]: + return (self, *self._components) if self._components is not None else set((self, )) + + @property + def components(self) ->Set[Type['Term']]: + return self._components + + + def count(self): + '''the number of sub-terms (including this term itself)''' + return len(self._components)+1 if self._components is not None else 1 + + # @property + # def temporal_order(self): + # return TemporalOrder.NONE + + @property + def complexity(self): + return self._complexity + + @property + def is_statement(self): + return self.type == TermType.STATEMENT + + @property + def is_compound(self): + return self.type == TermType.COMPOUND + + @property + def is_atom(self): + return self.type == TermType.ATOM + + @property + def is_commutative(self): + '''whether the components of the term is commutative''' + return False + + @property + def is_higher_order(self): + '''whether the term is higher-ordered''' + return False + + @property + def is_executable(self): + return self.is_statement and self.is_operation + + @property + def terms(self): + return (self, ) + + @property + def index_var(self): + return self._index_var + + @property + def is_mental_operation(self): + return False + + def identical(self, o: Type['Term']) -> bool: + return hash(o) == hash(self) # and hash(o.index_var) == hash(self.index_var) + + def equal(self, o: Type['Term']) -> bool: + ''' + Return: + is_equal (bool), is_replacable(bool) + ''' + + if o.is_atom: + if self.is_var ^ o.is_var: # one of them is variable, while the other is not + return True + elif self.is_var and o.is_var: # the two are both variables + if (self.is_ivar and o.is_ivar) or (self.is_dvar and o.is_dvar) or (self.is_qvar and o.is_qvar): # the two, to be equal, should be the same type of variable + return True + else: + return False, False + elif not self.is_var and not o.is_var: # the two are neither variables: + return self.identical(o) + elif (o.is_compound or o.is_statement) and self.is_var: + return True + else: return False + + def has_common(self, term: Type['Term'], same_term: bool=True) -> bool: + if not term.is_atom: return False + return self == term + + def do_hashing(self): + self._hash_value = hash(self.word_sorted+str(self.index_var.postions_normalized)) + return self._hash_value + + def __hash__(self) -> int: + return self._hash_value if self._hash_value is not None else self.do_hashing() + + def __eq__(self, o: Type['Term']) -> bool: + return self.identical(o) + + def __contains__(self, term: Type['Term']) -> bool: + return term in self.sub_terms + + def __str__(self) -> str: + return self.word + + def __repr__(self) -> str: + return f'' + + def __len__(self): + return len(self._components) + + def __getitem__(self, index: List[int]) -> Type['Term']: + if len(index) > 0 or not (len(index)==0 or index[0]== 0 or index[0] == -1): raise "Out of bounds." 
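+        # an atom has no components, so only an empty index, referring to the
+        # atom itself, is accepted here.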
+ return self + + def repr(self, is_input=False): + return str(self) if not self.has_var else self.repr_with_var(self.index_var, []) + + def repr_with_var(self, index_var: IndexVar, pos: list): + '''''' + # raise "Invalid case." + return str(self) + + def handle_variables(self, terms: Iterable['Term']): + '''''' + self.has_var = bool(sum(tuple(term.has_var for term in terms))) + self.has_ivar = bool(sum(tuple(term.has_ivar for term in terms))) + self.has_dvar = bool(sum(tuple(term.has_dvar for term in terms))) + self.has_qvar = bool(sum(tuple(term.has_qvar for term in terms))) + + def handle_index_var(self, terms: Iterable['Term'], is_input: bool): + if self.index_var is None: self._index_var = IndexVar() + self.handle_variables(terms) + + indices_var_to_merge = [] + for i, component in enumerate(terms): + if component.is_atom and component.is_var: + if component.is_ivar: self.index_var.add_ivar([i], name=repr(component)) + elif component.is_dvar: self.index_var.add_dvar([i], name=repr(component)) + elif component.is_qvar: self.index_var.add_qvar([i], name=repr(component)) + elif component.has_var: # but component itself is not variable + if component.has_ivar: + for index in component.index_var.positions_ivar: + self.index_var.add_ivar([i]+index) + if component.has_dvar: + for index in component.index_var.positions_dvar: + self.index_var.add_dvar([i]+index) + if component.has_qvar: + for index in component.index_var.positions_qvar: + self.index_var.add_qvar([i]+index) + indices_var_to_merge.append(component.index_var) + self.index_var.merge(*indices_var_to_merge, is_input=is_input) + self.index_var.normalize() + + def clone(self): + # clone = copy(self) + return self + \ No newline at end of file diff --git a/Narsese/_py/Terms.py b/Narsese/_py/Terms.py new file mode 100644 index 0000000..c2aa4ab --- /dev/null +++ b/Narsese/_py/Terms.py @@ -0,0 +1,179 @@ +from copy import copy +from typing import Iterable, List, Set, Type +from utils.IndexVar import IndexVar +from ordered_set import OrderedSet +from utils.tools import find_pos_with_pos, find_var_with_pos +from .Term import Term +import Global + +class Terms: + '''''' + # index_var: IndexVar = None + def __init__(self, terms: Iterable[Term], is_commutative: bool, is_input: bool=False) -> None: + self._is_commutative = is_commutative + + terms = tuple(term.clone() for term in terms) + terms_const: Iterable[Term] = tuple(term for term in terms if not term.has_var) + terms_var: Iterable[Term] = tuple(term for term in terms if term.has_var) + + # convert terms's form into (, (ivars), (dvars), (qvars)), so that the terms, which have variables, with the same hash-value can be distinguished. 
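+        # e.g. two variable terms may share one word and hash ('#x' twice);
+        # pairing each with its variable-position tuples keeps them distinct.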
+ index_var = self.handle_index_var(terms_var, is_input=is_input) + ivars = tuple(tuple(find_var_with_pos([i], index_var.var_independent, index_var.positions_ivar)) for i in range(len(terms_var))) + dvars = tuple(tuple(find_var_with_pos([i], index_var.var_dependent, index_var.positions_dvar)) for i in range(len(terms_var))) + qvars = tuple(tuple(find_var_with_pos([i], index_var.var_query, index_var.positions_qvar)) for i in range(len(terms_var))) + terms_var = tuple(term for term in zip(terms_var, ivars, dvars, qvars)) + + # store the terms + if is_commutative: + self._terms_const = OrderedSet(terms_const) + self._terms_var = OrderedSet(terms_var) + self._terms = tuple((*self._terms_const, *(term[0] for term in self._terms_var))) + else: + self._terms_const = terms_const + self._terms_var = terms_var + self._terms = terms + + # set index_var + self._index_var = self.handle_index_var(self._terms, is_input=False) + pass + + @property + def is_commutative(self): + return self._is_commutative + + + @property + def terms(self) -> Iterable[Term]: + return self._terms + + def __repr__(self) -> str: + word_terms = (str(component) if not component.has_var else component.repr_with_var(self._index_var, [i]) for i, component in enumerate(self.terms)) + return f"" + + def __iter__(self): + return iter(self._terms) + + def __len__(self): + return len(self._terms) + + def __getitem__(self, index): + return self._terms.__getitem__(index) + + + def __sub__(self, o: Type['Terms']): + '''''' + return self.difference(o) + + # def convert(self): + # if self._is_commutative: + # terms = tuple((*(term for term in self._terms_const), *(term for term in self._terms_var))) + # else: + # bias = len(self._terms_const) + # order = ((bias + i if term.has_var else i) for i, term in enumerate(self._terms)) + # terms = (self._terms_var[i-bias] if term.has_var else self._terms_const[i] for term, i in zip(self._terms, order)) + + # return terms + + @staticmethod + def handle_index_var(terms: List['Term'], is_input: bool): + index_var = IndexVar() + + indices_var_to_merge = [] + for i, term in enumerate(terms): + if term.is_atom and term.is_var: + if term.is_ivar: index_var.add_ivar([i], name=repr(term)) + elif term.is_dvar: index_var.add_dvar([i], name=repr(term)) + elif term.is_qvar: index_var.add_qvar([i], name=repr(term)) + elif term.has_var: # but component itself is not variable + if term.has_ivar: + for index in term.index_var.positions_ivar: + index_var.add_ivar([i]+index) + if term.has_dvar: + for index in term.index_var.positions_dvar: + index_var.add_dvar([i]+index) + if term.has_qvar: + for index in term.index_var.positions_qvar: + index_var.add_qvar([i]+index) + indices_var_to_merge.append(term.index_var) + index_var.merge(*indices_var_to_merge, is_input=is_input) + index_var.normalize() + return index_var + + + def clone(self): + '''''' + clone = copy(self) + clone._index_var = clone._index_var.clone() + return clone + + + def intersection(self, *tuple_terms: Type['Terms'], is_input: bool=False): + ''' + it make sense only when self.is_commutative is True + ''' + if self.is_commutative: + if is_input: Terms.handle_index_var((*(self), *(term for terms in tuple_terms for term in terms)), True) + + + terms_const = OrderedSet.intersection(self._terms_const, *(terms._terms_const for terms in tuple_terms)) + terms_var = OrderedSet.intersection(self._terms_var, *(terms._terms_var for terms in tuple_terms)) + terms = tuple((*terms_const, *(term[0] for term in terms_var))) + terms_intersection = Terms(terms, 
is_commutative=True, is_input=False) + else: terms_intersection = None + return terms_intersection + + + def union(self, *tuple_terms: Type['Terms'], is_input: bool=False): + ''' + it make sense only when self.is_commutative is True + ''' + if self.is_commutative: + if is_input: self.handle_index_var((*(self), *(term for terms in tuple_terms for term in terms)), True) + terms_const = OrderedSet.union(self._terms_const, *(terms._terms_const for terms in tuple_terms)) + terms_var = OrderedSet.union(self._terms_var, *(terms._terms_var for terms in tuple_terms)) + terms = tuple((*terms_const, *(term[0] for term in terms_var))) + terms_union = Terms(terms, is_commutative=True, is_input=False) + else: terms_union = None + return terms_union + + + def difference(self, *tuple_terms: Type['Terms'], is_input: bool=False): + ''' + it make sense only when self.is_commutative is True + ''' + # if self.is_commutative: + if is_input: self.handle_index_var((*(self), *(term for terms in tuple_terms for term in terms)), True) + terms_const = OrderedSet.difference(self._terms_const, *(terms._terms_const for terms in tuple_terms)) + terms_var = OrderedSet.difference(self._terms_var, *(terms._terms_var for terms in tuple_terms)) + terms = tuple((*terms_const, *(term[0] for term in terms_var))) + terms_difference = Terms(terms, is_commutative=True, is_input=False) + # else: + # # TODO + # raise + # terms_difference = None + return terms_difference + + def issuperset(self, terms_other: Type['Terms']): + '''''' + if self.is_commutative: + issuperset_const = self._terms_const.issuperset(terms_other._terms_const) + issuperset_var = self._terms_var.issuperset(terms_other._terms_var) + else: + issuperset_const = set(self._terms_const).issuperset(terms_other._terms_const) + issuperset_var = set(self._terms_var).issuperset(terms_other._terms_var) + issuperset = issuperset_const and issuperset_var + return issuperset + + def isdisjoint(self, terms_other: Type['Terms']): + if self.is_commutative: + isdisjoint_const = self._terms_const.isdisjoint(terms_other._terms_const) + isdisjoint_var = self._terms_var.isdisjoint(terms_other._terms_var) + else: + isdisjoint_const = set(self._terms_const).isdisjoint(terms_other._terms_const) + isdisjoint_var = set(self._terms_var).isdisjoint(terms_other._terms_var) + isdisjoint = isdisjoint_const and isdisjoint_var + return isdisjoint + + def index(self, term: Term): + '''''' + return self._terms.index(term) \ No newline at end of file diff --git a/Narsese/_py/Truth.py b/Narsese/_py/Truth.py new file mode 100644 index 0000000..a523c40 --- /dev/null +++ b/Narsese/_py/Truth.py @@ -0,0 +1,31 @@ +from Config import Config +from typing import Type +import numpy as np + +class Truth: + # analytic: Type['Truth'] + def __init__(self, f, c, k) -> None: + self.f = f + self.c = c + self.k = k + + @property + def e(self): + return (self.c * (self.f - 0.5) + 0.5) + + # @classmethod + # def from_w(cls, w_plus, w, k): + # f, c = w_to_f(w_plus, w), w_to_c(w_plus, w) + # return Truth(f, c, k) + def __iter__(self): + '''return (f, c, k)''' + return iter((self.f, self.c, self.k)) + + def __str__(self) -> str: + return f'%{self.f:.3f};{self.c:.3f}%' + + def __repr__(self) -> str: + return str(self) + +truth_analytic = Truth(Config.f, Config.c, Config.k) + \ No newline at end of file diff --git a/Narsese/_py/Variable.py b/Narsese/_py/Variable.py new file mode 100644 index 0000000..0b75109 --- /dev/null +++ b/Narsese/_py/Variable.py @@ -0,0 +1,80 @@ +from copy import copy, deepcopy +from enum import Enum 
+from typing import Type +from Config import Config + +from utils.IndexVar import IndexVar +from .Term import Term + +class VarPrefix(Enum): + Independent = "$" + Dependent = "#" + Query = "?" + + +class Variable(Term): + is_var: bool = True + has_var: bool = True + + def __init__(self, prefix: VarPrefix, word: str, do_hashing=False, is_input=False) -> None: + self.prefix = prefix + self.name = str(word) + word = prefix.value + super().__init__(word, do_hashing=do_hashing) + self.dependents = [] # only for dependent variable. TODO: implement son classes of Variable, including DependentVar, IndependentVar, QueryVar. + self.has_variable: bool = True + + self.is_ivar = self.has_ivar = self.prefix == VarPrefix.Independent + self.is_dvar = self.has_dvar = self.prefix == VarPrefix.Dependent + self.is_qvar = self.has_qvar = self.prefix == VarPrefix.Query + + + def __repr__(self) -> str: + # return f'' + return self.word + self.name + + + # @property + # def repr(self): + # return self.word + self.name + + def repr_with_var(self, index_var: IndexVar, pos: list): + '''''' + if not self.is_var: raise "Invalide case." + if self.is_ivar: + try: idx = index_var.positions_ivar.index(pos) + except: raise "Invalid case: The `pos` is not in `index_var.positions_ivar`" + var = index_var.postions_normalized[0][idx] if Config.variable_repr_normalized else index_var.var_independent[idx] + elif self.is_dvar: + try: idx = index_var.positions_dvar.index(pos) + except: raise "Invalid case: The `pos` is not in `index_var.positions_dvar`" + var = index_var.postions_normalized[1][idx] if Config.variable_repr_normalized else index_var.var_dependent[idx] + elif self.is_qvar: + try: idx = index_var.positions_qvar.index(pos) + except: raise "Invalid case: The `pos` is not in `index_var.positions_qvar`" + var = index_var.postions_normalized[2][idx] if Config.variable_repr_normalized else index_var.var_query[idx] + else: raise "Invalide case." 
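+        # `var` is the normalized index when `Config.variable_repr_normalized`
+        # is set, otherwise the variable's original name; e.g. `$x` may be
+        # printed as `$0`.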
+ prefix = self.prefix.value + + return prefix + str(var) + + + @classmethod + def Independent(cls, word: str, do_hashing=False, is_input=False): + return Variable(VarPrefix.Independent, word, do_hashing, is_input) + + + @classmethod + def Dependent(cls, word: str, do_hashing=False, is_input=False): + return Variable(VarPrefix.Dependent, word, do_hashing, is_input) + + + @classmethod + def Query(cls, word: str, do_hashing=False, is_input=False): + return Variable(VarPrefix.Query, word, do_hashing, is_input) + + + def clone(self) -> Type['Variable']: + clone = copy(self) + clone._index_var = deepcopy(self._index_var) + return clone \ No newline at end of file diff --git a/Narsese/_py/__init__.py b/Narsese/_py/__init__.py new file mode 100644 index 0000000..8623a87 --- /dev/null +++ b/Narsese/_py/__init__.py @@ -0,0 +1,21 @@ +from .Term import * +from .Statement import * +from .Variable import * +from .Compound import * +from .Copula import * +from .Connector import * +from .Sentence import * +from .Truth import * +from .Item import * +from .Task import * +from .Budget import * +from .Evidence import * +from .Operation import * +from .Interval import * +from .Terms import * + +SELF = Compound(Connector.ExtensionalSet, Term('SELF', do_hashing=True)) + +TRUE = Term('TRUE', do_hashing=True) +FALSE = Term('FALSE', do_hashing=True) +UNSURE = Term('UNSURE', do_hashing=True) diff --git a/Narsese/_pyx/Term.c b/Narsese/_pyx/Term.c new file mode 100644 index 0000000..06f2230 --- /dev/null +++ b/Narsese/_pyx/Term.c @@ -0,0 +1 @@ +#error Do not use this file, it is the result of a failed Cython compilation. diff --git a/Narsese/_pyx/Term.pyx b/Narsese/_pyx/Term.pyx new file mode 100644 index 0000000..c7d4df0 --- /dev/null +++ b/Narsese/_pyx/Term.pyx @@ -0,0 +1,5 @@ +# +cdef class Term: + cdef bool hashed + cdef long hash_value + atoms = [] diff --git a/README.en.md b/README.en.md new file mode 100644 index 0000000..18c12a7 --- /dev/null +++ b/README.en.md @@ -0,0 +1,38 @@ +# PyNARS + +#### Description +Python implementation of NARS (Non-Axiomatic-Reasoning-System) + +#### Software Architecture +Software architecture description + +#### Installation + +1. xxxx +2. xxxx +3. xxxx + +Note: the version of the python package `tqdm` should be no higher than 3.1.4, otherwise the color display would be abnormal. This is because of a bug of `tqdm`, which leads to conflicts between `sty` and `tqdm` and cause unexpected color display of `sty`. + +#### Instructions + +1. xxxx +2. xxxx +3. xxxx + +#### Contribution + +1. Fork the repository +2. Create Feat_xxx branch +3. Commit your code +4. Create Pull Request + + +#### Gitee Feature + +1. You can use Readme\_XXX.md to support different languages, such as Readme\_en.md, Readme\_zh.md +2. Gitee blog [blog.gitee.com](https://blog.gitee.com) +3. Explore open source project [https://gitee.com/explore](https://gitee.com/explore) +4. The most valuable open source project [GVP](https://gitee.com/gvp) +5. The manual of Gitee [https://gitee.com/help](https://gitee.com/help) +6. The most popular members [https://gitee.com/gitee-stars/](https://gitee.com/gitee-stars/) diff --git a/README.md b/README.md new file mode 100644 index 0000000..9f6696c --- /dev/null +++ b/README.md @@ -0,0 +1,40 @@ +# PyNARS + +#### 介绍 +Python implementation of NARS (Non-Axiomatic-Reasoning-System). +References: +OpenNARS 3.0.4, +Design Report of OpenNARS 3.1.0 + +#### 软件架构 +软件架构说明 + + +#### 安装教程 + +1. xxxx +2. xxxx +3. xxxx + +#### 使用说明 + +1. xxxx +2. xxxx +3. xxxx + +#### 参与贡献 + +1. Fork 本仓库 +2. 
新建 Feat_xxx 分支 +3. 提交代码 +4. 新建 Pull Request + + +#### 特技 + +1. 使用 Readme\_XXX.md 来支持不同的语言,例如 Readme\_en.md, Readme\_zh.md +2. Gitee 官方博客 [blog.gitee.com](https://blog.gitee.com) +3. 你可以 [https://gitee.com/explore](https://gitee.com/explore) 这个地址来了解 Gitee 上的优秀开源项目 +4. [GVP](https://gitee.com/gvp) 全称是 Gitee 最有价值开源项目,是综合评定出的优秀开源项目 +5. Gitee 官方提供的使用手册 [https://gitee.com/help](https://gitee.com/help) +6. Gitee 封面人物是一档用来展示 Gitee 会员风采的栏目 [https://gitee.com/gitee-stars/](https://gitee.com/gitee-stars/) diff --git a/RL/Pong/main.py b/RL/Pong/main.py new file mode 100644 index 0000000..9797e45 --- /dev/null +++ b/RL/Pong/main.py @@ -0,0 +1,64 @@ +''' +Pong-v0 + +Maximize your score in the Atari 2600 game Pong. In this environment, the observation is an RGB image of the screen, which is an array of shape (210, 160, 3) Each action is repeatedly performed for a duration of kk frames, where kk is uniformly sampled from {2,3,4}. + +See: + + [1] https://gym.openai.com/envs/Pong-v0/ + +''' + +from typing_extensions import Literal +import gym +from atariari.benchmark.wrapper import AtariARIWrapper +from time import sleep + +rander_mode: Literal['human'] = 'human' + +def policy_random(env: gym.Env): + '''''' + env.reset() + # env.render() + env.reset() + done = False + while not done: + action = env.action_space.sample() + action = 0 + observation, reward, done, info = env.step(action) + print('obs: {}; reward: {}'.format(observation.shape, reward)) + # env.render() + # sleep(0.02) + + +def run(): + ''' + Installation: + 1. gym + ``` + pip install gym + pip install gym[atari] + pip install gym[accept-rom-license] + pip install pyglet + ``` + 2. interface + ``` + pip install git+git://github.com/mila-iqia/atari-representation-learning.git + ``` + ''' + # env = gym.make("Pong-v0", render_mode='human') + env = gym.make("Pong-ram-v0", render_mode='human') + env = AtariARIWrapper(env) + + # observation space + print(env.observation_space.shape) # (210, 160, 3) + # action space + print(env.action_space) # Discrete(6) + + act_meanings = env.unwrapped.get_action_meanings() + + policy_random(env) + + +if __name__ == '__main__': + run() \ No newline at end of file diff --git a/Tests/examples/application/detective.nal b/Tests/examples/application/detective.nal new file mode 100644 index 0000000..23c8bb5 --- /dev/null +++ b/Tests/examples/application/detective.nal @@ -0,0 +1,17 @@ +'the detective claims that tim lives in graz +'<{tim} --> (/,livingIn,_,{graz})>. +'and lawyer claims that this is not the case +<{tim} --> (/,livingIn,_,{graz})>. %0% +100 +'the first deponent, a psychologist, +'claims that people with sunglasses are more aggressive +<<(*,$1,sunglasses) --> own> ==> <$1 --> [aggressive]>>. +'the third deponent claims, that he has seen tom with sunglasses on: +<(*,{tom},sunglasses) --> own>. +'the teacher claims, that people who are aggressive tend to be murders +<<$1 --> [aggressive]> ==> <$1 --> murder>>. +'the second deponent claims, that if the person lives in Graz, he is surely the murder +<<$1 --> (/,livingIn,_,{graz})> ==> <$1 --> murder>>. +'who is the murder? +<{?who} --> murder>? +''outputMustContain('<{tom} --> murder>. %1.00;0.73%') diff --git a/Tests/examples/application/detective2.nal b/Tests/examples/application/detective2.nal new file mode 100644 index 0000000..4636ea7 --- /dev/null +++ b/Tests/examples/application/detective2.nal @@ -0,0 +1,21 @@ +'the detective claims that tim lives in graz +<{tim} --> (/,livingIn,_,{graz})>. 
+'the lawyer claims that this is not the case +<{tim} --> (/,livingIn,_,{graz})>. %0% +100 +'the first deponent, a psychologist, +'claims that people with sunglasses are more aggressive +<<(*,$1,sunglasses) --> own> ==> <$1 --> [aggressive]>>. +'the third deponent claims that he has seen tom with black glasses on: +<(*,{tom},(&,[black],glasses)) --> own>. +'the teacher claims that people who are aggressive tend to be murderers +<<$1 --> [aggressive]> ==> <$1 --> murder>>. +'the second deponent claims that if a person lives in Graz, he is surely the murderer +<<$1 --> (/,livingIn,_,{graz})> ==> <$1 --> murder>>. +'the system knows that sunglasses are a special case of black glasses +<sunglasses --> (&,[black],glasses)>. +'who is the murderer? +<{?who} --> murder>? +10000 +''outputMustContain('<{tim} --> murder>. %0.50;0.43%') +''outputMustContain('<{tom} --> murder>. %1.00;0.29%') diff --git a/Tests/examples/application/toothbrush.nal b/Tests/examples/application/toothbrush.nal new file mode 100644 index 0000000..eeb9c2d --- /dev/null +++ b/Tests/examples/application/toothbrush.nal @@ -0,0 +1,11 @@ +<(*,toothbrush,plastic) --> made_of>. +<(&/,<(*,$1,plastic) --> made_of>,(^lighter,{SELF},$1)) =/> <$1 --> [heated]>>. +<<$1 --> [heated]> =/> <$1 --> [melted]>>. +<<$1 --> [melted]> <|> <$1 --> [pliable]>>. +<(&/,<$1 --> [pliable]>,(^reshape,{SELF},$1)) =/> <$1 --> [hardened]>>. +<<$1 --> [hardened]> =|> <$1 --> [unscrewing]>>. +<toothbrush --> object>. +(&&,<#1 --> object>,<#1 --> [unscrewing]>)! +10000 +''outputMustContain('(^lighter,{SELF},toothbrush)! %1.00;0.39%') +''outputMustContain('(^reshape,{SELF},toothbrush)! %1.00;0.26%') diff --git a/Tests/examples/application/toothbrush2.nal b/Tests/examples/application/toothbrush2.nal new file mode 100644 index 0000000..e99b37c --- /dev/null +++ b/Tests/examples/application/toothbrush2.nal @@ -0,0 +1,14 @@ +<toothbrush --> object>. +<toothbrush --> [bendable]>. +<(*,cup,plastic) --> made_of>. +<cup --> object>. +<cup --> [bendable]>. +<(&/,<(*,$1,plastic) --> made_of>,(^lighter,{SELF},$1)) =/> <$1 --> [heated]>>. +<<$1 --> [heated]> =/> <$1 --> [melted]>>. +<<$1 --> [melted]> <|> <$1 --> [pliable]>>. +<(&/,<$1 --> [pliable]>,(^reshape,{SELF},$1)) =/> <$1 --> [hardened]>>. +<<$1 --> [hardened]> =|> <$1 --> [unscrewing]>>. +(&&,<#1 --> object>,<#1 --> [unscrewing]>)! +200000 +''outputMustContain('(^lighter,{SELF},cup)! %1.00;0.39%') +''outputMustContain('(^reshape,{SELF},cup)! %1.00;0.24%') diff --git a/Tests/examples/bugs/bug.nal b/Tests/examples/bugs/bug.nal new file mode 100644 index 0000000..8eb7940 --- /dev/null +++ b/Tests/examples/bugs/bug.nal @@ -0,0 +1,15 @@ +// Version: 3.0.4  Source: https://github.com/opennars/opennars +// Bug description: there is no evidence about swimmer (the term only appears in the question), yet <swan --> swimmer>. %0.09;0.06% is derived. +// The code is as follows; the same phenomenon can be observed within 20 cycles. +<swan --> bird>. %0.90% +<swan --> (|,bird,swimmer)>? +20 + +//(^wonder,{SELF},<swan --> (|,bird,swimmer)>). :|: %1.00;0.90% +// <{SELF} --> (/,^wonder,_,<swan --> (|,bird,swimmer)>)>. :-5: %1.00;0.90% +// <swan --> (|,bird,swimmer)>. %0.90;0.73% +// <(|,swan,swimmer) --> (|,bird,swimmer)>. %0.90;0.73% +// (^believe,{SELF},<swan --> (|,bird,swimmer)>,TRUE). :|: %1.00;0.90% +//Answer <swan --> (|,bird,swimmer)>. %0.90;0.73% +// <swan --> (|,bird,swimmer)>. %0.90;0.73% +// <swan --> swimmer>. %0.09;0.06% \ No newline at end of file diff --git a/Tests/examples/bugs/bug2.nal b/Tests/examples/bugs/bug2.nal new file mode 100644 index 0000000..540ac58 --- /dev/null +++ b/Tests/examples/bugs/bug2.nal @@ -0,0 +1,20 @@ +// . +// bird>. +// animal>. +// 30 +// ? +// 10 +// ? +// 1 +//Answer obtained: +//<{Tweety} --> animal>. %1.00;0.73% + +. + bird>. + animal>. +? +30 +? +1 +// Answer obtained +// <{Tweety} --> animal>. 
%1.00;0.29% \ No newline at end of file diff --git a/Tests/examples/bugs/bug3.nal b/Tests/examples/bugs/bug3.nal new file mode 100644 index 0000000..fa0a007 --- /dev/null +++ b/Tests/examples/bugs/bug3.nal @@ -0,0 +1,7 @@ +// It should derive (&&,<A --> B>,<C --> D>). +// But in the end there is no answer. + +<A --> B>. +<C --> D>. +(&&,<A --> B>,<C --> D>)? +1000 diff --git a/Tests/examples/bugs/bug4.nal b/Tests/examples/bugs/bug4.nal new file mode 100644 index 0000000..942337d --- /dev/null +++ b/Tests/examples/bugs/bug4.nal @@ -0,0 +1,9 @@ +'Robin is not a type of swimmer. +<robin --> swimmer>. %0.00;0.90% + +'Robin is not a nonswimming mammal. +<robin --> (-,mammal,swimmer)>. %0.00;0.90% + +// got these two sentences with redundant terms??? +// <robin --> (&, swimmer, (-, mammal, swimmer))>. %0.000;0.810% +// <robin --> (|, swimmer, (-, mammal, swimmer))>. %0.000;0.810% \ No newline at end of file diff --git a/Tests/examples/bugs/bug5.nal b/Tests/examples/bugs/bug5.nal new file mode 100644 index 0000000..9c1012d --- /dev/null +++ b/Tests/examples/bugs/bug5.nal @@ -0,0 +1,4 @@ +<(&,{A,B,C},{B,C,D}) --> E>. %1.00;0.90% +<{B,C} --> E>? + +// no answer \ No newline at end of file diff --git a/Tests/examples/multi_step/nal1.multistep.nal b/Tests/examples/multi_step/nal1.multistep.nal new file mode 100644 index 0000000..fad27de --- /dev/null +++ b/Tests/examples/multi_step/nal1.multistep.nal @@ -0,0 +1,8 @@ +<a --> b>. %1.00;0.90% +<b --> c>. %1.00;0.90% +<c --> d>. %1.00;0.90% +<a --> d>? + +11000 + +''outputMustContain('<a --> d>. %1.00;0.73%') diff --git a/Tests/examples/multi_step/nal3.subtermMapping1.nal b/Tests/examples/multi_step/nal3.subtermMapping1.nal new file mode 100644 index 0000000..f45bced --- /dev/null +++ b/Tests/examples/multi_step/nal3.subtermMapping1.nal @@ -0,0 +1,15 @@ +' from https://code.google.com/archive/p/open-nars/issues/47 +<{t1,{t2,{t3}}} --> [p1,[p2,[p3]]]>. %1.00;0.90% +<c --> t3>. %1.00;0.90% + +<{t1,{t2,{c}}} --> [p1,[p2,[p3]]]>? +500 + +' |- + +' answer provided by OpenNARS 3.0.2 and 3.0.3 +''outputMustContain('<{t1,{t2,{c}}} --> [[[p3],p2],p1]>. %1.00;0.59%') + +' the right answer should be +'''outputMustContain('<{t1,{t2,{c}}} --> [[[p3],p2],p1]>. %1.00;0.81%') +' because it can use the conjunction rule diff --git a/Tests/examples/multi_step/nal4.everyday_reasoning.nal b/Tests/examples/multi_step/nal4.everyday_reasoning.nal new file mode 100644 index 0000000..c5f9c4a --- /dev/null +++ b/Tests/examples/multi_step/nal4.everyday_reasoning.nal @@ -0,0 +1,10 @@ +//tom is a cat +<{tom} --> cat>. +//tom likes the sky +<(*,{tom},{sky}) --> likes>. +//the sky is blue +<{sky} --> [blue]>. +//do cats like blue? +<(*,cat,[blue]) --> likes>? +10000 +''outputMustContain('<(*,cat,[blue]) --> likes>. %1.00;0.37%') \ No newline at end of file diff --git a/Tests/examples/multi_step/nal4.recursion.nal b/Tests/examples/multi_step/nal4.recursion.nal new file mode 100644 index 0000000..fa9ac3d --- /dev/null +++ b/Tests/examples/multi_step/nal4.recursion.nal @@ -0,0 +1,12 @@ +'*silence=80 + +<0 --> num>. %1.00;0.90% +<<$1 --> num> ==> <(*,$1) --> num>>. %1.00;0.90% +<(*,(*,(*,(*,(*,0))))) --> num>? + +50000 + +''outputMustContain('') +//''outputMustContain('<(*,(*,(*,(*,(*,0))))) --> num>.') +''//outputMustContain('<(*,(*,(*,(*,(*,0))))) --> num>. %1.00') +''//outputMustContain('<(*,(*,(*,(*,(*,0))))) --> num>. %1.00;0.53%') diff --git a/Tests/examples/multi_step/nal4.recursion.small.2.nal b/Tests/examples/multi_step/nal4.recursion.small.2.nal new file mode 100644 index 0000000..f436dd2 --- /dev/null +++ b/Tests/examples/multi_step/nal4.recursion.small.2.nal @@ -0,0 +1,10 @@ +<<$1 --> number> ==> <(/,successor,$1,_) --> number>>. +<0 --> number>. 
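+' (editorial note, not part of the original test) Derivation sketch: unifying $1 with 0
+' instantiates the rule to <<0 --> number> ==> <(/,successor,0,_) --> number>>; detachment
+' then yields <(/,successor,0,_) --> number>, and repeating the unification on that
+' conclusion answers the two-level query below.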
+<(/,successor,(/,successor,0,_),_) --> number>? + +20000 + +''outputMustContain('<(/,successor,0,_) --> number>.') +//''outputMustContain('<(/,successor,(/,successor,0,_),_) --> number>.') +//''outputMustContain('<(/,successor,(/,successor,0,_),_) --> number>. %1.00;0.73%') + diff --git a/Tests/examples/multi_step/nal4.recursion.small.nal b/Tests/examples/multi_step/nal4.recursion.small.nal new file mode 100644 index 0000000..fd3bbaf --- /dev/null +++ b/Tests/examples/multi_step/nal4.recursion.small.nal @@ -0,0 +1,9 @@ +<0 --> num>. %1.00;0.90% + +<<$1 --> num> ==> <(*,$1) --> num>>. %1.00;0.90% + +<(*,(*,(*,0))) --> num>? + +50000 + +''outputMustContain('<(*,(*,(*,0))) --> num>. %1.00;0.66%') diff --git a/Tests/examples/multi_step/nal6.mln1.nal b/Tests/examples/multi_step/nal6.mln1.nal new file mode 100644 index 0000000..47ba1c0 --- /dev/null +++ b/Tests/examples/multi_step/nal6.mln1.nal @@ -0,0 +1,28 @@ +' MLN example by patrick + +' Facts: +<(*,{Anna},{Bob}) --> Friends>. %1.00;0.99% +<(*,{Anna},{Edward}) --> Friends>. %1.00;0.99% +<(*,{Anna},{Frank}) --> Friends>. %1.00;0.99% +<(*,{Edward},{Frank}) --> Friends>. %1.00;0.99% +<(*,{Gary},{Helen}) --> Friends>. %1.00;0.99% +(--,<(*,{Gary},{Frank}) --> Friends>). %1.00;0.99% +<{Anna} --> [Drinks]>. %1.00;0.99% +<{Edward} --> [Smokes]>. %1.00;0.99% + +' Rule0: +' p=0.8, Drinks(x) => Cancer(x) +<<$1 --> [Drinks]> ==> <$1 --> [Cancer]>>. %0.8;0.9% + +' Rule1: +' p=0.8, Smokes(x) => Cancer(x) +<<$1 --> [Smokes]> ==> <$1 --> [Cancer]>>. %0.8;0.9% + +' Rule2: +' p=0.6, Friends(x, y) => (Smokes(x) <=> Smokes(y)) +<<(*,$1,$2) --> Friends> ==> (||, (&&,<$1 --> [Smokes]>,<$2 --> [Smokes]>), (&&,(--,<$1 --> [Smokes]>),(--,<$2 --> [Smokes]>)))>. %0.6;0.9% + +<?who --> [Cancer]>? +1000 + +''outputMustContain('<{Edward} --> [Cancer]>. %0.80;0.71%') diff --git a/Tests/examples/multi_step/nal7.implSeqABCsimple.nal b/Tests/examples/multi_step/nal7.implSeqABCsimple.nal new file mode 100644 index 0000000..2ee7ff8 --- /dev/null +++ b/Tests/examples/multi_step/nal7.implSeqABCsimple.nal @@ -0,0 +1,8 @@ +' unittest seq +<a --> A>. :|: %1.00;0.90% +8 +<b --> B>. :|: %1.00;0.90% +8 +<c --> C>. :|: %1.00;0.90% +100 +''outputMustContain('<(&/,<a --> A>,+8,<b --> B>,+8) =/> <c --> C>>. :!16: %1.00;0.42%') diff --git a/Tests/examples/multi_step/nal7.predEquiv.nal b/Tests/examples/multi_step/nal7.predEquiv.nal new file mode 100644 index 0000000..28b6e7e --- /dev/null +++ b/Tests/examples/multi_step/nal7.predEquiv.nal @@ -0,0 +1,7 @@ +<a --> A>. :|: %1.00;0.90% +8 +<b --> B>. :|: %1.00;0.90% +8 +<c --> C>. :|: %1.00;0.90% +100 +''outputMustContain('<(&/,<a --> A>,+8,<b --> B>,+8) </> <c --> C>>. :!16: %1.00;0.42%') diff --git a/Tests/examples/multi_step/nal7.seq3.nal b/Tests/examples/multi_step/nal7.seq3.nal new file mode 100644 index 0000000..6f63082 --- /dev/null +++ b/Tests/examples/multi_step/nal7.seq3.nal @@ -0,0 +1,7 @@ +<a --> A>. :|: %1.00;0.90% +8 +<b --> B>. :|: %1.00;0.90% +8 +<c --> C>. :|: %1.00;0.90% +100 +''outputMustContain('(&/,<a --> A>,+8,<b --> B>,+8,<c --> C>). :!16: %1.00;0.73%') diff --git a/Tests/examples/multi_step/nars_memorize_precondition.nal b/Tests/examples/multi_step/nars_memorize_precondition.nal new file mode 100644 index 0000000..7452858 --- /dev/null +++ b/Tests/examples/multi_step/nars_memorize_precondition.nal @@ -0,0 +1,6 @@ +<(&/,<a --> A>,+10,(^pick,{SELF},a),+10) =/> <b --> B>>. +100 +<a --> A>. :|: +10 +<b --> B>! +''outputMustContain('(^pick,{SELF},a). 
:!110: %1.00;0.90%') diff --git a/Tests/examples/multi_step/nars_memorize_precondition2_parallel.nal b/Tests/examples/multi_step/nars_memorize_precondition2_parallel.nal new file mode 100644 index 0000000..7cb1ced --- /dev/null +++ b/Tests/examples/multi_step/nars_memorize_precondition2_parallel.nal @@ -0,0 +1,7 @@ +<(&/,(&|, #A>, #A>),+10,(^pick,{SELF},a),+10) =/> B>>. +100 + A>. :|: + A>. :|: +10 + B>! +''outputMustContain('(^pick,{SELF},a). :!110: %1.00;0.90%') diff --git a/Tests/examples/multi_step/nars_memorize_precondition2_sequential.nal b/Tests/examples/multi_step/nars_memorize_precondition2_sequential.nal new file mode 100644 index 0000000..6116338 --- /dev/null +++ b/Tests/examples/multi_step/nars_memorize_precondition2_sequential.nal @@ -0,0 +1,8 @@ +<(&/, #A>,+10, #A>,+10,(^pick,{SELF},a),+10) =/> B>>. +100 + A>. :|: +10 + A>. :|: +10 + B>! +''outputMustContain('(^pick,{SELF},a). :!120: %1.00;0.90%') diff --git a/Tests/examples/multi_step/nars_memorize_precondition_sequence.nal b/Tests/examples/multi_step/nars_memorize_precondition_sequence.nal new file mode 100644 index 0000000..72c3fd8 --- /dev/null +++ b/Tests/examples/multi_step/nars_memorize_precondition_sequence.nal @@ -0,0 +1,8 @@ +<(&/, A>,+10, y>,+10,(^pick,{SELF},a),+10) =/> B>>. +100 + A>. :|: +10 + y>. :|: +10 + B>! +''outputMustContain('(^pick,{SELF},a). :!120: %1.00;0.90%') diff --git a/Tests/examples/multi_step/nars_memorize_precondition_var1.nal b/Tests/examples/multi_step/nars_memorize_precondition_var1.nal new file mode 100644 index 0000000..5b01a07 --- /dev/null +++ b/Tests/examples/multi_step/nars_memorize_precondition_var1.nal @@ -0,0 +1,6 @@ +<(&/,<#1 --> A>,+10,(^pick,{SELF},#1),+10) =/> <b --> B>>. +100 +<a --> A>. :|: +10 +<b --> B>! +''outputMustContain('(^pick,{SELF},a). :!110: %1.00;0.90%') diff --git a/Tests/examples/multi_step/nars_memorize_precondition_var2.nal b/Tests/examples/multi_step/nars_memorize_precondition_var2.nal new file mode 100644 index 0000000..fe27529 --- /dev/null +++ b/Tests/examples/multi_step/nars_memorize_precondition_var2.nal @@ -0,0 +1,6 @@ +<(&/,<a --> A>,+10,(^pick,{SELF},a),+10) =/> <#1 --> B>>. +100 +<a --> A>. :|: +10 +<b --> B>! +''outputMustContain('(^pick,{SELF},a). :!110: %1.00;0.90%') diff --git a/Tests/examples/multi_step/nars_memorize_precondition_var3.nal b/Tests/examples/multi_step/nars_memorize_precondition_var3.nal new file mode 100644 index 0000000..56d3d09 --- /dev/null +++ b/Tests/examples/multi_step/nars_memorize_precondition_var3.nal @@ -0,0 +1,6 @@ +<(&/,<$1 --> A>,+10,(^pick,{SELF},$1),+10) =/> <$1 --> B>>. +100 +<a --> A>. :|: +10 +<a --> B>! +''outputMustContain('(^pick,{SELF},a). :!110: %1.00;0.90%') diff --git a/Tests/examples/multi_step/nars_multistep_1.nal b/Tests/examples/multi_step/nars_multistep_1.nal new file mode 100644 index 0000000..22b7c4c --- /dev/null +++ b/Tests/examples/multi_step/nars_multistep_1.nal @@ -0,0 +1,20 @@ +'****** Mixed inference + bird>. + swimmer>. +50 + swimmer>? +80 + bird>. + swimmer>. +80 + [feathered]>. + [feathered]>. +80 + bird>? +100 + swimmer>? +7000 +''outputMustContain(' swimmer>. %1.00;0.45%') +//''outputMustContain(' swimmer>. %1.00;0.62%') +''outputMustContain(' bird>. %1.00;0.45%') +''outputMustContain(' swimmer>. %1.00;0.28%') diff --git a/Tests/examples/multi_step/nars_multistep_2.nal b/Tests/examples/multi_step/nars_multistep_2.nal new file mode 100644 index 0000000..4f013e7 --- /dev/null +++ b/Tests/examples/multi_step/nars_multistep_2.nal @@ -0,0 +1,13 @@ +// *** Semi-compositionality +<(&,light,[red]) --> traffic_signal>? 
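+// (editorial note, not part of the original test) The question is raised before any
+// evidence exists; the component beliefs entered below are then composed to answer it,
+// which is the semi-compositional behavior this script exercises.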
+5 +<[red] --> traffic_signal>. %0.1% +<light --> traffic_signal>. %0.1% +500 +''outputMustContain('<(&,[red],light) --> traffic_signal>. %0.19;0.81%') +1000 +<{light_1} --> (&,light,[red])>. +<{light_1} --> traffic_signal>. +200000 +''outputMustContain('<(&,[red],light) --> traffic_signal>. %0.30;0.83%') + diff --git a/Tests/examples/multi_step/nars_multistep_3.nal b/Tests/examples/multi_step/nars_multistep_3.nal new file mode 100644 index 0000000..a6c70f4 --- /dev/null +++ b/Tests/examples/multi_step/nars_multistep_3.nal @@ -0,0 +1,20 @@ +// *** Fuzzy concepts +<{John} --> (/,taller_than,{Tom},_)>. +<{John} --> boy>. +5 +<{Tom} --> (/,taller_than,_,boy)>? +''outputMustContain('<boy --> (/,taller_than,{Tom},_)>. %1.00;0.45%') +6 +<{David} --> boy>. +(--,<{David} --> (/,taller_than,{Tom},_)>). + +1000 + +//''outputMustContain('<boy --> (/,taller_than,{Tom},_)>. %0.00;0.40%') + +//''outputMustContain('<{Tom} --> (/,taller_than,_,boy)>. %1.00;0.45%') +1 +<{Karl} --> (/,taller_than,{Tom},_)>. +<{Karl} --> boy>. +500000 +''outputMustContain('<boy --> (/,taller_than,{Tom},_)>. %0.67;0.71%') diff --git a/Tests/examples/multi_step/nars_seqABC.nal b/Tests/examples/multi_step/nars_seqABC.nal new file mode 100644 index 0000000..a3fc62a --- /dev/null +++ b/Tests/examples/multi_step/nars_seqABC.nal @@ -0,0 +1,14 @@ +<a --> A>. :|: %1.00;0.90% +8 +<b --> B>. :|: %1.00;0.90% +8 +<c --> C>. :|: %1.00;0.90% +8 +<a --> A>. :|: %1.00;0.90% +100 +<b --> B>. :|: %1.00;0.90% +20000 +<?1 =/> <c --> C>>? +''outputMustContain('<(&/,<a --> A>,+8,<b --> B>,+8) =/> <c --> C>>. :!16: %1.00;0.42%') +''outputMustContain('<(&/,<b --> B>,+8) =/> <c --> C>>. %0.50;0.62%') +''outputMustContain('<(&/,<a --> A>,+16) =/> <c --> C>>. %0.50;0.62%') diff --git a/Tests/examples/multi_step/nars_transitivity.nal b/Tests/examples/multi_step/nars_transitivity.nal new file mode 100644 index 0000000..d3e413e --- /dev/null +++ b/Tests/examples/multi_step/nars_transitivity.nal @@ -0,0 +1,8 @@ +<(*,a,b) --> like>. %1.00;0.90% +<(*,b,c) --> like>. %1.00;0.90% +<(*,a,c) --> like>. %1.00;0.90% +<<(*,b,$1) --> like> ==> <(*,a,$1) --> like>>? +//<(&&,<(*,#1,$2) --> like>,<(*,$3,#1) --> like>) ==> <(*,$3,$2) --> like>>? +210000 +''outputMustContain('<<(*,b,$1) --> like> ==> <(*,a,$1) --> like>>. %1.00;0.45%') +''outputMustContain('<(&&,<(*,#1,$2) --> like>,<(*,$3,#1) --> like>) ==> <(*,$3,$2) --> like>>. %1.00;0.26%') diff --git a/Tests/examples/multi_step/stresstest_bird1.nal b/Tests/examples/multi_step/stresstest_bird1.nal new file mode 100644 index 0000000..cab67b1 --- /dev/null +++ b/Tests/examples/multi_step/stresstest_bird1.nal @@ -0,0 +1,33 @@ +' stress test +' from https://code.google.com/archive/p/open-nars/issues/1 + +<{b1} --> bird>. +<{b1} --> fly>. +<{b2} --> bird>. +<{b2} --> fly>. +<{b3} --> bird>. +<{b3} --> fly>. +<{b4} --> bird>. +<{b4} --> fly>. +<{b5} --> bird>. +<{b5} --> fly>. +<{b6} --> bird>. +<{b6} --> fly>. +<{b7} --> bird>. +<{b7} --> fly>. +<{b8} --> bird>. +<{b8} --> fly>. +<{b9} --> bird>. +<{b9} --> fly>. +<{bA} --> bird>. +<{bA} --> fly>? + +' expected answers on a good run +' Answer <{bA} --> fly>. %1.00;0.27% {2176 +' Answer <{bA} --> fly>. %1.00;0.40% {6305 +' Answer <{bA} --> fly>. %1.00;0.44% {32125 +' Answer <{bA} --> fly>. %1.00;0.78% {44080 + +' give enough time +60000 +''outputMustContain('<{bA} --> fly>. 
%1.00;0.77%') diff --git a/Tests/examples/parsing_failed_examples/application/vision.nal b/Tests/examples/parsing_failed_examples/application/vision.nal new file mode 100644 index 0000000..29c4c98 --- /dev/null +++ b/Tests/examples/parsing_failed_examples/application/vision.nal @@ -0,0 +1,55 @@ +//First: Input diamond: +// | ██ | +// | ██ ██ | +// |██ ██| +// | ██ ██ | +// | ██ |: +<{M1[-1.0,0.0]} --> [BRIGHT]>. +<{M1[1.0,0.0]} --> [BRIGHT]>. +<{M1[0.0,1.0]} --> [BRIGHT]>. +<{M1[0.0,-1.0]} --> [BRIGHT]>. +<{M1[0.5,0.5]} --> [BRIGHT]>. +<{M1[-0.5,0.5]} --> [BRIGHT]>. +<{M1[0.5,-0.5]} --> [BRIGHT]>. +<{M1[-0.5,-0.5]} --> [BRIGHT]>. +<{M1} --> (/,called,_,circle)>. + +//Second: Input cross: +// | ██ | +// | ██ | +// |██████████| +// | ██ | +// | ██ |: +<{M2[0.0,1.0]} --> [BRIGHT]>. +<{M2[0.0,0.5]} --> [BRIGHT]>. +<{M2[-1.0,0.0]} --> [BRIGHT]>. +<{M2[-0.5,0.0]} --> [BRIGHT]>. +<{M2[0.0,0.0]} --> [BRIGHT]>. +<{M2[0.5,0.0]} --> [BRIGHT]>. +<{M2[1.0,0.0]} --> [BRIGHT]>. +<{M2[0.0,-1.0]} --> [BRIGHT]>. +<{M2[0.0,-0.5]} --> [BRIGHT]>. +<{M2} --> (/,called,_,cross)>. + + +//Re-observe imperfectly +// |▒▒ ██ | +// | ██ ▒▒ | +// |▒▒ ██| +// | ██ ▒▒▒▒| +// | |: +<{M3[-1.0,1.0]} --> [BRIGHT]>. %0.5% +<{M3[0.0,1.0]} --> [BRIGHT]>. +<{M3[-0.5,0.5]} --> [BRIGHT]>. +<{M3[0.5,0.5]} --> [BRIGHT]>. %0.5% +<{M3[-1.0,0.0]} --> [BRIGHT]>. %0.5% +<{M3[1.0,0.0]} --> [BRIGHT]>. +<{M3[-0.5,-0.5]} --> [BRIGHT]>. +<{M3[0.5,-0.5]} --> [BRIGHT]>. %0.5% +<{M3[1.0,-0.5]} --> [BRIGHT]>. %0.5% + +50000 +//What was observed? +<{M3} --> (/,called,_,?what)>? +//A circle +''outputMustContain('<{M3} --> (/,called,_,circle)>. %0.83;0.36%') diff --git a/Tests/examples/parsing_failed_examples/multi_step/nars_spatialSeq1.nal b/Tests/examples/parsing_failed_examples/multi_step/nars_spatialSeq1.nal new file mode 100644 index 0000000..ed4bbde --- /dev/null +++ b/Tests/examples/parsing_failed_examples/multi_step/nars_spatialSeq1.nal @@ -0,0 +1,6 @@ +(#,a,a,b,b). +(#,c,c,b,b). +<(&,(#,a,a),(#,c,c)) --> (/,#,_,b,b)>? +120000 +''outputMustContain('<(&,(#,a,a),(#,c,c)) --> (/,#,_,b,b)>. %1.00;0.81%') +''outputMustContain('<(#,a,a) <-> (#,c,c)>. %1.00;0.45%') diff --git a/Tests/examples/parsing_failed_examples/single_step/nal8_list.nal b/Tests/examples/parsing_failed_examples/single_step/nal8_list.nal new file mode 100644 index 0000000..5af4c28 --- /dev/null +++ b/Tests/examples/parsing_failed_examples/single_step/nal8_list.nal @@ -0,0 +1,8 @@ +(#,a,b,c,d). +300000 +''outputMustContain('(#,(#,a,b),c,d). %1.00;0.90%') +''outputMustContain('(#,(#,a,b,c),d). %1.00;0.90%') +''outputMustContain('(#,a,(#,b,c),d). %1.00;0.90%') +''outputMustContain('(#,a,(#,b,c,d)). %1.00;0.90%') +''outputMustContain('(#,a,b,c). %1.00;0.81%') +''outputMustContain('(#,b,c,d). %1.00;0.81%') \ No newline at end of file diff --git a/Tests/examples/readme.md b/Tests/examples/readme.md new file mode 100644 index 0000000..ec46004 --- /dev/null +++ b/Tests/examples/readme.md @@ -0,0 +1 @@ +# Readme \ No newline at end of file diff --git a/Tests/examples/single_step/README.txt b/Tests/examples/single_step/README.txt new file mode 100644 index 0000000..b4b4be2 --- /dev/null +++ b/Tests/examples/single_step/README.txt @@ -0,0 +1,17 @@ +These scripts are individual unit tests for verifying correct and +predictable operation with the original default NAR parameters. 
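+Before the line-format reference below, here is a minimal editorial sketch of a driver
+for these scripts. It is an illustration only: the `Reasoner` class and its
+`input_narsese`/`cycle` methods are assumed names, not PyNARS's confirmed API.
+
+```python
+from typing import List
+
+class Reasoner:
+    # Stand-in for the real reasoner; both method names are assumptions.
+    def input_narsese(self, line: str) -> List[str]:
+        return []  # would feed one Narsese line and return new output lines
+
+    def cycle(self, n: int) -> List[str]:
+        return []  # would run n inference cycles and return new output lines
+
+def run_script(path: str, reasoner: Reasoner) -> None:
+    out: List[str] = []  # output buffer; plays the role of `out` described below
+    with open(path) as f:
+        for raw in f:
+            line = raw.strip()
+            if not line:
+                continue
+            if line.startswith("''outputMustContain('"):
+                expected = line[len("''outputMustContain('"):-len("')")]
+                assert any(expected in o for o in out), f'missing: {expected}'
+            elif line.startswith("'"):  # covers ' comments and other '' directives
+                continue
+            elif line.isdigit():  # a bare number: run that many cycles
+                out.extend(reasoner.cycle(int(line)))
+            else:  # anything else is a Narsese input line
+                out.extend(reasoner.input_narsese(line))
+```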
+ + +Lines beginning with: + + [number]: # of cycles to process before continuing to the next line + + ' (apostrophe): comments; not interpreted, but instead re-created as ECHO channel output + + '' (two apostrophes): embedded Javascript code evaluated during testing. Examples: + + ''print(out) + out is a reference to the current output buffer, containing a list of strings, one for each output + + ''outputMustContain('...') + used for ensuring that the output buffer so far has a line containing the parameter string diff --git a/Tests/examples/single_step/nal1/desire.1.nal b/Tests/examples/single_step/nal1/desire.1.nal new file mode 100644 index 0000000..72cbda1 --- /dev/null +++ b/Tests/examples/single_step/nal1/desire.1.nal @@ -0,0 +1,4 @@ +<A --> B>! +<A --> B>@ + +''outputMustContain('<A --> B>! %1.00;0.90%') diff --git a/Tests/examples/single_step/nal1/nal1.0.nal b/Tests/examples/single_step/nal1/nal1.0.nal new file mode 100644 index 0000000..4704150 --- /dev/null +++ b/Tests/examples/single_step/nal1/nal1.0.nal @@ -0,0 +1,12 @@ +'Revision ------ + +'Bird is a type of swimmer. +<bird --> swimmer>. %1.00;0.90% + +'Bird is probably not a type of swimmer. +<bird --> swimmer>. %0.10;0.60% + +1 + +'Bird is very likely to be a type of swimmer. +''outputMustContain('<bird --> swimmer>. %0.87;0.91%') diff --git a/Tests/examples/single_step/nal1/nal1.1.nal b/Tests/examples/single_step/nal1/nal1.1.nal new file mode 100644 index 0000000..df9556b --- /dev/null +++ b/Tests/examples/single_step/nal1/nal1.1.nal @@ -0,0 +1,13 @@ +'Deduction ------ + +'Bird is a type of animal. +<bird --> animal>. + +'Robin is a type of bird. +<robin --> bird>. + +10 + +'Robin is a type of animal. +''outputMustContain('<robin --> animal>. %1.00;0.81%') + diff --git a/Tests/examples/single_step/nal1/nal1.2.nal b/Tests/examples/single_step/nal1/nal1.2.nal new file mode 100644 index 0000000..e110a77 --- /dev/null +++ b/Tests/examples/single_step/nal1/nal1.2.nal @@ -0,0 +1,15 @@ +'********** abduction + +'Sport is a type of competition. +<sport --> competition>. + +'Chess is a type of competition. +<chess --> competition>. %0.90% + +3 + +'I guess sport is a type of chess. +''outputMustContain('<sport --> chess>. %1.00;0.42%') + +'I guess chess is a type of sport. +''outputMustContain('<chess --> sport>. %0.90;0.45%') diff --git a/Tests/examples/single_step/nal1/nal1.3.nal b/Tests/examples/single_step/nal1/nal1.3.nal new file mode 100644 index 0000000..3fad82b --- /dev/null +++ b/Tests/examples/single_step/nal1/nal1.3.nal @@ -0,0 +1,15 @@ +'********* induction + +'Swan is a type of swimmer. +<swan --> swimmer>. %0.90% + +'Swan is a type of bird. +<swan --> bird>. + +3 + +'I guess bird is a type of swimmer. +''outputMustContain('<bird --> swimmer>. %0.90;0.45%') + +'I guess swimmer is a type of bird. +''outputMustContain('<swimmer --> bird>. %1.00;0.42%') diff --git a/Tests/examples/single_step/nal1/nal1.4.nal b/Tests/examples/single_step/nal1/nal1.4.nal new file mode 100644 index 0000000..265e4cb --- /dev/null +++ b/Tests/examples/single_step/nal1/nal1.4.nal @@ -0,0 +1,12 @@ +'********** exemplification + +'Robin is a type of bird. +<robin --> bird>. + +'A bird is a type of animal. +<bird --> animal>. + +3 + +'I guess animal is a type of robin. +''outputMustContain('<animal --> robin>. %1.00;0.45%') diff --git a/Tests/examples/single_step/nal1/nal1.5.nal b/Tests/examples/single_step/nal1/nal1.5.nal new file mode 100644 index 0000000..060f4e3 --- /dev/null +++ b/Tests/examples/single_step/nal1/nal1.5.nal @@ -0,0 +1,12 @@ +'********** conversion + +'Bird is a type of swimmer. +<bird --> swimmer>. + +'Is swimmer a type of bird? +<swimmer --> bird>? + +6 + +'I guess swimmer is a type of bird. +''outputMustContain('<swimmer --> bird>. 
%1.00;0.47%') diff --git a/Tests/examples/single_step/nal1/nal1.6.nal b/Tests/examples/single_step/nal1/nal1.6.nal new file mode 100644 index 0000000..996682f --- /dev/null +++ b/Tests/examples/single_step/nal1/nal1.6.nal @@ -0,0 +1,12 @@ +' ********** "y/n" question + +' Bird is a type of swimmer. +<bird --> swimmer>. + +' Is bird a type of swimmer? +<bird --> swimmer>? + +1 + +' Bird is a type of swimmer. +''outputMustContain('<bird --> swimmer>. %1.00;0.90%') diff --git a/Tests/examples/single_step/nal1/nal1.7.nal b/Tests/examples/single_step/nal1/nal1.7.nal new file mode 100644 index 0000000..359b095 --- /dev/null +++ b/Tests/examples/single_step/nal1/nal1.7.nal @@ -0,0 +1,13 @@ +' ********** "wh" question + +' Bird is a type of swimmer. +<bird --> swimmer>. %1.00;0.80% + +' What is a type of swimmer? +<?1 --> swimmer>? + +5 + +' Bird is a type of swimmer. +''outputMustContain('<bird --> swimmer>. %1.00;0.80%') + diff --git a/Tests/examples/single_step/nal1/nal1.8.nal b/Tests/examples/single_step/nal1/nal1.8.nal new file mode 100644 index 0000000..9f24e80 --- /dev/null +++ b/Tests/examples/single_step/nal1/nal1.8.nal @@ -0,0 +1,15 @@ +' ********** backward inference + +' Bird is a type of swimmer. +<bird --> swimmer>. %1.00;0.80% + +' What is a type of swimmer? +<?1 --> swimmer>? + +5 + +' What is a type of bird? +''outputMustContain('<?1 --> bird>?') + +' What is the type of bird? +''outputMustContain('<bird --> ?1>?') diff --git a/Tests/examples/single_step/nal2/nal2.0.nal b/Tests/examples/single_step/nal2/nal2.0.nal new file mode 100644 index 0000000..6577f89 --- /dev/null +++ b/Tests/examples/single_step/nal2/nal2.0.nal @@ -0,0 +1,13 @@ +'********** revision + +'Robin is similar to swan. +<robin <-> swan>. + +'I think robin is not similar to swan. +<robin <-> swan>. %0.10;0.60% + +1 + +'Robin is probably similar to swan. +''outputMustContain('<robin <-> swan>. %0.87;0.91%') + diff --git a/Tests/examples/single_step/nal2/nal2.1.nal b/Tests/examples/single_step/nal2/nal2.1.nal new file mode 100644 index 0000000..cdfa700 --- /dev/null +++ b/Tests/examples/single_step/nal2/nal2.1.nal @@ -0,0 +1,15 @@ +'********** comparison + +'Swan is a type of swimmer. +<swan --> swimmer>. %0.90% + +'Swan is a type of bird. +<swan --> bird>. + +3 + +'I guess that bird is similar to swimmer. +''outputMustContain('<bird <-> swimmer>. %0.90;0.45%') + + + diff --git a/Tests/examples/single_step/nal2/nal2.10.nal b/Tests/examples/single_step/nal2/nal2.10.nal new file mode 100644 index 0000000..d1395d6 --- /dev/null +++ b/Tests/examples/single_step/nal2/nal2.10.nal @@ -0,0 +1,12 @@ +'********** structure transformation + +'Birdie is similar to Tweety +<Birdie <-> Tweety>. %0.90% + +'Is Birdie similar to Tweety? +<{Birdie} <-> {Tweety}>? + +6 + +'Birdie is similar to Tweety. +''outputMustContain('<{Birdie} <-> {Tweety}>. %0.90;0.73%') diff --git a/Tests/examples/single_step/nal2/nal2.11.nal b/Tests/examples/single_step/nal2/nal2.11.nal new file mode 100644 index 0000000..a1991cc --- /dev/null +++ b/Tests/examples/single_step/nal2/nal2.11.nal @@ -0,0 +1,14 @@ +'********** conversions between inheritance and similarity + +'Swan is a type of bird. +<swan --> bird>. %0.90% + + +'Is bird similar to swan? +<bird <-> swan>? + +6 + +'I guess that bird is similar to swan. +''outputMustContain('<bird <-> swan>. %0.90;0.47%') + diff --git a/Tests/examples/single_step/nal2/nal2.12.nal b/Tests/examples/single_step/nal2/nal2.12.nal new file mode 100644 index 0000000..c491499 --- /dev/null +++ b/Tests/examples/single_step/nal2/nal2.12.nal @@ -0,0 +1,13 @@ +'********** conversions between inheritance and similarity + +'A bird is similar to a swan. +<bird <-> swan>. %0.90% + +'Is swan a type of bird? +<swan --> bird>? 
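+' (editorial note, not part of the original test) The similarity premise is converted
+' into the asked inheritance direction here, so the answer below keeps the premise
+' frequency 0.90 but at a reduced confidence.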
+ +6 + +'A swan is a type of bird. +''outputMustContain('<swan --> bird>. %0.90;0.81%') + diff --git a/Tests/examples/single_step/nal2/nal2.13.nal b/Tests/examples/single_step/nal2/nal2.13.nal new file mode 100644 index 0000000..41a4c36 --- /dev/null +++ b/Tests/examples/single_step/nal2/nal2.13.nal @@ -0,0 +1,11 @@ +'********** translating instance into inheritance + +'Tweety is a bird. +<Tweety {-- bird>. + +1 + +''outputMustContain('<{Tweety} --> bird>. %1.00;0.90%') +'//expect.outEmpty + + diff --git a/Tests/examples/single_step/nal2/nal2.14.nal b/Tests/examples/single_step/nal2/nal2.14.nal new file mode 100644 index 0000000..6d5ca38 --- /dev/null +++ b/Tests/examples/single_step/nal2/nal2.14.nal @@ -0,0 +1,9 @@ +'********** translating property into inheritance + +'Ravens are black. +<raven --[ black>. + +1 + +'''expect.outEmpty +''outputMustContain('<raven --> [black]>.') \ No newline at end of file diff --git a/Tests/examples/single_step/nal2/nal2.15.nal b/Tests/examples/single_step/nal2/nal2.15.nal new file mode 100644 index 0000000..0f64d7d --- /dev/null +++ b/Tests/examples/single_step/nal2/nal2.15.nal @@ -0,0 +1,9 @@ +'********** translating instance-property into inheritance + +'Tweety is yellow. +<Tweety {-[ yellow>. + +1 + +'''expect.outEmpty +''outputMustContain('<{Tweety} --> [yellow]>.') \ No newline at end of file diff --git a/Tests/examples/single_step/nal2/nal2.16.nal b/Tests/examples/single_step/nal2/nal2.16.nal new file mode 100644 index 0000000..f887f53 --- /dev/null +++ b/Tests/examples/single_step/nal2/nal2.16.nal @@ -0,0 +1,10 @@ +'********** set definition + +'Tweety is Birdie. +<{Tweety} --> {Birdie}>. + +3 + +'Birdie is similar to Tweety. +''outputMustContain('<{Birdie} <-> {Tweety}>. %1.00;0.90%') + diff --git a/Tests/examples/single_step/nal2/nal2.17.nal b/Tests/examples/single_step/nal2/nal2.17.nal new file mode 100644 index 0000000..4bf1336 --- /dev/null +++ b/Tests/examples/single_step/nal2/nal2.17.nal @@ -0,0 +1,9 @@ +'********** set definition + +'Smart thing is a type of bright thing. +<[smart] --> [bright]>. + +1 + +'Bright thing is similar to smart thing. +''outputMustContain('<[bright] <-> [smart]>. %1.00;0.90%') diff --git a/Tests/examples/single_step/nal2/nal2.18.nal b/Tests/examples/single_step/nal2/nal2.18.nal new file mode 100644 index 0000000..575d61e --- /dev/null +++ b/Tests/examples/single_step/nal2/nal2.18.nal @@ -0,0 +1,12 @@ +'********** set definition + +'Birdie is similar to Tweety. +<{Birdie} <-> {Tweety}>. + +1 + +'Birdie is similar to Tweety. +''outputMustContain('<Birdie <-> Tweety>. %1.00;0.90%') + +'Tweety is Birdie. +''outputMustContain('<{Tweety} --> {Birdie}>. %1.00;0.90%') diff --git a/Tests/examples/single_step/nal2/nal2.19.nal b/Tests/examples/single_step/nal2/nal2.19.nal new file mode 100644 index 0000000..c375777 --- /dev/null +++ b/Tests/examples/single_step/nal2/nal2.19.nal @@ -0,0 +1,12 @@ +'********** set definition + +'Bright thing is similar to smart thing. +<[bright] <-> [smart]>. + +1 + +'Bright is similar to smart. +''outputMustContain('<bright <-> smart>. %1.00;0.90%') + +'Bright thing is a type of smart thing. +''outputMustContain('<[bright] --> [smart]>. %1.00;0.90%') diff --git a/Tests/examples/single_step/nal2/nal2.2.nal b/Tests/examples/single_step/nal2/nal2.2.nal new file mode 100644 index 0000000..7686b2e --- /dev/null +++ b/Tests/examples/single_step/nal2/nal2.2.nal @@ -0,0 +1,12 @@ +'********** backward inference + +'Bird is a type of swimmer. +<bird --> swimmer>. + +'What is a swimmer? +<{?1} --> swimmer>? + +5 + +'What is a bird? 
+''outputMustContain('<{?1} --> bird>?') diff --git a/Tests/examples/single_step/nal2/nal2.3.nal b/Tests/examples/single_step/nal2/nal2.3.nal new file mode 100644 index 0000000..4541f02 --- /dev/null +++ b/Tests/examples/single_step/nal2/nal2.3.nal @@ -0,0 +1,12 @@ +'********** comparison + +'Sport is a type of competition. +<sport --> competition>. + +'Chess is a type of competition. +<chess --> competition>. %0.90% + +3 + +'I guess chess is similar to sport. +''outputMustContain('<chess <-> sport>. %0.90;0.45%') diff --git a/Tests/examples/single_step/nal2/nal2.4.nal b/Tests/examples/single_step/nal2/nal2.4.nal new file mode 100644 index 0000000..a4a4e67 --- /dev/null +++ b/Tests/examples/single_step/nal2/nal2.4.nal @@ -0,0 +1,15 @@ +'********** analogy + +'Swan is a type of swimmer. +<swan --> swimmer>. + +'Gull is similar to swan. +<gull <-> swan>. + +3 + +'I think gull is a type of swimmer. +''outputMustContain('<gull --> swimmer>. %1.00;0.81%') + + + diff --git a/Tests/examples/single_step/nal2/nal2.5.nal b/Tests/examples/single_step/nal2/nal2.5.nal new file mode 100644 index 0000000..f342b44 --- /dev/null +++ b/Tests/examples/single_step/nal2/nal2.5.nal @@ -0,0 +1,12 @@ +'********** analogy + +'Gull is a type of swimmer. +<gull --> swimmer>. + +'Gull is similar to a swan. +<gull <-> swan>. + +3 + +'I believe a swan is a type of swimmer. +''outputMustContain('<swan --> swimmer>. %1.00;0.81%') diff --git a/Tests/examples/single_step/nal2/nal2.6.nal b/Tests/examples/single_step/nal2/nal2.6.nal new file mode 100644 index 0000000..4dc51d7 --- /dev/null +++ b/Tests/examples/single_step/nal2/nal2.6.nal @@ -0,0 +1,12 @@ +'********** resemblance + +'Robin is similar to swan. +<robin <-> swan>. + +'Gull is similar to swan. +<gull <-> swan>. + +3 + +'Gull is similar to robin. +''outputMustContain('<gull <-> robin>. %1.00;0.81%') diff --git a/Tests/examples/single_step/nal2/nal2.7.nal b/Tests/examples/single_step/nal2/nal2.7.nal new file mode 100644 index 0000000..835c752 --- /dev/null +++ b/Tests/examples/single_step/nal2/nal2.7.nal @@ -0,0 +1,13 @@ +'********** conversions between inheritance and similarity + +'Swan is a type of bird. +<swan --> bird>. + +'Bird is not a type of swan. +<bird --> swan>. %0.10% + +1 + +'Bird is different from swan. +''outputMustContain('<bird <-> swan>. %0.10;0.81%') + diff --git a/Tests/examples/single_step/nal2/nal2.8.nal b/Tests/examples/single_step/nal2/nal2.8.nal new file mode 100644 index 0000000..cf9e690 --- /dev/null +++ b/Tests/examples/single_step/nal2/nal2.8.nal @@ -0,0 +1,13 @@ +'********** structure transformation + +'Bright is similar to smart. +<bright <-> smart>. %0.90% + +'Is a smart thing a type of bright thing? +<[smart] --> [bright]>? + +6 + +'A smart thing is a type of bright thing. +''outputMustContain('<[bright] <-> [smart]>. %0.90;0.90%') +''outputMustContain('<[smart] --> [bright]>. %0.90;0.66%') diff --git a/Tests/examples/single_step/nal2/nal2.9.nal b/Tests/examples/single_step/nal2/nal2.9.nal new file mode 100644 index 0000000..86c80c5 --- /dev/null +++ b/Tests/examples/single_step/nal2/nal2.9.nal @@ -0,0 +1,13 @@ +'********** conversions between inheritance and similarity + +'Swan is a type of bird. +<swan --> bird>. + +'Bird is different from swan. +<bird <-> swan>. %0.10% + +1 + +'Bird is probably not a type of swan. +''outputMustContain('<bird --> swan>. %0.10;0.73%') + diff --git a/Tests/examples/single_step/nal3/nal3.0.nal b/Tests/examples/single_step/nal3/nal3.0.nal new file mode 100644 index 0000000..8f0c747 --- /dev/null +++ b/Tests/examples/single_step/nal3/nal3.0.nal @@ -0,0 +1,19 @@ +'********** compound composition, two premises + +'Swan is a type of swimmer. +<swan --> swimmer>. 
%0.90% + +'Swan is a type of bird. +<swan --> bird>. %0.80% + +16 + +'Swan is a type of bird or a type of swimmer. +''outputMustContain('<swan --> (|,bird,swimmer)>. %0.98;0.81%') + + +'Swan is a type of swimming bird. +''outputMustContain('<swan --> (&,bird,swimmer)>. %0.72;0.81%') + + + diff --git a/Tests/examples/single_step/nal3/nal3.1.nal b/Tests/examples/single_step/nal3/nal3.1.nal new file mode 100644 index 0000000..4823bba --- /dev/null +++ b/Tests/examples/single_step/nal3/nal3.1.nal @@ -0,0 +1,17 @@ +'********** compound composition, two premises + +'Sport is a type of competition. +<sport --> competition>. %0.90% + +'Chess is a type of competition. +<chess --> competition>. %0.80% + +16 + +'If something is either chess or sport, then it is a competition. +''outputMustContain('<(|,chess,sport) --> competition>. %0.72;0.81%') + +'If something is both chess and sport, then it is a competition. +''outputMustContain('<(&,chess,sport) --> competition>. %0.98;0.81%') + + diff --git a/Tests/examples/single_step/nal3/nal3.10.nal b/Tests/examples/single_step/nal3/nal3.10.nal new file mode 100644 index 0000000..b639100 --- /dev/null +++ b/Tests/examples/single_step/nal3/nal3.10.nal @@ -0,0 +1,13 @@ +'********** compound composition, one premise + +'Swan is a type of bird. +<swan --> bird>. %0.90% + +'Is swan a type of nonbird swimmer? +<swan --> (-,swimmer,bird)>? + +60 + +'A swan is not a type of nonbird swimmer. +''outputMustContain('<swan --> (-,swimmer,bird)>. %0.10;0.73%') + diff --git a/Tests/examples/single_step/nal3/nal3.11.nal b/Tests/examples/single_step/nal3/nal3.11.nal new file mode 100644 index 0000000..369afe4 --- /dev/null +++ b/Tests/examples/single_step/nal3/nal3.11.nal @@ -0,0 +1,12 @@ +'********** compound composition, one premise + +'Swan is a type of bird. +<swan --> bird>. %0.90% + +'Is being a bird what differs swimmer from swan? +<(~,swimmer, swan) --> bird>? + +60 + +'What differs swimmer from swan is not being a bird. +''outputMustContain('<(~,swimmer,swan) --> bird>. %0.10;0.73%') \ No newline at end of file diff --git a/Tests/examples/single_step/nal3/nal3.12.nal b/Tests/examples/single_step/nal3/nal3.12.nal new file mode 100644 index 0000000..ae37cd1 --- /dev/null +++ b/Tests/examples/single_step/nal3/nal3.12.nal @@ -0,0 +1,9 @@ +'********** compound decomposition, one premise + +'Robin is a type of swimming bird. +<robin --> (&,bird,swimmer)>. %0.90% + +1 + +'Robin is a type of bird. +''outputMustContain('<robin --> bird>. %0.90;0.73%') diff --git a/Tests/examples/single_step/nal3/nal3.13.nal b/Tests/examples/single_step/nal3/nal3.13.nal new file mode 100644 index 0000000..bc67281 --- /dev/null +++ b/Tests/examples/single_step/nal3/nal3.13.nal @@ -0,0 +1,10 @@ +'********** compound decomposition, one premise + +'Robin is a type of nonswimming bird. +<robin --> (-,bird,swimmer)>. %0.90% + +1 + +'Robin is a type of bird. +''outputMustContain('<robin --> bird>. %0.90;0.73%') + diff --git a/Tests/examples/single_step/nal3/nal3.14.nal b/Tests/examples/single_step/nal3/nal3.14.nal new file mode 100644 index 0000000..762b13e --- /dev/null +++ b/Tests/examples/single_step/nal3/nal3.14.nal @@ -0,0 +1,9 @@ +'********** compound decomposition, one premise + +'Boys and girls are youth. +<(|, boy, girl) --> youth>. %0.90% + +2 + +'Boys are youth. +''outputMustContain('<boy --> youth>. 
%0.90;0.73%') diff --git a/Tests/examples/single_step/nal3/nal3.15.nal b/Tests/examples/single_step/nal3/nal3.15.nal new file mode 100644 index 0000000..fed35dd --- /dev/null +++ b/Tests/examples/single_step/nal3/nal3.15.nal @@ -0,0 +1,10 @@ +'********** compound decomposition, one premise + +'What differs boys from girls is being strong. +<(~, boy, girl) --> [strong]>. %0.90% + +2 + +'Boys are strong. +''outputMustContain('<boy --> [strong]>. %0.90;0.73%') + diff --git a/Tests/examples/single_step/nal3/nal3.2.nal b/Tests/examples/single_step/nal3/nal3.2.nal new file mode 100644 index 0000000..7915abd --- /dev/null +++ b/Tests/examples/single_step/nal3/nal3.2.nal @@ -0,0 +1,15 @@ +'********** compound decomposition, two premises + +'Robin is a type of bird or a type of swimmer. +<robin --> (|,bird,swimmer)>. + +'Robin is not a type of swimmer. +<robin --> swimmer>. %0.00% + +3 + +'Robin is a type of bird. + +''outputMustContain('<robin --> bird>. %1.00;0.81%') + + diff --git a/Tests/examples/single_step/nal3/nal3.3.nal b/Tests/examples/single_step/nal3/nal3.3.nal new file mode 100644 index 0000000..76b10e8 --- /dev/null +++ b/Tests/examples/single_step/nal3/nal3.3.nal @@ -0,0 +1,12 @@ +'********** compound decomposition, two premises + +'Robin is not a type of swimmer. +<robin --> swimmer>. %0.00% + +'Robin is not a nonswimming mammal. +<robin --> (-,mammal,swimmer)>. %0.00% + +3 + +'Robin is not a type of mammal. +''outputMustContain('<robin --> mammal>. %0.00;0.81%') \ No newline at end of file diff --git a/Tests/examples/single_step/nal3/nal3.4.nal b/Tests/examples/single_step/nal3/nal3.4.nal new file mode 100644 index 0000000..a8c5e6d --- /dev/null +++ b/Tests/examples/single_step/nal3/nal3.4.nal @@ -0,0 +1,16 @@ +'********** set operations + +'PlanetX is Mars, Pluto, or Venus. +<planetX --> {Mars,Pluto,Venus}>. %0.90% + +'PlanetX is probably Pluto or Saturn. +<planetX --> {Pluto,Saturn}>. %0.70% + +4 + +'PlanetX is Mars, Pluto, Saturn, or Venus. +''outputMustContain('<planetX --> {Mars,Pluto,Saturn,Venus}>. %0.97;0.81%') + +'PlanetX is probably Pluto. +''outputMustContain('<planetX --> {Pluto}>. %0.63;0.81%') + diff --git a/Tests/examples/single_step/nal3/nal3.5.nal b/Tests/examples/single_step/nal3/nal3.5.nal new file mode 100644 index 0000000..1a7e206 --- /dev/null +++ b/Tests/examples/single_step/nal3/nal3.5.nal @@ -0,0 +1,15 @@ +'********** set operations + +'PlanetX is Mars, Pluto, or Venus. +<planetX --> {Mars,Pluto,Venus}>. %0.90% + +'PlanetX is probably neither Pluto nor Saturn. +<planetX --> {Pluto,Saturn}>. %0.10% + +5 + +'PlanetX is Mars, Pluto, Saturn, or Venus. +''outputMustContain('<planetX --> {Mars,Pluto,Saturn,Venus}>. %0.91;0.81%') + +'PlanetX is either Mars or Venus. +''outputMustContain('<planetX --> {Mars,Venus}>. %0.81;0.81%') \ No newline at end of file diff --git a/Tests/examples/single_step/nal3/nal3.6.nal b/Tests/examples/single_step/nal3/nal3.6.nal new file mode 100644 index 0000000..83886a6 --- /dev/null +++ b/Tests/examples/single_step/nal3/nal3.6.nal @@ -0,0 +1,13 @@ +'********** composition on both sides of a statement + +'Bird is a type of animal. +<bird --> animal>. %0.90% + +'Is a swimming bird a type of swimming animal? +<(&,bird,swimmer) --> (&,animal,swimmer)>? + +15 + +'A swimming bird is probably a type of swimming animal. +''outputMustContain('<(&,bird,swimmer) --> (&,animal,swimmer)>. %0.90;0.73%') + diff --git a/Tests/examples/single_step/nal3/nal3.7.nal b/Tests/examples/single_step/nal3/nal3.7.nal new file mode 100644 index 0000000..adb8bd2 --- /dev/null +++ b/Tests/examples/single_step/nal3/nal3.7.nal @@ -0,0 +1,13 @@ +'********** composition on both sides of a statement + +'Bird is a type of animal. 
+<bird --> animal>. %0.90% + +'Is a nonanimal swimmer a type of a nonbird swimmer? +<(-,swimmer,animal) --> (-,swimmer,bird)>? + +15 + +'A nonanimal swimmer is probably a type of nonbird swimmer. +''outputMustContain('<(-,swimmer,animal) --> (-,swimmer,bird)>. %0.90;0.73%') + diff --git a/Tests/examples/single_step/nal3/nal3.8.nal b/Tests/examples/single_step/nal3/nal3.8.nal new file mode 100644 index 0000000..980df26 --- /dev/null +++ b/Tests/examples/single_step/nal3/nal3.8.nal @@ -0,0 +1,14 @@ +'********** compound composition, one premise + +'Swan is a type of bird. +<swan --> bird>. %0.90% + +'Is a swan a type of bird or swimmer? +<swan --> (|,bird,swimmer)>? + +50 + +'A swan is probably a type of bird or swimmer. +''outputMustContain('<swan --> (|,bird,swimmer)>. %0.90;0.73%') + + diff --git a/Tests/examples/single_step/nal3/nal3.9.nal b/Tests/examples/single_step/nal3/nal3.9.nal new file mode 100644 index 0000000..efe7a71 --- /dev/null +++ b/Tests/examples/single_step/nal3/nal3.9.nal @@ -0,0 +1,14 @@ +' ********** compound composition, one premise + +'Swan is a type of bird. +<swan --> bird>. %0.90% + +'Is swimming swan a type of bird? +<(&,swan,swimmer) --> bird>? + +50 + +'Swimming swan is a type of bird. +''outputMustContain('<(&,swan,swimmer) --> bird>. %0.90;0.73%') + + + diff --git a/Tests/examples/single_step/nal4/nal4.0.nal b/Tests/examples/single_step/nal4/nal4.0.nal new file mode 100644 index 0000000..8c9462d --- /dev/null +++ b/Tests/examples/single_step/nal4/nal4.0.nal @@ -0,0 +1,12 @@ +'********** structural transformation + +'An acid and a base can have a reaction. +<(*,acid,base) --> reaction>. + +6 + +'Acid can react with base. +''outputMustContain('<acid --> (/,reaction,_,base)>. %1.00;0.90%') + +'A base is something that has a reaction with an acid. +''outputMustContain('<base --> (/,reaction,acid,_)>. %1.00;0.90%') diff --git a/Tests/examples/single_step/nal4/nal4.1.nal b/Tests/examples/single_step/nal4/nal4.1.nal new file mode 100644 index 0000000..1e6792d --- /dev/null +++ b/Tests/examples/single_step/nal4/nal4.1.nal @@ -0,0 +1,13 @@ +'********** structural transformation + +'Acid can react with base. +<acid --> (/,reaction,_,base)>. + +4 + +'An acid and a base can have a reaction. +''outputMustContain('<(*,acid,base) --> reaction>. %1.00;0.90%') + +'A base is something that has a reaction with an acid. +''outputMustContain('<base --> (/,reaction,acid,_)>. %1.00;0.90%') + diff --git a/Tests/examples/single_step/nal4/nal4.2.nal b/Tests/examples/single_step/nal4/nal4.2.nal new file mode 100644 index 0000000..4419520 --- /dev/null +++ b/Tests/examples/single_step/nal4/nal4.2.nal @@ -0,0 +1,12 @@ +'********** structural transformation + +'A base is something that has a reaction with an acid. +<base --> (/,reaction,acid,_)>. + +4 + +'Acid can react with base. +''outputMustContain('<acid --> (/,reaction,_,base)>. %1.00;0.90%') + +'An acid and a base can have a reaction. +''outputMustContain('<(*,acid,base) --> reaction>. %1.00;0.90%') diff --git a/Tests/examples/single_step/nal4/nal4.3.nal b/Tests/examples/single_step/nal4/nal4.3.nal new file mode 100644 index 0000000..a8bc722 --- /dev/null +++ b/Tests/examples/single_step/nal4/nal4.3.nal @@ -0,0 +1,13 @@ +'********** structural transformation + +'Neutralization is a relation between an acid and a base. +<neutralization --> (*,acid,base)>. + +6 + +'Something that can neutralize a base is an acid. +''outputMustContain('<(\,neutralization,_,base) --> acid>. %1.00;0.90%') + +'Something that can be neutralized by an acid is a base. +''outputMustContain('<(\,neutralization,acid,_) --> base>. 
%1.00;0.90%') + diff --git a/Tests/examples/single_step/nal4/nal4.4.nal b/Tests/examples/single_step/nal4/nal4.4.nal new file mode 100644 index 0000000..40199fe --- /dev/null +++ b/Tests/examples/single_step/nal4/nal4.4.nal @@ -0,0 +1,13 @@ +'********** structural transformation + +'Something that can neutralize a base is an acid. +<(\,neutralization,_,base) --> acid>. + +4 + +'Neutralization is a relation between an acid and a base. +''outputMustContain('<neutralization --> (*,acid,base)>. %1.00;0.90%') + +'Something that can be neutralized by an acid is a base. +''outputMustContain('<(\,neutralization,acid,_) --> base>. %1.00;0.90%') + diff --git a/Tests/examples/single_step/nal4/nal4.5.nal b/Tests/examples/single_step/nal4/nal4.5.nal new file mode 100644 index 0000000..08fcbc6 --- /dev/null +++ b/Tests/examples/single_step/nal4/nal4.5.nal @@ -0,0 +1,12 @@ +'********** structural transformation + +'Something that can be neutralized by an acid is a base. +<(\,neutralization,acid,_) --> base>. + +4 + +'Something that can neutralize a base is an acid. +''outputMustContain('<(\,neutralization,_,base) --> acid>. %1.00;0.90%') + +'Neutralization is a relation between an acid and a base. +''outputMustContain('<neutralization --> (*,acid,base)>. %1.00;0.90%') \ No newline at end of file diff --git a/Tests/examples/single_step/nal4/nal4.6.nal b/Tests/examples/single_step/nal4/nal4.6.nal new file mode 100644 index 0000000..e77b34d --- /dev/null +++ b/Tests/examples/single_step/nal4/nal4.6.nal @@ -0,0 +1,12 @@ +'********** composition on both sides of a statement + +'Bird is a type of animal. +<bird --> animal>. + +'What is the relation between a bird and a plant? +<(*,bird,plant) --> ?x>? + +50 + +'The relation between bird and plant is a type of relation between animal and plant. +''outputMustContain('<(*,bird,plant) --> (*,animal,plant)>. %1.00;0.81%') diff --git a/Tests/examples/single_step/nal4/nal4.7.nal b/Tests/examples/single_step/nal4/nal4.7.nal new file mode 100644 index 0000000..ae2d154 --- /dev/null +++ b/Tests/examples/single_step/nal4/nal4.7.nal @@ -0,0 +1,12 @@ +'********** composition on both sides of a statement + +'Neutralization is a type of reaction. +<neutralization --> reaction>. + +'What can be neutralized by acid? +<(\,neutralization,acid,_) --> ?x>? + +6 + +'What can be neutralized by acid can react with acid. +''outputMustContain('<(\,neutralization,acid,_) --> (\,reaction,acid,_)>. %1.00;0.81%') diff --git a/Tests/examples/single_step/nal4/nal4.8.nal b/Tests/examples/single_step/nal4/nal4.8.nal new file mode 100644 index 0000000..aef3d6a --- /dev/null +++ b/Tests/examples/single_step/nal4/nal4.8.nal @@ -0,0 +1,12 @@ +'********** composition on both sides of a statement + +'Soda is a type of base. +<soda --> base>. + +'What is something that can neutralize a base? +<(/,neutralization,_,base) --> ?x>? + +6 + +'What can neutralize a base can neutralize soda. +''outputMustContain('<(/,neutralization,_,base) --> (/,neutralization,_,soda)>. %1.00;0.81%') diff --git a/Tests/examples/single_step/nal5.query.nal b/Tests/examples/single_step/nal5.query.nal new file mode 100644 index 0000000..254dba0 --- /dev/null +++ b/Tests/examples/single_step/nal5.query.nal @@ -0,0 +1,6 @@ +(&&,<a --> b>,<c --> d>). + +(&&,<a --> b>,<c --> d>)? + +1 +''outputMustContain('(&&,<a --> b>,<c --> d>).') diff --git a/Tests/examples/single_step/nal5/nal5.0.nal b/Tests/examples/single_step/nal5/nal5.0.nal new file mode 100644 index 0000000..f31c976 --- /dev/null +++ b/Tests/examples/single_step/nal5/nal5.0.nal @@ -0,0 +1,13 @@ +'********** revision + +'If robin can fly then robin is a type of bird. 
+<<robin --> [flying]> ==> <robin --> bird>>. + +'If robin can fly then robin may not be a type of bird. +<<robin --> [flying]> ==> <robin --> bird>>. %0.00;0.60% + +1 + +'If robin can fly then robin is a type of bird. +''outputMustContain('<<robin --> [flying]> ==> <robin --> bird>>. %0.86;0.91%') + diff --git a/Tests/examples/single_step/nal5/nal5.1.nal b/Tests/examples/single_step/nal5/nal5.1.nal new file mode 100644 index 0000000..8f85603 --- /dev/null +++ b/Tests/examples/single_step/nal5/nal5.1.nal @@ -0,0 +1,13 @@ +'********** deduction + +'If robin is a type of bird then robin is a type of animal. +<<robin --> bird> ==> <robin --> animal>>. + +'If robin can fly then robin is a type of bird. +<<robin --> [flying]> ==> <robin --> bird>>. + +14 + +'If robin can fly then robin is a type of animal. +''outputMustContain('<<robin --> [flying]> ==> <robin --> animal>>. %1.00;0.81%') + diff --git a/Tests/examples/single_step/nal5/nal5.10.nal b/Tests/examples/single_step/nal5/nal5.10.nal new file mode 100644 index 0000000..703f691 --- /dev/null +++ b/Tests/examples/single_step/nal5/nal5.10.nal @@ -0,0 +1,13 @@ +'********** analogy + +'Robin is a type of bird. +<robin --> bird>. + +'Usually, robin is a type of bird if and only if robin can fly. +<<robin --> bird> <=> <robin --> [flying]>>. %0.80% + +1 + +'I guess usually robin can fly. +''outputMustContain('<robin --> [flying]>. %0.80;0.65%') + diff --git a/Tests/examples/single_step/nal5/nal5.11.nal b/Tests/examples/single_step/nal5/nal5.11.nal new file mode 100644 index 0000000..7905f37 --- /dev/null +++ b/Tests/examples/single_step/nal5/nal5.11.nal @@ -0,0 +1,13 @@ +'********** resemblance + +'Robin is a type of animal if and only if robin is a type of bird. +<<robin --> animal> <=> <robin --> bird>>. + +'Robin is a type of bird if and only if robin can fly. +<<robin --> bird> <=> <robin --> [flying]>>. %0.9% + +19 + +'Robin is a type of animal if and only if robin can fly. +''outputMustContain('<<robin --> [flying]> <=> <robin --> animal>>. %0.90;0.81%') + diff --git a/Tests/examples/single_step/nal5/nal5.12.nal b/Tests/examples/single_step/nal5/nal5.12.nal new file mode 100644 index 0000000..6952b8b --- /dev/null +++ b/Tests/examples/single_step/nal5/nal5.12.nal @@ -0,0 +1,12 @@ +'********** conversions between Implication and Equivalence + +'If robin can fly then robin is a type of bird. +<<robin --> [flying]> ==> <robin --> bird>>. %0.9% + +'If robin is a type of bird then robin can fly. +<<robin --> bird> ==> <robin --> [flying]>>. %0.9% + +7 + +'Robin can fly if and only if robin is a type of bird. +''outputMustContain('<<robin --> [flying]> <=> <robin --> bird>>. %0.81;0.81%') diff --git a/Tests/examples/single_step/nal5/nal5.13.nal b/Tests/examples/single_step/nal5/nal5.13.nal new file mode 100644 index 0000000..56b269b --- /dev/null +++ b/Tests/examples/single_step/nal5/nal5.13.nal @@ -0,0 +1,17 @@ +'********** compound composition, two premises + +'If robin is a type of bird then robin is a type of animal. +<<robin --> bird> ==> <robin --> animal>>. + +'If robin is a type of bird then robin can fly. +<<robin --> bird> ==> <robin --> [flying]>>. %0.9% + +5 + +'If robin is a type of bird then usually robin is a type of animal and can fly. +''outputMustContain('<<robin --> bird> ==> (&&,<robin --> [flying]>,<robin --> animal>)>. %0.90;0.81%') + +'If robin is a type of bird then robin is a type of animal or can fly. +''outputMustContain('<<robin --> bird> ==> (||,<robin --> [flying]>,<robin --> animal>)>. %1.00;0.81%') + + diff --git a/Tests/examples/single_step/nal5/nal5.14.nal b/Tests/examples/single_step/nal5/nal5.14.nal new file mode 100644 index 0000000..5b85fa5 --- /dev/null +++ b/Tests/examples/single_step/nal5/nal5.14.nal @@ -0,0 +1,16 @@ +'********** compound composition, two premises + +'If robin is a type of bird then robin is a type of animal. +<<robin --> bird> ==> <robin --> animal>>. 
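+' (editorial note, not part of the original test) Both premises of this file share the
+' consequent <robin --> animal>, so the antecedents get composed: the expected outputs
+' combine them once with a conjunction and once with a disjunction.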
+ +'If robin can fly then robin is a type of animal. +<<robin --> [flying]> ==> <robin --> animal>>. %0.9% + +5 + +'If robin can fly and is a type of bird then robin is a type of animal. +''outputMustContain('<(&&,<robin --> [flying]>,<robin --> bird>) ==> <robin --> animal>>. %1.00;0.81%') + +'If robin can fly or is a type of bird then robin is a type of animal. +''outputMustContain('<(||,<robin --> [flying]>,<robin --> bird>) ==> <robin --> animal>>. %0.90;0.81%') + diff --git a/Tests/examples/single_step/nal5/nal5.15.nal b/Tests/examples/single_step/nal5/nal5.15.nal new file mode 100644 index 0000000..33365cb --- /dev/null +++ b/Tests/examples/single_step/nal5/nal5.15.nal @@ -0,0 +1,13 @@ +'********** compound decomposition, two premises + +'If robin is a type of bird then robin is not a type of flying animal. +<<robin --> bird> ==> (&&,<robin --> animal>,<robin --> [flying]>)>. %0% + +'If robin is a type of bird then robin can fly. +<<robin --> bird> ==> <robin --> [flying]>>. + +3 + +'It is unlikely that if a robin is a type of bird then robin is a type of animal. +''outputMustContain('<<robin --> bird> ==> <robin --> animal>>. %0.00;0.81%') + diff --git a/Tests/examples/single_step/nal5/nal5.16.nal b/Tests/examples/single_step/nal5/nal5.16.nal new file mode 100644 index 0000000..327057a --- /dev/null +++ b/Tests/examples/single_step/nal5/nal5.16.nal @@ -0,0 +1,13 @@ +'********** compound decomposition, two premises + +'Robin cannot be both a flyer and a swimmer. +(&&,<robin --> [flying]>,<robin --> swimmer>). %0% + +'Robin can fly. +<robin --> [flying]>. + +2 + +'Robin cannot swim. +''outputMustContain('<robin --> swimmer>. %0.00;0.81%') + diff --git a/Tests/examples/single_step/nal5/nal5.17.nal b/Tests/examples/single_step/nal5/nal5.17.nal new file mode 100644 index 0000000..0557853 --- /dev/null +++ b/Tests/examples/single_step/nal5/nal5.17.nal @@ -0,0 +1,13 @@ +'********** compound decomposition, two premises + +'Robin can fly or swim. +(||,<robin --> [flying]>,<robin --> swimmer>). + +'Robin cannot swim. +<robin --> swimmer>. %0% + +3 + +'Robin can fly. +''outputMustContain('<robin --> [flying]>. %1.00;0.81%') + diff --git a/Tests/examples/single_step/nal5/nal5.18.nal b/Tests/examples/single_step/nal5/nal5.18.nal new file mode 100644 index 0000000..030e325 --- /dev/null +++ b/Tests/examples/single_step/nal5/nal5.18.nal @@ -0,0 +1,13 @@ +'********** compound composition, one premise + +'Robin can fly. +<robin --> [flying]>. + +'Can robin fly or swim? +(||,<robin --> [flying]>,<robin --> swimmer>)? + +12 +''//+1 from original + +'Robin can fly or swim. +''outputMustContain('(||,<robin --> [flying]>,<robin --> swimmer>). %1.00;0.81%') diff --git a/Tests/examples/single_step/nal5/nal5.19.nal b/Tests/examples/single_step/nal5/nal5.19.nal new file mode 100644 index 0000000..95e7942 --- /dev/null +++ b/Tests/examples/single_step/nal5/nal5.19.nal @@ -0,0 +1,16 @@ +'********** compound decomposition, one premise + +'Robin can fly and swim. +$0.90;0.90$ (&&,<robin --> swimmer>,<robin --> [flying]>). %0.9% + + +1 + +'Robin can swim. +''outputMustContain('<robin --> swimmer>. %0.90;0.73%') + +5 +''//+2 from original + +'Robin can fly. +''outputMustContain('<robin --> [flying]>. %0.90;0.73%') diff --git a/Tests/examples/single_step/nal5/nal5.2.nal b/Tests/examples/single_step/nal5/nal5.2.nal new file mode 100644 index 0000000..b26389f --- /dev/null +++ b/Tests/examples/single_step/nal5/nal5.2.nal @@ -0,0 +1,13 @@ +'********** exemplification + +'If robin can fly then robin is a type of bird. +<<robin --> [flying]> ==> <robin --> bird>>. + +'If robin is a type of bird then robin is a type of animal. +<<robin --> bird> ==> <robin --> animal>>. + +19 + +'I guess if robin is a type of animal then robin can fly. +''outputMustContain('<<robin --> animal> ==> <robin --> [flying]>>. 
%1.00;0.45%') + diff --git a/Tests/examples/single_step/nal5/nal5.20.nal b/Tests/examples/single_step/nal5/nal5.20.nal new file mode 100644 index 0000000..5d99024 --- /dev/null +++ b/Tests/examples/single_step/nal5/nal5.20.nal @@ -0,0 +1,9 @@ +'********** negation + +'It is unlikely that robin cannot fly. +(--,<robin --> [flying]>). %0.1% + +1 + +'Robin can fly. +''outputMustContain('<robin --> [flying]>. %0.90;0.90%') diff --git a/Tests/examples/single_step/nal5/nal5.21.nal b/Tests/examples/single_step/nal5/nal5.21.nal new file mode 100644 index 0000000..a9e5df0 --- /dev/null +++ b/Tests/examples/single_step/nal5/nal5.21.nal @@ -0,0 +1,14 @@ +'********** negation + +'Robin can fly. +<robin --> [flying]>. %0.9% + +'Can robin fly or not? +(--,<robin --> [flying]>)? + +''//15 +4 + +'It is unlikely that robin cannot fly. +''outputMustContain('(--,<robin --> [flying]>). %0.10;0.90%') + diff --git a/Tests/examples/single_step/nal5/nal5.22.nal b/Tests/examples/single_step/nal5/nal5.22.nal new file mode 100644 index 0000000..ae89a15 --- /dev/null +++ b/Tests/examples/single_step/nal5/nal5.22.nal @@ -0,0 +1,12 @@ +'********** contraposition + +'It is unlikely that if robin is not a type of bird then robin can fly. +<(--,<robin --> bird>) ==> <robin --> [flying]>>. %0.1% + +'If robin cannot fly then is robin a type of bird? +<(--,<robin --> [flying]>) ==> <robin --> bird>>? + +1 + +'I guess it is unlikely that if robin cannot fly then robin is a type of bird. +''outputMustContain('<(--,<robin --> [flying]>) ==> <robin --> bird>>. %0.00;0.45%') diff --git a/Tests/examples/single_step/nal5/nal5.23.nal b/Tests/examples/single_step/nal5/nal5.23.nal new file mode 100644 index 0000000..5645ee1 --- /dev/null +++ b/Tests/examples/single_step/nal5/nal5.23.nal @@ -0,0 +1,13 @@ +'********** conditional deduction + +'If robin can fly and has wings then robin is a bird. +<(&&,<robin --> [flying]>,<robin --> [with_wings]>) ==> <robin --> bird>>. + +'robin can fly. +<robin --> [flying]>. + +5 + +'If robin has wings then robin is a bird +''outputMustContain('<<robin --> [with_wings]> ==> <robin --> bird>>. %1.00;0.81%') + diff --git a/Tests/examples/single_step/nal5/nal5.24.nal b/Tests/examples/single_step/nal5/nal5.24.nal new file mode 100644 index 0000000..ce61161 --- /dev/null +++ b/Tests/examples/single_step/nal5/nal5.24.nal @@ -0,0 +1,12 @@ +'********** conditional deduction + +'If robin can fly, has wings, and chirps, then robin is a bird +<(&&,<robin --> [chirping]>,<robin --> [flying]>,<robin --> [with_wings]>) ==> <robin --> bird>>. + +'robin can fly. +<robin --> [flying]>. + +5 + +'If robin has wings and chirps then robin is a bird. +''outputMustContain('<(&&,<robin --> [chirping]>,<robin --> [with_wings]>) ==> <robin --> bird>>. %1.00;0.81%') diff --git a/Tests/examples/single_step/nal5/nal5.25.nal b/Tests/examples/single_step/nal5/nal5.25.nal new file mode 100644 index 0000000..7d3a4ea --- /dev/null +++ b/Tests/examples/single_step/nal5/nal5.25.nal @@ -0,0 +1,12 @@ +'********** conditional deduction + +'If robin is a bird and it's living, then robin is an animal +<(&&,<robin --> bird>,<robin --> [living]>) ==> <robin --> animal>>. + +'If robin can fly, then robin is a bird +<<robin --> [flying]> ==> <robin --> bird>>. + +7 + +'If robin is living and it can fly, then robin is an animal. +''outputMustContain('<(&&,<robin --> [flying]>,<robin --> [living]>) ==> <robin --> animal>>. %1.00;0.81%') diff --git a/Tests/examples/single_step/nal5/nal5.26.nal b/Tests/examples/single_step/nal5/nal5.26.nal new file mode 100644 index 0000000..9d2ce11 --- /dev/null +++ b/Tests/examples/single_step/nal5/nal5.26.nal @@ -0,0 +1,12 @@ +'********** conditional abduction + +'If robin can fly then robin is a bird. +<<robin --> [flying]> ==> <robin --> bird>>. + +'If robin both swims and flies then robin is a bird. +<(&&,<robin --> swimmer>,<robin --> [flying]>) ==> <robin --> bird>>. 
diff --git a/Tests/examples/single_step/nal5/nal5.23.nal b/Tests/examples/single_step/nal5/nal5.23.nal
new file mode 100644
index 0000000..5645ee1
--- /dev/null
+++ b/Tests/examples/single_step/nal5/nal5.23.nal
@@ -0,0 +1,13 @@
+'********** conditional deduction
+
+'If robin can fly and has wings then robin is a bird.
+<(&&,<robin --> [flying]>,<robin --> [with_wings]>) ==> <robin --> bird>>.
+
+'Robin can fly.
+<robin --> [flying]>.
+
+5
+
+'If robin has wings then robin is a bird.
+''outputMustContain('<<robin --> [with_wings]> ==> <robin --> bird>>. %1.00;0.81%')
+
diff --git a/Tests/examples/single_step/nal5/nal5.24.nal b/Tests/examples/single_step/nal5/nal5.24.nal
new file mode 100644
index 0000000..ce61161
--- /dev/null
+++ b/Tests/examples/single_step/nal5/nal5.24.nal
@@ -0,0 +1,12 @@
+'********** conditional deduction
+
+'If robin can fly, has wings, and chirps, then robin is a bird.
+<(&&,<robin --> [chirping]>,<robin --> [flying]>,<robin --> [with_wings]>) ==> <robin --> bird>>.
+
+'Robin can fly.
+<robin --> [flying]>.
+
+5
+
+'If robin has wings and chirps then robin is a bird.
+''outputMustContain('<(&&,<robin --> [chirping]>,<robin --> [with_wings]>) ==> <robin --> bird>>. %1.00;0.81%')
diff --git a/Tests/examples/single_step/nal5/nal5.25.nal b/Tests/examples/single_step/nal5/nal5.25.nal
new file mode 100644
index 0000000..7d3a4ea
--- /dev/null
+++ b/Tests/examples/single_step/nal5/nal5.25.nal
@@ -0,0 +1,12 @@
+'********** conditional deduction
+
+'If robin is a bird and it's living, then robin is an animal.
+<(&&,<robin --> bird>,<robin --> [living]>) ==> <robin --> animal>>.
+
+'If robin can fly, then robin is a bird.
+<<robin --> [flying]> ==> <robin --> bird>>.
+
+7
+
+'If robin is living and it can fly, then robin is an animal.
+''outputMustContain('<(&&,<robin --> [flying]>,<robin --> [living]>) ==> <robin --> animal>>. %1.00;0.81%')
diff --git a/Tests/examples/single_step/nal5/nal5.26.nal b/Tests/examples/single_step/nal5/nal5.26.nal
new file mode 100644
index 0000000..9d2ce11
--- /dev/null
+++ b/Tests/examples/single_step/nal5/nal5.26.nal
@@ -0,0 +1,12 @@
+'********** conditional abduction
+
+'If robin can fly then robin is a bird.
+<<robin --> [flying]> ==> <robin --> bird>>.
+
+'If robin both swims and flies then robin is a bird.
+<(&&,<robin --> swimmer>,<robin --> [flying]>) ==> <robin --> bird>>.
+
+2
+
+'I guess robin swims.
+''outputMustContain('<robin --> swimmer>. %1.00;0.45%')
diff --git a/Tests/examples/single_step/nal5/nal5.27.nal b/Tests/examples/single_step/nal5/nal5.27.nal
new file mode 100644
index 0000000..a297af8
--- /dev/null
+++ b/Tests/examples/single_step/nal5/nal5.27.nal
@@ -0,0 +1,13 @@
+'********** conditional abduction
+
+'If robin has wings and chirps, then robin is a bird.
+<(&&,<robin --> [with_wings]>,<robin --> [chirping]>) ==> <robin --> bird>>.
+
+'If robin can fly, has wings, and chirps, then robin is a bird.
+<(&&,<robin --> [flying]>,<robin --> [with_wings]>,<robin --> [chirping]>) ==> <robin --> bird>>.
+
+5
+
+'I guess that robin can fly.
+''outputMustContain('<robin --> [flying]>. %1.00;0.45%')
+
diff --git a/Tests/examples/single_step/nal5/nal5.28.nal b/Tests/examples/single_step/nal5/nal5.28.nal
new file mode 100644
index 0000000..e9a05df
--- /dev/null
+++ b/Tests/examples/single_step/nal5/nal5.28.nal
@@ -0,0 +1,16 @@
+'********** conditional abduction
+
+'If robin can fly and it has wings, then robin is living.
+<(&&,<robin --> [flying]>,<robin --> [with_wings]>) ==> <robin --> [living]>>. %0.9%
+
+'If robin can fly and robin is a bird then robin is living.
+<(&&,<robin --> [flying]>,<robin --> bird>) ==> <robin --> [living]>>.
+
+3
+
+'I guess if robin is a bird, then robin has wings.
+''outputMustContain('<<robin --> bird> ==> <robin --> [with_wings]>>. %1.00;0.42%')
+
+'I guess if robin has wings, then robin is a bird.
+''outputMustContain('<<robin --> [with_wings]> ==> <robin --> bird>>. %0.90;0.45%')
+
diff --git a/Tests/examples/single_step/nal5/nal5.29.nal b/Tests/examples/single_step/nal5/nal5.29.nal
new file mode 100644
index 0000000..2e627e3
--- /dev/null
+++ b/Tests/examples/single_step/nal5/nal5.29.nal
@@ -0,0 +1,12 @@
+'********** conditional induction
+
+'If robin can fly and robin chirps, then robin is a bird.
+<(&&,<robin --> [chirping]>,<robin --> [flying]>) ==> <robin --> bird>>.
+
+'If robin can fly then usually robin has a beak.
+<<robin --> [flying]> ==> <robin --> [with_beak]>>. %0.90%
+
+8
+
+'I guess that if robin chirps and robin has a beak, then robin is a bird.
+''outputMustContain('<(&&,<robin --> [chirping]>,<robin --> [with_beak]>) ==> <robin --> bird>>. %1.00;0.42%')
diff --git a/Tests/examples/single_step/nal5/nal5.3.nal b/Tests/examples/single_step/nal5/nal5.3.nal
new file mode 100644
index 0000000..e39ae1c
--- /dev/null
+++ b/Tests/examples/single_step/nal5/nal5.3.nal
@@ -0,0 +1,16 @@
+'********** induction
+
+'If robin is a type of bird then robin is a type of animal.
+<<robin --> bird> ==> <robin --> animal>>.
+
+'If robin is a type of bird then robin can fly.
+<<robin --> bird> ==> <robin --> [flying]>>. %0.80%
+
+5
+
+'I guess if robin can fly then robin is a type of animal.
+''outputMustContain('<<robin --> [flying]> ==> <robin --> animal>>. %1.00;0.39%')
+
+'I guess if robin is a type of animal then robin can fly.
+''outputMustContain('<<robin --> animal> ==> <robin --> [flying]>>. %0.80;0.45%')
+
diff --git a/Tests/examples/single_step/nal5/nal5.4.nal b/Tests/examples/single_step/nal5/nal5.4.nal
new file mode 100644
index 0000000..fd36b53
--- /dev/null
+++ b/Tests/examples/single_step/nal5/nal5.4.nal
@@ -0,0 +1,17 @@
+'********** abduction
+
+'If robin is a type of bird then robin is a type of animal.
+<<robin --> bird> ==> <robin --> animal>>.
+
+'If robin can fly then robin is probably a type of animal.
+<<robin --> [flying]> ==> <robin --> animal>>. %0.8%
+
+5
+
+'I guess if robin is a type of bird then robin can fly.
+''outputMustContain('<<robin --> bird> ==> <robin --> [flying]>>. %1.00;0.39%')
+
+'I guess if robin can fly then robin is a type of bird.
+''outputMustContain('<<robin --> [flying]> ==> <robin --> bird>>. %0.80;0.45%')
+
+
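Note: the asymmetric pairs of values in nal5.3/nal5.4 (%1.00;0.39% one way, %0.80;0.45% the other) fall out of the weak induction/abduction rule. A minimal sketch, assuming k = 1:

def induction(f1, c1, f2, c2, k=1):
    # From <M ==> P> (f1,c1) and <M ==> S> (f2,c2), derive <S ==> P>:
    # frequency is carried by the P-premise, evidence weight is discounted by f2.
    w = f2 * c1 * c2
    return f1, w / (w + k)

# nal5.3: <bird ==> animal> %1.00;0.90% and <bird ==> [flying]> %0.80;0.90%
print(induction(1.0, 0.9, 0.8, 0.9))  # (1.0, ~0.39) -> <[flying] ==> animal>
print(induction(0.8, 0.9, 1.0, 0.9))  # (0.8, ~0.45) -> <animal ==> [flying]>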
diff --git a/Tests/examples/single_step/nal5/nal5.5.nal b/Tests/examples/single_step/nal5/nal5.5.nal
new file mode 100644
index 0000000..eb781db
--- /dev/null
+++ b/Tests/examples/single_step/nal5/nal5.5.nal
@@ -0,0 +1,13 @@
+'********** detachment
+
+'If robin is a type of bird then robin is a type of animal.
+<<robin --> bird> ==> <robin --> animal>>.
+
+'Robin is a type of bird.
+<robin --> bird>.
+
+10
+
+'Robin is a type of animal.
+''outputMustContain('<robin --> animal>. %1.00;0.81%')
+
diff --git a/Tests/examples/single_step/nal5/nal5.6.nal b/Tests/examples/single_step/nal5/nal5.6.nal
new file mode 100644
index 0000000..3efb365
--- /dev/null
+++ b/Tests/examples/single_step/nal5/nal5.6.nal
@@ -0,0 +1,12 @@
+'********** detachment
+
+'Usually if robin is a type of bird then robin is a type of animal.
+<<robin --> bird> ==> <robin --> animal>>. %0.70;0.90%
+
+'Robin is a type of animal.
+<robin --> animal>.
+
+10
+
+'I guess robin is a type of bird.
+''outputMustContain('<robin --> bird>. %1.00;0.36%')
\ No newline at end of file
diff --git a/Tests/examples/single_step/nal5/nal5.7.nal b/Tests/examples/single_step/nal5/nal5.7.nal
new file mode 100644
index 0000000..4e428d0
--- /dev/null
+++ b/Tests/examples/single_step/nal5/nal5.7.nal
@@ -0,0 +1,12 @@
+'********** comparison
+
+'If robin is a type of bird then robin is a type of animal.
+<<robin --> bird> ==> <robin --> animal>>.
+
+'If robin is a type of bird then robin can fly.
+<<robin --> bird> ==> <robin --> [flying]>>. %0.80%
+
+5
+
+'I guess robin is a type of animal if and only if robin can fly.
+''outputMustContain('<<robin --> [flying]> <=> <robin --> animal>>. %0.80;0.45%')
diff --git a/Tests/examples/single_step/nal5/nal5.8.nal b/Tests/examples/single_step/nal5/nal5.8.nal
new file mode 100644
index 0000000..703b0f3
--- /dev/null
+++ b/Tests/examples/single_step/nal5/nal5.8.nal
@@ -0,0 +1,12 @@
+'********** comparison
+
+'If robin is a type of bird then usually robin is a type of animal.
+<<robin --> bird> ==> <robin --> animal>>. %0.70%
+
+'If robin can fly then robin is a type of animal.
+<<robin --> [flying]> ==> <robin --> animal>>.
+
+19
+
+'I guess robin is a type of bird if and only if robin can fly.
+''outputMustContain('<<robin --> [flying]> <=> <robin --> bird>>. %0.70;0.45%')
diff --git a/Tests/examples/single_step/nal5/nal5.9.nal b/Tests/examples/single_step/nal5/nal5.9.nal
new file mode 100644
index 0000000..6cf9a2c
--- /dev/null
+++ b/Tests/examples/single_step/nal5/nal5.9.nal
@@ -0,0 +1,13 @@
+'********** analogy
+
+'If robin is a type of bird then robin is a type of animal.
+<<robin --> bird> ==> <robin --> animal>>.
+
+'Usually, robin is a type of bird if and only if robin can fly.
+<<robin --> bird> <=> <robin --> [flying]>>. %0.80%
+
+14
+
+'If robin can fly then probably robin is a type of animal.
+''outputMustContain('<<robin --> [flying]> ==> <robin --> animal>>. %0.80;0.65%')
+
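Note: the %0.80;0.65% in nal5.9 is the analogy rule, the one strong rule here whose confidence is discounted by a frequency. A minimal sketch (names illustrative):

def analogy(f1, c1, f2, c2):
    # Implication premise (f1,c1) combined with an equivalence premise (f2,c2);
    # the symmetric premise's frequency also discounts the confidence.
    return f1 * f2, c1 * c2 * f2

# nal5.9: <bird ==> animal> %1.00;0.90% + <bird <=> [flying]> %0.80;0.90%
print(analogy(1.0, 0.9, 0.8, 0.9))  # (0.8, ~0.648) -> %0.80;0.65%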
diff --git a/Tests/examples/single_step/nal6/nal6.0.nal b/Tests/examples/single_step/nal6/nal6.0.nal
new file mode 100644
index 0000000..8e71937
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.0.nal
@@ -0,0 +1,13 @@
+'********** variable unification
+
+'If something is a bird, then it is a flyer.
+<<$x --> bird> ==> <$x --> flyer>>.
+
+'If something is a bird, then it is not a flyer.
+<<$y --> bird> ==> <$y --> flyer>>. %0.00;0.70%
+
+1
+
+'If something is a bird, then usually, it is a flyer.
+''outputMustContain('<<$1 --> bird> ==> <$1 --> flyer>>. %0.79;0.92%')
+
diff --git a/Tests/examples/single_step/nal6/nal6.1.nal b/Tests/examples/single_step/nal6/nal6.1.nal
new file mode 100644
index 0000000..a8fb600
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.1.nal
@@ -0,0 +1,15 @@
+'********** variable unification
+
+'If something is a bird, then it is an animal.
+<<$x --> bird> ==> <$x --> animal>>.
+
+'If something is a robin, then it is a bird.
+<<$y --> robin> ==> <$y --> bird>>.
+
+3
+
+'If something is a robin, then it is an animal.
+''outputMustContain('<<$1 --> robin> ==> <$1 --> animal>>. %1.00;0.81%')
+
+'I guess that if something is an animal, then it is a robin.
+''outputMustContain('<<$1 --> animal> ==> <$1 --> robin>>. %1.00;0.45%')
diff --git a/Tests/examples/single_step/nal6/nal6.10.nal b/Tests/examples/single_step/nal6/nal6.10.nal
new file mode 100644
index 0000000..4e1c738
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.10.nal
@@ -0,0 +1,13 @@
+'********** variable elimination
+
+'Some bird can swim.
+(&&,<#x --> bird>,<#x --> swimmer>).
+
+'Swan is a type of bird.
+<swan --> bird>. %0.90%
+
+3
+
+'I guess swan can swim.
+''outputMustContain('<swan --> swimmer>. %0.90;0.43%')
+
diff --git a/Tests/examples/single_step/nal6/nal6.11.nal b/Tests/examples/single_step/nal6/nal6.11.nal
new file mode 100644
index 0000000..b29bad8
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.11.nal
@@ -0,0 +1,13 @@
+'********** variable elimination
+
+'Tweety has wings.
+<{Tweety} --> [with_wings]>.
+
+'If something can chirp and has wings, then it is a bird.
+<(&&,<$x --> [chirping]>,<$x --> [with_wings]>) ==> <$x --> bird>>.
+
+23
+
+'If Tweety can chirp, then it is a bird.
+''outputMustContain('<<{Tweety} --> [chirping]> ==> <{Tweety} --> bird>>. %1.00;0.81%')
+
diff --git a/Tests/examples/single_step/nal6/nal6.12.nal b/Tests/examples/single_step/nal6/nal6.12.nal
new file mode 100644
index 0000000..eb36497
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.12.nal
@@ -0,0 +1,14 @@
+
+'********** variable elimination
+
+'If something can fly, chirp, and eats worms, then it is a bird.
+<(&&,<$x --> flyer>,<$x --> [chirping]>, <(*, $x, worms) --> food>) ==> <$x --> bird>>.
+
+'Tweety can fly.
+<{Tweety} --> flyer>.
+
+7
+
+'If Tweety can chirp and eats worms, then it is a bird.
+''outputMustContain('<(&&,<(*,{Tweety},worms) --> food>,<{Tweety} --> [chirping]>) ==> <{Tweety} --> bird>>. %1.00;0.81%')
+
diff --git a/Tests/examples/single_step/nal6/nal6.13.nal b/Tests/examples/single_step/nal6/nal6.13.nal
new file mode 100644
index 0000000..4c982af
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.13.nal
@@ -0,0 +1,12 @@
+'********** multiple variable elimination
+
+'Every lock can be opened by every key.
+<(&&,<$x --> key>,<$y --> lock>) ==> <$y --> (/,open,$x,_)>>.
+
+'Lock-1 is a lock.
+<{lock1} --> lock>.
+
+20
+
+'Lock-1 can be opened by every key.
+''outputMustContain('<<$1 --> key> ==> <{lock1} --> (/,open,$1,_)>>. %1.00;0.81%')
diff --git a/Tests/examples/single_step/nal6/nal6.14.nal b/Tests/examples/single_step/nal6/nal6.14.nal
new file mode 100644
index 0000000..a0e4601
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.14.nal
@@ -0,0 +1,12 @@
+'********** multiple variable elimination
+
+'Every lock can be opened by some key.
+<<$x --> lock> ==> (&&,<#y --> key>,<$x --> (/,open,#y,_)>)>.
+
+'Lock-1 is a lock.
+<{lock1} --> lock>.
+
+9
+
+'Some key can open Lock-1.
+''outputMustContain('(&&,<#1 --> key>,<{lock1} --> (/,open,#1,_)>). %1.00;0.81%')
diff --git a/Tests/examples/single_step/nal6/nal6.15.nal b/Tests/examples/single_step/nal6/nal6.15.nal
new file mode 100644
index 0000000..c8f88ff
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.15.nal
@@ -0,0 +1,13 @@
+'********** multiple variable elimination
+
+'There is a lock that can be opened by every key.
+(&&,<#x --> lock>,<<$y --> key> ==> <#x --> (/,open,$y,_)>>).
+
+'Lock-1 is a lock.
+<{lock1} --> lock>.
+
+9
+
+'I guess Lock-1 can be opened by every key.
+''outputMustContain('<<$1 --> key> ==> <{lock1} --> (/,open,$1,_)>>. %1.00;0.43%')
+
diff --git a/Tests/examples/single_step/nal6/nal6.16.nal b/Tests/examples/single_step/nal6/nal6.16.nal
new file mode 100644
index 0000000..52a5703
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.16.nal
@@ -0,0 +1,12 @@
+'********** multiple variable elimination
+
+'There is a key that can open some lock.
+(&&,<#x --> (/,open,#y,_)>,<#x --> lock>,<#y --> key>).
+
+'Lock-1 is a lock.
+<{lock1} --> lock>.
+
+18
+
+'I guess there is a key that can open Lock-1.
+''outputMustContain('(&&,<#1 --> key>,<{lock1} --> (/,open,#1,_)>). %1.00;0.43%')
diff --git a/Tests/examples/single_step/nal6/nal6.17.nal b/Tests/examples/single_step/nal6/nal6.17.nal
new file mode 100644
index 0000000..02364fa
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.17.nal
@@ -0,0 +1,23 @@
+'********** variable introduction
+
+'A swan is a bird.
+<swan --> bird>.
+
+'A swan is usually a swimmer.
+<swan --> swimmer>. %0.80%
+
+3
+
+'I guess a bird is usually a swimmer.
+''outputMustContain('<<$1 --> bird> ==> <$1 --> swimmer>>. %0.80;0.45%')
+
+'I guess a swimmer is a bird.
+''outputMustContain('<<$1 --> swimmer> ==> <$1 --> bird>>. %1.00;0.39%')
+
+'I guess a bird is usually a swimmer, and the other way around.
+''outputMustContain('<<$1 --> bird> <=> <$1 --> swimmer>>. %0.80;0.45%')
+
+'Some bird can swim.
+''outputMustContain('(&&,<#1 --> bird>,<#1 --> swimmer>). %0.80;0.81%')
+
+
diff --git a/Tests/examples/single_step/nal6/nal6.18.nal b/Tests/examples/single_step/nal6/nal6.18.nal
new file mode 100644
index 0000000..6eda49c
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.18.nal
@@ -0,0 +1,22 @@
+'********** variable introduction
+
+'A gull is a swimmer.
+<gull --> swimmer>.
+
+'Usually, a swan is a swimmer.
+<swan --> swimmer>. %0.80%
+
+3
+
+'I guess what can be said about gull usually can also be said about swan.
+''outputMustContain('<<gull --> $1> ==> <swan --> $1>>. %0.80;0.45%')
+
+'I guess what can be said about swan can also be said about gull.
+''outputMustContain('<<swan --> $1> ==> <gull --> $1>>. %1.00;0.39%')
+
+'I guess gull and swan share most properties.
+''outputMustContain('<<gull --> $1> <=> <swan --> $1>>. %0.80;0.45%')
+
+'Gull and swan have some common property.
+''outputMustContain('(&&,<gull --> #1>,<swan --> #1>). %0.80;0.81%')
+
diff --git a/Tests/examples/single_step/nal6/nal6.19.nal b/Tests/examples/single_step/nal6/nal6.19.nal
new file mode 100644
index 0000000..c1a178a
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.19.nal
@@ -0,0 +1,16 @@
+'********** variables introduction
+
+'Key-1 opens Lock-1.
+<{key1} --> (/,open,_,{lock1})>.
+
+'Key-1 is a key.
+<{key1} --> key>.
+
+45
+
+'I guess every key can open Lock-1.
+''outputMustContain('<<$1 --> key> ==> <$1 --> (/,open,_,{lock1})>>. %1.00;0.45%')
+
+'Some key can open Lock-1.
+''//outputMustContain('(&&,<#1 --> (/,open,_,{lock1})>,<#1 --> key>). %1.00;0.81%') //reversed
+'' outputMustContain('(&&,<#1 --> (/,open,_,{lock1})>,<#1 --> key>). %1.00;0.25%')
diff --git a/Tests/examples/single_step/nal6/nal6.2.nal b/Tests/examples/single_step/nal6/nal6.2.nal
new file mode 100644
index 0000000..f608f75
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.2.nal
@@ -0,0 +1,26 @@
+'********** variable unification
+
+'If something is a swan, then it is a bird.
+<<$x --> swan> ==> <$x --> bird>>. %1.00;0.80%
+
+'If something is a swan, then it is a swimmer.
+<<$y --> swan> ==> <$y --> swimmer>>. %0.80%
+
+3
+
+'I believe that if something is a swan, then it is a bird or a swimmer.
+''outputMustContain('<<$1 --> swan> ==> (||,<$1 --> bird>,<$1 --> swimmer>)>. %1.00;0.72%')
+
+'I believe that if something is a swan, then usually, it is both a bird and a swimmer.
+''outputMustContain('<<$1 --> swan> ==> (&&,<$1 --> bird>,<$1 --> swimmer>)>. %0.80;0.72%')
+
+'I guess if something is a swimmer, then it is a bird.
+''outputMustContain('<<$1 --> swimmer> ==> <$1 --> bird>>. %1.00;0.37%')
+
+'I guess if something is a bird, then it is a swimmer.
+''outputMustContain('<<$1 --> bird> ==> <$1 --> swimmer>>. %0.80;0.42%')
+
+'I guess something is a bird, if and only if it is a swimmer.
+''outputMustContain('<<$1 --> bird> <=> <$1 --> swimmer>>. %0.80;0.42%')
+
+
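Note: the %…;0.72% values in nal6.2 come from the composition rules, whose confidence is simply the product of the premise confidences. A minimal sketch (names illustrative):

def intersection(f1, c1, f2, c2):
    # (&&, ...) composition: frequencies multiply.
    return f1 * f2, c1 * c2

def union(f1, c1, f2, c2):
    # (||, ...) composition: frequencies combine disjunctively.
    return 1 - (1 - f1) * (1 - f2), c1 * c2

# nal6.2: premises %1.00;0.80% and %0.80;0.90%
print(union(1.0, 0.8, 0.8, 0.9))         # (1.0, 0.72)  -> (||, ...) %1.00;0.72%
print(intersection(1.0, 0.8, 0.8, 0.9))  # (0.8, 0.72)  -> (&&, ...) %0.80;0.72%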
diff --git a/Tests/examples/single_step/nal6/nal6.20.nal b/Tests/examples/single_step/nal6/nal6.20.nal
new file mode 100644
index 0000000..e932200
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.20.nal
@@ -0,0 +1,15 @@
+'********** multiple variables introduction
+
+'Lock-1 can be opened by every key.
+<<$x --> key> ==> <{lock1} --> (/,open,$x,_)>>.
+
+'Lock-1 is a lock.
+<{lock1} --> lock>.
+
+166
+
+'There is a lock that can be opened by every key.
+''outputMustContain('(&&,<#1 --> lock>,<<$2 --> key> ==> <#1 --> (/,open,$2,_)>>). %1.00;0.81%')
+
+'I guess every lock can be opened by every key.
+''outputMustContain('<(&&,<$1 --> key>,<$2 --> lock>) ==> <$2 --> (/,open,$1,_)>>. %1.00;0.45%')
diff --git a/Tests/examples/single_step/nal6/nal6.21.nal b/Tests/examples/single_step/nal6/nal6.21.nal
new file mode 100644
index 0000000..d367be0
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.21.nal
@@ -0,0 +1,16 @@
+'********** multiple variables introduction
+
+'Lock-1 can be opened by some key.
+(&&,<#x --> key>,<{lock1} --> (/,open,#x,_)>).
+
+'Lock-1 is a lock.
+<{lock1} --> lock>.
+
+17
+
+'There is a key that can open some lock.
+''outputMustContain('(&&,<#1 --> key>,<#2 --> (/,open,#1,_)>,<#2 --> lock>). %1.00;0.81%')
+
+'I guess every lock can be opened by some key.
+''outputMustContain('<<$1 --> lock> ==> (&&,<#2 --> key>,<$1 --> (/,open,#2,_)>)>. %1.00;0.45%')
+
diff --git a/Tests/examples/single_step/nal6/nal6.22.nal b/Tests/examples/single_step/nal6/nal6.22.nal
new file mode 100644
index 0000000..ca15cc1
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.22.nal
@@ -0,0 +1,11 @@
+'********** recursion
+'0 is a number
+<0 --> num>. %1.00;0.90%
+'If n is a number, n+1 is also a number
+<<$1 --> num> ==> <(*,$1) --> num>>. %1.00;0.90%
+'3 is a number?
+<(*,(*,(*,0))) --> num>?
+70000
+'I guess 3 is a number
+''outputMustContain('<(*,(*,(*,0))) --> num>. %1.00;0.66%')
+
diff --git a/Tests/examples/single_step/nal6/nal6.23.nal b/Tests/examples/single_step/nal6/nal6.23.nal
new file mode 100644
index 0000000..1f01a55
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.23.nal
@@ -0,0 +1,14 @@
+'***** second level variable unification
+
+'there is a lock which is opened by all keys
+(&&,<#1 --> lock>,<<$2 --> key> ==> <#1 --> (/,open,$2,_)>>). %1.00;0.90%
+
+'key1 is a key
+<{key1} --> key>. %1.00;0.90%
+
+5
+
+'there is a lock which is opened by key1
+''outputMustContain('(&&,<#1 --> (/,open,{key1},_)>,<#1 --> lock>). %1.00;0.81%')
+
+
diff --git a/Tests/examples/single_step/nal6/nal6.24.nal b/Tests/examples/single_step/nal6/nal6.24.nal
new file mode 100644
index 0000000..226d9c9
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.24.nal
@@ -0,0 +1,16 @@
+'***** second level variable unification
+
+'all locks are opened by some key
+<<$1 --> lock> ==> (&&,<#2 --> key>,<$1 --> (/,open,#2,_)>)>. %1.00;0.90%
+
+'key1 is a key
+<{key1} --> key>. %1.00;0.90%
+
+5
+
+'maybe all locks are opened by key1
+''outputMustContain('')
+//''outputMustContain('<<$1 --> lock> ==> <$1 --> (/,open,{key1},_)>>. %1.00;0.43%')
+
+
+
diff --git a/Tests/examples/single_step/nal6/nal6.25.nal b/Tests/examples/single_step/nal6/nal6.25.nal
new file mode 100644
index 0000000..f5a7b67
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.25.nal
@@ -0,0 +1,14 @@
+'***** second variable introduction (induction)
+
+'if something opens lock1, it is a key
+<<lock1 --> (/,open,$1,_)> ==> <$1 --> key>>.
+
+'lock1 is a lock
+<lock1 --> lock>.
+
+7
+
+'there is a lock with the property that when opened by something, this something is a key (induction)
+''outputMustContain('<(&&,<#1 --> (/,open,$2,_)>,<#1 --> lock>) ==> <$2 --> key>>. %1.00;0.45%')
+
+
diff --git a/Tests/examples/single_step/nal6/nal6.26.nal b/Tests/examples/single_step/nal6/nal6.26.nal
new file mode 100644
index 0000000..98d13ce
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.26.nal
@@ -0,0 +1,13 @@
+'***** variable elimination (deduction)
+
+'lock1 is a lock
+<lock1 --> lock>. %1.00;0.90%
+
+'there is a lock with the property that when opened by something, this something is a key
+<(&&,<#1 --> lock>,<#1 --> (/,open,$2,_)>) ==> <$2 --> key>>. %1.00;0.90%
+
+4
+
+'whatever opens lock1 is a key
+''outputMustContain('<<lock1 --> (/,open,$1,_)> ==> <$1 --> key>>. %1.00;0.81%')
+
diff --git a/Tests/examples/single_step/nal6/nal6.27.nal b/Tests/examples/single_step/nal6/nal6.27.nal
new file mode 100644
index 0000000..babf189
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.27.nal
@@ -0,0 +1,14 @@
+'***** abduction with variable elimination (abduction)
+
+'whatever opens lock1 is a key
+<<lock1 --> (/,open,$1,_)> ==> <$1 --> key>>. %1.00;0.90%
+
+'there is a lock with the property that when opened by something, this something is a key
+<(&&,<#1 --> lock>,<#1 --> (/,open,$2,_)>) ==> <$2 --> key>>. %1.00;0.90%
+
+10
+
+'lock1 is a lock
+''outputMustContain('<lock1 --> lock>. %1.00;0.45%')
+
+
diff --git a/Tests/examples/single_step/nal6/nal6.3.nal b/Tests/examples/single_step/nal6/nal6.3.nal
new file mode 100644
index 0000000..57393b5
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.3.nal
@@ -0,0 +1,25 @@
+'********** variable unification
+
+'What can be said about bird can also be said about robin.
+<<bird --> $x> ==> <robin --> $x>>.
+
+'What can be said about swimmer usually can also be said about robin.
+<<swimmer --> $y> ==> <robin --> $y>>. %0.70;0.90%
+
+3
+
+'What can be said about bird and swimmer can also be said about robin.
+''outputMustContain('<(&&,<bird --> $1>,<swimmer --> $1>) ==> <robin --> $1>>. %1.00;0.81%')
+
+'What can be said about bird or swimmer can also be said about robin.
+''outputMustContain('<(||,<bird --> $1>,<swimmer --> $1>) ==> <robin --> $1>>. %0.70;0.81%')
+
+'I guess what can be said about bird can also be said about swimmer.
+''outputMustContain('<<bird --> $1> ==> <swimmer --> $1>>. %1.00;0.36%')
+
+'I guess what can be said about swimmer can also be said about bird.
+''outputMustContain('<<swimmer --> $1> ==> <bird --> $1>>. %0.70;0.45%')
+
+'I guess bird and swimmer share most properties.
+''outputMustContain('<<bird --> $1> <=> <swimmer --> $1>>. %0.70;0.45%')
+
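Note: the symmetric %0.70;0.45% conclusions in nal6.3 (and nal5.7/nal5.8) use the comparison rule. A minimal sketch, assuming k = 1:

def comparison(f1, c1, f2, c2, k=1):
    # Weak rule deriving an equivalence from two implications sharing a term.
    f0 = f1 + f2 - f1 * f2
    w = f0 * c1 * c2
    f = (f1 * f2 / f0) if f0 != 0 else 0.0
    return f, w / (w + k)

# nal6.3: premises %1.00;0.90% and %0.70;0.90% -> <=> conclusion %0.70;0.45%
print(comparison(1.0, 0.9, 0.7, 0.9))  # (0.7, ~0.4475)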
diff --git a/Tests/examples/single_step/nal6/nal6.4.nal b/Tests/examples/single_step/nal6/nal6.4.nal
new file mode 100644
index 0000000..16aca0b
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.4.nal
@@ -0,0 +1,13 @@
+'********** variable unification
+
+'If something can fly and chirp, then it is a bird.
+<(&&,<$x --> flyer>,<$x --> [chirping]>) ==> <$x --> bird>>.
+
+'If something has wings, then it can fly.
+<<$y --> [with_wings]> ==> <$y --> flyer>>.
+
+8
+
+'If something can chirp and has wings, then it is a bird.
+''outputMustContain('<(&&,<$1 --> [chirping]>,<$1 --> [with_wings]>) ==> <$1 --> bird>>. %1.00;0.81%')
+
diff --git a/Tests/examples/single_step/nal6/nal6.5.nal b/Tests/examples/single_step/nal6/nal6.5.nal
new file mode 100644
index 0000000..4cf1c3b
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.5.nal
@@ -0,0 +1,17 @@
+'********** variable unification
+
+'If something can fly, chirp, and eats worms, then it is a bird.
+<(&&,<$x --> flyer>,<$x --> [chirping]>, <(*, $x, worms) --> food>) ==> <$x --> bird>>.
+
+'If something can chirp and has wings, then it is a bird.
+<(&&,<$x --> [chirping]>,<$x --> [with_wings]>) ==> <$x --> bird>>.
+
+''//6
+12
+
+'If something can fly and eats worms, then I guess it has wings.
+''outputMustContain('<(&&,<$1 --> flyer>,<(*,$1,worms) --> food>) ==> <$1 --> [with_wings]>>. %1.00;0.45%')
+
+'I guess if something has wings, then it can fly and eats worms.
+''outputMustContain('<<$1 --> [with_wings]> ==> (&&,<$1 --> flyer>,<(*,$1,worms) --> food>)>. %1.00;0.45%')
+
diff --git a/Tests/examples/single_step/nal6/nal6.6.nal b/Tests/examples/single_step/nal6/nal6.6.nal
new file mode 100644
index 0000000..0bafe1c
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.6.nal
@@ -0,0 +1,13 @@
+'********** variable unification
+
+'If something can fly and eats worms, then it is a bird.
+<(&&,<$x --> flyer>,<(*,$x,worms) --> food>) ==> <$x --> bird>>.
+
+'If something can fly, then it has wings.
+<<$y --> flyer> ==> <$y --> [with_wings]>>.
+
+// 4 originally
+13
+
+'If something has wings and eats worms, then I guess it is a bird.
+''outputMustContain('<(&&,<$1 --> [with_wings]>,<(*,$1,worms) --> food>) ==> <$1 --> bird>>. %1.00;0.45%')
diff --git a/Tests/examples/single_step/nal6/nal6.7.nal b/Tests/examples/single_step/nal6/nal6.7.nal
new file mode 100644
index 0000000..79c338b
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.7.nal
@@ -0,0 +1,14 @@
+'****** variable elimination
+
+'If something is a bird, then it is an animal.
+<<$x --> bird> ==> <$x --> animal>>.
+
+'A robin is a bird.
+<robin --> bird>.
+
+3
+
+'A robin is an animal.
+''outputMustContain('<robin --> animal>. %1.00;0.81%')
+
+
diff --git a/Tests/examples/single_step/nal6/nal6.8.nal b/Tests/examples/single_step/nal6/nal6.8.nal
new file mode 100644
index 0000000..5c69fab
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.8.nal
@@ -0,0 +1,14 @@
+'********** variable elimination
+
+'If something is a bird, then it is an animal.
+<<$x --> bird> ==> <$x --> animal>>.
+
+'A tiger is an animal.
+<tiger --> animal>.
+
+10
+
+'I guess that a tiger is a bird.
+''outputMustContain('<tiger --> bird>. %1.00;0.45%')
+
+
diff --git a/Tests/examples/single_step/nal6/nal6.9.nal b/Tests/examples/single_step/nal6/nal6.9.nal
new file mode 100644
index 0000000..d9dd2c2
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.9.nal
@@ -0,0 +1,13 @@
+'********** variable elimination
+
+'Something is an animal if and only if it is a bird.
+<<$x --> animal> <=> <$x --> bird>>.
+
+'A robin is a bird.
+<robin --> bird>.
+
+3
+
+'A robin is an animal.
+''outputMustContain('<robin --> animal>. %1.00;0.81%')
+
diff --git a/Tests/examples/single_step/nal6/nal6.birdClaimedByBob.nal b/Tests/examples/single_step/nal6/nal6.birdClaimedByBob.nal
new file mode 100644
index 0000000..6f50f1a
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.birdClaimedByBob.nal
@@ -0,0 +1,8 @@
+' from https://code.google.com/archive/p/open-nars/issues/7
+
+<(&,<{Tweety} --> bird>,<bird --> fly>) --> claimedByBob>.
+<<(&,<#1 --> $2>,<$3 --> #1>) --> claimedByBob> ==> <<$3 --> $2> --> claimedByBob>>.
+
+<<{Tweety} --> fly> --> claimedByBob>?
+100
+''outputMustContain('<<{Tweety} --> fly> --> claimedByBob>. %1.00;0.81%')
diff --git a/Tests/examples/single_step/nal6/nal6.can_of_worms.nal b/Tests/examples/single_step/nal6/nal6.can_of_worms.nal
new file mode 100644
index 0000000..b4c8e87
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.can_of_worms.nal
@@ -0,0 +1,6 @@
+<0 --> num>. %1.00;0.90%
+<0 --> (/,num,_)>. %1.00;0.90%
+
+20
+
+''outputMustContain('<<$1 --> num> ==> <$1 --> (/,num,_)>>. %1.00;0.45%')
diff --git a/Tests/examples/single_step/nal6/nal6.nlp1.nal b/Tests/examples/single_step/nal6/nal6.nlp1.nal
new file mode 100644
index 0000000..13a2b5d
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.nlp1.nal
@@ -0,0 +1,4 @@
+<(\,REPRESENT,_,CAT) --> cat>. %1.00;0.90%
+<(\,(\,REPRESENT,_,<(*,CAT,FISH) --> FOOD>),_,eat,fish) --> cat>.
+5
+''outputMustContain('<<(\,REPRESENT,_,$1) --> $2> ==> <(\,(\,REPRESENT,_,<(*,$1,FISH) --> FOOD>),_,eat,fish) --> $2>>. %1.00;0.40%')
diff --git a/Tests/examples/single_step/nal6/nal6.nlp2.nal b/Tests/examples/single_step/nal6/nal6.nlp2.nal
new file mode 100644
index 0000000..1f4e940
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.nlp2.nal
@@ -0,0 +1,4 @@
+<cat --> (/,(/,REPRESENT,_,<(*,CAT,FISH) --> FOOD>),_,eat,fish)>.
+<cat --> CAT>. %1.00;0.90%
+300
+''outputMustContain('<<$1 --> $2> ==> <$1 --> (/,(/,REPRESENT,_,<(*,$2,FISH) --> FOOD>),_,eat,fish)>>. %1.00;0.40%')
diff --git a/Tests/examples/single_step/nal6/nal6.redundant.nal b/Tests/examples/single_step/nal6/nal6.redundant.nal
new file mode 100644
index 0000000..3055e02
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.redundant.nal
@@ -0,0 +1,4 @@
+<<lock1 --> (/,open,$1,_)> ==> <$1 --> key>>.
+100
+''outputMustNotContain('<(&&,<lock1 --> (/,open,$1,_)>,<(*,$1,lock1) --> open>) ==> <$1 --> key>>. %1.00;0.81%')
+''outputMustNotContain('<<(*,$1,lock1) --> open> ==> <lock1 --> (/,open,$1,_)>>. %1.00;0.45%')
diff --git a/Tests/examples/single_step/nal6/nal6.symmetry.nal b/Tests/examples/single_step/nal6/nal6.symmetry.nal
new file mode 100644
index 0000000..1e03a6f
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.symmetry.nal
@@ -0,0 +1,5 @@
+<(*,a,b) --> like>. %1.00;0.90%
+<(*,b,a) --> like>. %1.00;0.90%
+<<(*,$1,$2) --> like> <=> <(*,$2,$1) --> like>>?
+20
+''outputMustContain('<<(*,$1,$2) --> like> <=> <(*,$2,$1) --> like>>. %1.00;0.40%')
diff --git a/Tests/examples/single_step/nal6/nal6.uncle.nal b/Tests/examples/single_step/nal6/nal6.uncle.nal
new file mode 100644
index 0000000..cbb0ed0
--- /dev/null
+++ b/Tests/examples/single_step/nal6/nal6.uncle.nal
@@ -0,0 +1,6 @@
+<tim --> (/,uncle,_,tom)>. %1.00;0.90%
+<tim --> (/,uncle,tom,_)>. %0.00;0.90%
+10
+''outputMustContain('<<$1 --> (/,uncle,_,$2)> ==> <$1 --> (/,uncle,$2,_)>>. %0.00;0.40%')
+'would be a strange variable introduction when it would be allowed to use ImageExt and not just looking at <SUBJ --> PRED>
+'this is a strange example I added..
diff --git a/Tests/examples/single_step/nal7/nal7.0.nal b/Tests/examples/single_step/nal7/nal7.0.nal
new file mode 100644
index 0000000..69a9391
--- /dev/null
+++ b/Tests/examples/single_step/nal7/nal7.0.nal
@@ -0,0 +1,14 @@
+'********** temporal deduction/explification
+
+'Someone enters room_101 after he opens door_101
+<<(*, $x, room_101) --> enter> =\> <(*, $x, door_101) --> open>>. %0.9%
+
+'Someone opens door_101 after he holds key_101
+<<(*, $y, door_101) --> open> =\> <(*, $y, key_101) --> hold>>. %0.8%
+
+5
+
+'If someone enters room_101, he should have held key_101 before
+''outputMustContain('<<(*,$1,room_101) --> enter> =\> <(*,$1,key_101) --> hold>>. %0.72;0.58%')
+'If someone holds key_101, he will enter room_101
+''outputMustContain('<<(*,$1,key_101) --> hold> =/> <(*,$1,room_101) --> enter>>. %1.00;0.37%')
diff --git a/Tests/examples/single_step/nal7/nal7.1.nal b/Tests/examples/single_step/nal7/nal7.1.nal
new file mode 100644
index 0000000..d5cb098
--- /dev/null
+++ b/Tests/examples/single_step/nal7/nal7.1.nal
@@ -0,0 +1,17 @@
+'********** temporal induction/comparison
+
+'Someone opens door_101 before he enters room_101
+<<(*, $x, door_101) --> open> =/> <(*, $x, room_101) --> enter>>. %0.9%
+
+'Someone opens door_101 after he holds key_101
+<<(*, $y, door_101) --> open> =\> <(*, $y, key_101) --> hold>>. %0.8%
+
+5
+
+'If someone holds key_101, he will enter room_101
+''outputMustContain('<<(*,$1,key_101) --> hold> =/> <(*,$1,room_101) --> enter>>. %0.90;0.39%')
+'If someone enters room_101, he should have held key_101 before
+''outputMustContain('<<(*,$1,room_101) --> enter> =\> <(*,$1,key_101) --> hold>>. %0.80;0.42%')
+'If someone holds key_101, it means he will enter room_101
+''outputMustContain('<<(*,$1,key_101) --> hold> </> <(*,$1,room_101) --> enter>>. %0.73;0.44%')
+
diff --git a/Tests/examples/single_step/nal7/nal7.15.nal b/Tests/examples/single_step/nal7/nal7.15.nal
new file mode 100644
index 0000000..499721f
--- /dev/null
+++ b/Tests/examples/single_step/nal7/nal7.15.nal
@@ -0,0 +1,12 @@
+'********** temporal analogy
+
+'If someone opens door_101, he will enter room_101
+<<(*, $x, door_101) --> open> =/> <(*, $x, room_101) --> enter>>. %0.95%
+
+'If someone enters room_101, it means he leaves corridor_100
+<<(*, $x, room_101) --> enter> <|> <(*, $x, corridor_100) --> leave>>.
+
+40
+
+'If someone opens door_101, he will leave corridor_100
+''outputMustContain('<<(*,$1,door_101) --> open> =/> <(*,$1,corridor_100) --> leave>>. %0.95;0.81%')
diff --git a/Tests/examples/single_step/nal7/nal7.18.nal b/Tests/examples/single_step/nal7/nal7.18.nal
new file mode 100644
index 0000000..ba3fc2e
--- /dev/null
+++ b/Tests/examples/single_step/nal7/nal7.18.nal
@@ -0,0 +1,14 @@
+'********** inference on tense
+
+'If someone holds key_101, he will enter room_101 (in 100 steps)
+<(&/,<(*, $x, key_101) --> hold>,+100) =/> <(*, $x, room_101) --> enter>>.
+
+'John held key_101
+<(*, John, key_101) --> hold>. :\:
+
+210
+
+'John will enter room_101
+''outputMustContain('<(*,John,room_101) --> enter>. :!95: %1.00;0.81%')
+
+'this one is working, but throws an exception
diff --git a/Tests/examples/single_step/nal7/nal7.19.nal b/Tests/examples/single_step/nal7/nal7.19.nal
new file mode 100644
index 0000000..63583f2
--- /dev/null
+++ b/Tests/examples/single_step/nal7/nal7.19.nal
@@ -0,0 +1,12 @@
+'********** inference on tense
+
+'If someone holds key_101, he will enter room_101 (in 100 steps)
+<(&/,<(*, $x, key_101) --> hold>,+100) =/> <(*, $x, room_101) --> enter>>.
+
+'John is entering room_101 now
+<(*,John,room_101) --> enter>. :|:
+
+15
+
+'John held key_101 (105 steps before)
+''outputMustContain('<(*,John,key_101) --> hold>. :!-105: %1.00;0.45%')
diff --git a/Tests/examples/single_step/nal7/nal7.2.nal b/Tests/examples/single_step/nal7/nal7.2.nal
new file mode 100644
index 0000000..c069d15
--- /dev/null
+++ b/Tests/examples/single_step/nal7/nal7.2.nal
@@ -0,0 +1,13 @@
+'********** inference on tense
+
+'John holds key_101 before he enters room_101
+<<(*,John,key_101) --> hold> =/> <(*,John,room_101) --> enter>>.
+
+'John is holding key_101 now
+<(*,John,key_101) --> hold>. :|:
+
+20
+
+'John will enter room_101
+''outputMustContain('<(*,John,room_101) --> enter>. :!5: %1.00;0.81%')
+
diff --git a/Tests/examples/single_step/nal7/nal7.3.nal b/Tests/examples/single_step/nal7/nal7.3.nal
new file mode 100644
index 0000000..a9200ff
--- /dev/null
+++ b/Tests/examples/single_step/nal7/nal7.3.nal
@@ -0,0 +1,12 @@
+'********** inference on tense
+
+'John holds key_101 before he enters room_101
+<<(*,John,key_101) --> hold> =/> <(*,John,room_101) --> enter>>. %1.00;0.90%
+
+'John entered room_101
+<(*,John,room_101) --> enter>. :\: %1.00;0.90%
+
+3
+
+''outputMustContain('<(*,John,key_101) --> hold>. :!-10: %1.00;0.45%')
+
diff --git a/Tests/examples/single_step/nal7/nal7.35.nal b/Tests/examples/single_step/nal7/nal7.35.nal
new file mode 100644
index 0000000..beb7565
--- /dev/null
+++ b/Tests/examples/single_step/nal7/nal7.35.nal
@@ -0,0 +1,13 @@
+'********** inference on tense
+
+'If John holds key_101, he will enter room_101
+<<(*,John,key_101) --> hold> =/> <(*,John,room_101) --> enter>>.
+
+'John is holding key_101 now
+<(*,John,key_101) --> hold>. :|:
+
+20
+
+'John will enter room_101
+''outputMustContain('<(*,John,room_101) --> enter>. :!5: %1.00;0.81%')
+
diff --git a/Tests/examples/single_step/nal7/nal7.36.nal b/Tests/examples/single_step/nal7/nal7.36.nal
new file mode 100644
index 0000000..d96ff3a
--- /dev/null
+++ b/Tests/examples/single_step/nal7/nal7.36.nal
@@ -0,0 +1,13 @@
+'********** deduction with interval summation
+
+' a + 1 = b
+<(&/, a, +1) =/> b>.
+
+' b + 1 = c
+<(&/, b, +1) =/> c>.
+
+10
+
+' a + 2 = c
+''outputMustContain('<(&/,a,+2) =/> c>. %1.00;0.81%')
+
diff --git a/Tests/examples/single_step/nal7/nal7.6.nal b/Tests/examples/single_step/nal7/nal7.6.nal
new file mode 100644
index 0000000..a9aec70
--- /dev/null
+++ b/Tests/examples/single_step/nal7/nal7.6.nal
@@ -0,0 +1,23 @@
+'********** induction on events
+
+'John is opening door_101
+<John --> (/,open,_,door_101)>. :|:
+
+6
+
+'John is entering room_101
+<John --> (/,enter,_,room_101)>. :|:
+
+20
+
+'If John enters room_101, he should have opened door_101 before
+''outputMustContain('<<John --> (/,enter,_,room_101)> =\> (&/,<John --> (/,open,_,door_101)>,+6)>. :!6: %1.00;0.45%')
+
+'new: variable introduction also in time:
+
+'If someone enters room_101, he should have opened door_101 before
+''outputMustContain('<<$1 --> (/,enter,_,room_101)> =\> (&/,<$1 --> (/,open,_,door_101)>,+6)>. :!6: %1.00;0.45%')
+
+'adjusted +2 to +3 in both conditions
+
+10
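Note: the :!n: occurrence stamps expected by these tense tests can be reproduced with simple interval arithmetic. A minimal sketch; the default event duration of 5 cycles is an assumption inferred from the expected outputs, not a documented constant of this codebase:

DURATION = 5  # assumed default shift when an implication carries no interval

def shift_forward(t_event, interval=0):
    # Detaching the consequent of <(&/, cond, +interval) =/> effect>.
    return t_event + (interval if interval else DURATION)

def shift_backward(t_event, interval=0):
    # Detaching the antecedent: go back across the interval plus the duration.
    return t_event - ((interval + DURATION) if interval else DURATION)

print(shift_forward(-5, 100))  # 95   -> ':!95:'   (nal7.18, premise ':\:' = -5)
print(shift_backward(0, 100))  # -105 -> ':!-105:' (nal7.19, premise ':|:' = 0)
print(shift_forward(0))        # 5    -> ':!5:'    (nal7.2 / nal7.35)
print(shift_backward(-5))      # -10  -> ':!-10:'  (nal7.3)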
diff --git a/Tests/examples/single_step/nal7/nal7.7.nal b/Tests/examples/single_step/nal7/nal7.7.nal
new file mode 100644
index 0000000..f64dd4a
--- /dev/null
+++ b/Tests/examples/single_step/nal7/nal7.7.nal
@@ -0,0 +1,21 @@
+'********** induction on events
+
+'John is holding key_101 now
+<(*,John,key_101) --> hold>. :|: %1.00;0.90%
+
+6
+
+' irrelevant 'outputMustContain('<John --> (/,hold,_,key_101)>. :\: %1.00;0.90%')
+' irrelevant 'outputMustContain('<key_101 --> (/,hold,John,_)>. :\: %1.00;0.90%')
+' irrelevant 'outputMustContain('<John --> (/,hold,_,key_101)>. :\: %1.00;0.90%')
+' irrelevant 'outputMustContain('<key_101 --> (/,hold,John,_)>. :\: %1.00;0.90%')
+
+'If John opens door_101, he will enter room_101
+<<(*,John,door_101) --> open> =/> <(*,John,room_101) --> enter>>. :|: %1.00;0.90%
+
+20
+
+'If John holds key_101 and opens door_101 (after 6 steps), he will enter room_101
+''outputMustContain('<(&/,<(*,John,key_101) --> hold>,+6,<(*,John,door_101) --> open>) =/> <(*,John,room_101) --> enter>>. :!6: %1.00;0.45%')
+'changed from +2 to +4 due to changes in interval calculations
+'this one is working, just throwing an exception
diff --git a/Tests/examples/single_step/nal7/nal7.8.nal b/Tests/examples/single_step/nal7/nal7.8.nal
new file mode 100644
index 0000000..37211ce
--- /dev/null
+++ b/Tests/examples/single_step/nal7/nal7.8.nal
@@ -0,0 +1,22 @@
+'********** updating and revision
+
+'John is holding key_101 now
+<(*,John,key_101) --> hold>. :|:
+
+6
+
+'John is not holding key_101 now
+<(*,John,key_101) --> hold>. :|: %0%
+
+'Is John holding key_101 now?
+<(*,John,key_101) --> hold>? :|:
+
+200
+
+//revision on events
+'John may be holding key_101 now
+''outputMustContain('<John --> (/,hold,_,key_101)>. :!6: %0.50;0.95%')
+
+//but also looking at it as separate:
+'John will not hold key_101 in the future
+''outputMustContain('<John --> (/,hold,_,key_101)>. :!6: %0.00;0.90%')
diff --git a/Tests/examples/single_step/nal7/nal7.concurrentEqual.res.nal b/Tests/examples/single_step/nal7/nal7.concurrentEqual.res.nal
new file mode 100644
index 0000000..bbac4cd
--- /dev/null
+++ b/Tests/examples/single_step/nal7/nal7.concurrentEqual.res.nal
@@ -0,0 +1,6 @@
+' (A <|> B), (B <|> C) |- (A <|> C) (Truth:Resemblance)
+
+<A <|> B>.
+<B <|> C>.
+
+''outputMustContain('<A <|> C>. %1.00;0.81%')
diff --git a/Tests/examples/single_step/nal7/nal7.concurrentImpl.abd.nal b/Tests/examples/single_step/nal7/nal7.concurrentImpl.abd.nal
new file mode 100644
index 0000000..d3b7078
--- /dev/null
+++ b/Tests/examples/single_step/nal7/nal7.concurrentImpl.abd.nal
@@ -0,0 +1,6 @@
+' (A =|> B), (A =|> C) |- (B =|> C) (Truth:Abduction)
+
+<A =|> B>.
+<A =|> C>.
+
+''outputMustContain('<B =|> C>. %1.00;0.45%')
diff --git a/Tests/examples/single_step/nal7/nal7.concurrentImpl.ded.nal b/Tests/examples/single_step/nal7/nal7.concurrentImpl.ded.nal
new file mode 100644
index 0000000..f824f98
--- /dev/null
+++ b/Tests/examples/single_step/nal7/nal7.concurrentImpl.ded.nal
@@ -0,0 +1,6 @@
+' (A =|> B), (B =|> C) |- (A =|> C) (Truth:Deduction)
+
+<A =|> B>.
+<B =|> C>.
+
+''outputMustContain('<A =|> C>. %1.00;0.81%')
diff --git a/Tests/examples/single_step/nal7/nal7.concurrentImpl.goal.nal b/Tests/examples/single_step/nal7/nal7.concurrentImpl.goal.nal
new file mode 100644
index 0000000..e182775
--- /dev/null
+++ b/Tests/examples/single_step/nal7/nal7.concurrentImpl.goal.nal
@@ -0,0 +1,6 @@
+<<$1 --> [hardened]> =|> <$1 --> [unscrewing]>>. %1.00;0.90%
+<#1 --> [unscrewing]>!
+
+10
+
+''outputMustContain('<#1 --> [hardened]>! %1.00;0.81%')
diff --git a/Tests/examples/single_step/nal7/nal7.concurrentImpl.ind.nal b/Tests/examples/single_step/nal7/nal7.concurrentImpl.ind.nal
new file mode 100644
index 0000000..6ae95d3
--- /dev/null
+++ b/Tests/examples/single_step/nal7/nal7.concurrentImpl.ind.nal
@@ -0,0 +1,6 @@
+' (A =|> B), (C =|> B) |- (A =|> C) (Truth:Induction)
+
+<A =|> B>.
+<C =|> B>.
+
+''outputMustContain('<A =|> C>. %1.00;0.45%')
diff --git a/Tests/examples/single_step/nal7/nal7.conj.decompose.nal b/Tests/examples/single_step/nal7/nal7.conj.decompose.nal
new file mode 100644
index 0000000..efc7f65
--- /dev/null
+++ b/Tests/examples/single_step/nal7/nal7.conj.decompose.nal
@@ -0,0 +1,6 @@
+(&&,<#1 --> [unscrewing]>,<#1 --> object>)! %1.00;0.90%
+
+10
+
+''outputMustContain('<#1 --> [unscrewing]>! %1.00;0.81%')
+''outputMustContain('<#1 --> object>! %1.00;0.81%')
diff --git a/Tests/examples/single_step/nal7/nal7.decomposeCompound.nal b/Tests/examples/single_step/nal7/nal7.decomposeCompound.nal
new file mode 100644
index 0000000..a3ac400
--- /dev/null
+++ b/Tests/examples/single_step/nal7/nal7.decomposeCompound.nal
@@ -0,0 +1,5 @@
+'********** hit temporal handling of CompositionalRules.decomposeCompound()
+<(&/, (||, S, P), +5) =/> M>. %0.9%
+<S =/> M>. %0.9%
+100
+''outputMustContain('<P =/> M>. %0.19;0.05%')
diff --git a/Tests/examples/single_step/nal7/nal7.eventInduction1.nal b/Tests/examples/single_step/nal7/nal7.eventInduction1.nal
new file mode 100644
index 0000000..2b8c3ed
--- /dev/null
+++ b/Tests/examples/single_step/nal7/nal7.eventInduction1.nal
@@ -0,0 +1,16 @@
+'********** induction on events
+
+'John is opening door_101 now
+<(*,John,door_101) --> open>. :|:
+
+11
+
+'John is not entering room_101 now
+<(*,John,room_101) --> enter>. :|: %0%
+
+10
+
+'If John opens door_101, he will not enter room_101
+''outputMustContain('<(&/,<(*,John,door_101) --> open>,+11) =/> <(*,John,room_101) --> enter>>. :!11: %0.00;0.45%')
+'If John opens door_101, it doesn't mean he will enter room_101
+''outputMustContain('<(&/,<(*,John,door_101) --> open>,+11) </> <(*,John,room_101) --> enter>>. :!11: %0.00;0.45%')
diff --git a/Tests/examples/single_step/nal7/nal7.eventInduction2.nal b/Tests/examples/single_step/nal7/nal7.eventInduction2.nal
new file mode 100644
index 0000000..53a41df
--- /dev/null
+++ b/Tests/examples/single_step/nal7/nal7.eventInduction2.nal
@@ -0,0 +1,14 @@
+'********** induction on events
+
+'John is opening door_101 now
+<(*,John,door_101) --> open>. :|:
+
+6
+
+'John is entering room_101 now
+<(*,John,room_101) --> enter>. :|:
+
+10
+
+'John will enter room_101 after he opens door_101
+''outputMustContain('<<(*,John,room_101) --> enter> =\> (&/,<(*,John,door_101) --> open>,+6)>. :!6: %1.00;0.45%')
diff --git a/Tests/examples/single_step/nal7/nal7.par.nal b/Tests/examples/single_step/nal7/nal7.par.nal
new file mode 100644
index 0000000..58d0b9d
--- /dev/null
+++ b/Tests/examples/single_step/nal7/nal7.par.nal
@@ -0,0 +1,5 @@
+<a --> A>. :|:
+<b --> B>. :|:
+
+10
+''outputMustContain('(&|,<a --> A>,<b --> B>). :!0: %1.00;0.81%')
diff --git a/Tests/examples/single_step/nal7/nal7.predictiveImpl.goal.nal b/Tests/examples/single_step/nal7/nal7.predictiveImpl.goal.nal
new file mode 100644
index 0000000..59007a4
--- /dev/null
+++ b/Tests/examples/single_step/nal7/nal7.predictiveImpl.goal.nal
@@ -0,0 +1,6 @@
+<<$1 --> [hardened]> =/> <$1 --> [unscrewing]>>. %1.00;0.90%
+<#1 --> [unscrewing]>!
+
+10
+
+''outputMustContain('<#1 --> [hardened]>! %1.00;0.81%')
diff --git a/Tests/examples/single_step/nal7/nal7.retroImplSeq.nal b/Tests/examples/single_step/nal7/nal7.retroImplSeq.nal
new file mode 100644
index 0000000..1c42915
--- /dev/null
+++ b/Tests/examples/single_step/nal7/nal7.retroImplSeq.nal
@@ -0,0 +1,8 @@
+<a --> A>. :|:
+8
+<b --> B>. :|:
+8
+<c --> C>. :|:
+
+10
+''outputMustContain('<<c --> C> =\> (&/,<a --> A>,+8,<b --> B>,+8)>. :!16: %1.00;0.42%')
diff --git a/Tests/examples/single_step/nal7/nal7.retrospectiveImpl.goal.nal b/Tests/examples/single_step/nal7/nal7.retrospectiveImpl.goal.nal
new file mode 100644
index 0000000..5c72690
--- /dev/null
+++ b/Tests/examples/single_step/nal7/nal7.retrospectiveImpl.goal.nal
@@ -0,0 +1,6 @@
+<<$1 --> [unscrewing]> =\> <$1 --> [hardened]>>. %1.00;0.90%
+<#1 --> [unscrewing]>!
+
+10
+
+''outputMustContain('<#1 --> [hardened]>! %1.00;0.45%')
diff --git a/Tests/examples/single_step/nal7/nal7.revrev.nal b/Tests/examples/single_step/nal7/nal7.revrev.nal
new file mode 100644
index 0000000..a796258
--- /dev/null
+++ b/Tests/examples/single_step/nal7/nal7.revrev.nal
@@ -0,0 +1,6 @@
+<a --> A>. :|: %1.00;0.90%
+8
+<a --> A>. :|: %1.00;0.90%
+100
+' check for eternalized revised conclusion
+''outputMustContain('<a --> A>. %1.00;0.40%')
diff --git a/Tests/examples/single_step/nal7/nal7.vardetach1.nal b/Tests/examples/single_step/nal7/nal7.vardetach1.nal
new file mode 100644
index 0000000..b2e8fa5
--- /dev/null
+++ b/Tests/examples/single_step/nal7/nal7.vardetach1.nal
@@ -0,0 +1,4 @@
+<(&/,<$1 --> A>,+100) =/> <$1 --> B>>. %1.00;0.90%
+<a --> B>. :|: %1.00;0.90%
+30
+''outputMustContain('<a --> A>. :!-105: %1.00;0.45%')
diff --git a/Tests/examples/single_step/nal7/nal7.vardetach2.nal b/Tests/examples/single_step/nal7/nal7.vardetach2.nal
new file mode 100644
index 0000000..b086459
--- /dev/null
+++ b/Tests/examples/single_step/nal7/nal7.vardetach2.nal
@@ -0,0 +1,4 @@
+<(&/,<$1 --> A>,+100) =/> <$1 --> B>>. %1.00;0.90%
+<a --> A>. :|: %1.00;0.90%
+30
+''outputMustContain('<a --> B>. :!100: %1.00;0.81%')
diff --git a/Tests/examples/single_step/nal7/nal7.vardetach3.nal b/Tests/examples/single_step/nal7/nal7.vardetach3.nal
new file mode 100644
index 0000000..44cb602
--- /dev/null
+++ b/Tests/examples/single_step/nal7/nal7.vardetach3.nal
@@ -0,0 +1,5 @@
+//no interval, should we even support it?
+<<$1 --> A> =/> <$1 --> B>>. %1.00;0.90%
+<a --> B>. :|: %1.00;0.90%
+30
+''outputMustContain('<a --> A>. :!-5: %1.00;0.45%')
diff --git a/Tests/examples/single_step/nal7/nal7.vardetach4.nal b/Tests/examples/single_step/nal7/nal7.vardetach4.nal
new file mode 100644
index 0000000..613f6a4
--- /dev/null
+++ b/Tests/examples/single_step/nal7/nal7.vardetach4.nal
@@ -0,0 +1,5 @@
+//no interval, should we even support it?
+<<$1 --> A> =/> <$1 --> B>>. %1.00;0.90%
+<a --> A>. :|: %1.00;0.90%
+30
+''outputMustContain('<a --> B>. :!5: %1.00;0.81%')
diff --git a/Tests/examples/single_step/nal8/nal8.1.0.nal b/Tests/examples/single_step/nal8/nal8.1.0.nal
new file mode 100644
index 0000000..b7fd731
--- /dev/null
+++ b/Tests/examples/single_step/nal8/nal8.1.0.nal
@@ -0,0 +1,12 @@
+'********** [01 + 03 -> 10]:
+
+'The goal is to make t001 opened.
+<{t001} --> [opened]>!
+
+'If the robot holds t002, then goes to t001 and opens t001, t001 will be opened.
+<(&/,<(*,SELF,{t002}) --> hold>,<(*,SELF,{t001}) --> at>,<(*,{t001}) --> ^open>) =/> <{t001} --> [opened]>>.
+
+100
+
+''outputMustContain('(&/,<(*,SELF,{t002}) --> hold>,<(*,SELF,{t001}) --> at>,(^open,{t001}))! %1.00;0.81%')
+' working in GUI but not in testcase, maybe the following string needs some escapes? but where?
diff --git a/Tests/examples/single_step/nal8/nal8.1.1.nal b/Tests/examples/single_step/nal8/nal8.1.1.nal
new file mode 100644
index 0000000..5daabb5
--- /dev/null
+++ b/Tests/examples/single_step/nal8/nal8.1.1.nal
@@ -0,0 +1,9 @@
+'********** [10 -> 11]:
+
+'The goal is to hold t002, then arrive at t001 and open t001
+(&/,<(*,SELF,{t002}) --> hold>,<(*,SELF,{t001}) --> at>,(^open,{t001}))!
+
+10
+
+'The goal is to hold t002
+''outputMustContain('<(*,SELF,{t002}) --> hold>! %1.00;0.81%')
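Note: the %1.00;0.81% on derived subgoals (nal8.1.0, nal8.1.1) is consistent with a deduction-shaped desire function. A minimal sketch; treating goal derivation exactly like belief deduction is an assumption that matches these expected values, not necessarily the engine's full rule set:

def desire_deduction(f1, c1, f2, c2):
    # Desire value of a subgoal detached from a sequence goal or an implication.
    return f1 * f2, f1 * f2 * c1 * c2

# nal8.1.1: sequence goal %1.00;0.90% + default-confidence premise %1.00;0.90%
print(desire_deduction(1.0, 0.9, 1.0, 0.9))  # (1.0, 0.81) -> subgoal %1.00;0.81%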
diff --git a/Tests/examples/single_step/nal8/nal8.1.10.nal b/Tests/examples/single_step/nal8/nal8.1.10.nal
new file mode 100644
index 0000000..27f3f2e
--- /dev/null
+++ b/Tests/examples/single_step/nal8/nal8.1.10.nal
@@ -0,0 +1,13 @@
+'********** [19 + 20 -> 21]
+
+'The robot was at t003.
+<{t003} --> (/,at,SELF,_)>. :\:
+
+'t002 was on t003.
+<{t003} --> (/,on,{t002},_)>. :\:
+
+33
+
+'If the robot was at something, t002 was also on it.
+''outputMustContain('(&&,<#1 --> (/,at,SELF,_)>,<#1 --> (/,on,{t002},_)>). :!-5: %1.00;0.81%')
+
diff --git a/Tests/examples/single_step/nal8/nal8.1.11.nal b/Tests/examples/single_step/nal8/nal8.1.11.nal
new file mode 100644
index 0000000..b8f3a1d
--- /dev/null
+++ b/Tests/examples/single_step/nal8/nal8.1.11.nal
@@ -0,0 +1,9 @@
+'********** [21 -> 22]
+
+'t002 was on something and the robot was also at it at the same time.
+(&|,<#1 --> (/,on,{t002},_)>,<#1 --> (/,at,SELF,_)>). :\:
+
+8
+
+'t002 was on something and the robot was also at it at the same time.
+''outputMustContain('(&|,<#1 --> (/,on,{t002},_)>,<(*,SELF,#1) --> at>). :!-5: %1.00;0.90%')
diff --git a/Tests/examples/single_step/nal8/nal8.1.13.nal b/Tests/examples/single_step/nal8/nal8.1.13.nal
new file mode 100644
index 0000000..2797eea
--- /dev/null
+++ b/Tests/examples/single_step/nal8/nal8.1.13.nal
@@ -0,0 +1,12 @@
+'********** [23 + 06 -> 24]
+
+'t002 is on something, and the robot is also at it at the same time.
+(&|,<(*,{t002},#1) --> on>,<(*,SELF,#1) --> at>). :|:
+
+'If item 1 is on item 2 and the robot is also at item 2 at the same time, the robot will be able to reach item 1.
+<(&|,<(*,$1,$2) --> on>,<(*,SELF,$2) --> at>) =|> <(*,SELF,$1) --> reachable>>.
+
+260
+
+'The robot is able to reach t002.
+''outputMustContain('<(*,SELF,{t002}) --> reachable>. :!0: %1.00;0.81%')
diff --git a/Tests/examples/single_step/nal8/nal8.1.14.nal b/Tests/examples/single_step/nal8/nal8.1.14.nal
new file mode 100644
index 0000000..36b61d0
--- /dev/null
+++ b/Tests/examples/single_step/nal8/nal8.1.14.nal
@@ -0,0 +1,12 @@
+'********** [24 + 12 -> 25]
+
+'The robot is able to reach t002.
+<(*,SELF,{t002}) --> reachable>. :|:
+
+'The goal for the robot is to make t002 reachable and then pick it.
+(&/,<(*,SELF,{t002}) --> reachable>,(^pick,{t002}))!
+
+45
+
+'The goal may be to pick t002.
+''outputMustContain('(^pick,{t002})! %1.00;0.43%')
diff --git a/Tests/examples/single_step/nal8/nal8.1.16.nal b/Tests/examples/single_step/nal8/nal8.1.16.nal
new file mode 100644
index 0000000..24bfcae
--- /dev/null
+++ b/Tests/examples/single_step/nal8/nal8.1.16.nal
@@ -0,0 +1,12 @@
+'********** [24 + 05 -> 27]
+
+'The robot is able to reach t002.
+<(*,SELF,{t002}) --> reachable>. :|:
+
+'If the robot reaches t002 and picks it, the robot will hold t002.
+<(&/,<(*,SELF,{t002}) --> reachable>,(^pick,{t002}))=/><(*,SELF,{t002}) --> hold>>.
+
+1
+
+'If the robot picks t002, it will hold t002.
+''outputMustContain('<(^pick,{t002}) =/> <(*,SELF,{t002}) --> hold>>. :!0: %1.00;0.81%')
diff --git a/Tests/examples/single_step/nal8/nal8.1.17.nal b/Tests/examples/single_step/nal8/nal8.1.17.nal
new file mode 100644
index 0000000..6358f7e
--- /dev/null
+++ b/Tests/examples/single_step/nal8/nal8.1.17.nal
@@ -0,0 +1,12 @@
+'********** [26 + 27 -> 28]
+
+'t002 is picked.
+(^pick,{t002}). :\:
+
+'If the robot picks t002, it will hold t002.
+<(^pick,{t002})=/><(*,SELF,{t002}) --> hold>>. :\:
+
+20
+
+'The robot is holding t002.
+''outputMustContain('<(*,SELF,{t002}) --> hold>. :!0: %1.00;0.81%')
diff --git a/Tests/examples/single_step/nal8/nal8.1.18.nal b/Tests/examples/single_step/nal8/nal8.1.18.nal
new file mode 100644
index 0000000..96859f1
--- /dev/null
+++ b/Tests/examples/single_step/nal8/nal8.1.18.nal
@@ -0,0 +1,12 @@
+'********** [28 + 10 -> 29]
+
+'The robot is holding t002.
+<(*,SELF,{t002}) --> hold>. :|:
+
+'The robot should hold t002, then arrive at t001 and open t001.
+(&/,<(*,SELF,{t002}) --> hold>,+100,<(*,SELF,{t001}) --> at>,+100,(^open,{SELF},{t001}))!
+
+30
+
+'The robot should arrive at t001, then open t001.
+''outputMustContain('(&/,<(*,SELF,{t001}) --> at>,+100,(^open,{SELF},{t001}))! :!100: %1.00;0.73%')
diff --git a/Tests/examples/single_step/nal8/nal8.1.19.nal b/Tests/examples/single_step/nal8/nal8.1.19.nal
new file mode 100644
index 0000000..3d41817
--- /dev/null
+++ b/Tests/examples/single_step/nal8/nal8.1.19.nal
@@ -0,0 +1,12 @@
+'********** [03 + 28 -> 30]
+
+'If the robot holds t002, then arrives at t001 and opens it, t001 will be opened.
+<(&/,<(*,SELF,{t002}) --> hold>,<(*,SELF,{t001}) --> at>,(^open,{SELF},{t001})) =/> <{t001} --> [opened]>>. %1.00;0.90%
+
+'The robot is holding t002 now.
+<(*,SELF,{t002}) --> hold>. :|:
+
+200
+
+'If the robot arrives at t001 and opens it, t001 may be opened.
+''outputMustContain('<(&/,<(*,SELF,{t001}) --> at>,(^open,{SELF},{t001})) =/> <{t001} --> [opened]>>. %1.00;0.43%')
diff --git a/Tests/examples/single_step/nal8/nal8.1.2.nal b/Tests/examples/single_step/nal8/nal8.1.2.nal
new file mode 100644
index 0000000..8f239bf
--- /dev/null
+++ b/Tests/examples/single_step/nal8/nal8.1.2.nal
@@ -0,0 +1,9 @@
+'********** [12 -> 13]:
+
+'The goal for the robot is to make t002 reachable and then pick it.
+(&/,<(*,SELF,{t002}) --> reachable>,(^pick,{t002}))!
+
+5
+
+'The goal for the robot is to make t002 reachable.
+''outputMustContain('<(*,SELF,{t002}) --> reachable>! %1.00;0.81%')
diff --git a/Tests/examples/single_step/nal8/nal8.1.20.nal b/Tests/examples/single_step/nal8/nal8.1.20.nal
new file mode 100644
index 0000000..6e62da3
--- /dev/null
+++ b/Tests/examples/single_step/nal8/nal8.1.20.nal
@@ -0,0 +1,9 @@
+'********** [29 -> 31]
+
+'The goal for the robot is to reach t001 and then open t001.
+(&/,<(*,SELF,{t001}) --> at>,(^open,{t001}))!
+
+13
+
+'The goal for the robot is to reach t001.
+''outputMustContain('<(*,SELF,{t001}) --> at>! %1.00;0.81%')
diff --git a/Tests/examples/single_step/nal8/nal8.1.21.nal b/Tests/examples/single_step/nal8/nal8.1.21.nal
new file mode 100644
index 0000000..87deb3a
--- /dev/null
+++ b/Tests/examples/single_step/nal8/nal8.1.21.nal
@@ -0,0 +1,12 @@
+'********** [31 + 09 -> 32]
+
+'The goal is to reach t001.
+<(*,SELF,{t001}) --> at>!
+
+'If the robot goes somewhere, it will be there.
+<(^go_to,$1)=/><(*,SELF,$1) --> at>>.
+
+500
+
+'The goal is to go to t001.
+''outputMustContain('(^go_to,{t001})! %1.00;0.81%')
diff --git a/Tests/examples/single_step/nal8/nal8.1.23.nal b/Tests/examples/single_step/nal8/nal8.1.23.nal
new file mode 100644
index 0000000..319ada6
--- /dev/null
+++ b/Tests/examples/single_step/nal8/nal8.1.23.nal
@@ -0,0 +1,16 @@
+'********** [33 + 09 -> 34]
+
+'The robot went to t001.
+(^go_to,{SELF},{t001}). :\:
+
+'If the robot goes somewhere, it will be there.
+<(^go_to,{SELF},$1)=/><(*,{SELF},$1) --> at>>.
+
+41
+
+'Was the robot at t001?
+'IN: <(*,{SELF},{t001}) --> at>? :\:
+'140
+'The robot was at t001.
+''outputMustContain('<(*,{SELF},{t001}) --> at>. :!0: %1.00;0.81%')
+
diff --git a/Tests/examples/single_step/nal8/nal8.1.24.nal b/Tests/examples/single_step/nal8/nal8.1.24.nal
new file mode 100644
index 0000000..f11a135
--- /dev/null
+++ b/Tests/examples/single_step/nal8/nal8.1.24.nal
@@ -0,0 +1,12 @@
+'********** [34 + 30 -> 35]
+
+'The robot is at t001 now.
+<(*,SELF,{t001}) --> at>. :|:
+
+'If the robot arrives at t001 and opens it, t001 will be opened.
+<(&/,<(*,SELF,{t001}) --> at>,+100,(^open,{t001}))=/><{t001} --> [opened]>>. :|:
+
+20
+
+'If the robot opens t001, t001 will be opened.
+''outputMustContain('<(^open,{t001}) =/> <{t001} --> [opened]>>. :!100: %1.00;0.81%')
diff --git a/Tests/examples/single_step/nal8/nal8.1.25.nal b/Tests/examples/single_step/nal8/nal8.1.25.nal
new file mode 100644
index 0000000..5a09474
--- /dev/null
+++ b/Tests/examples/single_step/nal8/nal8.1.25.nal
@@ -0,0 +1,12 @@
+'********** [34 + 29 -> 36]
+
+'The robot is at t001 now.
+<(*,SELF,{t001}) --> at>. :|:
+
+'The goal is to arrive at t001 and then open t001.
+(&/,<(*,SELF,{t001}) --> at>,(^open,{SELF},{t001}))!
+
+25
+
+'The goal may be to open t001.
+''outputMustContain('(^open,{SELF},{t001})! %1.00;0.43%')
diff --git a/Tests/examples/single_step/nal8/nal8.1.27.nal b/Tests/examples/single_step/nal8/nal8.1.27.nal
new file mode 100644
index 0000000..6c40e59
--- /dev/null
+++ b/Tests/examples/single_step/nal8/nal8.1.27.nal
@@ -0,0 +1,12 @@
+'********** [35 + 37 -> 38]
+
+'If the robot opens t001, t001 will be opened.
+<(^open,{SELF},{t001})=/><{t001} --> [opened]>>. :|:
+
+'The robot opens t001.
+(^open,{SELF},{t001}). :|:
+
+1
+
+'t001 is opened.
+''outputMustContain('<{t001} --> [opened]>. :!5: %1.00;0.81%')
diff --git a/Tests/examples/single_step/nal8/nal8.1.3.nal b/Tests/examples/single_step/nal8/nal8.1.3.nal
new file mode 100644
index 0000000..023ab5a
--- /dev/null
+++ b/Tests/examples/single_step/nal8/nal8.1.3.nal
@@ -0,0 +1,12 @@
+'********** [13 + 06 -> 14]:
+
+'The goal for the robot is to make t002 reachable.
+<(*,SELF,{t002}) --> reachable>!
+
+'If item 1 is on item 2 and the robot is also at item 2 at the same time, the robot will be able to reach item 1.
+<(&|,<(*,$1,#2) --> on>,<(*,SELF,#2) --> at>)=|><(*,SELF,{t002}) --> reachable>>.
+
+20
+
+'The goal is to make the robot at #1 and t002 is on #1 at the same time
+''outputMustContain('(&|,<(*,SELF,#1) --> at>,<(*,{t002},#1) --> on>)! :!0: %1.00;0.73%')
diff --git a/Tests/examples/single_step/nal8/nal8.1.3.var.nal b/Tests/examples/single_step/nal8/nal8.1.3.var.nal
new file mode 100644
index 0000000..480e842
--- /dev/null
+++ b/Tests/examples/single_step/nal8/nal8.1.3.var.nal
@@ -0,0 +1,12 @@
+'********** [13 + 06 -> 14]:
+
+'The goal for the robot is to make t002 reachable.
+<(*,SELF,{t002}) --> reachable>!
+
+'If item 1 is on item 2 and the robot is also at item 2 at the same time, the robot will be able to reach item 1.
+<(&|,<(*,$1,#2) --> on>,<(*,SELF,#2) --> at>)=|><(*,SELF,$1) --> reachable>>.
+
+10
+
+'The goal is to make the robot at #1 and t002 is on #1 at the same time
+''outputMustContain('(&|,<(*,SELF,#1) --> at>,<(*,{t002},#1) --> on>)! :!0: %1.00;0.73%')
diff --git a/Tests/examples/single_step/nal8/nal8.1.4 copy.nal b/Tests/examples/single_step/nal8/nal8.1.4 copy.nal
new file mode 100644
index 0000000..6d76bb4
--- /dev/null
+++ b/Tests/examples/single_step/nal8/nal8.1.4 copy.nal
@@ -0,0 +1,12 @@
+'********** [07 + 14 -> 15]:
+
+'t002 is on t003 now.
+<(*,{t002},{t003}) --> on>. :|:
+
+'The goal is to make t002 on #1 and #1 is at the robot at the same time
+(&|,<(*,{t002},#1) --> on>,<(*,#1,SELF) --> at>)!
+
+350
+
+'The goal may be to make t003 at the robot
+''outputMustContain('<(*,{t003},SELF) --> at>! %1.00;0.43%')
diff --git a/Tests/examples/single_step/nal8/nal8.1.4.nal b/Tests/examples/single_step/nal8/nal8.1.4.nal
new file mode 100644
index 0000000..567265f
--- /dev/null
+++ b/Tests/examples/single_step/nal8/nal8.1.4.nal
@@ -0,0 +1,12 @@
+'********** [07 + 14 -> 15]:
+
+'t002 is on t003 now.
+<(*,{t002},{t003}) --> on>. :|:
+
+'The goal is to make t002 on t003 and t003 is at the robot at the same time
+(&|,<(*,{t002},{t003}) --> on>,<(*,{t003},SELF) --> at>)!
+
+350
+
+'The goal may be to make t003 at the robot
+''outputMustContain('<(*,{t003},SELF) --> at>! %1.00;0.43%')
diff --git a/Tests/examples/single_step/nal8/nal8.1.5.nal b/Tests/examples/single_step/nal8/nal8.1.5.nal
new file mode 100644
index 0000000..a4f71ed
--- /dev/null
+++ b/Tests/examples/single_step/nal8/nal8.1.5.nal
@@ -0,0 +1,12 @@
+'********** [15 + 09 -> 16]:
+
+'The goal for the robot is to arrive at t003.
+<(*,SELF,{t003}) --> at>!
+
+'If the robot goes somewhere, it will be there.
+<(^go_to,$1)=/><(*,SELF,$1) --> at>>.
+
+100
+
+'The goal is to go to t003.
+''outputMustContain('(^go_to,{t003})! %1.00;0.81%')
diff --git a/Tests/examples/single_step/nal8/nal8.1.7.nal b/Tests/examples/single_step/nal8/nal8.1.7.nal
new file mode 100644
index 0000000..c1b5d2b
--- /dev/null
+++ b/Tests/examples/single_step/nal8/nal8.1.7.nal
@@ -0,0 +1,12 @@
+'********** [17 + 09 -> 18]
+
+'Now the robot is going to t003.
+<(*,{t003}) --> ^go_to>. :|:
+
+'If the robot goes somewhere, it will be there.
+<<(*,$1) --> ^go_to> =/> <(*,SELF,$1) --> at>>.
+
+20
+
+'The robot will be at t003.
+''outputMustContain('<(*,SELF,{t003}) --> at>. :!5: %1.00;0.81%')
diff --git a/Tests/examples/single_step/nal8/nal8.1.8.nal b/Tests/examples/single_step/nal8/nal8.1.8.nal
new file mode 100644
index 0000000..e0f603f
--- /dev/null
+++ b/Tests/examples/single_step/nal8/nal8.1.8.nal
@@ -0,0 +1,9 @@
+'********** [18 -> 19]
+
+'The robot was at t003.
+<SELF --> (/,at,_,{t003})>. :\:
+
+6
+
+'The robot was at t003.
+''outputMustContain('<{t003} --> (/,at,SELF,_)>. :!-5: %1.00;0.90%')
diff --git a/Tests/examples/single_step/nal8/nal8.1.9.nal b/Tests/examples/single_step/nal8/nal8.1.9.nal
new file mode 100644
index 0000000..506f9c6
--- /dev/null
+++ b/Tests/examples/single_step/nal8/nal8.1.9.nal
@@ -0,0 +1,9 @@
+'********** [07 -> 20]
+
+'t002 is on t003 now.
+<(*,{t002},{t003}) --> on>. :|:
+
+6
+
+'t002 is on t003 now.
+''outputMustContain('<{t003} --> (/,on,{t002},_)>. :!0: %1.00;0.90%')
diff --git a/Tests/examples/single_step/nal8/nal8.2.1.nal b/Tests/examples/single_step/nal8/nal8.2.1.nal
new file mode 100644
index 0000000..829e80f
--- /dev/null
+++ b/Tests/examples/single_step/nal8/nal8.2.1.nal
@@ -0,0 +1,12 @@
+'********** [10 + 05 -> 11]
+
+'If the robot holds t002, then goes to t001 and opens it, t001 will be opened.
+<(&/,<(*,Self,{t002}) --> hold>,(^go_to,{t001}),(^open,{t001})) =/> <{t001} --> [opened]>>.
+
+'If the robot is able to reach t002 and picks it, the robot will hold t002.
+<(&/,<(*,Self,{t002}) --> reachable>,(^pick,{t002})) =/> <(*,Self,{t002}) --> hold>>.
+
+40
+
+'If the robot is able to reach t002 and pick t002, then go to t001 and open t001, t001 will be opened.
+''outputMustContain('<(&/,<(*,Self,{t002}) --> reachable>,(^pick,{t002}),(^go_to,{t001}),(^open,{t001})) =/> <{t001} --> [opened]>>. %1.00;0.81%')
:!0: %1.00;0.81%') diff --git a/Tests/examples/single_step/nal8/nal8.2.3.nal b/Tests/examples/single_step/nal8/nal8.2.3.nal new file mode 100644 index 0000000..28f2d7d --- /dev/null +++ b/Tests/examples/single_step/nal8/nal8.2.3.nal @@ -0,0 +1,12 @@ +'********** [13 + 09 -> 14] + +'If the robot is at t003 and picks t002, then goes to t001 and opens t001, t001 will be opened. +<(&/,<(*,Self,{t003}) --> at>,(^pick,{t002}),(^go_to,{t001}),(^open,{t001})) =/> <{t001} --> [opened]>>. :|: + +'If the robot goes somewhere, it will be there. +<(^go_to,$1) =/> <(*,Self,$1) --> at>>. + +16 + +'If the robot goes to t003 and picks t002, then goes to t001 and opens t001, t001 may be opened. +''outputMustContain('<(&/,(^go_to,{t003}),(^pick,{t002}),(^go_to,{t001}),(^open,{t001})) =/> <{t001} --> [opened]>>. %1.00;0.43%') diff --git a/Tests/examples/single_step/nal8/nal8.2.4.nal b/Tests/examples/single_step/nal8/nal8.2.4.nal new file mode 100644 index 0000000..1d5c41d --- /dev/null +++ b/Tests/examples/single_step/nal8/nal8.2.4.nal @@ -0,0 +1,12 @@ +'********** [01 + 14 -> 15] + +'The goal is to make t001 opened. +<{t001} --> [opened]>! + +'If the robot goes to t003 and picks t002, then goes to t001 and opens t001, t001 will be opened. +<(&/,(^go_to,{t003}),(^pick,{t002}),(^go_to,{t001}),(^open,{t001})) =/> <{t001} --> [opened]>>. :|: + +24 + +'The goal may be to go to t003 and pick t002, then go to t001 and open t001. +''outputMustContain('(&/,(^go_to,{t003}),(^pick,{t002}),(^go_to,{t001}),(^open,{t001}))! %1.00;0.43%') diff --git a/Tests/examples/single_step/nal8/nal8.3.0.nal b/Tests/examples/single_step/nal8/nal8.3.0.nal new file mode 100644 index 0000000..97901dd --- /dev/null +++ b/Tests/examples/single_step/nal8/nal8.3.0.nal @@ -0,0 +1,12 @@ +'********** [01 + 06 -> 10]: + +'The goal is to make t001 opened. +<{t001} --> [opened]>! + +'If the robot is at t001 and breaks t001, t001 will be opened. +<(&/,<(*,Self,{t001}) --> at>,(^break,{t001})) =/> <{t001} --> [opened]>>. + +28 + +'The goal is to arrive at t001 and break t001. +''outputMustContain('(&/,<(*,Self,{t001}) --> at>,(^break,{t001}))! %1.00;0.81%') diff --git a/Tests/examples/single_step/nal8/nal8.3.1.nal b/Tests/examples/single_step/nal8/nal8.3.1.nal new file mode 100644 index 0000000..143b2a5 --- /dev/null +++ b/Tests/examples/single_step/nal8/nal8.3.1.nal @@ -0,0 +1,12 @@ +'********** [10 + 07 -> 11]: + +'The goal is to arrive at t001 and break t001. +(&/,<(*,Self,{t001}) --> at>,(^break,{t001}))! + +'Now the robot is at t001. +<(*,Self,{t001}) --> at>. :|: + +25 + +'The goal may be to break t001. +''outputMustContain('(^break,{t001})! %1.00;0.43%') diff --git a/Tests/examples/single_step/nal8/nal8.3.2.nal b/Tests/examples/single_step/nal8/nal8.3.2.nal new file mode 100644 index 0000000..11757f0 --- /dev/null +++ b/Tests/examples/single_step/nal8/nal8.3.2.nal @@ -0,0 +1,12 @@ +'********** [09 + 08 -> 12]: + +'If the robot breaks something, it will be damaged. +<(^break,$1) =/> <$1 --> [damaged]>>. + +'The goal is to make t001 damaged. +<{t001} --> [damaged]>! + +17 + +'The goal is to break t001. +''outputMustContain('(^break,{t001})! %1.00;0.81%') diff --git a/Tests/examples/single_step/nal8/nal8.3.3.nal b/Tests/examples/single_step/nal8/nal8.3.3.nal new file mode 100644 index 0000000..3c932c7 --- /dev/null +++ b/Tests/examples/single_step/nal8/nal8.3.3.nal @@ -0,0 +1,15 @@ +'********** [12 + 11 -> 13]: + +'The goal is not to break t001. +(^break,{t001})! %0.00;0.86% + +1 + +'The goal is to break t001. +(^break,{t001})! 
%1.00;0.73% + +1 + +'The goal may be not to break t001. +''outputMustContain('(^break,{t001})! %0.31;0.90%') + diff --git a/Tests/examples/single_step/nal8/nal8.3.4.nal b/Tests/examples/single_step/nal8/nal8.3.4.nal new file mode 100644 index 0000000..1d36514 --- /dev/null +++ b/Tests/examples/single_step/nal8/nal8.3.4.nal @@ -0,0 +1,14 @@ +'********** [13 + 14 -> 15]: + +'The goal may be not to break t001. +(^break,{t001})! %0.31;0.89% + +1 + +'The goal is to break t001. +(^break,{t001})! %1.00;0.95% + +1 + +'The goal may be to break t001. +''outputMustContain('(^break,{t001})! %0.79;0.96%') diff --git a/Tests/examples/single_step/nal8/nal8.4.0.nal b/Tests/examples/single_step/nal8/nal8.4.0.nal new file mode 100644 index 0000000..d97d9c5 --- /dev/null +++ b/Tests/examples/single_step/nal8/nal8.4.0.nal @@ -0,0 +1,12 @@ +'********** [04 + 03 -> 05]: + +'The robot is able to reach key001 now. +<(*,Self,key001) --> reachable>. :|: + +'If the robot is able to reach key001 and picks key001, it will hold key001. +<(&/,<(*,Self,key001) --> reachable>,(^pick,key001)) =/> <(*,Self,key001) --> hold>>. + +1 + +'If the robot picks key001, it will hold key001. +''outputMustContain('<(^pick,key001) =/> <(*,Self,key001) --> hold>>. :!0: %1.00;0.81%') diff --git a/Tests/examples/single_step/nal8/nal8.4.1.nal b/Tests/examples/single_step/nal8/nal8.4.1.nal new file mode 100644 index 0000000..e67aafc --- /dev/null +++ b/Tests/examples/single_step/nal8/nal8.4.1.nal @@ -0,0 +1,12 @@ +'********** [05 + 01 -> 06]: + +'If the robot picks key001, it will hold key001. +<(^pick,key001) =/> <(*,Self,key001) --> hold>>. :|: + +'The goal is to hold key001. +<(*,Self,key001) --> hold>! + +8 + +'The goal may be to pick key001. +''outputMustContain('(^pick,key001)! %1.00;0.43%') diff --git a/Tests/examples/single_step/nal8/nal8.4.2.nal b/Tests/examples/single_step/nal8/nal8.4.2.nal new file mode 100644 index 0000000..b2ce529 --- /dev/null +++ b/Tests/examples/single_step/nal8/nal8.4.2.nal @@ -0,0 +1,13 @@ +'********** [07 + 05 -> 08]: + +'Key001 was picked. +(^pick,key001). :\: + +'If the robot picks key001, it will hold key001. +<(^pick,key001) =/> <(*,Self,key001) --> hold>>. + +15 + +'The robot is holding key001. +''outputMustContain('<(*,Self,key001) --> hold>. :!0: %1.00;0.81%') + diff --git a/Tests/examples/single_step/nal8/nal8.4.3.nal b/Tests/examples/single_step/nal8/nal8.4.3.nal new file mode 100644 index 0000000..ac2901d --- /dev/null +++ b/Tests/examples/single_step/nal8/nal8.4.3.nal @@ -0,0 +1,14 @@ +'********** [08 + 09 -> 10]: + +'The robot is holding key001. +<(*,Self,key001) --> hold>. :|: %1.00;0.81% + +1 + +'The robot is holding key001. +<(*,Self,key001) --> hold>. :|: + +5 + +'The robot is holding key001. +''outputMustContain('<(*,Self,key001) --> hold>. :!1: %1.00;0.93%') diff --git a/Tests/examples/single_step/nal8/nal8.4.4.nal b/Tests/examples/single_step/nal8/nal8.4.4.nal new file mode 100644 index 0000000..6cfd4f8 --- /dev/null +++ b/Tests/examples/single_step/nal8/nal8.4.4.nal @@ -0,0 +1,19 @@ +'********** [02 + 10 -> 11]: + +'The robot is not holding key001. +<(*,Self,key001) --> hold>. :|: %0% + +5 + +'The robot is holding key001. +<(*,Self,key001) --> hold>. :|: %1.00;0.91% + +5 + +'Is the robot holding key001? +<(*,Self,key001) --> hold>? :|: + +50 + +'The robot may be holding key001. +''outputMustContain('<(*,Self,key001) --> hold>. 
:!5: %0.53;0.95%') diff --git a/Tests/examples/single_step/nal8/nal8.4.6.nal b/Tests/examples/single_step/nal8/nal8.4.6.nal new file mode 100644 index 0000000..748ea58 --- /dev/null +++ b/Tests/examples/single_step/nal8/nal8.4.6.nal @@ -0,0 +1,12 @@ +'********** [05 + 12 -> 13]: + +'If the robot picks key001, it will hold key001. +<(^pick,key001) =/> <(*,Self,key001) --> hold>>. :\: %1.00;0.90% + +'If the robot picks key001, it may hold key001. +<(^pick,key001) =/> <(*,Self,key001) --> hold>>. :\: %1.00;0.45% + +35 + +'If the robot picks key001, it will hold key001. +''outputMustContain('<(^pick,key001) =/> <(*,Self,key001) --> hold>>. :!-5: %1.00;0.91%') diff --git a/Tests/examples/single_step/nal8/nal8.4.7.nal b/Tests/examples/single_step/nal8/nal8.4.7.nal new file mode 100644 index 0000000..cba142d --- /dev/null +++ b/Tests/examples/single_step/nal8/nal8.4.7.nal @@ -0,0 +1,14 @@ +'********** [04 + 13 -> 14]: + +'Key001 is reachable for the robot now. +<(*,Self,key001) --> reachable>. :|: + +11 + +'If the robot picks key001, it will hold key001. +<(^pick,key001) =/> <(*,Self,key001) --> hold>>. :|: + +20 + +'If key001 is reachable for the robot and the robot picks key001, the robot may hold key001. +''outputMustContain('<(&/,<(*,Self,key001) --> reachable>,+11,(^pick,key001)) =/> <(*,Self,key001) --> hold>>. :!11: %1.00;0.45%') diff --git a/Tests/examples/single_step/nal8/nal8.5.0.nal b/Tests/examples/single_step/nal8/nal8.5.0.nal new file mode 100644 index 0000000..ad09170 --- /dev/null +++ b/Tests/examples/single_step/nal8/nal8.5.0.nal @@ -0,0 +1,12 @@ +'********** [03 + 04 -> 05]: + +'If key001 is reachable for the robot and the robot picks key001, the robot will hold key001. +<(&/,<(*,Self,key001) --> reachable>,(^pick,key001)) =/> <(*,Self,key001) --> hold>>. + +'Now key001 is reachable. +<(*,Self,key001) --> reachable>. :|: + +1 + +'If the robot picks key001, it will hold key001. +''outputMustContain('<(^pick,key001) =/> <(*,Self,key001) --> hold>>. :!0: %1.00;0.81%') diff --git a/Tests/examples/single_step/nal8/nal8.5.1.nal b/Tests/examples/single_step/nal8/nal8.5.1.nal new file mode 100644 index 0000000..74ef43f --- /dev/null +++ b/Tests/examples/single_step/nal8/nal8.5.1.nal @@ -0,0 +1,12 @@ +'********** [05 + 01 -> 06]: + +'If the robot picks key001, it will hold key001. +<(^pick,key001) =/> <(*,Self,key001) --> hold>>. :|: %1.00;0.81% + +'The goal is to hold key001. +<(*,Self,key001) --> hold>! + +10 + +'The goal may be to pick key001. +''outputMustContain('(^pick,key001)! %1.00;0.40%') diff --git a/Tests/examples/single_step/nal8/nal8.5.2.nal b/Tests/examples/single_step/nal8/nal8.5.2.nal new file mode 100644 index 0000000..cf10765 --- /dev/null +++ b/Tests/examples/single_step/nal8/nal8.5.2.nal @@ -0,0 +1,12 @@ +'********** [07 + 05 -> 08]: + +'The robot picks key001. +(^pick,key001). :|: + +'If the robot picks key001, it will hold key001. +<(^pick,key001) =/> <(*,Self,key001) --> hold>>. :|: %1.00;0.81% + +8 + +'The robot will hold key001. +''outputMustContain('<(*,Self,key001) --> hold>. :!5: %1.00;0.73%') diff --git a/Tests/examples/single_step/nal8/nal8.5.3.nal b/Tests/examples/single_step/nal8/nal8.5.3.nal new file mode 100644 index 0000000..503eaed --- /dev/null +++ b/Tests/examples/single_step/nal8/nal8.5.3.nal @@ -0,0 +1,15 @@ +'********** [08 + 09 -> 10]: + +'The robot is holding key001. +<(*,Self,key001) --> hold>. :|: %1.00;0.73% + +6 + +'The robot is holding key001. +<(*,Self,key001) --> hold>. 
:|: %1.00;0.90% + +10 + +'The robot is holding key001. +''outputMustContain('<(*,Self,key001) --> hold>. :!6: %1.00;0.92%') + diff --git a/Tests/examples/single_step/nal8/nal8.5.4.nal b/Tests/examples/single_step/nal8/nal8.5.4.nal new file mode 100644 index 0000000..13d0e52 --- /dev/null +++ b/Tests/examples/single_step/nal8/nal8.5.4.nal @@ -0,0 +1,21 @@ +'********** [07 + 09 -> 11]: + +' 'a' is a type of 'A' +<a --> A>. :|: + +10 + +'The robot picks key001. +(^pick,{SELF},key001). :|: %1.00;0.90% + +11 + +'The robot holds key001. +<(*,Self,key001) --> hold>. :|: + +16 + +'If 'a' is a type of 'A', and the robot picks key001, the robot may hold key001. +''outputMustContain('<(&/,<a --> A>,+10,(^pick,{SELF},key001),+11) =/> <(*,Self,key001) --> hold>>. :!21: %1.00;0.42%') +'adjusted +3 to +4 ^ + diff --git a/Tests/examples/single_step/nal8/nal8.5.5.nal b/Tests/examples/single_step/nal8/nal8.5.5.nal new file mode 100644 index 0000000..2c7bbbd --- /dev/null +++ b/Tests/examples/single_step/nal8/nal8.5.5.nal @@ -0,0 +1,15 @@ +'********** [05 + 11 -> 12]: + +'If the robot picks key001, it will hold key001. +<(^pick,key001) =/> <(*,Self,key001) --> hold>>. :|: %1.00;0.81% + +1 + +'If the robot picks key001, it may hold key001. +<(^pick,key001) =/> <(*,Self,key001) --> hold>>. :|: %1.00;0.45% + +10 + +'If the robot picks key001, it will hold key001. +''outputMustContain('<(^pick,key001) =/> <(*,Self,key001) --> hold>>. :!1: %1.00;0.84%') + diff --git a/Tests/examples/single_step/nal8/nal8.5.6.nal b/Tests/examples/single_step/nal8/nal8.5.6.nal new file mode 100644 index 0000000..5e41355 --- /dev/null +++ b/Tests/examples/single_step/nal8/nal8.5.6.nal @@ -0,0 +1,16 @@ +'********** [04 + 12 -> 13]: + +'Key001 is reachable. +<(*,Self,key001) --> reachable>. :|: + +11 + +'If the robot picks key001, it will hold key001. +<(^pick,key001) =/> <(*,Self,key001) --> hold>>. :|: %1.00;0.84% + +17 + +'If key001 is reachable and the robot picks key001, it may hold key001. +''outputMustContain('<(&/,<(*,Self,key001) --> reachable>,+11,(^pick,key001)) =/> <(*,Self,key001) --> hold>>. :!11: %1.00;0.43%') +'adjusted +3 to +4 + diff --git a/Tests/examples/single_step/nal8/nal8.5.7.nal b/Tests/examples/single_step/nal8/nal8.5.7.nal new file mode 100644 index 0000000..3075fd7 --- /dev/null +++ b/Tests/examples/single_step/nal8/nal8.5.7.nal @@ -0,0 +1,15 @@ +'********** [03 + 13 -> 14]: + +'If key001 is reachable and the robot picks key001, it will hold key001. +<(&/,<(*,Self,key001) --> reachable>,(^pick,key001)) =/> <(*,Self,key001) --> hold>>. %1.00;0.90% + +1 + +'If key001 is reachable and the robot picks key001, it may hold key001. +<(&/,<(*,Self,key001) --> reachable>,(^pick,key001)) =/> <(*,Self,key001) --> hold>>. %1.00;0.43% + +1 + +'If key001 is reachable and the robot picks key001, it will hold key001. +''outputMustContain('<(&/,<(*,Self,key001) --> reachable>,(^pick,key001)) =/> <(*,Self,key001) --> hold>>. %1.00;0.91%') + diff --git a/Tests/examples/single_step/nal8/nal8.add.nal b/Tests/examples/single_step/nal8/nal8.add.nal new file mode 100644 index 0000000..0a1a431 --- /dev/null +++ b/Tests/examples/single_step/nal8/nal8.add.nal @@ -0,0 +1,2 @@ +(^add,{SELF},2,3,?1)! +''outputMustContain('(^add,{SELF},2,3,5). :!0: %1.00;0.90%') diff --git a/Tests/examples/single_step/nal8/nal8.count.nal b/Tests/examples/single_step/nal8/nal8.count.nal new file mode 100644 index 0000000..deb9abf --- /dev/null +++ b/Tests/examples/single_step/nal8/nal8.count.nal @@ -0,0 +1,2 @@ +(^count,{SELF},{a,b,c,d},?1)! 
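+'(reading of this test, added as a comment: ^count is expected to ground the query variable ?1 +'to the cardinality of the extensional set {a,b,c,d}, so the answer below reports 4 with the +'default input truth value %1.00;0.90%)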
+''outputMustContain('(^count,{SELF},{a,b,c,d},4). :!0: %1.00;0.90%') diff --git a/Tests/examples/single_step/nal8/nal8.reflect.nal b/Tests/examples/single_step/nal8/nal8.reflect.nal new file mode 100644 index 0000000..b1cda6b --- /dev/null +++ b/Tests/examples/single_step/nal8/nal8.reflect.nal @@ -0,0 +1,3 @@ +(^reflect,{SELF},<cat --> animal>,?1)! +10 +''outputMustContain('(^reflect,{SELF},<cat --> animal>,<(*,cat,animal) --> inheritance>). :!0: %1.00;0.90%') diff --git a/Tests/examples/single_step/nal9/nal9.anticipate1.nal b/Tests/examples/single_step/nal9/nal9.anticipate1.nal new file mode 100644 index 0000000..549f9f9 --- /dev/null +++ b/Tests/examples/single_step/nal9/nal9.anticipate1.nal @@ -0,0 +1,7 @@ +<(&/,<a --> A>,+10) =/> <b --> B>>. +'making it observable: +<b --> B>. +'ok start: +<a --> A>. :|: +10 +''outputMustContain('(^anticipate,{SELF},<b --> B>). :!0: %1.00;0.90%') diff --git a/Tests/examples/single_step/nal9/nal9.believe1.nal b/Tests/examples/single_step/nal9/nal9.believe1.nal new file mode 100644 index 0000000..f7aba58 --- /dev/null +++ b/Tests/examples/single_step/nal9/nal9.believe1.nal @@ -0,0 +1,6 @@ +<a --> b>. +'ok, being aware needs attention, so let's ask NARS about it: +<a --> b>? +'ok this concept should now be important enough for it so that NARS now knows +2 +''outputMustContain('(^believe,{SELF},<a --> b>,TRUE). :!0: %1.00;0.90%') diff --git a/Tests/examples/single_step/nal9/nal9.believe2.nal b/Tests/examples/single_step/nal9/nal9.believe2.nal new file mode 100644 index 0000000..f90fdc5 --- /dev/null +++ b/Tests/examples/single_step/nal9/nal9.believe2.nal @@ -0,0 +1,3 @@ +(^believe,{SELF},<cat --> animal>,FALSE)! +2 +''outputMustContain('<cat --> animal>. :!0: %0.00;0.90%') diff --git a/Tests/examples/single_step/nal9/nal9.doubt.nal b/Tests/examples/single_step/nal9/nal9.doubt.nal new file mode 100644 index 0000000..ca88408 --- /dev/null +++ b/Tests/examples/single_step/nal9/nal9.doubt.nal @@ -0,0 +1,7 @@ +<a --> b>. %1.00;0.90% +20 +(^doubt,{SELF},<a --> b>)! %1.00;0.90% +20 +<a --> b>? +''outputMustContain('<a --> b>. %1.00;0.45%') +10 \ No newline at end of file diff --git a/Tests/examples/single_step/nal9/nal9.evaluate1.nal b/Tests/examples/single_step/nal9/nal9.evaluate1.nal new file mode 100644 index 0000000..49a7516 --- /dev/null +++ b/Tests/examples/single_step/nal9/nal9.evaluate1.nal @@ -0,0 +1,3 @@ +<a --> b>@ +10 +''outputMustContain('(^evaluate,{SELF},<a --> b>). :!0: %1.00;0.90%') diff --git a/Tests/examples/single_step/nal9/nal9.evaluate2.nal b/Tests/examples/single_step/nal9/nal9.evaluate2.nal new file mode 100644 index 0000000..fc1add1 --- /dev/null +++ b/Tests/examples/single_step/nal9/nal9.evaluate2.nal @@ -0,0 +1,3 @@ +(^evaluate,{SELF},<cat --> animal>)! +10 +''outputMustContain('<cat --> animal>@') diff --git a/Tests/examples/single_step/nal9/nal9.hesitate.nal b/Tests/examples/single_step/nal9/nal9.hesitate.nal new file mode 100644 index 0000000..69bea6f --- /dev/null +++ b/Tests/examples/single_step/nal9/nal9.hesitate.nal @@ -0,0 +1,5 @@ +<a --> b>! %1.00;0.90% +(^hesitate,{SELF},<a --> b>)! %1.00;0.90% +<a --> b>@ +1 +''outputMustContain('<a --> b>! %1.00;0.45%') diff --git a/Tests/examples/single_step/nal9/nal9.want1.nal b/Tests/examples/single_step/nal9/nal9.want1.nal new file mode 100644 index 0000000..3366f0c --- /dev/null +++ b/Tests/examples/single_step/nal9/nal9.want1.nal @@ -0,0 +1,3 @@ +(^want,{SELF},<a --> b>). %1.00;0.90% +10 +''outputMustContain('<a --> b>! 
%1.00;0.81%') diff --git a/Tests/examples/single_step/nal9/nal9.want12.nal b/Tests/examples/single_step/nal9/nal9.want12.nal new file mode 100644 index 0000000..9e5ad1d --- /dev/null +++ b/Tests/examples/single_step/nal9/nal9.want12.nal @@ -0,0 +1,3 @@ +(^want,{SELF},<a --> b>). :|: %1.00;0.90% +10 +''outputMustContain('<a --> b>! :!0: %1.00;0.81%') diff --git a/Tests/examples/single_step/nal9/nal9.want2.nal b/Tests/examples/single_step/nal9/nal9.want2.nal new file mode 100644 index 0000000..442e4da --- /dev/null +++ b/Tests/examples/single_step/nal9/nal9.want2.nal @@ -0,0 +1,3 @@ +(^want,{SELF},<a --> b>)! %1.00;0.90% +10 +''outputMustContain('<a --> b>! :!0: %1.00;0.90%') diff --git a/Tests/examples/single_step/nal9/nal9.wonder1.nal b/Tests/examples/single_step/nal9/nal9.wonder1.nal new file mode 100644 index 0000000..450c33a --- /dev/null +++ b/Tests/examples/single_step/nal9/nal9.wonder1.nal @@ -0,0 +1,3 @@ +<a --> b>? +10 +''outputMustContain('(^wonder,{SELF},<a --> b>). :!0: %1.00;0.90%') diff --git a/Tests/examples/single_step/nal9/nal9.wonder2.nal b/Tests/examples/single_step/nal9/nal9.wonder2.nal new file mode 100644 index 0000000..baa20d4 --- /dev/null +++ b/Tests/examples/single_step/nal9/nal9.wonder2.nal @@ -0,0 +1,3 @@ +(^wonder,{SELF},<cat --> animal>)! +10 +''outputMustContain('<cat --> animal>?') diff --git a/Tests/examples/single_step/notcontain.nal b/Tests/examples/single_step/notcontain.nal new file mode 100644 index 0000000..e030fb5 --- /dev/null +++ b/Tests/examples/single_step/notcontain.nal @@ -0,0 +1,4 @@ +<a --> b>. +2 +''outputMustNotContain('<b --> c>.') +''outputMustContain('<a --> b>.') diff --git a/Tests/examples/stability/long_term_stability.nal b/Tests/examples/stability/long_term_stability.nal new file mode 100644 index 0000000..34b77b8 --- /dev/null +++ b/Tests/examples/stability/long_term_stability.nal @@ -0,0 +1,72 @@ +'the detective claims that tim lives in graz +'<{tim} --> (/,livingIn,_,{graz})>. +'and the lawyer claims that this is not the case +<{tim} --> (/,livingIn,_,{graz})>. %0% +100 +'the first deponent, a psychologist, +'claims that people with sunglasses are more aggressive +<<(*,$1,sunglasses) --> own> ==> <$1 --> [aggressive]>>. +'the third deponent claims that he has seen tom with sunglasses on: +<(*,{tom},sunglasses) --> own>. +'the teacher claims that people who are aggressive tend to be murderers +<<$1 --> [aggressive]> ==> <$1 --> murder>>. +'the second deponent claims that if the person lives in Graz, he is surely the murderer +<<$1 --> (/,livingIn,_,{graz})> ==> <$1 --> murder>>. +'who is the murderer? +<{?who} --> murder>? +'the detective claims that tim lives in graz +<{tim} --> (/,livingIn,_,{graz})>. +'the lawyer claims that this is not the case +<{tim} --> (/,livingIn,_,{graz})>. %0% +100 +'the first deponent, a psychologist, +'claims that people with sunglasses are more aggressive +<<(*,$1,sunglasses) --> own> ==> <$1 --> [aggressive]>>. +'the third deponent claims that he has seen tom with black glasses on: +<(*,{tom},(&,[black],glasses)) --> own>. +'the teacher claims that people who are aggressive tend to be murderers +<<$1 --> [aggressive]> ==> <$1 --> murder>>. +'the second deponent claims that if the person lives in Graz, he is surely the murderer +<<$1 --> (/,livingIn,_,{graz})> ==> <$1 --> murder>>. +'the system knows that sunglasses are a special case of black glasses +<sunglasses --> (&,[black],glasses)>. +'who is the murderer? +<{?who} --> murder>? +<(*,toothbrush,plastic) --> made_of>. +<(&/,<(*,$1,plastic) --> made_of>,(^lighter,{SELF},$1)) =/> <$1 --> [heated]>>. +<<$1 --> [heated]> =/> <$1 --> [melted]>>. 
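+'(a gloss of the temporal copulas and connectors used in this chain, added as a comment for +'readability: =/> is predictive implication, =|> concurrent implication, <|> concurrent +'equivalence, &/ sequential conjunction, and &| parallel conjunction)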
+<<$1 --> [melted]> <|> <$1 --> [pliable]>>. +<(&/,<$1 --> [pliable]>,(^reshape,{SELF},$1)) =/> <$1 --> [hardened]>>. +<<$1 --> [hardened]> =|> <$1 --> [unscrewing]>>. +<toothbrush --> object>. +(&&,<#1 --> object>,<#1 --> [unscrewing]>)! +<{SELF} --> [hurt]>! %0% +<{SELF} --> [hurt]>. :|: %0% +<(&/,<(*,{SELF},wolf) --> close_to>,+1000) =/> <{SELF} --> [hurt]>>. +<(*,{SELF},wolf) --> close_to>. :|: +<(&|,(^want,{SELF},$1,FALSE),(^anticipate,{SELF},$1)) =|> <(*,{SELF},$1) --> afraid_of>>. +<(*,{SELF},?what) --> afraid_of>? +<a --> A>. :|: %1.00;0.90% +8 +<b --> B>. :|: %1.00;0.90% +8 +<c --> C>. :|: %1.00;0.90% +8 +<a --> A>. :|: %1.00;0.90% +100 +<b --> B>. :|: %1.00;0.90% +100 +<?1 =/> <c --> C>>? +<(*,cup,plastic) --> made_of>. +<cup --> object>. +<cup --> [bendable]>. +<toothbrush --> [bendable]>. +<toothbrush --> object>. +<(&/,<(*,$1,plastic) --> made_of>,(^lighter,{SELF},$1)) =/> <$1 --> [heated]>>. +<<$1 --> [heated]> =/> <$1 --> [melted]>>. +<<$1 --> [melted]> <|> <$1 --> [pliable]>>. +<(&/,<$1 --> [pliable]>,(^reshape,{SELF},$1)) =/> <$1 --> [hardened]>>. +<<$1 --> [hardened]> =|> <$1 --> [unscrewing]>>. +(&&,<#1 --> object>,<#1 --> [unscrewing]>)! +2000000 +''outputMustContain('') diff --git a/Tests/examples/var1.nal b/Tests/examples/var1.nal new file mode 100644 index 0000000..054eed5 --- /dev/null +++ b/Tests/examples/var1.nal @@ -0,0 +1,13 @@ +<(&&, <<$x-->A>==><$x-->B>>, <<$y-->C>==><$y-->D>>) ==> E>. +<<$x-->C>==><$x-->D>>. +1000 + +<(&&, A, B) ==> C>. +B. +5 + +<<$x-->A>==><$y-->B>>. +<$x-->B>. + +B>. +B. \ No newline at end of file diff --git a/Tests/test_Bag.py b/Tests/test_Bag.py new file mode 100644 index 0000000..1f49d0e --- /dev/null +++ b/Tests/test_Bag.py @@ -0,0 +1,133 @@ +from Narsese import Budget +import unittest + +from NARS.DataStructures import Bag, Task, Concept +from Narsese import Judgement, Term, Statement, Copula, Truth + +import matplotlib.pyplot as plt + +class TEST_Bag(unittest.TestCase): + def __init__(self, methodName: str = ...) 
-> None: + super().__init__(methodName=methodName) + + def test_bag_put_task(self): + '''''' + bag = Bag(1000, 100) + task = Task(Judgement(Statement(Term('robin'), Copula.Inheritance, Term('bird')))) + self.assertEqual(len(bag), bag.count()) + self.assertEqual(len(bag), 0) + bag.put(task) + self.assertIn(task, bag) + self.assertEqual(len(bag), bag.count()) + self.assertEqual(len(bag), 1) + pass + + def test_bag_put_task_merge(self): + bag = Bag(1000, 100) + task = Task(Judgement(Statement(Term('robin'), Copula.Inheritance, Term('bird'))), Budget(0.5, 0.5, 0.5)) + self.assertEqual(len(bag), bag.count()) + self.assertEqual(len(bag), 0) + bag.put(task) + self.assertIn(task, bag) + self.assertEqual(len(bag), bag.count()) + self.assertEqual(len(bag), 1) + task = Task(Judgement(Statement(Term('robin'), Copula.Inheritance, Term('bird'))), Budget(1.0, 1.0, 1.0)) + bag.put(task) + self.assertIn(task, bag) + self.assertEqual(len(bag), bag.count()) + self.assertEqual(len(bag), 1) + task = bag.take_by_key(task, False) + self.assertEqual(task.budget.priority, 1.0) + self.assertEqual(task.budget.durability, 1.0) + self.assertEqual(task.budget.quality, 1.0) + task = Task(Judgement(Statement(Term('robin'), Copula.Inheritance, Term('bird'))), Budget(0.9, 0.9, 0.9)) + # bag.put(task, merge=False) + # self.assertIn(task, bag) + # self.assertEqual(len(bag), bag.count()) + # self.assertEqual(len(bag), 1) + # task = bag.take_by_key(task, False) + # self.assertEqual(task.budget.priority, 0.9) + # self.assertEqual(task.budget.durability, 0.9) + # self.assertEqual(task.budget.quality, 0.9) + pass + def test_bag_take_task_by_key(self): + '''''' + bag = Bag(1000, 100) + task1 = Task(Judgement(Statement(Term('robin'), Copula.Inheritance, Term('bird')))) + task2 = Task(Judgement(Statement(Term('bird'), Copula.Inheritance, Term('animal')))) + self.assertEqual(len(bag), bag.count()) + bag.put(task1) + self.assertEqual(len(bag), bag.count()) + task = bag.take_by_key(task2, remove=False) + self.assertIsNone(task) + task = bag.take_by_key(task2, remove=True) + self.assertIsNone(task) + self.assertEqual(len(bag), 1) + task = bag.take_by_key(task1, remove=False) + self.assertEqual(len(bag), 1) + self.assertIsNotNone(task) + task = bag.take_by_key(task1, remove=True) + self.assertEqual(len(bag), bag.count()) + self.assertEqual(len(bag), 0) + self.assertIsNotNone(task) + pass + + def test_bag_take_task(self): + '''take a task using the priority''' + bag = Bag(1000, 100) + task1 = Task(Judgement(Statement(Term('robin'), Copula.Inheritance, Term('bird'))), Budget(0.9, 0.5, 0.5)) + task2 = Task(Judgement(Statement(Term('bird'), Copula.Inheritance, Term('animal'))), Budget(0.5, 0.6, 0.6)) + bag.put(task1) + bag.put(task2) + self.assertEqual(len(bag), bag.count()) + self.assertEqual(len(bag), 2) + + cnt1 = 0 + cnt2 = 0 + for _ in range(10000): + task = bag.take(remove=False) + if task == task1: + cnt1 += 1 + elif task == task2: + cnt2 += 1 + self.assertGreater(cnt1, 3*cnt2) + + bag.take_by_key(task1) + bag.take_by_key(task2) + self.assertEqual(len(bag), 0) + for i in range(0, 100): + p = i/100 + task = Task(Judgement(Statement(Term(f'robin_{i}'), Copula.Inheritance, Term('bird'))), Budget(p, 0.5, 0.5)) + bag.put(task) + + self.assertEqual(len(bag), 100) + import numpy as np + cnt = np.array([0 for _ in range(100)]) + from tqdm import tqdm + n = 10000 + for _ in tqdm(range(n)): + task = bag.take(remove=False) + idx = bag.map_priority(task.budget.priority) + cnt[idx] += 1 + # self.assertTrue(cnt[-1]/cnt[-3] > 2) + plt.figure(1) + 
plt.bar(list(range(len(cnt))), cnt) + plt.title(f'n={n}') + plt.savefig(f'./Tests/test_Bag_take_{n}') + n = 1000000 + for _ in tqdm(range(n)): + task = bag.take(remove=False) + idx = bag.map_priority(task.budget.priority) + cnt[idx] += 1 + plt.figure(2) + plt.bar(list(range(len(cnt))), cnt) + plt.title(f'n={n}') + plt.savefig(f'./Tests/test_Bag_take_{n}') + plt.show() + + + +if __name__ == '__main__': + unittest.main() + + diff --git a/Tests/test_Compound.py b/Tests/test_Compound.py new file mode 100644 index 0000000..bd1c810 --- /dev/null +++ b/Tests/test_Compound.py @@ -0,0 +1,104 @@ +import NARS +import unittest + +from NARS.DataStructures import Bag, Task, Concept, Table +from Narsese import Judgement, Term, Statement, Copula, Truth + +from pathlib import Path +import Narsese +from Narsese import Compound, Connector +from NAL.MetaLevelInference.VariableSubstitution import * +from Narsese._py.Variable import VarPrefix, Variable +from utils.IndexVar import IndexVar + +class TEST_Compound(unittest.TestCase): + def test_0(self): + c1 = Compound.ExtensionalSet(Term("A"), Term("B")) + c2 = Compound.ExtensionalSet(Term("B"), Term("C")) + c3 = c1-c2 + c4 = Compound.ExtensionalSet(Term("A")) + self.assertEqual(c3, c4) + pass + + def test_1(self): + c1 = Compound.ExtensionalSet(Term("A"), Term("B")) + c2 = Compound.ExtensionalSet(Term("B"), Term("C")) + c3 = c1-c2 + c4 = Compound.ExtensionalSet(Term("A")) + c5 = c2-c1 + c6 = c1 - Term("A") + c7 = Term("A") - c1 + self.assertEqual(c3, c4) + pass + + def test_mix_several_term_0(self): + c1 = Compound.ExtensionalSet(Term("A"), Term("B")) + c2 = Compound.ExtensionalSet(Term("B"), Term("C")) + c3 = Compound.ExtensionalSet(c1, c2) + c4 = Compound.ExtensionalSet(Term("A"), Term("B"), Term("C")) + self.assertEqual(c3, c4) + + c5 = Compound.ExtensionalSet(c1, c2, Term("A")) + c6 = Compound.ExtensionalSet(c3, Term("A")) + self.assertEqual(c5, c6) + self.assertEqual(str(c5), '{A, B, C}') + + c7 = Compound.IntensionalSet(Term("A")) + c8 = Compound.ExtensionalSet(c1, c2, c7, Term("A")) + c9 = Compound.ExtensionalSet(c3, c7, Term("A")) + self.assertEqual(c8, c9) + self.assertEqual(str(c8), '{A, B, C, [A]}') + pass + + def test_mix_several_terms_1(self): + c1 = Compound.ExtensionalSet(Term("A"), Term("B")) + c2 = Compound.ExtensionalSet(Term("B"), Term("C")) + c3 = Compound.ExtensionalIntersection(c1, c2) + c4 = Compound.ExtensionalSet(Term("B")) + self.assertEqual(c3, c4) + + pass + + def test_mix_several_terms_2(self): + c1 = Compound.IntensionalSet(Term("A"), Term("B")) + c2 = Compound.IntensionalSet(Term("B"), Term("C")) + c3 = Compound.IntensionalIntersection(c1, c2) + c4 = Compound.IntensionalSet(Term("B")) + self.assertEqual(c3, c4) + + pass + + def test_mix_several_terms_3(self): + c1 = Compound.IntensionalSet(Term("A"), Term("B")) + c2 = Compound.IntensionalSet(Term("B"), Term("C")) + c3 = Compound.IntensionalSet(c1, c2) + c4 = Compound.IntensionalSet(Term("A"), Term("B"), Term("C")) + self.assertEqual(c3, c4) + + pass + + def test_compound_variable_0(self): + ''' + <(&&, <$x-->A>, <$y-->A>) ==> (&&, <$x-->B>, <$y-->C>)>. 
+ ''' + term = Narsese.parse("<(&&, <$x-->A>, <$y-->A>) ==> (&&, <$x-->B>, <$y-->C>)>.").term + self.assertEqual(len(term[0].terms), 2) + + +if __name__ == '__main__': + + test_classes_to_run = [ + TEST_Compound + ] + + loader = unittest.TestLoader() + + suites = [] + for test_class in test_classes_to_run: + suite = loader.loadTestsFromTestCase(test_class) + suites.append(suite) + + suites = unittest.TestSuite(suites) + + runner = unittest.TextTestRunner() + results = runner.run(suites) diff --git a/Tests/test_Concept.py b/Tests/test_Concept.py new file mode 100644 index 0000000..da233ee --- /dev/null +++ b/Tests/test_Concept.py @@ -0,0 +1,43 @@ +from Narsese import Budget +import unittest + +from NARS.DataStructures import Bag, Task +from Narsese import Judgement, Term, Statement, Copula, Truth +import Narsese +from NARS.DataStructures import Concept + +class TEST_Concept(unittest.TestCase): + def __init__(self, methodName: str = ...) -> None: + super().__init__(methodName=methodName) + + def test_accept_task(self): + '''''' + # engine = Engine() + + line = 'bird>. %0.5;0.5%' + task = Narsese.parser.parse(line) + concept = Concept(task.term, task.budget) + concept.accept(task) + + line = 'bird>. %0.7;0.7%' + task = Narsese.parser.parse(line) + concept.accept(task) + + line = 'bird>. %0.9;0.9%' + task = Narsese.parser.parse(line) + concept.accept(task) + + line = 'bird>. %0.9;0.9%' + task = Narsese.parser.parse(line) + concept.accept(task) + task1 = task + + self.assertEqual(len(concept.belief_table), 4) + + belief = concept.get_belief() + self.assertEqual(task1, belief) + pass + + +if __name__ == '__main__': + unittest.main() diff --git a/Tests/test_Copula.py b/Tests/test_Copula.py new file mode 100644 index 0000000..133ff0a --- /dev/null +++ b/Tests/test_Copula.py @@ -0,0 +1,22 @@ +from Narsese import Budget +import unittest + +from NARS.DataStructures import Bag, Task +from Narsese import Judgement, Term, Statement, Copula, Truth +import Narsese +from NARS.DataStructures import Concept +from Narsese import Copula + +class TEST_Concept(unittest.TestCase): + def __init__(self, methodName: str = ...) -> None: + super().__init__(methodName=methodName) + + def test_copla_id(self): + '''''' + self.assertEqual(int(Copula.Inheritance), 0) + self.assertEqual(int(Copula.Similarity), 1) + pass + + +if __name__ == '__main__': + unittest.main() diff --git a/Tests/test_EvidentialBase.py b/Tests/test_EvidentialBase.py new file mode 100644 index 0000000..18959a2 --- /dev/null +++ b/Tests/test_EvidentialBase.py @@ -0,0 +1,76 @@ +from Narsese import Budget +import unittest + +from NARS.DataStructures import Bag, Task, Concept +from Narsese import Judgement, Term, Statement, Copula, Truth + +from Narsese import Base, Task + +class TEST_Base(unittest.TestCase): + def __init__(self, methodName: str = ...) 
-> None: + super().__init__(methodName=methodName) + + def test_add_evidence_1(self): + '''''' + task1 = Task(Judgement(Statement(Term('robin'), Copula.Inheritance, Term('bird')))) + task2 = Task(Judgement(Statement(Term('bird'), Copula.Inheritance, Term('animal')))) + task3 = Task(Judgement(Statement(Term('robin'), Copula.Inheritance, Term('animal')))) + base = Base() + base.add(task1) + base.add(task2) + self.assertEqual(len(base), 2) + base = Base((task1, task2, task3)) + self.assertEqual(len(base), 3) + + + def test_overlap_1(self): + '''''' + task1 = Task(Judgement(Statement(Term('robin'), Copula.Inheritance, Term('bird')))) + task2 = Task(Judgement(Statement(Term('bird'), Copula.Inheritance, Term('animal')))) + task3 = Task(Judgement(Statement(Term('robin'), Copula.Inheritance, Term('animal')))) + base1 = Base((task1, task2)) + base2 = Base((task2, task3)) + self.assertTrue(base1.is_overlaped(base2)) + + def test_overlap_1(self): + '''''' + task1 = Task(Judgement(Statement(Term('robin'), Copula.Inheritance, Term('bird')))) + task2 = Task(Judgement(Statement(Term('bird'), Copula.Inheritance, Term('animal')))) + task3 = Task(Judgement(Statement(Term('robin'), Copula.Inheritance, Term('animal')))) + base1 = Base((task1, task2)) + base2 = Base((task2, task3)) + self.assertFalse(base1==base2) + + def test_add_base_1(self): + '''''' + task1 = Task(Judgement(Statement(Term('robin'), Copula.Inheritance, Term('bird')))) + task2 = Task(Judgement(Statement(Term('bird'), Copula.Inheritance, Term('animal')))) + task3 = Task(Judgement(Statement(Term('robin'), Copula.Inheritance, Term('animal')))) + base1 = Base((task1, task2)) + base2 = Base((task2, task3)) + base3 = base1 | base2 + self.assertEqual(len(base3), 3) + self.assertEqual(len(base1), 2) + self.assertEqual(len(base2), 2) + base2 |= base1 + self.assertEqual(len(base1), 2) + self.assertEqual(len(base2), 3) + pass + + def test_hash_1(self): + task1 = Task(Judgement(Statement(Term('robin'), Copula.Inheritance, Term('bird')))) + task2 = Task(Judgement(Statement(Term('bird'), Copula.Inheritance, Term('animal')))) + task3 = Task(Judgement(Statement(Term('robin'), Copula.Inheritance, Term('animal')))) + base = Base((task1, task2)) + self.assertIsNone(base._hash) + h1 = hash(base) + self.assertIsNotNone(base._hash) + base.add(task3) + self.assertIsNone(base._hash) + h2 = hash(base) + self.assertIsNotNone(base._hash) + self.assertNotEqual(h1, h2) + pass + +if __name__ == '__main__': + unittest.main() diff --git a/Tests/test_Examples.py b/Tests/test_Examples.py new file mode 100644 index 0000000..ffdaec7 --- /dev/null +++ b/Tests/test_Examples.py @@ -0,0 +1,111 @@ +from typing import List +import NARS +import unittest + +from NARS.DataStructures import Bag, Task, Concept +from Narsese import Judgement, Term, Statement, Copula, Truth + +from pathlib import Path +import Narsese + +from utils.Print import out_print, PrintType, print_filename + +examples_path = Path(__file__).parent/'examples' + +def run_file(file: str): + nars = NARS.Reasoner_3_0_4(100, 100) + + with open(file, 'r') as f: + lines = f.readlines() + tasks_derived_all: List[Task] = [] + expect_out_empty = False + output_contains: List[Task] = [] + for i, line in enumerate(lines): + i += 1 + line = line.strip(' \n') + if line.startswith("//"): + continue + elif line.startswith("'''expect.outEmpty"): + expect_out_empty = True + continue + elif line.startswith("''"): + if line.startswith("''outputMustContain('"): + line = line[len("''outputMustContain('"):].rstrip("')\n") + if len(line) == 
0: continue + try: + content_check = Narsese.parser.parse(line) + output_contains.append(content_check) + except: + out_print(PrintType.ERROR, f'{file}, line {i}, {line}') + raise + continue + elif line.startswith("'"): + continue + elif line.isdigit(): + n_cycle = int(line) + out_print(PrintType.INFO, f'Run {n_cycle} cycles.') + for _ in range(n_cycle): + tasks_derived = nars.cycle() + tasks_derived_all.extend(tasks_derived) + for task in tasks_derived: out_print(PrintType.OUT, str(task.sentence), *task.budget) + + else: + line = line.rstrip(' \n') + if len(line) == 0: + continue + # content = Narsese.parser.parse(line) + try: + success, task, _ = nars.input_narsese(line, go_cycle=False) + if success: out_print(PrintType.IN, task.sentence, *task.budget) + else: out_print(PrintType.ERROR, f'Invalid input! Failed to parse: {line}') + + tasks_derived = nars.cycle() + tasks_derived_all.extend(tasks_derived) + for task in tasks_derived: out_print(PrintType.OUT, str(task.sentence), *task.budget) + if not success: + raise + except: + out_print(PrintType.ERROR, f'{file}, line {i}, {line}') + raise + if expect_out_empty and len(output_contains)==0: + if len(tasks_derived_all) != 0: raise + else: + output_not_contains = set(output_contains) - set(tasks_derived_all) + if len(output_not_contains) > 0: + for output in output_not_contains: + out_print(PrintType.ERROR, f'Fail to reason out: {output.sentence}') + raise + + +class TEST_Examples_Single_NAL1(unittest.TestCase): + '''Examples files in `application`.''' + + # def test_revision_0(self): + # print('\n') + # file = examples_path/'single_step/nal1.0.nal' + # print_filename(file.name) + # run_file(file) + + def test_deduction_0(self): + print('\n') + file = examples_path/'single_step/nal1/nal1.0.nal' + print_filename(file.name) + run_file(file) + +if __name__ == '__main__': + + test_classes_to_run = [ + TEST_Examples_Single_NAL1 + ] + + loader = unittest.TestLoader() + + suites = [] + for test_class in test_classes_to_run: + suite = loader.loadTestsFromTestCase(test_class) + suites.append(suite) + + suites = unittest.TestSuite(suites) + + runner = unittest.TextTestRunner() + results = runner.run(suites) diff --git a/Tests/test_GetIndex.py b/Tests/test_GetIndex.py new file mode 100644 index 0000000..5424ab2 --- /dev/null +++ b/Tests/test_GetIndex.py @@ -0,0 +1,95 @@ +import NARS +import unittest + +from NARS.DataStructures import Bag, Task, Concept, Link +from Narsese import Judgement, Term, Statement, Copula, Truth, Connector + +from pathlib import Path +import Narsese +from Narsese._py.Compound import Compound + +from utils.Print import out_print, PrintType, print_filename + +class TEST_Get_Index(unittest.TestCase): + '''Examples files in `application`.''' + + def test_get_index_0(self): + '''If term_component = A, term_compound = <A>-->C>>, then the index = [[0,1], [1,0]].''' + term_component = Term("A") + term_compound = Statement( + Statement(Term("B"), Copula.Inheritance, Term("A")), + Copula.Inheritance, + Statement(Term("A"), Copula.Inheritance, Term("C")), + ) + indexes = Link.get_index(term_component, term_compound) + self.assertEqual(len(indexes), 2) + self.assertEqual(indexes[0], [0, 1]) + self.assertEqual(indexes[1], [1, 0]) + pass + + def test_get_index_1(self): + '''If term_component = A, term_compound = <(&,B,A)-->(&,A,C)>, then the index = [[0,1], [1,0]].''' + term_component = Term("A") + term_compound = Statement( + Compound(Connector.ExtensionalIntersection, Term("B"), Term("A")), + Copula.Inheritance, + 
Compound(Connector.ExtensionalIntersection, Term("A"), Term("C")) + ) + indexes = Link.get_index(term_component, term_compound) + self.assertEqual(len(indexes), 2) + self.assertEqual(indexes[0], [0, 1]) + self.assertEqual(indexes[1], [1, 0]) + pass + + def test_get_index_2(self): + '''If term_component = A, term_compound = <(&,A>,(|,A,B,C))-->(&,(|, C>, (&, C, A>>)), C)>, then the index = [[0, 0, 1], [0, 1, 0], [1, 0, 0, 0], [1, 0, 1, 1, 1, 1]].''' + term_component = Term("A") + term_compound = Narsese.parser.parse("<(&,A>,(|,A,B,C))-->(&,(|, C>, (&, C, A>>)), C)>.").term + # term_compound = Statement( + # Compound(Connector.ExtensionalIntersection, Term("B"), Term("A")), + # Copula.Inheritence, + # Compound(Connector.ExtensionalIntersection, Term("A"), Term("C")) + # ) + indexes = Link.get_index(term_component, term_compound) + self.assertEqual(len(indexes), 4) + self.assertEqual(indexes[0], [0, 0, 1]) + self.assertEqual(indexes[1], [0, 1, 0]) + self.assertEqual(indexes[2], [1, 0, 0, 0]) + self.assertEqual(indexes[3], [1, 0, 1, 1, 1, 1]) + pass + + def test_get_index_3(self): + '''If term_component = A>, term_compound = <(&,A>,(|,A,B,C))-->(&,(|, C>, (&, C, A>>)), C)>, then the index = [[0, 0], [1, 0, 1, 1, 1]].''' + term_component = Narsese.parser.parse("A>.").term + term_compound = Narsese.parser.parse("<(&,A>,(|,A,B,C))-->(&,(|, C>, (&, C, A>>)), C)>.").term + indexes = Link.get_index(term_component, term_compound) + self.assertEqual(len(indexes), 2) + self.assertEqual(indexes[0], [0, 0]) + self.assertEqual(indexes[1], [1, 0, 1, 1, 1]) + pass + + def test_get_index_5(self): + '''If term_component = B>, term_compound = B>, then the index = [[]].''' + term_component = Narsese.parser.parse("B>.").term + term_compound = Narsese.parser.parse("B>.").term + indexes = Link.get_index(term_component, term_compound) + self.assertEqual(len(indexes), 1) + self.assertEqual(indexes[0], []) + pass +if __name__ == '__main__': + + test_classes_to_run = [ + TEST_Get_Index + ] + + loader = unittest.TestLoader() + + suites = [] + for test_class in test_classes_to_run: + suite = loader.loadTestsFromTestCase(test_class) + suites.append(suite) + + suites = unittest.TestSuite(suites) + + runner = unittest.TextTestRunner() + results = runner.run(suites) diff --git a/Tests/test_InferenceEngine.py b/Tests/test_InferenceEngine.py new file mode 100644 index 0000000..1288b5f --- /dev/null +++ b/Tests/test_InferenceEngine.py @@ -0,0 +1,60 @@ +import NARS +import unittest + +from NARS.DataStructures import Bag +from NARS.DataStructures._py.Concept import Concept +from NARS.DataStructures._py.Memory import Memory + +from NARS.InferenceEngine import GeneralEngine +import Narsese + + +class TEST_InferenceEngine(unittest.TestCase): + + def test_validation(self): + '''''' + engine = GeneralEngine() + line = 'bird>. %0.5;0.5%' + task = Narsese.parser.parse(line) + concept = Concept(task.term, task.budget) + concept.accept(task) + + line = 'bird>. %0.7;0.7%' + task = Narsese.parser.parse(line) + concept.accept(task) + + line = 'bird>. %0.9;0.9%' + task = Narsese.parser.parse(line) + concept.accept(task) + task1 = task + + line = 'animal>. 
%0.9;0.9%' + task = Narsese.parser.parse(line) + task2 = task + + belief = concept.get_belief() + self.assertFalse(engine.match(task1, belief)) + self.assertTrue(engine.match(task2, belief)) + + + pass + + + +if __name__ == '__main__': + + test_classes_to_run = [ + TEST_InferenceEngine + ] + + loader = unittest.TestLoader() + + suites = [] + for test_class in test_classes_to_run: + suite = loader.loadTestsFromTestCase(test_class) + suites.append(suite) + + suites = unittest.TestSuite(suites) + + runner = unittest.TextTestRunner() + results = runner.run(suites) diff --git a/Tests/test_Link.py b/Tests/test_Link.py new file mode 100644 index 0000000..69e1cbe --- /dev/null +++ b/Tests/test_Link.py @@ -0,0 +1,70 @@ +from typing import Dict, List +import NARS +import unittest + +from NARS.DataStructures import Bag, Task, Concept, Link +from NARS.DataStructures._py.Link import LinkType, TaskLink, TermLink +from NARS.DataStructures._py.Memory import Memory +from Narsese import Judgement, Term, Statement, Copula, Truth, Connector + +from pathlib import Path +import Narsese +from Narsese import Compound, Budget + +from utils.Print import out_print, PrintType, print_filename + +class TEST_Get_Index(unittest.TestCase): + '''Examples files in `application`.''' + + def test_get_index_0(self): + '''If source = A, target = <<B --> A> --> <A --> C>>, then the type = COMPOUND_STATEMENT, the index = [[0,1], [1,0]]; if source = <<B --> A> --> <A --> C>>, target = A, then the type = COMPONENT_STATEMENT, the index = [[0,1], [1,0]];''' + source = Concept(Term("A"), Budget(0.5, 0.5, 0.5)) + target = Narsese.parser.parse("<<B --> A> --> <A --> C>>.") + + term_link = TermLink(source, target, None, True, index=Link.get_index(target.term, source.term)[0]) + self.assertEqual(term_link.type, LinkType.COMPOUND_STATEMENT) + self.assertEqual(term_link.component_index, (0,1)) + + term_link = TermLink(target, source, None, False, index=Link.get_index(target.term, source.term)[1]) + self.assertEqual(term_link.type, LinkType.COMPONENT_STATEMENT) + self.assertEqual(term_link.component_index, (1,0)) + pass + + +class TEST_LinkType(unittest.TestCase): + '''Examples files in `application`.''' + + def test_transform(self): + memory = Memory(100, 100) + # task = Narsese.parse("<(*,acid, base) --> reaction>. %1.00;0.90%") + task = Narsese.parse("<(&&,<(*,x,worms) --> food>,<(*,x,tree) --> live>) ==> <x --> bird>>. 
%1.00;0.90%") + memory.accept(task) + x: Concept = memory.concepts.take_by_key(Term("x")) + links: Dict[int, TaskLink] = x.task_links.item_lut.lut + for link in links.values(): + if len(link.component_index) > 2: + self.assertIs(link.type, LinkType.TRANSFORM) + else: + self.assertIs(link.type, LinkType.COMPOUND_CONDITION) + + + pass + +if __name__ == '__main__': + + test_classes_to_run = [ + TEST_Get_Index, + TEST_LinkType + ] + + loader = unittest.TestLoader() + + suites = [] + for test_class in test_classes_to_run: + suite = loader.loadTestsFromTestCase(test_class) + suites.append(suite) + + suites = unittest.TestSuite(suites) + + runner = unittest.TextTestRunner() + results = runner.run(suites) diff --git a/Tests/test_Memory.py b/Tests/test_Memory.py new file mode 100644 index 0000000..7409745 --- /dev/null +++ b/Tests/test_Memory.py @@ -0,0 +1,99 @@ +import NARS +import unittest + +from NARS.DataStructures import Bag, Task, Memory, Concept +from NARS.DataStructures._py.Concept import Concept +from Narsese import Judgement, Term, Statement, Copula, Truth + +from pathlib import Path +import Narsese +from Narsese import Compound, Connector + + +def test_conceptualize(self): + pass + +class TEST_Memory(unittest.TestCase): + + def test_conceptualize(self): + '''''' + nars = NARS.Reasoner_3_0_4(100, 100) + + line = '<robin --> animal>.' + task = Narsese.parser.parse(line) + Concept._conceptualize(nars.memory.concepts, task.term, task.budget) + + line = '<robin --> animal>.' + task = Narsese.parser.parse(line) + Concept._conceptualize(nars.memory.concepts, task.term, task.budget) + + def test_accept_1(self): + '''''' + nars = NARS.Reasoner_3_0_4(100, 100) + + line = '((&&, <robin --> bird>, <bird --> animal>) ==> <robin --> animal>).' + task = Narsese.parser.parse(line) + term1 = Term('robin') + term2 = Term('bird') + term3 = Term('animal') + term4 = Statement(term1, Copula.Inheritance, term2) + term5 = Statement(term2, Copula.Inheritance, term3) + term6 = Statement(term1, Copula.Inheritance, term3) + term7 = Compound(Connector.Conjunction, term4, term5) + term8 = Statement(term7, Copula.Implication, term6) + set1 = set((term1, term2, term3, term4, term5, term6, term7)) + set2 = set1 | set((term8,)) + term = task.term + self.assertEqual(len(term._components), 7) + self.assertEqual(len(term.sub_terms), 8) + self.assertEqual(term._components, set1) + self.assertEqual(term.sub_terms, set2) + + nars.memory.accept(task) + + def test_accept_2(self): + '''''' + nars = NARS.Reasoner_3_0_4(100, 100) + + line = '<robin --> animal>.' + task = Narsese.parser.parse(line) + nars.memory.accept(task) + + line = '<robin --> animal>.' + task = Narsese.parser.parse(line) + nars.memory.accept(task) + + def test_accept_3(self): + '''''' + memory = Memory(100, 100) + + line = '<robin --> bird>. %0.5;0.5%' + task = Narsese.parser.parse(line) + memory.accept(task) + + line = '<robin --> animal>. 
%0.7;0.7%' + task = Narsese.parser.parse(line) + memory.accept(task) + + self.assertEqual(len(memory), 5) + + pass + + +if __name__ == '__main__': + + test_classes_to_run = [ + TEST_Memory + ] + + loader = unittest.TestLoader() + + suites = [] + for test_class in test_classes_to_run: + suite = loader.loadTestsFromTestCase(test_class) + suites.append(suite) + + suites = unittest.TestSuite(suites) + + runner = unittest.TextTestRunner() + results = runner.run(suites) diff --git a/Tests/test_NAL/__init__.py b/Tests/test_NAL/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/Tests/test_NAL/test_BUG_NAL4.py b/Tests/test_NAL/test_BUG_NAL4.py new file mode 100644 index 0000000..e30c60d --- /dev/null +++ b/Tests/test_NAL/test_BUG_NAL4.py @@ -0,0 +1,75 @@ +import NARS +import unittest + +from NARS.DataStructures import Bag, Task, Concept, Table +from NARS.DataStructures._py.Link import TaskLink, TermLink +from Narsese import Judgement, Term, Statement, Copula, Truth + +from pathlib import Path +import Narsese +from Narsese import Compound, Connector +from NAL.MetaLevelInference.VariableSubstitution import * +from Narsese import VarPrefix, Variable +from NARS.RuleMap import RuleMap_v2 +from NARS import Reasoner_3_0_4 as Reasoner + +import Tests.utils_for_test as utils_for_test +from Tests.utils_for_test import * +from NARS.RuleMap import Interface_TransformRules + +# utils_for_test.rule_map = RuleMap_v2() + + + +class TEST_BUG_NAL4(unittest.TestCase): + '''''' + def test_bug_0(self): + ''' + <bird --> animal>. %1.00;0.90% + (/, ?0, bird, _) + + |- + <(/, ?0, animal, _) --> (/, ?0, bird, _)>. + ''' + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<bird --> animal>. %1.00;0.90%', + '(/, tree, bird, _).', + 'bird.', is_belief_term=True) + tasks_derived = [rule(task, belief.term, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<(/, tree, animal, _) --> (/, tree, bird, _)>. %1.00;0.81%') + ) + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<bird --> animal>. %1.00;0.90%', + '(/, ?0, bird, _).', + 'bird.', is_belief_term=True) + if rules is not None: + tasks_derived = [rule(task, belief.term, task_link, term_link) for rule in rules] + self.assertFalse( + output_contains(tasks_derived, '<(/, ?0, animal, _) --> (/, ?0, bird, _)>. 
%1.00;0.81%') + ) + + pass + + + + +if __name__ == '__main__': + + test_classes_to_run = [ + TEST_BUG_NAL4 + ] + + loader = unittest.TestLoader() + + suites = [] + for test_class in test_classes_to_run: + suite = loader.loadTestsFromTestCase(test_class) + suites.append(suite) + + suites = unittest.TestSuite(suites) + + runner = unittest.TextTestRunner() + results = runner.run(suites) \ No newline at end of file diff --git a/Tests/test_NAL/test_NAL1.py b/Tests/test_NAL/test_NAL1.py new file mode 100644 index 0000000..98280ab --- /dev/null +++ b/Tests/test_NAL/test_NAL1.py @@ -0,0 +1,336 @@ +import NARS +import unittest + +from NARS.DataStructures import Bag, Task, Concept, Table +from NARS.DataStructures._py.Link import TaskLink, TermLink +from Narsese import Judgement, Term, Statement, Copula, Truth + +from pathlib import Path +import Narsese +from Narsese import Compound, Connector +from NAL.MetaLevelInference.VariableSubstitution import * +from Narsese import VarPrefix, Variable +from NARS.RuleMap import RuleMap_v2 +from NARS import Reasoner_3_0_4 as Reasoner + +import Tests.utils_for_test as utils_for_test +from Tests.utils_for_test import * + +utils_for_test.rule_map = RuleMap_v2() + +class TEST_NAL1(unittest.TestCase): + + def test_revision(self): + ''' + 'Revision ------ + + 'Bird is a type of swimmer. + swimmer>. %1.00;0.90% + + 'Bird is probably not a type of swimmer. + swimmer>. %0.10;0.60% + + 1 + + 'Bird is very likely to be a type of swimmer. + ''outputMustContain(' swimmer>. %0.87;0.91%') + ''' + tasks_derived = memory_accept_revision( + ' swimmer>. %1.00;0.90%', + ' swimmer>. %0.10;0.60%' + ) + self.assertTrue( + output_contains(tasks_derived, ' swimmer>. %0.87;0.91%') + ) + + pass + + + def test_deduction(self): + ''' + 'Deduction + + 'Bird is a type of animal. + animal>. %1.00;0.90% + + 'Robin is a type of bird. + bird>. %1.00;0.90% + + 10 + + 'Robin is a type of animal. + ''outputMustContain(' animal>. %1.00;0.81%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' animal>. %1.00;0.90%', + ' bird>. %1.00;0.90%', + 'bird.') + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' animal>. %1.00;0.81%') + ) + + pass + + + def test_abduction(self): + ''' + 'Abduction + + 'Sport is a type of competition. + competition>. %1.00;0.90% + + 'Chess is a type of competition. + competition>. %0.90;0.90% + + 3 + + 'I guess sport is a type of chess. + ''outputMustContain(' chess>. %1.00;0.42%') + + 'I guess chess is a type of sport. + ''outputMustContain(' sport>. %0.90;0.45%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' competition>. %1.00;0.90%', + ' competition>. %0.90;0.90%', + 'competition.') + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' chess>. %1.00;0.42%') + ) + self.assertTrue( + output_contains(tasks_derived, ' sport>. %0.90;0.45%') + ) + + pass + + + def test_induction(self): + ''' + 'Induction + + 'Swan is a type of swimmer. + swimmer>. %0.90;0.90% + + 'Swan is a type of bird. + bird>. %1.00;0.90% + + 3 + + 'I guess bird is a type of swimmer. + ''outputMustContain(' swimmer>. %0.90;0.45%') + + 'I guess swimmer is a type of bird. + ''outputMustContain(' bird>. %1.00;0.42%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' swimmer>. 
%0.90;0.90%', + ' bird>. %1.00;0.90%', + 'swan.') + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' swimmer>. %0.90;0.45%') + ) + self.assertTrue( + output_contains(tasks_derived, ' bird>. %1.00;0.42%') + ) + + pass + + + def test_exemplification(self): + ''' + 'Exemplification + + 'Robin is a type of bird. + bird>. %1.00;0.90% + + 'A bird is a type of animal. + animal>. %1.00;0.90% + + 3 + + 'I guess animal is a type of robin. + ''outputMustContain(' robin>. %1.00;0.45%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' bird>. %1.00;0.90%', + ' animal>. %1.00;0.90%', + 'bird.') + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' robin>. %1.00;0.45%') + ) + + pass + + + def test_conversion(self): + ''' + 'Conversion + + 'Bird is a type of swimmer. + swimmer>. %1.00;0.90% + + 'Is swimmer a type of bird? + bird>? + + 6 + + 'I guess swimmer is a type of bird. + ''outputMustContain(' bird>. %1.00;0.47%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' swimmer>. %1.00;0.90%', + ' bird>?', + 'bird.', + True) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' bird>. %1.00;0.47%') + ) + + pass + + + def test_yn_question(self): + ''' + '"y/n" question + + ' Bird is a type of swimmer. + swimmer>. %1.00;0.90% + + ' Is bird a type of swimmer? + swimmer>? + + 1 + + ' Bird is a type of swimmer. + ''outputMustContain(' swimmer>. %1.00;0.90%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' swimmer>. %1.00;0.90%', + ' swimmer>? ', + 'bird.', + True) + _, _, answers_question, _ = result2 + # self.assertIsNone(rules) + # tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(answers_question, ' swimmer>. %1.00;0.90%') + ) + + pass + + def test_wh_question_0(self): + ''' + ' "wh" question + + ' Bird is a type of swimmer. + swimmer>. %1.00;0.80% + + ' What is a type of swimmer? + swimmer>? + + 5 + + ' Bird is a type of swimmer. + ''outputMustContain(' swimmer>. %1.00;0.80%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' swimmer>. %1.00;0.80%', + ' swimmer>?', + 'swimmer.', + True) + _, _, _, answers_quest = result2 + # tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(answers_quest, ' swimmer>. %1.00;0.90%') + ) + + pass + + + def test_wh_question_1(self): + ''' + ' "wh" question + + ' Bird is a type of swimmer. + swimmer>. %1.00;0.90% + flyer>. %1.00;0.90% + + ' What is a type of swimmer? + (&&, swimmer>, flyer>)? + + 5 + + ' Bird is a type of swimmer. + ''outputMustContain('(&&, swimmer>, flyer>). %1.00;0.81%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' swimmer>. %1.00;0.80%', + ' swimmer>?', + 'swimmer.', + True) + _, _, _, answers_quest = result2 + # tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(answers_quest, ' swimmer>. %1.00;0.90%') + ) + + pass + + + def test_backward_inference(self): + ''' + ' Backward inference + + ' Bird is a type of swimmer. + swimmer>. 
+    def test_backward_inference(self):
+        '''
+        ' Backward inference
+
+        ' Bird is a type of swimmer.
+        <bird --> swimmer>. %1.00;0.80%
+
+        ' What is a type of swimmer?
+        <?1 --> swimmer>?
+
+        5
+
+        ' What is a type of bird?
+        ''outputMustContain('<?1 --> bird>?')
+
+        ' What is the type of bird?
+        ''outputMustContain('<bird --> ?1>?')
+        '''
+        rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises(
+            '<bird --> swimmer>. %1.00;0.80%',
+            '<?1 --> swimmer>?',
+            'swimmer.',
+            True)
+        task.sentence.repr
+        tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules]
+        self.assertTrue(
+            output_contains(tasks_derived, '<?1 --> bird>?')
+        )
+        self.assertTrue(
+            output_contains(tasks_derived, '<bird --> ?1>?')
+        )
+
+        pass
+
+
+if __name__ == '__main__':
+
+    test_classes_to_run = [
+        TEST_NAL1
+    ]
+
+    loader = unittest.TestLoader()
+
+    suites = []
+    for test_class in test_classes_to_run:
+        suite = loader.loadTestsFromTestCase(test_class)
+        suites.append(suite)
+
+    suites = unittest.TestSuite(suites)
+
+    runner = unittest.TextTestRunner()
+    results = runner.run(suites)
\ No newline at end of file
diff --git a/Tests/test_NAL/test_NAL2.py b/Tests/test_NAL/test_NAL2.py
new file mode 100644
index 0000000..7addfdf
--- /dev/null
+++ b/Tests/test_NAL/test_NAL2.py
@@ -0,0 +1,537 @@
+import unittest
+
+import Narsese
+from NAL.MetaLevelInference.VariableSubstitution import *
+
+from Tests.utils_for_test import *
+
+
+class TEST_NAL2(unittest.TestCase):
+    ''''''
+
+    def test_revision(self):
+        '''
+        'Revision
+
+        'Robin is similar to swan.
+        <robin <-> swan>. %1.00;0.90%
+
+        'I think robin is not similar to swan.
+        <robin <-> swan>. %0.10;0.60%
+
+        1
+
+        'Robin is probably similar to swan.
+        ''outputMustContain('<robin <-> swan>. %0.87;0.91%')
+        '''
+
+        tasks_derived = memory_accept_revision(
+            '<robin <-> swan>. %1.00;0.90%',
+            '<robin <-> swan>. %0.10;0.60%'
+        )
+        self.assertTrue(
+            output_contains(tasks_derived, '<robin <-> swan>. %0.87;0.91%')
+        )
+
+
+    def test_comparison_0(self):
+        '''
+        'Comparison
+
+        'Swan is a type of swimmer.
+        <swan --> swimmer>. %0.90;0.90%
+
+        'Swan is a type of bird.
+        <swan --> bird>. %1.00;0.90%
+
+        3
+
+        'I guess that bird is similar to swimmer.
+        ''outputMustContain('<bird <-> swimmer>. %0.90;0.45%')
+        '''
+
+        rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises(
+            '<swan --> swimmer>. %0.90;0.90%',
+            '<swan --> bird>. %1.00;0.90%',
+            'swan.',
+            True)
+        tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules]
+        self.assertTrue(
+            output_contains(tasks_derived, '<bird <-> swimmer>. %0.90;0.45%')
+        )
+
+        pass
+
+    def test_backward_inference(self):
+        '''
+        'Backward inference
+
+        'Bird is a type of swimmer.
+        <bird --> swimmer>. %1.00;0.90%
+
+        'What is a swimmer?
+        <{?1} --> swimmer>?
+
+        5
+
+        'What is a bird?
+        ''outputMustContain('<{?1} --> bird>?')
+        '''
+
+        rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises(
+            '<bird --> swimmer>. %1.00;0.90%',
+            '<{?1} --> swimmer>?',
+            'swimmer.',
+            True)
+        tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules]
+        self.assertTrue(
+            output_contains(tasks_derived, '<{?1} --> bird>?')
+        )
+
+        pass
+
+
+    def test_comparison_1(self):
+        '''
+        'Comparison
+
+        'Sport is a type of competition.
+        <sport --> competition>. %1.00;0.90%
+
+        'Chess is a type of competition.
+        <chess --> competition>. %0.90;0.90%
+
+        3
+
+        'I guess chess is similar to sport.
+        ''outputMustContain('<chess <-> sport>. %0.90;0.45%')
+        '''
+
+        rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises(
+            '<sport --> competition>. %1.00;0.90%',
+            '<chess --> competition>. 
%0.90;0.90% ', + 'competition.', + True) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' sport>. %0.90;0.45%') + ) + + pass + + def test_analogy_0(self): + ''' + 'Analogy + + 'Swan is a type of swimmer. + swimmer>. %1.00;0.90% + + 'Gull is similar to swan. + swan>. %1.00;0.90% + + 3 + + 'I think gull is a type of swimmer. + ''outputMustContain(' swimmer>. %1.00;0.81%') + ''' + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' swimmer>. %1.00;0.90%', + ' swan>. %1.00;0.90%', + 'swan.', index_task=(0,), index_belief=(1,)) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' swimmer>. %1.00;0.81%') + ) + pass + + def test_analogy_1(self): + ''' + 'Analogy + + 'Gull is a type of swimmer. + swimmer>. %1.00;0.90% + + 'Gull is similar to a swan. + swan>. %1.00;0.90% + + 3 + + 'I believe a swan is a type of swimmer. + ''outputMustContain(' swimmer>. %1.00;0.81%') + ''' + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' swimmer>. %1.00;0.90%', + ' swan>. %1.00;0.90%', + 'gull.', + True) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' swimmer>. %1.00;0.81%') + ) + pass + + def test_resemblance(self): + ''' + 'Resemblance + + 'Robin is similar to swan. + swan>. %1.00;0.90% + + 'Gull is similar to swan. + swan>. %1.00;0.90% + + 3 + + 'Gull is similar to robin. + ''outputMustContain(' robin>. %1.00;0.81%') + ''' + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' swan>. %1.00;0.90%', + ' swan>. %1.00;0.90%', + 'swan.', + True) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' robin>. %1.00;0.81%') + ) + pass + + + def test_conversions_between_inheritance_and_similarity(self): + ''' + 'Conversions between inheritance and similarity + + 'Swan is a type of bird. + bird>. %1.00;0.90% + + 'Bird is not a type of swan. + swan>. %0.10;0.90% + + 1 + + 'Bird is different from swan. + ''outputMustContain(' swan>. %0.10;0.81%') + ''' + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' bird>. %1.00;0.90%', + ' swan>. %0.10;0.90%', + 'bird.', index_task=(1,), index_belief=(0,)) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' swan>. %0.10;0.81%') + ) + pass + + def test_structure_transformation_0(self): + ''' + 'Structure transformation + + 'Bright is similar to smart. + smart>. %0.90;0.90% + + 'Is bright thing a type of smart thing? + <[smart] --> [bright]>? + + 6 + + 'Bright thing is a type of smart thing. + ''outputMustContain('<[bright] <-> [smart]>. %0.90;0.90%') + ''outputMustContain('<[smart] --> [bright]>. %0.90;0.66%') + ''' + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' smart>. %0.90;0.90%', + '<[smart] --> [bright]>?', + 'bright.', + True) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<[bright] <-> [smart]>. %0.90;0.90%') + ) + self.assertTrue( + output_contains(tasks_derived, '<[smart] --> [bright]>. 
%0.90;0.66%') + ) + pass + + def test_structure_transformation_1(self): + ''' + 'Structure transformation + + 'Birdie is similar to Tweety + Tweety>. %0.90;0.90% + + 'Is Birdie similar to Tweety? + <{Birdie} <-> {Tweety}>? + + 6 + + 'Birdie is similar to Tweety. + ''outputMustContain('<{Birdie} <-> {Tweety}>. %0.90;0.73%') + ''' + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' Tweety>. %0.90;0.90%', + '<{Birdie} <-> {Tweety}>?', + 'Birdie.', + True) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<{Birdie} <-> {Tweety}>. %0.90;0.73%') + ) + pass + + def test_conversions_between_inheritance_and_similarity_0(self): + ''' + 'Conversions between inheritance and similarity + + 'Swan is a type of bird. + bird>. %1.00;0.90% + + 'Bird is different from swan. + swan>. %0.10;0.90% + + 1 + + 'Bird is probably not a type of swan. + ''outputMustContain(' swan>. %0.10;0.73%') + ''' + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' bird>. %1.00;0.90%', + ' swan>. %0.10;0.90%', + 'bird.', + True) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' swan>. %0.10;0.73%') + ) + pass + + + def test_conversions_between_inheritance_and_similarity_1(self): + ''' + 'Conversions between inheritance and similarity + + 'Swan is a type of bird. + bird>. %0.90;0.90% + + + 'Is bird similar to swan? + swan>? + + 6 + + 'I guess that bird is similar to swan. + ''outputMustContain(' swan>. %0.90;0.47%') + ''' + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' bird>. %0.90;0.90%', + ' swan>?', + 'bird.', + True) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' swan>. %0.90;0.47%') + ) + pass + + + def test_conversions_between_inheritance_and_similarity_2(self): + ''' + 'Conversions between inheritance and similarity + + 'a bird is similar to a swan. + swan>. %0.90;0.90% + + 'Is swan a type of bird? + bird>? + + 6 + + 'A swan is a type of bird. + ''outputMustContain(' bird>. %0.90;0.81%') + ''' + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' swan>. %0.90;0.90%', + ' bird>?', + 'bird.', + True) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' bird>. %0.90;0.81%') + ) + pass + + + def test_translating_instance_into_inheritance(self): + ''' + 'Translating instance into inheritance + + 'Tweety is a bird. + . %1.00;0.90% + + 1 + + ''outputMustContain('<{Tweety} --> bird>. %1.00;0.90%') + '//expect.outEmpty + ''' + task = Narsese.parse('. %1.00;0.90%') + tasks_derived = [task] + self.assertTrue( + output_contains(tasks_derived, '<{Tweety} --> bird>. %1.00;0.90%') + ) + pass + + + def test_translating_property_into_inheritance(self): + ''' + 'Translating property into inheritance + + 'Ravens are black. + . %1.00;0.90% + + 1 + + ''outputMustContain(' [black]>. %1.00;0.90%') + ''' + task = Narsese.parse('. %1.00;0.90%') + tasks_derived = [task] + self.assertTrue( + output_contains(tasks_derived, ' [black]>. %1.00;0.90%') + ) + pass + + + def test_translating_instance_property_into_inheritance(self): + ''' + 'Translating instance-property into inheritance + + 'Tweety is yellow. + . 
%1.00;0.90% + + 1 + + ''outputMustContain('<{Tweety} --> [yellow]>. %1.00;0.90%') + ''' + task = Narsese.parse('. %1.00;0.90%') + tasks_derived = [task] + self.assertTrue( + output_contains(tasks_derived, '<{Tweety} --> [yellow]>. %1.00;0.90%') + ) + pass + + def test_set_definition_0(self): + ''' + 'Set definition + + 'Tweety is Birdie. + <{Tweety} --> {Birdie}>. %1.00;0.90% + + 3 + + 'Birdie is similar to Tweety. + ''outputMustContain('<{Birdie} <-> {Tweety}>. %1.00;0.90%') + ''' + task = Narsese.parse('<{Tweety} --> {Birdie}>. %1.00;0.90%') + tasks_derived = [task] + self.assertTrue( + output_contains(tasks_derived, '<{Birdie} <-> {Tweety}>. %1.00;0.90%') + ) + pass + + + def test_set_definition_1(self): + ''' + 'Set definition + + 'Smart thing is a type of bright thing. + <[smart] --> [bright]>. %1.00;0.90% + + 1 + + 'Bright thing is similar to smart thing. + ''outputMustContain('<[bright] <-> [smart]>. %1.00;0.90%') + ''' + task = Narsese.parse('<[smart] --> [bright]>. %1.00;0.90%') + tasks_derived = [task] + self.assertTrue( + output_contains(tasks_derived, '<[bright] <-> [smart]>. %1.00;0.90%') + ) + pass + + + def test_set_definition_2(self): + ''' + 'Set definition + + 'Birdie is similar to Tweety. + <{Birdie} <-> {Tweety}>. %1.00;0.90% + + 1 + + 'Birdie is similar to Tweety. + ''outputMustContain(' Tweety>. %1.00;0.90%') + + 'Tweety is Birdie. + ''outputMustContain('<{Tweety} --> {Birdie}>. %1.00;0.90%') + ''' + task = Narsese.parse('<{Birdie} <-> {Tweety}>. %1.00;0.90%') + tasks_derived = [task] + self.assertTrue( + output_contains(tasks_derived, ' Tweety>. %1.00;0.90%') + ) + + self.assertTrue( + output_contains(tasks_derived, '<{Tweety} --> {Birdie}>. %1.00;0.90%') + ) + pass + + def test_set_definition_3(self): + ''' + 'Set definition + + 'Bright thing is similar to smart thing. + <[bright] <-> [smart]>. %1.00;0.90% + + 1 + + 'Bright is similar to smart. + ''outputMustContain(' smart>. %1.00;0.90%') + + 'Bright thing is a type of smart thing. + ''outputMustContain('<[bright] --> [smart]>. %1.00;0.90%') + ''' + task = Narsese.parse('<[bright] <-> [smart]>. %1.00;0.90%') + tasks_derived = [task] + self.assertTrue( + output_contains(tasks_derived, ' smart>. %1.00;0.90%') + ) + + self.assertTrue( + output_contains(tasks_derived, '<[bright] --> [smart]>. 
%1.00;0.90%') + ) + pass + +if __name__ == '__main__': + + test_classes_to_run = [ + TEST_NAL2 + ] + + loader = unittest.TestLoader() + + suites = [] + for test_class in test_classes_to_run: + suite = loader.loadTestsFromTestCase(test_class) + suites.append(suite) + + suites = unittest.TestSuite(suites) + + runner = unittest.TextTestRunner() + results = runner.run(suites) \ No newline at end of file diff --git a/Tests/test_NAL/test_NAL3.py b/Tests/test_NAL/test_NAL3.py new file mode 100644 index 0000000..889691c --- /dev/null +++ b/Tests/test_NAL/test_NAL3.py @@ -0,0 +1,540 @@ +import NARS +import unittest + +from NARS.DataStructures import Bag, Task, Concept, Table +from NARS.DataStructures._py.Link import TaskLink, TermLink +from Narsese import Judgement, Term, Statement, Copula, Truth + +from pathlib import Path +import Narsese +from Narsese import Compound, Connector +from NAL.MetaLevelInference.VariableSubstitution import * +from Narsese import VarPrefix, Variable +from NARS.RuleMap import RuleMap_v2 +from NARS import Reasoner_3_0_4 as Reasoner + +import Tests.utils_for_test as utils_for_test +from Tests.utils_for_test import * + +# utils_for_test.rule_map = RuleMap_v2() + + +class TEST_NAL3(unittest.TestCase): + '''''' + + def test_compound_intersection_extension(self): + ''' + 'Compound composition, two premises + + 'Swan is a type of swimmer. + swimmer>. %0.90;0.90% + + 'Swan is a type of bird. + bird>. %0.80;0.90% + + 16 + + 'Swan is a type of bird or a type of swimmer. + ''outputMustContain(' (|,bird,swimmer)>. %0.98;0.81%') + + + 'Swan is a type of swimming bird. + ''outputMustContain(' (&,bird,swimmer)>. %0.72;0.81%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' swimmer>. %0.90;0.90%', + ' bird>. %0.80;0.90%', + 'swan.') + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' (&,bird,swimmer)>. %0.72;0.81%') + ) + pass + + + def test_compound_union_extension(self): + ''' + 'Compound composition, two premises + + 'Swan is a type of swimmer. + swimmer>. %0.90;0.90% + + 'Swan is a type of bird. + bird>. %0.80;0.90% + + 16 + + 'Swan is a type of bird or a type of swimmer. + ''outputMustContain(' (|,bird,swimmer)>. %0.98;0.81%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' swimmer>. %0.90;0.90%', + ' bird>. %0.80;0.90%', + 'swan.') + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' (|,bird,swimmer)>. %0.98;0.81%') + ) + pass + + + def test_compound_intersection_intension(self): + ''' + 'Compound composition, two premises + + 'Sport is a type of competition. + competition>. %0.90;0.90% + + 'Chess is a type of competition. + competition>. %0.80;0.90% + + 16 + + 'If something is either chess or sport, then it is a competition. + ''outputMustContain('<(|,chess,sport) --> competition>. %0.72;0.81%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' competition>. %0.90;0.90%', + ' competition>. %0.80;0.90%', + 'competition.') + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<(|,chess,sport) --> competition>. %0.72;0.81%') + ) + pass + + + def test_compound_union_intension(self): + ''' + 'Compound composition, two premises + + 'Sport is a type of competition. + competition>. 
%0.90;0.90% + + 'Chess is a type of competition. + competition>. %0.80;0.90% + + 16 + + 'If something is both chess and sport, then it is a competition. + ''outputMustContain('<(&,chess,sport) --> competition>. %0.98;0.81%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' competition>. %0.90;0.90%', + ' competition>. %0.80;0.90%', + 'competition.') + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<(&,chess,sport) --> competition>. %0.98;0.81%') + ) + pass + + + def test_compound_decomposition_intensional_intersection(self): + ''' + 'Compound decomposition, two premises + + 'Robin is a type of bird or a type of swimmer. + (|,bird,swimmer)>. %1.00;0.90% + + 'Robin is not a type of swimmer. + swimmer>. %0.00;0.90% + + 32 + + 'Robin is a type of bird. + + ''outputMustContain(' bird>. %1.00;0.81%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' (|,bird,swimmer)>. %1.00;0.90%', + ' swimmer>. %0.00;0.90%', + 'robin.') + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' bird>. %1.00;0.81%') + ) + + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' swimmer>. %0.00;0.90%', + ' (|,bird,swimmer)>. %1.00;0.90%', + 'robin.') + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' bird>. %1.00;0.81%') + ) + pass + + + def test_compound_decomposition_extensional_difference(self): + ''' + 'Compound decomposition, two premises + + 'Robin is not a type of swimmer. + swimmer>. %0.00;0.90% + + 'Robin is not a nonswimming mammal. + (-,mammal,swimmer)>. %0.00;0.90% + + 32 + + 'Robin is not a type of mammal. + ''outputMustContain(' mammal>. %0.00;0.81%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' swimmer>. %0.00;0.90%', + ' (-,mammal,swimmer)>. %0.00;0.90%', + 'robin.') + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' mammal>. %0.00;0.81%') + ) + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' (-,mammal,swimmer)>. %0.00;0.90%', + ' swimmer>. %0.00;0.90%', + 'robin.') + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' mammal>. %0.00;0.81%') + ) + + pass + + + def test_set_operations_0(self): + ''' + 'Compound decomposition, two premises + + 'PlanetX is Mars, Pluto, or Venus. + {Mars,Pluto,Venus}>. %0.90;0.90% + + 'PlanetX is probably Pluto or Saturn. + {Pluto,Saturn}>. %0.70;0.90% + + 32 + + 'PlanetX is Mars, Pluto, Saturn, or Venus. + ''outputMustContain(' {Mars,Pluto,Saturn,Venus}>. %0.97;0.81%') + + 'PlanetX is probably Pluto. + ''outputMustContain(' {Pluto}>. %0.63;0.81%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' {Mars,Pluto,Venus}>. %0.90;0.90%', + ' {Pluto,Saturn}>. %0.70;0.90%', + 'planetX.') + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' {Mars,Pluto,Saturn,Venus}>. %0.97;0.81%') + ) + self.assertTrue( + output_contains(tasks_derived, ' {Pluto}>. 
%0.63;0.81%') + ) + + + pass + + + def test_set_operations_1(self): + ''' + 'Compound decomposition, two premises + 'PlanetX is Mars, Pluto, or Venus. + {Mars,Pluto,Venus}>. %0.90;0.90% + + 'PlanetX is probably neither Pluto nor Saturn. + {Pluto,Saturn}>. %0.10;0.90% + + 32 + + 'PlanetX is Mars, Pluto, Saturn, or Venus. + ''outputMustContain(' {Mars,Pluto,Saturn,Venus}>. %0.91;0.81%') + + 'PlanetX is either Mars or Venus. + ''outputMustContain(' {Mars,Venus}>. %0.81;0.81%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' {Mars,Pluto,Venus}>. %0.90;0.90%', + ' {Pluto,Saturn}>. %0.10;0.90%', + 'planetX.') + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' {Mars,Pluto,Saturn,Venus}>. %0.91;0.81%') + ) + self.assertTrue( + output_contains(tasks_derived, ' {Mars,Venus}>. %0.81;0.81%') + ) + + pass + + + def test_bi_composition_0(self): + ''' + 'Compound decomposition, two premises + + 'Bird is a type of animal. + animal>. %0.90;0.90% + + 'Is a swimming bird a type of swimming animal? + <(&,bird,swimmer) --> (&,animal,swimmer)>? + + 32 + + 'A swimming bird is probably a type of swimming animal. + ''outputMustContain('<(&,bird,swimmer) --> (&,animal,swimmer)>. %0.90;0.73%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' animal>. %0.90;0.90% ', + '(&,bird,swimmer).', + 'bird.', is_belief_term=True) + tasks_derived = [rule(task, belief.term, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<(&,bird,swimmer) --> (&,animal,swimmer)>. %0.90;0.73%') + ) + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' animal>. %0.90;0.90% ', + '(&,animal,swimmer).', + 'animal.', is_belief_term=True) + tasks_derived = [rule(task, belief.term, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<(&,bird,swimmer) --> (&,animal,swimmer)>. %0.90;0.73%') + ) + + pass + + + def test_bi_composition_1(self): + ''' + 'Compound decomposition, two premises + + 'Bird is a type of animal. + animal>. %0.90;0.90% + + 'Is a nonanimal swimmer a type of a nonbird swimmer? + <(-,swimmer,animal) --> (-,swimmer,bird)>? + + 32 + + 'A nonanimal swimmer is probably a type of nonbird swimmer. + ''outputMustContain('<(-,swimmer,animal) --> (-,swimmer,bird)>. %0.90;0.73%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' animal>. %0.90;0.90%', + '(-,swimmer,animal).', + 'animal.', is_belief_term=True) + tasks_derived = [rule(task, belief.term, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<(-,swimmer,animal) --> (-,swimmer,bird)>. %0.90;0.73%') + ) + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' animal>. %0.90;0.90%', + '(-,swimmer,bird).', + 'bird.', is_belief_term=True) + tasks_derived = [rule(task, belief.term, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<(-,swimmer,animal) --> (-,swimmer,bird)>. %0.90;0.73%') + ) + + pass + + + def test_uni_composition_0(self): + ''' + 'Compound decomposition, two premises + + 'Swan is a type of bird. + bird>. %0.90;0.90% + + 'Is a swan a type of bird or swimmer? + (|,bird,swimmer)>? + + 32 + + 'A swan is probably a type of bird or swimmer. 
+ ''outputMustContain(' (|,bird,swimmer)>. %0.90;0.73%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' bird>. %0.90;0.90%', + '(|,bird, swimmer).', + 'bird.', is_belief_term=True) + tasks_derived = [rule(task, belief.term, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' (|,bird,swimmer)>. %0.90;0.73%') + ) + + pass + + + def test_uni_composition_1(self): + ''' + 'Compound decomposition, two premises + + bird>. %0.90;0.90% + + 'Swan is a type of bird. + <(&,swan,swimmer) --> bird>? + + 'Is swimming swan a type of bird? + 32 + + 'Swimming swan is a type of bird. + ''outputMustContain('<(&,swan,swimmer) --> bird>. %0.90;0.73%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' bird>. %0.90;0.90%', + '(&,swan,swimmer).', + 'swan.', is_belief_term=True) + tasks_derived = [rule(task, belief.term, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<(&,swan,swimmer) --> bird>. %0.90;0.73%') + ) + + pass + + + def test_uni_composition_2(self): + ''' + 'Compound decomposition, two premises + + 'Swan is a type of bird. + bird>. %0.90;0.90% + + 'Is swan a type of nonbird swimmer? + (-,swimmer,bird)>? + + 32 + + 'A swan is not a type of nonbird swimmer. + ''outputMustContain(' (-,swimmer,bird)>. %0.10;0.73%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' bird>. %0.90;0.90%', + '(-,swimmer,bird).', + 'bird.', is_belief_term=True) + tasks_derived = [rule(task, belief.term, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' (-,swimmer,bird)>. %0.10;0.73%') + ) + + pass + + + def test_uni_decomposition_0(self): + ''' + 'Compound decomposition, two premises + + 'Robin is a type of swimming bird. + (&,bird,swimmer)>. %0.90% + + 32 + + 'Robin is a type of bird. + ''outputMustContain(' bird>. %0.90;0.73%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' (&,bird,swimmer)>. %0.90;0.90%', + 'bird.', + '(&,bird,swimmer).', is_belief_term=True) + tasks_derived = [rule(task, belief.term, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' bird>. %0.90;0.73%') + ) + + pass + + + def test_uni_decomposition_1(self): + ''' + 'Compound decomposition, two premises + + 'Robin is a type of nonswimming bird. + (-,bird,swimmer)>. %0.90;0.90% + + 32 + + 'Robin is a type of bird. + ''outputMustContain(' bird>. %0.90;0.73%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' (-,bird,swimmer)>. %0.90;0.90% ', + 'bird.', + '(-,bird,swimmer).', is_belief_term=True) + tasks_derived = [rule(task, belief.term, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' bird>. %0.90;0.73%') + ) + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' (-,bird,swimmer)>. %0.90;0.90% ', + 'swimmer.', + '(-,bird,swimmer).', is_belief_term=True) + if rules is not None: + tasks_derived = [rule(task, belief.term, task_link, term_link) for rule in rules] + self.assertFalse( + output_contains(tasks_derived, ' swimmer>. %0.90;0.73%') + ) + + pass + + def test_uni_decomposition_2(self): + ''' + 'Boys and girls are youth. + <(|, boy, girl) --> youth>. %0.90;0.90% + + 32 + + 'Boys are youth. 
+        ''outputMustContain('<boy --> youth>. %0.90;0.73%')
+        '''
+        rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises(
+            '<(|, boy, girl) --> youth>. %0.90;0.90% ',
+            'boy.',
+            '(|, boy, girl).', is_belief_term=True)
+        tasks_derived = [rule(task, belief.term, task_link, term_link) for rule in rules]
+        self.assertTrue(
+            output_contains(tasks_derived, '<boy --> youth>. %0.90;0.73%')
+        )
+
+        pass
+
+    def test_uni_decomposition_3(self):
+        '''
+        'What differs boys from girls is being strong.
+        <(~, boy, girl) --> [strong]>. %0.90;0.90%
+
+        32
+
+        'Boys are strong.
+        ''outputMustContain('<boy --> [strong]>. %0.90;0.73%')
+        '''
+        rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises(
+            '<(~, boy, girl) --> [strong]>. %0.90;0.90%',
+            'boy.',
+            '(~, boy, girl).', is_belief_term=True)
+        tasks_derived = [rule(task, belief.term, task_link, term_link) for rule in rules]
+        self.assertTrue(
+            output_contains(tasks_derived, '<boy --> [strong]>. %0.90;0.73%')
+        )
+
+        pass
+
+if __name__ == '__main__':
+
+    test_classes_to_run = [
+        TEST_NAL3
+    ]
+
+    loader = unittest.TestLoader()
+
+    suites = []
+    for test_class in test_classes_to_run:
+        suite = loader.loadTestsFromTestCase(test_class)
+        suites.append(suite)
+
+    suites = unittest.TestSuite(suites)
+
+    runner = unittest.TextTestRunner()
+    results = runner.run(suites)
\ No newline at end of file
diff --git a/Tests/test_NAL/test_NAL4.py b/Tests/test_NAL/test_NAL4.py
new file mode 100644
index 0000000..2940e7a
--- /dev/null
+++ b/Tests/test_NAL/test_NAL4.py
@@ -0,0 +1,333 @@
+import NARS
+import unittest
+
+from NARS.DataStructures import Bag, Task, Concept, Table
+from NARS.DataStructures._py.Link import TaskLink, TermLink
+from Narsese import Judgement, Term, Statement, Copula, Truth
+
+from pathlib import Path
+import Narsese
+from Narsese import Compound, Connector
+from NAL.MetaLevelInference.VariableSubstitution import *
+from Narsese import VarPrefix, Variable
+from NARS.RuleMap import RuleMap_v2
+from NARS import Reasoner_3_0_4 as Reasoner
+
+import Tests.utils_for_test as utils_for_test
+from Tests.utils_for_test import *
+from NARS.RuleMap import Interface_TransformRules
+
+# utils_for_test.rule_map = RuleMap_v2()
+
+
+
+class TEST_NAL4(unittest.TestCase):
+    ''''''
+
+    def test_structural_transformation_0(self):
+        '''
+        'Structural transformation
+
+        'An acid and a base can have a reaction.
+        <(*,acid, base) --> reaction>. %1.00;0.90%
+
+        2
+
+        'Acid can react with base.
+        ''outputMustContain('<acid --> (/,reaction,_,base)>. %1.00;0.90%')
+
+        'A base is something that has a reaction with an acid.
+        ''outputMustContain('<base --> (/,reaction,acid,_)>. %1.00;0.90%')
+        '''
+        rules, task, concept, task_link, result1 = rule_map_task_only(
+            '<(*,acid, base) --> reaction>. %1.00;0.90%',
+            'acid',
+            (0,0)
+        )
+        tasks_derived = [rule(task, None, task_link, None) for rule in rules]
+
+        self.assertTrue(
+            output_contains(tasks_derived, '<acid --> (/,reaction,_,base)>. %1.00;0.90%')
+        )
+
+        rules, task, concept, task_link, result1 = rule_map_task_only(
+            '<(*,acid, base) --> reaction>. %1.00;0.90%',
+            'base',
+            (0,1)
+        )
+        tasks_derived = [rule(task, None, task_link, None) for rule in rules]
+
+        self.assertTrue(
+            output_contains(tasks_derived, '<base --> (/,reaction,acid,_)>. %1.00;0.90%')
+        )
+        pass
+
+
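These NAL-4 tests all revolve around the equivalence between a product statement and its extensional images: <(*,a,b) --> R> says the same thing as <a --> (/,R,_,b)> and as <b --> (/,R,a,_)>, which is why every expected output above carries the premise truth %1.00;0.90% through unchanged. A toy, illustrative rewriter over nested tuples (not this repo's Term/Compound API):

def product_to_images(subject_pair, relation):
    '''<(*, a, b) --> relation>  =>  the two extensional-image forms'''
    a, b = subject_pair
    return [
        (a, ('/', relation, '_', b)),   # <a --> (/, relation, _, b)>
        (b, ('/', relation, a, '_')),   # <b --> (/, relation, a, _)>
    ]

# e.g. <(*,acid,base) --> reaction> gives both statements asserted above:
for subj, pred in product_to_images(('acid', 'base'), 'reaction'):
    print(subj, '-->', pred)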
+    def test_structural_transformation_1(self):
+        '''
+        'Structural transformation
+
+        'Acid can react with base.
+        <acid --> (/,reaction,_,base)>. %1.00;0.90%
+
+        3
+
+        'An acid and a base can have a reaction.
+        ''outputMustContain('<(*,acid,base) --> reaction>. %1.00;0.90%')
+        '''
+        rules, task, concept, task_link, result1 = rule_map_task_only(
+            '<acid --> (/,reaction,_,base)>. %1.00;0.90%',
+            'base',
+            (1,2)
+        )
+        tasks_derived = [rule(task, None, task_link, None) for rule in rules]
+
+        self.assertTrue(
+            output_contains(tasks_derived, '<(*,acid,base) --> reaction>. %1.00;0.90%')
+        )
+        pass
+
+
+    def test_structural_transformation_2(self):
+        '''
+        'Structural transformation
+
+        'Acid can react with base.
+        <acid --> (/,reaction,_,base)>. %1.00;0.90%
+
+        3
+
+        'A base is something that has a reaction with an acid.
+        ''outputMustContain('<base --> (/,reaction,acid,_)>. %1.00;0.90%')
+        '''
+        rules, task, concept, task_link, result1 = rule_map_task_only(
+            '<acid --> (/,reaction,_,base)>. %1.00;0.90%',
+            'base',
+            (1,2)
+        )
+        tasks_derived = [rule(task, None, task_link, None) for rule in rules]
+
+        self.assertTrue(
+            output_contains(tasks_derived, '<base --> (/,reaction,acid,_)>. %1.00;0.90%')
+        )
+        pass
+
+
+    def test_structural_transformation_3(self):
+        '''
+        'Structural transformation
+
+        'A base is something that has a reaction with an acid.
+        <base --> (/,reaction,acid,_)>. %1.00;0.90%
+
+        3
+
+        'Acid can react with base.
+        ''outputMustContain('<acid --> (/,reaction,_,base)>. %1.00;0.90%')
+        '''
+        rules, task, concept, task_link, result1 = rule_map_task_only(
+            '<base --> (/,reaction,acid,_)>. %1.00;0.90%',
+            'acid',
+            (1,1)
+        )
+        tasks_derived = [rule(task, None, task_link, None) for rule in rules]
+
+        self.assertTrue(
+            output_contains(tasks_derived, '<acid --> (/,reaction,_,base)>. %1.00;0.90%')
+        )
+        pass
+
+
+    def test_structural_transformation_4(self):
+        '''
+        'Structural transformation
+
+        'Something that can neutralize a base is an acid.
+        <(\,neutralization,_,base) --> acid>. %1.00;0.90%
+
+        2
+
+        'Neutralization is a relation between an acid and a base.
+        ''outputMustContain('<neutralization --> (*,acid,base)>. %1.00;0.90%')
+        '''
+        rules, task, concept, task_link, result1 = rule_map_task_only(
+            '<(\,neutralization,_,base) --> acid>. %1.00;0.90%',
+            'base',
+            (0,2)
+        )
+        tasks_derived = [rule(task, None, task_link, None) for rule in rules]
+
+        self.assertTrue(
+            output_contains(tasks_derived, '<neutralization --> (*,acid,base)>. %1.00;0.90%')
+        )
+        pass
+
+
+    def test_structural_transformation_5(self):
+        '''
+        'Structural transformation
+
+        'Something that can neutralize a base is an acid.
+        <(\,neutralization,_,base) --> acid>. %1.00;0.90%
+
+        2
+
+        'Something that can be neutralized by an acid is a base.
+        ''outputMustContain('<(\,neutralization,acid,_) --> base>. %1.00;0.90%')
+        '''
+        rules, task, concept, task_link, result1 = rule_map_task_only(
+            '<(\,neutralization,_,base) --> acid>. %1.00;0.90%',
+            'base',
+            (0,2)
+        )
+        tasks_derived = [rule(task, None, task_link, None) for rule in rules]
+
+        self.assertTrue(
+            output_contains(tasks_derived, '<(\,neutralization,acid,_) --> base>. %1.00;0.90%')
+        )
+        pass
+
+
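The intensional-image (backslash) cases above and below are the mirror equivalence on the predicate side: <R --> (*,a,b)> says the same as <(\,R,_,b) --> a> and as <(\,R,a,_) --> b>, again with truth carried through unchanged. The same kind of illustrative sketch, under the same assumptions as the previous note:

def relation_to_intensional_images(relation, object_pair):
    '''<relation --> (*, a, b)>  =>  the two intensional-image forms'''
    a, b = object_pair
    return [
        (('\\', relation, '_', b), a),  # <(\, relation, _, b) --> a>
        (('\\', relation, a, '_'), b),  # <(\, relation, a, _) --> b>
    ]

# e.g. <neutralization --> (*,acid,base)> yields the two statements tested here:
for subj, pred in relation_to_intensional_images('neutralization', ('acid', 'base')):
    print(subj, '-->', pred)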
+    def test_structural_transformation_6(self):
+        '''
+        'Structural transformation
+
+        'Something that can be neutralized by an acid is a base.
+        <(\,neutralization,acid,_) --> base>. %1.00;0.90%
+
+        2
+
+        'Something that can neutralize a base is an acid.
+        ''outputMustContain('<(\,neutralization,_,base) --> acid>. %1.00;0.90%')
+        '''
+        rules, task, concept, task_link, result1 = rule_map_task_only(
+            '<(\,neutralization,acid,_) --> base>. %1.00;0.90%',
+            'acid',
+            (0,1)
+        )
+        tasks_derived = [rule(task, None, task_link, None) for rule in rules]
+
+        self.assertTrue(
+            output_contains(tasks_derived, '<(\,neutralization,_,base) --> acid>. %1.00;0.90%')
+        )
+        pass
+
+
+    def test_structural_transformation_7(self):
+        '''
+        'Structural transformation
+
+        'Something that can be neutralized by an acid is a base.
+        <(\,neutralization,acid,_) --> base>. %1.00;0.90%
+
+        2
+
+        'Neutralization is a relation between an acid and a base.
+        ''outputMustContain('<neutralization --> (*,acid,base)>. %1.00;0.90%')
+        '''
+        rules, task, concept, task_link, result1 = rule_map_task_only(
+            '<(\,neutralization,acid,_) --> base>. %1.00;0.90%',
+            'acid',
+            (0,1)
+        )
+        tasks_derived = [rule(task, None, task_link, None) for rule in rules]
+
+        self.assertTrue(
+            output_contains(tasks_derived, '<neutralization --> (*,acid,base)>. %1.00;0.90%')
+        )
+        pass
+
+
+    def test_structural_transformation_8(self):
+        '''
+        'Structural transformation
+
+        'Bird is a type of animal.
+        <bird --> animal>. %1.00;0.90%
+
+        'What is the relation between a bird and a plant?
+        <(*,bird,plant) --> ?x>?
+
+        6
+
+        'The relation between bird and plant is a type of relation between animal and plant.
+        ''outputMustContain('<(*,bird,plant) --> (*,animal,plant)>. %1.00;0.81%')
+        '''
+        rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises(
+            '<bird --> animal>. %1.00;0.90%',
+            '(*,bird,plant).',
+            'bird.', is_belief_term=True, index_task=(0,), index_belief=(0,))
+        tasks_derived = [rule(task, belief.term, task_link, term_link) for rule in rules]
+        self.assertTrue(
+            output_contains(tasks_derived, '<(*,bird,plant) --> (*,animal,plant)>. %1.00;0.81%')
+        )
+        pass
+
+    def test_structural_transformation_9(self):
+        '''
+        'Structural transformation
+
+        'Neutralization is a type of reaction.
+        <neutralization --> reaction>. %1.00;0.90%
+
+        'What can be neutralized by acid?
+        <(\,neutralization,acid,_) --> ?x>?
+
+        6
+
+        'What can be neutralized by acid can react with acid.
+        ''outputMustContain('<(\,neutralization,acid,_) --> (\,reaction,acid,_)>. %1.00;0.81%')
+        '''
+        rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises(
+            '<neutralization --> reaction>. %1.00;0.90%',
+            '(\,neutralization,acid,_).',
+            'neutralization.', is_belief_term=True, index_task=(0,), index_belief=(0,))
+        tasks_derived = [rule(task, belief.term, task_link, term_link) for rule in rules]
+        self.assertTrue(
+            output_contains(tasks_derived, '<(\,neutralization,acid,_) --> (\,reaction,acid,_)>. %1.00;0.81%')
+        )
+        pass
+
+
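Worth noting for the two-premise cases here and in test_structural_transformation_10 below: the pure image/product rewrites in tests 0 through 7 keep the premise truth unchanged (%1.00;0.90% in, %1.00;0.90% out), whereas the structural inferences that combine a judgement with a compound question pay the usual strong-rule cost, multiplying in a second 0.90 confidence factor. One way to read the %1.00;0.81% expectations, treating the structural premise as a (1.0, 0.9) judgement (an interpretation, not taken from this codebase):

# deduction-style combination of (1.00, 0.90) with an implicit (1.00, 0.90)
f1, c1, f2, c2 = 1.00, 0.90, 1.00, 0.90
print('%%%.2f;%.2f%%' % (f1 * f2, f1 * f2 * c1 * c2))  # -> %1.00;0.81%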
+    def test_structural_transformation_10(self):
+        '''
+        'Structural transformation
+
+        'Soda is a type of base.
+        <soda --> base>. %1.00;0.90%
+
+        'What is something that can neutralize a base?
+        <(/,neutralization,_,base) --> ?x>?
+
+        6
+
+        'What can neutralize base can react with base.
+        ''outputMustContain('<(/,neutralization,_,base) --> (/,neutralization,_,soda)>. %1.00;0.81%')
+        '''
+        rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises(
+            '<soda --> base>. %1.00;0.90%',
+            '(/,neutralization,_,base).',
+            'base.', is_belief_term=True, index_task=(1,), index_belief=(2,))
+        tasks_derived = [rule(task, belief.term, task_link, term_link) for rule in rules]
+        self.assertTrue(
+            output_contains(tasks_derived, '<(/,neutralization,_,base) --> (/,neutralization,_,soda)>. %1.00;0.81%')
+        )
+        pass
+
+if __name__ == '__main__':
+
+    test_classes_to_run = [
+        TEST_NAL4
+    ]
+
+    loader = unittest.TestLoader()
+
+    suites = []
+    for test_class in test_classes_to_run:
+        suite = loader.loadTestsFromTestCase(test_class)
+        suites.append(suite)
+
+    suites = unittest.TestSuite(suites)
+
+    runner = unittest.TextTestRunner()
+    results = runner.run(suites)
\ No newline at end of file
diff --git a/Tests/test_NAL/test_NAL5.py b/Tests/test_NAL/test_NAL5.py
new file mode 100644
index 0000000..de752f0
--- /dev/null
+++ b/Tests/test_NAL/test_NAL5.py
@@ -0,0 +1,1060 @@
+'''
+test NAL5
+'''
+import unittest
+from pathlib import Path
+
+import NARS
+import Narsese
+import Tests.utils_for_test as utils_for_test
+from NAL.MetaLevelInference.VariableSubstitution import *
+from NARS import Reasoner_3_0_4 as Reasoner
+from NARS.DataStructures import Bag, Concept, Table, Task
+from NARS.DataStructures._py.Link import TaskLink, TermLink
+from NARS.RuleMap import RuleMap_v2
+from Narsese import (Compound, Connector, Copula, Judgement, Statement, Term,
+                     Truth, Variable, VarPrefix)
+from Tests.utils_for_test import *
+
+# utils_for_test.rule_map = RuleMap_v2()
+
+
+class TEST_NAL5(unittest.TestCase):
+    ''''''
+
+    def test_revision(self):
+        '''
+        'Revision
+
+        'If robin can fly then robin is a type of bird.
+        <<robin --> [flying]> ==> <robin --> bird>>. %1.00;0.90%
+
+        'If robin can fly then robin may not be a type of bird.
+        <<robin --> [flying]> ==> <robin --> bird>>. %0.00;0.60%
+
+        1
+
+        'If robin can fly then robin is a type of bird.
+        ''outputMustContain('<<robin --> [flying]> ==> <robin --> bird>>. %0.86;0.91%')
+        '''
+        tasks_derived = memory_accept_revision(
+            '<<robin --> [flying]> ==> <robin --> bird>>. %1.00;0.90%',
+            '<<robin --> [flying]> ==> <robin --> bird>>. %0.00;0.60% '
+        )
+        self.assertTrue(
+            output_contains(tasks_derived, '<<robin --> [flying]> ==> <robin --> bird>>. %0.86;0.91%')
+        )
+
+        pass
+
+
+    def test_deduction(self):
+        '''
+        'Deduction
+
+        'If robin is a type of bird then robin is a type of animal.
+        <<robin --> bird> ==> <robin --> animal>>. %1.00;0.9%
+
+        'If robin can fly then robin is a type of bird.
+        <<robin --> [flying]> ==> <robin --> bird>>. %1.00;0.9%
+
+        14
+
+        'If robin can fly then robin is a type of animal.
+        ''outputMustContain('<<robin --> [flying]> ==> <robin --> animal>>. %1.00;0.81%')
+        '''
+        rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises(
+            '<<robin --> bird> ==> <robin --> animal>>. %1.00;0.9%',
+            '<<robin --> [flying]> ==> <robin --> bird>>. %1.00;0.9%',
+            '<robin --> bird>.', index_task=(0,), index_belief=(1,))
+        tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules]
+        self.assertTrue(
+            output_contains(tasks_derived, '<<robin --> [flying]> ==> <robin --> animal>>. %1.00;0.81%')
+        )
+        pass
+
+    def test_exemplification(self):
+        '''
+        'Exemplification
+
+        'If robin can fly then robin is a type of bird.
+        <<robin --> [flying]> ==> <robin --> bird>>. %1.00;0.90%
+
+        'If robin is a type of bird then robin is a type of animal.
+        <<robin --> bird> ==> <robin --> animal>>. %1.00;0.90%
+
+        19
+
+        'I guess if robin is a type of animal then robin can fly.
+        ''outputMustContain('<<robin --> animal> ==> <robin --> [flying]>>. %1.00;0.45%')
+        '''
+        rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises(
+            '<<robin --> [flying]> ==> <robin --> bird>>. %1.00;0.90%',
+            '<<robin --> bird> ==> <robin --> animal>>. %1.00;0.90%',
+            '<robin --> bird>.', index_task=(1,), index_belief=(0,))
+        tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules]
+        self.assertTrue(
+            output_contains(tasks_derived, '<<robin --> animal> ==> <robin --> [flying]>>. %1.00;0.45%')
+        )
+        pass
+
+
+    def test_induction(self):
+        '''
+        'Induction
+
+        'If robin is a type of bird then robin is a type of animal.
+        <<robin --> bird> ==> <robin --> animal>>. 
%1.00;0.90% + + 'If robin is a type of bird then robin can fly. + < bird> ==> [flying]>>. %0.80;0.90% + + 140 + + 'I guess if robin can fly then robin is a type of animal. + ''outputMustContain('< [flying]> ==> animal>>. %1.00;0.39%') + + 'I guess if robin is a type of animal then robin can fly. + ''outputMustContain('< animal> ==> [flying]>>. %0.80;0.45%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '< bird> ==> animal>>. %1.00;0.90%', + '< bird> ==> [flying]>>. %0.80;0.90%', + ' bird>.', index_task=(0,), index_belief=(0,)) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '< animal> ==> [flying]>>. %0.80;0.45%') + ) + # for task in tasks_derived: + # print(task) + pass + + + def test_abduction(self): + ''' + 'Abduction + + 'If robin is a type of bird then robin is a type of animal. + < bird> ==> animal>>. %1.00;0.90% + + 'If robin can fly then robin is probably a type of animal. + < [flying]> ==> animal>>. %0.80;0.90% + + 19 + + 'I guess if robin is a type of bird then robin can fly. + ''outputMustContain('< bird> ==> [flying]>>. %1.00;0.39%') + + 'I guess if robin can fly then robin is a type of bird. + ''outputMustContain('< [flying]> ==> bird>>. %0.80;0.45%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '< bird> ==> animal>>. %1.00;0.90%', + '< [flying]> ==> animal>>. %0.80;0.90%', + ' animal>.', index_task=(1,), index_belief=(1,)) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '< [flying]> ==> bird>>. %0.80;0.45%') + ) + for task in tasks_derived: + print(task) + pass + + + def test_conditional_deduction_0(self): + ''' + 'Detachment + + 'If robin is a type of bird then robin can fly. + < bird> ==> animal>>. %1.00;0.90% + + 'Robin is a type of bird. + bird>. %1.00;0.90% + + 1 + + 'Robin is a type of animal. + ''outputMustContain(' animal>. %1.00;0.81%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '< bird> ==> animal>>. %1.00;0.90%', + ' bird>. %1.00;0.90%', + ' bird>.', index_task=(0,), index_belief=()) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' animal>. %1.00;0.81%') + ) + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' bird>. %1.00;0.90%', + '< bird> ==> animal>>. %1.00;0.90%', + ' bird>.', index_task=(), index_belief=(0,)) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' animal>. %1.00;0.81%') + ) + pass + + + def test_conditional_abduction(self): + ''' + 'Detachment + + 'Usually if robin is a type of bird then robin is a type of animal. + < bird> ==> animal>>. %0.70;0.90% + + 'Robin is a type of animal. + animal>. %1.00;0.90% + + 1 + + 'I guess robin is a type of bird. + ''outputMustContain(' bird>. %1.00;0.36%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '< bird> ==> animal>>. %0.70;0.90%', + ' animal>. %1.00;0.90%', + ' animal>.', index_task=(1,), index_belief=()) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' bird>. 
%1.00;0.36%') + ) + pass + + + + def test_comparison_0(self): + ''' + 'Detachment + + 'If robin is a type of bird then robin is a type of animal. + < bird> ==> animal>>. %1.00;0.90% + + 'If robin is a type of bird then robin can fly. + < bird> ==> [flying]>>. %0.80;0.90% + + 14 + + 'I guess robin is a type of animal if and only if robin can fly. + ''outputMustContain('< [flying]> <=> animal>>. %0.80;0.45%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '< bird> ==> animal>>. %1.00;0.90%', + '< bird> ==> [flying]>>. %0.80;0.90%', + ' bird>.', index_task=(0,), index_belief=(0,)) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '< [flying]> <=> animal>>. %0.80;0.45%') + ) + pass + + + def test_comparison_1(self): + ''' + 'Detachment + + 'If robin is a type of bird then usually robin is a type of animal. + < bird> ==> animal>>. %0.70;0.90% + + 'If robin can fly then robin is a type of animal. + < [flying]> ==> animal>>. %1.00;0.90% + + 19 + + 'I guess robin is a type of bird if and only if robin can fly. + ''outputMustContain('< [flying]> <=> bird>>. %0.70;0.45%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '< bird> ==> animal>>. %0.70;0.90%', + '< [flying]> ==> animal>>. %1.00;0.90%', + ' animal>.', index_task=(1,), index_belief=(1,)) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '< [flying]> <=> bird>>. %0.70;0.45%') + ) + pass + + + def test_analogy(self): + ''' + 'Detachment + + 'If robin is a type of bird then robin is a type of animal. + < bird> ==> animal>>. %1.00;0.90% + + 'Usually, robin is a type of bird if and only if robin can fly. + < bird> <=> [flying]>>. %0.80;0.90% + + 14 + + 'If robin can fly then probably robin is a type of animal. + ''outputMustContain('< [flying]> ==> animal>>. %0.80;0.65%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '< bird> ==> animal>>. %1.00;0.90%', + '< bird> <=> [flying]>>. %0.80;0.90%', + ' bird>.', index_task=(0,), index_belief=(0,)) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '< [flying]> ==> animal>>. %0.80;0.65%') + ) + pass + + + def test_conditional_analogy(self): + ''' + 'Detachment + + 'Robin is a type of bird. + bird>. %1.00;0.90% + + 'Usually, robin is a type of bird if and only if robin can fly. + < bird> <=> [flying]>>. %0.80;0.90% + + 1 + + 'I guess usually robin can fly. + ''outputMustContain(' [flying]>. %0.80;0.65%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' bird>. %1.00;0.90%', + '< bird> <=> [flying]>>. %0.80;0.90%', + ' bird>.', index_task=(), index_belief=(0,)) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' [flying]>. %0.80;0.65%') + ) + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '< bird> <=> [flying]>>. %0.80;0.90%', + ' bird>. %1.00;0.90%', + ' bird>.', index_task=(0,), index_belief=()) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' [flying]>. 
%0.80;0.65%') + ) + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' bird>. %1.00;0.90%', + '< [flying]> <=> bird>>. %0.80;0.90%', + ' bird>.', index_task=(), index_belief=(0,)) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' [flying]>. %0.80;0.65%') + ) + + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '< [flying]> <=> bird>>. %0.80;0.90%', + ' bird>. %1.00;0.90%', + ' bird>.', index_task=(0,), index_belief=()) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' [flying]>. %0.80;0.65%') + ) + pass + + + def test_resemblance(self): + ''' + 'Detachment + + 'Robin is a type of animal if and only if robin is a type of bird. + < animal> <=> bird>>. %1.00;0.90% + + 'Robin is a type of bird if and only if robin can fly. + < bird> <=> [flying]>>. %0.90;0.90% + + 19 + + 'Robin is a type of animal if and only if robin can fly. + ''outputMustContain('< [flying]> <=> animal>>. %0.90;0.81%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '< animal> <=> bird>>. %1.00;0.90%', + '< bird> <=> [flying]>>. %0.90;0.90% ', + ' bird>.', index_task=(1,), index_belief=(0,)) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '< [flying]> <=> animal>>. %0.90;0.81%') + ) + pass + + + def test_conversions_between_implication_and_equivalence(self): + ''' + 'conversions between Implication and Equivalence + + 'If robin can fly then robin is a type of bird. + < [flying]> ==> bird>>. %0.90;0.90% + + 'If robin is a type of bird then robin can fly. + < bird> ==> [flying]>>. %0.90;0.90% + + 7 + + 'Robin can fly if and only if robin is a type of bird. + ''outputMustContain('< [flying]> <=> bird>>. %0.81;0.81%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '< [flying]> ==> bird>>. %0.90;0.90%', + '< bird> ==> [flying]>>. %0.90;0.90%', + ' bird>.', index_task=(1,), index_belief=(0,)) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '< [flying]> <=> bird>>. %0.81;0.81%') + ) + pass + + + def test_conjunction_0(self): + ''' + 'compound composition, two premises + + 'If robin is a type of bird then robin is a type of animal. + < bird> ==> animal>>. %1.00;0.90% + + 'If robin is a type of bird then robin can fly. + < bird> ==> [flying]>>. %0.90;0.90% + + 14 + + 'If robin is a type of bird then usually robin is a type of animal and can fly. + ''outputMustContain('< bird> ==> (&&, [flying]>, animal>)>. %0.90;0.81%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '< bird> ==> animal>>. %1.00;0.90%', + '< bird> ==> [flying]>>. %0.90;0.90%', + ' bird>.', index_task=(0,), index_belief=(0,)) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '< bird> ==> (&&, [flying]>, animal>)>. %0.90;0.81%') + ) + + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '< bird> ==> animal>>. %1.00;0.90%', + '< bird> ==> [flying]>>. 
%0.90;0.90%', + 'robin.', index_task=(0,0), index_belief=(0,0)) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '< bird> ==> (&&, [flying]>, animal>)>. %0.90;0.81%') + ) + pass + + + def test_conjunction_1(self): + ''' + 'compound composition, two premises + + 'If robin is a type of bird then robin is a type of animal. + < bird> ==> animal>>. %1.00;0.90% + + 'If robin can fly then robin is a type of animal. + < [flying]> ==> animal>>. %0.90;0.90% + + 19 + + 'If robin can fly and is a type of bird then robin is a type of animal. + ''outputMustContain('<(&&, [flying]>, bird>) ==> animal>>. %1.00;0.81%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '< bird> ==> animal>>. %1.00;0.90%', + '< [flying]> ==> animal>>. %0.90;0.90% ', + ' animal>.', index_task=(1,), index_belief=(1,)) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<(&&, [flying]>, bird>) ==> animal>>. %1.00;0.81%') + ) + pass + + + def test_disjunction_0(self): + ''' + 'compound composition, two premises + + 'If robin is a type of bird then robin is a type of animal. + < bird> ==> animal>>. %1.00;0.90% + + 'If robin is a type of bird then robin can fly. + < bird> ==> [flying]>>. %0.90;0.90% + + 14 + + 'If robin is a type of bird then robin is a type of animal or can fly. + ''outputMustContain('< bird> ==> (||, [flying]>, animal>)>. %1.00;0.81%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '< bird> ==> animal>>. %1.00;0.90%', + '< bird> ==> [flying]>>. %0.90;0.90%', + ' bird>.', index_task=(0,), index_belief=(0,)) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '< bird> ==> (||, [flying]>, animal>)>. %1.00;0.81%') + ) + pass + + def test_disjunction_1(self): + ''' + 'compound composition, two premises + + 'If robin is a type of bird then robin is a type of animal. + < bird> ==> animal>>. %1.00;0.90% + + 'If robin can fly then robin is a type of animal. + < [flying]> ==> animal>>. %0.90;0.90% + + 19 + + 'If robin can fly or is a type of bird then robin is a type of animal. + ''outputMustContain('<(||, [flying]>, bird>) ==> animal>>. %0.90;0.81%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '< bird> ==> animal>>. %1.00;0.90%', + '< [flying]> ==> animal>>. %0.90;0.90% ', + ' animal>.', index_task=(1,), index_belief=(1,)) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<(||, [flying]>, bird>) ==> animal>>. %0.90;0.81%') + ) + pass + + + def test_decomposition_0(self): + ''' + 'compound decomposition, two premises + + 'If robin is a type of bird then robin is not a type of flying animal. + < bird> ==> (&&, animal>, [flying]>)>. %0.00;0.90% + + 'If robin is a type of bird then robin can fly. + < bird> ==> [flying]>>. %1.00;0.90% + + 8 + + 'It is unlikely that if a robin is a type of bird then robin is a type of animal. + ''outputMustContain('< bird> ==> animal>>. %0.00;0.81%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '< bird> ==> (&&, animal>, [flying]>)>. %0.00;0.90%', + '< bird> ==> [flying]>>. 
%1.00;0.90%', + ' bird>.', index_task=(0,), index_belief=(0,)) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '< bird> ==> animal>>. %0.00;0.81%') + ) + pass + + + def test_decomposition_1(self): + ''' + 'compound decomposition, two premises + + 'Robin cannot be both a flyer and a swimmer. + (&&, [flying]>, swimmer>). %0.00;0.90% + + 'Robin can fly. + [flying]>. %1.00;0.90% + + 6 + + 'Robin cannot swim. + ''outputMustContain(' swimmer>. %0.00;0.81%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '(&&, [flying]>, swimmer>). %0.00;0.90% ', + ' [flying]>. %1.00;0.90%', + 'robin.') + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' swimmer>. %0.00;0.81%') + ) + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '(&&, [flying]>, swimmer>). %0.00;0.90% ', + ' [flying]>. %1.00;0.90%', + '(&&, [flying]>, swimmer>).') + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' swimmer>. %0.00;0.81%') + ) + + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' [flying]>. %1.00;0.90%', + '(&&, [flying]>, swimmer>). %0.00;0.90% ', + 'robin.') + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' swimmer>. %0.00;0.81%') + ) + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' [flying]>. %1.00;0.90%', + '(&&, [flying]>, swimmer>). %0.00;0.90% ', + ' [flying]>.') + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' swimmer>. %0.00;0.81%') + ) + pass + + + def test_decomposition_2(self): + ''' + 'compound decomposition, two premises + + 'Robin can fly or swim. + (||, [flying]>, swimmer>). %1.00;0.90% + + 'Robin cannot swim. + swimmer>. %0.00;0.90% + + 2 + + 'Robin can fly. + ''outputMustContain(' [flying]>. %1.00;0.81%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '(||, [flying]>, swimmer>). %1.00;0.90% ', + ' swimmer>. %0.00;0.90%', + 'robin.', index_task=(0,0), index_belief=(0,)) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' [flying]>. %1.00;0.81%') + ) + + + def test_composition_0(self): + ''' + 'compound decomposition, two premises + + 'Robin can fly. + [flying]>. %1.00;0.90% + + 'Can robin fly or swim? + (||, [flying]>, swimmer>)? + + 7 + ''//+1 from original + + 'Robin can fly or swim. + ''outputMustContain('(||, [flying]>, swimmer>). %1.00;0.81%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' [flying]>. %1.00;0.90%', + '(||, [flying]>, swimmer>)?', + ' [flying]>.', is_belief_term=True, index_task=(), index_belief=(0,)) + tasks_derived = [rule(task, belief.term, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '(||, [flying]>, swimmer>). %1.00;0.81%') + ) + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' [flying]>. 
%1.00;0.90%', + '(||, [flying]>, swimmer>)?', + 'robin.', is_belief_term=True, index_task=(0,), index_belief=(0,0)) + tasks_derived = [rule(task, belief.term, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '(||, [flying]>, swimmer>). %1.00;0.81%') + ) + + + def test_composition_1(self): + ''' + 'compound decomposition, two premises + + 'Robin can fly and swim. + $0.90;0.90$ (&&, swimmer>, [flying]>). %0.90;0.90% + + + 1 + + 'Robin can swim. + ''outputMustContain(' swimmer>. %0.90;0.73%') + + 5 + ''//+2 from original + + 'Robin can fly. + ''outputMustContain(' [flying]>. %0.90;0.73%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '$0.90;0.90$ (&&, swimmer>, [flying]>). %0.90;0.90%', + ' swimmer>.', + '(&&, swimmer>, [flying]>).', is_belief_term=True, index_task=(), index_belief=(0,)) + tasks_derived = [rule(task, belief.term, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' swimmer>. %0.90;0.73%') + ) + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '$0.90;0.90$ (&&, swimmer>, [flying]>). %0.90;0.90%', + ' [flying]>.', + '(&&, swimmer>, [flying]>).', is_belief_term=True, index_task=(), index_belief=(1,)) + tasks_derived = [rule(task, belief.term, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' [flying]>. %0.90;0.73%') + ) + + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '$0.90;0.90$ (&&, swimmer>, [flying]>). %0.90;0.90%', + ' swimmer>.', + 'robin.', is_belief_term=True, index_task=(0,0), index_belief=(0,)) + tasks_derived = [rule(task, belief.term, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' swimmer>. %0.90;0.73%') + ) + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '$0.90;0.90$ (&&, swimmer>, [flying]>). %0.90;0.90%', + ' [flying]>.', + 'robin.', is_belief_term=True, index_task=(1,0), index_belief=(0,)) + tasks_derived = [rule(task, belief.term, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' [flying]>. %0.90;0.73%') + ) + + + def test_negation_0(self): + ''' + 'negation + + 'It is unlikely that robin cannot fly. + (--, [flying]>). %0.10;0.90% + + 3 + + 'Robin can fly. + ''outputMustContain(' [flying]>. %0.90;0.90%') + ''' + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' [flying]>. %0.90;0.90%', + '(--, [flying]>)?', + ' [flying]>.', is_belief_term=True, index_task=(), index_belief=(0,)) + tasks_derived = [rule(task, belief.term, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '(--, [flying]>). %0.10;0.90%') + ) + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' [flying]>. %0.90;0.90%', + '(--, [flying]>)?', + 'robin.', is_belief_term=True, index_task=(0,), index_belief=(0,0)) + tasks_derived = [rule(task, belief.term, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '(--, [flying]>). %0.10;0.90%') + ) + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '(--, [flying]>). 
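# --- Editor's note (hedged): the %0.90;0.73% expectations in
# test_composition_1 match NAL *structural* deduction, where a conjunction
# is reduced to one component against a default reliance constant (0.9 in
# OpenNARS; whether this repo reads it from Config.py is an assumption):
#   f = f1,  c = f1 * c1 * reliance
assert round(0.90 * 0.90 * 0.9, 2) == 0.73   # matches %0.90;0.73%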
%0.10;0.90%', + ' [flying]>.', + 'robin.', is_belief_term=True, index_task=(0,0), index_belief=(0,)) + tasks_derived = [rule(task, belief.term, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' [flying]>. %0.90;0.90%') + ) + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '(--, [flying]>). %0.10;0.90%', + 'robin.', + '(--, [flying]>).', is_belief_term=True, index_task=(), index_belief=(0,0)) + tasks_derived = [rule(task, belief.term, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' [flying]>. %0.90;0.90%') + ) + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '(--, [flying]>). %0.10;0.90%', + ' [flying]>.', + '(--, [flying]>).', is_belief_term=True, index_task=(), index_belief=(0,)) + tasks_derived = [rule(task, belief.term, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' [flying]>. %0.90;0.90%') + ) + + + def test_negation_1(self): + ''' + 'negation + + 'Robin can fly. + [flying]>. %0.90;0.90% + + 'Can robin fly or not? + (--, [flying]>)? + + ''//15 + 30 + + 'It is unlikely that robin cannot fly. + ''outputMustContain('(--, [flying]>). %0.10;0.90%') + ''' + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' [flying]>. %0.90;0.90%', + '(--, [flying]>)?', + ' [flying]>.', is_belief_term=True, index_task=(), index_belief=(0,)) + tasks_derived = [rule(task, belief.term, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '(--, [flying]>). %0.10;0.90%') + ) + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' [flying]>. %0.90;0.90%', + '(--, [flying]>)?', + 'robin.', is_belief_term=True, index_task=(0,), index_belief=(0,0)) + tasks_derived = [rule(task, belief.term, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '(--, [flying]>). %0.10;0.90%') + ) + + + def test_contraposition_0(self): + ''' + 'contraposition + + 'It is unlikely that if robin is not a type of bird then robin can fly. + <(--, bird>) ==> [flying]>>. %0.10;0.90% + + 'If robin cannot fly then is robin a type of bird? + <(--, [flying]>) ==> bird>>? + + 29 + + 'I guess it is unlikely that if robin cannot fly then robin is a type of bird. + ''outputMustContain('<(--, [flying]>) ==> bird>>. %0.00;0.45%') + + 561 + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<(--, bird>) ==> [flying]>>. %0.10;0.90%', + ' bird>.', + '(--, bird>).', is_belief_term=True, index_task=(0,), index_belief=(0,)) + tasks_derived = [rule(task, belief.term, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<(--, [flying]>) ==> bird>>. %0.00;0.45%') + ) + pass + + # rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + # '<(--, bird>) ==> [flying]>>. %0.10;0.90%', + # '<(--, [flying]>) ==> bird>>?', + # ' [flying]>.', is_belief_term=True, index_task=(1,), index_belief=(0,0)) + # tasks_derived = [rule(task, belief.term, task_link, term_link) for rule in rules] + # self.assertTrue( + # output_contains(tasks_derived, '<(--, [flying]>) ==> bird>>. 
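# --- Editor's note (hedged): the expectations in the negation and
# contraposition tests above are consistent with the standard NAL
# single-premise truth functions, assuming evidential horizon k = 1:
def negation(f, c):
    # frequency flips, confidence is preserved
    return 1 - f, c

def contraposition(f, c, k=1):
    # only negative evidence carries over; frequency is forced to 0
    w = (1 - f) * c
    return 0.0, w / (w + k)

assert negation(0.10, 0.90) == (0.90, 0.90)       # matches %0.90;0.90%
f, c = contraposition(0.10, 0.90)
assert f == 0.0 and round(c, 2) == 0.45           # matches %0.00;0.45%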
%0.00;0.45%') + # ) + # pass + + + def test_conditional_deduction_compound_eliminate_0(self): + ''' + 'conditional deduction + + 'If robin can fly and has wings then robin is a bird. + <(&&, [flying]>, [with_wings]>) ==> bird>>. %1.00;0.90% + + 'robin can fly. + [flying]>. %1.00;0.90% + + 1 + + 'If robin has wings then robin is a bird + ''outputMustContain('< [with_wings]> ==> bird>>. %1.00;0.81%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<(&&, [flying]>, [with_wings]>) ==> bird>>. %1.00;0.90%', + ' [flying]>. %1.00;0.90%', + 'robin.', index_task=(0,0,0), index_belief=(0,)) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '< [with_wings]> ==> bird>>. %1.00;0.81%') + ) + pass + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' [flying]>. %1.00;0.90%', + '<(&&, [flying]>, [with_wings]>) ==> bird>>. %1.00;0.90%', + 'robin.', index_task=(0,), index_belief=(0,0,0)) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '< [with_wings]> ==> bird>>. %1.00;0.81%') + ) + pass + + def test_conditional_deduction_compound_eliminate_1(self): + ''' + 'conditional deduction + + 'If robin can fly, has wings, and chirps, then robin is a bird + <(&&, [chirping]>, [flying]>, [with_wings]>) ==> bird>>. %1.00;0.90% + + 'robin can fly. + [flying]>. %1.00;0.90% + + 5 + + 'If robin has wings and chirps then robin is a bird. + ''outputMustContain('<(&&, [chirping]>, [with_wings]>) ==> bird>>. %1.00;0.81%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<(&&, [chirping]>, [flying]>, [with_wings]>) ==> bird>>. %1.00;0.90%', + ' [flying]>. %1.00;0.90%', + 'robin.', index_task=(0,0,0), index_belief=(0,)) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<(&&, [chirping]>, [with_wings]>) ==> bird>>. %1.00;0.81%') + ) + pass + + + def test_conditional_deduction_compound_replace_0(self): + ''' + 'conditional deduction + + 'If robin is a bird and it's living, then robin is an animal + <(&&, bird>, [living]>) ==> animal>>. %1.00;0.90% + + 'If robin can fly, then robin is a bird + < [flying]> ==> bird>>. %1.00;0.90% + + 1 + + 'If robin is living and it can fly, then robin is an animal. + ''outputMustContain('<(&&, [flying]>, [living]>) ==> animal>>. %1.00;0.81%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<(&&, bird>, [living]>) ==> animal>>. %1.00;0.90%', + '< [flying]> ==> bird>>. %1.00;0.90% ', + 'robin.', index_task=(0,0,0), index_belief=(0,0)) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<(&&, [flying]>, [living]>) ==> animal>>. %1.00;0.81%') + ) + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '< [flying]> ==> bird>>. %1.00;0.90% ', + '<(&&, bird>, [living]>) ==> animal>>. %1.00;0.90%', + 'robin.', index_task=(0,0), index_belief=(0,0,0)) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<(&&, [flying]>, [living]>) ==> animal>>. 
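# --- Editor's note (hedged): the %1.00;0.81% conclusions of the
# conditional-deduction tests above follow the NAL deduction truth function
#   f = f1 * f2,  c = f1 * f2 * c1 * c2,
# so with both premises at %1.00;0.90%:
assert round(1.00 * 1.00 * 0.90 * 0.90, 2) == 0.81   # matches %1.00;0.81%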
%1.00;0.81%') + ) + pass + + + def test_conditional_abduction_compound_replace_1(self): + ''' + 'conditional abduction + + 'If robin can fly then robin is a bird. + < [flying]> ==> bird>>. %1.00;0.90% + + 'If robin both swims and flys then robin is a bird. + <(&&, swimmer>, [flying]>) ==> bird>>. %1.00;0.90% + + 7 + + 'I guess robin swims. + ''outputMustContain(' swimmer>. %1.00;0.45%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<(&&, swimmer>, [flying]>) ==> bird>>. %1.00;0.90%', + '< [flying]> ==> bird>>. %1.00;0.90%', + 'robin.', index_task=(0,0,0), index_belief=(0,0)) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' swimmer>. %1.00;0.45%') + ) + pass + + + def test_conditional_abduction_compound_replace_2(self): + ''' + 'conditional abduction + + 'If robin can fly and it has wings, then robin is living. + <(&&, [flying]>, [with_wings]>) ==> [living]>>. %0.90;0.90% + + 'If robin can fly and robin is a bird then robin is living. + <(&&, [flying]>, bird>) ==> [living]>>. %1.00;0.90% + + 18 + + 'I guess if robin is a bird, then robin has wings. + ''outputMustContain('< bird> ==> [with_wings]>>. %1.00;0.42%') + + 'I guess if robin has wings, then robin is a bird. + ''outputMustContain('< [with_wings]> ==> bird>>. %0.90;0.45%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<(&&, [flying]>, [with_wings]>) ==> [living]>>. %0.90;0.90%', + '<(&&, [flying]>, bird>) ==> [living]>>. %1.00;0.90%', + 'robin.', index_task=(0,0,0), index_belief=(0,0,0)) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '< bird> ==> [with_wings]>>. %1.00;0.42%') + ) + + self.assertTrue( + output_contains(tasks_derived, '< [with_wings]> ==> bird>>. %0.90;0.45%') + ) + pass + + + def test_conditional_induction_compose(self): + ''' + 'conditional induction + + 'If robin can fly and robin chirps, then robin is a bird + <(&&, [chirping]>, [flying]>) ==> bird>>. %1.00;0.90% + + 'If robin can fly then usually robin has a beak. + < [flying]> ==> [with_beak]>>. %0.90;0.90% + + 18 + + 'I guess that if robin chirps and robin has a beak, then robin is a bird. + ''outputMustContain('<(&&, [chirping]>, [with_beak]>) ==> bird>>. %1.00;0.42%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<(&&, [chirping]>, [flying]>) ==> bird>>. %1.00;0.90%', + '< [flying]> ==> [with_beak]>>. %0.90;0.90%', + 'robin.', index_task=(0,0,0), index_belief=(0,0)) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<(&&, [chirping]>, [with_beak]>) ==> bird>>. %1.00;0.42%') + ) + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '< [flying]> ==> [with_beak]>>. %0.90;0.90%', + '<(&&, [chirping]>, [flying]>) ==> bird>>. %1.00;0.90%', + 'robin.', index_task=(0,0), index_belief=(0,0,0)) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<(&&, [chirping]>, [with_beak]>) ==> bird>>. 
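# --- Editor's note (hedged): the weaker %...;0.45% and %...;0.42%
# conclusions in the conditional abduction/induction tests above come from
# weight-to-confidence conversion, c = w / (w + k) with k = 1, where the
# evidence weight w is scaled by the other premise's frequency (which
# premise plays which role depends on rule direction):
def w2c(w, k=1):
    return w / (w + k)

assert round(w2c(1.00 * 0.90 * 0.90), 2) == 0.45   # w = 0.81,  c = 0.45
assert round(w2c(0.90 * 0.90 * 0.90), 2) == 0.42   # w = 0.729, c = 0.42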
%1.00;0.42%') + ) + pass + + + +if __name__ == '__main__': + + test_classes_to_run = [ + TEST_NAL5 + ] + + loader = unittest.TestLoader() + + suites = [] + for test_class in test_classes_to_run: + suite = loader.loadTestsFromTestCase(test_class) + suites.append(suite) + + suites = unittest.TestSuite(suites) + + runner = unittest.TextTestRunner() + results = runner.run(suites) diff --git a/Tests/test_NAL/test_NAL6.py b/Tests/test_NAL/test_NAL6.py new file mode 100644 index 0000000..8a4b2fb --- /dev/null +++ b/Tests/test_NAL/test_NAL6.py @@ -0,0 +1,948 @@ +import unittest + +from NARS.DataStructures import Task +from NAL.MetaLevelInference.VariableSubstitution import * +from NARS.RuleMap import RuleMap_v2 + +import Tests.utils_for_test as utils_for_test +from Tests.utils_for_test import * +from utils.Print import PrintType, out_print + +# utils_for_test.rule_map = RuleMap_v2() + +class SubstituteVar: + '''''' + def __init__(self, mapping_ivar: bidict, mapping_dvar: bidict, mapping_qvar: bidict) -> None: + self.mapping_ivar = mapping_ivar + self.mapping_dvar = mapping_dvar + self.mapping_qvar = mapping_qvar + + @property + def is_valid(self): + return len(self.mapping_dvar) > 0 or len(self.mapping_ivar) > 0 or len(self.mapping_qvar) > 0 + + @property + def is_qvar_valid(self): + return len(self.mapping_qvar) > 0 + + @property + def is_dvar_valid(self): + return len(self.mapping_dvar) > 0 + + @property + def is_ivar_valid(self): + return len(self.mapping_ivar) > 0 + + def apply(self, term1: Term, term2: Term, inverse=False): + mapping_ivar = self.mapping_ivar + mapping_dvar = self.mapping_dvar + mapping_qvar = self.mapping_qvar + if inverse: + term1, term2 = term2, term1 + mapping_ivar = mapping_ivar.inverse + mapping_dvar = mapping_dvar.inverse + mapping_qvar = mapping_qvar.inverse + ivar = [int(var) for var in term2._index_var.var_independent] + dvar = [int(var) for var in term2._index_var.var_dependent] + qvar = [int(var) for var in term2._index_var.var_query] + + term2._index_var.var_independent = [var(mapping_ivar[var_int]) for var, var_int in zip(term2._index_var.var_independent, ivar)] + term2._index_var.var_dependent = [var(mapping_dvar[var_int]) for var, var_int in zip(term2._index_var.var_dependent, dvar)] + term2._index_var.var_query = [var(mapping_qvar[var_int]) for var, var_int in zip(term2._index_var.var_query, qvar)] + # TODO: to recursively apply the variable-mapping to the terms. + + + +find_var_with_pos: Callable = lambda pos_search, variables, positions: [var for var, pos in zip(variables, positions) if pos[:len(pos_search)] == pos_search] + +def _build_mapping(variables1, variables2, var_common1, var_common2): + if len(variables1) == 0 and len(variables2) == 0: + mapping = bidict() + elif len(variables1) > 0 and len(variables2) > 0: + var_diff1 = sorted(list(set(variables1)-set(var_common1))) + var_diff2 = sorted(list(set(variables2)-set(var_common2))) + var_bias1 = max(variables1) + 1 + var_bias2 = max(variables2) + 1 + var_diff_new1 = [ivar+var_bias2 for ivar in var_diff1] + var_diff_new2 = [ivar+var_bias1 for ivar in var_diff2] + # mapping the second to the first + mapping = bidict({int(key): int(value) for key, value in (*zip(var_common2, var_common1), *zip(var_diff2, var_diff_new2), *zip(var_diff_new1, var_diff1))}) + else: # (len(variables1) > 0) ^ (len(variables2) > 0) + + mapping = bidict() + pass + return mapping + +def unification_variable(term1: Term, term2: Term, pos_common1: List[int], pos_common2: List[int]): + '''''' + # 1. 
find the variables in the first common position + ivar1 = find_var_with_pos(pos_common1, term1._index_var.var_independent, term1._index_var.positions_ivar) + dvar1 = find_var_with_pos(pos_common1, term1._index_var.var_dependent, term1._index_var.positions_dvar) + qvar1 = find_var_with_pos(pos_common1, term1._index_var.var_query, term1._index_var.positions_qvar) + + # 2. find the variables in the second common position + ivar2 = find_var_with_pos(pos_common2, term2._index_var.var_independent, term2._index_var.positions_ivar) + dvar2 = find_var_with_pos(pos_common2, term2._index_var.var_dependent, term2._index_var.positions_dvar) + qvar2 = find_var_with_pos(pos_common2, term2._index_var.var_query, term2._index_var.positions_qvar) + + # 3. build the mapping + mapping_ivar = _build_mapping(term1._index_var.var_independent, term2._index_var.var_independent, ivar1, ivar2) + mapping_dvar = _build_mapping(term1._index_var.var_dependent, term2._index_var.var_dependent, dvar1, dvar2) + mapping_qvar = _build_mapping(term1._index_var.var_query, term2._index_var.var_query, qvar1, qvar2) + + return SubstituteVar(mapping_ivar, mapping_dvar, mapping_qvar) + + + +class TEST_NAL6(unittest.TestCase): + '''''' + + def test_substition_var_to_var(self): + ''' + <(&&, <#x-->A>, <#x-->B>, <<$y-->C>==><$y-->D>>, <$z-->E>) ==> <$z-->F>>. + <<$x-->F>==><$x-->H>>. + |- + <(&&, <#x-->A>, <#x-->B>, <<$y-->C>==><$y-->D>>, <$z-->E>) ==> <$x-->H>>. + ''' + term1 = Narsese.parse("<<$x-->F>==><$x-->H>>.").term + term2 = Narsese.parse("<(&&, <#x-->A>, <#x-->B>, <<$y-->C>==><$y-->D>>, <$z-->E>) ==> <$z-->F>>.").term + subst_var = unification_variable(term1, term2, [0], [1]) # to find possible replacement. + subst_var.apply(term1, term2) + # subst_var.apply() + term3 = Statement.Implication(term1[0], term2[1]) + # term_substitution = substitution(compound, Term("A"), Term("D")) + # self.assertEqual(term_substitution, term_new) + pass + + def test_unification_0(self): + ''' + 'Variable unification + + 'If something is a bird, then it is a flyer. + <<$x --> bird> ==> <$x --> flyer>>. %1.00;0.90% + filyer> + 'If something is a bird, then it is not a flyer. + <<$y --> bird> ==> <$y --> flyer>>. %0.00;0.70% + + 1 + + 'If something is a bird, then usually, it is a flyer. + ''outputMustContain('<<$1 --> bird> ==> <$1 --> flyer>>. %0.79;0.92%') + ''' + tasks_derived = memory_accept_revision( + '<<$x --> bird> ==> <$x --> flyer>>. %1.00;0.90%', + '<<$y --> bird> ==> <$y --> flyer>>. %0.00;0.70%' + ) + self.assertTrue( + output_contains(tasks_derived, '<<$1 --> bird> ==> <$1 --> flyer>>. %0.79;0.92%') + ) + pass + + + def test_unification_1(self): + ''' + 'Variable unification + + 'If something is a bird, then it is a animal. + <<$x --> bird> ==> <$x --> animal>>. %1.00;0.90% + + 'If something is a robin, then it is a bird. + <<$y --> robin> ==> <$y --> bird>>. %1.00;0.90% + + 3 + + 'If something is a robin, then it is a animal. + ''outputMustContain('<<$1 --> robin> ==> <$1 --> animal>>. %1.00;0.81%') + + 'I guess that if something is a animal, then it is a robin. + ''outputMustContain('<<$1 --> animal> ==> <$1 --> robin>>. %1.00;0.45%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<<$x --> bird> ==> <$x --> animal>>. %1.00;0.90%', + '<<$y --> robin> ==> <$y --> bird>>. %1.00;0.90%', + '<$x --> bird>.', index_task=(0,), index_belief=(1,) + ) + self.assertNotEqual(rules, None) + + subst_var = unification_variable(task.term, belief.term, [0], [1]) # to find possible replacement. 
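# --- Editor's note (hedged): test_unification_0 above exercises revision
# via memory_accept_revision. The expected %0.79;0.92% follows from the
# standard NAL revision truth function, assuming k = 1:
def revision(f1, c1, f2, c2, k=1):
    w1, w2 = c1 / (1 - c1), c2 / (1 - c2)   # confidence -> evidence weight
    w = w1 + w2
    return (w1 * f1 + w2 * f2) / w, w / (w + k)

f, c = revision(1.00, 0.90, 0.00, 0.70)
assert round(f, 2) == 0.79 and round(c, 2) == 0.92   # matches %0.79;0.92%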
+ subst_var.apply(task.term, belief.term) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + + repr(tasks_derived[0].term) + self.assertTrue( + output_contains(tasks_derived, '<<$1 --> robin> ==> <$1 --> animal>>. %1.00;0.81%') + ) + self.assertTrue( + output_contains(tasks_derived, '<<$1 --> animal> ==> <$1 --> robin>>. %1.00;0.45%') + ) + + self.assertTrue( + not output_contains(tasks_derived, '<<$1 --> animal> ==> <$2 --> robin>>. %1.00;0.45%') + ) + pass + + + def test_unification_2(self): + ''' + 'Variable unification + + 'If something is a swan, then it is a bird. + <<$x --> swan> ==> <$x --> bird>>. %1.00;0.80% + + 'If something is a swan, then it is a swimmer. + <<$y --> swan> ==> <$y --> swimmer>>. %0.80;0.90% + + 3 + + 'I believe that if something is a swan, then it is a bird or a swimmer. + ''outputMustContain('<<$1 --> swan> ==> (||,<$1 --> bird>,<$1 --> swimmer>)>. %1.00;0.72%') + + 'I believe that if something is a swan, then usually, it is both a bird and a swimmer. + ''outputMustContain('<<$1 --> swan> ==> (&&,<$1 --> bird>,<$1 --> swimmer>)>. %0.80;0.72%') + + 'I guess if something is a swimmer, then it is a bird. + ''outputMustContain('<<$1 --> swimmer> ==> <$1 --> bird>>. %1.00;0.37%') + + 'I guess if something is a bird, then it is a swimmer. + ''outputMustContain('<<$1 --> bird> ==> <$1 --> swimmer>>. %0.80;0.42%') + + 'I guess something is a bird, if and only if it is a swimmer. + ''outputMustContain('<<$1 --> bird> <=> <$1 --> swimmer>>. %0.80;0.42%') + ''' + + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<<$x --> swan> ==> <$x --> bird>>. %1.00;0.80% ', + '<<$y --> swan> ==> <$y --> swimmer>>. %0.80;0.90%', + '<$x --> swan>.' + ) + self.assertNotEqual(rules, None) + + subst_var = unification_variable(task.term, belief.term, [0], [0]) # to find possible replacement. + subst_var.apply(task.term, belief.term) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + + + self.assertTrue( + output_contains(tasks_derived, '<<$1 --> swan> ==> (||,<$1 --> bird>,<$1 --> swimmer>)>. %1.00;0.72%') + ) + self.assertTrue( + output_contains(tasks_derived, '<<$1 --> swan> ==> (&&,<$1 --> bird>,<$1 --> swimmer>)>. %0.80;0.72%') + ) + self.assertTrue( + output_contains(tasks_derived, '<<$1 --> swimmer> ==> <$1 --> bird>>. %1.00;0.37%') + ) + self.assertTrue( + output_contains(tasks_derived, '<<$1 --> bird> ==> <$1 --> swimmer>>. %0.80;0.42%') + ) + self.assertTrue( + output_contains(tasks_derived, '<<$1 --> bird> <=> <$1 --> swimmer>>. %0.80;0.42%') + ) + pass + + + def test_unification_3(self): + ''' + 'Variable unification + + 'What can be said about bird can also be said about robin. + < $x> ==> $x>>. %1.00;0.90% + + 'What can be said about swimmer usually can also be said about robin. + < $y> ==> $y>>. %0.70;0.90% + + 3 + + 'What can be said about bird and swimmer can also be said about robin. + ''outputMustContain('<(&&, $1>, $1>) ==> $1>>. %1.00;0.81%') + + 'What can be said about bird or swimmer can also be said about robin. + ''outputMustContain('<(||, $1>, $1>) ==> $1>>. %0.70;0.81%') + + 'I guess what can be said about bird can also be said about swimmer. + ''outputMustContain('< $1> ==> $1>>. %1.00;0.36%') + + 'I guess what can be said about swimmer can also be said about bird. + ''outputMustContain('< $1> ==> $1>>. %0.70;0.45%') + + 'I guess bird and swimmer share most properties. + ''outputMustContain('< $1> <=> $1>>. 
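# --- Editor's note (hedged): the mixed strong/weak expectations in
# test_unification_2 are consistent with NAL induction and comparison
# (k = 1). With premises %1.00;0.80% and %0.80;0.90%, frequency is taken
# from one premise while the other premise's frequency scales the weight:
def w2c(w, k=1):
    return w / (w + k)

# induction:  w = f_other * c1 * c2
assert round(w2c(0.80 * 0.80 * 0.90), 2) == 0.37   # swimmer ==> bird, f=1.00
assert round(w2c(1.00 * 0.80 * 0.90), 2) == 0.42   # bird ==> swimmer, f=0.80
# comparison: f = (f1*f2)/or(f1,f2) = 0.80, w = or(f1,f2) * c1 * c2
assert round(w2c(1.00 * 0.80 * 0.90), 2) == 0.42   # bird <=> swimmer, f=0.80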
%0.70;0.45%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '< $x> ==> $x>>. %1.00;0.90%', + '< $y> ==> $y>>. %0.70;0.90%', + ' $x>.' + ) + self.assertNotEqual(rules, None) + + subst_var = unification_variable(task.term, belief.term, [1], [1]) # to find possible replacement. + subst_var.apply(task.term, belief.term) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + + self.assertTrue( + output_contains(tasks_derived, '<(&&, $1>, $1>) ==> $1>>. %1.00;0.81%') + ) + self.assertTrue( + output_contains(tasks_derived, '<(||, $1>, $1>) ==> $1>>. %0.70;0.81%') + ) + self.assertTrue( + output_contains(tasks_derived, '< $1> ==> $1>>. %1.00;0.36%') + ) + self.assertTrue( + output_contains(tasks_derived, '< $1> ==> $1>>. %0.70;0.45%') + ) + self.assertTrue( + output_contains(tasks_derived, '< $1> <=> $1>>. %0.70;0.45%') + ) + + + def test_unification_4(self): + ''' + 'Variable unification + + 'If something can fly and chirp, then it is a bird. + <(&&,<$x --> flyer>,<$x --> [chirping]>) ==> <$x --> bird>>. %1.00;0.90% + + 'If something has wings, then it can fly. + <<$y --> [with_wings]> ==> <$y --> flyer>>. %1.00;0.90% + + 8 + + 'If something can chirp and has wings, then it is a bird. + ''outputMustContain('<(&&,<$1 --> [chirping]>,<$1 --> [with_wings]>) ==> <$1 --> bird>>. %1.00;0.81%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<(&&,<$x --> flyer>,<$x --> [chirping]>) ==> <$x --> bird>>. %1.00;0.90%', + '<<$y --> [with_wings]> ==> <$y --> flyer>>. %1.00;0.90%', + '<$y --> flyer>.' + ) + self.assertNotEqual(rules, None) + + subst_var = unification_variable(task.term, belief.term, [0,0], [1]) # to find possible replacement. + subst_var.apply(task.term, belief.term) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + + self.assertTrue( + output_contains(tasks_derived, '<(&&,<$1 --> [chirping]>,<$1 --> [with_wings]>) ==> <$1 --> bird>>. %1.00;0.81%') + ) + pass + + + def test_unification_5(self): + ''' + 'Variable unification + + 'If something can fly, chirp, and eats worms, then it is a bird. + <(&&,<$x --> flyer>,<$x --> [chirping]>, <(*, $x, worms) --> food>) ==> <$x --> bird>>. + + 'If something can chirp and has wings, then it is a bird. + <(&&,<$x --> [chirping]>,<$x --> [with_wings]>) ==> <$x --> bird>>. + + ''//6 + 12 + + 'If something can fly and eats worms, then I guess it has wings. + ''outputMustContain('<(&&,<$1 --> flyer>,<(*,$1,worms) --> food>) ==> <$1 --> [with_wings]>>. %1.00;0.45%') + + 'I guess if something has wings, then it can fly and eats worms. + ''outputMustContain('<<$1 --> [with_wings]> ==> (&&,<$1 --> flyer>,<(*,$1,worms) --> food>)>. %1.00;0.45%') + ''' + pass + + + def test_unification_6(self): + ''' + 'Variable unification + + 'If something can fly and eats worms, then it is a bird. + <(&&,<$x --> flyer>,<(*,$x,worms) --> food>) ==> <$x --> bird>>. + + 'If something can fly, then it has wings. + <<$y --> flyer> ==> <$y --> [with_wings]>>. + + // 4 originally + 13 + + 'If something has wings and eats worms, then I guess it is a bird. + ''outputMustContain('<(&&,<$1 --> [with_wings]>,<(*,$1,worms) --> food>) ==> <$1 --> bird>>. %1.00;0.45%') + ''' + pass + + + def test_elimination_0(self): + ''' + 'Variable elimination + + 'If something is a bird, then it is an animal. + <<$x --> bird> ==> <$x --> animal>>. %1.00;0.90% + + 'A robin is a bird. + bird>. %1.00;0.90% + + 3 + + 'A robin is an animal. 
+ ''outputMustContain(' animal>. %1.00;0.81%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<<$x --> bird> ==> <$x --> animal>>. %1.00;0.90%', + ' bird>. %1.00;0.90%', + 'bird.' + ) + self.assertNotEqual(rules, None) + + subst_var = unification_variable(task.term, belief.term, [0], [0]) # to find possible replacement. + subst_var.apply(task.term, belief.term) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + + + self.assertTrue( + output_contains(tasks_derived, ' animal>. %1.00;0.81%') + ) + pass + + + def test_elimination_1(self): + ''' + 'Variable elimination + + 'If something is a bird, then it is an animal. + <<$x --> bird> ==> <$x --> animal>>. + + 'A tiger is an animal. + animal>. + + 10 + + 'I guess that a tiger is a bird. + ''outputMustContain(' bird>. %1.00;0.45%') + ''' + pass + + + def test_elimination_2(self): + ''' + 'Variable elimination + + 'Something is a animal if and only if it is a bird. + <<$x --> animal> <=> <$x --> bird>>. + + 'A robin is a bird. + bird>. + + 3 + + 'A robin is a animal. + ''outputMustContain(' animal>. %1.00;0.81%') + ''' + pass + + + def test_elimination_3(self): + ''' + 'Variable elimination + + 'Some bird can swim. + (&&,<#x --> bird>,<#x --> swimmer>). + + 'Swan is a type of bird. + bird>. %0.90% + + 3 + + 'I guess swan can swim. + ''outputMustContain(' swimmer>. %0.90;0.43%') + ''' + pass + + + def test_elimination_4(self): + ''' + 'Variable elimination + + 'Tweety has wings. + <{Tweety} --> [with_wings]>. + + 'If something can chirp and has wings, then it is a bird. + <(&&,<$x --> [chirping]>,<$x --> [with_wings]>) ==> <$x --> bird>>. + + 23 + + 'If Tweety can chirp, then it is a bird. + ''outputMustContain('<<{Tweety} --> [chirping]> ==> <{Tweety} --> bird>>. %1.00;0.81%') + ''' + pass + + + def test_elimination_5(self): + ''' + 'Variable elimination + + 'If something can fly, chirp, and eats worms, then it is a bird. + <(&&,<$x --> flyer>,<$x --> [chirping]>, <(*, $x, worms) --> food>) ==> <$x --> bird>>. + + 'Tweety can fly. + <{Tweety} --> flyer>. + + 7 + + 'If Tweety can chirp and eats worms, then it is a bird. + ''outputMustContain('<(&&,<(*,{Tweety},worms) --> food>,<{Tweety} --> [chirping]>) ==> <{Tweety} --> bird>>. %1.00;0.81%') + ''' + pass + + + def test_elimination_6(self): + ''' + 'Variable elimination + + 'Every lock can be opened by every key. + <(&&,<$x --> key>,<$y --> lock>) ==> <$y --> (/,open,$x,_)>>. + + 'Lock-1 is a lock. + <{lock1} --> lock>. + + 20 + + 'Lock-1 can be opened by every key. + ''outputMustContain('<<$1 --> key> ==> <{lock1} --> (/,open,$1,_)>>. %1.00;0.81%') + ''' + pass + + + def test_multiple_variable_elimination_0(self): + ''' + 'Multiple variable elimination + + 'Every lock can be opened by some key. + <<$x --> lock> ==> (&&,<#y --> key>,<$x --> (/,open,#y,_)>)>. %1.00;0.90% + + 'Lock-1 is a lock. + <{lock1} --> lock>. %1.00;0.90% + + 9 + + 'Some key can open Lock-1. + ''outputMustContain('(&&,<#1 --> key>,<{lock1} --> (/,open,#1,_)>). %1.00;0.81%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<<$x --> lock> ==> (&&,<#y --> key>,<$x --> (/,open,#y,_)>)>. %1.00;0.90%', + '<{lock1} --> lock>. %1.00;0.90%', + 'lock.' + ) + self.assertNotEqual(rules, None) + + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + + self.assertTrue( + output_contains(tasks_derived, '(&&,<#1 --> key>,<{lock1} --> (/,open,#1,_)>). 
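# --- Editor's note (hedged): the elimination tests above (several still
# stubbed with `pass`) all rely on substituting a constant term for a
# unified variable. A minimal, repo-independent sketch of that operation on
# a nested-tuple term encoding (purely illustrative, not this repo's Term
# API):
def substitute(term, var, const):
    # replace every occurrence of `var` in a nested-tuple term
    if term == var:
        return const
    if isinstance(term, tuple):
        return tuple(substitute(t, var, const) for t in term)
    return term

# <<$x --> bird> ==> <$x --> animal>> with $x := robin
impl = ('==>', ('$x', 'bird'), ('$x', 'animal'))
assert substitute(impl, '$x', 'robin') == \
    ('==>', ('robin', 'bird'), ('robin', 'animal'))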
%1.00;0.81%') + ) + + pass + + + def test_multiple_variable_elimination_1(self): + ''' + 'Multiple variable elimination + + 'There is a lock that can be opened by every key. + (&&,<#x --> lock>,<<$y --> key> ==> <#x --> (/,open,$y,_)>>). + + 'Lock-1 is a lock. + <{lock1} --> lock>. + + 9 + + 'I guess Lock-1 can be opened by every key. + ''outputMustContain('<<$1 --> key> ==> <{lock1} --> (/,open,$1,_)>>. %1.00;0.43%') + ''' + pass + + + def test_multiple_variable_elimination_2(self): + ''' + 'Multiple variable elimination + + 'There is a key that can open some lock. + (&&,<#x --> (/,open,#y,_)>,<#x --> lock>,<#y --> key>). + + 'Lock-1 is a lock. + <{lock1} --> lock>. + + 18 + + 'I guess there is a key that can open Lock-1. + ''outputMustContain('(&&,<#1 --> key>,<{lock1} --> (/,open,#1,_)>). %1.00;0.43%') + ''' + pass + + + def test_introduction_0(self): + ''' + 'Introduction + + 'A swan is a bird. + bird>. + + 'A swan is usually a swimmer. + swimmer>. %0.80% + + 3 + + 'I guess a bird is usually a swimmer. + ''outputMustContain('<<$1 --> bird> ==> <$1 --> swimmer>>. %0.80;0.45%') + + 'I guess a swimmer is a bird. + ''outputMustContain('<<$1 --> swimmer> ==> <$1 --> bird>>. %1.00;0.39%') + + 'I guess a bird is usually a swimmer, and the other way around. + ''outputMustContain('<<$1 --> bird> <=> <$1 --> swimmer>>. %0.80;0.45%') + + 'Some bird can swim. + ''outputMustContain('(&&,<#1 --> bird>,<#1 --> swimmer>). %0.80;0.81%') + ''' + pass + + + def test_introduction_1(self): + ''' + 'Introduction + + 'A gull is a swimmer. + swimmer>. + + 'Usually, a swan is a swimmer. + swimmer>. %0.80% + + 3 + + 'I guess what can be said about gull usually can also be said about swan. + ''outputMustContain('< $1> ==> $1>>. %0.80;0.45%') + + 'I guess what can be said about swan can also be said about gull. + ''outputMustContain('< $1> ==> $1>>. %1.00;0.39%') + + 'I guess gull and swan share most properties. + ''outputMustContain('< $1> <=> $1>>. %0.80;0.45%') + + 'Gull and swan have some common property. + ''outputMustContain('(&&, #1>, #1>). %0.80;0.81%') + ''' + pass + + + def test_introduction_2(self): + ''' + 'Introduction + + 'Key-1 opens Lock-1. + <{key1} --> (/,open,_,{lock1})>. + + 'Key-1 is a key. + <{key1} --> key>. + + 45 + + 'I guess every key can open Lock-1. + ''outputMustContain('<<$1 --> key> ==> <$1 --> (/,open,_,{lock1})>>. %1.00;0.45%') + + 'Some key can open Lock-1. + ''//outputMustContain('(&&,<#1 --> (/,open,_,{lock1})>,<#1 --> key>). %1.00;0.81%') //reversed + '' outputMustContain('(&&,<#1 --> (/,open,_,{lock1})>,<#1 --> key>). %1.00;0.25%') + ''' + pass + + + def test_multiple_variables_introduction_0(self): + ''' + 'Multiple variables introduction + + 'Lock-1 can be opened by every key. + <<$x --> key> ==> <{lock1} --> (/,open,$x,_)>>. + + 'Lock-1 is a lock. + <{lock1} --> lock>. + + 166 + + 'There is a lock that can be opened by every key. + ''outputMustContain('(&&,<#1 --> lock>,<<$2 --> key> ==> <#1 --> (/,open,$2,_)>>). %1.00;0.81%') + + 'I guess every lock can be opened by every key. + ''outputMustContain('<(&&,<$1 --> key>,<$2 --> lock>) ==> <$2 --> (/,open,$1,_)>>. %1.00;0.45%') + ''' + pass + + + def test_multiple_variables_introduction_1(self): + ''' + 'Multiple variables introduction + + 'Lock-1 can be opened by some key. + (&&,<#x --> key>,<{lock1} --> (/,open,#x,_)>). + + 'Lock-1 is a lock. + <{lock1} --> lock>. + + 17 + + 'There is a key that can open some lock. + ''outputMustContain('(&&,<#1 --> key>,<#2 --> (/,open,#1,_)>,<#2 --> lock>). 
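# --- Editor's note (hedged): although test_introduction_0/1/2 above are
# still stubs, their expected truth values already check out against the
# standard functions, assuming dependent-variable introduction uses
# intersection (c = c1*c2) and independent-variable introduction uses
# induction/comparison. With premises %1.00;0.90% and %0.80;0.90%:
def w2c(w, k=1):
    return w / (w + k)

assert round(w2c(1.00 * 0.90 * 0.90), 2) == 0.45   # <$1-->bird> ==> <$1-->swimmer>, f=0.80
assert round(w2c(0.80 * 0.90 * 0.90), 2) == 0.39   # <$1-->swimmer> ==> <$1-->bird>, f=1.00
assert round(0.90 * 0.90, 2) == 0.81               # (&&,<#1-->bird>,<#1-->swimmer>), f=0.80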
%1.00;0.81%') + + 'I guess every lock can be opened by some key. + ''outputMustContain('<<$1 --> lock> ==> (&&,<#2 --> key>,<$1 --> (/,open,#2,_)>)>. %1.00;0.45%') + ''' + pass + + + def test_recursion(self): + ''' + 'Recursion + + '0 is a number + <0 --> num>. %1.00;0.90% + 'If n is a number, n+1 is also a number + <<$1 --> num> ==> <(*,$1) --> num>>. %1.00;0.90% + '3 is a number? + <(*,(*,(*,0))) --> num>? + 70000 + 'I guess 3 is a number + ''outputMustContain('<(*,(*,(*,0))) --> num>. %1.00;0.66%') + ''' + pass + + + def test_second_level_variable_unification_0(self): + ''' + 'Second level variable unification + + 'there is a lock which is opened by all keys + (&&,<#1 --> lock>,<<$2 --> key> ==> <#1 --> (/,open,$2,_)>>). %1.00;0.90% + + 'key1 is a key + <{key1} --> key>. %1.00;0.90% + + 5 + + 'there is a lock which is opened by key1 + ''outputMustContain('(&&,<#1 --> (/,open,{key1},_)>,<#1 --> lock>). %1.00;0.81%') + ''' + pass + + + def test_second_level_variable_unification_1(self): + ''' + 'Second level variable unification + + 'all locks are opened by some key + <<$1 --> lock> ==> (&&,<#2 --> key>,<$1 --> (/,open,#2,_)>)>. %1.00;0.90% + + 'key1 is a key + <{key1} --> key>. %1.00;0.90% + + 5 + + 'maybe all locks are opened by key1 + ''outputMustContain('') + //''outputMustContain('<<$1 --> lock> ==> <$1 --> (/,open,{key1},_)>>. %1.00;0.43%') + ''' + pass + + + def test_second_variable_introduction_induction(self): + ''' + 'Second variable introduction (induction) + + 'if something opens lock1, it is a key + < (/,open,$1,_)> ==> <$1 --> key>>. + + 'lock1 is a key + lock>. + + 7 + + 'there is a lock with the property that when opened by something, this something is a key (induction) + ''outputMustContain('<(&&,<#1 --> (/,open,$2,_)>,<#1 --> lock>) ==> <$2 --> key>>. %1.00;0.45%') + ''' + pass + + + def test_variable_elimination_deduction(self): + ''' + 'Second variable introduction (induction) + + 'lock1 is a lock + lock>. %1.00;0.90% + + 'there is a lock with the property that when opened by something, this something is a key + <(&&,<#1 --> lock>,<#1 --> (/,open,$2,_)>) ==> <$2 --> key>>. %1.00;0.90% + + 4 + + 'whatever opens lock1 is a key + ''outputMustContain('< (/,open,$1,_)> ==> <$1 --> key>>. %1.00;0.81%') + ''' + pass + + + def test_abduction_with_variable_elimination_abduction(self): + ''' + 'Abduction with variable elimination (abduction) + + 'whatever opens lock1 is a key + < (/,open,$1,_)> ==> <$1 --> key>>. %1.00;0.90% + + 'there is a lock with the property that when opened by something, this something is a key + <(&&,<#1 --> lock>,<#1 --> (/,open,$2,_)>) ==> <$2 --> key>>. %1.00;0.90% + + 10 + + 'lock1 is a lock + ''outputMustContain(' lock>. %1.00;0.45%') + ''' + pass + + + def test_birdClaimedByBob(self): + ''' + 'from https://code.google.com/archive/p/open-nars/issues/7 + + <(&,<{Tweety} --> bird>, fly>) --> claimedByBob>. + <<(&,<#1 --> $2>,<$3 --> #1>) --> claimedByBob> ==> <<$3 --> $2> --> claimedByBob>>. + + claimedByBob>? + 100 + ''outputMustContain('<<{Tweety} --> fly> --> claimedByBob>. %1.00;0.81%') + ''' + pass + + + def test_can_of_worms(self): + ''' + <0 --> num>. %1.00;0.90% + <0 --> (/,num,_)>. %1.00;0.90% + + 20 + + ''outputMustContain('<<$1 --> num> ==> <$1 --> (/,num,_)>>. %1.00;0.45%') + ''' + pass + + + def test_nlp1(self): + ''' + <(\,REPRESENT,_,CAT) --> cat>. %1.00;0.90% + <(\,(\,REPRESENT,_,<(*,CAT,FISH) --> FOOD>),_,eat,fish) --> cat>. 
+ 5 + ''outputMustContain('<<(\,REPRESENT,_,$1) --> $2> ==> <(\,(\,REPRESENT,_,<(*,$1,FISH) --> FOOD>),_,eat,fish) --> $2>>. %1.00;0.40%') + ''' + pass + + + def test_nlp2(self): + ''' + (/,(/,REPRESENT,_,<(*,CAT,FISH) --> FOOD>),_,eat,fish)>. + CAT>. %1.00;0.90% + 300 + ''outputMustContain('<<$1 --> $2> ==> <$1 --> (/,(/,REPRESENT,_,<(*,$2,FISH) --> FOOD>),_,eat,fish)>>. %1.00;0.40%') + ''' + pass + + + def test_redundant(self): + ''' + < (/,open,$1,_)> ==> <$1 --> key>>. + 100 + ''outputMustNotContain('<(&&, (/,open,$1,_)>,<(*,$1,lock1) --> open>) ==> <$1 --> key>>. %1.00;0.81%') + ''outputMustNotContain('<<(*,$1,lock1) --> open> ==> (/,open,$1,_)>>. %1.00;0.45%') + ''' + pass + + + def test_symmetry(self): + ''' + <(*,a,b) --> like>. %1.00;0.90% + <(*,b,a) --> like>. %1.00;0.90% + <<(*,$1,$2) --> like> <=> <(*,$2,$1) --> like>>? + 20 + ''outputMustContain('<<(*,$1,$2) --> like> <=> <(*,$2,$1) --> like>>. %1.00;0.40%') + ''' + pass + + + def test_uncle(self): + ''' + (/,uncle,_,tom)>. %1.00;0.90% + (/,uncle,tom,_)>. %0.00;0.90% + 10 + ''outputMustContain('<<$1 --> (/,uncle,_,$2)> ==> <$1 --> (/,uncle,$2,_)>>. %0.00;0.40%') + 'would be a strange variable introduction when it would be allowed to use ImageExt and not just looking at PRED> + 'this is a strange example I added.. + ''' + pass + + + def test_unification_a1(self): + ''' + 'Variable unification + + 'If something is a bird, then it is a animal. + <<#x-->A> ==> (&&, <#y-->B>, <#x-->C>)>. %1.00;0.90% + + 'If something is a robin, then it is a bird. + <(&&, <#x-->B>, <#y-->C>) ==> <#x --> D>>. %1.00;0.90% + + 3 + + 'If something is a robin, then it is a animal. + ''outputMustContain('<<#1 --> A> ==> <#2 --> D>>. %1.00;0.81%') + + 'I guess that if something is a animal, then it is a robin. + ''outputMustContain('<<#1 --> D> ==> <#2 --> A>>. %1.00;0.45%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<<#x-->A> ==> (&&, <#y-->B>, <#x-->C>)>. %1.00;0.90%', + '<(&&, <#x-->B>, <#y-->C>) ==> <#x --> D>>. %1.00;0.90% ', + '(&&, <#y-->B>, <#x-->C>).' + ) + self.assertNotEqual(rules, None) + + subst_var = unification_variable(task.term, belief.term, [1], [0]) # to find possible replacement. + subst_var.apply(task.term, belief.term) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + + self.assertTrue( + output_contains(tasks_derived, '<<#1 --> A> ==> <#2 --> D>>. %1.00;0.81%') + ) + self.assertTrue( + output_contains(tasks_derived, '<<#1 --> D> ==> <#2 --> A>>. %1.00;0.45%') + ) + + self.assertTrue( + not output_contains(tasks_derived, '<<$1 --> D> ==> <$1 --> A>>. 
%1.00;0.45%') + ) + print("") + out_print(PrintType.IN, task.sentence.repr, *task.budget) + out_print(PrintType.IN, belief.sentence.repr, *belief.budget) + for task in tasks_derived: + task: Task + out_print(PrintType.OUT, task.sentence.repr, *task.budget) + pass + + +if __name__ == '__main__': + + test_classes_to_run = [ + TEST_NAL6 + ] + + loader = unittest.TestLoader() + + suites = [] + for test_class in test_classes_to_run: + suite = loader.loadTestsFromTestCase(test_class) + suites.append(suite) + + suites = unittest.TestSuite(suites) + + runner = unittest.TextTestRunner() + results = runner.run(suites) \ No newline at end of file diff --git a/Tests/test_NAL/test_NAL7.py b/Tests/test_NAL/test_NAL7.py new file mode 100644 index 0000000..7b511bd --- /dev/null +++ b/Tests/test_NAL/test_NAL7.py @@ -0,0 +1,649 @@ +import unittest + +from NAL.MetaLevelInference.VariableSubstitution import * +from NARS.InferenceEngine import GeneralEngine +from NARS.RuleMap import RuleMap_v2 + +import Tests.utils_for_test as utils_for_test +from Tests.utils_for_test import * + +# utils_for_test.rule_map = RuleMap_v2() + +class TEST_NAL7(unittest.TestCase): + '''''' + + def test_deduction(self): + ''' + 'Temporal deduction + + 'Someone enter the room_101 after he open the door_101 + <<(*, $x, room_101) --> enter> =\> <(*, $x, door_101) --> open>>. %0.90;0.90% + + 'Someone open the door_101 after he hold the key_101 + <<(*, $y, door_101) --> open> =\> <(*, $y, key_101) --> hold>>. %0.80;0.90% + + 100 + + 'If someone enter room_101, he should hold key_101 before + ''outputMustContain('<<(*,$1,room_101) --> enter> =\> <(*,$1,key_101) --> hold>>. %0.72;0.58%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<<(*, $x, room_101) --> enter> =\> <(*, $x, door_101) --> open>>. %0.90;0.90%', + '<<(*, $y, door_101) --> open> =\> <(*, $y, key_101) --> hold>>. %0.80;0.90%', + '<(*, $y, door_101) --> open>.' + ) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<<(*,$1,room_101) --> enter> =\> <(*,$1,key_101) --> hold>>. %0.72;0.58%') + ) + pass + + def test_expemplification(self): + ''' + 'Temporal explification + + 'Someone enter the room_101 after he open the door_101 + <<(*, $x, room_101) --> enter> =\> <(*, $x, door_101) --> open>>. %0.90;0.90% + + 'Someone open the door_101 after he hold the key_101 + <<(*, $y, door_101) --> open> =\> <(*, $y, key_101) --> hold>>. %0.80;0.90% + + 100 + + 'If someone enter room_101, he should hold key_101 before + ''outputMustContain('<<(*,$1,key_101) --> hold> =/> <(*,$1,room_101) --> enter>>. %1.00;0.37%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<<(*, $x, room_101) --> enter> =\> <(*, $x, door_101) --> open>>. %0.90;0.90%', + '<<(*, $y, door_101) --> open> =\> <(*, $y, key_101) --> hold>>. %0.80;0.90%', + '<(*, $y, door_101) --> open>.' + ) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<<(*,$1,key_101) --> hold> =/> <(*,$1,room_101) --> enter>>. %1.00;0.37%') + ) + pass + + def test_induction_0(self): + ''' + 'Temporal induction + + 'Someone open door_101 before he enter room_101 + <<(*, $x, door_101) --> open> =/> <(*, $x, room_101) --> enter>>. %0.90;0.90% + + 'Someone open door_101 after he hold key_101 + <<(*, $y, door_101) --> open> =\> <(*, $y, key_101) --> hold>>. 
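# --- Editor's note (hedged): the temporal deduction/exemplification tests
# above reuse the NAL-5 truth functions; only the copulas (=\>, =/>) and
# the stamp handling are temporal. With premises %0.90;0.90% and
# %0.80;0.90%, assuming k = 1:
def w2c(w, k=1):
    return w / (w + k)

# deduction:       f = f1*f2, c = f1*f2*c1*c2
assert round(0.90 * 0.80, 2) == 0.72
assert round(0.90 * 0.80 * 0.90 * 0.90, 2) == 0.58        # matches %0.72;0.58%
# exemplification: f = 1,     c = w2c(f1*f2*c1*c2)
assert round(w2c(0.90 * 0.80 * 0.90 * 0.90), 2) == 0.37   # matches %1.00;0.37%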
%0.80;0.90% + + 100 + + 'If someone hold key_101, he will enter room_101 + ''outputMustContain('<<(*,$1,key_101) --> hold> =/> <(*,$1,room_101) --> enter>>. %0.90;0.39%') + 'If someone enter room_101, he should hold key_101 before + ''outputMustContain('<<(*,$1,room_101) --> enter> =\> <(*,$1,key_101) --> hold>>. %0.80;0.42%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<<(*, $x, door_101) --> open> =/> <(*, $x, room_101) --> enter>>. %0.90;0.90%', + '<<(*, $y, door_101) --> open> =\> <(*, $y, key_101) --> hold>>. %0.80;0.90%', + '<(*, $y, door_101) --> open>.' + ) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<<(*,$1,key_101) --> hold> =/> <(*,$1,room_101) --> enter>>. %0.90;0.39%') + ) + self.assertTrue( + output_contains(tasks_derived, '<<(*,$1,room_101) --> enter> =\> <(*,$1,key_101) --> hold>>. %0.80;0.42%') + ) + pass + + def test_induction_1(self): + ''' + 'Temporal induction + + 'Someone open door_101 after he hold key_101 + <<(*, $y, door_101) --> open> =\> <(*, $y, key_101) --> hold>>. %0.80;0.90% + + 'Someone open door_101 before he enter room_101 + <<(*, $x, door_101) --> open> =/> <(*, $x, room_101) --> enter>>. %0.90;0.90% + + 100 + + 'If someone hold key_101, he will enter room_101 + ''outputMustContain('<<(*,$1,key_101) --> hold> =/> <(*,$1,room_101) --> enter>>. %0.90;0.39%') + 'If someone enter room_101, he should hold key_101 before + ''outputMustContain('<<(*,$1,room_101) --> enter> =\> <(*,$1,key_101) --> hold>>. %0.80;0.42%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<<(*, $y, door_101) --> open> =\> <(*, $y, key_101) --> hold>>. %0.80;0.90%', + '<<(*, $x, door_101) --> open> =/> <(*, $x, room_101) --> enter>>. %0.90;0.90%', + '<(*, $y, door_101) --> open>.' + ) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<<(*,$1,key_101) --> hold> =/> <(*,$1,room_101) --> enter>>. %0.90;0.39%') + ) + self.assertTrue( + output_contains(tasks_derived, '<<(*,$1,room_101) --> enter> =\> <(*,$1,key_101) --> hold>>. %0.80;0.42%') + ) + pass + + + def test_comparison_0(self): + ''' + 'Temporal comparison + + 'Someone open door_101 before he enter room_101 + <<(*, $x, door_101) --> open> =/> <(*, $x, room_101) --> enter>>. %0.90;0.90% + + 'Someone open door_101 after he hold key_101 + <<(*, $y, door_101) --> open> =\> <(*, $y, key_101) --> hold>>. %0.80;0.90% + + 100 + + 'If someone hold key_101, it means he will enter room_101 + ''outputMustContain('<<(*,$1,key_101) --> hold> <(*,$1,room_101) --> enter>>. %0.73;0.44%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<<(*, $x, door_101) --> open> =/> <(*, $x, room_101) --> enter>>. %0.90;0.90%', + '<<(*, $y, door_101) --> open> =\> <(*, $y, key_101) --> hold>>. %0.80;0.90%', + '<(*, $y, door_101) --> open>.' + ) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<<(*,$1,key_101) --> hold> <(*,$1,room_101) --> enter>>. %0.73;0.44%') + ) + pass + + def test_comparison_1(self): + ''' + 'Temporal comparison + + 'Someone open door_101 after he hold key_101 + <<(*, $y, door_101) --> open> =\> <(*, $y, key_101) --> hold>>. 
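# --- Editor's note (hedged): the temporal induction and comparison
# expectations above also follow the standard weak truth functions (k = 1):
def w2c(w, k=1):
    return w / (w + k)

assert round(w2c(0.80 * 0.90 * 0.90), 2) == 0.39   # hold =/> enter, f=0.90
assert round(w2c(0.90 * 0.90 * 0.90), 2) == 0.42   # enter =\> hold, f=0.80
# comparison: f = and(f1,f2)/or(f1,f2), w = or(f1,f2)*c1*c2
or_f = 1 - (1 - 0.90) * (1 - 0.80)
assert round((0.90 * 0.80) / or_f, 2) == 0.73
assert round(w2c(or_f * 0.90 * 0.90), 2) == 0.44   # matches %0.73;0.44%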
%0.80;0.90% + + 'Someone open door_101 before he enter room_101 + <<(*, $x, door_101) --> open> =/> <(*, $x, room_101) --> enter>>. %0.90;0.90% + + 100 + + 'If someone hold key_101, it means he will enter room_101 + ''outputMustContain('<<(*,$1,key_101) --> hold> <(*,$1,room_101) --> enter>>. %0.73;0.44%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<<(*, $y, door_101) --> open> =\> <(*, $y, key_101) --> hold>>. %0.80;0.90%', + '<<(*, $x, door_101) --> open> =/> <(*, $x, room_101) --> enter>>. %0.90;0.90%', + '<(*, $y, door_101) --> open>.' + ) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<<(*,$1,key_101) --> hold> <(*,$1,room_101) --> enter>>. %0.73;0.44%') + ) + pass + + def test_comparison_2(self): + ''' + 'Temporal comparison + + 'Someone open door_101 before he enter room_101 + <<(*, $x, room_101) --> enter> =/> <(*, $x, door_101) --> open>>. %0.90;0.90% + + 'Someone open door_101 after he hold key_101 + <<(*, $y, key_101) --> hold> =\> <(*, $y, door_101) --> open>>. %0.80;0.90% + + 100 + + 'If someone hold key_101, it means he will enter room_101 + ''outputMustContain('<<(*,$1,room_101) --> enter> <(*,$1,key_101) --> hold>>. %0.73;0.44%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<<(*, $x, room_101) --> enter> =/> <(*, $x, door_101) --> open>>. %0.90;0.90%', + '<<(*, $y, key_101) --> hold> =\> <(*, $y, door_101) --> open>>. %0.80;0.90%', + '<(*, $y, door_101) --> open>.' + ) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<<(*,$1,room_101) --> enter> <(*,$1,key_101) --> hold>>. %0.73;0.44%') + ) + pass + + def test_comparison_3(self): + ''' + 'Temporal comparison + + 'Someone open door_101 after he hold key_101 + <<(*, $y, key_101) --> hold> =\> <(*, $y, door_101) --> open>>. %0.80;0.90% + + 'Someone open door_101 before he enter room_101 + <<(*, $x, room_101) --> enter> =/> <(*, $x, door_101) --> open>>. %0.90;0.90% + + 100 + + 'If someone hold key_101, it means he will enter room_101 + ''outputMustContain('<<(*,$1,room_101) --> enter> <(*,$1,key_101) --> hold>>. %0.73;0.44%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<<(*, $y, key_101) --> hold> =\> <(*, $y, door_101) --> open>>. %0.80;0.90%', + '<<(*, $x, room_101) --> enter> =/> <(*, $x, door_101) --> open>>. %0.90;0.90%', + '<(*, $y, door_101) --> open>.' + ) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<<(*,$1,room_101) --> enter> <(*,$1,key_101) --> hold>>. %0.73;0.44%') + ) + pass + + + def test_abduction(self): + ''' + 'Temporal abudction + + B>. %0.80;0.90% + B>. %0.90;0.90% + + 100 + + ''outputMustContain(' C>. %0.80;0.42%') + ''outputMustContain(' A>. %0.90;0.39%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' B>. %0.80;0.90%', + ' B>. %0.90;0.90%', + 'B.' + ) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' C>. %0.80;0.42%') + ) + self.assertTrue( + output_contains(tasks_derived, ' A>. 
%0.90;0.39%') + ) + pass + + + def test_inference_on_tense_0(self): + ''' + 'Inference on tense + + 'John hold key_101 before he enter room_101 + <<(*,John,key_101) --> hold> =/> <(*,John,room_101) --> enter>>. %1.00;0.90% + + 'John is holding key_101 now + <(*,John,key_101) --> hold>. :|: %1.00;0.90% + + 20 + + 'John will enter the room_101 + ''outputMustContain('<(*,John,room_101) --> enter>. :!5: %1.00;0.81%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<(*,John,key_101) --> hold>. :|: %1.00;0.90%', + '<<(*,John,key_101) --> hold> =/> <(*,John,room_101) --> enter>>. %1.00;0.90%', + '<(*,John,key_101) --> hold>.' + ) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<(*,John,room_101) --> enter>. :!5: %1.00;0.81%') + ) + pass + + + def test_inference_on_tense_1(self): + ''' + ' inference on tense + + 'John hold key_101 before he enter room_101 + <<(*,John,key_101) --> hold> =/> <(*,John,room_101) --> enter>>. %1.00;0.90% + + 'John entered room_101 + <(*,John,room_101) --> enter>. :\: %1.00;0.90% + + 3 + + ''outputMustContain('<(*,John,key_101) --> hold>. :!-10: %1.00;0.45%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<(*,John,room_101) --> enter>. :\: %1.00;0.90%', + '<<(*,John,key_101) --> hold> =/> <(*,John,room_101) --> enter>>. %1.00;0.90%', + '<(*,John,room_101) --> enter>.' + ) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<(*,John,key_101) --> hold>. :!-10: %1.00;0.45%') + ) + pass + + + def test_inference_on_tense_2(self): + ''' + ' inference on tense + + 'John hold key_101 before he enter room_101 + <<(*,John,key_101) --> hold> =/> <(*,John,room_101) --> enter>>. %1.00;0.90% + + 'John entered room_101 + <(*,John,room_101) --> enter>. :\: %1.00;0.90% + + 3 + + ''outputMustContain('<(*,John,key_101) --> hold>. %1.00;0.30%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<<(*,John,key_101) --> hold> =/> <(*,John,room_101) --> enter>>. %1.00;0.90%', + '<(*,John,room_101) --> enter>. :\: %1.00;0.90%', + 'enter.' + ) + tasks_derived = GeneralEngine.inference(task, belief, belief.term, task_link, term_link, rules) + self.assertTrue( + output_contains(tasks_derived, '<(*,John,key_101) --> hold>. %1.00;0.30%') + ) + pass + + def test_induction_on_tense_0_0(self): + ''' + nal7.6.nal + + 'induction on events + + 'John is opening door_101 + (/,open,_,door_101)>. :|: + + 6 + + 'John is entering room_101 + (/,enter,_,room_101)>. :|: + + 20 + + 'If John enter room_101, he should open door_101 before + ''outputMustContain('< (/,enter,_,room_101)> =\> (&/, (/,open,_,door_101)>,+6)>. :!6: %1.00;0.45%') + + 'new: variable introduction also in time: + + 'If someone enter room_101, he should open door_101 before + ''outputMustContain('<<$1 --> (/,enter,_,room_101)> =\> (&/,<$1 --> (/,open,_,door_101)>,+6)>. :!6: %1.00;0.45%') + + 'adjusted +2 to +3 in both conditions + + 10 + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' (/,open,_,door_101)>. :|: ', + ' (/,enter,_,room_101)>. :|: ', + 'John.' + ) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '< (/,enter,_,room_101)> =\> (&/, (/,open,_,door_101)>,+6)>. 
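# --- Editor's note (hedged): the occurrence stamps expected above (:!5:,
# :!-10:) are consistent with a default temporal duration of 5 cycles, as
# in OpenNARS; whether this repo takes that constant from Config.py is an
# assumption. Forward use of a predictive implication shifts the conclusion
# one duration into the future; abductive (backward) use shifts it back:
DURATION = 5                    # assumed default
now = 0
assert now + DURATION == 5      # hold. :|:  with  =/>  gives  enter. :!5:
past = -DURATION                # ':\:' read as one duration in the past
assert past - DURATION == -10   # enter. :\: with  =/>  gives  hold. :!-10: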
:!6: %1.00;0.45%') + ) + pass + + + def test_induction_on_tense_0_1(self): + ''' + nal7.6.nal + + 'induction on events + + 'John is opening door_101 + (/,open,_,door_101)>. :|: + + 6 + + 'John is entering room_101 + (/,enter,_,room_101)>. :|: + + 20 + + 'If John enter room_101, he should open door_101 before + ''outputMustContain('< (/,enter,_,room_101)> =\> (&/, (/,open,_,door_101)>,+6)>. :!6: %1.00;0.45%') + + 'new: variable introduction also in time: + + 'If someone enter room_101, he should open door_101 before + ''outputMustContain('<<$1 --> (/,enter,_,room_101)> =\> (&/,<$1 --> (/,open,_,door_101)>,+6)>. :!6: %1.00;0.45%') + + 'adjusted +2 to +3 in both conditions + + 10 + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + ' (/,open,_,door_101)>. :|: ', + ' (/,enter,_,room_101)>. :|: ', + 'John.' + ) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<<$1 --> (/,enter,_,room_101)> =\> (&/,<$1 --> (/,open,_,door_101)>,+6)>. :!6: %1.00;0.45%') + ) + pass + + + + def test_induction_on_tense_1(self): + ''' + nal7.7.nal + + 'John is holding key_101 now + <(*,John,key_101) --> hold>. :|: %1.00;0.90% + + 6 + + 'If John open door_101, he will enter room_101 + <<(*,John,door_101) --> open> =/> <(*,John,room_101) --> enter>>. :|: %1.00;0.90% + + 20 + + 'If John hold key_101 and open door_101 (after 6 steps), he will enter room_101 + ''outputMustContain('<(&/,<(*,John,key_101) --> hold>,+6,<(*,John,door_101) --> open>) =/> <(*,John,room_101) --> enter>>. :!6: %1.00;0.45%') + 'changed fomr +2 to +4 due to changes in interval calculations + 'this one is working, just throwing exception + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<(*,John,key_101) --> hold>. :|: %1.00;0.90% ', + '<<(*,John,door_101) --> open> =/> <(*,John,room_101) --> enter>>. :|: %1.00;0.90%', + 'John.' + ) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<(&/,<(*,John,key_101) --> hold>,+6,<(*,John,door_101) --> open>) =/> <(*,John,room_101) --> enter>>. :!6: %1.00;0.45%') + ) + pass + + def test_analogy(self): + ''' + nal7.15.nal + + 'If someone open door_101, he will enter room_101 + <<(*, $x, door_101) --> open> =/> <(*, $x, room_101) --> enter>>. %0.95;0.90% + + ' If someone enter room_101, it means he leave corridor_100 + <<(*, $x, room_101) --> enter> <|> <(*, $x, corridor_100) --> leave>>. %1.00;0.90% + + 40 + + 'If someone open door_101, he will leave corridor_100 + ''outputMustContain('<<(*,$1,door_101) --> open> =/> <(*,$1,corridor_100) --> leave>>. %0.95;0.81%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<<(*, $x, door_101) --> open> =/> <(*, $x, room_101) --> enter>>. %0.95;0.90%', + '<<(*, $x, room_101) --> enter> <|> <(*, $x, corridor_100) --> leave>>. %1.00;0.90%', + '<(*, $x, room_101) --> enter>.' + ) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<<(*,$1,door_101) --> open> =/> <(*,$1,corridor_100) --> leave>>. %0.95;0.81%') + ) + pass + + + def test_deduction_sequence_eliminate_0(self): + ''' + nal7.18.nal + + 'If someone hold key_101, he will enter room_101 (in 100 steps) + <(&/,<(*, John, key_101) --> hold>,+100) =/> <(*, John, room_101) --> enter>>. 
%1.00;0.90% + + 'John held the key_101 + <(*, John, key_101) --> hold>. :\: %1.00;0.90% + + 210 + + 'John will enter room_101 + ''outputMustContain('<(*,John,room_101) --> enter>. :!100: %1.00;0.81%') + + 'this one is working, but throws an exception + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<(*, John, key_101) --> hold>. :\: %1.00;0.90%', + '<(&/,<(*, John, key_101) --> hold>,+100) =/> <(*, John, room_101) --> enter>>. %1.00;0.90%', + 'hold.' + ) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<(*,John,room_101) --> enter>. :!100: %1.00;0.81%') + ) + pass + + def test_deduction_sequence_eliminate_0_1(self): + ''' + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '(&/,A,+100,B). :\: %1.00;0.90%', + '<(&/,A,+100,B,+100) =/> D>. %1.00;0.90%', + 'B.' + ) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, ' hold>,+100) =/> <(*, $x, room_101) --> enter>>. %1.00;0.90% + + 'John held the key_101 + <(*, John, key_101) --> hold>. :\: %1.00;0.90% + + 210 + + 'John will enter room_101 + ''outputMustContain('<(*,John,room_101) --> enter>. :!95: %1.00;0.81%') + + 'this one is working, but throws an exception + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<(&/,<(*, $x, key_101) --> hold>,+100) =/> <(*, $x, room_101) --> enter>>. %1.00;0.90%', + '<(*, John, key_101) --> hold>. :\: %1.00;0.90%', + 'hold.' + ) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<(*,John,room_101) --> enter>. :!95: %1.00;0.81%') + ) + pass + + + def test_abduction_sequence_eliminate_0(self): + ''' + nal7.19.nal + + 'If someone hold key_101, he will enter room_101 (in 100 steps) + <(&/,<(*, John, key_101) --> hold>,+100) =/> <(*, John, room_101) --> enter>>. %1.00;0.90% + + 'John is entering room_101 now + <(*,John,room_101) --> enter>. :|: %1.00;0.90% + + 15 + + 'John held the key_101 (105 steps before) + ''outputMustContain('<(*,John,key_101) --> hold>. :!-105: %1.00;0.45%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<(*,John,room_101) --> enter>. :|: %1.00;0.90%', + '<(&/,<(*, John, key_101) --> hold>,+100) =/> <(*, John, room_101) --> enter>>. %1.00;0.90%', + '<(*,John,room_101) --> enter>.' + ) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<(*,John,key_101) --> hold>. :!-105: %1.00;0.45%') + ) + pass + + + def test_abduction_sequence_eliminate_1(self): + ''' + nal7.19.nal + + 'If someone hold key_101, he will enter room_101 (in 100 steps) + <(&/,<(*, $x, key_101) --> hold>,+100) =/> <(*, $x, room_101) --> enter>>. %1.00;0.90% + + 'John is entering room_101 now + <(*,John,room_101) --> enter>. :|: %1.00;0.90% + + 15 + + 'John held the key_101 (105 steps before) + ''outputMustContain('<(*,John,key_101) --> hold>. :!-105: %1.00;0.45%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<(*, $x, room_101) --> enter>. :|: %1.00;0.90%', + '<(&/,<(*, $x, key_101) --> hold>,+100) =/> <(*, $x, room_101) --> enter>>. %1.00;0.90%', + '<(*, $x, room_101) --> enter>.' 
+ ) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<(*,John,key_101) --> hold>. :!-105: %1.00;0.45%') + ) + pass + + + def test_deduction_sequence(self): + ''' + nal7.36.nal + + 'deduction with interval summation + + ' a + 1 = b + <(&/, a, +1) =/> b>. %1.00;0.90% + + ' b + 1 = c + <(&/, b, +1) =/> c>. %1.00;0.90% + + 10 + + ' a + 2 = c + ''outputMustContain('<(&/,a,+2) =/> c>. %1.00;0.81%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<(&/, a, +1) =/> b>. %1.00;0.90%', + '<(&/, b, +1) =/> c>. %1.00;0.90%', + 'b.' + ) + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + self.assertTrue( + output_contains(tasks_derived, '<(&/,a,+2) =/> c>. %1.00;0.81%') + ) + pass + + +if __name__ == '__main__': + + test_classes_to_run = [ + TEST_NAL7 + ] + + loader = unittest.TestLoader() + + suites = [] + for test_class in test_classes_to_run: + suite = loader.loadTestsFromTestCase(test_class) + suites.append(suite) + + suites = unittest.TestSuite(suites) + + runner = unittest.TextTestRunner() + results = runner.run(suites) \ No newline at end of file diff --git a/Tests/test_NAL/test_NAL8.py b/Tests/test_NAL/test_NAL8.py new file mode 100644 index 0000000..59d46a9 --- /dev/null +++ b/Tests/test_NAL/test_NAL8.py @@ -0,0 +1,157 @@ +import unittest + +from NAL.MetaLevelInference.VariableSubstitution import * +from NARS.RuleMap import RuleMap_v2 + +import Tests.utils_for_test as utils_for_test +from Tests.utils_for_test import * + +utils_for_test.rule_map = RuleMap_v2() + + +class TEST_NAL8(unittest.TestCase): + '''''' + def test_0_0(self): + ''' + nal8.1.0.nal + + '********** [01 + 03 -> 10]: + + 'The goal is to make t001 opened. + <{t001} --> [opened]>! %1.00;0.90% + + 'If the robot hold t002, then go to t001 and open t001, then t001 will be opened. + <(&/,<(*,SELF,{t002}) --> hold>,<(*,SELF,{t001}) --> at>,<(*,{t001}) --> ^open>) =/> <{t001} --> [opened]>>. %1.00;0.90% + + 20 + + ''outputMustContain('(&/,<(*,SELF,{t002}) --> hold>,<(*,SELF,{t001}) --> at>,(^open,{t001}))! %1.00;0.81%') + ' working in GUI but not in testcase, maybe the following string needs some escapes? but where? + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<{t001} --> [opened]>! %1.00;0.90%', + '<(&/,<(*,SELF,{t002}) --> hold>,<(*,SELF,{t001}) --> at>,<(*,{t001}) --> ^open>) =/> <{t001} --> [opened]>>. %1.00;0.90%', + '<{t001} --> [opened]>.' + ) + + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + + self.assertTrue( + output_contains(tasks_derived, '(&/,<(*,SELF,{t002}) --> hold>,<(*,SELF,{t001}) --> at>,(^open,{t001}))! %1.00;0.81%') + ) + + + def test_0_1(self): + ''' + nal8.1.3.nal + + 'The goal for the robot is to make t002 reachable. + <(*,SELF,{t002}) --> reachable>! %1.00;0.90% + + 'If item 1 is on item 2 and the robot is also at item 2 at the same time, the robot will be able to reach item 1. + <(&|,<(*,{t002},#2) --> on>,<(*,SELF,#2) --> at>)=|><(*,SELF,{t002}) --> reachable>>. %1.00;0.90% + + 10 + + 'The goal is to make the robot at #1 and t002 is on #1 at the same time + ''outputMustContain('(&|,<(*,SELF,#1) --> at>,<(*,{t002},#1) --> on>)! %1.00;0.81%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<(*,SELF,{t002}) --> reachable>! 
%1.00;0.90%', + '<(&|,<(*,{t002},#2) --> on>,<(*,SELF,#2) --> at>)=|><(*,SELF,{t002}) --> reachable>>. %1.00;0.90%', + '<(*,SELF,{t002}) --> reachable>.' + ) + + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + + self.assertTrue( + output_contains(tasks_derived, '(&|,<(*,SELF,#1) --> at>,<(*,{t002},#1) --> on>)! %1.00;0.81%') + ) + + + def test_0_1_var(self): + ''' + nal8.1.3.nal + + 'The goal for the robot is to make t002 reachable. + <(*,SELF,{t002}) --> reachable>! %1.00;0.90% + + 'If item 1 is on item 2 and the robot is also at item 2 at the same time, the robot will be able to reach item 1. + <(&|,<(*,$1,#2) --> on>,<(*,SELF,#2) --> at>)=|><(*,SELF,$1) --> reachable>>. %1.00;0.90% + + 10 + + 'The goal is to make the robot at #1 and t002 is on #1 at the same time + ''outputMustContain('(&|,<(*,SELF,#1) --> at>,<(*,{t002},#1) --> on>)! :!0: %1.00;0.73%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<(*,SELF,{t002}) --> reachable>! %1.00;0.90%', + '<(&|,<(*,$1,#2) --> on>,<(*,SELF,#2) --> at>)=|><(*,SELF,$1) --> reachable>>. %1.00;0.90%', + '<(*,SELF,{t002}) --> reachable>.' + ) + + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + + self.assertTrue( + output_contains(tasks_derived, '(&|,<(*,SELF,#1) --> at>,<(*,{t002},#1) --> on>)! :!0: %1.00;0.73%') + ) + + def test_0_2_var(self): + ''' + nal8.1.4.nal + + '********** [07 + 14 -> 15]: + + 't002 is on t003 now. + <(*,{t002},{t003}) --> on>. :|: + + 'The goal is to make t002 on #1 and #1 is at the robot at same time + (&|,<(*,{t002},{t003}) --> on>,<(*,{t003},SELF) --> at>)! + + 350 + + 'The goal maybe to make t003 at the robot + ''outputMustContain('<(*,{t003},SELF) --> at>! %1.00;0.43%') + ''' + rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + '<(*,{t002},{t003}) --> on>. :|:', + '(&|,<(*,{t002},{t003}) --> on>,<(*,{t003},SELF) --> at>)!', + '<(*,{t002},{t003}) --> on>.' + ) + + tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + + self.assertTrue( + output_contains(tasks_derived, '<(*,{t003},SELF) --> at>! %1.00;0.43%') + ) + + def test_1(self): + ''' + '********** [10 -> 11]: + + 'The goal is to hold t002, then arrive t001 and open t001 + (&/,<(*,SELF,{t002}) --> hold>,<(*,SELF,{t001}) --> at>,(^open,{t001}))! + + 10 + + 'The goal is to hold t002 + ''outputMustContain('<(*,SELF,{t002}) --> hold>! %1.00;0.81%') + ''' + +if __name__ == '__main__': + + test_classes_to_run = [ + TEST_NAL8 + ] + + loader = unittest.TestLoader() + + suites = [] + for test_class in test_classes_to_run: + suite = loader.loadTestsFromTestCase(test_class) + suites.append(suite) + + suites = unittest.TestSuite(suites) + + runner = unittest.TextTestRunner() + results = runner.run(suites) \ No newline at end of file diff --git a/Tests/test_NAL/test_NAL9.py b/Tests/test_NAL/test_NAL9.py new file mode 100644 index 0000000..fce6a01 --- /dev/null +++ b/Tests/test_NAL/test_NAL9.py @@ -0,0 +1,97 @@ +import unittest + +from NAL.MetaLevelInference.VariableSubstitution import * +from NARS.RuleMap import RuleMap_v2 + +import Tests.utils_for_test as utils_for_test +from Tests.utils_for_test import * +from NAL.MentalOperation import execute + +nars = utils_for_test.nars + +class TEST_NAL9(unittest.TestCase): + '''''' + def test_anticipate_0(self): + ''' + <<(&/, A>,+10) =/> B>>. + 'making it observable: + B>. + 'ok start: + A>. :|: + 10 + ''outputMustContain('(^anticipate,{SELF}, B>). 
:!0: %1.00;0.90%') + ''' + + + def test_believe_0(self): + ''' + b>. + 'ok, being aware needs attention, so lets ask NARS about it: + b>? + 'ok this concept should now be important enough for it so that NARS now knows + 2 + ''outputMustContain('(^believe,{SELF}, b>,TRUE). :!0: %1.00;0.90%') + ''' + # rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises( + # '<(*,John,key_101) --> hold>. :|: %1.00;0.90%', + # '<<(*,John,key_101) --> hold> =/> <(*,John,room_101) --> enter>>. %1.00;0.90%', + # '<(*,John,key_101) --> hold>.' + # ) + # tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules] + # self.assertTrue( + # output_contains(tasks_derived, '<(*,John,room_101) --> enter>. :|: %1.00;0.81%') + # ) + # pass + + def test_believe_1(self): + ''' + (^believe,{SELF}, animal>,FALSE)! + 10 + ''outputMustContain(' animal>. :!0: %0.00;0.90%') + ''' + + premise1 = '(^believe,{SELF}, animal>,FALSE)!' + + nars.reset() + premise1: Task = Narsese.parse(premise1) + tasks_derived = [execute_one_premise(premise1)] + + self.assertTrue( + output_contains(tasks_derived, ' animal>. :!0: %0.00;0.90%') + ) + pass + + def test_doubt_0(self): + ''' + b>. %1.00;0.90% + 20 + (^doubt,{SELF}, b>)! %1.00;0.90% + 20 + b>? + ''outputMustContain(' b>. %1.00;0.45%') + + ''' + premise1 = '(^doubt,{SELF}, b>)! %1.00;0.90%' + + nars.reset() + premise1: Task = Narsese.parse(premise1) + tasks_derived = [execute_one_premise(premise1)] + + +if __name__ == '__main__': + + test_classes_to_run = [ + TEST_NAL9 + ] + + loader = unittest.TestLoader() + + suites = [] + for test_class in test_classes_to_run: + suite = loader.loadTestsFromTestCase(test_class) + suites.append(suite) + + suites = unittest.TestSuite(suites) + + runner = unittest.TextTestRunner() + results = runner.run(suites) \ No newline at end of file diff --git a/Tests/test_ParseExamples.py b/Tests/test_ParseExamples.py new file mode 100644 index 0000000..461c392 --- /dev/null +++ b/Tests/test_ParseExamples.py @@ -0,0 +1,217 @@ +import NARS +import unittest + +from NARS.DataStructures import Bag, Task, Concept +from Narsese import Judgement, Term, Statement, Copula, Truth + +from pathlib import Path +import Narsese + +from utils.Print import out_print, PrintType, print_filename + +examples_path = Path(__file__).parent/'examples' +single_step_path = examples_path/'single_step' +multi_step_path = examples_path/'multi_step' +application_step_path = examples_path/'application' +stability_step_path = examples_path/'stability' + +'''methods''' + + +def parse_file(file: str): + with open(file, 'r') as f: + lines = f.readlines() + + for i, line in enumerate(lines): + i += 1 + line = line.strip(' \n') + if line.startswith("//"): + continue + elif line.startswith("'''expect.outEmpty"): + pass # TODO: check the outputs + continue + elif line.startswith("''"): + if line.startswith("''outputMustContain('"): + line = line[len("''outputMustContain('"):].rstrip("')\n") + if len(line) == 0: continue + try: + content_check = Narsese.parser.parse(line) # TODO: check the outputs + except: + out_print(PrintType.ERROR, f'{file}, line {i}, {line}') + raise + continue + elif line.startswith("'"): + continue + elif line.isdigit(): + n_cycle = int(line) + out_print(PrintType.INFO, f'Run {n_cycle} cycles.') + + else: + line = line.rstrip(' \n') + if len(line) == 0: + continue + # content = Narsese.parser.parse(line) + try: + content = Narsese.parser.parse(line) # TODO: check the outputs + out_print(PrintType.IN, str(content.sentence), 
*content.budget) + except: + out_print(PrintType.ERROR, f'{file}, line {i}, {line}') + raise + + + +class TEST_Examples_Parse_Single(unittest.TestCase): + '''Examples files in `single_step`.''' + + def __init__(self, methodName: str = ...) -> None: + super().__init__(methodName=methodName) + + '''test''' + def test_single_step_nal1(self): + print('\n') + files = list(single_step_path.glob("nal1*.nal")) + for file in files: + # file = Path(r'D:\Codes\py-nars\Tests\examples\single_step\nal1.2.nal') + print_filename(file.name) + parse_file(file) + + # break + + def test_single_step_nal2(self): + print('\n') + files = list(single_step_path.glob("nal2*.nal")) + for file in files: + # file = Path(r'D:\Codes\py-nars\Tests\examples\single_step\nal2.8.nal') + print_filename(file.name) + parse_file(file) + # break + def test_single_step_nal3(self): + print('\n') + files = list(single_step_path.glob("nal3*.nal")) + for file in files: + # file = Path(r'D:\Codes\py-nars\Tests\examples\single_step\nal2.8.nal') + print_filename(file.name) + parse_file(file) + + def test_single_step_nal4(self): + print('\n') + files = list(single_step_path.glob("nal4*.nal")) + for file in files: + # file = Path(r'D:\Codes\py-nars\Tests\examples\single_step\nal2.8.nal') + print_filename(file.name) + parse_file(file) + + def test_single_step_nal5(self): + print('\n') + files = list(single_step_path.glob("nal5*.nal")) + for file in files: + # file = Path(r'D:\Codes\py-nars\Tests\examples\single_step\nal5.19.nal') + print_filename(file.name) + parse_file(file) + # break + + def test_single_step_nal6(self): + print('\n') + files = list(single_step_path.glob("nal6*.nal")) + for file in files: + # file = Path(r'D:\Codes\py-nars\Tests\examples\single_step\nal6.24.nal') + print_filename(file.name) + parse_file(file) + + def test_single_step_nal7(self): + print('\n') + files = list(single_step_path.glob("nal7*.nal")) + for file in files: + # file = Path(r'D:\Codes\py-nars\Tests\examples\single_step\nal2.8.nal') + print_filename(file.name) + parse_file(file) + + def test_single_step_nal8(self): + print('\n') + files = list(single_step_path.glob("nal8*.nal")) + for file in files: + # file = Path(r'D:\Codes\py-nars\Tests\examples\single_step\nal2.8.nal') + print_filename(file.name) + parse_file(file) + + def test_single_step_nal9(self): + print('\n') + files = list(single_step_path.glob("nal9*.nal")) + for file in files: + # file = Path(r'D:\Codes\py-nars\Tests\examples\single_step\nal2.8.nal') + print_filename(file.name) + parse_file(file) + + def test_single_step_nal9_4(self): + print('\n') + files = list(single_step_path.glob("nal9.4.nal")) + for file in files: + # file = Path(r'D:\Codes\py-nars\Tests\examples\single_step\nal2.8.nal') + print_filename(file.name) + parse_file(file) + + def test_others(self): + print('\n') + files = [set(single_step_path.glob(f"nal{i}*.nal")) for i in range(1,10)] + files = set.union(*files) + files = set(single_step_path.glob(f"*.nal")) - files + for file in files: + # file = Path(r'D:\Codes\py-nars\Tests\examples\single_step\nal2.8.nal') + print_filename(file.name) + parse_file(file) + +class TEST_Examples_Parse_Multi(unittest.TestCase): + '''Examples files in `multi_step`.''' + + def test_multi_step_nal(self): + print('\n') + files = list(multi_step_path.glob("*.nal")) + for file in files: + # file = Path(r'D:\Codes\py-nars\Tests\examples\single_step\nal2.8.nal') + print_filename(file.name) + parse_file(file) + +class TEST_Examples_Parse_Application(unittest.TestCase): + '''Examples files in 
`application`.'''
+
+    def test_multi_step_nal(self):
+        print('\n')
+        files = list(application_step_path.glob("*.nal"))
+        for file in files:
+            # file = Path(r'D:\Codes\py-nars\Tests\examples\single_step\nal2.8.nal')
+            print_filename(file.name)
+            parse_file(file)
+
+class TEST_Examples_Parse_Stability(unittest.TestCase):
+    '''Examples files in `stability`.'''
+
+    def test_multi_step_nal(self):
+        print('\n')
+        files = list(stability_step_path.glob("*.nal"))
+        for file in files:
+            # file = Path(r'D:\Codes\py-nars\Tests\examples\single_step\nal2.8.nal')
+            print_filename(file.name)
+            parse_file(file)
+
+if __name__ == '__main__':
+
+    test_classes_to_run = [
+        TEST_Examples_Parse_Stability,
+        TEST_Examples_Parse_Single,
+        TEST_Examples_Parse_Multi,
+        TEST_Examples_Parse_Application,
+    ]
+
+    loader = unittest.TestLoader()
+
+    suites = []
+    for test_class in test_classes_to_run:
+        suite = loader.loadTestsFromTestCase(test_class)
+        suites.append(suite)
+
+    suites = unittest.TestSuite(suites)
+
+    runner = unittest.TextTestRunner()
+    results = runner.run(suites)
+
+
diff --git a/Tests/test_Parser.py b/Tests/test_Parser.py
new file mode 100644
index 0000000..787bfe8
--- /dev/null
+++ b/Tests/test_Parser.py
@@ -0,0 +1,295 @@
+
+from Narsese._py.Variable import VarPrefix, Variable
+from Narsese._py.Sentence import Sentence, Tense
+import Narsese
+from Narsese import Budget
+from Narsese import Compound, Connector
+import unittest
+from Narsese import Question, Quest, Judgement, Goal
+
+from NARS.DataStructures import Bag, Task, Concept
+from Narsese import Judgement, Term, Statement, Copula, Truth
+
+class TEST_Parser(unittest.TestCase):
+    def __init__(self, methodName: str = ...) -> None:
+        super().__init__(methodName=methodName)
+
+    def test_truthvalue(self):
+        line = '<robin-->bird>. %1.0; 0.9%'
+        content: Judgement = Narsese.parser.parse(line).sentence
+        self.assertEqual(content.truth.f, 1.0)
+        self.assertEqual(content.truth.c, 0.9)
+        line = '<robin-->bird>. %0.5; 0.4%'
+        content = Narsese.parser.parse(line).sentence
+        self.assertEqual(content.truth.f, 0.5)
+        self.assertEqual(content.truth.c, 0.4)
+
+    def test_first_order_copula_1(self):
+        line = '<robin-->bird>.'
+        content = Narsese.parser.parse(line).sentence
+        pass
+
+    def test_higher_order_copula_1(self):
+        line = '<<robin-->bird>==>P>.'
+        content = Narsese.parser.parse(line).sentence
+        line = '<<robin-->bird>==><robin-->animal>>.'
+        content = Narsese.parser.parse(line).sentence
+        pass
+
+    def test_tense(self):
+        line = '<robin-->bird>. :|:'
+        content: Sentence = Narsese.parser.parse(line).sentence
+        self.assertEqual(content.tense, Tense.Present)
+        line = '<robin-->bird>. :/:'
+        content: Sentence = Narsese.parser.parse(line).sentence
+        self.assertEqual(content.tense, Tense.Future)
+        line = '<robin-->bird>. :\:'
+        content: Sentence = Narsese.parser.parse(line).sentence
+        self.assertEqual(content.tense, Tense.Past)
+        pass
+
+    def test_compound_multi_1(self):
+        line = '(&&, IsFlyer, IsSwimmer, IsSinger).'
+        content = Narsese.parser.parse(line).sentence
+        line = '(||, IsFlyer, IsSwimmer, IsSinger).'
+        content = Narsese.parser.parse(line).sentence
+        line = '(&|, IsFlyer, IsSwimmer, IsSinger).'
+        content = Narsese.parser.parse(line).sentence
+        line = '(&|, IsFlyer, IsSwimmer, IsSinger).'
+        content = Narsese.parser.parse(line).sentence
+        line = '(&/, IsFlyer, IsSwimmer, IsSinger).'
+        content = Narsese.parser.parse(line).sentence
+        pass
+
+    def test_compound_multi_2(self):
+        line = '<(&, flyer, swimmer, singer)-->foo>.'
+        content = Narsese.parser.parse(line).sentence
+        line = '<(|, flyer, swimmer, singer)-->foo>.'
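+        # '|' is expected to parse to an intensional-intersection compound, mirroring
+        # the extensional '&' case parsed above (the Connector values are asserted
+        # explicitly in test_compound_multi_6 below).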
+ content = Narsese.parser.parse(line).sentence + pass + + def test_compound_multi_3(self): + line = '(IsFlyer || IsSwimmer && IsSinger).' + # (||, IsFlyer, (&&, IsSwimmer, IsSinger)). + content: Judgement = Narsese.parser.parse(line).sentence + cmpd: Compound = content.term + self.assertEqual(cmpd.connector, Connector.Disjunction) + self.assertTrue(cmpd[0].word == 'IsFlyer') + cmpd1: Compound = cmpd[1] + self.assertEqual(cmpd1.connector, Connector.Conjunction) + self.assertTrue(cmpd1[0].word == 'IsSwimmer') + self.assertTrue(cmpd1[1].word == 'IsSinger') + + line = '(IsFlyer && IsSwimmer || IsSinger).' + content = Narsese.parser.parse(line).sentence + # (||, (&&, IsFlyer, IsSwimmer), IsSinger). + content: Judgement = Narsese.parser.parse(line).sentence + cmpd: Compound = content.term + self.assertEqual(cmpd.connector, Connector.Disjunction) + self.assertTrue(cmpd[1].word == 'IsSinger') + cmpd1: Compound = cmpd[0] + self.assertIsInstance(cmpd1, Compound) + self.assertEqual(cmpd1.connector, Connector.Conjunction) + self.assertTrue(cmpd1[0].word == 'IsFlyer') + self.assertTrue(cmpd1[1].word == 'IsSwimmer') + pass + + def test_compound_multi_4(self): + line = '<(*, acid, base)-->neutralization>.' + content = Narsese.parser.parse(line).sentence + line = '<(acid * base)-->neutralization>.' + content = Narsese.parser.parse(line).sentence + line = '<(acid, base)-->neutralization>.' + content = Narsese.parser.parse(line).sentence + pass + + def test_compound_multi_5(self): + line = '<(A*B*C*D)-->R>.' + content = Narsese.parser.parse(line).sentence + line = '<(A&B&C&D)-->R>.' + content = Narsese.parser.parse(line).sentence + line = '<(A|B|C|D)-->R>.' + content = Narsese.parser.parse(line).sentence + pass + + def test_compound_multi_6(self): + line = '<(A * B|C * D)-->R>.' + content: Judgement = Narsese.parser.parse(line).sentence + # <(*, A, (|, B, C), D)-->R>. + stat: Statement = content.term + cmpd: Compound = stat.subject + self.assertIsInstance(cmpd, Compound) + self.assertEqual(cmpd.connector, Connector.Product) + self.assertEqual(cmpd.count(), 3) + cmpd1: Compound = cmpd[0] + cmpd2: Compound = cmpd[1] + cmpd3: Compound = cmpd[2] + self.assertIsInstance(cmpd1, Term) + self.assertIsInstance(cmpd2, Compound) + self.assertIsInstance(cmpd3, Term) + self.assertEqual(cmpd2.connector, Connector.IntensionalIntersection) + + line = '<(A&B | C&D)-->R>.' + content = Narsese.parser.parse(line).sentence + # <(|, (&, A, B), (&, C, D))-->R>. + stat: Statement = content.term + cmpd: Compound = stat.subject + self.assertIsInstance(cmpd, Compound) + self.assertEqual(cmpd.connector, Connector.IntensionalIntersection) + self.assertEqual(cmpd.count(), 2) + cmpd1: Compound = cmpd[0] + cmpd2: Compound = cmpd[1] + self.assertIsInstance(cmpd1, Compound) + self.assertIsInstance(cmpd2, Compound) + self.assertEqual(cmpd1.connector, Connector.ExtensionalIntersection) + self.assertEqual(cmpd2.connector, Connector.ExtensionalIntersection) + + line = '<(A*B&C*D|E*F&G*H)-->R>.' + content = Narsese.parser.parse(line).sentence + # <(*, A, (&, B, C), (|, D, E), (&, F, G), H)-->R>. 
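+        # The expected parse above implies the connector precedence '&' > '|' > '*':
+        # both '&' groups bind first, '|' then joins its neighbours, and the
+        # product '*' remains the outermost compound.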
+        stat: Statement = content.term
+        cmpd: Compound = stat.subject
+        self.assertIsInstance(cmpd, Compound)
+        self.assertEqual(cmpd.connector, Connector.Product)
+        self.assertEqual(cmpd.count(), 5)
+        cmpd1: Compound = cmpd[0]
+        cmpd2: Compound = cmpd[1]
+        cmpd3: Compound = cmpd[2]
+        cmpd4: Compound = cmpd[3]
+        cmpd5: Compound = cmpd[4]
+        self.assertIsInstance(cmpd1, Term)
+        self.assertIsInstance(cmpd2, Compound)
+        self.assertIsInstance(cmpd3, Compound)
+        self.assertIsInstance(cmpd4, Compound)
+        self.assertIsInstance(cmpd5, Term)
+        self.assertEqual(cmpd2.connector, Connector.ExtensionalIntersection)
+        self.assertEqual(cmpd3.connector, Connector.IntensionalIntersection)
+        self.assertEqual(cmpd4.connector, Connector.ExtensionalIntersection)
+
+
+        pass
+
+    def test_compound_negation(self):
+        line = '(--, A).'
+        content: Judgement = Narsese.parser.parse(line).sentence
+        cmpd: Compound = content.term
+        self.assertIsInstance(cmpd, Compound)
+        self.assertEqual(len(cmpd), 1)
+        self.assertEqual(cmpd.connector, Connector.Negation)
+
+        line = '-- A.'
+        content = Narsese.parser.parse(line).sentence
+        cmpd: Compound = content.term
+        self.assertIsInstance(cmpd, Compound)
+        self.assertEqual(len(cmpd), 1)
+        self.assertEqual(cmpd.connector, Connector.Negation)
+        pass
+
+    def test_compound_single_1(self):
+        line = '(A-B).'
+        content: Judgement = Narsese.parser.parse(line).sentence
+        cmpd: Compound = content.term
+        self.assertIsInstance(cmpd, Compound)
+        self.assertEqual(len(cmpd), 2)
+        self.assertEqual(cmpd.connector, Connector.ExtensionalDifference)
+
+        line = '(A~B).'
+        content: Judgement = Narsese.parser.parse(line).sentence
+        cmpd: Compound = content.term
+        self.assertIsInstance(cmpd, Compound)
+        self.assertEqual(len(cmpd), 2)
+        self.assertEqual(cmpd.connector, Connector.IntensionalDifference)
+
+    def test_compound_single_2(self):
+        line = '(A*B-C*D).'
+        content: Judgement = Narsese.parser.parse(line).sentence
+        cmpd: Compound = content.term
+        self.assertIsInstance(cmpd, Compound)
+        self.assertEqual(cmpd.count(), 2)
+        self.assertEqual(cmpd.connector, Connector.ExtensionalDifference)
+
+    def test_question_1(self):
+        line = '<A-->B>?'
+        content: Question = Narsese.parser.parse(line).sentence
+        pass
+
+    def test_quest_1(self):
+        line = '<A-->B>@'
+        content: Quest = Narsese.parser.parse(line).sentence
+        pass
+
+    def test_goal_1(self):
+        line = '<A-->B>!'
+        content: Goal = Narsese.parser.parse(line).sentence
+        line = '<A-->B>! :/:'
+        content: Goal = Narsese.parser.parse(line).sentence
+        line = '<A-->B>! %0.9;0.9%'
+        content: Goal = Narsese.parser.parse(line).sentence
+        line = '<A-->B>! :/: %0.9;0.9%'
+        content: Goal = Narsese.parser.parse(line).sentence
+        pass
+
+    def test_variable_1(self):
+        line = '<$x-->A>.'
+        content: Judgement = Narsese.parser.parse(line).sentence
+        stat: Statement = content.term
+        self.assertIsInstance(stat.subject, Variable)
+        self.assertEqual(stat.subject.prefix, VarPrefix.Independent)
+        line = '<#x-->A>.'
+        content: Judgement = Narsese.parser.parse(line).sentence
+        stat: Statement = content.term
+        self.assertIsInstance(stat.subject, Variable)
+        self.assertEqual(stat.subject.prefix, VarPrefix.Dependent)
+
+        line = '<?x --> (&,[red],apple)>?'
+        content: Judgement = Narsese.parser.parse(line).sentence
+        stat: Statement = content.term
+        self.assertIsInstance(stat.subject, Variable)
+        self.assertEqual(stat.subject.prefix, VarPrefix.Query)
+        pass
+
+    def test_budget_1(self):
+        line = r'$0.90;0.90$ <robin-->bird>.'
+        content = Narsese.parser.parse(line).sentence
+        line = '$0.90;0.90$ (&&, <robin-->swimmer>, <robin-->[flying]>). 
%0.9%' + content = Narsese.parser.parse(line).sentence + pass + + def test_error_cases_1(self): + line = '<(&&, [flying]>, [with_wings]>) ==> bird>>.' + content = Narsese.parser.parse(line) + line = '<<$1 --> num> ==> <(*,$1) --> num>>. %1.00;0.90%' + content = Narsese.parser.parse(line) + line = '(&/,<(*,SELF,{t002}) --> hold>,<(*,SELF,{t001}) --> at>,(^open,{t001}))!' + line = '(^open,{t001})!' + content = Narsese.parser.parse(line) + line = ' ^open>.' + content = Narsese.parser.parse(line) + line = '<(&/,<(*,SELF,{t002}) --> hold>,<(*,SELF,{t001}) --> at>,<(*,{t001}) --> ^open>) =/> <{t001} --> [opened]>>.' + content = Narsese.parser.parse(line) + + line = 'open(aa).' + content = Narsese.parser.parse(line) + + line = '<<$1 --> (/,livingIn,_,{graz})> ==> <$1 --> murder>>.' + content = Narsese.parser.parse(line) + + + pass + + def test_chinese(self): + line = '<(&, 会飞, 会游泳) <-> 会飞且会游泳>.' + content = Narsese.parser.parse(line).sentence + line = '<(会飞&会游泳) <-> 会飞且会游泳>.' + content = Narsese.parser.parse(line).sentence + pass + + # def test_list(self): + # line = '(#,a,b,c,d).' + # content = Narsese.parser.parse(line).sentence + # pass + +if __name__ == '__main__': + unittest.main() + + diff --git a/Tests/test_RuleMap/__init__.py b/Tests/test_RuleMap/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/Tests/test_RuleMap/_trash/test_RuleLUT.py b/Tests/test_RuleMap/_trash/test_RuleLUT.py new file mode 100644 index 0000000..90eb189 --- /dev/null +++ b/Tests/test_RuleMap/_trash/test_RuleLUT.py @@ -0,0 +1,105 @@ +from utils.RuleLUT import RuleLUT, Any + +from NARS.DataStructures._py.Link import LinkType +from NARS.RuleMap import Interface_SyllogisticRules, RuleMap_v2 +from Narsese import Budget +import unittest + +from NARS.DataStructures import Bag, Task, Concept +from Narsese import Judgement, Term, Statement, Copula, Truth + +from utils.SparseLUT import SparseLUT + +from NARS.RuleMap.RuleMap_v2 import CommonId + +class TEST_RuleLUT(unittest.TestCase): + + def test_0(self): + ''' + (3,3,3) + [ + ([0, 1, 2], "r1"), + ([0, 1, 1], "r2"), + ([Any, 0, 1], "r3"), + ([2, [0,1], [1,2]], "r4"), + ([2, [1,2], [2,3]], "r5"), + ] + [ + [0, 1, 2], + [0, 1, 1], + [[0,1,2], 0, 1], + [2, [0,1], [1,2]], + [2, [1,2], [2,3]], + ] + r1 + [0, 1, 2], + r2 + [0, 1, 1], + r3 + [0, 0, 1], + [1, 0, 1], + [2, 0, 1], + r4 + [2, 0, 1], + [2, 0, 2], + [2, 1, 1], + [2, 1, 2], + r5 + [2, 1, 2], + [2, 1, 3], + [2, 2, 2], + [2, 2, 3], + + + [0, 1, 2], r1 + [0, 1, 1], r2 + [0, 0, 1], r3 + [1, 0, 1], r3 + [2, 0, 1], r3, r4 + [2, 0, 2], r4 + [2, 1, 1], r4 + [2, 1, 2], r4, r5 + [2, 1, 3], r5 + [2, 2, 2], r5 + [2, 2, 3], r5 + ] + ''' + lut = RuleLUT((3, 3, 4)) + lut.add("r1", [0, 1, 2]) + lut.add("r2", [0, 1, 1]) + lut.add("r3", [Any, 0, 1]) + lut.add("r4", [2, [0,1], [1,2]]) + lut.add("r5", [2, [0,2], [2,3]]) + lut.build() + + # self.assertEqual(lut[0, 1, 2], ["r1", "r4", "r5"]) + # self.assertEqual(lut[0, 1, 1], ["r1", "r4", "r5"]) + # self.assertEqual(lut[0, 0, 1], ["r1", "r4", "r5"]) + # self.assertEqual(lut[2, 0, 1], ["r1", "r4", "r5"]) + # self.assertEqual(lut[2, 0, 2], ["r1", "r4", "r5"]) + # self.assertEqual(lut[2, 1, 1], ["r1", "r4", "r5"]) + # self.assertEqual(lut[2, 1, 2], ["r1", "r4", "r5"]) + # self.assertEqual(lut[2, 1, 3], ["r1", "r4", "r5"]) + # self.assertEqual(lut[2, 2, 2], ["r1", "r4", "r5"]) + # self.assertEqual(lut[2, 2, 3], ["r1", "r4", "r5"]) + pass + + + +if __name__ == '__main__': + + test_classes_to_run = [ + TEST_RuleLUT + ] + + loader = unittest.TestLoader() + + suites = [] + for 
test_class in test_classes_to_run: + suite = loader.loadTestsFromTestCase(test_class) + suites.append(suite) + + suites = unittest.TestSuite(suites) + + runner = unittest.TextTestRunner() + results = runner.run(suites) diff --git a/Tests/test_RuleMap/test_RuleMap.py b/Tests/test_RuleMap/test_RuleMap.py new file mode 100644 index 0000000..e47e9de --- /dev/null +++ b/Tests/test_RuleMap/test_RuleMap.py @@ -0,0 +1,22 @@ +from Narsese import Budget +import unittest + +from NARS.DataStructures import Bag, Task, Concept +from Narsese import Judgement, Term, Statement, Copula, Truth + +from NARS.RuleMap import RuleMap_v1 + +class TEST_RuleMap(unittest.TestCase): + def __init__(self, methodName: str = ...) -> None: + super().__init__(methodName=methodName) + self.rule_map = RuleMap_v1() + + def test_1(self): + rule_map = self.rule_map + + + +if __name__ == '__main__': + unittest.main() + + diff --git a/Tests/test_RuleMap/test_RuleMap_v2.py b/Tests/test_RuleMap/test_RuleMap_v2.py new file mode 100644 index 0000000..f96176f --- /dev/null +++ b/Tests/test_RuleMap/test_RuleMap_v2.py @@ -0,0 +1,26 @@ +from Narsese import Budget +import unittest + +from NARS.DataStructures import Bag, Task +from Narsese import Judgement, Term, Statement, Copula, Truth +import Narsese +from NARS.DataStructures import Concept +from Narsese import Copula +from NARS.RuleMap import RuleMap_v2 +from NARS.RuleMap.RuleMap_v2 import CommonId +from NARS.DataStructures import LinkType + +class TEST_RuleMap_v2(unittest.TestCase): + def __init__(self, methodName: str = ...) -> None: + super().__init__(methodName=methodName) + + def test_0(self): + rulemap = RuleMap_v2(False) + rulemap.build(False) + rulemap.draw(show_labels=False) + pass + + + +if __name__ == '__main__': + unittest.main() diff --git a/Tests/test_RuleMap/test_sparse_lut.py b/Tests/test_RuleMap/test_sparse_lut.py new file mode 100644 index 0000000..506deb5 --- /dev/null +++ b/Tests/test_RuleMap/test_sparse_lut.py @@ -0,0 +1,359 @@ +from networkx.generators.random_graphs import fast_gnp_random_graph +from NARS.DataStructures._py.Link import LinkType +from NARS.RuleMap import Interface_SyllogisticRules, RuleMap_v2 +from Narsese import Budget +import unittest + +from NARS.DataStructures import Bag, Task, Concept +from Narsese import Judgement, Term, Statement, Copula, Truth + +from utils.SparseLUT import SparseLUT, Any +from utils.tools import get_size +from NARS.RuleMap.RuleMap_v2 import CommonId +from utils.SparseLUT.sparse_lut_v3.branch_list import Node + +class TEST_SparseLUT(unittest.TestCase): + + def test_new_0(self): + lut = SparseLUT((3, 3, 3)) + lut.add([0, Any, Any], "A") + lut.add([Any, 0, 1], "B") + lut.build(False) + lut.blist.draw() + pass + + def test_0_0(self): + lut = SparseLUT((10, 20)) + lut[1, 1] = "foo" + lut.build() + self.assertEqual(lut[1, 1], ["foo"]) + self.assertEqual(lut[2, 2], None) + pass + + def test_0_1(self): + lut = SparseLUT((3, 3, 3)) + lut.add([0, Any, Any], "A") + lut.add([Any, 0, 1], "B") + lut.build(False) + lut.draw() + self.assertEqual(lut[0, 2, 2], ["A"]) + self.assertEqual(lut[2, 0 ,1], ["B"]) + self.assertEqual(lut[0, 0 ,1], ["A", "B"]) + pass + + def test_2(self): + # TODO: the SparseLUT seems still improvable. This case shows why. + # [0, 0, ...] and [0, 1, ...] can share the rest part with [[1,2,None], 0, ...] and [[1,2,None], 1, ...] correspondingly. 
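+        # That is, once the first two slots are decided, the trailing [Any, Any, Any]
+        # suffixes of the rows are identical, so a fully minimized build could
+        # presumably share a single suffix chain instead of duplicating it per branch.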
+ lut = SparseLUT((3, 3, 3, 3, 3)) + lut.add([Any, 0, Any, Any, Any], "A") + lut.add([Any, 1, Any, Any, Any], "B") + lut.add([0, 2, Any, Any, Any], "C") + lut.build(False) + lut.draw() + pass + + def test_3(self): + lut = SparseLUT((3, 3, 3, 4, 3, 3, 3)) + lut.add([[0,1,2],0,[0,1,2],[0,1,2],[0,1,2],[1],[0,1,2]], "A") + lut.add([[0,1,2],0,[0,1,2],[0,1,2],[0,1,2],[0,1,2],[0,1,2]], "B") + lut.add([[0,1,2],1,[0,1,2],[0,1,2],[0,1,2],[0,1,2],[0,1,2]], "C") + # lut.add([0,2,[0,1,2],[0,1,2],[0,1,2],[0,1,2],[0,1,2]], "D") + # lut.add([[1,2],0,[0,1,2],[1,3],[0,1,2],2,[0,1,2]], "E") + lut.build(False) + lut.draw() + pass + + def test_4(self): + lut = SparseLUT((3, 3, 3)) + lut.add([[0,1,2],[0,1],[0,1,2]], "A") + lut.add([[0,1,2],[1,2],[0,1,2]], "B") + lut.add([[0],[2],[0,1,2]], "C") + lut.build(False) + lut.draw() + pass + + def test_6(self): + lut = SparseLUT((3, 3)) + lut.add([[0,1,2],[0,1]], "A") + lut.add([[0],[1,2]], "B") + lut.build(False) + lut.draw() + pass + + def test_7(self): + lut = SparseLUT((3,3,3)) + lut.add([[0,1,2],0,[0,1]], "A") + lut.add([[0,1,2],1,[1,2]], "B") + lut.add([[0,1,2],[0,1,2],[0,1,2]], "C") + lut.build(False) + lut.draw() + pass + + + def test_8(self): + ''' + [[0,1,2],0,1,[0,1,2]] + [[0,1,2],0,[0,1,2],[0,1,2]] + [[0,1,2],1,[0,1,2],[0,1,2]] + [0,2,[0,1,2],[0,1,2]] + + [[1,2],0,[0,2],[0,1,2]] + ''' + lut = SparseLUT((3, 3, 3, 3)) + lut.add([[0,1,2],0,1,[0,1,2]], "A") + lut.add([[0,1,2],0,[0,1,2],[0,1,2]], "B") + lut.add([[0,1,2],1,[0,1,2],[0,1,2]], "C") + lut.add([0,2,[0,1,2],[0,1,2]], "D") + lut.add([[1,2],0,[0,2],[0,1,2]], "E") + lut.build(False) + lut.draw() + pass + + + def test_9(self): + lut = SparseLUT((3, 3, 3)) + lut.add([[0,1,2], [0,1], [0,1,2]], "A") + lut.add([0,[1], [0,1,2]], "B") + # lut.add([[1,2],0,[0,2],[0,1,2]], "E") + lut.build(False) + lut.draw() + pass + + def test_9_1(self): + lut = SparseLUT((3, 3, 3, 3)) + lut.add([[0,1,2], [0,1], [0,1,2], [0,1,2]], "A") + lut.add([0, 1, 0, [0,1,2]], "B") + # lut.add([[1,2],0,[0,2],[0,1,2]], "E") + lut.build(False) + lut.draw() + pass + + def test_9_2(self): + lut = SparseLUT((3, 3, 3, 3)) + lut.add([[0,1,2], [0,1], [0,1,2], [0,1,2]], "A") + lut.add([0, 1, 0, 0], "B") + # lut.add([[1,2],0,[0,2],[0,1,2]], "E") + lut.build(False) + lut.draw() + pass + + + def test_10(self): + lut = SparseLUT((3, 3, 3, 3, 3)) + lut.add([[0,1,2], 0, 0, 0, [0,1,2]], "A") + lut.add([0, 0, 0, 0, 0], "B") + lut.build(False) + lut.draw() + pass + + def test_11(self): + lut = SparseLUT((3,)) + lut.add([[0,1]], "A") + lut.add([[0,2]], "B") + lut.build(False) + lut.draw() + pass + + def test_12(self): + lut = SparseLUT((3, 3, 3, 3, 3)) + lut.add([[0,1,2], 0, 0, 0, [0,1,2]], "A") + lut.add([0, 0, 0, 0, 0], "B") + # lut.add([[1,2],0,[0,2],[0,1,2]], "E") + lut.build(False) + lut.draw() + pass + + def test_13(self): + lut = SparseLUT((3, 3, 3, 3, 3)) + lut.add([[0,1,2], [0,1], [0,1,2], [0,1,2], [0,1,2]], "A") + lut.add([0, 1, [0,1,2], [0,1,2], [0,1,2]], "B") + # lut.add([[1,2],0,[0,2],[0,1,2]], "E") + lut.build(False) + lut.draw() + pass + + def test_14_0(self): + lut = SparseLUT((3, 3, 3)) + lut.add([[0,1,2], [0,1,2], [0,1,2]], "A") + lut.add([[0,1,2], [1,2], [0,1,2]], "B") + lut.build(False) + lut.draw() + pass + + def test_14_1(self): + lut = SparseLUT((3, 3, 3, 3)) + lut.add([[0,1,2], [0,1,2], [0,1,2], [0,1,2]], "A") + lut.add([[0,1,2], [1,2], [1], [0,1,2]], "B") + lut.build(False) + lut.draw() + pass + + def test_14_1_2(self): + lut = SparseLUT((3, 3, 3)) + lut.add([[0,1,2], [0,1,2], [0,1,2]], "A") + lut.add([[0,1,2], [1,2], [1]], "B") + 
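+        # "B" narrows the second slot to [1,2] and the third to [1], both subsets of
+        # "A"'s full ranges, so the build should split the shared nodes and attach "B"
+        # only along the overlapping index paths.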
lut.build(False) + lut.draw() + pass + + def test_14_2(self): + lut = SparseLUT((3, 3, 3, 3, 3, 3)) + lut.add([[0,1,2], [0,1,2], [0,1,2], [0,1,2], [0,1,2], [0,1,2]], "A") + lut.add([[0,1,2], [1,2], [1], [1,2], [0,1,2], [0,1,2]], "B") + lut.build(False) + lut.draw() + pass + + def test_15(self): + lut = SparseLUT((3, 3, 3, 3, 3, 3)) + lut.add([[0,1,2], [0,1,2], [0,1,2], [0,1,2], [0,1,2], [0,1,2]], "A") + lut.add([[0,1,2], [1,2], [0], [0,1,2], [0,1,2], [0,1,2]], "B") + lut.build(False) + lut.draw() + pass + + def test_15_1(self): + lut = SparseLUT((3, 3, 3, 3, 3, 3)) + lut.add([[0,1,2], [0,1,2], [0,1,2], [0,1,2], [0,1,2], [0,1,2]], "A") + lut.add([[0,1,2], [1,2], [0], [0,1,2], [0,1,2], [0,1,2]], "B") + lut.add([[0,1,2], [1,2], [1,2], [0,1,2], [0,1,2], [0,1,2]], "C") + lut.build(False) + lut.draw() + pass + + def test_15_2(self): + lut = SparseLUT((3, 3, 3, 3, 3, 3)) + lut.add([[0,1,2], [0,1,2], [0,1,2], [0,1,2], [0,1,2], [0,1,2]], "A") + lut.add([[0,1,2], [1,2], [0], [0,1,2], [0,1,2], [0,1,2]], "B") + lut.add([[0,1,2], [1,2], [1,2], [1,2], [0,1,2], [0,1,2]], "C") + lut.build(False) + lut.draw() + pass + + def test_15_3(self): + lut = SparseLUT((3, 3, 3, 3, 3)) + lut.add([[0,1,2], [0,1,2], [0,1,2], [0,1,2], [0,1,2]], "A") + lut.add([[1,2], [0], [0,1,2], [0,1,2], [0,1,2]], "B") + lut.add([[1,2], [1], [0,1,2], [0,1,2], [0,1,2]], "C") + lut.build(False) + lut.draw() + pass + + + # def test_complex_0(self): + # '''''' + # lut = SparseLUT([8, 8, 2, 2, 2, 2, 2, 2, 2, 12, 12, 2, 4, 15, 15, 4]) + # lut.add((4, 4, [1, 0], None, None, None, None, None, None, 0, 0, 0, [2, 1], None, None, None), "foo") + # lut.build() + + # def test_complex_1(self): + # '''''' + # lut = SparseLUT([8, 8, 2, 2, 2, 2, 2, 2, 2, 12, 12, 2, 4, 15, 15, 4]) + # lut.add((4, 4, 1, None, None, None, None, None, None, 0, 0, 0, 1, None, None, None), "foo1") + # lut.add((4, 4, 1, None, None, None, None, None, None, 0, 0, 0, 2, None, None, None), "foo2") + # lut.add((4, 4, 1, None, None, None, None, None, None, 0, 0, 0, 1, None, None, None), "foo3") + # lut.add((4, 4, 1, None, None, None, None, None, None, 0, 0, 0, 2, None, None, None), "foo4") + # lut.add((4, 4, 1, None, None, None, None, None, None, 0, 0, 0, 0, None, None, None), "foo5") + # lut.add((4, 4, 1, None, None, None, None, None, None, 0, 0, 0, 0, None, None, None), "foo6") + # lut.add((4, 4, 1, None, None, None, None, None, None, 0, 0, 0, 3, None, None, None), "foo7") + # lut.add((4, 4, 1, None, None, None, None, None, None, 0, 0, 0, 3, None, None, None), "foo8") + # lut.add((4, 4, 1, None, None, None, None, None, None, 0, 0, 1, None, None, None, None), "foo9") + # lut.add((4, 4, 1, None, None, None, None, None, None, 0, 0, 0, 0, None, None, [0]), "foo10") + # lut.add((4, 4, 1, None, None, None, None, None, None, 0, 0, 0, 0, None, None, [0]), "foo11") + # lut.add((4, 4, 1, None, None, None, None, None, None, 0, 0, 0, 3, None, None, [0]), "foo12") + # lut.add((4, 4, 1, None, None, None, None, None, None, 0, 0, 0, 3, None, None, [0]), "foo13") + # lut.add((4, 4, 1, None, None, None, 1, 0, 1, 0, 0, 0, 0, 5, 5, [0]), "foo14") + # lut.add((4, 4, 1, None, None, None, 1, 1, 0, 0, 0, 0, 0, 5, 5, [0]), "foo15") + # lut.build(False) + # # lut.print(False) + # lut[(4, 4, 1, None, None, None, 1, 0, 1, 0, 0, 0, 0, 5, 5, 0)] + # lut[(4, 4, 1, None, None, None, 1, 1, 0, 0, 0, 0, 0, 5, 5, 0)] + # pass + + # def test_rulemap_0(self): + # rulemap = RuleMap_v2(False) + + # indices1 = rulemap._add_rule(Interface_SyllogisticRules._syllogistic__deduction__0_1, + # LinkType1 = 
LinkType.COMPOUND_STATEMENT, + # LinkType2 = LinkType.COMPOUND_STATEMENT, + # has_common_id = True, + # Copula1 = Copula.Inheritance, + # Copula2 = Copula.Inheritance, + # match_reverse = False, + # common_id = CommonId(0, 1) + # ) + + # indices2 = rulemap._add_rule(Interface_SyllogisticRules._syllogistic__deduction__1_0, + # LinkType1 = LinkType.COMPOUND_STATEMENT, + # LinkType2 = LinkType.COMPOUND_STATEMENT, + # has_common_id = [True, False], + # Copula1 = Copula.Inheritance, + # Copula2 = Copula.Inheritance, + # match_reverse = False, + # common_id = [CommonId(1, 0), CommonId(0, 1)] + # ) + # rulemap.build(False) + # rulemap.map.draw() + # pass + + +class TEST_SparseLUT_v3(unittest.TestCase): + + def test_node_0(self): + node1 = Node({1,2,3}) + node2 = Node({4,5,6}) + node3 = Node({1,2,3}) + node1.append(node2) + node3.append(node2) + self.assertEqual(len(node2.last_nodes_list), 2) + node2.remove_last(node1) + self.assertEqual(len(node2.last_nodes_list), 0) + pass + + def test_duplicate_shallow(self): + node1 = Node({0,1,2}) + node1.append(Node({3})) + + Node({0,2}).append(node1) + Node({1}).append(node1) + node2 = node1.duplicate_shallow({1,2}) + node1.reset_index({0}) + + self.assertEqual(node1.last_nodes_list, node2.last_nodes_list) + self.assertEqual(node1.next_nodes_list, node2.next_nodes_list) + pass + + + def test_pop_identical(self): + node1 = Node({1,2,3}) + node2 = Node({4,5,6}) + node3 = Node({1,2,3}) + node1.append(node2) + node3.append(node2) + node2.remove_last(node1, True) + self.assertEqual(len(node2.last_nodes_list), 1) + self.assertEqual(node2.last_nodes_list[0], node3) + self.assertEqual(id(node2.last_nodes_list[0]), id(node3)) + node2.remove_last(node3, True) + self.assertEqual(len(node2.last_nodes_list), 0) + + pass + +if __name__ == '__main__': + + test_classes_to_run = [ + TEST_SparseLUT, + # TEST_SparseLUT_v3, + ] + + loader = unittest.TestLoader() + + suites = [] + for test_class in test_classes_to_run: + suite = loader.loadTestsFromTestCase(test_class) + suites.append(suite) + + suites = unittest.TestSuite(suites) + + runner = unittest.TextTestRunner() + results = runner.run(suites) diff --git a/Tests/test_Table.py b/Tests/test_Table.py new file mode 100644 index 0000000..ddc00c4 --- /dev/null +++ b/Tests/test_Table.py @@ -0,0 +1,58 @@ +import NARS +import unittest + +from NARS.DataStructures import Bag, Task, Concept, Table +from Narsese import Judgement, Term, Statement, Copula, Truth + +from pathlib import Path +import Narsese +from Narsese import Compound, Connector + + +class TEST_Table(unittest.TestCase): + + def test_index(self): + '''''' + table = Table(100) + + line = 'bird>. %0.5;0.5%' + task = Narsese.parser.parse(line) + table.add(task, task.sentence.truth.e) + task1 = task + + line = 'bird>. %0.7;0.7%' + task = Narsese.parser.parse(line) + table.add(task, task.sentence.truth.e) + + line = 'bird>. %0.9;0.9%' + task = Narsese.parser.parse(line) + table.add(task, task.sentence.truth.e) + task2 = task + + line = 'bird>. 
%0.9;0.9%'
+        task = Narsese.parser.parse(line)
+        table.add(task, task.sentence.truth.e)
+
+        self.assertEqual(table.first(), task2)
+        self.assertEqual(table.last(), task1)
+        pass
+
+
+
+if __name__ == '__main__':
+
+    test_classes_to_run = [
+        TEST_Table
+    ]
+
+    loader = unittest.TestLoader()
+
+    suites = []
+    for test_class in test_classes_to_run:
+        suite = loader.loadTestsFromTestCase(test_class)
+        suites.append(suite)
+
+    suites = unittest.TestSuite(suites)
+
+    runner = unittest.TextTestRunner()
+    results = runner.run(suites)
diff --git a/Tests/test_color.py b/Tests/test_color.py
new file mode 100644
index 0000000..f76a5d9
--- /dev/null
+++ b/Tests/test_color.py
@@ -0,0 +1,18 @@
+import tqdm
+from sty import fg, bg, ef, rs
+
+foo = fg.red + 'This is red text!' + fg.rs
+bar = bg.blue + 'This has a blue background!' + bg.rs
+baz = ef.italic + 'This is italic text' + rs.italic
+qux = fg(201) + 'This is pink text using 8bit colors' + fg.rs
+qui = bg(255, 10, 10) + 'This is red text using 24bit colors.' + bg.rs
+
+# Add custom colors:
+
+from sty import Style, RgbFg
+
+fg.orange = Style(RgbFg(255, 150, 50))
+
+buf = fg.orange + 'Yay, Im orange.' + fg.rs
+
+print(foo, bar, baz, qux, qui, buf, sep='\n')
\ No newline at end of file
diff --git a/Tests/test_extract_feature.py b/Tests/test_extract_feature.py
new file mode 100644
index 0000000..60a8581
--- /dev/null
+++ b/Tests/test_extract_feature.py
@@ -0,0 +1,95 @@
+import NARS
+import unittest
+
+from NARS.DataStructures import Bag, Task, Concept, Table
+from Narsese import Judgement, Term, Statement, Copula, Truth
+
+from pathlib import Path
+import Narsese
+from Narsese import Compound, Connector
+
+from NARS.RuleMap.RuleMap_v2 import extract_feature
+
+class TEST_ExtractFeature(unittest.TestCase):
+
+    def test_statement_statement_0(self):
+        ''''''
+        # <S-->P>, <P-->S>
+        premise1 = Narsese.parse("<S-->P>.").term
+        premise2 = Narsese.parse("<P-->S>.").term
+        feature = extract_feature(premise1, premise2)
+        self.assertTrue(feature.match_reverse)
+        pass
+
+
+    def test_statement_statement_1(self):
+        ''''''
+        # <M-->S>, <M-->P>
+        premise1 = Narsese.parse("<M-->S>.").term
+        premise2 = Narsese.parse("<M-->P>.").term
+        feature = extract_feature(premise1, premise2)
+        self.assertFalse(feature.match_reverse)
+        self.assertTrue(feature.has_common_id)
+        self.assertEqual(feature.common_id_task, 0)
+        self.assertEqual(feature.common_id_belief, 0)
+
+        # <M-->P>, <S-->M>
+        premise1 = Narsese.parse("<M-->P>.").term
+        premise2 = Narsese.parse("<S-->M>.").term
+        feature = extract_feature(premise1, premise2)
+        self.assertFalse(feature.match_reverse)
+        self.assertTrue(feature.has_common_id)
+        self.assertEqual(feature.common_id_task, 0)
+        self.assertEqual(feature.common_id_belief, 1)
+
+        # <S-->M>, <M-->P>
+        premise1 = Narsese.parse("<S-->M>.").term
+        premise2 = Narsese.parse("<M-->P>.").term
+        feature = extract_feature(premise1, premise2)
+        self.assertFalse(feature.match_reverse)
+        self.assertTrue(feature.has_common_id)
+        self.assertEqual(feature.common_id_task, 1)
+        self.assertEqual(feature.common_id_belief, 0)
+
+        # <S-->M>, <P-->M>
+        premise1 = Narsese.parse("<S-->M>.").term
+        premise2 = Narsese.parse("<P-->M>.").term
+        feature = extract_feature(premise1, premise2)
+        self.assertFalse(feature.match_reverse)
+        self.assertTrue(feature.has_common_id)
+        self.assertEqual(feature.common_id_task, 1)
+        self.assertEqual(feature.common_id_belief, 1)
+
+        pass
+
+
+    def test_statement_statement_2(self):
+        ''''''
+        # <S-->P>, <P-->S>
+        premise1 = Narsese.parse("<S-->P>.").term
+        premise2 = Narsese.parse("<P-->S>.").term
+        feature = extract_feature(premise1, premise2)
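+        # Same premises as test_statement_statement_0: exact reversals of each other,
+        # so match_reverse should hold; the extra assertion below expects that, unlike
+        # the syllogistic cases above, no single common-id position is reported.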
self.assertTrue(feature.match_reverse) + self.assertIsNone(feature.has_common_id) + pass + + + + +if __name__ == '__main__': + + test_classes_to_run = [ + TEST_ExtractFeature + ] + + loader = unittest.TestLoader() + + suites = [] + for test_class in test_classes_to_run: + suite = loader.loadTestsFromTestCase(test_class) + suites.append(suite) + + suites = unittest.TestSuite(suites) + + runner = unittest.TextTestRunner() + results = runner.run(suites) diff --git a/Tests/test_variable/__init__.py b/Tests/test_variable/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/Tests/test_variable/_trash/test_Substitution_v1.py b/Tests/test_variable/_trash/test_Substitution_v1.py new file mode 100644 index 0000000..cb3d0c9 --- /dev/null +++ b/Tests/test_variable/_trash/test_Substitution_v1.py @@ -0,0 +1,72 @@ +import NARS +import unittest + +from NARS.DataStructures import Bag, Task, Concept, Table +from Narsese import Judgement, Term, Statement, Copula, Truth + +from pathlib import Path +import Narsese +from Narsese import Compound, Connector +from NAL.MetaLevelInference.VariableSubstitution import * + +class TEST_Substitution(unittest.TestCase): + + def test_compound(self): + '''''' + compound = Compound(Connector.Conjunction, Term("A"), Term("B"), Term("C")) + term_new = Compound(Connector.Conjunction, Term("D"), Term("B"), Term("C")) + term_substitution = substitution(compound, Term("A"), Term("D")) + self.assertEqual(term_substitution, term_new) + pass + + def test_statement(self): + '''''' + statement = Statement(Term("A"), Copula.Inheritance, Term("B")) + term_new = Statement(Term("C"), Copula.Inheritance, Term("B")) + term_substitution = substitution(statement, Term("A"), Term("C")) + self.assertEqual(term_substitution, term_new) + pass + + def test_term(self): + '''''' + term = Term("A") + term_new = Term("B") + term_substitution = substitution(term, Term("A"), Term("B")) + self.assertEqual(term_substitution, term_new) + + term = Term("A") + term_new = Term("A") + term_substitution = substitution(term, Term("B"), Term("B")) + self.assertEqual(term_substitution, term_new) + pass + + def test_complex(self): + '''''' + statement1 = Statement(Term("A"), Copula.Inheritance, Term("B")) + statement2 = Statement(Term("A"), Copula.Inheritance, Term("C")) + compound = Compound(Connector.Conjunction, statement1, statement2) + statement1 = Statement(Term("D"), Copula.Inheritance, Term("B")) + statement2 = Statement(Term("D"), Copula.Inheritance, Term("C")) + term_new = Compound(Connector.Conjunction, statement1, statement2) + term_substitution = substitution(compound, Term("A"), Term("D")) + self.assertEqual(term_substitution, term_new) + pass + + +if __name__ == '__main__': + + test_classes_to_run = [ + TEST_Substitution + ] + + loader = unittest.TestLoader() + + suites = [] + for test_class in test_classes_to_run: + suite = loader.loadTestsFromTestCase(test_class) + suites.append(suite) + + suites = unittest.TestSuite(suites) + + runner = unittest.TextTestRunner() + results = runner.run(suites) diff --git a/Tests/test_variable/test_Substitution.py b/Tests/test_variable/test_Substitution.py new file mode 100644 index 0000000..529003b --- /dev/null +++ b/Tests/test_variable/test_Substitution.py @@ -0,0 +1,141 @@ +import NARS +import unittest + +from NARS.DataStructures import Bag, Task, Concept, Table +from Narsese import Judgement, Term, Statement, Copula, Truth + +from pathlib import Path +import Narsese +from Narsese import Compound, Connector +from 
NAL.MetaLevelInference.VariableSubstitution import * +from Narsese import Terms + +class TEST_Substitution(unittest.TestCase): + + def test_substition_var_to_var(self): + ''' + <(&&, <#x-->A>, <#x-->B>, <<$y-->C>==><$y-->D>>, <$z-->E>) ==> <$z-->F>>. + <<$x-->F>==><$x-->H>>. + |- + <(&&, <#x-->A>, <#x-->B>, <<$y-->C>==><$y-->D>>, <$z-->E>) ==> <$x-->H>>. + ''' + term1 = Narsese.parse("<(&&, <#x-->A>, <#x-->B>, <<$y-->C>==><$y-->D>>, <$z-->E>) ==> <$z-->F>>.").term + term2 = Narsese.parse("<<$x-->F>==><$x-->H>>.").term + subst_var = unification__var_var(term1, term2, [1], [0]) # to find possible replacement. + term2 = subst_var.apply(inverse=True) + term3 = Statement.Implication(term1[0], term2[1]) + # term_substitution = substitution(compound, Term("A"), Term("D")) + # self.assertEqual(term_substitution, term_new) + pass + + def test_substition_var_to_const_0(self): + ''' + <<$x-->A>==><$x-->B>>. + <B>==>D>>. + |- + <A>==>D>> + ''' + term1 = Narsese.parse("<<$x-->A>==><$x-->B>>.").term + term2 = Narsese.parse("<B>==>D>>.").term + self.assertTrue(term1[1].equal(term2[0])) + subst_var = unification__var_const(term1, term2, [1], [0]) # to find possible replacement. + term2 = subst_var.apply(inverse=True) + term3 = Statement.Implication(term1[0], term2[1]) + # term_substitution = substitution(compound, Term("A"), Term("D")) + # self.assertEqual(term_substitution, term_new) + pass + + def test_substition_var_to_const_1(self): + ''' + <(&&, <$x-->A>, <$y-->A>) ==> (&&, <$x-->B>, <$y-->C>)>. + <E> ==> (&&, A>, A>)>. + |- + <E> ==> (&&, B>, C>)>. + <E> ==> (&&, B>, C>)>. + ''' + term1 = Narsese.parse("<(&&, <$x-->A>, <$y-->A>) ==> (&&, <$x-->B>, <$y-->C>)>.").term + term2 = Narsese.parse("<E> ==> (&&, A>, A>)>.").term + self.assertTrue(term1[0].equal(term2[1])) + subst_var = unification__var_const(term1, term2, [0], [1]) # to find possible replacement. + term2 = subst_var.apply() + term3 = Statement.Implication(term1[0], term2[1]) + # term_substitution = substitution(compound, Term("A"), Term("D")) + # self.assertEqual(term_substitution, term_new) + pass + + def test_check_conflict(self): + ''' + <<$x-->A>==><$x-->B>>. + <B>==>D>>. + |- + <A>==>D>> + ''' + + is_conflict, mapping = Elimination.check_conflict([0,0], [Term("C"), Term("D")]) + self.assertTrue(is_conflict) + pass + + def test_statement_equal_0(self): + ''' + <<$x-->A><=><$x-->B>>. + <A><=>B>>. + ''' + term1 = Narsese.parse("<<$x-->A><=><$x-->B>>.").term + term2 = Narsese.parse("<B><=>A>>.").term + self.assertTrue(term1.equal(term2)) + pass + + def test_compound_equal_0(self): + ''' + (&/, <$x-->A>, <$x-->B>). + (&/, A>, B>). + ''' + term1 = Narsese.parse("(&/, <$x-->A>, <$x-->B>).").term + term2 = Narsese.parse("(&/, A>, B>).").term + self.assertTrue(term1.equal(term2)) + pass + + def test_compound_equal_1(self): + ''' + (&&, <$x-->A>, <$x-->B>, D>). + (&&, D>, B>, A>). + ''' + term1 = Narsese.parse("(&&, <$x-->A>, <$x-->B>, D>).").term + term2 = Narsese.parse("(&&, D>, B>, A>).").term + self.assertTrue(term1.equal(term2)) + pass + + def test_compound_equal_2(self): + ''' + (&&, <$x-->A>, <$x-->B>, D>). + (&&, D>, B>, B>). 
+ ''' + term1 = Narsese.parse("(&&, <$x-->A>, <$x-->B>, D>).").term + term2 = Narsese.parse("(&&, D>, B>, B>).").term + self.assertFalse(term1.equal(term2)) + pass + + def test_substitution_0(self): + term1 = Narsese.parse("(&&, <$x-->A>, <$x-->B>, D>).").term + term2 = Narsese.parse("(&&, D>, B>, B>).").term + + + + +if __name__ == '__main__': + + test_classes_to_run = [ + TEST_Substitution + ] + + loader = unittest.TestLoader() + + suites = [] + for test_class in test_classes_to_run: + suite = loader.loadTestsFromTestCase(test_class) + suites.append(suite) + + suites = unittest.TestSuite(suites) + + runner = unittest.TextTestRunner() + results = runner.run(suites) diff --git a/Tests/test_variable/test_Term_Equal.py b/Tests/test_variable/test_Term_Equal.py new file mode 100644 index 0000000..c62cdae --- /dev/null +++ b/Tests/test_variable/test_Term_Equal.py @@ -0,0 +1,185 @@ +import NARS +import unittest + +from NARS.DataStructures import Bag, Task, Concept, Table +from Narsese import Judgement, Term, Statement, Copula, Truth + +from pathlib import Path +import Narsese +from Narsese import Compound, Connector +from NAL.MetaLevelInference.VariableSubstitution import * +from Narsese._py.Variable import VarPrefix, Variable +from utils.IndexVar import IndexVar + +class TEST_Term_Equal(unittest.TestCase): + + def test_identical(self): + '''''' + ''' + (&&, <#x-->A>, C>) and (&&, C>, <{A1}-->A>) are equal, thought not identical. + ''' + term1 = Narsese.parse('(&&, <#x-->A>, C>).').term + term2 = Narsese.parse('(&&, C>, <#y-->A>).').term + self.assertTrue(term1.identical(term2)) + pass + + def test_term_equal_0(self): + term1 = Term("bird") + term2 = Term("bird") + self.assertTrue(term1.equal(term2)[0]) + pass + + def test_term_equal_1(self): + term1 = Term("bird") + term2 = Term("animal") + self.assertFalse(term1.equal(term2)[0]) + pass + + def test_term_equal_2(self): + term1 = Term("bird") + term2 = Variable.Independent("x") + self.assertTrue(term1.equal(term2)[0]) + pass + + def test_mix_equal_0(self): + term1 = Term("bird") + term2 = Statement.Inheritance(Term("brid"), Term("animal")) + self.assertFalse(term1.equal(term2)[0]) + self.assertFalse(term2.equal(term1)[0]) + pass + + + def test_mix_equal_1(self): + term1 = Term("bird") + term2 = Compound.ExtensionalIntersection(Term("brid"), Term("monkey")) + self.assertFalse(term1.equal(term2)[0]) + self.assertFalse(term2.equal(term1)[0]) + pass + + + def test_mix_equal_2(self): + term1 = Statement.Inheritance(Term("brid"), Term("animal")) + term2 = Compound.ExtensionalIntersection(Term("brid"), Term("animal")) + self.assertFalse(term1.equal(term2)[0]) + self.assertFalse(term2.equal(term1)[0]) + pass + + + def test_mix_equal_3(self): + term1: Statement = Narsese.parse("<<$x-->A> ==> <$x-->B>>.").term + term2 = Narsese.parse("<(&&, <#x-->C>, <#x-->D>)-->A>.").term + self.assertFalse(term1.subject.equal(term2)[0]) + self.assertFalse(term2.equal(term1.subject)[0]) + pass + + + def test_statement_equal_0(self): + term1 = Statement.Inheritance(Term("bird"), Term("animal")) + term2 = Statement.Inheritance(Term("bird"), Term("animal")) + self.assertTrue(term1.identical(term2)) + self.assertTrue(term1.equal(term2)[0]) + pass + + + def test_statement_equal_1(self): + term1 = Statement.Inheritance(Term("bird"), Term("animal")) + term2 = Statement.Inheritance(Term("robin"), Term("animal")) + self.assertFalse(term1.equal(term2)[0]) + pass + + + def test_statement_equal_2(self): + term1 = Statement.Inheritance(Term("bird"), Term("animal")) + term2 = 
+    def test_statement_equal_2(self):
+        term1 = Statement.Inheritance(Term("bird"), Term("animal"))
+        term2 = Statement.Inheritance(Variable.Independent("x"), Term("animal"))
+        self.assertTrue(term1.equal(term2)[0])
+        self.assertTrue(term2.equal(term1)[0])
+        pass
+
+
+    def test_statement_equal_3(self):
+        term1 = Statement.Inheritance(Variable.Independent("y"), Term("animal"))
+        term2 = Statement.Inheritance(Variable.Independent("x"), Term("animal"))
+        self.assertTrue(term1.equal(term2)[0])
+        self.assertTrue(term2.equal(term1)[0])
+        pass
+
+
+    def test_statement_equal_4(self):
+        term1 = Statement.Inheritance(Term("bird"), Variable.Independent("x"))
+        term2 = Statement.Inheritance(Variable.Independent("x"), Term("animal"))
+        self.assertTrue(term1.equal(term2)[0])
+        self.assertTrue(term2.equal(term1)[0])
+        pass
+
+
+    def test_statement_equal_5(self):
+        term1 = Statement.Inheritance(Variable.Independent("y"), Term("animal"))
+        term2 = Statement.Inheritance(Variable.Dependent("x"), Term("animal"))
+        self.assertFalse(term1.equal(term2)[0])
+        self.assertFalse(term2.equal(term1)[0])
+        pass
+
+
+    def test_statement_equal_6(self):
+        term1 = Statement.Inheritance(Variable.Dependent("x"), Variable.Independent("y"))
+        term2 = Statement.Inheritance(Variable.Dependent("a"), Variable.Independent("b"))
+        self.assertTrue(term1.equal(term2)[0])
+        self.assertTrue(term2.equal(term1)[0])
+        pass
+
+
+    def test_statement_equal_7(self):
+        term1 = Statement.Inheritance(Variable.Independent("x"), Term("animal"))
+        term2 = Statement.Inheritance(Compound.ExtensionalIntersection(Term("bird"), Term("flyer")), Term("animal"))
+        self.assertTrue(term1.equal(term2)[0])
+        self.assertTrue(term2.equal(term1)[0])
+        pass
+
+
+    def test_compound_equal_0(self):
+        term1 = Narsese.parse('(&&, <A-->B>, <A-->C>).').term
+        term2 = Narsese.parse('(&&, <A-->C>, <A-->B>).').term
+        self.assertTrue(term1.identical(term2))
+        self.assertTrue(term1.equal(term2)[0])
+        self.assertTrue(term2.equal(term1)[0])
+        pass
+
+
+    def test_compound_equal_1(self):
+        '''
+        (&&, <#x-->A>, <B-->C>) and (&&, <B-->C>, <{A1}-->A>) are equal, though not identical.
+        '''
+        term1 = Narsese.parse('(&&, <#x-->A>, <B-->C>).').term
+        term2 = Narsese.parse('(&&, <B-->C>, <{A1}-->A>).').term
+        self.assertFalse(term1.identical(term2))
+        self.assertTrue(term1.equal(term2)[0])
+        self.assertTrue(term2.equal(term1)[0])
+        pass
+
+
+    def test_compound_equal_2(self):
+        term1 = Compound.Conjunction(Variable.Independent("x"), Term("animal"))
+        term2 = Compound.Conjunction(Compound.ExtensionalIntersection(Term("bird"), Term("flyer")), Term("animal"))
+        self.assertTrue(term1.equal(term2)[0])
+        self.assertTrue(term2.equal(term1)[0])
+        pass
+
+
+
+if __name__ == '__main__':
+
+    test_classes_to_run = [
+        TEST_Term_Equal
+    ]
+
+    loader = unittest.TestLoader()
+
+    suites = []
+    for test_class in test_classes_to_run:
+        suite = loader.loadTestsFromTestCase(test_class)
+        suites.append(suite)
+
+    suites = unittest.TestSuite(suites)
+
+    runner = unittest.TextTestRunner()
+    results = runner.run(suites)
diff --git a/Tests/test_variable/test_Terms.py b/Tests/test_variable/test_Terms.py
new file mode 100644
index 0000000..6a16f90
--- /dev/null
+++ b/Tests/test_variable/test_Terms.py
@@ -0,0 +1,121 @@
+import NARS
+import unittest
+
+from NARS.DataStructures import Bag, Task, Concept, Table
+from Narsese import Judgement, Term, Statement, Copula, Truth
+
+from pathlib import Path
+import Narsese
+from Narsese import Compound, Connector
+from NAL.MetaLevelInference.VariableSubstitution import *
+from Narsese import Terms
+
+class TEST_Terms(unittest.TestCase):
+
+    def test_terms_0(self):
+
+        term1 = Narsese.parse("<#x-->A>.").term
+        term2 = Narsese.parse("<#x-->B>.").term
+        term3 = Narsese.parse("<#y-->A>.").term
+        term4 = Narsese.parse("<#y-->C>.").term
+        terms1 = Terms((term1, term2), True, True)
+        terms2 = Terms((term3, term4), True, True)
+        terms3 = Terms((term1, term2, term3, term4), True, True)
+
+        terms_union_1_2 = Terms.union(terms1, terms2)
+        terms_inter_1_3 = Terms.intersection(terms1, terms3)
+        terms_difference_3_2 = Terms.difference(terms3, terms2)
+
+        pass
+
+    def test_terms_1(self):
+
+        term1 = Narsese.parse("<#x-->A>.").term
+        term2 = Narsese.parse("<#x-->B>.").term
+        term3 = Narsese.parse("<#y-->A>.").term
+        term4 = Narsese.parse("<#y-->C>.").term
+        terms1 = Terms((term1, term2), True, True)
+        terms2 = Terms((term3, term4), False, True)
+        terms3 = Terms((term1, term2, term3, term4), True, True)
+
+        terms_union_1_2 = Terms.union(terms1, terms2)
+        terms_inter_1_3 = Terms.intersection(terms1, terms3)
+        terms_difference_3_2 = Terms.difference(terms3, terms2)
+
+        terms_union_2_1 = Terms.union(terms2, terms1)
+        pass
+
+    def test_compound_0(self):
+        ''''''
+        term1 = Narsese.parse("(&&, <$x-->A>, <$y-->A>).").term
+        repr(term1)
+        term2 = Narsese.parse("(&/, <$x-->A>, <$y-->A>).").term
+        pass
+
+    def test_compound_1(self):
+        '''
+        {{<$x-->A>}, {<$y-->A>}}.
+        |-
+        {<$x-->A>, <$y-->A>}
+        '''
+        term1 = Narsese.parse("{{<$x-->A>}, {<$y-->A>}}.").term
+        repr(term1)
+        pass
+
+    def test_compound_2(self):
+        '''
+        (&, {A, B}, {B,C}).
+        '''
+        term1 = Narsese.parse("(&, {A, B}, {B,C}).").term
+        repr(term1)
+        pass
+
+    def test_compound_3(self):
+        '''
+        (&, [<A-->B>], [<C-->D>]).
+        '''
+        term1 = Narsese.parse("(&, [<A-->B>], [<C-->D>]).").term
+        repr(term1)
+        pass
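+
+    # NAL set semantics exercised by the next test (illustrative): `&` takes
+    # the intersection of extensional sets and the union of intensional sets,
+    # so (&, {A,B,C}, {B,C,D}) reduces to {B,C} and (&, [E,F], [F,G]) to
+    # [E,F,G].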
+    def test_compound_4(self):
+        '''
+        (&, {A,B,C}, {B,C,D}, [E, F], [F, G]).
+        |-
+        (&, {B,C}, [E, F, G]).
+        '''
+        term1 = Narsese.parse("(&, {A,B,C}, {B,C,D}, [E, F], [F, G]).").term
+        repr(term1)
+        self.assertEqual(str(term1).replace(' ', ''), '(&, {B,C}, [E, F, G])'.replace(' ', ''))
+        pass
+
+
+    def test_compound_5(self):
+        '''
+        (&, {<$x-->A>, <$x-->B>, <$y-->A>}, {<$y-->B>, <$y-->A>}, [<$x-->A>, <$x-->B>, <$y-->A>], [<$y-->B>, <$y-->A>]).
+        |-
+        (&, {<$y-->A>}, [<$x-->A>, <$x-->B>, <$y-->A>, <$y-->B>]).
+        '''
+        term1 = Narsese.parse("(&, {<$x-->A>, <$x-->B>, <$y-->A>}, {<$y-->B>, <$y-->A>}, [<$x-->A>, <$x-->B>, <$y-->A>], [<$y-->B>, <$y-->A>]).").term
+        repr(term1)
+        pass
+
+
+
+if __name__ == '__main__':
+
+    test_classes_to_run = [
+        TEST_Terms
+    ]
+
+    loader = unittest.TestLoader()
+
+    suites = []
+    for test_class in test_classes_to_run:
+        suite = loader.loadTestsFromTestCase(test_class)
+        suites.append(suite)
+
+    suites = unittest.TestSuite(suites)
+
+    runner = unittest.TextTestRunner()
+    results = runner.run(suites)
diff --git a/Tests/test_variable/test_Variable.py b/Tests/test_variable/test_Variable.py
new file mode 100644
index 0000000..686b839
--- /dev/null
+++ b/Tests/test_variable/test_Variable.py
@@ -0,0 +1,237 @@
+import NARS
+import unittest
+
+from NARS.DataStructures import Bag, Task, Concept, Table
+from Narsese import Judgement, Term, Statement, Copula, Truth
+
+from pathlib import Path
+import Narsese
+from Narsese import Compound, Connector
+from NAL.MetaLevelInference.VariableSubstitution import *
+from Narsese._py.Variable import VarPrefix, Variable
+from utils.IndexVar import IndexVar
+
+class TEST_Variable(unittest.TestCase):
+
+    def test_index_var_0(self):
+        ''''''
+        index1 = IndexVar()
+        index1.var_independent = [0, 1]
+        index2 = IndexVar()
+        index2.var_independent = [1, 0]
+        self.assertEqual(index1, index2)
+        pass
+
+    def test_index_var_1(self):
+        ''''''
+        index1 = IndexVar()
+        index1.var_independent = [0, 1, 0]
+        index2 = IndexVar()
+        index2.var_independent = [0, 1, 1]
+        self.assertNotEqual(index1, index2)
+        pass
+
+    def test_index_var_2(self):
+        ''''''
+        term1 = Narsese.parse("<#x-->bird>.").term
+        self.assertEqual(term1._index_var.positions_dvar, [[0]])
+        term2 = Narsese.parse("<bird-->#x>.").term
+        self.assertEqual(term2._index_var.positions_dvar, [[1]])
+        term = Narsese.parse("<<$x-->bird> ==> <$x-->animal>>.").term
+        self.assertEqual(term._index_var.positions_ivar, [[0,0], [1,0]])
+        term = Narsese.parse("<<bird-->$x> ==> <animal-->$x>>.").term
+        self.assertEqual(term._index_var.positions_ivar, [[0,1], [1,1]])
+        pass
+
+
+    def test_index_var_3(self):
+        ''''''
+        term = Narsese.parse("<<bird-->$x> ==> <animal-->$x>>.").term
+        self.assertEqual(term._index_var.var_independent, [0, 0])
+
+        term = Narsese.parse("(&&, <$x-->A>, <#y-->B>, <<$z-->C>==><$x-->A>>).").term
+        self.assertEqual(term._index_var.var_independent, [0, 2, 0])
+        self.assertEqual(term._index_var.var_dependent, [1])
+
+        term = Narsese.parse("(&&, <$x-->A>, <#y-->B>, <$z==><$x-->A>>).").term
+        # self.assertEqual(term.index_var.var_independent, [0, 2, 0])
+        pass
+
+
+    def test_index_var_4(self):
+        ''''''
+        term1 = Narsese.parse("(&&,<#y --> key>,<$x --> (/,open,#y,_)>).").term
+        self.assertEqual(term1._index_var.positions_dvar, [[0,0],[1,1,1]])
+        self.assertEqual(term1._index_var.positions_ivar, [[1,0]])
+        pass
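+
+    # Reading the assertions above: each position list is a path into the term
+    # tree. In <<$x-->bird> ==> <$x-->animal>>, for instance, $x occurs at
+    # component 0 of the subject and component 0 of the predicate, hence
+    # positions_ivar == [[0,0], [1,0]].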
+    def test_unification_0(self):
+        ''''''
+        stat1: Statement = Narsese.parse("<<$x-->A>==><<$y-->B>==><$x-->C>>>.").term
+        stat2: Statement = Narsese.parse("<<<$x-->B>==><$y-->C>>==><$x-->D>>.").term
+
+        self.assertEqual(stat1.predicate, stat2.subject)
+        stat1[1,0]
+        R = unification(stat1, stat2, stat1.predicate, stat2.subject)
+
+        pass
+
+
+    def test_unification_1(self):
+        '''
+        (&&, <#x-->A>, <B-->C>) and (&&, <B-->C>, <{A1}-->A>) are equal, though not identical.
+        '''
+        term1 = Narsese.parse('(&&, <#x-->A>, <B-->C>).').term
+        term2 = Narsese.parse('(&&, <B-->C>, <{A1}-->A>).').term
+        self.assertFalse(term1.identical(term2))
+        self.assertTrue(term1.equal(term2)[0])
+        pass
+
+    def test_unification_2(self):
+        term1: Statement = Narsese.parse("<<$x-->A> ==> <$x-->B>>.").term
+        term2 = Narsese.parse("<(&&, <#x-->C>, <#x-->D>)-->A>.").term
+        self.assertTrue(term1.subject.equal(term2)[0])
+        self.assertTrue(term2.equal(term1.subject)[0])
+        # 1. to find the substitution
+        term1.subject.equal(term2)
+        term1[0,0]
+        term1[0]
+
+        term3 = Narsese.parse("<<(&&, <#x-->C>, <#x-->D>)-->A> ==> <(&&, <#x-->C>, <#x-->D>)-->B>>.").term
+        pass
+
+    def test_unification_3(self):
+        '''
+        'Multiple variable elimination
+
+        'Every lock can be opened by some key.
+        <<$x --> lock> ==> (&&,<#y --> key>,<$x --> (/,open,#y,_)>)>. %1.00;0.90%
+
+        'Lock-1 is a lock.
+        <{lock1} --> lock>. %1.00;0.90%
+
+        9
+
+        'Some key can open Lock-1.
+        ''outputMustContain('(&&,<#1 --> key>,<{lock1} --> (/,open,#1,_)>). %1.00;0.81%')
+        '''
+        premise1 = Narsese.parse('<<$x --> lock> ==> (&&,<#y --> key>,<$x --> (/,open,#y,_)>)>. %1.00;0.90%').term
+        premise2 = Narsese.parse('<{lock1} --> lock>. %1.00;0.90%').term
+
+
+        pass
+
+
+    def test_index_repr_0(self):
+        ''''''
+        term = Narsese.parse("<$x-->bird>.").term
+        self.assertEqual(repr(term), "<Statement: <$0-->bird>>")
+
+        pass
+
+    def test_index_repr_1(self):
+        ''''''
+        term = Narsese.parse("<<bird-->$x>==><animal-->$y>>.").term
+        self.assertEqual(repr(term), "<Statement: <<bird-->$0>==><animal-->$1>>>")
+
+        pass
+
+    def test_eq_0(self):
+        '''
+        (&&, <$1-->A>, <$2-->B>)
+        (&&, <$1-->A>, <$1-->B>)
+        the two should be unequal.
+        '''
+        compound1 = Compound(Connector.Conjunction, Statement(Variable(VarPrefix.Independent, "x"), Copula.Inheritance, Term("A")), Statement(Variable(VarPrefix.Independent, "y"), Copula.Inheritance, Term("B")))
+        compound2 = Compound(Connector.Conjunction, Statement(Variable(VarPrefix.Independent, "x"), Copula.Inheritance, Term("A")), Statement(Variable(VarPrefix.Independent, "x"), Copula.Inheritance, Term("B")))
+        self.assertNotEqual(compound1, compound2)
+        pass
+
+
+
+
+    # def test_eq_1(self):
+    #     '''
+    #     stat1 = (&&, <$1-->A>, <<$2-->B>==><$1-->C>>)
+    #     stat2 = <<<$1-->B>==><$2-->C>> ==> <$2-->D>>
+    #     |-
+    #     stat3 = (&&, <$1-->A>, <$1-->D>>)
+
+    #     stat1[1] == stat2[1]
+    #     stat1 and stat2 should derive stat3
+    #     '''
+    #     pass
+
+
+    # def test_eq_2(self):
+    #     '''
+    #     stat1 = <(&&, <$1-->A>, <$2-->B>) ==> <$3-->C>>
+    #     stat2 = <<$1-->B>==><$2-->D>>
+    #     |-
+    #     stat3 = <(&&, <$1-->A>, <$2-->D>) ==> <$3-->C>>
+
+    #     stat1[0][1] == stat2[0]
+    #     stat1 and stat2 should derive stat3
+    #     '''
+    #     pass
+
+    # def test_eq_3(self):
+    #     '''
+    #     stat1: (&&, <$1-->A>, <$2-->B>)
+    #     stat2: (&&, <$1-->A>, <#2-->B>)
+    #     stat1 != stat2
+    #     '''
+
+    # def test_eq_4(self):
+    #     '''
+    #     stat1 = (&&, <$1-->A>, <<$2-->B>==><#1-->C>>)
+    #     stat2 = <<<$1-->B>==><#2-->C>> ==> <$2-->D>>
+    #     |=
+    #     stat3 = (&&, <$1-->A>, <$1-->D>>)
+
+    #     stat1[1] == stat2[0]
+    #     stat1 and stat2 should derive stat3
+    #     '''
+
+    #     pass
+
+    # def test_eq_5(self):
+    #     '''
+    #     stat1 = (&&, <$1-->A>, <<$2-->B>==>(&&, <#1-->C>, <$1-->D>)>)
+    #     stat2 = <<<$1-->B>==>(&&, <#2-->C>, <$2-->D>)> ==> <$2-->E>>
+    #     |=
+    #     stat3 = (&&, <$1-->A>, <$1-->E>>)
+
+    #     stat1[1] == stat2[1]
+    #     stat1 and stat2 should derive stat3
+    #     '''
+    #     pass
+
+    # def test_eq_6(self):
+    #     '''
+    #     stat1 = (&&, <$1-->A>, <<$2-->B>==>(&&, <#1-->C>, <$1-->D>)>)
+    #     stat2 = <<<$1-->B>==>(&&, <#1-->C>, <$2-->D>)> ==> <$2-->E>>
+
+    #     stat1[1] != stat2[1]
+    #     '''
+    #     pass
+
+
+if __name__ == '__main__':
+
+    test_classes_to_run = [
+        TEST_Variable
+    ]
+
+    loader = unittest.TestLoader()
+
+    suites = []
+    for test_class in test_classes_to_run:
+        suite = loader.loadTestsFromTestCase(test_class)
+        suites.append(suite)
+
+    suites = unittest.TestSuite(suites)
+
+    runner = unittest.TextTestRunner()
+    results = runner.run(suites)
diff --git a/Tests/test_variable/test_Variable2.py b/Tests/test_variable/test_Variable2.py
new file mode 100644
index 0000000..2821421
--- /dev/null
+++ b/Tests/test_variable/test_Variable2.py
@@ -0,0 +1,53 @@
+import NARS
+import unittest
+
+from NARS.DataStructures import Bag, Task, Concept, Table
+from Narsese import Judgement, Term, Statement, Copula, Truth
+
+from pathlib import Path
+import Narsese
+from Narsese import Compound, Connector
+from NAL.MetaLevelInference.VariableSubstitution import *
+from Narsese._py.Variable import VarPrefix, Variable
+from utils.IndexVar import IndexVar
+
+class TEST_Variable(unittest.TestCase):
+
+    def test_equal_0(self):
+        c1 = Narsese.parse("(&&, <#x-->A>, <#x -->B>, <#y-->A>, <#y -->C>).").term
+        c2 = Narsese.parse("(&&, <#x-->A>, <#y -->B>, <#y-->A>, <#x -->C>).").term
+
+
+    def test_equal_1(self):
+        ''''''
+        term1 = Narsese.parse("<robin-->bird>.").term
+        term2 = Narsese.parse("<#1-->bird>.").term
+        term3 = Narsese.parse("<$1-->bird>.").term
+        term4 = Narsese.parse("<robin-->$1>.").term
+        self.assertTrue(term1.equal(term2))
+        self.assertTrue(term1.equal(term3))
+        self.assertTrue(term1.equal(term4))
+        self.assertTrue(term2.equal(term4))
+        self.assertTrue(term3.equal(term4))
+        self.assertFalse(term2.equal(term3))
+        pass
+
+
+
+if __name__ == '__main__':
+
+    test_classes_to_run = [
+        TEST_Variable
+    ]
+
+    loader = unittest.TestLoader()
+
+    suites = []
+    for test_class in test_classes_to_run:
+        suite = loader.loadTestsFromTestCase(test_class)
+        suites.append(suite)
+
+    suites = unittest.TestSuite(suites)
+
+    runner = unittest.TextTestRunner()
+    results = runner.run(suites)
diff --git a/Tests/utils_for_test.py b/Tests/utils_for_test.py
new file mode 100644
index 0000000..acbeb54
--- /dev/null
+++ b/Tests/utils_for_test.py
@@ -0,0 +1,99 @@
+from typing import List, Tuple
+from NARS.DataStructures._py.Concept import Concept
+from NARS.DataStructures._py.Link import Link, TaskLink, TermLink
+from Narsese import Task
+import Narsese
+from NARS.RuleMap import RuleMap_v2, RuleCallable
+from NARS import Reasoner_3_0_4 as Reasoner
+from Narsese._py.Statement import Statement
+from Narsese._py.Task import Belief
+from Narsese._py.Term import Term
+from NAL.MentalOperation import execute
+
+nars = Reasoner(100, 100)
+rule_map = nars.inference.rule_map
+
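+
+# The helper below drives the shared reasoner through a single two-premise
+# inference step. A typical call from a test might look like this
+# (illustrative values, including the trailing '.' on the concept term):
+#
+#   rules, task, belief, concept, task_link, term_link, r1, r2 = \
+#       rule_map_two_premises('<robin-->bird>.', '<bird-->animal>.', 'bird.')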
+def rule_map_two_premises(premise1: str, premise2: str, term_common: str, inverse: bool=False, is_belief_term: bool=False, index_task=None, index_belief=None) -> Tuple[List[RuleCallable], Task, Belief, Concept, TaskLink, TermLink, Tuple[Task, Task, Task, Task]]:
+    ''''''
+    nars.reset()
+    premise1: Task = Narsese.parse(premise1)
+    result1 = nars.memory.accept(premise1)
+    premise2: Task = Narsese.parse(premise2)
+    result2 = nars.memory.accept(premise2)
+
+    task, belief = (premise1, premise2) if not inverse else (premise2, premise1)
+
+    term_common: Term = Narsese.parse(term_common).term
+    concept = nars.memory.take_by_key(term_common)
+
+    if index_task is None:
+        if task.term == concept.term: index_task = ()
+        else:
+            if task.term.complexity > concept.term.complexity: indices_task = Link.get_index(task.term, concept.term)
+            else: indices_task = Link.get_index(concept.term, task.term)
+            if indices_task is not None: index_task = indices_task[0]
+
+
+    if index_belief is None:
+        if belief.term == concept.term: index_belief = ()
+        else:
+            if belief.term.complexity > concept.term.complexity: indices_belief = Link.get_index(belief.term, concept.term)
+            else: indices_belief = Link.get_index(concept.term, belief.term)
+            if indices_belief is not None: index_belief = indices_belief[0]
+
+
+    task_link = concept.task_links.take_by_key(TaskLink(concept, task, None, index=index_task))
+    term_link = concept.term_links.take_by_key(TermLink(concept, belief, None, index=index_belief))
+
+    belief: Belief
+    rules = rule_map.match(task, (belief if not is_belief_term else None), belief.term, task_link, term_link)
+    return rules, task, belief, concept, task_link, term_link, result1, result2
+
+def rule_map_task_only(premise1: str, concept_term: str, index_concept_task: tuple):
+    ''''''
+    task = Narsese.parse(premise1)
+    result1 = nars.memory.accept(task)
+    concept_term = Narsese.parse(concept_term+".").term
+
+    concept = nars.memory.take_by_key(concept_term)
+    task_link = concept.task_links.take_by_key(TaskLink(concept, task, None, index=index_concept_task))
+
+    rules = rule_map.match(task, None, None, task_link, None)
+    return rules, task, concept, task_link, result1
+
+
+def memory_accept_revision(judgement1: str, judgement2: str):
+    task1 = Narsese.parse(judgement1)
+    nars.memory.accept(task1)
+    task2 = Narsese.parse(judgement2)
+    task_derived, *_ = nars.memory.accept(task2)
+    return [task_derived]
+
+
+def execute_one_premise(premise: Task):
+    ''''''
+    stat: Statement = premise.term
+    if stat.is_executable:
+        op = stat.predicate
+        args = stat.subject.terms
+        return execute(op, *args)
+    else:
+        raise ValueError("Invalid case.")
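+
+# The next helper compares truth values to two decimals, mirroring the
+# `outputMustContain` convention of the NAL test files. Illustrative call:
+#   output_contains(tasks_derived, '<robin-->animal>. %1.00;0.81%')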
+def output_contains(outputs: List[Task], target: str):
+    target: Task = Narsese.parse(target)
+    for output in outputs:
+        flag_contain = output.term == target.term
+        if output.truth is None:
+            flag_contain &= target.truth is None
+        else:
+            flag_contain &= round(output.truth.f, 2) == round(target.truth.f, 2)
+            flag_contain &= round(output.truth.c, 2) == round(target.truth.c, 2)
+        flag_contain &= target.sentence.is_eternal == output.sentence.is_eternal
+        # compare the time stamps
+        if not target.sentence.is_eternal:
+            flag_contain &= target.stamp.t_occurrence == output.stamp.t_occurrence
+        if flag_contain:
+            return True
+    return False
\ No newline at end of file
diff --git a/config.json b/config.json
new file mode 100644
index 0000000..bde2d03
--- /dev/null
+++ b/config.json
@@ -0,0 +1,52 @@
+
+{
+    "PROGRAM": {
+        "VERSION": "0.0.1",
+        "DRIVER": "py" // py: python, pyx: cython, cypy: cython with python style, cpp: c++
+    },
+    "HYPER-PARAMS": {
+        "DEFAULT": {
+            "BUDGET": {
+                "PRIORITY_JUDGEMENT": 0.8,
+                "DURABILITY_JUDGEMENT": 0.5,
+                "PRIORITY_QUESTION": 0.9,
+                "DURABILITY_QUESTION": 0.9,
+                "PRIORITY_QUEST": 0.9,
+                "DURABILITY_QUEST": 0.9,
+                "PRIORITY_GOAL": 0.9,
+                "DURABILITY_GOAL": 0.9,
+                "THRESHOLD": 0.01
+            },
+            "NUM_BUCKETS": 100,
+            "TRUTH": {
+                "FREQUENCY": 1.0,
+                "CONFIDENCE": 0.9,
+                "CONFIDENCE_JUDGEMENT": 0.9,
+                "CONFIDENCE_GOAL": 0.9,
+                "K": 1
+            },
+            "MAX_DURATION": 1000,
+            "CONCEPT": {
+                "NUM_LEVELS_TASKLINK_BAG": 1000,
+                "CAPACITY_TASKLINK_BAG": 10000,
+                "NUM_LEVELS_TERMLINK_BAG": 1000,
+                "CAPACITY_TERMLINK_BAG": 10000,
+                "CAPACITY_TABLE": 100
+            },
+            "COMPLEXITY_UNIT": 1.0, // 1.0 - oo
+            "QUALITY_MIN": 0.3,
+            "CYCLES_PER_DURATION": 5,
+            "NUM_FORGET_DURATIONS": 2,
+            "REVISION_MAX_OCCURRENCE_DISTANCE": 10,
+            "RATE_DISCOUNT_CONFIDENCE": 0.5, // the rate of confidence decrease in the mental operations Doubt and Hesitate
+            "RATE_DISCOUNT_PRIORITY_INTERNAL_EXPERIENCE": 0.1,
+            "RATE_DISCOUNT_DURABILITY_INTERNAL_EXPERIENCE": 0.1,
+            "TEMPORAL_DURATION": 5,
+            "NUM_SEQUENCE_ATTEMPTS": 10,
+            "NUM_OP_CONDITION_ATTEMPTS": 10
+        },
+        "TRUTH_EPSILON": 0.01,
+        "BUDGET_EPSILON": 0.0001,
+        "COMPLEXITY_UNIT": 1.0
+    }
+}
\ No newline at end of file
diff --git a/utils/.vscode/launch.json b/utils/.vscode/launch.json
new file mode 100644
index 0000000..76c5bee
--- /dev/null
+++ b/utils/.vscode/launch.json
@@ -0,0 +1,21 @@
+{
+    // Use IntelliSense to learn about possible attributes.
+    // Hover to view descriptions of existing attributes.
+    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
+    "version": "0.2.0",
+    "configurations": [
+        {
+            "name": "Python: SparseLUT",
+            "type": "python",
+            "request": "launch",
+            "module": "SparseLUT"
+        },
+        {
+            "name": "Python: Current File",
+            "type": "python",
+            "request": "launch",
+            "program": "${file}",
+            "console": "integratedTerminal"
+        }
+    ]
+}
\ No newline at end of file
diff --git a/utils/IdEnum.py b/utils/IdEnum.py
new file mode 100644
index 0000000..70e2609
--- /dev/null
+++ b/utils/IdEnum.py
@@ -0,0 +1,13 @@
+from enum import Enum
+
+class IdEnum(Enum):
+    def __new__(cls, value):
+        if not hasattr(cls, '_copula_id'): cls._copula_id = 0
+        member = object.__new__(cls)
+        member._value_ = value
+        member._copula_id = cls._copula_id
+        cls._copula_id += 1
+        return member
+
+    def __int__(self):
+        return self._copula_id
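+
+# A minimal usage sketch of IdEnum (illustrative; `Copula` here is a made-up
+# subclass for demonstration, not the project's real one):
+#
+#   class Copula(IdEnum):
+#       Inheritance = "-->"
+#       Similarity = "<->"
+#
+#   int(Copula.Inheritance)  # 0 -- ids are assigned in definition order
+#   int(Copula.Similarity)   # 1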
diff --git a/utils/Index.py b/utils/Index.py
new file mode 100644
index 0000000..bf7d275
--- /dev/null
+++ b/utils/Index.py
@@ -0,0 +1,4 @@
+
+class Index:
+    def __init__(self) -> None:
+        pass
\ No newline at end of file
diff --git a/utils/IndexVar.py b/utils/IndexVar.py
new file mode 100644
index 0000000..2c5543e
--- /dev/null
+++ b/utils/IndexVar.py
@@ -0,0 +1,228 @@
+from copy import deepcopy
+from typing import Union
+import enum
+from typing import Callable, List, Tuple, Type
+from ordered_set import OrderedSet
+from bidict import bidict
+
+from numpy import prod
+
+class IntVar:
+    def __init__(self, num: int) -> None:
+        self.num = int(num)
+
+    def __eq__(self, o: 'IntVar') -> bool:
+        if isinstance(o, IntVar): return self.num == o.num
+        else: return self.num == o
+
+    def __int__(self) -> int:
+        return self.num
+
+    def __hash__(self) -> int:
+        return hash(self.num)
+
+    def __repr__(self) -> str:
+        return str(self.num)
+
+    def __call__(self, num: Union[int, None]):
+        if num is not None: self.num = int(num)
+        return self
+
+    def __gt__(self, o: 'IntVar'):
+        if isinstance(o, IntVar): o = int(o)
+        return self.num > int(o)
+
+    def __ge__(self, o: 'IntVar'):
+        if isinstance(o, IntVar): o = int(o)
+        return self.num >= int(o)
+
+    def __lt__(self, o: 'IntVar'):
+        if isinstance(o, IntVar): o = int(o)
+        return self.num < int(o)
+
+    def __le__(self, o: 'IntVar'):
+        return self.num <= int(o)
+
+    def __ne__(self, o: 'IntVar'):
+        if isinstance(o, IntVar): o = int(o)
+        return self.num != int(o)
+
+    def __add__(self, o: 'IntVar'):
+        return self.num + o
+
+    def __radd__(self, o: int):
+        return self + o
+
+    def __sub__(self, o: 'IntVar'):
+        return self.num - o
+
+    def __rsub__(self, o: int):
+        return -self.num + o
+
+    def __mul__(self, o: 'IntVar'):
+        return self.num * o
+
+    def __rmul__(self, o: 'IntVar'):
+        return o * self.num
+
+    def __truediv__(self, o: 'IntVar'):
+        return self.num / o
+
+    def __rtruediv__(self, o: 'IntVar'):
+        return o / self.num
+
+    def __pos__(self):
+        return self
+
+    def __neg__(self):
+        return IntVar(-self.num)
+
+class IndexVar:
+    '''
+    Examples:
+        (&&, <$1-->A>, <$2-->B>, <$1-->C>)
+        positions = [[0, 2], [1]]
+        positions_unfolded = [[0, 2], [1]]
+        variables = [0, 1, 0]
+
+        (&&, <<$1-->A>==><$2-->B>>, <$2-->C>, <$3-->D>)
+        positions = [[(0, 0)], [(0, 1), 1], [2]]
+        positions_unfolded = [[0], [1, 2], [3]]
+        variables = [[0, 1], 1, 2]
+        variables_unfolded = [0, 1, 1, 2]
+    '''
+
+    _positions_normalized: tuple = None
+    _hash_value = None
+
+    def __init__(self) -> None:
+        self.positions_dvar = []  # the positions of each dependent variable
+        self.positions_ivar = []  # the positions of each independent variable
+        self.positions_qvar = []  # the positions of each query variable
+
+        self.var_dependent = []  # the dependent variable at each position.
+        self.var_independent = []  # the independent variable at each position.
+        self.var_query = []  # the query variable at each position.
+
+        self.dependents: List[tuple] = []
+
+        self.names_var: bidict = bidict()
+
+
+    # def add_position_ivar(self, index: int):
+    #     self.positions_ivar.append(index)
+
+
+    # def add_position_dvar(self, index: int):
+    #     self.positions_dvar.append(index)
+
+
+    # def add_position_qvar(self, index: int):
+    #     self.positions_qvar.append(index)
+
+
+    def add_ivar(self, index: int, name: str=None, index_var_component: Type['IndexVar']=None):
+        self._add_var(self.positions_ivar, self.var_independent, index, name, index_var_component)
+
+
+    def add_dvar(self, index: int, name: str=None, index_var_component: Type['IndexVar']=None):
+        self._add_var(self.positions_dvar, self.var_dependent, index, name, index_var_component)
+
+
+    def add_qvar(self, index: int, name: str=None, index_var_component: Type['IndexVar']=None):
+        self._add_var(self.positions_qvar, self.var_query, index, name, index_var_component)
+
+
+    def _add_var(self, positions: list, variables: list, index, name: str=None, index_var_component: Type['IndexVar']=None):
+        positions.append(index)
+        if name is not None:
+            if name not in self.names_var:
+                self.names_var[name] = len(self.names_var)
+            variables.append(IntVar(self.names_var[name]))
+        # if index_var_component is not None:
+        #     pass
+
+    def merge(self, *indices_var: Type['IndexVar'], is_input: bool, substitution=None):
+        if isinstance(indices_var, IndexVar): indices_var: Tuple[IndexVar] = (indices_var,)
+        if len(indices_var) == 0: return
+
+        # ivar_new = []
+        # dvar_new = []
+        # qvar_new = []
+        ivar_new = self.var_independent
+        dvar_new = self.var_dependent
+        qvar_new = self.var_query
+
+        set_names = OrderedSet(name for index_var in indices_var for name in index_var.names_var.keys())
+
+        names_var_new = bidict({name_var: i for i, name_var in enumerate(set_names-set(self.names_var.keys()), start=len(self.names_var))})
+        # self.names_var = names_var_new
+        self.names_var.update(names_var_new)
+        if is_input:
+
+            # mapping: Callable[[int, IndexVar], IntVar] = lambda var, index_var: names_var_new[index_var.names_var.inverse[int(var)]]
+            mapping: Callable[[int, IndexVar], IntVar] = lambda var, index_var: var(names_var_new[index_var.names_var.inverse[int(var)]])
+
+            for index_var in indices_var:
+                ivar_new.extend([mapping(var, index_var) for var in index_var.var_independent])
+                dvar_new.extend([mapping(var, index_var) for var in index_var.var_dependent])
+                qvar_new.extend([mapping(var, index_var) for var in index_var.var_query])
+                index_var.names_var.update({key: value for key, value in names_var_new.items() if key in index_var.names_var})
+
+
+
+        elif substitution is not None:
+            raise NotImplementedError  # TODO
+        else:
+            for index_var in indices_var:
+                ivar_new.extend(index_var.var_independent)
+                dvar_new.extend(index_var.var_dependent)
+                qvar_new.extend(index_var.var_query)
+
+
+
+        self.var_independent = ivar_new
+        self.var_dependent = dvar_new
+        self.var_query = qvar_new
+
+
+        # for index_var in indices_var: index_var.names_var = None  # the names of the variables are discarded.
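+
+    # Bookkeeping sketch for `_add_var` above (hypothetical values): after
+    # add_ivar([0, 0], name='x') and add_ivar([1, 0], name='x'),
+    #   names_var == {'x': 0}
+    #   var_independent == [IntVar(0), IntVar(0)]
+    #   positions_ivar == [[0, 0], [1, 0]]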
+
+
+    def normalize(self):
+        '''Normalize the variable indices, so that the normalized form is unique for any statement containing variables.'''
+        if self._positions_normalized is None:
+            self._positions_normalized = (
+                _normalize([int(var) for var in self.var_independent]),
+                _normalize([int(var) for var in self.var_dependent]),
+                _normalize([int(var) for var in self.var_query])
+            )
+        return self._positions_normalized
+
+    @property
+    def positions_normalized(self):
+        return self.normalize() if self._positions_normalized is None else self._positions_normalized
+
+
+    def do_hashing(self):
+        # if self._positions_normalized is None: self._normalize()
+        self._hash_value = hash(self.positions_normalized)
+        return self._hash_value
+
+
+    def __hash__(self) -> int:
+        return self._hash_value if self._hash_value is not None else self.do_hashing()
+
+
+    def __eq__(self, o: Type['IndexVar']) -> bool:
+        return hash(self) == hash(o)
+
+    def clone(self):
+        return deepcopy(self)
+
+
+def _normalize(variables):
+    p1 = list(OrderedSet(variables))
+    p2 = list(range(len(p1)))
+    mapping = dict(zip(p1, p2))
+    return tuple(mapping[p] for p in variables)
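+
+# Illustrative: _normalize relabels variable ids by first appearance, so
+# _normalize([3, 5, 3]) == (0, 1, 0). This is what makes the normalized
+# index independent of the concrete variable names.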
diff --git a/utils/Print.py b/utils/Print.py
new file mode 100644
index 0000000..df545e4
--- /dev/null
+++ b/utils/Print.py
@@ -0,0 +1,52 @@
+from enum import Enum
+from sty import bg, fg, ef, rs
+
+
+class PrintType(Enum):
+    IN = f'{fg.cyan}IN    :{fg.rs}'
+    OUT = f'{fg.yellow}OUT   :{fg.rs}'
+    ERROR = f'{fg.red}ERROR :{fg.rs}'
+    ANSWER = f'{fg.green}ANSWER:{fg.rs}'
+    EXE = f'{fg.green}EXE   :{fg.rs}'
+    INFO = f'{fg.blue}INFO  :{fg.rs}'
+    COMMENT = f'{fg.grey}COMMENT:{fg.rs}'
+
+def print_filename(filename):
+    print(f'{fg.li_blue}-- File: {ef.italic}{filename}{rs.italic} --{fg.rs}')
+
+def out_print(type: PrintType, content, p: float=None, d: float=None, q: float=None, comment_title: str=None, end: str=None):
+    # show_budget = True
+    # if isinstance(p, float) and isinstance(d, float) and isinstance(q, float):
+    #     if p<0 or p>1 or q<0 or q>1 or d<0 or d>1:
+    #         show_budget = False
+    # else:
+    #     show_budget = False
+
+    if isinstance(p, float) and p>=0 and p<=1:
+        bg1 = bg(min(255, int(255*p/2+10)), 10, 10)
+        p = f'{round(p, 2):.2f}'
+    else:
+        p = '    '
+        bg1 = ''
+    if isinstance(d, float) and d>=0 and d<=1:
+        bg2 = bg(10, min(255, int(255*d/2+10)), 10)
+        d = f'{round(d, 2):.2f}'
+    else:
+        d = '    '
+        bg2 = ''
+    if isinstance(q, float) and q>=0 and q<=1:
+        bg3 = bg(10, 10, min(255, int(255*q/2+10)))
+        q = f'{round(q, 2):.2f}'
+    else:
+        q = '    '
+        bg3 = ''
+
+    # print(f'{bg(int(256*p),0,0)} {p} {bg(0,int(256*q),0)} {q} {bg(0,0,int(256*d))} {d} {bg.rs}{type.value} {str(content)}')
+
+    if type is PrintType.COMMENT and comment_title is not None:
+        print(f'{fg.da_grey}{comment_title}: {str(content)}{fg.rs}', end=end)
+    elif type is PrintType.INFO:
+        print(f'{bg1} {p} {bg.rs}{bg2} {d} {bg.rs}{bg3} {q} {bg.rs} {type.value} {fg.grey}{str(content)}{fg.rs}', end=end, )
+    else:
+        print(f'{bg1} {p} {bg.rs}{bg2} {d} {bg.rs}{bg3} {q} {bg.rs} {type.value} {str(content)}', end=end, )
+
\ No newline at end of file
diff --git a/utils/SparseLUT b/utils/SparseLUT
new file mode 160000
index 0000000..3d9fac0
--- /dev/null
+++ b/utils/SparseLUT
@@ -0,0 +1 @@
+Subproject commit 3d9fac0a025ed2c6403b0cc66c0f812daab95beb
diff --git a/utils/_trash/RuleLUT.py b/utils/_trash/RuleLUT.py
new file mode 100644
index 0000000..17bc386
--- /dev/null
+++ b/utils/_trash/RuleLUT.py
@@ -0,0 +1,170 @@
+from copy import copy
+from typing import Any, List
+import networkx as nx
+from ordered_set import OrderedSet
+# import matplotlib.pyplot as plt
+
+class RuleLUT:
+    '''
+    '''
+    def __init__(self, shape: tuple) -> None:
+        self.shape = tuple(shape)
+        self._rules_list = []
+        self._lut = dict()  # which should be initialized in `self.build(...)`
+        self.depth = len(self.shape)
+        pass
+
+
+    def add(self, value, indices: list):
+        '''
+        Each element of `indices` should be one of the following three types:
+        1) int
+        2) Any
+        3) List[int]
+        '''
+        indices = list(indices)
+
+        '''Check the validity of the input param `indices`.'''
+        assert len(indices) == len(self.shape)
+        indices_norm = []
+        for index, n_shape in zip(indices, self.shape):
+            if isinstance(index, int):
+                assert index < n_shape
+                index = (index, )
+            elif index is Any:
+                index = tuple(range(n_shape))
+            elif isinstance(index, list):
+                for idx in index:
+                    assert isinstance(idx, int)
+                    assert idx < n_shape
+                index = tuple(index)
+            else: raise ValueError('Invalid case.')
+            indices_norm.append(index)
+
+
+        '''Store it for further building. See `self.build(...)`'''
+        self._rules_list.append([indices_norm, value])
+
+    def new_graph(self, indices: list):
+        def add_nodes(range_node, i_nodes, i_layer):
+            for node, i_node in zip(range_node, i_nodes):
+                g.add_node(node, cnt_pass=0, layer=i_layer, nodes_postedge={}, index=i_node)
+            return range_node
+
+        g = nx.DiGraph()
+
+        nodes0 = add_nodes(range(len(indices[0])), indices[0], 0)
+        nodes = nodes0
+        for i_layer, i_nodes_next in enumerate(indices[1:]):
+            i_layer_next = i_layer+1
+            nodes_next = add_nodes(range(nodes[-1]+1, nodes[-1]+len(i_nodes_next)+1), i_nodes_next, i_layer_next)
+            for node in nodes:
+                for node_next, i_node_next in zip(nodes_next, i_nodes_next):
+                    g.add_edge(node, node_next, cnt_pass=0, index=i_node_next)
+            nodes = nodes_next
+
+        node_root = "root"
+        g.add_node(node_root, layer=-1)
+        for node, i_node in zip(nodes0, indices[0]):
+            g.add_edge(node_root, node, cnt_pass=0, index=i_node)
+
+        # back-propagate the cnt_pass
+
+        return g
+
+
+    def build(self):
+        '''
+        Given a list of indices to the rules (i.e. `self._rules_list`), build a dict (i.e. `self._rules_dict`) for further accessing.
+        '''
+        indices_all, values_all = zip(*self._rules_list)
+
+        # for indices in indices_all:
+        #     g_new = self.new_graph(indices)
+        #     plt.clf()
+        #     pos = nx.multipartite_layout(g_new, subset_key="layer")
+        #     nx.draw(g_new, pos, with_labels=True,)
+        #     plt.show()
+        # g_base = nx.DiGraph()
+
+        g0 = self.new_graph(indices_all[0])
+        g1 = self.new_graph(indices_all[2])
+
+
+
+        def remove_path(g: nx.DiGraph, path: List[int]):
+            pass
+
+        def add_pass(g: nx.DiGraph, path: List[int]):
+            node_current = "root"
+            nodes = list(g[node_current])
+            nodes_index = [node['index'] for node in g[node_current].values()]
+            for index in path:
+                i_node = nodes_index.index(index)
+                if i_node == -1:
+                    remove_path(g, path)
+                    break
+                node_next = nodes[i_node]
+
+                g.nodes[node_current]['cnt_pass'] += 1
+                g.edges[node_current, node_next]['cnt_pass'] += 1
+
+                node_current = node_next
+                nodes = list(g[node_current])
+                nodes_index = [node['index'] for node in g[node_current].values()]
+
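+        # Note on the layered graphs built by `new_graph` (illustrative): for
+        # indices_norm == [(0,), (0, 1)], the graph is root -> {node for 0} ->
+        # {nodes for 0 and 1}, with each node carrying its original `index`
+        # and a `cnt_pass` counter that `add_pass` increments along a path.
+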
+        # 1. separate the intersecting sub-graphs of g0 and g1
+        indices1 = indices_all[2]
+        def get_accessible(g: nx.DiGraph, node_last, indices_all: List[tuple], curr_idx: List[int], i_depth, depth, ret=None):
+            ret = [] if ret is None else ret
+
+            nodes = list(g[node_last])
+            nodes_index = set(node['index'] for node in g[node_last].values())
+
+            indices = indices_all[i_depth]
+            if i_depth == depth-1:
+                for index in indices:
+                    if index in nodes_index:
+                        curr_idx.append(index)
+                        ret.append(curr_idx)
+            else:
+                for index, node in zip(indices, nodes):
+                    if index in nodes_index:
+                        get_accessible(g, node, indices_all, curr_idx+[index], i_depth+1, depth, ret)
+            return ret
+        intersect = get_accessible(g0, "root", indices1, [], 0, len(indices1))
+
+
+        def eliminate_path(g, path):
+            for i_layer, i_node in enumerate(path):
+                pass
+
+
+        for path in intersect:
+            pass
+
+        pass
+
+    def __getitem__(self, indices: tuple):
+        lut = self._lut
+        if isinstance(indices, int): return lut.get(indices, None)
+        for index in indices[:-1]:
+            lut = lut.get(index, None)
+            if lut is None: return None
+        index = indices[-1]
+        return lut.get(index, None)
+
+    def __repr__(self) -> str:
+        return f'<RuleLUT: shape={self.shape}>'
+
+
+if __name__ == '__main__':
+    lut = RuleLUT((5, 5, 3, 4, 5, 5))
+    lut.add("A", [0, 0, 0, Any, 0, 0])
+    lut.add("B", [0, 0, Any, 0, 0, 0])
+    lut.add("C", [0, 0, Any, [1,2], 0, 0])
+    lut.build()
+    pass
diff --git a/utils/_trash/SparseLUT.py b/utils/_trash/SparseLUT.py
new file mode 100644
index 0000000..540f378
--- /dev/null
+++ b/utils/_trash/SparseLUT.py
@@ -0,0 +1,177 @@
+import operator
+from typing import Callable, Dict, List
+from copy import copy
+from ordered_set import OrderedSet
+from copy import deepcopy
+
+class SparseLUT:
+    ''''''
+    def __init__(self, shape: tuple) -> None:
+        self.shape = tuple(shape)
+        self.lut = dict()
+        self.depth = len(shape) - 1
+
+    def _slice_to_tuple(self, s: slice, depth: int):
+
+        n = self.shape[depth]
+        start = 0 if s.start is None else s.start
+        stop = n if s.stop is None else s.stop
+        step = 1 if s.step is None else s.step
+        range_slice = range(start, stop, step)
+        if s == slice(None): range_slice = (*range_slice, None)
+
+        return range_slice
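+
+    # Illustrative behaviour of `_slice_to_tuple` for a dimension of size 5:
+    #   _slice_to_tuple(slice(1, 3), depth) -> range(1, 3)
+    #   _slice_to_tuple(slice(None), depth) -> (0, 1, 2, 3, 4, None)
+    # i.e. a full slice also matches the wildcard key `None`.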
+    def _set_value(self, current_index: List[int], lut_dict: dict, index, value, slot_match=None, match_key: Callable=None, ret=None):
+        ''''''
+        ret = dict() if ret is None else ret
+
+        val_lut: list = lut_dict.get(index, None)
+        if match_key is not None:
+            if match_key(val_lut, slot_match):
+                lut_dict[index] = value
+            else:
+                content = ret.get(id(val_lut), None)
+                if content is None:
+                    content = (val_lut, [])
+                    ret[id(val_lut)] = content
+                content[1].append(current_index)
+
+        else:
+            lut_dict[index] = value
+        return ret
+
+    def _setitem_slice(self, current_index: List[int], lut: dict, value: object, range_slice: list, indices: tuple, depth: int, slot_match=None, match_key=None, ret=None, is_updating=False):
+        ret = dict() if ret is None else ret
+
+        if depth == self.depth:
+            for index in range_slice:
+                current_index.append(index)
+                ret = self._set_value(current_index, lut, index, value, slot_match, match_key, ret)
+        else:
+            # TODO: there seems to be a bug here
+            current_index.append(tuple(range_slice))
+            range_slice = iter(range_slice)
+            index = next(range_slice)
+            if not is_updating:
+                lut_next = lut.get(index, None)
+                if lut_next is None:
+                    lut_next = dict()
+                    lut[index] = (True, lut_next)
+                else:
+                    lut_next = lut_next[1]
+            else:
+                lut_next = dict()
+                lut[index] = (True, lut_next)
+            self._setitem(current_index, lut_next, value, indices, depth+1, slot_match, match_key, ret)
+            for index in range_slice:
+                lut[index] = (True, lut_next)
+        return ret
+
+
+
+    def _setitem(self, current_index: List[int], lut: dict, value: object, indices: tuple, depth: int, slot_match=None, match_key=None, ret=None, is_updating=False):
+        ''''''
+        ret = dict() if ret is None else ret
+
+        for depth, index in enumerate(indices[depth:], depth):
+            if depth == self.depth:
+                if isinstance(index, int):
+                    current_index.append(index)
+                    ret = self._set_value(current_index, lut, index, value, slot_match, match_key, ret)
+                elif isinstance(index, slice):
+                    range_slice = self._slice_to_tuple(index, depth)
+                    for index in range_slice:
+                        curr_idx = copy(current_index)
+                        curr_idx.append(index)
+                        ret = self._set_value(curr_idx, lut, index, value, slot_match, match_key, ret)
+                elif isinstance(index, list):
+                    index_ = index
+                    for index in index_:
+                        curr_idx = copy(current_index)
+                        curr_idx.append(index)
+                        ret = self._set_value(curr_idx, lut, index, value, slot_match, match_key, ret)
+            else:
+                if isinstance(index, int):
+                    current_index.append(index)
+                    lut_next = lut.get(index, None)
+                    if not is_updating:
+                        if lut_next is None:
+                            lut_next = dict()
+                            lut[index] = (False, lut_next)
+                        else:
+                            lut_next = lut_next[1]
+                    else:
+                        if lut_next is not None:
+                            is_shared_item, lut_next = lut_next
+                        if lut_next is None or is_shared_item:
+                            lut_next = dict()
+                            lut[index] = (False, lut_next)
+                    lut = lut_next
+                elif isinstance(index, slice):
+                    range_slice = self._slice_to_tuple(index, depth)
+                    self._setitem_slice(current_index, lut, value, range_slice, indices, depth, slot_match, match_key, ret, is_updating=is_updating)
+                    break
+                elif isinstance(index, list):
+                    self._setitem_slice(current_index, lut, value, index, indices, depth, slot_match, match_key, ret, is_updating=is_updating)
+                    break
+        return ret
+
+
+
+    def __setitem__(self, indices: tuple, value):
+        indices = tuple(index if index is not None else slice(None) for index in indices)
+        self._setitem([], self.lut, value, indices, 0, is_updating=True)
+
+    def add(self, rule, indices: tuple):
+        value = OrderedSet([rule])
+        indices = tuple(index if index is not None else slice(None) for index in indices)
+        ret = self._setitem([], self.lut, value, indices, 0, None, operator.is_)
+        slots = list(ret.values())
+        for slot, indices in slots:
+            indices: list
+            slot: OrderedSet = deepcopy(slot)
+            indices = [set(index) for index in zip(*indices)]
+            indices_ = tuple(list(index)[0] if len(index) <= 1 else list(index) for index in indices)
+            indices = []
+            for index in indices_:
+                if isinstance(index, int):
+                    indices.append(index)
+                elif isinstance(index, tuple):
+                    index = list(index)
+                    indices.append(index)
+                elif isinstance(index, list):
+                    index_ = index
+                    index = []
+                    for idx in index_:
+                        if isinstance(idx, int) or idx is None:
+                            index.append(idx)
+                        elif isinstance(idx, tuple):
+                            raise ValueError("Invalid case.")
+                            index.extend(idx)
+                        else: raise ValueError("Invalid case.")
+                    index = list(set(index))
+                    indices.append(index)
+                else: raise ValueError("Invalid case.")
+
+            slot.add(rule)
+            self[indices] = slot
+        return
+
+
+
+    def get(self, indices: tuple):
+        lut = self.lut
+        for index in indices[:-1]:
+            lut = lut.get(index, None)
+            if lut is None: return None
+            lut = lut[1]
+        index = indices[-1]
+        return lut.get(index, None)
+
+    def __getitem__(self, indices: tuple):
+        if isinstance(indices, int): indices = (indices,)
+        return self.get(indices)
+
+    def __repr__(self) -> str:
+        return repr(self.lut)
\ No newline at end of file
diff --git a/utils/_trash/sparse_lut_v2/branch_list.py b/utils/_trash/sparse_lut_v2/branch_list.py
new file mode 100644
index 0000000..b681b2b
--- /dev/null
+++ b/utils/_trash/sparse_lut_v2/branch_list.py
@@ -0,0 +1,442 @@
+
+
+from typing import Any, Callable, Dict, List, Tuple, Type, Set, Union
+import typing
+from collections import OrderedDict
+from copy import deepcopy, copy
+import marshal
+# import matplotlib
+import networkx as nx
+import matplotlib.pyplot as plt
+from ordered_set import OrderedSet
+import sty
+import cython
+
+# deepcopy = lambda x: marshal.loads(marshal.dumps(x))
+deepcopy2 = lambda x: marshal.loads(marshal.dumps(x))
+
+
+class Node:
+    next_nodes: Union[typing.OrderedDict[tuple, 'Node'], Set]
+    last_nodes: typing.OrderedDict[tuple, 'Node']
+    is_end: bool
+    def __init__(self, index: set, is_end=False, depth: int=-1, next_nodes: typing.OrderedDict[tuple, 'Node']=None, last_nodes: typing.OrderedDict[tuple, 'Node']=None) -> None:
+        self.index = index
+        self.is_end = is_end
+        self.next_nodes = next_nodes or (None if is_end else OrderedDict())
+        self.last_nodes = last_nodes or OrderedDict()
+        self.depth = depth
+        pass
+
+    def append(self, node: Type['Node']):
+        self.next_nodes[tuple(node.index)] = node
+        node.last_nodes[(tuple(self.index), id(self))] = self
+
+
+    def duplicate_shallow(self, index: set=None):
+        node = Node(index or self.index, self.is_end, self.depth)
+        for next_node in self.next_nodes_list:
+            node.append(next_node)
+        return node
+
+
+    def duplicate_deep(self, index: set=None):
+        node = Node(index or self.index, self.is_end, self.depth)
+        for next_node in self.next_nodes_list:
+            next_node = next_node.duplicate_deep()
+            node.append(next_node)
+
+        return node
+
+
+    def remove_next(self, node: Type['Node']):
+        ''''''
+        self.next_nodes.pop(tuple(node.index), None)
+        node.last_nodes.pop((tuple(self.index), id(self)), None)
+
+
+    def remove_last(self, node: Type['Node']):
+        ''''''
+        self.last_nodes.pop((tuple(node.index), id(node)), None)
+        node.next_nodes.pop(tuple(self.index), None)
+
+    def reset_index(self, index):
+        ''''''
+        # index_old = tuple(self.index)
+        next_nodes: List[Node] = self.next_nodes_list
+        last_nodes: List[Node] = self.last_nodes_list
+        for node in next_nodes: node.remove_last(self)
+        for node in last_nodes: node.remove_next(self)
+        self.index = index
+        for node in next_nodes: self.append(node)
+        for node in last_nodes: node.append(self)
+
+
+    @property
+    def is_fan_in(self):
+        return (self.last_nodes is not None) and (len(self.last_nodes) > 1)
+
+
+    @property
+    def next_nodes_list(self):
+        return list(self.next_nodes.values()) if self.next_nodes is not None else []
+
+    @property
+    def last_nodes_list(self):
+        return list(self.last_nodes.values()) if self.last_nodes is not None else []
+
+    def __getitem__(self, i):
+        if i == 0: return self.index
+        elif i == 1: return self.next_nodes
+        else: return None
+
+
+    def __setitem__(self, i, value):
+        if i == 0: self.index = value
+        elif i == 1: self.next_nodes = value
+        else: raise ValueError("Invalid case.")
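+
+    # Linkage sketch (illustrative): `append` wires both directions, with
+    # forward links keyed by the child's index tuple and backward links keyed
+    # by (index, id) so a shared child can be detached from one parent only:
+    #   a, b = Node({0}), Node({1})
+    #   a.append(b)       # a.next_nodes[(1,)] is b
+    #   a.remove_next(b)  # unlinks both directions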
+
+
+    def __repr__(self) -> str:
+        return f'<Node: depth={self.depth}, index={self.index}>'
+
+
+class BranchList:
+    blists: Node
+    def __init__(self, shape: tuple) -> None:
+        self.shape = tuple(shape)
+
+        self.blists = Node({})
+        self.lists = []
+        self.depth = len(self.shape) - 1
+
+    def _normalize(self, indices: list):
+        indices_norm = []
+        for i, index in enumerate(indices):
+            if isinstance(index, int):
+                indices_norm.append(set((index,)))
+            elif isinstance(index, list) or isinstance(index, tuple):
+                indices_norm.append(set(index))
+            elif index is Any or index is None:
+                indices_norm.append(set((*range(self.shape[i]), None)))
+            else:
+                raise ValueError("Invalid case.")
+
+        return indices_norm
+
+
+    def _merge(self, blists: List[Node], blist_in: List[Node], blist_last: Node=None, blist_in_last: Node=None, is_new_blist: bool=False, depth=0):
+        '''Merge the new indices into `self.blists`.
+
+        blists: all the blists under the current depth.
+        blist_in: the new blist; it must not branch.
+        depth: the current depth.
+        '''
+        if depth == 0:  # OK
+            if len(blist_in) > 0: blist_in: Node = blist_in[0]
+            else: return
+            index_new = blist_in.index
+            index_new_diff = index_new - set().union(*(blist[0] for blist in blists))
+            if len(index_new_diff) > 0:
+                blist_new_diff = blist_in.duplicate_deep(index_new_diff)
+                blist_last.append(blist_new_diff)
+
+            _is_new_blist = is_new_blist
+            for blist in blists:
+                ''''''
+                is_new_blist = _is_new_blist
+                # get index_common and index_old_diff.
+                index_old = blist[0]  # e.g. index_old = {0, 1}
+                index_common = index_new & index_old
+                index_old_diff = index_old - index_new
+                index_new_diff = index_new - index_old
+                index_new = index_new_diff
+
+                if len(index_old_diff) > 0:
+                    # keep the old one
+                    blist.reset_index(index_old_diff)
+
+                    # build the new one
+                    if len(index_common) > 0:
+                        # BUG
+                        # since the indices of the child nodes of `blist_last` are pairwise disjoint (orthogonal), the `index_common`s of the `blist`s are also pairwise different.
+                        blist_in_common = blist_in.duplicate_deep(index_common)
+                        # blist_in_common = Node(blist_in.index, blist_in.is_end, blist_in.depth)
+                        blist_last.append(blist_in_common)
+                        self._merge(blist.next_nodes_list, blist_in_common.next_nodes_list, blist, blist_in_common, True, depth+1)
+
+                else:
+                    if len(index_common) > 0:
+                        # no need to build a new link
+                        self._merge(blist.next_nodes_list, blist_in.next_nodes_list, blist, blist_in, False, depth+1)
+                    pass
+
+        elif 0 < depth <= self.depth:
+            # if len(blist_in) > 0: blist_in: Node = blist_in[0]
+            # else: return
+            blist_in: Node = blist_in[0]
+
+            # blist_in_last.next_nodes = OrderedDict()  # TODO: modify the edges.
+
+            index_new = blist_in.index
+            index_new_diff = index_new - set().union(*(blist[0] for blist in blists))
+            if len(index_new_diff) > 0:
+                # the new one to be added
+                blist_new = blist_in.duplicate_deep(index_new_diff)
+                if not is_new_blist: blist_last.append(blist_new)
+                else: blist_in_last.append(blist_new)
+
+            _is_new_blist = is_new_blist
+            for blist in blists:
+                ''''''
+                is_new_blist = _is_new_blist
+                # get index_common and index_old_diff.
+                index_old = blist[0]  # e.g. index_old = {0, 1}
+                index_common = index_new & index_old
+                index_old_diff = index_old - index_new
+                index_new_diff = index_new - index_old
+                index_new = index_new_diff
+
+                if blist.is_fan_in:  # there are multiple input nodes.
+                    # remove the blist from the fan-in, and add a copied one.
+                    if len(index_common) > 0:
+                        blist_last.remove_next(blist)
+                        blist = blist.duplicate_shallow()
+                        blist_last.append(blist)
+
+                    if len(index_old_diff) > 0:
+                        # keep the old one
+                        blist_old_diff = blist
+                        blist_old_diff.reset_index(index_old_diff)
+                        if is_new_blist:
+                            blist_in_last.append(blist_old_diff)
+
+                        # build the new one
+                        if len(index_common) > 0:
+                            # since the indices of the child nodes of `blist_last` are pairwise disjoint (orthogonal), the `index_common`s of the `blist`s are also pairwise different.
+                            blist_old_common = blist.duplicate_shallow(index_common)
+                            blist_last.append(blist_old_common)
+                            # blist_in_common = blist_in.duplicate_deep(index_common)
+                            blist_in_common = Node(blist_in.index, blist_in.is_end, blist_in.depth)
+                            if is_new_blist:  # TODO: check here
+                                blist_in_last.append(blist_in_common)  # or blist_last.append(blist_in_common)?
+                            else:
+                                blist_last.append(blist_in_common)
+                            self._merge(blist_old_common.next_nodes_list, blist_in.next_nodes_list, blist_old_common, blist_in_common, True, depth+1)
+                    else:  # len(index_old_diff) == 0
+                        # BUG
+                        if len(index_common) > 0:
+                            # no need to build a new link
+                            # blist_in_common = blist_in.duplicate_shallow()
+                            blist_in_common = blist_in
+                            if is_new_blist:
+                                blist_in_last.append(blist_in_common)
+                            self._merge(blist.next_nodes_list, blist_in.next_nodes_list, blist, blist_in_common, True, depth+1)
+                        pass
+                    pass
+                else:  # not fan_in
+                    if len(index_old_diff) > 0:
+                        # keep the old one
+                        blist.reset_index(index_old_diff)
+
+                        if is_new_blist:
+                            blist_in_last.append(blist)
+
+                        # build the new one
+                        if len(index_common) > 0:
+                            # since the indices of the child nodes of `blist_last` are pairwise disjoint (orthogonal), the `index_common`s of the `blist`s are also pairwise different.
+                            blist_old_common = blist.duplicate_shallow(index_common)
+                            blist_last.append(blist_old_common)
+                            # blist_in_common = blist_in.duplicate_deep(index_common)
+                            blist_in_common = Node(blist_in.index, blist_in.is_end, blist_in.depth)
+                            blist_in_last.append(blist_in_common)
+                            self._merge(blist.next_nodes_list, blist_in.next_nodes_list, blist_old_common, blist_in_common, True, depth+1)
+                    else:
+                        # BUG
+                        if len(index_common) > 0:
+                            # no need to build a new link
+                            blist_in_common = blist_in.duplicate_shallow()
+                            if is_new_blist:
+                                blist_in_last.append(blist_in_common)
+                            # if is_new_blist:
+                            #     # blist_in_common = blist_in.duplicate_deep(index_common)
+                            #     blist_in_common = Node(index_common, blist_in.index, blist_in.depth)
+                            #     blist_in_last.append(blist_in_common)
+                            # else:
+                            #     blist_in_common = blist_in
+                            #     # blist_in_last.append(blist_in_common)
+                            self._merge(blist.next_nodes_list, blist_in.next_nodes_list, blist, blist_in_common, is_new_blist, depth+1)
+                        pass
+                    pass
+
+
+
+        elif depth == self.depth:
+            pass
+        else:
+            pass
+
+    # @cython.cfunc
+    def _make_blist(self, indices):
+        blist_original = blist = Node(indices[0], False, 0)
+
+        if self.depth == 0: return blist_original
+
+        for i_depth, index in enumerate(indices[1:], 1):
+            if i_depth == self.depth:
+                blist.append(Node(index, True, i_depth))
+            else:
+                blist_new = Node(index, False, i_depth)
+                blist.append(blist_new)
+                blist = blist_new
+
+        return blist_original
+
+
+    def add(self, indices: list, value):
+        indices = self._normalize(indices)
+        self.lists.append((indices, value))
+
+        if len(self.blists.next_nodes) == 0:
+            self.blists.append(self._make_blist(indices))
+            return
+
+        # Now, `self.blists` is not empty
+
+        blist_index = self._make_blist(indices)
+        self._merge(self.blists.next_nodes_list, [blist_index], self.blists)
+        pass
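+
+    # Normalisation sketch for `add` above (illustrative): with shape
+    # (4, 4, 4, 4),
+    #   add([0, [1, 2], 0, 1], "B")
+    # first passes through _normalize to [{0}, {1, 2}, {0}, {1}]; an Any/None
+    # entry would expand to {0, 1, 2, 3, None} for its slot.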
+    def build(self, value_func: Callable=OrderedSet, add_func: Callable=OrderedSet.add):  # list, OrderedSet, etc.
+        # @cython.cfunc
+        # @cython.returns(cython.void)
+        # @cython.locals()
+        def set_value_by_func(blists: List[Node], indices, value_func: Callable, depth=0):
+            '''it should be ensured that `indices` is in `blists`'''
+            index: set = indices[depth]
+            for blist in blists:
+                if index.issuperset(blist[0]):
+                    if depth < self.depth:
+                        set_value_by_func(list(blist[1].values()), indices, value_func, depth+1)
+                    else:
+                        blist[1] = value_func()
+
+        # @cython.cfunc
+        # @cython.returns()
+        # @cython.locals()
+        def get_value(blists, indices, depth=0, ret=None):
+            '''it should be ensured that `indices` is in `blists`'''
+            ret = [] if ret is None else ret
+            index: set = indices[depth]
+            for blist in blists:
+                if index.issuperset(blist[0]):
+                    if depth < self.depth:
+                        get_value(list(blist[1].values()), indices, depth+1, ret)
+                    else:
+                        ret.append(blist[1])
+            return ret
+
+        blists = list(self.blists[1].values())
+        for indices, _ in self.lists:
+            set_value_by_func(blists, indices, value_func)
+
+        for indices, value in self.lists:
+            list_values = get_value(blists, indices)
+            for values in list_values:
+                assert values is not None
+                add_func(values, value)
+
+
+    def clear(self):
+        ''''''
+        blists = list(self.blists[1].values())
+        if len(self.blists[1]) == 0: return
+        for blist in blists:
+            del blist
+        del blists
+        self.blists[1] = None
+
+
+    def draw(self, blists: List[Node]=None, show_labels=True):
+        ''''''
+        # from matplotlib.text import Text
+        blists = list(self.blists[1].values())
+        if len(blists) == 0:
+            print('Empty BranchList.')
+            return
+        g = nx.DiGraph()
+        def add_nodes(g: nx.DiGraph, node_current, label_node_current, blists_next: List[Node], i_layer=0):
+            n_node = g.number_of_nodes()
+            args_next = []
+            if node_current not in g:
+                g.add_node(node_current, label=label_node_current, layer=i_layer)
+                n_node += 1
+            if i_layer <= self.depth:
+                # for node, blist_next in enumerate(blists_next, n_node):
+                for blist_next in blists_next:
+                    label = blist_next[0]
+                    node = id(blist_next)
+                    g.add_node(node, label=label, layer=i_layer+1)
+                    g.add_edge(node_current, node)
+                    if blist_next[1] is not None and len(blist_next[1]) > 0:
+                        args_next.append((node, label, (list(blist_next[1].values()) if i_layer < self.depth else blist_next[1]), i_layer+1))
+
+
+                for arg_next in args_next:
+                    add_nodes(g, *arg_next)
+            else:
+                node = id(blists_next)
+                g.add_node(node, label=blists_next, layer=i_layer+1)
+                g.add_edge(node_current, node)
+            pass
+
+        add_nodes(g, "root", "root", blists)
+        plt.clf()
+        labels = {node: attr['label'] for node, attr in g.nodes.items()}
+        pos = nx.multipartite_layout(g, subset_key="layer")
+        if show_labels:
+            labels = nx.draw_networkx_labels(g, pos, labels)
+            for t in labels.values():
+                t.set_rotation(30)
+        nx.draw(g, pos, with_labels=False, node_size=5)
+        # nx.draw(g)
+        plt.show()
+
+        pass
+
+
+
+if __name__ == '__main__':
+    blist = BranchList((4, 4, 4, 4))
+    blist.add([0, 0, 0, 1], "A")
+    blist.add([0, [1,2], 0, 1], "B")
+    blist.add([0, 3, 0, 1], "C")
+    blist.add([0, [0,1,2,3], 0, [1,2]], "D")
+    blist.draw()
+    blist.build()
+    blist.draw()
+
+
+
+    # blist = BranchList((3,3,3,4,3,3))
+    # blist.add([0, 0, 1, 0, 2, 3])
+    # blist.add([0, 0, 1, 2, 0, 1])
+    # blist.add([0, 0, 2, 0, 2, 3])
+    # blist.add([0, 0, [1,2], 0, [0,2], 3])
+
+    # blist.add([0,0,0,Any,0,0])
+    # blist.add([0,0,Any,[1,2],0,0])
+    # all = []
+    # bl = blist.blist
+    # for _ in range(blist.depth):
+    #     all.append(bl[0])
+    #     bl = bl[1][0]
+    pass
+
+
+# if cython.compiled:
+#     print(f"{sty.fg.blue}[BranchList]Info{sty.fg.rs}: {sty.fg.green}Cython{sty.fg.rs} version.")
+# else:
+#     print(f"{sty.fg.cyan}[BranchList]Warning{sty.fg.rs}: {sty.fg.red}Python{sty.fg.rs} version.")
\ No newline at end of file
diff --git a/utils/_trash/sparse_lut_v2/sparse_lut.py b/utils/_trash/sparse_lut_v2/sparse_lut.py
new file mode 100644
index 0000000..23e15a3
--- /dev/null
+++ b/utils/_trash/sparse_lut_v2/sparse_lut.py
@@ -0,0 +1,92 @@
+from pathlib import Path
+import pickle
+from typing import List
+from ordered_set import OrderedSet
+from .branch_list import BranchList, Any, Node
+from copy import deepcopy
+from tqdm import tqdm
+import cython
+import sty
+
+
+
+class SparseLUT:
+    def __init__(self, shape: tuple) -> None:
+        self.shape = tuple(shape)
+        self.depth = len(self.shape) - 1
+        self.blist = BranchList(shape)
+        self.lut = dict()
+        self.data = []
+
+    def add(self, indices: list | tuple, value):
+        self.data.append((indices, value))
+
+
+    def build(self, clear=True):
+        if len(self.data) == 0: return
+        for i, (indices, value) in enumerate(tqdm(self.data)):
+            self.blist.add(indices, value)
+        self.blist.build(OrderedSet, OrderedSet.add)
+        # lut = self.lut
+
+        def set_value(blists: List[Node], lut, i_depth=0):
+            ''''''
+            # if i_depth <= self.depth:
+            for blist in blists:
+                if i_depth < self.depth:
+                    keys, blist_next = blist.index, list(blist.next_nodes.values())
+                    lut_next = dict()
+                    set_value(blist_next, lut_next, i_depth+1)
+                    for key in keys:
+                        lut[key] = lut_next
+
+                else:
+                    keys, blist_next = blist.index, blist.next_nodes
+                    for key in keys:
+                        lut[key] = deepcopy(blist.next_nodes)
+            # else:
+
+
+
+        set_value(list(self.blist.blists[1].values()), self.lut)
+
+
+        if clear: self.blist.clear()
+
+    def clear(self):
+        self.blist.clear()
+        del self.lut
+        self.lut = dict()
+
+
+    def dump(self, root_path: str):
+        with open(Path(root_path)/'LUT.pkl', 'wb') as f:
+            pickle.dump((self.data, self.lut), f)
+
+    def load(self, root_path: str):
+        with open(Path(root_path)/'LUT.pkl', 'rb') as f:
+            self.data, self.lut = pickle.load(f)
+
+    def draw(self, show_labels=True):
+        self.blist.draw(show_labels=show_labels)
+
+    def __setitem__(self, indices: tuple, value):
+        # indices = tuple(index if index is not None else slice(None) for index in indices)
+        self.add(indices, value)
+
+    def get(self, indices: tuple):
+        '''
+        Each item in `indices` should be an int, or Any/None.
+        '''
+        lut = self.lut
+        for index in indices[:-1]:
+            # print(index)
+            if index is Any: index = None
+            lut = lut.get(index, None)
+            if lut is None: return None
+        index = indices[-1] if indices[-1] is not Any else None
+        return lut.get(index, None)
+
+    def __getitem__(self, indices: tuple):
+        if isinstance(indices, int): indices = (indices,)
+        return self.get(indices)
diff --git a/utils/tools.py b/utils/tools.py
new file mode 100644
index 0000000..d6f5c7c
--- /dev/null
+++ b/utils/tools.py
@@ -0,0 +1,63 @@
+import sys
+from typing import Callable, List
+
+try:
+    sys.getsizeof(0)
+    getsizeof = lambda x: sys.getsizeof(x)
+except Exception:
+    # import resource
+    getsizeof = lambda _: 1  # resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
+
+def get_size(obj, seen=None):
+    """Recursively finds the size of an object, in bytes."""
+    size = getsizeof(obj)
+    if seen is None:
+        seen = set()
+
+    obj_id = id(obj)
+    if obj_id in seen:
+        return 0
+
+    # Important: mark as seen *before* entering the recursion, to gracefully
+    # handle self-referential objects
+    seen.add(obj_id)
+
+    if isinstance(obj, dict):
+        size += sum([get_size(v, seen) for v in obj.values()])
+        size += sum([get_size(k, seen) for k in obj.keys()])
+    elif hasattr(obj, '__dict__'):
+        size += get_size(obj.__dict__, seen)
+    elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):
+        size += sum([get_size(i, seen) for i in obj])
+
+    return size
+
+
+def list_contains(base_list, obj_list):
+    '''Return True if `obj_list` occurs as a contiguous sub-list of `base_list`.'''
+    if len(base_list) < len(obj_list): return False
+
+    obj0 = obj_list[0]
+    for i, base in enumerate(base_list[:len(base_list)+1 - len(obj_list)]):
+        if base == obj0:
+            if base_list[i: i+len(obj_list)] == obj_list:
+                return True
+    return False
+
+
+def rand_seed(x: int):
+    import random
+    random.seed(x)
+
+    import numpy as np
+    np.random.seed(x)
+
+    # if using pytorch, set its seed too!
+    # # import torch
+    # # torch.manual_seed(x)
+    # # torch.cuda.manual_seed(x)
+    # # torch.cuda.manual_seed_all(x)
+
+
+# find the variables whose positions share a common head. e.g. with pos_search=[0], variables=[1, 1, 2, 2], and positions=[[0, 2, 0, 0], [0, 2, 1, 0], [0, 3, 0], [1, 0]], return [1, 1, 2].
+find_var_with_pos: Callable[[list, list, List[list]], list] = lambda pos_search, variables, positions: [var for var, pos in zip(variables, positions) if pos[:len(pos_search)] == pos_search]
+find_pos_with_pos: Callable[[list, List[list]], list] = lambda pos_search, positions: [pos for pos in positions if pos[:len(pos_search)] == pos_search]
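+
+# Illustrative checks for the helpers above (values chosen here for
+# demonstration, not taken from the test suite):
+#   list_contains([1, 2, 3, 4], [2, 3])            # True
+#   list_contains([1, 2, 3, 4], [2, 4])            # False
+#   find_pos_with_pos([0], [[0, 2], [0, 3], [1]])  # [[0, 2], [0, 3]]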