From 80f4f2415058a92698194d1cfa81a3c64a763629 Mon Sep 17 00:00:00 2001 From: Tory Li Date: Wed, 20 Sep 2023 14:55:26 -0400 Subject: [PATCH 1/4] A small demo showing nars-core and channels working parallely. --- pynars/NARS/DataStructures/MC/BufferMC.py | 71 ++++++++++ .../MC/SampleChannels/WordNetChannel.py | 132 ++++++++++++++++++ pynars/NARS/DataStructures/MC/draft.py | 84 +++++++++++ 3 files changed, 287 insertions(+) create mode 100644 pynars/NARS/DataStructures/MC/BufferMC.py create mode 100644 pynars/NARS/DataStructures/MC/SampleChannels/WordNetChannel.py create mode 100644 pynars/NARS/DataStructures/MC/draft.py diff --git a/pynars/NARS/DataStructures/MC/BufferMC.py b/pynars/NARS/DataStructures/MC/BufferMC.py new file mode 100644 index 00000000..b6f77d27 --- /dev/null +++ b/pynars/NARS/DataStructures/MC/BufferMC.py @@ -0,0 +1,71 @@ +from pynars.NAL.Functions import Budget_decay +from pynars.NARS.DataStructures import Memory +from pynars.Narsese import Task + + +def priority_value(task: Task, memory: Memory): + """ + calculating the priority value, combined with "memory-based evaluation". + """ + truth = task.truth + budget = task.budget + complexity = task.term.complexity + pv = truth.e * budget.priority * budget.quality * 0.9 ** complexity # the original value + + task_in_memory = memory.take_by_key(task, remove=False) + if task_in_memory is not None: + # if one task is already in the memory, then its pv is multiplied. + pv *= 1 + task_in_memory.budget.priority + else: + sub_concepts = filter(lambda x: x is not None, + [memory.concepts.take_by_key(each, remove=False) for each in task.term.sub_terms]) + for sub_concept in sub_concepts: + # for each sub-concept in the memory, its pv is increased. + pv *= 1 + sub_concept.budget.priority + + return pv + + +class Buffer: + """ + Buffer is a basic class of all advanced buffers (including, event buffer, global buffer, internal buffer). + + Buffer cycle: + 1. Get a lot of inputs (Tasks), put them to the priority queue based on the inputting truth and budget. + 2. Check these inputs with the memory, this will change the priority value (the original truth and budget will + not be changed). + + In implementation, checking the memory when each individual input is in. + + 3. Pop the most prioritized input. + """ + + def __init__(self, size, memory: Memory): + self.size = size + self.memory = memory + self.priority_queue = [] + + def buffer_cycle(self, tasks: [Task]): + """ + It takes some inputs, and return the one with the maximum priority. + If there are no inputs, say tasks = [], then it will keep popping till the buffer is empty. 
+ """ + for task in tasks: + pv = priority_value(task, self.memory) + if len(self.priority_queue) == 0: + self.priority_queue = [(pv, task)] + continue + mark = False + for i in range(len(self.priority_queue)): + if pv > self.priority_queue[i][0]: + self.priority_queue = self.priority_queue[:i] + [(pv, task)] + self.priority_queue[i:] + mark = True + break + if not mark: + self.priority_queue = self.priority_queue + [(pv, task)] + if len(self.priority_queue) > self.size: + self.priority_queue = self.priority_queue[:self.size] + if len(self.priority_queue) > 0: + return self.priority_queue.pop(0) + else: + return None diff --git a/pynars/NARS/DataStructures/MC/SampleChannels/WordNetChannel.py b/pynars/NARS/DataStructures/MC/SampleChannels/WordNetChannel.py new file mode 100644 index 00000000..40fc7949 --- /dev/null +++ b/pynars/NARS/DataStructures/MC/SampleChannels/WordNetChannel.py @@ -0,0 +1,132 @@ +""" +Compatible with BufferMC. + +ChannelMC (the old one) class is kept there for further changes. +""" +import re + +from nltk.corpus import wordnet as wn + +from pynars.Narsese import parser + + +def convert_util(sentence: str): + if len(re.findall("[a-zA-Z0-9]+", sentence)) == 1: + return sentence + else: + return "\"" + sentence + "\"" + + +def word2narsese(word: str): + """ + Two stages: + 1) pre-synset processing: build links between i) word and its own synsets, ii) word and its synonyms, iii) word's + synonyms and their synsets + 2) synset processing: build links between i) synset and its related synsets, ii) synset and its lemmas, iii) + lemmas with their antonyms (with a higher priority) + :return: list[str] + """ + + ret = [] + + # stage 1 + # ================================================================================================================== + + synsets = [] # for stage 2 processing + + synonyms = wn.synonyms(word) # get synonyms first, they are word-level (not give in synsets) + # build links between word and its synonyms + for synonym in synonyms: + if len(synonym) != 0: + for each_synonym in synonym: + # synonym + ret.append(parser.parse("<" + convert_util(word) + " <-> " + convert_util(each_synonym) + ">.")) + for each_synonym_synset in wn.synsets(each_synonym): + synsets.append(each_synonym_synset) # add to stage 2 processing + synset_t = convert_util(each_synonym_synset.name()) # synset term + ret.append(parser.parse("<" + synset_t + " --> " + convert_util(each_synonym) + ">.")) + + # build links between word and its synsets + for each_synset in wn.synsets(word): + synsets.append(each_synset) # add to stage 2 processing + synset_t = convert_util(each_synset.name()) # synset term + ret.append(parser.parse("<" + synset_t + " --> " + convert_util(word) + ">.")) + + # stage 2 + # ================================================================================================================== + + for synset in synsets: + + synset_t = convert_util(synset.name()) # synset term + + for each in synset.hypernyms(): # hypernyms + ret.append(parser.parse("<" + synset_t + " --> " + convert_util(each.name()) + ">.")) + for each in synset.hyponyms(): # hyponyms + ret.append(parser.parse("<" + convert_util(each.name()) + " --> " + synset_t + ">.")) + for each in synset.instance_hypernyms(): # instance hypernyms + ret.append(parser.parse("<{" + synset_t + "} --> " + convert_util(each.name()) + ">.")) + for each in synset.instance_hyponyms(): # instance hyponyms + ret.append(parser.parse("<{" + convert_util(each.name()) + "} --> " + synset_t + ">.")) + for each in 
synset.member_holonyms(): # member holonyms + ret.append(parser.parse("<(*," + convert_util(each.name()) + "," + synset_t + ") --> MemberOf>.")) + for each in synset.substance_holonyms(): # substance holonyms + ret.append(parser.parse("<(*," + convert_util(each.name()) + "," + synset_t + ") --> SubstanceOf>.")) + for each in synset.part_holonyms(): # part holonyms + ret.append(parser.parse("<(*," + convert_util(each.name()) + "," + synset_t + ") --> PartOf>.")) + for each in synset.member_meronyms(): # member meronyms + ret.append(parser.parse("<(*," + synset_t + "," + convert_util(each.name()) + ") --> MemberOf>.")) + for each in synset.substance_meronyms(): # substance meronyms + ret.append(parser.parse("<(*," + synset_t + "," + convert_util(each.name()) + ") --> SubstanceOf>.")) + for each in synset.part_meronyms(): # part meronyms + ret.append(parser.parse("<(*," + synset_t + "," + convert_util(each.name()) + ") --> PartOf>.")) + for each in synset.attributes(): # attributes + ret.append(parser.parse("<(&," + convert_util(each.name()) + "," + synset_t + ") --> " + synset_t + ">.")) + for each in synset.entailments(): # entailments + ret.append(parser.parse("<(*," + convert_util(each.name()) + "," + synset_t + ") --> After>.")) + for each in synset.causes(): # causes + ret.append(parser.parse("<(*," + synset_t + "," + convert_util(each.name()) + ") --> After>.")) + for each in synset.also_sees(): # also sees + ret.append(parser.parse("<(*," + synset_t + "," + convert_util(each.name()) + ") --> SameTime>.")) + for each in synset.verb_groups(): # verb groups + ret.append(parser.parse("<" + synset_t + " <-> " + convert_util(each.name()) + ">.")) + for each in synset.similar_tos(): # similar-to's + ret.append(parser.parse("<" + synset_t + " <-> " + convert_util(each.name()) + ">.")) + + lemmas = synset.lemmas() + for lemma in lemmas: # lemmas + lemma_t = convert_util(lemma.name()) + ret.append(parser.parse("<" + lemma_t + " --> " + synset_t + ">.")) + for antonym in lemma.antonyms(): # antonyms, higher priority + ret.append(parser.parse( + "$0.9; 0.9; 0.5$ <" + convert_util(antonym.name()) + " <-> " + lemma_t + ">. %0.0; 0.9%")) + + return ret + + +class WordNetChannel: + + def __init__(self, ID, buffer): + self.ID = ID + self.buffer = buffer + + def WordNetQuery(self, task=None): + """ + Query WordNet with a single natural language word. Can be an empty query, if so, then it will pop previous + remaining queries. + """ + tasks = [] + if task is not None and task.is_goal: + + try: + """ + Something is wrong with the variable related functionalities. + Here is a relatively fixed format extracting the query word. + """ + query_word = \ + [x.word for x in task.term.subject.sub_terms if + x.word != "WordNet" and x.word != task.term.subject.word][0] + tasks = word2narsese(query_word) + except: + tasks = [] + + return self.buffer.buffer_cycle(tasks) diff --git a/pynars/NARS/DataStructures/MC/draft.py b/pynars/NARS/DataStructures/MC/draft.py new file mode 100644 index 00000000..d70c6e8d --- /dev/null +++ b/pynars/NARS/DataStructures/MC/draft.py @@ -0,0 +1,84 @@ +from multiprocessing import Process + +from BufferMC import Buffer +from SampleChannels.WordNetChannel import WordNetChannel +from pynars.NARS import Reasoner +from pynars.Narsese import Task + + +def nars_core(): + """ + It will try to read from the nars_inputs list, and write the results in nars_outputs (just write for recording). + And it will also append some results to wcn_inputs for further processing. 
(hand the task to the channel) + """ + global nars_input + global nars_output + global wcn_input + global wcn_output + for _ in range(300): + # here it runs for 300 cycles, but it is designed to run forever, if so, please use while(True) + if len(nars_input) == 0: + ret = nars.cycle() + else: + tmp = nars_input.pop(0) + if isinstance(tmp, Task): + success, task, _, ret = nars.input_narsese(text=str(tmp), go_cycle=True) + else: + success, task, _, ret = nars.input_narsese(text=tmp, go_cycle=True) + nars_output.append(ret) + if len(ret[0]) != 0: + for each in ret[0]: + wcn_input.append(each) + + +def wcn_core(): + global nars_input + global nars_output + global wcn_input + global wcn_output + for _ in range(300): + # here it runs for 300 cycles, but it is designed to run forever, if so, please use while(True) + if len(wcn_input) == 0: + ret = wcn.WordNetQuery() + else: + ret = wcn.WordNetQuery(wcn_input.pop(0)) + wcn_output.append(ret) + if ret is not None: + nars_input.append(each) + + +if __name__ == "__main__": + + apriori_knowledge = ["< #y> ==> [KNOWN]>>.", + "<<#y --> cat> ==> [KNOWN]>>.", + " [KNOWN]>>."] + # including product as images + + nars = Reasoner(100, 100) + for each in apriori_knowledge: + nars.input_narsese(each, True) + + buff = Buffer(100, nars.memory) + wcn = WordNetChannel("WCN", buff) + + # global data + nars_input = [" [KNOWN]>!", " cat>?"] # though there is a question here, the answer is not used + nars_output = [] + wcn_input = [] + wcn_output = [] + + process_list = [] + + p = Process(target=nars_core(), args=('Python',)) + p.start() + process_list.append(p) + + p = Process(target=wcn_core(), args=('Python',)) + p.start() + process_list.append(p) + + for i in range(len(process_list)): + process_list[i].join() + + print(nars_output) + print(wcn_output) From 1785328fc5b010c9d1bdfdad3ce723500a88569a Mon Sep 17 00:00:00 2001 From: Tory Li Date: Thu, 21 Sep 2023 10:58:24 -0400 Subject: [PATCH 2/4] A small demo showing nars-core and channels working parallely. 
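
A note on the process wiring in draft.py: `Process(target=nars_core(), args=('Python',))` calls nars_core in the parent and hands its return value (None) to Process, and plain module-level lists such as nars_input and wch_input are not shared between processes, so appends made in one worker are invisible to the other. Below is a minimal, self-contained sketch of the intended hand-off; it is not this demo's API, and handle_in_nars / handle_in_channel are made-up placeholders standing in for the Reasoner and WordNetChannel calls.

    # Sketch only: queues replace the module-level lists; the handlers are hypothetical.
    from multiprocessing import Process, Queue

    def handle_in_nars(text):
        # placeholder for nars.cycle() / nars.input_narsese()
        return "derived(" + text + ")"

    def handle_in_channel(text):
        # placeholder for WordNetChannel.WordNetQuery()
        return "queried(" + text + ")"

    def nars_loop(to_nars, to_channel, cycles=100):
        for _ in range(cycles):
            if not to_nars.empty():
                to_channel.put(handle_in_nars(to_nars.get()))

    def channel_loop(to_nars, to_channel, cycles=100):
        for _ in range(cycles):
            if not to_channel.empty():
                to_nars.put(handle_in_channel(to_channel.get()))

    if __name__ == "__main__":
        to_nars, to_channel = Queue(), Queue()
        to_nars.put("<event --> [KNOWN]>!")
        workers = [Process(target=nars_loop, args=(to_nars, to_channel)),
                   Process(target=channel_loop, args=(to_nars, to_channel))]
        for w in workers:
            w.start()
        for w in workers:
            w.join()

Passing the callable itself (target=nars_core) and sharing queues instead of lists is what lets the two loops actually exchange tasks.
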
--- pynars/NARS/DataStructures/MC/draft.py | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/pynars/NARS/DataStructures/MC/draft.py b/pynars/NARS/DataStructures/MC/draft.py index d70c6e8d..18d72b3c 100644 --- a/pynars/NARS/DataStructures/MC/draft.py +++ b/pynars/NARS/DataStructures/MC/draft.py @@ -13,8 +13,8 @@ def nars_core(): """ global nars_input global nars_output - global wcn_input - global wcn_output + global wch_input + global wch_output for _ in range(300): # here it runs for 300 cycles, but it is designed to run forever, if so, please use while(True) if len(nars_input) == 0: @@ -28,30 +28,28 @@ def nars_core(): nars_output.append(ret) if len(ret[0]) != 0: for each in ret[0]: - wcn_input.append(each) + wch_input.append(each) def wcn_core(): global nars_input global nars_output - global wcn_input - global wcn_output + global wch_input + global wch_output for _ in range(300): # here it runs for 300 cycles, but it is designed to run forever, if so, please use while(True) - if len(wcn_input) == 0: + if len(wch_input) == 0: ret = wcn.WordNetQuery() else: - ret = wcn.WordNetQuery(wcn_input.pop(0)) - wcn_output.append(ret) + ret = wcn.WordNetQuery(wch_input.pop(0)) + wch_output.append(ret) if ret is not None: nars_input.append(each) if __name__ == "__main__": - apriori_knowledge = ["< #y> ==> [KNOWN]>>.", - "<<#y --> cat> ==> [KNOWN]>>.", - " [KNOWN]>>."] + apriori_knowledge = [" [KNOWN]>>."] # including product as images nars = Reasoner(100, 100) @@ -64,8 +62,8 @@ def wcn_core(): # global data nars_input = [" [KNOWN]>!", " cat>?"] # though there is a question here, the answer is not used nars_output = [] - wcn_input = [] - wcn_output = [] + wch_input = [] + wch_output = [] process_list = [] @@ -81,4 +79,4 @@ def wcn_core(): process_list[i].join() print(nars_output) - print(wcn_output) + print(wch_output) From 01586ee413a030c3580162d09c353245e90f0cb4 Mon Sep 17 00:00:00 2001 From: Tory Li Date: Sun, 1 Oct 2023 13:25:06 -0400 Subject: [PATCH 3/4] Introduce draft2.py, which is a sample of using NARS to generate pos and neg labels. 
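
For the label generation itself, word2narsese_exAll.py keeps a derived judgment as a positive label when its truth frequency is above 0.9 and as a negative label when it is below 0.1, then expands outward from the seed labels rank by rank. A small illustration of that thresholding step, using plain (term, frequency) pairs instead of pynars Task objects, is:

    # Illustration only: thresholds mirror result_filtering(); the real code reads task.truth.f.
    def split_pos_neg(judgments, hi=0.9, lo=0.1):
        pos = [term for term, f in judgments if f > hi]
        neg = [term for term, f in judgments if f < lo]
        return pos, neg

    print(split_pos_neg([("robbery", 0.95), ("normal_event", 0.02), ("car", 0.5)]))
    # -> (['robbery'], ['normal_event'])
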
--- pynars/NARS/DataStructures/MC/draft.py | 5 + pynars/NARS/DataStructures/MC/draft2.py | 96 +++++ .../NARS/DataStructures/MC/util/__init__.py | 0 .../MC/util/word2narsese_exAll.py | 358 ++++++++++++++++++ 4 files changed, 459 insertions(+) create mode 100644 pynars/NARS/DataStructures/MC/draft2.py create mode 100644 pynars/NARS/DataStructures/MC/util/__init__.py create mode 100644 pynars/NARS/DataStructures/MC/util/word2narsese_exAll.py diff --git a/pynars/NARS/DataStructures/MC/draft.py b/pynars/NARS/DataStructures/MC/draft.py index 18d72b3c..10defa95 100644 --- a/pynars/NARS/DataStructures/MC/draft.py +++ b/pynars/NARS/DataStructures/MC/draft.py @@ -1,3 +1,8 @@ +""" +draft.py is just for showing the structure how channels and NARS core communicate +""" + + from multiprocessing import Process from BufferMC import Buffer diff --git a/pynars/NARS/DataStructures/MC/draft2.py b/pynars/NARS/DataStructures/MC/draft2.py new file mode 100644 index 00000000..bfdc397d --- /dev/null +++ b/pynars/NARS/DataStructures/MC/draft2.py @@ -0,0 +1,96 @@ +""" +draft2.py uses wordnet as an example +""" + + +from multiprocessing import Process + +from BufferMC import Buffer +from SampleChannels.WordNetChannel import WordNetChannel +from pynars.NARS import Reasoner +from pynars.Narsese import Task +from pynars.NARS.DataStructures.MC.util.word2narsese_exAll import words2narsese + + +def nars_core(): + """ + It will try to read from the nars_inputs list, and write the results in nars_outputs (just write for recording). + And it will also append some results to wcn_inputs for further processing. (hand the task to the channel) + """ + global nars_input + global nars_output + global wch_input + global wch_output + for _ in range(300): + # here it runs for 300 cycles, but it is designed to run forever, if so, please use while(True) + if len(nars_input) == 0: + ret = nars.cycle() + else: + tmp = nars_input.pop(0) + + print("narsese input:", tmp) + + if isinstance(tmp, Task): + success, task, _, ret = nars.input_narsese(text=str(tmp), go_cycle=True) + else: + success, task, _, ret = nars.input_narsese(text=tmp, go_cycle=True) + nars_output.append(ret) + if len(ret[0]) != 0: + for each in ret[0]: + wch_input.append(each) + + +def wcn_core(): + global nars_input + global nars_output + global wch_input + global wch_output + for _ in range(300): + # here it runs for 300 cycles, but it is designed to run forever, if so, please use while(True) + if len(wch_input) == 0: + ret = wcn.WordNetQuery() + else: + tmp = wch_input.pop(0) + print("channel input:", tmp) + ret = wcn.WordNetQuery(tmp) + wch_output.append(ret) + if ret is not None: + nars_input.append(each) + + +if __name__ == "__main__": + + apriori_knowledge = [" <$x --> [KNOWN]>>.", + "<<$label --> X> ==> <$label --> [pos]>>. 
%0.6;0.99%", + " X>.", + " X>.", + " X>."] # note this X is for one single case + + nars = Reasoner(1000, 1000) + for each in apriori_knowledge: + nars.input_narsese(each, True) + + buff = Buffer(100, nars.memory) + wcn = WordNetChannel("WCN", buff) + + # global data + nars_input = [" [KNOWN]>!", " [pos]>?"] + nars_output = [] + wch_input = [] + wch_output = [] + + process_list = [] + + p = Process(target=nars_core(), args=('Python',)) + p.start() + process_list.append(p) + + p = Process(target=wcn_core(), args=('Python',)) + p.start() + process_list.append(p) + + for i in range(len(process_list)): + process_list[i].join() + + print(nars_output) + print(wch_output) diff --git a/pynars/NARS/DataStructures/MC/util/__init__.py b/pynars/NARS/DataStructures/MC/util/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/pynars/NARS/DataStructures/MC/util/word2narsese_exAll.py b/pynars/NARS/DataStructures/MC/util/word2narsese_exAll.py new file mode 100644 index 00000000..367d493e --- /dev/null +++ b/pynars/NARS/DataStructures/MC/util/word2narsese_exAll.py @@ -0,0 +1,358 @@ +import re +from copy import deepcopy +from typing import List, Tuple + +import nltk +from nltk.corpus import wordnet as wn + +from pynars import Narsese +from pynars.NARS import Reasoner as Reasoner +from pynars.Narsese import Task +from pynars.utils.Print import print_out, PrintType + +nltk.download('wordnet') + + +def convert_util(sentence: str): + return "\"" + sentence + "\"" + + +def word2narsese(word: str): + """ + Two stages: + 1) pre-synset processing: build links between i) word and its own synsets, ii) word and its synonyms, iii) word's + synonyms and their synsets + 2) synset processing: build links between i) synset and its related synsets, ii) synset and its lemmas, iii) + lemmas with their antonyms (with a higher priority) + :return: list[str] + """ + + ret = [] + + # stage 1 + # ================================================================================================================== + + synsets = [] # for stage 2 processing + + synonyms = wn.synonyms(word) # get synonyms first, they are word-level (not give in synsets) + # build links between word and its synonyms + for synonym in synonyms: + if len(synonym) != 0: + for each_synonym in synonym: + # synonym + ret.append("<" + convert_util(word) + " <-> " + convert_util(each_synonym) + ">.") + for each_synonym_synset in wn.synsets(each_synonym): + synsets.append(each_synonym_synset) # add to stage 2 processing + synset_t = convert_util(each_synonym_synset.name()) # synset term + ret.append("<" + synset_t + " --> " + convert_util(each_synonym) + ">.") + + # build links between word and its synsets + for each_synset in wn.synsets(word): + synsets.append(each_synset) # add to stage 2 processing + synset_t = convert_util(each_synset.name()) # synset term + ret.append("<" + synset_t + " --> " + convert_util(word) + ">.") + + # stage 2 + # ================================================================================================================== + + for synset in synsets: + + synset_t = convert_util(synset.name()) # synset term + + for each in synset.hypernyms(): # hypernyms + ret.append("<" + synset_t + " --> " + convert_util(each.name()) + ">.") + for each in synset.hyponyms(): # hyponyms + ret.append("<" + convert_util(each.name()) + " --> " + synset_t + ">.") + for each in synset.instance_hypernyms(): # instance hypernyms + ret.append("<{" + synset_t + "} --> " + convert_util(each.name()) + ">.") + for each in 
synset.instance_hyponyms(): # instance hyponyms + ret.append("<{" + convert_util(each.name()) + "} --> " + synset_t + ">.") + for each in synset.member_holonyms(): # member holonyms + ret.append("<(*," + convert_util(each.name()) + "," + synset_t + ") --> MemberOf>.") + for each in synset.substance_holonyms(): # substance holonyms + ret.append("<(*," + convert_util(each.name()) + "," + synset_t + ") --> SubstanceOf>.") + for each in synset.part_holonyms(): # part holonyms + ret.append("<(*," + convert_util(each.name()) + "," + synset_t + ") --> PartOf>.") + for each in synset.member_meronyms(): # member meronyms + ret.append("<(*," + synset_t + "," + convert_util(each.name()) + ") --> MemberOf>.") + for each in synset.substance_meronyms(): # substance meronyms + ret.append("<(*," + synset_t + "," + convert_util(each.name()) + ") --> SubstanceOf>.") + for each in synset.part_meronyms(): # part meronyms + ret.append("<(*," + synset_t + "," + convert_util(each.name()) + ") --> PartOf>.") + for each in synset.attributes(): # attributes + ret.append("<(&," + convert_util(each.name()) + "," + synset_t + ") --> " + synset_t + ">.") + for each in synset.entailments(): # entailments + ret.append("<" + convert_util(each.name()) + " =/> " + synset_t + ">.") + for each in synset.causes(): # causes + ret.append("<" + synset_t + " =/> " + convert_util(each.name()) + ">.") + for each in synset.also_sees(): # also sees + ret.append("<" + synset_t + " <|> " + convert_util(each.name()) + ">.") + for each in synset.verb_groups(): # verb groups + ret.append("<" + synset_t + " <-> " + convert_util(each.name()) + ">.") + for each in synset.similar_tos(): # similar-to's + ret.append("<" + synset_t + " <-> " + convert_util(each.name()) + ">.") + + lemmas = synset.lemmas() + for lemma in lemmas: # lemmas + lemma_t = convert_util(lemma.name()) + ret.append("<" + lemma_t + " --> " + synset_t + ">.") + for antonym in lemma.antonyms(): # antonyms, higher priority + ret.append("$0.9; 0.9; 0.5$ <" + convert_util(antonym.name()) + " <-> " + lemma_t + ">. %0.0; 0.9%") + + return "\n".join(ret) + + +def words2narsese(words: list[str]): + ret = [] + + for word in words: + ret.append(word2narsese(word)) + + return "\n".join(ret) + + +def run_line(nars: Reasoner, line: str): # PyNARS call + line = line.strip(' \n') + if line.startswith("//"): + return None + elif line.startswith("''"): + if line.startswith("''outputMustContain('"): + line = line[len("''outputMustContain('"):].rstrip("')\n") + if len(line) == 0: return + try: + content_check = Narsese.parser.parse(line) + # out_print(PrintType.INFO, f'OutputContains({content_check.sentence.repr()})') + except: + print_out(PrintType.ERROR, f'Invalid input! Failed to parse: {line}') + return + elif line.startswith("'"): + return None + elif line.isdigit(): + n_cycle = int(line) + print_out(PrintType.INFO, f'Run {n_cycle} cycles.') + tasks_all_cycles = [] + for _ in range(n_cycle): + tasks_all = nars.cycle() + tasks_all_cycles.append(deepcopy(tasks_all)) + return tasks_all_cycles + else: + line = line.rstrip(' \n') + if len(line) == 0: + return None + try: + success, task, _ = nars.input_narsese(line, go_cycle=True) + if success: + print_out(PrintType.IN, task.sentence.repr(), *task.budget) + else: + print_out(PrintType.ERROR, f'Invalid input! 
Failed to parse: {line}') + + tasks_all = nars.cycle() + return [deepcopy(tasks_all)] + except: + print_out(PrintType.ERROR, f'Unknown error: {line}') + + +def handle_lines(nars: Reasoner, lines: str): # PyNARS call + tasks_lines = [] + for line in lines.split('\n'): + if len(line) == 0: continue + + tasks_line = run_line(nars, line) + if tasks_line is not None: + tasks_lines.extend(tasks_line) + + check_list = set() + ret = [] + + tasks_lines: List[Tuple[List[Task], Task, Task, List[Task], Task, Tuple[Task, Task]]] + for tasks_line in tasks_lines: + tasks_derived, judgement_revised, goal_revised, answers_question, answers_quest, ( + task_operation_return, task_executed) = tasks_line + + for task in tasks_derived: + if task.term.word not in check_list: + check_list.add(task.term.word) + ret.append(task) + + if judgement_revised is not None: + if judgement_revised.term.word not in check_list: + check_list.add(judgement_revised.term.word) + ret.append(judgement_revised) + + if goal_revised is not None: + if goal_revised.term.word not in check_list: + check_list.add(goal_revised.term.word) + ret.append(goal_revised) + + if answers_question is not None: + for answer in answers_question: + if answer.term.word not in check_list: + check_list.add(answer.term.word) + ret.append(answer) + + if answers_quest is not None: + for answer in answers_quest: + if answer.term.word not in check_list: + check_list.add(answer.term.word) + ret.append(answer) + + return ret + + +def result_filtering(reasoning_results): + # find positive/negative judgments + pos = [] + neg = [] + + for each in reasoning_results: + if each.truth.f > 0.9: + pos.append(each) + elif each.truth.f < 0.1: + neg.append(each) + + return pos, neg + + +def next_rank(base, reasoning_results, lower_ranks): + """ + If we have a sentence B>., and if we call A (or B) the rank_i term, then B (or A) is the rank_i+1 term. + "RANK" represents how many sentences are needed. + If two same terms are of different ranks, the smaller rank will be chosen. + :param base: dic + :param reasoning_results: list[Task] + :param lower_ranks: list[dic] + :return: dic + """ + rkn = {} + for each_result in reasoning_results: + words = [each.word.replace("\"", "") for each in each_result.term.terms] # get sub-terms + for i, word in enumerate(words): + # if a sub-term is in the base, then all remaining sub-terms will be rank_next terms + if word in base: + for j in range(len(words)): + if j == i: + continue + elif words[j] in rkn: + rkn[words[j]] = [rkn[words[j]][0] + 1, (rkn[words[j]][1] + each_result.truth.e) / 2] + else: + rkn.update({words[j]: [1, each_result.truth.e]}) + tmp = deepcopy(rkn) + for each in rkn: + if rkn[each][0] < 3: + tmp.pop(each) + continue + for lower_rank in lower_ranks: + if each in lower_rank: + tmp.pop(each) + break + return tmp + + +def term2nl_util(term, ret): + if term[0] != "(": + if "." 
not in term: + return ret.union({term}) + else: + return ret.union({wn.synset(term).definition()}) + else: + sub_terms = term[4:-1].split(", ") + for sub_term in sub_terms: + ret = ret.union(term2nl_util(sub_term, ret)) + + return ret + + +def term2nl(term, connector=None): + if connector is not None: + components = term2nl_util(term, set()) + return connector.join(components) + else: + return list(term2nl_util(term, set()))[0] + + +def terms2nl(terms): + ret = [] + for term in terms: + try: + if term[1] == "&": + ret.append(term2nl(term, " and ")) + elif term[1] == "|": + ret.append(term2nl(term, " or ")) + else: + ret.append(term2nl(term)) + except: + continue + return ret + + +if __name__ == "__main__": + # some compound words cannot be found in wordnet directly, but can also be represented by NARS + # e.g., "car accident", (&, car, accident) + + narsese = words2narsese(["abuse", + "burglary", + "robbery", + "stealing", + "shooting", + "shoplifting", + "assault", + "fighting", + "arson", + "explosion", + "arrest", + "car", + "accident", + "vandalism", + "normal", + "event"]) + + original_labels = {"abuse": [1, 0.9], + "burglary": [1, 0.9], + "robbery": [1, 0.9], + "stealing": [1, 0.9], + "shooting": [1, 0.9], + "shoplifting": [1, 0.9], + "assault": [1, 0.9], + "fighting": [1, 0.9], + "arson": [1, 0.9], + "explosion": [1, 0.9], + "arrest": [1, 0.9], + "(&, car, accident)": [1, 0.9], + "(&, accident, car)": [1, 0.9], + "vandalism": [1, 0.9], + "(&, normal, event)": [1, 0.9], + "(&, event, normal)": [1, 0.9]} + + nars = Reasoner(100000, 100000) + + reasoning_results = handle_lines(nars, narsese + "\n1000") + + pos, neg = result_filtering(reasoning_results) + + rk1 = next_rank(original_labels, pos, [original_labels]) + A = sorted([[each, rk1[each][0], rk1[each][1]] for each in rk1], key=lambda x: x[2], reverse=True) + rk2 = next_rank(rk1, pos, [original_labels, rk1]) + rk3 = next_rank(rk2, pos, [original_labels, rk1, rk2]) + + print(1) + + # pos_terms = rk1.union(*[rk2, rk3]) + # pos_scores = sorted(terms_score(pos_terms, pos), key=lambda x: x[1], reverse=True)[:400] + # pos_terms = set([each[0] for each in pos_scores]) + # + # neg_terms = next_rank(original_labels.union(pos_terms), neg, original_labels.union(pos_terms)) + # + # pos_labels = terms2nl(pos_terms) + # neg_labels = terms2nl(neg_terms) + # + # print("pos labels: ", pos_labels) + # print("========") + # print("neg labels: ", neg_labels) + # + # with open("expanded labels ExAll pos.txt", "w") as file: + # for each in pos_labels: + # file.write(each + "\n") + # + # with open("expanded labels ExAll neg.txt", "w") as file: + # for each in neg_labels: + # file.write(each + "\n") From 5895a259560a7be301140bdc6f7ebf28398b673b Mon Sep 17 00:00:00 2001 From: Bowen Xu Date: Wed, 22 Nov 2023 22:55:33 -0500 Subject: [PATCH 4/4] Init folder `Channels` --- .../Channels/WordNetChannel/WordNetChannel.py | 132 +++++++ .../NARS/Channels/WordNetChannel/__init__.py | 0 pynars/NARS/Channels/WordNetChannel/draft2.py | 95 +++++ .../WordNetChannel/word2narsese_exAll.py | 358 ++++++++++++++++++ pynars/NARS/Channels/__init__.py | 1 + 5 files changed, 586 insertions(+) create mode 100644 pynars/NARS/Channels/WordNetChannel/WordNetChannel.py create mode 100644 pynars/NARS/Channels/WordNetChannel/__init__.py create mode 100644 pynars/NARS/Channels/WordNetChannel/draft2.py create mode 100644 pynars/NARS/Channels/WordNetChannel/word2narsese_exAll.py create mode 100644 pynars/NARS/Channels/__init__.py diff --git 
a/pynars/NARS/Channels/WordNetChannel/WordNetChannel.py b/pynars/NARS/Channels/WordNetChannel/WordNetChannel.py new file mode 100644 index 00000000..40fc7949 --- /dev/null +++ b/pynars/NARS/Channels/WordNetChannel/WordNetChannel.py @@ -0,0 +1,132 @@ +""" +Compatible with BufferMC. + +ChannelMC (the old one) class is kept there for further changes. +""" +import re + +from nltk.corpus import wordnet as wn + +from pynars.Narsese import parser + + +def convert_util(sentence: str): + if len(re.findall("[a-zA-Z0-9]+", sentence)) == 1: + return sentence + else: + return "\"" + sentence + "\"" + + +def word2narsese(word: str): + """ + Two stages: + 1) pre-synset processing: build links between i) word and its own synsets, ii) word and its synonyms, iii) word's + synonyms and their synsets + 2) synset processing: build links between i) synset and its related synsets, ii) synset and its lemmas, iii) + lemmas with their antonyms (with a higher priority) + :return: list[str] + """ + + ret = [] + + # stage 1 + # ================================================================================================================== + + synsets = [] # for stage 2 processing + + synonyms = wn.synonyms(word) # get synonyms first, they are word-level (not give in synsets) + # build links between word and its synonyms + for synonym in synonyms: + if len(synonym) != 0: + for each_synonym in synonym: + # synonym + ret.append(parser.parse("<" + convert_util(word) + " <-> " + convert_util(each_synonym) + ">.")) + for each_synonym_synset in wn.synsets(each_synonym): + synsets.append(each_synonym_synset) # add to stage 2 processing + synset_t = convert_util(each_synonym_synset.name()) # synset term + ret.append(parser.parse("<" + synset_t + " --> " + convert_util(each_synonym) + ">.")) + + # build links between word and its synsets + for each_synset in wn.synsets(word): + synsets.append(each_synset) # add to stage 2 processing + synset_t = convert_util(each_synset.name()) # synset term + ret.append(parser.parse("<" + synset_t + " --> " + convert_util(word) + ">.")) + + # stage 2 + # ================================================================================================================== + + for synset in synsets: + + synset_t = convert_util(synset.name()) # synset term + + for each in synset.hypernyms(): # hypernyms + ret.append(parser.parse("<" + synset_t + " --> " + convert_util(each.name()) + ">.")) + for each in synset.hyponyms(): # hyponyms + ret.append(parser.parse("<" + convert_util(each.name()) + " --> " + synset_t + ">.")) + for each in synset.instance_hypernyms(): # instance hypernyms + ret.append(parser.parse("<{" + synset_t + "} --> " + convert_util(each.name()) + ">.")) + for each in synset.instance_hyponyms(): # instance hyponyms + ret.append(parser.parse("<{" + convert_util(each.name()) + "} --> " + synset_t + ">.")) + for each in synset.member_holonyms(): # member holonyms + ret.append(parser.parse("<(*," + convert_util(each.name()) + "," + synset_t + ") --> MemberOf>.")) + for each in synset.substance_holonyms(): # substance holonyms + ret.append(parser.parse("<(*," + convert_util(each.name()) + "," + synset_t + ") --> SubstanceOf>.")) + for each in synset.part_holonyms(): # part holonyms + ret.append(parser.parse("<(*," + convert_util(each.name()) + "," + synset_t + ") --> PartOf>.")) + for each in synset.member_meronyms(): # member meronyms + ret.append(parser.parse("<(*," + synset_t + "," + convert_util(each.name()) + ") --> MemberOf>.")) + for each in synset.substance_meronyms(): # 
substance meronyms + ret.append(parser.parse("<(*," + synset_t + "," + convert_util(each.name()) + ") --> SubstanceOf>.")) + for each in synset.part_meronyms(): # part meronyms + ret.append(parser.parse("<(*," + synset_t + "," + convert_util(each.name()) + ") --> PartOf>.")) + for each in synset.attributes(): # attributes + ret.append(parser.parse("<(&," + convert_util(each.name()) + "," + synset_t + ") --> " + synset_t + ">.")) + for each in synset.entailments(): # entailments + ret.append(parser.parse("<(*," + convert_util(each.name()) + "," + synset_t + ") --> After>.")) + for each in synset.causes(): # causes + ret.append(parser.parse("<(*," + synset_t + "," + convert_util(each.name()) + ") --> After>.")) + for each in synset.also_sees(): # also sees + ret.append(parser.parse("<(*," + synset_t + "," + convert_util(each.name()) + ") --> SameTime>.")) + for each in synset.verb_groups(): # verb groups + ret.append(parser.parse("<" + synset_t + " <-> " + convert_util(each.name()) + ">.")) + for each in synset.similar_tos(): # similar-to's + ret.append(parser.parse("<" + synset_t + " <-> " + convert_util(each.name()) + ">.")) + + lemmas = synset.lemmas() + for lemma in lemmas: # lemmas + lemma_t = convert_util(lemma.name()) + ret.append(parser.parse("<" + lemma_t + " --> " + synset_t + ">.")) + for antonym in lemma.antonyms(): # antonyms, higher priority + ret.append(parser.parse( + "$0.9; 0.9; 0.5$ <" + convert_util(antonym.name()) + " <-> " + lemma_t + ">. %0.0; 0.9%")) + + return ret + + +class WordNetChannel: + + def __init__(self, ID, buffer): + self.ID = ID + self.buffer = buffer + + def WordNetQuery(self, task=None): + """ + Query WordNet with a single natural language word. Can be an empty query, if so, then it will pop previous + remaining queries. + """ + tasks = [] + if task is not None and task.is_goal: + + try: + """ + Something is wrong with the variable related functionalities. + Here is a relatively fixed format extracting the query word. + """ + query_word = \ + [x.word for x in task.term.subject.sub_terms if + x.word != "WordNet" and x.word != task.term.subject.word][0] + tasks = word2narsese(query_word) + except: + tasks = [] + + return self.buffer.buffer_cycle(tasks) diff --git a/pynars/NARS/Channels/WordNetChannel/__init__.py b/pynars/NARS/Channels/WordNetChannel/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/pynars/NARS/Channels/WordNetChannel/draft2.py b/pynars/NARS/Channels/WordNetChannel/draft2.py new file mode 100644 index 00000000..36135bd2 --- /dev/null +++ b/pynars/NARS/Channels/WordNetChannel/draft2.py @@ -0,0 +1,95 @@ +""" +draft2.py uses wordnet as an example +""" + + +from multiprocessing import Process + +from .WordNetChannel import WordNetChannel +from pynars.NARS import Reasoner +from pynars.Narsese import Task +from pynars.NARS.DataStructures.MC.util.word2narsese_exAll import words2narsese + + +def nars_core(): + """ + It will try to read from the nars_inputs list, and write the results in nars_outputs (just write for recording). + And it will also append some results to wcn_inputs for further processing. 
(hand the task to the channel) + """ + global nars_input + global nars_output + global wch_input + global wch_output + for _ in range(300): + # here it runs for 300 cycles, but it is designed to run forever, if so, please use while(True) + if len(nars_input) == 0: + ret = nars.cycle() + else: + tmp = nars_input.pop(0) + + print("narsese input:", tmp) + + if isinstance(tmp, Task): + success, task, _, ret = nars.input_narsese(text=str(tmp), go_cycle=True) + else: + success, task, _, ret = nars.input_narsese(text=tmp, go_cycle=True) + nars_output.append(ret) + if len(ret[0]) != 0: + for each in ret[0]: + wch_input.append(each) + + +def wcn_core(): + global nars_input + global nars_output + global wch_input + global wch_output + for _ in range(300): + # here it runs for 300 cycles, but it is designed to run forever, if so, please use while(True) + if len(wch_input) == 0: + ret = wcn.WordNetQuery() + else: + tmp = wch_input.pop(0) + print("channel input:", tmp) + ret = wcn.WordNetQuery(tmp) + wch_output.append(ret) + if ret is not None: + nars_input.append(each) + + +if __name__ == "__main__": + + apriori_knowledge = [" <$x --> [KNOWN]>>.", + "<<$label --> X> ==> <$label --> [pos]>>. %0.6;0.99%", + " X>.", + " X>.", + " X>."] # note this X is for one single case + + nars = Reasoner(1000, 1000) + for each in apriori_knowledge: + nars.input_narsese(each, True) + + buff = Buffer(100, nars.memory) + wcn = WordNetChannel("WCN", buff) + + # global data + nars_input = [" [KNOWN]>!", " [pos]>?"] + nars_output = [] + wch_input = [] + wch_output = [] + + process_list = [] + + p = Process(target=nars_core(), args=('Python',)) + p.start() + process_list.append(p) + + p = Process(target=wcn_core(), args=('Python',)) + p.start() + process_list.append(p) + + for i in range(len(process_list)): + process_list[i].join() + + print(nars_output) + print(wch_output) diff --git a/pynars/NARS/Channels/WordNetChannel/word2narsese_exAll.py b/pynars/NARS/Channels/WordNetChannel/word2narsese_exAll.py new file mode 100644 index 00000000..0f23f263 --- /dev/null +++ b/pynars/NARS/Channels/WordNetChannel/word2narsese_exAll.py @@ -0,0 +1,358 @@ +import re +from copy import deepcopy +from typing import List, Tuple + +import nltk +from nltk.corpus import wordnet as wn + +from pynars import Narsese +from pynars.NARS import Reasoner as Reasoner +from pynars.Narsese import Task +from pynars.utils.Print import print_out, PrintType + +nltk.download('wordnet') + + +def convert_util(sentence: str): + return "\"" + sentence + "\"" + + +def word2narsese(word: str): + """ + Two stages: + 1) pre-synset processing: build links between i) word and its own synsets, ii) word and its synonyms, iii) word's + synonyms and their synsets + 2) synset processing: build links between i) synset and its related synsets, ii) synset and its lemmas, iii) + lemmas with their antonyms (with a higher priority) + :return: list[str] + """ + + ret = [] + + # stage 1 + # ================================================================================================================== + + synsets = [] # for stage 2 processing + + synonyms = wn.synonyms(word) # get synonyms first, they are word-level (not give in synsets) + # build links between word and its synonyms + for synonym in synonyms: + if len(synonym) != 0: + for each_synonym in synonym: + # synonym + ret.append("<" + convert_util(word) + " <-> " + convert_util(each_synonym) + ">.") + for each_synonym_synset in wn.synsets(each_synonym): + synsets.append(each_synonym_synset) # add to stage 2 processing + 
synset_t = convert_util(each_synonym_synset.name()) # synset term + ret.append("<" + synset_t + " --> " + convert_util(each_synonym) + ">.") + + # build links between word and its synsets + for each_synset in wn.synsets(word): + synsets.append(each_synset) # add to stage 2 processing + synset_t = convert_util(each_synset.name()) # synset term + ret.append("<" + synset_t + " --> " + convert_util(word) + ">.") + + # stage 2 + # ================================================================================================================== + + for synset in synsets: + + synset_t = convert_util(synset.name()) # synset term + + for each in synset.hypernyms(): # hypernyms + ret.append("<" + synset_t + " --> " + convert_util(each.name()) + ">.") + for each in synset.hyponyms(): # hyponyms + ret.append("<" + convert_util(each.name()) + " --> " + synset_t + ">.") + for each in synset.instance_hypernyms(): # instance hypernyms + ret.append("<{" + synset_t + "} --> " + convert_util(each.name()) + ">.") + for each in synset.instance_hyponyms(): # instance hyponyms + ret.append("<{" + convert_util(each.name()) + "} --> " + synset_t + ">.") + for each in synset.member_holonyms(): # member holonyms + ret.append("<(*," + convert_util(each.name()) + "," + synset_t + ") --> MemberOf>.") + for each in synset.substance_holonyms(): # substance holonyms + ret.append("<(*," + convert_util(each.name()) + "," + synset_t + ") --> SubstanceOf>.") + for each in synset.part_holonyms(): # part holonyms + ret.append("<(*," + convert_util(each.name()) + "," + synset_t + ") --> PartOf>.") + for each in synset.member_meronyms(): # member meronyms + ret.append("<(*," + synset_t + "," + convert_util(each.name()) + ") --> MemberOf>.") + for each in synset.substance_meronyms(): # substance meronyms + ret.append("<(*," + synset_t + "," + convert_util(each.name()) + ") --> SubstanceOf>.") + for each in synset.part_meronyms(): # part meronyms + ret.append("<(*," + synset_t + "," + convert_util(each.name()) + ") --> PartOf>.") + for each in synset.attributes(): # attributes + ret.append("<(&," + convert_util(each.name()) + "," + synset_t + ") --> " + synset_t + ">.") + for each in synset.entailments(): # entailments + ret.append("<" + convert_util(each.name()) + " =/> " + synset_t + ">.") + for each in synset.causes(): # causes + ret.append("<" + synset_t + " =/> " + convert_util(each.name()) + ">.") + for each in synset.also_sees(): # also sees + ret.append("<" + synset_t + " <|> " + convert_util(each.name()) + ">.") + for each in synset.verb_groups(): # verb groups + ret.append("<" + synset_t + " <-> " + convert_util(each.name()) + ">.") + for each in synset.similar_tos(): # similar-to's + ret.append("<" + synset_t + " <-> " + convert_util(each.name()) + ">.") + + lemmas = synset.lemmas() + for lemma in lemmas: # lemmas + lemma_t = convert_util(lemma.name()) + ret.append("<" + lemma_t + " --> " + synset_t + ">.") + for antonym in lemma.antonyms(): # antonyms, higher priority + ret.append("$0.9; 0.9; 0.5$ <" + convert_util(antonym.name()) + " <-> " + lemma_t + ">. 
%0.0; 0.9%") + + return "\n".join(ret) + + +def words2narsese(words: List[str]): + ret = [] + + for word in words: + ret.append(word2narsese(word)) + + return "\n".join(ret) + + +def run_line(nars: Reasoner, line: str): # PyNARS call + line = line.strip(' \n') + if line.startswith("//"): + return None + elif line.startswith("''"): + if line.startswith("''outputMustContain('"): + line = line[len("''outputMustContain('"):].rstrip("')\n") + if len(line) == 0: return + try: + content_check = Narsese.parser.parse(line) + # out_print(PrintType.INFO, f'OutputContains({content_check.sentence.repr()})') + except: + print_out(PrintType.ERROR, f'Invalid input! Failed to parse: {line}') + return + elif line.startswith("'"): + return None + elif line.isdigit(): + n_cycle = int(line) + print_out(PrintType.INFO, f'Run {n_cycle} cycles.') + tasks_all_cycles = [] + for _ in range(n_cycle): + tasks_all = nars.cycle() + tasks_all_cycles.append(deepcopy(tasks_all)) + return tasks_all_cycles + else: + line = line.rstrip(' \n') + if len(line) == 0: + return None + try: + success, task, _ = nars.input_narsese(line, go_cycle=True) + if success: + print_out(PrintType.IN, task.sentence.repr(), *task.budget) + else: + print_out(PrintType.ERROR, f'Invalid input! Failed to parse: {line}') + + tasks_all = nars.cycle() + return [deepcopy(tasks_all)] + except: + print_out(PrintType.ERROR, f'Unknown error: {line}') + + +def handle_lines(nars: Reasoner, lines: str): # PyNARS call + tasks_lines = [] + for line in lines.split('\n'): + if len(line) == 0: continue + + tasks_line = run_line(nars, line) + if tasks_line is not None: + tasks_lines.extend(tasks_line) + + check_list = set() + ret = [] + + tasks_lines: List[Tuple[List[Task], Task, Task, List[Task], Task, Tuple[Task, Task]]] + for tasks_line in tasks_lines: + tasks_derived, judgement_revised, goal_revised, answers_question, answers_quest, ( + task_operation_return, task_executed) = tasks_line + + for task in tasks_derived: + if task.term.word not in check_list: + check_list.add(task.term.word) + ret.append(task) + + if judgement_revised is not None: + if judgement_revised.term.word not in check_list: + check_list.add(judgement_revised.term.word) + ret.append(judgement_revised) + + if goal_revised is not None: + if goal_revised.term.word not in check_list: + check_list.add(goal_revised.term.word) + ret.append(goal_revised) + + if answers_question is not None: + for answer in answers_question: + if answer.term.word not in check_list: + check_list.add(answer.term.word) + ret.append(answer) + + if answers_quest is not None: + for answer in answers_quest: + if answer.term.word not in check_list: + check_list.add(answer.term.word) + ret.append(answer) + + return ret + + +def result_filtering(reasoning_results): + # find positive/negative judgments + pos = [] + neg = [] + + for each in reasoning_results: + if each.truth.f > 0.9: + pos.append(each) + elif each.truth.f < 0.1: + neg.append(each) + + return pos, neg + + +def next_rank(base, reasoning_results, lower_ranks): + """ + If we have a sentence B>., and if we call A (or B) the rank_i term, then B (or A) is the rank_i+1 term. + "RANK" represents how many sentences are needed. + If two same terms are of different ranks, the smaller rank will be chosen. 
+ :param base: dic + :param reasoning_results: list[Task] + :param lower_ranks: list[dic] + :return: dic + """ + rkn = {} + for each_result in reasoning_results: + words = [each.word.replace("\"", "") for each in each_result.term.terms] # get sub-terms + for i, word in enumerate(words): + # if a sub-term is in the base, then all remaining sub-terms will be rank_next terms + if word in base: + for j in range(len(words)): + if j == i: + continue + elif words[j] in rkn: + rkn[words[j]] = [rkn[words[j]][0] + 1, (rkn[words[j]][1] + each_result.truth.e) / 2] + else: + rkn.update({words[j]: [1, each_result.truth.e]}) + tmp = deepcopy(rkn) + for each in rkn: + if rkn[each][0] < 3: + tmp.pop(each) + continue + for lower_rank in lower_ranks: + if each in lower_rank: + tmp.pop(each) + break + return tmp + + +def term2nl_util(term, ret): + if term[0] != "(": + if "." not in term: + return ret.union({term}) + else: + return ret.union({wn.synset(term).definition()}) + else: + sub_terms = term[4:-1].split(", ") + for sub_term in sub_terms: + ret = ret.union(term2nl_util(sub_term, ret)) + + return ret + + +def term2nl(term, connector=None): + if connector is not None: + components = term2nl_util(term, set()) + return connector.join(components) + else: + return list(term2nl_util(term, set()))[0] + + +def terms2nl(terms): + ret = [] + for term in terms: + try: + if term[1] == "&": + ret.append(term2nl(term, " and ")) + elif term[1] == "|": + ret.append(term2nl(term, " or ")) + else: + ret.append(term2nl(term)) + except: + continue + return ret + + +if __name__ == "__main__": + # some compound words cannot be found in wordnet directly, but can also be represented by NARS + # e.g., "car accident", (&, car, accident) + + narsese = words2narsese(["abuse", + "burglary", + "robbery", + "stealing", + "shooting", + "shoplifting", + "assault", + "fighting", + "arson", + "explosion", + "arrest", + "car", + "accident", + "vandalism", + "normal", + "event"]) + + original_labels = {"abuse": [1, 0.9], + "burglary": [1, 0.9], + "robbery": [1, 0.9], + "stealing": [1, 0.9], + "shooting": [1, 0.9], + "shoplifting": [1, 0.9], + "assault": [1, 0.9], + "fighting": [1, 0.9], + "arson": [1, 0.9], + "explosion": [1, 0.9], + "arrest": [1, 0.9], + "(&, car, accident)": [1, 0.9], + "(&, accident, car)": [1, 0.9], + "vandalism": [1, 0.9], + "(&, normal, event)": [1, 0.9], + "(&, event, normal)": [1, 0.9]} + + nars = Reasoner(100000, 100000) + + reasoning_results = handle_lines(nars, narsese + "\n1000") + + pos, neg = result_filtering(reasoning_results) + + rk1 = next_rank(original_labels, pos, [original_labels]) + A = sorted([[each, rk1[each][0], rk1[each][1]] for each in rk1], key=lambda x: x[2], reverse=True) + rk2 = next_rank(rk1, pos, [original_labels, rk1]) + rk3 = next_rank(rk2, pos, [original_labels, rk1, rk2]) + + print(1) + + # pos_terms = rk1.union(*[rk2, rk3]) + # pos_scores = sorted(terms_score(pos_terms, pos), key=lambda x: x[1], reverse=True)[:400] + # pos_terms = set([each[0] for each in pos_scores]) + # + # neg_terms = next_rank(original_labels.union(pos_terms), neg, original_labels.union(pos_terms)) + # + # pos_labels = terms2nl(pos_terms) + # neg_labels = terms2nl(neg_terms) + # + # print("pos labels: ", pos_labels) + # print("========") + # print("neg labels: ", neg_labels) + # + # with open("expanded labels ExAll pos.txt", "w") as file: + # for each in pos_labels: + # file.write(each + "\n") + # + # with open("expanded labels ExAll neg.txt", "w") as file: + # for each in neg_labels: + # file.write(each + "\n") diff 
--git a/pynars/NARS/Channels/__init__.py b/pynars/NARS/Channels/__init__.py new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/pynars/NARS/Channels/__init__.py @@ -0,0 +1 @@ +
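
With the channel code now living under pynars/NARS/Channels, a minimal usage sketch would be the following. It is untested, the import paths are assumptions based on the file layout introduced by this patch series, and the constructors (Buffer(size, memory), WordNetChannel(ID, buffer)) are the ones defined above.

    # Untested sketch; module paths assume the layout added in these patches.
    from pynars.NARS import Reasoner
    from pynars.NARS.DataStructures.MC.BufferMC import Buffer
    from pynars.NARS.Channels.WordNetChannel.WordNetChannel import WordNetChannel

    nars = Reasoner(1000, 1000)
    buffer = Buffer(100, nars.memory)        # priority buffer evaluated against NARS memory
    channel = WordNetChannel("WCN", buffer)

    # An empty query pops the highest-priority task remaining in the buffer (or None).
    print(channel.WordNetQuery())
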