eliza.py: a minimal ELIZA-style chatbot that compiles its pattern rules into Python match-case statements at run time.

import re
from flatten import flatten
import random
from eliza_rules import rule_responses
from functools import cache
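
# `flatten` (local module, not shown here) is assumed to turn a list of
# (start, end) span tuples into a flat, ordered list of indices,
# e.g. [(7, 9)] -> [7, 9].
# `rule_responses` (from eliza_rules, also not shown) is assumed to map rule
# strings containing ?*x / ?+x placeholders to lists of response templates that
# reuse the same variable names, e.g. (hypothetical):
#     {'?*x我想?*y': ['你为什么想?y']}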

# Matches a placeholder such as ?*x or ?+x; group 2 captures the variable name.
PAT = re.compile(r'(\?[*+](\w))')
# Strips the quotes around a single-letter string so it becomes a bare capture
# variable in the generated match-case pattern, e.g. 'x' -> x.
CLEAN_1 = re.compile(r"'([a-z])'")
# Finds ?x placeholders in a response template so they can become f-string fields.
CLEAN_2 = re.compile(r'\?([a-z])')
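
# Illustrative substitutions, assuming rules of the form '?*x我想?*y':
#     PAT.sub(r'\g<2>', '?*x我想?*y')              -> 'x我想y'
#     CLEAN_1.sub(r'\g<1>', "['x', '我想', 'y']")  -> "[x, '我想', y]"
#     CLEAN_2.sub(r'{\g<1>}', '你为什么想?y')       -> '你为什么想{y}'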

def split_by_rule(text, rule):
    not_placeholders = [s.strip() for s in PAT.sub(' ', rule).split()]
    return split_by(text, not_placeholders)

def split_by(string, keywords):
    pat = re.compile('|'.join(keywords))
    spans = [s.span() for s in pat.finditer(string)]
    all_split_indices = flatten(spans)
    if all_split_indices and all_split_indices[0] != 0:
        all_split_indices = [0] + all_split_indices
    if all_split_indices and all_split_indices[-1] != len(string):
        all_split_indices.append(len(string))
    split_sentence = [string[index:all_split_indices[i + 1]]
                      for i, index in enumerate(all_split_indices[:-1])]
    return split_sentence
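
# A minimal usage sketch, assuming the hypothetical rule '?*x我想?*y' exists
# in eliza_rules:
#     split_by_rule('Minquan我想开飞机', '?*x我想?*y')
#     -> ['Minquan', '我想', '开飞机']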

@cache
def generate_match_case_by_rule(rule):
    # Turn a rule string into the source of a match-case pattern: placeholders
    # become bare capture variables, literal keywords stay quoted strings.
    split_rule = split_by_rule(rule, rule)
    rule_clean = PAT.sub(r'\g<2>', str(split_rule))
    rule_clean = CLEAN_1.sub(r'\g<1>', rule_clean)
    return rule_clean
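
# Continuing the sketch above, the generated pattern source would be:
#     generate_match_case_by_rule('?*x我想?*y')  ->  "[x, '我想', y]"
# i.e. a match-case pattern that binds x and y to the surrounding text.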

@cache
def get_clean_2_sub(string):
    # Rewrite ?x placeholders in a response template as f-string fields: ?x -> {x}.
    return CLEAN_2.sub(r'{\g<1>}', string)


def give_response(response):
    # Prepare every candidate response template for later interpolation.
    var_with_arg = [get_clean_2_sub(s) for s in response]
    return var_with_arg
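
# For example, with the hypothetical response template used above:
#     give_response(['你为什么想?y'])  ->  ['你为什么想{y}']
# The {y} field is resolved later, inside the generated matcher.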

def text_match(split_text, rule, response):
    pattern = generate_match_case_by_rule(rule)
    res = random.choice(give_response(response))
    # {pattern}, {''.join(split_text)} and {res} are interpolated *now*, while
    # the {x}/{y} fields inside res survive into the generated f-string and are
    # resolved from the variables bound by the case pattern when _match runs.
    script = f"""def _match(split_text):
    match split_text:
        case {pattern}:
            # print(split_text)
            # print('matched')
            print(f"Q: {''.join(split_text)}")
            print(f"A: {res}")
            return True
        case _:
            # print('no match')
            return False
"""
    print(f"Generated program: {script}")
    # Run the generated source in its own namespace and call the matcher.
    namespace = {}
    exec(script, namespace)
    return namespace['_match'](split_text)
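
# Putting it together: for the input 'Minquan我想开飞机' and the hypothetical
# rule '?*x我想?*y' with response '你为什么想?y', the generated source would
# look roughly like this (commented-out debug lines omitted):
#
#     def _match(split_text):
#         match split_text:
#             case [x, '我想', y]:
#                 print(f"Q: Minquan我想开飞机")
#                 print(f"A: 你为什么想{y}")
#                 return True
#             case _:
#                 return False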

if __name__ == '__main__':
    # Test inputs are kept in Chinese because the rules in eliza_rules match
    # Chinese keywords (e.g. "Minquan我想开飞机" = "Minquan, I want to fly a plane").
    test_cases = [
        "Minquan我想开飞机",
        "我觉得这个世界可能是虚拟的",
        "医生我昨天梦见一只山羊",
        "医生你为什么不去读个博士",
        "简直就是开玩笑",
        "大壮和小强和小明还有秃头都是很坏的人"
    ]
    for text in test_cases:
        # Try every rule against every input; rules that match print a Q/A pair.
        for rule, response in rule_responses.items():
            text_match(split_by_rule(text, rule), rule, response)