make_flows.py
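"""Generate RapidPro flows for the content cards.

For each row of cards.csv, this script reads the card's content from
json/<doc-name>.json, fills in the matching flow template (puzzle, funfact or
game), replaces placeholder texts and UUIDs, attaches any referenced images,
and appends the resulting flow plus a "VMC<card number>" keyword trigger to the
container loaded from template_container.json. The result is written to
./output/new_newgenerated_flows.json, along with an updated flow info list in
./flows_info_new.json.
"""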
import json
import csv
import uuid
import re


# From https://stackoverflow.com/questions/9662346/python-code-to-remove-html-tags-from-a-string
def cleanhtml(raw_html):
    cleanr = re.compile('<.*?>')
    cleantext = re.sub(cleanr, '', raw_html)
    return cleantext


class UuidCollector:
    def __init__(self):
        self.uuids = dict()

    def collect_uuids(self, data):
        if type(data) is not dict:
            return
        for k, v in data.items():
            if k == "type" and v == "enter_flow":
                # We don't want to replace uuids of other flows, so don't recurse
                break
            if type(v) is dict:
                self.collect_uuids(v)
            elif type(v) is list:
                for entry in v:
                    self.collect_uuids(entry)
            elif k.find('uuid') != -1:
                # We found a field whose name contains "uuid".
                # We record this uuid and assign a replacement uuid.
                if v is not None:
                    self.uuids[v] = str(uuid.uuid4())  # Generate new random UUID to replace it with
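

# Collect every UUID appearing in the flow templates; each gets a freshly
# generated replacement that is substituted in when the flows are built below.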
uuid_collector = UuidCollector()
ftypes = ["puzzle", "funfact", "game"]
for ftype in ftypes:
    template = "template_" + ftype + ".json"
    with open(template, "r") as read_file:
        data = json.load(read_file)
    uuid_collector.collect_uuids(data)

# Per-type placeholder strings used in the flow templates (replacements[ftype]["name"] and ["texts"])
with open("replacements.json", "r") as f_replacements:
    replacements = json.load(f_replacements)

# Container that the generated flows and triggers get appended to
with open("template_container.json", "r") as container_file:
    container = json.load(container_file)

# Info about previously generated flows (flow name, doc name, uuid, card number)
with open("flows_info.json", "r") as flow_info_file:
    flow_info = json.load(flow_info_file)
flow_info = []  # Note: this discards the loaded info, so every flow below is treated as new

cardcsv = open('cards.csv')
reader = csv.reader(cardcsv)
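# cards.csv is expected to have one card per row: column 0 holds the card
# number (used for the "VMC<number>" trigger keyword) and column 1 the
# document name (used to locate json/<doc-name>.json and to match flow info).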
for row in reader:
    card = row[0]
    filebase = row[1].lower().replace(" ", "-")

    # Get flow info (if existing)
    corresp_flow_info = list(filter(lambda fl: fl["doc name"] == row[1], flow_info))
    if len(corresp_flow_info) == 1:
        have_flow_info = True
        corresp_flow_info = corresp_flow_info[0]
    elif len(corresp_flow_info) == 0:
        have_flow_info = False
    else:
        print("error: multiple info for flow " + row[1])
        break
    # Get the content-specific json file to read from
    filename = "json/" + filebase + ".json"
    with open(filename, "r") as content_file:
        data = json.load(content_file)

    # Get the type of content
    ftype = data["metadata"]["type"]
    ftype = ftype.lower().replace(" ", "")  # account for different spellings of "funfact"
    ftype_raw = ftype  # the raw type distinguishes between puzzle and counting
    if ftype == "counting":
        # counting uses the same format as puzzle
        ftype = "puzzle"

    # Get the type-specific flow template
    flow_fname = "template_" + ftype + ".json"
    if ftype == "puzzle" and "extension_2" not in data:
        flow_fname = "template_puzzle_1extension.json"  # puzzles have two extensions by default, but may have only one
    with open(flow_fname, "r") as template_file:
        flow_template = template_file.read()

    # Replace the title and all UUIDs
    flow_template = flow_template.replace(replacements[ftype]["name"], "Content-" + ftype_raw.title() + "-" + filebase)
    for k, v in uuid_collector.uuids.items():
        flow_template = flow_template.replace(k, v)

    # After all the text replacements, parse flow_template as JSON.
    new_flow = json.loads(flow_template)

    # If the flow is already in the flow info list, reuse its existing uuid;
    # otherwise, add a new entry with the info about this flow.
    if have_flow_info:
        new_flow.update(uuid=corresp_flow_info["uuid"])
    else:
        corresp_flow_info = {}
        corresp_flow_info["flow name"] = new_flow["name"]
        corresp_flow_info["doc name"] = row[1]
        corresp_flow_info["uuid"] = new_flow["uuid"]
        corresp_flow_info["card number"] = card
        flow_info.append(corresp_flow_info)

    # Collect text snippets to replace in the flow
    replacement_dict = dict()
    for repl in replacements[ftype]["texts"]:
        sections = repl.split(" - ")
        # Find the data corresponding to the subsection specified in the replacement
        current_section = data
        found = True
        try:
            # Find the subsection of interest
            for s in sections:
                current_section = current_section[s.strip().lower().replace(" ", "_")]
        except KeyError:
            if ftype != "puzzle" and sections[0] != "Extension 2":
                # It's ok for puzzles not to have a second extension
                print("Section " + repl + " not found in " + filebase)
            found = False
        if found:
            # References is a list of refs rather than a string
            if type(current_section) == list:
                current_section = ''.join(current_section)
            # if sections[0] != "Extension 2":
            if len(current_section) == 0:
                print("Warning: blank answer in Section " + repl + " of " + filebase)
                current_section = " "
            elif cleanhtml(current_section)[0] == '[':
                print("Warning: template answer in Section " + repl + " of " + filebase)
            # Put content of section into the replacement dict.
            replacement_dict[repl] = current_section

    # Check for NRICH references and remove the starting node if there aren't any.
    if ftype == "puzzle":
        # For puzzles: determine whether the puzzle is an NRICH puzzle.
        references = ''.join(data["additional_information"]["references"])
        # The references node should be the first node in the flow.
        assert new_flow["nodes"][0]["actions"][0]["text"] == "Additional information - References"
        if references.lower().find("nrich") != -1:
            # This puzzle is an NRICH puzzle. Fill the references node with NRICH information.
            # print("NRICH PUZZLE: " + filebase)
            new_flow["nodes"][0]["actions"][0]["text"] = "The following is based on an NRICH puzzle\\n https://nrich.maths.org/"
        else:
            # Not an NRICH puzzle. Remove the references node from flow and UI.
            # print("not NRICH PUZZLE: " + filebase)
            uuid_to_remove = new_flow["nodes"][0]["uuid"]
            del new_flow["nodes"][0]
            new_flow["_ui"]["nodes"].pop(uuid_to_remove)

    # # Replace all these text snippets.
    # # We do a lazy text replacement on the unparsed JSON.
    # for k, v in replacement_dict.items():
    #     flow_template = flow_template.replace(k, v)

    # For each flow node, check whether its text matches one of the placeholders.
    # If so, strip any <img> tags from the replacement text and add the images
    # as message attachments instead.
    for node in new_flow["nodes"]:
        if "actions" in node:
            for action in node["actions"]:
                if "text" not in action:
                    continue
                text = action["text"]
                for k, v in replacement_dict.items():
                    if text == k:
                        paragraphs = v.split("</p>\n<p>")
                        paragraphs[0] = paragraphs[0].replace("<p>", "")
                        paragraphs[-1] = paragraphs[-1].replace("</p>", "")
                        images = re.findall(r'<img.*?>', v)
                        for image in images:
                            image_filename = re.search(r'src=\".*?\"', image).group()[12:-1]
                            # print(image[4:-1])
                            action["attachments"].append("@(fields.image_path & \"{}\")".format(image_filename))
                        v_stripped = re.sub(r'<img.*?>', "", v).strip()  # strip image tags and trailing whitespace
                        # We also strip the remaining HTML tags and collapse double linebreaks,
                        # since RapidPro message text doesn't support this formatting.
                        v_stripped = cleanhtml(v_stripped).replace("\n\n", "\n")
                        action["text"] = v_stripped
        if "router" in node and "cases" in node["router"]:
            for case in node["router"]["cases"]:
                if "arguments" not in case:
                    continue
                for i, arg in enumerate(case["arguments"]):
                    for k, v in replacement_dict.items():
                        if arg == k:
                            case["arguments"][i] = v

    # Add this flow to the template container.
    container["flows"].append(new_flow)

    # Create a keyword trigger for the flow
    new_trigger = {
        "trigger_type": "K",
        "keyword": "VMC" + corresp_flow_info["card number"],
        "flow": {
            "uuid": corresp_flow_info["uuid"],
            "name": corresp_flow_info["flow name"]
        },
        "groups": [],
        "channel": None
    }
    # Add the trigger to the container
    container["triggers"].append(new_trigger)

# Save the filled-up template container as a JSON file
with open("./output/new_newgenerated_flows.json", "w") as generated_flows:
    json.dump(container, generated_flows, indent=2)  # ensure_ascii=False

# Update the flow info file if flows were added
with open('./flows_info_new.json', 'w') as outfile:
    json.dump(flow_info, outfile, indent=2)
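
# For reference, each entry written to flows_info_new.json has this shape
# (values here are illustrative, not real data):
# {
#     "flow name": "Content-Puzzle-some-card",
#     "doc name": "Some Card",
#     "uuid": "<generated flow uuid>",
#     "card number": "17"
# }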