# This is a very basic tool that lets you provide FOLIO with a CSV file of settings UUIDs;
# FOLIO then tells you which circulation policies would be applied for each combination.
##
# It should function in FOLIO for the Lotus release and later - in Kiwi, there is a permission
# issue that prevents the overdue and lost item policies from being retrieved.
##
# Your input file should be in CSV format like so:
##
# patron_type_id,loan_type_id,item_type_id,location_id
# patrontypeUUID,loantypeUUID,itemtypeUUID,locationUUID
# ...
# ...
# ...
##
# "item_type" in this script is referring to what appears as "material type" in the UI - the API calls it
# item type, I think that is tech debt from very early project decisions.
##
# You must run this as a user who has the following specific permissions:
##
# circulation.rules.loan-policy.get
# circulation.rules.overdue-fine-policy.get
# circulation.rules.lost-item-policy.get
# circulation.rules.request-policy.get
# circulation.rules.notice-policy.get
##
# These permissions are hidden by default, so you will need administrator access to assign these permissions to a user.
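##
# To run it, put your CSV of scenarios in the directory you run the script from and invoke it
# with python; it will prompt for the environment to test against (and, for snapshot, a token).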
import requests
import csv
import sys
from time import perf_counter
from datetime import datetime
import tk_token
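# tk_token is presumably a local helper module that holds the Okapi token used for the
# snapshot2 environment; based on how it is used below, a minimal sketch would be:
#
#   # tk_token.py
#   tk = {"token": "<your okapi token for snapshot2>"}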


def fetch_json(server, session, *args):
    if args:
        url = f'{server}{"".join(args)}'
    else:
        url = server
    req = session.get(url)
    return req.json()
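# fetch_json is used two ways below: with a base server plus a path fragment, e.g.
#   fetch_json(testServer, s, '/groups?limit=1000')
# and with an already-complete URL and no extra fragments, e.g.
#   fetch_json(urlLoanPolicy, s)
# either way it returns the parsed JSON body as a Python dict.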


def make_friendly(id, json_list, key):
    # scan a list of FOLIO records and return the requested field from the record
    # whose UUID matches; returns None if there is no match
    for i in json_list:
        if i['id'] == id:
            return i[key]


def loc_dict_maker(loc_array):
    output = {}
    for i in loc_array:
        output[i['id']] = {'code': i['code'], 'libloc': i['libraryId']}
    return output
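# loc_dict_maker turns the /locations response into a lookup keyed by location UUID,
# roughly of the form:
#   {'<location UUID>': {'code': '<location code>', 'libloc': '<library UUID>'}}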


def makeUrl(base_url, endpoint, loan_type, item_type, patron_type, location_type):
    return f"{base_url}{endpoint}loan_type_id={loan_type}&item_type_id={item_type}&patron_type_id={patron_type}&location_id={location_type}"
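# makeUrl just assembles the circulation-rules query string; for example, the loan policy
# URL built in the loop below looks like:
#   <okapi base>/circulation/rules/loan-policy?loan_type_id=<uuid>&item_type_id=<uuid>&patron_type_id=<uuid>&location_id=<uuid>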


def main():
    # Set up variables for use in the script
    # If you are repurposing this for another institution, you'll want to add the
    # appropriate Okapi URLs, tenant names, tokens, etc. and make sure
    # that the appropriate points throughout the script use your variables.
    # Okapi environments that can be used
    snapshotEnvironment = "https://folio-snapshot-okapi.dev.folio.org"
    snapshot2Environment = "https://okapi-fivecolleges.folio.ebsco.com"
    # tenant names
    snapshotTenant = "diku"
    snapshot2Tenant = "fs00001006"
    # headers for use with forming API calls
    snapshotPostHeaders = {
        'x-okapi-tenant': snapshotTenant,
        'x-okapi-token': "snapshotToken",  # placeholder; replaced with the token entered at the prompt below
        'Content-Type': 'application/json'
    }
    snapshot2PostHeaders = {
        'Content-Type': 'application/json',
        'x-okapi-tenant': snapshot2Tenant,
        'x-okapi-token': tk_token.tk["token"]
    }
    # Now you can start asking for input from the person running the script
    # They need to specify the name of the server they want to test on
    ##
    # Again, if you are tweaking this for another environment, you need to make the
    # appropriate updates here.
    environment = input(
        "What server do you want to test on? (snapshot, snapshot2) ")
    if environment == 'snapshot':
        testServer = snapshotEnvironment
        postHeaders = snapshotPostHeaders
        snapshotToken = input("provide the token for snapshot ")
        postHeaders['x-okapi-token'] = snapshotToken
    elif environment == 'snapshot2':
        testServer = snapshot2Environment
        postHeaders = snapshot2PostHeaders
        snapshotToken = tk_token.tk["token"]
    else:
        sys.exit("Unrecognized server name - run the script again and choose snapshot or snapshot2.")
    # fetch the settings records to query in the script up front; doing these lookups once
    # makes things faster than fetching them for every row
    s = requests.Session()
    s.headers = postHeaders
    # fetch patron groups
    patronGroupsJson = fetch_json(testServer, s, '/groups?limit=1000')
    # fetch loan types
    loanTypesJson = fetch_json(testServer, s, '/loan-types?limit=1000')
    # fetch material types
    materialTypesJson = fetch_json(testServer, s, '/material-types?limit=1000')
    # fetch locations
    locationsJson = fetch_json(testServer, s, '/locations?limit=1500')
    locdict = loc_dict_maker(locationsJson['locations'])
    # fetch loan policies
    loanPoliciesJson = fetch_json(
        testServer, s, '/loan-policy-storage/loan-policies?limit=500')
    # fetch notice policies
    noticePoliciesJson = fetch_json(
        testServer, s, '/patron-notice-policy-storage/patron-notice-policies?limit=100')
    # fetch request policies
    requestPoliciesJson = fetch_json(
        testServer, s, '/request-policy-storage/request-policies?limit=50')
    # fetch overdue policies
    overduePoliciesJson = fetch_json(
        testServer, s, '/overdue-fines-policies?limit=100')
    # fetch lost item policies
    lostItemPoliciesJson = fetch_json(
        testServer, s, '/lost-item-fees-policies?limit=100')
    # fetch libraries
    librariesJson = fetch_json(
        testServer, s, '/location-units/libraries?limit=100')
    # open the file with test information - assumes the name of the file is loan_tester.csv, but that's easy to change
    #
    # encoding='utf-8-sig' tells Python to compensate for Excel encoding
    # the first row should have four values - patron_type_id, item_type_id, loan_type_id, location_id
    # then you put in the values for each loan scenario as a row in the file
    #
    # values are specified as UUIDs, but output will use the friendly names.
    # the API calls the material type id the "item type id" - a tech debt artifact from early FOLIO, I think
    initialFile = open('loan_tester.csv', newline='', encoding='utf-8-sig')
    # create a python list to store the results with friendly names that you want to put into a file
    final_output = []
    # turn your file of patron/loan/material type/location rows into python dictionaries that can be
    # used to query the APIs
    testLoanScenarios = csv.DictReader(initialFile)
    pftime = perf_counter()
    startTime = datetime.now()
    count = 0
    for count, row in enumerate(testLoanScenarios):
        friendlyResults = {}
        # provides a simple counter and output to show the script is still running
        print(count, row)
        # first thing is to pull the UUIDs; you'll need these to look up the friendly names and to
        # correctly form the API call to see what policy comes back
        patron_type_id, loan_type_id, item_type_id, location_id = (
            row["patron_type_id"], row["loan_type_id"], row["item_type_id"], row["location_id"])
        # pull patron_type_id friendly name
        friendlyResults['patron_group'] = make_friendly(
            patron_type_id, patronGroupsJson['usergroups'], 'group')
        if friendlyResults['patron_group'] is None:
            friendlyResults['patron_group'] = "Patron group not found"
        # pull loan type friendly name
        friendlyResults['loan_type'] = make_friendly(
            loan_type_id, loanTypesJson['loantypes'], 'name')
        if friendlyResults['loan_type'] is None:
            friendlyResults['loan_type'] = "Loan type not found"
        # pull material type friendly name (the API refers to it as item_type_id)
        friendlyResults['material_type'] = make_friendly(
            item_type_id, materialTypesJson['mtypes'], 'name')
        if friendlyResults['material_type'] is None:
            friendlyResults['material_type'] = "Material type not found"
        # pull location friendly name - using location code since a lot of our location names have commas in them,
        # which makes working with CSV a little too messy
        #
        # also pulling library friendly name so that it can be used in sorting/reviewing results in the
        # output file
        # look up the location id in locdict and return the code and library id
        if location_id in locdict:
            friendlyResults['locations'] = locdict[location_id]['code']
            libloc = locdict[location_id]['libloc']
            friendlyResults['libraryName'] = make_friendly(
                libloc, librariesJson['loclibs'], 'name')  # and pull the name
            if friendlyResults['libraryName'] is None:
                friendlyResults['libraryName'] = "Library not found"
        else:
            friendlyResults['locations'] = "Location not found"
            friendlyResults['libraryName'] = "Library not found"
        # now, you'll use the UUID values to query the APIs, get the results back, and then form
        # the full row in friendlyResults with the friendly names
        # first, let's make the URLs
        urlLoanPolicy = makeUrl(testServer, '/circulation/rules/loan-policy?',
                                loan_type_id, item_type_id, patron_type_id, location_id)
        urlRequestPolicy = makeUrl(testServer, '/circulation/rules/request-policy?',
                                   loan_type_id, item_type_id, patron_type_id, location_id)
        urlNoticePolicy = makeUrl(testServer, '/circulation/rules/notice-policy?',
                                  loan_type_id, item_type_id, patron_type_id, location_id)
        urlOverduePolicy = makeUrl(testServer, '/circulation/rules/overdue-fine-policy?',
                                   loan_type_id, item_type_id, patron_type_id, location_id)
        urlLostItemPolicy = makeUrl(testServer, '/circulation/rules/lost-item-policy?',
                                    loan_type_id, item_type_id, patron_type_id, location_id)
        # now, check all of the policies.
        #
        # you could make one giant loop for this, but it seemed like there was a bit of a performance
        # improvement from doing individual lookups through the smaller chunks of data / discrete sections
        postLoanPoliciesJson = fetch_json(urlLoanPolicy, s)
        friendlyResults['loanPolicy'] = make_friendly(
            postLoanPoliciesJson['loanPolicyId'], loanPoliciesJson['loanPolicies'], 'name')
        postRequestPoliciesJson = fetch_json(urlRequestPolicy, s)
        friendlyResults['requestPolicy'] = make_friendly(
            postRequestPoliciesJson['requestPolicyId'], requestPoliciesJson['requestPolicies'], 'name')
        postNoticePoliciesJson = fetch_json(urlNoticePolicy, s)
        friendlyResults['noticePolicy'] = make_friendly(
            postNoticePoliciesJson['noticePolicyId'], noticePoliciesJson['patronNoticePolicies'], 'name')
        postOverduePoliciesJson = fetch_json(urlOverduePolicy, s)
        friendlyResults['overduePolicy'] = make_friendly(
            postOverduePoliciesJson['overdueFinePolicyId'], overduePoliciesJson['overdueFinePolicies'], 'name')
        postLostItemPoliciesJson = fetch_json(urlLostItemPolicy, s)
        friendlyResults['lostItemPolicy'] = make_friendly(
            postLostItemPoliciesJson['lostItemPolicyId'], lostItemPoliciesJson['lostItemFeePolicies'], 'name')
        print(friendlyResults)
        final_output.append(friendlyResults)
    with open("friendlyOutput-%s.csv" % startTime.strftime("%d-%m-%Y-%H%M%S"), 'w', newline='') as output_file:
        headers = ['loan_type', 'patron_group', 'libraryName', 'locations', 'material_type',
                   'loanPolicy', 'overduePolicy', 'noticePolicy', 'lostItemPolicy', 'requestPolicy']
        writer = csv.DictWriter(output_file, fieldnames=headers)
        writer.writeheader()
        for row in final_output:
            writer.writerow(row)
    # close the initial file of scenarios
    initialFile.close()
    # when the tester is finally done, give some basic timing information so you
    # know how long it took
    etime = perf_counter()
    print(f"{count+1} records processed in {etime - pftime:0.4f} seconds")


if __name__ == "__main__":
    main()