diff --git a/.gitignore b/.gitignore
index 219c60e..8882be7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,5 @@
 .venv
+.vscode
 .DS_STORE
 tmp
-dist
\ No newline at end of file
+dist
diff --git a/src/boardlib/api/aurora.py b/src/boardlib/api/aurora.py
index 7639bff..fa76293 100644
--- a/src/boardlib/api/aurora.py
+++ b/src/boardlib/api/aurora.py
@@ -48,6 +48,35 @@
     34: "9A",
 }
 
+SHARED_TABLES = [
+    "products",
+    "product_sizes",
+    "holes",
+    "leds",
+    "products_angles",
+    "layouts",
+    "product_sizes_layouts_sets",
+    "placements",
+    "sets",
+    "placement_roles",
+    "climbs",
+    "climb_stats",
+    "beta_links",
+    "attempts",
+    "kits",
+]
+
+USER_TABLES = [
+    "users",
+    "walls",
+    "wall_expungements",
+    "draft_climbs",
+    "ascents",
+    "bids",
+    "tags",
+    "circuits",
+]
+
 
 def login(board, username, password):
     response = requests.post(
@@ -116,6 +145,16 @@ def get_climb_stats(board, token, climb_id, angle):
     return response.json()
 
 
+def get_climb(board, token, climb_id, angle):
+    response = requests.get(
+        f"{API_HOSTS[board]}/v1/climbs/{climb_id}/info",
+        headers={"authorization": f"Bearer {token}"},
+        params={"angle": angle},
+    )
+    response.raise_for_status()
+    return response.json()
+
+
 def get_climb_name(board, climb_id):
     response = requests.get(
         f"{WEB_HOSTS[board]}/climbs/{climb_id}",
@@ -124,7 +163,16 @@ def get_climb_name(board, climb_id):
     return bs4.BeautifulSoup(response.text, "html.parser").find("h1").text
 
 
-def sync(board, token, user_id, tables=[], walls=[], wall_expungements=[]):
+def user_sync(
+    board,
+    token,
+    user_id,
+    tables=[],
+    walls=[],
+    wall_expungements=[],
+    shared_syncs=[],
+    user_syncs=[],
+):
     """
     :param tables: list of tables to download. The following are valid:
         "products",
         "product_sizes",
         "holes",
         "leds",
         "products_angles",
         "layouts",
         "product_sizes_layouts_sets",
         "placements",
         "sets",
         "placement_roles",
         "climbs",
         "climb_stats",
         "beta_links",
         "attempts",
         "kits",
@@ -153,6 +201,10 @@ def sync(board, token, user_id, tables=[], walls=[], wall_expungements=[]):
 
     :param walls: list of walls to upload
     :param wall_expungements: list of walls to delete
+    :param shared_syncs: list of dicts of the form {"table_name": ..., "last_synchronized_at": ...},
+        e.g. [{'table_name': 'climbs', 'last_synchronized_at': '2023-06-07 20:36:41.578003'}].
+        The largest table (climbs) does not appear to synchronize unless it has a shared_sync entry with last_synchronized_at set.
+    :param user_syncs: list of dicts of the same form for the user-specific tables.
     """
     response = requests.post(
         f"{API_HOSTS[board]}/v1/sync",
@@ -167,8 +219,8 @@ def sync(board, token, user_id, tables=[], walls=[], wall_expungements=[]):
             "GET": {
                 "query": {
                     "syncs": {
-                        "shared_syncs": [],
-                        "user_syncs": [],
+                        "shared_syncs": shared_syncs,
+                        "user_syncs": user_syncs,
                     },
                     "tables": tables,
                     "user_id": user_id,
@@ -187,6 +239,57 @@ def sync(board, token, user_id, tables=[], walls=[], wall_expungements=[]):
     return response.json()
 
 
+def shared_sync(
+    board,
+    tables=[],
+    shared_syncs=[],
+):
+    """
+    Shared syncs download the publicly shared tables from the board API and do not require authentication.
+
+    :param tables: list of tables to download. The following are valid:
+        "products",
+        "product_sizes",
+        "holes",
+        "leds",
+        "products_angles",
+        "layouts",
+        "product_sizes_layouts_sets",
+        "placements",
+        "sets",
+        "placement_roles",
+        "climbs",
+        "climb_stats",
+        "beta_links",
+        "attempts",
+        "kits",
+    """
+    response = requests.post(
+        f"{API_HOSTS[board]}/v1/sync",
+        json={
+            "client": {
+                "enforces_product_passwords": 1,
+                "enforces_layout_passwords": 1,
+                "manages_power_responsibly": 1,
+                "ufd": 1,
+            },
+            "GET": {
+                "query": {
+                    "syncs": {
+                        "shared_syncs": shared_syncs,
+                    },
+                    "tables": tables,
+                    "include_multiframe_climbs": 1,
+                    "include_all_beta_links": 1,
+                    "include_null_climb_stats": 1,
+                }
+            },
+        },
+    )
+    response.raise_for_status()
+    return response.json()
+
+
 def logbook_entries(board, username, password, grade_type="font"):
     login_info = login(board, username, password)
     raw_entries = get_logbook(board, login_info["token"], login_info["user_id"])
diff --git a/src/boardlib/db/__init__.py b/src/boardlib/db/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/boardlib/db/aurora.py b/src/boardlib/db/aurora.py
new file mode 100644
index 0000000..f4fa004
--- /dev/null
+++ b/src/boardlib/db/aurora.py
@@ -0,0 +1,112 @@
+import collections
+import io
+import sqlite3
+import zipfile
+
+import requests
+
+import boardlib.api.aurora
+
+
+APP_PACKAGE_NAMES = {
+    "aurora": "auroraboard",
+    "decoy": "decoyboard",
+    "grasshopper": "grasshopperboard",
+    "kilter": "kilterboard",
+    "tension": "tensionboard2",
+    "touchstone": "touchstoneboard",
+}
+
+
+def download_database(board, output_file):
+    """
+    The sqlite3 database is stored in the assets folder of the APK files for the Android app of each board.
+
+    This function downloads the latest APK file for the board's Android app and extracts the database from it.
+    :param board: The board to download the database for.
+    :param output_file: The file to write the database to.
+    """
+    response = requests.get(
+        f"https://d.apkpure.com/b/APK/com.auroraclimbing.{APP_PACKAGE_NAMES[board]}",
+        params={"version": "latest"},
+        # Some user agent is required; the download returns a 403 without one.
+        headers={
+            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36"
+        },
+    )
+    response.raise_for_status()
+    apk_file = io.BytesIO(response.content)
+    with zipfile.ZipFile(apk_file, "r") as zip_file:
+        with open(output_file, "wb") as output_file:
+            output_file.write(zip_file.read("assets/db.sqlite3"))
+
+
+def sync_shared_tables(board, database):
+    """
+    Syncs the shared (public) tables from the remote database to the local database.
+    If the last sync is too old, the remote may respond with an empty object.
+    There appears to be a limit on how much data can be transferred in a single sync, but the limit is opaque.
+
+    :param board: The board to sync the database for.
+    :param database: The sqlite3 database file to sync.
+    """
+    with sqlite3.connect(database) as connection:
+        result = connection.execute(
+            "SELECT table_name, last_synchronized_at FROM shared_syncs"
+        )
+        shared_syncs = [
+            {"table_name": table_name, "last_synchronized_at": last_synchronized_at}
+            for table_name, last_synchronized_at in result.fetchall()
+        ]
+        shared_sync_result = boardlib.api.aurora.shared_sync(
+            board, tables=boardlib.api.aurora.SHARED_TABLES, shared_syncs=shared_syncs
+        )
+        for table_name, rows in shared_sync_result["PUT"].items():
+            ROW_INSERTERS.get(table_name, insert_rows_default)(
+                connection, table_name, rows
+            )
+
+
+def insert_rows_default(connection, table_name, rows):
+    # Build named parameters (:column) from the table's columns (PRAGMA table_info)
+    # so each row dict binds by column name; missing keys default to None.
+    pragma_result = connection.execute(f"PRAGMA table_info('{table_name}')")
+    value_params = ", ".join(f":{row[1]}" for row in pragma_result.fetchall())
+    connection.executemany(
+        f"INSERT OR REPLACE INTO {table_name} VALUES ({value_params})",
+        (collections.defaultdict(lambda: None, row) for row in rows),
+    )
+
+
+def insert_rows_climb_stats(connection, table_name, rows):
+    # display_difficulty prefers benchmark_difficulty and falls back to
+    # difficulty_average; rows left without a difficulty are deleted locally.
+    pragma_result = connection.execute(f"PRAGMA table_info('{table_name}')")
+    value_params = ", ".join(f":{row[1]}" for row in pragma_result.fetchall())
+    insert_rows = []
+    delete_rows = []
+    for row in rows:
+        row_dict = collections.defaultdict(
+            lambda: None,
+            row,
+            display_difficulty=row["benchmark_difficulty"]
+            if row.get("benchmark_difficulty")
+            else row["difficulty_average"],
+        )
+        row_list = insert_rows if row_dict["display_difficulty"] else delete_rows
+        row_list.append(row_dict)
+
+    connection.executemany(
+        f"INSERT OR REPLACE INTO {table_name} VALUES ({value_params})",
+        insert_rows,
+    )
+    for row in delete_rows:
+        connection.execute(
+            f"DELETE FROM {table_name} WHERE climb_uuid = :climb_uuid AND angle = :angle",
+            row,
+        )
+
+
+ROW_INSERTERS = {
+    "climb_stats": insert_rows_climb_stats,
+}
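
Usage note (not part of the patch): a minimal sketch of how the new helpers fit together. The board name, database path, and credential placeholders are illustrative, not values taken from this change.

import boardlib.api.aurora
import boardlib.db.aurora

BOARD = "kilter"       # any supported board name
DB_PATH = "kilter.db"  # illustrative local path

# Seed a local SQLite database from the assets of the board's Android APK.
boardlib.db.aurora.download_database(BOARD, DB_PATH)

# Pull shared-table rows (climbs, climb_stats, ...) published since the APK was built.
boardlib.db.aurora.sync_shared_tables(BOARD, DB_PATH)

# User-specific tables (ascents, bids, ...) still require an authenticated user_sync.
login_info = boardlib.api.aurora.login(BOARD, "<username>", "<password>")
user_data = boardlib.api.aurora.user_sync(
    BOARD,
    login_info["token"],
    login_info["user_id"],
    tables=boardlib.api.aurora.USER_TABLES,
)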
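
Once synced, the local database can be queried directly. The sketch below uses only the climb_stats columns referenced in this patch (climb_uuid, angle, display_difficulty); any other column names would need to be checked against the actual schema.

import sqlite3

with sqlite3.connect("kilter.db") as connection:  # path from the sketch above
    hardest = connection.execute(
        "SELECT climb_uuid, angle, display_difficulty "
        "FROM climb_stats ORDER BY display_difficulty DESC LIMIT 10"
    ).fetchall()

for climb_uuid, angle, display_difficulty in hardest:
    print(climb_uuid, angle, display_difficulty)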