From 4377815c2a89d78e334271fac6ae1a62fd128443 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michael=20Loipf=C3=BChrer?= Date: Tue, 31 Oct 2023 21:30:03 +0100 Subject: [PATCH] refactor: split up database code and revisions --- abrechnung/admin.py | 10 +- abrechnung/application/__init__.py | 57 -- abrechnung/application/accounts.py | 432 +++++---- abrechnung/application/groups.py | 917 +++++++++--------- abrechnung/application/transactions.py | 792 +++++++-------- abrechnung/application/users.py | 517 +++++----- abrechnung/config.py | 10 +- abrechnung/core/__init__.py | 0 abrechnung/core/auth.py | 46 + abrechnung/core/errors.py | 6 + abrechnung/core/service.py | 9 + abrechnung/database/cli.py | 67 +- abrechnung/database/code/0001_views.sql | 415 ++++++++ abrechnung/database/code/0002_triggers.sql | 540 +++++++++++ abrechnung/database/code/0003_constraints.sql | 465 +++++++++ abrechnung/database/code/0004_functions.sql | 598 ++++++++++++ abrechnung/database/database.py | 57 -- abrechnung/database/migrations.py | 47 + .../revisions/0001_initial_schema.sql | 513 +--------- .../database/revisions/0002_subscriptions.sql | 640 ------------ .../revisions/0003_purchase_items.sql | 376 +------ .../revisions/0004_relax_db_constraints.sql | 3 - .../revisions/0005_partial_data_fetching.sql | 30 - .../database/revisions/0008_file_upload.sql | 626 +----------- .../revisions/0009_robust_notifications.sql | 669 +------------ .../revisions/0010_clearing_accounts.sql | 324 ------- .../revisions/0012_correct_change_dates.sql | 114 --- .../0014_associate_accounts_with_users.sql | 127 --- ...5_metadata_fields_and_revision_changed.sql | 835 ---------------- abrechnung/database/revisions/__init__.py | 149 --- abrechnung/demo.py | 4 +- abrechnung/framework/__init__.py | 0 abrechnung/framework/async_utils.py | 44 + abrechnung/framework/database.py | 426 ++++++++ abrechnung/framework/decorators.py | 26 + abrechnung/http/cli.py | 6 +- abrechnung/http/routers/auth.py | 2 +- 
abrechnung/http/routers/websocket.py | 3 +- abrechnung/mailer.py | 6 +- tests/common.py | 8 +- tests/http_tests/common.py | 2 +- tests/http_tests/test_auth.py | 2 +- tests/http_tests/test_groups.py | 8 +- tests/test_auth.py | 2 +- tests/test_mailer.py | 6 +- tests/test_transaction_logic.py | 2 +- tools/create_revision.py | 7 +- tools/generate_dummy_data.py | 5 +- 48 files changed, 4021 insertions(+), 5929 deletions(-) create mode 100644 abrechnung/core/__init__.py create mode 100644 abrechnung/core/auth.py create mode 100644 abrechnung/core/errors.py create mode 100644 abrechnung/core/service.py create mode 100644 abrechnung/database/code/0001_views.sql create mode 100644 abrechnung/database/code/0002_triggers.sql create mode 100644 abrechnung/database/code/0003_constraints.sql create mode 100644 abrechnung/database/code/0004_functions.sql delete mode 100644 abrechnung/database/database.py create mode 100644 abrechnung/database/migrations.py delete mode 100644 abrechnung/database/revisions/__init__.py create mode 100644 abrechnung/framework/__init__.py create mode 100644 abrechnung/framework/async_utils.py create mode 100644 abrechnung/framework/database.py create mode 100644 abrechnung/framework/decorators.py diff --git a/abrechnung/admin.py b/abrechnung/admin.py index d21836bd..ce8c62e9 100644 --- a/abrechnung/admin.py +++ b/abrechnung/admin.py @@ -5,7 +5,7 @@ from abrechnung import subcommand from abrechnung.application.users import UserService from abrechnung.config import Config -from abrechnung.database.database import create_db_pool +from abrechnung.framework.database import create_db_pool class AdminCli(subcommand.SubCommand): @@ -36,13 +36,15 @@ async def handle_create_user_command(self): print("Passwords do not match!") return - db_pool = await create_db_pool(self.config) + db_pool = await create_db_pool(self.config.database) user_service = UserService(db_pool, self.config) user_service.enable_registration = True if self.args["skip_email_check"]: 
user_service.valid_email_domains = None - await user_service.register_user( - username=self.args["name"], email=self.args["email"], password=password + await user_service.register_user( # pylint: disable=missing-kwoa + username=self.args["name"], + email=self.args["email"], + password=password, ) async def run(self): diff --git a/abrechnung/application/__init__.py b/abrechnung/application/__init__.py index b3c1219f..21021da9 100644 --- a/abrechnung/application/__init__.py +++ b/abrechnung/application/__init__.py @@ -5,60 +5,3 @@ from abrechnung.config import Config from abrechnung.domain.users import User - - -class NotFoundError(Exception): - pass - - -class InvalidCommand(Exception): - pass - - -class Application: - def __init__(self, db_pool: Pool, config: Config): - self.db_pool = db_pool - self.cfg = config - - -async def check_group_permissions( - conn: asyncpg.Connection, - group_id: int, - user: User, - is_owner: bool = False, - can_write: bool = False, -) -> tuple[bool, bool]: - membership = await conn.fetchrow( - "select is_owner, can_write from group_membership where group_id = $1 and user_id = $2", - group_id, - user.id, - ) - if membership is None: - raise NotFoundError(f"group not found") - - if can_write and not (membership["is_owner"] or membership["can_write"]): - raise PermissionError(f"write access to group denied") - - if is_owner and not membership["is_owner"]: - raise PermissionError(f"owner access to group denied") - - return membership["can_write"], membership["is_owner"] - - -async def create_group_log( - conn: asyncpg.Connection, - group_id: int, - user: User, - type: str, - message: Optional[str] = None, - affected_user_id: Optional[int] = None, -): - await conn.execute( - "insert into group_log (group_id, user_id, type, message, affected) " - "values ($1, $2, $3, $4, $5)", - group_id, - user.id, - type, - "" if message is None else message, - affected_user_id, - ) diff --git a/abrechnung/application/accounts.py 
b/abrechnung/application/accounts.py index 1fd3f62f..6d72e496 100644 --- a/abrechnung/application/accounts.py +++ b/abrechnung/application/accounts.py @@ -5,16 +5,22 @@ import asyncpg -from abrechnung.domain.accounts import Account, AccountType, AccountDetails -from . import ( - Application, - NotFoundError, +from abrechnung.core.auth import ( check_group_permissions, - InvalidCommand, create_group_log, ) +from abrechnung.core.errors import ( + NotFoundError, + InvalidCommand, +) +from abrechnung.core.service import ( + Service, +) +from abrechnung.domain.accounts import Account, AccountType, AccountDetails +from abrechnung.domain.users import User +from abrechnung.framework.database import Connection +from abrechnung.framework.decorators import with_db_transaction, with_db_connection from .common import _get_or_create_tag_ids -from ..domain.users import User @dataclass @@ -31,7 +37,7 @@ class RawAccount: clearing_shares: Optional[dict[int, float]] = field(default=None) -class AccountService(Application): +class AccountService(Service): @staticmethod async def _get_or_create_revision( conn: asyncpg.Connection, user: User, account_id: int @@ -221,39 +227,42 @@ def _account_db_row(self, account: asyncpg.Record) -> Account: pending_details=pending_details, ) - async def list_accounts(self, *, user: User, group_id: int) -> list[Account]: - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - await check_group_permissions(conn=conn, group_id=group_id, user=user) - cur = conn.cursor( - "select account_id, group_id, type, last_changed, is_wip, " - " committed_details, pending_details " - "from full_account_state_valid_at($1) " - "where group_id = $2", - user.id, - group_id, - ) + @with_db_transaction + async def list_accounts( + self, *, conn: Connection, user: User, group_id: int + ) -> list[Account]: + await check_group_permissions(conn=conn, group_id=group_id, user=user) + cur = conn.cursor( + "select account_id, group_id, type, last_changed, 
is_wip, " + " committed_details, pending_details " + "from full_account_state_valid_at($1) " + "where group_id = $2", + user.id, + group_id, + ) - result = [] - async for account in cur: - result.append(self._account_db_row(account)) + result = [] + async for account in cur: + result.append(self._account_db_row(account)) - return result + return result - async def get_account(self, *, user: User, account_id: int) -> Account: - async with self.db_pool.acquire() as conn: - await self._check_account_permissions( - conn=conn, user=user, account_id=account_id - ) - account = await conn.fetchrow( - "select account_id, group_id, type, last_changed, is_wip, " - " committed_details, pending_details " - "from full_account_state_valid_at($1) " - "where account_id = $2", - user.id, - account_id, - ) - return self._account_db_row(account) + @with_db_connection + async def get_account( + self, *, conn: Connection, user: User, account_id: int + ) -> Account: + await self._check_account_permissions( + conn=conn, user=user, account_id=account_id + ) + account = await conn.fetchrow( + "select account_id, group_id, type, last_changed, is_wip, " + " committed_details, pending_details " + "from full_account_state_valid_at($1) " + "where account_id = $2", + user.id, + account_id, + ) + return self._account_db_row(account) async def _add_tags_to_revision( self, @@ -353,9 +362,11 @@ async def _create_account( ) return account_id + @with_db_transaction async def create_account( self, *, + conn: Connection, user: User, group_id: int, type: str, @@ -366,20 +377,18 @@ async def create_account( date_info: Optional[date] = None, clearing_shares: Optional[dict[int, float]] = None, ) -> int: - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - return await self._create_account( - conn=conn, - user=user, - group_id=group_id, - type=type, - name=name, - tags=tags, - description=description, - owning_user_id=owning_user_id, - date_info=date_info, - 
clearing_shares=clearing_shares, - ) + return await self._create_account( + conn=conn, + user=user, + group_id=group_id, + type=type, + name=name, + tags=tags, + description=description, + owning_user_id=owning_user_id, + date_info=date_info, + clearing_shares=clearing_shares, + ) async def _update_account( self, @@ -469,8 +478,11 @@ async def _update_account( revision_id, ) + @with_db_transaction async def update_account( self, + *, + conn: Connection, user: User, account_id: int, name: str, @@ -480,153 +492,152 @@ async def update_account( date_info: Optional[date] = None, clearing_shares: Optional[dict[int, float]] = None, ): - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - return await self._update_account( - conn=conn, - user=user, - account_id=account_id, - name=name, - description=description, - tags=tags, - owning_user_id=owning_user_id, - date_info=date_info, - clearing_shares=clearing_shares, - ) + return await self._update_account( + conn=conn, + user=user, + account_id=account_id, + name=name, + description=description, + tags=tags, + owning_user_id=owning_user_id, + date_info=date_info, + clearing_shares=clearing_shares, + ) + @with_db_transaction async def delete_account( self, + *, + conn: Connection, user: User, account_id: int, ): - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - group_id, _ = await self._check_account_permissions( - conn=conn, user=user, account_id=account_id, can_write=True - ) - row = await conn.fetchrow( - "select id from account where id = $1", - account_id, - ) - if row is None: - raise InvalidCommand(f"Account does not exist") + group_id, _ = await self._check_account_permissions( + conn=conn, user=user, account_id=account_id, can_write=True + ) + row = await conn.fetchrow( + "select id from account where id = $1", + account_id, + ) + if row is None: + raise InvalidCommand(f"Account does not exist") - # TODO: FIXME move this check into the database + # TODO: FIXME 
move this check into the database - has_committed_shares = await conn.fetchval( - "select 1 " - "from committed_transaction_state_valid_at() t " - "where not deleted and $1 = any(involved_accounts)", - account_id, - ) - has_pending_shares = await conn.fetchval( - "select 1 " - "from aggregated_pending_transaction_history t " - "where $1 = any(t.involved_accounts)", - account_id, - ) + has_committed_shares = await conn.fetchval( + "select 1 " + "from committed_transaction_state_valid_at() t " + "where not deleted and $1 = any(involved_accounts)", + account_id, + ) + has_pending_shares = await conn.fetchval( + "select 1 " + "from aggregated_pending_transaction_history t " + "where $1 = any(t.involved_accounts)", + account_id, + ) - has_committed_clearing_shares = await conn.fetchval( - "select 1 " - "from committed_account_state_valid_at() a " - "where not deleted and $1 = any(involved_accounts)", - account_id, - ) - has_pending_clearing_shares = await conn.fetchval( - "select 1 " - "from aggregated_pending_account_history a " - "where $1 = any(a.involved_accounts)", - account_id, - ) + has_committed_clearing_shares = await conn.fetchval( + "select 1 " + "from committed_account_state_valid_at() a " + "where not deleted and $1 = any(involved_accounts)", + account_id, + ) + has_pending_clearing_shares = await conn.fetchval( + "select 1 " + "from aggregated_pending_account_history a " + "where $1 = any(a.involved_accounts)", + account_id, + ) - has_committed_usages = await conn.fetchval( - "select 1 " - "from committed_transaction_position_state_valid_at() p " - "join transaction t on t.id = p.transaction_id " - "where not p.deleted and $1 = any(p.involved_accounts)", - account_id, - ) + has_committed_usages = await conn.fetchval( + "select 1 " + "from committed_transaction_position_state_valid_at() p " + "join transaction t on t.id = p.transaction_id " + "where not p.deleted and $1 = any(p.involved_accounts)", + account_id, + ) - has_pending_usages = await 
conn.fetchval( - "select 1 " - "from aggregated_pending_transaction_position_history p " - "join transaction t on t.id = p.transaction_id " - "where $1 = any(p.involved_accounts)", - account_id, - ) + has_pending_usages = await conn.fetchval( + "select 1 " + "from aggregated_pending_transaction_position_history p " + "join transaction t on t.id = p.transaction_id " + "where $1 = any(p.involved_accounts)", + account_id, + ) - if ( - has_committed_shares - or has_pending_shares - or has_committed_usages - or has_pending_usages - ): - raise InvalidCommand( - f"Cannot delete an account that is references by a transaction" - ) + if ( + has_committed_shares + or has_pending_shares + or has_committed_usages + or has_pending_usages + ): + raise InvalidCommand( + f"Cannot delete an account that is references by a transaction" + ) - if has_committed_clearing_shares or has_pending_clearing_shares: - raise InvalidCommand( - f"Cannot delete an account that is references by a clearing account" - ) + if has_committed_clearing_shares or has_pending_clearing_shares: + raise InvalidCommand( + f"Cannot delete an account that is references by a clearing account" + ) - row = await conn.fetchrow( - "select name, revision_id, deleted " - "from committed_account_state_valid_at() " - "where account_id = $1", - account_id, - ) - if row is None: - raise InvalidCommand( - f"Cannot delete an account without any committed changes" - ) + row = await conn.fetchrow( + "select name, revision_id, deleted " + "from committed_account_state_valid_at() " + "where account_id = $1", + account_id, + ) + if row is None: + raise InvalidCommand( + f"Cannot delete an account without any committed changes" + ) - if row["deleted"]: - raise InvalidCommand(f"Cannot delete an already deleted account") + if row["deleted"]: + raise InvalidCommand(f"Cannot delete an already deleted account") - has_committed_clearing_shares = await conn.fetchval( - "select 1 " - "from committed_account_state_valid_at() p " - "where not 
p.deleted and $1 = any(p.involved_accounts)", - account_id, - ) + has_committed_clearing_shares = await conn.fetchval( + "select 1 " + "from committed_account_state_valid_at() p " + "where not p.deleted and $1 = any(p.involved_accounts)", + account_id, + ) - has_pending_clearing_shares = await conn.fetchval( - "select 1 " - "from aggregated_pending_account_history p " - "where $1 = any(p.involved_accounts)", - account_id, - ) - if has_committed_clearing_shares or has_pending_clearing_shares: - raise InvalidCommand( - f"Cannot delete an account that is references by another clearing account" - ) + has_pending_clearing_shares = await conn.fetchval( + "select 1 " + "from aggregated_pending_account_history p " + "where $1 = any(p.involved_accounts)", + account_id, + ) + if has_committed_clearing_shares or has_pending_clearing_shares: + raise InvalidCommand( + f"Cannot delete an account that is references by another clearing account" + ) - now = datetime.now(tz=timezone.utc) - revision_id = await conn.fetchval( - "insert into account_revision (user_id, account_id, started, committed) " - "values ($1, $2, $3, $4) returning id", - user.id, - account_id, - now, - now, - ) - await conn.execute( - "insert into account_history (id, revision_id, name, description, owning_user_id, date_info, deleted) " - "select $1, $2, name, description, owning_user_id, date_info, true " - "from account_history ah where ah.id = $1 and ah.revision_id = $3 ", - account_id, - revision_id, - row["revision_id"], - ) + now = datetime.now(tz=timezone.utc) + revision_id = await conn.fetchval( + "insert into account_revision (user_id, account_id, started, committed) " + "values ($1, $2, $3, $4) returning id", + user.id, + account_id, + now, + now, + ) + await conn.execute( + "insert into account_history (id, revision_id, name, description, owning_user_id, date_info, deleted) " + "select $1, $2, name, description, owning_user_id, date_info, true " + "from account_history ah where ah.id = $1 and 
ah.revision_id = $3 ", + account_id, + revision_id, + row["revision_id"], + ) - await create_group_log( - conn=conn, - group_id=group_id, - user=user, - type="account-deleted", - message=f"deleted account account {row['name']}", - ) + await create_group_log( + conn=conn, + group_id=group_id, + user=user, + type="account-deleted", + message=f"deleted account account {row['name']}", + ) async def sync_account( self, *, conn: asyncpg.Connection, user: User, account: RawAccount @@ -661,51 +672,48 @@ async def sync_account( ) return account.id, new_acc_id + @with_db_transaction async def sync_accounts( - self, *, user: User, group_id: int, accounts: list[RawAccount] + self, *, conn: Connection, user: User, group_id: int, accounts: list[RawAccount] ) -> dict[int, int]: all_accounts_in_same_group = all([a.group_id == group_id for a in accounts]) if not all_accounts_in_same_group: raise InvalidCommand("all accounts must belong to the same group") - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - can_write, _ = await check_group_permissions( - conn=conn, group_id=group_id, user=user, can_write=True - ) + can_write, _ = await check_group_permissions( + conn=conn, group_id=group_id, user=user, can_write=True + ) - if not can_write: - raise PermissionError("need write access to group") + if not can_write: + raise PermissionError("need write access to group") - new_account_id_map: dict[int, int] = {} + new_account_id_map: dict[int, int] = {} - for account in filter( - lambda acc: acc.type == AccountType.personal, accounts + for account in filter(lambda acc: acc.type == AccountType.personal, accounts): + old_acc_id, new_acc_id = await self.sync_account( + conn=conn, user=user, account=account + ) + new_account_id_map[old_acc_id] = new_acc_id + + clearing_accounts = list( + filter(lambda acc: acc.type == AccountType.clearing, accounts) + ) + # TODO: improve this very inefficient implementation + # first step: use a dict instead of a list + while 
len(clearing_accounts) > 0: + for account in clearing_accounts[:]: # copy as we remove items + if account.clearing_shares: + account.clearing_shares = { + new_account_id_map.get(k, k): v + for k, v in account.clearing_shares.items() + } + if account.clearing_shares and all( + [x > 0 for x in account.clearing_shares.keys()] ): old_acc_id, new_acc_id = await self.sync_account( conn=conn, user=user, account=account ) new_account_id_map[old_acc_id] = new_acc_id + clearing_accounts.remove(account) - clearing_accounts = list( - filter(lambda acc: acc.type == AccountType.clearing, accounts) - ) - # TODO: improve this very inefficient implementation - # first step: use a dict instead of a list - while len(clearing_accounts) > 0: - for account in clearing_accounts[:]: # copy as we remove items - if account.clearing_shares: - account.clearing_shares = { - new_account_id_map.get(k, k): v - for k, v in account.clearing_shares.items() - } - if account.clearing_shares and all( - [x > 0 for x in account.clearing_shares.keys()] - ): - old_acc_id, new_acc_id = await self.sync_account( - conn=conn, user=user, account=account - ) - new_account_id_map[old_acc_id] = new_acc_id - clearing_accounts.remove(account) - - return new_account_id_map + return new_account_id_map diff --git a/abrechnung/application/groups.py b/abrechnung/application/groups.py index 06cc75fc..94041835 100644 --- a/abrechnung/application/groups.py +++ b/abrechnung/application/groups.py @@ -2,13 +2,17 @@ import asyncpg -from abrechnung.application import ( - Application, - NotFoundError, +from abrechnung.core.auth import ( check_group_permissions, - InvalidCommand, create_group_log, ) +from abrechnung.core.errors import ( + NotFoundError, + InvalidCommand, +) +from abrechnung.core.service import ( + Service, +) from abrechnung.domain.accounts import AccountType from abrechnung.domain.groups import ( Group, @@ -18,12 +22,16 @@ GroupLog, ) from abrechnung.domain.users import User +from abrechnung.framework.database 
import Connection +from abrechnung.framework.decorators import with_db_transaction -class GroupService(Application): +class GroupService(Service): + @with_db_transaction async def create_group( self, *, + conn: Connection, user: User, name: str, description: str, @@ -36,49 +44,47 @@ async def create_group( f"guest users are not allowed to create group new groups" ) - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - group_id = await conn.fetchval( - "insert into grp (name, description, currency_symbol, terms, add_user_account_on_join, created_by) " - "values ($1, $2, $3, $4, $5, $6) returning id", - name, - description, - currency_symbol, - terms, - add_user_account_on_join, - user.id, - ) - await conn.execute( - "insert into group_membership (user_id, group_id, is_owner, can_write, description) " - "values ($1, $2, $3, $4, $5)", - user.id, - group_id, - True, - True, - "group founder", - ) + group_id = await conn.fetchval( + "insert into grp (name, description, currency_symbol, terms, add_user_account_on_join, created_by) " + "values ($1, $2, $3, $4, $5, $6) returning id", + name, + description, + currency_symbol, + terms, + add_user_account_on_join, + user.id, + ) + await conn.execute( + "insert into group_membership (user_id, group_id, is_owner, can_write, description) " + "values ($1, $2, $3, $4, $5)", + user.id, + group_id, + True, + True, + "group founder", + ) - if add_user_account_on_join: - await self._create_user_account( - conn=conn, group_id=group_id, user=user - ) + if add_user_account_on_join: + await self._create_user_account(conn=conn, group_id=group_id, user=user) - await create_group_log( - conn=conn, group_id=group_id, user=user, type="group-created" - ) - await create_group_log( - conn=conn, - group_id=group_id, - user=user, - type="member-joined", - affected_user_id=user.id, - ) + await create_group_log( + conn=conn, group_id=group_id, user=user, type="group-created" + ) + await create_group_log( + conn=conn, + 
group_id=group_id, + user=user, + type="member-joined", + affected_user_id=user.id, + ) - return group_id + return group_id + @with_db_transaction async def create_invite( self, *, + conn: Connection, user: User, group_id: int, description: str, @@ -91,47 +97,45 @@ async def create_invite( f"guest users are not allowed to create group invites" ) - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - await check_group_permissions( - conn=conn, group_id=group_id, user=user, can_write=True - ) - await create_group_log( - conn=conn, group_id=group_id, user=user, type="invite-created" - ) - return await conn.fetchval( - "insert into group_invite(group_id, description, created_by, valid_until, single_use, join_as_editor)" - " values ($1, $2, $3, $4, $5, $6) returning id", - group_id, - description, - user.id, - valid_until, - single_use, - join_as_editor, - ) + await check_group_permissions( + conn=conn, group_id=group_id, user=user, can_write=True + ) + await create_group_log( + conn=conn, group_id=group_id, user=user, type="invite-created" + ) + return await conn.fetchval( + "insert into group_invite(group_id, description, created_by, valid_until, single_use, join_as_editor)" + " values ($1, $2, $3, $4, $5, $6) returning id", + group_id, + description, + user.id, + valid_until, + single_use, + join_as_editor, + ) + @with_db_transaction async def delete_invite( self, *, + conn: Connection, user: User, group_id: int, invite_id: int, ): - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - await check_group_permissions( - conn=conn, group_id=group_id, user=user, can_write=True - ) - deleted_id = await conn.fetchval( - "delete from group_invite where id = $1 and group_id = $2 returning id", - invite_id, - group_id, - ) - if not deleted_id: - raise NotFoundError(f"No invite with the given id exists") - await create_group_log( - conn=conn, group_id=group_id, user=user, type="invite-deleted" - ) + await 
check_group_permissions( + conn=conn, group_id=group_id, user=user, can_write=True + ) + deleted_id = await conn.fetchval( + "delete from group_invite where id = $1 and group_id = $2 returning id", + invite_id, + group_id, + ) + if not deleted_id: + raise NotFoundError(f"No invite with the given id exists") + await create_group_log( + conn=conn, group_id=group_id, user=user, type="invite-deleted" + ) async def _create_user_account( self, conn: asyncpg.Connection, group_id: int, user: User @@ -158,105 +162,103 @@ async def _create_user_account( ) return account_id - async def join_group(self, user: User, invite_token: str) -> int: - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - invite = await conn.fetchrow( - "select id, group_id, created_by, single_use, join_as_editor from group_invite gi " - "where gi.token = $1 and gi.valid_until > now()", - invite_token, - ) - if not invite: - raise PermissionError(f"Invalid invite token") + @with_db_transaction + async def join_group( + self, *, conn: Connection, user: User, invite_token: str + ) -> int: + invite = await conn.fetchrow( + "select id, group_id, created_by, single_use, join_as_editor from group_invite gi " + "where gi.token = $1 and gi.valid_until > now()", + invite_token, + ) + if not invite: + raise PermissionError(f"Invalid invite token") - group = await conn.fetchrow( - "select id, add_user_account_on_join from grp " "where grp.id = $1", - invite["group_id"], - ) - if not group: - raise PermissionError(f"Invalid invite token") - - await conn.execute( - "insert into group_membership (user_id, group_id, invited_by, can_write, is_owner) " - "values ($1, $2, $3, $4, false)", - user.id, - invite["group_id"], - invite["created_by"], - invite["join_as_editor"], - ) + group = await conn.fetchrow( + "select id, add_user_account_on_join from grp " "where grp.id = $1", + invite["group_id"], + ) + if not group: + raise PermissionError(f"Invalid invite token") - if 
group["add_user_account_on_join"]: - await self._create_user_account( - conn=conn, group_id=group["id"], user=user - ) + await conn.execute( + "insert into group_membership (user_id, group_id, invited_by, can_write, is_owner) " + "values ($1, $2, $3, $4, false)", + user.id, + invite["group_id"], + invite["created_by"], + invite["join_as_editor"], + ) - await create_group_log( - conn=conn, - group_id=invite["group_id"], - user=user, - type="member-joined", - affected_user_id=user.id, - ) + if group["add_user_account_on_join"]: + await self._create_user_account(conn=conn, group_id=group["id"], user=user) + + await create_group_log( + conn=conn, + group_id=invite["group_id"], + user=user, + type="member-joined", + affected_user_id=user.id, + ) - if invite["single_use"]: - await conn.execute( - "delete from group_invite where id = $1", invite["id"] - ) - return group["id"] - - async def list_groups(self, user: User) -> list[Group]: - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - cur = conn.cursor( - "select grp.id, grp.name, grp.description, grp.terms, grp.currency_symbol, grp.created_at, " - "grp.created_by, grp.add_user_account_on_join " - "from grp " - "join group_membership gm on grp.id = gm.group_id where gm.user_id = $1", - user.id, + if invite["single_use"]: + await conn.execute("delete from group_invite where id = $1", invite["id"]) + return group["id"] + + @with_db_transaction + async def list_groups(self, *, conn: Connection, user: User) -> list[Group]: + cur = conn.cursor( + "select grp.id, grp.name, grp.description, grp.terms, grp.currency_symbol, grp.created_at, " + "grp.created_by, grp.add_user_account_on_join " + "from grp " + "join group_membership gm on grp.id = gm.group_id where gm.user_id = $1", + user.id, + ) + result = [] + async for group in cur: + result.append( + Group( + id=group["id"], + name=group["name"], + description=group["description"], + currency_symbol=group["currency_symbol"], + terms=group["terms"], + 
created_at=group["created_at"], + created_by=group["created_by"], + add_user_account_on_join=group["add_user_account_on_join"], ) - result = [] - async for group in cur: - result.append( - Group( - id=group["id"], - name=group["name"], - description=group["description"], - currency_symbol=group["currency_symbol"], - terms=group["terms"], - created_at=group["created_at"], - created_by=group["created_by"], - add_user_account_on_join=group["add_user_account_on_join"], - ) - ) - - return result - - async def get_group(self, *, user: User, group_id: int) -> Group: - async with self.db_pool.acquire() as conn: - await check_group_permissions(conn=conn, group_id=group_id, user=user) - group = await conn.fetchrow( - "select id, name, description, terms, currency_symbol, created_at, created_by, add_user_account_on_join " - "from grp " - "where grp.id = $1", - group_id, - ) - if not group: - raise NotFoundError(f"Group with id {group_id} does not exist") - - return Group( - id=group["id"], - name=group["name"], - description=group["description"], - currency_symbol=group["currency_symbol"], - terms=group["terms"], - created_at=group["created_at"], - created_by=group["created_by"], - add_user_account_on_join=group["add_user_account_on_join"], ) + return result + + @with_db_transaction + async def get_group(self, *, conn: Connection, user: User, group_id: int) -> Group: + await check_group_permissions(conn=conn, group_id=group_id, user=user) + group = await conn.fetchrow( + "select id, name, description, terms, currency_symbol, created_at, created_by, add_user_account_on_join " + "from grp " + "where grp.id = $1", + group_id, + ) + if not group: + raise NotFoundError(f"Group with id {group_id} does not exist") + + return Group( + id=group["id"], + name=group["name"], + description=group["description"], + currency_symbol=group["currency_symbol"], + terms=group["terms"], + created_at=group["created_at"], + created_by=group["created_by"], + 
add_user_account_on_join=group["add_user_account_on_join"], + ) + + @with_db_transaction async def update_group( self, *, + conn: Connection, user: User, group_id: int, name: str, @@ -265,330 +267,327 @@ async def update_group( add_user_account_on_join: bool, terms: str, ): - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - await check_group_permissions( - conn=conn, group_id=group_id, user=user, is_owner=True - ) - await conn.execute( - "update grp set name = $2, description = $3, currency_symbol = $4, terms = $5, add_user_account_on_join = $6 " - "where grp.id = $1", - group_id, - name, - description, - currency_symbol, - terms, - add_user_account_on_join, - ) - await create_group_log( - conn=conn, group_id=group_id, user=user, type="group-updated" - ) + await check_group_permissions( + conn=conn, group_id=group_id, user=user, is_owner=True + ) + await conn.execute( + "update grp set name = $2, description = $3, currency_symbol = $4, terms = $5, add_user_account_on_join = $6 " + "where grp.id = $1", + group_id, + name, + description, + currency_symbol, + terms, + add_user_account_on_join, + ) + await create_group_log( + conn=conn, group_id=group_id, user=user, type="group-updated" + ) + @with_db_transaction async def update_member_permissions( self, *, + conn: Connection, user: User, group_id: int, member_id: int, can_write: bool, is_owner: bool, ): - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - if user.id == member_id: - raise InvalidCommand( - f"group members cannot modify their own privileges" - ) - - # not possible to have an owner without can_write - can_write = can_write if not is_owner else True - - user_can_write, user_is_owner = await check_group_permissions( - conn=conn, group_id=group_id, user=user, can_write=True - ) - membership = await conn.fetchrow( - "select is_owner, can_write from group_membership where group_id = $1 and user_id = $2", - group_id, - member_id, - ) - if membership is 
None: - raise NotFoundError(f"member with id {member_id} does not exist") - - if ( - membership["is_owner"] == is_owner - and membership["can_write"] == can_write - ): # no changes - return - - if is_owner and not user_is_owner: - raise PermissionError( - f"group members cannot promote others to owner without being an owner" - ) - - if membership["is_owner"]: - raise PermissionError( - f"group owners cannot be demoted by other group members" - ) - - if is_owner: - await create_group_log( - conn=conn, - group_id=group_id, - user=user, - type="owner-granted", - affected_user_id=member_id, - ) - elif can_write: - if membership["is_owner"]: - await create_group_log( - conn=conn, - group_id=group_id, - user=user, - type="owner-revoked", - affected_user_id=member_id, - ) - else: - await create_group_log( - conn=conn, - group_id=group_id, - user=user, - type="write-granted", - affected_user_id=member_id, - ) - else: - if membership["is_owner"]: - await create_group_log( - conn=conn, - group_id=group_id, - user=user, - type="owner-revoked", - affected_user_id=member_id, - ) - await create_group_log( - conn=conn, - group_id=group_id, - user=user, - type="write-revoked", - affected_user_id=member_id, - ) - - await conn.execute( - "update group_membership gm set can_write = $3, is_owner = $4 " - "where gm.user_id = $1 and gm.group_id = $2", - member_id, - group_id, - can_write, - is_owner, - ) + if user.id == member_id: + raise InvalidCommand(f"group members cannot modify their own privileges") - async def delete_group(self, *, user: User, group_id: int): - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - await check_group_permissions( - conn=conn, group_id=group_id, user=user, is_owner=True - ) + # not possible to have an owner without can_write + can_write = can_write if not is_owner else True - n_members = await conn.fetchval( - "select count(user_id) from group_membership gm where gm.group_id = $1", - group_id, - ) - if n_members != 1: - raise 
PermissionError( - f"Can only delete a group when you are the last member" - ) + user_can_write, user_is_owner = await check_group_permissions( + conn=conn, group_id=group_id, user=user, can_write=True + ) + membership = await conn.fetchrow( + "select is_owner, can_write from group_membership where group_id = $1 and user_id = $2", + group_id, + member_id, + ) + if membership is None: + raise NotFoundError(f"member with id {member_id} does not exist") - await conn.execute("delete from grp where id = $1", group_id) + if ( + membership["is_owner"] == is_owner and membership["can_write"] == can_write + ): # no changes + return - async def leave_group(self, *, user: User, group_id: int): - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - await check_group_permissions(conn=conn, group_id=group_id, user=user) + if is_owner and not user_is_owner: + raise PermissionError( + f"group members cannot promote others to owner without being an owner" + ) - n_members = await conn.fetchval( - "select count(user_id) from group_membership gm where gm.group_id = $1", - group_id, + if membership["is_owner"]: + raise PermissionError( + f"group owners cannot be demoted by other group members" + ) + + if is_owner: + await create_group_log( + conn=conn, + group_id=group_id, + user=user, + type="owner-granted", + affected_user_id=member_id, + ) + elif can_write: + if membership["is_owner"]: + await create_group_log( + conn=conn, + group_id=group_id, + user=user, + type="owner-revoked", + affected_user_id=member_id, + ) + else: + await create_group_log( + conn=conn, + group_id=group_id, + user=user, + type="write-granted", + affected_user_id=member_id, ) - if ( - n_members == 1 - ): # our user is the last member -> delete the group, membership will be cascaded - await conn.execute("delete from grp where id = $1", group_id) - else: - await conn.execute( - "delete from group_membership gm where gm.group_id = $1 and gm.user_id = $2", - group_id, - user.id, - ) - - 
async def preview_group(self, *, invite_token: str) -> GroupPreview: - async with self.db_pool.acquire() as conn: - group = await conn.fetchrow( - "select grp.id as group_id, " - "grp.name, grp.description, grp.terms, grp.currency_symbol, grp.created_at, " - "inv.description as invite_description, inv.valid_until as invite_valid_until, " - "inv.single_use as invite_single_use " - "from grp " - "join group_invite inv on grp.id = inv.group_id " - "where inv.token = $1", - invite_token, + else: + if membership["is_owner"]: + await create_group_log( + conn=conn, + group_id=group_id, + user=user, + type="owner-revoked", + affected_user_id=member_id, + ) + await create_group_log( + conn=conn, + group_id=group_id, + user=user, + type="write-revoked", + affected_user_id=member_id, ) - if not group: - raise PermissionError(f"invalid invite token to preview group") - - return GroupPreview( - id=group["group_id"], - name=group["name"], - description=group["description"], - terms=group["terms"], - currency_symbol=group["currency_symbol"], - created_at=group["created_at"], - invite_description=group["invite_description"], - invite_valid_until=group["invite_valid_until"], - invite_single_use=group["invite_single_use"], + + await conn.execute( + "update group_membership gm set can_write = $3, is_owner = $4 " + "where gm.user_id = $1 and gm.group_id = $2", + member_id, + group_id, + can_write, + is_owner, + ) + + @with_db_transaction + async def delete_group(self, *, conn: Connection, user: User, group_id: int): + await check_group_permissions( + conn=conn, group_id=group_id, user=user, is_owner=True + ) + + n_members = await conn.fetchval( + "select count(user_id) from group_membership gm where gm.group_id = $1", + group_id, + ) + if n_members != 1: + raise PermissionError( + f"Can only delete a group when you are the last member" ) - async def list_invites(self, *, user: User, group_id: int) -> list[GroupInvite]: - async with self.db_pool.acquire() as conn: - async with 
conn.transaction(): - await check_group_permissions(conn=conn, group_id=group_id, user=user) - cur = conn.cursor( - "select id, case when created_by = $1 then token else null end as token, description, created_by, " - "valid_until, single_use, join_as_editor " - "from group_invite gi " - "where gi.group_id = $2", - user.id, - group_id, + await conn.execute("delete from grp where id = $1", group_id) + + @with_db_transaction + async def leave_group(self, *, conn: Connection, user: User, group_id: int): + await check_group_permissions(conn=conn, group_id=group_id, user=user) + + n_members = await conn.fetchval( + "select count(user_id) from group_membership gm where gm.group_id = $1", + group_id, + ) + if ( + n_members == 1 + ): # our user is the last member -> delete the group, membership will be cascaded + await conn.execute("delete from grp where id = $1", group_id) + else: + await conn.execute( + "delete from group_membership gm where gm.group_id = $1 and gm.user_id = $2", + group_id, + user.id, + ) + + @with_db_transaction + async def preview_group( + self, *, conn: Connection, invite_token: str + ) -> GroupPreview: + group = await conn.fetchrow( + "select grp.id as group_id, " + "grp.name, grp.description, grp.terms, grp.currency_symbol, grp.created_at, " + "inv.description as invite_description, inv.valid_until as invite_valid_until, " + "inv.single_use as invite_single_use " + "from grp " + "join group_invite inv on grp.id = inv.group_id " + "where inv.token = $1", + invite_token, + ) + if not group: + raise PermissionError(f"invalid invite token to preview group") + + return GroupPreview( + id=group["group_id"], + name=group["name"], + description=group["description"], + terms=group["terms"], + currency_symbol=group["currency_symbol"], + created_at=group["created_at"], + invite_description=group["invite_description"], + invite_valid_until=group["invite_valid_until"], + invite_single_use=group["invite_single_use"], + ) + + @with_db_transaction + async def 
list_invites( + self, *, conn: Connection, user: User, group_id: int + ) -> list[GroupInvite]: + await check_group_permissions(conn=conn, group_id=group_id, user=user) + cur = conn.cursor( + "select id, case when created_by = $1 then token else null end as token, description, created_by, " + "valid_until, single_use, join_as_editor " + "from group_invite gi " + "where gi.group_id = $2", + user.id, + group_id, + ) + result = [] + async for invite in cur: + result.append( + GroupInvite( + id=invite["id"], + token=str(invite["token"]) if "token" in invite else None, + created_by=invite["created_by"], + valid_until=invite["valid_until"], + single_use=invite["single_use"], + description=invite["description"], + join_as_editor=invite["join_as_editor"], ) - result = [] - async for invite in cur: - result.append( - GroupInvite( - id=invite["id"], - token=str(invite["token"]) if "token" in invite else None, - created_by=invite["created_by"], - valid_until=invite["valid_until"], - single_use=invite["single_use"], - description=invite["description"], - join_as_editor=invite["join_as_editor"], - ) - ) - return result + ) + return result + @with_db_transaction async def get_invite( - self, *, user: User, group_id: int, invite_id: int + self, *, conn: Connection, user: User, group_id: int, invite_id: int ) -> GroupInvite: - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - await check_group_permissions(conn=conn, group_id=group_id, user=user) - row = await conn.fetchrow( - "select id, case when created_by = $1 then token else null end as token, description, created_by, " - "valid_until, single_use, join_as_editor " - "from group_invite gi " - "where gi.group_id = $2 and id = $3", - user.id, - group_id, - invite_id, - ) - if not row: - raise NotFoundError() - return GroupInvite( - id=row["id"], - token=str(row["token"]) if "token" in row else None, - created_by=row["created_by"], - valid_until=row["valid_until"], - single_use=row["single_use"], - 
description=row["description"], - join_as_editor=row["join_as_editor"], - ) + await check_group_permissions(conn=conn, group_id=group_id, user=user) + row = await conn.fetchrow( + "select id, case when created_by = $1 then token else null end as token, description, created_by, " + "valid_until, single_use, join_as_editor " + "from group_invite gi " + "where gi.group_id = $2 and id = $3", + user.id, + group_id, + invite_id, + ) + if not row: + raise NotFoundError() + return GroupInvite( + id=row["id"], + token=str(row["token"]) if "token" in row else None, + created_by=row["created_by"], + valid_until=row["valid_until"], + single_use=row["single_use"], + description=row["description"], + join_as_editor=row["join_as_editor"], + ) - async def list_members(self, *, user: User, group_id: int) -> list[GroupMember]: - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - await check_group_permissions(conn=conn, group_id=group_id, user=user) - cur = conn.cursor( - "select usr.id, usr.username, gm.is_owner, gm.can_write, gm.description, " - "gm.invited_by, gm.joined_at " - "from usr " - "join group_membership gm on gm.user_id = usr.id " - "where gm.group_id = $1", - group_id, + @with_db_transaction + async def list_members( + self, *, conn: Connection, user: User, group_id: int + ) -> list[GroupMember]: + await check_group_permissions(conn=conn, group_id=group_id, user=user) + cur = conn.cursor( + "select usr.id, usr.username, gm.is_owner, gm.can_write, gm.description, " + "gm.invited_by, gm.joined_at " + "from usr " + "join group_membership gm on gm.user_id = usr.id " + "where gm.group_id = $1", + group_id, + ) + result = [] + async for group in cur: + result.append( + GroupMember( + user_id=group["id"], + username=group["username"], + is_owner=group["is_owner"], + can_write=group["can_write"], + invited_by=group["invited_by"], + joined_at=group["joined_at"], + description=group["description"], ) - result = [] - async for group in cur: - 
result.append( - GroupMember( - user_id=group["id"], - username=group["username"], - is_owner=group["is_owner"], - can_write=group["can_write"], - invited_by=group["invited_by"], - joined_at=group["joined_at"], - description=group["description"], - ) - ) - return result + ) + return result + @with_db_transaction async def get_member( - self, *, user: User, group_id: int, member_id: int + self, *, conn: Connection, user: User, group_id: int, member_id: int ) -> GroupMember: - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - await check_group_permissions(conn=conn, group_id=group_id, user=user) - row = await conn.fetchrow( - "select usr.id, usr.username, gm.is_owner, gm.can_write, gm.description, " - "gm.invited_by, gm.joined_at " - "from usr " - "join group_membership gm on gm.user_id = usr.id " - "where gm.group_id = $1 and gm.user_id = $2", - group_id, - member_id, - ) - if not row: - raise NotFoundError() - return GroupMember( - user_id=row["id"], - username=row["username"], - is_owner=row["is_owner"], - can_write=row["can_write"], - invited_by=row["invited_by"], - joined_at=row["joined_at"], - description=row["description"], - ) + await check_group_permissions(conn=conn, group_id=group_id, user=user) + row = await conn.fetchrow( + "select usr.id, usr.username, gm.is_owner, gm.can_write, gm.description, " + "gm.invited_by, gm.joined_at " + "from usr " + "join group_membership gm on gm.user_id = usr.id " + "where gm.group_id = $1 and gm.user_id = $2", + group_id, + member_id, + ) + if not row: + raise NotFoundError() + return GroupMember( + user_id=row["id"], + username=row["username"], + is_owner=row["is_owner"], + can_write=row["can_write"], + invited_by=row["invited_by"], + joined_at=row["joined_at"], + description=row["description"], + ) - async def list_log(self, *, user: User, group_id: int) -> list[GroupLog]: - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - await check_group_permissions(conn=conn, 
group_id=group_id, user=user) - cur = conn.cursor( - "select id, user_id, logged_at, type, message, affected " - "from group_log " - "where group_id = $1", - group_id, - ) + @with_db_transaction + async def list_log( + self, *, conn: Connection, user: User, group_id: int + ) -> list[GroupLog]: + await check_group_permissions(conn=conn, group_id=group_id, user=user) + cur = conn.cursor( + "select id, user_id, logged_at, type, message, affected " + "from group_log " + "where group_id = $1", + group_id, + ) - result = [] - async for log in cur: - result.append( - GroupLog( - id=log["id"], - user_id=log["user_id"], - logged_at=log["logged_at"], - type=log["type"], - message=log["message"], - affected=log["affected"], - ) - ) - return result - - async def send_group_message(self, *, user: User, group_id: int, message: str): - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - await check_group_permissions( - conn=conn, group_id=group_id, user=user, can_write=True - ) - await conn.execute( - "insert into group_log (group_id, user_id, type, message) " - "values ($1, $2, 'text-message', $3)", - group_id, - user.id, - message, + result = [] + async for log in cur: + result.append( + GroupLog( + id=log["id"], + user_id=log["user_id"], + logged_at=log["logged_at"], + type=log["type"], + message=log["message"], + affected=log["affected"], ) + ) + return result + + @with_db_transaction + async def send_group_message( + self, *, conn: Connection, user: User, group_id: int, message: str + ): + await check_group_permissions( + conn=conn, group_id=group_id, user=user, can_write=True + ) + await conn.execute( + "insert into group_log (group_id, user_id, type, message) " + "values ($1, $2, 'text-message', $3)", + group_id, + user.id, + message, + ) diff --git a/abrechnung/application/transactions.py b/abrechnung/application/transactions.py index 24fa5a78..1cff112f 100644 --- a/abrechnung/application/transactions.py +++ 
b/abrechnung/application/transactions.py @@ -5,14 +5,18 @@ import asyncpg -from abrechnung.application import ( - Application, - NotFoundError, +from abrechnung.application.common import _get_or_create_tag_ids +from abrechnung.core.auth import ( check_group_permissions, - InvalidCommand, create_group_log, ) -from abrechnung.application.common import _get_or_create_tag_ids +from abrechnung.core.errors import ( + NotFoundError, + InvalidCommand, +) +from abrechnung.core.service import ( + Service, +) from abrechnung.domain.transactions import ( Transaction, TransactionDetails, @@ -21,6 +25,8 @@ TransactionType, ) from abrechnung.domain.users import User +from abrechnung.framework.database import Connection +from abrechnung.framework.decorators import with_db_transaction @dataclass @@ -42,7 +48,7 @@ class RawTransaction: positions: list[TransactionPosition] -class TransactionService(Application): +class TransactionService(Service): @staticmethod async def _check_transaction_permissions( conn: asyncpg.Connection, @@ -186,66 +192,68 @@ def _transaction_db_row(self, transaction: asyncpg.Record) -> Transaction: pending_files=pending_files, ) + @with_db_transaction async def list_transactions( self, *, + conn: Connection, user: User, group_id: int, min_last_changed: Optional[datetime] = None, additional_transactions: Optional[list[int]] = None, ) -> list[Transaction]: - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - await check_group_permissions(conn=conn, group_id=group_id, user=user) - - if min_last_changed: - # if a minimum last changed value is specified we must also return all transactions the current - # user has pending changes with to properly sync state across different devices of the user - cur = conn.cursor( - "select transaction_id, group_id, type, last_changed, is_wip, " - " committed_details, pending_details, " - " committed_positions, pending_positions, committed_files, pending_files " - "from 
full_transaction_state_valid_at($1) " - "where group_id = $2 " - " and (last_changed >= $3 or (($4::int[]) is not null and transaction_id = any($4::int[])))", - user.id, - group_id, - min_last_changed, - additional_transactions, - ) - else: - cur = conn.cursor( - "select transaction_id, group_id, type, last_changed, is_wip, " - " committed_details, pending_details, " - " committed_positions, pending_positions, committed_files, pending_files " - "from full_transaction_state_valid_at($1) " - "where group_id = $2", - user.id, - group_id, - ) - - result = [] - async for transaction in cur: - result.append(self._transaction_db_row(transaction)) - - return result - - async def get_transaction(self, *, user: User, transaction_id: int) -> Transaction: - async with self.db_pool.acquire() as conn: - group_id = await self._check_transaction_permissions( - conn=conn, user=user, transaction_id=transaction_id + await check_group_permissions(conn=conn, group_id=group_id, user=user) + + if min_last_changed: + # if a minimum last changed value is specified we must also return all transactions the current + # user has pending changes with to properly sync state across different devices of the user + cur = conn.cursor( + "select transaction_id, group_id, type, last_changed, is_wip, " + " committed_details, pending_details, " + " committed_positions, pending_positions, committed_files, pending_files " + "from full_transaction_state_valid_at($1) " + "where group_id = $2 " + " and (last_changed >= $3 or (($4::int[]) is not null and transaction_id = any($4::int[])))", + user.id, + group_id, + min_last_changed, + additional_transactions, ) - transaction = await conn.fetchrow( + else: + cur = conn.cursor( "select transaction_id, group_id, type, last_changed, is_wip, " " committed_details, pending_details, " " committed_positions, pending_positions, committed_files, pending_files " "from full_transaction_state_valid_at($1) " - "where group_id = $2 and transaction_id = $3", + "where group_id 
= $2", user.id, group_id, - transaction_id, ) - return self._transaction_db_row(transaction) + + result = [] + async for transaction in cur: + result.append(self._transaction_db_row(transaction)) + + return result + + @with_db_transaction + async def get_transaction( + self, *, conn: Connection, user: User, transaction_id: int + ) -> Transaction: + group_id = await self._check_transaction_permissions( + conn=conn, user=user, transaction_id=transaction_id + ) + transaction = await conn.fetchrow( + "select transaction_id, group_id, type, last_changed, is_wip, " + " committed_details, pending_details, " + " committed_positions, pending_positions, committed_files, pending_files " + "from full_transaction_state_valid_at($1) " + "where group_id = $2 and transaction_id = $3", + user.id, + group_id, + transaction_id, + ) + return self._transaction_db_row(transaction) async def _add_tags_to_revision( self, @@ -355,9 +363,11 @@ async def _create_transaction( return transaction_id + @with_db_transaction async def create_transaction( self, *, + conn: Connection, user: User, group_id: int, type: str, @@ -373,25 +383,23 @@ async def create_transaction( positions: Optional[list[TransactionPosition]] = None, perform_commit: bool = False, ) -> int: - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - return await self._create_transaction( - conn=conn, - user=user, - group_id=group_id, - type=type, - name=name, - description=description, - billed_at=billed_at, - currency_symbol=currency_symbol, - currency_conversion_rate=currency_conversion_rate, - tags=tags, - value=value, - debitor_shares=debitor_shares, - creditor_shares=creditor_shares, - positions=positions, - perform_commit=perform_commit, - ) + return await self._create_transaction( + conn=conn, + user=user, + group_id=group_id, + type=type, + name=name, + description=description, + billed_at=billed_at, + currency_symbol=currency_symbol, + currency_conversion_rate=currency_conversion_rate, + 
tags=tags, + value=value, + debitor_shares=debitor_shares, + creditor_shares=creditor_shares, + positions=positions, + perform_commit=perform_commit, + ) @staticmethod async def _put_transaction_debitor_shares( @@ -449,39 +457,39 @@ async def _put_transaction_creditor_shares( value, ) - async def commit_transaction(self, *, user: User, transaction_id: int) -> None: - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - group_id = await self._check_transaction_permissions( - conn=conn, user=user, transaction_id=transaction_id - ) - revision_id = await conn.fetchval( - "select id from transaction_revision " - "where transaction_id = $1 and user_id = $2 and committed is null", - transaction_id, - user.id, - ) - if revision_id is None: - raise InvalidCommand( - f"Cannot commit a transaction without pending changes" - ) - - await conn.execute( - "update transaction_revision " - "set committed = now() where id = $1", - revision_id, - ) - await create_group_log( - conn=conn, - group_id=group_id, - user=user, - type="transaction-committed", - message=f"updated transaction with id {transaction_id}", - ) + @with_db_transaction + async def commit_transaction( + self, *, conn: Connection, user: User, transaction_id: int + ) -> None: + group_id = await self._check_transaction_permissions( + conn=conn, user=user, transaction_id=transaction_id + ) + revision_id = await conn.fetchval( + "select id from transaction_revision " + "where transaction_id = $1 and user_id = $2 and committed is null", + transaction_id, + user.id, + ) + if revision_id is None: + raise InvalidCommand(f"Cannot commit a transaction without pending changes") + await conn.execute( + "update transaction_revision " "set committed = now() where id = $1", + revision_id, + ) + await create_group_log( + conn=conn, + group_id=group_id, + user=user, + type="transaction-committed", + message=f"updated transaction with id {transaction_id}", + ) + + @with_db_transaction async def upload_file( 
self, *, + conn: Connection, user: User, transaction_id: int, filename: str, @@ -502,117 +510,116 @@ async def upload_file( if "." in filename: raise InvalidCommand(f"Dots '.' are not allowed in file names") - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - await self._check_transaction_permissions( - conn=conn, - user=user, - transaction_id=transaction_id, - can_write=True, - ) - revision_id = await self._get_or_create_revision( - conn=conn, user=user, transaction_id=transaction_id - ) - - blob_id = await conn.fetchval( - "insert into blob (content, mime_type) values ($1, $2) returning id", - content, - mime_type, - ) - file_id = await conn.fetchval( - "insert into file (transaction_id) values ($1) returning id", - transaction_id, - ) - await conn.execute( - "insert into file_history (id, revision_id, filename, blob_id) values ($1, $2, $3, $4)", - file_id, - revision_id, - filename, - blob_id, - ) - return file_id - - async def delete_file(self, *, user: User, file_id: int) -> tuple[int, int]: - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - perms = await conn.fetchrow( - "select t.id as transaction_id " - "from group_membership gm " - " join transaction t on gm.group_id = t.group_id and gm.user_id = $1 " - " join file f on t.id = f.transaction_id " - "where f.id = $2 and gm.can_write", - user.id, - file_id, - ) - if not perms: - raise InvalidCommand("File not found") + await self._check_transaction_permissions( + conn=conn, + user=user, + transaction_id=transaction_id, + can_write=True, + ) + revision_id = await self._get_or_create_revision( + conn=conn, user=user, transaction_id=transaction_id + ) - transaction_id = perms["transaction_id"] - committed_state = await conn.fetchrow( - "select filename, deleted from committed_file_state_valid_at() where file_id = $1", - file_id, - ) - if committed_state is not None and committed_state["deleted"]: - raise InvalidCommand("Cannot delete file as it is already 
deleted") - - if committed_state is None: - # file is only attached to a pending change, fully delete it right away, blob will be cleaned up - pending_state = await conn.fetchrow( - "select revision_id from aggregated_pending_file_history " - "where file_id = $1 and changed_by = $2", - file_id, - user.id, - ) - if pending_state is None: - raise InvalidCommand("Unknown error occurred") - - revision_id = pending_state["revision_id"] - - await conn.execute( - "update file_history fh set deleted = true, blob_id = null where id = $1 and revision_id = $2", - file_id, - revision_id, - ) - return transaction_id, pending_state["revision_id"] - - revision_id = await self._get_or_create_revision( - conn=conn, user=user, transaction_id=transaction_id - ) + blob_id = await conn.fetchval( + "insert into blob (content, mime_type) values ($1, $2) returning id", + content, + mime_type, + ) + file_id = await conn.fetchval( + "insert into file (transaction_id) values ($1) returning id", + transaction_id, + ) + await conn.execute( + "insert into file_history (id, revision_id, filename, blob_id) values ($1, $2, $3, $4)", + file_id, + revision_id, + filename, + blob_id, + ) + return file_id - await conn.execute( - "insert into file_history(id, revision_id, filename, blob_id, deleted) " - "values ($1, $2, $3, null, true)", - file_id, - revision_id, - committed_state["filename"], - ) - return transaction_id, revision_id + @with_db_transaction + async def delete_file( + self, *, conn: Connection, user: User, file_id: int + ) -> tuple[int, int]: + perms = await conn.fetchrow( + "select t.id as transaction_id " + "from group_membership gm " + " join transaction t on gm.group_id = t.group_id and gm.user_id = $1 " + " join file f on t.id = f.transaction_id " + "where f.id = $2 and gm.can_write", + user.id, + file_id, + ) + if not perms: + raise InvalidCommand("File not found") - async def read_file_contents( - self, user: User, file_id: int, blob_id: int - ) -> tuple[str, bytes]: - async with 
self.db_pool.acquire() as conn: - perms = await conn.fetchrow( - "select f.id " - "from group_membership gm " - " join transaction t on gm.group_id = t.group_id and gm.user_id = $1 " - " join file f on t.id = f.transaction_id and f.id = $2" - " join file_history fh on f.id = fh.id " - "where fh.blob_id = $3", - user.id, + transaction_id = perms["transaction_id"] + committed_state = await conn.fetchrow( + "select filename, deleted from committed_file_state_valid_at() where file_id = $1", + file_id, + ) + if committed_state is not None and committed_state["deleted"]: + raise InvalidCommand("Cannot delete file as it is already deleted") + + if committed_state is None: + # file is only attached to a pending change, fully delete it right away, blob will be cleaned up + pending_state = await conn.fetchrow( + "select revision_id from aggregated_pending_file_history " + "where file_id = $1 and changed_by = $2", file_id, - blob_id, + user.id, ) - if not perms: - raise InvalidCommand("File not found") + if pending_state is None: + raise InvalidCommand("Unknown error occurred") - blob = await conn.fetchrow( - "select content, mime_type from blob where id = $1", blob_id + revision_id = pending_state["revision_id"] + + await conn.execute( + "update file_history fh set deleted = true, blob_id = null where id = $1 and revision_id = $2", + file_id, + revision_id, ) - if not blob: - raise InvalidCommand("File not found") + return transaction_id, pending_state["revision_id"] - return blob["mime_type"], blob["content"] + revision_id = await self._get_or_create_revision( + conn=conn, user=user, transaction_id=transaction_id + ) + + await conn.execute( + "insert into file_history(id, revision_id, filename, blob_id, deleted) " + "values ($1, $2, $3, null, true)", + file_id, + revision_id, + committed_state["filename"], + ) + return transaction_id, revision_id + + @with_db_transaction + async def read_file_contents( + self, *, conn: Connection, user: User, file_id: int, blob_id: int + ) 
-> tuple[str, bytes]: + perms = await conn.fetchrow( + "select f.id " + "from group_membership gm " + " join transaction t on gm.group_id = t.group_id and gm.user_id = $1 " + " join file f on t.id = f.transaction_id and f.id = $2" + " join file_history fh on f.id = fh.id " + "where fh.blob_id = $3", + user.id, + file_id, + blob_id, + ) + if not perms: + raise InvalidCommand("File not found") + + blob = await conn.fetchrow( + "select content, mime_type from blob where id = $1", blob_id + ) + if not blob: + raise InvalidCommand("File not found") + + return blob["mime_type"], blob["content"] @staticmethod async def _put_position_usages( @@ -827,9 +834,11 @@ async def _update_transaction( revision_id, ) + @with_db_transaction async def update_transaction( self, *, + conn: Connection, user: User, transaction_id: int, value: float, @@ -844,196 +853,197 @@ async def update_transaction( positions: Optional[list[TransactionPosition]] = None, perform_commit: bool = False, ): - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - await self._update_transaction( - conn=conn, - user=user, - transaction_id=transaction_id, - value=value, - name=name, - description=description, - billed_at=billed_at, - currency_symbol=currency_symbol, - currency_conversion_rate=currency_conversion_rate, - tags=tags, - debitor_shares=debitor_shares, - creditor_shares=creditor_shares, - positions=positions, - perform_commit=perform_commit, - ) + await self._update_transaction( + conn=conn, + user=user, + transaction_id=transaction_id, + value=value, + name=name, + description=description, + billed_at=billed_at, + currency_symbol=currency_symbol, + currency_conversion_rate=currency_conversion_rate, + tags=tags, + debitor_shares=debitor_shares, + creditor_shares=creditor_shares, + positions=positions, + perform_commit=perform_commit, + ) + @with_db_transaction async def update_transaction_positions( self, *, + conn: Connection, user: User, transaction_id: int, positions: 
list[TransactionPosition], perform_commit: bool = False, ): - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - group_id = await self._check_transaction_permissions( - conn=conn, - user=user, - transaction_id=transaction_id, - can_write=True, - transaction_type=TransactionType.purchase.value, - ) - revision_id = await self._get_or_create_revision( - conn=conn, user=user, transaction_id=transaction_id - ) + group_id = await self._check_transaction_permissions( + conn=conn, + user=user, + transaction_id=transaction_id, + can_write=True, + transaction_type=TransactionType.purchase.value, + ) + revision_id = await self._get_or_create_revision( + conn=conn, user=user, transaction_id=transaction_id + ) - for position in positions: - await self._process_position_update( - conn=conn, - transaction_id=transaction_id, - group_id=group_id, - revision_id=revision_id, - position=position, - ) - - if perform_commit: - await conn.execute( - "update transaction_revision set committed = now() where id = $1", - revision_id, - ) - - async def create_transaction_change(self, *, user: User, transaction_id: int): - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - await self._check_transaction_permissions( - conn=conn, - user=user, - transaction_id=transaction_id, - can_write=True, - ) - await self._get_or_create_revision( - conn=conn, user=user, transaction_id=transaction_id - ) + for position in positions: + await self._process_position_update( + conn=conn, + transaction_id=transaction_id, + group_id=group_id, + revision_id=revision_id, + position=position, + ) - async def discard_transaction_changes(self, *, user: User, transaction_id: int): - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - await self._check_transaction_permissions( - conn=conn, - user=user, - transaction_id=transaction_id, - can_write=True, - ) + if perform_commit: + await conn.execute( + "update transaction_revision set committed 
= now() where id = $1", + revision_id, + ) - revision_id = await conn.fetchval( - "select id " - "from transaction_revision tr where tr.user_id = $1 and tr.transaction_id = $2 " - " and tr.committed is null", - user.id, - transaction_id, - ) - if revision_id is None: - raise InvalidCommand( - f"No changes to discard for transaction {transaction_id}" - ) - - last_committed_revision = await conn.fetchval( - "select id " - "from transaction_revision tr where tr.transaction_id = $1 and tr.committed is not null", - transaction_id, - ) - if ( - last_committed_revision is None - ): # we have a newly created transaction - disallow discarding changes - raise InvalidCommand( - f"Cannot discard transaction changes without any committed changes" - ) - else: - await conn.execute( - "delete from transaction_revision tr " "where tr.id = $1", - revision_id, - ) - - async def delete_transaction(self, *, user: User, transaction_id: int): - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - group_id = await self._check_transaction_permissions( - conn=conn, - user=user, - transaction_id=transaction_id, - can_write=True, - ) + @with_db_transaction + async def create_transaction_change( + self, *, conn: Connection, user: User, transaction_id: int + ): + await self._check_transaction_permissions( + conn=conn, + user=user, + transaction_id=transaction_id, + can_write=True, + ) + await self._get_or_create_revision( + conn=conn, user=user, transaction_id=transaction_id + ) + + @with_db_transaction + async def discard_transaction_changes( + self, *, conn: Connection, user: User, transaction_id: int + ): + await self._check_transaction_permissions( + conn=conn, + user=user, + transaction_id=transaction_id, + can_write=True, + ) + + revision_id = await conn.fetchval( + "select id " + "from transaction_revision tr where tr.user_id = $1 and tr.transaction_id = $2 " + " and tr.committed is null", + user.id, + transaction_id, + ) + if revision_id is None: + raise 
InvalidCommand( + f"No changes to discard for transaction {transaction_id}" + ) + + last_committed_revision = await conn.fetchval( + "select id " + "from transaction_revision tr where tr.transaction_id = $1 and tr.committed is not null", + transaction_id, + ) + if ( + last_committed_revision is None + ): # we have a newly created transaction - disallow discarding changes + raise InvalidCommand( + f"Cannot discard transaction changes without any committed changes" + ) + else: + await conn.execute( + "delete from transaction_revision tr " "where tr.id = $1", + revision_id, + ) + + @with_db_transaction + async def delete_transaction( + self, *, conn: Connection, user: User, transaction_id: int + ): + group_id = await self._check_transaction_permissions( + conn=conn, + user=user, + transaction_id=transaction_id, + can_write=True, + ) + + row = await conn.fetchrow( + "select name, description, revision_id, deleted " + "from committed_transaction_state_valid_at() " + "where transaction_id = $1", + transaction_id, + ) + if row is not None and row["deleted"]: + raise InvalidCommand( + f"Cannot delete transaction {transaction_id} as it already is deleted" + ) - row = await conn.fetchrow( - "select name, description, revision_id, deleted " - "from committed_transaction_state_valid_at() " - "where transaction_id = $1", - transaction_id, + await create_group_log( + conn=conn, + group_id=group_id, + user=user, + type="transaction-deleted", + message=f"deleted transaction with id {transaction_id}", + ) + + if ( + row is None + ): # the transaction has no committed changes, we can only delete it if we created it + revision_id = await conn.fetchval( + "select id from transaction_revision tr " + "where tr.user_id = $1 and tr.transaction_id = $2 and tr.committed is null", + user.id, + transaction_id, + ) + if revision_id is None: + raise InvalidCommand( + f"Cannot delete uncommitted transaction {transaction_id} of another user" ) - if row is not None and row["deleted"]: - raise 
InvalidCommand( - f"Cannot delete transaction {transaction_id} as it already is deleted" - ) - await create_group_log( - conn=conn, - group_id=group_id, - user=user, - type="transaction-deleted", - message=f"deleted transaction with id {transaction_id}", + # here we assume there has already been a transaction_history entry, if not something weird has + # happened + t_id = await conn.fetchval( + "update transaction_history th set deleted = true " + "where th.id = $1 and th.revision_id = $2 returning id", + transaction_id, + revision_id, + ) + if t_id is None: + raise InvalidCommand( + f"something weird has happened deleting uncommitted transaction " + f"{transaction_id}, please consult your local IT admin" ) - if ( - row is None - ): # the transaction has no committed changes, we can only delete it if we created it - revision_id = await conn.fetchval( - "select id from transaction_revision tr " - "where tr.user_id = $1 and tr.transaction_id = $2 and tr.committed is null", - user.id, - transaction_id, - ) - if revision_id is None: - raise InvalidCommand( - f"Cannot delete uncommitted transaction {transaction_id} of another user" - ) - - # here we assume there has already been a transaction_history entry, if not something weird has - # happened - t_id = await conn.fetchval( - "update transaction_history th set deleted = true " - "where th.id = $1 and th.revision_id = $2 returning id", - transaction_id, - revision_id, - ) - if t_id is None: - raise InvalidCommand( - f"something weird has happened deleting uncommitted transaction " - f"{transaction_id}, please consult your local IT admin" - ) - - # now commit this change - await conn.execute( - "update transaction_revision tr set committed = now() " - "where tr.user_id = $1 and tr.transaction_id = $2 and tr.committed is null", - user.id, - transaction_id, - ) - return - - else: # we have at least one committed change for this transaction - revision_id = await self._get_or_create_pending_transaction_change( - conn=conn, 
user=user, transaction_id=transaction_id - ) - - await conn.execute( - "update transaction_history th set deleted = true " - "where th.id = $1 and th.revision_id = $2", - transaction_id, - revision_id, - ) - - await conn.execute( - "update transaction_revision tr set committed = NOW() " - "where tr.id = $1", - revision_id, - ) + # now commit this change + await conn.execute( + "update transaction_revision tr set committed = now() " + "where tr.user_id = $1 and tr.transaction_id = $2 and tr.committed is null", + user.id, + transaction_id, + ) + return + + else: # we have at least one committed change for this transaction + revision_id = await self._get_or_create_pending_transaction_change( + conn=conn, user=user, transaction_id=transaction_id + ) + + await conn.execute( + "update transaction_history th set deleted = true " + "where th.id = $1 and th.revision_id = $2", + transaction_id, + revision_id, + ) + + await conn.execute( + "update transaction_revision tr set committed = NOW() " + "where tr.id = $1", + revision_id, + ) async def _get_or_create_revision( self, conn: asyncpg.Connection, user: User, transaction_id: int @@ -1161,8 +1171,14 @@ async def sync_transaction( return transaction.id, new_transaction_id + @with_db_transaction async def sync_transactions( - self, *, user: User, group_id: int, transactions: list[RawTransaction] + self, + *, + conn: Connection, + user: User, + group_id: int, + transactions: list[RawTransaction], ) -> dict[int, int]: all_transactions_in_same_group = all( [a.group_id == group_id for a in transactions] @@ -1170,21 +1186,19 @@ async def sync_transactions( if not all_transactions_in_same_group: raise InvalidCommand("all accounts must belong to the same group") - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - can_write, _ = await check_group_permissions( - conn=conn, group_id=group_id, user=user, can_write=True - ) + can_write, _ = await check_group_permissions( + conn=conn, group_id=group_id, 
user=user, can_write=True + ) - if not can_write: - raise PermissionError("need write access to group") + if not can_write: + raise PermissionError("need write access to group") - new_transaction_id_map: dict[int, int] = {} + new_transaction_id_map: dict[int, int] = {} - for transaction in transactions: - old_acc_id, new_acc_id = await self.sync_transaction( - conn=conn, user=user, transaction=transaction - ) - new_transaction_id_map[old_acc_id] = new_acc_id + for transaction in transactions: + old_acc_id, new_acc_id = await self.sync_transaction( + conn=conn, user=user, transaction=transaction + ) + new_transaction_id_map[old_acc_id] = new_acc_id - return new_transaction_id_map + return new_transaction_id_map diff --git a/abrechnung/application/users.py b/abrechnung/application/users.py index a2e2e7e0..98eef1d4 100644 --- a/abrechnung/application/users.py +++ b/abrechnung/application/users.py @@ -8,9 +8,17 @@ from passlib.context import CryptContext from pydantic import BaseModel +from abrechnung.config import Config +from abrechnung.core.errors import ( + NotFoundError, + InvalidCommand, +) +from abrechnung.core.service import ( + Service, +) from abrechnung.domain.users import User, Session -from . 
import Application, NotFoundError, InvalidCommand -from ..config import Config +from abrechnung.framework.database import Connection +from abrechnung.framework.decorators import with_db_transaction ALGORITHM = "HS256" @@ -28,7 +36,7 @@ class TokenMetadata(BaseModel): session_id: int -class UserService(Application): +class UserService(Service): def __init__( self, db_pool: Pool, @@ -67,23 +75,22 @@ def decode_jwt_payload(self, token: str) -> TokenMetadata: except JWTError: raise PermissionError - async def get_user_from_token(self, token: str) -> User: + @with_db_transaction + async def get_user_from_token(self, *, conn: Connection, token: str) -> User: token_metadata = self.decode_jwt_payload(token) - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - sess = await conn.fetchval( - "select id from session " - "where id = $1 and user_id = $2 and valid_until is null or valid_until > now()", - token_metadata.session_id, - token_metadata.user_id, - ) - if not sess: - raise PermissionError - user = await self._get_user(conn=conn, user_id=token_metadata.user_id) - if user is None: - raise PermissionError - return user + sess = await conn.fetchval( + "select id from session " + "where id = $1 and user_id = $2 and valid_until is null or valid_until > now()", + token_metadata.session_id, + token_metadata.user_id, + ) + if not sess: + raise PermissionError + user = await self._get_user(conn=conn, user_id=token_metadata.user_id) + if user is None: + raise PermissionError + return user async def _verify_user_password(self, user_id: int, password: str) -> bool: async with self.db_pool.acquire() as conn: @@ -100,90 +107,88 @@ async def _verify_user_password(self, user_id: int, password: str) -> bool: return self._check_password(password, user["hashed_password"]) async def get_access_token_from_session_token(self, session_token: str) -> str: - res = await self.is_session_token_valid(session_token) + res = await 
self.is_session_token_valid(token=session_token) if res is None: raise PermissionError("invalid session token") user_id, session_id = res return self._create_access_token({"user_id": user_id, "session_id": session_id}) - async def is_session_token_valid(self, token: str) -> Optional[tuple[int, int]]: + @with_db_transaction + async def is_session_token_valid( + self, *, conn: Connection, token: str + ) -> Optional[tuple[int, int]]: """returns the session id""" - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - row = await conn.fetchrow( - "select user_id, id from session where token = $1 and valid_until is null or valid_until > now()", - token, - ) - if row: - await conn.execute( - "update session set last_seen = now() where token = $1", token - ) - - return row + row = await conn.fetchrow( + "select user_id, id from session where token = $1 and valid_until is null or valid_until > now()", + token, + ) + if row: + await conn.execute( + "update session set last_seen = now() where token = $1", token + ) + return row + + @with_db_transaction async def login_user( - self, username: str, password: str, session_name: str + self, *, conn: Connection, username: str, password: str, session_name: str ) -> tuple[int, int, str]: """ validate whether a given user can login If successful return the user id, a new session id and a session token """ - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - user = await conn.fetchrow( - "select id, hashed_password, pending, deleted from usr where username = $1 or email = $1", - username, - ) - if user is None: - raise InvalidCommand(f"Login failed") - - if not self._check_password(password, user["hashed_password"]): - raise InvalidCommand(f"Login failed") - - if user["deleted"]: - raise InvalidCommand(f"User is not permitted to login") - - if user["pending"]: - raise InvalidCommand( - f"You need to confirm your email before logging in" - ) - - session_token, session_id = await 
conn.fetchrow( - "insert into session (user_id, name) values ($1, $2) returning token, id", - user["id"], - session_name, - ) - - return user["id"], session_id, str(session_token) - - async def logout_user(self, *, user: User, session_id: int): - async with self.db_pool.acquire(timeout=1) as conn: - async with conn.transaction(): - sess_id = await conn.fetchval( - "delete from session where id = $1 and user_id = $2 returning id", - session_id, - user.id, - ) - if sess_id is None: - raise InvalidCommand(f"Already logged out") - - async def demo_register_user(self, username: str, email: str, password: str) -> int: - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - hashed_password = self._hash_password(password) - user_id = await conn.fetchval( - "insert into usr (username, email, hashed_password, pending) " - "values ($1, $2, $3, false) returning id", - username, - email, - hashed_password, - ) - if user_id is None: - raise InvalidCommand(f"Registering new user failed") - - return user_id + user = await conn.fetchrow( + "select id, hashed_password, pending, deleted from usr where username = $1 or email = $1", + username, + ) + if user is None: + raise InvalidCommand(f"Login failed") + + if not self._check_password(password, user["hashed_password"]): + raise InvalidCommand(f"Login failed") + + if user["deleted"]: + raise InvalidCommand(f"User is not permitted to login") + + if user["pending"]: + raise InvalidCommand(f"You need to confirm your email before logging in") + + session_token, session_id = await conn.fetchrow( + "insert into session (user_id, name) values ($1, $2) returning token, id", + user["id"], + session_name, + ) + + return user["id"], session_id, str(session_token) + + @with_db_transaction + async def logout_user(self, *, conn: Connection, user: User, session_id: int): + sess_id = await conn.fetchval( + "delete from session where id = $1 and user_id = $2 returning id", + session_id, + user.id, + ) + if sess_id is None: + 
raise InvalidCommand(f"Already logged out") + + @with_db_transaction + async def demo_register_user( + self, *, conn: Connection, username: str, email: str, password: str + ) -> int: + hashed_password = self._hash_password(password) + user_id = await conn.fetchval( + "insert into usr (username, email, hashed_password, pending) " + "values ($1, $2, $3, false) returning id", + username, + email, + hashed_password, + ) + if user_id is None: + raise InvalidCommand(f"Registering new user failed") + + return user_id def _validate_email_address(self, email: str) -> str: try: @@ -204,8 +209,11 @@ def _validate_email_domain(self, email: str) -> bool: return True + @with_db_transaction async def register_user( self, + *, + conn: Connection, username: str, email: str, password: str, @@ -217,72 +225,63 @@ async def register_user( email = self._validate_email_address(email) - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - is_guest_user = False - has_valid_email = self._validate_email_domain(email) - - if ( - invite_token is not None - and self.allow_guest_users - and not has_valid_email - ): - invite = await conn.fetchval( - "select id " - "from group_invite where token = $1 and valid_until > now()", - invite_token, - ) - if invite is None: - raise InvalidCommand("Invalid invite token") - is_guest_user = True - if self.enable_registration and has_valid_email: - self._validate_email_domain(email) - elif not has_valid_email: - raise PermissionError( - f"Only users with emails out of the following domains are " - f"allowed: {self.valid_email_domains}" - ) - - hashed_password = self._hash_password(password) - user_id = await conn.fetchval( - "insert into usr (username, email, hashed_password, is_guest_user) values ($1, $2, $3, $4) returning id", - username, - email, - hashed_password, - is_guest_user, - ) - if user_id is None: - raise InvalidCommand(f"Registering new user failed") - - await conn.execute( - "insert into pending_registration (user_id) 
values ($1)", user_id - ) - - return user_id - - async def confirm_registration(self, token: str) -> int: - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - row = await conn.fetchrow( - "select user_id, valid_until from pending_registration where token = $1", - token, - ) - if row is None: - raise PermissionError(f"Invalid registration token") - - user_id = row["user_id"] - valid_until = row["valid_until"] - if valid_until is None or valid_until < datetime.now(tz=timezone.utc): - raise PermissionError(f"Invalid registration token") - - await conn.execute( - "delete from pending_registration where user_id = $1", user_id - ) - await conn.execute( - "update usr set pending = false where id = $1", user_id - ) - - return user_id + is_guest_user = False + has_valid_email = self._validate_email_domain(email) + + if invite_token is not None and self.allow_guest_users and not has_valid_email: + invite = await conn.fetchval( + "select id " + "from group_invite where token = $1 and valid_until > now()", + invite_token, + ) + if invite is None: + raise InvalidCommand("Invalid invite token") + is_guest_user = True + if self.enable_registration and has_valid_email: + self._validate_email_domain(email) + elif not has_valid_email: + raise PermissionError( + f"Only users with emails out of the following domains are " + f"allowed: {self.valid_email_domains}" + ) + + hashed_password = self._hash_password(password) + user_id = await conn.fetchval( + "insert into usr (username, email, hashed_password, is_guest_user) values ($1, $2, $3, $4) returning id", + username, + email, + hashed_password, + is_guest_user, + ) + if user_id is None: + raise InvalidCommand(f"Registering new user failed") + + await conn.execute( + "insert into pending_registration (user_id) values ($1)", user_id + ) + + return user_id + + @with_db_transaction + async def confirm_registration(self, *, conn: Connection, token: str) -> int: + row = await conn.fetchrow( + "select user_id, 
valid_until from pending_registration where token = $1", + token, + ) + if row is None: + raise PermissionError(f"Invalid registration token") + + user_id = row["user_id"] + valid_until = row["valid_until"] + if valid_until is None or valid_until < datetime.now(tz=timezone.utc): + raise PermissionError(f"Invalid registration token") + + await conn.execute( + "delete from pending_registration where user_id = $1", user_id + ) + await conn.execute("update usr set pending = false where id = $1", user_id) + + return user_id async def _get_user(self, conn: asyncpg.Connection, user_id: int) -> User: user = await conn.fetchrow( @@ -319,120 +318,120 @@ async def _get_user(self, conn: asyncpg.Connection, user_id: int) -> User: sessions=sessions, ) - async def get_user(self, user_id: int) -> User: - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - return await self._get_user(conn, user_id) + @with_db_transaction + async def get_user(self, *, conn: Connection, user_id: int) -> User: + return await self._get_user(conn, user_id) - async def delete_session(self, user: User, session_id: int): - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - sess_id = await conn.fetchval( - "delete from session where id = $1 and user_id = $2 returning id", - session_id, - user.id, - ) - if not sess_id: - raise NotFoundError(f"no such session found with id {session_id}") - - async def rename_session(self, user: User, session_id: int, name: str): - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - sess_id = await conn.fetchval( - "update session set name = $3 where id = $1 and user_id = $2 returning id", - session_id, - user.id, - name, - ) - if not sess_id: - raise NotFoundError(f"no such session found with id {session_id}") - - async def change_password(self, user: User, old_password: str, new_password: str): - async with self.db_pool.acquire() as conn: - valid_pw = await self._verify_user_password(user.id, 
old_password) - if not valid_pw: - raise InvalidPassword + @with_db_transaction + async def delete_session(self, *, conn: Connection, user: User, session_id: int): + sess_id = await conn.fetchval( + "delete from session where id = $1 and user_id = $2 returning id", + session_id, + user.id, + ) + if not sess_id: + raise NotFoundError(f"no such session found with id {session_id}") - hashed_password = self._hash_password(new_password) - await conn.execute( - "update usr set hashed_password = $1 where id = $2", - hashed_password, - user.id, - ) + @with_db_transaction + async def rename_session( + self, *, conn: Connection, user: User, session_id: int, name: str + ): + sess_id = await conn.fetchval( + "update session set name = $3 where id = $1 and user_id = $2 returning id", + session_id, + user.id, + name, + ) + if not sess_id: + raise NotFoundError(f"no such session found with id {session_id}") - async def request_email_change(self, user: User, password: str, email: str): + @with_db_transaction + async def change_password( + self, *, conn: Connection, user: User, old_password: str, new_password: str + ): + valid_pw = await self._verify_user_password(user.id, old_password) + if not valid_pw: + raise InvalidPassword + + hashed_password = self._hash_password(new_password) + await conn.execute( + "update usr set hashed_password = $1 where id = $2", + hashed_password, + user.id, + ) + + @with_db_transaction + async def request_email_change( + self, *, conn: Connection, user: User, password: str, email: str + ): try: valid = validate_email(email) email = valid.email except EmailNotValidError as e: raise InvalidCommand(str(e)) - async with self.db_pool.acquire() as conn: - valid_pw = await self._verify_user_password(user.id, password) - if not valid_pw: - raise InvalidPassword + valid_pw = await self._verify_user_password(user.id, password) + if not valid_pw: + raise InvalidPassword - await conn.execute( - "insert into pending_email_change (user_id, new_email) values ($1, 
$2)", - user.id, - email, - ) + await conn.execute( + "insert into pending_email_change (user_id, new_email) values ($1, $2)", + user.id, + email, + ) - async def confirm_email_change(self, token: str) -> int: - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - row = await conn.fetchrow( - "select user_id, new_email, valid_until from pending_email_change where token = $1", - token, - ) - user_id = row["user_id"] - valid_until = row["valid_until"] - if valid_until is None or valid_until < datetime.now(tz=timezone.utc): - raise PermissionError - - await conn.execute( - "delete from pending_email_change where user_id = $1", user_id - ) - await conn.execute( - "update usr set email = $2 where id = $1", user_id, row["new_email"] - ) - - return user_id - - async def request_password_recovery(self, email: str): - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - user_id = await conn.fetchval( - "select id from usr where email = $1", email - ) - if not user_id: - raise PermissionError - - await conn.execute( - "insert into pending_password_recovery (user_id) values ($1)", - user_id, - ) - - async def confirm_password_recovery(self, token: str, new_password: str) -> int: - async with self.db_pool.acquire() as conn: - async with conn.transaction(): - row = await conn.fetchrow( - "select user_id, valid_until from pending_password_recovery where token = $1", - token, - ) - user_id = row["user_id"] - valid_until = row["valid_until"] - if valid_until is None or valid_until < datetime.now(tz=timezone.utc): - raise PermissionError - - await conn.execute( - "delete from pending_password_recovery where user_id = $1", user_id - ) - hashed_password = self._hash_password(password=new_password) - await conn.execute( - "update usr set hashed_password = $2 where id = $1", - user_id, - hashed_password, - ) - - return user_id + @with_db_transaction + async def confirm_email_change(self, *, conn: Connection, token: str) -> int: + row = 
await conn.fetchrow( + "select user_id, new_email, valid_until from pending_email_change where token = $1", + token, + ) + user_id = row["user_id"] + valid_until = row["valid_until"] + if valid_until is None or valid_until < datetime.now(tz=timezone.utc): + raise PermissionError + + await conn.execute( + "delete from pending_email_change where user_id = $1", user_id + ) + await conn.execute( + "update usr set email = $2 where id = $1", user_id, row["new_email"] + ) + + return user_id + + @with_db_transaction + async def request_password_recovery(self, *, conn: Connection, email: str): + user_id = await conn.fetchval("select id from usr where email = $1", email) + if not user_id: + raise PermissionError + + await conn.execute( + "insert into pending_password_recovery (user_id) values ($1)", + user_id, + ) + + @with_db_transaction + async def confirm_password_recovery( + self, *, conn: Connection, token: str, new_password: str + ) -> int: + row = await conn.fetchrow( + "select user_id, valid_until from pending_password_recovery where token = $1", + token, + ) + user_id = row["user_id"] + valid_until = row["valid_until"] + if valid_until is None or valid_until < datetime.now(tz=timezone.utc): + raise PermissionError + + await conn.execute( + "delete from pending_password_recovery where user_id = $1", user_id + ) + hashed_password = self._hash_password(password=new_password) + await conn.execute( + "update usr set hashed_password = $2 where id = $1", + user_id, + hashed_password, + ) + + return user_id diff --git a/abrechnung/config.py b/abrechnung/config.py index e93ff579..b9dfd84e 100644 --- a/abrechnung/config.py +++ b/abrechnung/config.py @@ -5,6 +5,8 @@ import yaml from pydantic import BaseModel +from abrechnung.framework.database import DatabaseConfig + class ServiceConfig(BaseModel): url: str @@ -33,14 +35,6 @@ class RegistrationConfig(BaseModel): valid_email_domains: Optional[List[str]] = None -class DatabaseConfig(BaseModel): - user: Optional[str] = None - 
password: Optional[str] = None - dbname: str - host: Optional[str] = None - port: Optional[int] = 5432 - - class EmailConfig(BaseModel): class AuthConfig(BaseModel): username: str diff --git a/abrechnung/core/__init__.py b/abrechnung/core/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/abrechnung/core/auth.py b/abrechnung/core/auth.py new file mode 100644 index 00000000..f449b9ef --- /dev/null +++ b/abrechnung/core/auth.py @@ -0,0 +1,46 @@ +from .errors import NotFoundError +from abrechnung.framework.database import Connection +from abrechnung.domain.users import User + + +async def check_group_permissions( + conn: Connection, + group_id: int, + user: User, + is_owner: bool = False, + can_write: bool = False, +) -> tuple[bool, bool]: + membership = await conn.fetchrow( + "select is_owner, can_write from group_membership where group_id = $1 and user_id = $2", + group_id, + user.id, + ) + if membership is None: + raise NotFoundError(f"group not found") + + if can_write and not (membership["is_owner"] or membership["can_write"]): + raise PermissionError(f"write access to group denied") + + if is_owner and not membership["is_owner"]: + raise PermissionError(f"owner access to group denied") + + return membership["can_write"], membership["is_owner"] + + +async def create_group_log( + conn: Connection, + group_id: int, + user: User, + type: str, + message: str | None = None, + affected_user_id: int | None = None, +): + await conn.execute( + "insert into group_log (group_id, user_id, type, message, affected) " + "values ($1, $2, $3, $4, $5)", + group_id, + user.id, + type, + "" if message is None else message, + affected_user_id, + ) diff --git a/abrechnung/core/errors.py b/abrechnung/core/errors.py new file mode 100644 index 00000000..2f3bc2fc --- /dev/null +++ b/abrechnung/core/errors.py @@ -0,0 +1,6 @@ +class NotFoundError(Exception): + pass + + +class InvalidCommand(Exception): + pass diff --git a/abrechnung/core/service.py 
b/abrechnung/core/service.py new file mode 100644 index 00000000..32e81d2b --- /dev/null +++ b/abrechnung/core/service.py @@ -0,0 +1,9 @@ +import asyncpg + +from abrechnung.config import Config + + +class Service: + def __init__(self, db_pool: asyncpg.Pool, config: Config): + self.db_pool = db_pool + self.cfg = config diff --git a/abrechnung/database/cli.py b/abrechnung/database/cli.py index 0ca68b49..b756cd23 100644 --- a/abrechnung/database/cli.py +++ b/abrechnung/database/cli.py @@ -1,16 +1,11 @@ -import contextlib import logging -import os -import shutil -import tempfile from asyncpg.pool import Pool from abrechnung import subcommand -from abrechnung import util from abrechnung.config import Config -from . import revisions -from .database import create_db_pool +from abrechnung.framework.database import create_db_pool, psql_attach +from .migrations import reset_schema, apply_revisions logger = logging.getLogger(__name__) @@ -45,67 +40,19 @@ async def _clean(self, db_pool: Pool): ")" ) - async def _attach(self): - with contextlib.ExitStack() as exitstack: - env = dict(os.environ) - env["PGDATABASE"] = self.config.database.dbname - - if self.config.database.user is None: - if self.config.database.host is not None: - raise ValueError("database user is None, but host is set") - if self.config.database.password is not None: - raise ValueError("database user is None, " "but password is set") - else: - - def escape_colon(str): - return str.replace("\\", "\\\\").replace(":", "\\:") - - passfile = exitstack.enter_context(tempfile.NamedTemporaryFile("w")) - os.chmod(passfile.name, 0o600) - - passfile.write( - ":".join( - [ - escape_colon(self.config.database.host), - "*", - escape_colon(self.config.database.dbname), - escape_colon(self.config.database.user), - escape_colon(self.config.database.password), - ] - ) - ) - passfile.write("\n") - passfile.flush() - - env["PGHOST"] = self.config.database.host - env["PGUSER"] = self.config.database.user - env["PGPASSFILE"] = 
passfile.name - - command = ["psql", "--variable", "ON_ERROR_STOP=1"] - if shutil.which("pgcli") is not None: - # if pgcli is installed, use that instead! - command = ["pgcli"] - - cwd = os.path.join(os.path.dirname(__file__), "revisions") - ret = await util.run_as_fg_process(command, env=env, cwd=cwd) - - if ret != 0: - print(util.format_error("psql failed")) - return ret - async def run(self): """ CLI entry point """ if self.action == "attach": - return await self._attach() + return await psql_attach(self.config.database) - db_pool = await create_db_pool(self.config) + db_pool = await create_db_pool(self.config.database) if self.action == "migrate": - await revisions.apply_revisions(db_pool=db_pool) + await apply_revisions(db_pool=db_pool) elif self.action == "rebuild": - await revisions.reset_schema(db_pool=db_pool) - await revisions.apply_revisions(db_pool=db_pool) + await reset_schema(db_pool=db_pool) + await apply_revisions(db_pool=db_pool) elif self.action == "clean": await self._clean(db_pool=db_pool) diff --git a/abrechnung/database/code/0001_views.sql b/abrechnung/database/code/0001_views.sql new file mode 100644 index 00000000..75fad09e --- /dev/null +++ b/abrechnung/database/code/0001_views.sql @@ -0,0 +1,415 @@ +create view clearing_account_shares_as_json(revision_id, account_id, n_shares, involved_accounts, shares) as + SELECT + cas.revision_id, + cas.account_id, + sum(cas.shares) AS n_shares, + array_agg(cas.share_account_id) AS involved_accounts, + jsonb_agg(cas.*) AS shares + FROM + clearing_account_share cas + GROUP BY + cas.revision_id, cas.account_id; + +create view creditor_shares_as_json(revision_id, transaction_id, n_shares, involved_accounts, shares) as + SELECT + cs.revision_id, + cs.transaction_id, + sum(cs.shares) AS n_shares, + array_agg(cs.account_id) AS involved_accounts, + jsonb_agg(cs.*) AS shares + FROM + creditor_share cs + GROUP BY + cs.revision_id, cs.transaction_id; + +create view debitor_shares_as_json(revision_id, 
transaction_id, n_shares, involved_accounts, shares) as + SELECT + ds.revision_id, + ds.transaction_id, + sum(ds.shares) AS n_shares, + array_agg(ds.account_id) AS involved_accounts, + jsonb_agg(ds.*) AS shares + FROM + debitor_share ds + GROUP BY + ds.revision_id, ds.transaction_id; + +create view purchase_item_usages_as_json(revision_id, item_id, n_usages, involved_accounts, usages) as + SELECT + piu.revision_id, + piu.item_id, + sum(piu.share_amount) AS n_usages, + array_agg(piu.account_id) AS involved_accounts, + jsonb_agg(piu.*) AS usages + FROM + purchase_item_usage piu + GROUP BY + piu.revision_id, piu.item_id; + +create view transaction_tags (transaction_id, revision_id, tag_names) as + select + ttt.transaction_id, + ttt.revision_id, + array_agg(tag.name) AS tag_names + from + transaction_to_tag ttt + join tag on ttt.tag_id = tag.id + group by + ttt.transaction_id, ttt.revision_id; + +create or replace view account_tags (account_id, revision_id, tag_names) as + select + att.account_id, + att.revision_id, + array_agg(tag.name) AS tag_names + from + account_to_tag att + join tag on att.tag_id = tag.id + group by + att.account_id, att.revision_id; + +create or replace view aggregated_committed_account_history as + ( + select + sub.revision_id, + sub.account_id, + sub.user_id, + sub.group_id, + sub.type, + sub.started as revision_started, + sub.committed as revision_committed, + sub.last_changed as last_changed, + first_value(sub.description) over outer_window as description, + first_value(sub.name) over outer_window as name, + first_value(sub.owning_user_id) over outer_window as owning_user_id, + first_value(sub.date_info) over outer_window as date_info, + first_value(sub.deleted) over outer_window as deleted, + first_value(sub.n_clearing_shares) over outer_window as n_clearing_shares, + first_value(sub.clearing_shares) over outer_window as clearing_shares, + first_value(sub.involved_accounts) over outer_window as involved_accounts, + first_value(sub.tags) 
over outer_window as tags + from + ( + select + ar.id as revision_id, + ar.account_id, + ar.user_id, + ar.started, + ar.committed, + ar.last_changed, + a.group_id, + a.type, + count(a.id) over wnd as id_partition, + ah.name, + ah.description, + ah.owning_user_id, + ah.date_info, + ah.deleted, + coalesce(cas.n_shares, 0) as n_clearing_shares, + coalesce(cas.shares, '[]'::jsonb) as clearing_shares, + coalesce(cas.involved_accounts, array []::int[]) as involved_accounts, + coalesce(t.tag_names, array []::varchar(255)[]) as tags + from + account_revision ar + join account a on a.id = ar.account_id + left join account_history ah on ah.id = a.id and ar.id = ah.revision_id + left join clearing_account_shares_as_json cas on a.id = cas.account_id and ar.id = cas.revision_id + left join account_tags t on a.id = t.account_id and ar.id = t.revision_id + where + ar.committed is not null window wnd as (partition by a.id order by committed asc) + ) as sub window outer_window as (partition by sub.account_id, sub.id_partition order by sub.revision_id) ); + +create or replace function committed_account_state_valid_at( + valid_at timestamptz = now() +) + returns table ( + account_id int, + revision_id bigint, + type text, + changed_by int, + group_id int, + revision_started timestamptz, + revision_committed timestamptz, + last_changed timestamptz, + name text, + description text, + owning_user_id int, + date_info date, + deleted bool, + n_clearing_shares int, + clearing_shares jsonb, + involved_accounts int[], + tags varchar(255)[] + ) +as +$$ +select distinct on (acah.account_id) + acah.account_id, + acah.revision_id, + acah.type, + acah.user_id, + acah.group_id, + acah.revision_started, + acah.revision_committed, + acah.last_changed, + acah.name, + acah.description, + acah.owning_user_id, + acah.date_info, + acah.deleted, + acah.n_clearing_shares, + acah.clearing_shares, + acah.involved_accounts, + acah.tags +from + aggregated_committed_account_history acah +where + 
acah.revision_committed <= committed_account_state_valid_at.valid_at +order by + acah.account_id, acah.revision_committed desc +$$ language sql + security invoker + stable; + + +create or replace view aggregated_pending_account_history as + ( + select + ar.account_id, + ar.id as revision_id, + ar.user_id as changed_by, + ar.started as revision_started, + ar.last_changed as last_changed, + a.group_id, + a.type, + ah.name, + ah.description, + ah.owning_user_id, + ah.date_info, + ah.deleted, + coalesce(cas.n_shares, 0) as n_clearing_shares, + coalesce(cas.shares, '[]'::jsonb) as clearing_shares, + coalesce(cas.involved_accounts, array []::int[]) as involved_accounts, + coalesce(t.tag_names, array []::varchar(255)[]) as tags + from + account_revision ar + join account a on ar.account_id = a.id + join account_history ah on a.id = ah.id and ar.id = ah.revision_id + left join clearing_account_shares_as_json cas on a.id = cas.account_id and ar.id = cas.revision_id + left join account_tags t on a.id = t.account_id and ar.id = t.revision_id + where + ar.committed is null ); + +create or replace view aggregated_pending_transaction_position_history as + SELECT + tr.id AS revision_id, + tr.transaction_id, + tr.user_id AS changed_by, + tr.started AS revision_started, + tr.last_changed AS last_changed, + pi.id AS item_id, + pih.name, + pih.price, + pih.communist_shares, + pih.deleted, + coalesce(piu.n_usages, 0::double precision) AS n_usages, + coalesce(piu.usages, '[]'::jsonb) AS usages, + coalesce(piu.involved_accounts, array []::integer[]) AS involved_accounts + FROM + transaction_revision tr + JOIN purchase_item pi ON tr.transaction_id = pi.transaction_id + JOIN purchase_item_history pih ON pih.id = pi.id AND tr.id = pih.revision_id + LEFT JOIN purchase_item_usages_as_json piu ON pi.id = piu.item_id AND tr.id = piu.revision_id + WHERE + tr.committed IS NULL; + +create or replace view aggregated_pending_transaction_history as + SELECT + tr.id AS revision_id, + 
tr.transaction_id, + tr.user_id AS changed_by, + tr.started AS revision_started, + tr.last_changed AS last_changed, + t.group_id, + t.type, + th.value, + th.currency_symbol, + th.currency_conversion_rate, + th.name, + th.description, + th.billed_at, + th.deleted, + coalesce(csaj.n_shares, 0::double precision) AS n_creditor_shares, + coalesce(csaj.shares, '[]'::jsonb) AS creditor_shares, + coalesce(dsaj.n_shares, 0::double precision) AS n_debitor_shares, + coalesce(dsaj.shares, '[]'::jsonb) AS debitor_shares, + coalesce(dsaj.involved_accounts, ARRAY []::integer[]) AS involved_accounts, + coalesce(tt.tag_names, array []::varchar(255)[]) as tags + FROM + transaction_revision tr + JOIN transaction t ON tr.transaction_id = t.id + JOIN transaction_history th ON t.id = th.id AND tr.id = th.revision_id + LEFT JOIN creditor_shares_as_json csaj ON t.id = csaj.transaction_id AND tr.id = csaj.revision_id + LEFT JOIN debitor_shares_as_json dsaj ON t.id = dsaj.transaction_id AND tr.id = dsaj.revision_id + left join transaction_tags tt on tt.transaction_id = t.id and tt.revision_id = tr.id + WHERE + tr.committed IS NULL; + +create or replace view aggregated_pending_file_history as + SELECT + tr.id AS revision_id, + tr.transaction_id, + tr.user_id AS changed_by, + tr.started AS revision_started, + tr.last_changed AS last_changed, + f.id AS file_id, + fh.filename, + blob.mime_type, + fh.blob_id, + fh.deleted + FROM + transaction_revision tr + JOIN file f ON tr.transaction_id = f.transaction_id + JOIN file_history fh ON fh.id = f.id AND tr.id = fh.revision_id + LEFT JOIN blob ON blob.id = fh.blob_id + WHERE + tr.committed IS NULL; + +create or replace view aggregated_committed_transaction_position_history as + SELECT + sub.revision_id, + sub.transaction_id, + sub.item_id, + sub.user_id, + sub.started AS revision_started, + sub.committed AS revision_committed, + sub.last_changed AS last_changed, + first_value(sub.name) OVER outer_window AS name, + first_value(sub.price) OVER 
outer_window AS price, + first_value(sub.communist_shares) OVER outer_window AS communist_shares, + first_value(sub.deleted) OVER outer_window AS deleted, + first_value(sub.n_usages) OVER outer_window AS n_usages, + first_value(sub.usages) OVER outer_window AS usages, + first_value(sub.involved_accounts) OVER outer_window AS involved_accounts + FROM + ( + SELECT + tr.id AS revision_id, + tr.transaction_id, + tr.user_id, + tr.started, + tr.committed, + tr.last_changed, + pi.id AS item_id, + count(pi.id) OVER wnd AS id_partition, + pih.name, + pih.price, + pih.communist_shares, + pih.deleted, + COALESCE(piu.n_usages, 0::double precision) AS n_usages, + COALESCE(piu.usages, '[]'::jsonb) AS usages, + COALESCE(piu.involved_accounts, ARRAY []::integer[]) AS involved_accounts + FROM + transaction_revision tr + JOIN purchase_item pi ON tr.transaction_id = pi.transaction_id + LEFT JOIN purchase_item_history pih ON pih.id = pi.id AND tr.id = pih.revision_id + LEFT JOIN purchase_item_usages_as_json piu ON pi.id = piu.item_id AND tr.id = piu.revision_id + WHERE + tr.committed IS NOT NULL WINDOW wnd AS (PARTITION BY pi.id ORDER BY tr.committed) + ) sub WINDOW outer_window AS (PARTITION BY sub.item_id, sub.id_partition ORDER BY sub.revision_id); + +create or replace view aggregated_committed_transaction_history as + SELECT + sub.revision_id, + sub.transaction_id, + sub.user_id, + sub.group_id, + sub.started AS revision_started, + sub.committed AS revision_committed, + sub.last_changed AS last_changed, + sub.type, + first_value(sub.value) OVER outer_window AS value, + first_value(sub.name) OVER outer_window AS name, + first_value(sub.description) OVER outer_window AS description, + first_value(sub.currency_symbol) OVER outer_window AS currency_symbol, + first_value(sub.currency_conversion_rate) OVER outer_window AS currency_conversion_rate, + first_value(sub.billed_at) OVER outer_window AS billed_at, + first_value(sub.deleted) OVER outer_window AS deleted, + 
first_value(sub.n_creditor_shares) OVER outer_window AS n_creditor_shares, + first_value(sub.creditor_shares) OVER outer_window AS creditor_shares, + first_value(sub.n_debitor_shares) OVER outer_window AS n_debitor_shares, + first_value(sub.debitor_shares) OVER outer_window AS debitor_shares, + first_value(sub.involved_accounts) OVER outer_window AS involved_accounts, + first_value(sub.tags) over outer_window as tags + FROM + ( + SELECT + tr.id AS revision_id, + tr.transaction_id, + tr.user_id, + tr.started, + tr.committed, + tr.last_changed, + t.group_id, + t.type, + count(th.id) OVER wnd AS id_partition, + th.value, + th.currency_symbol, + th.currency_conversion_rate, + th.name, + th.description, + th.billed_at, + th.deleted, + COALESCE(csaj.n_shares, 0::double precision) AS n_creditor_shares, + COALESCE(csaj.shares, '[]'::jsonb) AS creditor_shares, + COALESCE(dsaj.n_shares, 0::double precision) AS n_debitor_shares, + COALESCE(dsaj.shares, '[]'::jsonb) AS debitor_shares, + coalesce(csaj.involved_accounts, array[]::int[]) || coalesce(dsaj.involved_accounts, array[]::int[]) as involved_accounts, + coalesce(tt.tag_names, array []::varchar(255)[]) as tags + FROM + transaction_revision tr + JOIN transaction t ON tr.transaction_id = t.id + LEFT JOIN transaction_history th ON t.id = th.id AND tr.id = th.revision_id + LEFT JOIN creditor_shares_as_json csaj ON t.id = csaj.transaction_id AND tr.id = csaj.revision_id + LEFT JOIN debitor_shares_as_json dsaj ON t.id = dsaj.transaction_id AND tr.id = dsaj.revision_id + left join transaction_tags tt on tt.transaction_id = t.id and tt.revision_id = tr.id + WHERE + tr.committed IS NOT NULL WINDOW wnd AS (PARTITION BY tr.transaction_id ORDER BY tr.committed) + ) sub WINDOW outer_window AS (PARTITION BY sub.transaction_id, sub.id_partition ORDER BY sub.revision_id); + +create or replace view aggregated_committed_file_history as + SELECT + sub.revision_id, + sub.transaction_id, + sub.file_id, + sub.user_id, + sub.started AS 
revision_started, + sub.committed AS revision_committed, + sub.last_changed AS last_changed, + first_value(sub.filename) OVER outer_window AS filename, + first_value(sub.mime_type) OVER outer_window AS mime_type, + first_value(sub.blob_id) OVER outer_window AS blob_id, + first_value(sub.deleted) OVER outer_window AS deleted + FROM + ( + SELECT + tr.id AS revision_id, + tr.transaction_id, + tr.user_id, + tr.started, + tr.committed, + tr.last_changed, + f.id AS file_id, + count(f.id) OVER wnd AS id_partition, + fh.filename, + blob.mime_type, + fh.blob_id, + fh.deleted + FROM + transaction_revision tr + JOIN file f ON tr.transaction_id = f.transaction_id + LEFT JOIN file_history fh ON fh.id = f.id AND tr.id = fh.revision_id + LEFT JOIN blob ON blob.id = fh.blob_id + WHERE + tr.committed IS NOT NULL WINDOW wnd AS (PARTITION BY f.id ORDER BY tr.committed) + ) sub WINDOW outer_window AS (PARTITION BY sub.file_id, sub.id_partition ORDER BY sub.revision_id); diff --git a/abrechnung/database/code/0002_triggers.sql b/abrechnung/database/code/0002_triggers.sql new file mode 100644 index 00000000..224e0a3a --- /dev/null +++ b/abrechnung/database/code/0002_triggers.sql @@ -0,0 +1,540 @@ + +-- notify the mailer service on inserts or updates in the above tables +create or replace function pending_registration_updated() returns trigger as +$$ +begin + perform pg_notify('mailer', 'pending_registration'); + + return null; +end; +$$ language plpgsql; + +create trigger pending_registration_trig + after insert or update + on pending_registration + for each row +execute function pending_registration_updated(); + +create or replace function pending_password_recovery_updated() returns trigger as +$$ +begin + perform pg_notify('mailer', 'pending_password_recovery'); + + return null; +end; +$$ language plpgsql; + +create trigger pending_password_recovery_trig + after insert or update + on pending_password_recovery + for each row +execute function pending_password_recovery_updated(); + 
+create or replace function pending_email_change_updated() returns trigger as +$$ +begin + perform pg_notify('mailer', 'pending_email_change'); + + return null; +end; +$$ language plpgsql; + +create trigger pending_email_change_trig + after insert or update + on pending_email_change + for each row +execute function pending_email_change_updated(); + +create or replace function group_updated() returns trigger as +$$ +begin + if NEW is null then -- we cannot infer the group memberships after a group has been deleted + return NULL; + end if; + + call notify_group('group', NEW.id, NEW.id::bigint, json_build_object('element_id', NEW.id)); + return NULL; +end; +$$ language plpgsql; + +create trigger group_update_trig + after insert or update + on grp + for each row +execute function group_updated(); + +create or replace function group_deleted() returns trigger as +$$ +<> declare + user_info record; +begin + for user_info in select + user_id + from + group_membership gm + where + gm.group_id = OLD.id loop + call notify_user('group', user_info.user_id, user_info.user_id::bigint, json_build_object('element_id', user_info.user_id)); + end loop; + + return OLD; +end; +$$ language plpgsql; + +create trigger group_delete_trig + before delete + on grp + for each row +execute function group_deleted(); + +-- notifications for changes in sessions +create or replace function session_updated() returns trigger as +$$ +begin + if NEW is null then + call notify_user('session', OLD.user_id, OLD.user_id::bigint, + json_build_object('element_id', OLD.user_id, 'session_id', OLD.id)); + else + call notify_user('session', NEW.user_id, NEW.user_id::bigint, + json_build_object('element_id', NEW.user_id, 'session_id', NEW.id)); + end if; + return NULL; +end; +$$ language plpgsql; + +create trigger session_update_trig + after insert or update or delete + on session + for each row +execute function session_updated(); + +-- notifications for changes in group memberships +create or replace function 
group_membership_updated() returns trigger as +$$ +begin + if NEW is null then + call notify_group('group_member', OLD.group_id, OLD.group_id::bigint, + json_build_object('element_id', OLD.group_id, 'user_id', OLD.user_id)); + else + call notify_user('group', NEW.user_id, NEW.user_id::bigint, + json_build_object('element_id', NEW.user_id, 'group_id', NEW.group_id)); + call notify_group('group_member', NEW.group_id, NEW.group_id::bigint, + json_build_object('element_id', NEW.group_id, 'user_id', NEW.user_id)); + end if; + + return NULL; +end; +$$ language plpgsql; + +create trigger group_membership_update_trig + after insert or update or delete + on group_membership + for each row +execute function group_membership_updated(); + +-- notifications for changes in group invites +create or replace function group_invite_updated() returns trigger as +$$ +begin + -- raise 'notifying group invite for element id % and group_id %', NEW.id, NEW.group_id; + if NEW is null then + call notify_group('group_invite', OLD.group_id, OLD.group_id::bigint, + json_build_object('element_id', OLD.group_id, 'invite_id', OLD.id)); + else + call notify_group('group_invite', NEW.group_id, NEW.group_id::bigint, + json_build_object('element_id', NEW.group_id, 'invite_id', NEW.id)); + end if; + return NULL; +end; +$$ language plpgsql; + +create trigger group_invite_update_trig + after insert or update or delete + on group_invite + for each row +execute function group_invite_updated(); + +-- notifications for changes in a users profile details +create or replace function user_updated() returns trigger as +$$ +begin + -- raise 'notifying group invite for element id % and group_id %', NEW.id, NEW.group_id; + if NEW is null then + call notify_user('user', OLD.id, OLD.id::bigint, + json_build_object('element_id', OLD.id)); + else + call notify_user('user', NEW.id, NEW.id::bigint, + json_build_object('element_id', NEW.id)); + end if; + return NULL; +end; +$$ language plpgsql; + +create trigger 
user_update_trig + after update + on usr + for each row +execute function user_updated(); + +-- notifications for changes in group logs entries +create or replace function group_log_updated() returns trigger as +$$ +begin + -- raise 'notifying group invite for element id % and group_id %', NEW.id, NEW.group_id; + if NEW is null then + call notify_group('group_log', OLD.group_id, OLD.group_id::bigint, + json_build_object('element_id', OLD.group_id, 'log-id', OLD.id)); + else + call notify_group('group_log', NEW.group_id, NEW.group_id::bigint, + json_build_object('element_id', NEW.group_id, 'log_id', NEW.id)); + end if; + return NULL; +end; +$$ language plpgsql; + +create trigger group_log_update_trig + after insert or update or delete + on group_log + for each row +execute function group_log_updated(); + +create or replace function transaction_history_updated() returns trigger as +$$ +<> declare + group_id grp.id%TYPE; + transaction_id integer; +begin + -- A deletion should not be possible therefore NEW should never be NULL + if NEW is null then + select + transaction.group_id, + transaction.id + into locals.group_id, locals.transaction_id + from + transaction + where + transaction.id = OLD.id; + else + select + transaction.group_id, + transaction.id + into locals.group_id, locals.transaction_id + from + transaction + where + transaction.id = NEW.id; + end if; + + call notify_group('transaction', locals.group_id, locals.group_id::bigint, + json_build_object('element_id', locals.group_id, 'transaction_id', locals.transaction_id)); + return NULL; +end; +$$ language plpgsql; + +create trigger transaction_history_update_trig + after insert or update or delete + on transaction_history + for each row +execute function transaction_history_updated(); + +create or replace function transaction_share_updated() returns trigger as +$$ +<> declare + group_id grp.id%TYPE; + transaction_id integer; +begin + -- A deletion should not be possible therefore NEW should never be NULL + 
if NEW is null then + select + transaction.group_id, + transaction.id + into locals.group_id, locals.transaction_id + from + transaction + join transaction_history th on transaction.id = th.id + where th.revision_id = OLD.revision_id and th.id = OLD.transaction_id; + else + select + transaction.group_id, + transaction.id + into locals.group_id, locals.transaction_id + from + transaction + join transaction_history th on transaction.id = th.id + where th.revision_id = NEW.revision_id and th.id = NEW.transaction_id; + end if; + + call notify_group('transaction', locals.group_id, locals.group_id::bigint, + json_build_object('element_id', locals.group_id, 'transaction_id', locals.transaction_id)); + return NULL; +end; +$$ language plpgsql; + +create trigger creditor_share_trig + after insert or update or delete + on creditor_share + for each row +execute function transaction_share_updated(); + +create trigger debitor_share_trig + after insert or update or delete + on debitor_share + for each row +execute function transaction_share_updated(); + +create or replace function purchase_item_updated() returns trigger as +$$ +<> declare + group_id grp.id%TYPE; + transaction_id integer; +begin + -- A deletion should not be possible therefore NEW should never be NULL + if NEW is null then return null; end if; + + select + transaction.group_id, + transaction.id + into locals.group_id, locals.transaction_id + from + transaction + join purchase_item pi on transaction.id = pi.transaction_id + where + pi.id = NEW.id; + + call notify_group('transaction', locals.group_id, locals.group_id::bigint, + json_build_object('element_id', locals.group_id, 'transaction_id', locals.transaction_id)); + return null; +end; +$$ language plpgsql; + +create trigger purchase_item_trig + after insert or update or delete + on purchase_item_history + for each row +execute function purchase_item_updated(); + +create or replace function purchase_item_usage_updated() returns trigger as +$$ +<> declare + 
group_id grp.id%TYPE; + transaction_id integer; +begin + if NEW is null then + select + transaction.group_id, + transaction.id + into locals.group_id, locals.transaction_id + from + transaction + join purchase_item pi on transaction.id = pi.transaction_id + where + pi.id = OLD.item_id; + else + select + transaction.group_id, + transaction.id + into locals.group_id, locals.transaction_id + from + transaction + join purchase_item pi on transaction.id = pi.transaction_id + where + pi.id = NEW.item_id; + end if; + + + call notify_group('transaction', locals.group_id, locals.group_id::bigint, + json_build_object('element_id', locals.group_id, 'transaction_id', locals.transaction_id)); + return null; +end; +$$ language plpgsql; + +create trigger purchase_item_usage_trig + after insert or update or delete + on purchase_item_usage + for each row +execute function purchase_item_usage_updated(); + +create or replace function update_last_changed() returns trigger as +$$ +begin + NEW.last_changed = now(); + return NEW; +end; +$$ language plpgsql; + +create or replace function update_related_transaction_last_changed() returns trigger as +$$ +begin + update transaction_revision set last_changed = now() where id = NEW.revision_id; + return null; +end; +$$ language plpgsql; + +create or replace function update_related_account_last_changed() returns trigger as +$$ +begin + update account_revision set last_changed = now() where id = NEW.revision_id; + return null; +end; +$$ language plpgsql; + +create trigger account_revision_last_change_update_trig + after insert or update + on account_revision + for each row +execute function update_last_changed(); + +create trigger transaction_revision_last_change_update_trig + after insert or update + on transaction_revision + for each row +execute function update_last_changed(); + +create trigger transaction_history_last_changed_update_trig + after insert or update + on transaction_history + for each row +execute function 
update_related_transaction_last_changed(); + +create trigger purchase_item_last_changed_update_trig + after insert or update + on purchase_item_history + for each row +execute function update_related_transaction_last_changed(); + +create trigger account_last_changed_update_trig + after insert or update + on account_history + for each row +execute function update_related_account_last_changed(); + +create or replace function transaction_revision_updated() returns trigger as +$$ +<> declare + group_id grp.id%TYPE; + transaction_id integer; +begin + select + t.group_id, + t.id + into locals.group_id, locals.transaction_id + from + transaction t + where t.id = (case when NEW is null then OLD.transaction_id else NEW.transaction_id end); + + -- A deletion should only be able to occur for uncommitted revisions + if NEW is null then + call notify_user('transaction', OLD.user_id, locals.group_id::bigint, + json_build_object('element_id', locals.group_id, 'transaction_id', locals.transaction_id, 'revision_started', OLD.started, 'revision_version', OLD.version, 'revision_committed', OLD.committed, 'deleted', true)); + elseif NEW.committed is null then + call notify_user('transaction', NEW.user_id, locals.group_id::bigint, + json_build_object('element_id', locals.group_id, 'transaction_id', locals.transaction_id, 'revision_started', NEW.started, 'revision_version', NEW.version, 'revision_committed', NEW.committed, 'deleted', false)); + else + call notify_group('transaction', locals.group_id, locals.group_id::bigint, + json_build_object('element_id', locals.group_id, 'transaction_id', locals.transaction_id, 'revision_started', NEW.started, 'revision_version', NEW.version, 'revision_committed', NEW.committed, 'deleted', false)); + end if; + + return null; +end; +$$ language plpgsql; + +create trigger transaction_revision_trig + after insert or update or delete + on transaction_revision + for each row +execute function transaction_revision_updated(); + +create or replace function 
account_history_updated() returns trigger as +$$ +<> declare + group_id grp.id%TYPE; + account_id integer; +begin + -- A deletion should not be possible therefore NEW should never be NULL + if NEW is null then + select account.group_id, account.id + into locals.group_id, locals.account_id + from account + where account.id = OLD.id; + else + select account.group_id, account.id + into locals.group_id, locals.account_id + from account + where account.id = NEW.id; + end if; + + call notify_group('account', locals.group_id, locals.group_id::bigint, + json_build_object('element_id', locals.group_id, 'account_id', locals.account_id)); + return NULL; +end; +$$ language plpgsql; + +create trigger account_history_update_trig + after insert or update or delete + on account_history + for each row +execute function account_history_updated(); + +create or replace function account_revision_updated() returns trigger as +$$ +<> declare + group_id grp.id%TYPE; + account_id integer; +begin + select + a.group_id, + a.id + into locals.group_id, locals.account_id + from + account a + where a.id = (case when NEW is null then OLD.account_id else NEW.account_id end); + + -- A deletion should only be able to occur for uncommitted revisions + if NEW is null then + call notify_user('account', OLD.user_id, locals.group_id::bigint, + json_build_object('element_id', locals.group_id, 'account_id', locals.account_id, 'revision_started', OLD.started, 'revision_version', OLD.version, 'revision_committed', OLD.committed, 'deleted', true)); + elseif NEW.committed is null then + call notify_user('account', NEW.user_id, locals.group_id::bigint, + json_build_object('element_id', locals.group_id, 'account_id', locals.account_id, 'revision_started', NEW.started, 'revision_version', NEW.version, 'revision_committed', NEW.committed, 'deleted', false)); + else + call notify_group('account', locals.group_id, locals.group_id::bigint, + json_build_object('element_id', locals.group_id, 'account_id', 
locals.account_id, 'revision_started', NEW.started, 'revision_version', NEW.version, 'revision_committed', NEW.committed, 'deleted', false)); + end if; + + return null; +end; +$$ language plpgsql; + +create trigger account_revision_trig + after insert or update or delete + on account_revision + for each row +execute function account_revision_updated(); + +create or replace function file_history_updated() returns trigger as +$$ +<> declare + group_id grp.id%TYPE; + transaction_id integer; +begin + select + transaction.group_id, + transaction.id + into locals.group_id, locals.transaction_id + from + transaction + join file f on transaction.id = f.transaction_id + where + f.id = (case when NEW is null then OLD.id else NEW.id end); + + call notify_group('transaction', locals.group_id, locals.group_id::bigint, + json_build_object('element_id', locals.group_id, 'transaction_id', locals.transaction_id)); + return null; +end; +$$ language plpgsql; + +create trigger file_history_trig + after insert or update or delete + on file_history + for each row +execute function file_history_updated(); diff --git a/abrechnung/database/code/0003_constraints.sql b/abrechnung/database/code/0003_constraints.sql new file mode 100644 index 00000000..bf49717d --- /dev/null +++ b/abrechnung/database/code/0003_constraints.sql @@ -0,0 +1,465 @@ +-- a share that a transaction's debitor has in the transaction value +-- see the transaction_type documentation on what this means for the particular +-- transaction types. +-- transactions can only be evaluated if the sum of their debitor shares is > 0. 
-- Validate debitor shares for a transaction revision.
-- "transfer" type transactions may carry at most one debitor share; the
-- check counts the *other* debitor shares of the same (transaction, revision)
-- and raises if adding this one would exceed that limit.
-- FIX: the PL/pgSQL block label had been garbled to "<>"; it must be
-- "<<locals>>" since the body qualifies variables as locals.*.
create or replace function check_debitor_shares(
    transaction_id integer,
    revision_id bigint,
    account_id integer
) returns boolean as
$$
<<locals>> declare
    is_valid boolean;
begin
    -- all debitor shares of this revision except the row being checked
    with relevant_entries as (
        select *
        from
            debitor_share cs
        where
            cs.transaction_id = check_debitor_shares.transaction_id
            and cs.revision_id = check_debitor_shares.revision_id
            and cs.account_id != check_debitor_shares.account_id
    )
    select
        not (t.type in ('transfer') and cs_counts.share_count >= 1)
    into locals.is_valid
    from
        transaction t
        join (
            select
                cs.transaction_id,
                cs.revision_id,
                count(*) as share_count
            from
                relevant_entries cs
            group by cs.transaction_id, cs.revision_id
        ) cs_counts on cs_counts.transaction_id = t.id;

    if not locals.is_valid then raise '"transfer" type transactions can only have one debitor share'; end if;

    return locals.is_valid;
end
$$ language plpgsql;

alter table debitor_share add constraint check_debitor_share_gt_zero check ( shares > 0 );
alter table debitor_share add constraint check_debitor_shares
    check (check_debitor_shares(transaction_id, revision_id, account_id));

-- a share that a transaction's creditor has in the transaction value
-- see the transaction_type documentation on what this means for the particular
-- transaction types.
-- transactions can only be evaluated if the sum of their creditor shares is > 0.
-- Validate creditor shares for a transaction revision.
-- "purchase" and "transfer" type transactions may carry at most one creditor
-- share; the check counts the *other* creditor shares of the same
-- (transaction, revision) and raises if adding this one would exceed that.
-- FIX: the PL/pgSQL block label had been garbled to "<>"; it must be
-- "<<locals>>" since the body qualifies variables as locals.*.
create or replace function check_creditor_shares(
    transaction_id integer,
    revision_id bigint,
    account_id integer
) returns boolean as
$$
<<locals>> declare
    is_valid boolean;
begin
    -- all creditor shares of this revision except the row being checked
    with relevant_entries as (
        select *
        from
            creditor_share cs
        where
            cs.transaction_id = check_creditor_shares.transaction_id
            and cs.revision_id = check_creditor_shares.revision_id
            and cs.account_id != check_creditor_shares.account_id
    )
    select
        not (t.type in ('purchase', 'transfer') and cs_counts.share_count >= 1)
    into locals.is_valid
    from
        transaction t
        join (
            select
                cs.transaction_id,
                cs.revision_id,
                count(*) as share_count
            from
                relevant_entries cs
            group by cs.transaction_id, cs.revision_id
        ) cs_counts on cs_counts.transaction_id = t.id;

    if not locals.is_valid then
        raise '"purchase" and "transfer" type transactions can only have one creditor share';
    end if;

    return locals.is_valid;
end
$$ language plpgsql;

alter table creditor_share add constraint check_creditor_share_gt_zero check ( shares > 0 );
alter table creditor_share add constraint check_creditor_shares
    check (check_creditor_shares(transaction_id, revision_id, account_id));

-- Validate a transaction revision at commit time: reject conflicting
-- concurrent commits and enforce per-type share-count invariants.
-- (Body continues past this span; same <<locals>> label fix applied.)
create or replace function check_committed_transactions(
    revision_id bigint,
    transaction_id integer,
    started timestamptz,
    committed timestamptz
) returns boolean as
$$
<<locals>> declare
    n_creditor_shares integer;
    n_debitor_shares integer;
    transaction_type text;
    transaction_deleted boolean;
begin
    if committed is null then return true; end if;

    -- detect another revision committed within our edit window -> conflict
    perform
    from
        transaction_revision tr
    where
        tr.transaction_id = check_committed_transactions.transaction_id
        and tr.id != check_committed_transactions.revision_id
        and tr.committed between check_committed_transactions.started and check_committed_transactions.committed;

    if found then raise 'another change was committed earlier, committing is not possible due to conflicts'; end if;

    select
        t.type,
        th.deleted
    into
locals.transaction_type, locals.transaction_deleted + from + transaction_history th + join transaction t on t.id = th.id + where + th.revision_id = check_committed_transactions.revision_id; + + if locals.transaction_deleted then -- if the transaction is deleted we simply accept anything as we dont care + return true; + end if; + + select + count(cs.account_id) + into locals.n_creditor_shares + from + creditor_share cs + where + cs.transaction_id = check_committed_transactions.transaction_id + and cs.revision_id = check_committed_transactions.revision_id; + + select + count(ds.account_id) + into locals.n_debitor_shares + from + debitor_share ds + where + ds.transaction_id = check_committed_transactions.transaction_id + and ds.revision_id = check_committed_transactions.revision_id; + + -- check that the number of shares fits the transaction type + if locals.transaction_type = 'transfer' then + if locals.n_creditor_shares != 1 then + raise '"transfer" type transactions must have exactly one creditor share % %', locals.n_creditor_shares, locals.n_debitor_shares; + end if; + + if locals.n_debitor_shares != 1 then + raise '"transfer" type transactions must have exactly one debitor share'; + end if; + end if; + + if locals.transaction_type = 'purchase' then + if locals.n_creditor_shares != 1 then + raise '"purchase" type transactions must have exactly one creditor share'; + end if; + if locals.n_debitor_shares < 1 then + raise '"purchase" type transactions must have at least one debitor share'; + end if; + + -- check that all purchase items have at least an item share or communist shares > 0 + -- i.e. 
we look for a purchase item at the current revision that has sum(usages) + communist_shares <= 0 + -- if such a one is found we raise an exception + perform from purchase_item pi + join purchase_item_history pih on pi.id = pih.id + left join purchase_item_usage piu on pih.revision_id = piu.revision_id and pi.id = piu.item_id + where pih.revision_id = check_committed_transactions.revision_id + and pi.transaction_id = check_committed_transactions.transaction_id + and not pih.deleted + group by pi.id + having sum(coalesce(piu.share_amount, 0) + pih.communist_shares) <= 0; + + if found then + raise 'all transaction positions must have at least one account assigned or their common shares set greater than 0'; + end if; + end if; + + if locals.transaction_type = 'mimo' then + if locals.n_creditor_shares < 1 then + raise '"mimo" type transactions must have at least one creditor share'; + end if; + if locals.n_debitor_shares < 1 then + raise '"mimo" type transactions must have at least one debitor share'; + end if; + end if; + + return true; +end +$$ language plpgsql; + +create or replace function check_transaction_revisions_change_per_user( + transaction_id integer, + user_id integer, + committed timestamp with time zone +) returns boolean +as +$$ +<> declare + n_uncommitted int; +begin + if committed is not null then return true; end if; + + select count(*) into locals.n_uncommitted + from + transaction_revision tr + where + tr.transaction_id = check_transaction_revisions_change_per_user.transaction_id + and tr.user_id = check_transaction_revisions_change_per_user.user_id + and tr.committed is null; + + if locals.n_uncommitted > 1 then raise 'users can only have one pending change per transaction'; end if; + + return true; +end +$$ language plpgsql; + +alter table transaction_revision add constraint check_committed_transactions + check (check_committed_transactions(id, transaction_id, started, committed)); +alter table transaction_revision add constraint 
check_transaction_revisions_change_per_user + check (check_transaction_revisions_change_per_user(transaction_id, user_id, committed)); + +alter table purchase_item_history add constraint check_purchase_item_communist_shares_gte_zero check (communist_shares >= 0); +alter table purchase_item_usage add constraint check_purchase_item_usage_shares_gt_zero check (share_amount > 0); + +create or replace function check_committed_accounts( + revision_id bigint, account_id integer, started timestamp with time zone, committed timestamp with time zone +) returns boolean + language plpgsql as +$$ +<> declare + n_tags int; + n_clearing_shares int; + group_id int; + account_type text; + date_info date; +begin + if committed is null then return true; end if; + + perform + from + account_revision ar + where + ar.account_id = check_committed_accounts.account_id + and ar.id != check_committed_accounts.revision_id + and ar.committed between check_committed_accounts.started and check_committed_accounts.committed; + + if found then raise 'another change was committed earlier, committing is not possible due to conflicts'; end if; + + select + a.type, + a.group_id + into locals.account_type, locals.group_id + from + account a + where + a.id = check_committed_accounts.account_id; + + select + count(cas.share_account_id) + into locals.n_clearing_shares + from + clearing_account_share cas + where + cas.account_id = check_committed_accounts.account_id + and cas.revision_id = check_committed_accounts.revision_id; + + select + count(*) + into locals.n_tags + from + account_to_tag att + where + att.account_id = check_committed_accounts.account_id + and att.revision_id = check_committed_accounts.revision_id; + + select + ah.date_info + into locals.date_info + from + account_history ah + where + ah.id = check_committed_accounts.account_id + and ah.revision_id = check_committed_accounts.revision_id; + + if locals.account_type = 'personal' then + if locals.n_clearing_shares != 0 then + raise 
'"personal" type accounts cannot have associated settlement distribution shares'; + end if; + if locals.date_info is not null then + raise '"personal" type accounts cannot have a date set'; + end if; + if locals.n_tags != 0 then + raise '"personal" type accounts cannot have tags'; + end if; + elsif locals.account_type = 'clearing' then + if locals.date_info is null then + raise '"clearing" type accounts must have a date set'; + end if; + end if; + + return true; +end +$$; + +create or replace function check_account_revisions_change_per_user( + account_id integer, + user_id integer, + committed timestamp with time zone +) returns boolean +as +$$ +<> declare + n_uncommitted int; +begin + if committed is not null then return true; end if; + + select count(*) into locals.n_uncommitted + from + account_revision ar + where + ar.account_id = check_account_revisions_change_per_user.account_id + and ar.user_id = check_account_revisions_change_per_user.user_id + and ar.committed is null; + + if locals.n_uncommitted > 1 then raise 'users can only have one pending change per account'; end if; + + return true; +end +$$ language plpgsql; + +create or replace function check_committed_accounts( + revision_id bigint, + account_id integer, + started timestamptz, + committed timestamptz +) returns boolean as +$$ +<> declare + n_clearing_shares int; + group_id int; + account_type text; + account_deleted boolean; +begin + if committed is null then return true; end if; + + perform + from + account_revision ar + where + ar.account_id = check_committed_accounts.account_id + and ar.id != check_committed_accounts.revision_id + and ar.committed between check_committed_accounts.started and check_committed_accounts.committed; + + if found then raise 'another change was committed earlier, committing is not possible due to conflicts'; end if; + + select + a.type, + ah.deleted, + a.group_id + into locals.account_type, locals.account_deleted, locals.group_id + from + account a + left join 
account_history ah on a.id = ah.id and ah.revision_id = check_committed_accounts.revision_id + where a.id = check_committed_accounts.account_id; + + select + count(cas.share_account_id) + into locals.n_clearing_shares + from + clearing_account_share cas + where + cas.account_id = check_committed_accounts.account_id + and cas.revision_id = check_committed_accounts.revision_id; + + if locals.account_type = 'personal' then + if locals.n_clearing_shares != 0 then + raise '"personal" type accounts cannot have associated settlement distribution shares'; + end if; + end if; + + return true; +end +$$ language plpgsql; + +alter table account_revision add constraint check_committed_accounts + check (check_committed_accounts(id, account_id, started, committed)); +alter table account_revision add constraint check_account_revisions_change_per_user + check (check_account_revisions_change_per_user(account_id, user_id, committed)); + +create or replace function check_clearing_accounts_for_cyclic_dependencies( + revision_id bigint, + account_id integer, + committed timestamptz +) returns boolean as +$$ +<> declare + group_id int; + account_type text; + + n_clearing_shares int; + + cycle_path int[]; +begin + if committed is null then return true; end if; + + select + a.type, + a.group_id + into locals.account_type, locals.group_id + from + account a + where a.id = check_clearing_accounts_for_cyclic_dependencies.account_id; + + select + count(cas.share_account_id) + into locals.n_clearing_shares + from + clearing_account_share cas + where + cas.account_id = check_clearing_accounts_for_cyclic_dependencies.account_id + and cas.revision_id = check_clearing_accounts_for_cyclic_dependencies.revision_id; + + -- now for the juicy part - check if we have circular dependencies in clearing account relations + with recursive search_graph(account_id, share_account_id, depth, path, cycle) as ( + select shares.account_id, shares.share_account_id, 1, array[shares.account_id], false + from 
clearing_account_share shares + where shares.revision_id = check_clearing_accounts_for_cyclic_dependencies.revision_id + union all + select shares.account_id, shares.share_account_id, sg.depth + 1, sg.path || shares.account_id, shares.account_id = any(sg.path) + from clearing_account_share shares + join account a on shares.account_id = a.id + join search_graph sg on sg.share_account_id = shares.account_id and not sg.cycle + where a.group_id = locals.group_id -- slight optimization for runtime + ) + select path into locals.cycle_path from search_graph where cycle limit 1; + -- TODO: good error message and print out all resulting cycles + if found then + raise 'this change would result in a cyclic dependency between clearing accounts: %', locals.cycle_path; + end if; + + return true; +end +$$ language plpgsql; + +alter table account_revision add constraint account_revision_check_cyclic + check (check_clearing_accounts_for_cyclic_dependencies(id, account_id, committed)); + +alter table account_history add constraint name_not_empty check ( name <> '' ); +alter table transaction_history add constraint description_not_empty check ( description <> '' ); +alter table grp add constraint name_not_empty check ( name <> '' ); +alter table purchase_item_history add constraint name_not_empty check ( name <> '' ); + +alter table file_history add constraint filename_not_empty check (filename <> ''); diff --git a/abrechnung/database/code/0004_functions.sql b/abrechnung/database/code/0004_functions.sql new file mode 100644 index 00000000..32750243 --- /dev/null +++ b/abrechnung/database/code/0004_functions.sql @@ -0,0 +1,598 @@ +-- creates a row in the forwarder table (if not exists) +-- listens to channel +-- returns channel_id (use as f"channel{channel_id}" when listening!) 
+create or replace function forwarder_boot(
+    id text, out channel_id bigint
+) as
+$$
+<<locals>> declare
+    channel_number forwarder.channel_id%type;
+begin
+    -- check if this forwarder is already connected
+    select forwarder.channel_id into locals.channel_number from forwarder where forwarder.id = forwarder_boot.id;
+
+    -- either register the new forwarder
+    if locals.channel_number is null then
+        insert into forwarder (
+            id
+        )
+        values (
+            forwarder_boot.id
+        )
+        returning forwarder.channel_id into locals.channel_number;
+    else -- or get rid of potential old entries of a re-booted forwarder
+        -- (these are left over if a forwarder crashes)
+        delete
+        from
+            connection
+        where
+            connection.channel_id in (
+                select forwarder.channel_id from forwarder where forwarder.id = forwarder_boot.id
+            );
+    end if;
+
+    forwarder_boot.channel_id := locals.channel_number;
+end
+$$ language plpgsql;
+
+-- to be called by a forwarder whenever a new client connects to it via websocket.
+-- creates a row in the connection table,
+-- so we know under what channel this client is reachable.
+-- returns connection_id +create or replace function client_connected( + channel_id integer, out connection_id bigint +) as +$$ +begin + insert into connection ( + channel_id + ) + values ( + client_connected.channel_id + ) + returning id into client_connected.connection_id; +end +$$ language plpgsql; + +-- to be called by a forwarder whenever a websocket connection is closed +-- deletes the row in the connection table +-- raises bad-connection-id if the connection has not existed +create or replace procedure client_disconnected( + connection_id bigint +) as +$$ +begin + delete from connection where connection.id = client_disconnected.connection_id; + + if not found then raise exception 'bad-connection-id:no connection with the given connection id'; end if; + + -- ON DELETE CASCADE actions will take care of cleaning up the connection and subscriptions +end +$$ language plpgsql; + +-- to be called by a forwarder when it shuts down +-- deletes all associated channels +-- returns the number of channels that were terminated +create or replace function forwarder_stop( + id text, out deleted_connections integer +) returns integer as +$$ +begin + delete + from + connection + where + connection.channel_id in ( + select channel_id + from forwarder + where forwarder.id = forwarder_stop.id + ); + + get diagnostics forwarder_stop.deleted_connections = row_count; + + delete from forwarder where forwarder.id = forwarder_stop.id; + + -- ON DELETE CASCADE actions will take care of cleaning up the connections and subscriptions +end +$$ language plpgsql; + +-- sends a notification with 'data' to the given clients +create or replace procedure notify_connections( + connection_ids bigint[], event text, data json +) as +$$ +<> declare + forwarder_info record; +begin + for forwarder_info in select + concat('channel', connection.channel_id) as channel_name, + array_agg(connection.id) as connections + from + connection + where + connection.id = any (notify_connections.connection_ids) + 
group by connection.channel_id loop + perform pg_notify(forwarder_info.channel_name, + json_build_object('connections', forwarder_info.connections, 'event', event, 'data', + data)::text); + end loop; +end +$$ language plpgsql; + +-- sends a notification with 'data' to the calling client +-- allows clients to test the notification system +create or replace procedure notify_me( + connection_id bigint, event text, data json +) as +$$ +begin + call notify_connections(ARRAY [connection_id], event, data); +end +$$ language plpgsql; + +-- functions for event subscription management +-- called by the client so it receives change-events + +-- subscribe user of given token to notifications +create or replace procedure subscribe( + connection_id bigint, + user_id integer, + subscription_type text, + element_id bigint +) as +$$ +begin + if subscribe.element_id is null then raise exception 'invalid element_id value'; end if; + + -- type-specific permission/value checks + if subscribe.subscription_type = 'test' then + -- only allow element_id == user_id + if subscribe.element_id != subscribe.user_id then raise 'test requires correct element_id'; end if; + + elseif subscribe.subscription_type = 'user' then + -- but the element we watch has to be the user id + if subscribe.element_id != subscribe.user_id then raise 'element_id not logged in user user'; end if; + + elseif subscribe.subscription_type = 'group' then + if subscribe.element_id != subscribe.user_id then + raise 'bad-subscription:group: element_id not token user'; + end if; -- Rewrite this since practically every notification apart from a few is group based such that we normally perform + + -- the group auth instead of the normal auth + elseif subscribe.subscription_type in ('account', 'group_member', 'group_invite', 'group_log', 'transaction') then + perform + from group_membership gm + where gm.user_id = subscribe.user_id and gm.group_id = subscribe.element_id::integer; + if not found then + raise 'user % tried to 
subscribe to changes in a group without being a member', subscribe.user_id; + end if; + + else + raise exception 'unknown subscription type'; + end if; + + insert into subscription ( + connection_id, user_id, subscription_type, element_id + ) + values ( + subscribe.connection_id, + subscribe.user_id, + subscribe.subscription_type, + subscribe.element_id + ) + on conflict on constraint subscription_conn_type_elem do update set user_id = subscribe.user_id; +end +$$ language plpgsql; + +-- unsubscribe user of given token to notifications +-- if the user could create the subscription on the connection_id, +-- he can also remove it. +create or replace procedure unsubscribe( + connection_id bigint, + user_id integer, + subscription_type text, + element_id bigint +) as +$$ +begin + delete + from + subscription + where + subscription.connection_id = unsubscribe.connection_id + and subscription.subscription_type = unsubscribe.subscription_type + and subscription.element_id = unsubscribe.element_id + and subscription.user_id = unsubscribe.user_id; +end +$$ language plpgsql; + +-- deliver a notification of given type +-- to all subscribers +create or replace procedure notify_user( + subscription_type text, + user_id subscription.user_id%TYPE, + element_id subscription.element_id%TYPE, + data json +) as +$$ +<> declare + connections bigint[]; +begin + -- this query directly uses the subscription_deliver_idx + select + array_agg(connection_id) + into locals.connections + from + subscription + where + subscription.subscription_type = notify_user.subscription_type + and notify_user.user_id = subscription.user_id + and notify_user.element_id = subscription.element_id; + + call notify_connections(locals.connections, notify_user.subscription_type::text, notify_user.data); +end; +$$ language plpgsql; + +create or replace procedure notify_users( + subscription_type text, + user_ids int[], + element_id subscription.element_id%TYPE, + data json +) as +$$ +<> declare + connections 
bigint[]; +begin + -- this query directly uses the subscription_deliver_idx + select + array_agg(connection_id) + into locals.connections + from + subscription + where + subscription.subscription_type = notify_users.subscription_type + and subscription.user_id = any (notify_users.user_ids) + and notify_users.element_id = subscription.element_id; + + call notify_connections(locals.connections, notify_users.subscription_type::text, notify_users.data); +end; +$$ language plpgsql; + +-- deliver a notification of given type to all users of a group +create or replace procedure notify_group( + subscription_type text, + group_id grp.id%TYPE, + element_id subscription.element_id%TYPE, + data json +) as +$$ +<> declare + user_ids int[]; +begin + select + array_agg(gm.user_id) + into locals.user_ids + from + group_membership gm + where + gm.group_id = notify_group.group_id + group by gm.group_id; + + if locals.user_ids is null then return; end if; + + call notify_users(notify_group.subscription_type, locals.user_ids, notify_group.element_id, notify_group.data); +end; +$$ language plpgsql; + +create or replace function full_account_state_valid_at( + seen_by_user integer, valid_at timestamp with time zone DEFAULT now() +) + returns TABLE ( + account_id integer, + type text, + group_id integer, + last_changed timestamptz, + is_wip boolean, + committed_details jsonb, + pending_details jsonb + ) + stable + language sql +as +$$ +select + a.id as account_id, + a.type, + a.group_id, + greatest(committed_details.last_changed, pending_details.last_changed) as last_changed, + exists(select + 1 + from + account_revision ar + where + ar.account_id = a.id + and ar.user_id = full_account_state_valid_at.seen_by_user + and ar.committed is null) as is_wip, + committed_details.json_state as committed_details, + pending_details.json_state as pending_details +from + account a + left join ( + select + casa.account_id, + jsonb_agg(casa) as json_state, + max(casa.last_changed) as last_changed + from 
+ committed_account_state_valid_at(full_account_state_valid_at.valid_at) casa + group by casa.account_id + ) committed_details on a.id = committed_details.account_id + left join ( + select + apah.account_id, + jsonb_agg(apah) as json_state, + max(apah.last_changed) as last_changed + from + aggregated_pending_account_history apah + where + apah.changed_by = full_account_state_valid_at.seen_by_user + group by apah.account_id + ) pending_details on a.id = pending_details.account_id +where + committed_details.json_state is not null + or pending_details.json_state is not null +$$; + +create or replace function committed_file_state_valid_at( + valid_at timestamp with time zone DEFAULT now() +) + returns TABLE ( + file_id integer, + revision_id bigint, + transaction_id integer, + changed_by integer, + revision_started timestamptz, + revision_committed timestamptz, + last_changed timestamptz, + filename text, + mime_type text, + blob_id integer, + deleted boolean + ) + stable + language sql +as +$$ +select distinct on (file_id) + file_id, + revision_id, + transaction_id, + user_id as changed_by, + revision_started, + revision_committed, + last_changed, + filename, + mime_type, + blob_id, + deleted +from + aggregated_committed_file_history +where + revision_committed <= committed_file_state_valid_at.valid_at + and filename is not null +order by + file_id, revision_committed desc +$$; + +create or replace function committed_transaction_position_state_valid_at( + valid_at timestamp with time zone DEFAULT now() +) + returns TABLE ( + item_id integer, + revision_id bigint, + transaction_id integer, + changed_by integer, + revision_started timestamptz, + revision_committed timestamptz, + last_changed timestamptz, + name text, + price double precision, + communist_shares double precision, + deleted boolean, + n_usages integer, + usages jsonb, + involved_accounts integer[] + ) + stable + language sql +as +$$ +select distinct on (acph.item_id) + acph.item_id, + acph.revision_id, + 
acph.transaction_id, + acph.user_id as changed_by, + acph.revision_started, + acph.revision_committed, + acph.last_changed, + acph.name, + acph.price, + acph.communist_shares, + acph.deleted, + acph.n_usages, + acph.usages, + acph.involved_accounts +from + aggregated_committed_transaction_position_history acph +where + acph.revision_committed <= committed_transaction_position_state_valid_at.valid_at + and acph.name is not null +order by + acph.item_id, acph.revision_committed desc +$$; + +create or replace function committed_transaction_state_valid_at( + valid_at timestamp with time zone DEFAULT now() +) + returns TABLE ( + revision_id bigint, + transaction_id integer, + changed_by integer, + revision_started timestamptz, + revision_committed timestamptz, + last_changed timestamptz, + group_id integer, + type text, + value double precision, + currency_symbol text, + currency_conversion_rate double precision, + name text, + description text, + billed_at date, + deleted boolean, + n_creditor_shares integer, + creditor_shares jsonb, + n_debitor_shares integer, + debitor_shares jsonb, + involved_accounts integer[], + tags varchar(255)[] + ) + stable + language sql +as +$$ +select distinct on (acth.transaction_id) + acth.revision_id, + acth.transaction_id, + acth.user_id as changed_by, + acth.revision_started, + acth.revision_committed, + acth.last_changed, + acth.group_id, + acth.type, + acth.value, + acth.currency_symbol, + acth.currency_conversion_rate, + acth.name, + acth.description, + acth.billed_at, + acth.deleted, + acth.n_creditor_shares, + acth.creditor_shares, + acth.n_debitor_shares, + acth.debitor_shares, + acth.involved_accounts, + acth.tags +from + aggregated_committed_transaction_history acth +where + acth.revision_committed <= committed_transaction_state_valid_at.valid_at +order by + acth.transaction_id, acth.revision_committed desc +$$; + +create or replace function full_transaction_state_valid_at( + seen_by_user integer, valid_at timestamp with time 
zone DEFAULT now() +) + returns TABLE ( + transaction_id integer, + type text, + group_id integer, + last_changed timestamp with time zone, + is_wip boolean, + committed_details jsonb, + pending_details jsonb, + committed_positions jsonb, + pending_positions jsonb, + committed_files jsonb, + pending_files jsonb + ) + stable + language sql +as +$$ +select + t.id as transaction_id, + t.type, + t.group_id, + greatest(committed_details.last_changed, committed_positions.last_changed, committed_files.last_changed, + pending_details.last_changed, pending_positions.last_changed, pending_files.last_changed) as last_changed, + exists(select + 1 + from + transaction_revision tr + where + tr.transaction_id = t.id + and tr.user_id = full_transaction_state_valid_at.seen_by_user + and tr.committed is null) as is_wip, + committed_details.json_state as committed_details, + pending_details.json_state as pending_details, + committed_positions.json_state as committed_positions, + pending_positions.json_state as pending_positions, + committed_files.json_state as committed_files, + pending_files.json_state as pending_files +from + transaction t + left join ( + select + ctsa.transaction_id, + jsonb_agg(ctsa) as json_state, + max(ctsa.last_changed) as last_changed + from + committed_transaction_state_valid_at(full_transaction_state_valid_at.valid_at) ctsa + group by ctsa.transaction_id + ) committed_details on t.id = committed_details.transaction_id + left join ( + select + apth.transaction_id, + jsonb_agg(apth) as json_state, + max(apth.last_changed) as last_changed + from + aggregated_pending_transaction_history apth + where + apth.changed_by = full_transaction_state_valid_at.seen_by_user + group by apth.transaction_id + ) pending_details on t.id = pending_details.transaction_id + left join ( + select + ctpsa.transaction_id, + jsonb_agg(ctpsa) as json_state, + max(ctpsa.last_changed) as last_changed + from + 
committed_transaction_position_state_valid_at(full_transaction_state_valid_at.valid_at) ctpsa + group by ctpsa.transaction_id + ) committed_positions on t.id = committed_positions.transaction_id + left join ( + select + aptph.transaction_id, + jsonb_agg(aptph) as json_state, + max(aptph.last_changed) as last_changed + from + aggregated_pending_transaction_position_history aptph + where + aptph.changed_by = full_transaction_state_valid_at.seen_by_user + group by aptph.transaction_id + ) pending_positions on t.id = pending_positions.transaction_id + left join ( + select + cfsva.transaction_id, + jsonb_agg(cfsva) as json_state, + max(cfsva.last_changed) as last_changed + from + committed_file_state_valid_at(full_transaction_state_valid_at.valid_at) cfsva + group by cfsva.transaction_id + ) committed_files on t.id = committed_files.transaction_id + left join ( + select + apfh.transaction_id, + jsonb_agg(apfh) as json_state, + max(apfh.last_changed) as last_changed + from + aggregated_pending_file_history apfh + where + apfh.changed_by = full_transaction_state_valid_at.seen_by_user + group by apfh.transaction_id + ) pending_files on t.id = pending_files.transaction_id +where + committed_details.json_state is not null + or pending_details.json_state is not null +$$; diff --git a/abrechnung/database/database.py b/abrechnung/database/database.py deleted file mode 100644 index 6d6a8c30..00000000 --- a/abrechnung/database/database.py +++ /dev/null @@ -1,57 +0,0 @@ -import logging -import os -from typing import Union - -import asyncpg -from asyncpg import Connection -from asyncpg.pool import Pool - -from abrechnung.config import Config - -logger = logging.getLogger(__name__) - - -def _make_connection_args(config: Config) -> dict: - args: dict[str, Union[int, str, None]] = dict() - args["user"] = config.database.user - args["password"] = config.database.password - args["host"] = config.database.host - args["port"] = config.database.port - args["database"] = 
config.database.dbname - - # since marshmallow can't model a "one arg implies all"-relation, we need to warn here - if not config.database.host or os.path.isdir(config.database.host): - if config.database.user or config.database.password: - logger.warning( - "Username and/or password specified but no remote host therefore using socket " - "authentication. I am ignoring these settings since we don't need them." - ) - del args["user"] - del args["password"] - if os.getenv("PGHOST") or os.getenv("PGPORT"): - # asyncpg can read the PGHOST env variable. We don't want that. - logger.warning( - "We do not support setting the PGHOST or PGPORT environment variable and therefore will " - "ignore it. Consider specifying the hostname in the config file." - ) - - if os.environ.get("PGHOST"): - del os.environ["PGHOST"] - if os.environ.get("PGPORT"): - del os.environ["PGPORT"] - return args - - -async def create_db_pool(config: Config) -> Pool: - # username: str, password: str, database: str, host: str, port: int = 5432 - """ - get a connection pool to the database - """ - pool_args = _make_connection_args(config) - pool_args["max_size"] = 100 - return await asyncpg.create_pool(**pool_args) - - -async def create_db_connection(config: Config) -> Connection: - connection_args = _make_connection_args(config) - return await asyncpg.connect(**connection_args) diff --git a/abrechnung/database/migrations.py b/abrechnung/database/migrations.py new file mode 100644 index 00000000..c40b0650 --- /dev/null +++ b/abrechnung/database/migrations.py @@ -0,0 +1,47 @@ +from pathlib import Path + +import asyncpg + +from abrechnung.framework.database import ( + SchemaRevision, + apply_revisions as framework_apply_revisions, +) + +REVISION_TABLE = "schema_revision" +REVISION_PATH = Path(__file__).parent / "revisions" +DB_CODE_PATH = Path(__file__).parent / "code" +CURRENT_REVISION = "" + + +def list_revisions(): + revisions = SchemaRevision.revisions_from_dir(REVISION_PATH) + for revision in 
revisions: + print( + f"Revision: {revision.version}, requires revision: {revision.requires}, filename: {revision.file_name}" + ) + + +async def check_revision_version(db_pool: asyncpg.Pool): + curr_revision = await db_pool.fetchval( + f"select version from {REVISION_TABLE} limit 1" + ) + if curr_revision != CURRENT_REVISION: + raise RuntimeError( + f"Invalid database revision, expected {CURRENT_REVISION}, database is at revision {curr_revision}" + ) + + +async def reset_schema(db_pool: asyncpg.Pool): + async with db_pool.acquire() as conn: + async with conn.transaction(): + await conn.execute("drop schema public cascade") + await conn.execute("create schema public") + + +async def apply_revisions(db_pool: asyncpg.Pool, until_revision: str | None = None): + await framework_apply_revisions( + db_pool=db_pool, + revision_path=REVISION_PATH, + code_path=DB_CODE_PATH, + until_revision=until_revision, + ) diff --git a/abrechnung/database/revisions/0001_initial_schema.sql b/abrechnung/database/revisions/0001_initial_schema.sql index 43b02550..9a74c81b 100644 --- a/abrechnung/database/revisions/0001_initial_schema.sql +++ b/abrechnung/database/revisions/0001_initial_schema.sql @@ -71,55 +71,6 @@ create table if not exists pending_email_change ( mail_next_attempt timestamptz default now() ); --- notify the mailer service on inserts or updates in the above tables -create or replace function pending_registration_updated() returns trigger as -$$ -begin - perform pg_notify('mailer', 'pending_registration'); - - return null; -end; -$$ language plpgsql; - -drop trigger if exists pending_registration_trig on pending_registration; -create trigger pending_registration_trig - after insert or update - on pending_registration - for each row -execute function pending_registration_updated(); - -create or replace function pending_password_recovery_updated() returns trigger as -$$ -begin - perform pg_notify('mailer', 'pending_password_recovery'); - - return null; -end; -$$ language 
plpgsql; - -drop trigger if exists pending_password_recovery_trig on pending_password_recovery; -create trigger pending_password_recovery_trig - after insert or update - on pending_password_recovery - for each row -execute function pending_password_recovery_updated(); - -create or replace function pending_email_change_updated() returns trigger as -$$ -begin - perform pg_notify('mailer', 'pending_email_change'); - - return null; -end; -$$ language plpgsql; - -drop trigger if exists pending_email_change_trig on pending_email_change; -create trigger pending_email_change_trig - after insert or update - on pending_email_change - for each row -execute function pending_email_change_updated(); - -- tracking of login sessions -- authtokens authenticate users directly -- sessions can persist indefinitely and are typically bound to a certain client/device @@ -295,54 +246,6 @@ create table if not exists account ( type text not null references account_type (name) ); -create or replace function check_committed_accounts( - revision_id bigint, - account_id integer, - started timestamptz, - committed timestamptz -) returns boolean as -$$ -begin - if committed is null then return true; end if; - - perform - from - account_revision ar - where - ar.account_id = check_committed_accounts.account_id - and ar.id != check_committed_accounts.revision_id - and ar.committed between check_committed_accounts.started and check_committed_accounts.committed; - - if found then raise 'another change was committed earlier, committing is not possible due to conflicts'; end if; - - return true; -end -$$ language plpgsql; - -create or replace function check_account_revisions_change_per_user( - account_id integer, - user_id integer, - committed timestamptz -) returns boolean as -$$ -<> declare -begin - if committed is not null then return true; end if; - - perform - from - account_revision ar - where - ar.account_id = check_account_revisions_change_per_user.account_id - and ar.user_id = 
check_account_revisions_change_per_user.user_id - and ar.committed is null; - - if found then raise 'users can only have one pending change per account'; end if; - - return true; -end -$$ language plpgsql; - create table if not exists account_revision ( id bigserial primary key, @@ -352,10 +255,7 @@ create table if not exists account_revision ( account_id integer references account (id) on delete cascade, started timestamptz not null default now(), - committed timestamptz default null, - - check (check_committed_accounts(id, account_id, started, committed)), - check (check_account_revisions_change_per_user(account_id, user_id, committed)) + committed timestamptz default null ); create table if not exists account_history ( @@ -372,26 +272,6 @@ create table if not exists account_history ( deleted bool not null default false ); -create or replace view latest_account as - select distinct on (account.id, gm.user_id) - account.id as id, - account.type as type, - account.group_id as group_id, - first_value(history.revision_id) over wnd as revision_id, - first_value(history.deleted) over wnd as deleted, - first_value(history.name) over wnd as name, - first_value(history.description) over wnd as description, - first_value(history.priority) over wnd as priority, - gm.user_id as user_id - from - account_history history - join account on account.id = history.id - join account_revision r on r.id = history.revision_id - join group_membership gm on account.group_id = gm.group_id - where - ((r.committed is null and r.user_id = gm.user_id) or - r.committed is not null) window wnd as ( partition by account.id, gm.user_id order by r.committed desc nulls first ); - -- a regular 'purchase' transaction, where multiple people purchased -- things, and one person paid the balance. 
-- must have exactly one creditor share (the person who paid) @@ -446,132 +326,6 @@ create table if not exists transaction ( type text references transaction_type (name) not null ); --- every data and history entry in a group references a change as a foreign key. --- entries that have just been added, but not yet committed, reference a change --- where the committed timestamp is null; --- these uncommitted changes are only visible if a user explicitly requests --- to see them. --- uncommitted changes are created when users start to change a group, --- and receive a 'committed' timestamp when the user clicks 'commit'. --- changes that have been committed can no longer be modified. -create or replace function check_committed_transactions( - revision_id bigint, - transaction_id integer, - started timestamptz, - committed timestamptz -) returns boolean as -$$ -<> declare - n_creditor_shares integer; - n_debitor_shares integer; - transaction_type text; - transaction_deleted boolean; -begin - if committed is null then return true; end if; - - perform - from - transaction_revision tr - where - tr.transaction_id = check_committed_transactions.transaction_id - and tr.id != check_committed_transactions.revision_id - and tr.committed between check_committed_transactions.started and check_committed_transactions.committed; - - if found then raise 'another change was committed earlier, committing is not possible due to conflicts'; end if; - - select - t.type, - th.deleted - into locals.transaction_type, locals.transaction_deleted - from - transaction_history th - join transaction t on t.id = th.id - where - th.revision_id = check_committed_transactions.revision_id; - - select - count(cs.account_id) - into locals.n_creditor_shares - from - creditor_share cs - where - cs.transaction_id = check_committed_transactions.transaction_id - and cs.revision_id = check_committed_transactions.revision_id; - - select - count(ds.account_id) - into locals.n_debitor_shares - from - debitor_share 
ds - where - ds.transaction_id = check_committed_transactions.transaction_id - and ds.revision_id = check_committed_transactions.revision_id; - - -- check that the number of shares fits the transaction type and that deleted transactions have 0 shares. - if locals.transaction_deleted then - if locals.n_creditor_shares = 0 and locals.n_debitor_shares = 0 then - return true; - else - raise 'deleted transaction cannot have any associated creditor or debitor shares'; - end if; - end if; - - if locals.transaction_type = 'transfer' then - if locals.n_creditor_shares != 1 then - raise '"transfer" type transactions must have exactly one creditor share % %', locals.n_creditor_shares, locals.n_debitor_shares; - end if; - - if locals.n_debitor_shares != 1 then - raise '"transfer" type transactions must have exactly one debitor share'; - end if; - end if; - - if locals.transaction_type = 'purchase' then - if locals.n_creditor_shares != 1 then - raise '"purchase" type transactions must have exactly one creditor share'; - end if; - if locals.n_debitor_shares < 1 then - raise '"purchase" type transactions must have at least one debitor share'; - end if; - end if; - - if locals.transaction_type = 'mimo' then - if locals.n_creditor_shares < 1 then - raise '"mimo" type transactions must have at least one creditor share'; - end if; - if locals.n_debitor_shares < 1 then - raise '"mimo" type transactions must have at least one debitor share'; - end if; - end if; - - return true; -end -$$ language plpgsql; - -create or replace function check_transaction_revisions_change_per_user( - transaction_id integer, - user_id integer, - committed timestamptz -) returns boolean as -$$ -<> declare -begin - if committed is not null then return true; end if; - - perform - from - transaction_revision tr - where - tr.transaction_id = check_transaction_revisions_change_per_user.transaction_id - and tr.user_id = check_transaction_revisions_change_per_user.user_id - and tr.committed is null; - - if found 
then raise 'users can only have one pending change per transaction'; end if; - - return true; -end -$$ language plpgsql; - create table if not exists transaction_revision ( id bigserial primary key, @@ -580,10 +334,7 @@ create table if not exists transaction_revision ( transaction_id integer not null references transaction (id) on delete cascade, started timestamptz not null default now(), - committed timestamptz default null, - - check (check_committed_transactions(id, transaction_id, started, committed)), - check (check_transaction_revisions_change_per_user(transaction_id, user_id, committed)) + committed timestamptz default null ); create table if not exists transaction_history ( @@ -596,7 +347,7 @@ create table if not exists transaction_history ( -- calculating group account balances. currency_conversion_rate double precision not null, -- total value of the transaction, in the transaction currency - value double precision not null check ( value > 0 ), + value double precision not null, billed_at date not null, @@ -606,50 +357,6 @@ create table if not exists transaction_history ( deleted bool not null default false ); --- a share that a transaction's creditor has in the transaction value --- see the transaction_type documentation on what this means for the particular --- transaction types. --- transactions can only be evaluated if the sum of their creditor shares is > 0. 
-create or replace function check_creditor_shares( - transaction_id integer, - revision_id bigint, - account_id integer -) returns boolean as -$$ -<> declare - is_valid boolean; -begin - with relevant_entries as ( - select * - from - creditor_share cs - where - cs.transaction_id = check_creditor_shares.transaction_id - and cs.revision_id = check_creditor_shares.revision_id - and cs.account_id != check_creditor_shares.account_id - ) - select - not (t.type in ('purchase', 'transfer') and cs_counts.share_count >= 1) - into locals.is_valid - from - transaction t - join ( - select - cs.transaction_id, - cs.revision_id, - count(*) as share_count - from - relevant_entries cs - group by cs.transaction_id, cs.revision_id - ) cs_counts on cs_counts.transaction_id = t.id; - - if not locals.is_valid then - raise '"purchase" and "transfer" type transactions can only have one creditor share'; - end if; - - return locals.is_valid; -end -$$ language plpgsql; create table if not exists creditor_share ( transaction_id integer references transaction (id) on delete cascade, @@ -660,53 +367,9 @@ create table if not exists creditor_share ( primary key (transaction_id, revision_id, account_id), - shares double precision not null default 1.0 check ( shares > 0 ), - - constraint creditor_share_account_count check (check_creditor_shares(transaction_id, revision_id, account_id)) + shares double precision not null default 1.0 ); --- a share that a transaction's debitor has in the transaction value --- see the transaction_type documentation on what this means for the particular --- transaction types. --- transactions can only be evaluated if the sum of their debitor shares is > 0. 
-create or replace function check_debitor_shares( - transaction_id integer, - revision_id bigint, - account_id integer -) returns boolean as -$$ -<> declare - is_valid boolean; -begin - with relevant_entries as ( - select * - from - debitor_share cs - where - cs.transaction_id = check_debitor_shares.transaction_id - and cs.revision_id = check_debitor_shares.revision_id - and cs.account_id != check_debitor_shares.account_id - ) - select - not (t.type in ('transfer') and cs_counts.share_count >= 1) - into locals.is_valid - from - transaction t - join ( - select - cs.transaction_id, - cs.revision_id, - count(*) as share_count - from - relevant_entries cs - group by cs.transaction_id, cs.revision_id - ) cs_counts on cs_counts.transaction_id = t.id; - - if not locals.is_valid then raise '"transfer" type transactions can only have one debitor share'; end if; - - return locals.is_valid; -end -$$ language plpgsql; create table if not exists debitor_share ( transaction_id integer references transaction (id) on delete cascade, @@ -717,171 +380,5 @@ create table if not exists debitor_share ( primary key (transaction_id, revision_id, account_id), - shares double precision not null default 1.0 check ( shares > 0 ), - - check (check_debitor_shares(transaction_id, revision_id, account_id)) + shares double precision not null default 1.0 ); - -create or replace view creditor_shares_as_json as - select - cs.revision_id as revision_id, - cs.transaction_id as transaction_id, - sum(cs.shares) as n_shares, - json_agg(cs) as shares - from - creditor_share cs - group by - cs.revision_id, cs.transaction_id; - - -create or replace view debitor_shares_as_json as - select - ds.revision_id as revision_id, - ds.transaction_id as transaction_id, - sum(ds.shares) as n_shares, - json_agg(ds) as shares - from - debitor_share ds - group by - ds.revision_id, ds.transaction_id; - -create or replace view pending_transaction_history as - select distinct on (transaction.id, gm.user_id) - transaction.id 
as id, - transaction.type as type, - transaction.group_id as group_id, - history.revision_id as revision_id, - r.started as revision_started, - r.committed as revision_committed, - history.deleted as deleted, - history.description as description, - history.value as value, - history.billed_at as billed_at, - r.user_id as last_changed_by, - history.currency_symbol as currency_symbol, - history.currency_conversion_rate as currency_conversion_rate, - gm.user_id as user_id - from - transaction_history history - join transaction on transaction.id = history.id - join transaction_revision r on r.id = history.revision_id - join group_membership gm on transaction.group_id = gm.group_id and gm.user_id = r.user_id - where - r.committed is null; - -create or replace view pending_transaction_revisions as - select - history.id as id, - history.type as type, - history.group_id as group_id, - history.revision_id as revision_id, - history.revision_started as revision_started, - history.revision_committed as revision_committed, - history.deleted as deleted, - history.description as description, - history.value as value, - history.billed_at as billed_at, - history.last_changed_by as last_changed_by, - history.currency_symbol as currency_symbol, - history.currency_conversion_rate as currency_conversion_rate, - cs.n_shares as n_creditor_shares, - ds.n_shares as n_debitor_shares, - coalesce(cs.shares, '[]'::json) as creditor_shares, - coalesce(ds.shares, '[]'::json) as debitor_shares, - history.user_id as user_id - from - pending_transaction_history history - left join creditor_shares_as_json cs on cs.revision_id = history.revision_id and cs.transaction_id = history.id - left join debitor_shares_as_json ds on ds.revision_id = history.revision_id and ds.transaction_id = history.id; - -create or replace view committed_transaction_history as - select distinct on (transaction.id) - transaction.id as id, - transaction.type as type, - transaction.group_id as group_id, - 
first_value(history.revision_id) over wnd as revision_id, - first_value(r.started) over wnd as revision_started, - first_value(r.committed) over wnd as revision_committed, - first_value(history.deleted) over wnd as deleted, - first_value(history.description) over wnd as description, - first_value(history.billed_at) over wnd as billed_at, - first_value(history.value) over wnd as value, - first_value(r.user_id) over wnd as last_changed_by, - first_value(history.currency_symbol) over wnd as currency_symbol, - first_value(history.currency_conversion_rate) over wnd as currency_conversion_rate - from - transaction_history history - join transaction on transaction.id = history.id - join transaction_revision r on r.id = history.revision_id - where - r.committed is not null window wnd as ( partition by transaction.id order by r.committed desc ); - -create or replace view committed_transaction_state as - select - history.id as id, - history.type as type, - history.group_id as group_id, - history.revision_id as revision_id, - history.revision_started as revision_started, - history.revision_committed as revision_committed, - history.deleted as deleted, - history.description as description, - history.value as value, - history.billed_at as billed_at, - history.last_changed_by as last_changed_by, - history.currency_symbol as currency_symbol, - history.currency_conversion_rate as currency_conversion_rate, - cs.n_shares as n_creditor_shares, - ds.n_shares as n_debitor_shares, - coalesce(cs.shares, '[]'::json) as creditor_shares, - coalesce(ds.shares, '[]'::json) as debitor_shares - from - committed_transaction_history history - left join creditor_shares_as_json cs on cs.revision_id = history.revision_id and cs.transaction_id = history.id - left join debitor_shares_as_json ds on ds.revision_id = history.revision_id and ds.transaction_id = history.id; - -create or replace view current_transaction_state as - select - transaction.id as id, - transaction.type as type, - 
transaction.group_id as group_id, - curr_state_json.state as current_state, - pending_json.state as pending_changes - from - transaction - left join ( - select id, json_agg(curr_state) as state from committed_transaction_state curr_state group by id - ) curr_state_json on curr_state_json.id = transaction.id - left join ( - select id, json_agg(pending) as state from pending_transaction_revisions pending group by id - ) pending_json on pending_json.id = transaction.id; - -create or replace view account_balance as - select - a.id as account_id, - a.group_id as group_id, - coalesce(cb.creditor_balance, 0) + coalesce(db.debitor_balance, 0) as balance - from - account a - left join ( - select - cs.account_id as account_id, - sum(cs.shares / coalesce(t.n_creditor_shares, 1) * t.value) as creditor_balance - from - committed_transaction_state t - join creditor_share cs on t.revision_id = cs.revision_id and t.id = cs.transaction_id - where - t.deleted = false - group by cs.account_id - ) cb on a.id = cb.account_id - left join ( - select - ds.account_id as account_id, - -sum(ds.shares / coalesce(t.n_debitor_shares, 1) * t.value) as debitor_balance - from - committed_transaction_state t - join debitor_share ds on t.revision_id = ds.revision_id and t.id = ds.transaction_id - where - t.deleted = false - group by ds.account_id - ) db on a.id = db.account_id; diff --git a/abrechnung/database/revisions/0002_subscriptions.sql b/abrechnung/database/revisions/0002_subscriptions.sql index 0c605e74..bcf29bbf 100644 --- a/abrechnung/database/revisions/0002_subscriptions.sql +++ b/abrechnung/database/revisions/0002_subscriptions.sql @@ -1,111 +1,6 @@ -- revision: 83a50a30 -- requires: 62df6b55 --- functions for managing websocket connections --- these should be used by the websocket forwarder - --- creates a row in the forwarder table (if not exists) --- listens to channel --- returns channel_id (use as f"channel{channel_id}" when listening!) 
-create or replace function forwarder_boot( - id text, out channel_id bigint -) as -$$ -<> declare - channel_number forwarder.channel_id%type; -begin - -- check if this forwarder is already connected - select forwarder.channel_id into locals.channel_number from forwarder where forwarder.id = forwarder_boot.id; - - -- either register the new forwarder - if locals.channel_number is null then - insert into forwarder ( - id - ) - values ( - forwarder_boot.id - ) - returning forwarder.channel_id into locals.channel_number; - else -- or get rid of potential old entries of a re-booted forwarder - -- (these are left over if a forwarder crashes) - delete - from - connection - where - connection.channel_id in ( - select forwarder.channel_id from forwarder where forwarder.id = forwarder_boot.id - ); - end if; - - forwarder_boot.channel_id := locals.channel_number; -end -$$ language plpgsql; - --- to be called by a forwarder whenever a new client connects to it via websocket. --- creates a row in the connection table, --- so we know under what channel this client is reachable. 
--- returns connection_id -create or replace function client_connected( - channel_id integer, out connection_id bigint -) as -$$ -begin - insert into connection ( - channel_id - ) - values ( - client_connected.channel_id - ) - returning id into client_connected.connection_id; -end -$$ language plpgsql; - --- to be called by a forwarder whenever a websocket connection is closed --- deletes the row in the connection table --- raises bad-connection-id if the connection has not existed -create or replace procedure client_disconnected( - connection_id bigint -) as -$$ -begin - delete from connection where connection.id = client_disconnected.connection_id; - - if not found then raise exception 'bad-connection-id:no connection with the given connection id'; end if; - - -- ON DELETE CASCADE actions will take care of cleaning up the connection and subscriptions -end -$$ language plpgsql; - --- to be called by a forwarder when it shuts down --- deletes all associated channels --- returns the number of channels that were terminated -create or replace function forwarder_stop( - id text, out deleted_connections integer -) returns integer as -$$ -begin - delete - from - connection - where - connection.channel_id in ( - select channel_id - from forwarder - where forwarder.id = forwarder_stop.id - ); - - get diagnostics forwarder_stop.deleted_connections = row_count; - - delete from forwarder where forwarder.id = forwarder_stop.id; - - -- ON DELETE CASCADE actions will take care of cleaning up the connections and subscriptions -end -$$ language plpgsql; - -------------------------------------------------------------------------------- --- event subscriptions - - -- which table or event variant is a subscription for. 
create table if not exists subscription_type ( name text not null primary key @@ -176,538 +71,3 @@ create table if not exists subscription ( ); -- notification delivery create index subscription_deliver_idx on subscription (user_id, subscription_type, element_id) include (connection_id); - - --- sends a notification with 'data' to the given clients -create or replace procedure notify_connections( - connection_ids bigint[], event text, data json -) as -$$ -<> declare - forwarder_info record; -begin - for forwarder_info in select - concat('channel', connection.channel_id) as channel_name, - array_agg(connection.id) as connections - from - connection - where - connection.id = any (notify_connections.connection_ids) - group by connection.channel_id loop - perform pg_notify(forwarder_info.channel_name, - json_build_object('connections', forwarder_info.connections, 'event', event, 'data', - data)::text); - end loop; -end -$$ language plpgsql; - --- sends a notification with 'data' to the calling client --- allows clients to test the notification system -create or replace procedure notify_me( - connection_id bigint, event text, data json -) as -$$ -begin - call notify_connections(ARRAY [connection_id], event, data); -end -$$ language plpgsql; - --- functions for event subscription management --- called by the client so it receives change-events - --- subscribe user of given token to notifications -create or replace procedure subscribe( - connection_id bigint, - user_id integer, - subscription_type text, - element_id bigint -) as -$$ -begin - if subscribe.element_id is null then raise exception 'invalid element_id value'; end if; - - -- type-specific permission/value checks - if subscribe.subscription_type = 'test' then - -- only allow element_id == user_id - if subscribe.element_id != subscribe.user_id then raise 'test requires correct element_id'; end if; - - elseif subscribe.subscription_type = 'user' then - -- but the element we watch has to be the user id - if 
subscribe.element_id != subscribe.user_id then raise 'element_id not logged in user user'; end if; - - elseif subscribe.subscription_type = 'group' then - if subscribe.element_id != subscribe.user_id then - raise 'bad-subscription:group: element_id not token user'; - end if; -- Rewrite this since practically every notification apart from a few is group based such that we normally perform - - -- the group auth instead of the normal auth - elseif subscribe.subscription_type in ('account', 'group_member', 'group_invite', 'group_log', 'transaction') then - perform - from group_membership gm - where gm.user_id = subscribe.user_id and gm.group_id = subscribe.element_id::integer; - if not found then - raise 'user % tried to subscribe to changes in a group without being a member', subscribe.user_id; - end if; - - else - raise exception 'unknown subscription type'; - end if; - - insert into subscription ( - connection_id, user_id, subscription_type, element_id - ) - values ( - subscribe.connection_id, - subscribe.user_id, - subscribe.subscription_type, - subscribe.element_id - ) - on conflict on constraint subscription_conn_type_elem do update set user_id = subscribe.user_id; -end -$$ language plpgsql; - --- unsubscribe user of given token to notifications --- if the user could create the subscription on the connection_id, --- he can also remove it. 
-create or replace procedure unsubscribe( - connection_id bigint, - user_id integer, - subscription_type text, - element_id bigint -) as -$$ -begin - delete - from - subscription - where - subscription.connection_id = unsubscribe.connection_id - and subscription.subscription_type = unsubscribe.subscription_type - and subscription.element_id = unsubscribe.element_id - and subscription.user_id = unsubscribe.user_id; -end -$$ language plpgsql; - --- deliver a notification of given type --- to all subscribers -create or replace procedure notify_user( - subscription_type text, - user_id subscription.user_id%TYPE, - element_id subscription.element_id%TYPE, - data json -) as -$$ -<> declare - connections bigint[]; -begin - -- this query directly uses the subscription_deliver_idx - select - array_agg(connection_id) - into locals.connections - from - subscription - where - subscription.subscription_type = notify_user.subscription_type - and notify_user.user_id = subscription.user_id - and notify_user.element_id = subscription.element_id; - - call notify_connections(locals.connections, notify_user.subscription_type::text, notify_user.data); -end; -$$ language plpgsql; - -create or replace procedure notify_users( - subscription_type text, - user_ids int[], - element_id subscription.element_id%TYPE, - data json -) as -$$ -<> declare - connections bigint[]; -begin - -- this query directly uses the subscription_deliver_idx - select - array_agg(connection_id) - into locals.connections - from - subscription - where - subscription.subscription_type = notify_users.subscription_type - and subscription.user_id = any (notify_users.user_ids) - and notify_users.element_id = subscription.element_id; - - call notify_connections(locals.connections, notify_users.subscription_type::text, notify_users.data); -end; -$$ language plpgsql; - --- deliver a notification of given type to all users of a group -create or replace procedure notify_group( - subscription_type text, - group_id grp.id%TYPE, 
- element_id subscription.element_id%TYPE, - data json -) as -$$ -<> declare - user_ids int[]; -begin - select - array_agg(gm.user_id) - into locals.user_ids - from - group_membership gm - where - gm.group_id = notify_group.group_id - group by gm.group_id; - - if locals.user_ids is null then return; end if; - - call notify_users(notify_group.subscription_type, locals.user_ids, notify_group.element_id, notify_group.data); -end; -$$ language plpgsql; - - - --------------------------------------------------------- --- Triggers to actually perform the notifications - --- notifications for changes in transactions -create or replace function transaction_history_updated() returns trigger as -$$ -<> declare - group_id grp.id%TYPE; - transaction_id integer; -begin - -- A deletion should not be possible therefore NEW should never be NULL - if NEW is null then - select - transaction.group_id, - transaction.id - into locals.group_id, locals.transaction_id - from - transaction - where - transaction.id = OLD.id; - else - select - transaction.group_id, - transaction.id - into locals.group_id, locals.transaction_id - from - transaction - where - transaction.id = NEW.id; - end if; - - call notify_group('transaction', locals.group_id, locals.group_id::bigint, - json_build_object('element_id', locals.group_id, 'transaction_id', locals.transaction_id)); - return NULL; -end; -$$ language plpgsql; - -drop trigger if exists transaction_history_update_trig on transaction_history; -create trigger transaction_history_update_trig - after insert or update or delete - on transaction_history - for each row -execute function transaction_history_updated(); --- TODO: the very complicated stuff of transaction share updates - --- notifications for changes in creditor or debitor shares -create or replace function transaction_share_updated() returns trigger as -$$ -<> declare - group_id grp.id%TYPE; - transaction_id integer; -begin - -- A deletion should not be possible therefore NEW should never be 
NULL - if NEW is null then - select - transaction.group_id, - transaction.id - into locals.group_id, locals.transaction_id - from - transaction - join transaction_history th on transaction.id = th.id - where th.revision_id = OLD.revision_id and th.id = OLD.transaction_id; - else - select - transaction.group_id, - transaction.id - into locals.group_id, locals.transaction_id - from - transaction - join transaction_history th on transaction.id = th.id - where th.revision_id = NEW.revision_id and th.id = NEW.transaction_id; - end if; - - call notify_group('transaction', locals.group_id, locals.group_id::bigint, - json_build_object('element_id', locals.group_id, 'transaction_id', locals.transaction_id)); - return NULL; -end; -$$ language plpgsql; - -drop trigger if exists creditor_share_trig on creditor_share; -create trigger creditor_share_trig - after insert or update or delete - on creditor_share - for each row -execute function transaction_share_updated(); - -drop trigger if exists debitor_share_trig on debitor_share; -create trigger debitor_share_trig - after insert or update or delete - on debitor_share - for each row -execute function transaction_share_updated(); - --- notifications for committing -create or replace function transaction_revision_updated() returns trigger as -$$ -<> declare - group_id grp.id%TYPE; - transaction_id integer; -begin - -- A deletion should not be possible therefore NEW should never be NULL - if NEW is null or NEW.committed is null then - return null; - end if; - - select - transaction.group_id, - transaction.id - into locals.group_id, locals.transaction_id - from - transaction - join transaction_history th on transaction.id = th.id - where th.revision_id = NEW.id; - - call notify_group('transaction', locals.group_id, locals.group_id::bigint, - json_build_object('element_id', locals.group_id, 'transaction_id', locals.transaction_id)); - return null; -end; -$$ language plpgsql; - -drop trigger if exists transaction_revision_trig on 
transaction_revision; -create trigger transaction_revision_trig - after insert or update or delete - on transaction_revision - for each row -execute function transaction_revision_updated(); - --- notifications for changes in accounts -create or replace function account_history_updated() returns trigger as -$$ -<> declare - group_id grp.id%TYPE; - account_id integer; -begin - -- A deletion should not be possible therefore NEW should never be NULL - if NEW is null then - select account.group_id, account.id - into locals.group_id, locals.account_id - from account - where account.id = OLD.id; - else - select account.group_id, account.id - into locals.group_id, locals.account_id - from account - where account.id = NEW.id; - end if; - - call notify_group('account', locals.group_id, locals.group_id::bigint, - json_build_object('element_id', locals.group_id, 'account_id', locals.account_id)); - return NULL; -end; -$$ language plpgsql; - -drop trigger if exists account_history_update_trig on account_history; -create trigger account_history_update_trig - after insert or update or delete - on account_history - for each row -execute function account_history_updated(); - --- notifications for changes in group memberships -create or replace function group_updated() returns trigger as -$$ -begin - if NEW is null then -- we cannot infer the group memberships after a group has been deleted - return NULL; - end if; - - call notify_group('group', NEW.id, NEW.id::bigint, json_build_object('element_id', NEW.id)); - return NULL; -end; -$$ language plpgsql; - -drop trigger if exists group_update_trig on grp; -create trigger group_update_trig - after insert or update - on grp - for each row -execute function group_updated(); - -create or replace function group_deleted() returns trigger as -$$ -<> declare - user_info record; -begin - for user_info in select - user_id - from - group_membership gm - where - gm.group_id = OLD.id loop - call notify_user('group', user_info.user_id, 
user_info.user_id::bigint, json_build_object('element_id', user_info.user_id)); - end loop; - - return OLD; -end; -$$ language plpgsql; - -drop trigger if exists group_delete_trig on grp; -create trigger group_delete_trig - before delete - on grp - for each row -execute function group_deleted(); - --- notifications for changes in sessions -create or replace function session_updated() returns trigger as -$$ -begin - if NEW is null then - call notify_user('session', OLD.user_id, OLD.user_id::bigint, - json_build_object('element_id', OLD.user_id, 'session_id', OLD.id)); - else - call notify_user('session', NEW.user_id, NEW.user_id::bigint, - json_build_object('element_id', NEW.user_id, 'session_id', NEW.id)); - end if; - return NULL; -end; -$$ language plpgsql; - -drop trigger if exists session_update_trig on session; -create trigger session_update_trig - after insert or update or delete - on session - for each row -execute function session_updated(); - --- notifications for changes in group memberships -create or replace function group_membership_updated() returns trigger as -$$ -begin - if NEW is null then - call notify_group('group_member', OLD.group_id, OLD.group_id::bigint, - json_build_object('element_id', OLD.group_id, 'user_id', OLD.user_id)); - else - call notify_user('group', NEW.user_id, NEW.user_id::bigint, - json_build_object('element_id', NEW.user_id, 'group_id', NEW.group_id)); - call notify_group('group_member', NEW.group_id, NEW.group_id::bigint, - json_build_object('element_id', NEW.group_id, 'user_id', NEW.user_id)); - end if; - - return NULL; -end; -$$ language plpgsql; - -drop trigger if exists group_membership_update_trig on group_membership; -create trigger group_membership_update_trig - after insert or update or delete - on group_membership - for each row -execute function group_membership_updated(); - --- notifications for changes in group invites -create or replace function group_invite_updated() returns trigger as -$$ -begin - -- raise 
'notifying group invite for element id % and group_id %', NEW.id, NEW.group_id; - if NEW is null then - call notify_group('group_invite', OLD.group_id, OLD.group_id::bigint, - json_build_object('element_id', OLD.group_id, 'invite_id', OLD.id)); - else - call notify_group('group_invite', NEW.group_id, NEW.group_id::bigint, - json_build_object('element_id', NEW.group_id, 'invite_id', NEW.id)); - end if; - return NULL; -end; -$$ language plpgsql; - -drop trigger if exists group_invite_update_trig on group_invite; -create trigger group_invite_update_trig - after insert or update or delete - on group_invite - for each row -execute function group_invite_updated(); - --- notifications for changes in sessions -create or replace function user_sessions_updated() returns trigger as -$$ -begin - -- raise 'notifying group invite for element id % and group_id %', NEW.id, NEW.group_id; - if NEW is null then - call notify_user('user', OLD.user_id, OLD.user_id::bigint, - json_build_object('element_id', OLD.user_id, 'session_id', OLD.id)); - else - call notify_user('user', NEW.user_id, NEW.user_id::bigint, - json_build_object('element_id', NEW.user_id, 'session_id', NEW.id)); - end if; - return NULL; -end; -$$ language plpgsql; - -drop trigger if exists session_update_trig on session; -create trigger session_update_trig - after insert or update or delete - on session - for each row -execute function user_sessions_updated(); - --- notifications for changes in a users profile details -create or replace function user_updated() returns trigger as -$$ -begin - -- raise 'notifying group invite for element id % and group_id %', NEW.id, NEW.group_id; - if NEW is null then - call notify_user('user', OLD.id, OLD.id::bigint, - json_build_object('element_id', OLD.id)); - else - call notify_user('user', NEW.id, NEW.id::bigint, - json_build_object('element_id', NEW.id)); - end if; - return NULL; -end; -$$ language plpgsql; - -drop trigger if exists user_update_trig on usr; -create trigger 
user_update_trig - after update - on usr - for each row -execute function user_updated(); - --- notifications for changes in group logs entries -create or replace function group_log_updated() returns trigger as -$$ -begin - -- raise 'notifying group invite for element id % and group_id %', NEW.id, NEW.group_id; - if NEW is null then - call notify_group('group_log', OLD.group_id, OLD.group_id::bigint, - json_build_object('element_id', OLD.group_id, 'log-id', OLD.id)); - else - call notify_group('group_log', NEW.group_id, NEW.group_id::bigint, - json_build_object('element_id', NEW.group_id, 'log_id', NEW.id)); - end if; - return NULL; -end; -$$ language plpgsql; - -drop trigger if exists group_log_update_trig on group_log; -create trigger group_log_update_trig - after insert or update or delete - on group_log - for each row -execute function group_log_updated(); diff --git a/abrechnung/database/revisions/0003_purchase_items.sql b/abrechnung/database/revisions/0003_purchase_items.sql index 62c55c6b..a67b1a2d 100644 --- a/abrechnung/database/revisions/0003_purchase_items.sql +++ b/abrechnung/database/revisions/0003_purchase_items.sql @@ -17,9 +17,9 @@ create table if not exists purchase_item_history ( -- the name of the item name text not null, -- the total price, in the transaction currency. 
- price double precision not null check ( price > 0 ), + price double precision not null, -- part of the communist transaction shares this item is billed to - communist_shares double precision not null default 1.0 check (communist_shares >= 0) + communist_shares double precision not null default 1.0 ); -- an usage of an item by an account, @@ -33,373 +33,5 @@ create table if not exists purchase_item_usage ( primary key (item_id, revision_id, account_id), -- amount of shares this account has from the purchase item - share_amount double precision not null check ( share_amount > 0 ) -); - -create or replace view purchase_item_usages_as_json as - select - piu.revision_id as revision_id, - piu.item_id as item_id, - sum(piu.share_amount) as n_usages, - json_agg(piu) as usages - from - purchase_item_usage piu - group by - piu.revision_id, piu.item_id; - -create or replace view pending_purchase_item_history as - select distinct on (pi.id, gm.user_id) - pi.id as id, - pi.transaction_id as transaction_id, - transaction.group_id as group_id, - pih.revision_id as revision_id, - r.started as revision_started, - r.committed as revision_committed, - pih.deleted as deleted, - pih.name as name, - pih.price as price, - pih.communist_shares as communist_shares, - r.user_id as last_changed_by, - gm.user_id as user_id - from - purchase_item_history pih - join purchase_item pi on pih.id = pi.id - join transaction on transaction.id = pi.transaction_id - join transaction_revision r on r.id = pih.revision_id and r.transaction_id = transaction.id - join group_membership gm on transaction.group_id = gm.group_id and gm.user_id = r.user_id - where - r.committed is null; - -create or replace view pending_purchase_item_full as - select - history.id as id, - history.transaction_id as transaction_id, - history.group_id as group_id, - history.revision_id as revision_id, - history.revision_started as revision_started, - history.revision_committed as revision_committed, - history.deleted as deleted, 
- history.name as name, - history.price as price, - history.communist_shares as communist_shares, - history.last_changed_by as last_changed_by, - piu.n_usages as n_usages, - coalesce(piu.usages, '[]'::json) as usages, - history.user_id as user_id - from - pending_purchase_item_history history - left join purchase_item_usages_as_json piu - on piu.revision_id = history.revision_id and history.id = piu.item_id; - -create or replace view committed_purchase_item_history as - select distinct on (pi.id) - pi.id as id, - pi.transaction_id as transaction_id, - transaction.group_id as group_id, - first_value(pih.revision_id) over wnd as revision_id, - first_value(r.started) over wnd as revision_started, - first_value(r.committed) over wnd as revision_committed, - first_value(pih.deleted) over wnd as deleted, - first_value(pih.name) over wnd as name, - first_value(pih.price) over wnd as price, - first_value(pih.communist_shares) over wnd as communist_shares, - first_value(r.user_id) over wnd as last_changed_by - from - purchase_item_history pih - join purchase_item pi on pih.id = pi.id - join transaction on transaction.id = pi.transaction_id - join transaction_revision r on r.id = pih.revision_id and r.transaction_id = pi.transaction_id - where - r.committed is not null window wnd as ( partition by pi.id order by r.committed desc ); - -create or replace view committed_purchase_item_state as - select - history.id as id, - history.transaction_id as transaction_id, - history.group_id as group_id, - history.revision_id as revision_id, - history.revision_started as revision_started, - history.revision_committed as revision_committed, - history.deleted as deleted, - history.name as name, - history.price as price, - history.communist_shares as communist_shares, - history.last_changed_by as last_changed_by, - piu.n_usages as n_usages, - coalesce(piu.usages, '[]'::json) as usages - from - committed_purchase_item_history history - left join purchase_item_usages_as_json piu - on 
piu.revision_id = history.revision_id and piu.item_id = history.id; - -drop view if exists account_balance; -drop view if exists current_transaction_state; -drop view if exists pending_transaction_revisions; -drop view if exists committed_transaction_state; - -create or replace view pending_transaction_revisions as - select - history.id as id, - history.type as type, - history.group_id as group_id, - history.revision_id as revision_id, - history.revision_started as revision_started, - history.revision_committed as revision_committed, - history.deleted as deleted, - history.description as description, - history.value as value, - history.billed_at as billed_at, - history.last_changed_by as last_changed_by, - history.currency_symbol as currency_symbol, - history.currency_conversion_rate as currency_conversion_rate, - cs.n_shares as n_creditor_shares, - ds.n_shares as n_debitor_shares, - coalesce(cs.shares, '[]'::json) as creditor_shares, - coalesce(ds.shares, '[]'::json) as debitor_shares, - purchase_items.purchase_items as purchase_items, - history.user_id as user_id - from - pending_transaction_history history - left join creditor_shares_as_json cs on cs.revision_id = history.revision_id and cs.transaction_id = history.id - left join debitor_shares_as_json ds on ds.revision_id = history.revision_id and ds.transaction_id = history.id - left join ( - select - transaction_id, - user_id, - json_agg(ppif) as purchase_items - from - pending_purchase_item_full ppif - group by ppif.transaction_id, ppif.user_id - ) purchase_items - on purchase_items.user_id = history.user_id and purchase_items.transaction_id = history.id; - -create or replace view committed_transaction_state as - select - history.id as id, - history.type as type, - history.group_id as group_id, - history.revision_id as revision_id, - history.revision_started as revision_started, - history.revision_committed as revision_committed, - history.deleted as deleted, - history.description as description, - 
history.value as value, - history.billed_at as billed_at, - history.last_changed_by as last_changed_by, - history.currency_symbol as currency_symbol, - history.currency_conversion_rate as currency_conversion_rate, - cs.n_shares as n_creditor_shares, - ds.n_shares as n_debitor_shares, - coalesce(cs.shares, '[]'::json) as creditor_shares, - coalesce(ds.shares, '[]'::json) as debitor_shares, - purchase_items.purchase_items as purchase_items - from - committed_transaction_history history - left join creditor_shares_as_json cs on cs.revision_id = history.revision_id and cs.transaction_id = history.id - left join debitor_shares_as_json ds on ds.revision_id = history.revision_id and ds.transaction_id = history.id - left join ( - select - transaction_id, - json_agg(cpis) as purchase_items - from - committed_purchase_item_state cpis - group by cpis.transaction_id - ) purchase_items on purchase_items.transaction_id = history.id; - -create or replace view current_transaction_state as - select - transaction.id as id, - transaction.type as type, - transaction.group_id as group_id, - curr_state_json.state as current_state, - pending_json.state as pending_changes - from - transaction - left join ( - select id, json_agg(curr_state) as state from committed_transaction_state curr_state group by id - ) curr_state_json on curr_state_json.id = transaction.id - left join ( - select id, json_agg(pending) as state from pending_transaction_revisions pending group by id - ) pending_json on pending_json.id = transaction.id; - --- notifications for purchase items -create or replace function purchase_item_updated() returns trigger as -$$ -<> declare - group_id grp.id%TYPE; - transaction_id integer; -begin - -- A deletion should not be possible therefore NEW should never be NULL - if NEW is null then return null; end if; - - select - transaction.group_id, - transaction.id - into locals.group_id, locals.transaction_id - from - transaction - join purchase_item pi on transaction.id = 
pi.transaction_id - where - pi.id = NEW.id; - - call notify_group('transaction', locals.group_id, locals.group_id::bigint, - json_build_object('element_id', locals.group_id, 'transaction_id', locals.transaction_id)); - return null; -end; -$$ language plpgsql; - -drop trigger if exists purchase_item_trig on purchase_item_history; -create trigger purchase_item_trig - after insert or update or delete - on purchase_item_history - for each row -execute function purchase_item_updated(); - -create or replace function purchase_item_usage_updated() returns trigger as -$$ -<> declare - group_id grp.id%TYPE; - transaction_id integer; -begin - if NEW is null then - select - transaction.group_id, - transaction.id - into locals.group_id, locals.transaction_id - from - transaction - join purchase_item pi on transaction.id = pi.transaction_id - where - pi.id = OLD.item_id; - else - select - transaction.group_id, - transaction.id - into locals.group_id, locals.transaction_id - from - transaction - join purchase_item pi on transaction.id = pi.transaction_id - where - pi.id = NEW.item_id; - end if; - - - call notify_group('transaction', locals.group_id, locals.group_id::bigint, - json_build_object('element_id', locals.group_id, 'transaction_id', locals.transaction_id)); - return null; -end; -$$ language plpgsql; - -drop trigger if exists purchase_item_usage_trig on purchase_item_usage; -create trigger purchase_item_usage_trig - after insert or update or delete - on purchase_item_usage - for each row -execute function purchase_item_usage_updated(); - -create or replace function check_committed_transactions( - revision_id bigint, - transaction_id integer, - started timestamptz, - committed timestamptz -) returns boolean as -$$ -<> declare - n_creditor_shares integer; - n_debitor_shares integer; - transaction_type text; - transaction_deleted boolean; -begin - if committed is null then return true; end if; - - perform - from - transaction_revision tr - where - tr.transaction_id = 
check_committed_transactions.transaction_id - and tr.id != check_committed_transactions.revision_id - and tr.committed between check_committed_transactions.started and check_committed_transactions.committed; - - if found then raise 'another change was committed earlier, committing is not possible due to conflicts'; end if; - - select - t.type, - th.deleted - into locals.transaction_type, locals.transaction_deleted - from - transaction_history th - join transaction t on t.id = th.id - where - th.revision_id = check_committed_transactions.revision_id; - - if locals.transaction_deleted then -- if the transaction is deleted we simply accept anything as we dont care - return true; - end if; - - select - count(cs.account_id) - into locals.n_creditor_shares - from - creditor_share cs - where - cs.transaction_id = check_committed_transactions.transaction_id - and cs.revision_id = check_committed_transactions.revision_id; - - select - count(ds.account_id) - into locals.n_debitor_shares - from - debitor_share ds - where - ds.transaction_id = check_committed_transactions.transaction_id - and ds.revision_id = check_committed_transactions.revision_id; - - -- check that the number of shares fits the transaction type - if locals.transaction_type = 'transfer' then - if locals.n_creditor_shares != 1 then - raise '"transfer" type transactions must have exactly one creditor share % %', locals.n_creditor_shares, locals.n_debitor_shares; - end if; - - if locals.n_debitor_shares != 1 then - raise '"transfer" type transactions must have exactly one debitor share'; - end if; - end if; - - if locals.transaction_type = 'purchase' then - if locals.n_creditor_shares != 1 then - raise '"purchase" type transactions must have exactly one creditor share'; - end if; - if locals.n_debitor_shares < 1 then - raise '"purchase" type transactions must have at least one debitor share'; - end if; - - -- check that all purchase items have at least an item share or communist shares > 0 - -- i.e. 
we look for a purchase item at the current revision that has sum(usages) + communist_shares <= 0 - -- if such a one is found we raise an exception - perform from purchase_item pi - join purchase_item_history pih on pi.id = pih.id - left join purchase_item_usage piu on pih.revision_id = piu.revision_id and pi.id = piu.item_id - where pih.revision_id = check_committed_transactions.revision_id - and pi.transaction_id = check_committed_transactions.transaction_id - and not pih.deleted - group by pi.id - having sum(coalesce(piu.share_amount, 0) + pih.communist_shares) <= 0; - - if found then - raise 'all transaction positions must have at least one account assigned or their common shares set greater than 0'; - end if; - end if; - - if locals.transaction_type = 'mimo' then - if locals.n_creditor_shares < 1 then - raise '"mimo" type transactions must have at least one creditor share'; - end if; - if locals.n_debitor_shares < 1 then - raise '"mimo" type transactions must have at least one debitor share'; - end if; - end if; - - return true; -end -$$ language plpgsql; - + share_amount double precision not null +); \ No newline at end of file diff --git a/abrechnung/database/revisions/0004_relax_db_constraints.sql b/abrechnung/database/revisions/0004_relax_db_constraints.sql index f93ce36c..c8340c64 100644 --- a/abrechnung/database/revisions/0004_relax_db_constraints.sql +++ b/abrechnung/database/revisions/0004_relax_db_constraints.sql @@ -1,5 +1,2 @@ -- revision: f133b1d3 -- requires: b32893f6 - -alter table purchase_item_history drop constraint purchase_item_history_price_check; -alter table transaction_history drop constraint transaction_history_value_check; diff --git a/abrechnung/database/revisions/0005_partial_data_fetching.sql b/abrechnung/database/revisions/0005_partial_data_fetching.sql index 5b1c6caf..e2e0b25a 100644 --- a/abrechnung/database/revisions/0005_partial_data_fetching.sql +++ b/abrechnung/database/revisions/0005_partial_data_fetching.sql @@ -1,32 +1,2 @@ 
-- revision: 64df13c9 -- requires: f133b1d3 - -drop view current_transaction_state; - -create or replace view current_transaction_state as -select - transaction.id as id, - transaction.type as type, - transaction.group_id as group_id, - curr_state_json.state as current_state, - pending_json.state as pending_changes, - curr_state_json.last_changed as last_changed, - pending_json.pending_user_ids as users_with_pending_changes -from - transaction - left join ( - select - id, - max(curr_state.revision_committed) as last_changed, - json_agg(curr_state) as state - from committed_transaction_state curr_state - group by id - ) curr_state_json on curr_state_json.id = transaction.id - left join ( - select - id, - array_agg(pending.user_id) as pending_user_ids, - json_agg(pending) as state - from pending_transaction_revisions pending - group by id - ) pending_json on pending_json.id = transaction.id; diff --git a/abrechnung/database/revisions/0008_file_upload.sql b/abrechnung/database/revisions/0008_file_upload.sql index 46a4c40f..51f7bcd3 100644 --- a/abrechnung/database/revisions/0008_file_upload.sql +++ b/abrechnung/database/revisions/0008_file_upload.sql @@ -1,60 +1,6 @@ -- revision: 156aef63 -- requires: dbcccb58 -create or replace function transaction_revision_updated() returns trigger as -$$ -<> declare - group_id grp.id%TYPE; - transaction_id integer; -begin - select - t.group_id, - t.id - into locals.group_id, locals.transaction_id - from - transaction t - where t.id = (case when NEW is null then OLD.transaction_id else NEW.transaction_id end); - - -- A deletion should only be able to occur for uncommitted revisions - if NEW is null then - call notify_user('transaction', OLD.user_id, locals.group_id::bigint, - json_build_object('element_id', locals.group_id, 'transaction_id', locals.transaction_id)); - elseif NEW.committed is null then - call notify_user('transaction', NEW.user_id, locals.group_id::bigint, - json_build_object('element_id', locals.group_id, 
'transaction_id', locals.transaction_id)); - else - call notify_group('transaction', locals.group_id, locals.group_id::bigint, - json_build_object('element_id', locals.group_id, 'transaction_id', locals.transaction_id)); - end if; - - return null; -end; -$$ language plpgsql; - --- disallow empty strings in certain fields -alter table account_history add constraint name_not_empty check ( name <> '' ); -alter table transaction_history add constraint description_not_empty check ( description <> '' ); -alter table grp add constraint name_not_empty check ( name <> '' ); -alter table purchase_item_history add constraint name_not_empty check ( name <> '' ); - -drop view current_transaction_state; -drop view committed_transaction_state; -drop view committed_transaction_history; - -drop view pending_transaction_revisions; -drop view pending_transaction_history; - -drop view pending_purchase_item_full; -drop view pending_purchase_item_history; -drop view committed_purchase_item_state; -drop view committed_purchase_item_history; - -drop view latest_account; - -drop view purchase_item_usages_as_json; -drop view debitor_shares_as_json; -drop view creditor_shares_as_json; - create table if not exists blob ( id serial primary key, content bytea not null, @@ -70,577 +16,7 @@ create table if not exists file_history ( id integer references file(id) on delete cascade, revision_id bigint not null references transaction_revision (id) on delete cascade, primary key (id, revision_id), - filename text not null constraint filename_not_empty check (filename <> ''), + filename text not null, blob_id integer references blob(id) on delete cascade, deleted bool default false ); - --- notifications for transaction file attachments -create or replace function file_history_updated() returns trigger as -$$ -<> declare - group_id grp.id%TYPE; - transaction_id integer; -begin - select - transaction.group_id, - transaction.id - into locals.group_id, locals.transaction_id - from - transaction - join 
file f on transaction.id = f.transaction_id - where - f.id = (case when NEW is null then OLD.id else NEW.id end); - - call notify_group('transaction', locals.group_id, locals.group_id::bigint, - json_build_object('element_id', locals.group_id, 'transaction_id', locals.transaction_id)); - return null; -end; -$$ language plpgsql; - -drop trigger if exists file_history_trig on file_history; -create trigger file_history_trig - after insert or update or delete - on file_history - for each row -execute function file_history_updated(); - -create or replace view purchase_item_usages_as_json as - select - piu.revision_id, - piu.item_id, - sum(piu.share_amount) as n_usages, - array_agg(piu.account_id) as involved_accounts, - jsonb_agg(piu.*) as usages - from - purchase_item_usage piu - group by - piu.revision_id, piu.item_id; - -create or replace view debitor_shares_as_json as - select - ds.revision_id, - ds.transaction_id, - sum(ds.shares) as n_shares, - array_agg(ds.account_id) as involved_accounts, - jsonb_agg(ds.*) as shares - from - debitor_share ds - group by - ds.revision_id, ds.transaction_id; - -create or replace view creditor_shares_as_json as - select - cs.revision_id, - cs.transaction_id, - sum(cs.shares) as n_shares, - array_agg(cs.account_id) as involved_accounts, - jsonb_agg(cs.*) as shares - from - creditor_share cs - group by - cs.revision_id, cs.transaction_id; - -create or replace view aggregated_committed_file_history as ( -select - sub.revision_id, - sub.transaction_id, - sub.file_id, - sub.user_id, - sub.started as revision_started, - sub.committed as revision_committed, - first_value(sub.filename) over outer_window as filename, - first_value(sub.mime_type) over outer_window as mime_type, - first_value(sub.blob_id) over outer_window as blob_id, - first_value(sub.deleted) over outer_window as deleted -from ( - select - tr.id as revision_id, - tr.transaction_id, - tr.user_id, - tr.started, - tr.committed, - f.id as file_id, - count(f.id) over wnd as 
id_partition, - fh.filename, - blob.mime_type, - fh.blob_id, - fh.deleted - from - transaction_revision tr - join file f on tr.transaction_id = f.transaction_id - left join file_history fh on fh.id = f.id and tr.id = fh.revision_id - left join blob on blob.id = fh.blob_id - where - tr.committed is not null window wnd as (partition by f.id order by committed asc) -) as sub window outer_window as (partition by sub.file_id, sub.id_partition order by sub.revision_id) -); - -create or replace view aggregated_pending_file_history as ( -select - tr.id as revision_id, - tr.transaction_id, - tr.user_id as changed_by, - tr.started as revision_started, - f.id as file_id, - fh.filename, - blob.mime_type, - fh.blob_id, - fh.deleted -from - transaction_revision tr - join file f on tr.transaction_id = f.transaction_id - join file_history fh on fh.id = f.id and tr.id = fh.revision_id - left join blob on blob.id = fh.blob_id -where - tr.committed is null -); - -create or replace function committed_file_state_valid_at( - valid_at timestamptz = now() -) -returns table ( - file_id int, - revision_id bigint, - transaction_id int, - changed_by int, - revision_started timestamptz, - revision_committed timestamptz, - filename text, - mime_type text, - blob_id int, - deleted bool -) -as -$$ -select distinct on (file_id) - file_id, - revision_id, - transaction_id, - user_id as changed_by, - revision_started, - revision_committed, - filename, - mime_type, - blob_id, - deleted -from - aggregated_committed_file_history -where - revision_committed <= committed_file_state_valid_at.valid_at - and filename is not null -order by - file_id, revision_committed desc -$$ language sql - security invoker - stable; - -create or replace view aggregated_committed_transaction_position_history as ( - select - sub.revision_id, - sub.transaction_id, - sub.item_id, - sub.user_id, - sub.started as revision_started, - sub.committed as revision_committed, - first_value(sub.name) over outer_window as name, - 
first_value(sub.price) over outer_window as price, - first_value(sub.communist_shares) over outer_window as communist_shares, - first_value(sub.deleted) over outer_window as deleted, - first_value(sub.n_usages) over outer_window as n_usages, - first_value(sub.usages) over outer_window as usages, - first_value(sub.involved_accounts) over outer_window as involved_accounts - from ( - select - tr.id as revision_id, - tr.transaction_id, - tr.user_id, - tr.started, - tr.committed, - pi.id as item_id, - count(pi.id) over wnd as id_partition, - pih.name, - pih.price, - pih.communist_shares, - pih.deleted, - coalesce(piu.n_usages, 0) as n_usages, - coalesce(piu.usages, '[]'::jsonb) as usages, - coalesce(piu.involved_accounts, array[]::int[]) as involved_accounts - from - transaction_revision tr - join purchase_item pi on tr.transaction_id = pi.transaction_id - left join purchase_item_history pih on pih.id = pi.id and tr.id = pih.revision_id - left join purchase_item_usages_as_json piu on pi.id = piu.item_id and tr.id = piu.revision_id - where - tr.committed is not null window wnd as (partition by pi.id order by committed asc) - ) as sub window outer_window as (partition by sub.item_id, sub.id_partition order by sub.revision_id) -); - -create or replace view aggregated_pending_transaction_position_history as ( - select - tr.id as revision_id, - tr.transaction_id, - tr.user_id as changed_by, - tr.started as revision_started, - pi.id as item_id, - pih.name, - pih.price, - pih.communist_shares, - pih.deleted, - coalesce(piu.n_usages, 0) as n_usages, - coalesce(piu.usages, '[]'::jsonb) as usages, - coalesce(piu.involved_accounts, array[]::int[]) as involved_accounts - from - transaction_revision tr - join purchase_item pi on tr.transaction_id = pi.transaction_id - join purchase_item_history pih on pih.id = pi.id and tr.id = pih.revision_id - left join purchase_item_usages_as_json piu on pi.id = piu.item_id and tr.id = piu.revision_id - where - tr.committed is null -); - -create 
or replace function committed_transaction_position_state_valid_at( - valid_at timestamptz = now() -) -returns table ( - item_id int, - revision_id bigint, - transaction_id int, - changed_by int, - revision_started timestamptz, - revision_committed timestamptz, - name text, - price double precision, - communist_shares double precision, - deleted bool, - n_usages int, - usages jsonb, - involved_accounts int[] -) -as -$$ -select distinct on (acph.item_id) - acph.item_id, - acph.revision_id, - acph.transaction_id, - acph.user_id as changed_by, - acph.revision_started, - acph.revision_committed, - acph.name, - acph.price, - acph.communist_shares, - acph.deleted, - acph.n_usages, - acph.usages, - acph.involved_accounts -from - aggregated_committed_transaction_position_history acph -where - acph.revision_committed <= committed_transaction_position_state_valid_at.valid_at - and acph.name is not null -order by - acph.item_id, acph.revision_committed desc -$$ language sql - security invoker - stable; - -create or replace view aggregated_committed_transaction_history as ( - select - sub.revision_id, - sub.transaction_id, - sub.user_id, - sub.group_id, - sub.started as revision_started, - sub.committed as revision_committed, - sub.type, - first_value(sub.value) over outer_window as value, - first_value(sub.description) over outer_window as description, - first_value(sub.currency_symbol) over outer_window as currency_symbol, - first_value(sub.currency_conversion_rate) over outer_window as currency_conversion_rate, - first_value(sub.billed_at) over outer_window as billed_at, - first_value(sub.deleted) over outer_window as deleted, - first_value(sub.n_creditor_shares) over outer_window as n_creditor_shares, - first_value(sub.creditor_shares) over outer_window as creditor_shares, - first_value(sub.n_debitor_shares) over outer_window as n_debitor_shares, - first_value(sub.debitor_shares) over outer_window as debitor_shares, - first_value(sub.involved_accounts) over outer_window as 
involved_accounts - from ( - select - tr.id as revision_id, - tr.transaction_id, - tr.user_id, - tr.started, - tr.committed, - t.group_id, - t.type, - count(th.id) over wnd as id_partition, - th.value, - th.currency_symbol, - th.currency_conversion_rate, - th.description, - th.billed_at, - th.deleted, - coalesce(csaj.n_shares, 0) as n_creditor_shares, - coalesce(csaj.shares, '[]'::jsonb) as creditor_shares, - coalesce(dsaj.n_shares, 0) as n_debitor_shares, - coalesce(dsaj.shares, '[]'::jsonb) as debitor_shares, - coalesce(csaj.involved_accounts, array[]::int[]) || coalesce(dsaj.involved_accounts, array[]::int[]) as involved_accounts - from - transaction_revision tr - join transaction t on tr.transaction_id = t.id - left join transaction_history th on t.id = th.id and tr.id = th.revision_id - left join creditor_shares_as_json csaj on t.id = csaj.transaction_id and tr.id = csaj.revision_id - left join debitor_shares_as_json dsaj on t.id = dsaj.transaction_id and tr.id = dsaj.revision_id - where - tr.committed is not null window wnd as (partition by tr.transaction_id order by committed asc) - ) as sub window outer_window as (partition by sub.transaction_id, sub.id_partition order by sub.revision_id) -); - -create or replace view aggregated_pending_transaction_history as ( - select - tr.id as revision_id, - tr.transaction_id, - tr.user_id as changed_by, - tr.started as revision_started, - t.group_id, - t.type, - th.value, - th.currency_symbol, - th.currency_conversion_rate, - th.description, - th.billed_at, - th.deleted, - coalesce(csaj.n_shares, 0) as n_creditor_shares, - coalesce(csaj.shares, '[]'::jsonb) as creditor_shares, - coalesce(dsaj.n_shares, 0) as n_debitor_shares, - coalesce(dsaj.shares, '[]'::jsonb) as debitor_shares, - coalesce(csaj.involved_accounts, array[]::int[]) || coalesce(dsaj.involved_accounts, array[]::int[]) as involved_accounts - from - transaction_revision tr - join transaction t on tr.transaction_id = t.id - join transaction_history th on 
t.id = th.id and tr.id = th.revision_id - left join creditor_shares_as_json csaj on t.id = csaj.transaction_id and tr.id = csaj.revision_id - left join debitor_shares_as_json dsaj on t.id = dsaj.transaction_id and tr.id = dsaj.revision_id - where - tr.committed is null -); - -create or replace function committed_transaction_state_valid_at( - valid_at timestamptz = now() -) -returns table ( - revision_id bigint, - transaction_id int, - changed_by int, - revision_started timestamptz, - revision_committed timestamptz, - group_id int, - type text, - value double precision, - currency_symbol text, - currency_conversion_rate double precision, - description text, - billed_at date, - deleted bool, - n_creditor_shares int, - creditor_shares jsonb, - n_debitor_shares int, - debitor_shares jsonb, - involved_accounts int[] -) -as -$$ -select distinct on (acth.transaction_id) - acth.revision_id, - acth.transaction_id, - acth.user_id as changed_by, - acth.revision_started, - acth.revision_committed, - acth.group_id, - acth.type, - acth.value, - acth.currency_symbol, - acth.currency_conversion_rate, - acth.description, - acth.billed_at, - acth.deleted, - acth.n_creditor_shares, - acth.creditor_shares, - acth.n_debitor_shares, - acth.debitor_shares, - acth.involved_accounts -from - aggregated_committed_transaction_history acth -where - acth.revision_committed <= committed_transaction_state_valid_at.valid_at - and acth.description is not null -order by - acth.transaction_id, acth.revision_committed desc -$$ language sql - security invoker - stable; - -create or replace function full_transaction_state_valid_at( - seen_by_user int, - valid_at timestamptz = now() -) -returns table ( - transaction_id int, - type text, - group_id int, - last_changed timestamptz, - is_wip bool, - committed_details jsonb, - pending_details jsonb, - committed_positions jsonb, - pending_positions jsonb, - committed_files jsonb, - pending_files jsonb -) -as -$$ -select - t.id as transaction_id, - t.type, - 
t.group_id, - greatest(committed_details.last_changed, committed_positions.last_changed, committed_files.last_changed) as last_changed, - exists( - select 1 from transaction_revision tr - where tr.transaction_id = t.id and tr.user_id = full_transaction_state_valid_at.seen_by_user and tr.committed is null - ) as is_wip, - committed_details.json_state as committed_details, - pending_details.json_state as pending_details, - committed_positions.json_state as committed_positions, - pending_positions.json_state as pending_positions, - committed_files.json_state as committed_files, - pending_files.json_state as pending_files -from - transaction t -left join ( - select - ctsa.transaction_id, - jsonb_agg(ctsa) as json_state, - max(ctsa.revision_committed) as last_changed - from committed_transaction_state_valid_at(full_transaction_state_valid_at.valid_at) ctsa - group by ctsa.transaction_id -) committed_details on t.id = committed_details.transaction_id -left join ( - select - apth.transaction_id, - jsonb_agg(apth) as json_state - from aggregated_pending_transaction_history apth - where apth.changed_by = full_transaction_state_valid_at.seen_by_user - group by apth.transaction_id -) pending_details on t.id = pending_details.transaction_id -left join ( - select - ctpsa.transaction_id, - jsonb_agg(ctpsa) as json_state, - max(ctpsa.revision_committed) as last_changed - from committed_transaction_position_state_valid_at(full_transaction_state_valid_at.valid_at) ctpsa - group by ctpsa.transaction_id -) committed_positions on t.id = committed_positions.transaction_id -left join ( - select - aptph.transaction_id, - jsonb_agg(aptph) as json_state - from aggregated_pending_transaction_position_history aptph - where aptph.changed_by = full_transaction_state_valid_at.seen_by_user - group by aptph.transaction_id -) pending_positions on t.id = pending_positions.transaction_id -left join ( - select - cfsva.transaction_id, - jsonb_agg(cfsva) as json_state, - max(cfsva.revision_committed) 
as last_changed - from committed_file_state_valid_at(full_transaction_state_valid_at.valid_at) cfsva - group by cfsva.transaction_id -) committed_files on t.id = committed_files.transaction_id -left join ( - select - apfh.transaction_id, - jsonb_agg(apfh) as json_state - from aggregated_pending_file_history apfh - where apfh.changed_by = full_transaction_state_valid_at.seen_by_user - group by apfh.transaction_id -) pending_files on t.id = pending_files.transaction_id -where committed_details.json_state is not null or pending_details.json_state is not null -$$ language sql - security invoker - stable; - -create or replace view aggregated_committed_account_history as ( - select - sub.revision_id, - sub.account_id, - sub.user_id, - sub.group_id, - sub.type, - sub.started as revision_started, - sub.committed as revision_committed, - first_value(sub.description) over outer_window as description, - first_value(sub.name) over outer_window as name, - first_value(sub.priority) over outer_window as priority, - first_value(sub.deleted) over outer_window as deleted - from ( - select - ar.id as revision_id, - ar.account_id, - ar.user_id, - ar.started, - ar.committed, - a.group_id, - a.type, - count(a.id) over wnd as id_partition, - ah.name, - ah.description, - ah.priority, - ah.deleted - from - account_revision ar - join account a on a.id = ar.account_id - left join account_history ah on ah.id = a.id and ar.id = ah.revision_id - where - ar.committed is not null window wnd as (partition by a.id order by committed asc) - ) as sub window outer_window as (partition by sub.account_id, sub.id_partition order by sub.revision_id) -); - -create or replace function committed_account_state_valid_at( - valid_at timestamptz = now() -) -returns table ( - account_id int, - revision_id bigint, - type text, - user_id int, - group_id int, - revision_started timestamptz, - revision_committed timestamptz, - name text, - description text, - priority int, - deleted bool -) -as -$$ -select distinct 
on (acah.account_id) - acah.account_id, - acah.revision_id, - acah.type, - acah.user_id, - acah.group_id, - acah.revision_started, - acah.revision_committed, - acah.name, - acah.description, - acah.priority, - acah.deleted -from - aggregated_committed_account_history acah -where - acah.revision_committed <= committed_account_state_valid_at.valid_at -order by - acah.account_id, acah.revision_committed desc -$$ language sql - security invoker - stable; diff --git a/abrechnung/database/revisions/0009_robust_notifications.sql b/abrechnung/database/revisions/0009_robust_notifications.sql index 4593f368..65c39b5d 100644 --- a/abrechnung/database/revisions/0009_robust_notifications.sql +++ b/abrechnung/database/revisions/0009_robust_notifications.sql @@ -2,671 +2,4 @@ -- requires: 156aef63 alter table transaction_revision add column version int default 0; - --- drop all notification triggers since we are rebuilding parts of them -drop trigger if exists transaction_history_update_trig on transaction_history; -drop trigger if exists transaction_revision_trig on transaction_revision; -drop trigger if exists creditor_share_trig on creditor_share; -drop trigger if exists debitor_share_trig on debitor_share; -drop trigger if exists purchase_item_trig on purchase_item_history; -drop trigger if exists purchase_item_usage_trig on purchase_item_usage; - -drop function if exists transaction_history_updated(); -drop function if exists transaction_share_updated(); -drop function if exists transaction_revision_updated(); -drop function if exists purchase_item_updated(); -drop function if exists purchase_item_usage_updated(); - -create or replace function check_transaction_revisions_change_per_user(transaction_id integer, user_id integer, committed timestamp with time zone) -returns boolean -as -$$ -<> declare - n_uncommitted int; -begin - if committed is not null then return true; end if; - - select count(*) into locals.n_uncommitted - from - transaction_revision tr - where - 
tr.transaction_id = check_transaction_revisions_change_per_user.transaction_id - and tr.user_id = check_transaction_revisions_change_per_user.user_id - and tr.committed is null; - - if locals.n_uncommitted > 1 then raise 'users can only have one pending change per transaction'; end if; - - return true; -end -$$ language plpgsql; - -create or replace function transaction_revision_updated() returns trigger as -$$ -<> declare - group_id grp.id%TYPE; - transaction_id integer; -begin - select - t.group_id, - t.id - into locals.group_id, locals.transaction_id - from - transaction t - where t.id = (case when NEW is null then OLD.transaction_id else NEW.transaction_id end); - - -- A deletion should only be able to occur for uncommitted revisions - if NEW is null then - call notify_user('transaction', OLD.user_id, locals.group_id::bigint, - json_build_object('element_id', locals.group_id, 'transaction_id', locals.transaction_id, 'revision_started', OLD.started, 'revision_version', OLD.version, 'revision_committed', OLD.committed, 'deleted', true)); - elseif NEW.committed is null then - call notify_user('transaction', NEW.user_id, locals.group_id::bigint, - json_build_object('element_id', locals.group_id, 'transaction_id', locals.transaction_id, 'revision_started', NEW.started, 'revision_version', NEW.version, 'revision_committed', NEW.committed, 'deleted', false)); - else - call notify_group('transaction', locals.group_id, locals.group_id::bigint, - json_build_object('element_id', locals.group_id, 'transaction_id', locals.transaction_id, 'revision_started', NEW.started, 'revision_version', NEW.version, 'revision_committed', NEW.committed, 'deleted', false)); - end if; - - return null; -end; -$$ language plpgsql; - -drop trigger if exists transaction_revision_trig on transaction_revision; -create trigger transaction_revision_trig - after insert or update or delete - on transaction_revision - for each row -execute function transaction_revision_updated(); - -drop function 
full_transaction_state_valid_at; -drop function committed_file_state_valid_at; -drop function committed_transaction_position_state_valid_at; -drop function committed_transaction_state_valid_at; -drop view aggregated_pending_file_history; -drop view aggregated_committed_file_history; -drop view aggregated_committed_transaction_history; -drop view aggregated_pending_transaction_history; -drop view aggregated_pending_transaction_position_history; -drop view aggregated_committed_transaction_position_history; - -create or replace view aggregated_committed_file_history as ( -select - sub.revision_id, - sub.transaction_id, - sub.file_id, - sub.user_id, - sub.started as revision_started, - sub.committed as revision_committed, - sub.version as revision_version, - first_value(sub.filename) over outer_window as filename, - first_value(sub.mime_type) over outer_window as mime_type, - first_value(sub.blob_id) over outer_window as blob_id, - first_value(sub.deleted) over outer_window as deleted -from ( - select - tr.id as revision_id, - tr.transaction_id, - tr.user_id, - tr.started, - tr.committed, - tr.version, - f.id as file_id, - count(f.id) over wnd as id_partition, - fh.filename, - blob.mime_type, - fh.blob_id, - fh.deleted - from - transaction_revision tr - join file f on tr.transaction_id = f.transaction_id - left join file_history fh on fh.id = f.id and tr.id = fh.revision_id - left join blob on blob.id = fh.blob_id - where - tr.committed is not null window wnd as (partition by f.id order by committed asc) -) as sub window outer_window as (partition by sub.file_id, sub.id_partition order by sub.revision_id) -); - -create or replace view aggregated_pending_file_history as ( -select - tr.id as revision_id, - tr.transaction_id, - tr.user_id as changed_by, - tr.started as revision_started, - tr.version as revision_version, - f.id as file_id, - fh.filename, - blob.mime_type, - fh.blob_id, - fh.deleted -from - transaction_revision tr - join file f on tr.transaction_id = 
f.transaction_id - join file_history fh on fh.id = f.id and tr.id = fh.revision_id - left join blob on blob.id = fh.blob_id -where - tr.committed is null -); - -create or replace function committed_file_state_valid_at( - valid_at timestamptz = now() -) -returns table ( - file_id int, - revision_id bigint, - transaction_id int, - changed_by int, - revision_started timestamptz, - revision_committed timestamptz, - revision_version int, - filename text, - mime_type text, - blob_id int, - deleted bool -) -as -$$ -select distinct on (file_id) - file_id, - revision_id, - transaction_id, - user_id as changed_by, - revision_started, - revision_committed, - revision_version, - filename, - mime_type, - blob_id, - deleted -from - aggregated_committed_file_history -where - revision_committed <= committed_file_state_valid_at.valid_at - and filename is not null -order by - file_id, revision_committed desc -$$ language sql - security invoker - stable; - -create or replace view aggregated_committed_transaction_position_history as ( - select - sub.revision_id, - sub.transaction_id, - sub.item_id, - sub.user_id, - sub.started as revision_started, - sub.committed as revision_committed, - sub.version as revision_version, - first_value(sub.name) over outer_window as name, - first_value(sub.price) over outer_window as price, - first_value(sub.communist_shares) over outer_window as communist_shares, - first_value(sub.deleted) over outer_window as deleted, - first_value(sub.n_usages) over outer_window as n_usages, - first_value(sub.usages) over outer_window as usages, - first_value(sub.involved_accounts) over outer_window as involved_accounts - from ( - select - tr.id as revision_id, - tr.transaction_id, - tr.user_id, - tr.started, - tr.committed, - tr.version, - pi.id as item_id, - count(pi.id) over wnd as id_partition, - pih.name, - pih.price, - pih.communist_shares, - pih.deleted, - coalesce(piu.n_usages, 0) as n_usages, - coalesce(piu.usages, '[]'::jsonb) as usages, - 
coalesce(piu.involved_accounts, array[]::int[]) as involved_accounts - from - transaction_revision tr - join purchase_item pi on tr.transaction_id = pi.transaction_id - left join purchase_item_history pih on pih.id = pi.id and tr.id = pih.revision_id - left join purchase_item_usages_as_json piu on pi.id = piu.item_id and tr.id = piu.revision_id - where - tr.committed is not null window wnd as (partition by pi.id order by committed asc) - ) as sub window outer_window as (partition by sub.item_id, sub.id_partition order by sub.revision_id) -); - -create or replace view aggregated_pending_transaction_position_history as ( - select - tr.id as revision_id, - tr.transaction_id, - tr.user_id as changed_by, - tr.started as revision_started, - tr.version as revision_version, - pi.id as item_id, - pih.name, - pih.price, - pih.communist_shares, - pih.deleted, - coalesce(piu.n_usages, 0) as n_usages, - coalesce(piu.usages, '[]'::jsonb) as usages, - coalesce(piu.involved_accounts, array[]::int[]) as involved_accounts - from - transaction_revision tr - join purchase_item pi on tr.transaction_id = pi.transaction_id - join purchase_item_history pih on pih.id = pi.id and tr.id = pih.revision_id - left join purchase_item_usages_as_json piu on pi.id = piu.item_id and tr.id = piu.revision_id - where - tr.committed is null -); - -create or replace function committed_transaction_position_state_valid_at( - valid_at timestamptz = now() -) -returns table ( - item_id int, - revision_id bigint, - transaction_id int, - changed_by int, - revision_started timestamptz, - revision_committed timestamptz, - revision_version int, - name text, - price double precision, - communist_shares double precision, - deleted bool, - n_usages int, - usages jsonb, - involved_accounts int[] -) -as -$$ -select distinct on (acph.item_id) - acph.item_id, - acph.revision_id, - acph.transaction_id, - acph.user_id as changed_by, - acph.revision_started, - acph.revision_committed, - acph.revision_version, - acph.name, - 
acph.price, - acph.communist_shares, - acph.deleted, - acph.n_usages, - acph.usages, - acph.involved_accounts -from - aggregated_committed_transaction_position_history acph -where - acph.revision_committed <= committed_transaction_position_state_valid_at.valid_at - and acph.name is not null -order by - acph.item_id, acph.revision_committed desc -$$ language sql - security invoker - stable; - -create or replace view aggregated_committed_transaction_history as ( - select - sub.revision_id, - sub.transaction_id, - sub.user_id, - sub.group_id, - sub.started as revision_started, - sub.committed as revision_committed, - sub.version as revision_version, - sub.type, - first_value(sub.value) over outer_window as value, - first_value(sub.description) over outer_window as description, - first_value(sub.currency_symbol) over outer_window as currency_symbol, - first_value(sub.currency_conversion_rate) over outer_window as currency_conversion_rate, - first_value(sub.billed_at) over outer_window as billed_at, - first_value(sub.deleted) over outer_window as deleted, - first_value(sub.n_creditor_shares) over outer_window as n_creditor_shares, - first_value(sub.creditor_shares) over outer_window as creditor_shares, - first_value(sub.n_debitor_shares) over outer_window as n_debitor_shares, - first_value(sub.debitor_shares) over outer_window as debitor_shares, - first_value(sub.involved_accounts) over outer_window as involved_accounts - from ( - select - tr.id as revision_id, - tr.transaction_id, - tr.user_id, - tr.started, - tr.committed, - tr.version, - t.group_id, - t.type, - count(th.id) over wnd as id_partition, - th.value, - th.currency_symbol, - th.currency_conversion_rate, - th.description, - th.billed_at, - th.deleted, - coalesce(csaj.n_shares, 0) as n_creditor_shares, - coalesce(csaj.shares, '[]'::jsonb) as creditor_shares, - coalesce(dsaj.n_shares, 0) as n_debitor_shares, - coalesce(dsaj.shares, '[]'::jsonb) as debitor_shares, - coalesce(csaj.involved_accounts, 
array[]::int[]) || coalesce(dsaj.involved_accounts, array[]::int[]) as involved_accounts - from - transaction_revision tr - join transaction t on tr.transaction_id = t.id - left join transaction_history th on t.id = th.id and tr.id = th.revision_id - left join creditor_shares_as_json csaj on t.id = csaj.transaction_id and tr.id = csaj.revision_id - left join debitor_shares_as_json dsaj on t.id = dsaj.transaction_id and tr.id = dsaj.revision_id - where - tr.committed is not null window wnd as (partition by tr.transaction_id order by committed asc) - ) as sub window outer_window as (partition by sub.transaction_id, sub.id_partition order by sub.revision_id) -); - -create or replace view aggregated_pending_transaction_history as ( - select - tr.id as revision_id, - tr.transaction_id, - tr.user_id as changed_by, - tr.started as revision_started, - tr.version as revision_version, - t.group_id, - t.type, - th.value, - th.currency_symbol, - th.currency_conversion_rate, - th.description, - th.billed_at, - th.deleted, - coalesce(csaj.n_shares, 0) as n_creditor_shares, - coalesce(csaj.shares, '[]'::jsonb) as creditor_shares, - coalesce(dsaj.n_shares, 0) as n_debitor_shares, - coalesce(dsaj.shares, '[]'::jsonb) as debitor_shares, - coalesce(csaj.involved_accounts, array[]::int[]) || coalesce(dsaj.involved_accounts, array[]::int[]) as involved_accounts - from - transaction_revision tr - join transaction t on tr.transaction_id = t.id - join transaction_history th on t.id = th.id and tr.id = th.revision_id - left join creditor_shares_as_json csaj on t.id = csaj.transaction_id and tr.id = csaj.revision_id - left join debitor_shares_as_json dsaj on t.id = dsaj.transaction_id and tr.id = dsaj.revision_id - where - tr.committed is null -); - -create or replace function committed_transaction_state_valid_at( - valid_at timestamptz = now() -) -returns table ( - revision_id bigint, - transaction_id int, - changed_by int, - revision_started timestamptz, - revision_committed timestamptz, 
- revision_version int, - group_id int, - type text, - value double precision, - currency_symbol text, - currency_conversion_rate double precision, - description text, - billed_at date, - deleted bool, - n_creditor_shares int, - creditor_shares jsonb, - n_debitor_shares int, - debitor_shares jsonb, - involved_accounts int[] -) -as -$$ -select distinct on (acth.transaction_id) - acth.revision_id, - acth.transaction_id, - acth.user_id as changed_by, - acth.revision_started, - acth.revision_committed, - acth.revision_version, - acth.group_id, - acth.type, - acth.value, - acth.currency_symbol, - acth.currency_conversion_rate, - acth.description, - acth.billed_at, - acth.deleted, - acth.n_creditor_shares, - acth.creditor_shares, - acth.n_debitor_shares, - acth.debitor_shares, - acth.involved_accounts -from - aggregated_committed_transaction_history acth -where - acth.revision_committed <= committed_transaction_state_valid_at.valid_at - and acth.description is not null -order by - acth.transaction_id, acth.revision_committed desc -$$ language sql - security invoker - stable; - -create or replace function full_transaction_state_valid_at( - seen_by_user int, - valid_at timestamptz = now() -) -returns table ( - transaction_id int, - type text, - group_id int, - last_changed timestamptz, - version int, - is_wip bool, - committed_details jsonb, - pending_details jsonb, - committed_positions jsonb, - pending_positions jsonb, - committed_files jsonb, - pending_files jsonb -) -as -$$ -select - t.id as transaction_id, - t.type, - t.group_id, - greatest(committed_details.last_changed, committed_positions.last_changed, committed_files.last_changed) as last_changed, - greatest( - committed_details.revision_version, - committed_positions.revision_version, - committed_files.revision_version, - pending_details.revision_version, - pending_positions.revision_version, - pending_files.revision_version - ) as version, - exists( - select 1 from transaction_revision tr - where 
tr.transaction_id = t.id and tr.user_id = full_transaction_state_valid_at.seen_by_user and tr.committed is null - ) as is_wip, - committed_details.json_state as committed_details, - pending_details.json_state as pending_details, - committed_positions.json_state as committed_positions, - pending_positions.json_state as pending_positions, - committed_files.json_state as committed_files, - pending_files.json_state as pending_files -from - transaction t -left join ( - select - ctsa.transaction_id, - jsonb_agg(ctsa) as json_state, - max(ctsa.revision_committed) as last_changed, - max(ctsa.revision_version) as revision_version - from committed_transaction_state_valid_at(full_transaction_state_valid_at.valid_at) ctsa - group by ctsa.transaction_id -) committed_details on t.id = committed_details.transaction_id -left join ( - select - apth.transaction_id, - jsonb_agg(apth) as json_state, - max(apth.revision_version) as revision_version - from aggregated_pending_transaction_history apth - where apth.changed_by = full_transaction_state_valid_at.seen_by_user - group by apth.transaction_id -) pending_details on t.id = pending_details.transaction_id -left join ( - select - ctpsa.transaction_id, - jsonb_agg(ctpsa) as json_state, - max(ctpsa.revision_committed) as last_changed, - max(ctpsa.revision_version) as revision_version - from committed_transaction_position_state_valid_at(full_transaction_state_valid_at.valid_at) ctpsa - group by ctpsa.transaction_id -) committed_positions on t.id = committed_positions.transaction_id -left join ( - select - aptph.transaction_id, - jsonb_agg(aptph) as json_state, - max(aptph.revision_version) as revision_version - from aggregated_pending_transaction_position_history aptph - where aptph.changed_by = full_transaction_state_valid_at.seen_by_user - group by aptph.transaction_id -) pending_positions on t.id = pending_positions.transaction_id -left join ( - select - cfsva.transaction_id, - jsonb_agg(cfsva) as json_state, - 
max(cfsva.revision_committed) as last_changed, - max(cfsva.revision_version) as revision_version - from committed_file_state_valid_at(full_transaction_state_valid_at.valid_at) cfsva - group by cfsva.transaction_id -) committed_files on t.id = committed_files.transaction_id -left join ( - select - apfh.transaction_id, - jsonb_agg(apfh) as json_state, - max(apfh.revision_version) as revision_version - from aggregated_pending_file_history apfh - where apfh.changed_by = full_transaction_state_valid_at.seen_by_user - group by apfh.transaction_id -) pending_files on t.id = pending_files.transaction_id -where committed_details.json_state is not null or pending_details.json_state is not null -$$ language sql - security invoker - stable; - -alter table account_revision add column version int default 0; -drop trigger if exists account_history_update_trig on account_history; -drop function if exists account_history_updated(); - -create or replace function account_revision_updated() returns trigger as -$$ -<> declare - group_id grp.id%TYPE; - account_id integer; -begin - select - a.group_id, - a.id - into locals.group_id, locals.account_id - from - account a - where a.id = (case when NEW is null then OLD.account_id else NEW.account_id end); - - -- A deletion should only be able to occur for uncommitted revisions - if NEW is null then - call notify_user('account', OLD.user_id, locals.group_id::bigint, - json_build_object('element_id', locals.group_id, 'account_id', locals.account_id, 'revision_started', OLD.started, 'revision_version', OLD.version, 'revision_committed', OLD.committed, 'deleted', true)); - elseif NEW.committed is null then - call notify_user('account', NEW.user_id, locals.group_id::bigint, - json_build_object('element_id', locals.group_id, 'account_id', locals.account_id, 'revision_started', NEW.started, 'revision_version', NEW.version, 'revision_committed', NEW.committed, 'deleted', false)); - else - call notify_group('account', locals.group_id, 
locals.group_id::bigint, - json_build_object('element_id', locals.group_id, 'account_id', locals.account_id, 'revision_started', NEW.started, 'revision_version', NEW.version, 'revision_committed', NEW.committed, 'deleted', false)); - end if; - - return null; -end; -$$ language plpgsql; - -drop trigger if exists account_revision_trig on account_revision; -create trigger account_revision_trig - after insert or update or delete - on account_revision - for each row -execute function account_revision_updated(); - -drop function if exists committed_account_state_valid_at; -drop view if exists aggregated_committed_account_history; - -create or replace view aggregated_committed_account_history as ( - select - sub.revision_id, - sub.account_id, - sub.user_id, - sub.group_id, - sub.type, - sub.started as revision_started, - sub.committed as revision_committed, - sub.version as revision_version, - first_value(sub.description) over outer_window as description, - first_value(sub.name) over outer_window as name, - first_value(sub.priority) over outer_window as priority, - first_value(sub.deleted) over outer_window as deleted - from ( - select - ar.id as revision_id, - ar.account_id, - ar.user_id, - ar.started, - ar.committed, - ar.version, - a.group_id, - a.type, - count(a.id) over wnd as id_partition, - ah.name, - ah.description, - ah.priority, - ah.deleted - from - account_revision ar - join account a on a.id = ar.account_id - left join account_history ah on ah.id = a.id and ar.id = ah.revision_id - where - ar.committed is not null window wnd as (partition by a.id order by committed asc) - ) as sub window outer_window as (partition by sub.account_id, sub.id_partition order by sub.revision_id) -); - -create or replace function committed_account_state_valid_at( - valid_at timestamptz = now() -) -returns table ( - account_id int, - revision_id bigint, - type text, - user_id int, - group_id int, - revision_started timestamptz, - revision_committed timestamptz, - revision_version 
int, - name text, - description text, - priority int, - deleted bool -) -as -$$ -select distinct on (acah.account_id) - acah.account_id, - acah.revision_id, - acah.type, - acah.user_id, - acah.group_id, - acah.revision_started, - acah.revision_committed, - acah.revision_version, - acah.name, - acah.description, - acah.priority, - acah.deleted -from - aggregated_committed_account_history acah -where - acah.revision_committed <= committed_account_state_valid_at.valid_at -order by - acah.account_id, acah.revision_committed desc -$$ language sql - security invoker - stable; - +alter table account_revision add column version int default 0; \ No newline at end of file diff --git a/abrechnung/database/revisions/0010_clearing_accounts.sql b/abrechnung/database/revisions/0010_clearing_accounts.sql index e758d9cf..03a5a76c 100644 --- a/abrechnung/database/revisions/0010_clearing_accounts.sql +++ b/abrechnung/database/revisions/0010_clearing_accounts.sql @@ -11,327 +11,3 @@ create table if not exists clearing_account_share ( shares double precision not null ); - --- fix the committed change check -create or replace function check_account_revisions_change_per_user(account_id integer, user_id integer, committed timestamp with time zone) -returns boolean -as -$$ -<> declare - n_uncommitted int; -begin - if committed is not null then return true; end if; - - select count(*) into locals.n_uncommitted - from - account_revision ar - where - ar.account_id = check_account_revisions_change_per_user.account_id - and ar.user_id = check_account_revisions_change_per_user.user_id - and ar.committed is null; - - if locals.n_uncommitted > 1 then raise 'users can only have one pending change per account'; end if; - - return true; -end -$$ language plpgsql; - -create or replace function check_committed_accounts( - revision_id bigint, - account_id integer, - started timestamptz, - committed timestamptz -) returns boolean as -$$ -<> declare - n_clearing_shares int; - group_id int; - account_type 
text; - account_deleted boolean; -begin - if committed is null then return true; end if; - - perform - from - account_revision ar - where - ar.account_id = check_committed_accounts.account_id - and ar.id != check_committed_accounts.revision_id - and ar.committed between check_committed_accounts.started and check_committed_accounts.committed; - - if found then raise 'another change was committed earlier, committing is not possible due to conflicts'; end if; - - select - a.type, - ah.deleted, - a.group_id - into locals.account_type, locals.account_deleted, locals.group_id - from - account a - left join account_history ah on a.id = ah.id and ah.revision_id = check_committed_accounts.revision_id - where a.id = check_committed_accounts.account_id; - - select - count(cas.share_account_id) - into locals.n_clearing_shares - from - clearing_account_share cas - where - cas.account_id = check_committed_accounts.account_id - and cas.revision_id = check_committed_accounts.revision_id; - - if locals.account_type = 'personal' then - if locals.n_clearing_shares != 0 then - raise '"personal" type accounts cannot have associated settlement distribution shares'; - end if; - end if; - - return true; -end -$$ language plpgsql; - -create or replace function check_clearing_accounts_for_cyclic_dependencies( - revision_id bigint, - account_id integer, - committed timestamptz -) returns boolean as -$$ -<> declare - group_id int; - account_type text; - - n_clearing_shares int; - - cycle_path int[]; -begin - if committed is null then return true; end if; - - select - a.type, - a.group_id - into locals.account_type, locals.group_id - from - account a - where a.id = check_clearing_accounts_for_cyclic_dependencies.account_id; - - select - count(cas.share_account_id) - into locals.n_clearing_shares - from - clearing_account_share cas - where - cas.account_id = check_clearing_accounts_for_cyclic_dependencies.account_id - and cas.revision_id = 
check_clearing_accounts_for_cyclic_dependencies.revision_id; - - -- now for the juicy part - check if we have circular dependencies in clearing account relations - with recursive search_graph(account_id, share_account_id, depth, path, cycle) as ( - select shares.account_id, shares.share_account_id, 1, array[shares.account_id], false - from clearing_account_share shares - where shares.revision_id = check_clearing_accounts_for_cyclic_dependencies.revision_id - union all - select shares.account_id, shares.share_account_id, sg.depth + 1, sg.path || shares.account_id, shares.account_id = any(sg.path) - from clearing_account_share shares - join account a on shares.account_id = a.id - join search_graph sg on sg.share_account_id = shares.account_id and not sg.cycle - where a.group_id = locals.group_id -- slight optimization for runtime - ) - select path into locals.cycle_path from search_graph where cycle limit 1; - -- TODO: good error message and print out all resulting cycles - if found then - raise 'this change would result in a cyclic dependency between clearing accounts: %', locals.cycle_path; - end if; - - return true; -end -$$ language plpgsql; - -alter table account_revision add constraint account_revision_check_cyclic - check (check_clearing_accounts_for_cyclic_dependencies(id, account_id, committed)); - -create or replace view clearing_account_shares_as_json as -select - revision_id, - account_id, - sum(shares) as n_shares, - array_agg(share_account_id) as involved_accounts, - jsonb_agg(cas.*) as shares -from clearing_account_share cas -group by revision_id, account_id; - -drop function committed_account_state_valid_at; - -create or replace view aggregated_committed_account_history as ( - select - sub.revision_id, - sub.account_id, - sub.user_id, - sub.group_id, - sub.type, - sub.started as revision_started, - sub.committed as revision_committed, - sub.version as revision_version, - first_value(sub.description) over outer_window as description, - 
first_value(sub.name) over outer_window as name, - first_value(sub.priority) over outer_window as priority, - first_value(sub.deleted) over outer_window as deleted, - first_value(sub.n_clearing_shares) over outer_window as n_clearing_shares, - first_value(sub.clearing_shares) over outer_window as clearing_shares, - first_value(sub.involved_accounts) over outer_window as involved_accounts - from ( - select - ar.id as revision_id, - ar.account_id, - ar.user_id, - ar.started, - ar.committed, - ar.version, - a.group_id, - a.type, - count(a.id) over wnd as id_partition, - ah.name, - ah.description, - ah.priority, - ah.deleted, - coalesce(cas.n_shares, 0) as n_clearing_shares, - coalesce(cas.shares, '[]'::jsonb) as clearing_shares, - coalesce(cas.involved_accounts, array[]::int[]) as involved_accounts - from - account_revision ar - join account a on a.id = ar.account_id - left join account_history ah on ah.id = a.id and ar.id = ah.revision_id - left join clearing_account_shares_as_json cas on a.id = cas.account_id and ar.id = cas.revision_id - where - ar.committed is not null window wnd as (partition by a.id order by committed asc) - ) as sub window outer_window as (partition by sub.account_id, sub.id_partition order by sub.revision_id) -); - -create or replace function committed_account_state_valid_at( - valid_at timestamptz = now() -) -returns table ( - account_id int, - revision_id bigint, - type text, - changed_by int, - group_id int, - revision_started timestamptz, - revision_committed timestamptz, - revision_version int, - name text, - description text, - priority int, - deleted bool, - n_clearing_shares int, - clearing_shares jsonb, - involved_accounts int[] -) -as -$$ -select distinct on (acah.account_id) - acah.account_id, - acah.revision_id, - acah.type, - acah.user_id, - acah.group_id, - acah.revision_started, - acah.revision_committed, - acah.revision_version, - acah.name, - acah.description, - acah.priority, - acah.deleted, - acah.n_clearing_shares, - 
acah.clearing_shares, - acah.involved_accounts -from - aggregated_committed_account_history acah -where - acah.revision_committed <= committed_account_state_valid_at.valid_at -order by - acah.account_id, acah.revision_committed desc -$$ language sql - security invoker - stable; - - -create or replace view aggregated_pending_account_history as ( - select - ar.account_id, - ar.id as revision_id, - ar.user_id as changed_by, - ar.started as revision_started, - ar.version as revision_version, - a.group_id, - a.type, - ah.name, - ah.description, - ah.priority, - ah.deleted, - coalesce(cas.n_shares, 0) as n_clearing_shares, - coalesce(cas.shares, '[]'::jsonb) as clearing_shares, - coalesce(cas.involved_accounts, array[]::int[]) || coalesce(cas.involved_accounts, array[]::int[]) as involved_accounts - from - account_revision ar - join account a on ar.account_id = a.id - join account_history ah on a.id = ah.id and ar.id = ah.revision_id - left join clearing_account_shares_as_json cas on a.id = cas.account_id and ar.id = cas.revision_id - where - ar.committed is null -); - - -create or replace function full_account_state_valid_at( - seen_by_user int, - valid_at timestamptz = now() -) -returns table ( - account_id int, - type text, - group_id int, - last_changed timestamptz, - version int, - is_wip bool, - committed_details jsonb, - pending_details jsonb -) -as -$$ -select - a.id as account_id, - a.type, - a.group_id, - committed_details.last_changed as last_changed, - greatest( - committed_details.revision_version, - pending_details.revision_version - ) as version, - exists( - select 1 from account_revision ar - where ar.account_id = a.id and ar.user_id = full_account_state_valid_at.seen_by_user and ar.committed is null - ) as is_wip, - committed_details.json_state as committed_details, - pending_details.json_state as pending_details -from - account a -left join ( - select - casa.account_id, - jsonb_agg(casa) as json_state, - max(casa.revision_committed) as last_changed, - 
max(casa.revision_version) as revision_version - from committed_account_state_valid_at(full_account_state_valid_at.valid_at) casa - group by casa.account_id -) committed_details on a.id = committed_details.account_id -left join ( - select - apah.account_id, - jsonb_agg(apah) as json_state, - max(apah.revision_version) as revision_version - from aggregated_pending_account_history apah - where apah.changed_by = full_account_state_valid_at.seen_by_user - group by apah.account_id -) pending_details on a.id = pending_details.account_id -where committed_details.json_state is not null or pending_details.json_state is not null -$$ language sql - security invoker - stable; diff --git a/abrechnung/database/revisions/0012_correct_change_dates.sql b/abrechnung/database/revisions/0012_correct_change_dates.sql index 68450b4d..a39b8e1b 100644 --- a/abrechnung/database/revisions/0012_correct_change_dates.sql +++ b/abrechnung/database/revisions/0012_correct_change_dates.sql @@ -1,116 +1,2 @@ -- revision: 5b333d87 -- requires: f6c9ff0b - -create or replace function full_transaction_state_valid_at( - seen_by_user int, - valid_at timestamptz = now() -) - returns table ( - transaction_id int, - type text, - group_id int, - last_changed timestamptz, - version int, - is_wip bool, - committed_details jsonb, - pending_details jsonb, - committed_positions jsonb, - pending_positions jsonb, - committed_files jsonb, - pending_files jsonb - ) -as -$$ -select - t.id as transaction_id, - t.type, - t.group_id, - greatest( - committed_details.last_changed, - committed_positions.last_changed, - committed_files.last_changed, - pending_details.last_changed, - pending_positions.last_changed, - pending_files.last_changed - ) as last_changed, - greatest( - committed_details.revision_version, - committed_positions.revision_version, - committed_files.revision_version, - pending_details.revision_version, - pending_positions.revision_version, - pending_files.revision_version - ) as version, - exists( - 
select 1 from transaction_revision tr - where tr.transaction_id = t.id and tr.user_id = full_transaction_state_valid_at.seen_by_user and tr.committed is null - ) as is_wip, - committed_details.json_state as committed_details, - pending_details.json_state as pending_details, - committed_positions.json_state as committed_positions, - pending_positions.json_state as pending_positions, - committed_files.json_state as committed_files, - pending_files.json_state as pending_files -from - transaction t - left join ( - select - ctsa.transaction_id, - jsonb_agg(ctsa) as json_state, - max(ctsa.revision_committed) as last_changed, - max(ctsa.revision_version) as revision_version - from committed_transaction_state_valid_at(full_transaction_state_valid_at.valid_at) ctsa - group by ctsa.transaction_id - ) committed_details on t.id = committed_details.transaction_id - left join ( - select - apth.transaction_id, - jsonb_agg(apth) as json_state, - max(apth.revision_started) as last_changed, - max(apth.revision_version) as revision_version - from aggregated_pending_transaction_history apth - where apth.changed_by = full_transaction_state_valid_at.seen_by_user - group by apth.transaction_id - ) pending_details on t.id = pending_details.transaction_id - left join ( - select - ctpsa.transaction_id, - jsonb_agg(ctpsa) as json_state, - max(ctpsa.revision_committed) as last_changed, - max(ctpsa.revision_version) as revision_version - from committed_transaction_position_state_valid_at(full_transaction_state_valid_at.valid_at) ctpsa - group by ctpsa.transaction_id - ) committed_positions on t.id = committed_positions.transaction_id - left join ( - select - aptph.transaction_id, - jsonb_agg(aptph) as json_state, - max(aptph.revision_started) as last_changed, - max(aptph.revision_version) as revision_version - from aggregated_pending_transaction_position_history aptph - where aptph.changed_by = full_transaction_state_valid_at.seen_by_user - group by aptph.transaction_id - ) pending_positions 
on t.id = pending_positions.transaction_id - left join ( - select - cfsva.transaction_id, - jsonb_agg(cfsva) as json_state, - max(cfsva.revision_committed) as last_changed, - max(cfsva.revision_version) as revision_version - from committed_file_state_valid_at(full_transaction_state_valid_at.valid_at) cfsva - group by cfsva.transaction_id - ) committed_files on t.id = committed_files.transaction_id - left join ( - select - apfh.transaction_id, - jsonb_agg(apfh) as json_state, - max(apfh.revision_started) as last_changed, - max(apfh.revision_version) as revision_version - from aggregated_pending_file_history apfh - where apfh.changed_by = full_transaction_state_valid_at.seen_by_user - group by apfh.transaction_id - ) pending_files on t.id = pending_files.transaction_id -where committed_details.json_state is not null or pending_details.json_state is not null -$$ language sql - security invoker - stable; diff --git a/abrechnung/database/revisions/0014_associate_accounts_with_users.sql b/abrechnung/database/revisions/0014_associate_accounts_with_users.sql index a7d3719f..8f0070c2 100644 --- a/abrechnung/database/revisions/0014_associate_accounts_with_users.sql +++ b/abrechnung/database/revisions/0014_associate_accounts_with_users.sql @@ -4,130 +4,3 @@ alter table grp add column add_user_account_on_join boolean default false not null; alter table account_history add column owning_user_id integer references usr(id); - -drop view aggregated_committed_account_history; -create or replace view aggregated_committed_account_history as ( - select - sub.revision_id, - sub.account_id, - sub.user_id, - sub.group_id, - sub.type, - sub.started as revision_started, - sub.committed as revision_committed, - sub.version as revision_version, - first_value(sub.description) over outer_window as description, - first_value(sub.name) over outer_window as name, - first_value(sub.priority) over outer_window as priority, - first_value(sub.owning_user_id) over outer_window as owning_user_id, - 
first_value(sub.deleted) over outer_window as deleted, - first_value(sub.n_clearing_shares) over outer_window as n_clearing_shares, - first_value(sub.clearing_shares) over outer_window as clearing_shares, - first_value(sub.involved_accounts) over outer_window as involved_accounts - from ( - select - ar.id as revision_id, - ar.account_id, - ar.user_id, - ar.started, - ar.committed, - ar.version, - a.group_id, - a.type, - count(a.id) over wnd as id_partition, - ah.name, - ah.description, - ah.priority, - ah.owning_user_id, - ah.deleted, - coalesce(cas.n_shares, 0) as n_clearing_shares, - coalesce(cas.shares, '[]'::jsonb) as clearing_shares, - coalesce(cas.involved_accounts, array[]::int[]) as involved_accounts - from - account_revision ar - join account a on a.id = ar.account_id - left join account_history ah on ah.id = a.id and ar.id = ah.revision_id - left join clearing_account_shares_as_json cas on a.id = cas.account_id and ar.id = cas.revision_id - where - ar.committed is not null window wnd as (partition by a.id order by committed asc) - ) as sub window outer_window as (partition by sub.account_id, sub.id_partition order by sub.revision_id) -); - -drop function committed_account_state_valid_at; -create or replace function committed_account_state_valid_at( - valid_at timestamptz = now() -) -returns table ( - account_id int, - revision_id bigint, - type text, - changed_by int, - group_id int, - revision_started timestamptz, - revision_committed timestamptz, - revision_version int, - name text, - description text, - priority int, - owning_user_id int, - deleted bool, - n_clearing_shares int, - clearing_shares jsonb, - involved_accounts int[] -) -as -$$ -select distinct on (acah.account_id) - acah.account_id, - acah.revision_id, - acah.type, - acah.user_id, - acah.group_id, - acah.revision_started, - acah.revision_committed, - acah.revision_version, - acah.name, - acah.description, - acah.priority, - acah.owning_user_id, - acah.deleted, - acah.n_clearing_shares, - 
acah.clearing_shares, - acah.involved_accounts -from - aggregated_committed_account_history acah -where - acah.revision_committed <= committed_account_state_valid_at.valid_at -order by - acah.account_id, acah.revision_committed desc -$$ language sql - security invoker - stable; - - -drop view aggregated_pending_account_history; -create or replace view aggregated_pending_account_history as ( - select - ar.account_id, - ar.id as revision_id, - ar.user_id as changed_by, - ar.started as revision_started, - ar.version as revision_version, - a.group_id, - a.type, - ah.name, - ah.description, - ah.priority, - ah.owning_user_id, - ah.deleted, - coalesce(cas.n_shares, 0) as n_clearing_shares, - coalesce(cas.shares, '[]'::jsonb) as clearing_shares, - coalesce(cas.involved_accounts, array[]::int[]) || coalesce(cas.involved_accounts, array[]::int[]) as involved_accounts - from - account_revision ar - join account a on ar.account_id = a.id - join account_history ah on a.id = ah.id and ar.id = ah.revision_id - left join clearing_account_shares_as_json cas on a.id = cas.account_id and ar.id = cas.revision_id - where - ar.committed is null -); diff --git a/abrechnung/database/revisions/0015_metadata_fields_and_revision_changed.sql b/abrechnung/database/revisions/0015_metadata_fields_and_revision_changed.sql index 0b8b6a00..3671eb6d 100644 --- a/abrechnung/database/revisions/0015_metadata_fields_and_revision_changed.sql +++ b/abrechnung/database/revisions/0015_metadata_fields_and_revision_changed.sql @@ -1,46 +1,6 @@ -- revision: ee5d2b35 -- requires: 174ef0fc -drop view aggregated_committed_account_history; -drop function committed_account_state_valid_at; -drop view aggregated_pending_account_history; -drop function full_account_state_valid_at; - -drop view aggregated_committed_file_history; -drop view aggregated_committed_transaction_position_history; -drop view aggregated_committed_transaction_history; -drop view aggregated_pending_file_history; -drop view 
aggregated_pending_transaction_position_history; -drop view aggregated_pending_transaction_history; -drop function committed_transaction_state_valid_at; -drop function committed_file_state_valid_at; -drop function committed_transaction_position_state_valid_at; -drop function full_transaction_state_valid_at; - -create or replace function update_last_changed() returns trigger as -$$ -begin - NEW.last_changed = now(); - return NEW; -end; -$$ language plpgsql; - -create or replace function update_related_transaction_last_changed() returns trigger as -$$ -begin - update transaction_revision set last_changed = now() where id = NEW.revision_id; - return null; -end; -$$ language plpgsql; - -create or replace function update_related_account_last_changed() returns trigger as -$$ -begin - update account_revision set last_changed = now() where id = NEW.revision_id; - return null; -end; -$$ language plpgsql; - alter table account_revision add column last_changed timestamptz; update account_revision @@ -51,13 +11,6 @@ alter table account_revision alter table account_revision alter column last_changed set default now(); -drop trigger if exists account_revision_last_change_update_trig on account_revision; -create trigger account_revision_last_change_update_trig - after insert or update - on account_revision - for each row -execute function update_last_changed(); - alter table transaction_revision add column last_changed timestamptz; update transaction_revision @@ -68,34 +21,6 @@ alter table transaction_revision alter table transaction_revision alter column last_changed set default now(); -drop trigger if exists transaction_revision_last_change_update_trig on transaction_revision; -create trigger transaction_revision_last_change_update_trig - after insert or update - on transaction_revision - for each row -execute function update_last_changed(); - -drop trigger if exists transaction_history_last_changed_update_trig on transaction_history; -create trigger 
transaction_history_last_changed_update_trig - after insert or update - on transaction_history - for each row -execute function update_related_transaction_last_changed(); - -drop trigger if exists purchase_item_last_changed_update_trig on purchase_item_history; -create trigger purchase_item_last_changed_update_trig - after insert or update - on purchase_item_history - for each row -execute function update_related_transaction_last_changed(); - -drop trigger if exists account_last_changed_update_trig on account_history; -create trigger account_last_changed_update_trig - after insert or update - on account_history - for each row -execute function update_related_account_last_changed(); - create table tag ( id serial primary key, group_id integer not null references grp (id), @@ -110,17 +35,6 @@ create table transaction_to_tag ( primary key (transaction_id, revision_id, tag_id) ); -create view transaction_tags (transaction_id, revision_id, tag_names) as - SELECT - ttt.transaction_id, - ttt.revision_id, - array_agg(tag.name) AS tag_names - FROM - transaction_to_tag ttt - join tag on ttt.tag_id = tag.id - GROUP BY - ttt.transaction_id, ttt.revision_id; - alter table transaction_history rename column description to name; alter table transaction_history @@ -140,752 +54,3 @@ create table account_to_tag ( primary key (account_id, revision_id, tag_id) ); -create or replace view account_tags (account_id, revision_id, tag_names) as - SELECT - att.account_id, - att.revision_id, - array_agg(tag.name) AS tag_names - FROM - account_to_tag att - join tag on att.tag_id = tag.id - GROUP BY - att.account_id, att.revision_id; - -create or replace function check_committed_accounts( - revision_id bigint, account_id integer, started timestamp with time zone, committed timestamp with time zone -) returns boolean - language plpgsql as -$$ -<> declare - n_tags int; - n_clearing_shares int; - group_id int; - account_type text; - date_info date; -begin - if committed is null then return true; 
end if; - - perform - from - account_revision ar - where - ar.account_id = check_committed_accounts.account_id - and ar.id != check_committed_accounts.revision_id - and ar.committed between check_committed_accounts.started and check_committed_accounts.committed; - - if found then raise 'another change was committed earlier, committing is not possible due to conflicts'; end if; - - select - a.type, - a.group_id - into locals.account_type, locals.group_id - from - account a - where - a.id = check_committed_accounts.account_id; - - select - count(cas.share_account_id) - into locals.n_clearing_shares - from - clearing_account_share cas - where - cas.account_id = check_committed_accounts.account_id - and cas.revision_id = check_committed_accounts.revision_id; - - select - count(*) - into locals.n_tags - from - account_to_tag att - where - att.account_id = check_committed_accounts.account_id - and att.revision_id = check_committed_accounts.revision_id; - - select - ah.date_info - into locals.date_info - from - account_history ah - where - ah.id = check_committed_accounts.account_id - and ah.revision_id = check_committed_accounts.revision_id; - - if locals.account_type = 'personal' then - if locals.n_clearing_shares != 0 then - raise '"personal" type accounts cannot have associated settlement distribution shares'; - end if; - if locals.date_info is not null then - raise '"personal" type accounts cannot have a date set'; - end if; - if locals.n_tags != 0 then - raise '"personal" type accounts cannot have tags'; - end if; - elsif locals.account_type = 'clearing' then - if locals.date_info is null then - raise '"clearing" type accounts must have a date set'; - end if; - end if; - - return true; -end -$$; - -create or replace view aggregated_committed_account_history as - ( - select - sub.revision_id, - sub.account_id, - sub.user_id, - sub.group_id, - sub.type, - sub.started as revision_started, - sub.committed as revision_committed, - sub.last_changed as last_changed, - 
first_value(sub.description) over outer_window as description, - first_value(sub.name) over outer_window as name, - first_value(sub.owning_user_id) over outer_window as owning_user_id, - first_value(sub.date_info) over outer_window as date_info, - first_value(sub.deleted) over outer_window as deleted, - first_value(sub.n_clearing_shares) over outer_window as n_clearing_shares, - first_value(sub.clearing_shares) over outer_window as clearing_shares, - first_value(sub.involved_accounts) over outer_window as involved_accounts, - first_value(sub.tags) over outer_window as tags - from - ( - select - ar.id as revision_id, - ar.account_id, - ar.user_id, - ar.started, - ar.committed, - ar.last_changed, - a.group_id, - a.type, - count(a.id) over wnd as id_partition, - ah.name, - ah.description, - ah.owning_user_id, - ah.date_info, - ah.deleted, - coalesce(cas.n_shares, 0) as n_clearing_shares, - coalesce(cas.shares, '[]'::jsonb) as clearing_shares, - coalesce(cas.involved_accounts, array []::int[]) as involved_accounts, - coalesce(t.tag_names, array []::varchar(255)[]) as tags - from - account_revision ar - join account a on a.id = ar.account_id - left join account_history ah on ah.id = a.id and ar.id = ah.revision_id - left join clearing_account_shares_as_json cas on a.id = cas.account_id and ar.id = cas.revision_id - left join account_tags t on a.id = t.account_id and ar.id = t.revision_id - where - ar.committed is not null window wnd as (partition by a.id order by committed asc) - ) as sub window outer_window as (partition by sub.account_id, sub.id_partition order by sub.revision_id) ); - -create or replace function committed_account_state_valid_at( - valid_at timestamptz = now() -) - returns table ( - account_id int, - revision_id bigint, - type text, - changed_by int, - group_id int, - revision_started timestamptz, - revision_committed timestamptz, - last_changed timestamptz, - name text, - description text, - owning_user_id int, - date_info date, - deleted bool, - 
n_clearing_shares int, - clearing_shares jsonb, - involved_accounts int[], - tags varchar(255)[] - ) -as -$$ -select distinct on (acah.account_id) - acah.account_id, - acah.revision_id, - acah.type, - acah.user_id, - acah.group_id, - acah.revision_started, - acah.revision_committed, - acah.last_changed, - acah.name, - acah.description, - acah.owning_user_id, - acah.date_info, - acah.deleted, - acah.n_clearing_shares, - acah.clearing_shares, - acah.involved_accounts, - acah.tags -from - aggregated_committed_account_history acah -where - acah.revision_committed <= committed_account_state_valid_at.valid_at -order by - acah.account_id, acah.revision_committed desc -$$ language sql - security invoker - stable; - - -create or replace view aggregated_pending_account_history as - ( - select - ar.account_id, - ar.id as revision_id, - ar.user_id as changed_by, - ar.started as revision_started, - ar.last_changed as last_changed, - a.group_id, - a.type, - ah.name, - ah.description, - ah.owning_user_id, - ah.date_info, - ah.deleted, - coalesce(cas.n_shares, 0) as n_clearing_shares, - coalesce(cas.shares, '[]'::jsonb) as clearing_shares, - coalesce(cas.involved_accounts, array []::int[]) as involved_accounts, - coalesce(t.tag_names, array []::varchar(255)[]) as tags - from - account_revision ar - join account a on ar.account_id = a.id - join account_history ah on a.id = ah.id and ar.id = ah.revision_id - left join clearing_account_shares_as_json cas on a.id = cas.account_id and ar.id = cas.revision_id - left join account_tags t on a.id = t.account_id and ar.id = t.revision_id - where - ar.committed is null ); - -create or replace function full_account_state_valid_at( - seen_by_user integer, valid_at timestamp with time zone DEFAULT now() -) - returns TABLE ( - account_id integer, - type text, - group_id integer, - last_changed timestamptz, - is_wip boolean, - committed_details jsonb, - pending_details jsonb - ) - stable - language sql -as -$$ -select - a.id as account_id, - 
a.type, - a.group_id, - greatest(committed_details.last_changed, pending_details.last_changed) as last_changed, - exists(select - 1 - from - account_revision ar - where - ar.account_id = a.id - and ar.user_id = full_account_state_valid_at.seen_by_user - and ar.committed is null) as is_wip, - committed_details.json_state as committed_details, - pending_details.json_state as pending_details -from - account a - left join ( - select - casa.account_id, - jsonb_agg(casa) as json_state, - max(casa.last_changed) as last_changed - from - committed_account_state_valid_at(full_account_state_valid_at.valid_at) casa - group by casa.account_id - ) committed_details on a.id = committed_details.account_id - left join ( - select - apah.account_id, - jsonb_agg(apah) as json_state, - max(apah.last_changed) as last_changed - from - aggregated_pending_account_history apah - where - apah.changed_by = full_account_state_valid_at.seen_by_user - group by apah.account_id - ) pending_details on a.id = pending_details.account_id -where - committed_details.json_state is not null - or pending_details.json_state is not null -$$; - -create or replace view aggregated_pending_transaction_position_history as - SELECT - tr.id AS revision_id, - tr.transaction_id, - tr.user_id AS changed_by, - tr.started AS revision_started, - tr.last_changed AS last_changed, - pi.id AS item_id, - pih.name, - pih.price, - pih.communist_shares, - pih.deleted, - coalesce(piu.n_usages, 0::double precision) AS n_usages, - coalesce(piu.usages, '[]'::jsonb) AS usages, - coalesce(piu.involved_accounts, array []::integer[]) AS involved_accounts - FROM - transaction_revision tr - JOIN purchase_item pi ON tr.transaction_id = pi.transaction_id - JOIN purchase_item_history pih ON pih.id = pi.id AND tr.id = pih.revision_id - LEFT JOIN purchase_item_usages_as_json piu ON pi.id = piu.item_id AND tr.id = piu.revision_id - WHERE - tr.committed IS NULL; - -create or replace view aggregated_pending_transaction_history as - SELECT - tr.id 
AS revision_id, - tr.transaction_id, - tr.user_id AS changed_by, - tr.started AS revision_started, - tr.last_changed AS last_changed, - t.group_id, - t.type, - th.value, - th.currency_symbol, - th.currency_conversion_rate, - th.name, - th.description, - th.billed_at, - th.deleted, - coalesce(csaj.n_shares, 0::double precision) AS n_creditor_shares, - coalesce(csaj.shares, '[]'::jsonb) AS creditor_shares, - coalesce(dsaj.n_shares, 0::double precision) AS n_debitor_shares, - coalesce(dsaj.shares, '[]'::jsonb) AS debitor_shares, - coalesce(dsaj.involved_accounts, ARRAY []::integer[]) AS involved_accounts, - coalesce(tt.tag_names, array []::varchar(255)[]) as tags - FROM - transaction_revision tr - JOIN transaction t ON tr.transaction_id = t.id - JOIN transaction_history th ON t.id = th.id AND tr.id = th.revision_id - LEFT JOIN creditor_shares_as_json csaj ON t.id = csaj.transaction_id AND tr.id = csaj.revision_id - LEFT JOIN debitor_shares_as_json dsaj ON t.id = dsaj.transaction_id AND tr.id = dsaj.revision_id - left join transaction_tags tt on tt.transaction_id = t.id and tt.revision_id = tr.id - WHERE - tr.committed IS NULL; - -create or replace view aggregated_pending_file_history as - SELECT - tr.id AS revision_id, - tr.transaction_id, - tr.user_id AS changed_by, - tr.started AS revision_started, - tr.last_changed AS last_changed, - f.id AS file_id, - fh.filename, - blob.mime_type, - fh.blob_id, - fh.deleted - FROM - transaction_revision tr - JOIN file f ON tr.transaction_id = f.transaction_id - JOIN file_history fh ON fh.id = f.id AND tr.id = fh.revision_id - LEFT JOIN blob ON blob.id = fh.blob_id - WHERE - tr.committed IS NULL; - -create or replace view aggregated_committed_transaction_position_history as - SELECT - sub.revision_id, - sub.transaction_id, - sub.item_id, - sub.user_id, - sub.started AS revision_started, - sub.committed AS revision_committed, - sub.last_changed AS last_changed, - first_value(sub.name) OVER outer_window AS name, - 
first_value(sub.price) OVER outer_window AS price, - first_value(sub.communist_shares) OVER outer_window AS communist_shares, - first_value(sub.deleted) OVER outer_window AS deleted, - first_value(sub.n_usages) OVER outer_window AS n_usages, - first_value(sub.usages) OVER outer_window AS usages, - first_value(sub.involved_accounts) OVER outer_window AS involved_accounts - FROM - ( - SELECT - tr.id AS revision_id, - tr.transaction_id, - tr.user_id, - tr.started, - tr.committed, - tr.last_changed, - pi.id AS item_id, - count(pi.id) OVER wnd AS id_partition, - pih.name, - pih.price, - pih.communist_shares, - pih.deleted, - COALESCE(piu.n_usages, 0::double precision) AS n_usages, - COALESCE(piu.usages, '[]'::jsonb) AS usages, - COALESCE(piu.involved_accounts, ARRAY []::integer[]) AS involved_accounts - FROM - transaction_revision tr - JOIN purchase_item pi ON tr.transaction_id = pi.transaction_id - LEFT JOIN purchase_item_history pih ON pih.id = pi.id AND tr.id = pih.revision_id - LEFT JOIN purchase_item_usages_as_json piu ON pi.id = piu.item_id AND tr.id = piu.revision_id - WHERE - tr.committed IS NOT NULL WINDOW wnd AS (PARTITION BY pi.id ORDER BY tr.committed) - ) sub WINDOW outer_window AS (PARTITION BY sub.item_id, sub.id_partition ORDER BY sub.revision_id); - -create or replace view aggregated_committed_transaction_history as - SELECT - sub.revision_id, - sub.transaction_id, - sub.user_id, - sub.group_id, - sub.started AS revision_started, - sub.committed AS revision_committed, - sub.last_changed AS last_changed, - sub.type, - first_value(sub.value) OVER outer_window AS value, - first_value(sub.name) OVER outer_window AS name, - first_value(sub.description) OVER outer_window AS description, - first_value(sub.currency_symbol) OVER outer_window AS currency_symbol, - first_value(sub.currency_conversion_rate) OVER outer_window AS currency_conversion_rate, - first_value(sub.billed_at) OVER outer_window AS billed_at, - first_value(sub.deleted) OVER outer_window AS 
deleted, - first_value(sub.n_creditor_shares) OVER outer_window AS n_creditor_shares, - first_value(sub.creditor_shares) OVER outer_window AS creditor_shares, - first_value(sub.n_debitor_shares) OVER outer_window AS n_debitor_shares, - first_value(sub.debitor_shares) OVER outer_window AS debitor_shares, - first_value(sub.involved_accounts) OVER outer_window AS involved_accounts, - first_value(sub.tags) over outer_window as tags - FROM - ( - SELECT - tr.id AS revision_id, - tr.transaction_id, - tr.user_id, - tr.started, - tr.committed, - tr.last_changed, - t.group_id, - t.type, - count(th.id) OVER wnd AS id_partition, - th.value, - th.currency_symbol, - th.currency_conversion_rate, - th.name, - th.description, - th.billed_at, - th.deleted, - COALESCE(csaj.n_shares, 0::double precision) AS n_creditor_shares, - COALESCE(csaj.shares, '[]'::jsonb) AS creditor_shares, - COALESCE(dsaj.n_shares, 0::double precision) AS n_debitor_shares, - COALESCE(dsaj.shares, '[]'::jsonb) AS debitor_shares, - coalesce(csaj.involved_accounts, array[]::int[]) || coalesce(dsaj.involved_accounts, array[]::int[]) as involved_accounts, - coalesce(tt.tag_names, array []::varchar(255)[]) as tags - FROM - transaction_revision tr - JOIN transaction t ON tr.transaction_id = t.id - LEFT JOIN transaction_history th ON t.id = th.id AND tr.id = th.revision_id - LEFT JOIN creditor_shares_as_json csaj ON t.id = csaj.transaction_id AND tr.id = csaj.revision_id - LEFT JOIN debitor_shares_as_json dsaj ON t.id = dsaj.transaction_id AND tr.id = dsaj.revision_id - left join transaction_tags tt on tt.transaction_id = t.id and tt.revision_id = tr.id - WHERE - tr.committed IS NOT NULL WINDOW wnd AS (PARTITION BY tr.transaction_id ORDER BY tr.committed) - ) sub WINDOW outer_window AS (PARTITION BY sub.transaction_id, sub.id_partition ORDER BY sub.revision_id); - -create or replace view aggregated_committed_file_history as - SELECT - sub.revision_id, - sub.transaction_id, - sub.file_id, - sub.user_id, - sub.started 
AS revision_started, - sub.committed AS revision_committed, - sub.last_changed AS last_changed, - first_value(sub.filename) OVER outer_window AS filename, - first_value(sub.mime_type) OVER outer_window AS mime_type, - first_value(sub.blob_id) OVER outer_window AS blob_id, - first_value(sub.deleted) OVER outer_window AS deleted - FROM - ( - SELECT - tr.id AS revision_id, - tr.transaction_id, - tr.user_id, - tr.started, - tr.committed, - tr.last_changed, - f.id AS file_id, - count(f.id) OVER wnd AS id_partition, - fh.filename, - blob.mime_type, - fh.blob_id, - fh.deleted - FROM - transaction_revision tr - JOIN file f ON tr.transaction_id = f.transaction_id - LEFT JOIN file_history fh ON fh.id = f.id AND tr.id = fh.revision_id - LEFT JOIN blob ON blob.id = fh.blob_id - WHERE - tr.committed IS NOT NULL WINDOW wnd AS (PARTITION BY f.id ORDER BY tr.committed) - ) sub WINDOW outer_window AS (PARTITION BY sub.file_id, sub.id_partition ORDER BY sub.revision_id); - -create or replace function committed_file_state_valid_at( - valid_at timestamp with time zone DEFAULT now() -) - returns TABLE ( - file_id integer, - revision_id bigint, - transaction_id integer, - changed_by integer, - revision_started timestamptz, - revision_committed timestamptz, - last_changed timestamptz, - filename text, - mime_type text, - blob_id integer, - deleted boolean - ) - stable - language sql -as -$$ -select distinct on (file_id) - file_id, - revision_id, - transaction_id, - user_id as changed_by, - revision_started, - revision_committed, - last_changed, - filename, - mime_type, - blob_id, - deleted -from - aggregated_committed_file_history -where - revision_committed <= committed_file_state_valid_at.valid_at - and filename is not null -order by - file_id, revision_committed desc -$$; - -create or replace function committed_transaction_position_state_valid_at( - valid_at timestamp with time zone DEFAULT now() -) - returns TABLE ( - item_id integer, - revision_id bigint, - transaction_id integer, - 
changed_by integer, - revision_started timestamptz, - revision_committed timestamptz, - last_changed timestamptz, - name text, - price double precision, - communist_shares double precision, - deleted boolean, - n_usages integer, - usages jsonb, - involved_accounts integer[] - ) - stable - language sql -as -$$ -select distinct on (acph.item_id) - acph.item_id, - acph.revision_id, - acph.transaction_id, - acph.user_id as changed_by, - acph.revision_started, - acph.revision_committed, - acph.last_changed, - acph.name, - acph.price, - acph.communist_shares, - acph.deleted, - acph.n_usages, - acph.usages, - acph.involved_accounts -from - aggregated_committed_transaction_position_history acph -where - acph.revision_committed <= committed_transaction_position_state_valid_at.valid_at - and acph.name is not null -order by - acph.item_id, acph.revision_committed desc -$$; - -create or replace function committed_transaction_state_valid_at( - valid_at timestamp with time zone DEFAULT now() -) - returns TABLE ( - revision_id bigint, - transaction_id integer, - changed_by integer, - revision_started timestamptz, - revision_committed timestamptz, - last_changed timestamptz, - group_id integer, - type text, - value double precision, - currency_symbol text, - currency_conversion_rate double precision, - name text, - description text, - billed_at date, - deleted boolean, - n_creditor_shares integer, - creditor_shares jsonb, - n_debitor_shares integer, - debitor_shares jsonb, - involved_accounts integer[], - tags varchar(255)[] - ) - stable - language sql -as -$$ -select distinct on (acth.transaction_id) - acth.revision_id, - acth.transaction_id, - acth.user_id as changed_by, - acth.revision_started, - acth.revision_committed, - acth.last_changed, - acth.group_id, - acth.type, - acth.value, - acth.currency_symbol, - acth.currency_conversion_rate, - acth.name, - acth.description, - acth.billed_at, - acth.deleted, - acth.n_creditor_shares, - acth.creditor_shares, - 
acth.n_debitor_shares, - acth.debitor_shares, - acth.involved_accounts, - acth.tags -from - aggregated_committed_transaction_history acth -where - acth.revision_committed <= committed_transaction_state_valid_at.valid_at -order by - acth.transaction_id, acth.revision_committed desc -$$; - -create or replace function full_transaction_state_valid_at( - seen_by_user integer, valid_at timestamp with time zone DEFAULT now() -) - returns TABLE ( - transaction_id integer, - type text, - group_id integer, - last_changed timestamp with time zone, - is_wip boolean, - committed_details jsonb, - pending_details jsonb, - committed_positions jsonb, - pending_positions jsonb, - committed_files jsonb, - pending_files jsonb - ) - stable - language sql -as -$$ -select - t.id as transaction_id, - t.type, - t.group_id, - greatest(committed_details.last_changed, committed_positions.last_changed, committed_files.last_changed, - pending_details.last_changed, pending_positions.last_changed, pending_files.last_changed) as last_changed, - exists(select - 1 - from - transaction_revision tr - where - tr.transaction_id = t.id - and tr.user_id = full_transaction_state_valid_at.seen_by_user - and tr.committed is null) as is_wip, - committed_details.json_state as committed_details, - pending_details.json_state as pending_details, - committed_positions.json_state as committed_positions, - pending_positions.json_state as pending_positions, - committed_files.json_state as committed_files, - pending_files.json_state as pending_files -from - transaction t - left join ( - select - ctsa.transaction_id, - jsonb_agg(ctsa) as json_state, - max(ctsa.last_changed) as last_changed - from - committed_transaction_state_valid_at(full_transaction_state_valid_at.valid_at) ctsa - group by ctsa.transaction_id - ) committed_details on t.id = committed_details.transaction_id - left join ( - select - apth.transaction_id, - jsonb_agg(apth) as json_state, - max(apth.last_changed) as last_changed - from - 
aggregated_pending_transaction_history apth - where - apth.changed_by = full_transaction_state_valid_at.seen_by_user - group by apth.transaction_id - ) pending_details on t.id = pending_details.transaction_id - left join ( - select - ctpsa.transaction_id, - jsonb_agg(ctpsa) as json_state, - max(ctpsa.last_changed) as last_changed - from - committed_transaction_position_state_valid_at(full_transaction_state_valid_at.valid_at) ctpsa - group by ctpsa.transaction_id - ) committed_positions on t.id = committed_positions.transaction_id - left join ( - select - aptph.transaction_id, - jsonb_agg(aptph) as json_state, - max(aptph.last_changed) as last_changed - from - aggregated_pending_transaction_position_history aptph - where - aptph.changed_by = full_transaction_state_valid_at.seen_by_user - group by aptph.transaction_id - ) pending_positions on t.id = pending_positions.transaction_id - left join ( - select - cfsva.transaction_id, - jsonb_agg(cfsva) as json_state, - max(cfsva.last_changed) as last_changed - from - committed_file_state_valid_at(full_transaction_state_valid_at.valid_at) cfsva - group by cfsva.transaction_id - ) committed_files on t.id = committed_files.transaction_id - left join ( - select - apfh.transaction_id, - jsonb_agg(apfh) as json_state, - max(apfh.last_changed) as last_changed - from - aggregated_pending_file_history apfh - where - apfh.changed_by = full_transaction_state_valid_at.seen_by_user - group by apfh.transaction_id - ) pending_files on t.id = pending_files.transaction_id -where - committed_details.json_state is not null - or pending_details.json_state is not null -$$; diff --git a/abrechnung/database/revisions/__init__.py b/abrechnung/database/revisions/__init__.py deleted file mode 100644 index c2f14f26..00000000 --- a/abrechnung/database/revisions/__init__.py +++ /dev/null @@ -1,149 +0,0 @@ -import logging -import re -from pathlib import Path -from typing import Optional - -import asyncpg -from asyncpg.pool import Pool - -REVISION_DIR = 
Path(__file__).parent -logger = logging.getLogger(__name__) - -REVISION_VERSION_RE = re.compile(r"^-- revision: (?P\w+)$") -REVISION_REQUIRES_RE = re.compile(r"^-- requires: (?P\w+)$") -REVISION_TABLE = "schema_revision" - - -class SchemaRevision: - def __init__( - self, file_name: Path, code: str, version: str, requires: Optional[str] - ): - self.file_name = file_name - self.code = code - self.version = version - self.requires = requires - - async def apply(self, conn: asyncpg.Connection): - logger.info( - f"Applying revision {self.file_name.name} with version {self.version}" - ) - if self.requires: - version = await conn.fetchval( - f"update {REVISION_TABLE} set version = $1 where version = $2 returning version", - self.version, - self.requires, - ) - if version != self.version: - raise ValueError( - f"Found other revision present than {self.requires} which was required" - ) - else: - n_table = await conn.fetchval(f"select count(*) from {REVISION_TABLE}") - if n_table != 0: - raise ValueError( - f"Could not apply revision {self.version} as there appears to be a revision present," - f"none was expected" - ) - await conn.execute( - f"insert into {REVISION_TABLE} (version) values ($1)", self.version - ) - - # now we can actually apply the revision - await conn.execute(self.code) - - @classmethod - def revisions_from_dir(cls, file_path: Path) -> list["SchemaRevision"]: - """ - returns an ordered list of revisions with their dependencies resolved - """ - revisions = [] - for revision in sorted(REVISION_DIR.glob("*.sql")): - revision_content = revision.read_text("utf-8") - lines = revision_content.splitlines() - if not len(lines) > 2: - raise ValueError(f"Revision {revision} is invalid") - - if (version_match := REVISION_VERSION_RE.match(lines[0])) is None: - raise ValueError( - f"Invalid version string in revision {revision}, " - f"should be of form '-- revision: '" - ) - if (requires_match := REVISION_REQUIRES_RE.match(lines[1])) is None: - raise ValueError( - 
f"Invalid requires string in revision {revision}, " - f"should be of form '-- requires: '" - ) - - version = version_match["version"] - requires: Optional[str] = requires_match["version"] - - if requires == "null": - requires = None - - revisions.append( - cls( - revision, - revision_content, - version, - requires, - ) - ) - - if len(revisions) == 0: - return revisions - - # now for the purpose of sorting the revisions according to their dependencies - first_revision = next((x for x in revisions if x.requires is None), None) - if first_revision is None: - raise ValueError(f"Could not find a revision without any dependencies") - - # TODO: detect revision branches - sorted_revisions = [first_revision] - while len(sorted_revisions) < len(revisions): - curr_revision = sorted_revisions[-1] - next_revision = next( - (x for x in revisions if x.requires == curr_revision.version), None - ) - if next_revision is None: - raise ValueError( - f"Could not find the successor to revision {curr_revision.version}" - ) - sorted_revisions.append(next_revision) - - return sorted_revisions - - -async def reset_schema(db_pool: Pool): - async with db_pool.acquire() as conn: - async with conn.transaction(): - await conn.execute("drop schema public cascade") - await conn.execute("create schema public") - - -async def apply_revisions(db_pool: Pool): - revisions = SchemaRevision.revisions_from_dir(REVISION_DIR) - - async with db_pool.acquire() as conn: - async with conn.transaction(): - await conn.execute( - f"create table if not exists {REVISION_TABLE} (" - " version text not null primary key" - ")" - ) - - curr_revision = await conn.fetchval( - f"select version from {REVISION_TABLE} limit 1" - ) - - found = curr_revision is None - for revision in revisions: - if found: - await revision.apply(conn) - - if revision.version == curr_revision: - found = True - - if not found: - raise ValueError( - f"Unknown revision {curr_revision} present in database" - ) diff --git a/abrechnung/demo.py 
b/abrechnung/demo.py index 86575168..552e41e6 100644 --- a/abrechnung/demo.py +++ b/abrechnung/demo.py @@ -4,7 +4,7 @@ from abrechnung import subcommand from abrechnung.config import Config -from abrechnung.database.database import create_db_pool +from abrechnung.framework.database import create_db_pool class DemoCli(subcommand.SubCommand): @@ -29,7 +29,7 @@ async def handle_cleanup_command(self): deletion_threshold = datetime.now() - self.config.demo.wipe_interval - db_pool = await create_db_pool(self.config) + db_pool = await create_db_pool(self.config.database) async with db_pool.acquire() as conn: async with conn.transaction(): n_rows_groups = await conn.fetchval( diff --git a/abrechnung/framework/__init__.py b/abrechnung/framework/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/abrechnung/framework/async_utils.py b/abrechnung/framework/async_utils.py new file mode 100644 index 00000000..cf0f7ada --- /dev/null +++ b/abrechnung/framework/async_utils.py @@ -0,0 +1,44 @@ +import asyncio +import threading +import traceback +from functools import wraps + +from abrechnung.framework.database import create_db_pool + + +def with_db_pool(func): + @wraps(func) + async def wrapper(self, *args, **kwargs): + db_pool = await create_db_pool(self.config.database) + try: + return await func(self, *args, db_pool=db_pool, **kwargs) + finally: + await db_pool.close() + + return wrapper + + +class AsyncThread: + def __init__(self, coroutine_callable): + self.loop = asyncio.new_event_loop() + self.thread = threading.Thread(target=self.loop.run_forever) + self.callable = coroutine_callable + self.future = None + + def start(self): + async def runner(): + try: + await self.callable() + except: # pylint: disable=bare-except + traceback.print_exc() + + self.thread.start() + self.future = asyncio.run_coroutine_threadsafe(runner(), self.loop) + + def join(self): + self.thread.join() + + def stop(self): + if self.future: + self.future.cancel() + self.loop.stop() diff 
--git a/abrechnung/framework/database.py b/abrechnung/framework/database.py new file mode 100644 index 00000000..c4c59eda --- /dev/null +++ b/abrechnung/framework/database.py @@ -0,0 +1,426 @@ +import asyncio +import contextlib +import json +import logging +import os +import re +import shutil +import ssl +import tempfile +from pathlib import Path +from typing import Literal, Optional, Type, TypeVar, Union + +import asyncpg +from pydantic import BaseModel + +from abrechnung import util + +logger = logging.getLogger(__name__) + +REVISION_VERSION_RE = re.compile(r"^-- revision: (?P<version>\w+)$") +REVISION_REQUIRES_RE = re.compile(r"^-- requires: (?P<version>\w+)$") +REVISION_TABLE = "schema_revision" + + +class DatabaseConfig(BaseModel): + user: Optional[str] = None + password: Optional[str] = None + host: Optional[str] = None + port: Optional[int] = 5432 + dbname: str + require_ssl: bool = False + sslrootcert: Optional[str] = None + + +async def psql_attach(config: DatabaseConfig): + with contextlib.ExitStack() as exitstack: + env = dict(os.environ) + env["PGDATABASE"] = config.dbname + + if config.user is None: + if config.host is not None: + raise ValueError("database user is None, but host is set") + if config.password is not None: + raise ValueError("database user is None, but password is set") + else: + + def escape_colon(value: str): + return value.replace("\\", "\\\\").replace(":", "\\:") + + if ( + config.user is not None + and config.password is not None + and config.host is not None + ): + passfile = exitstack.enter_context(tempfile.NamedTemporaryFile("w")) + os.chmod(passfile.name, 0o600) + + passfile.write( + ":".join( + [ + escape_colon(config.host), + "*", + escape_colon(config.dbname), + escape_colon(config.user), + escape_colon(config.password), + ] + ) + ) + passfile.write("\n") + passfile.flush() + env["PGPASSFILE"] = passfile.name + env["PGHOST"] = config.host + env["PGUSER"] = config.user + + command = ["psql", "--variable", "ON_ERROR_STOP=1"] + if 
shutil.which("pgcli") is not None: + # if pgcli is installed, use that instead! + command = ["pgcli"] + + cwd = os.path.join(os.path.dirname(__file__)) + ret = await util.run_as_fg_process(command, env=env, cwd=cwd) + return ret + + +async def drop_all_views(conn: asyncpg.Connection, schema: str): + # TODO: we might have to find out the dependency order of the views if drop cascade does not work + result = await conn.fetch( + "select table_name from information_schema.views where table_schema = $1 and table_name !~ '^pg_';", + schema, + ) + views = [row["table_name"] for row in result] + if len(views) == 0: + return + + # we use drop if exists here as the cascade dropping might lead the view to being already dropped + # due to being a dependency of another view + drop_statements = "\n".join( + [f"drop view if exists {view} cascade;" for view in views] + ) + await conn.execute(drop_statements) + + +async def drop_all_triggers(conn: asyncpg.Connection, schema: str): + result = await conn.fetch( + "select distinct on (trigger_name, event_object_table) trigger_name, event_object_table " + "from information_schema.triggers where trigger_schema = $1", + schema, + ) + statements = [] + for row in result: + trigger_name = row["trigger_name"] + table = row["event_object_table"] + statements.append(f"drop trigger {trigger_name} on {table};") + + if len(statements) == 0: + return + + drop_statements = "\n".join(statements) + await conn.execute(drop_statements) + + +async def drop_all_functions(conn: asyncpg.Connection, schema: str): + result = await conn.fetch( + "select proname, prokind from pg_proc where pronamespace = $1::regnamespace;", + schema, + ) + drop_statements = [] + for row in result: + kind = row["prokind"].decode("utf-8") + name = row["proname"] + if kind == "f" or kind == "w": + drop_type = "function" + elif kind == "a": + drop_type = "aggregate" + elif kind == "p": + drop_type = "procedure" + else: + raise RuntimeError(f'Unknown postgres function type 
"{kind}"') + drop_statements.append(f"drop {drop_type} {name} cascade;") + + if len(drop_statements) == 0: + return + + drop_code = "\n".join(drop_statements) + await conn.execute(drop_code) + + +async def drop_all_constraints(conn: asyncpg.Connection, schema: str): + """drop all constraints in the given schema which are not unique, primary or foreign key constraints""" + result = await conn.fetch( + "select con.conname as constraint_name, rel.relname as table_name, con.contype as constraint_type " + "from pg_catalog.pg_constraint con " + " join pg_catalog.pg_namespace nsp on nsp.oid = con.connamespace " + " left join pg_catalog.pg_class rel on rel.oid = con.conrelid " + "where nsp.nspname = $1 and con.conname !~ '^pg_' " + " and con.contype != 'p' and con.contype != 'f' and con.contype != 'u';", + schema, + ) + constraints = [] + for row in result: + constraint_name = row["constraint_name"] + constraint_type = row["constraint_type"].decode("utf-8") + table_name = row["table_name"] + if constraint_type == "c": + constraints.append( + f"alter table {table_name} drop constraint {constraint_name};" + ) + elif constraint_type == "t": + constraints.append(f"drop constraint trigger {constraint_name};") + else: + raise RuntimeError( + f'Unknown constraint type "{constraint_type}" for constraint "{constraint_name}"' + ) + + if len(constraints) == 0: + return + + drop_statements = "\n".join(constraints) + await conn.execute(drop_statements) + + +async def drop_db_code(conn: asyncpg.Connection, schema: str): + await drop_all_triggers(conn, schema=schema) + await drop_all_functions(conn, schema=schema) + await drop_all_views(conn, schema=schema) + await drop_all_constraints(conn, schema=schema) + + +class SchemaRevision: + def __init__( + self, file_name: Path, code: str, version: str, requires: Optional[str] + ): + self.file_name = file_name + self.code = code + self.version = version + self.requires = requires + + async def apply(self, conn): + logger.info( + f"Applying 
revision {self.file_name.name} with version {self.version}" + ) + if self.requires: + version = await conn.fetchval( + f"update {REVISION_TABLE} set version = $1 where version = $2 returning version", + self.version, + self.requires, + ) + if version != self.version: + raise ValueError( + f"Found other revision present than {self.requires} which was required" + ) + else: + n_table = await conn.fetchval(f"select count(*) from {REVISION_TABLE}") + if n_table != 0: + raise ValueError( + f"Could not apply revision {self.version} as there appears to be a revision present," + f"none was expected" + ) + await conn.execute( + f"insert into {REVISION_TABLE} (version) values ($1)", self.version + ) + + # now we can actually apply the revision + try: + if ( + len(self.code.splitlines()) > 2 + ): # does not only consist of first two header comment lines + await conn.execute(self.code) + except asyncpg.exceptions.PostgresSyntaxError as exc: + exc_dict = exc.as_dict() + position = int(exc_dict["position"]) + message = exc_dict["message"] + lineno = self.code.count("\n", 0, position) + 1 + raise ValueError( + f"Syntax error when executing SQL code at character " + f"{position} ({self.file_name!s}:{lineno}): {message!r}" + ) from exc + + @classmethod + def revisions_from_dir(cls, revision_dir: Path) -> list["SchemaRevision"]: + """ + returns an ordered list of revisions with their dependencies resolved + """ + revisions = [] + for revision in sorted(revision_dir.glob("*.sql")): + revision_content = revision.read_text("utf-8") + lines = revision_content.splitlines() + if not len(lines) > 2: + logger.warning(f"Revision {revision} is empty") + + if (version_match := REVISION_VERSION_RE.match(lines[0])) is None: + raise ValueError( + f"Invalid version string in revision {revision}, " + f"should be of form '-- revision: '" + ) + if (requires_match := REVISION_REQUIRES_RE.match(lines[1])) is None: + raise ValueError( + f"Invalid requires string in revision {revision}, " + f"should be of 
form '-- requires: '" + ) + + version = version_match["version"] + requires: Optional[str] = requires_match["version"] + + if requires == "null": + requires = None + + revisions.append( + cls( + revision, + revision_content, + version, + requires, + ) + ) + + if len(revisions) == 0: + return revisions + + # now for the purpose of sorting the revisions according to their dependencies + first_revision = next((x for x in revisions if x.requires is None), None) + if first_revision is None: + raise ValueError("Could not find a revision without any dependencies") + + # TODO: detect revision branches + sorted_revisions = [first_revision] + while len(sorted_revisions) < len(revisions): + curr_revision = sorted_revisions[-1] + next_revision = next( + (x for x in revisions if x.requires == curr_revision.version), None + ) + if next_revision is None: + raise ValueError( + f"Could not find the successor to revision {curr_revision.version}" + ) + sorted_revisions.append(next_revision) + + return sorted_revisions + + +async def _apply_db_code(conn: asyncpg.Connection, code_path: Path): + for code_file in sorted(code_path.glob("*.sql")): + logger.info(f"Applying database code file {code_file.name}") + code = code_file.read_text("utf-8") + await conn.execute(code) + + +async def apply_revisions( + db_pool: asyncpg.Pool, + revision_path: Path, + code_path: Path, + until_revision: Optional[str] = None, +): + revisions = SchemaRevision.revisions_from_dir(revision_path) + + async with db_pool.acquire() as conn: + async with conn.transaction(): + await conn.execute( + f"create table if not exists {REVISION_TABLE} (version text not null primary key)" + ) + + curr_revision = await conn.fetchval( + f"select version from {REVISION_TABLE} limit 1" + ) + + await drop_db_code(conn=conn, schema="public") + # TODO: perform a dry run to check all revisions before doing anything + + found = curr_revision is None + for revision in revisions: + if found: + await revision.apply(conn) + + if 
revision.version == curr_revision: + found = True + + if until_revision is not None and revision.version == until_revision: + return + + if not found: + raise ValueError( + f"Unknown revision {curr_revision} present in database" + ) + + await _apply_db_code(conn=conn, code_path=code_path) + + +T = TypeVar("T", bound=BaseModel) + + +class Connection(asyncpg.Connection): + async def fetch_one(self, model: Type[T], query: str, *args) -> T: + result: Optional[asyncpg.Record] = await self.fetchrow(query, *args) + if result is None: + raise asyncpg.DataError("not found") + + return model.model_validate(dict(result)) + + async def fetch_maybe_one(self, model: Type[T], query: str, *args) -> Optional[T]: + result: Optional[asyncpg.Record] = await self.fetchrow(query, *args) + if result is None: + return None + + return model.model_validate(dict(result)) + + async def fetch_many(self, model: Type[T], query: str, *args) -> list[T]: + # TODO: also allow async cursor + results: list[asyncpg.Record] = await self.fetch(query, *args) + return [model.model_validate(dict(r)) for r in results] + + +async def init_connection(conn: Connection): + await conn.set_type_codec( + "json", encoder=json.dumps, decoder=json.loads, schema="pg_catalog" + ) + + +async def create_db_pool(cfg: DatabaseConfig, n_connections=10) -> asyncpg.Pool: + """ + get a connection pool to the database + """ + pool = None + + retry_counter = 0 + next_log_at_retry = 0 + while pool is None: + try: + sslctx: Optional[Union[ssl.SSLContext, Literal["verify-full", "prefer"]]] + if cfg.sslrootcert and cfg.require_ssl: + sslctx = ssl.create_default_context( + ssl.Purpose.SERVER_AUTH, + cafile=cfg.sslrootcert, + ) + sslctx.check_hostname = True + else: + sslctx = "verify-full" if cfg.require_ssl else "prefer" + + pool = await asyncpg.create_pool( + user=cfg.user, + password=cfg.password, + database=cfg.dbname, + host=cfg.host, + max_size=n_connections, + connection_class=Connection, + min_size=n_connections, + ssl=sslctx, 
+ # the introspection query of asyncpg (defined as introspection.INTRO_LOOKUP_TYPES) + # can take 1s with the jit. + # the introspection is triggered to create converters for unknown types, + # for example the integer[] (oid = 1007). + # see https://github.com/MagicStack/asyncpg/issues/530 + server_settings={"jit": "off"}, + init=init_connection, + ) + except Exception as e: # pylint: disable=broad-except + sleep_amount = 10 + if next_log_at_retry == retry_counter: + logger.warning( + f"Failed to create database pool: {e}, waiting {sleep_amount} seconds and trying again..." + ) + + retry_counter += 1 + next_log_at_retry = min(retry_counter * 2, 2**9) + await asyncio.sleep(sleep_amount) + + return pool diff --git a/abrechnung/framework/decorators.py b/abrechnung/framework/decorators.py new file mode 100644 index 00000000..178d7465 --- /dev/null +++ b/abrechnung/framework/decorators.py @@ -0,0 +1,26 @@ +from functools import wraps + + +def with_db_connection(func): + @wraps(func) + async def wrapper(self, **kwargs): + if "conn" in kwargs: + return await func(self, **kwargs) + + async with self.db_pool.acquire() as conn: + return await func(self, conn=conn, **kwargs) + + return wrapper + + +def with_db_transaction(func): + @wraps(func) + async def wrapper(self, **kwargs): + if "conn" in kwargs: + return await func(self, **kwargs) + + async with self.db_pool.acquire() as conn: + async with conn.transaction(): + return await func(self, conn=conn, **kwargs) + + return wrapper diff --git a/abrechnung/http/cli.py b/abrechnung/http/cli.py index c83d7d3a..f474a755 100644 --- a/abrechnung/http/cli.py +++ b/abrechnung/http/cli.py @@ -8,13 +8,13 @@ from starlette.exceptions import HTTPException as StarletteHTTPException from abrechnung import __version__ -from abrechnung.application import NotFoundError, InvalidCommand from abrechnung.application.accounts import AccountService from abrechnung.application.groups import GroupService from abrechnung.application.transactions import 
TransactionService from abrechnung.application.users import UserService from abrechnung.config import Config -from abrechnung.database.database import create_db_pool +from abrechnung.core.errors import NotFoundError, InvalidCommand +from abrechnung.framework.database import create_db_pool from abrechnung.subcommand import SubCommand from .middleware import ContextMiddleware from .routers import transactions, groups, auth, accounts, common, websocket @@ -104,7 +104,7 @@ async def _http_exception_handler( return self._format_error_message(exc.status_code, exc.detail) async def _setup(self): - self.db_pool = await create_db_pool(self.cfg) + self.db_pool = await create_db_pool(self.cfg.database) self.user_service = UserService(db_pool=self.db_pool, config=self.cfg) self.transaction_service = TransactionService( db_pool=self.db_pool, config=self.cfg diff --git a/abrechnung/http/routers/auth.py b/abrechnung/http/routers/auth.py index ec02ff0b..ecb51570 100644 --- a/abrechnung/http/routers/auth.py +++ b/abrechnung/http/routers/auth.py @@ -4,7 +4,7 @@ from fastapi.security import OAuth2PasswordRequestForm from pydantic import BaseModel, EmailStr -from abrechnung.application import InvalidCommand +from abrechnung.core.errors import InvalidCommand from abrechnung.application.users import InvalidPassword from abrechnung.application.users import UserService from abrechnung.config import Config diff --git a/abrechnung/http/routers/websocket.py b/abrechnung/http/routers/websocket.py index 18982084..5f879cd7 100644 --- a/abrechnung/http/routers/websocket.py +++ b/abrechnung/http/routers/websocket.py @@ -10,7 +10,6 @@ from abrechnung.application.users import UserService from abrechnung.config import Config -from abrechnung.database.database import create_db_connection from abrechnung.http.utils import encode_json router = APIRouter( @@ -83,7 +82,7 @@ def __init__(self, config: Config): async def initialize(self, db_pool: asyncpg.Pool): self.db_pool = db_pool - self.connection = 
await create_db_connection(self.config) + self.connection = await self.db_pool.acquire() await self.connection.set_type_codec( "json", encoder=json.dumps, decoder=json.loads, schema="pg_catalog" ) diff --git a/abrechnung/mailer.py b/abrechnung/mailer.py index 3870f3c8..dd4e883e 100644 --- a/abrechnung/mailer.py +++ b/abrechnung/mailer.py @@ -10,7 +10,7 @@ from . import subcommand from .config import Config -from .database.database import create_db_connection +from abrechnung.framework.database import create_db_pool class MailerCli(subcommand.SubCommand): @@ -44,7 +44,8 @@ async def run(self): if self.events is None: raise RuntimeError("something unexpected happened, self.events is None") - self.psql = await create_db_connection(self.config) + db_pool = await create_db_pool(self.config.database, n_connections=1) + self.psql = await db_pool.acquire() self.psql.add_termination_listener(self.terminate_callback) self.psql.add_log_listener(self.log_callback) await self.psql.add_listener("mailer", self.notification_callback) @@ -66,6 +67,7 @@ async def run(self): await self.psql.remove_listener("mailer", self.notification_callback) await self.psql.close() + await db_pool.close() def get_mailer_instance(self): mode = self.config.email.mode diff --git a/tests/common.py b/tests/common.py index 5680a4bc..e7d4443b 100644 --- a/tests/common.py +++ b/tests/common.py @@ -1,4 +1,4 @@ -# pylint: disable=attribute-defined-outside-init +# pylint: disable=attribute-defined-outside-init,missing-kwoa import asyncio import logging import os @@ -9,7 +9,7 @@ from abrechnung.application.users import UserService from abrechnung.config import Config -from abrechnung.database import revisions +from abrechnung.database.migrations import reset_schema, apply_revisions from abrechnung.domain.users import User lock = asyncio.Lock() @@ -61,8 +61,8 @@ async def get_test_db() -> Pool: max_size=5, ) - await revisions.reset_schema(pool) - await revisions.apply_revisions(pool) + await reset_schema(pool) 
+ await apply_revisions(pool) return pool diff --git a/tests/http_tests/common.py b/tests/http_tests/common.py index 84a532b7..fdc7e4b0 100644 --- a/tests/http_tests/common.py +++ b/tests/http_tests/common.py @@ -33,7 +33,7 @@ async def asyncSetUp(self) -> None: "user1", "user1@email.stuff" ) _, session_id, _ = await self.user_service.login_user( - "user1", password=password, session_name="session1" + username="user1", password=password, session_name="session1" ) _, _, self.session_token = await self.user_service.login_user( username=self.test_user.username, password=password, session_name="foobar" diff --git a/tests/http_tests/test_auth.py b/tests/http_tests/test_auth.py index cb6794c9..6a3c98c8 100644 --- a/tests/http_tests/test_auth.py +++ b/tests/http_tests/test_auth.py @@ -109,7 +109,7 @@ async def test_login_user(self): async def test_change_password(self): user, password = await self._create_test_user("user", "user@email.stuff") _, session_id, session_token = await self.user_service.login_user( - "user", password=password, session_name="session1" + username="user", password=password, session_name="session1" ) token = await self.user_service.get_access_token_from_session_token( session_token diff --git a/tests/http_tests/test_groups.py b/tests/http_tests/test_groups.py index a18b63a7..971cb0d2 100644 --- a/tests/http_tests/test_groups.py +++ b/tests/http_tests/test_groups.py @@ -73,10 +73,10 @@ async def test_create_group(self): async def test_delete_group(self): user2, user2_password = await self._create_test_user("user2", "user2@test.com") _, session_id, session_token = await self.user_service.login_user( - "user2", password=user2_password, session_name="foobar" + username="user2", password=user2_password, session_name="foobar" ) user2_token = await self.user_service.get_access_token_from_session_token( - session_token + session_token=session_token ) group_id = await self.group_service.create_group( user=self.test_user, @@ -441,10 +441,10 @@ async def 
test_invites(self): user2, password = await self._create_test_user("user", "email2@email.stuff") _, session_id, session_token = await self.user_service.login_user( - "user", password=password, session_name="session1" + username="user", password=password, session_name="session1" ) jwt_token = await self.user_service.get_access_token_from_session_token( - session_token + session_token=session_token ) resp = await self.client.post( "/api/v1/groups/preview", diff --git a/tests/test_auth.py b/tests/test_auth.py index 081d6de1..7cdc058b 100644 --- a/tests/test_auth.py +++ b/tests/test_auth.py @@ -1,4 +1,4 @@ -# pylint: disable=attribute-defined-outside-init +# pylint: disable=attribute-defined-outside-init,missing-kwoa from copy import deepcopy from datetime import datetime, timedelta diff --git a/tests/test_mailer.py b/tests/test_mailer.py index 4848e156..cabb7923 100644 --- a/tests/test_mailer.py +++ b/tests/test_mailer.py @@ -1,4 +1,4 @@ -# pylint: disable=attribute-defined-outside-init +# pylint: disable=attribute-defined-outside-init,missing-kwoa import asyncio from dataclasses import dataclass from typing import Optional @@ -70,7 +70,9 @@ async def asyncTearDown(self) -> None: async def test_registration_mail_delivery(self): user_email = "user@email.com" - await self.user_service.register_user("user1", user_email, "password") + await self.user_service.register_user( + username="user1", email=user_email, password="password" + ) await asyncio.sleep(0.5) mail: smtp.Envelope = self.smtp_handler.mail_queue.get_nowait() diff --git a/tests/test_transaction_logic.py b/tests/test_transaction_logic.py index 8d609354..66c47c3c 100644 --- a/tests/test_transaction_logic.py +++ b/tests/test_transaction_logic.py @@ -1,4 +1,4 @@ -# pylint: disable=attribute-defined-outside-init +# pylint: disable=attribute-defined-outside-init,missing-kwoa import os from datetime import datetime diff --git a/tools/create_revision.py b/tools/create_revision.py index 3a1e367a..49eefd4a 100755 --- 
a/tools/create_revision.py +++ b/tools/create_revision.py @@ -3,14 +3,15 @@ import argparse import os -from abrechnung.database.revisions import SchemaRevision, REVISION_DIR +from abrechnung.database.migrations import REVISION_PATH +from abrechnung.framework.database import SchemaRevision def main(name: str): - revisions = SchemaRevision.revisions_from_dir(REVISION_DIR) + revisions = SchemaRevision.revisions_from_dir(REVISION_PATH) filename = f"{str(len(revisions) + 1).zfill(4)}_{name}.sql" new_revision_version = os.urandom(4).hex() - file_path = REVISION_DIR / filename + file_path = REVISION_PATH / filename with file_path.open("w+") as f: f.write( f"-- revision: {new_revision_version}\n" diff --git a/tools/generate_dummy_data.py b/tools/generate_dummy_data.py index caa269f3..6ecc3372 100644 --- a/tools/generate_dummy_data.py +++ b/tools/generate_dummy_data.py @@ -1,3 +1,4 @@ +# pylint: disable=missing-kwoa import argparse import asyncio import random @@ -9,7 +10,7 @@ from abrechnung.application.transactions import TransactionService from abrechnung.application.users import UserService from abrechnung.config import read_config -from abrechnung.database.database import create_db_pool +from abrechnung.framework.database import create_db_pool def random_date() -> date: @@ -28,7 +29,7 @@ async def main( ): config = read_config(Path(config_path)) - db_pool = await create_db_pool(config) + db_pool = await create_db_pool(config.database) user_service = UserService(db_pool, config) group_service = GroupService(db_pool, config) account_service = AccountService(db_pool, config)